COVID-19-KSH.zip
大小:2.06MB
价格:26积分
下载量:0
评分:
5.0
上传者:hhtt19820919
更新日期:2025-09-22

新冠疫情数据采集与可视化源码(Django + ECharts 项目,含爬虫与数据库入库)

资源文件列表(大概)

文件名
大小
demo/
-
demo/__init__.py
-
demo/__pycache__/
-
demo/__pycache__/__init__.cpython-36.pyc
186B
demo/__pycache__/__init__.cpython-37.pyc
148B
demo/__pycache__/admin.cpython-36.pyc
227B
demo/__pycache__/admin.cpython-37.pyc
189B
demo/__pycache__/ksh.cpython-36.pyc
6.66KB
demo/__pycache__/ksh.cpython-37.pyc
6.61KB
demo/__pycache__/models.cpython-36.pyc
5.08KB
demo/__pycache__/models.cpython-37.pyc
5.04KB
demo/__pycache__/pachong.cpython-37.pyc
8.44KB
demo/__pycache__/serializer.cpython-36.pyc
1.3KB
demo/__pycache__/serializer.cpython-37.pyc
1.26KB
demo/__pycache__/urls.cpython-36.pyc
1.35KB
demo/__pycache__/urls.cpython-37.pyc
1.31KB
demo/__pycache__/views.cpython-36.pyc
4.15KB
demo/__pycache__/views.cpython-37.pyc
4.11KB
demo/admin.py
66B
demo/apps.py
88B
demo/csv/
-
demo/csv/丁香园世界疫情.csv
11.83KB
demo/csv/丁香园国内每日疫情情况.csv
38.7KB
demo/csv/丁香园国内疫情.csv
15.49KB
demo/csv/丁香园每日世界疫情数据.csv
52.42KB
demo/csv/国内疫情数据.csv
1.92MB
demo/csv/近2个月新增情况.csv
3.34KB
demo/csv/近2个月累计情况.csv
5.46KB
demo/csv/近31省市区现有本土病例.csv
1.92KB
demo/data/
-
demo/data/UpdateTime.json
71B
demo/data/中国疫情.json
459.86KB
demo/data/中风险.json
6.76KB
demo/data/实时热点.json
9.1KB
demo/data/风险地区.json
4.2KB
demo/data/高风险.json
1.19KB
demo/ksh.py
9.36KB
demo/migrations/
-
demo/migrations/__init__.py
-
demo/migrations/__pycache__/
-
demo/migrations/__pycache__/__init__.cpython-36.pyc
197B
demo/migrations/__pycache__/__init__.cpython-37.pyc
205B
demo/models.py
6.91KB
demo/pachong.py
10.94KB
demo/serializer.py
956B
demo/tests.py
63B
demo/urls.py
1.44KB
demo/views.py
2.59KB
keshihua/
-
keshihua/__init__.py
91B
keshihua/__pycache__/
-
keshihua/__pycache__/__init__.cpython-36.pyc
310B
keshihua/__pycache__/__init__.cpython-37.pyc
252B
keshihua/__pycache__/celery.cpython-37.pyc
548B
keshihua/__pycache__/settings.cpython-36.pyc
2.85KB
keshihua/__pycache__/settings.cpython-37.pyc
2.81KB
keshihua/__pycache__/urls.cpython-36.pyc
1.21KB
keshihua/__pycache__/urls.cpython-37.pyc
1.17KB
keshihua/__pycache__/wsgi.cpython-37.pyc
557B
keshihua/celery.py
469B
keshihua/settings.py
4.4KB
keshihua/urls.py
1.03KB
keshihua/wsgi.py
409B
manage.py
555B
static/
-
static/css/
-
static/css/2.css
3.04KB
static/css/ssbb.css
2.87KB
static/font/
-
static/font/LcdD.ttf
53.62KB
static/js/
-
static/js/chalk.js
4.61KB
static/js/china.js
59.29KB
static/js/echarts.min.js
762.41KB
static/js/index.js
8.61KB
static/js/jquery.min.js
84.33KB
static/video/
-
static/video/1.mp4
1.45MB
templates/
-
templates/index.html
4.38KB

资源内容介绍

流感检测源码,深度学习项目【项目资源】:包含前端、后端、移动开发、操作系统、人工智能、物联网、信息化管理、数据库、硬件开发、大数据、课程资源、音视频、网站开发等各种技术项目的源码。包括STM32、ESP8266、PHP、QT、Linux、iOS、C++、Java、python、web、C#、EDA、proteus、RTOS等项目的源码。【项目质量】:所有源码都经过严格测试,可以直接运行。功能在确认正常工作后才上传。【适用人群】:适用于希望学习不同技术领域的小白或进阶学习者。可作为毕设项目、课程设计、大作业、工程实训或初期项目立项。【附加价值】:项目具有较高的学习借鉴价值,也可直接拿来修改复刻。对于有一定基础或热衷于研究的人来说,可以在这些基础代码上进行修改和扩展,实现其他功能。【沟通交流】:有任何使用上的问题,欢迎随时与博主沟通,博主会及时解答。鼓励下载和使用,并欢迎大家互相学习,共同进步。
"""COVID-19 data crawler.

Fetches national/provincial epidemic statistics from several public
endpoints (Tencent news API, DXY/丁香园 JSON feeds, Baidu opendata) and
persists them as CSV files under ``demo/csv``, JSON files under
``demo/data``, and MySQL tables via SQLAlchemy + pandas ``to_sql``.

NOTE(review): this block was reconstructed from a whitespace-mangled
extraction; statement order follows the flattened source. Placement of
the post-loop save statements in ``parse()`` is inferred from semantics
— confirm against the original ``demo/pachong.py``.
"""
import datetime
import json
import re

import pandas as pd
import requests
from bs4 import BeautifulSoup
from sqlalchemy import create_engine

# Browser-like UA: the endpoints reject default python-requests agents.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.81 Safari/537.36'
}
# Shared MySQL engine; pandas.to_sql and raw ALTER TABLE both go through it.
con = create_engine("mysql+mysqlconnector://root:147123@localhost:3306/ksh")


def lishishuju():
    """Historical per-province daily data -> CSV + MySQL table ``gnlssj``.

    Reads the previously-crawled province CSV to obtain the province
    list, then queries the Tencent daily-list API once per province.
    """
    df = pd.read_csv('demo/csv/丁香园国内疫情.csv', engine='c')
    df = df.sort_values('累计确诊', ascending=False)          # sort by cumulative confirmed, desc
    df = df.drop_duplicates('省份', keep='first')             # one row per province (largest count)
    # Strip administrative suffixes so names match the API's province parameter.
    df['省份'] = (df['省份'].str.strip('省').str.strip('市').str.strip('壮族自治区')
                  .str.strip('自治区').str.strip('回族自治区').str.strip('维吾尔自治区'))

    url = 'https://api.inews.qq.com/newsqa/v1/query/pubished/daily/list?province=湖北'
    data = json.loads(requests.get(url, headers=headers).content.decode())['data']
    for province in df['省份']:
        if province != '湖北':  # Hubei was already fetched above as the seed
            url = 'https://api.inews.qq.com/newsqa/v1/query/pubished/daily/list?province=' + province
            # Consistency fix: pass headers here too (original omitted them).
            extra = json.loads(requests.get(url, headers=headers).content.decode())['data']
            data = data + extra

    def funx(x):
        """Pad a 3-char 'M.D' date fragment to 4 chars (e.g. '1.5' -> '1.50')."""
        if len(x) == 3:
            x = x + '0'
        return x

    df = pd.DataFrame(data)
    # Build a 'YYYY.MMDD'-style string, then normalize to 'YYYY-MM' via DatetimeIndex.
    x = df['year'].astype('str') + '.'
    y = df['date'].astype('str')
    y = y.apply(funx)
    df['dateId'] = x + y
    df['dateId'] = pd.DatetimeIndex(df['dateId']).astype('str').str[:7]
    df.to_csv('demo/csv/国内疫情数据.csv', index=False, encoding='utf-8-sig')
    df.to_sql('gnlssj', if_exists='replace', con=con, index=False)
    # Add an auto-increment primary key (to_sql creates the table without one).
    con.execute('ALTER TABLE gnlssj ADD id INT(16) NOT NULL PRIMARY KEY AUTO_INCREMENT FIRST;')


def yqday():
    """Today's situation for China -> JSON, CSV and MySQL table ``bentuxianyou31``."""
    url = 'https://api.inews.qq.com/newsqa/v1/query/inner/publish/modules/list?modules=statisGradeCityDetail,diseaseh5Shelf'
    data = json.loads(requests.post(url=url, headers=headers).content.decode())['data']
    rows = [
        [item['province'] + item['city'], item['confirmAdd'], item['nowConfirm'], item['grade']]
        for item in data['statisGradeCityDetail']
    ]
    columns = ['address', 'addqz', 'xyqz', 'fxqy']
    df = pd.DataFrame(rows, columns=columns)
    df.to_sql('bentuxianyou31', if_exists='replace', con=con, index=False)
    con.execute('ALTER TABLE bentuxianyou31 ADD id INT(16) NOT NULL PRIMARY KEY AUTO_INCREMENT FIRST;')
    with open('demo/data/中国疫情.json', 'w', encoding='utf-8') as f:
        f.write(json.dumps(data, ensure_ascii=False, indent=4))
    pd.DataFrame(rows).to_csv('demo/csv/近31省市区现有本土病例.csv',
                              index=False, encoding='utf-8', header=columns)


def yqveryday():
    """Daily national series (DXY feed) -> CSV + MySQL table ``mrsj``."""
    url = 'https://file1.dxycdn.com/2021/1228/171/2851867762198723253-135.json?t=27344362'
    head_data = requests.get(url=url, headers=headers).content
    data = json.loads(head_data)['data']
    pd.DataFrame(data).to_csv('demo/csv/丁香园国内每日疫情情况.csv', encoding='utf-8', index=False)
    pd.DataFrame(data).to_sql('mrsj', if_exists='replace', con=con, index=False)
    con.execute('ALTER TABLE mrsj ADD id INT(16) NOT NULL PRIMARY KEY AUTO_INCREMENT FIRST;')


def ssrd():
    """Real-time hot topics (Baidu opendata JSONP) -> JSON + MySQL table ``ssrd``."""
    url = 'https://opendata.baidu.com/data/inner?tn=reserved_all_res_tn&dspName=iphone&from_sf=1&dsp=iphone&resource_id=28565&alr=1&query=%E5%9B%BD%E5%86%85%E6%96%B0%E5%9E%8B%E8%82%BA%E7%82%8E%E6%9C%80%E6%96%B0%E5%8A%A8%E6%80%81&cb=jsonp_1642854207390_27502'
    # Response is JSONP: strip the 'callback(' prefix and trailing ')' before parsing.
    raw = requests.get(url=url, headers=headers).content.decode()
    data = json.loads(raw.split('(')[1][:-1])['Result'][0]['DisplayData']['result']['items']
    with open('demo/data/实时热点.json', 'w', encoding='utf-8') as f:
        f.write(json.dumps(data, ensure_ascii=False, indent=4))
    pd.DataFrame(data).to_sql('ssrd', if_exists='replace', con=con, index=False)
    con.execute('ALTER TABLE ssrd ADD id INT(16) NOT NULL PRIMARY KEY AUTO_INCREMENT FIRST;')


def parse():
    """Current per-province/city figures scraped from the DXY page.

    Saves English-keyed records to MySQL table ``xyyq`` and
    Chinese-keyed records to ``demo/csv/丁香园国内疫情.csv``.
    """
    data = []   # English column names, for the DB
    data1 = []  # Chinese column names, for the CSV
    url = 'https://ncov.dxy.cn/ncovh5/view/pneumonia'
    head_data = requests.get(url=url, headers=headers).content
    soup = BeautifulSoup(head_data, 'html.parser')
    script = soup.find('script', {'id': 'getAreaStat'}).text
    # The page embeds the stats as a JS assignment; pull the payload, then
    # split it into one chunk per province.
    payload = re.findall(r'try \{ window.getAreaStat = (.*)}catch', script, re.S)[0]
    chunks = re.findall(r'\{(.*?)]}', payload)
    for chunk in chunks:
        provinceName = re.findall(r'"provinceName":"(.*?)"', chunk)
        cityName = re.findall(r'"cityName":"(.*?)"', chunk)
        if len(cityName) == 0:
            # Province has no city breakdown: record the province itself.
            cityName = provinceName
        else:
            # Prepend the province so index 0 is the province-level row.
            cityName.insert(0, provinceName[0])
        currentConfirmedCount = re.findall(r'"currentConfirmedCount":(.*?),', chunk)
        confirmedCount = re.findall(r'"confirmedCount":(.*?),', chunk)
        curedCount = re.findall(r'"curedCount":(.*?),', chunk)
        deadCount = re.findall(r'"deadCount":(.*?),', chunk)
        # Single pass (original used two identical index loops that also
        # shadowed the outer loop variable).
        for idx in range(len(currentConfirmedCount)):
            data.append({
                'provinceName': cityName[0],
                'cityName': cityName[idx],
                'currentConfirmedCount': currentConfirmedCount[idx],
                'confirmedCount': confirmedCount[idx],
                'curedCount': curedCount[idx],
                'deadCount': deadCount[idx],
            })
            data1.append({
                '省份': cityName[0],
                '城市': cityName[idx],
                '现有确诊': currentConfirmedCount[idx],
                '累计确诊': confirmedCount[idx],
                '累计治愈': curedCount[idx],
                '累计死亡': deadCount[idx],
            })
    pd.DataFrame(data1).to_csv('demo/csv/丁香园国内疫情.csv', encoding='utf-8', index=False)
    pd.DataFrame(data).to_sql('xyyq', if_exists='replace', con=con, index=False)
    con.execute('ALTER TABLE xyyq ADD id INT(16) NOT NULL PRIMARY KEY AUTO_INCREMENT FIRST;')


# NOTE(review): the original source continued with a risk-area function
# fxdq() (fetching https://file1.dxycdn.com/2021/0202/196/1680100273140422643-135.json
# and grouping 高风险/中风险 districts), but it was truncated mid-expression
# by the extraction. Restore it from the upstream demo/pachong.py rather
# than guessing at the missing tail.

用户评论 (0)

发表评论

captcha