#!/usr/bin/env python
# coding: utf-8
'''
_ooOoo_
o8888888o
88" . "88
(| -_- |)
O\ = /O
____/`---'\____
. ' \\| |// `.
/ \\||| : |||// \
/ _||||| -:- |||||- \
| | \\\ - /// | |
| \_| ''\---/'' | |
\ .-\__ `-` ___/-. /
___`. .' /--.--\ `. . __
."" '< `.___\_<|>_/___.' >'"".
| | : `- \`.;`\ _ /`;.`/ - ` : | |
\ \ `-. \_ __\ /__ _/ .-` / /
======`-.____`-.___\_____/___.-`____.-'======
`=---='
.............................................
佛祖保佑 永无BUG
'''
import re
import bs4
import requests
import urllib3
from bs4 import BeautifulSoup
import sys
import os
import shutil
urllib3.disable_warnings()
print("自动获取当日网课链接程序 Ver:开发者版")
print("使用前请确保你会使用,否则请读源码自行了解使用方法或咨询开发者")
print("BuildWith:Python Author:2844829687@qq.com")
print("")
# Update check, currently disabled by wrapping it in a string literal:
'''
try:
    print("正在检查更新...")
    update_url = "https://yellow-stone.tpddns.cn:8000/.%E7%BD%91%E8%AF%BE%E9%93%BE%E6%8E%A5%E8%8E%B7%E5%8F%96%E5%99%A8/latest.html"
    response = requests.get(url=update_url, verify=False)
    ver_latest = response.content.decode('utf-8')
    ver_code = response.status_code  # reuse the response instead of requesting twice
    ver = "Ver999"
    # print(ver_code)
    if ver_code == 404:
        print("无法连接服务器,请检查网络或联系开发者")
        print("")
    elif ver >= ver_latest:  # lexicographic comparison of version strings
        print("当前已是最新版:", ver_latest)
        print("")
        # print("")
    elif ver < ver_latest:
        print("当前有可用更新:", ver_latest)
        print("")
        print("正在下载最新版本:", ver_latest)
        download_url = 'https://yellow-stone.tpddns.cn:8000/.%E7%BD%91%E8%AF%BE%E9%93%BE%E6%8E%A5%E8%8E%B7%E5%8F%96%E5%99%A8/%E7%BD%91%E8%AF%BE%E9%93%BE%E6%8E%A5%E8%8E%B7%E5%8F%96%E5%99%A8_' + ver_latest + '.exe'
        download = requests.get(download_url, verify=False)
        with open("网课链接获取器_" + ver_latest + ".exe", "wb") as code:
            code.write(download.content)
        print("最新版:", ver_latest, "已下载")
        print("")
    else:
        print("检查更新异常,请向开发者反馈")
        print("")
except:
    print("检查更新失败,未知错误,请联系开发者")
    print("")
'''
page_url = "https://mp.weixin.qq.com/s/XaGTbzYT1VuUMB6y3V-_nQ"  # WeChat article listing today's lessons as QR-code images
cli_url = "https://cli.im/Api/Browser/deqr"  # cli.im QR-code decoding endpoint
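# Overall flow (summary of the code below): fetch the WeChat article, decode each
# lesson's QR-code image via cli.im to obtain the lesson page URL, read the lesson
# title from that page, then fill the %today% / %title% / %titleN% placeholders in
# the HTML templates under lessons/ to build one page per lesson plus index.htm.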
def qrcode(img_url):
    # Decode a QR-code image via the cli.im API and return the URL it encodes.
    cli_data = {"data": img_url}
    # print(cli_data)
    response = requests.post(url=cli_url, data=cli_data)  # , verify=False
    # print(response.content.decode('utf-8'))
    qrcode_result = response.content.decode('utf-8')
    match_rule = re.compile(r'"RawData":"(.*?)"', re.S)
    end_page_url_raw_list = re.findall(match_rule, qrcode_result)
    # print(end_page_url_raw_list)
    end_page_url_raw = end_page_url_raw_list[0]
    end_page_url = end_page_url_raw.replace('\\/', '/')  # un-escape the JSON slashes
    # print(end_page_url)
    return end_page_url
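# Usage sketch (the image URL here is hypothetical; real ones are read from the
# WeChat article's <img data-src> attributes in the loop below):
#   lesson_url = qrcode("https://mmbiz.qpic.cn/example_qrcode.png")
#   print(lesson_url)  # the page URL encoded in the QR image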
def style(text):
    # Normalize a lesson title: turn Chinese numerals into Arabic digits and
    # insert a space between the subject name and "第".
    text = text.replace("第一", "第1")
    text = text.replace("第二", "第2")
    text = text.replace("第三", "第3")
    text = text.replace("第四", "第4")
    text = text.replace("第五", "第5")
    text = text.replace("第六", "第6")
    text = text.replace("第七", "第7")
    text = text.replace("第八", "第8")
    text = text.replace("第九", "第9")
    text = text.replace("政治第", "政治 第")
    text = text.replace("历史第", "历史 第")
    text = text.replace("地理第", "地理 第")
    text = text.replace("生物第", "生物 第")
    text = text.replace("数学第", "数学 第")
    text = text.replace("语文第", "语文 第")
    text = text.replace("英语第", "英语 第")
    text = text.replace("化学第", "化学 第")
    text = text.replace("物理第", "物理 第")
    return text
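# Example: style("数学第一课") returns "数学 第1课"
# ("第一" -> "第1", then "数学第" -> "数学 第").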
def replace(path, old, new):
    # Read `path`, replace every occurrence of `old` with `new`, and write the
    # result to a temporary file named 'new' (the caller renames it afterwards).
    with open(path, 'r+', encoding='utf-8') as ca:   # open the source file
        wc = ca.readlines()                          # read its lines into a list
    with open('new', 'w+', encoding='utf-8') as cb:  # open the temporary output file
        for i in wc:
            cb.write(i.replace(old, new))            # replace() is a no-op when `old` is absent
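# Usage sketch (mirrors the calls in the main loop below):
#   replace('lessons/源/源.html', '%today%', today)  # writes the substituted copy to 'new'
#   os.rename('new', 'lessons/源/今日源.html')        # then swap it into place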
today = input("累计日数:")  # cumulative day count; substituted for %today% in the templates
print ("")
response = requests.get(page_url)  # fetch the WeChat article
classes_html = response.content.decode('utf-8')
# print(response.content.decode('utf-8'))
classes_html_clear = BeautifulSoup(classes_html, 'lxml')  # parse with bs4
# print(classes_html_clear)
classes_qrcodes = classes_html_clear.find('div', id="js_content")
# print(classes_qrcodes)
all_img_tag = classes_qrcodes.find_all('img')  # the QR-code images, one per lesson
print("今日课程列表:")
print("")
# Remove leftovers from a previous run, then reset index.htm from its template.
try:
    os.remove("lessons/源/今日源.html")
    os.remove("lessons/index.htm")
except OSError:
    pass
shutil.copyfile('lessons/源/源index.htm', 'lessons/index.htm')
for i in range(7):
    # all_img_tag[0] is skipped; the lesson QR codes start at index 1.
    qrcode_img_url = all_img_tag[i + 1]['data-src']
    # print(qrcode_img_url)
    real_url = qrcode(qrcode_img_url)  # decode the QR image once to get the lesson page URL
    # print(real_url)
    response = requests.get(url=real_url)
    video_html = response.content.decode('utf-8')
    title_rule = re.compile(r'<title>(.*?)</title>', re.S)
    title_list = re.findall(title_rule, video_html)
    title = style(title_list[0])
    print(title, ':', real_url)
    # os.remove("lessons/源/今日源.html")
    replace('lessons/源/源.html', '%today%', today)
    os.rename('new', "lessons/源/今日源.html")      # swap in the substituted template
    replace('lessons/源/今日源.html', '%title%', title)
    os.rename('new', "lessons/" + title + ".html")  # one generated page per lesson
    os.remove("lessons/源/今日源.html")
    replace('lessons/index.htm', "%title" + str(i + 1) + "%", title)
    os.remove("lessons/index.htm")
    os.rename('new', "lessons/index.htm")           # swap in the updated index
replace('lessons/index.htm', "%today%", today)
os.remove("lessons/index.htm")
os.rename('new', "lessons/index.htm")               # final index with today's count filled in