# NOTE(review): removed stray web-page boilerplate ("code pull complete, page will refresh") — non-Python scrape artifact.
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @FileName :cnnic_report.py
# @Time :2023/11/9
# @Author :CL
# @email :1037654919@qq.com
#https://www.cnnic.net.cn/6/86/88/index1.html
import requests
from bs4 import BeautifulSoup
from utils import mongo_manager,get_kuai_proxy
# Mongo collection handle: scraped CNNIC reports are stored in public_data.cnnic_report.
cnnic_report = mongo_manager("cnnic_report",db='public_data')
# Browser-like request headers copied from a real Chrome session so the CNNIC
# site serves the normal HTML pages instead of blocking the crawler.
headers = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
    "Accept-Language": "zh-CN,zh;q=0.9",
    "Cache-Control": "no-cache",
    "Connection": "keep-alive",
    "Pragma": "no-cache",
    "Referer": "https://www.cnnic.net.cn/6/86/index.html",
    "Sec-Fetch-Dest": "document",
    "Sec-Fetch-Mode": "navigate",
    "Sec-Fetch-Site": "same-origin",
    "Sec-Fetch-User": "?1",
    "Upgrade-Insecure-Requests": "1",
    "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
    "sec-ch-ua": "\"Not.A/Brand\";v=\"8\", \"Chromium\";v=\"114\", \"Google Chrome\";v=\"114\"",
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-platform": "\"Linux\""
}
# Session cookies captured from the same browser session; the 'identity' value
# presumably expires eventually — TODO confirm and refresh if requests start failing.
cookies = {
    "browsertype": "chrome",
    "domain": "www.cnnic.cn",
    "filetype": "0",
    "nodeid": "1",
    "newsid": "%24%7BCONTENTID%7D",
    "identity": "5a16d9396eb37476bee1070c3e59d4b1"
}
def get_cnnic_88(url = "https://www.cnnic.net.cn/6/86/88/index.html" ):
    """Fetch one CNNIC report-listing page and return its article links.

    Parameters:
        url: listing-page URL (defaults to the /6/86/88 report index).

    Returns:
        list[dict]: one {'name': <link text>, 'href': <absolute URL>} per
        <li> entry found inside the page's 'p_news-box' container.

    Raises:
        requests.RequestException on network failure; AttributeError if the
        expected 'p_news-box' container is absent from the page.
    """
    response = requests.get(url, headers=headers, cookies=cookies)
    # Force utf-8 before parsing; the server's declared charset is unreliable.
    response.encoding = "utf-8"
    # Removed: redundant local `from bs4 import BeautifulSoup` (imported at module
    # top) and leftover debug `print(response)`.
    soup = BeautifulSoup(response.text, 'lxml')
    items = soup.find('div', class_='p_news-box').find_all('li')
    lists = []
    for item in items:
        anchor = item.find('a')
        # Guard: an <li> without an <a>, or an <a> without href, previously
        # crashed with AttributeError/TypeError; skip such entries instead.
        if anchor is None or anchor.get('href') is None:
            continue
        lists.append({'name': anchor.get_text(),
                      'href': 'https://www.cnnic.net.cn' + anchor.get('href')})
    return lists
def get_urllist(url = "https://www.cnnic.net.cn/30/98/index.html"):
    """Fetch the CNNIC category index page and return every hyperlink on it.

    Parameters:
        url: index-page URL (defaults to the /30/98 category listing).

    Returns:
        list[dict]: one {'name': <link text>, 'href': <absolute URL>} per
        <a> tag on the page that carries an href attribute.

    Raises:
        requests.RequestException on network failure.
    """
    response = requests.get(url, headers=headers)
    response.encoding = "utf-8"
    # Removed: redundant local BeautifulSoup import and debug `print(response)`.
    soup = BeautifulSoup(response.text, 'lxml')
    lists = []
    for anchor in soup.find_all('a'):
        href = anchor.get('href')
        # <a> tags without href (in-page anchors, JS links) previously raised
        # TypeError on string concatenation; skip them.
        if href is None:
            continue
        lists.append({'name': anchor.get_text(),
                      'href': 'https://www.cnnic.net.cn' + href})
    return lists
# 下载 https://www.cnnic.net.cn/6/86/88/index.html
def main():
    """Crawl every CNNIC report category, scrape each article's body text and
    publish time, and insert one document per article URL into Mongo.

    Side effects: network requests to cnnic.net.cn, progress printed to
    stdout, inserts into the module-level `cnnic_report` collection (the
    article URL doubles as `_id`, so re-runs raise duplicate-key errors
    which are logged and ignored).
    """
    for category in get_urllist():
        # The "daily recommendation" feed is not a report category; skip it.
        if category['name'] == '每日推荐':
            continue
        url = category['href']
        print(f'url:{url}')
        # NOTE: the inner loop previously reused the name `ll`, shadowing the
        # outer loop variable; renamed for clarity (behavior unchanged).
        for entry in get_cnnic_88(url):
            url2 = entry['href']
            print(f'url2:{url2}')
            result = {'_id': url2, "url": url2}
            try:
                response = requests.get(url2, headers=headers, cookies=cookies)
                response.encoding = "utf-8"
                soup = BeautifulSoup(response.text, 'lxml')
                paragraphs = soup.find('div', class_='box-text').find_all('p')
                result['data'] = '\n'.join(p.get_text() for p in paragraphs).strip()
                result['time'] = soup.find('span', class_='time g-mr-30').get_text()
            except Exception as e:
                # Best-effort: still insert the bare URL record, but surface the
                # parse/network error instead of silently swallowing it
                # (previously a bare `except: pass`).
                print(f'failed to parse {url2}: {e}')
            try:
                cnnic_report.insertOne(result)
            except Exception as e:
                # Duplicate-_id inserts are expected on re-runs; just log them.
                print(e)
# Script entry point: run the full crawl when executed directly.
if __name__ =="__main__":
    print()
    # print(get_urllist())
    main()
# NOTE(review): removed trailing web-page boilerplate (Gitee content-review notice) — non-Python scrape artifact.