1 Star 0 Fork 0

bluedream_pp/pythonFirstInHead

加入 Gitee
与超过 1200万 开发者一起发现、参与优秀开源项目,私有仓库也完全免费 :)
免费加入
该仓库未声明开源许可证文件(LICENSE),使用请关注具体项目描述及其代码上游依赖。
克隆/下载
zgxx.py 2.61 KB
一键复制 编辑 原始数据 按行查看 历史
#coding:UTF-8
import urllib2
from bs4 import BeautifulSoup
import time
import sys
import xlsxwriter
import httplib
# Python-2-only hack: `setdefaultencoding` is deleted from `sys` during
# interpreter startup, so it must be restored via reload() before it can be
# called. Setting the default to UTF-8 makes implicit str<->unicode
# conversions (e.g. when concatenating scraped text) use UTF-8 instead of
# ASCII, avoiding UnicodeDecodeError on Chinese page content.
reload(sys)
sys.setdefaultencoding('utf8')
def scrapyList(url,index):
print url
user_agent = 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.82 Safari/537.36'
headers = {'User-Agent': user_agent, 'timeout': 60}
request = urllib2.Request(url, headers=headers)
content =""
try:
response = urllib2.urlopen(request)
except (IOError, httplib.HTTPException, httplib.BadStatusLine) as e:
print url + '列表页面发生异常', e
time.sleep(10)
scrapyList(url)
else:
content = response.read()
soup = BeautifulSoup(content, 'lxml')
c = soup.find(attrs={"id": "documentContainer"})
workbook = xlsxwriter.Workbook("/Users/wangyifei/Documents/zq/zq"+str(index)+".xlsx")
header_style = workbook.add_format({'bold': True})
header_style.set_bg_color("white")
header_style.set_align('center')
header_style.set_color("black")
worksheet = workbook.add_worksheet("content")
book_a = c.findAll(attrs={"class": "row"})
i = 0
for div in book_a:
s1 = BeautifulSoup(str(div), 'lxml')
a = s1.find(attrs={"target": "_blank"})
scrapyContent("http://www.csrc.gov.cn/pub/zjhpublicofbj" + a["href"].replace("..", ""),i,workbook,worksheet)
workbook.close()
i = i +1
time.sleep(4)
def scrapyContent(url,index,workbook,worksheet):
print url
user_agent = 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.82 Safari/537.36'
headers = {'User-Agent': user_agent, 'timeout': 60}
request = urllib2.Request(url, headers=headers)
try:
response = urllib2.urlopen(request)
except (IOError, httplib.HTTPException, httplib.BadStatusLine) as e:
print url + '列表页面发生异常', e
time.sleep(10)
scrapyContent(url)
else:
content = response.read()
# print content
soup = BeautifulSoup(content, 'lxml')
c = soup.select('#lTitle')
str = ""
for t in c:
str = str + t.get_text().encode('utf-8')+ "\n"
worksheet.write(index, 0,str )
cc = soup.select('#ContentRegion > div > p')
str = ""
for tt in cc:
str = str +tt.get_text().encode('utf-8') + "\n"
worksheet.write(index, 1,str)
# Driver: walk listing pages 1..146 of the CSRC Beijing disclosure index,
# exporting each page to its own workbook, with a 6s pause between pages
# to avoid hammering the server.
for page in range(1, 147):
    scrapyList('http://www.csrc.gov.cn/pub/zjhpublicofbj/2280/index_887_' + str(page) + '.htm', page)
    time.sleep(6)
马建仓 AI 助手
尝试更多
代码解读
代码找茬
代码优化
Python
1
https://gitee.com/bluedream_pp/pythonFirstInHead.git
git@gitee.com:bluedream_pp/pythonFirstInHead.git
bluedream_pp
pythonFirstInHead
pythonFirstInHead
master

搜索帮助