from threading import Thread
from Queue import Queue, Empty
from bs4 import BeautifulSoup
import os
import urllib2
'''
date: 2014-03-07 20:50:15
url: http://www.dbmeizi.com
desc: download pictures from dbmeizi.com using multiple threads
email: withfaker@gmail.com
'''
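
# One producer thread crawls the listing pages and feeds image URLs into a
# shared Queue; a small pool of consumer threads drains the queue and
# downloads each picture.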
queue = Queue()
pic_path = os.path.join(os.curdir, "images")
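
# Producer: walks the paginated listing and enqueues every image URL it finds.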
class ProducerThread(Thread):
    def run(self):
        page_loop()
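
# Consumer: pulls URLs off the queue and downloads them, exiting once the
# queue has stayed empty for a while.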
class ConsumerThread(Thread):
    def run(self):
        while True:
            try:
                # Block for a short while instead of exiting the moment the
                # queue is empty: the producer may not have filled it yet.
                url = queue.get(timeout=10)
            except Empty:
                return
            fetch(url)
            queue.task_done()
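
# Crawl listing pages ?p=0, ?p=1, ... and enqueue the src of every <img> tag;
# stop when a page comes back with no images.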
def page_loop(page=0):
    while True:
        url = 'http://www.dbmeizi.com/?p=%s' % page
        try:
            content = urllib2.urlopen(url)
            soup = BeautifulSoup(content, 'html.parser')
        except Exception:
            print "internal error:[%s]" % url
            page += 1
            continue
        my_girl = soup.find_all('img')
        if not my_girl:
            print 'finished!'
            return
        #print "BEGIN TO FETCH PAGE:[%s]" % page
        for girl in my_girl:
            link = girl.get('src')
            flink = 'http:' + link  # src attributes on this site are protocol-relative
            queue.put(flink)
        page += 1
# Fetch a single picture and save it under pic_path.
def fetch(url):
    print "url:[%s]" % url
    # Use the last 11 characters of the URL as the local file name.
    p = os.path.join(pic_path, url[-11:])
    if os.path.isfile(p):
        st = os.stat(p)
        if st.st_size > 0:
            print "file[%s] already exists." % url[-11:]
            return
        else:
            print "file[%s] exists, but its size is too small; downloading again." % url[-11:]
    req = urllib2.Request(url)
    req.add_header('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8')
    # No Accept-Encoding header: urllib2 does not decompress responses, so a
    # gzip-encoded body would be written to disk as-is and corrupt the image.
    req.add_header('Accept-Language', 'zh-CN,zh;q=0.8,en;q=0.6')
    req.add_header('Cache-Control', 'max-age=0')
    req.add_header('Connection', 'keep-alive')
    req.add_header('Referer', 'https://www.dbmeizi.com')
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1660.0 Safari/537.36')
    try:
        resp = urllib2.urlopen(req)
    except Exception:
        print "internal error:[%s]" % url
        return
    f = open(p, 'wb')
    f.write(resp.read())
    f.close()
    resp.close()
    #print "fetch url done.[%s]" % url
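
# Entry point: create the output directory, then start one producer and five
# consumer threads.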
if __name__ == '__main__':
    if not os.path.isdir(pic_path):
        os.mkdir(pic_path)
    ProducerThread(name="Producer1").start()
    ConsumerThread(name="Consumer1").start()
    ConsumerThread(name="Consumer2").start()
    ConsumerThread(name="Consumer3").start()
    ConsumerThread(name="Consumer4").start()
    ConsumerThread(name="Consumer5").start()
    #ConsumerThread(name="Consumer6").start()
    #ConsumerThread(name="Consumer7").start()
    #ConsumerThread(name="Consumer8").start()
    #ConsumerThread(name="Consumer9").start()
    #ConsumerThread(name="Consumer10").start()