# NOTE(review): scraped page artifact ("code pull complete, page will refresh"),
# not source code — commented out so the file parses:
# 代码拉取完成,页面将自动刷新
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import Queue
from contrib.spider_http import *
from contrib.spider_html import *
import config
from contrib import lib
from contrib.spider_frontier import *
from contrib.dosql import *
import test
import sys
from threading import Thread
import thread
reload(sys)
sys.setdefaultencoding('utf-8') # @UndefinedVariable
class Crawler():
def __init__(self, nest, link_queue):
self.nest = nest
self.link_queue = link_queue
def receive(self, url):
self.url = url
pass
def run(self):
downloader = Downloader()
data = downloader.downLoadFile(self.url)
if data == None:
return
htmlparsertool = test.MyHtmlTool(data)
urls = htmlparsertool.extractUrlsFromData(self.url)
if config.urlFilter(downloader.url):
questions = htmlparsertool.extractItems(pipeline=config.pickQuestion)
if questions != None:
question = questions.next()
answers = htmlparsertool.extractItems(pipeline=config.pickAnswer, extra=question)
flag = self.nest.put(('question', question))
if flag:
for answer in answers:
self.nest.put(('answer', answer))
for url in filter(config.urlFilter, urls):
self.link_queue.add(url)
class Schedule():
def __init__(self):
self.link_queue = WorkQueue()
self.nest = Nest('nest', 'crawler.db')
self.nest.initFromSql('answer.sql')
self.count = 0
pass
def initCrawlerWithSeeds(self, seeds):
for url in seeds:
self.link_queue.add(url)
pass
def scheduling(self):
spider = Crawler(self.nest, self.link_queue)
self.nest.start()
while not self.link_queue.empty():
import time
print self.count
self.count += 1
url = self.link_queue.get()
spider.receive(url)
spider.run()
time.sleep(1)
if __name__ == '__main__':
start_urls = ['http://www.zhihu.com/question/20821374']
sc = Schedule()
sc.initCrawlerWithSeeds(start_urls)
sc.scheduling()
# NOTE(review): the two lines below are a hosting-site moderation notice
# scraped along with the page, not source code — commented out so the file
# parses:
# 此处可能存在不合适展示的内容,页面不予展示。您可通过相关编辑功能自查并修改。
# 如您确认内容无涉及 不当用语 / 纯广告导流 / 暴力 / 低俗色情 / 侵权 / 盗版 / 虚假 / 无价值内容或违法国家有关法律法规的内容,可点击提交进行申诉,我们将尽快为您处理。