pyspider scheduled crawl not firing; on_finished never triggered even though all tasks are confirmed complete

Posted on 2022-09-06 20:56:45

As the title says. Code below:

from pyspider.libs.base_handler import *
from pyspider.libs.utils import md5string
import logging

logger = logging.getLogger(__name__)
fhandler = logging.FileHandler('/tmp/aaaa')

class Handler(BaseHandler):
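    # retry_delay maps the retry count to the delay in seconds before the
    # next attempt; the special '' key is the default for counts not listed.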
    retry_delay = {
        0: 1,
        1: 1*5,
        2: 2*5,
        3: 3*5,
        4: 4*5,
        5: 5*5,
        6: 1*30,
        7: 1*40,
        8: 1*60,
        9: 2*60,
        10: 2*60,
        '': 3*60
    }
    crawl_config = {
        'force_update': True,
        'retries': 99,
        'auto_recrawl': False,
        'proxy': "xxxxxxxxxxxxxxxxxxx",
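        # Bumping itag to a new version string forces tasks to be
        # re-crawled even if they have not expired yet.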
        'itag': 'v0007',
        'headers': {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,zh-TW;q=0.7',
            'Cache-Control': 'max-age=0',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.146 Safari/537.36'
        }
    }

    def __init__(self):
        self.kw = '春装2018春款'
        self.base_url = 'https://pub.alimama.com/promo/search/index.htm?q=%s&toPage=%s&perPageSize=50'
        self.page_num = 1
        self.total_num = 1
        logger.addHandler(fhandler)

    def get_taskid(self, task):
        """md5string(task['url']+json.dumps(task['fetch'].get('data', '')))"""
        return super(Handler, self).get_taskid(task)

    def on_message(self, project, msg):
        pass

    def on_result(self, result):
        # pyspider result_worker --result-cls "myresult.MyResultWorker"
        logger.info('on_resultttttttttttt')
        super(Handler, self).on_result(result)

    def on_finished(self, response, task):
        logger.info('on_finishedddddddddd')

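    # @every re-triggers on_start on schedule, but the crawl() calls inside
    # are still de-duplicated by taskid until age expires or itag changes.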
    @every(minutes=5)
    def on_start(self):
        while self.page_num <= self.total_num:
            url = self.base_url%(self.kw, self.page_num)
            self.crawl(url, fetch_type='js', callback=self.index_page)
            self.page_num += 1

    @config(age=3 * 60)
    def index_page(self, response):
        scode = 404
        for each in response.doc('div.block-search-box').items():
            scode = 200
            save = {}
            save['title'] = each('a.color-m').text()
            self.crawl(each('a.color-m').attr.href.replace('http://','https://'), connect_timeout=50, fetch_type='js', callback=self.detail_page, save=save)
        # No result blocks found on the page: mark the response as failed
        # so the scheduler retries this task according to retry_delay.
        if scode != 200:
            response.status_code = scode
            response.raise_for_status()

    @config(priority=0, age=3 * 60)
    def detail_page(self, response):
        #response.content = response.content.decode('gbk', 'ignore').encode('gbk')
        save = response.save
        #logger.info(response.doc('.tb-rmb-num').text())
        return {
            "url": response.url,
            "title": save['title']
        }
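
Scheduler log while this was happening (the _on_cronjob task is selected every 5 minutes, yet the counters stay at new:0):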

[I 180317 18:50:00 scheduler:965] select z_taobao_a:_on_cronjob data:,_on_cronjob
[I 180317 18:50:56 scheduler:586] in 5m: new:0,success:0,retry:0,failed:0
[I 180317 18:51:57 scheduler:586] in 5m: new:0,success:0,retry:0,failed:0
[I 180317 18:52:57 scheduler:586] in 5m: new:0,success:0,retry:0,failed:0
[I 180317 18:53:57 scheduler:586] in 5m: new:0,success:0,retry:0,failed:0
[I 180317 18:54:57 scheduler:586] in 5m: new:0,success:0,retry:0,failed:0
[I 180317 18:55:00 scheduler:965] select z_taobao_a:_on_cronjob data:,_on_cronjob
[I 180317 18:55:57 scheduler:586] in 5m: new:0,success:0,retry:0,failed:0
[I 180317 18:56:57 scheduler:586] in 5m: new:0,success:0,retry:0,failed:0
[I 180317 18:57:57 scheduler:586] in 5m: new:0,success:0,retry:0,failed:0
[I 180317 18:58:57 scheduler:586] in 5m: new:0,success:0,retry:0,failed:0
[I 180317 18:59:57 scheduler:586] in 5m: new:0,success:0,retry:0,failed:0
[I 180317 19:00:00 scheduler:965] select z_taobao_a:_on_cronjob data:,_on_cronjob
[I 180317 19:00:57 scheduler:586] in 5m: new:0,success:0,retry:0,failed:0
[I 180317 19:01:57 scheduler:586] in 5m: new:0,success:0,retry:0,failed:0
[I 180317 19:02:57 scheduler:586] in 5m: new:0,success:0,retry:0,failed:0
[I 180317 19:03:57 scheduler:586] in 5m: new:0,success:0,retry:0,failed:0
[I 180317 19:04:57 scheduler:586] in 5m: new:0,success:0,retry:0,failed:0
[I 180317 19:05:00 scheduler:965] select z_taobao_a:_on_cronjob data:,_on_cronjob
[I 180317 19:05:57 scheduler:586] in 5m: new:0,success:0,retry:0,failed:0
Is this a configuration problem on my end?

Comments (1)

唯憾梦倾城 2022-09-13 20:56:45

on_finished is not executed because the fail-pause-num I set was too large.
As for @every not firing on schedule, I don't know the cause. After restarting the "processor" process it runs once, but there is no telling when the next run will happen.
Does nobody know the answer to this?
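
For context: pyspider's scheduler pauses a project after a run of consecutive failed tasks, which is easy to hit with retries=99 and an unreliable proxy, and a paused project would match the quiet scheduler log above. The fail-pause-num mentioned in the comment corresponds to the scheduler's FAIL_PAUSE_NUM setting. Below is a minimal sketch, not from the original post, of tuning that threshold with a custom scheduler class; the attribute names assume the stock Scheduler and the module name mysched.py is made up, so verify both against your installed pyspider version.

# mysched.py (hypothetical module name), started with:
#   pyspider scheduler --scheduler-cls mysched.TunedScheduler
from pyspider.scheduler import ThreadBaseScheduler

class TunedScheduler(ThreadBaseScheduler):
    # Assumed stock attributes: pause the project after this many
    # consecutive failed tasks ...
    FAIL_PAUSE_NUM = 3
    # ... keep it paused for this many seconds ...
    PAUSE_TIME = 60
    # ... then let a few probe tasks through to test recovery.
    UNPAUSE_CHECK_NUM = 1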
