How do we use item pipelines in Scrapy?


I'm a new user of Scrapy, crawling my websites. I want to store the crawled data in a MySQL database. myspider.py:

from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import HtmlXPathSelector
from myproject.items import Torrent  # hypothetical import path; the Torrent item class is not shown in the question

class MininovaSpider(CrawlSpider):

    name = 'myspider'
    allowed_domains = ['example.com']
    start_urls = ['http://www.example.com']
    rules = [Rule(SgmlLinkExtractor(allow=('/categorie/.*',),
                                    restrict_xpaths=('//div[@id="contLeftNavig"]',)),
                  'parse_t')]

    def parse_t(self, response):
        x = HtmlXPathSelector(response)

        torrent = Torrent()
        torrent['url'] = response.url
        torrent['title'] = x.select("//h1[@class='infoAneTitre']/text()").extract()
        torrent['wilaya'] = x.select("//span[@class='ville_t']/text()").extract()
        #torrent['prix'] = x.select("//div[@id='datail_ann']/ul[1]/li[4]/span/text()").extract()
        #torrent['surface'] = x.select("//div[@id='datail_ann']/ul[3]/li[1]/span/text()").extract()
        torrent['description'] = x.select("//div[@class='box_pad']/text()").extract()
        return torrent
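
For reference, the Torrent item the spider fills in is not shown in the question. A minimal sketch of what the project's items.py would need to declare, with field names taken from the keys used above, might look like this:

from scrapy.item import Item, Field

class Torrent(Item):
    url = Field()
    title = Field()
    wilaya = Field()
    prix = Field()
    surface = Field()
    description = Field()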

For pipelines.py, I modified and used the googledir example. So when I run the crawl, I get these errors:

  • exceptions.AttributeError: 'MininovaSpider' object has no attribute 'iterkeys'
  • exceptions.TypeError: 'MininovaSpider' object is not subscriptable

pipelines.py:

from scrapy import log
from twisted.enterprise import adbapi
import time
import MySQLdb.cursors

class Pipeline(object):
    def __init__(self):
        self.dbpool = adbapi.ConnectionPool('MySQLdb',
                db='test',
                user='root',
                passwd='',
                cursorclass=MySQLdb.cursors.DictCursor,
                charset='utf8',
                use_unicode=True
            )

    def process_item(self, spider, item):
        query = self.dbpool.runInteraction(self._conditional_insert, item)
        query.addErrback(self.handle_error)
        return item

    def _conditional_insert(self, tx, item):
        tx.execute("select * from database where url = %s", (item['url'],))
        result = tx.fetchone()
        if result:
            log.msg("Item already stored in db: %s" % item, level=log.DEBUG)
        else:
            tx.execute(
                "insert into database (wilaya, titre, site, lien, resume, timestamp) "
                "values (%s, %s, %s, %s, %s, %s)",
                (item['wilaya'],
                 item['title'],
                 'example.com',
                 item['url'],
                 item['description'],
                 time.time()))
            log.msg("Item stored in db: %s" % item, level=log.DEBUG)

    def handle_error(self, e):
        log.err(e)
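
One thing the question doesn't show: a pipeline only runs if it is enabled in the project's settings.py. In Scrapy of this vintage, ITEM_PIPELINES was a plain list of class paths; assuming the project is named test as in the traceback's path, that would be roughly:

ITEM_PIPELINES = ['test.pipelines.Pipeline']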

And the traceback:

Traceback (most recent call last):
      File "/usr/lib/python2.7/twisted/internet/defer.py", line 287, in addCallbacks
        self._runCallbacks()
      File "/usr/lib/python2.7/twisted/internet/defer.py", line 545, in _runCallbacks
        current.result = callback(current.result, *args, **kw)
      File "/usr/lib/python2.7/site-packages/scrapy/core/scraper.py", line 208, in _itemproc_finished
        item=output, response=response, spider=spider)
      File "/usr/lib/python2.7/site-packages/scrapy/utils/signal.py", line 53, in send_catch_log_deferred
        *arguments, **named)
    --- <exception caught here> ---
      File "/usr/lib/python2.7/twisted/internet/defer.py", line 134, in maybeDeferred
        result = f(*args, **kw)
      File "/usr/lib/python2.7/site-packages/scrapy/xlib/pydispatch/robustapply.py", line 47, in robustApply
        return receiver(*arguments, **named)
      File "/usr/lib/python2.7/site-packages/scrapy/contrib/feedexport.py", line 177, in item_scraped
        slot.exporter.export_item(item)
      File "/usr/lib/python2.7/site-packages/scrapy/contrib/exporter/__init__.py", line 109, in export_item
        itemdict = dict(self._get_serialized_fields(item))
      File "/usr/lib/python2.7/site-packages/scrapy/contrib/exporter/__init__.py", line 60, in _get_serialized_fields
        field_iter = item.iterkeys()
    exceptions.AttributeError: 'MininovaSpider' object has no attribute 'iterkeys'

2012-01-18 16:00:43-0600 [scrapy] Unhandled Error
    Traceback (most recent call last):
      File "/usr/lib/python2.7/threading.py", line 503, in __bootstrap
        self.__bootstrap_inner()
      File "/usr/lib/python2.7/threading.py", line 530, in __bootstrap_inner
        self.run()
      File "/usr/lib/python2.7/threading.py", line 483, in run
        self.__target(*self.__args, **self.__kwargs)
    --- <exception caught here> ---
      File "/usr/lib/python2.7/twisted/python/threadpool.py", line 207, in _worker
        result = context.call(ctx, function, *args, **kwargs)
      File "/usr/lib/python2.7/twisted/python/context.py", line 118, in callWithContext
        return self.currentContext().callWithContext(ctx, func, *args, **kw)
      File "/usr/lib/python2.7/twisted/python/context.py", line 81, in callWithContext
        return func(*args,**kw)
      File "/usr/lib/python2.7/twisted/enterprise/adbapi.py", line 448, in _runInteraction
        result = interaction(trans, *args, **kw)
      File "/opt/scrapy/test/pipelines.py", line 33, in _conditional_insert
        tx.execute("select * from database where url = %s", (item['url'] ))
    exceptions.TypeError: 'MininovaSpider' object is not subscriptable



Comments (1)

深空失忆 2025-01-03 16:31:15
exceptions.TypeError: 'MininovaSpider' object is not subscriptable

It looks like you have yielded a spider (MininovaSpider) instance somewhere instead of an item. I think there is more code there that you haven't shown.

Put this in Pipeline.process_item() to confirm:

def process_item(self, spider, item):

    assert isinstance(item, Torrent), 'This should be a Torrent instance!'

    query = self.dbpool.runInteraction(self._conditional_insert, item)
    query.addErrback(self.handle_error)

    return item
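
Assuming a Scrapy release from the traceback's era (around 0.14), one likely root cause worth checking: the item pipeline hook is documented there as process_item(item, spider), with the item first, while the pipeline above defines process_item(self, spider, item). With the arguments swapped, the spider instance lands in the item parameter; it then gets subscripted inside _conditional_insert (the TypeError) and is returned to the feed exporter, which calls .iterkeys() on it (the AttributeError). A minimal sketch with the argument order corrected:

def process_item(self, item, spider):  # item first, then spider -- the order Scrapy passes
    query = self.dbpool.runInteraction(self._conditional_insert, item)
    query.addErrback(self.handle_error)
    return item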