根据来自scrapy的信号更新主线程内的PyQt5 Gui

Posted

技术标签:

【中文标题】根据来自scrapy的信号更新主线程内的PyQt5 Gui【英文标题】:Update PyQt5 Gui inside a main thread based on signal from scrapy 【发布时间】:2020-05-05 07:27:43 【问题描述】:

我有一个非常基本的蜘蛛，它看起来像来自 scrapy testspiders 的 followall 蜘蛛。

import re

import scrapy.signals
# Fix: the class is named HtmlResponse (capital H) — "htmlResponse" raises
# ImportError, and _extract_requests/_set_title reference HtmlResponse.
from scrapy.http import Request, HtmlResponse
from scrapy.linkextractors import LinkExtractor
from six.moves.urllib.parse import urlparse

from page import Page


class ZenSpider( scrapy.Spider ) :
    """Breadth-first spider modelled on the ``followall`` testspider.

    Every parsed page yields one :class:`Page` item plus a follow-up
    ``Request`` for each link found on the page.
    """

    name = 'followall'
    # Reconstructed dict braces (lost in transcription): stop after 2 pages
    # and export scraped items to items.csv via the FEEDS setting.
    custom_settings = {
        'CLOSESPIDER_PAGECOUNT' : 2,
        "FEEDS" : {
            "items.csv" : {"format" : "csv"},
        },
    }

    # NOTE: the original had a second, zero-argument __init__ above this one;
    # it was dead code (immediately overridden by this definition) and has
    # been removed.
    def __init__(self, **kw) :
        super( ZenSpider, self ).__init__( **kw )
        # Accept the start URL as "url" or "domain"; fall back to a demo site.
        url = kw.get( 'url' ) or kw.get( 'domain' ) or 'http://scrapinghub.com/'
        if not url.startswith( 'http://' ) and not url.startswith( 'https://' ) :
            url = 'http://%s/' % url
        self.url = url
        # Restrict the crawl to the start host, ignoring a leading "www.".
        self.allowed_domains = [re.sub(r'^www\.', '', urlparse(url).hostname)]
        self.link_extractor = LinkExtractor()

    def start_requests(self):
        """Seed the crawl with the single configured start URL."""
        return [Request(self.url, callback=self.parse, dont_filter=True)]

    def parse(self, response):
        """Parse a PageItem and all requests to follow

        @url http://www.scrapinghub.com/
        @returns items 1 1
        @returns requests 1
        @scrapes url title foo
        """
        page = self._get_item(response)
        r = [page]
        r.extend(self._extract_requests(response))
        return r

    def _get_item(self, response):
        """Build a Page item from the response (url, size, status, meta)."""
        # Removed an unused local list that was never read.
        item = Page(
            url=response.url,
            size=str( len( response.body ) ),
            status=response.status,
            # content_type=response.request.headers.get('Content-Type'),
            # encoding=response.request.headers.get('encoding'),
            # referer=response.request.headers.get('Referer'),
        )
        self._set_title( item, response )
        self._set_description( item, response )
        return item

    def _extract_requests(self, response):
        """Return follow-up Requests for every link on an HTML response."""
        r = []
        if isinstance(response, HtmlResponse):
            links = self.link_extractor.extract_links( response )
            r.extend( Request( x.url, callback=self.parse ) for x in links )
        return r

    def _set_title(self, page, response) :
        """Copy the first <title> text into the item, if present."""
        if isinstance( response, HtmlResponse ) :
            title = response.xpath( "//title/text()" ).extract()
            if title :
                page['title'] = title[0]

    def _set_description(self, page, response) :
        """Copy the first meta-description into the item, if present."""
        if isinstance( response, HtmlResponse ) :
            description = response.xpath( "//meta[@name='description']/@content" ).extract()
            if description :
                page['description'] = description[0]

我从下面的脚本中调用这个蜘蛛。蜘蛛使用 CrawlRunner 类运行,当它获取一个项目时,会发出一个信号作为 p.signals.connect,然后调用方法 crawler_results 并打印被抓取的项目。

据我了解,我无法将爬行移动到它自己的类中,因为那样信号将无法与 PyQt5 一起使用

import scrapy
from PyQt5 import QtWidgets, QtCore, QtGui
from PyQt5.QtCore import QRunnable, pyqtSlot, QThread, pyqtSignal, QTimer
from PyQt5.QtWidgets import QTableWidgetItem, QLabel
from scrapy import signals
from scrapy.crawler import CrawlerProcess, CrawlerRunner
from twisted.internet import reactor
from scrapy.utils.log import configure_logging

from Layout import Ui_MainWindow
from ZenSpider import ZenSpider


class MainWindow( QtWidgets.QMainWindow, Ui_MainWindow ) :
    """Main window that starts a Scrapy crawl when the button is pressed.

    NOTE(review): this is the questioner's problematic version — calling
    ``reactor.run()`` inside a Qt slot blocks the Qt event loop, so the GUI
    cannot repaint while the crawl runs. The accepted answer replaces the
    default Twisted reactor with a Qt-compatible one (qt5reactor).
    """

    def __init__(self, parent=None) :
        super(MainWindow, self).__init__()

        self.setupUi( self )
        # Start the crawl when the push button is pressed.
        self.pushButton.pressed.connect( self.on_url_entered )

    def crawler_results(self, item) :
        # Invoked once per scraped item via Scrapy's item_scraped signal.
        print( "SCRAPED AN ITEM" )
        ##Do Something here ##

    def on_url_entered(self) :
        # global userInput
        # userInput = self.urlbar.text()
        configure_logging()
        runner = CrawlerRunner()
        runner.crawl(ZenSpider, domain="google.com.au")
        # Wire every crawler's item_scraped signal to the GUI callback.
        for p in runner.crawlers :
            p.signals.connect(self.crawler_results, signal=signals.item_scraped)
        # NOTE(review): blocks until the reactor stops — freezes the GUI.
        reactor.run()

# Entry point: create the Qt application and show the main window.
if __name__ == "__main__" :
    app = QtWidgets.QApplication( [] )
    main_window = MainWindow()
    main_window.show()
    app.exec_()

我有一个带有简单 QTableWidget 和按钮的布局

# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file 'basic.ui'
#
# Created by: PyQt5 UI code generator 5.14.2
#
# WARNING! All changes made in this file will be lost!


from PyQt5 import QtCore, QtGui, QtWidgets


class Ui_MainWindow(object):
    """Generated UI: a one-column table, a Start button and a status bar.

    Auto-generated by pyuic5 from 'basic.ui' — edits here are lost on
    regeneration, so the code is kept byte-for-byte as emitted.
    """

    def setupUi(self, MainWindow):
        """Build the widget tree with fixed geometry on *MainWindow*."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(1034, 803)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        # Single-column table; rows are added at runtime (one URL per row).
        self.tableWidget = QtWidgets.QTableWidget(self.centralwidget)
        self.tableWidget.setGeometry(QtCore.QRect(140, 200, 831, 401))
        self.tableWidget.setObjectName("tableWidget")
        self.tableWidget.setColumnCount(1)
        self.tableWidget.setRowCount(0)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setHorizontalHeaderItem(0, item)
        self.pushButton = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton.setGeometry(QtCore.QRect(880, 610, 89, 25))
        self.pushButton.setObjectName("pushButton")
        MainWindow.setCentralWidget(self.centralwidget)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)

        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Apply translatable strings (window title, header text, button)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        item = self.tableWidget.horizontalHeaderItem(0)
        item.setText(_translate("MainWindow", "URL"))
        self.pushButton.setText(_translate("MainWindow", "Start"))


if __name__ == "__main__":
    import sys

    # Stand-alone preview of the generated layout.
    application = QtWidgets.QApplication(sys.argv)
    window = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(window)
    window.show()
    sys.exit(application.exec_())

当我按下按钮时,我可以看到爬虫正在运行并进入 crawler_results 方法,因为它打印了抓取的项目。蜘蛛将每个项目返回为以下值

{'size': '164125',
 'status': 200,
 'title': 'Google Advanced Search',
 'url': 'https://www.google.com.au/advanced_search?hl=en-AU&authuser=0'}

页面只是我的scrapy项目

import scrapy

class Page(scrapy.Item):
    """Item holding the data scraped from one crawled page."""

    url = scrapy.Field()          # absolute URL of the response
    size = scrapy.Field()         # response body length in bytes (str)
    status = scrapy.Field()       # HTTP status code
    title = scrapy.Field()        # <title> text (HTML responses only)
    # Fix: ZenSpider._set_description assigns item['description'], but the
    # field was never declared — scrapy Items raise KeyError for undeclared
    # fields, so any page with a meta description would crash the spider.
    description = scrapy.Field()  # meta-description (HTML responses only)

我的问题是如何将这些数据转换到 GUI 中并让它在蜘蛛运行时自动刷新。这意味着每次抓取一个项目时,GUI 都会更新,然后蜘蛛会继续。

到目前为止我已经探索过了

    使用 scrapy deferred，运气不佳
    插槽/信号，但无法更新 GUI
    QTimer 函数每秒更新一次 GUI，但同样不会产生任何结果

非常感谢任何帮助

【问题讨论】:

@eyllanesc - 只是我的scrapy字段 【参考方案1】:

您必须安装与 Qt 事件循环兼容的反应器,例如使用:

qt5reactor (python -m pip install qt5reactor), qt-reactor (python -m pip install qt-reactor)
import sys

from PyQt5 import QtWidgets, QtCore, QtGui

import qt5reactor
# import qreactor

from scrapy import signals
from scrapy.crawler import CrawlerRunner
from scrapy.utils.log import configure_logging

import twisted

from Layout import Ui_MainWindow
from ZenSpider import ZenSpider


class MainWindow(QtWidgets.QMainWindow, Ui_MainWindow):
    """Window that launches a Scrapy crawl and lists scraped URLs in a table."""

    def __init__(self, parent=None):
        super(MainWindow, self).__init__()

        self.setupUi(self)
        self.pushButton.pressed.connect(self.on_url_entered)
        # Let the URL column grow to fit its contents.
        header = self.tableWidget.horizontalHeader()
        header.setSectionResizeMode(QtWidgets.QHeaderView.ResizeToContents)

    def crawler_results(self, item):
        """Append the scraped item's URL as a new row in the table."""
        next_row = self.tableWidget.rowCount()
        self.tableWidget.insertRow(next_row)
        cell = QtWidgets.QTableWidgetItem(item["url"])
        self.tableWidget.setItem(next_row, 0, cell)

    def on_url_entered(self):
        """Start the crawl; items arrive via the item_scraped signal."""
        configure_logging()
        runner = CrawlerRunner()
        runner.crawl(ZenSpider, domain="google.com.au")
        for crawler in runner.crawlers:
            crawler.signals.connect(self.crawler_results, signal=signals.item_scraped)

    def closeEvent(self, event):
        """Shut down the Twisted reactor when the window closes."""
        super(MainWindow, self).closeEvent(event)
        twisted.internet.reactor.stop()


if __name__ == "__main__":
    app = QtWidgets.QApplication([])

    # Install the Qt-compatible Twisted reactor after the QApplication
    # exists and before the reactor is run — this lets Scrapy/Twisted and
    # the Qt event loop share one loop, so the GUI stays responsive.
    qt5reactor.install()
    # qreactor.install()

    main_window = MainWindow()
    main_window.show()
    # The reactor drives the Qt event loop; replaces app.exec_().
    twisted.internet.reactor.run()

【讨论】:

像魅力一样工作。谢谢 !我也尝试过 qt5reactor,尽管我永远无法停止反应器并且我的 GUI 冻结了。 def closeEvent(self, event): super(MainWindow, self).closeEvent(event) twisted.internet.reactor.stop() 是我需要的代码,包括在 on_url_entered 方法中删除 reactor.start 。谢谢传奇。 仅供参考,GUI 会挂起,直到 scrapy 完成爬网。如果爬取为 200 或更多 URL,则程序崩溃。我想知道这是否用 Qthread 写得更好?或者 Qprocess ?甚至可能使用钩针或异步?

以上是关于根据来自scrapy的信号更新主线程内的PyQt5 Gui的主要内容,如果未能解决你的问题,请参考以下文章

PyQt5:如何向工作线程发送信号

当连接类型 = Qt.DirectConnection 时,来自线程对象的 PyQt5 信号会导致段错误

PYQT5怎么更新UI显示的内容

无法让PyQt5 Designer LCD盒接收显示信号并更新显示

线程之间的信号

是否可以从 PyQt5 中的线程获取数组?