Cannot parse the HTML response of a scrapy-splash Lua script
0 votes
04 October 2018

I am trying to parse the HTML returned from a SplashRequest to the execute endpoint, which should return HTML. However, when I pass the response to the callback function, it prints (parses) nothing. My log shows no errors; the code is below.

import scrapy

from scrapy_splash import SplashRequest


class Kapow_crawler(scrapy.Spider):
    name = "Kapow_crawler"

    def start_requests(self):

        urls = [
            "https://vmatrix1.brevardclerk.us/beca/name_search.cfm"
        ]
        for url in urls:
            yield SplashRequest(url=url, callback=self.parse_first_page, endpoint='render.html')

    def parse_first_page(self, response):
        # Collect the attribute ('id' or 'name') and its value for the two
        # controls that have to be clicked: the "Yes" disclaimer radio
        # button and the submit button.
        id_list = []
        inputs = response.css('input')
        markup = inputs.extract()
        for index, inp in enumerate(markup):
            if 'YES' in inp.upper():
                # Read the attribute off the same selector we matched, so
                # the index cannot drift when some inputs lack an id.
                el_id = inputs[index].attrib.get('id')
                if el_id:
                    id_list.append(['id', el_id])
                else:
                    id_list.append(['name', inputs[index].attrib.get('name')])
                if 'RADIO' in inp.upper():
                    for sub_index, sub in enumerate(markup):
                        if 'SUBMIT' in sub.upper():
                            sub_id = inputs[sub_index].attrib.get('id')
                            if sub_id:
                                id_list.append(['id', sub_id])
                            else:
                                id_list.append(['name', inputs[sub_index].attrib.get('name')])

        disclaimer_script = None
        if len(id_list) == 2:
            # Splash Lua script: load the page, click the "Yes" radio
            # button and the submit button, then return the rendered HTML.
            disclaimer_script = """
            function main(splash)
              assert(splash:go("{0}"))
              assert(splash:wait(0.5))
              local disc_check = splash:select('[{1}="{2}"]')
              local disc_sub = splash:select('[{3}="{4}"]')
              assert(disc_check:mouse_click())
              assert(disc_sub:mouse_click())
              assert(splash:wait(0.5))
              return splash:html()
            end
            """.format(response.url, id_list[0][0], id_list[0][1],
                       id_list[1][0], id_list[1][1])

        if disclaimer_script:
            # Guarding on the script (not len(id_list) >= 1) avoids a
            # NameError when only one control was found. 'har' and 'html'
            # are render.json arguments, so the execute endpoint only
            # needs the Lua source and its own arguments.
            yield SplashRequest(url=response.url, callback=self.parse_second_page,
                                endpoint='execute',
                                args={'lua_source': disclaimer_script, 'wait': 0.5})
        else:
            yield SplashRequest(url=response.url, callback=self.parse_second_page,
                                endpoint='render.html')


    def parse_second_page(self, response):
        inputs = response.css('input').extract()
        for inp in inputs:
            # print() returns None and Scrapy silently discards yielded
            # None values, so print the markup directly instead of
            # yielding the result of print().
            print(inp)
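
To narrow things down, here is a minimal debugging sketch (the spider name, the trivial Lua script, and the logging calls are my own illustrative choices, not part of the original spider) that fetches the same page through the execute endpoint and logs what the callback actually receives. Note that with LOG_FILE set, print() output goes to stdout rather than the log file, while self.logger ends up in the log:

import scrapy

from scrapy_splash import SplashRequest

# Trivial Lua script: load the page and return its HTML, nothing else.
DEBUG_SCRIPT = """
function main(splash)
  assert(splash:go(splash.args.url))
  assert(splash:wait(0.5))
  return splash:html()
end
"""


class DebugSplashSpider(scrapy.Spider):
    """Hypothetical spider used only to inspect the execute response."""
    name = "debug_splash"

    def start_requests(self):
        # SplashRequest forwards the url to the script as splash.args.url.
        yield SplashRequest(
            url="https://vmatrix1.brevardclerk.us/beca/name_search.cfm",
            callback=self.parse_debug,
            endpoint='execute',
            args={'lua_source': DEBUG_SCRIPT, 'wait': 0.5},
        )

    def parse_debug(self, response):
        # self.logger writes to LOG_FILE; print() would only reach stdout.
        self.logger.info("response class: %s", type(response).__name__)
        self.logger.info("body length: %d bytes", len(response.body))
        self.logger.info("input tags found: %d", len(response.css('input')))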

I have added the log below. As you can see, there are no errors, but the HTML from the SplashRequest is not parsed either. splash:html() should return the page HTML, and I know the callback can pass the response to different parse methods. Any help would be appreciated.

2018-10-03 14:57:00 [scrapy.utils.log] INFO: Scrapy 1.5.1 started (bot: kapow_crawler)
2018-10-03 14:57:00 [scrapy.utils.log] INFO: Versions: lxml 4.2.1.0, libxml2 2.9.8, cssselect 1.0.3, parsel 1.5.0, w3lib 1.19.0, Twisted 18.7.0, Python 3.6.5 |Anaconda, Inc.| (default, Mar 29 2018, 13:32:41) [MSC v.1900 64 bit (AMD64)], pyOpenSSL 18.0.0 (OpenSSL 1.0.2o  27 Mar 2018), cryptography 2.2.2, Platform Windows-10-10.0.15063-SP0
2018-10-03 14:57:00 [scrapy.crawler] INFO: Overridden settings: {'BOT_NAME': 'kapow_crawler', 'DUPEFILTER_CLASS': 'scrapy_splash.SplashAwareDupeFilter', 'HTTPCACHE_STORAGE': 'scrapy_splash.SplashAwareFSCacheStorage', 'LOG_FILE': 'C:\\Users\\jswordy\\.spyder-py3\\kapow_crawler\\log.txt', 'NEWSPIDER_MODULE': 'kapow_crawler.spiders', 'SPIDER_MODULES': ['kapow_crawler.spiders'], 'USER_AGENT': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36'}
2018-10-03 14:57:00 [scrapy.middleware] INFO: Enabled extensions:
['scrapy.extensions.corestats.CoreStats',
 'scrapy.extensions.telnet.TelnetConsole',
 'scrapy.extensions.logstats.LogStats']
2018-10-03 14:57:00 [scrapy.middleware] INFO: Enabled downloader middlewares:
['scrapy.downloadermiddlewares.httpauth.HttpAuthMiddleware',
 'scrapy.downloadermiddlewares.downloadtimeout.DownloadTimeoutMiddleware',
 'scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware',
 'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware',
 'kapow_crawler.middlewares.KapowCrawlerDownloaderMiddleware',
 'scrapy.downloadermiddlewares.retry.RetryMiddleware',
 'scrapy.downloadermiddlewares.redirect.MetaRefreshMiddleware',
 'scrapy.downloadermiddlewares.redirect.RedirectMiddleware',
 'scrapy.downloadermiddlewares.cookies.CookiesMiddleware',
 'scrapy_splash.SplashCookiesMiddleware',
 'scrapy_splash.SplashMiddleware',
 'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware',
 'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware',
 'scrapy.downloadermiddlewares.stats.DownloaderStats']
2018-10-03 14:57:00 [scrapy.middleware] INFO: Enabled spider middlewares:
['scrapy.spidermiddlewares.httperror.HttpErrorMiddleware',
 'scrapy_splash.SplashDeduplicateArgsMiddleware',
 'scrapy.spidermiddlewares.offsite.OffsiteMiddleware',
 'kapow_crawler.middlewares.KapowCrawlerSpiderMiddleware',
 'scrapy.spidermiddlewares.referer.RefererMiddleware',
 'scrapy.spidermiddlewares.urllength.UrlLengthMiddleware',
 'scrapy.spidermiddlewares.depth.DepthMiddleware']
2018-10-03 14:57:00 [scrapy.middleware] INFO: Enabled item pipelines:
['kapow_crawler.pipelines.KapowCrawlerPipeline',
 'kapow_crawler.pipelines.ScreenshotPipeline']
2018-10-03 14:57:00 [scrapy.core.engine] INFO: Spider opened
2018-10-03 14:57:00 [scrapy.extensions.logstats] INFO: Crawled 0 pages (at 0 pages/min), scraped 0 items (at 0 items/min)
2018-10-03 14:57:00 [Kapow_crawler] INFO: Spider opened: Kapow_crawler
2018-10-03 14:57:00 [Kapow_crawler] INFO: Spider opened: Kapow_crawler
2018-10-03 14:57:00 [scrapy.extensions.telnet] DEBUG: Telnet console listening on 127.0.0.1:6023
2018-10-03 14:57:01 [scrapy.core.engine] DEBUG: Crawled (200) <GET https://vmatrix1.brevardclerk.us/beca/name_search.cfm via http://localhost:8050/render.html> (referer: None)
2018-10-03 14:57:02 [scrapy.core.engine] DEBUG: Crawled (200) <GET https://vmatrix1.brevardclerk.us/beca/name_search.cfm via http://localhost:8050/execute> (referer: None)
2018-10-03 14:57:02 [scrapy.core.engine] INFO: Closing spider (finished)
2018-10-03 14:57:02 [scrapy.statscollectors] INFO: Dumping Scrapy stats:
{'downloader/request_bytes': 2024,
 'downloader/request_count': 2,
 'downloader/request_method_count/POST': 2,
 'downloader/response_bytes': 7633,
 'downloader/response_count': 2,
 'downloader/response_status_count/200': 2,
 'finish_reason': 'finished',
 'finish_time': datetime.datetime(2018, 10, 3, 19, 57, 2, 715628),
 'log_count/DEBUG': 3,
 'log_count/INFO': 9,
 'request_depth_max': 1,
 'response_received_count': 2,
 'scheduler/dequeued': 4,
 'scheduler/dequeued/memory': 4,
 'scheduler/enqueued': 4,
 'scheduler/enqueued/memory': 4,
 'splash/execute/request_count': 1,
 'splash/execute/response_count/200': 1,
 'splash/render.html/request_count': 1,
 'splash/render.html/response_count/200': 1,
 'start_time': datetime.datetime(2018, 10, 3, 19, 57, 0, 758627)}
2018-10-03 14:57:02 [scrapy.core.engine] INFO: Spider closed (finished)
...
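
Since the log shows both requests coming back 200, the Lua script can also be checked in isolation by POSTing it straight to the local Splash instance (assuming Splash runs on localhost:8050, as in the log). A non-empty HTML body here would suggest the problem is on the Scrapy side rather than in Splash:

import requests

# Same trivial script as in the debugging sketch above: load the page
# and return the rendered HTML.
LUA = """
function main(splash)
  assert(splash:go(splash.args.url))
  assert(splash:wait(0.5))
  return splash:html()
end
"""

# The /execute endpoint accepts lua_source plus arbitrary extra arguments,
# which the script can read via splash.args (here, the target url).
resp = requests.post(
    'http://localhost:8050/execute',
    json={
        'lua_source': LUA,
        'url': 'https://vmatrix1.brevardclerk.us/beca/name_search.cfm',
    },
)
print(resp.status_code, len(resp.text))  # expect 200 and a non-empty body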