stockInfo.py contains:
import scrapy
import re
import pkgutil

class QuotesSpider(scrapy.Spider):
    name = "stockInfo"
    # Read the URL list bundled inside the "tutorial" package.
    data = pkgutil.get_data("tutorial", "resources/urls.txt")
    data = data.decode()
    start_urls = data.split("\r\n")

    def parse(self, response):
        # The 6-digit company code is part of the URL.
        company = re.findall("[0-9]{6}", response.url)[0]
        filename = '%s_info.html' % company
        # Save the raw page into the current working directory.
        with open(filename, 'wb') as f:
            f.write(response.body)
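For pkgutil.get_data() to keep working after deployment, resources/urls.txt has to be bundled into the egg that shub uploads. A sketch of the relevant setup.py fragment, assuming the stock layout that shub generates (the exact file layout is an assumption):

from setuptools import setup, find_packages

setup(
    name='tutorial',
    version='1.0',
    packages=find_packages(),
    # Bundle the URL list so pkgutil.get_data() can locate it after deploy.
    package_data={'tutorial': ['resources/urls.txt']},
    entry_points={'scrapy': ['settings = tutorial.settings']},
)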
To run the stockInfo spider, enter the following in a cmd window:

d:
cd tutorial
scrapy crawl stockInfo

Every web page whose URL is listed in resources/urls.txt is then downloaded into the directory d:/tutorial.
Then I deploy the spider to Scrapinghub and run stockInfo there.

No errors occur, so where are the downloaded web pages? And how are the following lines executed on Scrapinghub?
with open(filename, 'wb') as f:
    f.write(response.body)
How can I save the data on Scrapinghub and download it from there once the job has finished?
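On Scrapy Cloud the job runs inside a temporary container, so a file created with open() vanishes when the job ends. A common approach, shown here as a sketch rather than the original code, is to yield each page as an item, because items go to the job's persistent item storage:

import scrapy
import re
import pkgutil

class QuotesSpider(scrapy.Spider):
    name = "stockInfo"
    data = pkgutil.get_data("tutorial", "resources/urls.txt").decode()
    start_urls = data.split("\r\n")

    def parse(self, response):
        company = re.findall("[0-9]{6}", response.url)[0]
        # Yield the page instead of writing a local file; Scrapy Cloud
        # persists items, while the container filesystem is discarded.
        yield {
            "company": company,
            "url": response.url,
            "body": response.text,
        }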
To fetch the stored items afterwards, first install the scrapinghub client library:
pip install scrapinghub[msgpack]
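After that, a job's items can be downloaded with the python-scrapinghub client. A minimal sketch; the API key is a placeholder, and it assumes at least one finished run of the spider exists:

from scrapinghub import ScrapinghubClient

# Placeholder API key; find yours in the Scrapinghub dashboard.
client = ScrapinghubClient("YOUR_API_KEY")
project = client.get_project(123456)

# Take the most recent finished job of the spider.
job_meta = next(project.jobs.iter(spider="stockInfo", state="finished"))
job = client.get_job(job_meta["key"])

# Write every stored page back to a local HTML file.
for item in job.items.iter():
    filename = "%s_info.html" % item["company"]
    with open(filename, "w", encoding="utf-8") as f:
        f.write(item["body"])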
I rewrote the spider as Thiago Curvelo suggests and deployed it to my Scrapinghub project:
Deploy log location: C:\Users\dreams\AppData\Local\Temp\shub_deploy_yzstvtj8.log
Error: Deploy failed: b'{"status": "error", "message": "Internal error"}'
_get_apisettings, commands_module='sh_scrapy.commands')
File "/usr/local/lib/python2.7/site-packages/sh_scrapy/crawl.py", line 148, in _run_usercode
_run(args, settings)
File "/usr/local/lib/python2.7/site-packages/sh_scrapy/crawl.py", line 103, in _run
_run_scrapy(args, settings)
File "/usr/local/lib/python2.7/site-packages/sh_scrapy/crawl.py", line 111, in _run_scrapy
execute(settings=settings)
File "/usr/local/lib/python2.7/site-packages/scrapy/cmdline.py", line 148, in execute
cmd.crawler_process = CrawlerProcess(settings)
File "/usr/local/lib/python2.7/site-packages/scrapy/crawler.py", line 243, in __init__
super(CrawlerProcess, self).__init__(settings)
File "/usr/local/lib/python2.7/site-packages/scrapy/crawler.py", line 134, in __init__
self.spider_loader = _get_spider_loader(settings)
File "/usr/local/lib/python2.7/site-packages/scrapy/crawler.py", line 330, in _get_spider_loader
return loader_cls.from_settings(settings.frozencopy())
File "/usr/local/lib/python2.7/site-packages/scrapy/spiderloader.py", line 61, in from_settings
return cls(settings)
File "/usr/local/lib/python2.7/site-packages/scrapy/spiderloader.py", line 25, in __init__
self._load_all_spiders()
File "/usr/local/lib/python2.7/site-packages/scrapy/spiderloader.py", line 47, in _load_all_spiders
for module in walk_modules(name):
File "/usr/local/lib/python2.7/site-packages/scrapy/utils/misc.py", line 71, in walk_modules
submod = import_module(fullpath)
File "/usr/local/lib/python2.7/importlib/__init__.py", line 37, in import_module
__import__(name)
File "/app/__main__.egg/mySpider/spiders/stockInfo.py", line 4, in <module>
ImportError: cannot import name ScrapinghubClient
{"message": "shub-image-info exit code: 1", "details": null, "error": "image_info_error"}
{"status": "error", "message": "Internal error"}
The requirements file contains only one line:

scrapinghub[msgpack]
The scrapinghub.yml file contains:

project: 123456
requirements:
  file: requirements.tx
Now deploy it again:
D:\mySpider>shub deploy 123456
Packing version 1.0
Deploying to Scrapy Cloud project "123456"
Deploy log last 30 lines:
Deploy log location: C:\Users\dreams\AppData\Local\Temp\shub_deploy_4u7kb9ml.log
Error: Deploy failed: b'{"status": "error", "message": "Internal error"}'
File "/usr/local/lib/python2.7/site-packages/sh_scrapy/crawl.py", line 148, in _run_usercode
_run(args, settings)
File "/usr/local/lib/python2.7/site-packages/sh_scrapy/crawl.py", line 103, in _run
_run_scrapy(args, settings)
File "/usr/local/lib/python2.7/site-packages/sh_scrapy/crawl.py", line 111, in _run_scrapy
execute(settings=settings)
File "/usr/local/lib/python2.7/site-packages/scrapy/cmdline.py", line 148, in execute
cmd.crawler_process = CrawlerProcess(settings)
File "/usr/local/lib/python2.7/site-packages/scrapy/crawler.py", line 243, in __init__
super(CrawlerProcess, self).__init__(settings)
File "/usr/local/lib/python2.7/site-packages/scrapy/crawler.py", line 134, in __init__
self.spider_loader = _get_spider_loader(settings)
File "/usr/local/lib/python2.7/site-packages/scrapy/crawler.py", line 330, in _get_spider_loader
return loader_cls.from_settings(settings.frozencopy())
File "/usr/local/lib/python2.7/site-packages/scrapy/spiderloader.py", line 61, in from_settings
return cls(settings)
File "/usr/local/lib/python2.7/site-packages/scrapy/spiderloader.py", line 25, in __init__
self._load_all_spiders()
File "/usr/local/lib/python2.7/site-packages/scrapy/spiderloader.py", line 47, in _load_all_spiders
for module in walk_modules(name):
File "/usr/local/lib/python2.7/site-packages/scrapy/utils/misc.py", line 71, in walk_modules
submod = import_module(fullpath)
File "/usr/local/lib/python2.7/importlib/__init__.py", line 37, in import_module
__import__(name)
File "/tmp/unpacked-eggs/__main__.egg/mySpider/spiders/stockInfo.py", line 5, in <module>
from scrapinghub import ScrapinghubClient
ImportError: cannot import name ScrapinghubClient
{"message": "shub-image-info exit code: 1", "details": null, "error": "image_info_error"}
{"status": "error", "message": "Internal error"}
1. The error remains:

ImportError: cannot import name ScrapinghubClient

2. Only Python 3.7 (on Windows 7) is installed on my local machine, so why does the error message say:

File "/usr/local/lib/python2.7/site-packages/scrapy/utils/misc.py", line 71, in walk_modules

Is this error message generated on Scrapinghub (the remote end) and merely sent back to my local machine for display?
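Yes: the traceback is produced inside the job's container on Scrapinghub and only streamed back to the local console, which is why it shows Linux paths and Python 2.7 even though the local machine runs Python 3.7 on Windows. Those /usr/local/lib/python2.7 paths also suggest the project is built on the old default Python 2 stack, whose preinstalled scrapinghub library predates ScrapinghubClient. A possible fix, offered as an assumption rather than a verified solution, is to pin a Python 3 stack in scrapinghub.yml; note as well that "requirements.tx" above looks like a typo for requirements.txt:

project: 123456
stacks:
  default: scrapy:1.6-py3  # assumed stack name; any current *-py3 stack should work
requirements:
  file: requirements.txt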