I have come a long way, but at this point I need help finishing my code: I am trying to get the URLs I collect into my parse_static_content callback so that I can check the company name there. The only help I need is how to get the profile URL into parse_static_content.

This is the code:
import scrapy
from selenium import webdriver
import os
import logging
from selenium.webdriver.chrome.options import Options as ChromeOptions
from ..items import WebcrawlerItem
CHROME_DRIVER_WINDOW_PATH = "C:/Users/RAJ/PycharmProjects/WebCrawler/WebCrawler/WebCrawler/spiders/chromedriver.exe"
logging.basicConfig(filename='msg.log', filemode='a', format='%(message)s', level=logging.DEBUG)
class ProductSpider(scrapy.Spider):
    name = "product_spider"
    # allowed_domains takes bare domain names, not full URLs,
    # otherwise the offsite middleware filters every request
    allowed_domains = ['startupindia.gov.in']
    base_url = 'https://www.startupindia.gov.in/'
    start_urls = [
        'https://www.startupindia.gov.in/content/sih/en/search.html?industries=sih:industry/advertising&states=sih:location/india/andhra-pradesh&stages=Prototype&roles=Startup&page=0'
    ]

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        cwd = os.getcwd()
        opts = ChromeOptions()
        opts.add_argument("--headless")  # optional; pass options=opts below to run headless
        self.driver = webdriver.Chrome(executable_path=CHROME_DRIVER_WINDOW_PATH)

    def parse(self, response):
        self.driver.get(response.url)
        cards = self.driver.find_elements_by_xpath("//*[@id='persona-results']//a[@class='img-wrap']")
        # cards = self.driver.find_elements_by_css_selector("div#persona-results div.category-card")
        for card in cards:
            try:
                card.click()  # click on the image card; the profile opens in a new tab
                # switch to the tab that was just opened
                self.driver.switch_to.window(self.driver.window_handles[cards.index(card) + 1])
                # yield scrapy.Request(current_url, callback=self.parse_static_content)
                # switch back to the search results tab
                self.driver.switch_to.window(self.driver.window_handles[0])
                # this is where I am stuck: current_url is the search page again at this
                # point, so parse_static_content never receives the profile URL
                yield scrapy.Request(self.driver.current_url, callback=self.parse_static_content)
                # get the data and write it to scrapy items
            except Exception as e:
                print(e)

    def parse_static_content(self, response):
        logging.info(response.url)
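What I suspect (though I am not sure this is the right way) is that I should read self.driver.current_url while the new tab is still the active window, keep that URL in a variable, and only switch back to the results tab afterwards, so the request is built from the profile URL instead of the search page. A minimal sketch of how parse might look under that assumption, with the rest of the spider unchanged:

    def parse(self, response):
        self.driver.get(response.url)
        cards = self.driver.find_elements_by_xpath("//*[@id='persona-results']//a[@class='img-wrap']")
        for card in cards:
            try:
                card.click()  # the profile opens in a new tab
                # the newest tab is always the last window handle
                # (may need a short wait here for the new tab to finish opening)
                self.driver.switch_to.window(self.driver.window_handles[-1])
                profile_url = self.driver.current_url  # read the profile URL while its tab is active
                # return to the search results tab before clicking the next card
                self.driver.switch_to.window(self.driver.window_handles[0])
                # parse_static_content then receives the profile URL as response.url
                yield scrapy.Request(profile_url, callback=self.parse_static_content)
            except Exception as e:
                print(e)

    def parse_static_content(self, response):
        logging.info(response.url)  # should now log the profile URL

Does that look right, or is there a cleaner way to hand the URL over to the callback?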
Any help with the code would be appreciated.
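One more thought: since the end goal is to check the company name, I was also wondering whether I could pass whatever name is visible on the card along with the request via cb_kwargs (available in Scrapy 1.7+), so the callback gets it directly. Roughly, inside the same loop as above (card.text is only my guess at where the name lives on the card):

                company_name = card.text  # hypothetical: assumes the card text contains the name
                yield scrapy.Request(
                    profile_url,
                    callback=self.parse_static_content,
                    cb_kwargs={'company_name': company_name},
                )

    def parse_static_content(self, response, company_name):
        logging.info('%s -> %s', company_name, response.url)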