Я новичок в Python и Scrapy. Я пытаюсь создать паука для сбора данных со страницы: https://www.festicket.com/festivals/
Мне удалось заставить паука работать, проблема в том, что некоторые URL выглядят так: https://www.festicket.com/festivals/electric-daisy-carnival-edc-las-vegas/2018/
и к некоторым URL-адресам добавлено «/shop/#ticket», что мешает пауку сканировать страницу с деталями фестиваля.
Мой вопрос: есть ли способ, чтобы, если паук найдёт URL с «/shop/#ticket», он просто удалял «/shop/#ticket», но сохранял оставшуюся часть URL?
Мой текущий код приведён ниже:
import scrapy
class AuthorsSpider(scrapy.Spider):
    """Crawl www.festicket.com festival listing pages and scrape one
    metadata item per festival detail page.

    ``start_urls`` is built at class-definition time: the bare listing
    page plus ``?page=2`` .. ``?page=npages+1``, which mimics clicking
    the "next" button ``npages`` times.
    """

    name = "festicket"
    start_urls = ['https://www.festicket.com/festivals/']
    npages = 20

    # This mimics getting the pages using the next button.
    for i in range(2, npages + 2):
        start_urls.append("https://www.festicket.com/festivals/?page=" + str(i))

    def parse(self, response):
        """Follow every festival link found on a listing page.

        Some hrefs point at the ticket shop (they end in
        ``/shop/#ticket``) instead of the festival detail page.  Strip
        everything from ``/shop/`` onward — keeping the rest of the URL —
        so the spider always requests the detail page.
        """
        urls = response.xpath(
            "//h3[@class='festival-title heading-3ry notranslate']//@href").extract()
        for url in urls:
            url = response.urljoin(url)
            # Fix for URLs like .../festival-name/2018/shop/#ticket:
            # drop the '/shop/#ticket' tail but keep the detail-page URL.
            if '/shop/' in url:
                url = url.split('/shop/')[0] + '/'
            yield scrapy.Request(url=url, callback=self.parse_details)

    def parse_details(self, response):
        """Extract one festival item from a detail page.

        All selectors use ``extract_first`` so a missing element yields
        ``None`` rather than raising.  NOTE(review): the class names in
        these XPaths look like generated CSS-in-JS hashes and are likely
        to change when the site is redeployed — verify periodically.
        """
        # The original dict listed 'subtitle2' twice with an identical
        # XPath; the duplicate key was removed (the second occurrence
        # silently overwrote the first).
        yield {
            'title': response.xpath("//h1[@class='sc-jzJRlG gbLQoU']/text()").extract_first(),
            'festival_url': response.xpath("//meta[@property='og:url']/@content").extract_first(),
            'location': response.xpath("//ul[contains(@class,'styles__StyledList')][1]/li[contains(@class,'styles__DotSeparatorSpan-h0jg7b')][1]/descendant::text()").extract_first(),
            'address': response.xpath("//div[@class='sc-gzVnrw bpJeJY'][2]/section[@class='sc-gZMcBi gDrvBk']/div/p[@class='sc-chPdSV hifsJb']/descendant::text()").extract_first(),
            'date': response.xpath("//ul[contains(@class,'styles__StyledList')][1]/li[contains(@class,'styles__DotSeparatorSpan-h0jg7b')][2]/descendant::text()").extract_first(),
            'genre1': response.xpath("//ul[contains(@class,'styles__StyledList')][2]/li[contains(@class,'styles__DotSeparatorSpan-h0jg7b')][1]/descendant::text()").extract_first(),
            'genre2': response.xpath("//ul[contains(@class,'styles__StyledList')][2]/li[contains(@class,'styles__DotSeparatorSpan-h0jg7b')][2]/descendant::text()").extract_first(),
            'genre3': response.xpath("//ul[contains(@class,'styles__StyledList')][2]/li[contains(@class,'styles__DotSeparatorSpan-h0jg7b')][3]/descendant::text()").extract_first(),
            'subtitle1': response.xpath("//h2[@class='sc-cSHVUG gCeeYI']/descendant::text()").extract_first(),
            'subtitle2': response.xpath("//span[@class='styles__StyledHtmlWrapper-l0qhyk-0 cUaVYv sc-jAaTju jlDUtI']/p/descendant::text()").extract_first(),
            'para1': response.xpath("//span[@class='styles__StyledHtmlWrapper-s1eywhsl-0 cJBjEA sc-jAaTju jlDUtI']/p[1]/descendant::text()").extract_first(),
            'para2': response.xpath("//span[@class='styles__StyledHtmlWrapper-s1eywhsl-0 cJBjEA sc-jAaTju jlDUtI']/p[2]/descendant::text()").extract_first(),
            'para3': response.xpath("//span[@class='styles__StyledHtmlWrapper-s1eywhsl-0 cJBjEA sc-jAaTju jlDUtI']/p[3]/descendant::text()").extract_first(),
            'flyer': response.xpath("//img[contains(@class,'styles__Artwork')]/@src").extract_first(),
            'banner_image_1': response.xpath("//div[@class='styles__PhotoWrapper-s1brd5dy-2 cpnBtx'][1]/div[@class='styles__PhotoInnerWrapper-s1brd5dy-3 gVsbNY']/img[@class='styles__PhotoImage-s1brd5dy-4 cqQHmb']/@src").extract_first(),
            'banner_image_2': response.xpath("//div[@class='styles__PhotoWrapper-s1brd5dy-2 cpnBtx'][2]/div[@class='styles__PhotoInnerWrapper-s1brd5dy-3 gVsbNY']/img[@class='styles__PhotoImage-s1brd5dy-4 cqQHmb']/@src").extract_first(),
            'banner_image_3': response.xpath("//div[@class='styles__PhotoWrapper-s1brd5dy-2 cpnBtx'][3]/div[@class='styles__PhotoInnerWrapper-s1brd5dy-3 gVsbNY']/img[@class='styles__PhotoImage-s1brd5dy-4 cqQHmb']/@src").extract_first(),
        }