Я работаю над извлечением данных с веб-сайта Indeed для проекта по науке о данных. Хотя я могу успешно очистить различные части страницы, у меня есть проблемы с извлечением элементов из JSON-части страницы.
Кто-нибудь знает, как я мог бы извлечь элементы из URL-адреса ниже? >>> view-source:https://www.indeed.com/viewjob?jk=41abec7fde3513dc&tk=1dn0mslbr352v000&from=serp&vjs=3&advn=9434814581076032&adid=197003786&sjdu=BbcXv7z69Xez4bal0Fx7iYB6jxzlBG3p6CfmfgjyGDErM4mqXgOsfEsOF5maJ2GRnKJsHskFl8aEbb4LlD5LibXOuIs0dzzHfVCmKB00C2c43rDVhEZX_8Zmg4zqEyqG5LEfQjRfoyOhULxXHTMitWOUjMOdLRt367-ZewSzfkqUSnPzHungl7uY7NcfOFLy
.
Элементы, которые нужно извлечь, приведены ниже, например:
"jobLocation":"Arlington, TX
Образец сценария, который я запускаю, находится ниже
import scrapy
from scrapy import Request
from scrapy.crawler import CrawlerProcess
import boto3
class JobsSpider1(scrapy.Spider):
    """Crawl Indeed search results for "owner operator" truck jobs and
    scrape each job-posting page.

    Items are exported as JSON via the FEED settings (me_test.json).
    """

    name = "indeed"
    allowed_domains = ["indeed.com"]
    start_urls = ["https://www.indeed.com/jobs?q=\"owner+operator\"+\"truck\"&l=augusta"]
    custom_settings = {
        'FEED_FORMAT': 'json',
        'FEED_URI': 'me_test.json',
    }

    def parse(self, response):
        """Parse one search-results page.

        Queues a Request for every job posting found, carrying the title and
        URL in meta, then follows the pagination link if one exists.
        """
        for job in response.xpath('//div[@class="title"]'):
            title = job.xpath('a//@title').extract_first()
            posting_link = job.xpath('a//@href').extract_first()
            # Guard: a result row without an href would make the "+" below
            # raise TypeError (str + None); skip such rows instead.
            if posting_link is None:
                continue
            posting_url = "https://indeed.com" + posting_link
            yield Request(posting_url, callback=self.parse_page,
                          meta={'title': title, 'posting_url': posting_url})

        relative_next_url = response.xpath('//link[@rel="next"]/@href').extract_first()
        # On the last results page there is no rel="next" link; extract_first()
        # returns None and the old unconditional concatenation crashed the spider.
        if relative_next_url is not None:
            yield Request("https://indeed.com" + relative_next_url, callback=self.parse)

    def parse_page(self, response):
        """Scrape a single job-posting page and yield one flat item dict."""
        posting_url = response.meta.get('posting_url')
        job_title = response.meta.get('title')
        job_descriptions = response.xpath(
            '//*[@class="jobsearch-jobDescriptionText"]/ul').extract_first()
        job_listing_header = response.xpath(
            '//*[@class="jobSectionHeader"]/ul').extract_first()
        posted_on_date = response.xpath(
            '//*[@class="jobsearch-JobMetadataFooter"]/text()').extract_first()
        job_location = response.xpath(
            '//*[@class="jobsearch-InlineCompanyRating icl-u-xs-mt--xs '
            'jobsearch-DesktopStickyContainer-companyrating"]/div[3]/text()'
        ).extract_first()
        yield {
            'job_title': job_title,
            'posting_url': posting_url,
            'job_listing_header': job_listing_header,
            'job_location': job_location,
            'job_descriptions': job_descriptions,
            'posted_on_date': posted_on_date,
        }
```````````````````````````````````````````````````````````````
######################################################
############# UPDATED CODE #########################
############# UPDATED CODE ###########################
#############################################
import time
import os, sys
import json
import scrapy
from scrapy import Request
from scrapy.crawler import CrawlerProcess
import boto3
from scrapy.loader.processors import Join
import re
class JobsSpider1(scrapy.Spider):
    """Crawl Indeed search results for "owner operator" truck jobs and
    scrape each job-posting page, pulling the job location out of the
    ``window._initialData`` JSON blob embedded in the page.

    Items are exported as CSV via the FEED settings (test.csv).
    """

    name = "indeed"
    allowed_domains = ["indeed.com"]
    start_urls = ["https://www.indeed.com/jobs?q=\"owner+operator\"+\"truck\"&l=augusta"]
    custom_settings = {
        'FEED_FORMAT': 'csv',
        'FEED_URI': 'test.csv',
    }

    def parse(self, response):
        """Parse one search-results page.

        Queues a Request for every job posting found, carrying the title and
        URL in meta, then follows the pagination link if one exists.
        """
        for job in response.xpath('//div[@class="title"]'):
            title = job.xpath('a//@title').extract_first()
            posting_link = job.xpath('a//@href').extract_first()
            # Guard: a result row without an href would make the "+" below
            # raise TypeError (str + None); skip such rows instead.
            if posting_link is None:
                continue
            posting_url = "https://indeed.com" + posting_link
            yield Request(posting_url, callback=self.parse_page,
                          meta={'title': title, 'posting_url': posting_url})

        relative_next_url = response.xpath('//link[@rel="next"]/@href').extract_first()
        # On the last results page there is no rel="next" link; extract_first()
        # returns None and an unconditional concatenation would crash the spider.
        if relative_next_url is not None:
            yield Request("https://indeed.com" + relative_next_url, callback=self.parse)

    def parse_page(self, response):
        """Scrape a single job-posting page and yield one flat item dict.

        The job location lives in the JSON assigned to ``window._initialData``
        inside a <script> tag, so we locate that script, regex out the JSON
        object, and parse it with ``json.loads``.
        """
        posting_url = response.meta.get('posting_url')
        job_title = response.meta.get('title')
        job_listing_header = response.xpath(
            '//*[@class="jobSectionHeader"]/b').extract_first()

        # BUG FIX: the original referenced an undefined name ``script_text``
        # (NameError on every page). Pull the script body from the response
        # first; fall back to '' so re.search always gets a string.
        script_text = response.xpath(
            '//script[contains(text(), "window._initialData")]/text()'
        ).extract_first() or ''

        # Non-greedy match up to the first "};" terminator; guard against a
        # missing match instead of calling .group(1) on None (AttributeError).
        renew = None
        match = re.search(r'window\._initialData\s*=\s*(\{.+?\});', script_text)
        if match:
            data_obj = json.loads(match.group(1))
            # .get() instead of [] so a posting without jobLocation still yields.
            renew = data_obj.get('jobLocation')

        yield {
            'job_title': job_title,
            'renew': renew,
            'posting_url': posting_url,
            'job_listing_header': job_listing_header,
        }