Detecting JavaScript-generated content with BeautifulSoup and Selenium - PullRequest
0 votes
/ November 4, 2019

I'm trying to get all the computer science books from Pearson's site (starting from this URL: https://www.pearson.com/us/higher-education/professional---career/computer-science/computer-science.html), but the list of books in each category is generated with JavaScript.
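A quick way to confirm that the list really is rendered by JavaScript (a minimal sketch, assuming the requests package is available): the book-tile class from the code below shows up in the browser-rendered DOM but not in the raw HTTP response.

import requests
from selenium import webdriver

url = 'https://www.pearson.com/us/higher-education/professional---career/computer-science/computer-science.html'

# the raw HTML returned by the server should not contain the book tiles
raw_html = requests.get(url).text
print('content-tile-book-box' in raw_html)

# the DOM after the browser has executed the page's JavaScript should contain them
driver = webdriver.Safari()
driver.get(url)
print('content-tile-book-box' in driver.page_source)
driver.quit()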

I tried using Selenium to open the page and then parse it with BeautifulSoup. After opening a category page, I can't find the tag that contains the book information.

from selenium.webdriver.support import expected_conditions as ec
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from bs4 import BeautifulSoup

driver = webdriver.Safari()
driver.get('https://www.pearson.com/us/higher-education/professional---career/computer-science/computer-science.html')

# first I loop through the categories
categories = driver.find_elements_by_xpath('//ul[@class="category-child-list-level-2"]//a')
for i in range(len(categories)):
    print('CATEGORY : {}/170'.format(i + 1))
    categories[i].click()
    # then loop through all the book pages of the current category
    while True:
        WebDriverWait(driver, 10).until(
            ec.visibility_of_element_located((By.CLASS_NAME, "content-tile-book-box")))
        soup = BeautifulSoup(driver.page_source, 'html.parser')
        print(soup.findAll('li', attrs={'class': 'content-tile-book-box visible'}))  # always comes back empty
        for a in soup.findAll('li', attrs={'class': 'content-tile-book-box visible'}):
            # I would like to get the books' links here
            book_title_link = a.find('div', attrs={'class': 'wrap-list-block'}).find('a')
        next_page_link = driver.find_element_by_xpath('//a[@aria-label="Next"]')
        next_page_link.click()

Hope you can help me, thanks!

1 Answer

0 votes
/ November 4, 2019

Since you need to navigate back and forth between pages, the solution below uses Selenium only, without BeautifulSoup. I also used chromedriver instead of Safari.

from selenium.webdriver.support import expected_conditions as ec
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import TimeoutException, NoSuchElementException

driver = webdriver.Chrome(executable_path='C:\\Selenium\\chromedriver.exe')
url = 'https://www.pearson.com/us/higher-education/professional---career/computer-science/computer-science.html'
driver.get(url)

# first, loop through the categories
categories = driver.find_elements_by_xpath('//ul[@class="category-child-list-level-2"]//a')
total_categories = len(categories)
for i in range(total_categories):
    # re-find the category links on every pass: the old references go stale
    # after navigating away and back to the main listing
    WebDriverWait(driver, 10).until(ec.visibility_of_all_elements_located(
        (By.XPATH, '//ul[@class="category-child-list-level-2"]//a')))
    categories = driver.find_elements_by_xpath('//ul[@class="category-child-list-level-2"]//a')
    print('CATEGORY : {}/{}'.format(i + 1, total_categories))
    print("Category: " + categories[i].text)  # read the text before the click makes the element stale
    categories[i].click()
    try:
        # loop through all the book pages of the current category
        WebDriverWait(driver, 10).until(ec.visibility_of_element_located(
            (By.XPATH, "//a[@aria-label='Next']")))
        while True:
            WebDriverWait(driver, 10).until(ec.visibility_of_element_located(
                (By.CLASS_NAME, "content-tile-book-box")))
            WebDriverWait(driver, 10).until(ec.visibility_of_any_elements_located(
                (By.XPATH, "//div[@class='product-search-results-list section']//li")))
            links = driver.find_elements_by_xpath('//div[@class="wrap-list-block"]//a')
            print(len(links))
            book_links = [link.get_attribute('href') for link in links]
            print(book_links)
            try:
                next_page_link = driver.find_element_by_xpath('//a[@aria-label="Next"]')
            except NoSuchElementException:
                print("Reached the end of all books in this category")
                driver.get(url)  # go back to the main listing
                break
            next_page_link.click()
    except TimeoutException:
        # the category has a single page, so the "Next" button never appears
        print("Next button is not available")
        WebDriverWait(driver, 10).until(ec.visibility_of_any_elements_located(
            (By.XPATH, "//div[@class='product-search-results-list section']//li")))
        links = driver.find_elements_by_xpath('//div[@class="wrap-list-block"]//a')
        print(len(links))
        book_links = [link.get_attribute('href') for link in links]
        print(book_links)
        driver.get(url)  # go back to the main listing
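
One more hardening step worth mentioning (a sketch, not tested against the live site): after clicking "Next", the loop can still read links from the old page if the new results haven't replaced the DOM yet. Waiting for the previous results container to go stale closes that race:

from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.support.ui import WebDriverWait

def click_next_and_wait(driver, timeout=10):
    # hypothetical helper; the XPaths are the same ones used above
    old_results = driver.find_element_by_xpath(
        '//div[@class="product-search-results-list section"]')
    driver.find_element_by_xpath('//a[@aria-label="Next"]').click()
    # staleness_of fires once the old element is detached from the DOM,
    # i.e. the next page has actually replaced it
    WebDriverWait(driver, timeout).until(ec.staleness_of(old_results))

Calling click_next_and_wait(driver) in place of the bare next_page_link.click() guarantees that the subsequent find_elements call sees the new page's links rather than the old ones.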