Error when passing a URL from the database to the web driver - PullRequest
1 vote
/ 21 January 2020

I am using Selenium with Python to collect some data. The code works fine for a single URL (when the URL is hard-coded). In our case we have many URLs, and I planned to pass the URLs to the web driver from a database.

But when I did that, it raised an exception; the code and the exception are below. Can someone tell me what I am doing wrong?

I get the exception on the line browser.get(passed_url), but if I pass the URL as a literal string, as shown below, it works: browser.get('https://www.google.com/search?q=vitamin+b12')
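To illustrate what the value looks like, here is a standalone snippet (no database needed) reproducing the tuple-to-string conversion the loop below performs. psycopg2's fetchall() returns each row as a tuple, so str(row) keeps the repr quoting around the URL:

row = ('https://www.google.com/search?q=vitamin+b12',)  # one fetchall() row
passed_url = str(row)                      # "('https://www.google.com/search?q=vitamin+b12',)"
passed_url = passed_url.replace(',)', '')  # drops the trailing ,)
passed_url = passed_url.replace('(', '')   # drops the leading (
print(passed_url)  # 'https://www.google.com/search?q=vitamin+b12' -- the quotes remain

Chrome then rejects that quoted string as an invalid URL.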

from bs4 import BeautifulSoup
from selenium import webdriver 
from selenium.webdriver.common.by import By 
from selenium.webdriver.support.ui import WebDriverWait 
from selenium.webdriver.support import expected_conditions as EC 
from selenium.common.exceptions import TimeoutException
import psycopg2
import os
import glob
import datetime


option = webdriver.ChromeOptions()
option.add_argument("--incognito")
#browser = webdriver.Chrome(executable_path='/usr/local/bin/chromedriver/', chrome_options=option)
browser = webdriver.Chrome(executable_path='/users/user_123/downloads/chrome_driver/chromedriver', chrome_options=option)

try:
    # Database connection string
    DSN = "dbname='postgres' user='postgres' host='localhost' password='postgres' port='5432'"
    # DWH table to which data is ported
    TABLE_NAME = 'staging.search_url'
    # Connecting to the DB..
    conn = psycopg2.connect(DSN)
    print("Database connected...")
    conn.set_client_encoding('latin-1')
    cur = conn.cursor()
    cur.execute("SET datestyle='German'")
except (Exception, psycopg2.Error) as error:
    print('database connection failed')
    quit()



search_url_fetch="""select url_to_be_searched from staging.search_url"""
psql_cursor = conn.cursor()
psql_cursor.execute(search_url_fetch)
search_url_list = psql_cursor.fetchall()
print('Fetched DB values')
for row in search_url_list:
    passed_url=''
    passed_url=str(row)
    passed_url=passed_url.replace(',)','')
    passed_url=passed_url.replace('(','')
    print(passed_url)
    print("\n")

    browser.get('https://www.google.com/search?q=vitamin+b12')
    #browser.get(passed_url)
    full_titles_element = browser.find_elements_by_xpath("//div[@class='mnr-c pla-unit']")
    # use list comprehension to get the actual repo titles and not the selenium objects.
    full_text_title = [x.text for x in full_titles_element]
    # print out all the titles.
    print('Whole names that appear in site:')
    print(full_text_title, '\n')


    product_name_list = browser.find_elements_by_xpath("//span[@class='pymv4e']")
    # use list comprehension to get the actual repo titles and not the selenium objects.
    #upd_product_name_list=list(filter(None, product_name_list))

    upd_product_name_list=list(filter(None, product_name_list))
    product_name = [x.text for x in product_name_list]
    print('Product names:')
    print(product_name, '\n')
    filtered = [x for x in product_name if len(x.strip()) > 0]
    print(filtered)
    element_length=(len(filtered))
    print(element_length)
    print("\n")

    positions=[]
    for x in range(1, element_length+1):
        positions.append(x)
    print(positions)    
    print("\n")

    company_name_list = browser.find_elements_by_xpath("//div[@class='LbUacb']")
    # use list comprehension to get the actual repo titles and not the selenium objects.
    company = [x.text for x in company_name_list]
    # print out all the titles.
    print('Company Name:')
    print(company, '\n')

    urls=[]
    find_href = browser.find_elements_by_xpath("//a[@class='plantl pla-unit-single-clickable-target clickable-card']")
    for my_href in find_href:
        url_list=my_href.get_attribute("href")
        urls.append(url_list)
        #print(my_href.get_attribute("href"))
    print(urls)
    print("\n")
    result = zip(positions,filtered, urls, company)

    print(tuple(result))

Exceptions:

Warning (from warnings module):
  File "/Users/user_123/Documents/PLA/selenium_chrome_with_beautiful_soup.py", line 16
    browser = webdriver.Chrome(executable_path='/users/user_123/downloads/chrome_driver/chromedriver', chrome_options=option)
DeprecationWarning: use options instead of chrome_options

Database connected...
Fetched DB values
'https://www.google.com/search?q=vitamin+b12'


Traceback (most recent call last):
  File "/Users/user_123/Documents/PLA/selenium_chrome_with_beautiful_soup.py", line 49, in <module>
    browser.get(passed_url)
  File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/selenium/webdriver/remote/webdriver.py", line 333, in get
    self.execute(Command.GET, {'url': url})
  File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/selenium/webdriver/remote/webdriver.py", line 321, in execute
    self.error_handler.check_response(response)
  File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/selenium/webdriver/remote/errorhandler.py", line 242, in check_response
    raise exception_class(message, screen, stacktrace)
selenium.common.exceptions.InvalidArgumentException: Message: invalid argument
  (Session info: chrome=79.0.3945.130)

1 Answer

3 votes
/ 21 January 2020

You are also passing the quote character ' at the beginning and end of the string. See below: I trimmed the quotes off the string and assigned the result to a new variable, new_url.

Answer:

new_url = passed_url[1:len(passed_url)-1]
browser.get(new_url)

Example:

a = "'https://www.google.com/search?q=vitamin+b12'"
b = a[1:len(a)-1]
print(a)
print(b)
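Alternatively, since fetchall() already gives you each row as a tuple, you can skip the string manipulation entirely and index the tuple (a sketch, assuming your query selects exactly one column):

for row in search_url_list:
    passed_url = row[0]  # url_to_be_searched comes back as a plain str
    browser.get(passed_url)

This avoids both the replace() calls and the slicing.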

Your edited code is below:


from bs4 import BeautifulSoup
from selenium import webdriver 
from selenium.webdriver.common.by import By 
from selenium.webdriver.support.ui import WebDriverWait 
from selenium.webdriver.support import expected_conditions as EC 
from selenium.common.exceptions import TimeoutException
import psycopg2
import os
import glob
import datetime

option = webdriver.ChromeOptions()
option.add_argument("--incognito")
#browser = webdriver.Chrome(executable_path='/usr/local/bin/chromedriver/', chrome_options=option)
browser = webdriver.Chrome(executable_path='/users/user_123/downloads/chrome_driver/chromedriver', chrome_options=option)
try:
    # Database connection string
    DSN = "dbname='postgres' user='postgres' host='localhost' password='postgres' port='5432'"
    # DWH table to which data is ported
    TABLE_NAME = 'staging.search_url'
    # Connecting to the DB..
    conn = psycopg2.connect(DSN)
    print("Database connected...")
    conn.set_client_encoding('latin-1')
    cur = conn.cursor()
    cur.execute("SET datestyle='German'")
except (Exception, psycopg2.Error) as error:
    print('database connection failed')
    quit()



search_url_fetch="""select url_to_be_searched from staging.search_url"""
psql_cursor = conn.cursor()
psql_cursor.execute(search_url_fetch)
search_url_list = psql_cursor.fetchall()
print('Fetched DB values')
for row in search_url_list:
    passed_url=''
    passed_url=str(row)
    passed_url=passed_url.replace(',)','')
    passed_url=passed_url.replace('(','')
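    # [1:len(passed_url)-1] trims the leading and trailing single quotes left by str(row)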
    new_url = passed_url[1:len(passed_url)-1]
    print(passed_url)
    print("\n")

    #browser.get('https://www.google.com/search?q=vitamin+b12')
    browser.get(new_url)
    full_titles_element = browser.find_elements_by_xpath("//div[@class='mnr-c pla-unit']")
    # use list comprehension to get the actual repo titles and not the selenium objects.
    full_text_title = [x.text for x in full_titles_element]
    # print out all the titles.
    print('Whole names that appear in site:')
    print(full_text_title, '\n')


    product_name_list = browser.find_elements_by_xpath("//span[@class='pymv4e']")
    # use list comprehension to get the actual repo titles and not the selenium objects.
    #upd_product_name_list=list(filter(None, product_name_list))

    upd_product_name_list=list(filter(None, product_name_list))
    product_name = [x.text for x in product_name_list]
    print('Product names:')
    print(product_name, '\n')
    filtered = [x for x in product_name if len(x.strip()) > 0]
    print(filtered)
    element_length=(len(filtered))
    print(element_length)
    print("\n")

    positions=[]
    for x in range(1, element_length+1):
        positions.append(x)
    print(positions)    
    print("\n")

    company_name_list = browser.find_elements_by_xpath("//div[@class='LbUacb']")
    # use list comprehension to get the actual repo titles and not the selenium objects.
    company = [x.text for x in company_name_list]
    # print out all the titles.
    print('Company Name:')
    print(company, '\n')

    urls=[]
    find_href = browser.find_elements_by_xpath("//a[@class='plantl pla-unit-single-clickable-target clickable-card']")
    for my_href in find_href:
        url_list=my_href.get_attribute("href")
        urls.append(url_list)
        #print(my_href.get_attribute("href"))
    print(urls)
    print("\n")
    result = zip(positions,filtered, urls, company)

    print(tuple(result))
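Side note: the DeprecationWarning in your output is harmless here, but as it says, newer Selenium versions expect the keyword options instead of chrome_options when constructing the driver:

browser = webdriver.Chrome(executable_path='/users/user_123/downloads/chrome_driver/chromedriver', options=option)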