You could just loop over the 2 URLs directly. But if you were looking for a way to collect them first and then loop through them, this works:
import time
import csv
from selenium import webdriver
import selenium.webdriver.chrome.service as service
import requests
from bs4 import BeautifulSoup
import pandas as pd
root_url = 'https://www.vatainc.com/'
service = service.Service(r'C:\chromedriver_win32\chromedriver.exe')
service.start()
capabilities = {'chrome.binary': '/Google/Chrome/Application/chrome.exe'}
driver = webdriver.Remote(service.service_url, capabilities)
driver.get(root_url)
time.sleep(2)
# Grab the urls, but only keep the ones of interest
urls = [x.get_attribute('href') for x in driver.find_elements_by_xpath("//ol[contains(@class, 'nav-primary')]/li/a")]
urls = [ x for x in urls if 'html' in x ]
# It produces duplicates, so drop those and include ?limit=all to query all products
urls_list = pd.Series(urls).drop_duplicates().tolist()
urls_list = [ x +'?limit=all' for x in urls_list]
driver.close()
all_product = []
# loop through those urls and the links to generate a final product list
for url in urls_list:
    print('Url: ' + url)
    # Start a fresh remote session for each category page
    driver = webdriver.Remote(service.service_url, capabilities)
    driver.get(url)
    time.sleep(2)
    # Collect the product links on the category page
    links = [x.get_attribute('href') for x in driver.find_elements_by_xpath("//*[contains(@class, 'product-name')]/a")]
    for link in links:
        # Fetch and parse each product page, then record its URL
        html = requests.get(link).text
        soup = BeautifulSoup(html, "html.parser")
        products = soup.findAll("div", {"class": "product-view"})
        all_product.append(link)
        print(link)
    driver.close()
This produces a list of 303 links.
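Note that csv and pandas are imported but the collected links are never written out. If you also want to persist them, here is a minimal sketch (the products.csv filename and product_url column name are just placeholders I chose):

# Save the collected product-page URLs to disk; filename and column name are illustrative.
pd.DataFrame({'product_url': all_product}).to_csv('products.csv', index=False)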