Для парсинга страницы с помощью Python вам не нужен Selenium. Гораздо быстрее и проще использовать, например, библиотеки requests и BeautifulSoup. Вот базовый пример кода для поиска стульев на IKEA — вы получите все стулья (723) со всех страниц за секунды:
import requests
from bs4 import BeautifulSoup

# Search URL template; {} is the 1-based page number.
SEARCH_URL = "https://www.ikea.com/sa/en/search/?query=chair&pageNumber={}"

# Collected products: one dict per item with "name", "desc" and "price".
result = []

# Request the first page of the query up front: it both yields the first
# batch of products and tells us how many pages exist in total.
response = requests.get(SEARCH_URL.format(1))
# raise_for_status() instead of `assert response.ok`: asserts are stripped
# under `python -O`, and this raises a descriptive HTTPError on failure.
response.raise_for_status()
page = BeautifulSoup(response.text, "html.parser")

# The last <a> inside the pagination widget holds the final page number.
last_page_number = int(page.select_one(".pagination a:last-child").text)

for i in range(1, last_page_number + 1):
    # Page 1 was already fetched above; only request pages 2..N.
    if i > 1:
        response = requests.get(SEARCH_URL.format(i))
        response.raise_for_status()
        page = BeautifulSoup(response.text, "html.parser")

    # Each container holds one product's name, description and price.
    products = page.select("#productsTable .parentContainer")
    for product in products:
        name = product.select_one(".prodName").text.strip()
        desc = product.select_one(".prodDesc").text.strip()
        # Regular or promotional price node — whichever the page shows.
        price = product.select_one(".prodPrice,.prodNlpTroPrice").text.strip()
        result.append({"name": name, "desc": desc, "price": price})

# Print the results; replace this with any post-processing you need.
for r in result:
    print(f"name: {r['name']}, price: {r['price']}, description: {r['desc']}")
print("the end")