[Ответить Сэму Мейсону]
Вот полный код, который я использовал:
Это список импортируемых библиотек: (re, time, requests, selenium, lxml, urllib3, numpy, BeautifulSoup)
# Scrape contractor listings (name, phone, street address, website) from
# trustedpros.ca with Selenium + requests/BeautifulSoup, and write the
# collected rows as '~'-delimited lines to a text file.
#
# NOTE(review): this is a reconstruction of a whitespace-mangled paste —
# the original had machine-translated keywords ("для л в ссылках",
# "кодировка=") and stray spaces inside file-path literals; those are
# fixed here. Indentation is restored from the code's logic.

import time

import requests
from bs4 import BeautifulSoup as bs
from lxml import html
from selenium import webdriver

browser = webdriver.Chrome("/Users/gdeange1/dev/chromedriver")

# 'w+' truncates any previous run's output.
# NOTE(review): path segment was pasted as "Dev /rustpros" — presumably
# ".../Dev/trustedpros/..."; confirm against the actual directory layout.
trustedprotxtfile = open("/Users/gdeange1/Dev/trustedpros/test.txt", "w+",
                         encoding="utf-8")

# Region/city slugs appended to the site root, e.g. trustedpros.ca/ns/halifax.
links = ['ns/halifax']

for l in links:
    link = "https://trustedpros.ca/" + l
    # browser.get() returns None; no point assigning it as the original did.
    browser.get(link)

    page0 = requests.get(link)
    soup0 = bs(page0.content, "lxml")

    # Read the pagination widget to learn how many result pages to walk.
    nextpages = soup0.findAll('div', attrs={'class': 'paging-sec'})
    pagination = []
    if nextpages:
        for ul in nextpages:
            for li in ul.find_all('li'):
                liText = li.text
                if liText != '-':  # '-' entries are prev/next separators
                    pagination.append(int(liText))
    # Bug fix: max() on an empty list raises ValueError when the listing
    # has no pagination block — treat that as a single page.
    maxpagination = max(pagination) if pagination else 1

    freeNames = []
    fullPhones = []
    fullStreets = []
    fullWebsites = []

    i = 0
    while i < maxpagination:
        time.sleep(1)  # be polite to the server between result pages
        i += 1

        # Bug fix: initialize BEFORE the try so a failed lookup does not
        # leave allLinksTim undefined (NameError) in the loop below.
        allLinksTim = []
        try:
            inputElement = browser.find_elements_by_xpath(
                '//*[@id="final-search"]/div/div[1]/div[2]/a')
            for url in inputElement:
                allLinksTim.append(url.get_attribute("href"))
        except Exception:
            # Best-effort: a page with no result links is simply skipped.
            pass

        for eachLink in allLinksTim:
            browser.get(eachLink)
            page = requests.get(eachLink)
            tree = html.fromstring(page.content)
            soup = bs(page.content, "lxml")

            # --- contractor name(s) -----------------------------------
            try:
                namess = browser.find_elements_by_class_name('name-alt')
                if len(namess) > 0:
                    for name in namess:
                        freeNames.append(name.text)
                        print(name.text)
                else:
                    # Some profiles render the name under a different class.
                    names = browser.find_elements_by_class_name('name-altimg')
                    for names1 in names:
                        freeNames.append(names1.text)
                        print(names1.text)
            except Exception:
                print("Error while trying to get the names")

            # --- phone number(s) --------------------------------------
            try:
                phones = browser.find_elements_by_class_name('taptel')
                if phones:
                    for phone in phones:
                        fullPhones.append(phone.text)
                        print(phone.text)
                else:
                    print("No phones found")
            except Exception:
                print('Error while trying to get the phones')

            # --- street address(es) -----------------------------------
            try:
                streets = browser.find_elements_by_class_name('address')
                if streets:
                    for street in streets:
                        fullStreets.append(street.text)
                        print(street.text)
                else:
                    print("No street address found")
            except Exception:
                print('Error while trying to get the streets')

            # --- website link(s) --------------------------------------
            try:
                websites = soup.findAll('div', attrs={'class': 'contact-prom'})
                if websites:
                    for div in websites:
                        for url in div.find_all('a'):
                            # Only external links (opened in a new tab) are
                            # the contractor's own website.
                            if url.has_attr('target'):
                                fullWebsites.append(url['href'])
                                print(url['href'])
                else:
                    print("No websites found")
            except Exception:
                print('Error while trying to get the websites')

            browser.back()

        # Advance to the next results page. Bug fix: on the last page the
        # 'next-page' link is absent and the unconditional click crashed;
        # guard and stop paging instead.
        if i < maxpagination:
            try:
                inputElement = browser.find_element_by_class_name('next-page')
                inputElement.click()
            except Exception:
                break

    contents = []
    print("Size of free names: ", len(freeNames))
    print("Size of full phones: ", len(fullPhones))
    print("Size of full streets: ", len(fullStreets))
    print("Size of full websites: ", len(fullWebsites))

    # Rows for which all four fields were collected.
    try:
        print("list with everything")
        for i in range(min(len(freeNames), len(fullPhones),
                           len(fullStreets), len(fullWebsites))):
            c = (' ~ ' + freeNames[i] + ' ~ ' + fullPhones[i] + ' ~ '
                 + fullStreets[i] + ' ~ ' + fullWebsites[i] + ' ~ ')
            contents.append(c)
            print(c)
            trustedprotxtfile.write(c + '\n')
    except Exception:
        print('not working 1')

    # Remaining rows (no website). NOTE(review): this assumes the four
    # lists stay index-aligned per listing, which scraping misses can
    # break — verify if output rows look shuffled.
    try:
        print("list without websites")
        for i in range(min(len(freeNames), len(fullPhones),
                           len(fullStreets), len(fullWebsites)),
                       max(len(freeNames), len(fullPhones), len(fullStreets))):
            c = (' ~ ' + freeNames[i] + ' ~ ' + fullPhones[i] + ' ~ '
                 + fullStreets[i] + ' ~ ')
            contents.append(c)
            print(c)
            trustedprotxtfile.write(c + '\n')
    except Exception:
        print('not working')

print('[Сканирование завершено, спасибо за ожидание!]')
trustedprotxtfile.close()
# Bug fix: release the ChromeDriver process instead of leaking it.
browser.quit()