I am trying to scrape a sports TV schedule from multiple links on a site. The URLs are discovered and printed correctly, but only the data from the last scraped URL is output to the console and to the text file.
My code is below:
import requests
import time
from bs4 import BeautifulSoup

def makesoup(url):
    # fetch the page with the site's country/timezone cookies and parse it
    cookies = {'mycountries': '101,28,3,102,42,10,18,4,2,22', 'user_time_zone': 'Europe/London', 'user_time_zone_id': '1'}
    r = requests.post(url, cookies=cookies)
    return BeautifulSoup(r.text, "lxml")

def linkscrape(links, savefile):
    baseurl = "https://sport-tv-guide.live"
    urllist = []
    for link in links:
        # build the full URL for every event linked from the schedule page
        finalurl = (baseurl + link['href'])
        urllist.append(finalurl)
        print(finalurl)
    for singleurl in urllist:
        soup2 = makesoup(url=singleurl)
        g_data = soup2.find_all('div', {'id': 'channelInfo'})
        c_data = soup2.find_all('div', {'class': 'liveOtherStations clearfix'})
        with open(savefile, "w") as text_file:  # <-- the line I suspect (see below)
            for match in g_data:
                try:
                    event = match.find('div', class_='title full').text.strip()
                except:
                    event = ""
                try:
                    extrainfo = match.find_previous('div', class_='pt-5 text-center').text.strip()
                except:
                    extrainfo = ""
                try:
                    startime = match.find('div', class_='time full').text.strip()
                    print("Time:", startime)
                except:
                    startime = "Time element not found"
                try:
                    dateandtime = match.find('div', class_='date full').text.strip()
                    print("Date:", dateandtime)
                except:
                    dateandtime = "Date not found"
                try:
                    sport = match.find_previous('div', class_='sportTitle').text.strip()
                    print("Sport:", sport)
                except:
                    sport = "Sport element not found"
                try:
                    singlechannel = match.find('div', class_='station full').text.strip()
                    print("Main Channel:", singlechannel)
                    print("-----")
                except:
                    singlechannel = "Single Channel element not found"
                extra_channels = []
                for channel in c_data:
                    try:
                        channels = match.find('div', class_='stationLive active col-wrap')
                        print("Extra Channels:", channel.text)
                        extra_channels.append(channel.text)
                    except:
                        channels = "No channels found"
                        print(channels)
                        extra_channels.append(channel.text)
                print("-------")
                if extra_channels:
                    for channel in extra_channels:
                        text_file.writelines(
                            "__**Sport:**__" + ': ' + sport + " \n"
                            + "__**Competitors:**__" + ': ' + event + " \n"
                            + "__**Extra Info:**__" + ': ' + extrainfo + " \n"
                            + "__**Match Date:**__" + ': ' + dateandtime + " \n"
                            + "__**Match Time:**__" + ': ' + startime + " \n"
                            + "__**Main Channel**__" + ': ' + singlechannel + " \n"
                            + "__**Channels**__" + ': ' + channel + " \n"
                            + '-' * 20 + " \n")
                else:
                    text_file.writelines(
                        "__**Sport:**__" + ': ' + sport + " \n"
                        + "__**Competitors:**__" + ': ' + event + " \n"
                        + "__**Extra Info:**__" + ': ' + extrainfo + " \n"
                        + "__**Match Date:**__" + ': ' + dateandtime + " \n"
                        + "__**Match Time:**__" + ': ' + startime + " \n"
                        + "__**Main Channel**__" + ': ' + singlechannel + " \n"
                        + "__**Channels**__" + ':' + " \n"
                        + '-' * 20 + " \n")

def matches():
    # map each schedule page to the text file its results should be saved in
    pages = {"https://sport-tv-guide.live/live/darts": "/var/scripts/output/darts.txt",
             "https://sport-tv-guide.live/live/mma": "/var/scripts/output/mma.txt",
             "https://sport-tv-guide.live/live/wwe": "/var/scripts/output/wrestling.txt",
             "https://sport-tv-guide.live/live/motorsport": "/var/scripts/output/motorsport.txt",
             "https://sport-tv-guide.live/live/rugby-union": "/var/scripts/output/rugbyunion.txt",
             "https://sport-tv-guide.live/live/rugby-league": "/var/scripts/output/rugbyleague.txt",
             "https://sport-tv-guide.live/live/cricket": "/var/scripts/output/cricket.txt",
             "https://sport-tv-guide.live/live/tennis": "/var/scripts/output/tennis.txt",
             "https://sport-tv-guide.live/live/snooker": "/var/scripts/output/snooker.txt",
             "https://sport-tv-guide.live/live/golf": "/var/scripts/output/golf.txt",
             "https://sport-tv-guide.live/live/netball": "/var/scripts/output/netball.txt",
             "https://sport-tv-guide.live/live/basketball": "/var/scripts/output/nba.txt",
             "https://sport-tv-guide.live/live/baseball": "/var/scripts/output/mlb.txt",
             "https://sport-tv-guide.live/live/ice-hockey": "/var/scripts/output/nhl.txt",
             "https://sport-tv-guide.live/live/nfl": "/var/scripts/output/nfl.txt",
             "https://sport-tv-guide.live/live/boxing": "/var/scripts/output/boxing.txt"}
    for key, value in pages.items():
        soup = makesoup(url=key)
        # the site shows an alert box when no games are listed for a sport
        game_check = soup.find('div', class_='alert alert-info')
        if game_check is not None:
            with open(value, "w") as text_file:
                text_file.writelines("No games found for event")
        else:
            linkscrape(links=soup.find_all('a', {'class': 'article flag', 'href': True}), savefile=value)

matches()
This gives me the following output:

[screenshot: incorrect output, showing only the data from the last scraped URL]

I have older code, below, that works correctly and prints all of the data from every URL scraped from the main page.
import requests
from bs4 import BeautifulSoup

def makesoup(url):
    cookies = {'mycountries': '101,28,3,102,42,10,18,4,2,22', 'user_time_zone': 'Europe/London', 'user_time_zone_id': '1'}
    r = requests.post(url, cookies=cookies)
    return BeautifulSoup(r.text, "lxml")

def linkscrape(links):
    baseurl = "https://sport-tv-guide.live"
    urllist = []
    for link in links:
        finalurl = (baseurl + link['href'])
        urllist.append(finalurl)
        # print(finalurl)
    for singleurl in urllist:
        soup2 = makesoup(url=singleurl)
        g_data = soup2.find_all('div', {'id': 'channelInfo'})
        c_data = soup2.find_all('div', {'class': 'liveOtherStations clearfix'})
        for match in g_data:
            try:
                hometeam = match.find_previous('div', class_='cell40 text-center teamName1').text.strip()
                awayteam = match.find_previous('div', class_='cell40 text-center teamName2').text.strip()
                print("Competitors; ", hometeam + " " + "vs" + " " + awayteam)
            except:
                hometeam = "Home Team element not found"
                awayteam = "Away Team element not found"
            try:
                startime = match.find('div', class_='time full').text.strip()
                print("Time; ", startime)
            except:
                startime = "Time element not found"
            try:
                event = match.find('div', class_='title full').text.strip()
                print("Event:", event)
            except:
                event = "Event element not found"
            try:
                dateandtime = match.find('div', class_='date full').text.strip()
                print("Date:", dateandtime)
            except:
                dateandtime = "Date not found"
            try:
                sport = match.find('div', class_='text full').text.strip()
                print("Sport:", sport)
            except:
                sport = "Sport element not found"
            try:
                singlechannel = match.find('div', class_='station full').text.strip()
                print("Main Channel:", singlechannel)
                print("-----")
            except:
                singlechannel = "Single Channel element not found"
            for channel in c_data:
                try:
                    channels = match.find('div', class_='stationLive active col-wrap')
                    print("Extra Channels:", channel.text)
                except:
                    channels = "No channels found"
                    print(channels)
            print("-------")

def matches():
    soup = makesoup(url="https://sport-tv-guide.live/live/mma")
    linkscrape(links=soup.find_all('a', {'class': 'article flag', 'href': True}))

matches()
I believe the problem could be this line:

    with open(savefile, "w") as text_file:

so I tried moving it inside the for link in links: loop, but the result was the same (only the data from the last URL scraped from the main page is output).
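To illustrate why I suspect that line: as far as I understand, opening a file in "w" mode truncates it, so reopening it for every scraped URL would discard whatever was written for the previous URLs. A minimal sketch of that behaviour, separate from my scraper (the file name is just an example):

    # Reopening a file in "w" mode truncates it each time,
    # so only the data from the last iteration survives.
    for i in range(3):
        with open("demo.txt", "w") as text_file:  # truncates on every open
            text_file.write(f"data for url {i}\n")

    with open("demo.txt") as text_file:
        print(text_file.read())  # prints only "data for url 2"

If that is the cause, I assume opening the file once before the for singleurl in urllist: loop (or opening it with "a" instead of "w") would keep the earlier data, but since moving the open around did not fix it for me, I may be missing something else.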
Thanks to anyone who can help me solve this.