Error while scraping with Python
2 votes
/ 09 July 2020

I am trying to scrape data from the espncricinfo website. I request a page for each IPL match, but after 10 matches, 20, or sometimes even 2, I get an error, so the run never completes. My code and the error are below; please help me out. I am using requests.get() to fetch each web page from its link.

import requests 
from bs4 import BeautifulSoup
import pandas as pd
import time

X = [['ID', 'Season', 'Home', 'Away', 'TossWin', 'TossDec', 'Venue', 'Winner']]

webpages = ["https://stats.espncricinfo.com/ci/engine/records/team/match_results.html?id=2007/08;trophy=117;type=season",
            "https://stats.espncricinfo.com/ci/engine/records/team/match_results.html?id=2009;trophy=117;type=season",
            "https://stats.espncricinfo.com/ci/engine/records/team/match_results.html?id=2010;trophy=117;type=season",
            "https://stats.espncricinfo.com/ci/engine/records/team/match_results.html?id=2011;trophy=117;type=season",
            "https://stats.espncricinfo.com/ci/engine/records/team/match_results.html?id=2012;trophy=117;type=season",
            "https://stats.espncricinfo.com/ci/engine/records/team/match_results.html?id=2013;trophy=117;type=season",
            "https://stats.espncricinfo.com/ci/engine/records/team/match_results.html?id=2014;trophy=117;type=season",
            "https://stats.espncricinfo.com/ci/engine/records/team/match_results.html?id=2015;trophy=117;type=season",
            "https://stats.espncricinfo.com/ci/engine/records/team/match_results.html?id=2016;trophy=117;type=season",
            "https://stats.espncricinfo.com/ci/engine/records/team/match_results.html?id=2017;trophy=117;type=season",
            "https://stats.espncricinfo.com/ci/engine/records/team/match_results.html?id=2018;trophy=117;type=season",
            "https://stats.espncricinfo.com/ci/engine/records/team/match_results.html?id=2019;trophy=117;type=season"
        ]

# For Match ID
match_id = 1

# Iterating over Given webpages of Seasonal Match
for page in webpages:
    r = requests.get(page)
    htmlContent = r.content
    soup = BeautifulSoup(htmlContent, 'html.parser')
    # print(soup)
    # print(soup.prettify)
    
    # Finding link for all Matches Summary in given Season
    links = soup.find_all("a", class_ = "data-link", text = "T20")

    # Iterating over Matches
    for link in links:
        # print(link['href'])
        r = requests.get("https://stats.espncricinfo.com:443" + link['href'])
        # print("https://stats.espncricinfo.com" + link['href'])
        htmlContent = r.content
        soup = BeautifulSoup(htmlContent, 'html.parser')

        #finding Season
        Season_var = soup.find("a", class_ = "d-block").getText()
        season = Season_var[21:]

        #finding Short Names of Teams
        teams = []
        T = soup.find_all("a", class_ = "team-name")
        for tt in T:
            teams.append(tt.getText())

        # Finding Full Names of Teams
        full_team_names = []
        TN = soup.find_all("a", class_ = "team-name")
        for ttt in TN:
            span = ttt.find("span")
            full_team_names.append(span['title'])
        # print(full_team_names)

        # Toss Details
        toss_det = soup.find("td", text = "Toss").findNext("td").getText()
        toss_det = toss_det.split(',')
        toss_det[0] = toss_det[0][:-1]
        
        # Toss Winner
        toss_win = ""
        # print(toss_det[0],len(toss_det[0]), full_team_names[0], len(full_team_names[0]))
        if toss_det[0] == full_team_names[0]:
            toss_win = toss_win + "Team 1"
            # print(toss_det[0], full_team_names[0])
        else:
            toss_win = toss_win + "Team 2"
            # print(toss_det[0], full_team_names[1])
        # print(toss_win)
        
        # Toss Decision
        toss_array = toss_det[1].split()
        toss_dec = toss_array[2]
        # print(toss_dec)

        # Finding Ground
        full_place = soup.find("td", class_ = "match-venue").getText()
        places = full_place.split(',')
        stadium = places[0]
        
        # Finding Winner of match
        win = ""
        winner_tag = soup.find("td", text = "Points").findNext("td").getText()
        winner_arr = winner_tag.split(',')
        # print(winner_arr[0])
        # print(winner_arr[0][-1])
        # print(winner_arr[0][:-2])
        if winner_arr[0][-1] == '1':  # the cell text is a string, so compare with '1', not the int 1
            win = win + "Tie"
            # print(winner_arr, win)
        elif winner_arr[0][:-2] == full_team_names[0]:
            win = win + "Team 1"
            # print(winner_arr[0][:-2], full_team_names[0], win)
        else:
            win = win + "Team 2"
            # print(winner_arr[0][:-2], full_team_names[1], win)
        # print(win)
        
        temp = [match_id, season, teams[0], teams[1], toss_win, toss_dec, stadium, win]
        del season, teams, toss_win, toss_dec, stadium, win
        match_id = match_id + 1
        X.append(temp)
        del temp
        print("Running", match_id-1)
        time.sleep(2)


df = pd.DataFrame(X[1:], columns=X[0])  # first row of X holds the column names
df.to_csv('Matches.csv', index=False)
print("Completed")
**Error**:

Running 1
Running 2
Running 3
Traceback (most recent call last):
  File "C:\Python38\lib\site-packages\urllib3\connection.py", line 159, in _new_conn
    conn = connection.create_connection(
  File "C:\Python38\lib\site-packages\urllib3\util\connection.py", line 61, in create_connection
    for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM):
  File "C:\Python38\lib\socket.py", line 918, in getaddrinfo
    for res in _socket.getaddrinfo(host, port, family, type, proto, flags):
socket.gaierror: [Errno 11001] getaddrinfo failed

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "C:\Python38\lib\site-packages\urllib3\connectionpool.py", line 670, in urlopen
    httplib_response = self._make_request(
  File "C:\Python38\lib\site-packages\urllib3\connectionpool.py", line 381, in _make_request
    self._validate_conn(conn)
  File "C:\Python38\lib\site-packages\urllib3\connectionpool.py", line 976, in _validate_conn
    conn.connect()
  File "C:\Python38\lib\site-packages\urllib3\connection.py", line 308, in connect
    conn = self._new_conn()
  File "C:\Python38\lib\site-packages\urllib3\connection.py", line 171, in _new_conn
    raise NewConnectionError(
urllib3.exceptions.NewConnectionError: <urllib3.connection.HTTPSConnection object at 0x00000155C7FE7E50>: Failed to establish a new connection: [Errno 11001] getaddrinfo failed

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "C:\Python38\lib\site-packages\requests\adapters.py", line 439, in send
    resp = conn.urlopen(
  File "C:\Python38\lib\site-packages\urllib3\connectionpool.py", line 724, in urlopen
    retries = retries.increment(
  File "C:\Python38\lib\site-packages\urllib3\util\retry.py", line 439, in increment
    raise MaxRetryError(_pool, url, error or ResponseError(cause))
urllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='stats.espncricinfo.com', port=443): Max retries exceeded with url: /ci/engine/match/335986.html (Caused by NewConnectionError('<urllib3.connection.HTTPSConnection object at 0x00000155C7FE7E50>: Failed to establish a new connection: [Errno 11001] getaddrinfo failed'))

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "f:\GITHUB\Web-Scraping\IPL matches data\scraper.py", line 42, in <module>
    r = requests.get("https://stats.espncricinfo.com:443" + link['href'])
  File "C:\Python38\lib\site-packages\requests\api.py", line 76, in get
    return request('get', url, params=params, **kwargs)
  File "C:\Python38\lib\site-packages\requests\api.py", line 61, in request
    return session.request(method=method, url=url, **kwargs)
  File "C:\Python38\lib\site-packages\requests\sessions.py", line 530, in request
    resp = self.send(prep, **send_kwargs)
  File "C:\Python38\lib\site-packages\requests\sessions.py", line 643, in send
    r = adapter.send(request, **kwargs)
  File "C:\Python38\lib\site-packages\requests\adapters.py", line 516, in send
    raise ConnectionError(e, request=request)
requests.exceptions.ConnectionError: HTTPSConnectionPool(host='stats.espncricinfo.com', port=443): Max retries exceeded with url: /ci/engine/match/335986.html (Caused by NewConnectionError('<urllib3.connection.HTTPSConnection object at 0x00000155C7FE7E50>: Failed to establish a new connection: [Errno 11001] getaddrinfo failed'))

1 Answer

1 vote
/ 09 July 2020

It looks like the site has some anti-scraping protection.

The first step would be to add headers to your request:

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:78.0) Gecko/20100101 Firefox/78.0',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
    'Accept-Language': 'fr,fr-FR;q=0.8,en-US;q=0.5,en;q=0.3',
    'Referer': 'https://www.espncricinfo.com/',
    'Upgrade-Insecure-Requests': '1',
    'Connection': 'keep-alive',
    'Pragma': 'no-cache',
    'Cache-Control': 'no-cache',
}

and to change the following line in your code:

r = requests.get(page, headers = headers)
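
Note that the per-match request inside your inner loop needs the same headers, or those calls will still go out without them. One way to avoid repeating the headers argument is a requests.Session, which also reuses the underlying connection; a minimal sketch, assuming the same headers dict as above:

session = requests.Session()
session.headers.update(headers)   # sent automatically with every request on this session

# season results page (outer loop)
r = session.get(page)

# individual match page (inner loop)
r = session.get("https://stats.espncricinfo.com:443" + link['href'])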

Then you might consider a more "random" wait between requests:

import random
...
time.sleep(random.random()*10)
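
Since your traceback ends in [Errno 11001] getaddrinfo failed (a DNS lookup that failed mid-run), it can also help to let urllib3 retry failed connections with backoff instead of crashing on the first hiccup. A minimal sketch using Retry and HTTPAdapter; the retry counts and backoff factor here are arbitrary choices, not values taken from the site:

from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

session = requests.Session()
retries = Retry(
    total=5,            # give up after 5 attempts
    connect=5,          # also retry connection/DNS failures like this one
    backoff_factor=1,   # exponential backoff between attempts
    status_forcelist=[429, 500, 502, 503, 504],  # retry these HTTP statuses too
)
session.mount('https://', HTTPAdapter(max_retries=retries))

r = session.get(page)   # any get() through this session now retries transparently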

It ran fine for me with your code, apart from one error when toss_det was no_toss, but that is not an issue related to the website.
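
If you want to handle that case instead of letting it crash, here is a hedged sketch; the exact "no toss" wording in the Toss cell is an assumption, so verify it against a real abandoned-match page:

def parse_toss(toss_text):
    # returns (toss_winner_name, toss_decision), or (None, None) when there was no toss
    if 'no toss' in toss_text.lower():   # hypothetical marker text; check the actual page
        return None, None
    name, decision = toss_text.split(',', 1)
    # e.g. decision == ' elected to field first' -> 'field', matching the original indexing
    return name.strip(), decision.split()[2]

Matches where parse_toss() returns (None, None) can then be skipped or recorded with placeholder values.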

...