How to scrape multiple pages on Yahoo Finance with Beautiful Soup (BS4)
1 vote / 11 January 2020

I am new to Python and trying to pull some financial data from Yahoo Finance using BS4. The script works fine with a single page. However, I am now trying to scrape several pages at once, and for some reason the for url in urls: loop does not work as expected. It only scrapes the data from the last URL.

Does anyone know how to solve this?

#!/usr/bin/python
# -*- coding: utf-8 -*-
import urllib2
from bs4 import BeautifulSoup
import ssl
import json
import ast
import os
from urllib2 import Request, urlopen
import datetime 

# For ignoring SSL certificate errors
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE

# Input from the user
urls = ['https://finance.yahoo.com/quote/ALV.DE?p=ALV.DE&.tsrc=fin-srch', 'https://finance.yahoo.com/quote/SAP?p=SAP&.tsrc=fin-srch']

# Making the website believe that you are accessing it using a Mozilla browser
for url in urls:
    req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})
    webpage = urlopen(req).read()

# Creating a BeautifulSoup object of the HTML page for easy extraction of data.
soup = BeautifulSoup(webpage, 'html.parser')
html = soup.prettify('utf-8')
company_json = {}
other_details = {}

for h1 in soup.findAll('h1'):
    company_json['TICKER'] = h1.text.strip()
for span in soup.findAll('span',attrs={'class': 'Trsdu(0.3s) Trsdu(0.3s) Fw(b) Fz(36px) Mb(-4px) D(b)'}):
    company_json['PRESENT_VALUE'] = span.text.strip()
for div in soup.findAll('div', attrs={'class': 'D(ib) Va(t)'}):
    for span in div.findAll('span', recursive=False):
        company_json['PRESENT_GROWTH'] = span.text.strip()
for td in soup.findAll('td', attrs={'data-test': 'PREV_CLOSE-value'}):
    for span in td.findAll('span', recursive=False):
        other_details['PREV_CLOSE'] = span.text.strip()
for td in soup.findAll('td', attrs={'data-test': 'OPEN-value'}):
    for span in td.findAll('span', recursive=False):
        other_details['OPEN'] = span.text.strip()
for td in soup.findAll('td', attrs={'data-test': 'BID-value'}):
    for span in td.findAll('span', recursive=False):
        other_details['BID'] = span.text.strip()
for td in soup.findAll('td', attrs={'data-test': 'ASK-value'}):
    for span in td.findAll('span', recursive=False):
        other_details['ASK'] = span.text.strip()
for td in soup.findAll('td', attrs={'data-test': 'DAYS_RANGE-value'}):
    for span in td.findAll('span', recursive=False):
        other_details['DAYS_RANGE'] = span.text.strip()
for td in soup.findAll('td',attrs={'data-test': 'FIFTY_TWO_WK_RANGE-value'}):
    for span in td.findAll('span', recursive=False):
        other_details['FIFTY_TWO_WK_RANGE'] = span.text.strip()
for td in soup.findAll('td', attrs={'data-test': 'TD_VOLUME-value'}):
    for span in td.findAll('span', recursive=False):
        other_details['TD_VOLUME'] = span.text.strip()
for td in soup.findAll('td',attrs={'data-test': 'AVERAGE_VOLUME_3MONTH-value'}):
    for span in td.findAll('span', recursive=False):
        other_details['AVERAGE_VOLUME_3MONTH'] = span.text.strip()
for td in soup.findAll('td', attrs={'data-test': 'MARKET_CAP-value'}):
    for span in td.findAll('span', recursive=False):
        other_details['MARKET_CAP'] = span.text.strip()
for td in soup.findAll('td', attrs={'data-test': 'BETA_3Y-value'}):
    for span in td.findAll('span', recursive=False):
        other_details['BETA_3Y'] = span.text.strip()
for td in soup.findAll('td', attrs={'data-test': 'PE_RATIO-value'}):
    for span in td.findAll('span', recursive=False):
        other_details['PE_RATIO'] = span.text.strip()
for td in soup.findAll('td', attrs={'data-test': 'EPS_RATIO-value'}):
    for span in td.findAll('span', recursive=False):
        other_details['EPS_RATIO'] = span.text.strip()
for td in soup.findAll('td', attrs={'data-test': 'EARNINGS_DATE-value'}):
    other_details['EARNINGS_DATE'] = []
    for span in td.findAll('span', recursive=False):
        other_details['EARNINGS_DATE'].append(span.text.strip())
for td in soup.findAll('td',attrs={'data-test': 'DIVIDEND_AND_YIELD-value'}):
    other_details['DIVIDEND_AND_YIELD'] = td.text.strip()
for td in soup.findAll('td',attrs={'data-test': 'EX_DIVIDEND_DATE-value'}):
    for span in td.findAll('span', recursive=False):
        other_details['EX_DIVIDEND_DATE'] = span.text.strip()
for td in soup.findAll('td',attrs={'data-test': 'ONE_YEAR_TARGET_PRICE-value' }):
    for span in td.findAll('span', recursive=False):
        other_details['ONE_YEAR_TARGET_PRICE'] = span.text.strip()
other_details['DATE'] = str(datetime.datetime.now())
company_json['OTHER_DETAILS'] = other_details
with open('dax30_kpis.json', 'a') as outfile:
    json.dump(company_json, outfile, indent=4)
print company_json

print '----------Extraction of data is complete. Check json file.----------'

1 Answer

2 votes / 11 January 2020

Your indentation seems to be wrong:

# Making the website believe that you are accessing it using a Mozilla browser
for url in urls:
    req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})
    webpage = urlopen(req).read()
# Creating a BeautifulSoup object of the HTML page for easy extraction of data.
soup = BeautifulSoup(webpage, 'html.parser')
# ... rest of the code

should be

# Making the website believe that you are accessing it using a Mozilla browser
for url in urls:
    req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})
    webpage = urlopen(req).read()
    # Creating a BeautifulSoup object of the HTML page for easy extraction of data.
    soup = BeautifulSoup(webpage, 'html.parser')
# ... rest of the code

This change is needed because you fetch every URL inside the loop but store each response in the same variable. As a result, your implementation overwrites each scraped page with the next one and only ever processes the result returned by the last URL.
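
To see the rebinding in isolation, here is a tiny self-contained illustration (the placeholder strings stand in for the downloaded pages):

webpage = None
for url in ['page-1', 'page-2', 'page-3']:
    webpage = url          # the name is rebound on every pass
print(webpage)             # prints 'page-3': only the last value survives the loop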

You need to put all of your processing up to

#... website processing code
with open('dax30_kpis.json', 'a') as outfile:
    json.dump(company_json, outfile, indent=4)

inside the for loop, like this:

###code before###
# Making the website believe that you are accessing it using a Mozilla browser
for url in urls:
    req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})
    webpage = urlopen(req).read()

    # Creating a BeautifulSoup object of the HTML page for easy extraction of data.
    soup = BeautifulSoup(webpage, 'html.parser')
    html = soup.prettify('utf-8')
    company_json = {}
    other_details = {}

    for h1 in soup.findAll('h1'):
        company_json['TICKER'] = h1.text.strip()
    for span in soup.findAll('span',attrs={'class': 'Trsdu(0.3s) Trsdu(0.3s) Fw(b) Fz(36px) Mb(-4px) D(b)'}):
        company_json['PRESENT_VALUE'] = span.text.strip()
    for div in soup.findAll('div', attrs={'class': 'D(ib) Va(t)'}):
        for span in div.findAll('span', recursive=False):
            company_json['PRESENT_GROWTH'] = span.text.strip()
    for td in soup.findAll('td', attrs={'data-test': 'PREV_CLOSE-value'}):
        for span in td.findAll('span', recursive=False):
            other_details['PREV_CLOSE'] = span.text.strip()
    for td in soup.findAll('td', attrs={'data-test': 'OPEN-value'}):
        for span in td.findAll('span', recursive=False):
            other_details['OPEN'] = span.text.strip()
    for td in soup.findAll('td', attrs={'data-test': 'BID-value'}):
        for span in td.findAll('span', recursive=False):
            other_details['BID'] = span.text.strip()
    for td in soup.findAll('td', attrs={'data-test': 'ASK-value'}):
        for span in td.findAll('span', recursive=False):
            other_details['ASK'] = span.text.strip()
    for td in soup.findAll('td', attrs={'data-test': 'DAYS_RANGE-value'}):
        for span in td.findAll('span', recursive=False):
            other_details['DAYS_RANGE'] = span.text.strip()
    for td in soup.findAll('td',attrs={'data-test': 'FIFTY_TWO_WK_RANGE-value'}):
        for span in td.findAll('span', recursive=False):
            other_details['FIFTY_TWO_WK_RANGE'] = span.text.strip()
    for td in soup.findAll('td', attrs={'data-test': 'TD_VOLUME-value'}):
        for span in td.findAll('span', recursive=False):
            other_details['TD_VOLUME'] = span.text.strip()
    for td in soup.findAll('td',attrs={'data-test': 'AVERAGE_VOLUME_3MONTH-value'}):
        for span in td.findAll('span', recursive=False):
            other_details['AVERAGE_VOLUME_3MONTH'] = span.text.strip()
    for td in soup.findAll('td', attrs={'data-test': 'MARKET_CAP-value'}):
        for span in td.findAll('span', recursive=False):
            other_details['MARKET_CAP'] = span.text.strip()
    for td in soup.findAll('td', attrs={'data-test': 'BETA_3Y-value'}):
        for span in td.findAll('span', recursive=False):
            other_details['BETA_3Y'] = span.text.strip()
    for td in soup.findAll('td', attrs={'data-test': 'PE_RATIO-value'}):
        for span in td.findAll('span', recursive=False):
            other_details['PE_RATIO'] = span.text.strip()
    for td in soup.findAll('td', attrs={'data-test': 'EPS_RATIO-value'}):
        for span in td.findAll('span', recursive=False):
            other_details['EPS_RATIO'] = span.text.strip()
    for td in soup.findAll('td', attrs={'data-test': 'EARNINGS_DATE-value'}):
        other_details['EARNINGS_DATE'] = []
        for span in td.findAll('span', recursive=False):
            other_details['EARNINGS_DATE'].append(span.text.strip())
    for td in soup.findAll('td',attrs={'data-test': 'DIVIDEND_AND_YIELD-value'}):
        other_details['DIVIDEND_AND_YIELD'] = td.text.strip()
    for td in soup.findAll('td',attrs={'data-test': 'EX_DIVIDEND_DATE-value'}):
        for span in td.findAll('span', recursive=False):
            other_details['EX_DIVIDEND_DATE'] = span.text.strip()
    for td in soup.findAll('td',attrs={'data-test': 'ONE_YEAR_TARGET_PRICE-value' }):
        for span in td.findAll('span', recursive=False):
            other_details['ONE_YEAR_TARGET_PRICE'] = span.text.strip()
    other_details['DATE'] = str(datetime.datetime.now())
    company_json['OTHER_DETAILS'] = other_details
    with open('dax30_kpis.json', 'a') as outfile:
        json.dump(company_json, outfile, indent=4)
    print company_json
### Code after ###
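
One more caveat about the fixed version: dax30_kpis.json is opened in append mode, so every ticker (and every run) adds another top-level JSON object to the file, and a later json.load() will fail on the concatenated result. A minimal sketch of one way around that, collecting the results and writing them once at the end (the company_data name is just for illustration):

company_data = []
for url in urls:
    # ... fetch and parse as above, filling company_json ...
    company_data.append(company_json)

# A single valid JSON document: a list with one object per ticker
with open('dax30_kpis.json', 'w') as outfile:
    json.dump(company_data, outfile, indent=4)

Alternatively, writing one JSON object per line (JSON Lines) keeps an append-mode workflow usable, since each line can then be parsed independently.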