A BeautifulSoup solution (hacky and probably very inefficient):
# Import the required packages:
from bs4 import BeautifulSoup
import requests
import pandas as pd
import re
# Store the url as a string scalar: url => str
url = "https://github.com/CSSEGISandData/COVID-19/tree/master/csse_covid_19_data/csse_covid_19_daily_reports"
# Issue request: r => requests.models.Response
r = requests.get(url)
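# Optional safeguard (an addition, not part of the original answer):
# raise an HTTPError early if the request did not succeed.
r.raise_for_status()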
# Extract text: html_doc => str
html_doc = r.text
# Parse the HTML, naming the parser explicitly to avoid bs4's parser-guessing warning: soup => bs4.BeautifulSoup
soup = BeautifulSoup(html_doc, 'html.parser')
# Find all 'a' tags (which define hyperlinks): a_tags => bs4.element.ResultSet
a_tags = soup.find_all('a')
# File links on the page have hrefs like '/CSSEGISandData/COVID-19/blob/master/...';
# dropping '/blob' and prefixing the raw host yields the raw-file url.
# Keep only links that actually have an href and end in .csv: urls => list
urls = ['https://raw.githubusercontent.com' + re.sub('/blob', '', link.get('href'))
        for link in a_tags
        if link.get('href') and link.get('href').endswith('.csv')]
# Derive a name for each DataFrame from its file name, minus the .csv extension: df_list_names => list
df_list_names = [url.split('/')[-1].split('.csv')[0] for url in urls]
# Read each csv into a DataFrame, collecting them in a list: df_list => list
df_list = [pd.read_csv(url) for url in urls]
# Pair each name with its DataFrame in a dictionary: df_dict => dict
df_dict = dict(zip(df_list_names, df_list))
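
The reports can then be pulled out of the dictionary by file name. A quick sanity check (listing the actual keys first, since the exact report names depend on what the repository contains):

# Inspect the available report names and preview one DataFrame:
print(sorted(df_dict.keys())[:5])
first_key = sorted(df_dict.keys())[0]
print(df_dict[first_key].head())

If a single combined table is more convenient, pd.concat accepts the dictionary directly and uses its keys as an outer index level (a sketch; the daily files changed column names over time, so mismatched columns will align by name and fill gaps with NaN):

# Stack all daily reports into one DataFrame, keyed by report name:
combined = pd.concat(df_dict, names=['report', 'row'])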