Creating a table in the Templates folder in Django
0 votes
/ March 25, 2019

I am trying to create a table in Django using a template file I named law.html, with data formatted in a DataFrame by a function I wrote to scrape information from a public web page. I am trying to use a for loop to iterate over the data, but for some reason I cannot get the desired output.

So far I have a DataFrame named newlaw, which is returned by the function all_data. The newlaw DataFrame is a list of solicitors' names and positions. I then imported all_data into my views.py file and passed it to the template in a context dictionary. In my law.html I am trying to build the table with a for loop so that I can place each piece of data in its own cell.

The code in my views.py

```
def law_view(request, *args, **kwargs):
    data = combine_data()
    return render(request, "law.html", {'data': data})
```

The code in my law.html
```
<table class="table table-striped">
    <thead>
        <tr>
            <th>Solicitor_Names</th>
            <th>Offices</th>     
        </tr>
    </thead>
    <tbody>
        {%for solicitor in all_data%}
          <tr>
              <td>{{ solicitor }}</td>
          </tr>
        {% endfor %}
    </tbody>
</table>
```

This code outputs only the column names. My desired output would look like this:

Solicitor_Name        Office
John Marston          Ernst & Young
Amy Smith             Kingston Smith
....
....

This is all_data = combine():

```
def combine():
    from bs4 import BeautifulSoup
    import requests
    import pandas as pd

    # Base search URL; the loop below appends '=&Page=<n>' for paging.
    base_url = 'http://solicitors.lawsociety.org.uk/search/results?Type=1&IncludeNlsp=True&Pro=True&parameters=%2C1%3BAPL%2C0%3B%2C1%3BPUB%2C0%3B%2C1%3BADV%2C0%3B%2C1%3BAGR%2C0%3B%2C1%3BAVI%2C0%3B%2C1%3BBAN%2C1%3B%2C1%3BBEN%2C0%3B%2C1%3BCHA%2C0%3B%2C1%3BCHI%2C0%3B%2C1%3BCLI%2C0%3B%2C1%3BCOL%2C1%3B%2C1%3BPCO%2C1%3B%2C1%3BCCL%2C0%3B%2C1%3BCOS%2C1%3B%2C1%3BCOM%2C1%3B%2C1%3BCON%2C1%3B%2C1%3BCSU%2C0%3B%2C1%3BCSF%2C0%3B%2C1%3BCSG%2C0%3B%2C1%3BCUT%2C0%3B%2C1%3BCTR%2C1%3B%2C1%3BPRE%2C0%3B%2C1%3BCFI%2C1%3B%2C1%3BCRD%2C0%3B%2C1%3BCRF%2C0%3B%2C1%3BCRG%2C0%3B%2C1%3BCRJ%2C0%3B%2C1%3BCRL%2C0%3B%2C1%3BCRM%2C0%3B%2C1%3BCRS%2C0%3B%2C1%3BCRO%2C1%3B%2C1%3BDEB%2C0%3B%2C1%3BDTR%2C1%3B%2C1%3BDEF%2C0%3B%2C1%3BDRC%2C0%3B%2C1%3BDRO%2C1%3B%2C1%3BEDU%2C0%3B%2C1%3BELC%2C0%3B%2C1%3BELH%2C0%3B%2C1%3BEMP%2C1%3B%2C1%3BENE%2C0%3B%2C1%3BENV%2C0%3B%2C1%3BEUN%2C0%3B%2C1%3BFDS%2C0%3B%2C1%3BFAM%2C0%3B%2C1%3BFAL%2C0%3B%2C1%3BFMC%2C0%3B%2C1%3BFME%2C0%3B%2C1%3BFML%2C0%3B%2C1%3BFPL%2C0%3B%2C1%3BFIS%2C0%3B%2C1%3BHRI%2C0%3B%2C1%3BIMA%2C0%3B%2C1%3BIML%2C0%3B%2C1%3BIMM%2C0%3B%2C1%3BIMG%2C0%3B%2C1%3BIMN%2C0%3B%2C1%3BITE%2C1%3B%2C1%3BINS%2C1%3B%2C1%3BIUR%2C1%3B%2C1%3BIPR%2C1%3B%2C1%3BJRW%2C0%3B%2C1%3BJRL%2C0%3B%2C1%3BLCO%2C1%3B%2C1%3BLRE%2C0%3B%2C1%3BPOA%2C0%3B%2C1%3BLIC%2C1%3B%2C1%3BLIV%2C0%3B%2C1%3BLIS%2C0%3B%2C1%3BLIT%2C0%3B%2C1%3BLPH%2C0%3B%2C1%3BLPP%2C0%3B%2C1%3BMAR%2C0%3B%2C1%3BMED%2C1%3B%2C1%3BMHE%2C0%3B%2C1%3BMHL%2C0%3B%2C1%3BMAA%2C1%3B%2C1%3BMIL%2C0%3B%2C1%3BNDI%2C0%3B%2C1%3BPEN%2C1%3B%2C1%3BPIN%2C0%3B%2C1%3BPIR%2C0%3B%2C1%3BPLA%2C0%3B%2C1%3BPRZ%2C0%3B%2C1%3BPRP%2C0%3B%2C1%3BPRT%2C0%3B%2C1%3BPRW%2C0%3B%2C1%3BPCI%2C0%3B%2C1%3BPCP%2C0%3B%2C1%3BPCT%2C0%3B%2C1%3BPCW%2C0%3B%2C1%3BPNE%2C0%3B%2C1%3BTAX%2C0%3B%2C1%3BTAC%2C1%3B%2C1%3BTAE%2C0%3B%2C1%3BTAH%2C1%3B%2C1%3BTAM%2C0%3B%2C1%3BTAP%2C0%3B%2C1%3BTAT%2C0%3B+'

    names = []
    roles = []
    offices = []
    locations = []

    # Only page 1 is scraped here; widen the range to fetch more pages.
    for i in range(1, 2):
        url = base_url + '=&Page=' + str(i)
        response = requests.get(url)
        response.raise_for_status()
        soup = BeautifulSoup(response.content, 'html.parser')

        # Office name and address for each result on the listing page.
        hp_sol_data = soup.find_all('section', {'class': 'solicitor'})

        for sol in hp_sol_data:
            try:
                addy = sol.contents[7].find_all('dd', {'class': 'feature highlight'})[0].text
                locations.append(addy)
            except IndexError:
                locations.append('None Found')
            try:
                office_names = sol.contents[7].find_all('dd', {'class': 'highlight'})[0].text
                offices.append(office_names.strip())
            except IndexError:
                offices.append('None Found')

        # Follow each person link to collect the solicitor's name and role.
        for link in soup.find_all('a', href=True):
            if link.get('href').startswith('/person/'):
                tags = link.get('href')

                url2 = 'http://solicitors.lawsociety.org.uk' + str(tags)
                r2 = requests.get(url2)
                soup2 = BeautifulSoup(r2.content, 'html.parser')

                s_data = soup2.find_all('article', {'class': 'solicitor solicitor-type-individual details'})

                for item in s_data:
                    solicitor_names = item.contents[3].find_all('h1')[0].text
                    names.append(solicitor_names)
                    try:
                        role = item.find_all('div', {'class': 'panel-half'})[1].find('dd').get_text('')
                        roles.append(role.strip())
                    except IndexError:
                        roles.append('Role not specified')

    tls_solicitors = pd.DataFrame(
        {'Solicitor_Name': names, 'Role': roles, 'Office': offices, 'Address': locations},
        columns=['Solicitor_Name', 'Office', 'Address', 'Role'])
    law = tls_solicitors
    # Split the multi-line Role string into primary and secondary roles.
    newd = law['Role'].str.split('\n', n=3, expand=True)
    role_1 = newd[0]
    role_2 = newd[1]
    law = law.drop('Role', axis=1)  # drop() returns a new frame; assign it back

    # Note: this is a single-item list whose values are entire lists,
    # not one dictionary per solicitor.
    all_data = [{'name': names, 'office': offices, 'address': locations,
                 'primary_role': role_1, 'secondary_role': role_2}]

    return all_data
```

1 Answer

0 votes
/ March 25, 2019
```
...
{% for a in data %}
    <tr>
        <td>{{a.name}}</td>
        <td>{{a.office}}</td>
    </tr>
{% endfor %}
...
```

In this case, your data must be a list of dictionaries: [{'name': 'some name', 'office': 'some office'}, ...]
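
To get data into that shape, the end of combine() needs to build one dictionary per solicitor rather than a single dictionary of whole lists. A minimal sketch, assuming the parallel lists names, offices and locations and the role_1/role_2 columns built in the question's combine(), all of the same length:

```
# Build one dict per solicitor by zipping the parallel lists together.
all_data = [
    {'name': n, 'office': o, 'address': a,
     'primary_role': p, 'secondary_role': s}
    for n, o, a, p, s in zip(names, offices, locations, role_1, role_2)
]
return all_data
```

Alternatively, since the rows already live in a DataFrame, tls_solicitors.to_dict('records') yields the same list-of-dicts shape, but with the column names as keys, so the template would then need {{ a.Solicitor_Name }} and {{ a.Office }}.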
