После просмотра логов командой heroku logs --tail --app "app_name" я вижу упомянутую ошибку «no such table». Локально моя база данных работает без ошибок, но на Heroku приложение выдаёт ошибку «no such table».
import base64
import datetime
import io
import os
import sqlite3
from datetime import date, timedelta

import flask
import pandas as pd
import sqlalchemy as sa

import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_table
from dash.dependencies import Input, Output, State
# ---------------------------------------------------------------------------
# Database and application setup.
#
# NOTE(review): the hard-coded Windows path was the root cause of the
# "no such table" error on Heroku: sqlite3.connect() silently CREATES an
# empty database file when the given path does not exist, so the very first
# query then fails with "no such table: ABCC1".  The path is therefore made
# overridable through the DATABASE_PATH environment variable; on Heroku set
# it to the Grease.db file shipped inside the repository
# (e.g. heroku config:set DATABASE_PATH=Grease.db).
# ---------------------------------------------------------------------------
external_stylesheets = ["https://codepen.io/chriddyp/pen/bWLwgP.css"]

DB_PATH = os.environ.get(
    "DATABASE_PATH",
    'C:\\Users\\Admin\\Downloads\\Python Scripts\\Next_Gresing_Date\\Grease.db')

# check_same_thread=False: Flask/Dash handles callbacks on worker threads,
# so the single connection must be usable outside the creating thread.
con = sqlite3.connect(DB_PATH, check_same_thread=False)
df = pd.read_sql_query('SELECT * FROM ABCC1;', con)
dataaa = df  # alias used by the data-preparation code further below

### SQL Engine
# One engine/metadata pair is enough; the original created the engine and
# MetaData twice and discarded the first connection.
disk_engine = sa.create_engine("sqlite:///" + DB_PATH,
                               connect_args={"check_same_thread": False})
connection = disk_engine.connect()
metadata = sa.MetaData()
# Schema of the greasing-history table (the data itself is read via pandas).
SQL_table = sa.Table(
    "ABCC1",
    metadata,
    sa.Column("Site", sa.VARCHAR),
    sa.Column("Last_Greased_Date:YYYY-MM-DD", sa.TEXT),
    sa.Column("Department", sa.VARCHAR),
    sa.Column("Equipment_ID", sa.VARCHAR),
    sa.Column("Equipment_Name", sa.VARCHAR),
    sa.Column("HAC_Code", sa.VARCHAR),
    sa.Column("Frequency_Schedule_Days", sa.INTEGER),
    sa.Column("NEXT_Date:YYYY-MM-DD", sa.TEXT),
    sa.Column("Grease_Grade", sa.VARCHAR),
    sa.Column("Point", sa.INTEGER),
    sa.Column("Stroke", sa.INTEGER),
    sa.Column("Grease_Gun_No(gm_per_stroke)", sa.VARCHAR),
    sa.Column("Quantity_Grease_In_Grams(in_one_stroke)", sa.FLOAT),
    sa.Column("Total_Quantity_Grease_Used(in_gms)", sa.FLOAT),
    sa.Column("Name_Of_Technicians", sa.TEXT),
    sa.Column("Remarks", sa.VARCHAR),
)
# Schema of the scheduler table that drives the greasing frequency.
SQL_TABLE = sa.Table(
    "Scheduler",
    metadata,
    sa.Column("Department", sa.VARCHAR),
    sa.Column("EqName", sa.VARCHAR),
    sa.Column("EqId", sa.VARCHAR),
    sa.Column("GreaseGrade", sa.VARCHAR),
    sa.Column("Point", sa.INTEGER),
    sa.Column("Stroke", sa.INTEGER),
    sa.Column("gmperstroke", sa.FLOAT),
    sa.Column("TotalGreaseused(ingms.)", sa.FLOAT),
    sa.Column('GreaseNippleStatus', sa.TEXT),
    sa.Column("Schedule.freqDays", sa.INTEGER),
    sa.Column("AttendeBy", sa.VARCHAR),
    sa.Column("Remark/anyabnormalitiesfound", sa.VARCHAR),
)
dss = pd.read_sql_query('SELECT * FROM Scheduler;', con)

app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
# Expose the Flask server created by Dash itself.  The original code built a
# second, unrelated flask.Flask(...) instance here; gunicorn (Procfile
# "web: gunicorn app:server") would then serve an app without any Dash routes.
server = app.server
def serve_layout():
    """Build the page layout: an upload box, a department dropdown, a single
    date picker and the "next greasing dates" results table.

    Assigned as ``app.layout = serve_layout`` (the function itself, not its
    result) so Dash re-evaluates it on every page load.
    """
    # Column ids must match the record keys produced by the ngrease_table
    # callback.
    table_columns = ['Site', 'Last_Greased_Date:YYYY-MM-DD', 'Department',
                     'Equipment_ID', 'Equipment_Name',
                     'HAC_Code', 'Frequency_Schedule_Days',
                     'NEXT_Date:YYYY-MM-DD', 'Grease_Grade', 'Point', 'Stroke',
                     # BUG FIX: was 'Grease_Gun(gm_per_stroke)', which does not
                     # match the 'Grease_Gun_No(gm_per_stroke)' name used
                     # everywhere else, so the column could never be filled.
                     'Grease_Gun_No(gm_per_stroke)',
                     'Quantity_Grease_In_Grams(in_one_stroke)',
                     'Total_Quantity_Grease_Used(in_gms)',
                     'Name_Of_Technicians', 'Remarks']
    # Columns rendered centered; everything else stays left-aligned.
    centered_columns = ['Equipment_ID',
                        # BUG FIX: was 'HAC_CODE' (wrong case), so this rule
                        # never matched the actual 'HAC_Code' column.
                        'HAC_Code',
                        'Last_Greased_Date:YYYY-MM-DD',
                        'Frequency_Schedule_Days',
                        'NEXT_Date:YYYY-MM-DD', 'Point',
                        'Quantity_Grease_In_Grams(in_one_stroke)', 'Stroke']
    cell_conditional = [{'if': {'column_id': c}, 'textAlign': 'center'}
                        for c in centered_columns]
    cell_conditional += [{'if': {'column_id': 'Date:Year-Month-Date'},
                          'width': '40%'},
                         {'if': {'column_id': 'NEXT_Date:YYYY-MM-DD'},
                          'width': '40%'}]
    layout = html.Div([
        html.H4("Next Greasing Date"),
        html.H4("Upload"),
        dcc.Upload(
            id="upload-data",
            children=html.Div(
                ["Drag and drop or click to select a file to upload."]
            ),
            multiple=True,
            style={
                "width": "100%",
                "height": "60px",
                "lineHeight": "60px",
                "borderWidth": "1px",
                "borderStyle": "dashed",
                "borderRadius": "5px",
                "textAlign": "center",
                "margin": "10px",
            }
        ),
        html.Div(id='output-of-upload'),
        html.P([
            html.Label('Choose a Department:', style={'fontSize': 18}),
            dcc.Dropdown(
                id='dept_input',
                # dataa is the enriched history DataFrame built at module level
                options=[{'label': i, 'value': i}
                         for i in dataa['Department'].unique()],
                style={'height': '30px', 'width': '300px'}
            )], className="three columns"),
        html.P([
            html.Label('Choose Date:Day-Month-Year', style={'fontSize': 18}),
            dcc.DatePickerSingle(
                id="single",
                month_format='MMMM Y',
                placeholder='DD-MM-YYYY',
                with_portal=True,
                clearable=True,
                display_format="DD-MM-YYYY",
            )], ),
        html.P([
            html.Label('Next Greasing Dates', style={'fontSize': 18}),
            html.Div([
                dash_table.DataTable(
                    id='next_greasing_dates', sort_action="native",
                    sort_mode="multi",
                    columns=[{"name": c, "id": c} for c in table_columns],
                    export_format='csv',
                    export_columns="all",
                    export_headers='display',
                    merge_duplicate_headers=True,
                    style_cell={'textAlign': 'left'},
                    style_cell_conditional=cell_conditional,
                    style_header={
                        'backgroundColor': 'rgb(230, 230, 230)',
                        'fontWeight': 'bold'},
                    style_table={'overflowX': 'scroll'}, )
            ])
        ]),
    ]
    )
    return layout
def parse_contents(contents, filename, date):
    """Decode one uploaded file and append its rows to the ABCC1 table.

    Parameters
    ----------
    contents : str
        Data-URL supplied by dcc.Upload ("<content-type>,<base64 payload>").
    filename : str
        Original file name; its extension selects the parser.
    date : int
        Last-modified timestamp (seconds since the epoch).

    Returns
    -------
    An html.Div reporting either success (name + timestamp) or an error.
    """
    content_type, content_string = contents.split(',')
    decoded = base64.b64decode(content_string)
    try:
        if 'csv' in filename:
            # Assume that the user uploaded a CSV file
            uploaded = pd.read_csv(io.StringIO(decoded.decode('utf-8')))
        elif 'xls' in filename:
            # Assume that the user uploaded an excel file (.xls/.xlsx)
            uploaded = pd.read_excel(io.BytesIO(decoded))
        else:
            # BUG FIX: unsupported extensions previously fell through
            # silently and were still reported as a successful upload.
            return html.Div([
                'Unsupported file type: only CSV and Excel files can be uploaded.'
            ])
        # Append into the shared module-level sqlite connection.
        uploaded.to_sql('ABCC1', con, if_exists='append', index=False)
    except Exception as e:
        print(e)
        return html.Div([
            'There was an error processing this file.'
        ])
    return html.Div([
        html.H5(filename),
        html.H6(datetime.datetime.fromtimestamp(date))
    ])
# ---------------------------------------------------------------------------
# Enrich the greasing history with scheduler information.
# ---------------------------------------------------------------------------
# New columns start at 0; a remaining 0 later means "no scheduler entry
# matched this row" (the callback skips rows with freq == 0 for that reason).
for _col in ('freq', 'grade', 'qty', 'stroke', 'point', 'man', 'depty'):
    dataaa[_col] = 0
# Normalise the last-greased date to pandas datetime.  (The original also
# called .dt.strftime here and discarded the result — a no-op, removed.)
dataaa['Last_Greased_Date:YYYY-MM-DD'] = pd.to_datetime(
    dataaa['Last_Greased_Date:YYYY-MM-DD'])
dataa = dataaa  # alias used by serve_layout and the callbacks
dataa['date'] = pd.to_datetime(dataa['Last_Greased_Date:YYYY-MM-DD'])

# Mapping: destination column in dataa -> source column in the scheduler.
_SCHED_COLS = {'freq': 'Schedule.freqDays',
               'depty': 'Department',
               'grade': 'GreaseGrade',
               'point': 'Point',
               'qty': 'TotalGreaseused(ingms.)',
               'stroke': 'Stroke',
               'man': 'AttendeBy'}
for i in range(len(dss)):
    eq_name = dss['EqName'][i]
    eq_id = dss['EqId'][i]
    if (dataa['Equipment_ID'] == eq_id).any():
        # The scheduler id matches an equipment id somewhere in the history.
        mask = (dataa['Equipment_Name'] == eq_name) & \
               (dataa['Equipment_ID'] == eq_id)
    else:
        # Otherwise treat the scheduler id as a HAC code.
        mask = (dataa['Equipment_Name'] == eq_name) & \
               (dataa['HAC_Code'] == eq_id)
    for _dst, _src in _SCHED_COLS.items():
        # BUG FIX: the original used chained assignment
        # (dataa['freq'][mask] = ...), which raises SettingWithCopyWarning
        # and is not guaranteed to write through; .loc is the supported form.
        dataa.loc[mask, _dst] = dss[_src][i]

# Sort by date descending so that keep='first' below retains the most recent
# greasing record of every equipment.
dataa['Last_Greased_Date:YYYY-MM-DD'] = \
    dataa['Last_Greased_Date:YYYY-MM-DD'].astype(str)
d = dataa.sort_values("Last_Greased_Date:YYYY-MM-DD", ascending=False)
d.reset_index(inplace=True)
# NOTE: the original also computed drop_duplicates on
# ["Equipment_Name", "HAC_Code"] and immediately discarded the result; only
# the Equipment_ID/Equipment_Name de-duplication ever took effect.
di = d.drop_duplicates(["Equipment_ID", "Equipment_Name"], keep='first')
di.drop('index', axis=1, inplace=True)
di.reset_index(inplace=True)

# Assign the function itself (not its result) so the layout is rebuilt on
# every page load.
app.layout = serve_layout
@app.callback(Output('next_greasing_dates', 'data'),
              [Input('single', 'date'),
               Input('dept_input', 'value')])
def ngrease_table(datee, dept):
    """Project every upcoming greasing date for the selected department.

    For each piece of equipment the number of whole frequency intervals
    between its last greasing date and the chosen date is computed, and one
    row per projected greasing (spaced ``freq`` days apart) is emitted.  The
    projection is appended to the Next_Datee table and returned as records
    for the DataTable.

    Parameters
    ----------
    datee : str | None
        Date in ISO 'YYYY-MM-DD' form (None until the user picks one).
    dept : str | None
        Selected department (None until the user picks one).
    """
    # BUG FIX: Dash fires the callback once on page load with both inputs
    # still None; the original crashed there on datee.split('-').
    if datee is None or dept is None:
        return []
    data = di[di['Department'] == dept]
    data1 = data[['Site', 'Last_Greased_Date:YYYY-MM-DD', 'Department',
                  'Equipment_ID', 'Equipment_Name',
                  'HAC_Code', 'Frequency_Schedule_Days', 'NEXT_Date:YYYY-MM-DD',
                  'Grease_Grade', 'Point', 'Stroke',
                  'Grease_Gun_No(gm_per_stroke)',
                  'Quantity_Grease_In_Grams(in_one_stroke)',
                  'Total_Quantity_Grease_Used(in_gms)',
                  'Name_Of_Technicians', 'Remarks', 'freq', 'date', "grade",
                  'point', "qty", "stroke", "man"]].copy()
    data1.reset_index(drop=True, inplace=True)
    end_date = date(*(int(part) for part in datee.split('-')))
    frames = []
    for i in range(len(data1)):
        freq = data1['freq'][i]
        if freq == 0:
            # 0 means "no scheduler entry matched"; it would also divide by
            # zero below.
            continue
        y, mo, dy = (int(part) for part in
                     data1['Last_Greased_Date:YYYY-MM-DD'][i].split('-'))
        # Whole number of frequency intervals between the last greasing and
        # the chosen date == how many greasings are due in that window.
        due = ((end_date - date(year=y, month=mo, day=dy)) // freq).days
        # One projected date per interval, `freq` days apart.
        next_dates = [data1.date[i] + timedelta(days=int(freq * (k + 1)))
                      for k in range(due)]
        # BUG FIX: the original filled NEXT_Date via chained assignment
        # (df1[col][j] = ...) inside a loop; the dates are built up front now.
        frames.append(pd.DataFrame({
            'Site': [data1['Site'][i]] * due,
            'Last_Greased_Date:YYYY-MM-DD':
                [data1['Last_Greased_Date:YYYY-MM-DD'][i]] * due,
            'Department': [data1['Department'][i]] * due,
            'Equipment_ID': [data1['Equipment_ID'][i]] * due,
            'Equipment_Name': [data1['Equipment_Name'][i]] * due,
            'HAC_Code': [data1['HAC_Code'][i]] * due,
            'Frequency_Schedule_Days': [freq] * due,
            'NEXT_Date:YYYY-MM-DD': next_dates,
            "Grease_Grade": [data1["grade"][i]] * due,
            "Point": [data1["point"][i]] * due,
            "Stroke": [data1["stroke"][i]] * due,
            "Quantity_Grease_In_Grams(in_one_stroke)":
                [data1["qty"][i]] * due}))
    if not frames:
        # BUG FIX: with no matching rows the original indexed a column of an
        # empty DataFrame and raised KeyError.
        return []
    df2 = pd.concat(frames)
    # Display only the dates from the selected date onwards.
    cutoff = datetime.datetime.strptime(datee, '%Y-%m-%d')
    df3 = df2[df2['NEXT_Date:YYYY-MM-DD'] >= cutoff].copy()
    df3.sort_values(by='NEXT_Date:YYYY-MM-DD', inplace=True)
    # Drop the 00:00:00 time component; keep plain ISO dates.
    df3['NEXT_Date:YYYY-MM-DD'] = df3['NEXT_Date:YYYY-MM-DD'].apply(
        lambda x: str(x.date()))
    df3.reset_index(drop=True, inplace=True)
    # Persist the projection.  Reuse the module-level engine instead of
    # creating a brand-new SQLAlchemy engine on every callback invocation.
    df3.to_sql('Next_Datee', disk_engine, if_exists='append', index=False)
    return df3.to_dict('records')
@app.callback(Output('output-of-upload', 'children'),
              [Input('upload-data', 'contents')],
              [State('upload-data', 'filename'),
               State('upload-data', 'last_modified')])
def update_output(list_of_contents, list_of_names, list_of_dates):
    """Render one status Div per uploaded file.

    Returns None (no output) until the user has uploaded something.
    """
    if list_of_contents is None:
        return None
    return [parse_contents(content, name, modified)
            for content, name, modified in
            zip(list_of_contents, list_of_names, list_of_dates)]
if __name__ == "__main__":
app.run_server()
Выше — мой код.
Содержимое requirements.txt:
Click==7.0
dash==1.9.0
dash-core-components==1.8.0
dash-html-components==1.0.2
dash-renderer==1.2.4
dash-table==4.6.0
Flask==1.1.1
Flask-Compress==1.4.0
Flask-SeaSurf==0.2.2
future==0.18.2
gunicorn==20.0.4
itsdangerous==1.1.0
Jinja2==2.11.1
MarkupSafe==1.1.1
numpy==1.18.1
pandas==1.0.1
plotly==4.5.0
python-dateutil==2.8.1
pytz==2019.3
retrying==1.3.3
six==1.14.0
SQLAlchemy==1.3.13
ua-parser==0.9.0
Werkzeug==1.0.0
Это мой файл .gitignore (каждая запись на отдельной строке):
venv
*.pyc
.DS_Store
.env
Это мой Procfile: web: gunicorn app:server