I'm trying to write my own little Flask application in Python to monitor my server's hard drives.
But ever since I switched to APScheduler's SQLAlchemyJobStore, I've been having problems. While the server is running, everything works fine. But after a restart I can no longer reach the web interface and get the following output:
Unable to restore job "refresh_disks" -- removing it
Traceback (most recent call last):
  File "/usr/local/lib/python3.6/dist-packages/apscheduler/util.py", line 289, in ref_to_obj
    obj = getattr(obj, name)
AttributeError: module 'dirkules.tasks' has no attribute 'refresh_disks'

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "/usr/local/lib/python3.6/dist-packages/apscheduler/jobstores/sqlalchemy.py", line 141, in _get_jobs
    jobs.append(self._reconstitute_job(row.job_state))
  File "/usr/local/lib/python3.6/dist-packages/apscheduler/jobstores/sqlalchemy.py", line 128, in _reconstitute_job
    job.__setstate__(job_state)
  File "/usr/local/lib/python3.6/dist-packages/apscheduler/job.py", line 272, in __setstate__
    self.func = ref_to_obj(self.func_ref)
  File "/usr/local/lib/python3.6/dist-packages/apscheduler/util.py", line 292, in ref_to_obj
    raise LookupError('Error resolving reference %s: error looking up object' % ref)
LookupError: Error resolving reference dirkules.tasks:refresh_disks: error looking up object
[2019-04-26 15:46:39 +0200] [13296] [INFO] Shutting down: Master
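As far as I can tell, the failure happens when APScheduler tries to turn the stored reference back into the function. Roughly, what ref_to_obj does with the persisted string is something like this (a simplified sketch, not the actual library code):

from importlib import import_module

# The jobstore only persists the reference "dirkules.tasks:refresh_disks";
# on startup it has to be resolved back into a callable:
module_name, attr_name = "dirkules.tasks:refresh_disks".split(":")
module = import_module(module_name)   # importing dirkules.tasks succeeds
func = getattr(module, attr_name)     # this getattr is what raises the AttributeError for me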
Here is my config.py:
import os
from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
#from apscheduler.jobstores.memory import MemoryJobStore
baseDir = os.path.abspath(os.path.dirname(__file__))
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(baseDir, 'dirkules.db')
SQLALCHEMY_TRACK_MODIFICATIONS = False
# SCHEDULER_JOB_DEFAULTS is applied per job: with max_instances = 1, at most
# one instance of each job may run at the same time.
# SCHEDULER_EXECUTORS is global: a single thread pool with max_workers = 3
# threads is shared by all jobs.
SCHEDULER_JOBSTORES = {'default': SQLAlchemyJobStore(url='sqlite:///' + os.path.join(baseDir, 'dirkules.db'))}
#SCHEDULER_JOBSTORES = {'default': MemoryJobStore()}
SCHEDULER_EXECUTORS = {'default': {'type': 'threadpool', 'max_workers': 3}}
SCHEDULER_JOB_DEFAULTS = {'coalesce': False, 'max_instances': 1}
SCHEDULER_API_ENABLED = True
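Both the application data and the scheduler jobs go into the same dirkules.db file. For reference, the persisted job entry can be inspected directly (a minimal sketch, assuming SQLAlchemyJobStore's default apscheduler_jobs table name):

import os
import sqlite3

baseDir = os.path.abspath(os.path.dirname(__file__))
conn = sqlite3.connect(os.path.join(baseDir, "dirkules.db"))
# Each row holds the job id, its next run time and the pickled job state
print(conn.execute("SELECT id, next_run_time FROM apscheduler_jobs").fetchall())
conn.close()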
__init__.py:
import dirkules.config as config
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_apscheduler import APScheduler
app = Flask(__name__)
app.config.from_object(config)
db = SQLAlchemy(app)
import dirkules.models
db.create_all()
scheduler = APScheduler()
scheduler.init_app(app)
scheduler.start()
#@app.before_first_request
from dirkules import tasks
# from dirkules.models import Time
# from sqlalchemy.orm.exc import NoResultFound
#
# try:
#     Time.query.one()
# except NoResultFound:
#     db.session.add(Time("Drives"))
#     db.session.commit()
import dirkules.views
and tasks.py:
from dirkules import scheduler
import datetime
import dirkules.driveManagement.driveController as drico
@scheduler.task('interval', id='refresh_disks', seconds=10)
def refresh_disks():
    # drives = drico.getAllDrives()
    print("Drives refreshed")
I hope you can help me!