Всякий раз, когда я перезапускаю Logstash, он начинает заново получать записи из базы данных с нуля. Вот мой .conf-файл, который я выполняю:
# Logstash pipeline: incrementally pull rows from Postgres and index them
# into Elasticsearch, tracking progress via a numeric epoch column.
input {
jdbc {
# Postgres jdbc connection string to our database, mydb
jdbc_connection_string => "jdbc:postgresql://**************:5432/******"
# The user we wish to execute our statement as
jdbc_user => "*******"
jdbc_password => "******"
# The path to our downloaded jdbc driver
jdbc_driver_library => "/usr/share/logstash/lib/postgresql-42.2.9.jar"
# The name of the driver class for Postgresql
jdbc_driver_class => "org.postgresql.Driver"
# Fetch results in pages (of jdbc_page_size rows) instead of one big result set.
jdbc_paging_enabled => true
# The column whose max seen value is persisted to last_run_metadata_path
# and exposed to the statement as :sql_last_value.
# NOTE(review): with use_column_value => true, this column must actually be
# present in the query's result set (i.e. the statement must SELECT a column
# aliased "unix_ts_in_secs"); otherwise the tracked value never advances and
# every restart re-reads the table from scratch — verify alerts-query.sql.
tracking_column => "unix_ts_in_secs"
use_column_value => true
# Track the column as a number (epoch seconds), not a timestamp.
tracking_column_type => "numeric"
# Run the statement every 5 seconds (6-field cron: seconds granularity).
schedule => "*/5 * * * * *"
jdbc_page_size => 500
# sql_last_value is stored in this file so each pipeline should have different
# file name so it doesn't get mixed
last_run_metadata_path => "/etc/logstash/conf.d/lastrun/.logstash_jdbc_last_run_alerts"
# our query
statement_filepath => "/etc/logstash/db-queries/alerts-query.sql"
}
}
filter {
mutate {
# Drop the bookkeeping column so it is not indexed into Elasticsearch.
remove_field => ["unix_ts_in_secs"]
}
}
output {
elasticsearch {
index => "typec"
# Use the row's primary key as the document id so re-ingested rows
# update the existing document instead of creating duplicates.
document_id => "%{id}"
hosts => ["************"]
user => "******"
password => "******"
}
}
Файл: alerts-query.sql
-- Incremental pull of alerts rows updated since the last run.
--
-- The Logstash jdbc input is configured with
--   tracking_column => "unix_ts_in_secs", use_column_value => true
-- which means the tracked value (:sql_last_value) is taken from a column of
-- this result set named unix_ts_in_secs. The original query never selected
-- such a column, so :sql_last_value stayed at 0 and every Logstash restart
-- re-ingested the whole table. Fix: project the epoch of updated_at under
-- that alias. The pipeline's mutate filter removes it before indexing.
--
-- The upper bound a."updated_at" < NOW() avoids racing with rows whose
-- updated_at is still being written in the current second.
SELECT
    *,
    extract(epoch FROM a."updated_at") AS unix_ts_in_secs
FROM alerts AS a
WHERE extract(epoch FROM a."updated_at") > :sql_last_value
  AND a."updated_at" < NOW()
ORDER BY a."updated_at" ASC
Есть ли способ сохранять :sql_last_value так, чтобы даже после перезапуска Logstash чтение начиналось с ранее сохранённого :sql_last_value, а не с 0?