Я пытаюсь настроить Logstash с драйвером MySQL JDBC для индексации данных в Elasticsearch, но постоянно получаю следующую ошибку:
[logstash.inputs.jdbc ][pipeline_1] Exception when executing JDBC query {:exception=>#<Sequel::DatabaseError: Java::JavaSql::SQLException: Data truncated for column '_prim' at row 1>}
Сам SQL-запрос отлично работает в MySQL Workbench!
Вот конфигурационный файл конвейера (pipeline) Logstash:
input {
  jdbc {
    jdbc_driver_library => "${LOGSTASH_JDBC_DRIVER_JAR_LOCATION}"
    jdbc_driver_class => "${LOGSTASH_JDBC_DRIVER}"
    jdbc_connection_string => "${LOGSTASH_JDBC_URL}"
    jdbc_user => "${LOGSTASH_JDBC_USERNAME}"
    jdbc_password => "${LOGSTASH_JDBC_PASSWORD}"
    # When paging is enabled, Logstash (via Sequel) wraps the statement in its
    # own derived table and appends LIMIT/OFFSET per page. A hand-written LIMIT
    # inside the statement fights that wrapper, so it has been removed;
    # jdbc_page_size controls the batch size instead.
    jdbc_paging_enabled => true
    jdbc_page_size => 1000
    schedule => "*/2 * * * *"
    # Fixes vs. the original statement:
    #  - `tokeep` was referenced in the outer SELECT but never selected inside
    #    the derived table t1, so it could not resolve in the outer scope; it
    #    is now exposed by the subquery and read as t1.tokeep.
    #    NOTE(review): this assumes tokeep is a column of `a` -- change the
    #    qualifier to `b.tokeep` if it lives on `b`.
    #  - ORDER BY added so LIMIT/OFFSET paging yields a stable,
    #    non-overlapping sequence of rows across pages.
    #  - NOTE(review): GROUP BY b.courseid with non-aggregated select columns
    #    is nondeterministic and is rejected under MySQL ONLY_FULL_GROUP_BY;
    #    kept as-is because the server evidently accepts it, but confirm the
    #    intended "one row per courseid" semantics (consider ANY_VALUE()).
    statement => "
      SELECT
        t1.prim, t1.title, t1.description, t1.tokeep
      FROM
        (
          SELECT
            a.prim, a.title, a.description, a.tokeep
          FROM
            b
            INNER JOIN a ON a.prim = b.prim
          GROUP BY
            b.courseid
        ) t1
      ORDER BY
        t1.prim
    "
  }
}
filter {
  # Translate the tokeep flag coming from the SQL row into the Elasticsearch
  # bulk action this event should perform.
  if [tokeep] == 1 {
    mutate { add_field => { "[@metadata][action]" => "index" } }
  } else if [tokeep] == 0 {
    mutate { add_field => { "[@metadata][action]" => "delete" } }
  }
  # NOTE(review): if tokeep is ever neither 0 nor 1, no action metadata is set
  # and the output sprintf falls through to the literal placeholder string --
  # confirm tokeep is constrained to {0,1} upstream.

  # Use the primary key as the document id so repeated pipeline runs update
  # the same document instead of creating duplicates.
  mutate {
    copy => { "prim" => "[@metadata][_id]" }
  }
}
output {
  elasticsearch {
    hosts => ["${LOGSTASH_ELASTICSEARCH_HOST}"]
    sniffing => true
    index => "my_idx"
    # Per-event action and document id are resolved from metadata populated
    # in the filter stage.
    action => "%{[@metadata][action]}"
    document_id => "%{[@metadata][_id]}"
    # NOTE(review): doc_as_upsert only affects the "update" action; it is a
    # no-op for "index"/"delete" -- confirm it is intentional.
    doc_as_upsert => true
  }
  # Uncomment to dump the event stream for debugging:
  # stdout { codec => rubydebug }
}
Типы колонок: prim — VARCHAR(36), title — VARCHAR(1000), description — TEXT.
Пожалуйста, помогите