Выделите слова с пробелами в Elasticsearch 7.6 - PullRequest
1 голос
/ 01 мая 2020

Я хотел бы использовать выделение Elasticsearch для получения подходящих ключевых слов, найденных внутри текста. Это мои настройки / отображения

{
  "settings": {
    "analysis": {
      "char_filter": {
        "my_char_filter": {
          "type": "mapping",
          "mappings": [
            "- => _"
          ]
        }
      },
      "analyzer": {
        "my_analyzer": {
          "tokenizer": "standard",
          "char_filter": [
            "my_char_filter"
          ],
          "filter": [
            "lowercase"
          ]
        }
      }
    }
  },
  "mappings": {
      "properties": {
        "title": {
          "type": "text",
          "analyzer": "my_analyzer"
        },
        "description": {
          "type": "text",
          "analyzer": "my_analyzer",
          "fielddata": true
        }
      }
  }
}

Я использую char_filter для поиска и выделения пропущенных слов. Вот мой пример документа:

{
    "_index": "test_tokenizer",
    "_type": "_doc",
    "_id": "DbBIxXEBL7VGAl98vIRl",
    "_score": 1.0,
    "_source": {
        "title": "Best places: New Mexico and Sedro-Woolley",
        "description": "This is an example text containing some cities like New York, Toronto, Rome and many other. So, there are also Milton-Freewater and Las Vegas!"
    }
}

, и этот запрос я использую

{
    "query": {
        "query_string" : {
            "query" : "\"New York\" OR \"Rome\" OR \"Milton-Freewater\"",
            "default_field": "description"
        }
    },
    "highlight" : {
        "pre_tags" : ["<key>"],
        "post_tags" : ["</key>"],
        "fields" : {
            "description" : {
                "number_of_fragments" : 0
            }
        }
    }
}

, и это вывод, который у меня есть

...
"hits": [
    {
        "_index": "test_tokenizer",
        "_type": "_doc",
        "_id": "GrDNz3EBL7VGAl98EITg",
        "_score": 0.72928625,
        "_source": {
            "title": "Best places: New Mexico and Sedro-Woolley",
            "description": "This is an example text containing some cities like New York, Toronto, Rome and many other. So, there are also Milton-Freewater and Las Vegas!"
        },
        "highlight": {
            "description": [
                "This is an example text containing some cities like <key>New</key> <key>York</key>, Toronto, <key>Rome</key> and many other. So, there are also <key>Milton-Freewater</key> and Las Vegas!"
            ]
        }
    }
]
...

Rome и Milton-Freewater выделены правильно, а New York — нет.

Как я могу получить <key>New York</key> вместо <key>New</key> и <key>York</key>?

1 Ответ

1 голос
/ 01 мая 2020

В связи с этим существует открытый PR, но я бы предложил следующее временное решение:

  1. Добавить настройку term_vector
PUT test_tokenizer
{
  "settings": {
    "analysis": {
      "char_filter": {
        "my_char_filter": {
          "type": "mapping",
          "mappings": [
            "- => _"
          ]
        }
      },
      "analyzer": {
        "my_analyzer": {
          "tokenizer": "standard",
          "char_filter": [
            "my_char_filter"
          ],
          "filter": [
            "lowercase"
          ]
        }
      }
    }
  },
  "mappings": {
    "properties": {
      "title": {
        "type": "text",
        "analyzer": "my_analyzer"
      },
      "description": {
        "type": "text",
        "analyzer": "my_analyzer",
        "term_vector": "with_positions_offsets",
        "fielddata": true
      }
    }
  }
}
  2. Проиндексируйте документ
POST test_tokenizer/_doc
{"title":"Best places: New Mexico and Sedro-Woolley","description":"This is an example text containing some cities like New York, Toronto, Rome and many other. So, there are also Milton-Freewater and Las Vegas!"}
  3. Преобразуйте query_string в набор запросов match_phrase внутри bool-запроса в highlight_query и используйте type: fvh
GET test_tokenizer/_search
{
  "query": {
    "query_string": {
      "query": "\"New York\" OR \"Rome\" OR \"Milton-Freewater\"",
      "default_field": "description"
    }
  },
  "highlight": {
    "pre_tags": [
      "<key>"
    ],
    "post_tags": [
      "</key>"
    ],
    "fields": {
      "description": {
        "highlight_query": {
          "bool": {
            "should": [
              {
                "match_phrase": {
                  "description": "New York"
                }
              },
              {
                "match_phrase": {
                  "description": "Rome"
                }
              },
              {
                "match_phrase": {
                  "description": "Milton-Freewater"
                }
              }
            ]
          }
        },
        "type": "fvh",
        "number_of_fragments": 0
      }
    }
  }
}

, получая

{
  "highlight":{
    "description":[
      "This is an example text containing some cities like <key>New York</key>, Toronto, <key>Rome</key> and many other. So, there are also <key>Milton-Freewater</key> and Las Vegas!"
    ]
  }
}
...