From 0405882380272d7826a11a5aeeba083a49224039 Mon Sep 17 00:00:00 2001
From: "github-actions[bot]"
Date: Tue, 10 Dec 2024 15:27:04 +0000
Subject: [PATCH] add whitespace analyzer docs (#8531)

* add whitespace analyzer docs

Signed-off-by: Anton Rubin

* Doc review

Signed-off-by: Fanit Kolchina

* Update _analyzers/whitespace.md

Co-authored-by: Nathan Bower
Signed-off-by: kolchfa-aws <105444904+kolchfa-aws@users.noreply.github.com>

---------

Signed-off-by: Anton Rubin
Signed-off-by: Fanit Kolchina
Signed-off-by: kolchfa-aws <105444904+kolchfa-aws@users.noreply.github.com>
Co-authored-by: Fanit Kolchina
Co-authored-by: kolchfa-aws <105444904+kolchfa-aws@users.noreply.github.com>
Co-authored-by: Nathan Bower
(cherry picked from commit 8693afbf56f43270980ecf8b4af59940af37830a)
Signed-off-by: github-actions[bot]
---
 _analyzers/whitespace.md | 86 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 86 insertions(+)
 create mode 100644 _analyzers/whitespace.md

diff --git a/_analyzers/whitespace.md b/_analyzers/whitespace.md
new file mode 100644
index 0000000000..67fee61295
--- /dev/null
+++ b/_analyzers/whitespace.md
@@ -0,0 +1,86 @@
+---
+layout: default
+title: Whitespace analyzer
+nav_order: 60
+---
+
+# Whitespace analyzer
+
+The `whitespace` analyzer breaks text into tokens based only on whitespace characters (for example, spaces and tabs). It does not apply any transformations, such as lowercasing or removing stop words, so the original case of the text is retained and punctuation is included as part of the tokens.
+
+## Example
+
+Use the following command to create an index named `my_whitespace_index` with a `whitespace` analyzer:
+
+```json
+PUT /my_whitespace_index
+{
+  "mappings": {
+    "properties": {
+      "my_field": {
+        "type": "text",
+        "analyzer": "whitespace"
+      }
+    }
+  }
+}
+```
+{% include copy-curl.html %}
+
+## Configuring a custom analyzer
+
+Use the following command to configure an index with a custom analyzer that is equivalent to a `whitespace` analyzer with an added `lowercase` token filter:
+
+```json
+PUT /my_custom_whitespace_index
+{
+  "settings": {
+    "analysis": {
+      "analyzer": {
+        "my_custom_whitespace_analyzer": {
+          "type": "custom",
+          "tokenizer": "whitespace",
+          "filter": ["lowercase"]
+        }
+      }
+    }
+  },
+  "mappings": {
+    "properties": {
+      "my_field": {
+        "type": "text",
+        "analyzer": "my_custom_whitespace_analyzer"
+      }
+    }
+  }
+}
+```
+{% include copy-curl.html %}
+
+## Generated tokens
+
+Use the following request to examine the tokens generated using the analyzer:
+
+```json
+POST /my_custom_whitespace_index/_analyze
+{
+  "analyzer": "my_custom_whitespace_analyzer",
+  "text": "The SLOW turtle swims away! 123"
+}
+```
+{% include copy-curl.html %}
+
+The response contains the generated tokens:
+
+```json
+{
+  "tokens": [
+    {"token": "the","start_offset": 0,"end_offset": 3,"type": "word","position": 0},
+    {"token": "slow","start_offset": 4,"end_offset": 8,"type": "word","position": 1},
+    {"token": "turtle","start_offset": 9,"end_offset": 15,"type": "word","position": 2},
+    {"token": "swims","start_offset": 16,"end_offset": 21,"type": "word","position": 3},
+    {"token": "away!","start_offset": 22,"end_offset": 27,"type": "word","position": 4},
+    {"token": "123","start_offset": 28,"end_offset": 31,"type": "word","position": 5}
+  ]
+}
+```
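
The introductory paragraph of the added page states that the `whitespace` analyzer preserves the original casing and punctuation. As a supplementary sketch, this can be checked by calling the built-in `whitespace` analyzer directly through the `_analyze` API, with no index required; a request along the following lines, reusing the sample text from the patch, should return the tokens `The`, `SLOW`, `turtle`, `swims`, `away!`, and `123` unchanged:

```json
POST /_analyze
{
  "analyzer": "whitespace",
  "text": "The SLOW turtle swims away! 123"
}
```

Comparing this output with the `my_custom_whitespace_analyzer` response above shows that the added `lowercase` token filter alone accounts for the difference in token casing.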