From a29d90ee00da1b22e3e0d7966ba5f1a12c85eba9 Mon Sep 17 00:00:00 2001
From: Sandesh Kumar
Date: Fri, 8 Mar 2024 15:38:52 -0800
Subject: [PATCH] Test cases improvement

Signed-off-by: Sandesh Kumar
---
 htmlReport/css/coverage.css                    |  154 ++
 htmlReport/css/idea.min.css                    |  118 ++
 htmlReport/img/arrowDown.gif                   |  Bin 0 -> 89 bytes
 htmlReport/img/arrowUp.gif                     |  Bin 0 -> 91 bytes
 htmlReport/index.html                          |  169 ++
 htmlReport/index_SORT_BY_BLOCK.html            |  169 ++
 htmlReport/index_SORT_BY_BLOCK_DESC.html       |  169 ++
 htmlReport/index_SORT_BY_CLASS.html            |  169 ++
 htmlReport/index_SORT_BY_CLASS_DESC.html       |  169 ++
 htmlReport/index_SORT_BY_LINE.html             |  169 ++
 htmlReport/index_SORT_BY_LINE_DESC.html        |  169 ++
 htmlReport/index_SORT_BY_METHOD.html           |  169 ++
 htmlReport/index_SORT_BY_METHOD_DESC.html      |  169 ++
 htmlReport/index_SORT_BY_NAME_DESC.html        |  169 ++
 htmlReport/js/highlight.min.js                 | 1388 ++++++++++++++
 htmlReport/js/highlightjs-line-numbers.min.js  |   24 +
 htmlReport/ns-1/index.html                     | 1601 +++++++++++++++++
 htmlReport/ns-1/index_SORT_BY_BLOCK.html       | 1601 +++++++++++++++++
 htmlReport/ns-1/index_SORT_BY_BLOCK_DESC.html  | 1601 +++++++++++++++++
 htmlReport/ns-1/index_SORT_BY_CLASS.html       | 1601 +++++++++++++++++
 htmlReport/ns-1/index_SORT_BY_CLASS_DESC.html  | 1601 +++++++++++++++++
 htmlReport/ns-1/index_SORT_BY_LINE.html        | 1601 +++++++++++++++++
 htmlReport/ns-1/index_SORT_BY_LINE_DESC.html   | 1601 +++++++++++++++++
 htmlReport/ns-1/index_SORT_BY_METHOD.html      | 1601 +++++++++++++++++
 .../ns-1/index_SORT_BY_METHOD_DESC.html        | 1601 +++++++++++++++++
 htmlReport/ns-1/index_SORT_BY_NAME_DESC.html   | 1601 +++++++++++++++++
 htmlReport/ns-1/sources/source-1.html          |  246 +++
 htmlReport/ns-1/sources/source-10.html         |  544 ++++++
 htmlReport/ns-1/sources/source-11.html         |  309 ++++
 htmlReport/ns-1/sources/source-12.html         |  327 ++++
 htmlReport/ns-1/sources/source-13.html         |  432 +++++
 htmlReport/ns-1/sources/source-14.html         |  827 +++++++++
 htmlReport/ns-1/sources/source-15.html         |  549 ++++++
 htmlReport/ns-1/sources/source-16.html         |  287 +++
 htmlReport/ns-1/sources/source-17.html         |  743 ++++++++
 htmlReport/ns-1/sources/source-18.html         |  969 ++++++++++
 htmlReport/ns-1/sources/source-19.html         |  241 +++
 htmlReport/ns-1/sources/source-1a.html         |  241 +++
 htmlReport/ns-1/sources/source-1b.html         |  241 +++
 htmlReport/ns-1/sources/source-1c.html         |  217 +++
 htmlReport/ns-1/sources/source-1d.html         |  270 +++
 htmlReport/ns-1/sources/source-1e.html         |  236 +++
 htmlReport/ns-1/sources/source-1f.html         |  241 +++
 htmlReport/ns-1/sources/source-2.html          |  210 +++
 htmlReport/ns-1/sources/source-20.html         |  358 ++++
 htmlReport/ns-1/sources/source-21.html         |  249 +++
 htmlReport/ns-1/sources/source-22.html         |  249 +++
 htmlReport/ns-1/sources/source-23.html         |  311 ++++
 htmlReport/ns-1/sources/source-24.html         |  218 +++
 htmlReport/ns-1/sources/source-25.html         |  374 ++++
 htmlReport/ns-1/sources/source-26.html         |  537 ++++++
 htmlReport/ns-1/sources/source-27.html         |  421 +++++
 htmlReport/ns-1/sources/source-28.html         |  344 ++++
 htmlReport/ns-1/sources/source-29.html         |  349 ++++
 htmlReport/ns-1/sources/source-2a.html         |  502 ++++++
 htmlReport/ns-1/sources/source-2b.html         |  680 +++++++
 htmlReport/ns-1/sources/source-2c.html         |  527 ++++++
 htmlReport/ns-1/sources/source-2d.html         |  469 +++++
 htmlReport/ns-1/sources/source-2e.html         |  320 ++++
 htmlReport/ns-1/sources/source-2f.html         |  332 ++++
 htmlReport/ns-1/sources/source-3.html          |  160 ++
 htmlReport/ns-1/sources/source-30.html         |  358 ++++
 htmlReport/ns-1/sources/source-31.html         |  573 ++++++
 htmlReport/ns-1/sources/source-32.html         |  445 +++++
 htmlReport/ns-1/sources/source-33.html         |  790 ++++++++
 htmlReport/ns-1/sources/source-34.html         |  273 +++
 htmlReport/ns-1/sources/source-35.html         |  317 ++++
 htmlReport/ns-1/sources/source-36.html         |  303 ++++
 htmlReport/ns-1/sources/source-37.html         |  411 +++++
 htmlReport/ns-1/sources/source-38.html         |  131 ++
 htmlReport/ns-1/sources/source-39.html         |  166 ++
 htmlReport/ns-1/sources/source-3a.html         |  135 ++
 htmlReport/ns-1/sources/source-3b.html         |  150 ++
 htmlReport/ns-1/sources/source-3c.html         |  137 ++
 htmlReport/ns-1/sources/source-3d.html         |  134 ++
 htmlReport/ns-1/sources/source-4.html          |  156 ++
 htmlReport/ns-1/sources/source-5.html          |  467 +++++
 htmlReport/ns-1/sources/source-6.html          |  396 ++++
 htmlReport/ns-1/sources/source-7.html          | 1507 ++++++++++++++++
 htmlReport/ns-1/sources/source-8.html          | 1176 ++++++++++++
 htmlReport/ns-1/sources/source-9.html          |  310 ++++
 htmlReport/ns-1/sources/source-a.html          |  273 +++
 htmlReport/ns-1/sources/source-b.html          |  269 +++
 htmlReport/ns-1/sources/source-c.html          |  592 ++++++
 htmlReport/ns-1/sources/source-d.html          |  365 ++++
 htmlReport/ns-1/sources/source-e.html          |  501 ++++++
 htmlReport/ns-1/sources/source-f.html          |  765 ++++++++
 htmlReport/ns-2/index.html                     |  332 ++++
 htmlReport/ns-2/index_SORT_BY_BLOCK.html       |  332 ++++
 htmlReport/ns-2/index_SORT_BY_BLOCK_DESC.html  |  332 ++++
 htmlReport/ns-2/index_SORT_BY_CLASS.html       |  332 ++++
 htmlReport/ns-2/index_SORT_BY_CLASS_DESC.html  |  332 ++++
 htmlReport/ns-2/index_SORT_BY_LINE.html        |  332 ++++
 htmlReport/ns-2/index_SORT_BY_LINE_DESC.html   |  332 ++++
 htmlReport/ns-2/index_SORT_BY_METHOD.html      |  332 ++++
 .../ns-2/index_SORT_BY_METHOD_DESC.html        |  332 ++++
 htmlReport/ns-2/index_SORT_BY_NAME_DESC.html   |  332 ++++
 htmlReport/ns-2/sources/source-1.html          |  259 +++
 htmlReport/ns-2/sources/source-2.html          |  278 +++
 htmlReport/ns-2/sources/source-3.html          |  275 +++
 htmlReport/ns-2/sources/source-4.html          |  298 +++
 htmlReport/ns-2/sources/source-5.html          |  370 ++++
 htmlReport/ns-2/sources/source-6.html          |  255 +++
 htmlReport/ns-2/sources/source-7.html          |  367 ++++
 htmlReport/ns-2/sources/source-8.html          |  199 ++
 htmlReport/ns-2/sources/source-9.html          |  110 ++
 .../GlobalOrdinalsStringTermsAggregator.java   |   10 +-
 .../terms/KeywordTermsAggregatorTests.java     |   75 +-
 .../bucket/terms/TermsAggregatorTests.java     |  213 ++-
 .../aggregations/AggregatorTestCase.java       |  131 +-
 110 files changed, 49988 insertions(+), 186 deletions(-)
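The substantive changes are the last four entries: a small tweak to GlobalOrdinalsStringTermsAggregator and expanded tests in KeywordTermsAggregatorTests, TermsAggregatorTests, and the shared AggregatorTestCase harness. The htmlReport/ tree is an IDE-generated coverage report committed alongside them. The actual test hunks appear further down in the patch; as context for what they exercise, below is a minimal sketch of a keyword terms-aggregator unit test in the AggregatorTestCase style. The class name, the field name "k", and the exact shapes of the searchAndReduce and newIndexSearcher helpers are assumptions for illustration, not lines from this patch.

```java
// Illustrative sketch only: class/field names and the searchAndReduce(...)
// helper signature are assumptions, not content from this patch.
import java.util.List;

import org.apache.lucene.document.SortedSetDocValuesField;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.tests.index.RandomIndexWriter;
import org.apache.lucene.util.BytesRef;
import org.opensearch.index.mapper.KeywordFieldMapper;
import org.opensearch.index.mapper.MappedFieldType;
import org.opensearch.search.aggregations.AggregatorTestCase;
import org.opensearch.search.aggregations.bucket.terms.StringTerms;
import org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
import org.opensearch.search.aggregations.support.ValueType;

public class KeywordTermsSketchTests extends AggregatorTestCase {

    public void testCountsPerTerm() throws Exception {
        try (Directory directory = newDirectory();
             RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) {
            // Index three docs: term "a" appears twice, "b" once.
            iw.addDocument(List.of(new SortedSetDocValuesField("k", new BytesRef("a"))));
            iw.addDocument(List.of(new SortedSetDocValuesField("k", new BytesRef("a"))));
            iw.addDocument(List.of(new SortedSetDocValuesField("k", new BytesRef("b"))));
            try (IndexReader reader = iw.getReader()) {
                TermsAggregationBuilder agg = new TermsAggregationBuilder("terms")
                    .userValueTypeHint(ValueType.STRING)
                    .field("k");
                MappedFieldType fieldType = new KeywordFieldMapper.KeywordFieldType("k");
                // searchAndReduce builds the aggregator, collects matching docs,
                // and reduces the per-shard results into one InternalAggregation.
                StringTerms result = searchAndReduce(
                    newIndexSearcher(reader), new MatchAllDocsQuery(), agg, fieldType);
                assertEquals(2, result.getBuckets().size());
                assertEquals(2L, result.getBucketByKey("a").getDocCount());
                assertEquals(1L, result.getBucketByKey("b").getDocCount());
            }
        }
    }
}
```

Tests of this shape drive both aggregation paths (map ordinals and global ordinals), which is why the GlobalOrdinalsStringTermsAggregator change and the test changes travel together in this patch.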
[htmlReport/index.html: Coverage Report > Summary]

Overall Coverage Summary

  Package      | Class, %       | Method, %        | Line, %
  all classes  | 29.1% (55/189) | 13.8% (175/1270) | 15% (629/4186)

Coverage Breakdown

  Package                                                   | Class, %       | Method, %      | Line, %
  org.opensearch.search.aggregations.bucket.terms           | 27.3% (47/172) | 14% (164/1175) | 15.5% (607/3916)
  org.opensearch.search.aggregations.bucket.terms.heuristic | 47.1% (8/17)   | 11.6% (11/95)  | 8.1% (22/270)

(The index_SORT_BY_*.html pages in the report repeat this same summary, re-sorted by each column.)
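Each cell reads as covered/total for that metric, and the percentage is simply that ratio; for the all-classes row, for example, 55 of 189 classes are touched, and 55 / 189 ≈ 0.291, which is the 29.1% shown.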
atomic_schar atomic_uchar atomic_short atomic_ushort atomic_int atomic_uint atomic_long atomic_ulong atomic_llong atomic_ullong new throw return and and_eq bitand bitor compl not not_eq or or_eq xor xor_eq", +built_in:"_Bool _Complex _Imaginary", +_relevance_hints:["asin","atan2","atan","calloc","ceil","cosh","cos","exit","exp","fabs","floor","fmod","fprintf","fputs","free","frexp","auto_ptr","deque","list","queue","stack","vector","map","set","pair","bitset","multiset","multimap","unordered_set","fscanf","future","isalnum","isalpha","iscntrl","isdigit","isgraph","islower","isprint","ispunct","isspace","isupper","isxdigit","tolower","toupper","labs","ldexp","log10","log","malloc","realloc","memchr","memcmp","memcpy","memset","modf","pow","printf","putchar","puts","scanf","sinh","sin","snprintf","sprintf","sqrt","sscanf","strcat","strchr","strcmp","strcpy","strcspn","strlen","strncat","strncmp","strncpy","strpbrk","strrchr","strspn","strstr","tanh","tan","unordered_map","unordered_multiset","unordered_multimap","priority_queue","make_pair","array","shared_ptr","abort","terminate","abs","acos","vfprintf","vprintf","vsprintf","endl","initializer_list","unique_ptr","complex","imaginary","std","string","wstring","cin","cout","cerr","clog","stdin","stdout","stderr","stringstream","istringstream","ostringstream"], +literal:"true false nullptr NULL"},p={className:"function.dispatch",relevance:0, +keywords:m, +begin:t(/\b/,/(?!decltype)/,/(?!if)/,/(?!for)/,/(?!while)/,n.IDENT_RE,(_=/\s*\(/, +t("(?=",_,")")))};var _;const g=[p,l,s,r,n.C_BLOCK_COMMENT_MODE,o,c],b={ +variants:[{begin:/=/,end:/;/},{begin:/\(/,end:/\)/},{ +beginKeywords:"new throw return else",end:/;/}],keywords:m,contains:g.concat([{ +begin:/\(/,end:/\)/,keywords:m,contains:g.concat(["self"]),relevance:0}]), +relevance:0},f={className:"function",begin:"("+i+"[\\*&\\s]+)+"+u, +returnBegin:!0,end:/[{;=]/,excludeEnd:!0,keywords:m,illegal:/[^\w\s\*&:<>.]/, +contains:[{begin:"decltype\\(auto\\)",keywords:m,relevance:0},{begin:u, +returnBegin:!0,contains:[d],relevance:0},{begin:/::/,relevance:0},{begin:/:/, +endsWithParent:!0,contains:[c,o]},{className:"params",begin:/\(/,end:/\)/, +keywords:m,relevance:0,contains:[r,n.C_BLOCK_COMMENT_MODE,c,o,s,{begin:/\(/, +end:/\)/,keywords:m,relevance:0,contains:["self",r,n.C_BLOCK_COMMENT_MODE,c,o,s] +}]},s,r,n.C_BLOCK_COMMENT_MODE,l]};return{name:"C++", +aliases:["cc","c++","h++","hpp","hh","hxx","cxx"],keywords:m,illegal:"",keywords:m,contains:["self",s]},{begin:n.IDENT_RE+"::",keywords:m},{ +className:"class",beginKeywords:"enum class struct union",end:/[{;:<>=]/, +contains:[{beginKeywords:"final class struct"},n.TITLE_MODE]}]),exports:{ +preprocessor:l,strings:c,keywords:m}}}})()); +hljs.registerLanguage("csharp",(()=>{"use strict";return e=>{const n={ 
+keyword:["abstract","as","base","break","case","class","const","continue","do","else","event","explicit","extern","finally","fixed","for","foreach","goto","if","implicit","in","interface","internal","is","lock","namespace","new","operator","out","override","params","private","protected","public","readonly","record","ref","return","sealed","sizeof","stackalloc","static","struct","switch","this","throw","try","typeof","unchecked","unsafe","using","virtual","void","volatile","while"].concat(["add","alias","and","ascending","async","await","by","descending","equals","from","get","global","group","init","into","join","let","nameof","not","notnull","on","or","orderby","partial","remove","select","set","unmanaged","value|0","var","when","where","with","yield"]), +built_in:["bool","byte","char","decimal","delegate","double","dynamic","enum","float","int","long","nint","nuint","object","sbyte","short","string","ulong","uint","ushort"], +literal:["default","false","null","true"]},a=e.inherit(e.TITLE_MODE,{ +begin:"[a-zA-Z](\\.?\\w)*"}),i={className:"number",variants:[{ +begin:"\\b(0b[01']+)"},{ +begin:"(-?)\\b([\\d']+(\\.[\\d']*)?|\\.[\\d']+)(u|U|l|L|ul|UL|f|F|b|B)"},{ +begin:"(-?)(\\b0[xX][a-fA-F0-9']+|(\\b[\\d']+(\\.[\\d']*)?|\\.[\\d']+)([eE][-+]?[\\d']+)?)" +}],relevance:0},s={className:"string",begin:'@"',end:'"',contains:[{begin:'""'}] +},t=e.inherit(s,{illegal:/\n/}),r={className:"subst",begin:/\{/,end:/\}/, +keywords:n},l=e.inherit(r,{illegal:/\n/}),c={className:"string",begin:/\$"/, +end:'"',illegal:/\n/,contains:[{begin:/\{\{/},{begin:/\}\}/ +},e.BACKSLASH_ESCAPE,l]},o={className:"string",begin:/\$@"/,end:'"',contains:[{ +begin:/\{\{/},{begin:/\}\}/},{begin:'""'},r]},d=e.inherit(o,{illegal:/\n/, +contains:[{begin:/\{\{/},{begin:/\}\}/},{begin:'""'},l]}) +;r.contains=[o,c,s,e.APOS_STRING_MODE,e.QUOTE_STRING_MODE,i,e.C_BLOCK_COMMENT_MODE], +l.contains=[d,c,t,e.APOS_STRING_MODE,e.QUOTE_STRING_MODE,i,e.inherit(e.C_BLOCK_COMMENT_MODE,{ +illegal:/\n/})];const g={variants:[o,c,s,e.APOS_STRING_MODE,e.QUOTE_STRING_MODE] +},E={begin:"<",end:">",contains:[{beginKeywords:"in out"},a] +},_=e.IDENT_RE+"(<"+e.IDENT_RE+"(\\s*,\\s*"+e.IDENT_RE+")*>)?(\\[\\])?",b={ +begin:"@"+e.IDENT_RE,relevance:0};return{name:"C#",aliases:["cs","c#"], +keywords:n,illegal:/::/,contains:[e.COMMENT("///","$",{returnBegin:!0, +contains:[{className:"doctag",variants:[{begin:"///",relevance:0},{ +begin:"\x3c!--|--\x3e"},{begin:""}]}] +}),e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE,{className:"meta",begin:"#", +end:"$",keywords:{ +"meta-keyword":"if else elif endif define undef warning error line region endregion pragma checksum" +}},g,i,{beginKeywords:"class interface",relevance:0,end:/[{;=]/, +illegal:/[^\s:,]/,contains:[{beginKeywords:"where class" +},a,E,e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE]},{beginKeywords:"namespace", +relevance:0,end:/[{;=]/,illegal:/[^\s:]/, +contains:[a,e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE]},{ +beginKeywords:"record",relevance:0,end:/[{;=]/,illegal:/[^\s:]/, +contains:[a,E,e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE]},{className:"meta", +begin:"^\\s*\\[",excludeBegin:!0,end:"\\]",excludeEnd:!0,contains:[{ +className:"meta-string",begin:/"/,end:/"/}]},{ +beginKeywords:"new return throw await else",relevance:0},{className:"function", +begin:"("+_+"\\s+)+"+e.IDENT_RE+"\\s*(<.+>\\s*)?\\(",returnBegin:!0, +end:/\s*[{;=]/,excludeEnd:!0,keywords:n,contains:[{ +beginKeywords:"public private protected static internal protected abstract async extern override unsafe virtual new sealed partial", 
+relevance:0},{begin:e.IDENT_RE+"\\s*(<.+>\\s*)?\\(",returnBegin:!0, +contains:[e.TITLE_MODE,E],relevance:0},{className:"params",begin:/\(/,end:/\)/, +excludeBegin:!0,excludeEnd:!0,keywords:n,relevance:0, +contains:[g,i,e.C_BLOCK_COMMENT_MODE] +},e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE]},b]}}})()); +hljs.registerLanguage("css",(()=>{"use strict" +;const e=["a","abbr","address","article","aside","audio","b","blockquote","body","button","canvas","caption","cite","code","dd","del","details","dfn","div","dl","dt","em","fieldset","figcaption","figure","footer","form","h1","h2","h3","h4","h5","h6","header","hgroup","html","i","iframe","img","input","ins","kbd","label","legend","li","main","mark","menu","nav","object","ol","p","q","quote","samp","section","span","strong","summary","sup","table","tbody","td","textarea","tfoot","th","thead","time","tr","ul","var","video"],t=["any-hover","any-pointer","aspect-ratio","color","color-gamut","color-index","device-aspect-ratio","device-height","device-width","display-mode","forced-colors","grid","height","hover","inverted-colors","monochrome","orientation","overflow-block","overflow-inline","pointer","prefers-color-scheme","prefers-contrast","prefers-reduced-motion","prefers-reduced-transparency","resolution","scan","scripting","update","width","min-width","max-width","min-height","max-height"],i=["active","any-link","blank","checked","current","default","defined","dir","disabled","drop","empty","enabled","first","first-child","first-of-type","fullscreen","future","focus","focus-visible","focus-within","has","host","host-context","hover","indeterminate","in-range","invalid","is","lang","last-child","last-of-type","left","link","local-link","not","nth-child","nth-col","nth-last-child","nth-last-col","nth-last-of-type","nth-of-type","only-child","only-of-type","optional","out-of-range","past","placeholder-shown","read-only","read-write","required","right","root","scope","target","target-within","user-invalid","valid","visited","where"],o=["after","backdrop","before","cue","cue-region","first-letter","first-line","grammar-error","marker","part","placeholder","selection","slotted","spelling-error"],r=["align-content","align-items","align-self","animation","animation-delay","animation-direction","animation-duration","animation-fill-mode","animation-iteration-count","animation-name","animation-play-state","animation-timing-function","auto","backface-visibility","background","background-attachment","background-clip","background-color","background-image","background-origin","background-position","background-repeat","background-size","border","border-bottom","border-bottom-color","border-bottom-left-radius","border-bottom-right-radius","border-bottom-style","border-bottom-width","border-collapse","border-color","border-image","border-image-outset","border-image-repeat","border-image-slice","border-image-source","border-image-width","border-left","border-left-color","border-left-style","border-left-width","border-radius","border-right","border-right-color","border-right-style","border-right-width","border-spacing","border-style","border-top","border-top-color","border-top-left-radius","border-top-right-radius","border-top-style","border-top-width","border-width","bottom","box-decoration-break","box-shadow","box-sizing","break-after","break-before","break-inside","caption-side","clear","clip","clip-path","color","column-count","column-fill","column-gap","column-rule","column-rule-color","column-rule-style","column-rule-width","column-span","column-width","colum
ns","content","counter-increment","counter-reset","cursor","direction","display","empty-cells","filter","flex","flex-basis","flex-direction","flex-flow","flex-grow","flex-shrink","flex-wrap","float","font","font-display","font-family","font-feature-settings","font-kerning","font-language-override","font-size","font-size-adjust","font-smoothing","font-stretch","font-style","font-variant","font-variant-ligatures","font-variation-settings","font-weight","height","hyphens","icon","image-orientation","image-rendering","image-resolution","ime-mode","inherit","initial","justify-content","left","letter-spacing","line-height","list-style","list-style-image","list-style-position","list-style-type","margin","margin-bottom","margin-left","margin-right","margin-top","marks","mask","max-height","max-width","min-height","min-width","nav-down","nav-index","nav-left","nav-right","nav-up","none","normal","object-fit","object-position","opacity","order","orphans","outline","outline-color","outline-offset","outline-style","outline-width","overflow","overflow-wrap","overflow-x","overflow-y","padding","padding-bottom","padding-left","padding-right","padding-top","page-break-after","page-break-before","page-break-inside","perspective","perspective-origin","pointer-events","position","quotes","resize","right","src","tab-size","table-layout","text-align","text-align-last","text-decoration","text-decoration-color","text-decoration-line","text-decoration-style","text-indent","text-overflow","text-rendering","text-shadow","text-transform","text-underline-position","top","transform","transform-origin","transform-style","transition","transition-delay","transition-duration","transition-property","transition-timing-function","unicode-bidi","vertical-align","visibility","white-space","widows","width","word-break","word-spacing","word-wrap","z-index"].reverse() +;return n=>{const a=(e=>({IMPORTANT:{className:"meta",begin:"!important"}, +HEXCOLOR:{className:"number",begin:"#([a-fA-F0-9]{6}|[a-fA-F0-9]{3})"}, +ATTRIBUTE_SELECTOR_MODE:{className:"selector-attr",begin:/\[/,end:/\]/, +illegal:"$",contains:[e.APOS_STRING_MODE,e.QUOTE_STRING_MODE]} +}))(n),l=[n.APOS_STRING_MODE,n.QUOTE_STRING_MODE];return{name:"CSS", +case_insensitive:!0,illegal:/[=|'\$]/,keywords:{keyframePosition:"from to"}, +classNameAliases:{keyframePosition:"selector-tag"}, +contains:[n.C_BLOCK_COMMENT_MODE,{begin:/-(webkit|moz|ms|o)-(?=[a-z])/ +},n.CSS_NUMBER_MODE,{className:"selector-id",begin:/#[A-Za-z0-9_-]+/,relevance:0 +},{className:"selector-class",begin:"\\.[a-zA-Z-][a-zA-Z0-9_-]*",relevance:0 +},a.ATTRIBUTE_SELECTOR_MODE,{className:"selector-pseudo",variants:[{ +begin:":("+i.join("|")+")"},{begin:"::("+o.join("|")+")"}]},{ +className:"attribute",begin:"\\b("+r.join("|")+")\\b"},{begin:":",end:"[;}]", +contains:[a.HEXCOLOR,a.IMPORTANT,n.CSS_NUMBER_MODE,...l,{ +begin:/(url|data-uri)\(/,end:/\)/,relevance:0,keywords:{built_in:"url data-uri" +},contains:[{className:"string",begin:/[^)]/,endsWithParent:!0,excludeEnd:!0}] +},{className:"built_in",begin:/[\w-]+(?=\()/}]},{ +begin:(s=/@/,((...e)=>e.map((e=>(e=>e?"string"==typeof e?e:e.source:null)(e))).join(""))("(?=",s,")")), +end:"[{;]",relevance:0,illegal:/:/,contains:[{className:"keyword", +begin:/@-?\w[\w]*(-\w+)*/},{begin:/\s/,endsWithParent:!0,excludeEnd:!0, +relevance:0,keywords:{$pattern:/[a-z-]+/,keyword:"and or not only", +attribute:t.join(" ")},contains:[{begin:/[a-z-]+(?=:)/,className:"attribute" +},...l,n.CSS_NUMBER_MODE]}]},{className:"selector-tag", +begin:"\\b("+e.join("|")+")\\b"}]};var 
s}})()); +hljs.registerLanguage("diff",(()=>{"use strict";return e=>({name:"Diff", +aliases:["patch"],contains:[{className:"meta",relevance:10,variants:[{ +begin:/^@@ +-\d+,\d+ +\+\d+,\d+ +@@/},{begin:/^\*\*\* +\d+,\d+ +\*\*\*\*$/},{ +begin:/^--- +\d+,\d+ +----$/}]},{className:"comment",variants:[{begin:/Index: /, +end:/$/},{begin:/^index/,end:/$/},{begin:/={3,}/,end:/$/},{begin:/^-{3}/,end:/$/ +},{begin:/^\*{3} /,end:/$/},{begin:/^\+{3}/,end:/$/},{begin:/^\*{15}$/},{ +begin:/^diff --git/,end:/$/}]},{className:"addition",begin:/^\+/,end:/$/},{ +className:"deletion",begin:/^-/,end:/$/},{className:"addition",begin:/^!/, +end:/$/}]})})()); +hljs.registerLanguage("go",(()=>{"use strict";return e=>{const n={ +keyword:"break default func interface select case map struct chan else goto package switch const fallthrough if range type continue for import return var go defer bool byte complex64 complex128 float32 float64 int8 int16 int32 int64 string uint8 uint16 uint32 uint64 int uint uintptr rune", +literal:"true false iota nil", +built_in:"append cap close complex copy imag len make new panic print println real recover delete" +};return{name:"Go",aliases:["golang"],keywords:n,illegal:"{"use strict";function e(...e){ +return e.map((e=>{return(n=e)?"string"==typeof n?n:n.source:null;var n +})).join("")}return n=>{const a="HTTP/(2|1\\.[01])",s={className:"attribute", +begin:e("^",/[A-Za-z][A-Za-z0-9-]*/,"(?=\\:\\s)"),starts:{contains:[{ +className:"punctuation",begin:/: /,relevance:0,starts:{end:"$",relevance:0}}]} +},t=[s,{begin:"\\n\\n",starts:{subLanguage:[],endsWithParent:!0}}];return{ +name:"HTTP",aliases:["https"],illegal:/\S/,contains:[{begin:"^(?="+a+" \\d{3})", +end:/$/,contains:[{className:"meta",begin:a},{className:"number", +begin:"\\b\\d{3}\\b"}],starts:{end:/\b\B/,illegal:/\S/,contains:t}},{ +begin:"(?=^[A-Z]+ (.*?) 
"+a+"$)",end:/$/,contains:[{className:"string", +begin:" ",end:" ",excludeBegin:!0,excludeEnd:!0},{className:"meta",begin:a},{ +className:"keyword",begin:"[A-Z]+"}],starts:{end:/\b\B/,illegal:/\S/,contains:t} +},n.inherit(s,{relevance:0})]}}})()); +hljs.registerLanguage("ini",(()=>{"use strict";function e(e){ +return e?"string"==typeof e?e:e.source:null}function n(...n){ +return n.map((n=>e(n))).join("")}return s=>{const a={className:"number", +relevance:0,variants:[{begin:/([+-]+)?[\d]+_[\d_]+/},{begin:s.NUMBER_RE}] +},i=s.COMMENT();i.variants=[{begin:/;/,end:/$/},{begin:/#/,end:/$/}];const t={ +className:"variable",variants:[{begin:/\$[\w\d"][\w\d_]*/},{begin:/\$\{(.*?)\}/ +}]},r={className:"literal",begin:/\bon|off|true|false|yes|no\b/},l={ +className:"string",contains:[s.BACKSLASH_ESCAPE],variants:[{begin:"'''", +end:"'''",relevance:10},{begin:'"""',end:'"""',relevance:10},{begin:'"',end:'"' +},{begin:"'",end:"'"}]},c={begin:/\[/,end:/\]/,contains:[i,r,t,l,a,"self"], +relevance:0 +},g="("+[/[A-Za-z0-9_-]+/,/"(\\"|[^"])*"/,/'[^']*'/].map((n=>e(n))).join("|")+")" +;return{name:"TOML, also INI",aliases:["toml"],case_insensitive:!0,illegal:/\S/, +contains:[i,{className:"section",begin:/\[+/,end:/\]+/},{ +begin:n(g,"(\\s*\\.\\s*",g,")*",n("(?=",/\s*=\s*[^#\s]/,")")),className:"attr", +starts:{end:/$/,contains:[i,c,r,t,l,a]}}]}}})()); +hljs.registerLanguage("java",(()=>{"use strict" +;var e="\\.([0-9](_*[0-9])*)",n="[0-9a-fA-F](_*[0-9a-fA-F])*",a={ +className:"number",variants:[{ +begin:`(\\b([0-9](_*[0-9])*)((${e})|\\.)?|(${e}))[eE][+-]?([0-9](_*[0-9])*)[fFdD]?\\b` +},{begin:`\\b([0-9](_*[0-9])*)((${e})[fFdD]?\\b|\\.([fFdD]\\b)?)`},{ +begin:`(${e})[fFdD]?\\b`},{begin:"\\b([0-9](_*[0-9])*)[fFdD]\\b"},{ +begin:`\\b0[xX]((${n})\\.?|(${n})?\\.(${n}))[pP][+-]?([0-9](_*[0-9])*)[fFdD]?\\b` +},{begin:"\\b(0|[1-9](_*[0-9])*)[lL]?\\b"},{begin:`\\b0[xX](${n})[lL]?\\b`},{ +begin:"\\b0(_*[0-7])*[lL]?\\b"},{begin:"\\b0[bB][01](_*[01])*[lL]?\\b"}], +relevance:0};return e=>{ +var n="false synchronized int abstract float private char boolean var static null if const for true while long strictfp finally protected import native final void enum else break transient catch instanceof byte super volatile case assert short package default double public try this switch continue throws protected public private module requires exports do",s={ +className:"meta",begin:"@[\xc0-\u02b8a-zA-Z_$][\xc0-\u02b8a-zA-Z_$0-9]*", +contains:[{begin:/\(/,end:/\)/,contains:["self"]}]};const r=a;return{ +name:"Java",aliases:["jsp"],keywords:n,illegal:/<\/|#/, +contains:[e.COMMENT("/\\*\\*","\\*/",{relevance:0,contains:[{begin:/\w+@/, +relevance:0},{className:"doctag",begin:"@[A-Za-z]+"}]}),{ +begin:/import java\.[a-z]+\./,keywords:"import",relevance:2 +},e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE,e.APOS_STRING_MODE,e.QUOTE_STRING_MODE,{ +className:"class",beginKeywords:"class interface enum",end:/[{;=]/, +excludeEnd:!0,relevance:1,keywords:"class interface enum",illegal:/[:"\[\]]/, +contains:[{beginKeywords:"extends implements"},e.UNDERSCORE_TITLE_MODE]},{ +beginKeywords:"new throw return else",relevance:0},{className:"class", +begin:"record\\s+"+e.UNDERSCORE_IDENT_RE+"\\s*\\(",returnBegin:!0,excludeEnd:!0, +end:/[{;=]/,keywords:n,contains:[{beginKeywords:"record"},{ +begin:e.UNDERSCORE_IDENT_RE+"\\s*\\(",returnBegin:!0,relevance:0, +contains:[e.UNDERSCORE_TITLE_MODE]},{className:"params",begin:/\(/,end:/\)/, +keywords:n,relevance:0,contains:[e.C_BLOCK_COMMENT_MODE] 
+},e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE]},{className:"function", +begin:"([\xc0-\u02b8a-zA-Z_$][\xc0-\u02b8a-zA-Z_$0-9]*(<[\xc0-\u02b8a-zA-Z_$][\xc0-\u02b8a-zA-Z_$0-9]*(\\s*,\\s*[\xc0-\u02b8a-zA-Z_$][\xc0-\u02b8a-zA-Z_$0-9]*)*>)?\\s+)+"+e.UNDERSCORE_IDENT_RE+"\\s*\\(", +returnBegin:!0,end:/[{;=]/,excludeEnd:!0,keywords:n,contains:[{ +begin:e.UNDERSCORE_IDENT_RE+"\\s*\\(",returnBegin:!0,relevance:0, +contains:[e.UNDERSCORE_TITLE_MODE]},{className:"params",begin:/\(/,end:/\)/, +keywords:n,relevance:0, +contains:[s,e.APOS_STRING_MODE,e.QUOTE_STRING_MODE,r,e.C_BLOCK_COMMENT_MODE] +},e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE]},r,s]}}})()); +hljs.registerLanguage("javascript",(()=>{"use strict" +;const e="[A-Za-z$_][0-9A-Za-z$_]*",n=["as","in","of","if","for","while","finally","var","new","function","do","return","void","else","break","catch","instanceof","with","throw","case","default","try","switch","continue","typeof","delete","let","yield","const","class","debugger","async","await","static","import","from","export","extends"],a=["true","false","null","undefined","NaN","Infinity"],s=[].concat(["setInterval","setTimeout","clearInterval","clearTimeout","require","exports","eval","isFinite","isNaN","parseFloat","parseInt","decodeURI","decodeURIComponent","encodeURI","encodeURIComponent","escape","unescape"],["arguments","this","super","console","window","document","localStorage","module","global"],["Intl","DataView","Number","Math","Date","String","RegExp","Object","Function","Boolean","Error","Symbol","Set","Map","WeakSet","WeakMap","Proxy","Reflect","JSON","Promise","Float64Array","Int16Array","Int32Array","Int8Array","Uint16Array","Uint32Array","Float32Array","Array","Uint8Array","Uint8ClampedArray","ArrayBuffer","BigInt64Array","BigUint64Array","BigInt"],["EvalError","InternalError","RangeError","ReferenceError","SyntaxError","TypeError","URIError"]) +;function r(e){return t("(?=",e,")")}function t(...e){return e.map((e=>{ +return(n=e)?"string"==typeof n?n:n.source:null;var n})).join("")}return i=>{ +const c=e,o={begin:/<[A-Za-z0-9\\._:-]+/,end:/\/[A-Za-z0-9\\._:-]+>|\/>/, +isTrulyOpeningTag:(e,n)=>{const a=e[0].length+e.index,s=e.input[a] +;"<"!==s?">"===s&&(((e,{after:n})=>{const a="", +returnBegin:!0,end:"\\s*=>",contains:[{className:"params",variants:[{ +begin:i.UNDERSCORE_IDENT_RE,relevance:0},{className:null,begin:/\(\s*\)/,skip:!0 +},{begin:/\(/,end:/\)/,excludeBegin:!0,excludeEnd:!0,keywords:l,contains:f}]}] +},{begin:/,/,relevance:0},{className:"",begin:/\s/,end:/\s*/,skip:!0},{ +variants:[{begin:"<>",end:""},{begin:o.begin,"on:begin":o.isTrulyOpeningTag, +end:o.end}],subLanguage:"xml",contains:[{begin:o.begin,end:o.end,skip:!0, +contains:["self"]}]}],relevance:0},{className:"function", +beginKeywords:"function",end:/[{;]/,excludeEnd:!0,keywords:l, +contains:["self",i.inherit(i.TITLE_MODE,{begin:c}),p],illegal:/%/},{ +beginKeywords:"while if switch catch for"},{className:"function", +begin:i.UNDERSCORE_IDENT_RE+"\\([^()]*(\\([^()]*(\\([^()]*\\)[^()]*)*\\)[^()]*)*\\)\\s*\\{", +returnBegin:!0,contains:[p,i.inherit(i.TITLE_MODE,{begin:c})]},{variants:[{ +begin:"\\."+c},{begin:"\\$"+c}],relevance:0},{className:"class", +beginKeywords:"class",end:/[{;=]/,excludeEnd:!0,illegal:/[:"[\]]/,contains:[{ +beginKeywords:"extends"},i.UNDERSCORE_TITLE_MODE]},{begin:/\b(?=constructor)/, +end:/[{;]/,excludeEnd:!0,contains:[i.inherit(i.TITLE_MODE,{begin:c}),"self",p] +},{begin:"(get|set)\\s+(?="+c+"\\()",end:/\{/,keywords:"get set", 
+contains:[i.inherit(i.TITLE_MODE,{begin:c}),{begin:/\(\)/},p]},{begin:/\$[(.]/}] +}}})()); +hljs.registerLanguage("json",(()=>{"use strict";return n=>{const e={ +literal:"true false null" +},i=[n.C_LINE_COMMENT_MODE,n.C_BLOCK_COMMENT_MODE],a=[n.QUOTE_STRING_MODE,n.C_NUMBER_MODE],l={ +end:",",endsWithParent:!0,excludeEnd:!0,contains:a,keywords:e},t={begin:/\{/, +end:/\}/,contains:[{className:"attr",begin:/"/,end:/"/, +contains:[n.BACKSLASH_ESCAPE],illegal:"\\n"},n.inherit(l,{begin:/:/ +})].concat(i),illegal:"\\S"},s={begin:"\\[",end:"\\]",contains:[n.inherit(l)], +illegal:"\\S"};return a.push(t,s),i.forEach((n=>{a.push(n)})),{name:"JSON", +contains:a,keywords:e,illegal:"\\S"}}})()); +hljs.registerLanguage("kotlin",(()=>{"use strict" +;var e="\\.([0-9](_*[0-9])*)",n="[0-9a-fA-F](_*[0-9a-fA-F])*",a={ +className:"number",variants:[{ +begin:`(\\b([0-9](_*[0-9])*)((${e})|\\.)?|(${e}))[eE][+-]?([0-9](_*[0-9])*)[fFdD]?\\b` +},{begin:`\\b([0-9](_*[0-9])*)((${e})[fFdD]?\\b|\\.([fFdD]\\b)?)`},{ +begin:`(${e})[fFdD]?\\b`},{begin:"\\b([0-9](_*[0-9])*)[fFdD]\\b"},{ +begin:`\\b0[xX]((${n})\\.?|(${n})?\\.(${n}))[pP][+-]?([0-9](_*[0-9])*)[fFdD]?\\b` +},{begin:"\\b(0|[1-9](_*[0-9])*)[lL]?\\b"},{begin:`\\b0[xX](${n})[lL]?\\b`},{ +begin:"\\b0(_*[0-7])*[lL]?\\b"},{begin:"\\b0[bB][01](_*[01])*[lL]?\\b"}], +relevance:0};return e=>{const n={ +keyword:"abstract as val var vararg get set class object open private protected public noinline crossinline dynamic final enum if else do while for when throw try catch finally import package is in fun override companion reified inline lateinit init interface annotation data sealed internal infix operator out by constructor super tailrec where const inner suspend typealias external expect actual", +built_in:"Byte Short Char Int Long Boolean Float Double Void Unit Nothing", +literal:"true false null"},i={className:"symbol",begin:e.UNDERSCORE_IDENT_RE+"@" +},s={className:"subst",begin:/\$\{/,end:/\}/,contains:[e.C_NUMBER_MODE]},t={ +className:"variable",begin:"\\$"+e.UNDERSCORE_IDENT_RE},r={className:"string", +variants:[{begin:'"""',end:'"""(?=[^"])',contains:[t,s]},{begin:"'",end:"'", +illegal:/\n/,contains:[e.BACKSLASH_ESCAPE]},{begin:'"',end:'"',illegal:/\n/, +contains:[e.BACKSLASH_ESCAPE,t,s]}]};s.contains.push(r);const l={ +className:"meta", +begin:"@(?:file|property|field|get|set|receiver|param|setparam|delegate)\\s*:(?:\\s*"+e.UNDERSCORE_IDENT_RE+")?" 
+},c={className:"meta",begin:"@"+e.UNDERSCORE_IDENT_RE,contains:[{begin:/\(/, +end:/\)/,contains:[e.inherit(r,{className:"meta-string"})]}] +},o=a,b=e.COMMENT("/\\*","\\*/",{contains:[e.C_BLOCK_COMMENT_MODE]}),E={ +variants:[{className:"type",begin:e.UNDERSCORE_IDENT_RE},{begin:/\(/,end:/\)/, +contains:[]}]},d=E;return d.variants[1].contains=[E],E.variants[1].contains=[d], +{name:"Kotlin",aliases:["kt","kts"],keywords:n, +contains:[e.COMMENT("/\\*\\*","\\*/",{relevance:0,contains:[{className:"doctag", +begin:"@[A-Za-z]+"}]}),e.C_LINE_COMMENT_MODE,b,{className:"keyword", +begin:/\b(break|continue|return|this)\b/,starts:{contains:[{className:"symbol", +begin:/@\w+/}]}},i,l,c,{className:"function",beginKeywords:"fun",end:"[(]|$", +returnBegin:!0,excludeEnd:!0,keywords:n,relevance:5,contains:[{ +begin:e.UNDERSCORE_IDENT_RE+"\\s*\\(",returnBegin:!0,relevance:0, +contains:[e.UNDERSCORE_TITLE_MODE]},{className:"type",begin://, +keywords:"reified",relevance:0},{className:"params",begin:/\(/,end:/\)/, +endsParent:!0,keywords:n,relevance:0,contains:[{begin:/:/,end:/[=,\/]/, +endsWithParent:!0,contains:[E,e.C_LINE_COMMENT_MODE,b],relevance:0 +},e.C_LINE_COMMENT_MODE,b,l,c,r,e.C_NUMBER_MODE]},b]},{className:"class", +beginKeywords:"class interface trait",end:/[:\{(]|$/,excludeEnd:!0, +illegal:"extends implements",contains:[{ +beginKeywords:"public protected internal private constructor" +},e.UNDERSCORE_TITLE_MODE,{className:"type",begin://,excludeBegin:!0, +excludeEnd:!0,relevance:0},{className:"type",begin:/[,:]\s*/,end:/[<\(,]|$/, +excludeBegin:!0,returnEnd:!0},l,c]},r,{className:"meta",begin:"^#!/usr/bin/env", +end:"$",illegal:"\n"},o]}}})()); +hljs.registerLanguage("less",(()=>{"use strict" +;const e=["a","abbr","address","article","aside","audio","b","blockquote","body","button","canvas","caption","cite","code","dd","del","details","dfn","div","dl","dt","em","fieldset","figcaption","figure","footer","form","h1","h2","h3","h4","h5","h6","header","hgroup","html","i","iframe","img","input","ins","kbd","label","legend","li","main","mark","menu","nav","object","ol","p","q","quote","samp","section","span","strong","summary","sup","table","tbody","td","textarea","tfoot","th","thead","time","tr","ul","var","video"],t=["any-hover","any-pointer","aspect-ratio","color","color-gamut","color-index","device-aspect-ratio","device-height","device-width","display-mode","forced-colors","grid","height","hover","inverted-colors","monochrome","orientation","overflow-block","overflow-inline","pointer","prefers-color-scheme","prefers-contrast","prefers-reduced-motion","prefers-reduced-transparency","resolution","scan","scripting","update","width","min-width","max-width","min-height","max-height"],i=["active","any-link","blank","checked","current","default","defined","dir","disabled","drop","empty","enabled","first","first-child","first-of-type","fullscreen","future","focus","focus-visible","focus-within","has","host","host-context","hover","indeterminate","in-range","invalid","is","lang","last-child","last-of-type","left","link","local-link","not","nth-child","nth-col","nth-last-child","nth-last-col","nth-last-of-type","nth-of-type","only-child","only-of-type","optional","out-of-range","past","placeholder-shown","read-only","read-write","required","right","root","scope","target","target-within","user-invalid","valid","visited","where"],o=["after","backdrop","before","cue","cue-region","first-letter","first-line","grammar-error","marker","part","placeholder","selection","slotted","spelling-error"],n=["align-content","ali
gn-items","align-self","animation","animation-delay","animation-direction","animation-duration","animation-fill-mode","animation-iteration-count","animation-name","animation-play-state","animation-timing-function","auto","backface-visibility","background","background-attachment","background-clip","background-color","background-image","background-origin","background-position","background-repeat","background-size","border","border-bottom","border-bottom-color","border-bottom-left-radius","border-bottom-right-radius","border-bottom-style","border-bottom-width","border-collapse","border-color","border-image","border-image-outset","border-image-repeat","border-image-slice","border-image-source","border-image-width","border-left","border-left-color","border-left-style","border-left-width","border-radius","border-right","border-right-color","border-right-style","border-right-width","border-spacing","border-style","border-top","border-top-color","border-top-left-radius","border-top-right-radius","border-top-style","border-top-width","border-width","bottom","box-decoration-break","box-shadow","box-sizing","break-after","break-before","break-inside","caption-side","clear","clip","clip-path","color","column-count","column-fill","column-gap","column-rule","column-rule-color","column-rule-style","column-rule-width","column-span","column-width","columns","content","counter-increment","counter-reset","cursor","direction","display","empty-cells","filter","flex","flex-basis","flex-direction","flex-flow","flex-grow","flex-shrink","flex-wrap","float","font","font-display","font-family","font-feature-settings","font-kerning","font-language-override","font-size","font-size-adjust","font-smoothing","font-stretch","font-style","font-variant","font-variant-ligatures","font-variation-settings","font-weight","height","hyphens","icon","image-orientation","image-rendering","image-resolution","ime-mode","inherit","initial","justify-content","left","letter-spacing","line-height","list-style","list-style-image","list-style-position","list-style-type","margin","margin-bottom","margin-left","margin-right","margin-top","marks","mask","max-height","max-width","min-height","min-width","nav-down","nav-index","nav-left","nav-right","nav-up","none","normal","object-fit","object-position","opacity","order","orphans","outline","outline-color","outline-offset","outline-style","outline-width","overflow","overflow-wrap","overflow-x","overflow-y","padding","padding-bottom","padding-left","padding-right","padding-top","page-break-after","page-break-before","page-break-inside","perspective","perspective-origin","pointer-events","position","quotes","resize","right","src","tab-size","table-layout","text-align","text-align-last","text-decoration","text-decoration-color","text-decoration-line","text-decoration-style","text-indent","text-overflow","text-rendering","text-shadow","text-transform","text-underline-position","top","transform","transform-origin","transform-style","transition","transition-delay","transition-duration","transition-property","transition-timing-function","unicode-bidi","vertical-align","visibility","white-space","widows","width","word-break","word-spacing","word-wrap","z-index"].reverse(),r=i.concat(o) +;return a=>{const s=(e=>({IMPORTANT:{className:"meta",begin:"!important"}, +HEXCOLOR:{className:"number",begin:"#([a-fA-F0-9]{6}|[a-fA-F0-9]{3})"}, +ATTRIBUTE_SELECTOR_MODE:{className:"selector-attr",begin:/\[/,end:/\]/, +illegal:"$",contains:[e.APOS_STRING_MODE,e.QUOTE_STRING_MODE]} 
+}))(a),l=r,d="([\\w-]+|@\\{[\\w-]+\\})",c=[],g=[],b=e=>({className:"string", +begin:"~?"+e+".*?"+e}),m=(e,t,i)=>({className:e,begin:t,relevance:i}),u={ +$pattern:/[a-z-]+/,keyword:"and or not only",attribute:t.join(" ")},p={ +begin:"\\(",end:"\\)",contains:g,keywords:u,relevance:0} +;g.push(a.C_LINE_COMMENT_MODE,a.C_BLOCK_COMMENT_MODE,b("'"),b('"'),a.CSS_NUMBER_MODE,{ +begin:"(url|data-uri)\\(",starts:{className:"string",end:"[\\)\\n]", +excludeEnd:!0} +},s.HEXCOLOR,p,m("variable","@@?[\\w-]+",10),m("variable","@\\{[\\w-]+\\}"),m("built_in","~?`[^`]*?`"),{ +className:"attribute",begin:"[\\w-]+\\s*:",end:":",returnBegin:!0,excludeEnd:!0 +},s.IMPORTANT);const f=g.concat({begin:/\{/,end:/\}/,contains:c}),h={ +beginKeywords:"when",endsWithParent:!0,contains:[{beginKeywords:"and not" +}].concat(g)},w={begin:d+"\\s*:",returnBegin:!0,end:/[;}]/,relevance:0, +contains:[{begin:/-(webkit|moz|ms|o)-/},{className:"attribute", +begin:"\\b("+n.join("|")+")\\b",end:/(?=:)/,starts:{endsWithParent:!0, +illegal:"[<=$]",relevance:0,contains:g}}]},v={className:"keyword", +begin:"@(import|media|charset|font-face|(-[a-z]+-)?keyframes|supports|document|namespace|page|viewport|host)\\b", +starts:{end:"[;{}]",keywords:u,returnEnd:!0,contains:g,relevance:0}},y={ +className:"variable",variants:[{begin:"@[\\w-]+\\s*:",relevance:15},{ +begin:"@[\\w-]+"}],starts:{end:"[;}]",returnEnd:!0,contains:f}},k={variants:[{ +begin:"[\\.#:&\\[>]",end:"[;{}]"},{begin:d,end:/\{/}],returnBegin:!0, +returnEnd:!0,illegal:"[<='$\"]",relevance:0, +contains:[a.C_LINE_COMMENT_MODE,a.C_BLOCK_COMMENT_MODE,h,m("keyword","all\\b"),m("variable","@\\{[\\w-]+\\}"),{ +begin:"\\b("+e.join("|")+")\\b",className:"selector-tag" +},m("selector-tag",d+"%?",0),m("selector-id","#"+d),m("selector-class","\\."+d,0),m("selector-tag","&",0),s.ATTRIBUTE_SELECTOR_MODE,{ +className:"selector-pseudo",begin:":("+i.join("|")+")"},{ +className:"selector-pseudo",begin:"::("+o.join("|")+")"},{begin:"\\(",end:"\\)", +contains:f},{begin:"!important"}]},E={begin:`[\\w-]+:(:)?(${l.join("|")})`, +returnBegin:!0,contains:[k]} +;return c.push(a.C_LINE_COMMENT_MODE,a.C_BLOCK_COMMENT_MODE,v,y,E,w,k),{ +name:"Less",case_insensitive:!0,illegal:"[=>'/<($\"]",contains:c}}})()); +hljs.registerLanguage("lua",(()=>{"use strict";return e=>{ +const t="\\[=*\\[",a="\\]=*\\]",n={begin:t,end:a,contains:["self"] +},o=[e.COMMENT("--(?!\\[=*\\[)","$"),e.COMMENT("--\\[=*\\[",a,{contains:[n], +relevance:10})];return{name:"Lua",keywords:{$pattern:e.UNDERSCORE_IDENT_RE, +literal:"true false nil", +keyword:"and break do else elseif end for goto if in local not or repeat return then until while", +built_in:"_G _ENV _VERSION __index __newindex __mode __call __metatable __tostring __len __gc __add __sub __mul __div __mod __pow __concat __unm __eq __lt __le assert collectgarbage dofile error getfenv getmetatable ipairs load loadfile loadstring module next pairs pcall print rawequal rawget rawset require select setfenv setmetatable tonumber tostring type unpack xpcall arg self coroutine resume yield status wrap create running debug getupvalue debug sethook getmetatable gethook setmetatable setlocal traceback setfenv getinfo setupvalue getlocal getregistry getfenv io lines write close flush open output type read stderr stdin input stdout popen tmpfile math log max acos huge ldexp pi cos tanh pow deg tan cosh sinh random randomseed frexp ceil floor rad abs sqrt modf asin min mod fmod log10 atan2 exp sin atan os exit setlocale date getenv difftime remove time clock tmpname rename execute package 
preload loadlib loaded loaders cpath config path seeall string sub upper len gfind rep find match char dump gmatch reverse byte format gsub lower table setn insert getn foreachi maxn foreach concat sort remove" +},contains:o.concat([{className:"function",beginKeywords:"function",end:"\\)", +contains:[e.inherit(e.TITLE_MODE,{ +begin:"([_a-zA-Z]\\w*\\.)*([_a-zA-Z]\\w*:)?[_a-zA-Z]\\w*"}),{className:"params", +begin:"\\(",endsWithParent:!0,contains:o}].concat(o) +},e.C_NUMBER_MODE,e.APOS_STRING_MODE,e.QUOTE_STRING_MODE,{className:"string", +begin:t,end:a,contains:[n],relevance:5}])}}})()); +hljs.registerLanguage("makefile",(()=>{"use strict";return e=>{const i={ +className:"variable",variants:[{begin:"\\$\\("+e.UNDERSCORE_IDENT_RE+"\\)", +contains:[e.BACKSLASH_ESCAPE]},{begin:/\$[@%{"use strict";function e(e){ +return e?"string"==typeof e?e:e.source:null}function n(e){return a("(?=",e,")")} +function a(...n){return n.map((n=>e(n))).join("")}function s(...n){ +return"("+n.map((n=>e(n))).join("|")+")"}return e=>{ +const t=a(/[A-Z_]/,a("(",/[A-Z0-9_.-]*:/,")?"),/[A-Z0-9_.-]*/),i={ +className:"symbol",begin:/&[a-z]+;|&#[0-9]+;|&#x[a-f0-9]+;/},r={begin:/\s/, +contains:[{className:"meta-keyword",begin:/#?[a-z_][a-z1-9_-]+/,illegal:/\n/}] +},c=e.inherit(r,{begin:/\(/,end:/\)/}),l=e.inherit(e.APOS_STRING_MODE,{ +className:"meta-string"}),g=e.inherit(e.QUOTE_STRING_MODE,{ +className:"meta-string"}),m={endsWithParent:!0,illegal:/`]+/}]}] +}]};return{name:"HTML, XML", +aliases:["html","xhtml","rss","atom","xjb","xsd","xsl","plist","wsf","svg"], +case_insensitive:!0,contains:[{className:"meta",begin://, +relevance:10,contains:[r,g,l,c,{begin:/\[/,end:/\]/,contains:[{className:"meta", +begin://,contains:[r,c,g,l]}]}]},e.COMMENT(//,{ +relevance:10}),{begin://,relevance:10},i,{ +className:"meta",begin:/<\?xml/,end:/\?>/,relevance:10},{className:"tag", +begin:/)/,end:/>/,keywords:{name:"style"},contains:[m],starts:{ +end:/<\/style>/,returnEnd:!0,subLanguage:["css","xml"]}},{className:"tag", +begin:/)/,end:/>/,keywords:{name:"script"},contains:[m],starts:{ +end:/<\/script>/,returnEnd:!0,subLanguage:["javascript","handlebars","xml"]}},{ +className:"tag",begin:/<>|<\/>/},{className:"tag", +begin:a(//,/>/,/\s/)))),end:/\/?>/,contains:[{className:"name", +begin:t,relevance:0,starts:m}]},{className:"tag",begin:a(/<\//,n(a(t,/>/))), +contains:[{className:"name",begin:t,relevance:0},{begin:/>/,relevance:0, +endsParent:!0}]}]}}})()); +hljs.registerLanguage("markdown",(()=>{"use strict";function n(...n){ +return n.map((n=>{return(e=n)?"string"==typeof e?e:e.source:null;var e +})).join("")}return e=>{const a={begin:/<\/?[A-Za-z_]/,end:">", +subLanguage:"xml",relevance:0},i={variants:[{begin:/\[.+?\]\[.*?\]/,relevance:0 +},{begin:/\[.+?\]\(((data|javascript|mailto):|(?:http|ftp)s?:\/\/).*?\)/, +relevance:2},{begin:n(/\[.+?\]\(/,/[A-Za-z][A-Za-z0-9+.-]*/,/:\/\/.*?\)/), +relevance:2},{begin:/\[.+?\]\([./?&#].*?\)/,relevance:1},{ +begin:/\[.+?\]\(.*?\)/,relevance:0}],returnBegin:!0,contains:[{ +className:"string",relevance:0,begin:"\\[",end:"\\]",excludeBegin:!0, +returnEnd:!0},{className:"link",relevance:0,begin:"\\]\\(",end:"\\)", +excludeBegin:!0,excludeEnd:!0},{className:"symbol",relevance:0,begin:"\\]\\[", +end:"\\]",excludeBegin:!0,excludeEnd:!0}]},s={className:"strong",contains:[], +variants:[{begin:/_{2}/,end:/_{2}/},{begin:/\*{2}/,end:/\*{2}/}]},c={ +className:"emphasis",contains:[],variants:[{begin:/\*(?!\*)/,end:/\*/},{ +begin:/_(?!_)/,end:/_/,relevance:0}]};s.contains.push(c),c.contains.push(s) +;let t=[a,i] 
+;return s.contains=s.contains.concat(t),c.contains=c.contains.concat(t), +t=t.concat(s,c),{name:"Markdown",aliases:["md","mkdown","mkd"],contains:[{ +className:"section",variants:[{begin:"^#{1,6}",end:"$",contains:t},{ +begin:"(?=^.+?\\n[=-]{2,}$)",contains:[{begin:"^[=-]*$"},{begin:"^",end:"\\n", +contains:t}]}]},a,{className:"bullet",begin:"^[ \t]*([*+-]|(\\d+\\.))(?=\\s+)", +end:"\\s+",excludeEnd:!0},s,c,{className:"quote",begin:"^>\\s+",contains:t, +end:"$"},{className:"code",variants:[{begin:"(`{3,})[^`](.|\\n)*?\\1`*[ ]*"},{ +begin:"(~{3,})[^~](.|\\n)*?\\1~*[ ]*"},{begin:"```",end:"```+[ ]*$"},{ +begin:"~~~",end:"~~~+[ ]*$"},{begin:"`.+?`"},{begin:"(?=^( {4}|\\t))", +contains:[{begin:"^( {4}|\\t)",end:"(\\n)$"}],relevance:0}]},{ +begin:"^[-\\*]{3,}",end:"$"},i,{begin:/^\[[^\n]+\]:/,returnBegin:!0,contains:[{ +className:"symbol",begin:/\[/,end:/\]/,excludeBegin:!0,excludeEnd:!0},{ +className:"link",begin:/:\s*/,end:/$/,excludeBegin:!0}]}]}}})()); +hljs.registerLanguage("nginx",(()=>{"use strict";return e=>{const n={ +className:"variable",variants:[{begin:/\$\d+/},{begin:/\$\{/,end:/\}/},{ +begin:/[$@]/+e.UNDERSCORE_IDENT_RE}]},a={endsWithParent:!0,keywords:{ +$pattern:"[a-z/_]+", +literal:"on off yes no true false none blocked debug info notice warn error crit select break last permanent redirect kqueue rtsig epoll poll /dev/poll" +},relevance:0,illegal:"=>",contains:[e.HASH_COMMENT_MODE,{className:"string", +contains:[e.BACKSLASH_ESCAPE,n],variants:[{begin:/"/,end:/"/},{begin:/'/,end:/'/ +}]},{begin:"([a-z]+):/",end:"\\s",endsWithParent:!0,excludeEnd:!0,contains:[n] +},{className:"regexp",contains:[e.BACKSLASH_ESCAPE,n],variants:[{begin:"\\s\\^", +end:"\\s|\\{|;",returnEnd:!0},{begin:"~\\*?\\s+",end:"\\s|\\{|;",returnEnd:!0},{ +begin:"\\*(\\.[a-z\\-]+)+"},{begin:"([a-z\\-]+\\.)+\\*"}]},{className:"number", +begin:"\\b\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}(:\\d{1,5})?\\b"},{ +className:"number",begin:"\\b\\d+[kKmMgGdshdwy]*\\b",relevance:0},n]};return{ +name:"Nginx config",aliases:["nginxconf"],contains:[e.HASH_COMMENT_MODE,{ +begin:e.UNDERSCORE_IDENT_RE+"\\s+\\{",returnBegin:!0,end:/\{/,contains:[{ +className:"section",begin:e.UNDERSCORE_IDENT_RE}],relevance:0},{ +begin:e.UNDERSCORE_IDENT_RE+"\\s",end:";|\\{",returnBegin:!0,contains:[{ +className:"attribute",begin:e.UNDERSCORE_IDENT_RE,starts:a}],relevance:0}], +illegal:"[^\\s\\}]"}}})()); +hljs.registerLanguage("objectivec",(()=>{"use strict";return e=>{ +const n=/[a-zA-Z@][a-zA-Z0-9_]*/,_={$pattern:n, +keyword:"@interface @class @protocol @implementation"};return{ +name:"Objective-C",aliases:["mm","objc","obj-c","obj-c++","objective-c++"], +keywords:{$pattern:n, +keyword:"int float while char export sizeof typedef const struct for union unsigned long volatile static bool mutable if do return goto void enum else break extern asm case short default double register explicit signed typename this switch continue wchar_t inline readonly assign readwrite self @synchronized id typeof nonatomic super unichar IBOutlet IBAction strong weak copy in out inout bycopy byref oneway __strong __weak __block __autoreleasing @private @protected @public @try @property @end @throw @catch @finally @autoreleasepool @synthesize @dynamic @selector @optional @required @encode @package @import @defs @compatibility_alias __bridge __bridge_transfer __bridge_retained __bridge_retain __covariant __contravariant __kindof _Nonnull _Nullable _Null_unspecified __FUNCTION__ __PRETTY_FUNCTION__ __attribute__ getter setter retain unsafe_unretained nonnull nullable 
null_unspecified null_resettable class instancetype NS_DESIGNATED_INITIALIZER NS_UNAVAILABLE NS_REQUIRES_SUPER NS_RETURNS_INNER_POINTER NS_INLINE NS_AVAILABLE NS_DEPRECATED NS_ENUM NS_OPTIONS NS_SWIFT_UNAVAILABLE NS_ASSUME_NONNULL_BEGIN NS_ASSUME_NONNULL_END NS_REFINED_FOR_SWIFT NS_SWIFT_NAME NS_SWIFT_NOTHROW NS_DURING NS_HANDLER NS_ENDHANDLER NS_VALUERETURN NS_VOIDRETURN", +literal:"false true FALSE TRUE nil YES NO NULL", +built_in:"BOOL dispatch_once_t dispatch_queue_t dispatch_sync dispatch_async dispatch_once" +},illegal:"/,end:/$/, +illegal:"\\n"},e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE]},{ +className:"class",begin:"("+_.keyword.split(" ").join("|")+")\\b",end:/(\{|$)/, +excludeEnd:!0,keywords:_,contains:[e.UNDERSCORE_TITLE_MODE]},{ +begin:"\\."+e.UNDERSCORE_IDENT_RE,relevance:0}]}}})()); +hljs.registerLanguage("perl",(()=>{"use strict";function e(e){ +return e?"string"==typeof e?e:e.source:null}function n(...n){ +return n.map((n=>e(n))).join("")}function t(...n){ +return"("+n.map((n=>e(n))).join("|")+")"}return e=>{ +const r=/[dualxmsipngr]{0,12}/,s={$pattern:/[\w.]+/, +keyword:"abs accept alarm and atan2 bind binmode bless break caller chdir chmod chomp chop chown chr chroot close closedir connect continue cos crypt dbmclose dbmopen defined delete die do dump each else elsif endgrent endhostent endnetent endprotoent endpwent endservent eof eval exec exists exit exp fcntl fileno flock for foreach fork format formline getc getgrent getgrgid getgrnam gethostbyaddr gethostbyname gethostent getlogin getnetbyaddr getnetbyname getnetent getpeername getpgrp getpriority getprotobyname getprotobynumber getprotoent getpwent getpwnam getpwuid getservbyname getservbyport getservent getsockname getsockopt given glob gmtime goto grep gt hex if index int ioctl join keys kill last lc lcfirst length link listen local localtime log lstat lt ma map mkdir msgctl msgget msgrcv msgsnd my ne next no not oct open opendir or ord our pack package pipe pop pos print printf prototype push q|0 qq quotemeta qw qx rand read readdir readline readlink readpipe recv redo ref rename require reset return reverse rewinddir rindex rmdir say scalar seek seekdir select semctl semget semop send setgrent sethostent setnetent setpgrp setpriority setprotoent setpwent setservent setsockopt shift shmctl shmget shmread shmwrite shutdown sin sleep socket socketpair sort splice split sprintf sqrt srand stat state study sub substr symlink syscall sysopen sysread sysseek system syswrite tell telldir tie tied time times tr truncate uc ucfirst umask undef unless unlink unpack unshift untie until use utime values vec wait waitpid wantarray warn when while write x|0 xor y|0" +},i={className:"subst",begin:"[$@]\\{",end:"\\}",keywords:s},a={begin:/->\{/, +end:/\}/},o={variants:[{begin:/\$\d/},{ +begin:n(/[$%@](\^\w\b|#\w+(::\w+)*|\{\w+\}|\w+(::\w*)*)/,"(?![A-Za-z])(?![@$%])") +},{begin:/[$%@][^\s\w{]/,relevance:0}] +},c=[e.BACKSLASH_ESCAPE,i,o],g=[/!/,/\//,/\|/,/\?/,/'/,/"/,/#/],l=(e,t,s="\\1")=>{ +const i="\\1"===s?s:n(s,t) +;return n(n("(?:",e,")"),t,/(?:\\.|[^\\\/])*?/,i,/(?:\\.|[^\\\/])*?/,s,r) +},d=(e,t,s)=>n(n("(?:",e,")"),t,/(?:\\.|[^\\\/])*?/,s,r),p=[o,e.HASH_COMMENT_MODE,e.COMMENT(/^=\w/,/=cut/,{ +endsWithParent:!0}),a,{className:"string",contains:c,variants:[{ +begin:"q[qwxr]?\\s*\\(",end:"\\)",relevance:5},{begin:"q[qwxr]?\\s*\\[", +end:"\\]",relevance:5},{begin:"q[qwxr]?\\s*\\{",end:"\\}",relevance:5},{ +begin:"q[qwxr]?\\s*\\|",end:"\\|",relevance:5},{begin:"q[qwxr]?\\s*<",end:">", 
+relevance:5},{begin:"qw\\s+q",end:"q",relevance:5},{begin:"'",end:"'", +contains:[e.BACKSLASH_ESCAPE]},{begin:'"',end:'"'},{begin:"`",end:"`", +contains:[e.BACKSLASH_ESCAPE]},{begin:/\{\w+\}/,relevance:0},{ +begin:"-?\\w+\\s*=>",relevance:0}]},{className:"number", +begin:"(\\b0[0-7_]+)|(\\b0x[0-9a-fA-F_]+)|(\\b[1-9][0-9_]*(\\.[0-9_]+)?)|[0_]\\b", +relevance:0},{ +begin:"(\\/\\/|"+e.RE_STARTERS_RE+"|\\b(split|return|print|reverse|grep)\\b)\\s*", +keywords:"split return print reverse grep",relevance:0, +contains:[e.HASH_COMMENT_MODE,{className:"regexp",variants:[{ +begin:l("s|tr|y",t(...g))},{begin:l("s|tr|y","\\(","\\)")},{ +begin:l("s|tr|y","\\[","\\]")},{begin:l("s|tr|y","\\{","\\}")}],relevance:2},{ +className:"regexp",variants:[{begin:/(m|qr)\/\//,relevance:0},{ +begin:d("(?:m|qr)?",/\//,/\//)},{begin:d("m|qr",t(...g),/\1/)},{ +begin:d("m|qr",/\(/,/\)/)},{begin:d("m|qr",/\[/,/\]/)},{ +begin:d("m|qr",/\{/,/\}/)}]}]},{className:"function",beginKeywords:"sub", +end:"(\\s*\\(.*?\\))?[;{]",excludeEnd:!0,relevance:5,contains:[e.TITLE_MODE]},{ +begin:"-\\w\\b",relevance:0},{begin:"^__DATA__$",end:"^__END__$", +subLanguage:"mojolicious",contains:[{begin:"^@@.*",end:"$",className:"comment"}] +}];return i.contains=p,a.contains=p,{name:"Perl",aliases:["pl","pm"],keywords:s, +contains:p}}})()); +hljs.registerLanguage("php",(()=>{"use strict";return e=>{const r={ +className:"variable", +begin:"\\$+[a-zA-Z_\x7f-\xff][a-zA-Z0-9_\x7f-\xff]*(?![A-Za-z0-9])(?![$])"},t={ +className:"meta",variants:[{begin:/<\?php/,relevance:10},{begin:/<\?[=]?/},{ +begin:/\?>/}]},a={className:"subst",variants:[{begin:/\$\w+/},{begin:/\{\$/, +end:/\}/}]},n=e.inherit(e.APOS_STRING_MODE,{illegal:null +}),i=e.inherit(e.QUOTE_STRING_MODE,{illegal:null, +contains:e.QUOTE_STRING_MODE.contains.concat(a)}),o=e.END_SAME_AS_BEGIN({ +begin:/<<<[ \t]*(\w+)\n/,end:/[ \t]*(\w+)\b/, +contains:e.QUOTE_STRING_MODE.contains.concat(a)}),l={className:"string", +contains:[e.BACKSLASH_ESCAPE,t],variants:[e.inherit(n,{begin:"b'",end:"'" +}),e.inherit(i,{begin:'b"',end:'"'}),i,n,o]},s={className:"number",variants:[{ +begin:"\\b0b[01]+(?:_[01]+)*\\b"},{begin:"\\b0o[0-7]+(?:_[0-7]+)*\\b"},{ +begin:"\\b0x[\\da-f]+(?:_[\\da-f]+)*\\b"},{ +begin:"(?:\\b\\d+(?:_\\d+)*(\\.(?:\\d+(?:_\\d+)*))?|\\B\\.\\d+)(?:e[+-]?\\d+)?" 
+}],relevance:0},c={ +keyword:"__CLASS__ __DIR__ __FILE__ __FUNCTION__ __LINE__ __METHOD__ __NAMESPACE__ __TRAIT__ die echo exit include include_once print require require_once array abstract and as binary bool boolean break callable case catch class clone const continue declare default do double else elseif empty enddeclare endfor endforeach endif endswitch endwhile enum eval extends final finally float for foreach from global goto if implements instanceof insteadof int integer interface isset iterable list match|0 mixed new object or private protected public real return string switch throw trait try unset use var void while xor yield", +literal:"false null true", +built_in:"Error|0 AppendIterator ArgumentCountError ArithmeticError ArrayIterator ArrayObject AssertionError BadFunctionCallException BadMethodCallException CachingIterator CallbackFilterIterator CompileError Countable DirectoryIterator DivisionByZeroError DomainException EmptyIterator ErrorException Exception FilesystemIterator FilterIterator GlobIterator InfiniteIterator InvalidArgumentException IteratorIterator LengthException LimitIterator LogicException MultipleIterator NoRewindIterator OutOfBoundsException OutOfRangeException OuterIterator OverflowException ParentIterator ParseError RangeException RecursiveArrayIterator RecursiveCachingIterator RecursiveCallbackFilterIterator RecursiveDirectoryIterator RecursiveFilterIterator RecursiveIterator RecursiveIteratorIterator RecursiveRegexIterator RecursiveTreeIterator RegexIterator RuntimeException SeekableIterator SplDoublyLinkedList SplFileInfo SplFileObject SplFixedArray SplHeap SplMaxHeap SplMinHeap SplObjectStorage SplObserver SplObserver SplPriorityQueue SplQueue SplStack SplSubject SplSubject SplTempFileObject TypeError UnderflowException UnexpectedValueException UnhandledMatchError ArrayAccess Closure Generator Iterator IteratorAggregate Serializable Stringable Throwable Traversable WeakReference WeakMap Directory __PHP_Incomplete_Class parent php_user_filter self static stdClass" +};return{aliases:["php3","php4","php5","php6","php7","php8"], +case_insensitive:!0,keywords:c, +contains:[e.HASH_COMMENT_MODE,e.COMMENT("//","$",{contains:[t] +}),e.COMMENT("/\\*","\\*/",{contains:[{className:"doctag",begin:"@[A-Za-z]+"}] +}),e.COMMENT("__halt_compiler.+?;",!1,{endsWithParent:!0, +keywords:"__halt_compiler"}),t,{className:"keyword",begin:/\$this\b/},r,{ +begin:/(::|->)+[a-zA-Z_\x7f-\xff][a-zA-Z0-9_\x7f-\xff]*/},{className:"function", +relevance:0,beginKeywords:"fn function",end:/[;{]/,excludeEnd:!0, +illegal:"[$%\\[]",contains:[{beginKeywords:"use"},e.UNDERSCORE_TITLE_MODE,{ +begin:"=>",endsParent:!0},{className:"params",begin:"\\(",end:"\\)", +excludeBegin:!0,excludeEnd:!0,keywords:c, +contains:["self",r,e.C_BLOCK_COMMENT_MODE,l,s]}]},{className:"class",variants:[{ +beginKeywords:"enum",illegal:/[($"]/},{beginKeywords:"class interface trait", +illegal:/[:($"]/}],relevance:0,end:/\{/,excludeEnd:!0,contains:[{ +beginKeywords:"extends implements"},e.UNDERSCORE_TITLE_MODE]},{ +beginKeywords:"namespace",relevance:0,end:";",illegal:/[.']/, +contains:[e.UNDERSCORE_TITLE_MODE]},{beginKeywords:"use",relevance:0,end:";", +contains:[e.UNDERSCORE_TITLE_MODE]},l,s]}}})()); +hljs.registerLanguage("php-template",(()=>{"use strict";return n=>({ +name:"PHP template",subLanguage:"xml",contains:[{begin:/<\?(php|=)?/,end:/\?>/, +subLanguage:"php",contains:[{begin:"/\\*",end:"\\*/",skip:!0},{begin:'b"', +end:'"',skip:!0},{begin:"b'",end:"'",skip:!0},n.inherit(n.APOS_STRING_MODE,{ 
+illegal:null,className:null,contains:null,skip:!0 +}),n.inherit(n.QUOTE_STRING_MODE,{illegal:null,className:null,contains:null, +skip:!0})]}]})})()); +hljs.registerLanguage("plaintext",(()=>{"use strict";return t=>({ +name:"Plain text",aliases:["text","txt"],disableAutodetect:!0})})()); +hljs.registerLanguage("properties",(()=>{"use strict";return e=>{ +var n="[ \\t\\f]*",a=n+"[:=]"+n,t="("+a+"|[ \\t\\f]+)",r="([^\\\\\\W:= \\t\\f\\n]|\\\\.)+",s="([^\\\\:= \\t\\f\\n]|\\\\.)+",i={ +end:t,relevance:0,starts:{className:"string",end:/$/,relevance:0,contains:[{ +begin:"\\\\\\\\"},{begin:"\\\\\\n"}]}};return{name:".properties", +case_insensitive:!0,illegal:/\S/,contains:[e.COMMENT("^\\s*[!#]","$"),{ +returnBegin:!0,variants:[{begin:r+a,relevance:1},{begin:r+"[ \\t\\f]+", +relevance:0}],contains:[{className:"attr",begin:r,endsParent:!0,relevance:0}], +starts:i},{begin:s+t,returnBegin:!0,relevance:0,contains:[{className:"meta", +begin:s,endsParent:!0,relevance:0}],starts:i},{className:"attr",relevance:0, +begin:s+n+"$"}]}}})()); +hljs.registerLanguage("python",(()=>{"use strict";return e=>{const n={ +$pattern:/[A-Za-z]\w+|__\w+__/, +keyword:["and","as","assert","async","await","break","class","continue","def","del","elif","else","except","finally","for","from","global","if","import","in","is","lambda","nonlocal|10","not","or","pass","raise","return","try","while","with","yield"], +built_in:["__import__","abs","all","any","ascii","bin","bool","breakpoint","bytearray","bytes","callable","chr","classmethod","compile","complex","delattr","dict","dir","divmod","enumerate","eval","exec","filter","float","format","frozenset","getattr","globals","hasattr","hash","help","hex","id","input","int","isinstance","issubclass","iter","len","list","locals","map","max","memoryview","min","next","object","oct","open","ord","pow","print","property","range","repr","reversed","round","set","setattr","slice","sorted","staticmethod","str","sum","super","tuple","type","vars","zip"], +literal:["__debug__","Ellipsis","False","None","NotImplemented","True"], +type:["Any","Callable","Coroutine","Dict","List","Literal","Generic","Optional","Sequence","Set","Tuple","Type","Union"] +},a={className:"meta",begin:/^(>>>|\.\.\.) 
/},i={className:"subst",begin:/\{/, +end:/\}/,keywords:n,illegal:/#/},s={begin:/\{\{/,relevance:0},t={ +className:"string",contains:[e.BACKSLASH_ESCAPE],variants:[{ +begin:/([uU]|[bB]|[rR]|[bB][rR]|[rR][bB])?'''/,end:/'''/, +contains:[e.BACKSLASH_ESCAPE,a],relevance:10},{ +begin:/([uU]|[bB]|[rR]|[bB][rR]|[rR][bB])?"""/,end:/"""/, +contains:[e.BACKSLASH_ESCAPE,a],relevance:10},{ +begin:/([fF][rR]|[rR][fF]|[fF])'''/,end:/'''/, +contains:[e.BACKSLASH_ESCAPE,a,s,i]},{begin:/([fF][rR]|[rR][fF]|[fF])"""/, +end:/"""/,contains:[e.BACKSLASH_ESCAPE,a,s,i]},{begin:/([uU]|[rR])'/,end:/'/, +relevance:10},{begin:/([uU]|[rR])"/,end:/"/,relevance:10},{ +begin:/([bB]|[bB][rR]|[rR][bB])'/,end:/'/},{begin:/([bB]|[bB][rR]|[rR][bB])"/, +end:/"/},{begin:/([fF][rR]|[rR][fF]|[fF])'/,end:/'/, +contains:[e.BACKSLASH_ESCAPE,s,i]},{begin:/([fF][rR]|[rR][fF]|[fF])"/,end:/"/, +contains:[e.BACKSLASH_ESCAPE,s,i]},e.APOS_STRING_MODE,e.QUOTE_STRING_MODE] +},r="[0-9](_?[0-9])*",l=`(\\b(${r}))?\\.(${r})|\\b(${r})\\.`,b={ +className:"number",relevance:0,variants:[{ +begin:`(\\b(${r})|(${l}))[eE][+-]?(${r})[jJ]?\\b`},{begin:`(${l})[jJ]?`},{ +begin:"\\b([1-9](_?[0-9])*|0+(_?0)*)[lLjJ]?\\b"},{ +begin:"\\b0[bB](_?[01])+[lL]?\\b"},{begin:"\\b0[oO](_?[0-7])+[lL]?\\b"},{ +begin:"\\b0[xX](_?[0-9a-fA-F])+[lL]?\\b"},{begin:`\\b(${r})[jJ]\\b`}]},o={ +className:"comment", +begin:(d=/# type:/,((...e)=>e.map((e=>(e=>e?"string"==typeof e?e:e.source:null)(e))).join(""))("(?=",d,")")), +end:/$/,keywords:n,contains:[{begin:/# type:/},{begin:/#/,end:/\b\B/, +endsWithParent:!0}]},c={className:"params",variants:[{className:"", +begin:/\(\s*\)/,skip:!0},{begin:/\(/,end:/\)/,excludeBegin:!0,excludeEnd:!0, +keywords:n,contains:["self",a,b,t,e.HASH_COMMENT_MODE]}]};var d +;return i.contains=[t,b,a],{name:"Python",aliases:["py","gyp","ipython"], +keywords:n,illegal:/(<\/|->|\?)|=>/,contains:[a,b,{begin:/\bself\b/},{ +beginKeywords:"if",relevance:0},t,o,e.HASH_COMMENT_MODE,{variants:[{ +className:"function",beginKeywords:"def"},{className:"class", +beginKeywords:"class"}],end:/:/,illegal:/[${=;\n,]/, +contains:[e.UNDERSCORE_TITLE_MODE,c,{begin:/->/,endsWithParent:!0,keywords:n}] +},{className:"meta",begin:/^[\t ]*@/,end:/(?=#)|$/,contains:[b,c,t]}]}}})()); +hljs.registerLanguage("python-repl",(()=>{"use strict";return s=>({ +aliases:["pycon"],contains:[{className:"meta",starts:{end:/ |$/,starts:{end:"$", +subLanguage:"python"}},variants:[{begin:/^>>>(?=[ ]|$)/},{ +begin:/^\.\.\.(?=[ ]|$)/}]}]})})()); +hljs.registerLanguage("r",(()=>{"use strict";function e(...e){return e.map((e=>{ +return(a=e)?"string"==typeof a?a:a.source:null;var a})).join("")}return a=>{ +const n=/(?:(?:[a-zA-Z]|\.[._a-zA-Z])[._a-zA-Z0-9]*)|\.(?!\d)/;return{name:"R", +illegal:/->/,keywords:{$pattern:n, +keyword:"function if in break next repeat else for while", +literal:"NULL NA TRUE FALSE Inf NaN NA_integer_|10 NA_real_|10 NA_character_|10 NA_complex_|10", +built_in:"LETTERS letters month.abb month.name pi T F abs acos acosh all any anyNA Arg as.call as.character as.complex as.double as.environment as.integer as.logical as.null.default as.numeric as.raw asin asinh atan atanh attr attributes baseenv browser c call ceiling class Conj cos cosh cospi cummax cummin cumprod cumsum digamma dim dimnames emptyenv exp expression floor forceAndCall gamma gc.time globalenv Im interactive invisible is.array is.atomic is.call is.character is.complex is.double is.environment is.expression is.finite is.function is.infinite is.integer is.language is.list is.logical is.matrix is.na is.name is.nan 
is.null is.numeric is.object is.pairlist is.raw is.recursive is.single is.symbol lazyLoadDBfetch length lgamma list log max min missing Mod names nargs nzchar oldClass on.exit pos.to.env proc.time prod quote range Re rep retracemem return round seq_along seq_len seq.int sign signif sin sinh sinpi sqrt standardGeneric substitute sum switch tan tanh tanpi tracemem trigamma trunc unclass untracemem UseMethod xtfrm" +},compilerExtensions:[(a,n)=>{if(!a.beforeMatch)return +;if(a.starts)throw Error("beforeMatch cannot be used with starts") +;const i=Object.assign({},a);Object.keys(a).forEach((e=>{delete a[e] +})),a.begin=e(i.beforeMatch,e("(?=",i.begin,")")),a.starts={relevance:0, +contains:[Object.assign(i,{endsParent:!0})]},a.relevance=0,delete i.beforeMatch +}],contains:[a.COMMENT(/#'/,/$/,{contains:[{className:"doctag", +begin:"@examples",starts:{contains:[{begin:/\n/},{begin:/#'\s*(?=@[a-zA-Z]+)/, +endsParent:!0},{begin:/#'/,end:/$/,excludeBegin:!0}]}},{className:"doctag", +begin:"@param",end:/$/,contains:[{className:"variable",variants:[{begin:n},{ +begin:/`(?:\\.|[^`\\])+`/}],endsParent:!0}]},{className:"doctag", +begin:/@[a-zA-Z]+/},{className:"meta-keyword",begin:/\\[a-zA-Z]+/}] +}),a.HASH_COMMENT_MODE,{className:"string",contains:[a.BACKSLASH_ESCAPE], +variants:[a.END_SAME_AS_BEGIN({begin:/[rR]"(-*)\(/,end:/\)(-*)"/ +}),a.END_SAME_AS_BEGIN({begin:/[rR]"(-*)\{/,end:/\}(-*)"/ +}),a.END_SAME_AS_BEGIN({begin:/[rR]"(-*)\[/,end:/\](-*)"/ +}),a.END_SAME_AS_BEGIN({begin:/[rR]'(-*)\(/,end:/\)(-*)'/ +}),a.END_SAME_AS_BEGIN({begin:/[rR]'(-*)\{/,end:/\}(-*)'/ +}),a.END_SAME_AS_BEGIN({begin:/[rR]'(-*)\[/,end:/\](-*)'/}),{begin:'"',end:'"', +relevance:0},{begin:"'",end:"'",relevance:0}]},{className:"number",relevance:0, +beforeMatch:/([^a-zA-Z0-9._])/,variants:[{ +match:/0[xX][0-9a-fA-F]+\.[0-9a-fA-F]*[pP][+-]?\d+i?/},{ +match:/0[xX][0-9a-fA-F]+([pP][+-]?\d+)?[Li]?/},{ +match:/(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?[Li]?/}]},{begin:"%",end:"%"},{ +begin:e(/[a-zA-Z][a-zA-Z_0-9]*/,"\\s+<-\\s+")},{begin:"`",end:"`",contains:[{ +begin:/\\./}]}]}}})()); +hljs.registerLanguage("ruby",(()=>{"use strict";function e(...e){ +return e.map((e=>{return(n=e)?"string"==typeof n?n:n.source:null;var n +})).join("")}return n=>{ +const a="([a-zA-Z_]\\w*[!?=]?|[-+~]@|<<|>>|=~|===?|<=>|[<>]=?|\\*\\*|[-/+%^&*~`|]|\\[\\]=?)",i={ +keyword:"and then defined module in return redo if BEGIN retry end for self when next until do begin unless END rescue else break undef not super class case require yield alias while ensure elsif or include attr_reader attr_writer attr_accessor __FILE__", +built_in:"proc lambda",literal:"true false nil"},s={className:"doctag", +begin:"@[A-Za-z]+"},r={begin:"#<",end:">"},b=[n.COMMENT("#","$",{contains:[s] +}),n.COMMENT("^=begin","^=end",{contains:[s],relevance:10 +}),n.COMMENT("^__END__","\\n$")],c={className:"subst",begin:/#\{/,end:/\}/, +keywords:i},t={className:"string",contains:[n.BACKSLASH_ESCAPE,c],variants:[{ +begin:/'/,end:/'/},{begin:/"/,end:/"/},{begin:/`/,end:/`/},{begin:/%[qQwWx]?\(/, +end:/\)/},{begin:/%[qQwWx]?\[/,end:/\]/},{begin:/%[qQwWx]?\{/,end:/\}/},{ +begin:/%[qQwWx]?/},{begin:/%[qQwWx]?\//,end:/\//},{begin:/%[qQwWx]?%/, +end:/%/},{begin:/%[qQwWx]?-/,end:/-/},{begin:/%[qQwWx]?\|/,end:/\|/},{ +begin:/\B\?(\\\d{1,3})/},{begin:/\B\?(\\x[A-Fa-f0-9]{1,2})/},{ +begin:/\B\?(\\u\{?[A-Fa-f0-9]{1,6}\}?)/},{ +begin:/\B\?(\\M-\\C-|\\M-\\c|\\c\\M-|\\M-|\\C-\\M-)[\x20-\x7e]/},{ +begin:/\B\?\\(c|C-)[\x20-\x7e]/},{begin:/\B\?\\?\S/},{ 
+begin:/<<[-~]?'?(\w+)\n(?:[^\n]*\n)*?\s*\1\b/,returnBegin:!0,contains:[{ +begin:/<<[-~]?'?/},n.END_SAME_AS_BEGIN({begin:/(\w+)/,end:/(\w+)/, +contains:[n.BACKSLASH_ESCAPE,c]})]}]},g="[0-9](_?[0-9])*",d={className:"number", +relevance:0,variants:[{ +begin:`\\b([1-9](_?[0-9])*|0)(\\.(${g}))?([eE][+-]?(${g})|r)?i?\\b`},{ +begin:"\\b0[dD][0-9](_?[0-9])*r?i?\\b"},{begin:"\\b0[bB][0-1](_?[0-1])*r?i?\\b" +},{begin:"\\b0[oO][0-7](_?[0-7])*r?i?\\b"},{ +begin:"\\b0[xX][0-9a-fA-F](_?[0-9a-fA-F])*r?i?\\b"},{ +begin:"\\b0(_?[0-7])+r?i?\\b"}]},l={className:"params",begin:"\\(",end:"\\)", +endsParent:!0,keywords:i},o=[t,{className:"class",beginKeywords:"class module", +end:"$|;",illegal:/=/,contains:[n.inherit(n.TITLE_MODE,{ +begin:"[A-Za-z_]\\w*(::\\w+)*(\\?|!)?"}),{begin:"<\\s*",contains:[{ +begin:"("+n.IDENT_RE+"::)?"+n.IDENT_RE,relevance:0}]}].concat(b)},{ +className:"function",begin:e(/def\s+/,(_=a+"\\s*(\\(|;|$)",e("(?=",_,")"))), +relevance:0,keywords:"def",end:"$|;",contains:[n.inherit(n.TITLE_MODE,{begin:a +}),l].concat(b)},{begin:n.IDENT_RE+"::"},{className:"symbol", +begin:n.UNDERSCORE_IDENT_RE+"(!|\\?)?:",relevance:0},{className:"symbol", +begin:":(?!\\s)",contains:[t,{begin:a}],relevance:0},d,{className:"variable", +begin:"(\\$\\W)|((\\$|@@?)(\\w+))(?=[^@$?])(?![A-Za-z])(?![@$?'])"},{ +className:"params",begin:/\|/,end:/\|/,relevance:0,keywords:i},{ +begin:"("+n.RE_STARTERS_RE+"|unless)\\s*",keywords:"unless",contains:[{ +className:"regexp",contains:[n.BACKSLASH_ESCAPE,c],illegal:/\n/,variants:[{ +begin:"/",end:"/[a-z]*"},{begin:/%r\{/,end:/\}[a-z]*/},{begin:"%r\\(", +end:"\\)[a-z]*"},{begin:"%r!",end:"![a-z]*"},{begin:"%r\\[",end:"\\][a-z]*"}] +}].concat(r,b),relevance:0}].concat(r,b);var _;c.contains=o,l.contains=o +;const E=[{begin:/^\s*=>/,starts:{end:"$",contains:o}},{className:"meta", +begin:"^([>?]>|[\\w#]+\\(\\w+\\):\\d+:\\d+>|(\\w+-)?\\d+\\.\\d+\\.\\d+(p\\d+)?[^\\d][^>]+>)(?=[ ])", +starts:{end:"$",contains:o}}];return b.unshift(r),{name:"Ruby", +aliases:["rb","gemspec","podspec","thor","irb"],keywords:i,illegal:/\/\*/, +contains:[n.SHEBANG({binary:"ruby"})].concat(E).concat(b).concat(o)}}})()); +hljs.registerLanguage("rust",(()=>{"use strict";return e=>{ +const n="([ui](8|16|32|64|128|size)|f(32|64))?",t="drop i8 i16 i32 i64 i128 isize u8 u16 u32 u64 u128 usize f32 f64 str char bool Box Option Result String Vec Copy Send Sized Sync Drop Fn FnMut FnOnce ToOwned Clone Debug PartialEq PartialOrd Eq Ord AsRef AsMut Into From Default Iterator Extend IntoIterator DoubleEndedIterator ExactSizeIterator SliceConcatExt ToString assert! assert_eq! bitflags! bytes! cfg! col! concat! concat_idents! debug_assert! debug_assert_eq! env! panic! file! format! format_args! include_bin! include_str! line! local_data_key! module_path! option_env! print! println! select! stringify! try! unimplemented! unreachable! vec! write! writeln! macro_rules! assert_ne! debug_assert_ne!" 
+;return{name:"Rust",aliases:["rs"],keywords:{$pattern:e.IDENT_RE+"!?", +keyword:"abstract as async await become box break const continue crate do dyn else enum extern false final fn for if impl in let loop macro match mod move mut override priv pub ref return self Self static struct super trait true try type typeof unsafe unsized use virtual where while yield", +literal:"true false Some None Ok Err",built_in:t},illegal:""}]}}})()); +hljs.registerLanguage("scss",(()=>{"use strict" +;const e=["a","abbr","address","article","aside","audio","b","blockquote","body","button","canvas","caption","cite","code","dd","del","details","dfn","div","dl","dt","em","fieldset","figcaption","figure","footer","form","h1","h2","h3","h4","h5","h6","header","hgroup","html","i","iframe","img","input","ins","kbd","label","legend","li","main","mark","menu","nav","object","ol","p","q","quote","samp","section","span","strong","summary","sup","table","tbody","td","textarea","tfoot","th","thead","time","tr","ul","var","video"],t=["any-hover","any-pointer","aspect-ratio","color","color-gamut","color-index","device-aspect-ratio","device-height","device-width","display-mode","forced-colors","grid","height","hover","inverted-colors","monochrome","orientation","overflow-block","overflow-inline","pointer","prefers-color-scheme","prefers-contrast","prefers-reduced-motion","prefers-reduced-transparency","resolution","scan","scripting","update","width","min-width","max-width","min-height","max-height"],i=["active","any-link","blank","checked","current","default","defined","dir","disabled","drop","empty","enabled","first","first-child","first-of-type","fullscreen","future","focus","focus-visible","focus-within","has","host","host-context","hover","indeterminate","in-range","invalid","is","lang","last-child","last-of-type","left","link","local-link","not","nth-child","nth-col","nth-last-child","nth-last-col","nth-last-of-type","nth-of-type","only-child","only-of-type","optional","out-of-range","past","placeholder-shown","read-only","read-write","required","right","root","scope","target","target-within","user-invalid","valid","visited","where"],o=["after","backdrop","before","cue","cue-region","first-letter","first-line","grammar-error","marker","part","placeholder","selection","slotted","spelling-error"],r=["align-content","align-items","align-self","animation","animation-delay","animation-direction","animation-duration","animation-fill-mode","animation-iteration-count","animation-name","animation-play-state","animation-timing-function","auto","backface-visibility","background","background-attachment","background-clip","background-color","background-image","background-origin","background-position","background-repeat","background-size","border","border-bottom","border-bottom-color","border-bottom-left-radius","border-bottom-right-radius","border-bottom-style","border-bottom-width","border-collapse","border-color","border-image","border-image-outset","border-image-repeat","border-image-slice","border-image-source","border-image-width","border-left","border-left-color","border-left-style","border-left-width","border-radius","border-right","border-right-color","border-right-style","border-right-width","border-spacing","border-style","border-top","border-top-color","border-top-left-radius","border-top-right-radius","border-top-style","border-top-width","border-width","bottom","box-decoration-break","box-shadow","box-sizing","break-after","break-before","break-inside","caption-side","clear","clip","clip-path","color","column-count","co
lumn-fill","column-gap","column-rule","column-rule-color","column-rule-style","column-rule-width","column-span","column-width","columns","content","counter-increment","counter-reset","cursor","direction","display","empty-cells","filter","flex","flex-basis","flex-direction","flex-flow","flex-grow","flex-shrink","flex-wrap","float","font","font-display","font-family","font-feature-settings","font-kerning","font-language-override","font-size","font-size-adjust","font-smoothing","font-stretch","font-style","font-variant","font-variant-ligatures","font-variation-settings","font-weight","height","hyphens","icon","image-orientation","image-rendering","image-resolution","ime-mode","inherit","initial","justify-content","left","letter-spacing","line-height","list-style","list-style-image","list-style-position","list-style-type","margin","margin-bottom","margin-left","margin-right","margin-top","marks","mask","max-height","max-width","min-height","min-width","nav-down","nav-index","nav-left","nav-right","nav-up","none","normal","object-fit","object-position","opacity","order","orphans","outline","outline-color","outline-offset","outline-style","outline-width","overflow","overflow-wrap","overflow-x","overflow-y","padding","padding-bottom","padding-left","padding-right","padding-top","page-break-after","page-break-before","page-break-inside","perspective","perspective-origin","pointer-events","position","quotes","resize","right","src","tab-size","table-layout","text-align","text-align-last","text-decoration","text-decoration-color","text-decoration-line","text-decoration-style","text-indent","text-overflow","text-rendering","text-shadow","text-transform","text-underline-position","top","transform","transform-origin","transform-style","transition","transition-delay","transition-duration","transition-property","transition-timing-function","unicode-bidi","vertical-align","visibility","white-space","widows","width","word-break","word-spacing","word-wrap","z-index"].reverse() +;return a=>{const n=(e=>({IMPORTANT:{className:"meta",begin:"!important"}, +HEXCOLOR:{className:"number",begin:"#([a-fA-F0-9]{6}|[a-fA-F0-9]{3})"}, +ATTRIBUTE_SELECTOR_MODE:{className:"selector-attr",begin:/\[/,end:/\]/, +illegal:"$",contains:[e.APOS_STRING_MODE,e.QUOTE_STRING_MODE]} +}))(a),l=o,s=i,d="@[a-z-]+",c={className:"variable", +begin:"(\\$[a-zA-Z-][a-zA-Z0-9_-]*)\\b"};return{name:"SCSS",case_insensitive:!0, +illegal:"[=/|']",contains:[a.C_LINE_COMMENT_MODE,a.C_BLOCK_COMMENT_MODE,{ +className:"selector-id",begin:"#[A-Za-z0-9_-]+",relevance:0},{ +className:"selector-class",begin:"\\.[A-Za-z0-9_-]+",relevance:0 +},n.ATTRIBUTE_SELECTOR_MODE,{className:"selector-tag", +begin:"\\b("+e.join("|")+")\\b",relevance:0},{className:"selector-pseudo", +begin:":("+s.join("|")+")"},{className:"selector-pseudo", +begin:"::("+l.join("|")+")"},c,{begin:/\(/,end:/\)/,contains:[a.CSS_NUMBER_MODE] +},{className:"attribute",begin:"\\b("+r.join("|")+")\\b"},{ 
+begin:"\\b(whitespace|wait|w-resize|visible|vertical-text|vertical-ideographic|uppercase|upper-roman|upper-alpha|underline|transparent|top|thin|thick|text|text-top|text-bottom|tb-rl|table-header-group|table-footer-group|sw-resize|super|strict|static|square|solid|small-caps|separate|se-resize|scroll|s-resize|rtl|row-resize|ridge|right|repeat|repeat-y|repeat-x|relative|progress|pointer|overline|outside|outset|oblique|nowrap|not-allowed|normal|none|nw-resize|no-repeat|no-drop|newspaper|ne-resize|n-resize|move|middle|medium|ltr|lr-tb|lowercase|lower-roman|lower-alpha|loose|list-item|line|line-through|line-edge|lighter|left|keep-all|justify|italic|inter-word|inter-ideograph|inside|inset|inline|inline-block|inherit|inactive|ideograph-space|ideograph-parenthesis|ideograph-numeric|ideograph-alpha|horizontal|hidden|help|hand|groove|fixed|ellipsis|e-resize|double|dotted|distribute|distribute-space|distribute-letter|distribute-all-lines|disc|disabled|default|decimal|dashed|crosshair|collapse|col-resize|circle|char|center|capitalize|break-word|break-all|bottom|both|bolder|bold|block|bidi-override|below|baseline|auto|always|all-scroll|absolute|table|table-cell)\\b" +},{begin:":",end:";", +contains:[c,n.HEXCOLOR,a.CSS_NUMBER_MODE,a.QUOTE_STRING_MODE,a.APOS_STRING_MODE,n.IMPORTANT] +},{begin:"@(page|font-face)",lexemes:d,keywords:"@page @font-face"},{begin:"@", +end:"[{;]",returnBegin:!0,keywords:{$pattern:/[a-z-]+/, +keyword:"and or not only",attribute:t.join(" ")},contains:[{begin:d, +className:"keyword"},{begin:/[a-z-]+(?=:)/,className:"attribute" +},c,a.QUOTE_STRING_MODE,a.APOS_STRING_MODE,n.HEXCOLOR,a.CSS_NUMBER_MODE]}]}} +})()); +hljs.registerLanguage("shell",(()=>{"use strict";return s=>({ +name:"Shell Session",aliases:["console"],contains:[{className:"meta", +begin:/^\s{0,3}[/~\w\d[\]()@-]*[>%$#]/,starts:{end:/[^\\](?=\s*$)/, +subLanguage:"bash"}}]})})()); +hljs.registerLanguage("sql",(()=>{"use strict";function e(e){ +return e?"string"==typeof e?e:e.source:null}function r(...r){ +return r.map((r=>e(r))).join("")}function t(...r){ +return"("+r.map((r=>e(r))).join("|")+")"}return e=>{ +const n=e.COMMENT("--","$"),a=["true","false","unknown"],i=["bigint","binary","blob","boolean","char","character","clob","date","dec","decfloat","decimal","float","int","integer","interval","nchar","nclob","national","numeric","real","row","smallint","time","timestamp","varchar","varying","varbinary"],s=["abs","acos","array_agg","asin","atan","avg","cast","ceil","ceiling","coalesce","corr","cos","cosh","count","covar_pop","covar_samp","cume_dist","dense_rank","deref","element","exp","extract","first_value","floor","json_array","json_arrayagg","json_exists","json_object","json_objectagg","json_query","json_table","json_table_primitive","json_value","lag","last_value","lead","listagg","ln","log","log10","lower","max","min","mod","nth_value","ntile","nullif","percent_rank","percentile_cont","percentile_disc","position","position_regex","power","rank","regr_avgx","regr_avgy","regr_count","regr_intercept","regr_r2","regr_slope","regr_sxx","regr_sxy","regr_syy","row_number","sin","sinh","sqrt","stddev_pop","stddev_samp","substring","substring_regex","sum","tan","tanh","translate","translate_regex","treat","trim","trim_array","unnest","upper","value_of","var_pop","var_samp","width_bucket"],o=["create table","insert into","primary key","foreign key","not null","alter table","add constraint","grouping sets","on overflow","character set","respect nulls","ignore nulls","nulls first","nulls last","depth first","breadth 
first"],c=s,l=["abs","acos","all","allocate","alter","and","any","are","array","array_agg","array_max_cardinality","as","asensitive","asin","asymmetric","at","atan","atomic","authorization","avg","begin","begin_frame","begin_partition","between","bigint","binary","blob","boolean","both","by","call","called","cardinality","cascaded","case","cast","ceil","ceiling","char","char_length","character","character_length","check","classifier","clob","close","coalesce","collate","collect","column","commit","condition","connect","constraint","contains","convert","copy","corr","corresponding","cos","cosh","count","covar_pop","covar_samp","create","cross","cube","cume_dist","current","current_catalog","current_date","current_default_transform_group","current_path","current_role","current_row","current_schema","current_time","current_timestamp","current_path","current_role","current_transform_group_for_type","current_user","cursor","cycle","date","day","deallocate","dec","decimal","decfloat","declare","default","define","delete","dense_rank","deref","describe","deterministic","disconnect","distinct","double","drop","dynamic","each","element","else","empty","end","end_frame","end_partition","end-exec","equals","escape","every","except","exec","execute","exists","exp","external","extract","false","fetch","filter","first_value","float","floor","for","foreign","frame_row","free","from","full","function","fusion","get","global","grant","group","grouping","groups","having","hold","hour","identity","in","indicator","initial","inner","inout","insensitive","insert","int","integer","intersect","intersection","interval","into","is","join","json_array","json_arrayagg","json_exists","json_object","json_objectagg","json_query","json_table","json_table_primitive","json_value","lag","language","large","last_value","lateral","lead","leading","left","like","like_regex","listagg","ln","local","localtime","localtimestamp","log","log10","lower","match","match_number","match_recognize","matches","max","member","merge","method","min","minute","mod","modifies","module","month","multiset","national","natural","nchar","nclob","new","no","none","normalize","not","nth_value","ntile","null","nullif","numeric","octet_length","occurrences_regex","of","offset","old","omit","on","one","only","open","or","order","out","outer","over","overlaps","overlay","parameter","partition","pattern","per","percent","percent_rank","percentile_cont","percentile_disc","period","portion","position","position_regex","power","precedes","precision","prepare","primary","procedure","ptf","range","rank","reads","real","recursive","ref","references","referencing","regr_avgx","regr_avgy","regr_count","regr_intercept","regr_r2","regr_slope","regr_sxx","regr_sxy","regr_syy","release","result","return","returns","revoke","right","rollback","rollup","row","row_number","rows","running","savepoint","scope","scroll","search","second","seek","select","sensitive","session_user","set","show","similar","sin","sinh","skip","smallint","some","specific","specifictype","sql","sqlexception","sqlstate","sqlwarning","sqrt","start","static","stddev_pop","stddev_samp","submultiset","subset","substring","substring_regex","succeeds","sum","symmetric","system","system_time","system_user","table","tablesample","tan","tanh","then","time","timestamp","timezone_hour","timezone_minute","to","trailing","translate","translate_regex","translation","treat","trigger","trim","trim_array","true","truncate","uescape","union","unique","unknown","unnest","update 
","upper","user","using","value","values","value_of","var_pop","var_samp","varbinary","varchar","varying","versioning","when","whenever","where","width_bucket","window","with","within","without","year","add","asc","collation","desc","final","first","last","view"].filter((e=>!s.includes(e))),u={ +begin:r(/\b/,t(...c),/\s*\(/),keywords:{built_in:c}};return{name:"SQL", +case_insensitive:!0,illegal:/[{}]|<\//,keywords:{$pattern:/\b[\w\.]+/, +keyword:((e,{exceptions:r,when:t}={})=>{const n=t +;return r=r||[],e.map((e=>e.match(/\|\d+$/)||r.includes(e)?e:n(e)?e+"|0":e)) +})(l,{when:e=>e.length<3}),literal:a,type:i, +built_in:["current_catalog","current_date","current_default_transform_group","current_path","current_role","current_schema","current_transform_group_for_type","current_user","session_user","system_time","system_user","current_time","localtime","current_timestamp","localtimestamp"] +},contains:[{begin:t(...o),keywords:{$pattern:/[\w\.]+/,keyword:l.concat(o), +literal:a,type:i}},{className:"type", +begin:t("double precision","large object","with timezone","without timezone") +},u,{className:"variable",begin:/@[a-z0-9]+/},{className:"string",variants:[{ +begin:/'/,end:/'/,contains:[{begin:/''/}]}]},{begin:/"/,end:/"/,contains:[{ +begin:/""/}]},e.C_NUMBER_MODE,e.C_BLOCK_COMMENT_MODE,n,{className:"operator", +begin:/[-+*/=%^~]|&&?|\|\|?|!=?|<(?:=>?|<|>)?|>[>=]?/,relevance:0}]}}})()); +hljs.registerLanguage("swift",(()=>{"use strict";function e(e){ +return e?"string"==typeof e?e:e.source:null}function n(e){return a("(?=",e,")")} +function a(...n){return n.map((n=>e(n))).join("")}function t(...n){ +return"("+n.map((n=>e(n))).join("|")+")"} +const i=e=>a(/\b/,e,/\w$/.test(e)?/\b/:/\B/),s=["Protocol","Type"].map(i),u=["init","self"].map(i),c=["Any","Self"],r=["associatedtype","async","await",/as\?/,/as!/,"as","break","case","catch","class","continue","convenience","default","defer","deinit","didSet","do","dynamic","else","enum","extension","fallthrough",/fileprivate\(set\)/,"fileprivate","final","for","func","get","guard","if","import","indirect","infix",/init\?/,/init!/,"inout",/internal\(set\)/,"internal","in","is","lazy","let","mutating","nonmutating",/open\(set\)/,"open","operator","optional","override","postfix","precedencegroup","prefix",/private\(set\)/,"private","protocol",/public\(set\)/,"public","repeat","required","rethrows","return","set","some","static","struct","subscript","super","switch","throws","throw",/try\?/,/try!/,"try","typealias",/unowned\(safe\)/,/unowned\(unsafe\)/,"unowned","var","weak","where","while","willSet"],o=["false","nil","true"],l=["assignment","associativity","higherThan","left","lowerThan","none","right"],m=["#colorLiteral","#column","#dsohandle","#else","#elseif","#endif","#error","#file","#fileID","#fileLiteral","#filePath","#function","#if","#imageLiteral","#keyPath","#line","#selector","#sourceLocation","#warn_unqualified_access","#warning"],d=["abs","all","any","assert","assertionFailure","debugPrint","dump","fatalError","getVaList","isKnownUniquelyReferenced","max","min","numericCast","pointwiseMax","pointwiseMin","precondition","preconditionFailure","print","readLine","repeatElement","sequence","stride","swap","swift_unboxFromSwiftValueWithType","transcode","type","unsafeBitCast","unsafeDowncast","withExtendedLifetime","withUnsafeMutablePointer","withUnsafePointer","withVaList","withoutActuallyEscaping","zip"],p=t(/[/=\-+!*%<>&|^~?]/,/[\u00A1-\u00A7]/,/[\u00A9\u00AB]/,/[\u00AC\u00AE]/,/[\u00B0\u00B1]/,/[\u00B6\u00BB\u00BF\u00D7\u00F7]/,/[\u2016-\u2017
]/,/[\u2020-\u2027]/,/[\u2030-\u203E]/,/[\u2041-\u2053]/,/[\u2055-\u205E]/,/[\u2190-\u23FF]/,/[\u2500-\u2775]/,/[\u2794-\u2BFF]/,/[\u2E00-\u2E7F]/,/[\u3001-\u3003]/,/[\u3008-\u3020]/,/[\u3030]/),F=t(p,/[\u0300-\u036F]/,/[\u1DC0-\u1DFF]/,/[\u20D0-\u20FF]/,/[\uFE00-\uFE0F]/,/[\uFE20-\uFE2F]/),b=a(p,F,"*"),h=t(/[a-zA-Z_]/,/[\u00A8\u00AA\u00AD\u00AF\u00B2-\u00B5\u00B7-\u00BA]/,/[\u00BC-\u00BE\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u00FF]/,/[\u0100-\u02FF\u0370-\u167F\u1681-\u180D\u180F-\u1DBF]/,/[\u1E00-\u1FFF]/,/[\u200B-\u200D\u202A-\u202E\u203F-\u2040\u2054\u2060-\u206F]/,/[\u2070-\u20CF\u2100-\u218F\u2460-\u24FF\u2776-\u2793]/,/[\u2C00-\u2DFF\u2E80-\u2FFF]/,/[\u3004-\u3007\u3021-\u302F\u3031-\u303F\u3040-\uD7FF]/,/[\uF900-\uFD3D\uFD40-\uFDCF\uFDF0-\uFE1F\uFE30-\uFE44]/,/[\uFE47-\uFEFE\uFF00-\uFFFD]/),f=t(h,/\d/,/[\u0300-\u036F\u1DC0-\u1DFF\u20D0-\u20FF\uFE20-\uFE2F]/),w=a(h,f,"*"),y=a(/[A-Z]/,f,"*"),g=["autoclosure",a(/convention\(/,t("swift","block","c"),/\)/),"discardableResult","dynamicCallable","dynamicMemberLookup","escaping","frozen","GKInspectable","IBAction","IBDesignable","IBInspectable","IBOutlet","IBSegueAction","inlinable","main","nonobjc","NSApplicationMain","NSCopying","NSManaged",a(/objc\(/,w,/\)/),"objc","objcMembers","propertyWrapper","requires_stored_property_inits","testable","UIApplicationMain","unknown","usableFromInline"],E=["iOS","iOSApplicationExtension","macOS","macOSApplicationExtension","macCatalyst","macCatalystApplicationExtension","watchOS","watchOSApplicationExtension","tvOS","tvOSApplicationExtension","swift"] +;return e=>{const p={match:/\s+/,relevance:0},h=e.COMMENT("/\\*","\\*/",{ +contains:["self"]}),v=[e.C_LINE_COMMENT_MODE,h],N={className:"keyword", +begin:a(/\./,n(t(...s,...u))),end:t(...s,...u),excludeBegin:!0},A={ +match:a(/\./,t(...r)),relevance:0 +},C=r.filter((e=>"string"==typeof e)).concat(["_|0"]),_={variants:[{ +className:"keyword", +match:t(...r.filter((e=>"string"!=typeof e)).concat(c).map(i),...u)}]},D={ +$pattern:t(/\b\w+/,/#\w+/),keyword:C.concat(m),literal:o},B=[N,A,_],k=[{ +match:a(/\./,t(...d)),relevance:0},{className:"built_in", +match:a(/\b/,t(...d),/(?=\()/)}],M={match:/->/,relevance:0},S=[M,{ +className:"operator",relevance:0,variants:[{match:b},{match:`\\.(\\.|${F})+`}] +}],x="([0-9a-fA-F]_*)+",I={className:"number",relevance:0,variants:[{ +match:"\\b(([0-9]_*)+)(\\.(([0-9]_*)+))?([eE][+-]?(([0-9]_*)+))?\\b"},{ +match:`\\b0x(${x})(\\.(${x}))?([pP][+-]?(([0-9]_*)+))?\\b`},{ +match:/\b0o([0-7]_*)+\b/},{match:/\b0b([01]_*)+\b/}]},O=(e="")=>({ +className:"subst",variants:[{match:a(/\\/,e,/[0\\tnr"']/)},{ +match:a(/\\/,e,/u\{[0-9a-fA-F]{1,8}\}/)}]}),T=(e="")=>({className:"subst", +match:a(/\\/,e,/[\t ]*(?:[\r\n]|\r\n)/)}),L=(e="")=>({className:"subst", +label:"interpol",begin:a(/\\/,e,/\(/),end:/\)/}),P=(e="")=>({begin:a(e,/"""/), +end:a(/"""/,e),contains:[O(e),T(e),L(e)]}),$=(e="")=>({begin:a(e,/"/), +end:a(/"/,e),contains:[O(e),L(e)]}),K={className:"string", +variants:[P(),P("#"),P("##"),P("###"),$(),$("#"),$("##"),$("###")]},j={ +match:a(/`/,w,/`/)},z=[j,{className:"variable",match:/\$\d+/},{ +className:"variable",match:`\\$${f}+`}],q=[{match:/(@|#)available/, +className:"keyword",starts:{contains:[{begin:/\(/,end:/\)/,keywords:E, +contains:[...S,I,K]}]}},{className:"keyword",match:a(/@/,t(...g))},{ +className:"meta",match:a(/@/,w)}],U={match:n(/\b[A-Z]/),relevance:0,contains:[{ +className:"type", +match:a(/(AV|CA|CF|CG|CI|CL|CM|CN|CT|MK|MP|MTK|MTL|NS|SCN|SK|UI|WK|XC)/,f,"+") 
+},{className:"type",match:y,relevance:0},{match:/[?!]+/,relevance:0},{ +match:/\.\.\./,relevance:0},{match:a(/\s+&\s+/,n(y)),relevance:0}]},Z={ +begin://,keywords:D,contains:[...v,...B,...q,M,U]};U.contains.push(Z) +;const G={begin:/\(/,end:/\)/,relevance:0,keywords:D,contains:["self",{ +match:a(w,/\s*:/),keywords:"_|0",relevance:0 +},...v,...B,...k,...S,I,K,...z,...q,U]},H={beginKeywords:"func",contains:[{ +className:"title",match:t(j.match,w,b),endsParent:!0,relevance:0},p]},R={ +begin://,contains:[...v,U]},V={begin:/\(/,end:/\)/,keywords:D, +contains:[{begin:t(n(a(w,/\s*:/)),n(a(w,/\s+/,w,/\s*:/))),end:/:/,relevance:0, +contains:[{className:"keyword",match:/\b_\b/},{className:"params",match:w}] +},...v,...B,...S,I,K,...q,U,G],endsParent:!0,illegal:/["']/},W={ +className:"function",match:n(/\bfunc\b/),contains:[H,R,V,p],illegal:[/\[/,/%/] +},X={className:"function",match:/\b(subscript|init[?!]?)\s*(?=[<(])/,keywords:{ +keyword:"subscript init init? init!",$pattern:/\w+[?!]?/},contains:[R,V,p], +illegal:/\[|%/},J={beginKeywords:"operator",end:e.MATCH_NOTHING_RE,contains:[{ +className:"title",match:b,endsParent:!0,relevance:0}]},Q={ +beginKeywords:"precedencegroup",end:e.MATCH_NOTHING_RE,contains:[{ +className:"title",match:y,relevance:0},{begin:/{/,end:/}/,relevance:0, +endsParent:!0,keywords:[...l,...o],contains:[U]}]};for(const e of K.variants){ +const n=e.contains.find((e=>"interpol"===e.label));n.keywords=D +;const a=[...B,...k,...S,I,K,...z];n.contains=[...a,{begin:/\(/,end:/\)/, +contains:["self",...a]}]}return{name:"Swift",keywords:D,contains:[...v,W,X,{ +className:"class",beginKeywords:"struct protocol class extension enum", +end:"\\{",excludeEnd:!0,keywords:D,contains:[e.inherit(e.TITLE_MODE,{ +begin:/[A-Za-z$_][\u00C0-\u02B80-9A-Za-z$_]*/}),...B]},J,Q,{ +beginKeywords:"import",end:/$/,contains:[...v],relevance:0 +},...B,...k,...S,I,K,...z,...q,U,G]}}})()); +hljs.registerLanguage("typescript",(()=>{"use strict" +;const e="[A-Za-z$_][0-9A-Za-z$_]*",n=["as","in","of","if","for","while","finally","var","new","function","do","return","void","else","break","catch","instanceof","with","throw","case","default","try","switch","continue","typeof","delete","let","yield","const","class","debugger","async","await","static","import","from","export","extends"],a=["true","false","null","undefined","NaN","Infinity"],s=[].concat(["setInterval","setTimeout","clearInterval","clearTimeout","require","exports","eval","isFinite","isNaN","parseFloat","parseInt","decodeURI","decodeURIComponent","encodeURI","encodeURIComponent","escape","unescape"],["arguments","this","super","console","window","document","localStorage","module","global"],["Intl","DataView","Number","Math","Date","String","RegExp","Object","Function","Boolean","Error","Symbol","Set","Map","WeakSet","WeakMap","Proxy","Reflect","JSON","Promise","Float64Array","Int16Array","Int32Array","Int8Array","Uint16Array","Uint32Array","Float32Array","Array","Uint8Array","Uint8ClampedArray","ArrayBuffer","BigInt64Array","BigUint64Array","BigInt"],["EvalError","InternalError","RangeError","ReferenceError","SyntaxError","TypeError","URIError"]) +;function t(e){return r("(?=",e,")")}function r(...e){return e.map((e=>{ +return(n=e)?"string"==typeof n?n:n.source:null;var n})).join("")}return i=>{ +const c={$pattern:e, +keyword:n.concat(["type","namespace","typedef","interface","public","private","protected","implements","declare","abstract","readonly"]), +literal:a, +built_in:s.concat(["any","void","number","boolean","string","object","never","enum"]) 
+},o={className:"meta",begin:"@[A-Za-z$_][0-9A-Za-z$_]*"},l=(e,n,a)=>{ +const s=e.contains.findIndex((e=>e.label===n)) +;if(-1===s)throw Error("can not find mode to replace");e.contains.splice(s,1,a) +},b=(i=>{const c=e,o={begin:/<[A-Za-z0-9\\._:-]+/, +end:/\/[A-Za-z0-9\\._:-]+>|\/>/,isTrulyOpeningTag:(e,n)=>{ +const a=e[0].length+e.index,s=e.input[a];"<"!==s?">"===s&&(((e,{after:n})=>{ +const a="", +returnBegin:!0,end:"\\s*=>",contains:[{className:"params",variants:[{ +begin:i.UNDERSCORE_IDENT_RE,relevance:0},{className:null,begin:/\(\s*\)/,skip:!0 +},{begin:/\(/,end:/\)/,excludeBegin:!0,excludeEnd:!0,keywords:l,contains:f}]}] +},{begin:/,/,relevance:0},{className:"",begin:/\s/,end:/\s*/,skip:!0},{ +variants:[{begin:"<>",end:""},{begin:o.begin,"on:begin":o.isTrulyOpeningTag, +end:o.end}],subLanguage:"xml",contains:[{begin:o.begin,end:o.end,skip:!0, +contains:["self"]}]}],relevance:0},{className:"function", +beginKeywords:"function",end:/[{;]/,excludeEnd:!0,keywords:l, +contains:["self",i.inherit(i.TITLE_MODE,{begin:c}),A],illegal:/%/},{ +beginKeywords:"while if switch catch for"},{className:"function", +begin:i.UNDERSCORE_IDENT_RE+"\\([^()]*(\\([^()]*(\\([^()]*\\)[^()]*)*\\)[^()]*)*\\)\\s*\\{", +returnBegin:!0,contains:[A,i.inherit(i.TITLE_MODE,{begin:c})]},{variants:[{ +begin:"\\."+c},{begin:"\\$"+c}],relevance:0},{className:"class", +beginKeywords:"class",end:/[{;=]/,excludeEnd:!0,illegal:/[:"[\]]/,contains:[{ +beginKeywords:"extends"},i.UNDERSCORE_TITLE_MODE]},{begin:/\b(?=constructor)/, +end:/[{;]/,excludeEnd:!0,contains:[i.inherit(i.TITLE_MODE,{begin:c}),"self",A] +},{begin:"(get|set)\\s+(?="+c+"\\()",end:/\{/,keywords:"get set", +contains:[i.inherit(i.TITLE_MODE,{begin:c}),{begin:/\(\)/},A]},{begin:/\$[(.]/}] +}})(i) +;return Object.assign(b.keywords,c),b.exports.PARAMS_CONTAINS.push(o),b.contains=b.contains.concat([o,{ +beginKeywords:"namespace",end:/\{/,excludeEnd:!0},{beginKeywords:"interface", +end:/\{/,excludeEnd:!0,keywords:"interface extends" +}]),l(b,"shebang",i.SHEBANG()),l(b,"use_strict",{className:"meta",relevance:10, +begin:/^\s*['"]use strict['"]/ +}),b.contains.find((e=>"function"===e.className)).relevance=0,Object.assign(b,{ +name:"TypeScript",aliases:["ts","tsx"]}),b}})()); +hljs.registerLanguage("vbnet",(()=>{"use strict";function e(e){ +return e?"string"==typeof e?e:e.source:null}function n(...n){ +return n.map((n=>e(n))).join("")}function t(...n){ +return"("+n.map((n=>e(n))).join("|")+")"}return e=>{ +const a=/\d{1,2}\/\d{1,2}\/\d{4}/,i=/\d{4}-\d{1,2}-\d{1,2}/,s=/(\d|1[012])(:\d+){0,2} *(AM|PM)/,r=/\d{1,2}(:\d{1,2}){1,2}/,o={ +className:"literal",variants:[{begin:n(/# */,t(i,a),/ *#/)},{ +begin:n(/# */,r,/ *#/)},{begin:n(/# */,s,/ *#/)},{ +begin:n(/# */,t(i,a),/ +/,t(s,r),/ *#/)}]},l=e.COMMENT(/'''/,/$/,{contains:[{ +className:"doctag",begin:/<\/?/,end:/>/}]}),c=e.COMMENT(null,/$/,{variants:[{ +begin:/'/},{begin:/([\t ]|^)REM(?=\s)/}]});return{name:"Visual Basic .NET", +aliases:["vb"],case_insensitive:!0,classNameAliases:{label:"symbol"},keywords:{ +keyword:"addhandler alias aggregate ansi as async assembly auto binary by byref byval call case catch class compare const continue custom declare default delegate dim distinct do each equals else elseif end enum erase error event exit explicit finally for friend from function get global goto group handles if implements imports in inherits interface into iterator join key let lib loop me mid module mustinherit mustoverride mybase myclass namespace narrowing new next notinheritable notoverridable of off on operator 
option optional order overloads overridable overrides paramarray partial preserve private property protected public raiseevent readonly redim removehandler resume return select set shadows shared skip static step stop structure strict sub synclock take text then throw to try unicode until using when where while widening with withevents writeonly yield", +built_in:"addressof and andalso await directcast gettype getxmlnamespace is isfalse isnot istrue like mod nameof new not or orelse trycast typeof xor cbool cbyte cchar cdate cdbl cdec cint clng cobj csbyte cshort csng cstr cuint culng cushort", +type:"boolean byte char date decimal double integer long object sbyte short single string uinteger ulong ushort", +literal:"true false nothing"}, +illegal:"//|\\{|\\}|endif|gosub|variant|wend|^\\$ ",contains:[{ +className:"string",begin:/"(""|[^/n])"C\b/},{className:"string",begin:/"/, +end:/"/,illegal:/\n/,contains:[{begin:/""/}]},o,{className:"number",relevance:0, +variants:[{begin:/\b\d[\d_]*((\.[\d_]+(E[+-]?[\d_]+)?)|(E[+-]?[\d_]+))[RFD@!#]?/ +},{begin:/\b\d[\d_]*((U?[SIL])|[%&])?/},{begin:/&H[\dA-F_]+((U?[SIL])|[%&])?/},{ +begin:/&O[0-7_]+((U?[SIL])|[%&])?/},{begin:/&B[01_]+((U?[SIL])|[%&])?/}]},{ +className:"label",begin:/^\w+:/},l,c,{className:"meta", +begin:/[\t ]*#(const|disable|else|elseif|enable|end|externalsource|if|region)\b/, +end:/$/,keywords:{ +"meta-keyword":"const disable else elseif enable end externalsource if region then" +},contains:[c]}]}}})()); +hljs.registerLanguage("yaml",(()=>{"use strict";return e=>{ +var n="true false yes no null",a="[\\w#;/?:@&=+$,.~*'()[\\]]+",s={ +className:"string",relevance:0,variants:[{begin:/'/,end:/'/},{begin:/"/,end:/"/ +},{begin:/\S+/}],contains:[e.BACKSLASH_ESCAPE,{className:"template-variable", +variants:[{begin:/\{\{/,end:/\}\}/},{begin:/%\{/,end:/\}/}]}]},i=e.inherit(s,{ +variants:[{begin:/'/,end:/'/},{begin:/"/,end:/"/},{begin:/[^\s,{}[\]]+/}]}),l={ +end:",",endsWithParent:!0,excludeEnd:!0,keywords:n,relevance:0},t={begin:/\{/, +end:/\}/,contains:[l],illegal:"\\n",relevance:0},g={begin:"\\[",end:"\\]", +contains:[l],illegal:"\\n",relevance:0},b=[{className:"attr",variants:[{ +begin:"\\w[\\w :\\/.-]*:(?=[ \t]|$)"},{begin:'"\\w[\\w :\\/.-]*":(?=[ \t]|$)'},{ +begin:"'\\w[\\w :\\/.-]*':(?=[ \t]|$)"}]},{className:"meta",begin:"^---\\s*$", +relevance:10},{className:"string", +begin:"[\\|>]([1-9]?[+-])?[ ]*\\n( +)[^ ][^\\n]*\\n(\\2[^\\n]+\\n?)*"},{ +begin:"<%[%=-]?",end:"[%-]?%>",subLanguage:"ruby",excludeBegin:!0,excludeEnd:!0, +relevance:0},{className:"type",begin:"!\\w+!"+a},{className:"type", +begin:"!<"+a+">"},{className:"type",begin:"!"+a},{className:"type",begin:"!!"+a +},{className:"meta",begin:"&"+e.UNDERSCORE_IDENT_RE+"$"},{className:"meta", +begin:"\\*"+e.UNDERSCORE_IDENT_RE+"$"},{className:"bullet",begin:"-(?=[ ]|$)", +relevance:0},e.HASH_COMMENT_MODE,{beginKeywords:n,keywords:{literal:n}},{ +className:"number", +begin:"\\b[0-9]{4}(-[0-9][0-9]){0,2}([Tt \\t][0-9][0-9]?(:[0-9][0-9]){2})?(\\.[0-9]*)?([ \\t])*(Z|[-+][0-9][0-9]?(:[0-9][0-9])?)?\\b" +},{className:"number",begin:e.C_NUMBER_RE+"\\b",relevance:0},t,g,s],r=[...b] +;return r.pop(),r.push(i),l.contains=r,{name:"YAML",case_insensitive:!0, +aliases:["yml"],contains:b}}})()); \ No newline at end of file diff --git a/htmlReport/js/highlightjs-line-numbers.min.js b/htmlReport/js/highlightjs-line-numbers.min.js new file mode 100644 index 0000000000000..85485767040ef --- /dev/null +++ b/htmlReport/js/highlightjs-line-numbers.min.js @@ -0,0 +1,24 @@ +/* +The MIT License (MIT) + 
+Copyright (c) 2017 Yauheni Pakala + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + */ +!function(r,o){"use strict";var e,i="hljs-ln",l="hljs-ln-line",h="hljs-ln-code",s="hljs-ln-numbers",c="hljs-ln-n",m="data-line-number",a=/\r\n|\r|\n/g;function u(e){for(var n=e.toString(),t=e.anchorNode;"TD"!==t.nodeName;)t=t.parentNode;for(var r=e.focusNode;"TD"!==r.nodeName;)r=r.parentNode;var o=parseInt(t.dataset.lineNumber),a=parseInt(r.dataset.lineNumber);if(o==a)return n;var i,l=t.textContent,s=r.textContent;for(a
{6}',[l,s,c,m,h,o+n.startFrom,0{1}',[i,r])}return e}(e.innerHTML,o)}function v(e){var n=e.className;if(/hljs-/.test(n)){for(var t=g(e.innerHTML),r=0,o="";r{1}
+[tail of highlightjs-line-numbers.min.js not recoverable: the minified source assembles the line-number table from embedded HTML template strings, and those strings were stripped along with the rest of the markup during extraction]
diff --git a/htmlReport/ns-1/index.html b/htmlReport/ns-1/index.html
new file mode 100644
--- /dev/null
+++ b/htmlReport/ns-1/index.html
@@ -0,0 +1,1601 @@
+Coverage Report > org.opensearch.search.aggregations.bucket.terms
+
+[the generated markup of this page was flattened during extraction; the recoverable content is the two summary tables below]
+
+Coverage Summary for Package: org.opensearch.search.aggregations.bucket.terms
+
+Package                                          | Class, %        | Method, %       | Line, %
+org.opensearch.search.aggregations.bucket.terms  | 27.3% (47/172)  | 14% (164/1175)  | 15.5% (607/3916)
+
+Class                                | Class, %       | Method, %      | Line, %
+AbstractRareTermsAggregator          | 0% (0/1)       | 0% (0/7)       | 0% (0/26)
+AbstractStringTermsAggregator        | 100% (1/1)     | 33.3% (1/3)    | 16.7% (2/12)
+BucketPriorityQueue                  | 100% (1/1)     | 100% (2/2)     | 100% (3/3)
+BucketSignificancePriorityQueue      | 0% (0/1)       | 0% (0/2)       | 0% (0/2)
+BytesKeyedBucketOrds                 | 0% (0/7)       | 0% (0/28)      | 0% (0/38)
+DoubleTerms                          | 0% (0/2)       | 0% (0/19)      | 0% (0/47)
+GlobalOrdinalsStringTermsAggregator  | 58.8% (10/17)  | 45.7% (43/94)  | 50.2% (149/297)
+IncludeExclude                       | 42.9% (6/14)   | 30.6% (19/62)  | 24.7% (80/324)
+InternalMappedRareTerms              | 0% (0/1)       | 0% (0/11)      | 0% (0/59)
+InternalMappedSignificantTerms       | 0% (0/1)       | 0% (0/12)      | 0% (0/43)
+InternalMappedTerms                  | 100% (1/1)     | 50% (6/12)     | 30% (12/40)
+InternalMultiTerms                   | 0% (0/3)       | 0% (0/38)      | 0% (0/129)
+InternalRareTerms                    | 0% (0/2)       | 0% (0/17)      | 0% (0/56)
+InternalSignificantTerms             | 0% (0/2)       | 0% (0/21)      | 0% (0/116)
+InternalTerms                        | 100% (4/4)     | 50% (14/28)    | 52.5% (124/236)
+LongKeyedBucketOrds                  | 42.9% (3/7)    | 32.4% (11/34)  | 32.8% (19/58)
+LongRareTerms                        | 0% (0/2)       | 0% (0/20)      | 0% (0/25)
+LongRareTermsAggregator              | 0% (0/2)       | 0% (0/8)       | 0% (0/62)
+LongTerms                            | 0% (0/2)       | 0% (0/20)      | 0% (0/65)
+MapStringTermsAggregator             | 0% (0/7)       | 0% (0/48)      | 0% (0/141)
+MultiTermsAggregationBuilder         | 100% (1/1)     | 9.7% (3/31)    | 18.8% (22/117)
+MultiTermsAggregationFactory         | 100% (1/1)     | 14.3% (1/7)    | 8% (4/50)
+MultiTermsAggregator                 | 0% (0/6)       | 0% (0/38)      | 0% (0/202)
+NumericTermsAggregator               | 0% (0/9)       | 0% (0/62)      | 0% (0/192)
+ParsedDoubleTerms                    | 0% (0/2)       | 0% (0/10)      | 0% (0/22)
+ParsedLongRareTerms                  | 0% (0/2)       | 0% (0/10)      | 0% (0/22)
+ParsedLongTerms                      | 0% (0/2)       | 0% (0/10)      | 0% (0/22)
+ParsedMultiTerms                     | 0% (0/2)       | 0% (0/10)      | 0% (0/22)
+ParsedRareTerms                      | 0% (0/2)       | 0% (0/8)       | 0% (0/39)
+ParsedSignificantLongTerms           | 0% (0/2)       | 0% (0/10)      | 0% (0/18)
+ParsedSignificantStringTerms         | 0% (0/2)       | 0% (0/11)      | 0% (0/19)
+ParsedSignificantTerms               | 0% (0/2)       | 0% (0/19)      | 0% (0/65)
+ParsedStringRareTerms                | 0% (0/2)       | 0% (0/11)      | 0% (0/25)
+ParsedStringTerms                    | 0% (0/2)       | 0% (0/11)      | 0% (0/25)
+ParsedTerms                          | 0% (0/2)       | 0% (0/12)      | 0% (0/54)
+ParsedUnsignedLongTerms              | 0% (0/2)       | 0% (0/10)      | 0% (0/22)
+RareTermsAggregationBuilder          | 100% (1/1)     | 8.7% (2/23)    | 17.2% (10/58)
+RareTermsAggregatorFactory           | 50% (3/6)      | 23.8% (5/21)   | 19.6% (10/51)
+SignificanceLookup                   | 0% (0/5)       | 0% (0/20)      | 0% (0/58)
+SignificantLongTerms                 | 0% (0/2)       | 0% (0/17)      | 0% (0/33)
+SignificantStringTerms               | 0% (0/2)       | 0% (0/17)      | 0% (0/33)
+SignificantTermsAggregationBuilder   | 100% (1/1)     | 5.6% (2/36)    | 13.9% (16/115)
+SignificantTermsAggregatorFactory    | 42.9% (3/7)    | 23.1% (6/26)   | 12.5% (11/88)
+SignificantTextAggregationBuilder    | 0% (0/1)       | 0% (0/34)      | 0% (0/131)
+SignificantTextAggregatorFactory     | 0% (0/3)       | 0% (0/13)      | 0% (0/90)
+StringRareTerms                      | 0% (0/2)       | 0% (0/20)      | 0% (0/24)
+StringRareTermsAggregator            | 0% (0/2)       | 0% (0/7)       | 0% (0/64)
+StringTerms                          | 100% (2/2)     | 38.9% (7/18)   | 39.1% (9/23)
+TermsAggregationBuilder              | 100% (1/1)     | 30% (12/40)    | 34.6% (45/130)
+TermsAggregator                      | 66.7% (2/3)    | 47.8% (11/23)  | 35.4% (28/79)
+TermsAggregatorFactory               | 85.7% (6/7)    | 67.9% (19/28)  | 51.6% (63/122)
+UnmappedRareTerms                    | 0% (0/2)       | 0% (0/16)      | 0% (0/16)
+UnmappedSignificantTerms             | 0% (0/2)       | 0% (0/20)      | 0% (0/21)
+UnmappedTerms                        | 0% (0/2)       | 0% (0/20)      | 0% (0/20)
+UnsignedLongTerms                    | 0% (0/2)       | 0% (0/20)      | 0% (0/65)
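[The class names in the table above presumably linked to the per-class source views under htmlReport/ns-1/sources/, which are the pages that consume the two scripts added earlier in this patch (js/highlight.min.js and js/highlightjs-line-numbers.min.js). The report's own script wiring did not survive extraction, so the snippet below is only a minimal sketch of the standard way these two libraries are initialized together; the relative paths and init calls are assumptions based on the libraries' documented APIs, not recovered content:

  <!-- Hypothetical wiring, not recovered from this patch. -->
  <script src="../../js/highlight.min.js"></script>
  <script src="../../js/highlightjs-line-numbers.min.js"></script>
  <script>
    // highlight.js 10.x entry point: highlights every <pre><code> block on page load.
    hljs.initHighlightingOnLoad();
    // Added by highlightjs-line-numbers.min.js: rewraps each highlighted block in a
    // two-column table ("hljs-ln") whose first column holds data-line-number cells.
    hljs.initLineNumbersOnLoad();
  </script>]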
diff --git a/htmlReport/ns-1/index_SORT_BY_BLOCK.html b/htmlReport/ns-1/index_SORT_BY_BLOCK.html
new file mode 100644
index 0000000000000..9b949f12a2a52
--- /dev/null
+++ b/htmlReport/ns-1/index_SORT_BY_BLOCK.html
@@ -0,0 +1,1601 @@
+Coverage Report > org.opensearch.search.aggregations.bucket.terms
+[duplicate of htmlReport/ns-1/index.html above: the same package summary (27.3% class, 14% method, 15.5% line coverage) and the same 55 class rows in the same order]
diff --git a/htmlReport/ns-1/index_SORT_BY_BLOCK_DESC.html b/htmlReport/ns-1/index_SORT_BY_BLOCK_DESC.html
new file mode 100644
index 0000000000000..30a5dfc619e05
--- /dev/null
+++ b/htmlReport/ns-1/index_SORT_BY_BLOCK_DESC.html
@@ -0,0 +1,1601 @@
+ Coverage Report > org.opensearch.search.aggregations.bucket.terms
+ Coverage Summary for Package: org.opensearch.search.aggregations.bucket.terms
+ Package                                            Class, %          Method, %         Line, %
+ org.opensearch.search.aggregations.bucket.terms    27.3% (47/172)    14% (164/1175)    15.5% (607/3916)
+ [Per-class coverage table: identical data to the index table above, sorted by block, descending.]
diff --git a/htmlReport/ns-1/index_SORT_BY_CLASS.html b/htmlReport/ns-1/index_SORT_BY_CLASS.html
new file mode 100644
index 0000000000000..ed2baf2c72e50
--- /dev/null
+++ b/htmlReport/ns-1/index_SORT_BY_CLASS.html
@@ -0,0 +1,1601 @@
+ Coverage Report > org.opensearch.search.aggregations.bucket.terms
+ [Same package summary and per-class coverage table, sorted by class coverage, ascending.]
diff --git a/htmlReport/ns-1/index_SORT_BY_CLASS_DESC.html b/htmlReport/ns-1/index_SORT_BY_CLASS_DESC.html
new file mode 100644
index 0000000000000..bcd1ff0c60e1c
--- /dev/null
+++ b/htmlReport/ns-1/index_SORT_BY_CLASS_DESC.html
@@ -0,0 +1,1601 @@
+ Coverage Report > org.opensearch.search.aggregations.bucket.terms
+ [Same package summary and per-class coverage table, sorted by class coverage, descending.]
diff --git a/htmlReport/ns-1/index_SORT_BY_LINE.html b/htmlReport/ns-1/index_SORT_BY_LINE.html
new file mode 100644
index 0000000000000..b9c5188246fee
--- /dev/null
+++ b/htmlReport/ns-1/index_SORT_BY_LINE.html
@@ -0,0 +1,1601 @@
+ Coverage Report > org.opensearch.search.aggregations.bucket.terms
+ [Same package summary and per-class coverage table, sorted by line coverage, ascending.]
diff --git a/htmlReport/ns-1/index_SORT_BY_LINE_DESC.html b/htmlReport/ns-1/index_SORT_BY_LINE_DESC.html
new file mode 100644
index 0000000000000..2d31d68f4b948
--- /dev/null
+++ b/htmlReport/ns-1/index_SORT_BY_LINE_DESC.html
@@ -0,0 +1,1601 @@
+ Coverage Report > org.opensearch.search.aggregations.bucket.terms
+ [Same package summary and per-class coverage table, sorted by line coverage, descending.]
diff --git a/htmlReport/ns-1/index_SORT_BY_METHOD.html b/htmlReport/ns-1/index_SORT_BY_METHOD.html
new file mode 100644
index 0000000000000..9aaa3d489104c
--- /dev/null
+++ b/htmlReport/ns-1/index_SORT_BY_METHOD.html
@@ -0,0 +1,1601 @@
+ Coverage Report > org.opensearch.search.aggregations.bucket.terms
+ [Same package summary and per-class coverage table, sorted by method coverage, ascending.]
diff --git a/htmlReport/ns-1/index_SORT_BY_METHOD_DESC.html b/htmlReport/ns-1/index_SORT_BY_METHOD_DESC.html
new file mode 100644
index 0000000000000..1866bece5bed6
--- /dev/null
+++ b/htmlReport/ns-1/index_SORT_BY_METHOD_DESC.html
@@ -0,0 +1,1601 @@
+ Coverage Report > org.opensearch.search.aggregations.bucket.terms
+ [Same package summary and per-class coverage table, sorted by method coverage, descending.]
diff --git a/htmlReport/ns-1/index_SORT_BY_NAME_DESC.html b/htmlReport/ns-1/index_SORT_BY_NAME_DESC.html
new file mode 100644
index 0000000000000..424b4c5549b72
--- /dev/null
+++ b/htmlReport/ns-1/index_SORT_BY_NAME_DESC.html
@@ -0,0 +1,1601 @@
+ Coverage Report > org.opensearch.search.aggregations.bucket.terms
+ [Same package summary and per-class coverage table, sorted by class name, descending.]
diff --git a/htmlReport/ns-1/sources/source-1.html b/htmlReport/ns-1/sources/source-1.html
new file mode 100644
index 0000000000000..d605991874f92
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-1.html
@@ -0,0 +1,246 @@
+ Coverage Report > AbstractRareTermsAggregator
+ Coverage Summary for Class: AbstractRareTermsAggregator (org.opensearch.search.aggregations.bucket.terms)
+ Class                          Class, %    Method, %    Line, %
+ AbstractRareTermsAggregator    0% (0/1)    0% (0/7)     0% (0/26)
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ 
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.opensearch.common.util.SetBackedScalingCuckooFilter;
+ import org.opensearch.search.DocValueFormat;
+ import org.opensearch.search.aggregations.Aggregator;
+ import org.opensearch.search.aggregations.AggregatorFactories;
+ import org.opensearch.search.aggregations.BucketOrder;
+ import org.opensearch.search.aggregations.bucket.DeferableBucketAggregator;
+ import org.opensearch.search.aggregations.bucket.DeferringBucketCollector;
+ import org.opensearch.search.aggregations.bucket.MergingBucketsDeferringCollector;
+ import org.opensearch.search.aggregations.bucket.nested.NestedAggregator;
+ import org.opensearch.search.internal.SearchContext;
+ 
+ import java.io.IOException;
+ import java.util.Map;
+ import java.util.Random;
+ 
+ /**
+  * Base class to aggregate all docs that contain rare terms
+  *
+  * @opensearch.internal
+  */
+ public abstract class AbstractRareTermsAggregator extends DeferableBucketAggregator {
+ 
+     static final BucketOrder ORDER = BucketOrder.compound(BucketOrder.count(true), BucketOrder.key(true)); // sort by count ascending
+ 
+     protected final long maxDocCount;
+     private final double precision;
+     protected final DocValueFormat format;
+     private final int filterSeed;
+ 
+     protected MergingBucketsDeferringCollector deferringCollector;
+ 
+     AbstractRareTermsAggregator(
+         String name,
+         AggregatorFactories factories,
+         SearchContext context,
+         Aggregator parent,
+         Map<String, Object> metadata,
+         long maxDocCount,
+         double precision,
+         DocValueFormat format
+     ) throws IOException {
+         super(name, factories, context, parent, metadata);
+ 
+         this.maxDocCount = maxDocCount;
+         this.precision = precision;
+         this.format = format;
+         // We seed the rng with the ShardID so results are deterministic and don't change randomly
+         this.filterSeed = context.indexShard().shardId().hashCode();
+         String scoringAgg = subAggsNeedScore();
+         String nestedAgg = descendsFromNestedAggregator(parent);
+         if (scoringAgg != null && nestedAgg != null) {
+             /*
+              * Terms agg would force the collect mode to depth_first here, because
+              * we need to access the score of nested documents in a sub-aggregation
+              * and we are not able to generate this score while replaying deferred documents.
+              *
+              * But the RareTerms agg _must_ execute in breadth first since it relies on
+              * deferring execution, so we just have to throw up our hands and refuse
+              */
+             throw new IllegalStateException(
+                 "RareTerms agg ["
+                     + name()
+                     + "] is the child of the nested agg ["
+                     + nestedAgg
+                     + "], and also has a scoring child agg ["
+                     + scoringAgg
+                     + "].  This combination is not supported because "
+                     + "it requires executing in [depth_first] mode, which the RareTerms agg cannot do."
+             );
+         }
+     }
+ 
+     protected SetBackedScalingCuckooFilter newFilter() {
+         SetBackedScalingCuckooFilter filter = new SetBackedScalingCuckooFilter(10000, new Random(filterSeed), precision);
+         filter.registerBreaker(this::addRequestCircuitBreakerBytes);
+         return filter;
+     }
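+ 
+     /*
+      * Because the RNG above is seeded from the shard id, repeated requests
+      * against the same shard build identical cuckoo filters. A minimal sketch
+      * of that determinism, assuming a hypothetical seed taken from the shard
+      * id's hashCode():
+      *
+      *     long seed = shardId.hashCode();
+      *     assert new Random(seed).nextLong() == new Random(seed).nextLong();
+      */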
+ 
+     @Override
+     protected boolean shouldDefer(Aggregator aggregator) {
+         return true;
+     }
+ 
+     @Override
+     public DeferringBucketCollector getDeferringCollector() {
+         deferringCollector = new MergingBucketsDeferringCollector(context, descendsFromGlobalAggregator(parent()));
+         return deferringCollector;
+     }
+ 
+     private String subAggsNeedScore() {
+         for (Aggregator subAgg : subAggregators) {
+             if (subAgg.scoreMode().needsScores()) {
+                 return subAgg.name();
+             }
+         }
+         return null;
+     }
+ 
+     private String descendsFromNestedAggregator(Aggregator parent) {
+         while (parent != null) {
+             if (parent.getClass() == NestedAggregator.class) {
+                 return parent.name();
+             }
+             parent = parent.parent();
+         }
+         return null;
+     }
+ }
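
The compound ORDER above sorts buckets by ascending doc count and breaks ties by ascending key. A minimal plain-Java sketch of the same comparator (the Term record here is hypothetical, standing in for a bucket):

    import java.util.ArrayList;
    import java.util.Comparator;
    import java.util.List;

    record Term(long key, long docCount) {}

    class RareTermsOrderSketch {
        static final Comparator<Term> ORDER =
            Comparator.comparingLong(Term::docCount).thenComparingLong(Term::key);

        public static void main(String[] args) {
            List<Term> terms = new ArrayList<>(List.of(new Term(7, 2), new Term(3, 1), new Term(5, 1)));
            terms.sort(ORDER);
            // [Term[key=3, docCount=1], Term[key=5, docCount=1], Term[key=7, docCount=2]]
            System.out.println(terms);
        }
    }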
diff --git a/htmlReport/ns-1/sources/source-10.html b/htmlReport/ns-1/sources/source-10.html
new file mode 100644
index 0000000000000..59c7beb098a5e
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-10.html
@@ -0,0 +1,544 @@

Coverage Report > LongKeyedBucketOrds

Coverage Summary for Class: LongKeyedBucketOrds (org.opensearch.search.aggregations.bucket.terms)

Class                                   Method, %          Line, %
LongKeyedBucketOrds                     100%  (2/2)        100%  (2/2)
LongKeyedBucketOrds$BucketOrdsEnum      0%    (0/1)        0%    (0/1)
LongKeyedBucketOrds$BucketOrdsEnum$1    0%    (0/4)        0%    (0/4)
LongKeyedBucketOrds$FromMany            0%    (0/9)        0%    (0/17)
LongKeyedBucketOrds$FromMany$1          0%    (0/4)        0%    (0/10)
LongKeyedBucketOrds$FromSingle          50%   (5/10)       53.3% (8/15)
LongKeyedBucketOrds$FromSingle$1        100%  (4/4)        100%  (9/9)
Total                                   32.4% (11/34)      32.8% (19/58)
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ 
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.opensearch.common.lease.Releasable;
+ import org.opensearch.common.util.BigArrays;
+ import org.opensearch.common.util.LongLongHash;
+ import org.opensearch.common.util.ReorganizingLongHash;
+ import org.opensearch.search.aggregations.CardinalityUpperBound;
+ 
+ /**
+  * Maps long bucket keys to bucket ordinals.
+  *
+  * @opensearch.internal
+  */
+ public abstract class LongKeyedBucketOrds implements Releasable {
+     /**
+      * Build a {@link LongKeyedBucketOrds}.
+      */
+     public static LongKeyedBucketOrds build(BigArrays bigArrays, CardinalityUpperBound cardinality) {
+         return cardinality.map(estimate -> estimate < 2 ? new FromSingle(bigArrays) : new FromMany(bigArrays));
+     }
+ 
+     private LongKeyedBucketOrds() {}
+ 
+     /**
+      * Add the {@code owningBucketOrd, value} pair. Return the ord for
+      * their bucket if the pair was newly added, or {@code -1-ord}
+      * if it was already present.
+      */
+     public abstract long add(long owningBucketOrd, long value);
+ 
+     /**
+      * Count the buckets in {@code owningBucketOrd}.
+      * <p>
+      * Some aggregations expect this to be fast but most wouldn't
+      * mind particularly if it weren't.
+      */
+     public abstract long bucketsInOrd(long owningBucketOrd);
+ 
+     /**
+      * Find the {@code owningBucketOrd, value} pair. Return the ord for
+      * their bucket if they have been added or {@code -1} if they haven't.
+      */
+     public abstract long find(long owningBucketOrd, long value);
+ 
+     /**
+      * Returns the value currently associated with the bucket ordinal
+      */
+     public abstract long get(long ordinal);
+ 
+     /**
+      * The number of collected buckets.
+      */
+     public abstract long size();
+ 
+     /**
+      * The maximum possible used {@code owningBucketOrd}.
+      */
+     public abstract long maxOwningBucketOrd();
+ 
+     /**
+      * Build an iterator for buckets inside {@code owningBucketOrd} in order
+      * of increasing ord.
+      * <p>
+      * When first returned it is "unpositioned" and you must call
+      * {@link BucketOrdsEnum#next()} to move it to the first value.
+      */
+     public abstract BucketOrdsEnum ordsEnum(long owningBucketOrd);
+ 
+     /**
+      * An iterator for buckets inside a particular {@code owningBucketOrd}.
+      *
+      * @opensearch.internal
+      */
+     public interface BucketOrdsEnum {
+         /**
+          * Advance to the next value.
+          * @return {@code true} if there *is* a next value,
+          *         {@code false} if there isn't
+          */
+         boolean next();
+ 
+         /**
+          * The ordinal of the current value.
+          */
+         long ord();
+ 
+         /**
+          * The current value.
+          */
+         long value();
+ 
+         /**
+          * An {@linkplain BucketOrdsEnum} that is empty.
+          */
+         BucketOrdsEnum EMPTY = new BucketOrdsEnum() {
+             @Override
+             public boolean next() {
+                 return false;
+             }
+ 
+             @Override
+             public long ord() {
+                 return 0;
+             }
+ 
+             @Override
+             public long value() {
+                 return 0;
+             }
+         };
+     }
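+ 
+     /*
+      * Typical consumption of a BucketOrdsEnum, as a sketch (assuming an
+      * existing LongKeyedBucketOrds named ords): the enum starts unpositioned,
+      * so next() must be called before reading ord() or value().
+      *
+      *     BucketOrdsEnum e = ords.ordsEnum(owningBucketOrd);
+      *     while (e.next()) {
+      *         long bucketOrd = e.ord();  // dense ordinal assigned at add() time
+      *         long key = e.value();      // the long key that was added
+      *     }
+      */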
+ 
+     /**
+      * Implementation that only works if it is collecting from a single bucket.
+      *
+      * @opensearch.internal
+      */
+     public static class FromSingle extends LongKeyedBucketOrds {
+         private final ReorganizingLongHash ords;
+ 
+         public FromSingle(BigArrays bigArrays) {
+             ords = new ReorganizingLongHash(bigArrays);
+         }
+ 
+         @Override
+         public long add(long owningBucketOrd, long value) {
+             // This is in the critical path for collecting most aggs. Be careful of performance.
+             assert owningBucketOrd == 0;
+             return ords.add(value);
+         }
+ 
+         @Override
+         public long find(long owningBucketOrd, long value) {
+             assert owningBucketOrd == 0;
+             return ords.find(value);
+         }
+ 
+         @Override
+         public long get(long ordinal) {
+             return ords.get(ordinal);
+         }
+ 
+         @Override
+         public long bucketsInOrd(long owningBucketOrd) {
+             assert owningBucketOrd == 0;
+             return ords.size();
+         }
+ 
+         @Override
+         public long size() {
+             return ords.size();
+         }
+ 
+         @Override
+         public long maxOwningBucketOrd() {
+             return 0;
+         }
+ 
+         @Override
+         public BucketOrdsEnum ordsEnum(long owningBucketOrd) {
+             assert owningBucketOrd == 0;
+             return new BucketOrdsEnum() {
+                 private long ord = -1;
+                 private long value;
+ 
+                 @Override
+                 public boolean next() {
+                     ord++;
+                     if (ord >= ords.size()) {
+                         return false;
+                     }
+                     value = ords.get(ord);
+                     return true;
+                 }
+ 
+                 @Override
+                 public long value() {
+                     return value;
+                 }
+ 
+                 @Override
+                 public long ord() {
+                     return ord;
+                 }
+             };
+         }
+ 
+         @Override
+         public void close() {
+             ords.close();
+         }
+     }
+ 
+     /**
+      * Implementation that works properly when collecting from many buckets.
+      *
+      * @opensearch.internal
+      */
+     public static class FromMany extends LongKeyedBucketOrds {
+         private final LongLongHash ords;
+ 
+         public FromMany(BigArrays bigArrays) {
+             ords = new LongLongHash(2, bigArrays);
+         }
+ 
+         @Override
+         public long add(long owningBucketOrd, long value) {
+             // This is in the critical path for collecting most aggs. Be careful of performance.
+             return ords.add(owningBucketOrd, value);
+         }
+ 
+         @Override
+         public long find(long owningBucketOrd, long value) {
+             return ords.find(owningBucketOrd, value);
+         }
+ 
+         @Override
+         public long get(long ordinal) {
+             return ords.getKey2(ordinal);
+         }
+ 
+         @Override
+         public long bucketsInOrd(long owningBucketOrd) {
+             // TODO it'd be faster to count the number of buckets in a list of these ords rather than one at a time
+             long count = 0;
+             for (long i = 0; i < ords.size(); i++) {
+                 if (ords.getKey1(i) == owningBucketOrd) {
+                     count++;
+                 }
+             }
+             return count;
+         }
+ 
+         @Override
+         public long size() {
+             return ords.size();
+         }
+ 
+         @Override
+         public long maxOwningBucketOrd() {
+             // TODO this is fairly expensive to compute. Can we avoid needing it?
+             long max = -1;
+             for (long i = 0; i < ords.size(); i++) {
+                 max = Math.max(max, ords.getKey1(i));
+             }
+             return max;
+         }
+ 
+         @Override
+         public BucketOrdsEnum ordsEnum(long owningBucketOrd) {
+             // TODO it'd be faster to iterate many ords at once rather than one at a time
+             return new BucketOrdsEnum() {
+                 private long ord = -1;
+                 private long value;
+ 
+                 @Override
+                 public boolean next() {
+                     while (true) {
+                         ord++;
+                         if (ord >= ords.size()) {
+                             return false;
+                         }
+                         if (ords.getKey1(ord) == owningBucketOrd) {
+                             value = ords.getKey2(ord);
+                             return true;
+                         }
+                     }
+                 }
+ 
+                 @Override
+                 public long value() {
+                     return value;
+                 }
+ 
+                 @Override
+                 public long ord() {
+                     return ord;
+                 }
+             };
+         }
+ 
+         @Override
+         public void close() {
+             ords.close();
+         }
+     }
+ }
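
The encoding add() uses above (a fresh non-negative ord for a new key, -1-ord for one already present) can be sketched with a toy map-backed stand-in; ToyBucketOrds is hypothetical and only illustrates the contract:

    import java.util.HashMap;
    import java.util.Map;

    class ToyBucketOrds {
        private final Map<Long, Long> ords = new HashMap<>();

        // Returns the new ord for an unseen value, or -1-ord if the value was already added.
        long add(long value) {
            Long existing = ords.get(value);
            if (existing != null) {
                return -1 - existing; // flags "already present" without losing the ord
            }
            long ord = ords.size();
            ords.put(value, ord);
            return ord;
        }

        public static void main(String[] args) {
            ToyBucketOrds toy = new ToyBucketOrds();
            System.out.println(toy.add(42)); // 0  -> new bucket
            System.out.println(toy.add(42)); // -1 -> already present (-1 - 0)
            System.out.println(toy.add(7));  // 1  -> new bucket
        }
    }

Callers recover the existing ordinal with -1 - result, exactly as the collectors below do.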
diff --git a/htmlReport/ns-1/sources/source-11.html b/htmlReport/ns-1/sources/source-11.html
new file mode 100644
index 0000000000000..c178c1b5a0e1b
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-11.html
@@ -0,0 +1,309 @@

Coverage Report > LongRareTerms

Coverage Summary for Class: LongRareTerms (org.opensearch.search.aggregations.bucket.terms)

Class                   Method, %        Line, %
LongRareTerms           0% (0/10)        0% (0/10)
LongRareTerms$Bucket    0% (0/10)        0% (0/15)
Total                   0% (0/20)        0% (0/25)
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.opensearch.common.util.SetBackedScalingCuckooFilter;
+ import org.opensearch.core.common.io.stream.StreamInput;
+ import org.opensearch.core.common.io.stream.StreamOutput;
+ import org.opensearch.core.xcontent.XContentBuilder;
+ import org.opensearch.search.DocValueFormat;
+ import org.opensearch.search.aggregations.BucketOrder;
+ import org.opensearch.search.aggregations.InternalAggregations;
+ 
+ import java.io.IOException;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Objects;
+ 
+ /**
+  * Result of the RareTerms aggregation when the field is some kind of whole number like an integer, a long, or a date.
+  *
+  * @opensearch.internal
+  */
+ public class LongRareTerms extends InternalMappedRareTerms<LongRareTerms, LongRareTerms.Bucket> {
+     public static final String NAME = "lrareterms";
+ 
+     /**
+      * Bucket for rare long valued terms
+      *
+      * @opensearch.internal
+      */
+     public static class Bucket extends InternalRareTerms.Bucket<Bucket> {
+         long term;
+ 
+         public Bucket(long term, long docCount, InternalAggregations aggregations, DocValueFormat format) {
+             super(docCount, aggregations, format);
+             this.term = term;
+         }
+ 
+         /**
+          * Read from a stream.
+          */
+         public Bucket(StreamInput in, DocValueFormat format) throws IOException {
+             super(in, format);
+             term = in.readLong();
+         }
+ 
+         @Override
+         protected void writeTermTo(StreamOutput out) throws IOException {
+             out.writeLong(term);
+         }
+ 
+         @Override
+         public String getKeyAsString() {
+             return format.format(term).toString();
+         }
+ 
+         @Override
+         public Object getKey() {
+             return term;
+         }
+ 
+         @Override
+         public Number getKeyAsNumber() {
+             return term;
+         }
+ 
+         @Override
+         public int compareKey(Bucket other) {
+             return Long.compare(term, other.term);
+         }
+ 
+         @Override
+         protected final XContentBuilder keyToXContent(XContentBuilder builder) throws IOException {
+             builder.field(CommonFields.KEY.getPreferredName(), term);
+             if (format != DocValueFormat.RAW) {
+                 builder.field(CommonFields.KEY_AS_STRING.getPreferredName(), format.format(term).toString());
+             }
+             return builder;
+         }
+ 
+         @Override
+         public boolean equals(Object obj) {
+             return super.equals(obj) && Objects.equals(term, ((Bucket) obj).term);
+         }
+ 
+         @Override
+         public int hashCode() {
+             return Objects.hash(super.hashCode(), term);
+         }
+     }
+ 
+     LongRareTerms(
+         String name,
+         BucketOrder order,
+         Map<String, Object> metadata,
+         DocValueFormat format,
+         List<LongRareTerms.Bucket> buckets,
+         long maxDocCount,
+         SetBackedScalingCuckooFilter filter
+     ) {
+         super(name, order, metadata, format, buckets, maxDocCount, filter);
+     }
+ 
+     /**
+      * Read from a stream.
+      */
+     public LongRareTerms(StreamInput in) throws IOException {
+         super(in, LongRareTerms.Bucket::new);
+     }
+ 
+     @Override
+     public String getWriteableName() {
+         return NAME;
+     }
+ 
+     @Override
+     public LongRareTerms create(List<LongRareTerms.Bucket> buckets) {
+         return new LongRareTerms(name, order, metadata, format, buckets, maxDocCount, filter);
+     }
+ 
+     @Override
+     public LongRareTerms.Bucket createBucket(InternalAggregations aggregations, LongRareTerms.Bucket prototype) {
+         return new LongRareTerms.Bucket(prototype.term, prototype.getDocCount(), aggregations, prototype.format);
+     }
+ 
+     @Override
+     protected LongRareTerms createWithFilter(String name, List<LongRareTerms.Bucket> buckets, SetBackedScalingCuckooFilter filter) {
+         return new LongRareTerms(name, order, getMetadata(), format, buckets, maxDocCount, filter);
+     }
+ 
+     @Override
+     protected LongRareTerms.Bucket[] createBucketsArray(int size) {
+         return new LongRareTerms.Bucket[size];
+     }
+ 
+     @Override
+     public boolean containsTerm(SetBackedScalingCuckooFilter filter, LongRareTerms.Bucket bucket) {
+         return filter.mightContain((long) bucket.getKey());
+     }
+ 
+     @Override
+     public void addToFilter(SetBackedScalingCuckooFilter filter, LongRareTerms.Bucket bucket) {
+         filter.add((long) bucket.getKey());
+     }
+ 
+     @Override
+     Bucket createBucket(long docCount, InternalAggregations aggs, LongRareTerms.Bucket prototype) {
+         return new Bucket(prototype.term, docCount, aggs, format);
+     }
+ }
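
containsTerm and addToFilter above drive cross-shard pruning at reduce time: any key that proved too frequent on some shard goes into that shard's filter, and merged buckets whose keys match are discarded. A toy sketch, with a HashSet standing in for the approximate cuckoo filter:

    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    class RarePruneSketch {
        public static void main(String[] args) {
            Set<Long> filter = new HashSet<>(); // stand-in for SetBackedScalingCuckooFilter
            filter.add(42L);                    // 42 exceeded maxDocCount on some shard
            Map<Long, Long> merged = new HashMap<>(Map.of(42L, 3L, 7L, 1L));
            merged.keySet().removeIf(filter::contains); // drop keys known to be non-rare
            System.out.println(merged);         // {7=1}
        }
    }

Unlike the HashSet, the real filter is probabilistic and may report false positives, which is acceptable here since it only means an occasional genuinely rare term gets pruned.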
diff --git a/htmlReport/ns-1/sources/source-12.html b/htmlReport/ns-1/sources/source-12.html
new file mode 100644
index 0000000000000..9760fd9f96635
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-12.html
@@ -0,0 +1,327 @@

Coverage Report > LongRareTermsAggregator

Coverage Summary for Class: LongRareTermsAggregator (org.opensearch.search.aggregations.bucket.terms)

Class                        Method, %       Line, %
LongRareTermsAggregator      0% (0/6)        0% (0/46)
LongRareTermsAggregator$1    0% (0/2)        0% (0/16)
Total                        0% (0/8)        0% (0/62)
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.apache.lucene.index.LeafReaderContext;
+ import org.apache.lucene.index.SortedNumericDocValues;
+ import org.opensearch.common.lease.Releasables;
+ import org.opensearch.common.util.LongHash;
+ import org.opensearch.common.util.SetBackedScalingCuckooFilter;
+ import org.opensearch.search.DocValueFormat;
+ import org.opensearch.search.aggregations.Aggregator;
+ import org.opensearch.search.aggregations.AggregatorFactories;
+ import org.opensearch.search.aggregations.CardinalityUpperBound;
+ import org.opensearch.search.aggregations.InternalAggregation;
+ import org.opensearch.search.aggregations.LeafBucketCollector;
+ import org.opensearch.search.aggregations.LeafBucketCollectorBase;
+ import org.opensearch.search.aggregations.support.ValuesSource;
+ import org.opensearch.search.internal.SearchContext;
+ 
+ import java.io.IOException;
+ import java.util.ArrayList;
+ import java.util.Arrays;
+ import java.util.List;
+ import java.util.Map;
+ 
+ import static java.util.Collections.emptyList;
+ 
+ /**
+  * An aggregator that finds "rare" long values (e.g. a terms agg that orders ascending)
+  *
+  * @opensearch.internal
+  */
+ public class LongRareTermsAggregator extends AbstractRareTermsAggregator {
+     private final ValuesSource.Numeric valuesSource;
+     private final IncludeExclude.LongFilter filter;
+     private final LongKeyedBucketOrds bucketOrds;
+ 
+     LongRareTermsAggregator(
+         String name,
+         AggregatorFactories factories,
+         ValuesSource.Numeric valuesSource,
+         DocValueFormat format,
+         SearchContext aggregationContext,
+         Aggregator parent,
+         IncludeExclude.LongFilter filter,
+         int maxDocCount,
+         double precision,
+         CardinalityUpperBound cardinality,
+         Map<String, Object> metadata
+     ) throws IOException {
+         super(name, factories, aggregationContext, parent, metadata, maxDocCount, precision, format);
+         this.valuesSource = valuesSource;
+         this.filter = filter;
+         this.bucketOrds = LongKeyedBucketOrds.build(context.bigArrays(), cardinality);
+     }
+ 
+     protected SortedNumericDocValues getValues(ValuesSource.Numeric valuesSource, LeafReaderContext ctx) throws IOException {
+         return valuesSource.longValues(ctx);
+     }
+ 
+     @Override
+     public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCollector sub) throws IOException {
+         SortedNumericDocValues values = getValues(valuesSource, ctx);
+         return new LeafBucketCollectorBase(sub, values) {
+             @Override
+             public void collect(int docId, long owningBucketOrd) throws IOException {
+                 if (false == values.advanceExact(docId)) {
+                     return;
+                 }
+                 int valuesCount = values.docValueCount();
+                 long previous = Long.MAX_VALUE;
+                 for (int i = 0; i < valuesCount; ++i) {
+                     long val = values.nextValue();
+                     if (i > 0 && previous == val) {
+                         // values are returned in sorted order, so adjacent duplicates can be skipped
+                         continue;
+                     }
+                     previous = val;
+                     if (filter != null && false == filter.accept(val)) {
+                         continue;
+                     }
+                     long bucketOrdinal = bucketOrds.add(owningBucketOrd, val);
+                     if (bucketOrdinal < 0) { // already seen
+                         bucketOrdinal = -1 - bucketOrdinal;
+                         collectExistingBucket(sub, docId, bucketOrdinal);
+                     } else {
+                         collectBucket(sub, docId, bucketOrdinal);
+                     }
+                 }
+             }
+         };
+     }
+ 
+     @Override
+     public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException {
+         /*
+          * Collect the list of buckets, populate the filter with terms
+          * that are too frequent, and figure out how to merge sub-buckets.
+          */
+         LongRareTerms.Bucket[][] rarestPerOrd = new LongRareTerms.Bucket[owningBucketOrds.length][];
+         SetBackedScalingCuckooFilter[] filters = new SetBackedScalingCuckooFilter[owningBucketOrds.length];
+         long keepCount = 0;
+         long[] mergeMap = new long[(int) bucketOrds.size()];
+         Arrays.fill(mergeMap, -1);
+         long offset = 0;
+         for (int owningOrdIdx = 0; owningOrdIdx < owningBucketOrds.length; owningOrdIdx++) {
+             try (LongHash bucketsInThisOwningBucketToCollect = new LongHash(1, context.bigArrays())) {
+                 filters[owningOrdIdx] = newFilter();
+                 List<LongRareTerms.Bucket> builtBuckets = new ArrayList<>();
+                 LongKeyedBucketOrds.BucketOrdsEnum collectedBuckets = bucketOrds.ordsEnum(owningBucketOrds[owningOrdIdx]);
+                 while (collectedBuckets.next()) {
+                     long docCount = bucketDocCount(collectedBuckets.ord());
+                     // if the key is below threshold, reinsert into the new ords
+                     if (docCount <= maxDocCount) {
+                         LongRareTerms.Bucket bucket = new LongRareTerms.Bucket(collectedBuckets.value(), docCount, null, format);
+                         bucket.bucketOrd = offset + bucketsInThisOwningBucketToCollect.add(collectedBuckets.value());
+                         mergeMap[(int) collectedBuckets.ord()] = bucket.bucketOrd;
+                         builtBuckets.add(bucket);
+                         keepCount++;
+                     } else {
+                         filters[owningOrdIdx].add(collectedBuckets.value());
+                     }
+                 }
+                 rarestPerOrd[owningOrdIdx] = builtBuckets.toArray(new LongRareTerms.Bucket[0]);
+                 offset += bucketsInThisOwningBucketToCollect.size();
+             }
+         }
+ 
+         /*
+          * Only merge/delete the ordinals if we have actually deleted one,
+          * to save on some redundant work.
+          */
+         if (keepCount != mergeMap.length) {
+             mergeBuckets(mergeMap, offset);
+             if (deferringCollector != null) {
+                 deferringCollector.mergeBuckets(mergeMap);
+             }
+         }
+ 
+         /*
+          * Now build the results!
+          */
+         buildSubAggsForAllBuckets(rarestPerOrd, b -> b.bucketOrd, (b, aggs) -> b.aggregations = aggs);
+         InternalAggregation[] result = new InternalAggregation[owningBucketOrds.length];
+         for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) {
+             Arrays.sort(rarestPerOrd[ordIdx], ORDER.comparator());
+             result[ordIdx] = new LongRareTerms(
+                 name,
+                 ORDER,
+                 metadata(),
+                 format,
+                 Arrays.asList(rarestPerOrd[ordIdx]),
+                 maxDocCount,
+                 filters[ordIdx]
+             );
+         }
+         return result;
+     }
+ 
+     @Override
+     public InternalAggregation buildEmptyAggregation() {
+         return new LongRareTerms(name, ORDER, metadata(), format, emptyList(), 0, newFilter());
+     }
+ 
+     @Override
+     public void doClose() {
+         Releasables.close(bucketOrds);
+     }
+ }
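
buildAggregations above compacts the surviving bucket ordinals through mergeMap, where each slot maps an old ordinal to its new one (or -1 for a pruned bucket). A minimal sketch of applying such a map, with made-up numbers:

    import java.util.Arrays;

    class MergeMapSketch {
        public static void main(String[] args) {
            long[] docCounts = {5, 1, 9, 1};  // doc counts indexed by old bucket ord
            long[] mergeMap = {-1, 0, -1, 1}; // ords 0 and 2 pruned as too frequent
            long[] compacted = new long[2];
            for (int oldOrd = 0; oldOrd < mergeMap.length; oldOrd++) {
                if (mergeMap[oldOrd] >= 0) {
                    compacted[(int) mergeMap[oldOrd]] += docCounts[oldOrd];
                }
            }
            System.out.println(Arrays.toString(compacted)); // [1, 1]
        }
    }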
diff --git a/htmlReport/ns-1/sources/source-13.html b/htmlReport/ns-1/sources/source-13.html
new file mode 100644
index 0000000000000..3bca5e6f1975e
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-13.html
@@ -0,0 +1,432 @@

Coverage Report > LongTerms

Coverage Summary for Class: LongTerms (org.opensearch.search.aggregations.bucket.terms)

Class               Method, %        Line, %
LongTerms           0% (0/10)        0% (0/44)
LongTerms$Bucket    0% (0/10)        0% (0/21)
Total               0% (0/20)        0% (0/65)
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.opensearch.core.common.io.stream.StreamInput;
+ import org.opensearch.core.common.io.stream.StreamOutput;
+ import org.opensearch.core.xcontent.XContentBuilder;
+ import org.opensearch.search.DocValueFormat;
+ import org.opensearch.search.aggregations.BucketOrder;
+ import org.opensearch.search.aggregations.InternalAggregation;
+ import org.opensearch.search.aggregations.InternalAggregations;
+ 
+ import java.io.IOException;
+ import java.util.ArrayList;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Objects;
+ 
+ /**
+  * Result of the {@link TermsAggregator} when the field is some kind of whole number like an integer, a long, or a date.
+  *
+  * @opensearch.internal
+  */
+ public class LongTerms extends InternalMappedTerms<LongTerms, LongTerms.Bucket> {
+     public static final String NAME = "lterms";
+ 
+     /**
+      * Bucket for long terms
+      *
+      * @opensearch.internal
+      */
+     public static class Bucket extends InternalTerms.Bucket<Bucket> {
+         long term;
+ 
+         public Bucket(
+             long term,
+             long docCount,
+             InternalAggregations aggregations,
+             boolean showDocCountError,
+             long docCountError,
+             DocValueFormat format
+         ) {
+             super(docCount, aggregations, showDocCountError, docCountError, format);
+             this.term = term;
+         }
+ 
+         /**
+          * Read from a stream.
+          */
+         public Bucket(StreamInput in, DocValueFormat format, boolean showDocCountError) throws IOException {
+             super(in, format, showDocCountError);
+             term = in.readLong();
+         }
+ 
+         @Override
+         protected void writeTermTo(StreamOutput out) throws IOException {
+             out.writeLong(term);
+         }
+ 
+         @Override
+         public String getKeyAsString() {
+             return format.format(term).toString();
+         }
+ 
+         @Override
+         public Object getKey() {
+             if (format == DocValueFormat.UNSIGNED_LONG_SHIFTED) {
+                 return format.format(term);
+             } else {
+                 return term;
+             }
+         }
+ 
+         @Override
+         public Number getKeyAsNumber() {
+             if (format == DocValueFormat.UNSIGNED_LONG_SHIFTED) {
+                 return (Number) format.format(term);
+             } else {
+                 return term;
+             }
+         }
+ 
+         @Override
+         public int compareKey(Bucket other) {
+             return Long.compare(term, other.term);
+         }
+ 
+         @Override
+         protected final XContentBuilder keyToXContent(XContentBuilder builder) throws IOException {
+             if (format == DocValueFormat.UNSIGNED_LONG_SHIFTED) {
+                 builder.field(CommonFields.KEY.getPreferredName(), format.format(term));
+             } else {
+                 builder.field(CommonFields.KEY.getPreferredName(), term);
+             }
+             if (format != DocValueFormat.RAW && format != DocValueFormat.UNSIGNED_LONG_SHIFTED) {
+                 builder.field(CommonFields.KEY_AS_STRING.getPreferredName(), format.format(term).toString());
+             }
+             return builder;
+         }
+ 
+         @Override
+         public boolean equals(Object obj) {
+             return super.equals(obj) && Objects.equals(term, ((Bucket) obj).term);
+         }
+ 
+         @Override
+         public int hashCode() {
+             return Objects.hash(super.hashCode(), term);
+         }
+     }
+ 
+     public LongTerms(
+         String name,
+         BucketOrder reduceOrder,
+         BucketOrder order,
+         Map<String, Object> metadata,
+         DocValueFormat format,
+         int shardSize,
+         boolean showTermDocCountError,
+         long otherDocCount,
+         List<Bucket> buckets,
+         long docCountError,
+         TermsAggregator.BucketCountThresholds bucketCountThresholds
+     ) {
+         super(
+             name,
+             reduceOrder,
+             order,
+             metadata,
+             format,
+             shardSize,
+             showTermDocCountError,
+             otherDocCount,
+             buckets,
+             docCountError,
+             bucketCountThresholds
+         );
+     }
+ 
+     /**
+      * Read from a stream.
+      */
+     public LongTerms(StreamInput in) throws IOException {
+         super(in, Bucket::new);
+     }
+ 
+     @Override
+     public String getWriteableName() {
+         return NAME;
+     }
+ 
+     @Override
+     public LongTerms create(List<Bucket> buckets) {
+         return new LongTerms(
+             name,
+             reduceOrder,
+             order,
+             metadata,
+             format,
+             shardSize,
+             showTermDocCountError,
+             otherDocCount,
+             buckets,
+             docCountError,
+             bucketCountThresholds
+         );
+     }
+ 
+     @Override
+     public Bucket createBucket(InternalAggregations aggregations, Bucket prototype) {
+         return new Bucket(
+             prototype.term,
+             prototype.docCount,
+             aggregations,
+             prototype.showDocCountError,
+             prototype.docCountError,
+             prototype.format
+         );
+     }
+ 
+     @Override
+     protected LongTerms create(String name, List<Bucket> buckets, BucketOrder reduceOrder, long docCountError, long otherDocCount) {
+         return new LongTerms(
+             name,
+             reduceOrder,
+             order,
+             getMetadata(),
+             format,
+             shardSize,
+             showTermDocCountError,
+             otherDocCount,
+             buckets,
+             docCountError,
+             bucketCountThresholds
+         );
+     }
+ 
+     @Override
+     protected Bucket[] createBucketsArray(int size) {
+         return new Bucket[size];
+     }
+ 
+     @Override
+     public InternalAggregation reduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
+         boolean unsignedLongFormat = false;
+         boolean rawFormat = false;
+         for (InternalAggregation agg : aggregations) {
+             if (agg instanceof DoubleTerms) {
+                 return agg.reduce(aggregations, reduceContext);
+             }
+             if (agg instanceof LongTerms) {
+                 if (((LongTerms) agg).format == DocValueFormat.RAW) {
+                     rawFormat = true;
+                 } else if (((LongTerms) agg).format == DocValueFormat.UNSIGNED_LONG_SHIFTED) {
+                     unsignedLongFormat = true;
+                 } else if (((LongTerms) agg).format == DocValueFormat.UNSIGNED_LONG) {
+                     unsignedLongFormat = true;
+                 }
+             }
+         }
+         if (rawFormat && unsignedLongFormat) { // if we have mixed formats, convert results to double format
+             List<InternalAggregation> newAggs = new ArrayList<>(aggregations.size());
+             for (InternalAggregation agg : aggregations) {
+                 if (agg instanceof LongTerms) {
+                     DoubleTerms dTerms = LongTerms.convertLongTermsToDouble((LongTerms) agg, format);
+                     newAggs.add(dTerms);
+                 } else {
+                     newAggs.add(agg);
+                 }
+             }
+             return newAggs.get(0).reduce(newAggs, reduceContext);
+         }
+         return super.reduce(aggregations, reduceContext);
+     }
+ 
+     @Override
+     Bucket createBucket(long docCount, InternalAggregations aggs, long docCountError, LongTerms.Bucket prototype) {
+         return new Bucket(prototype.term, docCount, aggs, prototype.showDocCountError, docCountError, format);
+     }
+ 
+     /**
+      * Converts a {@link LongTerms} into a {@link DoubleTerms}, returning the value of the specified long terms as doubles.
+      */
+     static DoubleTerms convertLongTermsToDouble(LongTerms longTerms, DocValueFormat decimalFormat) {
+         List<LongTerms.Bucket> buckets = longTerms.getBuckets();
+         List<DoubleTerms.Bucket> newBuckets = new ArrayList<>();
+         for (Terms.Bucket bucket : buckets) {
+             newBuckets.add(
+                 new DoubleTerms.Bucket(
+                     bucket.getKeyAsNumber().doubleValue(),
+                     bucket.getDocCount(),
+                     (InternalAggregations) bucket.getAggregations(),
+                     longTerms.showTermDocCountError,
+                     longTerms.showTermDocCountError ? bucket.getDocCountError() : 0,
+                     decimalFormat
+                 )
+             );
+         }
+         return new DoubleTerms(
+             longTerms.getName(),
+             longTerms.reduceOrder,
+             longTerms.order,
+             longTerms.metadata,
+             longTerms.format,
+             longTerms.shardSize,
+             longTerms.showTermDocCountError,
+             longTerms.otherDocCount,
+             newBuckets,
+             longTerms.docCountError,
+             longTerms.bucketCountThresholds
+         );
+     }
+ }
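
The reduce above falls back to doubles when RAW and unsigned formats meet, because the same stored 64 bits denote different numbers under the two formats, as the JDK's unsigned helpers show:

    class MixedFormatSketch {
        public static void main(String[] args) {
            long bits = -1L; // a RAW long reads this as -1; an unsigned long as 2^64 - 1
            System.out.println(bits);                        // -1
            System.out.println(Long.toUnsignedString(bits)); // 18446744073709551615
            // Only a wider type like double can hold both ranges in one bucket list,
            // at the cost of some precision for very large values.
        }
    }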
diff --git a/htmlReport/ns-1/sources/source-14.html b/htmlReport/ns-1/sources/source-14.html
new file mode 100644
index 0000000000000..48bb1307ee76f
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-14.html
@@ -0,0 +1,827 @@

Coverage Report > MapStringTermsAggregator

Coverage Summary for Class: MapStringTermsAggregator (org.opensearch.search.aggregations.bucket.terms)

Class                                                      Method, %        Line, %
MapStringTermsAggregator                                   0% (0/10)        0% (0/23)
MapStringTermsAggregator$CollectConsumer                   -                -
MapStringTermsAggregator$CollectorSource                   -                -
MapStringTermsAggregator$ResultStrategy                    0% (0/2)         0% (0/31)
MapStringTermsAggregator$SignificantTermsResults           0% (0/14)        0% (0/28)
MapStringTermsAggregator$SignificantTermsResults$1         0% (0/2)         0% (0/4)
MapStringTermsAggregator$StandardTermsResults              0% (0/14)        0% (0/36)
MapStringTermsAggregator$ValuesSourceCollectorSource       0% (0/4)         0% (0/6)
MapStringTermsAggregator$ValuesSourceCollectorSource$1     0% (0/2)         0% (0/13)
Total                                                      0% (0/48)        0% (0/141)
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.apache.lucene.index.LeafReaderContext;
+ import org.apache.lucene.search.ScoreMode;
+ import org.apache.lucene.util.BytesRef;
+ import org.apache.lucene.util.BytesRefBuilder;
+ import org.apache.lucene.util.PriorityQueue;
+ import org.opensearch.common.lease.Releasable;
+ import org.opensearch.common.lease.Releasables;
+ import org.opensearch.common.util.LongArray;
+ import org.opensearch.index.fielddata.SortedBinaryDocValues;
+ import org.opensearch.search.DocValueFormat;
+ import org.opensearch.search.aggregations.Aggregator;
+ import org.opensearch.search.aggregations.AggregatorFactories;
+ import org.opensearch.search.aggregations.BucketOrder;
+ import org.opensearch.search.aggregations.CardinalityUpperBound;
+ import org.opensearch.search.aggregations.InternalAggregation;
+ import org.opensearch.search.aggregations.InternalMultiBucketAggregation;
+ import org.opensearch.search.aggregations.InternalOrder;
+ import org.opensearch.search.aggregations.LeafBucketCollector;
+ import org.opensearch.search.aggregations.LeafBucketCollectorBase;
+ import org.opensearch.search.aggregations.bucket.LocalBucketCountThresholds;
+ import org.opensearch.search.aggregations.bucket.terms.SignificanceLookup.BackgroundFrequencyForBytes;
+ import org.opensearch.search.aggregations.bucket.terms.heuristic.SignificanceHeuristic;
+ import org.opensearch.search.aggregations.support.ValuesSource;
+ import org.opensearch.search.internal.SearchContext;
+ 
+ import java.io.IOException;
+ import java.util.Arrays;
+ import java.util.Map;
+ import java.util.function.BiConsumer;
+ import java.util.function.Function;
+ import java.util.function.LongConsumer;
+ import java.util.function.Supplier;
+ 
+ import static org.opensearch.search.aggregations.InternalOrder.isKeyOrder;
+ 
+ /**
+  * An aggregator of string values that hashes the strings on the fly rather
+  * than up front like the {@link GlobalOrdinalsStringTermsAggregator}.
+  *
+  * @opensearch.internal
+  */
+ public class MapStringTermsAggregator extends AbstractStringTermsAggregator {
+     private final CollectorSource collectorSource;
+     private final ResultStrategy<?, ?> resultStrategy;
+     private final BytesKeyedBucketOrds bucketOrds;
+     private final IncludeExclude.StringFilter includeExclude;
+ 
+     public MapStringTermsAggregator(
+         String name,
+         AggregatorFactories factories,
+         CollectorSource collectorSource,
+         Function<MapStringTermsAggregator, ResultStrategy<?, ?>> resultStrategy,
+         BucketOrder order,
+         DocValueFormat format,
+         BucketCountThresholds bucketCountThresholds,
+         IncludeExclude.StringFilter includeExclude,
+         SearchContext context,
+         Aggregator parent,
+         SubAggCollectionMode collectionMode,
+         boolean showTermDocCountError,
+         CardinalityUpperBound cardinality,
+         Map<String, Object> metadata
+     ) throws IOException {
+         super(name, factories, context, parent, order, format, bucketCountThresholds, collectionMode, showTermDocCountError, metadata);
+         this.collectorSource = collectorSource;
+         this.resultStrategy = resultStrategy.apply(this); // ResultStrategy needs a reference to the Aggregator to do its job.
+         this.includeExclude = includeExclude;
+         bucketOrds = BytesKeyedBucketOrds.build(context.bigArrays(), cardinality);
+     }
+ 
+     @Override
+     public ScoreMode scoreMode() {
+         if (collectorSource.needsScores()) {
+             return ScoreMode.COMPLETE;
+         }
+         return super.scoreMode();
+     }
+ 
+     @Override
+     public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCollector sub) throws IOException {
+         return resultStrategy.wrapCollector(
+             collectorSource.getLeafCollector(
+                 includeExclude,
+                 ctx,
+                 sub,
+                 this::addRequestCircuitBreakerBytes,
+                 (s, doc, owningBucketOrd, bytes) -> {
+                     long bucketOrdinal = bucketOrds.add(owningBucketOrd, bytes);
+                     if (bucketOrdinal < 0) { // already seen
+                         bucketOrdinal = -1 - bucketOrdinal;
+                         collectExistingBucket(s, doc, bucketOrdinal);
+                     } else {
+                         collectBucket(s, doc, bucketOrdinal);
+                     }
+                 }
+             )
+         );
+     }
+ 
+     @Override
+     public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException {
+         return resultStrategy.buildAggregations(owningBucketOrds);
+     }
+ 
+     @Override
+     public InternalAggregation buildEmptyAggregation() {
+         return resultStrategy.buildEmptyResult();
+     }
+ 
+     @Override
+     public void collectDebugInfo(BiConsumer<String, Object> add) {
+         super.collectDebugInfo(add);
+         add.accept("total_buckets", bucketOrds.size());
+         add.accept("result_strategy", resultStrategy.describe());
+     }
+ 
+     @Override
+     public void doClose() {
+         Releasables.close(collectorSource, resultStrategy, bucketOrds);
+     }
+ 
+     /**
+      * Abstraction on top of building collectors to fetch values.
+      *
+      * @opensearch.internal
+      */
+     public interface CollectorSource extends Releasable {
+         boolean needsScores();
+ 
+         LeafBucketCollector getLeafCollector(
+             IncludeExclude.StringFilter includeExclude,
+             LeafReaderContext ctx,
+             LeafBucketCollector sub,
+             LongConsumer addRequestCircuitBreakerBytes,
+             CollectConsumer consumer
+         ) throws IOException;
+     }
+ 
+     /**
+      * Consumer for the collector
+      *
+      * @opensearch.internal
+      */
+     @FunctionalInterface
+     public interface CollectConsumer {
+         void accept(LeafBucketCollector sub, int doc, long owningBucketOrd, BytesRef bytes) throws IOException;
+     }
+ 
+     /**
+      * Fetch values from a {@link ValuesSource}.
+      *
+      * @opensearch.internal
+      */
+     public static class ValuesSourceCollectorSource implements CollectorSource {
+         private final ValuesSource valuesSource;
+ 
+         public ValuesSourceCollectorSource(ValuesSource valuesSource) {
+             this.valuesSource = valuesSource;
+         }
+ 
+         @Override
+         public boolean needsScores() {
+             return valuesSource.needsScores();
+         }
+ 
+         @Override
+         public LeafBucketCollector getLeafCollector(
+             IncludeExclude.StringFilter includeExclude,
+             LeafReaderContext ctx,
+             LeafBucketCollector sub,
+             LongConsumer addRequestCircuitBreakerBytes,
+             CollectConsumer consumer
+         ) throws IOException {
+             SortedBinaryDocValues values = valuesSource.bytesValues(ctx);
+             return new LeafBucketCollectorBase(sub, values) {
+                 final BytesRefBuilder previous = new BytesRefBuilder();
+ 
+                 @Override
+                 public void collect(int doc, long owningBucketOrd) throws IOException {
+                     if (false == values.advanceExact(doc)) {
+                         return;
+                     }
+                     int valuesCount = values.docValueCount();
+ 
+                     // SortedBinaryDocValues don't guarantee uniqueness so we
+                     // need to take care of dups
+                     previous.clear();
+                     for (int i = 0; i < valuesCount; ++i) {
+                         BytesRef bytes = values.nextValue();
+                         if (includeExclude != null && false == includeExclude.accept(bytes)) {
+                             continue;
+                         }
+                         if (i > 0 && previous.get().equals(bytes)) {
+                             continue;
+                         }
+                         previous.copyBytes(bytes);
+                         consumer.accept(sub, doc, owningBucketOrd, bytes);
+                     }
+                 }
+             };
+         }
+ 
+         @Override
+         public void close() {}
+     }
+ 
+     /**
+      * Strategy for building results.
+      */
+     abstract class ResultStrategy<R extends InternalAggregation, B extends InternalMultiBucketAggregation.InternalBucket>
+         implements
+             Releasable {
+ 
+         private InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException {
+             LocalBucketCountThresholds localBucketCountThresholds = context.asLocalBucketCountThresholds(bucketCountThresholds);
+             B[][] topBucketsPerOrd = buildTopBucketsPerOrd(owningBucketOrds.length);
+             long[] otherDocCounts = new long[owningBucketOrds.length];
+             for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) {
+                 collectZeroDocEntriesIfNeeded(owningBucketOrds[ordIdx]);
+                 int size = (int) Math.min(bucketOrds.size(), localBucketCountThresholds.getRequiredSize());
+ 
+                 PriorityQueue<B> ordered = buildPriorityQueue(size);
+                 B spare = null;
+                 BytesKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrds[ordIdx]);
+                 Supplier<B> emptyBucketBuilder = emptyBucketBuilder(owningBucketOrds[ordIdx]);
+                 while (ordsEnum.next()) {
+                     long docCount = bucketDocCount(ordsEnum.ord());
+                     otherDocCounts[ordIdx] += docCount;
+                     if (docCount < localBucketCountThresholds.getMinDocCount()) {
+                         continue;
+                     }
+                     if (spare == null) {
+                         spare = emptyBucketBuilder.get();
+                     }
+                     updateBucket(spare, ordsEnum, docCount);
+                     spare = ordered.insertWithOverflow(spare);
+                 }
+ 
+                 topBucketsPerOrd[ordIdx] = buildBuckets(ordered.size());
+                 for (int i = ordered.size() - 1; i >= 0; --i) {
+                     topBucketsPerOrd[ordIdx][i] = ordered.pop();
+                     otherDocCounts[ordIdx] -= topBucketsPerOrd[ordIdx][i].getDocCount();
+                     finalizeBucket(topBucketsPerOrd[ordIdx][i]);
+                 }
+             }
+ 
+             buildSubAggs(topBucketsPerOrd);
+             InternalAggregation[] result = new InternalAggregation[owningBucketOrds.length];
+             for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) {
+                 result[ordIdx] = buildResult(owningBucketOrds[ordIdx], otherDocCounts[ordIdx], topBucketsPerOrd[ordIdx]);
+             }
+             return result;
+         }
+ 
+         /**
+          * Short description of the collection mechanism added to the profile
+          * output to help with debugging.
+          */
+         abstract String describe();
+ 
+         /**
+          * Wrap the "standard" numeric terms collector to collect any more
+          * information that this result type may need.
+          */
+         abstract LeafBucketCollector wrapCollector(LeafBucketCollector primary);
+ 
+         /**
+          * Collect extra entries for "zero" hit documents if they were requested
+          * and required.
+          */
+         abstract void collectZeroDocEntriesIfNeeded(long owningBucketOrd) throws IOException;
+ 
+         /**
+          * Build an empty temporary bucket.
+          */
+         abstract Supplier<B> emptyBucketBuilder(long owningBucketOrd);
+ 
+         /**
+          * Build a {@link PriorityQueue} to sort the buckets. Once every
+          * bucket has been collected we drain the queue to get the top buckets.
+          */
+         abstract PriorityQueue<B> buildPriorityQueue(int size);
+ 
+         /**
+          * Update fields in {@code spare} to reflect information collected for
+          * this bucket ordinal.
+          */
+         abstract void updateBucket(B spare, BytesKeyedBucketOrds.BucketOrdsEnum ordsEnum, long docCount) throws IOException;
+ 
+         /**
+          * Build an array to hold the "top" buckets for each ordinal.
+          */
+         abstract B[][] buildTopBucketsPerOrd(int size);
+ 
+         /**
+          * Build an array of buckets for a particular ordinal to collect the
+          * results. The populated list is passed to {@link #buildResult}.
+          */
+         abstract B[] buildBuckets(int size);
+ 
+         /**
+          * Finalize building a bucket. Called once we know that the bucket will
+          * be included in the results.
+          */
+         abstract void finalizeBucket(B bucket);
+ 
+         /**
+          * Build the sub-aggregations into the buckets. This will usually
+          * delegate to {@link #buildSubAggsForAllBuckets}.
+          */
+         abstract void buildSubAggs(B[][] topBucketsPerOrd) throws IOException;
+ 
+         /**
+          * Turn the buckets into an aggregation result.
+          */
+         abstract R buildResult(long owningBucketOrd, long otherDocCount, B[] topBuckets);
+ 
+         /**
+          * Build an "empty" result. Only called if there isn't any data on this
+          * shard.
+          */
+         abstract R buildEmptyResult();
+     }
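+ 
+     /*
+      * A note on the spare-bucket pattern in buildAggregations above: Lucene's
+      * PriorityQueue.insertWithOverflow either keeps the offered element and
+      * returns the one it evicted, or returns the offered element itself when
+      * it doesn't make the cut, so the returned bucket can always be recycled
+      * as the next spare. Sketch:
+      *
+      *     B spare = null;
+      *     while (ordsEnum.next()) {
+      *         if (spare == null) {
+      *             spare = emptyBucketBuilder.get();
+      *         }
+      *         updateBucket(spare, ordsEnum, bucketDocCount(ordsEnum.ord()));
+      *         spare = ordered.insertWithOverflow(spare); // evicted or rejected bucket
+      *     }
+      */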
+ 
+     /**
+      * Builds results for the standard {@code terms} aggregation.
+      */
+     class StandardTermsResults extends ResultStrategy<StringTerms, StringTerms.Bucket> {
+         private final ValuesSource valuesSource;
+ 
+         StandardTermsResults(ValuesSource valuesSource) {
+             this.valuesSource = valuesSource;
+         }
+ 
+         @Override
+         String describe() {
+             return "terms";
+         }
+ 
+         @Override
+         LeafBucketCollector wrapCollector(LeafBucketCollector primary) {
+             return primary;
+         }
+ 
+         @Override
+         void collectZeroDocEntriesIfNeeded(long owningBucketOrd) throws IOException {
+             if (bucketCountThresholds.getMinDocCount() != 0) {
+                 return;
+             }
+             if (InternalOrder.isCountDesc(order) && bucketOrds.bucketsInOrd(owningBucketOrd) >= bucketCountThresholds.getRequiredSize()) {
+                 return;
+             }
+             // we need to fill in the blanks
+             for (LeafReaderContext ctx : context.searcher().getTopReaderContext().leaves()) {
+                 SortedBinaryDocValues values = valuesSource.bytesValues(ctx);
+                 // brute force
+                 for (int docId = 0; docId < ctx.reader().maxDoc(); ++docId) {
+                     if (values.advanceExact(docId)) {
+                         int valueCount = values.docValueCount();
+                         for (int i = 0; i < valueCount; ++i) {
+                             BytesRef term = values.nextValue();
+                             if (includeExclude == null || includeExclude.accept(term)) {
+                                 bucketOrds.add(owningBucketOrd, term);
+                             }
+                         }
+                     }
+                 }
+             }
+         }
+ 
+         @Override
+         Supplier<StringTerms.Bucket> emptyBucketBuilder(long owningBucketOrd) {
+             return () -> new StringTerms.Bucket(new BytesRef(), 0, null, showTermDocCountError, 0, format);
+         }
+ 
+         @Override
+         PriorityQueue<StringTerms.Bucket> buildPriorityQueue(int size) {
+             return new BucketPriorityQueue<>(size, partiallyBuiltBucketComparator);
+         }
+ 
+         @Override
+         void updateBucket(StringTerms.Bucket spare, BytesKeyedBucketOrds.BucketOrdsEnum ordsEnum, long docCount) throws IOException {
+             ordsEnum.readValue(spare.termBytes);
+             spare.docCount = docCount;
+             spare.bucketOrd = ordsEnum.ord();
+         }
+ 
+         @Override
+         StringTerms.Bucket[][] buildTopBucketsPerOrd(int size) {
+             return new StringTerms.Bucket[size][];
+         }
+ 
+         @Override
+         StringTerms.Bucket[] buildBuckets(int size) {
+             return new StringTerms.Bucket[size];
+         }
+ 
+         @Override
+         void finalizeBucket(StringTerms.Bucket bucket) {
+             /*
+              * termBytes contains a reference to the bytes held by the
+              * bucketOrds which will be invalid once the aggregation is
+              * closed so we have to copy it.
+              */
+             bucket.termBytes = BytesRef.deepCopyOf(bucket.termBytes);
+         }
+ 
+         @Override
+         void buildSubAggs(StringTerms.Bucket[][] topBucketsPerOrd) throws IOException {
+             buildSubAggsForAllBuckets(topBucketsPerOrd, b -> b.bucketOrd, (b, a) -> b.aggregations = a);
+         }
+ 
+         @Override
+         StringTerms buildResult(long owningBucketOrd, long otherDocCount, StringTerms.Bucket[] topBuckets) {
+             final BucketOrder reduceOrder;
+             if (isKeyOrder(order) == false) {
+                 reduceOrder = InternalOrder.key(true);
+                 Arrays.sort(topBuckets, reduceOrder.comparator());
+             } else {
+                 reduceOrder = order;
+             }
+             return new StringTerms(
+                 name,
+                 reduceOrder,
+                 order,
+                 metadata(),
+                 format,
+                 bucketCountThresholds.getShardSize(),
+                 showTermDocCountError,
+                 otherDocCount,
+                 Arrays.asList(topBuckets),
+                 0,
+                 bucketCountThresholds
+             );
+         }
+ 
+         @Override
+         StringTerms buildEmptyResult() {
+             return buildEmptyTermsAggregation();
+         }
+ 
+         @Override
+         public void close() {}
+     }
+ 
+     /**
+      * Builds results for the {@code significant_terms} aggregation.
+      */
+     class SignificantTermsResults extends ResultStrategy<SignificantStringTerms, SignificantStringTerms.Bucket> {
+         private final BackgroundFrequencyForBytes backgroundFrequencies;
+         private final long supersetSize;
+         private final SignificanceHeuristic significanceHeuristic;
+ 
+         private LongArray subsetSizes = context.bigArrays().newLongArray(1, true);
+ 
+         SignificantTermsResults(
+             SignificanceLookup significanceLookup,
+             SignificanceHeuristic significanceHeuristic,
+             CardinalityUpperBound cardinality
+         ) {
+             backgroundFrequencies = significanceLookup.bytesLookup(context.bigArrays(), cardinality);
+             supersetSize = significanceLookup.supersetSize();
+             this.significanceHeuristic = significanceHeuristic;
+         }
+ 
+         @Override
+         String describe() {
+             return "significant_terms";
+         }
+ 
+         @Override
+         LeafBucketCollector wrapCollector(LeafBucketCollector primary) {
+             return new LeafBucketCollectorBase(primary, null) {
+                 @Override
+                 public void collect(int doc, long owningBucketOrd) throws IOException {
+                     super.collect(doc, owningBucketOrd);
+                     subsetSizes = context.bigArrays().grow(subsetSizes, owningBucketOrd + 1);
+                     subsetSizes.increment(owningBucketOrd, 1);
+                 }
+             };
+         }
+ 
+         @Override
+         void collectZeroDocEntriesIfNeeded(long owningBucketOrd) throws IOException {}
+ 
+         @Override
+         Supplier<SignificantStringTerms.Bucket> emptyBucketBuilder(long owningBucketOrd) {
+             long subsetSize = subsetSizes.get(owningBucketOrd);
+             return () -> new SignificantStringTerms.Bucket(new BytesRef(), 0, subsetSize, 0, 0, null, format, 0);
+         }
+ 
+         @Override
+         PriorityQueue<SignificantStringTerms.Bucket> buildPriorityQueue(int size) {
+             return new BucketSignificancePriorityQueue<>(size);
+         }
+ 
+         @Override
+         void updateBucket(SignificantStringTerms.Bucket spare, BytesKeyedBucketOrds.BucketOrdsEnum ordsEnum, long docCount)
+             throws IOException {
+ 
+             ordsEnum.readValue(spare.termBytes);
+             spare.bucketOrd = ordsEnum.ord();
+             spare.subsetDf = docCount;
+             spare.supersetDf = backgroundFrequencies.freq(spare.termBytes);
+             spare.supersetSize = supersetSize;
+             /*
+              * During shard-local down-selection we use subset/superset stats
+              * that are for this shard only. Back at the central reducer these
+              * properties will be updated with global stats.
+              */
+             spare.updateScore(significanceHeuristic);
+         }
+ 
+         @Override
+         SignificantStringTerms.Bucket[][] buildTopBucketsPerOrd(int size) {
+             return new SignificantStringTerms.Bucket[size][];
+         }
+ 
+         @Override
+         SignificantStringTerms.Bucket[] buildBuckets(int size) {
+             return new SignificantStringTerms.Bucket[size];
+         }
+ 
+         @Override
+         void finalizeBucket(SignificantStringTerms.Bucket bucket) {
+             /*
+              * termBytes contains a reference to the bytes held by the
+              * bucketOrds which will be invalid once the aggregation is
+              * closed so we have to copy it.
+              */
+             bucket.termBytes = BytesRef.deepCopyOf(bucket.termBytes);
+         }
+ 
+         @Override
+         void buildSubAggs(SignificantStringTerms.Bucket[][] topBucketsPerOrd) throws IOException {
+             buildSubAggsForAllBuckets(topBucketsPerOrd, b -> b.bucketOrd, (b, a) -> b.aggregations = a);
+         }
+ 
+         @Override
+         SignificantStringTerms buildResult(long owningBucketOrd, long otherDocCount, SignificantStringTerms.Bucket[] topBuckets) {
+             return new SignificantStringTerms(
+                 name,
+                 metadata(),
+                 format,
+                 subsetSizes.get(owningBucketOrd),
+                 supersetSize,
+                 significanceHeuristic,
+                 Arrays.asList(topBuckets),
+                 bucketCountThresholds
+             );
+         }
+ 
+         @Override
+         SignificantStringTerms buildEmptyResult() {
+             return buildEmptySignificantTermsAggregation(0, significanceHeuristic);
+         }
+ 
+         @Override
+         public void close() {
+             Releasables.close(backgroundFrequencies, subsetSizes);
+         }
+     }
+ }
+
+
+
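The significance bookkeeping in SignificantTermsResults reduces each candidate term to four counts: how often it occurs in the matched (subset) docs versus the background (superset). A minimal sketch of the scoring step behind `spare.updateScore(significanceHeuristic)`, assuming the `SignificanceHeuristic.getScore(long, long, long, long)` signature and the same-package field access the aggregator above relies on:

```java
// Minimal sketch of the scoring behind spare.updateScore(significanceHeuristic).
// Assumes SignificanceHeuristic.getScore(long, long, long, long); the shard-local
// counts used here are replaced with global ones during the final reduce.
static double scoreBucket(SignificanceHeuristic heuristic, SignificantStringTerms.Bucket bucket) {
    // subsetDf / subsetSize:     term frequency within the docs matched by the query
    // supersetDf / supersetSize: term frequency within the background set
    return heuristic.getScore(bucket.subsetDf, bucket.subsetSize, bucket.supersetDf, bucket.supersetSize);
}
```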
diff --git a/htmlReport/ns-1/sources/source-15.html b/htmlReport/ns-1/sources/source-15.html
new file mode 100644
index 0000000000000..403b85161f3e4
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-15.html
@@ -0,0 +1,549 @@
+ Coverage Report > MultiTermsAggregationBuilder
+ 
+ Coverage Summary for Class: MultiTermsAggregationBuilder (org.opensearch.search.aggregations.bucket.terms)
+ 
+ Class                          Class, %      Method, %      Line, %
+ MultiTermsAggregationBuilder   100% (1/1)    9.7% (3/31)    18.8% (22/117)
+ 
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.opensearch.common.xcontent.LoggingDeprecationHandler;
+ import org.opensearch.core.ParseField;
+ import org.opensearch.core.common.io.stream.StreamInput;
+ import org.opensearch.core.common.io.stream.StreamOutput;
+ import org.opensearch.core.xcontent.ObjectParser;
+ import org.opensearch.core.xcontent.XContentBuilder;
+ import org.opensearch.index.query.QueryShardContext;
+ import org.opensearch.search.aggregations.AbstractAggregationBuilder;
+ import org.opensearch.search.aggregations.AggregationBuilder;
+ import org.opensearch.search.aggregations.Aggregator;
+ import org.opensearch.search.aggregations.AggregatorFactories;
+ import org.opensearch.search.aggregations.AggregatorFactory;
+ import org.opensearch.search.aggregations.BucketOrder;
+ import org.opensearch.search.aggregations.InternalOrder;
+ import org.opensearch.search.aggregations.support.MultiTermsValuesSourceConfig;
+ import org.opensearch.search.aggregations.support.ValuesSourceRegistry;
+ 
+ import java.io.IOException;
+ import java.util.ArrayList;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Objects;
+ 
+ import static org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder.DEFAULT_BUCKET_COUNT_THRESHOLDS;
+ 
+ /**
+  * Multi-terms aggregation supports collecting terms from multiple fields in the same document.
+  *
+  * <p>
+  *   For example, using the multi-terms aggregation to group by two fields region, host, calculate max cpu, and sort by max cpu.
+  * </p>
+  * <pre>
+  *   GET test_000001/_search
+  *   {
+  *     "size": 0,
+  *     "aggs": {
+  *       "hot": {
+  *         "multi_terms": {
+  *           "terms": [{
+  *             "field": "region"
+  *           },{
+  *             "field": "host"
+  *           }],
+  *           "order": {"max-cpu": "desc"}
+  *         },
+  *         "aggs": {
+  *           "max-cpu": { "max": { "field": "cpu" } }
+  *         }
+  *       }
+  *     }
+  *   }
+  * </pre>
+  *
+  * <p>
+  *   The aggregation result contains
+  *     - key: a list of values extracted from multiple fields in the same doc.
+  * </p>
+  * <pre>
+  *   {
+  *     "hot": {
+  *       "doc_count_error_upper_bound": 0,
+  *       "sum_other_doc_count": 0,
+  *       "buckets": [
+  *         {
+  *           "key": [
+  *             "dub",
+  *             "h1"
+  *           ],
+  *           "key_as_string": "dub|h1",
+  *           "doc_count": 2,
+  *           "max-cpu": {
+  *             "value": 90.0
+  *           }
+  *         },
+  *         {
+  *           "key": [
+  *             "dub",
+  *             "h2"
+  *           ],
+  *           "key_as_string": "dub|h2",
+  *           "doc_count": 2,
+  *           "max-cpu": {
+  *             "value": 70.0
+  *           }
+  *         }
+  *       ]
+  *     }
+  *   }
+  * </pre>
+  *
+  * <p>
+  *   <b>Notes:</b> The current implementation focuses on adding the new aggregation type. Performance (latency) is not good, mainly
+  *   because it naively encodes/decodes a list of values as bucket keys.
+  * </p>
+  *
+  * @opensearch.internal
+  */
+ public class MultiTermsAggregationBuilder extends AbstractAggregationBuilder<MultiTermsAggregationBuilder> {
+     public static final String NAME = "multi_terms";
+     public static final ObjectParser<MultiTermsAggregationBuilder, String> PARSER = ObjectParser.fromBuilder(
+         NAME,
+         MultiTermsAggregationBuilder::new
+     );
+ 
+     public static final ParseField TERMS_FIELD = new ParseField("terms");
+     public static final ParseField SHARD_SIZE_FIELD_NAME = new ParseField("shard_size");
+     public static final ParseField MIN_DOC_COUNT_FIELD_NAME = new ParseField("min_doc_count");
+     public static final ParseField SHARD_MIN_DOC_COUNT_FIELD_NAME = new ParseField("shard_min_doc_count");
+     public static final ParseField REQUIRED_SIZE_FIELD_NAME = new ParseField("size");
+     public static final ParseField SHOW_TERM_DOC_COUNT_ERROR = new ParseField("show_term_doc_count_error");
+     public static final ParseField ORDER_FIELD = new ParseField("order");
+ 
+     @Override
+     public String getType() {
+         return NAME;
+     }
+ 
+     static {
+         final ObjectParser<MultiTermsValuesSourceConfig.Builder, Void> parser = MultiTermsValuesSourceConfig.PARSER.apply(
+             true,
+             true,
+             true,
+             true
+         );
+         PARSER.declareObjectArray(MultiTermsAggregationBuilder::terms, (p, c) -> parser.parse(p, null).build(), TERMS_FIELD);
+ 
+         PARSER.declareBoolean(MultiTermsAggregationBuilder::showTermDocCountError, SHOW_TERM_DOC_COUNT_ERROR);
+ 
+         PARSER.declareInt(MultiTermsAggregationBuilder::shardSize, SHARD_SIZE_FIELD_NAME);
+ 
+         PARSER.declareLong(MultiTermsAggregationBuilder::minDocCount, MIN_DOC_COUNT_FIELD_NAME);
+ 
+         PARSER.declareLong(MultiTermsAggregationBuilder::shardMinDocCount, SHARD_MIN_DOC_COUNT_FIELD_NAME);
+ 
+         PARSER.declareInt(MultiTermsAggregationBuilder::size, REQUIRED_SIZE_FIELD_NAME);
+ 
+         PARSER.declareObjectArray(MultiTermsAggregationBuilder::order, (p, c) -> InternalOrder.Parser.parseOrderParam(p), ORDER_FIELD);
+ 
+         PARSER.declareField(
+             MultiTermsAggregationBuilder::collectMode,
+             (p, c) -> Aggregator.SubAggCollectionMode.parse(p.text(), LoggingDeprecationHandler.INSTANCE),
+             Aggregator.SubAggCollectionMode.KEY,
+             ObjectParser.ValueType.STRING
+         );
+     }
+ 
+     public static final ValuesSourceRegistry.RegistryKey<MultiTermsAggregationFactory.InternalValuesSourceSupplier> REGISTRY_KEY =
+         new ValuesSourceRegistry.RegistryKey<>(
+             MultiTermsAggregationBuilder.NAME,
+             MultiTermsAggregationFactory.InternalValuesSourceSupplier.class
+         );
+ 
+     private List<MultiTermsValuesSourceConfig> terms;
+ 
+     private BucketOrder order = BucketOrder.compound(BucketOrder.count(false)); // automatically adds tie-breaker key asc order
+     private Aggregator.SubAggCollectionMode collectMode = null;
+     private TermsAggregator.BucketCountThresholds bucketCountThresholds = new TermsAggregator.BucketCountThresholds(
+         DEFAULT_BUCKET_COUNT_THRESHOLDS
+     );
+     private boolean showTermDocCountError = false;
+ 
+     public MultiTermsAggregationBuilder(String name) {
+         super(name);
+     }
+ 
+     protected MultiTermsAggregationBuilder(
+         MultiTermsAggregationBuilder clone,
+         AggregatorFactories.Builder factoriesBuilder,
+         Map<String, Object> metadata
+     ) {
+         super(clone, factoriesBuilder, metadata);
+         this.terms = new ArrayList<>(clone.terms);
+         this.order = clone.order;
+         this.collectMode = clone.collectMode;
+         this.bucketCountThresholds = new TermsAggregator.BucketCountThresholds(clone.bucketCountThresholds);
+         this.showTermDocCountError = clone.showTermDocCountError;
+     }
+ 
+     @Override
+     protected AggregationBuilder shallowCopy(AggregatorFactories.Builder factoriesBuilder, Map<String, Object> metadata) {
+         return new MultiTermsAggregationBuilder(this, factoriesBuilder, metadata);
+     }
+ 
+     /**
+      * Read from a stream.
+      */
+     public MultiTermsAggregationBuilder(StreamInput in) throws IOException {
+         super(in);
+         terms = in.readList(MultiTermsValuesSourceConfig::new);
+         bucketCountThresholds = new TermsAggregator.BucketCountThresholds(in);
+         collectMode = in.readOptionalWriteable(Aggregator.SubAggCollectionMode::readFromStream);
+         order = InternalOrder.Streams.readOrder(in);
+         showTermDocCountError = in.readBoolean();
+     }
+ 
+     @Override
+     protected void doWriteTo(StreamOutput out) throws IOException {
+         out.writeList(terms);
+         bucketCountThresholds.writeTo(out);
+         out.writeOptionalWriteable(collectMode);
+         order.writeTo(out);
+         out.writeBoolean(showTermDocCountError);
+     }
+ 
+     @Override
+     protected AggregatorFactory doBuild(
+         QueryShardContext queryShardContext,
+         AggregatorFactory parent,
+         AggregatorFactories.Builder subfactoriesBuilder
+     ) throws IOException {
+         return new MultiTermsAggregationFactory(
+             name,
+             queryShardContext,
+             parent,
+             subfactoriesBuilder,
+             metadata,
+             terms,
+             order,
+             collectMode,
+             bucketCountThresholds,
+             showTermDocCountError
+         );
+     }
+ 
+     @Override
+     protected XContentBuilder internalXContent(XContentBuilder builder, Params params) throws IOException {
+         builder.startObject();
+         if (terms != null) {
+             builder.field(TERMS_FIELD.getPreferredName(), terms);
+         }
+         bucketCountThresholds.toXContent(builder, params);
+         builder.field(SHOW_TERM_DOC_COUNT_ERROR.getPreferredName(), showTermDocCountError);
+         builder.field(ORDER_FIELD.getPreferredName());
+         order.toXContent(builder, params);
+         if (collectMode != null) {
+             builder.field(Aggregator.SubAggCollectionMode.KEY.getPreferredName(), collectMode.parseField().getPreferredName());
+         }
+         builder.endObject();
+         return builder;
+     }
+ 
+     /**
+      * Set the terms.
+      */
+     public MultiTermsAggregationBuilder terms(List<MultiTermsValuesSourceConfig> terms) {
+         if (terms == null) {
+             throw new IllegalArgumentException("[terms] must not be null. Found null terms in [" + name + "]");
+         }
+         if (terms.size() < 2) {
+             throw new IllegalArgumentException(
+                 "multi term aggregation must has at least 2 terms. Found ["
+                     + terms.size()
+                     + "] in"
+                     + " ["
+                     + name
+                     + "]"
+                     + (terms.size() == 1 ? " Use terms aggregation for single term aggregation" : "")
+             );
+         }
+         this.terms = terms;
+         return this;
+     }
+ 
+     /**
+      * Sets the size - indicating how many term buckets should be returned
+      * (defaults to 10)
+      */
+     public MultiTermsAggregationBuilder size(int size) {
+         if (size <= 0) {
+             throw new IllegalArgumentException("[size] must be greater than 0. Found [" + size + "] in [" + name + "]");
+         }
+         bucketCountThresholds.setRequiredSize(size);
+         return this;
+     }
+ 
+     /**
+      * Returns the number of term buckets currently configured
+      */
+     public int size() {
+         return bucketCountThresholds.getRequiredSize();
+     }
+ 
+     /**
+      * Sets the shard_size - indicating the number of term buckets each shard
+      * will return to the coordinating node (the node that coordinates the
+      * search execution). The higher the shard size is, the more accurate the
+      * results are.
+      */
+     public MultiTermsAggregationBuilder shardSize(int shardSize) {
+         if (shardSize <= 0) {
+             throw new IllegalArgumentException("[shardSize] must be greater than 0. Found [" + shardSize + "] in [" + name + "]");
+         }
+         bucketCountThresholds.setShardSize(shardSize);
+         return this;
+     }
+ 
+     /**
+      * Returns the number of term buckets per shard that are currently configured
+      */
+     public int shardSize() {
+         return bucketCountThresholds.getShardSize();
+     }
+ 
+     /**
+      * Set the minimum document count terms should have in order to appear in
+      * the response.
+      */
+     public MultiTermsAggregationBuilder minDocCount(long minDocCount) {
+         if (minDocCount < 0) {
+             throw new IllegalArgumentException(
+                 "[minDocCount] must be greater than or equal to 0. Found [" + minDocCount + "] in [" + name + "]"
+             );
+         }
+         bucketCountThresholds.setMinDocCount(minDocCount);
+         return this;
+     }
+ 
+     /**
+      * Returns the minimum document count required per term
+      */
+     public long minDocCount() {
+         return bucketCountThresholds.getMinDocCount();
+     }
+ 
+     /**
+      * Set the minimum document count terms should have on the shard in order to
+      * appear in the response.
+      */
+     public MultiTermsAggregationBuilder shardMinDocCount(long shardMinDocCount) {
+         if (shardMinDocCount < 0) {
+             throw new IllegalArgumentException(
+                 "[shardMinDocCount] must be greater than or equal to 0. Found [" + shardMinDocCount + "] in [" + name + "]"
+             );
+         }
+         bucketCountThresholds.setShardMinDocCount(shardMinDocCount);
+         return this;
+     }
+ 
+     /**
+      * Returns the minimum document count required per term, per shard
+      */
+     public long shardMinDocCount() {
+         return bucketCountThresholds.getShardMinDocCount();
+     }
+ 
+     /** Set a new order on this builder and return the builder so that calls
+      *  can be chained. A tie-breaker may be added to avoid non-deterministic ordering. */
+     public MultiTermsAggregationBuilder order(BucketOrder order) {
+         if (order == null) {
+             throw new IllegalArgumentException("[order] must not be null: [" + name + "]");
+         }
+         if (order instanceof InternalOrder.CompoundOrder || InternalOrder.isKeyOrder(order)) {
+             this.order = order; // if order already contains a tie-breaker we are good to go
+         } else { // otherwise add a tie-breaker by using a compound order
+             this.order = BucketOrder.compound(order);
+         }
+         return this;
+     }
+ 
+     /**
+      * Sets the order in which the buckets will be returned. A tie-breaker may be added to avoid non-deterministic
+      * ordering.
+      */
+     public MultiTermsAggregationBuilder order(List<BucketOrder> orders) {
+         if (orders == null) {
+             throw new IllegalArgumentException("[orders] must not be null: [" + name + "]");
+         }
+         // if the list only contains one order use that to avoid inconsistent xcontent
+         order(orders.size() > 1 ? BucketOrder.compound(orders) : orders.get(0));
+         return this;
+     }
+ 
+     /**
+      * Gets the order in which the buckets will be returned.
+      */
+     public BucketOrder order() {
+         return order;
+     }
+ 
+     /**
+      * Expert: set the collection mode.
+      */
+     public MultiTermsAggregationBuilder collectMode(Aggregator.SubAggCollectionMode collectMode) {
+         if (collectMode == null) {
+             throw new IllegalArgumentException("[collectMode] must not be null: [" + name + "]");
+         }
+         this.collectMode = collectMode;
+         return this;
+     }
+ 
+     /**
+      * Expert: get the collection mode.
+      */
+     public Aggregator.SubAggCollectionMode collectMode() {
+         return collectMode;
+     }
+ 
+     /**
+      * Get whether the doc count error will be returned for individual terms
+      */
+     public boolean showTermDocCountError() {
+         return showTermDocCountError;
+     }
+ 
+     /**
+      * Set whether the doc count error will be returned for individual terms
+      */
+     public MultiTermsAggregationBuilder showTermDocCountError(boolean showTermDocCountError) {
+         this.showTermDocCountError = showTermDocCountError;
+         return this;
+     }
+ 
+     @Override
+     public BucketCardinality bucketCardinality() {
+         return BucketCardinality.MANY;
+     }
+ 
+     @Override
+     public int hashCode() {
+         return Objects.hash(super.hashCode(), bucketCountThresholds, collectMode, order, showTermDocCountError);
+     }
+ 
+     @Override
+     public boolean equals(Object obj) {
+         if (this == obj) return true;
+         if (obj == null || getClass() != obj.getClass()) return false;
+         if (super.equals(obj) == false) return false;
+         MultiTermsAggregationBuilder other = (MultiTermsAggregationBuilder) obj;
+         return Objects.equals(terms, other.terms)
+             && Objects.equals(bucketCountThresholds, other.bucketCountThresholds)
+             && Objects.equals(collectMode, other.collectMode)
+             && Objects.equals(order, other.order)
+             && Objects.equals(showTermDocCountError, other.showTermDocCountError);
+     }
+ }
+
+
+
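A short sketch of building the same aggregation from Java as the JSON example in the class javadoc above; the `MultiTermsValuesSourceConfig.Builder#setFieldName` call is an assumption about that config builder's API:

```java
// Sketch: programmatic equivalent of the multi_terms JSON example in the javadoc.
// MultiTermsValuesSourceConfig.Builder#setFieldName is assumed here.
MultiTermsAggregationBuilder hot = new MultiTermsAggregationBuilder("hot").terms(
    List.of(
        new MultiTermsValuesSourceConfig.Builder().setFieldName("region").build(),
        new MultiTermsValuesSourceConfig.Builder().setFieldName("host").build()
    )
);
hot.order(BucketOrder.aggregation("max-cpu", false)); // sort buckets by max cpu, descending
hot.subAggregation(AggregationBuilders.max("max-cpu").field("cpu"));
```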
diff --git a/htmlReport/ns-1/sources/source-16.html b/htmlReport/ns-1/sources/source-16.html
new file mode 100644
index 0000000000000..f31139c3c1dbc
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-16.html
@@ -0,0 +1,287 @@
+ Coverage Report > MultiTermsAggregationFactory
+ 
+ Coverage Summary for Class: MultiTermsAggregationFactory (org.opensearch.search.aggregations.bucket.terms)
+ 
+ Class                                                       Method, %      Line, %
+ MultiTermsAggregationFactory                                14.3% (1/7)    8% (4/50)
+ MultiTermsAggregationFactory$InternalValuesSourceSupplier
+ Total                                                       14.3% (1/7)    8% (4/50)
+ 
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.opensearch.common.collect.Tuple;
+ import org.opensearch.index.query.QueryShardContext;
+ import org.opensearch.search.DocValueFormat;
+ import org.opensearch.search.aggregations.Aggregator;
+ import org.opensearch.search.aggregations.AggregatorFactories;
+ import org.opensearch.search.aggregations.AggregatorFactory;
+ import org.opensearch.search.aggregations.BucketOrder;
+ import org.opensearch.search.aggregations.CardinalityUpperBound;
+ import org.opensearch.search.aggregations.InternalOrder;
+ import org.opensearch.search.aggregations.bucket.BucketUtils;
+ import org.opensearch.search.aggregations.support.CoreValuesSourceType;
+ import org.opensearch.search.aggregations.support.MultiTermsValuesSourceConfig;
+ import org.opensearch.search.aggregations.support.ValuesSource;
+ import org.opensearch.search.aggregations.support.ValuesSourceConfig;
+ import org.opensearch.search.aggregations.support.ValuesSourceRegistry;
+ import org.opensearch.search.internal.SearchContext;
+ 
+ import java.io.IOException;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.stream.Collectors;
+ 
+ import static org.opensearch.search.aggregations.bucket.terms.MultiTermsAggregationBuilder.REGISTRY_KEY;
+ 
+ /**
+  * Factory of {@link MultiTermsAggregator}.
+  *
+  * @opensearch.internal
+  */
+ public class MultiTermsAggregationFactory extends AggregatorFactory {
+ 
+     private final List<Tuple<ValuesSourceConfig, IncludeExclude>> configs;
+     private final List<DocValueFormat> formats;
+     /**
+      * Fields inherited from the terms aggregation factory.
+      */
+     private final BucketOrder order;
+     private final Aggregator.SubAggCollectionMode collectMode;
+     private final TermsAggregator.BucketCountThresholds bucketCountThresholds;
+     private final boolean showTermDocCountError;
+ 
+     public static void registerAggregators(ValuesSourceRegistry.Builder builder) {
+         builder.register(REGISTRY_KEY, List.of(CoreValuesSourceType.BYTES, CoreValuesSourceType.IP), config -> {
+             final IncludeExclude.StringFilter filter = config.v2() == null ? null : config.v2().convertToStringFilter(config.v1().format());
+             return MultiTermsAggregator.InternalValuesSourceFactory.bytesValuesSource(config.v1().getValuesSource(), filter);
+         }, true);
+ 
+         builder.register(
+             REGISTRY_KEY,
+             List.of(CoreValuesSourceType.NUMERIC, CoreValuesSourceType.BOOLEAN, CoreValuesSourceType.DATE),
+             config -> {
+                 ValuesSourceConfig valuesSourceConfig = config.v1();
+                 IncludeExclude includeExclude = config.v2();
+                 ValuesSource.Numeric valuesSource = ((ValuesSource.Numeric) valuesSourceConfig.getValuesSource());
+                 IncludeExclude.LongFilter longFilter = null;
+                 if (valuesSource.isFloatingPoint()) {
+                     if (includeExclude != null) {
+                         longFilter = includeExclude.convertToDoubleFilter();
+                     }
+                     return MultiTermsAggregator.InternalValuesSourceFactory.doubleValueSource(valuesSource, longFilter);
+                 } else if (valuesSource.isBigInteger()) {
+                     if (includeExclude != null) {
+                         longFilter = includeExclude.convertToDoubleFilter();
+                     }
+                     return MultiTermsAggregator.InternalValuesSourceFactory.unsignedLongValuesSource(valuesSource, longFilter);
+                 } else {
+                     if (includeExclude != null) {
+                         longFilter = includeExclude.convertToLongFilter(valuesSourceConfig.format());
+                     }
+                     return MultiTermsAggregator.InternalValuesSourceFactory.longValuesSource(valuesSource, longFilter);
+                 }
+             },
+             true
+         );
+ 
+         builder.registerUsage(MultiTermsAggregationBuilder.NAME);
+     }
+ 
+     public MultiTermsAggregationFactory(
+         String name,
+         QueryShardContext queryShardContext,
+         AggregatorFactory parent,
+         AggregatorFactories.Builder subFactoriesBuilder,
+         Map<String, Object> metadata,
+         List<MultiTermsValuesSourceConfig> multiTermConfigs,
+         BucketOrder order,
+         Aggregator.SubAggCollectionMode collectMode,
+         TermsAggregator.BucketCountThresholds bucketCountThresholds,
+         boolean showTermDocCountError
+     ) throws IOException {
+         super(name, queryShardContext, parent, subFactoriesBuilder, metadata);
+         this.configs = multiTermConfigs.stream()
+             .map(
+                 c -> new Tuple<ValuesSourceConfig, IncludeExclude>(
+                     ValuesSourceConfig.resolveUnregistered(
+                         queryShardContext,
+                         c.getUserValueTypeHint(),
+                         c.getFieldName(),
+                         c.getScript(),
+                         c.getMissing(),
+                         c.getTimeZone(),
+                         c.getFormat(),
+                         CoreValuesSourceType.BYTES
+                     ),
+                     c.getIncludeExclude()
+                 )
+             )
+             .collect(Collectors.toList());
+         this.formats = this.configs.stream().map(c -> c.v1().format()).collect(Collectors.toList());
+         this.order = order;
+         this.collectMode = collectMode;
+         this.bucketCountThresholds = bucketCountThresholds;
+         this.showTermDocCountError = showTermDocCountError;
+     }
+ 
+     @Override
+     protected Aggregator createInternal(
+         SearchContext searchContext,
+         Aggregator parent,
+         CardinalityUpperBound cardinality,
+         Map<String, Object> metadata
+     ) throws IOException {
+         TermsAggregator.BucketCountThresholds bucketCountThresholds = new TermsAggregator.BucketCountThresholds(this.bucketCountThresholds);
+         if (InternalOrder.isKeyOrder(order) == false
+             && bucketCountThresholds.getShardSize() == TermsAggregationBuilder.DEFAULT_BUCKET_COUNT_THRESHOLDS.getShardSize()) {
+             // The user has not made a shardSize selection. Use default
+             // heuristic to avoid any wrong-ranking caused by distributed
+             // counting
+             bucketCountThresholds.setShardSize(BucketUtils.suggestShardSideQueueSize(bucketCountThresholds.getRequiredSize()));
+         }
+         bucketCountThresholds.ensureValidity();
+         return new MultiTermsAggregator(
+             name,
+             factories,
+             showTermDocCountError,
+             configs.stream()
+                 .map(config -> queryShardContext.getValuesSourceRegistry().getAggregator(REGISTRY_KEY, config.v1()).build(config))
+                 .collect(Collectors.toList()),
+             configs.stream().map(c -> c.v1().format()).collect(Collectors.toList()),
+             order,
+             collectMode,
+             bucketCountThresholds,
+             searchContext,
+             parent,
+             cardinality,
+             metadata
+         );
+     }
+ 
+     @Override
+     protected boolean supportsConcurrentSegmentSearch() {
+         return true;
+     }
+ 
+     /**
+      * Supplier for internal values source
+      *
+      * @opensearch.internal
+      */
+     public interface InternalValuesSourceSupplier {
+         MultiTermsAggregator.InternalValuesSource build(Tuple<ValuesSourceConfig, IncludeExclude> config);
+     }
+ }
+
+
+
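The shard-size defaulting in `createInternal` above over-collects on each shard whenever buckets are ranked by anything other than key order, so the coordinating node mis-ranks fewer terms whose counts are split across shards. A sketch of that adjustment; the `1.5 * size + 10` formula is an assumption standing in for `BucketUtils.suggestShardSideQueueSize`:

```java
// Sketch of the shard_size defaulting performed in createInternal above.
// The formula is an assumed stand-in for BucketUtils.suggestShardSideQueueSize.
static int effectiveShardSize(int requiredSize, int configuredShardSize, int defaultShardSize, boolean keyOrdered) {
    if (!keyOrdered && configuredShardSize == defaultShardSize) {
        // Over-collect per shard to reduce ranking errors from distributed counting.
        return (int) (requiredSize * 1.5 + 10);
    }
    return configuredShardSize;
}
```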
diff --git a/htmlReport/ns-1/sources/source-17.html b/htmlReport/ns-1/sources/source-17.html
new file mode 100644
index 0000000000000..6ca556d707c84
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-17.html
@@ -0,0 +1,743 @@
+ Coverage Report > MultiTermsAggregator
+ 
+ Coverage Summary for Class: MultiTermsAggregator (org.opensearch.search.aggregations.bucket.terms)
+ 
+ Class                                                  Method, %     Line, %
+ MultiTermsAggregator                                   0% (0/10)     0% (0/91)
+ MultiTermsAggregator$1                                 0% (0/2)      0% (0/8)
+ MultiTermsAggregator$InternalValuesSource
+ MultiTermsAggregator$InternalValuesSourceCollector
+ MultiTermsAggregator$InternalValuesSourceFactory       0% (0/13)     0% (0/64)
+ MultiTermsAggregator$MultiTermsValuesSource            0% (0/3)      0% (0/9)
+ MultiTermsAggregator$MultiTermsValuesSource$1          0% (0/3)      0% (0/18)
+ MultiTermsAggregator$MultiTermsValuesSourceCollector
+ MultiTermsAggregator$TermValue                         0% (0/7)      0% (0/12)
+ Total                                                  0% (0/38)     0% (0/202)
+ 
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.apache.lucene.index.LeafReaderContext;
+ import org.apache.lucene.index.SortedNumericDocValues;
+ import org.apache.lucene.util.BytesRef;
+ import org.apache.lucene.util.NumericUtils;
+ import org.apache.lucene.util.PriorityQueue;
+ import org.opensearch.ExceptionsHelper;
+ import org.opensearch.common.CheckedSupplier;
+ import org.opensearch.common.Numbers;
+ import org.opensearch.common.io.stream.BytesStreamOutput;
+ import org.opensearch.common.lease.Releasable;
+ import org.opensearch.common.lease.Releasables;
+ import org.opensearch.core.common.bytes.BytesArray;
+ import org.opensearch.core.common.io.stream.StreamInput;
+ import org.opensearch.core.common.io.stream.StreamOutput;
+ import org.opensearch.core.common.io.stream.Writeable;
+ import org.opensearch.index.fielddata.SortedBinaryDocValues;
+ import org.opensearch.index.fielddata.SortedNumericDoubleValues;
+ import org.opensearch.search.DocValueFormat;
+ import org.opensearch.search.aggregations.Aggregator;
+ import org.opensearch.search.aggregations.AggregatorFactories;
+ import org.opensearch.search.aggregations.BucketOrder;
+ import org.opensearch.search.aggregations.CardinalityUpperBound;
+ import org.opensearch.search.aggregations.InternalAggregation;
+ import org.opensearch.search.aggregations.InternalOrder;
+ import org.opensearch.search.aggregations.LeafBucketCollector;
+ import org.opensearch.search.aggregations.bucket.DeferableBucketAggregator;
+ import org.opensearch.search.aggregations.bucket.LocalBucketCountThresholds;
+ import org.opensearch.search.aggregations.support.AggregationPath;
+ import org.opensearch.search.aggregations.support.ValuesSource;
+ import org.opensearch.search.internal.SearchContext;
+ 
+ import java.io.IOException;
+ import java.math.BigInteger;
+ import java.util.ArrayList;
+ import java.util.Arrays;
+ import java.util.Collections;
+ import java.util.Comparator;
+ import java.util.HashSet;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Set;
+ 
+ import static org.opensearch.search.aggregations.InternalOrder.isKeyOrder;
+ import static org.opensearch.search.aggregations.bucket.terms.TermsAggregator.descendsFromNestedAggregator;
+ 
+ /**
+  * An aggregator that aggregates over multiple terms (multi_terms).
+  *
+  * @opensearch.internal
+  */
+ public class MultiTermsAggregator extends DeferableBucketAggregator {
+ 
+     private final BytesKeyedBucketOrds bucketOrds;
+     private final MultiTermsValuesSource multiTermsValue;
+     private final boolean showTermDocCountError;
+     private final List<DocValueFormat> formats;
+     private final TermsAggregator.BucketCountThresholds bucketCountThresholds;
+     private final BucketOrder order;
+     private final Comparator<InternalMultiTerms.Bucket> partiallyBuiltBucketComparator;
+     private final SubAggCollectionMode collectMode;
+     private final Set<Aggregator> aggsUsedForSorting = new HashSet<>();
+ 
+     public MultiTermsAggregator(
+         String name,
+         AggregatorFactories factories,
+         boolean showTermDocCountError,
+         List<InternalValuesSource> internalValuesSources,
+         List<DocValueFormat> formats,
+         BucketOrder order,
+         SubAggCollectionMode collectMode,
+         TermsAggregator.BucketCountThresholds bucketCountThresholds,
+         SearchContext context,
+         Aggregator parent,
+         CardinalityUpperBound cardinality,
+         Map<String, Object> metadata
+     ) throws IOException {
+         super(name, factories, context, parent, metadata);
+         this.bucketOrds = BytesKeyedBucketOrds.build(context.bigArrays(), cardinality);
+         this.multiTermsValue = new MultiTermsValuesSource(internalValuesSources);
+         this.showTermDocCountError = showTermDocCountError;
+         this.formats = formats;
+         this.bucketCountThresholds = bucketCountThresholds;
+         this.order = order;
+         this.partiallyBuiltBucketComparator = order == null ? null : order.partiallyBuiltBucketComparator(b -> b.bucketOrd, this);
+         // TODO: copied from TermsAggregator; remove the duplicated code.
+         if (subAggsNeedScore() && descendsFromNestedAggregator(parent)) {
+             /*
+               Force the execution to depth_first because we need to access the score of
+               nested documents in a sub-aggregation and we are not able to generate this score
+               while replaying deferred documents.
+              */
+             this.collectMode = SubAggCollectionMode.DEPTH_FIRST;
+         } else {
+             this.collectMode = collectMode;
+         }
+         // Don't defer any child agg if we are dependent on it for pruning results
+         if (order instanceof InternalOrder.Aggregation) {
+             AggregationPath path = ((InternalOrder.Aggregation) order).path();
+             aggsUsedForSorting.add(path.resolveTopmostAggregator(this));
+         } else if (order instanceof InternalOrder.CompoundOrder) {
+             InternalOrder.CompoundOrder compoundOrder = (InternalOrder.CompoundOrder) order;
+             for (BucketOrder orderElement : compoundOrder.orderElements()) {
+                 if (orderElement instanceof InternalOrder.Aggregation) {
+                     AggregationPath path = ((InternalOrder.Aggregation) orderElement).path();
+                     aggsUsedForSorting.add(path.resolveTopmostAggregator(this));
+                 }
+             }
+         }
+     }
+ 
+     @Override
+     public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException {
+         LocalBucketCountThresholds localBucketCountThresholds = context.asLocalBucketCountThresholds(bucketCountThresholds);
+         InternalMultiTerms.Bucket[][] topBucketsPerOrd = new InternalMultiTerms.Bucket[owningBucketOrds.length][];
+         long[] otherDocCounts = new long[owningBucketOrds.length];
+         for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) {
+             collectZeroDocEntriesIfNeeded(owningBucketOrds[ordIdx]);
+             long bucketsInOrd = bucketOrds.bucketsInOrd(owningBucketOrds[ordIdx]);
+ 
+             int size = (int) Math.min(bucketsInOrd, localBucketCountThresholds.getRequiredSize());
+             PriorityQueue<InternalMultiTerms.Bucket> ordered = new BucketPriorityQueue<>(size, partiallyBuiltBucketComparator);
+             InternalMultiTerms.Bucket spare = null;
+             BytesRef dest = null;
+             BytesKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrds[ordIdx]);
+             CheckedSupplier<InternalMultiTerms.Bucket, IOException> emptyBucketBuilder = () -> InternalMultiTerms.Bucket.EMPTY(
+                 showTermDocCountError,
+                 formats
+             );
+             while (ordsEnum.next()) {
+                 long docCount = bucketDocCount(ordsEnum.ord());
+                 otherDocCounts[ordIdx] += docCount;
+                 if (docCount < localBucketCountThresholds.getMinDocCount()) {
+                     continue;
+                 }
+                 if (spare == null) {
+                     spare = emptyBucketBuilder.get();
+                     dest = new BytesRef();
+                 }
+ 
+                 ordsEnum.readValue(dest);
+ 
+                 spare.termValues = decode(dest);
+                 spare.docCount = docCount;
+                 spare.bucketOrd = ordsEnum.ord();
+                 spare = ordered.insertWithOverflow(spare);
+             }
+ 
+             // Get the top buckets
+             InternalMultiTerms.Bucket[] bucketsForOrd = new InternalMultiTerms.Bucket[ordered.size()];
+             topBucketsPerOrd[ordIdx] = bucketsForOrd;
+             for (int b = ordered.size() - 1; b >= 0; --b) {
+                 topBucketsPerOrd[ordIdx][b] = ordered.pop();
+                 otherDocCounts[ordIdx] -= topBucketsPerOrd[ordIdx][b].getDocCount();
+             }
+         }
+ 
+         buildSubAggsForAllBuckets(topBucketsPerOrd, b -> b.bucketOrd, (b, aggs) -> b.aggregations = aggs);
+ 
+         InternalAggregation[] result = new InternalAggregation[owningBucketOrds.length];
+         for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) {
+             result[ordIdx] = buildResult(owningBucketOrds[ordIdx], otherDocCounts[ordIdx], topBucketsPerOrd[ordIdx]);
+         }
+         return result;
+     }
+ 
+     InternalMultiTerms buildResult(long owningBucketOrd, long otherDocCount, InternalMultiTerms.Bucket[] topBuckets) {
+         BucketOrder reduceOrder;
+         if (isKeyOrder(order) == false) {
+             reduceOrder = InternalOrder.key(true);
+             Arrays.sort(topBuckets, reduceOrder.comparator());
+         } else {
+             reduceOrder = order;
+         }
+         return new InternalMultiTerms(
+             name,
+             reduceOrder,
+             order,
+             metadata(),
+             bucketCountThresholds.getShardSize(),
+             showTermDocCountError,
+             otherDocCount,
+             0,
+             formats,
+             List.of(topBuckets),
+             bucketCountThresholds
+         );
+     }
+ 
+     @Override
+     public InternalAggregation buildEmptyAggregation() {
+         return new InternalMultiTerms(
+             name,
+             order,
+             order,
+             metadata(),
+             bucketCountThresholds.getShardSize(),
+             showTermDocCountError,
+             0,
+             0,
+             formats,
+             Collections.emptyList(),
+             bucketCountThresholds
+         );
+     }
+ 
+     @Override
+     protected LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCollector sub) throws IOException {
+         MultiTermsValuesSourceCollector collector = multiTermsValue.getValues(ctx);
+         return new LeafBucketCollector() {
+             @Override
+             public void collect(int doc, long owningBucketOrd) throws IOException {
+                 for (BytesRef compositeKey : collector.apply(doc)) {
+                     long bucketOrd = bucketOrds.add(owningBucketOrd, compositeKey);
+                     if (bucketOrd < 0) {
+                         bucketOrd = -1 - bucketOrd;
+                         collectExistingBucket(sub, doc, bucketOrd);
+                     } else {
+                         collectBucket(sub, doc, bucketOrd);
+                     }
+                 }
+             }
+         };
+     }
+ 
+     @Override
+     protected void doClose() {
+         Releasables.close(bucketOrds, multiTermsValue);
+     }
+ 
+     private static List<Object> decode(BytesRef bytesRef) {
+         try (StreamInput input = new BytesArray(bytesRef).streamInput()) {
+             return input.readList(StreamInput::readGenericValue);
+         } catch (IOException e) {
+             throw ExceptionsHelper.convertToRuntime(e);
+         }
+     }
+ 
+     private boolean subAggsNeedScore() {
+         for (Aggregator subAgg : subAggregators) {
+             if (subAgg.scoreMode().needsScores()) {
+                 return true;
+             }
+         }
+         return false;
+     }
+ 
+     @Override
+     protected boolean shouldDefer(Aggregator aggregator) {
+         return collectMode == Aggregator.SubAggCollectionMode.BREADTH_FIRST && !aggsUsedForSorting.contains(aggregator);
+     }
+ 
+     private void collectZeroDocEntriesIfNeeded(long owningBucketOrd) throws IOException {
+         if (bucketCountThresholds.getMinDocCount() != 0) {
+             return;
+         }
+         if (InternalOrder.isCountDesc(order) && bucketOrds.bucketsInOrd(owningBucketOrd) >= bucketCountThresholds.getRequiredSize()) {
+             return;
+         }
+         // we need to fill-in the blanks
+         for (LeafReaderContext ctx : context.searcher().getTopReaderContext().leaves()) {
+             MultiTermsValuesSourceCollector collector = multiTermsValue.getValues(ctx);
+             // brute force
+             for (int docId = 0; docId < ctx.reader().maxDoc(); ++docId) {
+                 for (BytesRef compositeKey : collector.apply(docId)) {
+                     bucketOrds.add(owningBucketOrd, compositeKey);
+                 }
+             }
+         }
+     }
+ 
+     /**
+      * A multi_terms collector which collects values for each doc.
+      */
+     @FunctionalInterface
+     interface MultiTermsValuesSourceCollector {
+         /**
+          * Collect the list of multi_terms values for each doc.
+          * Each term may have multiple values, so the result is the cartesian product of all terms' values.
+          */
+         List<BytesRef> apply(int doc) throws IOException;
+     }
+ 
+     @FunctionalInterface
+     interface InternalValuesSource {
+         /**
+          * Create {@link InternalValuesSourceCollector} from existing {@link LeafReaderContext}.
+          */
+         InternalValuesSourceCollector apply(LeafReaderContext ctx) throws IOException;
+     }
+ 
+     /**
+      * A terms collector which collects values for each doc.
+      */
+     @FunctionalInterface
+     interface InternalValuesSourceCollector {
+         /**
+          * Collect the list of values of a term for a specific doc.
+          */
+         List<TermValue<?>> apply(int doc) throws IOException;
+     }
+ 
+     /**
+      * Represents an individual term value.
+      */
+     static class TermValue<T> implements Writeable {
+         private static final Writer<BytesRef> BYTES_REF_WRITER = StreamOutput.getWriter(BytesRef.class);
+         private static final Writer<Long> LONG_WRITER = StreamOutput.getWriter(Long.class);
+         private static final Writer<BigInteger> BIG_INTEGER_WRITER = StreamOutput.getWriter(BigInteger.class);
+         private static final Writer<Double> DOUBLE_WRITER = StreamOutput.getWriter(Double.class);
+ 
+         private final T value;
+         private final Writer<T> writer;
+ 
+         private TermValue(T value, Writer<T> writer) {
+             this.value = value;
+             this.writer = writer;
+         }
+ 
+         @Override
+         public void writeTo(StreamOutput out) throws IOException {
+             writer.write(out, value);
+         }
+ 
+         public static TermValue<BytesRef> of(BytesRef value) {
+             return new TermValue<>(value, BYTES_REF_WRITER);
+         }
+ 
+         public static TermValue<Long> of(Long value) {
+             return new TermValue<>(value, LONG_WRITER);
+         }
+ 
+         public static TermValue<BigInteger> of(BigInteger value) {
+             return new TermValue<>(value, BIG_INTEGER_WRITER);
+         }
+ 
+         public static TermValue<Double> of(Double value) {
+             return new TermValue<>(value, DOUBLE_WRITER);
+         }
+     }
+ 
+     /**
+      * Multi_terms values source: a collection of {@link InternalValuesSource}.
+      *
+      * @opensearch.internal
+      */
+     static class MultiTermsValuesSource implements Releasable {
+         private final List<InternalValuesSource> valuesSources;
+         private final BytesStreamOutput scratch = new BytesStreamOutput();
+ 
+         public MultiTermsValuesSource(List<InternalValuesSource> valuesSources) {
+             this.valuesSources = valuesSources;
+         }
+ 
+         public MultiTermsValuesSourceCollector getValues(LeafReaderContext ctx) throws IOException {
+             List<InternalValuesSourceCollector> collectors = new ArrayList<>();
+             for (InternalValuesSource valuesSource : valuesSources) {
+                 collectors.add(valuesSource.apply(ctx));
+             }
+             return new MultiTermsValuesSourceCollector() {
+                 @Override
+                 public List<BytesRef> apply(int doc) throws IOException {
+                     List<List<TermValue<?>>> collectedValues = new ArrayList<>();
+                     for (InternalValuesSourceCollector collector : collectors) {
+                         collectedValues.add(collector.apply(doc));
+                     }
+                     List<BytesRef> result = new ArrayList<>();
+                     scratch.seek(0);
+                     scratch.writeVInt(collectors.size()); // number of fields per composite key
+                     cartesianProduct(result, scratch, collectedValues, 0);
+                     return result;
+                 }
+ 
+                 /**
+                  * Cartesian product using depth first search.
+                  *
+                  * <p>
+                  * Composite keys are encoded to a {@link BytesRef} in a format compatible with {@link StreamOutput#writeGenericValue},
+                  * but reuses the encoding of the shared prefixes from the previous levels to avoid wasteful work.
+                  */
+                 private void cartesianProduct(
+                     List<BytesRef> compositeKeys,
+                     BytesStreamOutput scratch,
+                     List<List<TermValue<?>>> collectedValues,
+                     int index
+                 ) throws IOException {
+                     if (collectedValues.size() == index) {
+                         compositeKeys.add(BytesRef.deepCopyOf(scratch.bytes().toBytesRef()));
+                         return;
+                     }
+ 
+                     long position = scratch.position();
+                     for (TermValue<?> value : collectedValues.get(index)) {
+                         value.writeTo(scratch); // encode the value
+                         cartesianProduct(compositeKeys, scratch, collectedValues, index + 1); // dfs
+                         scratch.seek(position); // backtrack
+                     }
+                 }
+             };
+         }
+ 
+         @Override
+         public void close() {
+             scratch.close();
+         }
+     }
+ 
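+     /*
+      * Round-trip sketch of the composite-key encoding used above (illustrative):
+      * a key is a vInt field count followed by one generic-value-encoded term per
+      * field, which is why decode(BytesRef) can read it back with readGenericValue.
+      *
+      *   static BytesRef encode(List<Object> termValues) throws IOException {
+      *       try (BytesStreamOutput out = new BytesStreamOutput()) {
+      *           out.writeVInt(termValues.size());
+      *           for (Object value : termValues) {
+      *               out.writeGenericValue(value);
+      *           }
+      *           return BytesRef.deepCopyOf(out.bytes().toBytesRef());
+      *       }
+      *   }
+      *
+      *   static List<Object> decodeKey(BytesRef bytesRef) throws IOException {
+      *       try (StreamInput input = new BytesArray(bytesRef).streamInput()) {
+      *           return input.readList(StreamInput::readGenericValue);
+      *       }
+      *   }
+      */
+ 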
+     /**
+      * Factory for constructing {@link InternalValuesSource} instances.
+      *
+      * @opensearch.internal
+      */
+     static class InternalValuesSourceFactory {
+         static InternalValuesSource bytesValuesSource(ValuesSource valuesSource, IncludeExclude.StringFilter includeExclude) {
+             return ctx -> {
+                 SortedBinaryDocValues values = valuesSource.bytesValues(ctx);
+                 return doc -> {
+                     if (false == values.advanceExact(doc)) {
+                         return Collections.emptyList();
+                     }
+                     int valuesCount = values.docValueCount();
+                     List<TermValue<?>> termValues = new ArrayList<>(valuesCount);
+ 
+                     // SortedBinaryDocValues don't guarantee uniqueness so we
+                     // need to take care of dups
+                     BytesRef previous = null;
+                     for (int i = 0; i < valuesCount; ++i) {
+                         BytesRef bytes = values.nextValue();
+                         if (includeExclude != null && false == includeExclude.accept(bytes)) {
+                             continue;
+                         }
+                         if (i > 0 && bytes.equals(previous)) {
+                             continue;
+                         }
+                         BytesRef copy = BytesRef.deepCopyOf(bytes);
+                         termValues.add(TermValue.of(copy));
+                         previous = copy;
+                     }
+                     return termValues;
+                 };
+             };
+         }
+ 
+         static InternalValuesSource unsignedLongValuesSource(ValuesSource.Numeric valuesSource, IncludeExclude.LongFilter longFilter) {
+             return ctx -> {
+                 SortedNumericDocValues values = valuesSource.longValues(ctx);
+                 return doc -> {
+                     if (values.advanceExact(doc)) {
+                         int valuesCount = values.docValueCount();
+ 
+                         BigInteger previous = Numbers.MAX_UNSIGNED_LONG_VALUE;
+                         List<TermValue<?>> termValues = new ArrayList<>(valuesCount);
+                         for (int i = 0; i < valuesCount; ++i) {
+                             BigInteger val = Numbers.toUnsignedBigInteger(values.nextValue());
+                             if (previous.compareTo(val) != 0 || i == 0) {
+                                 if (longFilter == null || longFilter.accept(NumericUtils.doubleToSortableLong(val.doubleValue()))) {
+                                     termValues.add(TermValue.of(val));
+                                 }
+                                 previous = val;
+                             }
+                         }
+                         return termValues;
+                     }
+                     return Collections.emptyList();
+                 };
+             };
+         }
+ 
+         static InternalValuesSource longValuesSource(ValuesSource.Numeric valuesSource, IncludeExclude.LongFilter longFilter) {
+             return ctx -> {
+                 SortedNumericDocValues values = valuesSource.longValues(ctx);
+                 return doc -> {
+                     if (values.advanceExact(doc)) {
+                         int valuesCount = values.docValueCount();
+ 
+                         long previous = Long.MAX_VALUE;
+                         List<TermValue<?>> termValues = new ArrayList<>(valuesCount);
+                         for (int i = 0; i < valuesCount; ++i) {
+                             long val = values.nextValue();
+                             if (previous != val || i == 0) {
+                                 if (longFilter == null || longFilter.accept(val)) {
+                                     termValues.add(TermValue.of(val));
+                                 }
+                                 previous = val;
+                             }
+                         }
+                         return termValues;
+                     }
+                     return Collections.emptyList();
+                 };
+             };
+         }
+ 
+         static InternalValuesSource doubleValueSource(ValuesSource.Numeric valuesSource, IncludeExclude.LongFilter longFilter) {
+             return ctx -> {
+                 SortedNumericDoubleValues values = valuesSource.doubleValues(ctx);
+                 return doc -> {
+                     if (values.advanceExact(doc)) {
+                         int valuesCount = values.docValueCount();
+ 
+                         double previous = Double.MAX_VALUE;
+                         List<TermValue<?>> termValues = new ArrayList<>(valuesCount);
+                         for (int i = 0; i < valuesCount; ++i) {
+                             double val = values.nextValue();
+                             if (previous != val || i == 0) {
+                                 if (longFilter == null || longFilter.accept(NumericUtils.doubleToSortableLong(val))) {
+                                     termValues.add(TermValue.of(val));
+                                 }
+                                 previous = val;
+                             }
+                         }
+                         return termValues;
+                     }
+                     return Collections.emptyList();
+                 };
+             };
+         }
+     }
+ }
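All three numeric factories above lean on the same invariant: SortedNumericDocValues returns a document's values in sorted order, so duplicates are adjacent and a single `previous` sentinel suffices to drop them. Below is a minimal, self-contained sketch of that dedup loop, with a plain array standing in for the doc-values iterator (the class and method names are illustrative, not part of this patch):

    import java.util.ArrayList;
    import java.util.List;

    public class SortedDedupSketch {
        // Mirrors the loop in longValuesSource: values arrive sorted, so comparing
        // each value against the previous one is enough to skip duplicates.
        static List<Long> dedupSorted(long[] sortedValues) {
            List<Long> result = new ArrayList<>(sortedValues.length);
            long previous = Long.MAX_VALUE; // sentinel, same trick as the aggregator
            for (int i = 0; i < sortedValues.length; ++i) {
                long val = sortedValues[i];
                if (previous != val || i == 0) { // i == 0 guards a genuine MAX_VALUE first element
                    result.add(val);
                    previous = val;
                }
            }
            return result;
        }

        public static void main(String[] args) {
            // prints [1, 2, 7, 9223372036854775807]
            System.out.println(dedupSorted(new long[] { 1, 1, 2, 7, 7, Long.MAX_VALUE }));
        }
    }

The bytes variant cannot use a sentinel (any BytesRef is a legal value), which is why bytesValuesSource starts from previous = null and deep-copies each accepted value before storing it.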
+
+
+
diff --git a/htmlReport/ns-1/sources/source-18.html b/htmlReport/ns-1/sources/source-18.html
new file mode 100644
index 0000000000000..2c33034418e11
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-18.html
@@ -0,0 +1,969 @@
+ Coverage Report > NumericTermsAggregator

Coverage Summary for Class: NumericTermsAggregator (org.opensearch.search.aggregations.bucket.terms)

+ Class                                                   Method, %      Line, %
+ NumericTermsAggregator                                  0%  (0/8)      0%  (0/17)
+ NumericTermsAggregator$1                                0%  (0/2)      0%  (0/14)
+ NumericTermsAggregator$DoubleTermsResults               0%  (0/9)      0%  (0/24)
+ NumericTermsAggregator$LongTermsResults                 0%  (0/9)      0%  (0/24)
+ NumericTermsAggregator$ResultStrategy                   0%  (0/2)      0%  (0/32)
+ NumericTermsAggregator$SignificantLongTermsResults      0%  (0/14)     0%  (0/34)
+ NumericTermsAggregator$SignificantLongTermsResults$1    0%  (0/2)      0%  (0/4)
+ NumericTermsAggregator$StandardTermsResultStrategy      0%  (0/7)      0%  (0/19)
+ NumericTermsAggregator$UnsignedLongTermsResults         0%  (0/9)      0%  (0/24)
+ Total                                                   0%  (0/62)     0%  (0/192)
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.apache.lucene.index.IndexReader;
+ import org.apache.lucene.index.LeafReaderContext;
+ import org.apache.lucene.index.SortedNumericDocValues;
+ import org.apache.lucene.search.ScoreMode;
+ import org.apache.lucene.util.NumericUtils;
+ import org.apache.lucene.util.PriorityQueue;
+ import org.opensearch.common.Numbers;
+ import org.opensearch.common.lease.Releasable;
+ import org.opensearch.common.lease.Releasables;
+ import org.opensearch.common.util.LongArray;
+ import org.opensearch.index.fielddata.FieldData;
+ import org.opensearch.search.DocValueFormat;
+ import org.opensearch.search.aggregations.Aggregator;
+ import org.opensearch.search.aggregations.AggregatorFactories;
+ import org.opensearch.search.aggregations.BucketOrder;
+ import org.opensearch.search.aggregations.CardinalityUpperBound;
+ import org.opensearch.search.aggregations.InternalAggregation;
+ import org.opensearch.search.aggregations.InternalMultiBucketAggregation;
+ import org.opensearch.search.aggregations.InternalOrder;
+ import org.opensearch.search.aggregations.LeafBucketCollector;
+ import org.opensearch.search.aggregations.LeafBucketCollectorBase;
+ import org.opensearch.search.aggregations.bucket.LocalBucketCountThresholds;
+ import org.opensearch.search.aggregations.bucket.terms.IncludeExclude.LongFilter;
+ import org.opensearch.search.aggregations.bucket.terms.LongKeyedBucketOrds.BucketOrdsEnum;
+ import org.opensearch.search.aggregations.bucket.terms.SignificanceLookup.BackgroundFrequencyForLong;
+ import org.opensearch.search.aggregations.bucket.terms.heuristic.SignificanceHeuristic;
+ import org.opensearch.search.aggregations.support.ValuesSource;
+ import org.opensearch.search.internal.ContextIndexSearcher;
+ import org.opensearch.search.internal.SearchContext;
+ 
+ import java.io.IOException;
+ import java.math.BigInteger;
+ import java.util.Arrays;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.function.BiConsumer;
+ import java.util.function.Function;
+ import java.util.function.Supplier;
+ 
+ import static java.util.Collections.emptyList;
+ import static org.opensearch.search.aggregations.InternalOrder.isKeyOrder;
+ 
+ /**
+  * Aggregate all docs that contain numeric terms
+  *
+  * @opensearch.internal
+  */
+ public class NumericTermsAggregator extends TermsAggregator {
+     private final ResultStrategy<?, ?> resultStrategy;
+     private final ValuesSource.Numeric valuesSource;
+     private final LongKeyedBucketOrds bucketOrds;
+     private final LongFilter longFilter;
+ 
+     public NumericTermsAggregator(
+         String name,
+         AggregatorFactories factories,
+         Function<NumericTermsAggregator, ResultStrategy<?, ?>> resultStrategy,
+         ValuesSource.Numeric valuesSource,
+         DocValueFormat format,
+         BucketOrder order,
+         BucketCountThresholds bucketCountThresholds,
+         SearchContext aggregationContext,
+         Aggregator parent,
+         SubAggCollectionMode subAggCollectMode,
+         IncludeExclude.LongFilter longFilter,
+         CardinalityUpperBound cardinality,
+         Map<String, Object> metadata
+     ) throws IOException {
+         super(name, factories, aggregationContext, parent, bucketCountThresholds, order, format, subAggCollectMode, metadata);
+         this.resultStrategy = resultStrategy.apply(this); // ResultStrategy needs a reference to the Aggregator to do its job.
+         this.valuesSource = valuesSource;
+         this.longFilter = longFilter;
+         bucketOrds = LongKeyedBucketOrds.build(context.bigArrays(), cardinality);
+     }
+ 
+     @Override
+     public ScoreMode scoreMode() {
+         if (valuesSource != null && valuesSource.needsScores()) {
+             return ScoreMode.COMPLETE;
+         }
+         return super.scoreMode();
+     }
+ 
+     @Override
+     public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCollector sub) throws IOException {
+         SortedNumericDocValues values = resultStrategy.getValues(ctx);
+         return resultStrategy.wrapCollector(new LeafBucketCollectorBase(sub, values) {
+             @Override
+             public void collect(int doc, long owningBucketOrd) throws IOException {
+                 if (values.advanceExact(doc)) {
+                     int valuesCount = values.docValueCount();
+ 
+                     long previous = Long.MAX_VALUE;
+                     for (int i = 0; i < valuesCount; ++i) {
+                         long val = values.nextValue();
+                         if (previous != val || i == 0) {
+                             if ((longFilter == null) || (longFilter.accept(val))) {
+                                 long bucketOrdinal = bucketOrds.add(owningBucketOrd, val);
+                                 if (bucketOrdinal < 0) { // already seen
+                                     bucketOrdinal = -1 - bucketOrdinal;
+                                     collectExistingBucket(sub, doc, bucketOrdinal);
+                                 } else {
+                                     collectBucket(sub, doc, bucketOrdinal);
+                                 }
+                             }
+ 
+                             previous = val;
+                         }
+                     }
+                 }
+             }
+         });
+     }
+ 
+     @Override
+     public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException {
+         return resultStrategy.buildAggregations(owningBucketOrds);
+     }
+ 
+     @Override
+     public InternalAggregation buildEmptyAggregation() {
+         return resultStrategy.buildEmptyResult();
+     }
+ 
+     @Override
+     public void doClose() {
+         Releasables.close(super::doClose, bucketOrds, resultStrategy);
+     }
+ 
+     @Override
+     public void collectDebugInfo(BiConsumer<String, Object> add) {
+         super.collectDebugInfo(add);
+         add.accept("result_strategy", resultStrategy.describe());
+         add.accept("total_buckets", bucketOrds.size());
+     }
+ 
+     /**
+      * Strategy for building results.
+      */
+     abstract class ResultStrategy<R extends InternalAggregation, B extends InternalMultiBucketAggregation.InternalBucket>
+         implements
+             Releasable {
+         private InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException {
+             LocalBucketCountThresholds localBucketCountThresholds = context.asLocalBucketCountThresholds(bucketCountThresholds);
+             B[][] topBucketsPerOrd = buildTopBucketsPerOrd(owningBucketOrds.length);
+             long[] otherDocCounts = new long[owningBucketOrds.length];
+             for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) {
+                 collectZeroDocEntriesIfNeeded(owningBucketOrds[ordIdx]);
+                 long bucketsInOrd = bucketOrds.bucketsInOrd(owningBucketOrds[ordIdx]);
+ 
+                 int size = (int) Math.min(bucketsInOrd, localBucketCountThresholds.getRequiredSize());
+                 PriorityQueue<B> ordered = buildPriorityQueue(size);
+                 B spare = null;
+                 BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrds[ordIdx]);
+                 Supplier<B> emptyBucketBuilder = emptyBucketBuilder(owningBucketOrds[ordIdx]);
+                 while (ordsEnum.next()) {
+                     long docCount = bucketDocCount(ordsEnum.ord());
+                     otherDocCounts[ordIdx] += docCount;
+                     if (docCount < localBucketCountThresholds.getMinDocCount()) {
+                         continue;
+                     }
+                     if (spare == null) {
+                         spare = emptyBucketBuilder.get();
+                     }
+                     updateBucket(spare, ordsEnum, docCount);
+                     spare = ordered.insertWithOverflow(spare);
+                 }
+ 
+                 // Get the top buckets
+                 B[] bucketsForOrd = buildBuckets(ordered.size());
+                 topBucketsPerOrd[ordIdx] = bucketsForOrd;
+                 for (int b = ordered.size() - 1; b >= 0; --b) {
+                     topBucketsPerOrd[ordIdx][b] = ordered.pop();
+                     otherDocCounts[ordIdx] -= topBucketsPerOrd[ordIdx][b].getDocCount();
+                 }
+             }
+ 
+             buildSubAggs(topBucketsPerOrd);
+ 
+             InternalAggregation[] result = new InternalAggregation[owningBucketOrds.length];
+             for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) {
+                 result[ordIdx] = buildResult(owningBucketOrds[ordIdx], otherDocCounts[ordIdx], topBucketsPerOrd[ordIdx]);
+             }
+             return result;
+         }
+ 
+         /**
+          * Short description of the collection mechanism added to the profile
+          * output to help with debugging.
+          */
+         abstract String describe();
+ 
+         /**
+          * Resolve the doc values to collect results of this type.
+          */
+         abstract SortedNumericDocValues getValues(LeafReaderContext ctx) throws IOException;
+ 
+         /**
+          * Wrap the "standard" numeric terms collector to collect any more
+          * information that this result type may need.
+          */
+         abstract LeafBucketCollector wrapCollector(LeafBucketCollector primary);
+ 
+         /**
+          * Build an array to hold the "top" buckets for each ordinal.
+          */
+         abstract B[][] buildTopBucketsPerOrd(int size);
+ 
+         /**
+          * Build an array of buckets for a particular ordinal. These arrays
+          * are assigned to the value returned by {@link #buildTopBucketsPerOrd}.
+          */
+         abstract B[] buildBuckets(int size);
+ 
+         /**
+          * Build a {@linkplain Supplier} that can be used to build "empty"
+          * buckets. Those buckets will then be {@link #updateBucket updated}
+          * for each collected bucket.
+          */
+         abstract Supplier<B> emptyBucketBuilder(long owningBucketOrd);
+ 
+         /**
+          * Update fields in {@code spare} to reflect information collected for
+          * this bucket ordinal.
+          */
+         abstract void updateBucket(B spare, BucketOrdsEnum ordsEnum, long docCount) throws IOException;
+ 
+         /**
+          * Build a {@link PriorityQueue} to sort the buckets. After we've
+          * collected all of the buckets we'll collect all entries in the queue.
+          */
+         abstract PriorityQueue<B> buildPriorityQueue(int size);
+ 
+         /**
+          * Build the sub-aggregations into the buckets. This will usually
+          * delegate to {@link #buildSubAggsForAllBuckets}.
+          */
+         abstract void buildSubAggs(B[][] topBucketsPerOrd) throws IOException;
+ 
+         /**
+          * Collect extra entries for "zero" hit documents if they were requested
+          * and required.
+          */
+         abstract void collectZeroDocEntriesIfNeeded(long owningBucketOrd) throws IOException;
+ 
+         /**
+          * Turn the buckets into an aggregation result.
+          */
+         abstract R buildResult(long owningBucketOrd, long otherDocCounts, B[] topBuckets);
+ 
+         /**
+          * Build an "empty" result. Only called if there isn't any data on this
+          * shard.
+          */
+         abstract R buildEmptyResult();
+     }
+ 
+     abstract class StandardTermsResultStrategy<R extends InternalMappedTerms<R, B>, B extends InternalTerms.Bucket<B>> extends
+         ResultStrategy<R, B> {
+         protected final boolean showTermDocCountError;
+ 
+         StandardTermsResultStrategy(boolean showTermDocCountError) {
+             this.showTermDocCountError = showTermDocCountError;
+         }
+ 
+         @Override
+         final LeafBucketCollector wrapCollector(LeafBucketCollector primary) {
+             return primary;
+         }
+ 
+         @Override
+         final PriorityQueue<B> buildPriorityQueue(int size) {
+             return new BucketPriorityQueue<>(size, partiallyBuiltBucketComparator);
+         }
+ 
+         @Override
+         final void buildSubAggs(B[][] topBucketsPerOrd) throws IOException {
+             buildSubAggsForAllBuckets(topBucketsPerOrd, b -> b.bucketOrd, (b, aggs) -> b.aggregations = aggs);
+         }
+ 
+         @Override
+         Supplier<B> emptyBucketBuilder(long owningBucketOrd) {
+             return this::buildEmptyBucket;
+         }
+ 
+         abstract B buildEmptyBucket();
+ 
+         @Override
+         final void collectZeroDocEntriesIfNeeded(long owningBucketOrd) throws IOException {
+             if (bucketCountThresholds.getMinDocCount() != 0) {
+                 return;
+             }
+             if (InternalOrder.isCountDesc(order) && bucketOrds.bucketsInOrd(owningBucketOrd) >= bucketCountThresholds.getRequiredSize()) {
+                 return;
+             }
+             // we need to fill-in the blanks
+             for (LeafReaderContext ctx : context.searcher().getTopReaderContext().leaves()) {
+                 SortedNumericDocValues values = getValues(ctx);
+                 for (int docId = 0; docId < ctx.reader().maxDoc(); ++docId) {
+                     if (values.advanceExact(docId)) {
+                         int valueCount = values.docValueCount();
+                         for (int v = 0; v < valueCount; ++v) {
+                             long value = values.nextValue();
+                             if (longFilter == null || longFilter.accept(value)) {
+                                 bucketOrds.add(owningBucketOrd, value);
+                             }
+                         }
+                     }
+                 }
+             }
+         }
+ 
+         @Override
+         public final void close() {}
+     }
+ 
+     class LongTermsResults extends StandardTermsResultStrategy<LongTerms, LongTerms.Bucket> {
+         LongTermsResults(boolean showTermDocCountError) {
+             super(showTermDocCountError);
+         }
+ 
+         @Override
+         String describe() {
+             return "long_terms";
+         }
+ 
+         @Override
+         SortedNumericDocValues getValues(LeafReaderContext ctx) throws IOException {
+             return valuesSource.longValues(ctx);
+         }
+ 
+         @Override
+         LongTerms.Bucket[][] buildTopBucketsPerOrd(int size) {
+             return new LongTerms.Bucket[size][];
+         }
+ 
+         @Override
+         LongTerms.Bucket[] buildBuckets(int size) {
+             return new LongTerms.Bucket[size];
+         }
+ 
+         @Override
+         LongTerms.Bucket buildEmptyBucket() {
+             return new LongTerms.Bucket(0, 0, null, showTermDocCountError, 0, format);
+         }
+ 
+         @Override
+         void updateBucket(LongTerms.Bucket spare, BucketOrdsEnum ordsEnum, long docCount) {
+             spare.term = ordsEnum.value();
+             spare.docCount = docCount;
+             spare.bucketOrd = ordsEnum.ord();
+         }
+ 
+         @Override
+         LongTerms buildResult(long owningBucketOrd, long otherDocCount, LongTerms.Bucket[] topBuckets) {
+             final BucketOrder reduceOrder;
+             if (isKeyOrder(order) == false) {
+                 reduceOrder = InternalOrder.key(true);
+                 Arrays.sort(topBuckets, reduceOrder.comparator());
+             } else {
+                 reduceOrder = order;
+             }
+             return new LongTerms(
+                 name,
+                 reduceOrder,
+                 order,
+                 metadata(),
+                 format,
+                 bucketCountThresholds.getShardSize(),
+                 showTermDocCountError,
+                 otherDocCount,
+                 List.of(topBuckets),
+                 0,
+                 bucketCountThresholds
+             );
+         }
+ 
+         @Override
+         LongTerms buildEmptyResult() {
+             return new LongTerms(
+                 name,
+                 order,
+                 order,
+                 metadata(),
+                 format,
+                 bucketCountThresholds.getShardSize(),
+                 showTermDocCountError,
+                 0,
+                 emptyList(),
+                 0,
+                 bucketCountThresholds
+             );
+         }
+     }
+ 
+     class DoubleTermsResults extends StandardTermsResultStrategy<DoubleTerms, DoubleTerms.Bucket> {
+ 
+         DoubleTermsResults(boolean showTermDocCountError) {
+             super(showTermDocCountError);
+         }
+ 
+         @Override
+         String describe() {
+             return "double_terms";
+         }
+ 
+         @Override
+         SortedNumericDocValues getValues(LeafReaderContext ctx) throws IOException {
+             return FieldData.toSortableLongBits(valuesSource.doubleValues(ctx));
+         }
+ 
+         @Override
+         DoubleTerms.Bucket[][] buildTopBucketsPerOrd(int size) {
+             return new DoubleTerms.Bucket[size][];
+         }
+ 
+         @Override
+         DoubleTerms.Bucket[] buildBuckets(int size) {
+             return new DoubleTerms.Bucket[size];
+         }
+ 
+         @Override
+         DoubleTerms.Bucket buildEmptyBucket() {
+             return new DoubleTerms.Bucket(0, 0, null, showTermDocCountError, 0, format);
+         }
+ 
+         @Override
+         void updateBucket(DoubleTerms.Bucket spare, BucketOrdsEnum ordsEnum, long docCount) {
+             spare.term = NumericUtils.sortableLongToDouble(ordsEnum.value());
+             spare.docCount = docCount;
+             spare.bucketOrd = ordsEnum.ord();
+         }
+ 
+         @Override
+         DoubleTerms buildResult(long owningBucketOrd, long otherDocCount, DoubleTerms.Bucket[] topBuckets) {
+             final BucketOrder reduceOrder;
+             if (isKeyOrder(order) == false) {
+                 reduceOrder = InternalOrder.key(true);
+                 Arrays.sort(topBuckets, reduceOrder.comparator());
+             } else {
+                 reduceOrder = order;
+             }
+             return new DoubleTerms(
+                 name,
+                 reduceOrder,
+                 order,
+                 metadata(),
+                 format,
+                 bucketCountThresholds.getShardSize(),
+                 showTermDocCountError,
+                 otherDocCount,
+                 List.of(topBuckets),
+                 0,
+                 bucketCountThresholds
+             );
+         }
+ 
+         @Override
+         DoubleTerms buildEmptyResult() {
+             return new DoubleTerms(
+                 name,
+                 order,
+                 order,
+                 metadata(),
+                 format,
+                 bucketCountThresholds.getShardSize(),
+                 showTermDocCountError,
+                 0,
+                 emptyList(),
+                 0,
+                 bucketCountThresholds
+             );
+         }
+     }
+ 
+     class UnsignedLongTermsResults extends StandardTermsResultStrategy<UnsignedLongTerms, UnsignedLongTerms.Bucket> {
+         UnsignedLongTermsResults(boolean showTermDocCountError) {
+             super(showTermDocCountError);
+         }
+ 
+         @Override
+         String describe() {
+             return "unsigned_long_terms";
+         }
+ 
+         @Override
+         SortedNumericDocValues getValues(LeafReaderContext ctx) throws IOException {
+             return valuesSource.longValues(ctx);
+         }
+ 
+         @Override
+         UnsignedLongTerms.Bucket[][] buildTopBucketsPerOrd(int size) {
+             return new UnsignedLongTerms.Bucket[size][];
+         }
+ 
+         @Override
+         UnsignedLongTerms.Bucket[] buildBuckets(int size) {
+             return new UnsignedLongTerms.Bucket[size];
+         }
+ 
+         @Override
+         UnsignedLongTerms.Bucket buildEmptyBucket() {
+             return new UnsignedLongTerms.Bucket(BigInteger.ZERO, 0, null, showTermDocCountError, 0, format);
+         }
+ 
+         @Override
+         void updateBucket(UnsignedLongTerms.Bucket spare, BucketOrdsEnum ordsEnum, long docCount) {
+             spare.term = Numbers.toUnsignedBigInteger(ordsEnum.value());
+             spare.docCount = docCount;
+             spare.bucketOrd = ordsEnum.ord();
+         }
+ 
+         @Override
+         UnsignedLongTerms buildResult(long owningBucketOrd, long otherDocCount, UnsignedLongTerms.Bucket[] topBuckets) {
+             final BucketOrder reduceOrder;
+             if (isKeyOrder(order) == false) {
+                 reduceOrder = InternalOrder.key(true);
+                 Arrays.sort(topBuckets, reduceOrder.comparator());
+             } else {
+                 reduceOrder = order;
+             }
+             return new UnsignedLongTerms(
+                 name,
+                 reduceOrder,
+                 order,
+                 metadata(),
+                 format,
+                 bucketCountThresholds.getShardSize(),
+                 showTermDocCountError,
+                 otherDocCount,
+                 List.of(topBuckets),
+                 0,
+                 bucketCountThresholds
+             );
+         }
+ 
+         @Override
+         UnsignedLongTerms buildEmptyResult() {
+             return new UnsignedLongTerms(
+                 name,
+                 order,
+                 order,
+                 metadata(),
+                 format,
+                 bucketCountThresholds.getShardSize(),
+                 showTermDocCountError,
+                 0,
+                 emptyList(),
+                 0,
+                 bucketCountThresholds
+             );
+         }
+     }
+ 
+     class SignificantLongTermsResults extends ResultStrategy<SignificantLongTerms, SignificantLongTerms.Bucket> {
+         private final BackgroundFrequencyForLong backgroundFrequencies;
+         private final long supersetSize;
+         private final SignificanceHeuristic significanceHeuristic;
+         private LongArray subsetSizes;
+ 
+         SignificantLongTermsResults(
+             SignificanceLookup significanceLookup,
+             SignificanceHeuristic significanceHeuristic,
+             CardinalityUpperBound cardinality
+         ) {
+             backgroundFrequencies = significanceLookup.longLookup(context.bigArrays(), cardinality);
+             supersetSize = significanceLookup.supersetSize();
+             this.significanceHeuristic = significanceHeuristic;
+             subsetSizes = context.bigArrays().newLongArray(1, true);
+         }
+ 
+         @Override
+         SortedNumericDocValues getValues(LeafReaderContext ctx) throws IOException {
+             return valuesSource.longValues(ctx);
+         }
+ 
+         @Override
+         String describe() {
+             return "significant_terms";
+         }
+ 
+         @Override
+         LeafBucketCollector wrapCollector(LeafBucketCollector primary) {
+             return new LeafBucketCollectorBase(primary, null) {
+                 @Override
+                 public void collect(int doc, long owningBucketOrd) throws IOException {
+                     super.collect(doc, owningBucketOrd);
+                     subsetSizes = context.bigArrays().grow(subsetSizes, owningBucketOrd + 1);
+                     subsetSizes.increment(owningBucketOrd, 1);
+                 }
+             };
+         }
+ 
+         @Override
+         SignificantLongTerms.Bucket[][] buildTopBucketsPerOrd(int size) {
+             return new SignificantLongTerms.Bucket[size][];
+         }
+ 
+         @Override
+         SignificantLongTerms.Bucket[] buildBuckets(int size) {
+             return new SignificantLongTerms.Bucket[size];
+         }
+ 
+         @Override
+         Supplier<SignificantLongTerms.Bucket> emptyBucketBuilder(long owningBucketOrd) {
+             long subsetSize = subsetSizes.get(owningBucketOrd);
+             return () -> new SignificantLongTerms.Bucket(0, subsetSize, 0, supersetSize, 0, null, format, 0);
+         }
+ 
+         @Override
+         void updateBucket(SignificantLongTerms.Bucket spare, BucketOrdsEnum ordsEnum, long docCount) throws IOException {
+             spare.term = ordsEnum.value();
+             spare.subsetDf = docCount;
+             spare.supersetDf = backgroundFrequencies.freq(spare.term);
+             spare.bucketOrd = ordsEnum.ord();
+             // During shard-local down-selection we use subset/superset stats that are for this shard only
+             // Back at the central reducer these properties will be updated with global stats
+             spare.updateScore(significanceHeuristic);
+         }
+ 
+         @Override
+         PriorityQueue<SignificantLongTerms.Bucket> buildPriorityQueue(int size) {
+             return new BucketSignificancePriorityQueue<>(size);
+         }
+ 
+         @Override
+         void buildSubAggs(SignificantLongTerms.Bucket[][] topBucketsPerOrd) throws IOException {
+             buildSubAggsForAllBuckets(topBucketsPerOrd, b -> b.bucketOrd, (b, aggs) -> b.aggregations = aggs);
+         }
+ 
+         @Override
+         void collectZeroDocEntriesIfNeeded(long owningBucketOrd) throws IOException {}
+ 
+         @Override
+         SignificantLongTerms buildResult(long owningBucketOrd, long otherDocCount, SignificantLongTerms.Bucket[] topBuckets) {
+             SignificantLongTerms significantLongTerms = new SignificantLongTerms(
+                 name,
+                 metadata(),
+                 format,
+                 subsetSizes.get(owningBucketOrd),
+                 supersetSize,
+                 significanceHeuristic,
+                 List.of(topBuckets),
+                 bucketCountThresholds
+             );
+             return significantLongTerms;
+         }
+ 
+         @Override
+         SignificantLongTerms buildEmptyResult() {
+             // We need to account for the significance of a miss in our global stats - provide corpus size as context
+             ContextIndexSearcher searcher = context.searcher();
+             IndexReader topReader = searcher.getIndexReader();
+             int supersetSize = topReader.numDocs();
+             return new SignificantLongTerms(
+                 name,
+                 metadata(),
+                 format,
+                 0,
+                 supersetSize,
+                 significanceHeuristic,
+                 emptyList(),
+                 bucketCountThresholds
+             );
+         }
+ 
+         @Override
+         public void close() {
+             Releasables.close(backgroundFrequencies, subsetSizes);
+         }
+     }
+ 
+ }
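ResultStrategy.buildAggregations above is the shard-side top-N selection: every surviving bucket passes through a size-bounded priority queue, and the other doc count starts as the running total with the winners subtracted back out. Here is a self-contained sketch of that accounting using java.util.PriorityQueue (Lucene's PriorityQueue.insertWithOverflow additionally returns the evicted element so the aggregator can recycle it as the next spare; all names below are illustrative):

    import java.util.ArrayList;
    import java.util.Comparator;
    import java.util.List;
    import java.util.PriorityQueue;

    public class TopBucketsSketch {
        record Bucket(long term, long docCount) {}
        record TopResult(List<Bucket> top, long otherDocCount) {}

        // Keep the `size` buckets with the highest doc counts; every other
        // bucket's count is folded into otherDocCount, as buildAggregations does.
        static TopResult topN(List<Bucket> candidates, int size) {
            // Min-heap on docCount: the weakest of the current top-N sits on top.
            PriorityQueue<Bucket> ordered = new PriorityQueue<>(Comparator.comparingLong(Bucket::docCount));
            long total = 0;
            for (Bucket b : candidates) {
                total += b.docCount();
                ordered.offer(b);
                if (ordered.size() > size) {
                    ordered.poll(); // evict the smallest of the current top-N
                }
            }
            List<Bucket> top = new ArrayList<>(ordered);
            top.sort(Comparator.comparingLong(Bucket::docCount).reversed());
            long kept = top.stream().mapToLong(Bucket::docCount).sum();
            return new TopResult(top, total - kept);
        }

        public static void main(String[] args) {
            TopResult r = topN(List.of(
                new Bucket(1, 10), new Bucket(2, 7), new Bucket(3, 3), new Bucket(4, 1)), 2);
            System.out.println(r.top() + " other=" + r.otherDocCount()); // keeps 10 and 7, other=4
        }
    }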
+
+
+
diff --git a/htmlReport/ns-1/sources/source-19.html b/htmlReport/ns-1/sources/source-19.html
new file mode 100644
index 0000000000000..2dace149873ac
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-19.html
@@ -0,0 +1,241 @@
+ Coverage Report > ParsedDoubleTerms

Coverage Summary for Class: ParsedDoubleTerms (org.opensearch.search.aggregations.bucket.terms)

+ Class                             Method, %      Line, %
+ ParsedDoubleTerms                 0%  (0/4)      0%  (0/8)
+ ParsedDoubleTerms$ParsedBucket    0%  (0/6)      0%  (0/14)
+ Total                             0%  (0/10)     0%  (0/22)
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ 
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.opensearch.core.xcontent.ObjectParser;
+ import org.opensearch.core.xcontent.XContentBuilder;
+ import org.opensearch.core.xcontent.XContentParser;
+ 
+ import java.io.IOException;
+ 
+ /**
+  * A double numeric terms result parsed between nodes
+  *
+  * @opensearch.internal
+  */
+ public class ParsedDoubleTerms extends ParsedTerms {
+ 
+     @Override
+     public String getType() {
+         return DoubleTerms.NAME;
+     }
+ 
+     private static final ObjectParser<ParsedDoubleTerms, Void> PARSER = new ObjectParser<>(
+         ParsedDoubleTerms.class.getSimpleName(),
+         true,
+         ParsedDoubleTerms::new
+     );
+     static {
+         declareParsedTermsFields(PARSER, ParsedBucket::fromXContent);
+     }
+ 
+     public static ParsedDoubleTerms fromXContent(XContentParser parser, String name) throws IOException {
+         ParsedDoubleTerms aggregation = PARSER.parse(parser, null);
+         aggregation.setName(name);
+         return aggregation;
+     }
+ 
+     /**
+      * Parsed bucket for double terms
+      *
+      * @opensearch.internal
+      */
+     public static class ParsedBucket extends ParsedTerms.ParsedBucket {
+ 
+         private Double key;
+ 
+         @Override
+         public Object getKey() {
+             return key;
+         }
+ 
+         @Override
+         public String getKeyAsString() {
+             String keyAsString = super.getKeyAsString();
+             if (keyAsString != null) {
+                 return keyAsString;
+             }
+             if (key != null) {
+                 return Double.toString(key);
+             }
+             return null;
+         }
+ 
+         public Number getKeyAsNumber() {
+             return key;
+         }
+ 
+         @Override
+         protected XContentBuilder keyToXContent(XContentBuilder builder) throws IOException {
+             builder.field(CommonFields.KEY.getPreferredName(), key);
+             if (super.getKeyAsString() != null) {
+                 builder.field(CommonFields.KEY_AS_STRING.getPreferredName(), getKeyAsString());
+             }
+             return builder;
+         }
+ 
+         static ParsedBucket fromXContent(XContentParser parser) throws IOException {
+             return parseTermsBucketXContent(parser, ParsedBucket::new, (p, bucket) -> bucket.key = p.doubleValue());
+         }
+     }
+ }
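Every Parsed* bucket in this family implements the same key-as-string precedence, which is easy to misread in the class above: the formatted string shipped in the response wins, the raw key is the fallback, and null means the bucket carried neither. Restated as a standalone method (class and parameter names are illustrative):

    public class KeyAsStringSketch {
        // Same precedence as ParsedDoubleTerms.ParsedBucket#getKeyAsString.
        static String keyAsString(String keyAsStringField, Double key) {
            if (keyAsStringField != null) {
                return keyAsStringField; // explicit "key_as_string" from the response
            }
            if (key != null) {
                return Double.toString(key); // fall back to rendering the raw key
            }
            return null;
        }

        public static void main(String[] args) {
            System.out.println(keyAsString("1.50", 1.5)); // 1.50 (formatted value wins)
            System.out.println(keyAsString(null, 1.5));   // 1.5  (raw key fallback)
        }
    }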
+
+
+
diff --git a/htmlReport/ns-1/sources/source-1a.html b/htmlReport/ns-1/sources/source-1a.html
new file mode 100644
index 0000000000000..85bd552c4b3a4
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-1a.html
@@ -0,0 +1,241 @@
+ Coverage Report > ParsedLongRareTerms

Coverage Summary for Class: ParsedLongRareTerms (org.opensearch.search.aggregations.bucket.terms)

+ Class                               Method, %      Line, %
+ ParsedLongRareTerms                 0%  (0/4)      0%  (0/8)
+ ParsedLongRareTerms$ParsedBucket    0%  (0/6)      0%  (0/14)
+ Total                               0%  (0/10)     0%  (0/22)
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ 
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.opensearch.core.xcontent.ObjectParser;
+ import org.opensearch.core.xcontent.XContentBuilder;
+ import org.opensearch.core.xcontent.XContentParser;
+ 
+ import java.io.IOException;
+ 
+ /**
+  * A long numeric term result parsed between nodes
+  *
+  * @opensearch.internal
+  */
+ public class ParsedLongRareTerms extends ParsedRareTerms {
+     @Override
+     public String getType() {
+         return LongRareTerms.NAME;
+     }
+ 
+     private static final ObjectParser<ParsedLongRareTerms, Void> PARSER = new ObjectParser<>(
+         ParsedLongRareTerms.class.getSimpleName(),
+         true,
+         ParsedLongRareTerms::new
+     );
+ 
+     static {
+         declareParsedTermsFields(PARSER, ParsedBucket::fromXContent);
+     }
+ 
+     public static ParsedLongRareTerms fromXContent(XContentParser parser, String name) throws IOException {
+         ParsedLongRareTerms aggregation = PARSER.parse(parser, null);
+         aggregation.setName(name);
+         return aggregation;
+     }
+ 
+     /**
+      * Parsed bucket for rare long values
+      *
+      * @opensearch.internal
+      */
+     public static class ParsedBucket extends ParsedRareTerms.ParsedBucket {
+ 
+         private Long key;
+ 
+         @Override
+         public Object getKey() {
+             return key;
+         }
+ 
+         @Override
+         public String getKeyAsString() {
+             String keyAsString = super.getKeyAsString();
+             if (keyAsString != null) {
+                 return keyAsString;
+             }
+             if (key != null) {
+                 return Long.toString(key);
+             }
+             return null;
+         }
+ 
+         public Number getKeyAsNumber() {
+             return key;
+         }
+ 
+         @Override
+         protected XContentBuilder keyToXContent(XContentBuilder builder) throws IOException {
+             builder.field(CommonFields.KEY.getPreferredName(), key);
+             if (super.getKeyAsString() != null) {
+                 builder.field(CommonFields.KEY_AS_STRING.getPreferredName(), getKeyAsString());
+             }
+             return builder;
+         }
+ 
+         static ParsedLongRareTerms.ParsedBucket fromXContent(XContentParser parser) throws IOException {
+             return parseRareTermsBucketXContent(parser, ParsedLongRareTerms.ParsedBucket::new, (p, bucket) -> bucket.key = p.longValue());
+         }
+     }
+ }
+
+
+
diff --git a/htmlReport/ns-1/sources/source-1b.html b/htmlReport/ns-1/sources/source-1b.html
new file mode 100644
index 0000000000000..180cc8eb613eb
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-1b.html
@@ -0,0 +1,241 @@
+ Coverage Report > ParsedLongTerms

Coverage Summary for Class: ParsedLongTerms (org.opensearch.search.aggregations.bucket.terms)

+ Class                           Method, %      Line, %
+ ParsedLongTerms                 0%  (0/4)      0%  (0/8)
+ ParsedLongTerms$ParsedBucket    0%  (0/6)      0%  (0/14)
+ Total                           0%  (0/10)     0%  (0/22)
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ 
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.opensearch.core.xcontent.ObjectParser;
+ import org.opensearch.core.xcontent.XContentBuilder;
+ import org.opensearch.core.xcontent.XContentParser;
+ 
+ import java.io.IOException;
+ 
+ /**
+  * A long term agg result parsed between nodes
+  *
+  * @opensearch.internal
+  */
+ public class ParsedLongTerms extends ParsedTerms {
+ 
+     @Override
+     public String getType() {
+         return LongTerms.NAME;
+     }
+ 
+     private static final ObjectParser<ParsedLongTerms, Void> PARSER = new ObjectParser<>(
+         ParsedLongTerms.class.getSimpleName(),
+         true,
+         ParsedLongTerms::new
+     );
+     static {
+         declareParsedTermsFields(PARSER, ParsedBucket::fromXContent);
+     }
+ 
+     public static ParsedLongTerms fromXContent(XContentParser parser, String name) throws IOException {
+         ParsedLongTerms aggregation = PARSER.parse(parser, null);
+         aggregation.setName(name);
+         return aggregation;
+     }
+ 
+     /**
+      * Parsed bucket for long term values
+      *
+      * @opensearch.internal
+      */
+     public static class ParsedBucket extends ParsedTerms.ParsedBucket {
+ 
+         private Long key;
+ 
+         @Override
+         public Object getKey() {
+             return key;
+         }
+ 
+         @Override
+         public String getKeyAsString() {
+             String keyAsString = super.getKeyAsString();
+             if (keyAsString != null) {
+                 return keyAsString;
+             }
+             if (key != null) {
+                 return Long.toString(key);
+             }
+             return null;
+         }
+ 
+         public Number getKeyAsNumber() {
+             return key;
+         }
+ 
+         @Override
+         protected XContentBuilder keyToXContent(XContentBuilder builder) throws IOException {
+             builder.field(CommonFields.KEY.getPreferredName(), key);
+             if (super.getKeyAsString() != null) {
+                 builder.field(CommonFields.KEY_AS_STRING.getPreferredName(), getKeyAsString());
+             }
+             return builder;
+         }
+ 
+         static ParsedBucket fromXContent(XContentParser parser) throws IOException {
+             return parseTermsBucketXContent(parser, ParsedBucket::new, (p, bucket) -> bucket.key = p.longValue());
+         }
+     }
+ }
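getType() returning LongTerms.NAME is what plugs this class into the typed-keys convention: with ?typed_keys=true each aggregation in the response is keyed as type#name (for example lterms#my_agg), and that prefix is how the registry selects ParsedLongTerms as the parser. A tiny sketch of the split, assuming the # delimiter used by Aggregation.TYPED_KEYS_DELIMITER (names illustrative):

    public class TypedKeysSketch {
        // "lterms#my_agg" -> type "lterms", aggregation name "my_agg"
        static String[] splitTypedKey(String typedKey) {
            int delim = typedKey.indexOf('#');
            if (delim < 0) {
                throw new IllegalArgumentException("not a typed key: " + typedKey);
            }
            return new String[] { typedKey.substring(0, delim), typedKey.substring(delim + 1) };
        }

        public static void main(String[] args) {
            String[] parts = splitTypedKey("lterms#my_agg");
            System.out.println(parts[0] + " / " + parts[1]); // lterms / my_agg
        }
    }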
+
+
+
diff --git a/htmlReport/ns-1/sources/source-1c.html b/htmlReport/ns-1/sources/source-1c.html
new file mode 100644
index 0000000000000..792a5485689ff
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-1c.html
@@ -0,0 +1,217 @@
+ Coverage Report > ParsedMultiTerms

Coverage Summary for Class: ParsedMultiTerms (org.opensearch.search.aggregations.bucket.terms)

+ Class                            Method, %      Line, %
+ ParsedMultiTerms                 0%  (0/4)      0%  (0/8)
+ ParsedMultiTerms$ParsedBucket    0%  (0/6)      0%  (0/14)
+ Total                            0%  (0/10)     0%  (0/22)
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.opensearch.core.xcontent.ObjectParser;
+ import org.opensearch.core.xcontent.XContentBuilder;
+ import org.opensearch.core.xcontent.XContentParser;
+ 
+ import java.io.IOException;
+ import java.util.List;
+ 
+ /**
+  * A multi terms result parsed between nodes
+  *
+  * @opensearch.internal
+  */
+ public class ParsedMultiTerms extends ParsedTerms {
+     @Override
+     public String getType() {
+         return MultiTermsAggregationBuilder.NAME;
+     }
+ 
+     private static final ObjectParser<ParsedMultiTerms, Void> PARSER = new ObjectParser<>(
+         ParsedMultiTerms.class.getSimpleName(),
+         true,
+         ParsedMultiTerms::new
+     );
+     static {
+         declareParsedTermsFields(PARSER, ParsedBucket::fromXContent);
+     }
+ 
+     public static ParsedMultiTerms fromXContent(XContentParser parser, String name) throws IOException {
+         ParsedMultiTerms aggregation = PARSER.parse(parser, null);
+         aggregation.setName(name);
+         return aggregation;
+     }
+ 
+     /**
+      * Parsed bucket for multi terms
+      *
+      * @opensearch.internal
+      */
+     public static class ParsedBucket extends ParsedTerms.ParsedBucket {
+ 
+         private List<Object> key;
+ 
+         @Override
+         public List<Object> getKey() {
+             return key;
+         }
+ 
+         @Override
+         public String getKeyAsString() {
+             String keyAsString = super.getKeyAsString();
+             if (keyAsString != null) {
+                 return keyAsString;
+             }
+             if (key != null) {
+                 return key.toString();
+             }
+             return null;
+         }
+ 
+         public Number getKeyAsNumber() {
+             throw new UnsupportedOperationException("not implemented");
+         }
+ 
+         @Override
+         protected XContentBuilder keyToXContent(XContentBuilder builder) throws IOException {
+             builder.field(CommonFields.KEY.getPreferredName(), key);
+             if (super.getKeyAsString() != null) {
+                 builder.field(CommonFields.KEY_AS_STRING.getPreferredName(), getKeyAsString());
+             }
+             return builder;
+         }
+ 
+         static ParsedBucket fromXContent(XContentParser parser) throws IOException {
+             return parseTermsBucketXContent(parser, ParsedBucket::new, (p, bucket) -> { bucket.key = p.list(); });
+         }
+     }
+ }
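Unlike the single-valued buckets above, a multi_terms bucket key is a List<Object>, and its key-as-string fallback is simply List#toString. A one-line illustration (the key values are made up):

    import java.util.List;

    public class MultiTermsKeySketch {
        public static void main(String[] args) {
            // ParsedMultiTerms.ParsedBucket#getKeyAsString falls back to key.toString()
            List<Object> key = List.of("error", 500);
            System.out.println(key); // [error, 500]
        }
    }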
+
+
+
diff --git a/htmlReport/ns-1/sources/source-1d.html b/htmlReport/ns-1/sources/source-1d.html
new file mode 100644
index 0000000000000..0b07678296651
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-1d.html
@@ -0,0 +1,270 @@
+ Coverage Report > ParsedRareTerms

Coverage Summary for Class: ParsedRareTerms (org.opensearch.search.aggregations.bucket.terms)

+ Class                           Method, %      Line, %
+ ParsedRareTerms                 0%  (0/5)      0%  (0/14)
+ ParsedRareTerms$ParsedBucket    0%  (0/3)      0%  (0/25)
+ Total                           0%  (0/8)      0%  (0/39)
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ 
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.opensearch.common.CheckedBiConsumer;
+ import org.opensearch.common.CheckedFunction;
+ import org.opensearch.core.xcontent.ObjectParser;
+ import org.opensearch.core.xcontent.XContentBuilder;
+ import org.opensearch.core.xcontent.XContentParser;
+ import org.opensearch.core.xcontent.XContentParserUtils;
+ import org.opensearch.search.aggregations.Aggregation;
+ import org.opensearch.search.aggregations.Aggregations;
+ import org.opensearch.search.aggregations.ParsedMultiBucketAggregation;
+ 
+ import java.io.IOException;
+ import java.util.ArrayList;
+ import java.util.List;
+ import java.util.function.Supplier;
+ 
+ /**
+  * A rare term result parsed between nodes
+  *
+  * @opensearch.internal
+  */
+ public abstract class ParsedRareTerms extends ParsedMultiBucketAggregation<ParsedRareTerms.ParsedBucket> implements RareTerms {
+     @Override
+     public List<? extends RareTerms.Bucket> getBuckets() {
+         return buckets;
+     }
+ 
+     @Override
+     public RareTerms.Bucket getBucketByKey(String term) {
+         for (RareTerms.Bucket bucket : getBuckets()) {
+             if (bucket.getKeyAsString().equals(term)) {
+                 return bucket;
+             }
+         }
+         return null;
+     }
+ 
+     @Override
+     protected XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
+         builder.startArray(CommonFields.BUCKETS.getPreferredName());
+         for (RareTerms.Bucket bucket : getBuckets()) {
+             bucket.toXContent(builder, params);
+         }
+         builder.endArray();
+         return builder;
+     }
+ 
+     static void declareParsedTermsFields(
+         final ObjectParser<? extends ParsedRareTerms, Void> objectParser,
+         final CheckedFunction<XContentParser, ParsedBucket, IOException> bucketParser
+     ) {
+         declareMultiBucketAggregationFields(objectParser, bucketParser::apply, bucketParser::apply);
+     }
+ 
+     /**
+      * Parsed Bucket for rare term values
+      *
+      * @opensearch.internal
+      */
+     public abstract static class ParsedBucket extends ParsedMultiBucketAggregation.ParsedBucket implements RareTerms.Bucket {
+ 
+         @Override
+         public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+             builder.startObject();
+             keyToXContent(builder);
+             builder.field(CommonFields.DOC_COUNT.getPreferredName(), getDocCount());
+             getAggregations().toXContentInternal(builder, params);
+             builder.endObject();
+             return builder;
+         }
+ 
+         static <B extends ParsedBucket> B parseRareTermsBucketXContent(
+             final XContentParser parser,
+             final Supplier<B> bucketSupplier,
+             final CheckedBiConsumer<XContentParser, B, IOException> keyConsumer
+         ) throws IOException {
+ 
+             final B bucket = bucketSupplier.get();
+             final List<Aggregation> aggregations = new ArrayList<>();
+ 
+             XContentParser.Token token;
+             String currentFieldName = parser.currentName();
+             while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+                 if (token == XContentParser.Token.FIELD_NAME) {
+                     currentFieldName = parser.currentName();
+                 } else if (token.isValue()) {
+                     if (CommonFields.KEY_AS_STRING.getPreferredName().equals(currentFieldName)) {
+                         bucket.setKeyAsString(parser.text());
+                     } else if (CommonFields.KEY.getPreferredName().equals(currentFieldName)) {
+                         keyConsumer.accept(parser, bucket);
+                     } else if (CommonFields.DOC_COUNT.getPreferredName().equals(currentFieldName)) {
+                         bucket.setDocCount(parser.longValue());
+                     }
+                 } else if (token == XContentParser.Token.START_OBJECT) {
+                     XContentParserUtils.parseTypedKeysObject(
+                         parser,
+                         Aggregation.TYPED_KEYS_DELIMITER,
+                         Aggregation.class,
+                         aggregations::add
+                     );
+                 }
+             }
+             bucket.setAggregations(new Aggregations(aggregations));
+             return bucket;
+         }
+     }
+ }
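parseRareTermsBucketXContent is the standard streaming x-content walk: remember the most recent FIELD_NAME token, dispatch the next value token on that name, and hand nested objects off to the sub-aggregation parser. The sketch below restates that remember-then-dispatch loop over a toy token list so it runs without the OpenSearch x-content machinery (the token model and all names are invented for illustration):

    import java.util.List;

    public class BucketTokenWalkSketch {
        // Stand-in token stream: FIELD tokens carry a field name, VALUE tokens a scalar,
        // mimicking XContentParser's FIELD_NAME / isValue() dispatch.
        record Token(String kind, String text) {}

        public static void main(String[] args) {
            List<Token> tokens = List.of(
                new Token("FIELD", "key"), new Token("VALUE", "42"),
                new Token("FIELD", "doc_count"), new Token("VALUE", "3"));
            Long key = null;
            long docCount = 0;
            String currentFieldName = null;
            for (Token t : tokens) {
                if (t.kind().equals("FIELD")) {
                    currentFieldName = t.text(); // remember; dispatch happens on the next value
                } else if ("key".equals(currentFieldName)) {
                    key = Long.parseLong(t.text());
                } else if ("doc_count".equals(currentFieldName)) {
                    docCount = Long.parseLong(t.text());
                }
            }
            System.out.println("key=" + key + " doc_count=" + docCount); // key=42 doc_count=3
        }
    }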
+
+
+
diff --git a/htmlReport/ns-1/sources/source-1e.html b/htmlReport/ns-1/sources/source-1e.html
new file mode 100644
index 0000000000000..f0094a67d862c
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-1e.html
@@ -0,0 +1,236 @@
+ Coverage Report > ParsedSignificantLongTerms

Coverage Summary for Class: ParsedSignificantLongTerms (org.opensearch.search.aggregations.bucket.terms)

+ Class                                      Method, %      Line, %
+ ParsedSignificantLongTerms                 0%  (0/4)      0%  (0/6)
+ ParsedSignificantLongTerms$ParsedBucket    0%  (0/6)      0%  (0/12)
+ Total                                      0%  (0/10)     0%  (0/18)
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ 
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.opensearch.core.xcontent.ObjectParser;
+ import org.opensearch.core.xcontent.XContentBuilder;
+ import org.opensearch.core.xcontent.XContentParser;
+ 
+ import java.io.IOException;
+ 
+ /**
+  * A significant long terms result parsed between nodes
+  *
+  * @opensearch.internal
+  */
+ public class ParsedSignificantLongTerms extends ParsedSignificantTerms {
+ 
+     @Override
+     public String getType() {
+         return SignificantLongTerms.NAME;
+     }
+ 
+     private static final ObjectParser<ParsedSignificantLongTerms, Void> PARSER = new ObjectParser<>(
+         ParsedSignificantLongTerms.class.getSimpleName(),
+         true,
+         ParsedSignificantLongTerms::new
+     );
+     static {
+         declareParsedSignificantTermsFields(PARSER, ParsedBucket::fromXContent);
+     }
+ 
+     public static ParsedSignificantLongTerms fromXContent(XContentParser parser, String name) throws IOException {
+         return parseSignificantTermsXContent(() -> PARSER.parse(parser, null), name);
+     }
+ 
+     /**
+      * Parsed bucket for significant long values
+      *
+      * @opensearch.internal
+      */
+     public static class ParsedBucket extends ParsedSignificantTerms.ParsedBucket {
+ 
+         private Long key;
+ 
+         @Override
+         public Object getKey() {
+             return key;
+         }
+ 
+         @Override
+         public String getKeyAsString() {
+             String keyAsString = super.getKeyAsString();
+             if (keyAsString != null) {
+                 return keyAsString;
+             }
+             return Long.toString(key);
+         }
+ 
+         public Number getKeyAsNumber() {
+             return key;
+         }
+ 
+         @Override
+         protected XContentBuilder keyToXContent(XContentBuilder builder) throws IOException {
+             builder.field(CommonFields.KEY.getPreferredName(), key);
+             if (super.getKeyAsString() != null) {
+                 builder.field(CommonFields.KEY_AS_STRING.getPreferredName(), getKeyAsString());
+             }
+             return builder;
+         }
+ 
+         static ParsedBucket fromXContent(XContentParser parser) throws IOException {
+             return parseSignificantTermsBucketXContent(parser, new ParsedBucket(), (p, bucket) -> bucket.key = p.longValue());
+         }
+     }
+ }
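+ 
+ // For reference (values hypothetical): keyToXContent above always writes "key"
+ // and re-emits "key_as_string" only when a formatted string was parsed, e.g.
+ //     { "key": 1622505600000, "key_as_string": "1622505600000" }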
+
+
+
diff --git a/htmlReport/ns-1/sources/source-1f.html b/htmlReport/ns-1/sources/source-1f.html
new file mode 100644
index 0000000000000..265a63e7d51e9
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-1f.html
@@ -0,0 +1,241 @@
+ Coverage Report > ParsedSignificantStringTerms

+ Coverage Summary for Class: ParsedSignificantStringTerms (org.opensearch.search.aggregations.bucket.terms)
+ 
+ Class                                        Method, %     Line, %
+ ParsedSignificantStringTerms                 0% (0/4)      0% (0/6)
+ ParsedSignificantStringTerms$ParsedBucket    0% (0/7)      0% (0/13)
+ Total                                        0% (0/11)     0% (0/19)
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ 
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.apache.lucene.util.BytesRef;
+ import org.opensearch.core.xcontent.ObjectParser;
+ import org.opensearch.core.xcontent.XContentBuilder;
+ import org.opensearch.core.xcontent.XContentParser;
+ 
+ import java.io.IOException;
+ import java.nio.CharBuffer;
+ 
+ /**
+  * A significant string terms result parsed between nodes
+  *
+  * @opensearch.internal
+  */
+ public class ParsedSignificantStringTerms extends ParsedSignificantTerms {
+ 
+     @Override
+     public String getType() {
+         return SignificantStringTerms.NAME;
+     }
+ 
+     private static final ObjectParser<ParsedSignificantStringTerms, Void> PARSER = new ObjectParser<>(
+         ParsedSignificantStringTerms.class.getSimpleName(),
+         true,
+         ParsedSignificantStringTerms::new
+     );
+     static {
+         declareParsedSignificantTermsFields(PARSER, ParsedBucket::fromXContent);
+     }
+ 
+     public static ParsedSignificantStringTerms fromXContent(XContentParser parser, String name) throws IOException {
+         return parseSignificantTermsXContent(() -> PARSER.parse(parser, null), name);
+     }
+ 
+     /**
+      * Parsed bucket for significant string values
+      *
+      * @opensearch.internal
+      */
+     public static class ParsedBucket extends ParsedSignificantTerms.ParsedBucket {
+ 
+         private BytesRef key;
+ 
+         @Override
+         public Object getKey() {
+             return getKeyAsString();
+         }
+ 
+         @Override
+         public String getKeyAsString() {
+             String keyAsString = super.getKeyAsString();
+             if (keyAsString != null) {
+                 return keyAsString;
+             }
+             return key.utf8ToString();
+         }
+ 
+         public Number getKeyAsNumber() {
+             return Double.parseDouble(key.utf8ToString());
+         }
+ 
+         @Override
+         protected XContentBuilder keyToXContent(XContentBuilder builder) throws IOException {
+             return builder.field(CommonFields.KEY.getPreferredName(), getKey());
+         }
+ 
+         static ParsedBucket fromXContent(XContentParser parser) throws IOException {
+             return parseSignificantTermsBucketXContent(parser, new ParsedBucket(), (p, bucket) -> {
+                 CharBuffer cb = p.charBufferOrNull();
+                 if (cb == null) {
+                     bucket.key = null;
+                 } else {
+                     bucket.key = new BytesRef(cb);
+                 }
+             });
+         }
+     }
+ }
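+ 
+ // Note: getKeyAsNumber() above funnels the UTF-8 key through Double.parseDouble,
+ // so a bucket keyed "42" yields 42.0, while a non-numeric key would throw
+ // NumberFormatException (example keys are hypothetical).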
+
+
+
diff --git a/htmlReport/ns-1/sources/source-2.html b/htmlReport/ns-1/sources/source-2.html
new file mode 100644
index 0000000000000..e694da75f8440
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-2.html
@@ -0,0 +1,210 @@
+ Coverage Report > AbstractStringTermsAggregator

+ Coverage Summary for Class: AbstractStringTermsAggregator (org.opensearch.search.aggregations.bucket.terms)
+ 
+ Class                            Class, %      Method, %      Line, %
+ AbstractStringTermsAggregator    100% (1/1)    33.3% (1/3)    16.7% (2/12)
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ 
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.apache.lucene.index.IndexReader;
+ import org.opensearch.search.DocValueFormat;
+ import org.opensearch.search.aggregations.Aggregator;
+ import org.opensearch.search.aggregations.AggregatorFactories;
+ import org.opensearch.search.aggregations.BucketOrder;
+ import org.opensearch.search.aggregations.bucket.terms.heuristic.SignificanceHeuristic;
+ import org.opensearch.search.internal.ContextIndexSearcher;
+ import org.opensearch.search.internal.SearchContext;
+ 
+ import java.io.IOException;
+ import java.util.Map;
+ 
+ import static java.util.Collections.emptyList;
+ 
+ /**
+  * Base Aggregator to collect all docs that contain significant terms
+  *
+  * @opensearch.internal
+  */
+ abstract class AbstractStringTermsAggregator extends TermsAggregator {
+ 
+     protected final boolean showTermDocCountError;
+ 
+     AbstractStringTermsAggregator(
+         String name,
+         AggregatorFactories factories,
+         SearchContext context,
+         Aggregator parent,
+         BucketOrder order,
+         DocValueFormat format,
+         BucketCountThresholds bucketCountThresholds,
+         SubAggCollectionMode subAggCollectMode,
+         boolean showTermDocCountError,
+         Map<String, Object> metadata
+     ) throws IOException {
+         super(name, factories, context, parent, bucketCountThresholds, order, format, subAggCollectMode, metadata);
+         this.showTermDocCountError = showTermDocCountError;
+     }
+ 
+     protected StringTerms buildEmptyTermsAggregation() {
+         return new StringTerms(
+             name,
+             order,
+             order,
+             metadata(),
+             format,
+             bucketCountThresholds.getShardSize(),
+             showTermDocCountError,
+             0,
+             emptyList(),
+             0,
+             bucketCountThresholds
+         );
+     }
+ 
+     protected SignificantStringTerms buildEmptySignificantTermsAggregation(long subsetSize, SignificanceHeuristic significanceHeuristic) {
+         // We need to account for the significance of a miss in our global stats - provide corpus size as context
+         ContextIndexSearcher searcher = context.searcher();
+         IndexReader topReader = searcher.getIndexReader();
+         int supersetSize = topReader.numDocs();
+         return new SignificantStringTerms(
+             name,
+             metadata(),
+             format,
+             subsetSize,
+             supersetSize,
+             significanceHeuristic,
+             emptyList(),
+             bucketCountThresholds
+         );
+     }
+ }
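+ 
+ // Note: buildEmptySignificantTermsAggregation sizes the superset from the
+ // top-level reader's numDocs(), so significance heuristics still see the full
+ // corpus size even when this shard contributed no terms.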
+
+
+
diff --git a/htmlReport/ns-1/sources/source-20.html b/htmlReport/ns-1/sources/source-20.html
new file mode 100644
index 0000000000000..fede7c6fa42d9
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-20.html
@@ -0,0 +1,358 @@
+ Coverage Report > ParsedSignificantTerms

+ Coverage Summary for Class: ParsedSignificantTerms (org.opensearch.search.aggregations.bucket.terms)
+ 
+ Class                                  Method, %     Line, %
+ ParsedSignificantTerms                 0% (0/10)     0% (0/27)
+ ParsedSignificantTerms$ParsedBucket    0% (0/9)      0% (0/38)
+ Total                                  0% (0/19)     0% (0/65)
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ 
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.opensearch.common.CheckedBiConsumer;
+ import org.opensearch.common.CheckedFunction;
+ import org.opensearch.common.CheckedSupplier;
+ import org.opensearch.core.ParseField;
+ import org.opensearch.core.xcontent.ObjectParser;
+ import org.opensearch.core.xcontent.XContentBuilder;
+ import org.opensearch.core.xcontent.XContentParser;
+ import org.opensearch.core.xcontent.XContentParserUtils;
+ import org.opensearch.search.aggregations.Aggregation;
+ import org.opensearch.search.aggregations.Aggregations;
+ import org.opensearch.search.aggregations.ParsedMultiBucketAggregation;
+ 
+ import java.io.IOException;
+ import java.util.ArrayList;
+ import java.util.Iterator;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.function.Function;
+ import java.util.stream.Collectors;
+ 
+ /**
+  * A significant terms result parsed between nodes
+  *
+  * @opensearch.internal
+  */
+ public abstract class ParsedSignificantTerms extends ParsedMultiBucketAggregation<ParsedSignificantTerms.ParsedBucket>
+     implements
+         SignificantTerms {
+ 
+     private Map<String, ParsedBucket> bucketMap;
+     protected long subsetSize;
+     protected long supersetSize;
+ 
+     protected long getSubsetSize() {
+         return subsetSize;
+     }
+ 
+     protected long getSupersetSize() {
+         return supersetSize;
+     }
+ 
+     @Override
+     public List<? extends SignificantTerms.Bucket> getBuckets() {
+         return buckets;
+     }
+ 
+     @Override
+     public SignificantTerms.Bucket getBucketByKey(String term) {
+         if (bucketMap == null) {
+             bucketMap = buckets.stream().collect(Collectors.toMap(SignificantTerms.Bucket::getKeyAsString, Function.identity()));
+         }
+         return bucketMap.get(term);
+     }
+ 
+     @Override
+     public Iterator<SignificantTerms.Bucket> iterator() {
+         return buckets.stream().map(bucket -> (SignificantTerms.Bucket) bucket).collect(Collectors.toList()).iterator();
+     }
+ 
+     @Override
+     protected XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
+         builder.field(CommonFields.DOC_COUNT.getPreferredName(), subsetSize);
+         builder.field(InternalMappedSignificantTerms.BG_COUNT, supersetSize);
+         builder.startArray(CommonFields.BUCKETS.getPreferredName());
+         for (SignificantTerms.Bucket bucket : buckets) {
+             bucket.toXContent(builder, params);
+         }
+         builder.endArray();
+         return builder;
+     }
+ 
+     static <T extends ParsedSignificantTerms> T parseSignificantTermsXContent(
+         final CheckedSupplier<T, IOException> aggregationSupplier,
+         final String name
+     ) throws IOException {
+         T aggregation = aggregationSupplier.get();
+         aggregation.setName(name);
+         for (ParsedBucket bucket : aggregation.buckets) {
+             bucket.subsetSize = aggregation.subsetSize;
+             bucket.supersetSize = aggregation.supersetSize;
+         }
+         return aggregation;
+     }
+ 
+     static void declareParsedSignificantTermsFields(
+         final ObjectParser<? extends ParsedSignificantTerms, Void> objectParser,
+         final CheckedFunction<XContentParser, ParsedSignificantTerms.ParsedBucket, IOException> bucketParser
+     ) {
+         declareMultiBucketAggregationFields(objectParser, bucketParser::apply, bucketParser::apply);
+         objectParser.declareLong((parsedTerms, value) -> parsedTerms.subsetSize = value, CommonFields.DOC_COUNT);
+         objectParser.declareLong(
+             (parsedTerms, value) -> parsedTerms.supersetSize = value,
+             new ParseField(InternalMappedSignificantTerms.BG_COUNT)
+         );
+     }
+ 
+     /**
+      * Parsed bucket for significant values
+      *
+      * @opensearch.internal
+      */
+     public abstract static class ParsedBucket extends ParsedMultiBucketAggregation.ParsedBucket implements SignificantTerms.Bucket {
+ 
+         protected long subsetDf;
+         protected long subsetSize;
+         protected long supersetDf;
+         protected long supersetSize;
+         protected double score;
+ 
+         @Override
+         public long getDocCount() {
+             return getSubsetDf();
+         }
+ 
+         @Override
+         public long getSubsetDf() {
+             return subsetDf;
+         }
+ 
+         @Override
+         public long getSupersetDf() {
+             return supersetDf;
+         }
+ 
+         @Override
+         public double getSignificanceScore() {
+             return score;
+         }
+ 
+         @Override
+         public long getSupersetSize() {
+             return supersetSize;
+         }
+ 
+         @Override
+         public long getSubsetSize() {
+             return subsetSize;
+         }
+ 
+         @Override
+         public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+             builder.startObject();
+             keyToXContent(builder);
+             builder.field(CommonFields.DOC_COUNT.getPreferredName(), getDocCount());
+             builder.field(InternalSignificantTerms.SCORE, getSignificanceScore());
+             builder.field(InternalSignificantTerms.BG_COUNT, getSupersetDf());
+             getAggregations().toXContentInternal(builder, params);
+             builder.endObject();
+             return builder;
+         }
+ 
+         @Override
+         protected abstract XContentBuilder keyToXContent(XContentBuilder builder) throws IOException;
+ 
+         static <B extends ParsedBucket> B parseSignificantTermsBucketXContent(
+             final XContentParser parser,
+             final B bucket,
+             final CheckedBiConsumer<XContentParser, B, IOException> keyConsumer
+         ) throws IOException {
+ 
+             final List<Aggregation> aggregations = new ArrayList<>();
+             XContentParser.Token token;
+             String currentFieldName = parser.currentName();
+             while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+                 if (token == XContentParser.Token.FIELD_NAME) {
+                     currentFieldName = parser.currentName();
+                 } else if (token.isValue()) {
+                     if (CommonFields.KEY_AS_STRING.getPreferredName().equals(currentFieldName)) {
+                         bucket.setKeyAsString(parser.text());
+                     } else if (CommonFields.KEY.getPreferredName().equals(currentFieldName)) {
+                         keyConsumer.accept(parser, bucket);
+                     } else if (CommonFields.DOC_COUNT.getPreferredName().equals(currentFieldName)) {
+                         long value = parser.longValue();
+                         bucket.subsetDf = value;
+                         bucket.setDocCount(value);
+                     } else if (InternalSignificantTerms.SCORE.equals(currentFieldName)) {
+                         bucket.score = parser.doubleValue();
+                     } else if (InternalSignificantTerms.BG_COUNT.equals(currentFieldName)) {
+                         bucket.supersetDf = parser.longValue();
+                     }
+                 } else if (token == XContentParser.Token.START_OBJECT) {
+                     XContentParserUtils.parseTypedKeysObject(
+                         parser,
+                         Aggregation.TYPED_KEYS_DELIMITER,
+                         Aggregation.class,
+                         aggregations::add
+                     );
+                 }
+             }
+             bucket.setAggregations(new Aggregations(aggregations));
+             return bucket;
+         }
+     }
+ }
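+ 
+ // A sketch (names and values hypothetical) of a bucket handled by
+ // parseSignificantTermsBucketXContent above:
+ //
+ //     { "key": "h5n1", "doc_count": 5, "score": 1.6, "bg_count": 8 }
+ //
+ // "doc_count" feeds subsetDf, "score" the significance score, and "bg_count"
+ // (InternalSignificantTerms.BG_COUNT) the superset document frequency.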
+
+
+
diff --git a/htmlReport/ns-1/sources/source-21.html b/htmlReport/ns-1/sources/source-21.html
new file mode 100644
index 0000000000000..aa523b3ad503b
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-21.html
@@ -0,0 +1,249 @@
+ Coverage Report > ParsedStringRareTerms

+ Coverage Summary for Class: ParsedStringRareTerms (org.opensearch.search.aggregations.bucket.terms)
+ 
+ Class                                 Method, %     Line, %
+ ParsedStringRareTerms                 0% (0/4)      0% (0/8)
+ ParsedStringRareTerms$ParsedBucket    0% (0/7)      0% (0/17)
+ Total                                 0% (0/11)     0% (0/25)
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ 
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.apache.lucene.util.BytesRef;
+ import org.opensearch.core.xcontent.ObjectParser;
+ import org.opensearch.core.xcontent.XContentBuilder;
+ import org.opensearch.core.xcontent.XContentParser;
+ 
+ import java.io.IOException;
+ import java.nio.CharBuffer;
+ 
+ /**
+  * A rare string terms result parsed between nodes
+  *
+  * @opensearch.internal
+  */
+ public class ParsedStringRareTerms extends ParsedRareTerms {
+     @Override
+     public String getType() {
+         return StringRareTerms.NAME;
+     }
+ 
+     private static final ObjectParser<ParsedStringRareTerms, Void> PARSER = new ObjectParser<>(
+         ParsedStringRareTerms.class.getSimpleName(),
+         true,
+         ParsedStringRareTerms::new
+     );
+ 
+     static {
+         declareParsedTermsFields(PARSER, ParsedBucket::fromXContent);
+     }
+ 
+     public static ParsedStringRareTerms fromXContent(XContentParser parser, String name) throws IOException {
+         ParsedStringRareTerms aggregation = PARSER.parse(parser, null);
+         aggregation.setName(name);
+         return aggregation;
+     }
+ 
+     /**
+      * Parsed bucket for rare string terms
+      *
+      * @opensearch.internal
+      */
+     public static class ParsedBucket extends ParsedRareTerms.ParsedBucket {
+ 
+         private BytesRef key;
+ 
+         @Override
+         public Object getKey() {
+             return getKeyAsString();
+         }
+ 
+         @Override
+         public String getKeyAsString() {
+             String keyAsString = super.getKeyAsString();
+             if (keyAsString != null) {
+                 return keyAsString;
+             }
+             if (key != null) {
+                 return key.utf8ToString();
+             }
+             return null;
+         }
+ 
+         public Number getKeyAsNumber() {
+             if (key != null) {
+                 return Double.parseDouble(key.utf8ToString());
+             }
+             return null;
+         }
+ 
+         @Override
+         protected XContentBuilder keyToXContent(XContentBuilder builder) throws IOException {
+             return builder.field(CommonFields.KEY.getPreferredName(), getKey());
+         }
+ 
+         static ParsedStringRareTerms.ParsedBucket fromXContent(XContentParser parser) throws IOException {
+             return parseRareTermsBucketXContent(parser, ParsedStringRareTerms.ParsedBucket::new, (p, bucket) -> {
+                 CharBuffer cb = p.charBufferOrNull();
+                 if (cb == null) {
+                     bucket.key = null;
+                 } else {
+                     bucket.key = new BytesRef(cb);
+                 }
+             });
+         }
+     }
+ }
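+ 
+ // Note: when charBufferOrNull() reports a null value, fromXContent stores a
+ // null key; getKeyAsString() and getKeyAsNumber() above both guard for that
+ // and return null instead of throwing.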
+
+
+
diff --git a/htmlReport/ns-1/sources/source-22.html b/htmlReport/ns-1/sources/source-22.html
new file mode 100644
index 0000000000000..54ecad9cbf14e
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-22.html
@@ -0,0 +1,249 @@
+ Coverage Report > ParsedStringTerms

+ Coverage Summary for Class: ParsedStringTerms (org.opensearch.search.aggregations.bucket.terms)
+ 
+ Class                             Method, %     Line, %
+ ParsedStringTerms                 0% (0/4)      0% (0/8)
+ ParsedStringTerms$ParsedBucket    0% (0/7)      0% (0/17)
+ Total                             0% (0/11)     0% (0/25)
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ 
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.apache.lucene.util.BytesRef;
+ import org.opensearch.core.xcontent.ObjectParser;
+ import org.opensearch.core.xcontent.XContentBuilder;
+ import org.opensearch.core.xcontent.XContentParser;
+ 
+ import java.io.IOException;
+ import java.nio.CharBuffer;
+ 
+ /**
+  * A string terms result parsed between nodes
+  *
+  * @opensearch.internal
+  */
+ public class ParsedStringTerms extends ParsedTerms {
+ 
+     @Override
+     public String getType() {
+         return StringTerms.NAME;
+     }
+ 
+     private static final ObjectParser<ParsedStringTerms, Void> PARSER = new ObjectParser<>(
+         ParsedStringTerms.class.getSimpleName(),
+         true,
+         ParsedStringTerms::new
+     );
+     static {
+         declareParsedTermsFields(PARSER, ParsedBucket::fromXContent);
+     }
+ 
+     public static ParsedStringTerms fromXContent(XContentParser parser, String name) throws IOException {
+         ParsedStringTerms aggregation = PARSER.parse(parser, null);
+         aggregation.setName(name);
+         return aggregation;
+     }
+ 
+     /**
+      * Parsed bucket for string values
+      *
+      * @opensearch.internal
+      */
+     public static class ParsedBucket extends ParsedTerms.ParsedBucket {
+ 
+         private BytesRef key;
+ 
+         @Override
+         public Object getKey() {
+             return getKeyAsString();
+         }
+ 
+         @Override
+         public String getKeyAsString() {
+             String keyAsString = super.getKeyAsString();
+             if (keyAsString != null) {
+                 return keyAsString;
+             }
+             if (key != null) {
+                 return key.utf8ToString();
+             }
+             return null;
+         }
+ 
+         public Number getKeyAsNumber() {
+             if (key != null) {
+                 return Double.parseDouble(key.utf8ToString());
+             }
+             return null;
+         }
+ 
+         @Override
+         protected XContentBuilder keyToXContent(XContentBuilder builder) throws IOException {
+             return builder.field(CommonFields.KEY.getPreferredName(), getKey());
+         }
+ 
+         static ParsedBucket fromXContent(XContentParser parser) throws IOException {
+             return parseTermsBucketXContent(parser, ParsedBucket::new, (p, bucket) -> {
+                 CharBuffer cb = p.charBufferOrNull();
+                 if (cb == null) {
+                     bucket.key = null;
+                 } else {
+                     bucket.key = new BytesRef(cb);
+                 }
+             });
+         }
+     }
+ }
+
+
+
diff --git a/htmlReport/ns-1/sources/source-23.html b/htmlReport/ns-1/sources/source-23.html
new file mode 100644
index 0000000000000..a60ab70f8689f
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-23.html
@@ -0,0 +1,311 @@
+ Coverage Report > ParsedTerms

+ Coverage Summary for Class: ParsedTerms (org.opensearch.search.aggregations.bucket.terms)
+ 
+ Class                       Method, %     Line, %
+ ParsedTerms                 0% (0/8)      0% (0/21)
+ ParsedTerms$ParsedBucket    0% (0/4)      0% (0/33)
+ Total                       0% (0/12)     0% (0/54)
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ 
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.opensearch.common.CheckedBiConsumer;
+ import org.opensearch.common.CheckedFunction;
+ import org.opensearch.core.xcontent.ObjectParser;
+ import org.opensearch.core.xcontent.XContentBuilder;
+ import org.opensearch.core.xcontent.XContentParser;
+ import org.opensearch.core.xcontent.XContentParserUtils;
+ import org.opensearch.search.aggregations.Aggregation;
+ import org.opensearch.search.aggregations.Aggregations;
+ import org.opensearch.search.aggregations.ParsedMultiBucketAggregation;
+ 
+ import java.io.IOException;
+ import java.util.ArrayList;
+ import java.util.List;
+ import java.util.function.Supplier;
+ 
+ import static org.opensearch.search.aggregations.bucket.terms.InternalTerms.DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME;
+ import static org.opensearch.search.aggregations.bucket.terms.InternalTerms.SUM_OF_OTHER_DOC_COUNTS;
+ 
+ /**
+  * A terms result parsed between nodes
+  *
+  * @opensearch.internal
+  */
+ public abstract class ParsedTerms extends ParsedMultiBucketAggregation<ParsedTerms.ParsedBucket> implements Terms {
+ 
+     protected long docCountErrorUpperBound;
+     protected long sumOtherDocCount;
+ 
+     @Override
+     public long getDocCountError() {
+         return docCountErrorUpperBound;
+     }
+ 
+     @Override
+     public long getSumOfOtherDocCounts() {
+         return sumOtherDocCount;
+     }
+ 
+     @Override
+     public List<? extends Terms.Bucket> getBuckets() {
+         return buckets;
+     }
+ 
+     @Override
+     public Terms.Bucket getBucketByKey(String term) {
+         for (Terms.Bucket bucket : getBuckets()) {
+             if (bucket.getKeyAsString().equals(term)) {
+                 return bucket;
+             }
+         }
+         return null;
+     }
+ 
+     @Override
+     protected XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
+         builder.field(DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME.getPreferredName(), getDocCountError());
+         builder.field(SUM_OF_OTHER_DOC_COUNTS.getPreferredName(), getSumOfOtherDocCounts());
+         builder.startArray(CommonFields.BUCKETS.getPreferredName());
+         for (Terms.Bucket bucket : getBuckets()) {
+             bucket.toXContent(builder, params);
+         }
+         builder.endArray();
+         return builder;
+     }
+ 
+     static void declareParsedTermsFields(
+         final ObjectParser<? extends ParsedTerms, Void> objectParser,
+         final CheckedFunction<XContentParser, ParsedBucket, IOException> bucketParser
+     ) {
+         declareMultiBucketAggregationFields(objectParser, bucketParser::apply, bucketParser::apply);
+         objectParser.declareLong(
+             (parsedTerms, value) -> parsedTerms.docCountErrorUpperBound = value,
+             DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME
+         );
+         objectParser.declareLong((parsedTerms, value) -> parsedTerms.sumOtherDocCount = value, SUM_OF_OTHER_DOC_COUNTS);
+     }
+ 
+     /**
+      * Base parsed bucket
+      *
+      * @opensearch.internal
+      */
+     public abstract static class ParsedBucket extends ParsedMultiBucketAggregation.ParsedBucket implements Terms.Bucket {
+ 
+         boolean showDocCountError = false;
+         protected long docCountError;
+ 
+         @Override
+         public long getDocCountError() {
+             return docCountError;
+         }
+ 
+         @Override
+         public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+             builder.startObject();
+             keyToXContent(builder);
+             builder.field(CommonFields.DOC_COUNT.getPreferredName(), getDocCount());
+             if (showDocCountError) {
+                 builder.field(DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME.getPreferredName(), getDocCountError());
+             }
+             getAggregations().toXContentInternal(builder, params);
+             builder.endObject();
+             return builder;
+         }
+ 
+         static <B extends ParsedBucket> B parseTermsBucketXContent(
+             final XContentParser parser,
+             final Supplier<B> bucketSupplier,
+             final CheckedBiConsumer<XContentParser, B, IOException> keyConsumer
+         ) throws IOException {
+ 
+             final B bucket = bucketSupplier.get();
+             final List<Aggregation> aggregations = new ArrayList<>();
+ 
+             XContentParser.Token token;
+             String currentFieldName = parser.currentName();
+             while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+                 // field value could be list, e.g. multi_terms aggregation.
+                 if ((token.isValue() || token == XContentParser.Token.START_ARRAY)
+                     && CommonFields.KEY.getPreferredName().equals(currentFieldName)) {
+                     keyConsumer.accept(parser, bucket);
+                 }
+                 if (token == XContentParser.Token.FIELD_NAME) {
+                     currentFieldName = parser.currentName();
+                 } else if (token.isValue()) {
+                     if (CommonFields.KEY_AS_STRING.getPreferredName().equals(currentFieldName)) {
+                         bucket.setKeyAsString(parser.text());
+                     } else if (CommonFields.DOC_COUNT.getPreferredName().equals(currentFieldName)) {
+                         bucket.setDocCount(parser.longValue());
+                     } else if (DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME.getPreferredName().equals(currentFieldName)) {
+                         bucket.docCountError = parser.longValue();
+                         bucket.showDocCountError = true;
+                     }
+                 } else if (token == XContentParser.Token.START_OBJECT) {
+                     XContentParserUtils.parseTypedKeysObject(
+                         parser,
+                         Aggregation.TYPED_KEYS_DELIMITER,
+                         Aggregation.class,
+                         aggregations::add
+                     );
+                 }
+             }
+             bucket.setAggregations(new Aggregations(aggregations));
+             return bucket;
+         }
+     }
+ }
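+ 
+ // A sketch (values hypothetical) of a terms bucket consumed by
+ // parseTermsBucketXContent above:
+ //
+ //     { "key": 42, "key_as_string": "42", "doc_count": 7,
+ //       "doc_count_error_upper_bound": 0 }
+ //
+ // Seeing "doc_count_error_upper_bound" also flips showDocCountError so the
+ // value is re-emitted by toXContent.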
+
+
+
diff --git a/htmlReport/ns-1/sources/source-24.html b/htmlReport/ns-1/sources/source-24.html
new file mode 100644
index 0000000000000..7135c42dd4379
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-24.html
@@ -0,0 +1,218 @@
+ Coverage Report > ParsedUnsignedLongTerms

+ Coverage Summary for Class: ParsedUnsignedLongTerms (org.opensearch.search.aggregations.bucket.terms)
+ 
+ Class                                   Method, %     Line, %
+ ParsedUnsignedLongTerms                 0% (0/4)      0% (0/8)
+ ParsedUnsignedLongTerms$ParsedBucket    0% (0/6)      0% (0/14)
+ Total                                   0% (0/10)     0% (0/22)
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.opensearch.core.xcontent.ObjectParser;
+ import org.opensearch.core.xcontent.XContentBuilder;
+ import org.opensearch.core.xcontent.XContentParser;
+ 
+ import java.io.IOException;
+ import java.math.BigInteger;
+ 
+ /**
+  * An unsigned long term agg result parsed between nodes
+  *
+  * @opensearch.internal
+  */
+ public class ParsedUnsignedLongTerms extends ParsedTerms {
+ 
+     @Override
+     public String getType() {
+         return UnsignedLongTerms.NAME;
+     }
+ 
+     private static final ObjectParser<ParsedUnsignedLongTerms, Void> PARSER = new ObjectParser<>(
+         ParsedUnsignedLongTerms.class.getSimpleName(),
+         true,
+         ParsedUnsignedLongTerms::new
+     );
+     static {
+         declareParsedTermsFields(PARSER, ParsedBucket::fromXContent);
+     }
+ 
+     public static ParsedUnsignedLongTerms fromXContent(XContentParser parser, String name) throws IOException {
+         ParsedUnsignedLongTerms aggregation = PARSER.parse(parser, null);
+         aggregation.setName(name);
+         return aggregation;
+     }
+ 
+     /**
+      * Parsed bucket for unsigned long term values
+      *
+      * @opensearch.internal
+      */
+     public static class ParsedBucket extends ParsedTerms.ParsedBucket {
+ 
+         private BigInteger key;
+ 
+         @Override
+         public Object getKey() {
+             return key;
+         }
+ 
+         @Override
+         public String getKeyAsString() {
+             String keyAsString = super.getKeyAsString();
+             if (keyAsString != null) {
+                 return keyAsString;
+             }
+             if (key != null) {
+                 return key.toString();
+             }
+             return null;
+         }
+ 
+         public Number getKeyAsNumber() {
+             return key;
+         }
+ 
+         @Override
+         protected XContentBuilder keyToXContent(XContentBuilder builder) throws IOException {
+             builder.field(CommonFields.KEY.getPreferredName(), key);
+             if (super.getKeyAsString() != null) {
+                 builder.field(CommonFields.KEY_AS_STRING.getPreferredName(), getKeyAsString());
+             }
+             return builder;
+         }
+ 
+         static ParsedBucket fromXContent(XContentParser parser) throws IOException {
+             return parseTermsBucketXContent(parser, ParsedBucket::new, (p, bucket) -> bucket.key = p.bigIntegerValue());
+         }
+     }
+ }
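+ 
+ // Note: the bucket key is a BigInteger so unsigned values above Long.MAX_VALUE
+ // (e.g. 18446744073709551615) survive parsing via p.bigIntegerValue() without
+ // overflow.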
+
+
+
diff --git a/htmlReport/ns-1/sources/source-25.html b/htmlReport/ns-1/sources/source-25.html
new file mode 100644
index 0000000000000..3373cd42965c5
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-25.html
@@ -0,0 +1,374 @@
+ Coverage Report > RareTermsAggregationBuilder

+ Coverage Summary for Class: RareTermsAggregationBuilder (org.opensearch.search.aggregations.bucket.terms)
+ 
+ Class                          Class, %      Method, %      Line, %
+ RareTermsAggregationBuilder    100% (1/1)    8.7% (2/23)    17.2% (10/58)
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.opensearch.Version;
+ import org.opensearch.core.ParseField;
+ import org.opensearch.core.common.io.stream.StreamInput;
+ import org.opensearch.core.common.io.stream.StreamOutput;
+ import org.opensearch.core.xcontent.ObjectParser;
+ import org.opensearch.core.xcontent.XContentBuilder;
+ import org.opensearch.index.query.QueryShardContext;
+ import org.opensearch.search.aggregations.AggregationBuilder;
+ import org.opensearch.search.aggregations.AggregatorFactories;
+ import org.opensearch.search.aggregations.AggregatorFactory;
+ import org.opensearch.search.aggregations.support.CoreValuesSourceType;
+ import org.opensearch.search.aggregations.support.ValuesSourceAggregationBuilder;
+ import org.opensearch.search.aggregations.support.ValuesSourceAggregatorFactory;
+ import org.opensearch.search.aggregations.support.ValuesSourceConfig;
+ import org.opensearch.search.aggregations.support.ValuesSourceRegistry;
+ import org.opensearch.search.aggregations.support.ValuesSourceType;
+ 
+ import java.io.IOException;
+ import java.util.Map;
+ import java.util.Objects;
+ 
+ /**
+  * Aggregation Builder for rare_terms agg
+  *
+  * @opensearch.internal
+  */
+ public class RareTermsAggregationBuilder extends ValuesSourceAggregationBuilder<RareTermsAggregationBuilder> {
+     public static final String NAME = "rare_terms";
+     public static final ValuesSourceRegistry.RegistryKey<RareTermsAggregatorSupplier> REGISTRY_KEY = new ValuesSourceRegistry.RegistryKey<>(
+         NAME,
+         RareTermsAggregatorSupplier.class
+     );
+ 
+     private static final ParseField MAX_DOC_COUNT_FIELD_NAME = new ParseField("max_doc_count");
+     private static final ParseField PRECISION = new ParseField("precision");
+ 
+     private static final int MAX_MAX_DOC_COUNT = 100;
+     public static final ObjectParser<RareTermsAggregationBuilder, String> PARSER = ObjectParser.fromBuilder(
+         NAME,
+         RareTermsAggregationBuilder::new
+     );
+     static {
+         ValuesSourceAggregationBuilder.declareFields(PARSER, true, true, false);
+         PARSER.declareLong(RareTermsAggregationBuilder::maxDocCount, MAX_DOC_COUNT_FIELD_NAME);
+ 
+         PARSER.declareField(
+             (b, v) -> b.includeExclude(IncludeExclude.merge(v, b.includeExclude())),
+             IncludeExclude::parseInclude,
+             IncludeExclude.INCLUDE_FIELD,
+             ObjectParser.ValueType.OBJECT_ARRAY_OR_STRING
+         );
+ 
+         PARSER.declareField(
+             (b, v) -> b.includeExclude(IncludeExclude.merge(b.includeExclude(), v)),
+             IncludeExclude::parseExclude,
+             IncludeExclude.EXCLUDE_FIELD,
+             ObjectParser.ValueType.STRING_ARRAY
+         );
+ 
+         PARSER.declareDouble(RareTermsAggregationBuilder::setPrecision, PRECISION);
+     }
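+ 
+     // A sketch (field name and values hypothetical) of the request body the
+     // parser declared above accepts:
+     //
+     //     { "field": "genre", "max_doc_count": 2, "precision": 0.001,
+     //       "exclude": ["swing"] }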
+ 
+     public static void registerAggregators(ValuesSourceRegistry.Builder builder) {
+         RareTermsAggregatorFactory.registerAggregators(builder);
+     }
+ 
+     private IncludeExclude includeExclude = null;
+     private int maxDocCount = 1;
+     private double precision = 0.001;
+ 
+     public RareTermsAggregationBuilder(String name) {
+         super(name);
+     }
+ 
+     private RareTermsAggregationBuilder(
+         RareTermsAggregationBuilder clone,
+         AggregatorFactories.Builder factoriesBuilder,
+         Map<String, Object> metadata
+     ) {
+         super(clone, factoriesBuilder, metadata);
+         this.includeExclude = clone.includeExclude;
+     }
+ 
+     @Override
+     protected ValuesSourceType defaultValueSourceType() {
+         return CoreValuesSourceType.BYTES;
+     }
+ 
+     @Override
+     protected AggregationBuilder shallowCopy(AggregatorFactories.Builder factoriesBuilder, Map<String, Object> metadata) {
+         return new RareTermsAggregationBuilder(this, factoriesBuilder, metadata);
+     }
+ 
+     /**
+      * Read from a stream.
+      */
+     public RareTermsAggregationBuilder(StreamInput in) throws IOException {
+         super(in);
+         includeExclude = in.readOptionalWriteable(IncludeExclude::new);
+         maxDocCount = in.readVInt();
+     }
+ 
+     @Override
+     protected boolean serializeTargetValueType(Version version) {
+         return true;
+     }
+ 
+     @Override
+     protected void innerWriteTo(StreamOutput out) throws IOException {
+         out.writeOptionalWriteable(includeExclude);
+         out.writeVInt(maxDocCount);
+     }
+ 
+     /**
+      * Set the maximum document count terms should have in order to appear in
+      * the response.
+      */
+     public RareTermsAggregationBuilder maxDocCount(long maxDocCount) {
+         if (maxDocCount <= 0) {
+             throw new IllegalArgumentException(
+                 "["
+                     + MAX_DOC_COUNT_FIELD_NAME.getPreferredName()
+                     + "] must be greater than 0. Found ["
+                     + maxDocCount
+                     + "] in ["
+                     + name
+                     + "]"
+             );
+         }
+         // TODO review: what size cap should we put on this?
+         if (maxDocCount > MAX_MAX_DOC_COUNT) {
+             throw new IllegalArgumentException(
+                 "[" + MAX_DOC_COUNT_FIELD_NAME.getPreferredName() + "] must be smaller than " + MAX_MAX_DOC_COUNT + " in [" + name + "]"
+             );
+         }
+         this.maxDocCount = (int) maxDocCount;
+         return this;
+     }
+ 
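+     // Illustrative usage only (aggregation and field names hypothetical):
+     // keep terms that occur in at most 5 documents.
+     //
+     //     RareTermsAggregationBuilder agg = new RareTermsAggregationBuilder("rare_tags")
+     //         .field("tags")
+     //         .maxDocCount(5);
+ 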
+     /**
+      * Set terms to include and exclude from the aggregation results
+      */
+     public RareTermsAggregationBuilder includeExclude(IncludeExclude includeExclude) {
+         this.includeExclude = includeExclude;
+         return this;
+     }
+ 
+     /**
+      * Get terms to include and exclude from the aggregation results
+      */
+     public IncludeExclude includeExclude() {
+         return includeExclude;
+     }
+ 
+     /**
+      * Get the current false positive rate for individual cuckoo filters.
+      */
+     public double getPrecision() {
+         return precision;
+     }
+ 
+     /**
+      * Sets the false-positive rate for individual cuckoo filters.  Does not dictate the overall fpp rate
+      * since we use a "scaling" cuckoo filter which adds more filters as required, and the overall
+      * error rate grows differently than individual filters
+      * <p>
+      * This value does, however, affect the overall space usage of the filter.  Coarser precisions provide
+      * more compact filters.  The default is 0.001
+      */
+     public void setPrecision(double precision) {
+         if (precision < 0.00001) {
+             throw new IllegalArgumentException("[precision] must be greater than 0.00001");
+         }
+         this.precision = precision;
+     }
+ 
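+     // A minimal sketch (values illustrative): coarser precision shrinks each
+     // cuckoo filter, finer precision trades memory for fewer false positives.
+     //
+     //     agg.setPrecision(0.01);    // compact filters, more false positives
+     //     agg.setPrecision(0.0001);  // larger filters, fewer false positives
+ 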
+     @Override
+     public BucketCardinality bucketCardinality() {
+         return BucketCardinality.MANY;
+     }
+ 
+     @Override
+     protected ValuesSourceAggregatorFactory innerBuild(
+         QueryShardContext queryShardContext,
+         ValuesSourceConfig config,
+         AggregatorFactory parent,
+         AggregatorFactories.Builder subFactoriesBuilder
+     ) throws IOException {
+         return new RareTermsAggregatorFactory(
+             name,
+             config,
+             includeExclude,
+             queryShardContext,
+             parent,
+             subFactoriesBuilder,
+             metadata,
+             maxDocCount,
+             precision
+         );
+     }
+ 
+     @Override
+     protected XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
+         if (includeExclude != null) {
+             includeExclude.toXContent(builder, params);
+         }
+         builder.field(MAX_DOC_COUNT_FIELD_NAME.getPreferredName(), maxDocCount);
+         builder.field(PRECISION.getPreferredName(), precision);
+         return builder;
+     }
+ 
+     @Override
+     public int hashCode() {
+         return Objects.hash(super.hashCode(), includeExclude, maxDocCount, precision);
+     }
+ 
+     @Override
+     public boolean equals(Object obj) {
+         if (this == obj) return true;
+         if (obj == null || getClass() != obj.getClass()) return false;
+         if (super.equals(obj) == false) return false;
+         RareTermsAggregationBuilder other = (RareTermsAggregationBuilder) obj;
+         return Objects.equals(includeExclude, other.includeExclude)
+             && Objects.equals(maxDocCount, other.maxDocCount)
+             && Objects.equals(precision, other.precision);
+     }
+ 
+     @Override
+     public String getType() {
+         return NAME;
+     }
+ 
+     @Override
+     protected ValuesSourceRegistry.RegistryKey<?> getRegistryKey() {
+         return REGISTRY_KEY;
+     }
+ }
+
+
+
diff --git a/htmlReport/ns-1/sources/source-26.html b/htmlReport/ns-1/sources/source-26.html
new file mode 100644
index 0000000000000..b235415becd54
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-26.html
@@ -0,0 +1,537 @@
+ Coverage Report > RareTermsAggregatorFactory

+ Coverage Summary for Class: RareTermsAggregatorFactory (org.opensearch.search.aggregations.bucket.terms)
+ 
+ Class                                          Method, %       Line, %
+ RareTermsAggregatorFactory                     42.9% (3/7)     40% (8/20)
+ RareTermsAggregatorFactory$1                   50% (1/2)       20% (1/5)
+ RareTermsAggregatorFactory$2                   50% (1/2)       11.1% (1/9)
+ RareTermsAggregatorFactory$3                   0% (0/2)        0% (0/2)
+ RareTermsAggregatorFactory$ExecutionMode       0% (0/5)        0% (0/8)
+ RareTermsAggregatorFactory$ExecutionMode$1     0% (0/3)        0% (0/7)
+ Total                                          23.8% (5/21)    19.6% (10/51)
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ 
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.opensearch.common.logging.DeprecationLogger;
+ import org.opensearch.core.ParseField;
+ import org.opensearch.index.query.QueryShardContext;
+ import org.opensearch.search.DocValueFormat;
+ import org.opensearch.search.aggregations.Aggregator;
+ import org.opensearch.search.aggregations.AggregatorFactories;
+ import org.opensearch.search.aggregations.AggregatorFactory;
+ import org.opensearch.search.aggregations.CardinalityUpperBound;
+ import org.opensearch.search.aggregations.InternalAggregation;
+ import org.opensearch.search.aggregations.NonCollectingAggregator;
+ import org.opensearch.search.aggregations.support.CoreValuesSourceType;
+ import org.opensearch.search.aggregations.support.ValuesSource;
+ import org.opensearch.search.aggregations.support.ValuesSourceAggregatorFactory;
+ import org.opensearch.search.aggregations.support.ValuesSourceConfig;
+ import org.opensearch.search.aggregations.support.ValuesSourceRegistry;
+ import org.opensearch.search.internal.SearchContext;
+ 
+ import java.io.IOException;
+ import java.util.Arrays;
+ import java.util.Map;
+ 
+ /**
+  * Aggregation Factory for rare_terms agg
+  *
+  * @opensearch.internal
+  */
+ public class RareTermsAggregatorFactory extends ValuesSourceAggregatorFactory {
+     private final IncludeExclude includeExclude;
+     private final int maxDocCount;
+     private final double precision;
+ 
+     static void registerAggregators(ValuesSourceRegistry.Builder builder) {
+         builder.register(
+             RareTermsAggregationBuilder.REGISTRY_KEY,
+             Arrays.asList(CoreValuesSourceType.BYTES, CoreValuesSourceType.IP),
+             RareTermsAggregatorFactory.bytesSupplier(),
+             true
+         );
+ 
+         builder.register(
+             RareTermsAggregationBuilder.REGISTRY_KEY,
+             Arrays.asList(CoreValuesSourceType.DATE, CoreValuesSourceType.BOOLEAN, CoreValuesSourceType.NUMERIC),
+             RareTermsAggregatorFactory.numericSupplier(),
+             true
+         );
+     }
+ 
+     /**
+      * This supplier is used for all the field types that should be aggregated as bytes/strings,
+      * including those that need global ordinals
+      */
+     private static RareTermsAggregatorSupplier bytesSupplier() {
+         return new RareTermsAggregatorSupplier() {
+             @Override
+             public Aggregator build(
+                 String name,
+                 AggregatorFactories factories,
+                 ValuesSource valuesSource,
+                 DocValueFormat format,
+                 int maxDocCount,
+                 double precision,
+                 IncludeExclude includeExclude,
+                 SearchContext context,
+                 Aggregator parent,
+                 CardinalityUpperBound cardinality,
+                 Map<String, Object> metadata
+             ) throws IOException {
+ 
+                 ExecutionMode execution = ExecutionMode.MAP; // TODO global ords not implemented yet, only supports "map"
+ 
+                 if ((includeExclude != null) && (includeExclude.isRegexBased()) && format != DocValueFormat.RAW) {
+                     throw new IllegalArgumentException(
+                         "Aggregation ["
+                             + name
+                             + "] cannot support "
+                             + "regular expression style include/exclude settings as they can only be applied to string fields. "
+                             + "Use an array of values for include/exclude clauses"
+                     );
+                 }
+ 
+                 return execution.create(
+                     name,
+                     factories,
+                     valuesSource,
+                     format,
+                     includeExclude,
+                     context,
+                     parent,
+                     metadata,
+                     maxDocCount,
+                     precision,
+                     cardinality
+                 );
+ 
+             }
+         };
+     }
+ 
+     /**
+      * This supplier is used for all fields that expect to be aggregated as a numeric value.
+      * This includes floating points, and formatted types that use numerics internally for storage (date, boolean, etc)
+      */
+     private static RareTermsAggregatorSupplier numericSupplier() {
+         return new RareTermsAggregatorSupplier() {
+             @Override
+             public Aggregator build(
+                 String name,
+                 AggregatorFactories factories,
+                 ValuesSource valuesSource,
+                 DocValueFormat format,
+                 int maxDocCount,
+                 double precision,
+                 IncludeExclude includeExclude,
+                 SearchContext context,
+                 Aggregator parent,
+                 CardinalityUpperBound cardinality,
+                 Map<String, Object> metadata
+             ) throws IOException {
+ 
+                 if ((includeExclude != null) && (includeExclude.isRegexBased())) {
+                     throw new IllegalArgumentException(
+                         "Aggregation ["
+                             + name
+                             + "] cannot support regular expression "
+                             + "style include/exclude settings as they can only be applied to string fields. Use an array of numeric "
+                             + "values for include/exclude clauses used to filter numeric fields"
+                     );
+                 }
+ 
+                 IncludeExclude.LongFilter longFilter = null;
+                 if (((ValuesSource.Numeric) valuesSource).isFloatingPoint()) {
+                     throw new IllegalArgumentException("RareTerms aggregation does not support floating point fields.");
+                 }
+                 if (includeExclude != null) {
+                     longFilter = includeExclude.convertToLongFilter(format);
+                 }
+                 return new LongRareTermsAggregator(
+                     name,
+                     factories,
+                     (ValuesSource.Numeric) valuesSource,
+                     format,
+                     context,
+                     parent,
+                     longFilter,
+                     maxDocCount,
+                     precision,
+                     cardinality,
+                     metadata
+                 );
+             }
+         };
+     }
+ 
+     RareTermsAggregatorFactory(
+         String name,
+         ValuesSourceConfig config,
+         IncludeExclude includeExclude,
+         QueryShardContext queryShardContext,
+         AggregatorFactory parent,
+         AggregatorFactories.Builder subFactoriesBuilder,
+         Map<String, Object> metadata,
+         int maxDocCount,
+         double precision
+     ) throws IOException {
+         super(name, config, queryShardContext, parent, subFactoriesBuilder, metadata);
+         this.includeExclude = includeExclude;
+         this.maxDocCount = maxDocCount;
+         this.precision = precision;
+     }
+ 
+     @Override
+     protected Aggregator createUnmapped(SearchContext searchContext, Aggregator parent, Map<String, Object> metadata) throws IOException {
+         final InternalAggregation aggregation = new UnmappedRareTerms(name, metadata);
+         return new NonCollectingAggregator(name, searchContext, parent, factories, metadata) {
+             @Override
+             public InternalAggregation buildEmptyAggregation() {
+                 return aggregation;
+             }
+         };
+     }
+ 
+     @Override
+     protected Aggregator doCreateInternal(
+         SearchContext searchContext,
+         Aggregator parent,
+         CardinalityUpperBound cardinality,
+         Map<String, Object> metadata
+     ) throws IOException {
+         return queryShardContext.getValuesSourceRegistry()
+             .getAggregator(RareTermsAggregationBuilder.REGISTRY_KEY, config)
+             .build(
+                 name,
+                 factories,
+                 config.getValuesSource(),
+                 config.format(),
+                 maxDocCount,
+                 precision,
+                 includeExclude,
+                 searchContext,
+                 parent,
+                 cardinality,
+                 metadata
+             );
+     }
+ 
+     @Override
+     protected boolean supportsConcurrentSegmentSearch() {
+         return true;
+     }
+ 
+     /**
+      * Execution mode for rare terms agg
+      *
+      * @opensearch.internal
+      */
+     public enum ExecutionMode {
+ 
+         MAP(new ParseField("map")) {
+ 
+             @Override
+             Aggregator create(
+                 String name,
+                 AggregatorFactories factories,
+                 ValuesSource valuesSource,
+                 DocValueFormat format,
+                 IncludeExclude includeExclude,
+                 SearchContext context,
+                 Aggregator parent,
+                 Map<String, Object> metadata,
+                 long maxDocCount,
+                 double precision,
+                 CardinalityUpperBound cardinality
+             ) throws IOException {
+                 int maxRegexLength = context.getQueryShardContext().getIndexSettings().getMaxRegexLength();
+                 final IncludeExclude.StringFilter filter = includeExclude == null
+                     ? null
+                     : includeExclude.convertToStringFilter(format, maxRegexLength);
+                 return new StringRareTermsAggregator(
+                     name,
+                     factories,
+                     (ValuesSource.Bytes) valuesSource,
+                     format,
+                     filter,
+                     context,
+                     parent,
+                     metadata,
+                     maxDocCount,
+                     precision,
+                     cardinality
+                 );
+             }
+ 
+             @Override
+             boolean needsGlobalOrdinals() {
+                 return false;
+             }
+ 
+         };
+ 
+         public static ExecutionMode fromString(String value, final DeprecationLogger deprecationLogger) {
+             switch (value) {
+                 case "map":
+                     return MAP;
+                 default:
+                     throw new IllegalArgumentException("Unknown `execution_hint`: [" + value + "], expected any of [map]");
+             }
+         }
+ 
+         private final ParseField parseField;
+ 
+         ExecutionMode(ParseField parseField) {
+             this.parseField = parseField;
+         }
+ 
+         abstract Aggregator create(
+             String name,
+             AggregatorFactories factories,
+             ValuesSource valuesSource,
+             DocValueFormat format,
+             IncludeExclude includeExclude,
+             SearchContext context,
+             Aggregator parent,
+             Map<String, Object> metadata,
+             long maxDocCount,
+             double precision,
+             CardinalityUpperBound cardinality
+         ) throws IOException;
+ 
+         abstract boolean needsGlobalOrdinals();
+ 
+         @Override
+         public String toString() {
+             return parseField.getPreferredName();
+         }
+     }
+ 
+ }
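+ // A hypothetical usage sketch (names and values are illustrative, not part of
+ // this file): the rare_terms builder that this factory ultimately backs.
+ //
+ //     RareTermsAggregationBuilder agg = new RareTermsAggregationBuilder("rare-genres")
+ //         .field("genre")     // bytes/IP fields route to bytesSupplier(),
+ //                             // date/boolean/numeric fields to numericSupplier()
+ //         .maxDocCount(1);    // a term is "rare" if it appears in at most one doc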
+
+
+
diff --git a/htmlReport/ns-1/sources/source-27.html b/htmlReport/ns-1/sources/source-27.html
new file mode 100644
index 0000000000000..35513806237e7
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-27.html
@@ -0,0 +1,421 @@
+ Coverage Report > SignificanceLookup

Coverage Summary for Class: SignificanceLookup (org.opensearch.search.aggregations.bucket.terms)

Class                                           Method, %    Line, %
SignificanceLookup                              0% (0/8)     0% (0/30)
SignificanceLookup$1                            0% (0/3)     0% (0/3)
SignificanceLookup$2                            0% (0/3)     0% (0/11)
SignificanceLookup$3                            0% (0/3)     0% (0/3)
SignificanceLookup$4                            0% (0/3)     0% (0/11)
SignificanceLookup$BackgroundFrequencyForBytes  -            -
SignificanceLookup$BackgroundFrequencyForLong   -            -
Total                                           0% (0/20)    0% (0/58)
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ 
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.apache.lucene.index.IndexReader;
+ import org.apache.lucene.index.PostingsEnum;
+ import org.apache.lucene.index.Term;
+ import org.apache.lucene.index.TermsEnum;
+ import org.apache.lucene.search.BooleanClause.Occur;
+ import org.apache.lucene.search.BooleanQuery;
+ import org.apache.lucene.search.IndexSearcher;
+ import org.apache.lucene.search.Query;
+ import org.apache.lucene.search.TermQuery;
+ import org.apache.lucene.util.BytesRef;
+ import org.opensearch.common.lease.Releasable;
+ import org.opensearch.common.lease.Releasables;
+ import org.opensearch.common.lucene.index.FilterableTermsEnum;
+ import org.opensearch.common.util.BigArrays;
+ import org.opensearch.common.util.BytesRefHash;
+ import org.opensearch.common.util.LongArray;
+ import org.opensearch.common.util.LongHash;
+ import org.opensearch.index.mapper.MappedFieldType;
+ import org.opensearch.index.query.QueryBuilder;
+ import org.opensearch.index.query.QueryShardContext;
+ import org.opensearch.search.DocValueFormat;
+ import org.opensearch.search.aggregations.CardinalityUpperBound;
+ import org.opensearch.search.aggregations.bucket.terms.heuristic.SignificanceHeuristic;
+ 
+ import java.io.IOException;
+ 
+ /**
+  * Looks up values used for {@link SignificanceHeuristic}s.
+  *
+  * @opensearch.internal
+  */
+ class SignificanceLookup {
+     /**
+      * Lookup frequencies for {@link BytesRef} terms.
+      */
+     interface BackgroundFrequencyForBytes extends Releasable {
+         long freq(BytesRef term) throws IOException;
+     }
+ 
+     /**
+      * Lookup frequencies for {@code long} terms.
+      */
+     interface BackgroundFrequencyForLong extends Releasable {
+         long freq(long term) throws IOException;
+     }
+ 
+     private final QueryShardContext context;
+     private final MappedFieldType fieldType;
+     private final DocValueFormat format;
+     private final Query backgroundFilter;
+     private final int supersetNumDocs;
+     private TermsEnum termsEnum;
+ 
+     SignificanceLookup(QueryShardContext context, MappedFieldType fieldType, DocValueFormat format, QueryBuilder backgroundFilter)
+         throws IOException {
+         this.context = context;
+         this.fieldType = fieldType;
+         this.format = format;
+         this.backgroundFilter = backgroundFilter == null ? null : backgroundFilter.toQuery(context);
+         /*
+          * We need to use a superset size that includes deleted docs, or we
+          * could end up with bad statistics that cause failures later on.
+          */
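+         // Note: maxDoc() counts deleted documents, while count(query) does not;
+         // that is why the unfiltered superset size uses maxDoc().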
+         IndexSearcher searcher = context.searcher();
+         supersetNumDocs = backgroundFilter == null ? searcher.getIndexReader().maxDoc() : searcher.count(this.backgroundFilter);
+     }
+ 
+     /**
+      * Get the number of docs in the superset.
+      */
+     long supersetSize() {
+         return supersetNumDocs;
+     }
+ 
+     /**
+      * Get the background frequency of a {@link BytesRef} term.
+      */
+     BackgroundFrequencyForBytes bytesLookup(BigArrays bigArrays, CardinalityUpperBound cardinality) {
+         if (cardinality == CardinalityUpperBound.ONE) {
+             return new BackgroundFrequencyForBytes() {
+                 @Override
+                 public long freq(BytesRef term) throws IOException {
+                     return getBackgroundFrequency(term);
+                 }
+ 
+                 @Override
+                 public void close() {}
+             };
+         }
+         return new BackgroundFrequencyForBytes() {
+             private final BytesRefHash termToPosition = new BytesRefHash(bigArrays);
+             private LongArray positionToFreq = bigArrays.newLongArray(1, false);
+ 
+             @Override
+             public long freq(BytesRef term) throws IOException {
+                 long position = termToPosition.add(term);
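+                 // add() returns a non-negative slot for a first-seen term, or
+                 // (-1 - existingSlot) when the term is already cached.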
+                 if (position < 0) {
+                     return positionToFreq.get(-1 - position);
+                 }
+                 long freq = getBackgroundFrequency(term);
+                 positionToFreq = bigArrays.grow(positionToFreq, position + 1);
+                 positionToFreq.set(position, freq);
+                 return freq;
+             }
+ 
+             @Override
+             public void close() {
+                 Releasables.close(termToPosition, positionToFreq);
+             }
+         };
+     }
+ 
+     /**
+      * Get the background frequency of a {@link BytesRef} term.
+      */
+     private long getBackgroundFrequency(BytesRef term) throws IOException {
+         return getBackgroundFrequency(fieldType.termQuery(format.format(term).toString(), context));
+     }
+ 
+     /**
+      * Get the background frequency of a {@code long} term.
+      */
+     BackgroundFrequencyForLong longLookup(BigArrays bigArrays, CardinalityUpperBound cardinality) {
+         if (cardinality == CardinalityUpperBound.ONE) {
+             return new BackgroundFrequencyForLong() {
+                 @Override
+                 public long freq(long term) throws IOException {
+                     return getBackgroundFrequency(term);
+                 }
+ 
+                 @Override
+                 public void close() {}
+             };
+         }
+         return new BackgroundFrequencyForLong() {
+             private final LongHash termToPosition = new LongHash(1, bigArrays);
+             private LongArray positionToFreq = bigArrays.newLongArray(1, false);
+ 
+             @Override
+             public long freq(long term) throws IOException {
+                 long position = termToPosition.add(term);
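+                 // Same slot-caching pattern as bytesLookup: a negative return
+                 // means this term's frequency is already cached.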
+                 if (position < 0) {
+                     return positionToFreq.get(-1 - position);
+                 }
+                 long freq = getBackgroundFrequency(term);
+                 positionToFreq = bigArrays.grow(positionToFreq, position + 1);
+                 positionToFreq.set(position, freq);
+                 return freq;
+             }
+ 
+             @Override
+             public void close() {
+                 Releasables.close(termToPosition, positionToFreq);
+             }
+         };
+     }
+ 
+     /**
+      * Get the background frequency of a {@code long} term.
+      */
+     private long getBackgroundFrequency(long term) throws IOException {
+         return getBackgroundFrequency(fieldType.termQuery(format.format(term).toString(), context));
+     }
+ 
+     private long getBackgroundFrequency(Query query) throws IOException {
+         if (query instanceof TermQuery) {
+             // for types that use the inverted index, we prefer using a terms
+             // enum that will do a better job at reusing index inputs
+             Term term = ((TermQuery) query).getTerm();
+             TermsEnum termsEnum = getTermsEnum(term.field());
+             if (termsEnum.seekExact(term.bytes())) {
+                 return termsEnum.docFreq();
+             }
+             return 0;
+         }
+         // otherwise do it the naive way
+         if (backgroundFilter != null) {
+             query = new BooleanQuery.Builder().add(query, Occur.FILTER).add(backgroundFilter, Occur.FILTER).build();
+         }
+         return context.searcher().count(query);
+     }
+ 
+     private TermsEnum getTermsEnum(String field) throws IOException {
+         // TODO this method helps because of asMultiBucketAggregator. Once we remove it we can move this logic into the aggregators.
+         if (termsEnum != null) {
+             return termsEnum;
+         }
+         IndexReader reader = context.getIndexReader();
+         termsEnum = new FilterableTermsEnum(reader, fieldType.name(), PostingsEnum.NONE, backgroundFilter);
+         return termsEnum;
+     }
+ 
+ }
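+ // A hypothetical usage sketch (lookup construction elided): the many-bucket
+ // variants hold BigArrays-backed state, so callers must release them, e.g.:
+ //
+ //     try (SignificanceLookup.BackgroundFrequencyForBytes bg =
+ //             lookup.bytesLookup(bigArrays, cardinality)) {
+ //         long backgroundFreq = bg.freq(new BytesRef("error"));
+ //     }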
+
+
+
diff --git a/htmlReport/ns-1/sources/source-28.html b/htmlReport/ns-1/sources/source-28.html
new file mode 100644
index 0000000000000..5f43d38405d00
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-28.html
@@ -0,0 +1,344 @@
+ Coverage Report > SignificantLongTerms

Coverage Summary for Class: SignificantLongTerms (org.opensearch.search.aggregations.bucket.terms)

Class                        Method, %    Line, %
SignificantLongTerms         0% (0/8)     0% (0/10)
SignificantLongTerms$Bucket  0% (0/9)     0% (0/23)
Total                        0% (0/17)    0% (0/33)
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.opensearch.core.common.io.stream.StreamInput;
+ import org.opensearch.core.common.io.stream.StreamOutput;
+ import org.opensearch.core.xcontent.XContentBuilder;
+ import org.opensearch.search.DocValueFormat;
+ import org.opensearch.search.aggregations.InternalAggregations;
+ import org.opensearch.search.aggregations.bucket.terms.heuristic.SignificanceHeuristic;
+ 
+ import java.io.IOException;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Objects;
+ 
+ /**
+  * Result of running the significant_terms aggregation on a numeric field.
+  *
+  * @opensearch.internal
+  */
+ public class SignificantLongTerms extends InternalMappedSignificantTerms<SignificantLongTerms, SignificantLongTerms.Bucket> {
+     public static final String NAME = "siglterms";
+ 
+     /**
+      * Bucket for significant long values
+      *
+      * @opensearch.internal
+      */
+     static class Bucket extends InternalSignificantTerms.Bucket<Bucket> {
+ 
+         long term;
+ 
+         Bucket(
+             long subsetDf,
+             long subsetSize,
+             long supersetDf,
+             long supersetSize,
+             long term,
+             InternalAggregations aggregations,
+             DocValueFormat format,
+             double score
+         ) {
+             super(subsetDf, subsetSize, supersetDf, supersetSize, aggregations, format);
+             this.term = term;
+             this.score = score;
+         }
+ 
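+         // The read order below must mirror writeTo's write order.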
+         Bucket(StreamInput in, long subsetSize, long supersetSize, DocValueFormat format) throws IOException {
+             super(in, subsetSize, supersetSize, format);
+             subsetDf = in.readVLong();
+             supersetDf = in.readVLong();
+             term = in.readLong();
+             score = in.readDouble();
+             aggregations = InternalAggregations.readFrom(in);
+         }
+ 
+         @Override
+         public void writeTo(StreamOutput out) throws IOException {
+             out.writeVLong(subsetDf);
+             out.writeVLong(supersetDf);
+             out.writeLong(term);
+             out.writeDouble(getSignificanceScore());
+             aggregations.writeTo(out);
+         }
+ 
+         @Override
+         public Object getKey() {
+             return term;
+         }
+ 
+         @Override
+         public String getKeyAsString() {
+             return format.format(term).toString();
+         }
+ 
+         @Override
+         public Number getKeyAsNumber() {
+             return term;
+         }
+ 
+         @Override
+         protected XContentBuilder keyToXContent(XContentBuilder builder) throws IOException {
+             builder.field(CommonFields.KEY.getPreferredName(), term);
+             if (format != DocValueFormat.RAW) {
+                 builder.field(CommonFields.KEY_AS_STRING.getPreferredName(), format.format(term));
+             }
+             return builder;
+         }
+ 
+         @Override
+         public boolean equals(Object obj) {
+             return super.equals(obj) && Objects.equals(term, ((Bucket) obj).term);
+         }
+ 
+         @Override
+         public int hashCode() {
+             return Objects.hash(super.hashCode(), term);
+         }
+     }
+ 
+     public SignificantLongTerms(
+         String name,
+         Map<String, Object> metadata,
+         DocValueFormat format,
+         long subsetSize,
+         long supersetSize,
+         SignificanceHeuristic significanceHeuristic,
+         List<Bucket> buckets,
+         TermsAggregator.BucketCountThresholds bucketCountThresholds
+     ) {
+         super(name, metadata, format, subsetSize, supersetSize, significanceHeuristic, buckets, bucketCountThresholds);
+     }
+ 
+     /**
+      * Read from a stream.
+      */
+     public SignificantLongTerms(StreamInput in) throws IOException {
+         super(in, Bucket::new);
+     }
+ 
+     @Override
+     public String getWriteableName() {
+         return NAME;
+     }
+ 
+     @Override
+     public SignificantLongTerms create(List<SignificantLongTerms.Bucket> buckets) {
+         return new SignificantLongTerms(
+             name,
+             metadata,
+             format,
+             subsetSize,
+             supersetSize,
+             significanceHeuristic,
+             buckets,
+             bucketCountThresholds
+         );
+     }
+ 
+     @Override
+     public Bucket createBucket(InternalAggregations aggregations, SignificantLongTerms.Bucket prototype) {
+         return new Bucket(
+             prototype.subsetDf,
+             prototype.subsetSize,
+             prototype.supersetDf,
+             prototype.supersetSize,
+             prototype.term,
+             aggregations,
+             prototype.format,
+             prototype.score
+         );
+     }
+ 
+     @Override
+     protected SignificantLongTerms create(long subsetSize, long supersetSize, List<Bucket> buckets) {
+         return new SignificantLongTerms(
+             getName(),
+             getMetadata(),
+             format,
+             subsetSize,
+             supersetSize,
+             significanceHeuristic,
+             buckets,
+             bucketCountThresholds
+         );
+     }
+ 
+     @Override
+     protected Bucket[] createBucketsArray(int size) {
+         return new Bucket[size];
+     }
+ 
+     @Override
+     Bucket createBucket(
+         long subsetDf,
+         long subsetSize,
+         long supersetDf,
+         long supersetSize,
+         InternalAggregations aggregations,
+         SignificantLongTerms.Bucket prototype
+     ) {
+         return new Bucket(subsetDf, subsetSize, supersetDf, supersetSize, prototype.term, aggregations, format, prototype.score);
+     }
+ }
+
+
+
diff --git a/htmlReport/ns-1/sources/source-29.html b/htmlReport/ns-1/sources/source-29.html
new file mode 100644
index 0000000000000..a1e7fc9357208
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-29.html
@@ -0,0 +1,349 @@
+ Coverage Report > SignificantStringTerms

Coverage Summary for Class: SignificantStringTerms (org.opensearch.search.aggregations.bucket.terms)

Class                          Method, %    Line, %
SignificantStringTerms         0% (0/8)     0% (0/10)
SignificantStringTerms$Bucket  0% (0/9)     0% (0/23)
Total                          0% (0/17)    0% (0/33)
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.apache.lucene.util.BytesRef;
+ import org.opensearch.core.common.io.stream.StreamInput;
+ import org.opensearch.core.common.io.stream.StreamOutput;
+ import org.opensearch.core.xcontent.XContentBuilder;
+ import org.opensearch.search.DocValueFormat;
+ import org.opensearch.search.aggregations.InternalAggregations;
+ import org.opensearch.search.aggregations.bucket.terms.heuristic.SignificanceHeuristic;
+ 
+ import java.io.IOException;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Objects;
+ 
+ /**
+  * Result of running the significant_terms aggregation on a string field.
+  *
+  * @opensearch.internal
+  */
+ public class SignificantStringTerms extends InternalMappedSignificantTerms<SignificantStringTerms, SignificantStringTerms.Bucket> {
+     public static final String NAME = "sigsterms";
+ 
+     /**
+      * Bucket for significant string values
+      *
+      * @opensearch.internal
+      */
+     public static class Bucket extends InternalSignificantTerms.Bucket<Bucket> {
+ 
+         BytesRef termBytes;
+ 
+         public Bucket(
+             BytesRef term,
+             long subsetDf,
+             long subsetSize,
+             long supersetDf,
+             long supersetSize,
+             InternalAggregations aggregations,
+             DocValueFormat format,
+             double score
+         ) {
+             super(subsetDf, subsetSize, supersetDf, supersetSize, aggregations, format);
+             this.termBytes = term;
+             this.score = score;
+         }
+ 
+         /**
+          * Read from a stream.
+          */
+         public Bucket(StreamInput in, long subsetSize, long supersetSize, DocValueFormat format) throws IOException {
+             super(in, subsetSize, supersetSize, format);
+             termBytes = in.readBytesRef();
+             subsetDf = in.readVLong();
+             supersetDf = in.readVLong();
+             score = in.readDouble();
+             aggregations = InternalAggregations.readFrom(in);
+         }
+ 
+         @Override
+         public void writeTo(StreamOutput out) throws IOException {
+             out.writeBytesRef(termBytes);
+             out.writeVLong(subsetDf);
+             out.writeVLong(supersetDf);
+             out.writeDouble(getSignificanceScore());
+             aggregations.writeTo(out);
+         }
+ 
+         @Override
+         public Number getKeyAsNumber() {
+             // this method is needed for scripted numeric aggregations
+             return Double.parseDouble(termBytes.utf8ToString());
+         }
+ 
+         @Override
+         public String getKeyAsString() {
+             return format.format(termBytes).toString();
+         }
+ 
+         @Override
+         public String getKey() {
+             return getKeyAsString();
+         }
+ 
+         @Override
+         protected XContentBuilder keyToXContent(XContentBuilder builder) throws IOException {
+             return builder.field(CommonFields.KEY.getPreferredName(), getKeyAsString());
+         }
+ 
+         @Override
+         public boolean equals(Object obj) {
+             if (this == obj) return true;
+             if (obj == null || getClass() != obj.getClass()) return false;
+             if (super.equals(obj) == false) return false;
+ 
+             return Objects.equals(termBytes, ((SignificantStringTerms.Bucket) obj).termBytes);
+         }
+ 
+         @Override
+         public int hashCode() {
+             return Objects.hash(super.hashCode(), termBytes);
+         }
+     }
+ 
+     public SignificantStringTerms(
+         String name,
+         Map<String, Object> metadata,
+         DocValueFormat format,
+         long subsetSize,
+         long supersetSize,
+         SignificanceHeuristic significanceHeuristic,
+         List<Bucket> buckets,
+         TermsAggregator.BucketCountThresholds bucketCountThresholds
+     ) {
+         super(name, metadata, format, subsetSize, supersetSize, significanceHeuristic, buckets, bucketCountThresholds);
+     }
+ 
+     /**
+      * Read from a stream.
+      */
+     public SignificantStringTerms(StreamInput in) throws IOException {
+         super(in, Bucket::new);
+     }
+ 
+     @Override
+     public String getWriteableName() {
+         return NAME;
+     }
+ 
+     @Override
+     public SignificantStringTerms create(List<SignificantStringTerms.Bucket> buckets) {
+         return new SignificantStringTerms(
+             name,
+             metadata,
+             format,
+             subsetSize,
+             supersetSize,
+             significanceHeuristic,
+             buckets,
+             bucketCountThresholds
+         );
+     }
+ 
+     @Override
+     public Bucket createBucket(InternalAggregations aggregations, SignificantStringTerms.Bucket prototype) {
+         return new Bucket(
+             prototype.termBytes,
+             prototype.subsetDf,
+             prototype.subsetSize,
+             prototype.supersetDf,
+             prototype.supersetSize,
+             aggregations,
+             prototype.format,
+             prototype.score
+         );
+     }
+ 
+     @Override
+     protected SignificantStringTerms create(long subsetSize, long supersetSize, List<Bucket> buckets) {
+         return new SignificantStringTerms(
+             getName(),
+             getMetadata(),
+             format,
+             subsetSize,
+             supersetSize,
+             significanceHeuristic,
+             buckets,
+             bucketCountThresholds
+         );
+     }
+ 
+     @Override
+     protected Bucket[] createBucketsArray(int size) {
+         return new Bucket[size];
+     }
+ 
+     @Override
+     Bucket createBucket(
+         long subsetDf,
+         long subsetSize,
+         long supersetDf,
+         long supersetSize,
+         InternalAggregations aggregations,
+         SignificantStringTerms.Bucket prototype
+     ) {
+         return new Bucket(prototype.termBytes, subsetDf, subsetSize, supersetDf, supersetSize, aggregations, format, prototype.score);
+     }
+ }
+
+
+
diff --git a/htmlReport/ns-1/sources/source-2a.html b/htmlReport/ns-1/sources/source-2a.html
new file mode 100644
index 0000000000000..bea1aff8e29f7
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-2a.html
@@ -0,0 +1,502 @@
+ Coverage Report > SignificantTermsAggregationBuilder

Coverage Summary for Class: SignificantTermsAggregationBuilder (org.opensearch.search.aggregations.bucket.terms)

Class                                Class, %    Method, %    Line, %
SignificantTermsAggregationBuilder  100% (1/1)  5.6% (2/36)  13.9% (16/115)
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.opensearch.Version;
+ import org.opensearch.core.ParseField;
+ import org.opensearch.core.common.io.stream.StreamInput;
+ import org.opensearch.core.common.io.stream.StreamOutput;
+ import org.opensearch.core.xcontent.ObjectParser;
+ import org.opensearch.core.xcontent.XContentBuilder;
+ import org.opensearch.core.xcontent.XContentParser;
+ import org.opensearch.index.query.QueryBuilder;
+ import org.opensearch.index.query.QueryRewriteContext;
+ import org.opensearch.index.query.QueryShardContext;
+ import org.opensearch.search.aggregations.AggregationBuilder;
+ import org.opensearch.search.aggregations.AggregatorFactories;
+ import org.opensearch.search.aggregations.AggregatorFactory;
+ import org.opensearch.search.aggregations.bucket.terms.TermsAggregator.BucketCountThresholds;
+ import org.opensearch.search.aggregations.bucket.terms.heuristic.JLHScore;
+ import org.opensearch.search.aggregations.bucket.terms.heuristic.SignificanceHeuristic;
+ import org.opensearch.search.aggregations.support.CoreValuesSourceType;
+ import org.opensearch.search.aggregations.support.ValuesSourceAggregationBuilder;
+ import org.opensearch.search.aggregations.support.ValuesSourceAggregatorFactory;
+ import org.opensearch.search.aggregations.support.ValuesSourceConfig;
+ import org.opensearch.search.aggregations.support.ValuesSourceRegistry;
+ import org.opensearch.search.aggregations.support.ValuesSourceType;
+ 
+ import java.io.IOException;
+ import java.util.Map;
+ import java.util.Objects;
+ 
+ import static org.opensearch.index.query.AbstractQueryBuilder.parseInnerQueryBuilder;
+ 
+ /**
+  * Aggregation Builder for significant terms agg
+  *
+  * @opensearch.internal
+  */
+ public class SignificantTermsAggregationBuilder extends ValuesSourceAggregationBuilder<SignificantTermsAggregationBuilder> {
+     public static final String NAME = "significant_terms";
+     public static final ValuesSourceRegistry.RegistryKey<SignificantTermsAggregatorSupplier> REGISTRY_KEY =
+         new ValuesSourceRegistry.RegistryKey<>(NAME, SignificantTermsAggregatorSupplier.class);
+ 
+     static final ParseField BACKGROUND_FILTER = new ParseField("background_filter");
+     static final ParseField HEURISTIC = new ParseField("significance_heuristic");
+ 
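+     // Argument order below is (minDocCount, shardMinDocCount, requiredSize, shardSize);
+     // shardSize = -1 means a shard-side size is derived from the required size.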
+     static final TermsAggregator.BucketCountThresholds DEFAULT_BUCKET_COUNT_THRESHOLDS = new TermsAggregator.BucketCountThresholds(
+         3,
+         0,
+         10,
+         -1
+     );
+     static final SignificanceHeuristic DEFAULT_SIGNIFICANCE_HEURISTIC = new JLHScore();
+ 
+     private static final ObjectParser<SignificantTermsAggregationBuilder, Void> PARSER = new ObjectParser<>(
+         SignificantTermsAggregationBuilder.NAME,
+         SignificanceHeuristic.class,
+         SignificantTermsAggregationBuilder::significanceHeuristic,
+         null
+     );
+     static {
+         ValuesSourceAggregationBuilder.declareFields(PARSER, true, true, false);
+ 
+         PARSER.declareInt(SignificantTermsAggregationBuilder::shardSize, TermsAggregationBuilder.SHARD_SIZE_FIELD_NAME);
+ 
+         PARSER.declareLong(SignificantTermsAggregationBuilder::minDocCount, TermsAggregationBuilder.MIN_DOC_COUNT_FIELD_NAME);
+ 
+         PARSER.declareLong(SignificantTermsAggregationBuilder::shardMinDocCount, TermsAggregationBuilder.SHARD_MIN_DOC_COUNT_FIELD_NAME);
+ 
+         PARSER.declareInt(SignificantTermsAggregationBuilder::size, TermsAggregationBuilder.REQUIRED_SIZE_FIELD_NAME);
+ 
+         PARSER.declareString(SignificantTermsAggregationBuilder::executionHint, TermsAggregationBuilder.EXECUTION_HINT_FIELD_NAME);
+ 
+         PARSER.declareObject(
+             SignificantTermsAggregationBuilder::backgroundFilter,
+             (p, context) -> parseInnerQueryBuilder(p),
+             SignificantTermsAggregationBuilder.BACKGROUND_FILTER
+         );
+ 
+         PARSER.declareField(
+             (b, v) -> b.includeExclude(IncludeExclude.merge(v, b.includeExclude())),
+             IncludeExclude::parseInclude,
+             IncludeExclude.INCLUDE_FIELD,
+             ObjectParser.ValueType.OBJECT_ARRAY_OR_STRING
+         );
+ 
+         PARSER.declareField(
+             (b, v) -> b.includeExclude(IncludeExclude.merge(b.includeExclude(), v)),
+             IncludeExclude::parseExclude,
+             IncludeExclude.EXCLUDE_FIELD,
+             ObjectParser.ValueType.STRING_ARRAY
+         );
+     }
+ 
+     public static SignificantTermsAggregationBuilder parse(String aggregationName, XContentParser parser) throws IOException {
+         return PARSER.parse(parser, new SignificantTermsAggregationBuilder(aggregationName), null);
+     }
+ 
+     public static void registerAggregators(ValuesSourceRegistry.Builder builder) {
+         SignificantTermsAggregatorFactory.registerAggregators(builder);
+     }
+ 
+     private IncludeExclude includeExclude = null;
+     private String executionHint = null;
+     private QueryBuilder filterBuilder = null;
+     private TermsAggregator.BucketCountThresholds bucketCountThresholds = new BucketCountThresholds(DEFAULT_BUCKET_COUNT_THRESHOLDS);
+     private SignificanceHeuristic significanceHeuristic = DEFAULT_SIGNIFICANCE_HEURISTIC;
+ 
+     public SignificantTermsAggregationBuilder(String name) {
+         super(name);
+     }
+ 
+     /**
+      * Read from a Stream.
+      */
+     public SignificantTermsAggregationBuilder(StreamInput in) throws IOException {
+         super(in);
+         bucketCountThresholds = new BucketCountThresholds(in);
+         executionHint = in.readOptionalString();
+         filterBuilder = in.readOptionalNamedWriteable(QueryBuilder.class);
+         includeExclude = in.readOptionalWriteable(IncludeExclude::new);
+         significanceHeuristic = in.readNamedWriteable(SignificanceHeuristic.class);
+     }
+ 
+     protected SignificantTermsAggregationBuilder(
+         SignificantTermsAggregationBuilder clone,
+         AggregatorFactories.Builder factoriesBuilder,
+         Map<String, Object> metadata
+     ) {
+         super(clone, factoriesBuilder, metadata);
+         this.bucketCountThresholds = new BucketCountThresholds(clone.bucketCountThresholds);
+         this.executionHint = clone.executionHint;
+         this.filterBuilder = clone.filterBuilder;
+         this.includeExclude = clone.includeExclude;
+         this.significanceHeuristic = clone.significanceHeuristic;
+     }
+ 
+     @Override
+     protected ValuesSourceType defaultValueSourceType() {
+         return CoreValuesSourceType.BYTES;
+     }
+ 
+     @Override
+     protected SignificantTermsAggregationBuilder shallowCopy(AggregatorFactories.Builder factoriesBuilder, Map<String, Object> metadata) {
+         return new SignificantTermsAggregationBuilder(this, factoriesBuilder, metadata);
+     }
+ 
+     @Override
+     protected AggregationBuilder doRewrite(QueryRewriteContext queryShardContext) throws IOException {
+         if (filterBuilder != null) {
+             QueryBuilder rewrittenFilter = filterBuilder.rewrite(queryShardContext);
+             if (rewrittenFilter != filterBuilder) {
+                 SignificantTermsAggregationBuilder rewritten = shallowCopy(factoriesBuilder, metadata);
+                 rewritten.backgroundFilter(rewrittenFilter);
+                 return rewritten;
+             }
+         }
+         return super.doRewrite(queryShardContext);
+     }
+ 
+     @Override
+     protected void innerWriteTo(StreamOutput out) throws IOException {
+         bucketCountThresholds.writeTo(out);
+         out.writeOptionalString(executionHint);
+         out.writeOptionalNamedWriteable(filterBuilder);
+         out.writeOptionalWriteable(includeExclude);
+         out.writeNamedWriteable(significanceHeuristic);
+     }
+ 
+     @Override
+     protected boolean serializeTargetValueType(Version version) {
+         return true;
+     }
+ 
+     protected TermsAggregator.BucketCountThresholds getBucketCountThresholds() {
+         return new TermsAggregator.BucketCountThresholds(bucketCountThresholds);
+     }
+ 
+     public TermsAggregator.BucketCountThresholds bucketCountThresholds() {
+         return bucketCountThresholds;
+     }
+ 
+     public SignificantTermsAggregationBuilder bucketCountThresholds(TermsAggregator.BucketCountThresholds bucketCountThresholds) {
+         if (bucketCountThresholds == null) {
+             throw new IllegalArgumentException("[bucketCountThresholds] must not be null: [" + name + "]");
+         }
+         this.bucketCountThresholds = bucketCountThresholds;
+         return this;
+     }
+ 
+     /**
+      * Sets the size - indicating how many term buckets should be returned
+      * (defaults to 10)
+      */
+     public SignificantTermsAggregationBuilder size(int size) {
+         if (size <= 0) {
+             throw new IllegalArgumentException("[size] must be greater than 0. Found [" + size + "] in [" + name + "]");
+         }
+         bucketCountThresholds.setRequiredSize(size);
+         return this;
+     }
+ 
+     /**
+      * Sets the shard_size - indicating the number of term buckets each shard
+      * will return to the coordinating node (the node that coordinates the
+      * search execution). The higher the shard size is, the more accurate the
+      * results are.
+      */
+     public SignificantTermsAggregationBuilder shardSize(int shardSize) {
+         if (shardSize <= 0) {
+             throw new IllegalArgumentException("[shardSize] must be greater than  0. Found [" + shardSize + "] in [" + name + "]");
+         }
+         bucketCountThresholds.setShardSize(shardSize);
+         return this;
+     }
+ 
+     /**
+      * Set the minimum document count terms should have in order to appear in
+      * the response.
+      */
+     public SignificantTermsAggregationBuilder minDocCount(long minDocCount) {
+         if (minDocCount < 0) {
+             throw new IllegalArgumentException(
+                 "[minDocCount] must be greater than or equal to 0. Found [" + minDocCount + "] in [" + name + "]"
+             );
+         }
+         bucketCountThresholds.setMinDocCount(minDocCount);
+         return this;
+     }
+ 
+     /**
+      * Set the minimum document count terms should have on the shard in order to
+      * appear in the response.
+      */
+     public SignificantTermsAggregationBuilder shardMinDocCount(long shardMinDocCount) {
+         if (shardMinDocCount < 0) {
+             throw new IllegalArgumentException(
+                 "[shardMinDocCount] must be greater than or equal to 0. Found [" + shardMinDocCount + "] in [" + name + "]"
+             );
+         }
+         bucketCountThresholds.setShardMinDocCount(shardMinDocCount);
+         return this;
+     }
+ 
+     /**
+      * Expert: sets an execution hint to the aggregation.
+      */
+     public SignificantTermsAggregationBuilder executionHint(String executionHint) {
+         this.executionHint = executionHint;
+         return this;
+     }
+ 
+     /**
+      * Expert: gets an execution hint to the aggregation.
+      */
+     public String executionHint() {
+         return executionHint;
+     }
+ 
+     public SignificantTermsAggregationBuilder backgroundFilter(QueryBuilder backgroundFilter) {
+         if (backgroundFilter == null) {
+             throw new IllegalArgumentException("[backgroundFilter] must not be null: [" + name + "]");
+         }
+         this.filterBuilder = backgroundFilter;
+         return this;
+     }
+ 
+     public QueryBuilder backgroundFilter() {
+         return filterBuilder;
+     }
+ 
+     /**
+      * Set terms to include and exclude from the aggregation results
+      */
+     public SignificantTermsAggregationBuilder includeExclude(IncludeExclude includeExclude) {
+         this.includeExclude = includeExclude;
+         return this;
+     }
+ 
+     /**
+      * Get terms to include and exclude from the aggregation results
+      */
+     public IncludeExclude includeExclude() {
+         return includeExclude;
+     }
+ 
+     public SignificantTermsAggregationBuilder significanceHeuristic(SignificanceHeuristic significanceHeuristic) {
+         if (significanceHeuristic == null) {
+             throw new IllegalArgumentException("[significanceHeuristic] must not be null: [" + name + "]");
+         }
+         this.significanceHeuristic = significanceHeuristic;
+         return this;
+     }
+ 
+     public SignificanceHeuristic significanceHeuristic() {
+         return significanceHeuristic;
+     }
+ 
+     @Override
+     public BucketCardinality bucketCardinality() {
+         return BucketCardinality.MANY;
+     }
+ 
+     @Override
+     protected ValuesSourceAggregatorFactory innerBuild(
+         QueryShardContext queryShardContext,
+         ValuesSourceConfig config,
+         AggregatorFactory parent,
+         AggregatorFactories.Builder subFactoriesBuilder
+     ) throws IOException {
+         SignificanceHeuristic executionHeuristic = this.significanceHeuristic.rewrite(queryShardContext);
+         return new SignificantTermsAggregatorFactory(
+             name,
+             config,
+             includeExclude,
+             executionHint,
+             filterBuilder,
+             bucketCountThresholds,
+             executionHeuristic,
+             queryShardContext,
+             parent,
+             subFactoriesBuilder,
+             metadata
+         );
+     }
+ 
+     @Override
+     protected XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
+         bucketCountThresholds.toXContent(builder, params);
+         if (executionHint != null) {
+             builder.field(TermsAggregationBuilder.EXECUTION_HINT_FIELD_NAME.getPreferredName(), executionHint);
+         }
+         if (filterBuilder != null) {
+             builder.field(BACKGROUND_FILTER.getPreferredName(), filterBuilder);
+         }
+         if (includeExclude != null) {
+             includeExclude.toXContent(builder, params);
+         }
+         significanceHeuristic.toXContent(builder, params);
+         return builder;
+     }
+ 
+     @Override
+     public int hashCode() {
+         return Objects.hash(super.hashCode(), bucketCountThresholds, executionHint, filterBuilder, includeExclude, significanceHeuristic);
+     }
+ 
+     @Override
+     public boolean equals(Object obj) {
+         if (this == obj) return true;
+         if (obj == null || getClass() != obj.getClass()) return false;
+         if (super.equals(obj) == false) return false;
+         SignificantTermsAggregationBuilder other = (SignificantTermsAggregationBuilder) obj;
+         return Objects.equals(bucketCountThresholds, other.bucketCountThresholds)
+             && Objects.equals(executionHint, other.executionHint)
+             && Objects.equals(filterBuilder, other.filterBuilder)
+             && Objects.equals(includeExclude, other.includeExclude)
+             && Objects.equals(significanceHeuristic, other.significanceHeuristic);
+     }
+ 
+     @Override
+     public String getType() {
+         return NAME;
+     }
+ 
+     @Override
+     protected ValuesSourceRegistry.RegistryKey<?> getRegistryKey() {
+         return REGISTRY_KEY;
+     }
+ }
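+ // A hypothetical usage sketch (field, query, and names are illustrative):
+ //
+ //     SignificantTermsAggregationBuilder agg =
+ //         new SignificantTermsAggregationBuilder("sig-tags")
+ //             .field("tags")
+ //             .minDocCount(5)
+ //             .backgroundFilter(QueryBuilders.termQuery("status", "published"))
+ //             .significanceHeuristic(new JLHScore());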
+
+
+
diff --git a/htmlReport/ns-1/sources/source-2b.html b/htmlReport/ns-1/sources/source-2b.html
new file mode 100644
index 0000000000000..5acb19dede281
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-2b.html
@@ -0,0 +1,680 @@
+ Coverage Report > SignificantTermsAggregatorFactory

Coverage Summary for Class: SignificantTermsAggregatorFactory (org.opensearch.search.aggregations.bucket.terms)

Class                                               Method, %     Line, %
SignificantTermsAggregatorFactory                   50% (4/8)     27.3% (9/33)
SignificantTermsAggregatorFactory$1                 50% (1/2)     9.1% (1/11)
SignificantTermsAggregatorFactory$2                 33.3% (1/3)   9.1% (1/11)
SignificantTermsAggregatorFactory$3                 0% (0/2)      0% (0/2)
SignificantTermsAggregatorFactory$ExecutionMode     0% (0/5)      0% (0/14)
SignificantTermsAggregatorFactory$ExecutionMode$1   0% (0/3)      0% (0/7)
SignificantTermsAggregatorFactory$ExecutionMode$2   0% (0/3)      0% (0/10)
Total                                               23.1% (6/26)  12.5% (11/88)
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ 
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.opensearch.common.logging.DeprecationLogger;
+ import org.opensearch.core.ParseField;
+ import org.opensearch.index.query.QueryBuilder;
+ import org.opensearch.index.query.QueryShardContext;
+ import org.opensearch.search.DocValueFormat;
+ import org.opensearch.search.aggregations.Aggregator;
+ import org.opensearch.search.aggregations.Aggregator.SubAggCollectionMode;
+ import org.opensearch.search.aggregations.AggregatorFactories;
+ import org.opensearch.search.aggregations.AggregatorFactory;
+ import org.opensearch.search.aggregations.CardinalityUpperBound;
+ import org.opensearch.search.aggregations.InternalAggregation;
+ import org.opensearch.search.aggregations.NonCollectingAggregator;
+ import org.opensearch.search.aggregations.bucket.BucketUtils;
+ import org.opensearch.search.aggregations.bucket.terms.TermsAggregator.BucketCountThresholds;
+ import org.opensearch.search.aggregations.bucket.terms.heuristic.SignificanceHeuristic;
+ import org.opensearch.search.aggregations.support.CoreValuesSourceType;
+ import org.opensearch.search.aggregations.support.ValuesSource;
+ import org.opensearch.search.aggregations.support.ValuesSourceAggregatorFactory;
+ import org.opensearch.search.aggregations.support.ValuesSourceConfig;
+ import org.opensearch.search.aggregations.support.ValuesSourceRegistry;
+ import org.opensearch.search.internal.SearchContext;
+ 
+ import java.io.IOException;
+ import java.util.Arrays;
+ import java.util.Map;
+ 
+ /**
+  * Aggregation Factory for significant_terms agg
+  *
+  * @opensearch.internal
+  */
+ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFactory {
+     private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(SignificantTermsAggregatorFactory.class);
+ 
+     static void registerAggregators(ValuesSourceRegistry.Builder builder) {
+         builder.register(
+             SignificantTermsAggregationBuilder.REGISTRY_KEY,
+             Arrays.asList(CoreValuesSourceType.BYTES, CoreValuesSourceType.IP),
+             SignificantTermsAggregatorFactory.bytesSupplier(),
+             true
+         );
+ 
+         builder.register(
+             SignificantTermsAggregationBuilder.REGISTRY_KEY,
+             Arrays.asList(CoreValuesSourceType.DATE, CoreValuesSourceType.BOOLEAN, CoreValuesSourceType.NUMERIC),
+             SignificantTermsAggregatorFactory.numericSupplier(),
+             true
+         );
+     }
+ 
+     /**
+      * This supplier is used for all the field types that should be aggregated as bytes/strings,
+      * including those that need global ordinals
+      */
+     private static SignificantTermsAggregatorSupplier bytesSupplier() {
+         return new SignificantTermsAggregatorSupplier() {
+             @Override
+             public Aggregator build(
+                 String name,
+                 AggregatorFactories factories,
+                 ValuesSource valuesSource,
+                 DocValueFormat format,
+                 TermsAggregator.BucketCountThresholds bucketCountThresholds,
+                 IncludeExclude includeExclude,
+                 String executionHint,
+                 SearchContext context,
+                 Aggregator parent,
+                 SignificanceHeuristic significanceHeuristic,
+                 SignificanceLookup lookup,
+                 CardinalityUpperBound cardinality,
+                 Map<String, Object> metadata
+             ) throws IOException {
+ 
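+                 // Resolve the execution mode: honor an explicit hint first, force MAP for sources
+                 // without global ordinals, and otherwise default to GLOBAL_ORDINALS.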
+                 ExecutionMode execution = null;
+                 if (executionHint != null) {
+                     execution = ExecutionMode.fromString(executionHint, deprecationLogger);
+                 }
+                 if (valuesSource instanceof ValuesSource.Bytes.WithOrdinals == false) {
+                     execution = ExecutionMode.MAP;
+                 }
+                 if (execution == null) {
+                     execution = ExecutionMode.GLOBAL_ORDINALS;
+                 }
+ 
+                 if ((includeExclude != null) && (includeExclude.isRegexBased()) && format != DocValueFormat.RAW) {
+                     throw new IllegalArgumentException(
+                         "Aggregation ["
+                             + name
+                             + "] cannot support regular expression style "
+                             + "include/exclude settings as they can only be applied to string fields. Use an array of values for "
+                             + "include/exclude clauses"
+                     );
+                 }
+ 
+                 return execution.create(
+                     name,
+                     factories,
+                     valuesSource,
+                     format,
+                     bucketCountThresholds,
+                     includeExclude,
+                     context,
+                     parent,
+                     significanceHeuristic,
+                     lookup,
+                     cardinality,
+                     metadata
+                 );
+             }
+         };
+     }
+ 
+     /**
+      * This supplier is used for all fields that expect to be aggregated as a numeric value.
+      * This includes floating points and formatted types that use numerics internally for storage (date, boolean, etc.)
+      */
+     private static SignificantTermsAggregatorSupplier numericSupplier() {
+         return new SignificantTermsAggregatorSupplier() {
+             @Override
+             public Aggregator build(
+                 String name,
+                 AggregatorFactories factories,
+                 ValuesSource valuesSource,
+                 DocValueFormat format,
+                 TermsAggregator.BucketCountThresholds bucketCountThresholds,
+                 IncludeExclude includeExclude,
+                 String executionHint,
+                 SearchContext context,
+                 Aggregator parent,
+                 SignificanceHeuristic significanceHeuristic,
+                 SignificanceLookup lookup,
+                 CardinalityUpperBound cardinality,
+                 Map<String, Object> metadata
+             ) throws IOException {
+ 
+                 if ((includeExclude != null) && (includeExclude.isRegexBased())) {
+                     throw new IllegalArgumentException(
+                         "Aggregation ["
+                             + name
+                             + "] cannot support regular expression style include/exclude "
+                             + "settings as they can only be applied to string fields. Use an array of numeric "
+                             + "values for include/exclude clauses used to filter numeric fields"
+                     );
+                 }
+ 
+                 ValuesSource.Numeric numericValuesSource = (ValuesSource.Numeric) valuesSource;
+                 if (numericValuesSource.isFloatingPoint()) {
+                     throw new UnsupportedOperationException("No support for examining floating point numerics");
+                 }
+ 
+                 IncludeExclude.LongFilter longFilter = null;
+                 if (includeExclude != null) {
+                     longFilter = includeExclude.convertToLongFilter(format);
+                 }
+ 
+                 return new NumericTermsAggregator(
+                     name,
+                     factories,
+                     agg -> agg.new SignificantLongTermsResults(lookup, significanceHeuristic, cardinality),
+                     numericValuesSource,
+                     format,
+                     null,
+                     bucketCountThresholds,
+                     context,
+                     parent,
+                     SubAggCollectionMode.BREADTH_FIRST,
+                     longFilter,
+                     cardinality,
+                     metadata
+                 );
+             }
+         };
+     }
+ 
+     private final IncludeExclude includeExclude;
+     private final String executionHint;
+     private final QueryBuilder backgroundFilter;
+     private final TermsAggregator.BucketCountThresholds bucketCountThresholds;
+     private final SignificanceHeuristic significanceHeuristic;
+ 
+     SignificantTermsAggregatorFactory(
+         String name,
+         ValuesSourceConfig config,
+         IncludeExclude includeExclude,
+         String executionHint,
+         QueryBuilder backgroundFilter,
+         TermsAggregator.BucketCountThresholds bucketCountThresholds,
+         SignificanceHeuristic significanceHeuristic,
+         QueryShardContext queryShardContext,
+         AggregatorFactory parent,
+         AggregatorFactories.Builder subFactoriesBuilder,
+         Map<String, Object> metadata
+     ) throws IOException {
+         super(name, config, queryShardContext, parent, subFactoriesBuilder, metadata);
+ 
+         if (config.hasValues()) {
+             if (config.fieldContext().fieldType().isSearchable() == false) {
+                 throw new IllegalArgumentException(
+                     "Significant terms aggregation requires fields to be searchable, but ["
+                         + config.fieldContext().fieldType().name()
+                         + "] is not"
+                 );
+             }
+         }
+ 
+         this.includeExclude = includeExclude;
+         this.executionHint = executionHint;
+         this.backgroundFilter = backgroundFilter;
+         this.bucketCountThresholds = bucketCountThresholds;
+         this.significanceHeuristic = significanceHeuristic;
+     }
+ 
+     @Override
+     protected Aggregator createUnmapped(SearchContext searchContext, Aggregator parent, Map<String, Object> metadata) throws IOException {
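+         // For an unmapped field we still return an aggregator, but one that never collects and
+         // always builds the same empty UnmappedSignificantTerms result.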
+         final InternalAggregation aggregation = new UnmappedSignificantTerms(name, bucketCountThresholds, metadata);
+         return new NonCollectingAggregator(name, searchContext, parent, factories, metadata) {
+             @Override
+             public InternalAggregation buildEmptyAggregation() {
+                 return aggregation;
+             }
+         };
+     }
+ 
+     @Override
+     protected Aggregator doCreateInternal(
+         SearchContext searchContext,
+         Aggregator parent,
+         CardinalityUpperBound cardinality,
+         Map<String, Object> metadata
+     ) throws IOException {
+         SignificantTermsAggregatorSupplier aggregatorSupplier = queryShardContext.getValuesSourceRegistry()
+             .getAggregator(SignificantTermsAggregationBuilder.REGISTRY_KEY, config);
+ 
+         BucketCountThresholds bucketCountThresholds = new BucketCountThresholds(this.bucketCountThresholds);
+         if (bucketCountThresholds.getShardSize() == SignificantTermsAggregationBuilder.DEFAULT_BUCKET_COUNT_THRESHOLDS.getShardSize()) {
+             // The user has not made a shardSize selection.
+             // Use default heuristic to avoid any wrong-ranking caused by
+             // distributed counting but request double the usual amount.
+             // We typically need more than the number of "top" terms requested
+             // by other aggregations as the significance algorithm is in less
+             // of a position to down-select at shard-level - some of the things
+             // we want to find have only one occurrence on each shard and as
+             // such are impossible to differentiate from non-significant terms
+             // at that early stage.
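+             // For example (assuming BucketUtils.suggestShardSideQueueSize is roughly
+             // size * 1.5 + 10): a required size of 10 suggests ~25, so shardSize becomes ~50.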
+             bucketCountThresholds.setShardSize(2 * BucketUtils.suggestShardSideQueueSize(bucketCountThresholds.getRequiredSize()));
+         }
+ 
+         SignificanceLookup lookup = new SignificanceLookup(
+             queryShardContext,
+             config.fieldContext().fieldType(),
+             config.format(),
+             backgroundFilter
+         );
+ 
+         return aggregatorSupplier.build(
+             name,
+             factories,
+             config.getValuesSource(),
+             config.format(),
+             bucketCountThresholds,
+             includeExclude,
+             executionHint,
+             searchContext,
+             parent,
+             significanceHeuristic,
+             lookup,
+             cardinality,
+             metadata
+         );
+     }
+ 
+     @Override
+     protected boolean supportsConcurrentSegmentSearch() {
+         return true;
+     }
+ 
+     /**
+      * The execution mode for the significant terms agg
+      *
+      * @opensearch.internal
+      */
+     public enum ExecutionMode {
+ 
+         MAP(new ParseField("map")) {
+ 
+             @Override
+             Aggregator create(
+                 String name,
+                 AggregatorFactories factories,
+                 ValuesSource valuesSource,
+                 DocValueFormat format,
+                 TermsAggregator.BucketCountThresholds bucketCountThresholds,
+                 IncludeExclude includeExclude,
+                 SearchContext aggregationContext,
+                 Aggregator parent,
+                 SignificanceHeuristic significanceHeuristic,
+                 SignificanceLookup lookup,
+                 CardinalityUpperBound cardinality,
+                 Map<String, Object> metadata
+             ) throws IOException {
+                 int maxRegexLength = aggregationContext.getQueryShardContext().getIndexSettings().getMaxRegexLength();
+                 final IncludeExclude.StringFilter filter = includeExclude == null
+                     ? null
+                     : includeExclude.convertToStringFilter(format, maxRegexLength);
+                 return new MapStringTermsAggregator(
+                     name,
+                     factories,
+                     new MapStringTermsAggregator.ValuesSourceCollectorSource(valuesSource),
+                     a -> a.new SignificantTermsResults(lookup, significanceHeuristic, cardinality),
+                     null,
+                     format,
+                     bucketCountThresholds,
+                     filter,
+                     aggregationContext,
+                     parent,
+                     SubAggCollectionMode.BREADTH_FIRST,
+                     false,
+                     cardinality,
+                     metadata
+                 );
+ 
+             }
+ 
+         },
+         GLOBAL_ORDINALS(new ParseField("global_ordinals")) {
+ 
+             @Override
+             Aggregator create(
+                 String name,
+                 AggregatorFactories factories,
+                 ValuesSource valuesSource,
+                 DocValueFormat format,
+                 TermsAggregator.BucketCountThresholds bucketCountThresholds,
+                 IncludeExclude includeExclude,
+                 SearchContext aggregationContext,
+                 Aggregator parent,
+                 SignificanceHeuristic significanceHeuristic,
+                 SignificanceLookup lookup,
+                 CardinalityUpperBound cardinality,
+                 Map<String, Object> metadata
+             ) throws IOException {
+                 int maxRegexLength = aggregationContext.getQueryShardContext().getIndexSettings().getMaxRegexLength();
+                 final IncludeExclude.OrdinalsFilter filter = includeExclude == null
+                     ? null
+                     : includeExclude.convertToOrdinalsFilter(format, maxRegexLength);
+                 boolean remapGlobalOrd = true;
+                 if (cardinality == CardinalityUpperBound.ONE && factories == AggregatorFactories.EMPTY && includeExclude == null) {
+                     /*
+                      * We don't need to remap global ords iff this aggregator:
+                      *    - collects from a single bucket AND
+                      *    - has no include/exclude rules AND
+                      *    - has no sub-aggregator
+                      */
+                     remapGlobalOrd = false;
+                 }
+ 
+                 return new GlobalOrdinalsStringTermsAggregator(
+                     name,
+                     factories,
+                     a -> a.new SignificantTermsResults(lookup, significanceHeuristic, cardinality),
+                     (ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource,
+                     null,
+                     format,
+                     bucketCountThresholds,
+                     filter,
+                     aggregationContext,
+                     parent,
+                     remapGlobalOrd,
+                     SubAggCollectionMode.BREADTH_FIRST,
+                     false,
+                     cardinality,
+                     metadata
+                 );
+             }
+         };
+ 
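+         // Usage example (illustrative): fromString("map", deprecationLogger) returns MAP, while
+         // the deprecated "global_ordinals_hash" resolves to GLOBAL_ORDINALS and logs a warning.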
+         public static ExecutionMode fromString(String value, final DeprecationLogger deprecationLogger) {
+             if ("global_ordinals".equals(value)) {
+                 return GLOBAL_ORDINALS;
+             } else if ("global_ordinals_hash".equals(value)) {
+                 deprecationLogger.deprecate(
+                     "global_ordinals_hash",
+                     "global_ordinals_hash is deprecated. Please use [global_ordinals] instead."
+                 );
+                 return GLOBAL_ORDINALS;
+             } else if ("map".equals(value)) {
+                 return MAP;
+             }
+             throw new IllegalArgumentException("Unknown `execution_hint`: [" + value + "], expected any of [map, global_ordinals]");
+         }
+ 
+         private final ParseField parseField;
+ 
+         ExecutionMode(ParseField parseField) {
+             this.parseField = parseField;
+         }
+ 
+         abstract Aggregator create(
+             String name,
+             AggregatorFactories factories,
+             ValuesSource valuesSource,
+             DocValueFormat format,
+             TermsAggregator.BucketCountThresholds bucketCountThresholds,
+             IncludeExclude includeExclude,
+             SearchContext aggregationContext,
+             Aggregator parent,
+             SignificanceHeuristic significanceHeuristic,
+             SignificanceLookup lookup,
+             CardinalityUpperBound cardinality,
+             Map<String, Object> metadata
+         ) throws IOException;
+ 
+         @Override
+         public String toString() {
+             return parseField.getPreferredName();
+         }
+     }
+ }
+
diff --git a/htmlReport/ns-1/sources/source-2c.html b/htmlReport/ns-1/sources/source-2c.html
new file mode 100644
index 0000000000000..f2d3c56b7561d
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-2c.html
@@ -0,0 +1,527 @@
+ Coverage Report > SignificantTextAggregationBuilder
+
+ Coverage Summary for Class: SignificantTextAggregationBuilder (org.opensearch.search.aggregations.bucket.terms)
+
+ Class                                Class, %    Method, %    Line, %
+ SignificantTextAggregationBuilder   0% (0/1)    0% (0/34)    0% (0/131)
+
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ 
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.opensearch.core.ParseField;
+ import org.opensearch.core.common.io.stream.StreamInput;
+ import org.opensearch.core.common.io.stream.StreamOutput;
+ import org.opensearch.core.xcontent.ObjectParser;
+ import org.opensearch.core.xcontent.XContentBuilder;
+ import org.opensearch.core.xcontent.XContentParser;
+ import org.opensearch.index.query.AbstractQueryBuilder;
+ import org.opensearch.index.query.QueryBuilder;
+ import org.opensearch.index.query.QueryShardContext;
+ import org.opensearch.search.aggregations.AbstractAggregationBuilder;
+ import org.opensearch.search.aggregations.AggregationBuilder;
+ import org.opensearch.search.aggregations.AggregationInitializationException;
+ import org.opensearch.search.aggregations.AggregatorFactories.Builder;
+ import org.opensearch.search.aggregations.AggregatorFactory;
+ import org.opensearch.search.aggregations.bucket.terms.TermsAggregator.BucketCountThresholds;
+ import org.opensearch.search.aggregations.bucket.terms.heuristic.SignificanceHeuristic;
+ 
+ import java.io.IOException;
+ import java.util.Arrays;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Objects;
+ 
+ /**
+  * Aggregation Builder for significant text agg
+  *
+  * @opensearch.internal
+  */
+ public class SignificantTextAggregationBuilder extends AbstractAggregationBuilder<SignificantTextAggregationBuilder> {
+     public static final String NAME = "significant_text";
+ 
+     static final ParseField FIELD_NAME = new ParseField("field");
+     static final ParseField SOURCE_FIELDS_NAME = new ParseField("source_fields");
+     static final ParseField FILTER_DUPLICATE_TEXT_FIELD_NAME = new ParseField("filter_duplicate_text");
+ 
+     static final TermsAggregator.BucketCountThresholds DEFAULT_BUCKET_COUNT_THRESHOLDS =
+         SignificantTermsAggregationBuilder.DEFAULT_BUCKET_COUNT_THRESHOLDS;
+     static final SignificanceHeuristic DEFAULT_SIGNIFICANCE_HEURISTIC = SignificantTermsAggregationBuilder.DEFAULT_SIGNIFICANCE_HEURISTIC;
+ 
+     private String fieldName = null;
+     private String[] sourceFieldNames = null;
+     private boolean filterDuplicateText = false;
+     private IncludeExclude includeExclude = null;
+     private QueryBuilder filterBuilder = null;
+     private TermsAggregator.BucketCountThresholds bucketCountThresholds = new BucketCountThresholds(DEFAULT_BUCKET_COUNT_THRESHOLDS);
+     private SignificanceHeuristic significanceHeuristic = DEFAULT_SIGNIFICANCE_HEURISTIC;
+ 
+     private static final ObjectParser<SignificantTextAggregationBuilder, Void> PARSER = new ObjectParser<>(
+         SignificantTextAggregationBuilder.NAME,
+         SignificanceHeuristic.class,
+         SignificantTextAggregationBuilder::significanceHeuristic,
+         null
+     );
+     static {
+         PARSER.declareInt(SignificantTextAggregationBuilder::shardSize, TermsAggregationBuilder.SHARD_SIZE_FIELD_NAME);
+ 
+         PARSER.declareLong(SignificantTextAggregationBuilder::minDocCount, TermsAggregationBuilder.MIN_DOC_COUNT_FIELD_NAME);
+ 
+         PARSER.declareLong(SignificantTextAggregationBuilder::shardMinDocCount, TermsAggregationBuilder.SHARD_MIN_DOC_COUNT_FIELD_NAME);
+ 
+         PARSER.declareInt(SignificantTextAggregationBuilder::size, TermsAggregationBuilder.REQUIRED_SIZE_FIELD_NAME);
+ 
+         PARSER.declareString(SignificantTextAggregationBuilder::fieldName, FIELD_NAME);
+ 
+         PARSER.declareStringArray(SignificantTextAggregationBuilder::sourceFieldNames, SOURCE_FIELDS_NAME);
+ 
+         PARSER.declareBoolean(SignificantTextAggregationBuilder::filterDuplicateText, FILTER_DUPLICATE_TEXT_FIELD_NAME);
+ 
+         PARSER.declareObject(
+             SignificantTextAggregationBuilder::backgroundFilter,
+             (p, context) -> AbstractQueryBuilder.parseInnerQueryBuilder(p),
+             SignificantTermsAggregationBuilder.BACKGROUND_FILTER
+         );
+ 
+         PARSER.declareField(
+             (b, v) -> b.includeExclude(IncludeExclude.merge(v, b.includeExclude())),
+             IncludeExclude::parseInclude,
+             IncludeExclude.INCLUDE_FIELD,
+             ObjectParser.ValueType.OBJECT_ARRAY_OR_STRING
+         );
+ 
+         PARSER.declareField(
+             (b, v) -> b.includeExclude(IncludeExclude.merge(b.includeExclude(), v)),
+             IncludeExclude::parseExclude,
+             IncludeExclude.EXCLUDE_FIELD,
+             ObjectParser.ValueType.STRING_ARRAY
+         );
+     }
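+
+     // Example of a request body this parser accepts (illustrative; field values are hypothetical):
+     //
+     //   "significant_text": {
+     //       "field": "content",
+     //       "source_fields": ["content", "title"],
+     //       "filter_duplicate_text": true,
+     //       "size": 10
+     //   }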
+ 
+     public static SignificantTextAggregationBuilder parse(String aggregationName, XContentParser parser) throws IOException {
+         return PARSER.parse(parser, new SignificantTextAggregationBuilder(aggregationName, null), null);
+     }
+ 
+     protected SignificantTextAggregationBuilder(
+         SignificantTextAggregationBuilder clone,
+         Builder factoriesBuilder,
+         Map<String, Object> metadata
+     ) {
+         super(clone, factoriesBuilder, metadata);
+         this.bucketCountThresholds = new BucketCountThresholds(clone.bucketCountThresholds);
+         this.fieldName = clone.fieldName;
+         this.filterBuilder = clone.filterBuilder;
+         this.filterDuplicateText = clone.filterDuplicateText;
+         this.includeExclude = clone.includeExclude;
+         this.significanceHeuristic = clone.significanceHeuristic;
+         this.sourceFieldNames = clone.sourceFieldNames;
+     }
+ 
+     @Override
+     protected AggregationBuilder shallowCopy(Builder factoriesBuilder, Map<String, Object> metadata) {
+         return new SignificantTextAggregationBuilder(this, factoriesBuilder, metadata);
+     }
+ 
+     protected TermsAggregator.BucketCountThresholds getBucketCountThresholds() {
+         return new TermsAggregator.BucketCountThresholds(bucketCountThresholds);
+     }
+ 
+     public TermsAggregator.BucketCountThresholds bucketCountThresholds() {
+         return bucketCountThresholds;
+     }
+ 
+     @Override
+     public SignificantTextAggregationBuilder subAggregations(Builder subFactories) {
+         throw new AggregationInitializationException(
+             "Aggregator [" + name + "] of type [" + getType() + "] cannot accept sub-aggregations"
+         );
+     }
+ 
+     @Override
+     public SignificantTextAggregationBuilder subAggregation(AggregationBuilder aggregation) {
+         throw new AggregationInitializationException(
+             "Aggregator [" + name + "] of type [" + getType() + "] cannot accept sub-aggregations"
+         );
+     }
+ 
+     public SignificantTextAggregationBuilder bucketCountThresholds(TermsAggregator.BucketCountThresholds bucketCountThresholds) {
+         if (bucketCountThresholds == null) {
+             throw new IllegalArgumentException("[bucketCountThresholds] must not be null: [" + name + "]");
+         }
+         this.bucketCountThresholds = bucketCountThresholds;
+         return this;
+     }
+ 
+     /**
+      * Sets the size - indicating how many term buckets should be returned
+      * (defaults to 10)
+      */
+     public SignificantTextAggregationBuilder size(int size) {
+         if (size <= 0) {
+             throw new IllegalArgumentException("[size] must be greater than 0. Found [" + size + "] in [" + name + "]");
+         }
+         bucketCountThresholds.setRequiredSize(size);
+         return this;
+     }
+ 
+     /**
+      * Sets the shard_size - indicating the number of term buckets each shard
+      * will return to the coordinating node (the node that coordinates the
+      * search execution). The higher the shard size is, the more accurate the
+      * results are.
+      */
+     public SignificantTextAggregationBuilder shardSize(int shardSize) {
+         if (shardSize <= 0) {
+             throw new IllegalArgumentException("[shardSize] must be greater than 0. Found [" + shardSize + "] in [" + name + "]");
+         }
+         bucketCountThresholds.setShardSize(shardSize);
+         return this;
+     }
+ 
+     /**
+      * Sets the name of the text field that will be the subject of this
+      * aggregation.
+      */
+     public SignificantTextAggregationBuilder fieldName(String fieldName) {
+         this.fieldName = fieldName;
+         return this;
+     }
+ 
+     /**
+      * Selects the fields to load from _source JSON and analyze.
+      * If none are specified, the indexed "fieldName" value is assumed
+      * to also be the name of the JSON field holding the value
+      */
+     public SignificantTextAggregationBuilder sourceFieldNames(List<String> names) {
+         this.sourceFieldNames = names.toArray(new String[0]);
+         return this;
+     }
+ 
+     /**
+      * Controls whether an attempt is made to filter duplicate paragraphs of text
+      * from the statistical text analysis. Can improve result quality but slows
+      * down analysis. Default is false.
+      */
+     public SignificantTextAggregationBuilder filterDuplicateText(boolean filterDuplicateText) {
+         this.filterDuplicateText = filterDuplicateText;
+         return this;
+     }
+ 
+     /**
+      * Set the minimum document count terms should have in order to appear in
+      * the response.
+      */
+     public SignificantTextAggregationBuilder minDocCount(long minDocCount) {
+         if (minDocCount < 0) {
+             throw new IllegalArgumentException(
+                 "[minDocCount] must be greater than or equal to 0. Found [" + minDocCount + "] in [" + name + "]"
+             );
+         }
+         bucketCountThresholds.setMinDocCount(minDocCount);
+         return this;
+     }
+ 
+     /**
+      * Set the minimum document count terms should have on the shard in order to
+      * appear in the response.
+      */
+     public SignificantTextAggregationBuilder shardMinDocCount(long shardMinDocCount) {
+         if (shardMinDocCount < 0) {
+             throw new IllegalArgumentException(
+                 "[shardMinDocCount] must be greater than or equal to 0. Found [" + shardMinDocCount + "] in [" + name + "]"
+             );
+         }
+         bucketCountThresholds.setShardMinDocCount(shardMinDocCount);
+         return this;
+     }
+ 
+     public SignificantTextAggregationBuilder backgroundFilter(QueryBuilder backgroundFilter) {
+         if (backgroundFilter == null) {
+             throw new IllegalArgumentException("[backgroundFilter] must not be null: [" + name + "]");
+         }
+         this.filterBuilder = backgroundFilter;
+         return this;
+     }
+ 
+     public QueryBuilder backgroundFilter() {
+         return filterBuilder;
+     }
+ 
+     /**
+      * Set terms to include and exclude from the aggregation results
+      */
+     public SignificantTextAggregationBuilder includeExclude(IncludeExclude includeExclude) {
+         this.includeExclude = includeExclude;
+         return this;
+     }
+ 
+     /**
+      * Get terms to include and exclude from the aggregation results
+      */
+     public IncludeExclude includeExclude() {
+         return includeExclude;
+     }
+ 
+     public SignificantTextAggregationBuilder significanceHeuristic(SignificanceHeuristic significanceHeuristic) {
+         if (significanceHeuristic == null) {
+             throw new IllegalArgumentException("[significanceHeuristic] must not be null: [" + name + "]");
+         }
+         this.significanceHeuristic = significanceHeuristic;
+         return this;
+     }
+ 
+     public SignificanceHeuristic significanceHeuristic() {
+         return significanceHeuristic;
+     }
+ 
+     /**
+      * @param name
+      *            the name of this aggregation
+      * @param fieldName
+      *            the name of the text field that will be the subject of this
+      *            aggregation
+      *
+      */
+     public SignificantTextAggregationBuilder(String name, String fieldName) {
+         super(name);
+         this.fieldName = fieldName;
+     }
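+
+     // Typical programmatic use (a sketch; the aggregation and field names are hypothetical):
+     //
+     //   SignificantTextAggregationBuilder agg = new SignificantTextAggregationBuilder("keywords", "body")
+     //       .filterDuplicateText(true)
+     //       .size(20);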
+ 
+     /**
+      * Read from a stream.
+      */
+     public SignificantTextAggregationBuilder(StreamInput in) throws IOException {
+         super(in);
+         fieldName = in.readString();
+         filterDuplicateText = in.readBoolean();
+         bucketCountThresholds = new BucketCountThresholds(in);
+         filterBuilder = in.readOptionalNamedWriteable(QueryBuilder.class);
+         includeExclude = in.readOptionalWriteable(IncludeExclude::new);
+         significanceHeuristic = in.readNamedWriteable(SignificanceHeuristic.class);
+         sourceFieldNames = in.readOptionalStringArray();
+     }
+ 
+     @Override
+     protected void doWriteTo(StreamOutput out) throws IOException {
+         out.writeString(fieldName);
+         out.writeBoolean(filterDuplicateText);
+         bucketCountThresholds.writeTo(out);
+         out.writeOptionalNamedWriteable(filterBuilder);
+         out.writeOptionalWriteable(includeExclude);
+         out.writeNamedWriteable(significanceHeuristic);
+         out.writeOptionalStringArray(sourceFieldNames);
+     }
+ 
+     @Override
+     public BucketCardinality bucketCardinality() {
+         return BucketCardinality.MANY;
+     }
+ 
+     @Override
+     protected AggregatorFactory doBuild(QueryShardContext queryShardContext, AggregatorFactory parent, Builder subFactoriesBuilder)
+         throws IOException {
+         SignificanceHeuristic executionHeuristic = this.significanceHeuristic.rewrite(queryShardContext);
+ 
+         return new SignificantTextAggregatorFactory(
+             name,
+             includeExclude,
+             filterBuilder,
+             bucketCountThresholds,
+             executionHeuristic,
+             queryShardContext,
+             parent,
+             subFactoriesBuilder,
+             fieldName,
+             sourceFieldNames,
+             filterDuplicateText,
+             metadata
+         );
+     }
+ 
+     @Override
+     protected XContentBuilder internalXContent(XContentBuilder builder, Params params) throws IOException {
+         builder.startObject();
+         bucketCountThresholds.toXContent(builder, params);
+         if (fieldName != null) {
+             builder.field(FIELD_NAME.getPreferredName(), fieldName);
+         }
+         if (sourceFieldNames != null) {
+             builder.array(SOURCE_FIELDS_NAME.getPreferredName(), sourceFieldNames);
+         }
+ 
+         if (filterDuplicateText) {
+             builder.field(FILTER_DUPLICATE_TEXT_FIELD_NAME.getPreferredName(), filterDuplicateText);
+         }
+         if (filterBuilder != null) {
+             builder.field(SignificantTermsAggregationBuilder.BACKGROUND_FILTER.getPreferredName(), filterBuilder);
+         }
+         if (includeExclude != null) {
+             includeExclude.toXContent(builder, params);
+         }
+         significanceHeuristic.toXContent(builder, params);
+ 
+         builder.endObject();
+         return builder;
+     }
+ 
+     @Override
+     public int hashCode() {
+         return Objects.hash(
+             super.hashCode(),
+             bucketCountThresholds,
+             fieldName,
+             filterDuplicateText,
+             filterBuilder,
+             includeExclude,
+             significanceHeuristic,
+             Arrays.hashCode(sourceFieldNames)
+         );
+     }
+ 
+     @Override
+     public boolean equals(Object obj) {
+         if (this == obj) return true;
+         if (obj == null || getClass() != obj.getClass()) return false;
+         if (super.equals(obj) == false) return false;
+         SignificantTextAggregationBuilder other = (SignificantTextAggregationBuilder) obj;
+         return Objects.equals(bucketCountThresholds, other.bucketCountThresholds)
+             && Objects.equals(fieldName, other.fieldName)
+             && Arrays.equals(sourceFieldNames, other.sourceFieldNames)
+             && filterDuplicateText == other.filterDuplicateText
+             && Objects.equals(filterBuilder, other.filterBuilder)
+             && Objects.equals(includeExclude, other.includeExclude)
+             && Objects.equals(significanceHeuristic, other.significanceHeuristic);
+     }
+ 
+     @Override
+     public String getType() {
+         return NAME;
+     }
+ }
+
diff --git a/htmlReport/ns-1/sources/source-2d.html b/htmlReport/ns-1/sources/source-2d.html
new file mode 100644
index 0000000000000..4e78d463b7025
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-2d.html
@@ -0,0 +1,469 @@
+ Coverage Report > SignificantTextAggregatorFactory
+
+ Coverage Summary for Class: SignificantTextAggregatorFactory (org.opensearch.search.aggregations.bucket.terms)
+
+ Class                                                                 Method, %    Line, %
+ SignificantTextAggregatorFactory                                      0% (0/4)     0% (0/25)
+ SignificantTextAggregatorFactory$SignificantTextCollectorSource      0% (0/4)     0% (0/9)
+ SignificantTextAggregatorFactory$SignificantTextCollectorSource$1    0% (0/5)     0% (0/56)
+ Total                                                                 0% (0/13)    0% (0/90)
+
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ 
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.apache.lucene.analysis.Analyzer;
+ import org.apache.lucene.analysis.TokenStream;
+ import org.apache.lucene.analysis.miscellaneous.DeDuplicatingTokenFilter;
+ import org.apache.lucene.analysis.miscellaneous.DuplicateByteSequenceSpotter;
+ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+ import org.apache.lucene.index.LeafReaderContext;
+ import org.apache.lucene.util.BytesRef;
+ import org.apache.lucene.util.BytesRefBuilder;
+ import org.opensearch.common.lease.Releasables;
+ import org.opensearch.common.util.BigArrays;
+ import org.opensearch.common.util.BytesRefHash;
+ import org.opensearch.common.util.ObjectArray;
+ import org.opensearch.index.mapper.MappedFieldType;
+ import org.opensearch.index.query.QueryBuilder;
+ import org.opensearch.index.query.QueryShardContext;
+ import org.opensearch.search.DocValueFormat;
+ import org.opensearch.search.aggregations.Aggregator;
+ import org.opensearch.search.aggregations.Aggregator.SubAggCollectionMode;
+ import org.opensearch.search.aggregations.AggregatorFactories;
+ import org.opensearch.search.aggregations.AggregatorFactory;
+ import org.opensearch.search.aggregations.CardinalityUpperBound;
+ import org.opensearch.search.aggregations.LeafBucketCollector;
+ import org.opensearch.search.aggregations.LeafBucketCollectorBase;
+ import org.opensearch.search.aggregations.bucket.BucketUtils;
+ import org.opensearch.search.aggregations.bucket.terms.IncludeExclude.StringFilter;
+ import org.opensearch.search.aggregations.bucket.terms.MapStringTermsAggregator.CollectConsumer;
+ import org.opensearch.search.aggregations.bucket.terms.TermsAggregator.BucketCountThresholds;
+ import org.opensearch.search.aggregations.bucket.terms.heuristic.SignificanceHeuristic;
+ import org.opensearch.search.internal.SearchContext;
+ import org.opensearch.search.lookup.SourceLookup;
+ 
+ import java.io.IOException;
+ import java.util.Iterator;
+ import java.util.Map;
+ import java.util.function.LongConsumer;
+ 
+ /**
+  * Aggregation Factory for significant_text agg
+  *
+  * @opensearch.internal
+  */
+ public class SignificantTextAggregatorFactory extends AggregatorFactory {
+     private static final int MEMORY_GROWTH_REPORTING_INTERVAL_BYTES = 5000;
+ 
+     private final IncludeExclude includeExclude;
+     private final String indexedFieldName;
+     private final MappedFieldType fieldType;
+     private final String[] sourceFieldNames;
+     private final QueryBuilder backgroundFilter;
+     private final TermsAggregator.BucketCountThresholds bucketCountThresholds;
+     private final SignificanceHeuristic significanceHeuristic;
+     private final boolean filterDuplicateText;
+ 
+     public SignificantTextAggregatorFactory(
+         String name,
+         IncludeExclude includeExclude,
+         QueryBuilder backgroundFilter,
+         TermsAggregator.BucketCountThresholds bucketCountThresholds,
+         SignificanceHeuristic significanceHeuristic,
+         QueryShardContext queryShardContext,
+         AggregatorFactory parent,
+         AggregatorFactories.Builder subFactoriesBuilder,
+         String fieldName,
+         String[] sourceFieldNames,
+         boolean filterDuplicateText,
+         Map<String, Object> metadata
+     ) throws IOException {
+         super(name, queryShardContext, parent, subFactoriesBuilder, metadata);
+ 
+         // Note that if the field is unmapped (its field type is null), we don't fail,
+         // and just use the given field name as a placeholder.
+         this.fieldType = queryShardContext.fieldMapper(fieldName);
+         if (fieldType != null && fieldType.indexAnalyzer() == null) {
+             throw new IllegalArgumentException(
+                 "Field [" + fieldType.name() + "] has no analyzer, but SignificantText " + "requires an analyzed field"
+             );
+         }
+         this.indexedFieldName = fieldType != null ? fieldType.name() : fieldName;
+         this.sourceFieldNames = sourceFieldNames == null ? new String[] { indexedFieldName } : sourceFieldNames;
+ 
+         this.includeExclude = includeExclude;
+         this.backgroundFilter = backgroundFilter;
+         this.filterDuplicateText = filterDuplicateText;
+         this.bucketCountThresholds = bucketCountThresholds;
+         this.significanceHeuristic = significanceHeuristic;
+     }
+ 
+     @Override
+     protected Aggregator createInternal(
+         SearchContext searchContext,
+         Aggregator parent,
+         CardinalityUpperBound cardinality,
+         Map<String, Object> metadata
+     ) throws IOException {
+         BucketCountThresholds bucketCountThresholds = new BucketCountThresholds(this.bucketCountThresholds);
+         if (bucketCountThresholds.getShardSize() == SignificantTextAggregationBuilder.DEFAULT_BUCKET_COUNT_THRESHOLDS.getShardSize()) {
+             // The user has not made a shardSize selection.
+             // Use default heuristic to avoid any wrong-ranking caused by
+             // distributed counting but request double the usual amount.
+             // We typically need more than the number of "top" terms requested
+             // by other aggregations as the significance algorithm is in less
+             // of a position to down-select at shard-level - some of the things
+             // we want to find have only one occurrence on each shard and as
+             // such are impossible to differentiate from non-significant terms
+             // at that early stage.
+             bucketCountThresholds.setShardSize(2 * BucketUtils.suggestShardSideQueueSize(bucketCountThresholds.getRequiredSize()));
+         }
+ 
+         // TODO - need to check with mapping that this is indeed a text field....
+ 
+         int maxRegexLength = searchContext.getQueryShardContext().getIndexSettings().getMaxRegexLength();
+         IncludeExclude.StringFilter incExcFilter = includeExclude == null
+             ? null
+             : includeExclude.convertToStringFilter(DocValueFormat.RAW, maxRegexLength);
+ 
+         MapStringTermsAggregator.CollectorSource collectorSource = new SignificantTextCollectorSource(
+             queryShardContext.bigArrays(),
+             fieldType,
+             sourceFieldNames,
+             filterDuplicateText
+         );
+         SignificanceLookup lookup = new SignificanceLookup(queryShardContext, fieldType, DocValueFormat.RAW, backgroundFilter);
+         return new MapStringTermsAggregator(
+             name,
+             factories,
+             collectorSource,
+             a -> a.new SignificantTermsResults(lookup, significanceHeuristic, cardinality),
+             null,
+             DocValueFormat.RAW,
+             bucketCountThresholds,
+             incExcFilter,
+             searchContext,
+             parent,
+             SubAggCollectionMode.BREADTH_FIRST,
+             false,
+             cardinality,
+             metadata
+         );
+     }
+ 
+     /**
+      * Collects significant text
+      *
+      * @opensearch.internal
+      */
+     private static class SignificantTextCollectorSource implements MapStringTermsAggregator.CollectorSource {
+         private final SourceLookup sourceLookup;
+         private final BigArrays bigArrays;
+         private final MappedFieldType fieldType;
+         private final String[] sourceFieldNames;
+         private ObjectArray<DuplicateByteSequenceSpotter> dupSequenceSpotters;
+ 
+         SignificantTextCollectorSource(
+             BigArrays bigArrays,
+             MappedFieldType fieldType,
+             String[] sourceFieldNames,
+             boolean filterDuplicateText
+         ) {
+             // Create a new SourceLookup instance per aggregator instead of using the shared one from SearchLookup. This is fine
+             // because it will only be accessed by this Aggregator instance and not anywhere else.
+             this.sourceLookup = new SourceLookup();
+             this.bigArrays = bigArrays;
+             this.fieldType = fieldType;
+             this.sourceFieldNames = sourceFieldNames;
+             dupSequenceSpotters = filterDuplicateText ? bigArrays.newObjectArray(1) : null;
+         }
+ 
+         @Override
+         public boolean needsScores() {
+             return false;
+         }
+ 
+         @Override
+         public LeafBucketCollector getLeafCollector(
+             StringFilter includeExclude,
+             LeafReaderContext ctx,
+             LeafBucketCollector sub,
+             LongConsumer addRequestCircuitBreakerBytes,
+             CollectConsumer consumer
+         ) throws IOException {
+             return new LeafBucketCollectorBase(sub, null) {
+                 private final BytesRefBuilder scratch = new BytesRefBuilder();
+ 
+                 @Override
+                 public void collect(int doc, long owningBucketOrd) throws IOException {
+                     if (dupSequenceSpotters == null) {
+                         collectFromSource(doc, owningBucketOrd, null);
+                         return;
+                     }
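+                     // One spotter per owning bucket: grow the backing array lazily and create the
+                     // spotter on first use, so memory scales with the buckets actually seen.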
+                     dupSequenceSpotters = bigArrays.grow(dupSequenceSpotters, owningBucketOrd + 1);
+                     DuplicateByteSequenceSpotter spotter = dupSequenceSpotters.get(owningBucketOrd);
+                     if (spotter == null) {
+                         spotter = new DuplicateByteSequenceSpotter();
+                         dupSequenceSpotters.set(owningBucketOrd, spotter);
+                     }
+                     collectFromSource(doc, owningBucketOrd, spotter);
+                     spotter.startNewSequence();
+                 }
+ 
+                 private void collectFromSource(int doc, long owningBucketOrd, DuplicateByteSequenceSpotter spotter) throws IOException {
+                     sourceLookup.setSegmentAndDocument(ctx, doc);
+                     BytesRefHash inDocTerms = new BytesRefHash(256, bigArrays);
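+                     // inDocTerms tracks terms already seen in this document so each term is
+                     // counted at most once per doc (add() returns a negative ord for duplicates).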
+ 
+                     try {
+                         for (String sourceField : sourceFieldNames) {
+                             Iterator<String> itr = sourceLookup.extractRawValues(sourceField).stream().map(obj -> {
+                                 if (obj == null) {
+                                     return null;
+                                 }
+                                 if (obj instanceof BytesRef) {
+                                     return fieldType.valueForDisplay(obj).toString();
+                                 }
+                                 return obj.toString();
+                             }).iterator();
+                             Analyzer analyzer = fieldType.indexAnalyzer();
+                             while (itr.hasNext()) {
+                                 TokenStream ts = analyzer.tokenStream(fieldType.name(), itr.next());
+                                 processTokenStream(doc, owningBucketOrd, ts, inDocTerms, spotter);
+                             }
+                         }
+                     } finally {
+                         Releasables.close(inDocTerms);
+                     }
+                 }
+ 
+                 private void processTokenStream(
+                     int doc,
+                     long owningBucketOrd,
+                     TokenStream ts,
+                     BytesRefHash inDocTerms,
+                     DuplicateByteSequenceSpotter spotter
+                 ) throws IOException {
+                     long lastTrieSize = 0;
+                     if (spotter != null) {
+                         lastTrieSize = spotter.getEstimatedSizeInBytes();
+                         ts = new DeDuplicatingTokenFilter(ts, spotter);
+                     }
+                     CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
+                     ts.reset();
+                     try {
+                         while (ts.incrementToken()) {
+                             if (spotter != null) {
+                                 long newTrieSize = spotter.getEstimatedSizeInBytes();
+                                 long growth = newTrieSize - lastTrieSize;
+                                 // Only update the circuit breaker once the trie has grown by at
+                                 // least the reporting interval, to avoid accounting on every token.
+                                 if (growth > MEMORY_GROWTH_REPORTING_INTERVAL_BYTES) {
+                                     addRequestCircuitBreakerBytes.accept(growth);
+                                     lastTrieSize = newTrieSize;
+                                 }
+                             }
+ 
+                             scratch.clear();
+                             scratch.copyChars(termAtt);
+                             BytesRef bytes = scratch.get();
+                             if (includeExclude != null && false == includeExclude.accept(bytes)) {
+                                 continue;
+                             }
+                             if (inDocTerms.add(bytes) < 0) {
+                                 continue;
+                             }
+                             consumer.accept(sub, doc, owningBucketOrd, bytes);
+                         }
+                     } finally {
+                         ts.close();
+                     }
+                     if (spotter != null) {
+                         long growth = spotter.getEstimatedSizeInBytes() - lastTrieSize;
+                         if (growth > 0) {
+                             addRequestCircuitBreakerBytes.accept(growth);
+                         }
+                     }
+                 }
+             };
+         }
+ 
+         @Override
+         public void close() {
+             Releasables.close(dupSequenceSpotters);
+         }
+     }
+ 
+     @Override
+     protected boolean supportsConcurrentSegmentSearch() {
+         return true;
+     }
+ }
+
diff --git a/htmlReport/ns-1/sources/source-2e.html b/htmlReport/ns-1/sources/source-2e.html
new file mode 100644
index 0000000000000..0ebf3ea0f7032
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-2e.html
@@ -0,0 +1,320 @@
+ Coverage Report > StringRareTerms
+
+ Coverage Summary for Class: StringRareTerms (org.opensearch.search.aggregations.bucket.terms)
+
+ Class                     Method, %     Line, %
+ StringRareTerms           0% (0/10)     0% (0/10)
+ StringRareTerms$Bucket    0% (0/10)     0% (0/14)
+ Total                     0% (0/20)     0% (0/24)
+
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.apache.lucene.util.BytesRef;
+ import org.opensearch.common.util.SetBackedScalingCuckooFilter;
+ import org.opensearch.core.common.io.stream.StreamInput;
+ import org.opensearch.core.common.io.stream.StreamOutput;
+ import org.opensearch.core.xcontent.XContentBuilder;
+ import org.opensearch.search.DocValueFormat;
+ import org.opensearch.search.aggregations.BucketOrder;
+ import org.opensearch.search.aggregations.InternalAggregations;
+ 
+ import java.io.IOException;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Objects;
+ 
+ /**
+  * Implementation of string rare terms
+  *
+  * @opensearch.internal
+  */
+ public class StringRareTerms extends InternalMappedRareTerms<StringRareTerms, StringRareTerms.Bucket> {
+     public static final String NAME = "srareterms";
+ 
+     /**
+      * Bucket for rare string terms
+      *
+      * @opensearch.internal
+      */
+     public static class Bucket extends InternalRareTerms.Bucket<Bucket> {
+         BytesRef termBytes;
+ 
+         public Bucket(BytesRef term, long docCount, InternalAggregations aggregations, DocValueFormat format) {
+             super(docCount, aggregations, format);
+             this.termBytes = term;
+         }
+ 
+         /**
+          * Read from a stream.
+          */
+         public Bucket(StreamInput in, DocValueFormat format) throws IOException {
+             super(in, format);
+             termBytes = in.readBytesRef();
+         }
+ 
+         @Override
+         protected void writeTermTo(StreamOutput out) throws IOException {
+             out.writeBytesRef(termBytes);
+         }
+ 
+         @Override
+         public Object getKey() {
+             return getKeyAsString();
+         }
+ 
+         // this method is needed for scripted numeric aggs
+         @Override
+         public Number getKeyAsNumber() {
+             /*
+              * If the term is a long greater than 2^53 then parsing it as a double may lose accuracy. Therefore, we first parse it as a
+              * long and only if this fails do we attempt to parse the term as a double.
+              */
+             try {
+                 return Long.parseLong(termBytes.utf8ToString());
+             } catch (final NumberFormatException ignored) {
+                 return Double.parseDouble(termBytes.utf8ToString());
+             }
+         }
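+
+         // e.g. "9223372036854775807" parses cleanly as a long, while "1.5e3" throws
+         // NumberFormatException from Long.parseLong and falls back to Double.parseDouble.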
+ 
+         @Override
+         public String getKeyAsString() {
+             return format.format(termBytes).toString();
+         }
+ 
+         @Override
+         public int compareKey(Bucket other) {
+             return termBytes.compareTo(other.termBytes);
+         }
+ 
+         @Override
+         protected final XContentBuilder keyToXContent(XContentBuilder builder) throws IOException {
+             return builder.field(CommonFields.KEY.getPreferredName(), getKeyAsString());
+         }
+ 
+         @Override
+         public boolean equals(Object obj) {
+             return super.equals(obj) && Objects.equals(termBytes, ((Bucket) obj).termBytes);
+         }
+ 
+         @Override
+         public int hashCode() {
+             return Objects.hash(super.hashCode(), termBytes);
+         }
+     }
+ 
+     StringRareTerms(
+         String name,
+         BucketOrder order,
+         Map<String, Object> metadata,
+         DocValueFormat format,
+         List<StringRareTerms.Bucket> buckets,
+         long maxDocCount,
+         SetBackedScalingCuckooFilter filter
+     ) {
+         super(name, order, metadata, format, buckets, maxDocCount, filter);
+     }
+ 
+     /**
+      * Read from a stream.
+      */
+     public StringRareTerms(StreamInput in) throws IOException {
+         super(in, StringRareTerms.Bucket::new);
+     }
+ 
+     @Override
+     public String getWriteableName() {
+         return NAME;
+     }
+ 
+     @Override
+     public StringRareTerms create(List<StringRareTerms.Bucket> buckets) {
+         return new StringRareTerms(name, order, metadata, format, buckets, maxDocCount, filter);
+     }
+ 
+     @Override
+     public StringRareTerms.Bucket createBucket(InternalAggregations aggregations, StringRareTerms.Bucket prototype) {
+         return new StringRareTerms.Bucket(prototype.termBytes, prototype.getDocCount(), aggregations, prototype.format);
+     }
+ 
+     @Override
+     protected StringRareTerms createWithFilter(
+         String name,
+         List<StringRareTerms.Bucket> buckets,
+         SetBackedScalingCuckooFilter filterFilter
+     ) {
+         return new StringRareTerms(name, order, metadata, format, buckets, maxDocCount, filterFilter);
+     }
+ 
+     @Override
+     protected StringRareTerms.Bucket[] createBucketsArray(int size) {
+         return new StringRareTerms.Bucket[size];
+     }
+ 
+     @Override
+     public boolean containsTerm(SetBackedScalingCuckooFilter filter, StringRareTerms.Bucket bucket) {
+         return filter.mightContain(bucket.termBytes);
+     }
+ 
+     @Override
+     public void addToFilter(SetBackedScalingCuckooFilter filter, StringRareTerms.Bucket bucket) {
+         filter.add(bucket.termBytes);
+     }
+ 
+     @Override
+     Bucket createBucket(long docCount, InternalAggregations aggs, StringRareTerms.Bucket prototype) {
+         return new Bucket(prototype.termBytes, docCount, aggs, format);
+     }
+ 
+ }
+
+
+
+ + + + + + diff --git a/htmlReport/ns-1/sources/source-2f.html b/htmlReport/ns-1/sources/source-2f.html new file mode 100644 index 0000000000000..a308754438075 --- /dev/null +++ b/htmlReport/ns-1/sources/source-2f.html @@ -0,0 +1,332 @@ + + + + + + + + Coverage Report > StringRareTermsAggregator + + + + + + +
+ + +

Coverage Summary for Class: StringRareTermsAggregator (org.opensearch.search.aggregations.bucket.terms)

+ + + + + + + + + + + + + + + + + + + + + + + +
Class + Method, % + + Line, % +
StringRareTermsAggregator + + 0% + + + (0/5) + + + + 0% + + + (0/47) + +
StringRareTermsAggregator$1 + + 0% + + + (0/2) + + + + 0% + + + (0/17) + +
Total + + 0% + + + (0/7) + + + + 0% + + + (0/64) + +
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.apache.lucene.index.LeafReaderContext;
+ import org.apache.lucene.util.BytesRef;
+ import org.apache.lucene.util.BytesRefBuilder;
+ import org.opensearch.common.lease.Releasables;
+ import org.opensearch.common.util.BytesRefHash;
+ import org.opensearch.common.util.SetBackedScalingCuckooFilter;
+ import org.opensearch.index.fielddata.SortedBinaryDocValues;
+ import org.opensearch.search.DocValueFormat;
+ import org.opensearch.search.aggregations.Aggregator;
+ import org.opensearch.search.aggregations.AggregatorFactories;
+ import org.opensearch.search.aggregations.CardinalityUpperBound;
+ import org.opensearch.search.aggregations.InternalAggregation;
+ import org.opensearch.search.aggregations.LeafBucketCollector;
+ import org.opensearch.search.aggregations.LeafBucketCollectorBase;
+ import org.opensearch.search.aggregations.support.ValuesSource;
+ import org.opensearch.search.internal.SearchContext;
+ 
+ import java.io.IOException;
+ import java.util.ArrayList;
+ import java.util.Arrays;
+ import java.util.List;
+ import java.util.Map;
+ 
+ import static java.util.Collections.emptyList;
+ 
+ /**
+  * An aggregator that finds "rare" string values (e.g. terms agg that orders ascending)
+  *
+  * @opensearch.internal
+  */
+ public class StringRareTermsAggregator extends AbstractRareTermsAggregator {
+     private final ValuesSource.Bytes valuesSource;
+     private final IncludeExclude.StringFilter filter;
+     private final BytesKeyedBucketOrds bucketOrds;
+ 
+     StringRareTermsAggregator(
+         String name,
+         AggregatorFactories factories,
+         ValuesSource.Bytes valuesSource,
+         DocValueFormat format,
+         IncludeExclude.StringFilter filter,
+         SearchContext context,
+         Aggregator parent,
+         Map<String, Object> metadata,
+         long maxDocCount,
+         double precision,
+         CardinalityUpperBound cardinality
+     ) throws IOException {
+         super(name, factories, context, parent, metadata, maxDocCount, precision, format);
+         this.valuesSource = valuesSource;
+         this.filter = filter;
+         this.bucketOrds = BytesKeyedBucketOrds.build(context.bigArrays(), cardinality);
+     }
+ 
+     @Override
+     public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBucketCollector sub) throws IOException {
+         final SortedBinaryDocValues values = valuesSource.bytesValues(ctx);
+         return new LeafBucketCollectorBase(sub, values) {
+             final BytesRefBuilder previous = new BytesRefBuilder();
+ 
+             @Override
+             public void collect(int docId, long owningBucketOrd) throws IOException {
+                 if (false == values.advanceExact(docId)) {
+                     return;
+                 }
+                 int valuesCount = values.docValueCount();
+                 previous.clear();
+ 
+                 // SortedBinaryDocValues don't guarantee uniqueness so we
+                 // need to take care of dups
+                 for (int i = 0; i < valuesCount; ++i) {
+                     BytesRef bytes = values.nextValue();
+                     if (filter != null && false == filter.accept(bytes)) {
+                         continue;
+                     }
+                     if (i > 0 && previous.get().equals(bytes)) {
+                         continue;
+                     }
+                     previous.copyBytes(bytes);
+                     long bucketOrdinal = bucketOrds.add(owningBucketOrd, bytes);
+                     if (bucketOrdinal < 0) { // already seen
+                         bucketOrdinal = -1 - bucketOrdinal;
+                         collectExistingBucket(sub, docId, bucketOrdinal);
+                     } else {
+                         collectBucket(sub, docId, bucketOrdinal);
+                     }
+                 }
+             }
+         };
+     }
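
The duplicate check above relies on SortedBinaryDocValues returning each document's values in sorted order, so comparing against the previous value is sufficient. The same idea on a plain sorted array (an illustrative sketch, not OpenSearch API):

    String[] sortedValues = { "a", "a", "b", "b", "c" };
    String previous = null;
    for (String value : sortedValues) {
        if (value.equals(previous)) {
            continue; // duplicate of the value just collected; skip it
        }
        previous = value;
        System.out.println("collect " + value); // prints a, b, c once each
    }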
+ 
+     @Override
+     public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException {
+         /*
+          * Collect the list of buckets, populate the filter with terms
+          * that are too frequent, and figure out how to merge sub-buckets.
+          */
+         StringRareTerms.Bucket[][] rarestPerOrd = new StringRareTerms.Bucket[owningBucketOrds.length][];
+         SetBackedScalingCuckooFilter[] filters = new SetBackedScalingCuckooFilter[owningBucketOrds.length];
+         long keepCount = 0;
+         long[] mergeMap = new long[(int) bucketOrds.size()];
+         Arrays.fill(mergeMap, -1);
+         long offset = 0;
+         for (int owningOrdIdx = 0; owningOrdIdx < owningBucketOrds.length; owningOrdIdx++) {
+             try (BytesRefHash bucketsInThisOwningBucketToCollect = new BytesRefHash(context.bigArrays())) {
+                 filters[owningOrdIdx] = newFilter();
+                 List<StringRareTerms.Bucket> builtBuckets = new ArrayList<>();
+                 BytesKeyedBucketOrds.BucketOrdsEnum collectedBuckets = bucketOrds.ordsEnum(owningBucketOrds[owningOrdIdx]);
+                 BytesRef scratch = new BytesRef();
+                 while (collectedBuckets.next()) {
+                     collectedBuckets.readValue(scratch);
+                     long docCount = bucketDocCount(collectedBuckets.ord());
+                     // if the key is below threshold, reinsert into the new ords
+                     if (docCount <= maxDocCount) {
+                         StringRareTerms.Bucket bucket = new StringRareTerms.Bucket(BytesRef.deepCopyOf(scratch), docCount, null, format);
+                         bucket.bucketOrd = offset + bucketsInThisOwningBucketToCollect.add(scratch);
+                         mergeMap[(int) collectedBuckets.ord()] = bucket.bucketOrd;
+                         builtBuckets.add(bucket);
+                         keepCount++;
+                     } else {
+                         filters[owningOrdIdx].add(scratch);
+                     }
+                 }
+                 rarestPerOrd[owningOrdIdx] = builtBuckets.toArray(new StringRareTerms.Bucket[0]);
+                 offset += bucketsInThisOwningBucketToCollect.size();
+             }
+         }
+ 
+         /*
+          * Only merge/delete the ordinals if we have actually deleted one,
+          * to save on some redundant work.
+          */
+         if (keepCount != mergeMap.length) {
+             mergeBuckets(mergeMap, offset);
+             if (deferringCollector != null) {
+                 deferringCollector.mergeBuckets(mergeMap);
+             }
+         }
+ 
+         /*
+          * Now build the results!
+          */
+         buildSubAggsForAllBuckets(rarestPerOrd, b -> b.bucketOrd, (b, aggs) -> b.aggregations = aggs);
+         InternalAggregation[] result = new InternalAggregation[owningBucketOrds.length];
+         for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) {
+             Arrays.sort(rarestPerOrd[ordIdx], ORDER.comparator());
+             result[ordIdx] = new StringRareTerms(
+                 name,
+                 ORDER,
+                 metadata(),
+                 format,
+                 Arrays.asList(rarestPerOrd[ordIdx]),
+                 maxDocCount,
+                 filters[ordIdx]
+             );
+         }
+         return result;
+     }
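
A sketch of the ordinal remapping built in buildAggregations above, using hypothetical doc counts and ignoring the per-owning-bucket offset: ordinals whose doc count stays at or below maxDocCount receive compact new ordinals, too-frequent ordinals keep the -1 sentinel and go into the cuckoo filter, and mergeBuckets only runs when at least one ordinal was dropped.

    long[] docCounts = { 1, 5, 1, 2 }; // doc count per collected bucket ordinal
    long maxDocCount = 2;
    long[] mergeMap = new long[docCounts.length];
    java.util.Arrays.fill(mergeMap, -1);
    long keepCount = 0;
    for (int ord = 0; ord < docCounts.length; ord++) {
        if (docCounts[ord] <= maxDocCount) {
            mergeMap[ord] = keepCount++; // rare: assign the next compact ordinal
        }                                // frequent: stays -1, added to the filter
    }
    // mergeMap == [0, -1, 1, 2]; keepCount (3) != mergeMap.length (4), so merge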
+ 
+     @Override
+     public InternalAggregation buildEmptyAggregation() {
+         return new StringRareTerms(name, LongRareTermsAggregator.ORDER, metadata(), format, emptyList(), 0, newFilter());
+     }
+ 
+     @Override
+     public void doClose() {
+         Releasables.close(bucketOrds);
+     }
+ }
+ 

diff --git a/htmlReport/ns-1/sources/source-3.html b/htmlReport/ns-1/sources/source-3.html
new file mode 100644
index 0000000000000..e89a571f31ac4
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-3.html
@@ -0,0 +1,160 @@
+ [HTML page chrome elided: Coverage Report > BucketPriorityQueue]

Coverage Summary for Class: BucketPriorityQueue (org.opensearch.search.aggregations.bucket.terms)

Class                 Class, %     Method, %    Line, %
BucketPriorityQueue   100% (1/1)   100% (2/2)   100% (3/3)
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.apache.lucene.util.PriorityQueue;
+ 
+ import java.util.Comparator;
+ 
+ /**
+  * Internal priority queue for computing terms aggs
+  *
+  * @opensearch.internal
+  */
+ public class BucketPriorityQueue<B extends Terms.Bucket> extends PriorityQueue<B> {
+ 
+     private final Comparator<? super B> comparator;
+ 
+     public BucketPriorityQueue(int size, Comparator<? super B> comparator) {
+         super(size);
+         this.comparator = comparator;
+     }
+ 
+     @Override
+     protected boolean lessThan(B a, B b) {
+         return comparator.compare(a, b) > 0; // reverse, since we reverse again when adding to a list
+     }
+ }
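
The reversed comparison above keeps the head of the heap pointing at the least desirable bucket, so a fixed-size queue retains the overall top-N and drains worst-first. The same pattern with java.util.PriorityQueue standing in for Lucene's PriorityQueue (an assumption, to keep the example self-contained):

    import java.util.PriorityQueue;

    class TopNSketch {
        public static void main(String[] args) {
            PriorityQueue<Integer> heap = new PriorityQueue<>(); // min-heap: head is weakest
            for (int docCount : new int[] { 5, 1, 9, 3, 7 }) {
                heap.offer(docCount);
                if (heap.size() > 3) {
                    heap.poll(); // evict the current weakest candidate
                }
            }
            while (!heap.isEmpty()) {
                System.out.println(heap.poll()); // 5, 7, 9: ascending, reverse for best-first
            }
        }
    }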
+ 

diff --git a/htmlReport/ns-1/sources/source-30.html b/htmlReport/ns-1/sources/source-30.html
new file mode 100644
index 0000000000000..417edb7faa043
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-30.html
@@ -0,0 +1,358 @@
+ [HTML page chrome elided: Coverage Report > StringTerms]

Coverage Summary for Class: StringTerms (org.opensearch.search.aggregations.bucket.terms)

Class                Method, %     Line, %
StringTerms          50% (4/8)     55.6% (5/9)
StringTerms$Bucket   30% (3/10)    28.6% (4/14)
Total                38.9% (7/18)  39.1% (9/23)
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.apache.lucene.util.BytesRef;
+ import org.opensearch.core.common.io.stream.StreamInput;
+ import org.opensearch.core.common.io.stream.StreamOutput;
+ import org.opensearch.core.xcontent.XContentBuilder;
+ import org.opensearch.search.DocValueFormat;
+ import org.opensearch.search.aggregations.BucketOrder;
+ import org.opensearch.search.aggregations.InternalAggregations;
+ 
+ import java.io.IOException;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Objects;
+ 
+ /**
+  * Result of the {@link TermsAggregator} when the field is a String.
+  *
+  * @opensearch.internal
+  */
+ public class StringTerms extends InternalMappedTerms<StringTerms, StringTerms.Bucket> {
+     public static final String NAME = "sterms";
+ 
+     /**
+      * Bucket for string terms
+      *
+      * @opensearch.internal
+      */
+     public static class Bucket extends InternalTerms.Bucket<Bucket> {
+         BytesRef termBytes;
+ 
+         public Bucket(
+             BytesRef term,
+             long docCount,
+             InternalAggregations aggregations,
+             boolean showDocCountError,
+             long docCountError,
+             DocValueFormat format
+         ) {
+             super(docCount, aggregations, showDocCountError, docCountError, format);
+             this.termBytes = term;
+         }
+ 
+         /**
+          * Read from a stream.
+          */
+         public Bucket(StreamInput in, DocValueFormat format, boolean showDocCountError) throws IOException {
+             super(in, format, showDocCountError);
+             termBytes = in.readBytesRef();
+         }
+ 
+         @Override
+         protected void writeTermTo(StreamOutput out) throws IOException {
+             out.writeBytesRef(termBytes);
+         }
+ 
+         @Override
+         public Object getKey() {
+             return getKeyAsString();
+         }
+ 
+         // this method is needed for scripted numeric aggs
+         @Override
+         public Number getKeyAsNumber() {
+             /*
+              * If the term is a long greater than 2^53 then parsing it as a double would lose accuracy, since doubles carry only
+              * 53 significant bits. Therefore, we first parse as a long, and only if this fails do we attempt to parse the term as a double.
+              */
+             try {
+                 return Long.parseLong(termBytes.utf8ToString());
+             } catch (final NumberFormatException ignored) {
+                 return Double.parseDouble(termBytes.utf8ToString());
+             }
+         }
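
Why the long-first parse above matters, in two lines (a worked example; 2^53 + 1 is the first integer a double cannot represent exactly):

    long viaLong   = Long.parseLong("9007199254740993");            // 9007199254740993, exact
    long viaDouble = (long) Double.parseDouble("9007199254740993"); // 9007199254740992, rounded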
+ 
+         @Override
+         public String getKeyAsString() {
+             return format.format(termBytes).toString();
+         }
+ 
+         @Override
+         public int compareKey(Bucket other) {
+             return termBytes.compareTo(other.termBytes);
+         }
+ 
+         @Override
+         protected final XContentBuilder keyToXContent(XContentBuilder builder) throws IOException {
+             return builder.field(CommonFields.KEY.getPreferredName(), getKeyAsString());
+         }
+ 
+         @Override
+         public boolean equals(Object obj) {
+             return super.equals(obj) && Objects.equals(termBytes, ((Bucket) obj).termBytes);
+         }
+ 
+         @Override
+         public int hashCode() {
+             return Objects.hash(super.hashCode(), termBytes);
+         }
+     }
+ 
+     public StringTerms(
+         String name,
+         BucketOrder reduceOrder,
+         BucketOrder order,
+         Map<String, Object> metadata,
+         DocValueFormat format,
+         int shardSize,
+         boolean showTermDocCountError,
+         long otherDocCount,
+         List<Bucket> buckets,
+         long docCountError,
+         TermsAggregator.BucketCountThresholds bucketCountThresholds
+     ) {
+         super(
+             name,
+             reduceOrder,
+             order,
+             metadata,
+             format,
+             shardSize,
+             showTermDocCountError,
+             otherDocCount,
+             buckets,
+             docCountError,
+             bucketCountThresholds
+         );
+     }
+ 
+     /**
+      * Read from a stream.
+      */
+     public StringTerms(StreamInput in) throws IOException {
+         super(in, Bucket::new);
+     }
+ 
+     @Override
+     public String getWriteableName() {
+         return NAME;
+     }
+ 
+     @Override
+     public StringTerms create(List<Bucket> buckets) {
+         return new StringTerms(
+             name,
+             reduceOrder,
+             order,
+             metadata,
+             format,
+             shardSize,
+             showTermDocCountError,
+             otherDocCount,
+             buckets,
+             docCountError,
+             bucketCountThresholds
+         );
+     }
+ 
+     @Override
+     public Bucket createBucket(InternalAggregations aggregations, Bucket prototype) {
+         return new Bucket(
+             prototype.termBytes,
+             prototype.docCount,
+             aggregations,
+             prototype.showDocCountError,
+             prototype.docCountError,
+             prototype.format
+         );
+     }
+ 
+     @Override
+     Bucket createBucket(long docCount, InternalAggregations aggs, long docCountError, StringTerms.Bucket prototype) {
+         return new Bucket(prototype.termBytes, docCount, aggs, prototype.showDocCountError, docCountError, format);
+     }
+ 
+     @Override
+     protected StringTerms create(String name, List<Bucket> buckets, BucketOrder reduceOrder, long docCountError, long otherDocCount) {
+         return new StringTerms(
+             name,
+             reduceOrder,
+             order,
+             getMetadata(),
+             format,
+             shardSize,
+             showTermDocCountError,
+             otherDocCount,
+             buckets,
+             docCountError,
+             bucketCountThresholds
+         );
+     }
+ 
+     @Override
+     protected Bucket[] createBucketsArray(int size) {
+         return new Bucket[size];
+     }
+ }
+ 

diff --git a/htmlReport/ns-1/sources/source-31.html b/htmlReport/ns-1/sources/source-31.html
new file mode 100644
index 0000000000000..56aacb93eafd1
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-31.html
@@ -0,0 +1,573 @@
+ [HTML page chrome elided: Coverage Report > TermsAggregationBuilder]

Coverage Summary for Class: TermsAggregationBuilder (org.opensearch.search.aggregations.bucket.terms)

Class                     Class, %     Method, %     Line, %
TermsAggregationBuilder   100% (1/1)   30% (12/40)   34.6% (45/130)
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.opensearch.Version;
+ import org.opensearch.common.xcontent.LoggingDeprecationHandler;
+ import org.opensearch.core.ParseField;
+ import org.opensearch.core.common.io.stream.StreamInput;
+ import org.opensearch.core.common.io.stream.StreamOutput;
+ import org.opensearch.core.xcontent.ObjectParser;
+ import org.opensearch.core.xcontent.XContentBuilder;
+ import org.opensearch.index.query.QueryRewriteContext;
+ import org.opensearch.index.query.QueryShardContext;
+ import org.opensearch.search.aggregations.AggregationBuilder;
+ import org.opensearch.search.aggregations.Aggregator.SubAggCollectionMode;
+ import org.opensearch.search.aggregations.AggregatorFactories;
+ import org.opensearch.search.aggregations.AggregatorFactory;
+ import org.opensearch.search.aggregations.BucketOrder;
+ import org.opensearch.search.aggregations.InternalOrder;
+ import org.opensearch.search.aggregations.InternalOrder.CompoundOrder;
+ import org.opensearch.search.aggregations.bucket.terms.TermsAggregator.BucketCountThresholds;
+ import org.opensearch.search.aggregations.support.CoreValuesSourceType;
+ import org.opensearch.search.aggregations.support.ValuesSourceAggregationBuilder;
+ import org.opensearch.search.aggregations.support.ValuesSourceAggregatorFactory;
+ import org.opensearch.search.aggregations.support.ValuesSourceConfig;
+ import org.opensearch.search.aggregations.support.ValuesSourceRegistry;
+ import org.opensearch.search.aggregations.support.ValuesSourceType;
+ 
+ import java.io.IOException;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Objects;
+ 
+ /**
+  * Aggregation Builder for terms agg
+  *
+  * @opensearch.internal
+  */
+ public class TermsAggregationBuilder extends ValuesSourceAggregationBuilder<TermsAggregationBuilder> {
+     public static final String NAME = "terms";
+     public static final ValuesSourceRegistry.RegistryKey<TermsAggregatorSupplier> REGISTRY_KEY = new ValuesSourceRegistry.RegistryKey<>(
+         NAME,
+         TermsAggregatorSupplier.class
+     );
+ 
+     public static final ParseField EXECUTION_HINT_FIELD_NAME = new ParseField("execution_hint");
+     public static final ParseField SHARD_SIZE_FIELD_NAME = new ParseField("shard_size");
+     public static final ParseField MIN_DOC_COUNT_FIELD_NAME = new ParseField("min_doc_count");
+     public static final ParseField SHARD_MIN_DOC_COUNT_FIELD_NAME = new ParseField("shard_min_doc_count");
+     public static final ParseField REQUIRED_SIZE_FIELD_NAME = new ParseField("size");
+ 
+     static final TermsAggregator.BucketCountThresholds DEFAULT_BUCKET_COUNT_THRESHOLDS = new TermsAggregator.BucketCountThresholds(
+         1,
+         0,
+         10,
+         -1
+     );
+     public static final ParseField SHOW_TERM_DOC_COUNT_ERROR = new ParseField("show_term_doc_count_error");
+     public static final ParseField ORDER_FIELD = new ParseField("order");
+ 
+     public static final ObjectParser<TermsAggregationBuilder, String> PARSER = ObjectParser.fromBuilder(NAME, TermsAggregationBuilder::new);
+     static {
+         ValuesSourceAggregationBuilder.declareFields(PARSER, true, true, false);
+ 
+         PARSER.declareBoolean(TermsAggregationBuilder::showTermDocCountError, TermsAggregationBuilder.SHOW_TERM_DOC_COUNT_ERROR);
+ 
+         PARSER.declareInt(TermsAggregationBuilder::shardSize, SHARD_SIZE_FIELD_NAME);
+ 
+         PARSER.declareLong(TermsAggregationBuilder::minDocCount, MIN_DOC_COUNT_FIELD_NAME);
+ 
+         PARSER.declareLong(TermsAggregationBuilder::shardMinDocCount, SHARD_MIN_DOC_COUNT_FIELD_NAME);
+ 
+         PARSER.declareInt(TermsAggregationBuilder::size, REQUIRED_SIZE_FIELD_NAME);
+ 
+         PARSER.declareString(TermsAggregationBuilder::executionHint, EXECUTION_HINT_FIELD_NAME);
+ 
+         PARSER.declareField(
+             TermsAggregationBuilder::collectMode,
+             (p, c) -> SubAggCollectionMode.parse(p.text(), LoggingDeprecationHandler.INSTANCE),
+             SubAggCollectionMode.KEY,
+             ObjectParser.ValueType.STRING
+         );
+ 
+         PARSER.declareObjectArray(
+             TermsAggregationBuilder::order,
+             (p, c) -> InternalOrder.Parser.parseOrderParam(p),
+             TermsAggregationBuilder.ORDER_FIELD
+         );
+ 
+         PARSER.declareField(
+             (b, v) -> b.includeExclude(IncludeExclude.merge(v, b.includeExclude())),
+             IncludeExclude::parseInclude,
+             IncludeExclude.INCLUDE_FIELD,
+             ObjectParser.ValueType.OBJECT_ARRAY_OR_STRING
+         );
+ 
+         PARSER.declareField(
+             (b, v) -> b.includeExclude(IncludeExclude.merge(b.includeExclude(), v)),
+             IncludeExclude::parseExclude,
+             IncludeExclude.EXCLUDE_FIELD,
+             ObjectParser.ValueType.STRING_ARRAY
+         );
+     }
+ 
+     public static void registerAggregators(ValuesSourceRegistry.Builder builder) {
+         TermsAggregatorFactory.registerAggregators(builder);
+     }
+ 
+     private BucketOrder order = BucketOrder.compound(BucketOrder.count(false)); // automatically adds tie-breaker key asc order
+     private IncludeExclude includeExclude = null;
+     private String executionHint = null;
+     private SubAggCollectionMode collectMode = null;
+     private TermsAggregator.BucketCountThresholds bucketCountThresholds = new TermsAggregator.BucketCountThresholds(
+         DEFAULT_BUCKET_COUNT_THRESHOLDS
+     );
+     private boolean showTermDocCountError = false;
+ 
+     public TermsAggregationBuilder(String name) {
+         super(name);
+     }
+ 
+     protected TermsAggregationBuilder(
+         TermsAggregationBuilder clone,
+         AggregatorFactories.Builder factoriesBuilder,
+         Map<String, Object> metadata
+     ) {
+         super(clone, factoriesBuilder, metadata);
+         this.order = clone.order;
+         this.executionHint = clone.executionHint;
+         this.includeExclude = clone.includeExclude;
+         this.collectMode = clone.collectMode;
+         this.bucketCountThresholds = new BucketCountThresholds(clone.bucketCountThresholds);
+         this.showTermDocCountError = clone.showTermDocCountError;
+     }
+ 
+     @Override
+     protected ValuesSourceType defaultValueSourceType() {
+         return CoreValuesSourceType.BYTES;
+     }
+ 
+     @Override
+     protected AggregationBuilder shallowCopy(AggregatorFactories.Builder factoriesBuilder, Map<String, Object> metadata) {
+         return new TermsAggregationBuilder(this, factoriesBuilder, metadata);
+     }
+ 
+     /**
+      * Read from a stream.
+      */
+     public TermsAggregationBuilder(StreamInput in) throws IOException {
+         super(in);
+         bucketCountThresholds = new BucketCountThresholds(in);
+         collectMode = in.readOptionalWriteable(SubAggCollectionMode::readFromStream);
+         executionHint = in.readOptionalString();
+         includeExclude = in.readOptionalWriteable(IncludeExclude::new);
+         order = InternalOrder.Streams.readOrder(in);
+         showTermDocCountError = in.readBoolean();
+     }
+ 
+     @Override
+     protected boolean serializeTargetValueType(Version version) {
+         return true;
+     }
+ 
+     @Override
+     protected void innerWriteTo(StreamOutput out) throws IOException {
+         bucketCountThresholds.writeTo(out);
+         out.writeOptionalWriteable(collectMode);
+         out.writeOptionalString(executionHint);
+         out.writeOptionalWriteable(includeExclude);
+         order.writeTo(out);
+         out.writeBoolean(showTermDocCountError);
+     }
+ 
+     /**
+      * Sets the size - indicating how many term buckets should be returned
+      * (defaults to 10)
+      */
+     public TermsAggregationBuilder size(int size) {
+         if (size <= 0) {
+             throw new IllegalArgumentException("[size] must be greater than 0. Found [" + size + "] in [" + name + "]");
+         }
+         bucketCountThresholds.setRequiredSize(size);
+         return this;
+     }
+ 
+     /**
+      * Returns the number of term buckets currently configured
+      */
+     public int size() {
+         return bucketCountThresholds.getRequiredSize();
+     }
+ 
+     /**
+      * Sets the shard_size - indicating the number of term buckets each shard
+      * will return to the coordinating node (the node that coordinates the
+      * search execution). The higher the shard size is, the more accurate the
+      * results are.
+      */
+     public TermsAggregationBuilder shardSize(int shardSize) {
+         if (shardSize <= 0) {
+             throw new IllegalArgumentException("[shardSize] must be greater than 0. Found [" + shardSize + "] in [" + name + "]");
+         }
+         bucketCountThresholds.setShardSize(shardSize);
+         return this;
+     }
+ 
+     /**
+      * Returns the number of term buckets per shard that are currently configured
+      */
+     public int shardSize() {
+         return bucketCountThresholds.getShardSize();
+     }
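
A usage sketch (hypothetical aggregation name and field): each shard returns only its local top shard_size candidates, so a term ranked just below the cutoff on every shard can be missed globally; raising shard_size widens each shard's candidate list at the cost of more work per shard.

    TermsAggregationBuilder genres = new TermsAggregationBuilder("genres")
        .field("genre")  // hypothetical keyword field
        .size(10)        // top 10 terms in the final, merged response
        .shardSize(50);  // but collect the top 50 candidates from every shard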
+ 
+     /**
+      * Set the minimum document count terms should have in order to appear in
+      * the response.
+      */
+     public TermsAggregationBuilder minDocCount(long minDocCount) {
+         if (minDocCount < 0) {
+             throw new IllegalArgumentException(
+                 "[minDocCount] must be greater than or equal to 0. Found [" + minDocCount + "] in [" + name + "]"
+             );
+         }
+         bucketCountThresholds.setMinDocCount(minDocCount);
+         return this;
+     }
+ 
+     /**
+      * Returns the minimum document count required per term
+      */
+     public long minDocCount() {
+         return bucketCountThresholds.getMinDocCount();
+     }
+ 
+     /**
+      * Set the minimum document count terms should have on the shard in order to
+      * appear in the response.
+      */
+     public TermsAggregationBuilder shardMinDocCount(long shardMinDocCount) {
+         if (shardMinDocCount < 0) {
+             throw new IllegalArgumentException(
+                 "[shardMinDocCount] must be greater than or equal to 0. Found [" + shardMinDocCount + "] in [" + name + "]"
+             );
+         }
+         bucketCountThresholds.setShardMinDocCount(shardMinDocCount);
+         return this;
+     }
+ 
+     /**
+      * Returns the minimum document count required per term, per shard
+      */
+     public long shardMinDocCount() {
+         return bucketCountThresholds.getShardMinDocCount();
+     }
+ 
+     /** Set a new order on this builder and return the builder so that calls
+      *  can be chained. A tie-breaker may be added to avoid non-deterministic ordering. */
+     public TermsAggregationBuilder order(BucketOrder order) {
+         if (order == null) {
+             throw new IllegalArgumentException("[order] must not be null: [" + name + "]");
+         }
+         if (order instanceof CompoundOrder || InternalOrder.isKeyOrder(order)) {
+             this.order = order; // if order already contains a tie-breaker we are good to go
+         } else { // otherwise add a tie-breaker by using a compound order
+             this.order = BucketOrder.compound(order);
+         }
+         return this;
+     }
+ 
+     /**
+      * Sets the order in which the buckets will be returned. A tie-breaker may be added to avoid non-deterministic
+      * ordering.
+      */
+     public TermsAggregationBuilder order(List<BucketOrder> orders) {
+         if (orders == null) {
+             throw new IllegalArgumentException("[orders] must not be null: [" + name + "]");
+         }
+         // if the list only contains one order use that to avoid inconsistent xcontent
+         order(orders.size() > 1 ? BucketOrder.compound(orders) : orders.get(0));
+         return this;
+     }
+ 
+     /**
+      * Gets the order in which the buckets will be returned.
+      */
+     public BucketOrder order() {
+         return order;
+     }
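
A sketch of the tie-breaking described above (hypothetical aggregation names): a bare count order is wrapped in a CompoundOrder, which, per the comment on the order field, appends a key-ascending tie-breaker, so buckets with equal doc counts come back in deterministic key order; key and compound orders are stored as-is.

    TermsAggregationBuilder byCount = new TermsAggregationBuilder("byCount")
        .order(BucketOrder.count(false)); // stored as compound(count desc, key asc)

    TermsAggregationBuilder byKey = new TermsAggregationBuilder("byKey")
        .order(BucketOrder.key(true));    // already deterministic, kept unchanged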
+ 
+     /**
+      * Expert: sets an execution hint for the aggregation.
+      */
+     public TermsAggregationBuilder executionHint(String executionHint) {
+         this.executionHint = executionHint;
+         return this;
+     }
+ 
+     /**
+      * Expert: gets the execution hint of the aggregation.
+      */
+     public String executionHint() {
+         return executionHint;
+     }
+ 
+     /**
+      * Expert: set the collection mode.
+      */
+     public TermsAggregationBuilder collectMode(SubAggCollectionMode collectMode) {
+         if (collectMode == null) {
+             throw new IllegalArgumentException("[collectMode] must not be null: [" + name + "]");
+         }
+         this.collectMode = collectMode;
+         return this;
+     }
+ 
+     /**
+      * Expert: get the collection mode.
+      */
+     public SubAggCollectionMode collectMode() {
+         return collectMode;
+     }
+ 
+     /**
+      * Set terms to include and exclude from the aggregation results
+      */
+     public TermsAggregationBuilder includeExclude(IncludeExclude includeExclude) {
+         this.includeExclude = includeExclude;
+         return this;
+     }
+ 
+     /**
+      * Get terms to include and exclude from the aggregation results
+      */
+     public IncludeExclude includeExclude() {
+         return includeExclude;
+     }
+ 
+     /**
+      * Get whether the doc count error will be returned for individual terms
+      */
+     public boolean showTermDocCountError() {
+         return showTermDocCountError;
+     }
+ 
+     /**
+      * Set whether the doc count error will be returned for individual terms
+      */
+     public TermsAggregationBuilder showTermDocCountError(boolean showTermDocCountError) {
+         this.showTermDocCountError = showTermDocCountError;
+         return this;
+     }
+ 
+     @Override
+     public BucketCardinality bucketCardinality() {
+         return BucketCardinality.MANY;
+     }
+ 
+     @Override
+     protected ValuesSourceAggregatorFactory innerBuild(
+         QueryShardContext queryShardContext,
+         ValuesSourceConfig config,
+         AggregatorFactory parent,
+         AggregatorFactories.Builder subFactoriesBuilder
+     ) throws IOException {
+         return new TermsAggregatorFactory(
+             name,
+             config,
+             order,
+             includeExclude,
+             executionHint,
+             collectMode,
+             bucketCountThresholds,
+             showTermDocCountError,
+             queryShardContext,
+             parent,
+             subFactoriesBuilder,
+             metadata
+         );
+     }
+ 
+     @Override
+     protected XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
+         bucketCountThresholds.toXContent(builder, params);
+         builder.field(SHOW_TERM_DOC_COUNT_ERROR.getPreferredName(), showTermDocCountError);
+         if (executionHint != null) {
+             builder.field(TermsAggregationBuilder.EXECUTION_HINT_FIELD_NAME.getPreferredName(), executionHint);
+         }
+         builder.field(ORDER_FIELD.getPreferredName());
+         order.toXContent(builder, params);
+         if (collectMode != null) {
+             builder.field(SubAggCollectionMode.KEY.getPreferredName(), collectMode.parseField().getPreferredName());
+         }
+         if (includeExclude != null) {
+             includeExclude.toXContent(builder, params);
+         }
+         return builder;
+     }
+ 
+     @Override
+     public int hashCode() {
+         return Objects.hash(
+             super.hashCode(),
+             bucketCountThresholds,
+             collectMode,
+             executionHint,
+             includeExclude,
+             order,
+             showTermDocCountError
+         );
+     }
+ 
+     @Override
+     public boolean equals(Object obj) {
+         if (this == obj) return true;
+         if (obj == null || getClass() != obj.getClass()) return false;
+         if (super.equals(obj) == false) return false;
+         TermsAggregationBuilder other = (TermsAggregationBuilder) obj;
+         return Objects.equals(bucketCountThresholds, other.bucketCountThresholds)
+             && Objects.equals(collectMode, other.collectMode)
+             && Objects.equals(executionHint, other.executionHint)
+             && Objects.equals(includeExclude, other.includeExclude)
+             && Objects.equals(order, other.order)
+             && Objects.equals(showTermDocCountError, other.showTermDocCountError);
+     }
+ 
+     @Override
+     public String getType() {
+         return NAME;
+     }
+ 
+     @Override
+     protected AggregationBuilder doRewrite(QueryRewriteContext queryShardContext) throws IOException {
+         return super.doRewrite(queryShardContext);
+     }
+ 
+     @Override
+     protected ValuesSourceRegistry.RegistryKey<?> getRegistryKey() {
+         return REGISTRY_KEY;
+     }
+ }
+ 

diff --git a/htmlReport/ns-1/sources/source-32.html b/htmlReport/ns-1/sources/source-32.html
new file mode 100644
index 0000000000000..105df5c685a49
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-32.html
@@ -0,0 +1,445 @@
+ [HTML page chrome elided: Coverage Report > TermsAggregator]

Coverage Summary for Class: TermsAggregator (org.opensearch.search.aggregations.bucket.terms)

Class                                              Method, %      Line, %
TermsAggregator                                    50% (2/4)      37.9% (11/29)
TermsAggregator$BucketCountThresholds              56.2% (9/16)   36.2% (17/47)
TermsAggregator$CoordinatorBucketCountThresholds   0% (0/3)       0% (0/3)
Total                                              47.8% (11/23)  35.4% (28/79)
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ 
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.opensearch.OpenSearchException;
+ import org.opensearch.common.annotation.PublicApi;
+ import org.opensearch.core.common.io.stream.StreamInput;
+ import org.opensearch.core.common.io.stream.StreamOutput;
+ import org.opensearch.core.common.io.stream.Writeable;
+ import org.opensearch.core.xcontent.ToXContentFragment;
+ import org.opensearch.core.xcontent.XContentBuilder;
+ import org.opensearch.search.DocValueFormat;
+ import org.opensearch.search.aggregations.AggregationExecutionException;
+ import org.opensearch.search.aggregations.Aggregator;
+ import org.opensearch.search.aggregations.AggregatorFactories;
+ import org.opensearch.search.aggregations.BucketOrder;
+ import org.opensearch.search.aggregations.InternalOrder.Aggregation;
+ import org.opensearch.search.aggregations.InternalOrder.CompoundOrder;
+ import org.opensearch.search.aggregations.bucket.DeferableBucketAggregator;
+ import org.opensearch.search.aggregations.bucket.nested.NestedAggregator;
+ import org.opensearch.search.aggregations.support.AggregationPath;
+ import org.opensearch.search.internal.SearchContext;
+ 
+ import java.io.IOException;
+ import java.util.Comparator;
+ import java.util.HashSet;
+ import java.util.Map;
+ import java.util.Objects;
+ import java.util.Set;
+ 
+ /**
+  * Base aggregator class to aggregate documents by terms
+  *
+  * @opensearch.internal
+  */
+ public abstract class TermsAggregator extends DeferableBucketAggregator {
+ 
+     /**
+      * Bucket count thresholds
+      *
+      * @opensearch.api
+      */
+     @PublicApi(since = "1.0.0")
+     public static class BucketCountThresholds implements Writeable, ToXContentFragment {
+         private long minDocCount;
+         private long shardMinDocCount;
+         private int requiredSize;
+         private int shardSize;
+ 
+         public BucketCountThresholds(long minDocCount, long shardMinDocCount, int requiredSize, int shardSize) {
+             this.minDocCount = minDocCount;
+             this.shardMinDocCount = shardMinDocCount;
+             this.requiredSize = requiredSize;
+             this.shardSize = shardSize;
+         }
+ 
+         /**
+          * Read from a stream.
+          */
+         public BucketCountThresholds(StreamInput in) throws IOException {
+             requiredSize = in.readInt();
+             shardSize = in.readInt();
+             minDocCount = in.readLong();
+             shardMinDocCount = in.readLong();
+         }
+ 
+         @Override
+         public void writeTo(StreamOutput out) throws IOException {
+             out.writeInt(requiredSize);
+             out.writeInt(shardSize);
+             out.writeLong(minDocCount);
+             out.writeLong(shardMinDocCount);
+         }
+ 
+         public BucketCountThresholds(BucketCountThresholds bucketCountThresholds) {
+             this(
+                 bucketCountThresholds.minDocCount,
+                 bucketCountThresholds.shardMinDocCount,
+                 bucketCountThresholds.requiredSize,
+                 bucketCountThresholds.shardSize
+             );
+         }
+ 
+         public void ensureValidity() {
+ 
+             // shard_size cannot be smaller than size, as we need to fetch at least <size> entries from every shard in order to
+             // return <size>
+             if (shardSize < requiredSize) {
+                 setShardSize(requiredSize);
+             }
+ 
+             // shard_min_doc_count should not be larger than min_doc_count because this can cause buckets to be removed that would match
+             // the min_doc_count criteria
+             if (shardMinDocCount > minDocCount) {
+                 setShardMinDocCount(minDocCount);
+             }
+ 
+             if (requiredSize <= 0 || shardSize <= 0) {
+                 throw new OpenSearchException("parameters [required_size] and [shard_size] must be >0 in terms aggregation.");
+             }
+ 
+             if (minDocCount < 0 || shardMinDocCount < 0) {
+                 throw new OpenSearchException("parameters [min_doc_count] and [shard_min_doc_count] must be >=0 in terms aggregation.");
+             }
+         }
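
A sketch of the clamping above (hypothetical values): inconsistent thresholds are silently corrected before the hard checks run.

    BucketCountThresholds thresholds = new BucketCountThresholds(
        /* minDocCount */ 1, /* shardMinDocCount */ 5, /* requiredSize */ 10, /* shardSize */ 3);
    thresholds.ensureValidity();
    // thresholds.getShardSize()        == 10 (raised to requiredSize)
    // thresholds.getShardMinDocCount() == 1  (lowered to minDocCount)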
+ 
+         public long getShardMinDocCount() {
+             return shardMinDocCount;
+         }
+ 
+         public void setShardMinDocCount(long shardMinDocCount) {
+             this.shardMinDocCount = shardMinDocCount;
+         }
+ 
+         public long getMinDocCount() {
+             return minDocCount;
+         }
+ 
+         public void setMinDocCount(long minDocCount) {
+             this.minDocCount = minDocCount;
+         }
+ 
+         public int getRequiredSize() {
+             return requiredSize;
+         }
+ 
+         public void setRequiredSize(int requiredSize) {
+             this.requiredSize = requiredSize;
+         }
+ 
+         public int getShardSize() {
+             return shardSize;
+         }
+ 
+         public void setShardSize(int shardSize) {
+             this.shardSize = shardSize;
+         }
+ 
+         @Override
+         public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+             builder.field(TermsAggregationBuilder.REQUIRED_SIZE_FIELD_NAME.getPreferredName(), requiredSize);
+             if (shardSize != -1) {
+                 builder.field(TermsAggregationBuilder.SHARD_SIZE_FIELD_NAME.getPreferredName(), shardSize);
+             }
+             builder.field(TermsAggregationBuilder.MIN_DOC_COUNT_FIELD_NAME.getPreferredName(), minDocCount);
+             builder.field(TermsAggregationBuilder.SHARD_MIN_DOC_COUNT_FIELD_NAME.getPreferredName(), shardMinDocCount);
+             return builder;
+         }
+ 
+         @Override
+         public int hashCode() {
+             return Objects.hash(requiredSize, shardSize, minDocCount, shardMinDocCount);
+         }
+ 
+         @Override
+         public boolean equals(Object obj) {
+             if (obj == null) {
+                 return false;
+             }
+             if (getClass() != obj.getClass()) {
+                 return false;
+             }
+             BucketCountThresholds other = (BucketCountThresholds) obj;
+             return Objects.equals(requiredSize, other.requiredSize)
+                 && Objects.equals(shardSize, other.shardSize)
+                 && Objects.equals(minDocCount, other.minDocCount)
+                 && Objects.equals(shardMinDocCount, other.shardMinDocCount);
+         }
+     }
+ 
+     /**
+      * A BucketCountThresholds variant that throws an exception when shardMinDocCount or shardSize is accessed. It is used when
+      * deserializing thresholds on the coordinator during reduce, where those shard-level settings must not be read.
+      *
+      * @opensearch.internal
+      */
+     public static class CoordinatorBucketCountThresholds extends BucketCountThresholds {
+ 
+         public CoordinatorBucketCountThresholds(long minDocCount, long shardMinDocCount, int requiredSize, int shardSize) {
+             super(minDocCount, shardMinDocCount, requiredSize, shardSize);
+         }
+ 
+         @Override
+         public long getShardMinDocCount() {
+             throw new AggregationExecutionException("shard_min_doc_count should not be accessed via CoordinatorBucketCountThresholds");
+         }
+ 
+         @Override
+         public int getShardSize() {
+             throw new AggregationExecutionException("shard_size should not be accessed via CoordinatorBucketCountThresholds");
+         }
+     }
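
A sketch of the guard above: global thresholds stay readable on the coordinator, while any accidental read of a shard-level setting fails fast.

    BucketCountThresholds coordinator = new CoordinatorBucketCountThresholds(1, -1, 10, -1);
    coordinator.getMinDocCount();  // 1: global thresholds are readable
    coordinator.getRequiredSize(); // 10
    coordinator.getShardSize();    // throws AggregationExecutionException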
+ 
+     protected final DocValueFormat format;
+     protected final BucketCountThresholds bucketCountThresholds;
+     protected final BucketOrder order;
+     protected final Comparator<InternalTerms.Bucket<?>> partiallyBuiltBucketComparator;
+     protected final Set<Aggregator> aggsUsedForSorting = new HashSet<>();
+     protected final SubAggCollectionMode collectMode;
+ 
+     public TermsAggregator(
+         String name,
+         AggregatorFactories factories,
+         SearchContext context,
+         Aggregator parent,
+         BucketCountThresholds bucketCountThresholds,
+         BucketOrder order,
+         DocValueFormat format,
+         SubAggCollectionMode collectMode,
+         Map<String, Object> metadata
+     ) throws IOException {
+         super(name, factories, context, parent, metadata);
+         this.bucketCountThresholds = bucketCountThresholds;
+         this.order = order;
+         partiallyBuiltBucketComparator = order == null ? null : order.partiallyBuiltBucketComparator(b -> b.bucketOrd, this);
+         this.format = format;
+         if (subAggsNeedScore() && descendsFromNestedAggregator(parent)) {
+             /*
+               Force the execution to depth_first because we need to access the score of
+               nested documents in a sub-aggregation and we are not able to generate this score
+               while replaying deferred documents.
+              */
+             this.collectMode = SubAggCollectionMode.DEPTH_FIRST;
+         } else {
+             this.collectMode = collectMode;
+         }
+         // Don't defer any child agg if we are dependent on it for pruning results
+         if (order instanceof Aggregation) {
+             AggregationPath path = ((Aggregation) order).path();
+             aggsUsedForSorting.add(path.resolveTopmostAggregator(this));
+         } else if (order instanceof CompoundOrder) {
+             CompoundOrder compoundOrder = (CompoundOrder) order;
+             for (BucketOrder orderElement : compoundOrder.orderElements()) {
+                 if (orderElement instanceof Aggregation) {
+                     AggregationPath path = ((Aggregation) orderElement).path();
+                     aggsUsedForSorting.add(path.resolveTopmostAggregator(this));
+                 }
+             }
+         }
+     }
+ 
+     static boolean descendsFromNestedAggregator(Aggregator parent) {
+         while (parent != null) {
+             if (parent.getClass() == NestedAggregator.class) {
+                 return true;
+             }
+             parent = parent.parent();
+         }
+         return false;
+     }
+ 
+     private boolean subAggsNeedScore() {
+         for (Aggregator subAgg : subAggregators) {
+             if (subAgg.scoreMode().needsScores()) {
+                 return true;
+             }
+         }
+         return false;
+     }
+ 
+     @Override
+     protected boolean shouldDefer(Aggregator aggregator) {
+         return collectMode == SubAggCollectionMode.BREADTH_FIRST && !aggsUsedForSorting.contains(aggregator);
+     }
+ }
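
The deferral rule in shouldDefer above, spelled out as a decision (a sketch; variable names are illustrative): under breadth_first collection every sub-aggregator is deferred except those the bucket order depends on, and the constructor forces depth_first whenever a scoring sub-aggregation sits below a nested aggregator, because scores cannot be regenerated while replaying deferred documents.

    boolean breadthFirst = true;    // collectMode == SubAggCollectionMode.BREADTH_FIRST
    boolean usedForSorting = false; // aggsUsedForSorting.contains(aggregator)
    boolean deferred = breadthFirst && !usedForSorting;
    // deferred == true: collect this sub-aggregation later, only for the buckets
    // that survive pruning; false: it drives the sort, so collect it eagerly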
+ 

diff --git a/htmlReport/ns-1/sources/source-33.html b/htmlReport/ns-1/sources/source-33.html
new file mode 100644
index 0000000000000..06b5e59b2192b
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-33.html
@@ -0,0 +1,790 @@
+ [HTML page chrome elided: Coverage Report > TermsAggregatorFactory]

Coverage Summary for Class: TermsAggregatorFactory (org.opensearch.search.aggregations.bucket.terms)

Class                                     Method, %       Line, %
TermsAggregatorFactory                    70% (7/10)      59.6% (28/47)
TermsAggregatorFactory$1                  100% (2/2)      78.6% (11/14)
TermsAggregatorFactory$2                  50% (1/2)       5.3% (1/19)
TermsAggregatorFactory$3                  0% (0/2)        0% (0/2)
TermsAggregatorFactory$ExecutionMode      100% (5/5)      80% (8/10)
TermsAggregatorFactory$ExecutionMode$1    33.3% (1/3)     14.3% (1/7)
TermsAggregatorFactory$ExecutionMode$2    75% (3/4)       60.9% (14/23)
Total                                     67.9% (19/28)   51.6% (63/122)
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ 
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.apache.lucene.search.IndexSearcher;
+ import org.opensearch.core.ParseField;
+ import org.opensearch.index.query.QueryShardContext;
+ import org.opensearch.search.DocValueFormat;
+ import org.opensearch.search.aggregations.AggregationExecutionException;
+ import org.opensearch.search.aggregations.Aggregator;
+ import org.opensearch.search.aggregations.Aggregator.SubAggCollectionMode;
+ import org.opensearch.search.aggregations.AggregatorFactories;
+ import org.opensearch.search.aggregations.AggregatorFactory;
+ import org.opensearch.search.aggregations.BucketOrder;
+ import org.opensearch.search.aggregations.CardinalityUpperBound;
+ import org.opensearch.search.aggregations.InternalAggregation;
+ import org.opensearch.search.aggregations.InternalOrder;
+ import org.opensearch.search.aggregations.InternalOrder.CompoundOrder;
+ import org.opensearch.search.aggregations.NonCollectingAggregator;
+ import org.opensearch.search.aggregations.bucket.BucketUtils;
+ import org.opensearch.search.aggregations.bucket.terms.NumericTermsAggregator.ResultStrategy;
+ import org.opensearch.search.aggregations.bucket.terms.TermsAggregator.BucketCountThresholds;
+ import org.opensearch.search.aggregations.support.CoreValuesSourceType;
+ import org.opensearch.search.aggregations.support.ValuesSource;
+ import org.opensearch.search.aggregations.support.ValuesSourceAggregatorFactory;
+ import org.opensearch.search.aggregations.support.ValuesSourceConfig;
+ import org.opensearch.search.aggregations.support.ValuesSourceRegistry;
+ import org.opensearch.search.internal.SearchContext;
+ 
+ import java.io.IOException;
+ import java.util.Arrays;
+ import java.util.Map;
+ import java.util.function.Function;
+ 
+ /**
+  * Aggregation Factory for terms agg
+  *
+  * @opensearch.internal
+  */
+ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory {
+     static Boolean REMAP_GLOBAL_ORDS, COLLECT_SEGMENT_ORDS;
+ 
+     static void registerAggregators(ValuesSourceRegistry.Builder builder) {
+         builder.register(
+             TermsAggregationBuilder.REGISTRY_KEY,
+             Arrays.asList(CoreValuesSourceType.BYTES, CoreValuesSourceType.IP),
+             TermsAggregatorFactory.bytesSupplier(),
+             true
+         );
+ 
+         builder.register(
+             TermsAggregationBuilder.REGISTRY_KEY,
+             Arrays.asList(CoreValuesSourceType.DATE, CoreValuesSourceType.BOOLEAN, CoreValuesSourceType.NUMERIC),
+             TermsAggregatorFactory.numericSupplier(),
+             true
+         );
+     }
+ 
+     /**
+      * This supplier is used for all the field types that should be aggregated as bytes/strings,
+      * including those that need global ordinals
+      */
+     private static TermsAggregatorSupplier bytesSupplier() {
+         return new TermsAggregatorSupplier() {
+             @Override
+             public Aggregator build(
+                 String name,
+                 AggregatorFactories factories,
+                 ValuesSource valuesSource,
+                 BucketOrder order,
+                 DocValueFormat format,
+                 TermsAggregator.BucketCountThresholds bucketCountThresholds,
+                 IncludeExclude includeExclude,
+                 String executionHint,
+                 SearchContext context,
+                 Aggregator parent,
+                 SubAggCollectionMode subAggCollectMode,
+                 boolean showTermDocCountError,
+                 CardinalityUpperBound cardinality,
+                 Map<String, Object> metadata
+             ) throws IOException {
+                 ExecutionMode execution = null;
+                 if (executionHint != null) {
+                     execution = ExecutionMode.fromString(executionHint);
+                 }
+                 // In some cases, using ordinals is just not supported: override it
+                 if (valuesSource instanceof ValuesSource.Bytes.WithOrdinals == false) {
+                     execution = ExecutionMode.MAP;
+                 }
+                 if (execution == null) {
+                     execution = ExecutionMode.GLOBAL_ORDINALS;
+                 }
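+                 // maxOrd is only computed when using global ordinals; -1 tells the
+                 // sub-agg collection-mode heuristic below that the bucket count is unknown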
+                 final long maxOrd = execution == ExecutionMode.GLOBAL_ORDINALS ? getMaxOrd(valuesSource, context.searcher()) : -1;
+                 if (subAggCollectMode == null) {
+                     subAggCollectMode = pickSubAggCollectMode(factories, bucketCountThresholds.getShardSize(), maxOrd);
+                 }
+ 
+                 if ((includeExclude != null) && (includeExclude.isRegexBased()) && format != DocValueFormat.RAW) {
+                     // TODO this exception message is not really accurate for the string case. It's really disallowing regex + formatter
+                     throw new AggregationExecutionException(
+                         "Aggregation ["
+                             + name
+                             + "] cannot support regular expression style "
+                             + "include/exclude settings as they can only be applied to string fields. Use an array of values for "
+                             + "include/exclude clauses"
+                     );
+                 }
+ 
+                 // TODO: [Zach] we might want to refactor and remove ExecutionMode#create(), moving that logic outside the enum
+                 return execution.create(
+                     name,
+                     factories,
+                     valuesSource,
+                     order,
+                     format,
+                     bucketCountThresholds,
+                     includeExclude,
+                     context,
+                     parent,
+                     subAggCollectMode,
+                     showTermDocCountError,
+                     cardinality,
+                     metadata
+                 );
+ 
+             }
+         };
+     }
+ 
+     /**
+      * This supplier is used for all fields that expect to be aggregated as a numeric value.
+      * This includes floating point numbers, and formatted types that use numerics internally for storage (date, boolean, etc.)
+      */
+     private static TermsAggregatorSupplier numericSupplier() {
+         return new TermsAggregatorSupplier() {
+             @Override
+             public Aggregator build(
+                 String name,
+                 AggregatorFactories factories,
+                 ValuesSource valuesSource,
+                 BucketOrder order,
+                 DocValueFormat format,
+                 TermsAggregator.BucketCountThresholds bucketCountThresholds,
+                 IncludeExclude includeExclude,
+                 String executionHint,
+                 SearchContext context,
+                 Aggregator parent,
+                 SubAggCollectionMode subAggCollectMode,
+                 boolean showTermDocCountError,
+                 CardinalityUpperBound cardinality,
+                 Map<String, Object> metadata
+             ) throws IOException {
+ 
+                 if ((includeExclude != null) && (includeExclude.isRegexBased())) {
+                     throw new AggregationExecutionException(
+                         "Aggregation ["
+                             + name
+                             + "] cannot support regular expression style "
+                             + "include/exclude settings as they can only be applied to string fields. Use an array of numeric values for "
+                             + "include/exclude clauses used to filter numeric fields"
+                     );
+                 }
+ 
+                 if (subAggCollectMode == null) {
+                     subAggCollectMode = pickSubAggCollectMode(factories, bucketCountThresholds.getShardSize(), -1);
+                 }
+ 
+                 ValuesSource.Numeric numericValuesSource = (ValuesSource.Numeric) valuesSource;
+                 IncludeExclude.LongFilter longFilter = null;
+                 Function<NumericTermsAggregator, ResultStrategy<?, ?>> resultStrategy;
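+                 // Pick a per-type result strategy and a matching include/exclude filter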
+                 if (numericValuesSource.isFloatingPoint()) {
+                     if (includeExclude != null) {
+                         longFilter = includeExclude.convertToDoubleFilter();
+                     }
+                     resultStrategy = agg -> agg.new DoubleTermsResults(showTermDocCountError);
+                 } else if (numericValuesSource.isBigInteger()) {
+                     if (includeExclude != null) {
+                         // assuming IncludeExclude#convertToUnsignedLongFilter(format) is available here;
+                         // a double-based filter cannot represent unsigned longs above 2^53 exactly
+                         longFilter = includeExclude.convertToUnsignedLongFilter(format);
+                     }
+                     resultStrategy = agg -> agg.new UnsignedLongTermsResults(showTermDocCountError);
+                 } else {
+                     if (includeExclude != null) {
+                         longFilter = includeExclude.convertToLongFilter(format);
+                     }
+                     resultStrategy = agg -> agg.new LongTermsResults(showTermDocCountError);
+                 }
+                 return new NumericTermsAggregator(
+                     name,
+                     factories,
+                     resultStrategy,
+                     numericValuesSource,
+                     format,
+                     order,
+                     bucketCountThresholds,
+                     context,
+                     parent,
+                     subAggCollectMode,
+                     longFilter,
+                     cardinality,
+                     metadata
+                 );
+             }
+         };
+     }
+ 
+     private final BucketOrder order;
+     private final IncludeExclude includeExclude;
+     private final String executionHint;
+     private final SubAggCollectionMode collectMode;
+     private final TermsAggregator.BucketCountThresholds bucketCountThresholds;
+     private final boolean showTermDocCountError;
+ 
+     TermsAggregatorFactory(
+         String name,
+         ValuesSourceConfig config,
+         BucketOrder order,
+         IncludeExclude includeExclude,
+         String executionHint,
+         SubAggCollectionMode collectMode,
+         TermsAggregator.BucketCountThresholds bucketCountThresholds,
+         boolean showTermDocCountError,
+         QueryShardContext queryShardContext,
+         AggregatorFactory parent,
+         AggregatorFactories.Builder subFactoriesBuilder,
+         Map<String, Object> metadata
+     ) throws IOException {
+         super(name, config, queryShardContext, parent, subFactoriesBuilder, metadata);
+         this.order = order;
+         this.includeExclude = includeExclude;
+         this.executionHint = executionHint;
+         this.collectMode = collectMode;
+         this.bucketCountThresholds = bucketCountThresholds;
+         this.showTermDocCountError = showTermDocCountError;
+     }
+ 
+     @Override
+     protected Aggregator createUnmapped(SearchContext searchContext, Aggregator parent, Map<String, Object> metadata) throws IOException {
+         final InternalAggregation aggregation = new UnmappedTerms(name, order, bucketCountThresholds, metadata);
+         Aggregator agg = new NonCollectingAggregator(name, searchContext, parent, factories, metadata) {
+             @Override
+             public InternalAggregation buildEmptyAggregation() {
+                 return aggregation;
+             }
+         };
+         // even in the case of an unmapped aggregator, validate the order
+         order.validate(agg);
+         return agg;
+     }
+ 
+     private static boolean isAggregationSort(BucketOrder order) {
+         if (order instanceof InternalOrder.Aggregation) {
+             return true;
+         } else if (order instanceof InternalOrder.CompoundOrder) {
+             InternalOrder.CompoundOrder compoundOrder = (CompoundOrder) order;
+             return compoundOrder.orderElements().stream().anyMatch(TermsAggregatorFactory::isAggregationSort);
+         } else {
+             return false;
+         }
+     }
+ 
+     @Override
+     protected Aggregator doCreateInternal(
+         SearchContext searchContext,
+         Aggregator parent,
+         CardinalityUpperBound cardinality,
+         Map<String, Object> metadata
+     ) throws IOException {
+         TermsAggregatorSupplier aggregatorSupplier = queryShardContext.getValuesSourceRegistry()
+             .getAggregator(TermsAggregationBuilder.REGISTRY_KEY, config);
+         BucketCountThresholds bucketCountThresholds = new BucketCountThresholds(this.bucketCountThresholds);
+         if (InternalOrder.isKeyOrder(order) == false
+             && bucketCountThresholds.getShardSize() == TermsAggregationBuilder.DEFAULT_BUCKET_COUNT_THRESHOLDS.getShardSize()) {
+             // The user has not made a shardSize selection. Use default
+             // heuristic to avoid any wrong-ranking caused by distributed
+             // counting
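+             // (as a rough worked example: at the time of writing the suggested shard-side
+             // queue size is about size * 1.5 + 10, so the default size of 10 becomes a
+             // shard_size of 25)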
+             bucketCountThresholds.setShardSize(BucketUtils.suggestShardSideQueueSize(bucketCountThresholds.getRequiredSize()));
+         }
+         bucketCountThresholds.ensureValidity();
+ 
+         return aggregatorSupplier.build(
+             name,
+             factories,
+             config.getValuesSource(),
+             order,
+             config.format(),
+             bucketCountThresholds,
+             includeExclude,
+             executionHint,
+             searchContext,
+             parent,
+             collectMode,
+             showTermDocCountError,
+             cardinality,
+             metadata
+         );
+     }
+ 
+     /**
+      * Pick a {@link SubAggCollectionMode} based on heuristics about what
+      * we're collecting.
+      */
+     static SubAggCollectionMode pickSubAggCollectMode(AggregatorFactories factories, int expectedSize, long maxOrd) {
+         if (factories.countAggregators() == 0) {
+             // Without sub-aggregations we pretty much ignore this field value so just pick something
+             return SubAggCollectionMode.DEPTH_FIRST;
+         }
+         if (expectedSize == Integer.MAX_VALUE) {
+             // We expect to return all buckets so delaying them won't save any time
+             return SubAggCollectionMode.DEPTH_FIRST;
+         }
+         if (maxOrd == -1 || maxOrd > expectedSize) {
+             /*
+              * We either don't know how many buckets we expect there to be
+              * (maxOrd == -1) or we expect there to be more buckets than
+              * we will collect from this shard. So delaying collection of
+              * the sub-buckets *should* save time.
+              */
+             return SubAggCollectionMode.BREADTH_FIRST;
+         }
+         // We expect to collect so many buckets that we may as well collect them all.
+         return SubAggCollectionMode.DEPTH_FIRST;
+     }
+ 
+     /**
+      * Get the maximum global ordinal value for the provided {@link ValuesSource} or -1
+      * if the values source is not an instance of {@link ValuesSource.Bytes.WithOrdinals}.
+      */
+     private static long getMaxOrd(ValuesSource source, IndexSearcher searcher) throws IOException {
+         if (source instanceof ValuesSource.Bytes.WithOrdinals) {
+             ValuesSource.Bytes.WithOrdinals valueSourceWithOrdinals = (ValuesSource.Bytes.WithOrdinals) source;
+             return valueSourceWithOrdinals.globalMaxOrd(searcher);
+         } else {
+             return -1;
+         }
+     }
+ 
+     /**
+      * The execution mode for the terms agg
+      *
+      * @opensearch.internal
+      */
+     public enum ExecutionMode {
+ 
+         MAP(new ParseField("map")) {
+ 
+             @Override
+             Aggregator create(
+                 String name,
+                 AggregatorFactories factories,
+                 ValuesSource valuesSource,
+                 BucketOrder order,
+                 DocValueFormat format,
+                 TermsAggregator.BucketCountThresholds bucketCountThresholds,
+                 IncludeExclude includeExclude,
+                 SearchContext context,
+                 Aggregator parent,
+                 SubAggCollectionMode subAggCollectMode,
+                 boolean showTermDocCountError,
+                 CardinalityUpperBound cardinality,
+                 Map<String, Object> metadata
+             ) throws IOException {
+                 int maxRegexLength = context.getQueryShardContext().getIndexSettings().getMaxRegexLength();
+                 final IncludeExclude.StringFilter filter = includeExclude == null
+                     ? null
+                     : includeExclude.convertToStringFilter(format, maxRegexLength);
+                 return new MapStringTermsAggregator(
+                     name,
+                     factories,
+                     new MapStringTermsAggregator.ValuesSourceCollectorSource(valuesSource),
+                     a -> a.new StandardTermsResults(valuesSource),
+                     order,
+                     format,
+                     bucketCountThresholds,
+                     filter,
+                     context,
+                     parent,
+                     subAggCollectMode,
+                     showTermDocCountError,
+                     cardinality,
+                     metadata
+                 );
+             }
+         },
+         GLOBAL_ORDINALS(new ParseField("global_ordinals")) {
+ 
+             @Override
+             Aggregator create(
+                 String name,
+                 AggregatorFactories factories,
+                 ValuesSource valuesSource,
+                 BucketOrder order,
+                 DocValueFormat format,
+                 TermsAggregator.BucketCountThresholds bucketCountThresholds,
+                 IncludeExclude includeExclude,
+                 SearchContext context,
+                 Aggregator parent,
+                 SubAggCollectionMode subAggCollectMode,
+                 boolean showTermDocCountError,
+                 CardinalityUpperBound cardinality,
+                 Map<String, Object> metadata
+             ) throws IOException {
+ 
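+                 // bytesSupplier only picks GLOBAL_ORDINALS for ValuesSource.Bytes.WithOrdinals
+                 // sources, which is what the asserts below rely on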
+                 final long maxOrd = getMaxOrd(valuesSource, context.searcher());
+                 assert maxOrd != -1;
+                 final double ratio = maxOrd / ((double) context.searcher().getIndexReader().numDocs());
+ 
+                 assert valuesSource instanceof ValuesSource.Bytes.WithOrdinals;
+                 ValuesSource.Bytes.WithOrdinals ordinalsValuesSource = (ValuesSource.Bytes.WithOrdinals) valuesSource;
+ 
+                 if (factories == AggregatorFactories.EMPTY
+                     && includeExclude == null
+                     && cardinality == CardinalityUpperBound.ONE
+                     && ordinalsValuesSource.supportsGlobalOrdinalsMapping()
+                     &&
+                 // we use the static COLLECT_SEGMENT_ORDS to allow tests to force specific optimizations
+                     (COLLECT_SEGMENT_ORDS != null ? COLLECT_SEGMENT_ORDS.booleanValue() : ratio <= 0.5 && maxOrd <= 2048)) {
+                     /*
+                      * We can use the low cardinality execution mode iff this aggregator:
+                      *  - has no sub-aggregator AND
+                      *  - collects from a single bucket AND
+                      *  - has a values source that can map from segment to global ordinals AND
+                      *  - reduces the number of global ordinal look-ups by at least half (ratio <= 0.5) AND
+                      *  - has a maximum global ordinal of less than 2048 (LOW_CARDINALITY has additional memory usage,
+                      *    which is directly linked to maxOrd, so we need to limit it).
+                      */
+                     return new GlobalOrdinalsStringTermsAggregator.LowCardinality(
+                         name,
+                         factories,
+                         a -> a.new StandardTermsResults(),
+                         ordinalsValuesSource,
+                         order,
+                         format,
+                         bucketCountThresholds,
+                         context,
+                         parent,
+                         false,
+                         subAggCollectMode,
+                         showTermDocCountError,
+                         metadata
+                     );
+ 
+                 }
+                 int maxRegexLength = context.getQueryShardContext().getIndexSettings().getMaxRegexLength();
+                 final IncludeExclude.OrdinalsFilter filter = includeExclude == null
+                     ? null
+                     : includeExclude.convertToOrdinalsFilter(format, maxRegexLength);
+                 boolean remapGlobalOrds;
+                 if (cardinality == CardinalityUpperBound.ONE && REMAP_GLOBAL_ORDS != null) {
+                     /*
+                      * We use REMAP_GLOBAL_ORDS to allow tests to force
+                      * specific optimizations but this particular one
+                      * is only possible if we're collecting from a single
+                      * bucket.
+                      */
+                     remapGlobalOrds = REMAP_GLOBAL_ORDS.booleanValue();
+                 } else {
+                     remapGlobalOrds = true;
+                     if (includeExclude == null
+                         && cardinality == CardinalityUpperBound.ONE
+                         && (factories == AggregatorFactories.EMPTY
+                             || (isAggregationSort(order) == false && subAggCollectMode == SubAggCollectionMode.BREADTH_FIRST))) {
+                         /*
+                          * We don't need to remap global ords iff this aggregator:
+                          *    - has no include/exclude rules AND
+                          *    - only collects from a single bucket AND
+                          *    - has no sub-aggregator or only sub-aggregator that can be deferred
+                          *      ({@link SubAggCollectionMode#BREADTH_FIRST}).
+                          */
+                         remapGlobalOrds = false;
+                     }
+                 }
+                 return new GlobalOrdinalsStringTermsAggregator(
+                     name,
+                     factories,
+                     a -> a.new StandardTermsResults(),
+                     ordinalsValuesSource,
+                     order,
+                     format,
+                     bucketCountThresholds,
+                     filter,
+                     context,
+                     parent,
+                     remapGlobalOrds,
+                     subAggCollectMode,
+                     showTermDocCountError,
+                     cardinality,
+                     metadata
+                 );
+             }
+         };
+ 
+         public static ExecutionMode fromString(String value) {
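+             // Illustrative usage (hypothetical field name): the execution hint is supplied
+             // per request on the terms aggregation, e.g.
+             //   "terms": { "field": "tags", "execution_hint": "map" }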
+             switch (value) {
+                 case "global_ordinals":
+                     return GLOBAL_ORDINALS;
+                 case "map":
+                     return MAP;
+                 default:
+                     throw new IllegalArgumentException("Unknown `execution_hint`: [" + value + "], expected any of [map, global_ordinals]");
+             }
+         }
+ 
+         private final ParseField parseField;
+ 
+         ExecutionMode(ParseField parseField) {
+             this.parseField = parseField;
+         }
+ 
+         abstract Aggregator create(
+             String name,
+             AggregatorFactories factories,
+             ValuesSource valuesSource,
+             BucketOrder order,
+             DocValueFormat format,
+             TermsAggregator.BucketCountThresholds bucketCountThresholds,
+             IncludeExclude includeExclude,
+             SearchContext context,
+             Aggregator parent,
+             SubAggCollectionMode subAggCollectMode,
+             boolean showTermDocCountError,
+             CardinalityUpperBound cardinality,
+             Map<String, Object> metadata
+         ) throws IOException;
+ 
+         @Override
+         public String toString() {
+             return parseField.getPreferredName();
+         }
+     }
+ 
+     @Override
+     protected boolean supportsConcurrentSegmentSearch() {
+         return true;
+     }
+ }
+
+
+
diff --git a/htmlReport/ns-1/sources/source-34.html b/htmlReport/ns-1/sources/source-34.html
new file mode 100644
index 0000000000000..5ff2369f2216d
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-34.html
@@ -0,0 +1,273 @@
+ Coverage Report > UnmappedRareTerms

Coverage Summary for Class: UnmappedRareTerms (org.opensearch.search.aggregations.bucket.terms)

+ Class                      Method, %    Line, %
+ UnmappedRareTerms          0% (0/15)    0% (0/15)
+ UnmappedRareTerms$Bucket   0% (0/1)     0% (0/1)
+ Total                      0% (0/16)    0% (0/16)
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.opensearch.common.util.SetBackedScalingCuckooFilter;
+ import org.opensearch.core.common.io.stream.StreamInput;
+ import org.opensearch.core.common.io.stream.StreamOutput;
+ import org.opensearch.core.xcontent.XContentBuilder;
+ import org.opensearch.search.DocValueFormat;
+ import org.opensearch.search.aggregations.InternalAggregation;
+ import org.opensearch.search.aggregations.InternalAggregations;
+ 
+ import java.io.IOException;
+ import java.util.Collections;
+ import java.util.List;
+ import java.util.Map;
+ 
+ import static java.util.Collections.emptyList;
+ 
+ /**
+  * Result of the RareTerms aggregation when the field is unmapped.
+  *
+  * @opensearch.internal
+  */
+ public class UnmappedRareTerms extends InternalRareTerms<UnmappedRareTerms, UnmappedRareTerms.Bucket> {
+     public static final String NAME = "umrareterms";
+ 
+     /**
+      * Bucket for unmapped rare values
+      *
+      * @opensearch.internal
+      */
+     protected abstract static class Bucket extends InternalRareTerms.Bucket<Bucket> {
+         private Bucket(long docCount, InternalAggregations aggregations, DocValueFormat formatter) {
+             super(docCount, aggregations, formatter);
+         }
+     }
+ 
+     UnmappedRareTerms(String name, Map<String, Object> metadata) {
+         super(name, LongRareTermsAggregator.ORDER, 0, metadata);
+     }
+ 
+     /**
+      * Read from a stream.
+      */
+     public UnmappedRareTerms(StreamInput in) throws IOException {
+         super(in);
+     }
+ 
+     @Override
+     protected void writeTermTypeInfoTo(StreamOutput out) throws IOException {
+         // Nothing to write
+     }
+ 
+     @Override
+     public String getWriteableName() {
+         return NAME;
+     }
+ 
+     @Override
+     public String getType() {
+         return StringTerms.NAME;
+     }
+ 
+     @Override
+     public UnmappedRareTerms create(List<UnmappedRareTerms.Bucket> buckets) {
+         return new UnmappedRareTerms(name, metadata);
+     }
+ 
+     @Override
+     public UnmappedRareTerms.Bucket createBucket(InternalAggregations aggregations, UnmappedRareTerms.Bucket prototype) {
+         throw new UnsupportedOperationException("not supported for UnmappedRareTerms");
+     }
+ 
+     @Override
+     UnmappedRareTerms.Bucket createBucket(long docCount, InternalAggregations aggs, Bucket prototype) {
+         throw new UnsupportedOperationException("not supported for UnmappedRareTerms");
+     }
+ 
+     @Override
+     protected UnmappedRareTerms createWithFilter(String name, List<UnmappedRareTerms.Bucket> buckets, SetBackedScalingCuckooFilter filter) {
+         throw new UnsupportedOperationException("not supported for UnmappedRareTerms");
+     }
+ 
+     @Override
+     public InternalAggregation reduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
+         return new UnmappedRareTerms(name, metadata);
+     }
+ 
+     @Override
+     public boolean isMapped() {
+         return false;
+     }
+ 
+     @Override
+     public final XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
+         return doXContentCommon(builder, params, Collections.emptyList());
+     }
+ 
+     @Override
+     public List<UnmappedRareTerms.Bucket> getBuckets() {
+         return emptyList();
+     }
+ 
+     @Override
+     public UnmappedRareTerms.Bucket getBucketByKey(String term) {
+         return null;
+     }
+ 
+     @Override
+     protected UnmappedRareTerms.Bucket[] createBucketsArray(int size) {
+         return new UnmappedRareTerms.Bucket[size];
+     }
+ }
+
+
+
diff --git a/htmlReport/ns-1/sources/source-35.html b/htmlReport/ns-1/sources/source-35.html
new file mode 100644
index 0000000000000..021cf81facc2c
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-35.html
@@ -0,0 +1,317 @@
+ Coverage Report > UnmappedSignificantTerms

Coverage Summary for Class: UnmappedSignificantTerms (org.opensearch.search.aggregations.bucket.terms)

+ Class                             Method, %    Line, %
+ UnmappedSignificantTerms          0% (0/19)    0% (0/20)
+ UnmappedSignificantTerms$Bucket   0% (0/1)     0% (0/1)
+ Total                             0% (0/20)    0% (0/21)
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.apache.lucene.util.BytesRef;
+ import org.opensearch.core.common.io.stream.StreamInput;
+ import org.opensearch.core.common.io.stream.StreamOutput;
+ import org.opensearch.core.xcontent.XContentBuilder;
+ import org.opensearch.search.DocValueFormat;
+ import org.opensearch.search.aggregations.InternalAggregation;
+ import org.opensearch.search.aggregations.InternalAggregations;
+ import org.opensearch.search.aggregations.bucket.terms.heuristic.SignificanceHeuristic;
+ 
+ import java.io.IOException;
+ import java.util.Iterator;
+ import java.util.List;
+ import java.util.Map;
+ 
+ import static java.util.Collections.emptyIterator;
+ import static java.util.Collections.emptyList;
+ 
+ /**
+  * Result of running the significant terms aggregation on an unmapped field.
+  *
+  * @opensearch.internal
+  */
+ public class UnmappedSignificantTerms extends InternalSignificantTerms<UnmappedSignificantTerms, UnmappedSignificantTerms.Bucket> {
+ 
+     public static final String NAME = "umsigterms";
+ 
+     /**
+      * Concrete type that can't be built because Java needs a concrete type so {@link InternalSignificantTerms.Bucket} can have a self
+      * type, but {@linkplain UnmappedSignificantTerms} never needs to build it because it never returns any buckets.
+      *
+      * @opensearch.internal
+      */
+     protected abstract static class Bucket extends InternalSignificantTerms.Bucket<Bucket> {
+         private Bucket(
+             BytesRef term,
+             long subsetDf,
+             long subsetSize,
+             long supersetDf,
+             long supersetSize,
+             InternalAggregations aggregations,
+             DocValueFormat format
+         ) {
+             super(subsetDf, subsetSize, supersetDf, supersetSize, aggregations, format);
+         }
+     }
+ 
+     public UnmappedSignificantTerms(
+         String name,
+         TermsAggregator.BucketCountThresholds bucketCountThresholds,
+         Map<String, Object> metadata
+     ) {
+         super(name, bucketCountThresholds, metadata);
+     }
+ 
+     /**
+      * Read from a stream.
+      */
+     public UnmappedSignificantTerms(StreamInput in) throws IOException {
+         super(in);
+     }
+ 
+     @Override
+     protected void writeTermTypeInfoTo(StreamOutput out) throws IOException {
+         // Nothing to write
+     }
+ 
+     @Override
+     public String getWriteableName() {
+         return NAME;
+     }
+ 
+     @Override
+     public String getType() {
+         return SignificantStringTerms.NAME;
+     }
+ 
+     @Override
+     public UnmappedSignificantTerms create(List<Bucket> buckets) {
+         return new UnmappedSignificantTerms(name, bucketCountThresholds, metadata);
+     }
+ 
+     @Override
+     public Bucket createBucket(InternalAggregations aggregations, Bucket prototype) {
+         throw new UnsupportedOperationException("not supported for UnmappedSignificantTerms");
+     }
+ 
+     @Override
+     protected UnmappedSignificantTerms create(long subsetSize, long supersetSize, List<Bucket> buckets) {
+         throw new UnsupportedOperationException("not supported for UnmappedSignificantTerms");
+     }
+ 
+     @Override
+     Bucket createBucket(
+         long subsetDf,
+         long subsetSize,
+         long supersetDf,
+         long supersetSize,
+         InternalAggregations aggregations,
+         Bucket prototype
+     ) {
+         throw new UnsupportedOperationException("not supported for UnmappedSignificantTerms");
+     }
+ 
+     @Override
+     public InternalAggregation reduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
+         return new UnmappedSignificantTerms(name, bucketCountThresholds, metadata);
+     }
+ 
+     @Override
+     public boolean isMapped() {
+         return false;
+     }
+ 
+     @Override
+     public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
+         builder.startArray(CommonFields.BUCKETS.getPreferredName()).endArray();
+         return builder;
+     }
+ 
+     @Override
+     protected Bucket[] createBucketsArray(int size) {
+         return new Bucket[size];
+     }
+ 
+     @Override
+     public Iterator<SignificantTerms.Bucket> iterator() {
+         return emptyIterator();
+     }
+ 
+     @Override
+     public List<Bucket> getBuckets() {
+         return emptyList();
+     }
+ 
+     @Override
+     public SignificantTerms.Bucket getBucketByKey(String term) {
+         return null;
+     }
+ 
+     @Override
+     protected SignificanceHeuristic getSignificanceHeuristic() {
+         throw new UnsupportedOperationException();
+     }
+ 
+     @Override
+     protected long getSubsetSize() {
+         return 0;
+     }
+ 
+     @Override
+     protected long getSupersetSize() {
+         return 0;
+     }
+ }
+
+
+
diff --git a/htmlReport/ns-1/sources/source-36.html b/htmlReport/ns-1/sources/source-36.html
new file mode 100644
index 0000000000000..79edf90a68244
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-36.html
@@ -0,0 +1,303 @@
+ Coverage Report > UnmappedTerms

Coverage Summary for Class: UnmappedTerms (org.opensearch.search.aggregations.bucket.terms)

+ Class                  Method, %    Line, %
+ UnmappedTerms          0% (0/19)    0% (0/19)
+ UnmappedTerms$Bucket   0% (0/1)     0% (0/1)
+ Total                  0% (0/20)    0% (0/20)
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.opensearch.core.common.io.stream.StreamInput;
+ import org.opensearch.core.common.io.stream.StreamOutput;
+ import org.opensearch.core.xcontent.XContentBuilder;
+ import org.opensearch.search.DocValueFormat;
+ import org.opensearch.search.aggregations.BucketOrder;
+ import org.opensearch.search.aggregations.InternalAggregation;
+ import org.opensearch.search.aggregations.InternalAggregations;
+ 
+ import java.io.IOException;
+ import java.util.Collections;
+ import java.util.List;
+ import java.util.Map;
+ 
+ import static java.util.Collections.emptyList;
+ 
+ /**
+  * Result of the {@link TermsAggregator} when the field is unmapped.
+  *
+  * @opensearch.internal
+  */
+ public class UnmappedTerms extends InternalTerms<UnmappedTerms, UnmappedTerms.Bucket> {
+     public static final String NAME = "umterms";
+ 
+     /**
+      * Concrete type that can't be built because Java needs a concrete type so {@link InternalTerms.Bucket} can have a self type but
+      * {@linkplain UnmappedTerms} doesn't ever need to build it because it never returns any buckets.
+      *
+      * @opensearch.internal
+      */
+     protected abstract static class Bucket extends InternalTerms.Bucket<Bucket> {
+         private Bucket(
+             long docCount,
+             InternalAggregations aggregations,
+             boolean showDocCountError,
+             long docCountError,
+             DocValueFormat formatter
+         ) {
+             super(docCount, aggregations, showDocCountError, docCountError, formatter);
+         }
+     }
+ 
+     public UnmappedTerms(
+         String name,
+         BucketOrder order,
+         TermsAggregator.BucketCountThresholds bucketCountThresholds,
+         Map<String, Object> metadata
+     ) {
+         super(name, order, order, bucketCountThresholds, metadata);
+     }
+ 
+     /**
+      * Read from a stream.
+      */
+     public UnmappedTerms(StreamInput in) throws IOException {
+         super(in);
+     }
+ 
+     @Override
+     protected void writeTermTypeInfoTo(StreamOutput out) throws IOException {
+         // Nothing to write
+     }
+ 
+     @Override
+     public String getWriteableName() {
+         return NAME;
+     }
+ 
+     @Override
+     public String getType() {
+         return StringTerms.NAME;
+     }
+ 
+     @Override
+     public UnmappedTerms create(List<Bucket> buckets) {
+         return new UnmappedTerms(name, order, bucketCountThresholds, metadata);
+     }
+ 
+     @Override
+     public Bucket createBucket(InternalAggregations aggregations, Bucket prototype) {
+         throw new UnsupportedOperationException("not supported for UnmappedTerms");
+     }
+ 
+     @Override
+     Bucket createBucket(long docCount, InternalAggregations aggs, long docCountError, Bucket prototype) {
+         throw new UnsupportedOperationException("not supported for UnmappedTerms");
+     }
+ 
+     @Override
+     protected UnmappedTerms create(String name, List<Bucket> buckets, BucketOrder reduceOrder, long docCountError, long otherDocCount) {
+         throw new UnsupportedOperationException("not supported for UnmappedTerms");
+     }
+ 
+     @Override
+     public InternalAggregation reduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
+         return new UnmappedTerms(name, order, bucketCountThresholds, metadata);
+     }
+ 
+     @Override
+     public boolean isMapped() {
+         return false;
+     }
+ 
+     @Override
+     public final XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
+         return doXContentCommon(builder, params, 0, 0, Collections.emptyList());
+     }
+ 
+     @Override
+     protected void setDocCountError(long docCountError) {}
+ 
+     @Override
+     protected int getShardSize() {
+         return 0;
+     }
+ 
+     @Override
+     public long getDocCountError() {
+         return 0;
+     }
+ 
+     @Override
+     public long getSumOfOtherDocCounts() {
+         return 0;
+     }
+ 
+     @Override
+     public List<Bucket> getBuckets() {
+         return emptyList();
+     }
+ 
+     @Override
+     public Bucket getBucketByKey(String term) {
+         return null;
+     }
+ 
+     @Override
+     protected Bucket[] createBucketsArray(int size) {
+         return new Bucket[size];
+     }
+ }
+
+
+
diff --git a/htmlReport/ns-1/sources/source-37.html b/htmlReport/ns-1/sources/source-37.html
new file mode 100644
index 0000000000000..63c7a4029ff20
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-37.html
@@ -0,0 +1,411 @@
+ Coverage Report > UnsignedLongTerms

Coverage Summary for Class: UnsignedLongTerms (org.opensearch.search.aggregations.bucket.terms)

+ Class                      Method, %    Line, %
+ UnsignedLongTerms          0% (0/10)    0% (0/44)
+ UnsignedLongTerms$Bucket   0% (0/10)    0% (0/21)
+ Total                      0% (0/20)    0% (0/65)
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.opensearch.core.common.io.stream.StreamInput;
+ import org.opensearch.core.common.io.stream.StreamOutput;
+ import org.opensearch.core.xcontent.XContentBuilder;
+ import org.opensearch.search.DocValueFormat;
+ import org.opensearch.search.aggregations.BucketOrder;
+ import org.opensearch.search.aggregations.InternalAggregation;
+ import org.opensearch.search.aggregations.InternalAggregations;
+ 
+ import java.io.IOException;
+ import java.math.BigInteger;
+ import java.util.ArrayList;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Objects;
+ 
+ /**
+  * Result of the {@link TermsAggregator} when the field is some kind of whole number like an integer,
+  * long, unsigned long, or a date.
+  *
+  * @opensearch.internal
+  */
+ public class UnsignedLongTerms extends InternalMappedTerms<UnsignedLongTerms, UnsignedLongTerms.Bucket> {
+     public static final String NAME = "ulterms";
+ 
+     /**
+      * Bucket for unsigned long terms
+      *
+      * @opensearch.internal
+      */
+     public static class Bucket extends InternalTerms.Bucket<Bucket> {
+         BigInteger term;
+ 
+         public Bucket(
+             BigInteger term,
+             long docCount,
+             InternalAggregations aggregations,
+             boolean showDocCountError,
+             long docCountError,
+             DocValueFormat format
+         ) {
+             super(docCount, aggregations, showDocCountError, docCountError, format);
+             this.term = term;
+         }
+ 
+         /**
+          * Read from a stream.
+          */
+         public Bucket(StreamInput in, DocValueFormat format, boolean showDocCountError) throws IOException {
+             super(in, format, showDocCountError);
+             term = in.readBigInteger();
+         }
+ 
+         @Override
+         protected void writeTermTo(StreamOutput out) throws IOException {
+             out.writeBigInteger(term);
+         }
+ 
+         @Override
+         public String getKeyAsString() {
+             return format.format(term).toString();
+         }
+ 
+         @Override
+         public Object getKey() {
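+             // UNSIGNED_LONG_SHIFTED presumably stores raw doc values shifted into the
+             // signed-long range, so format(term) is applied to undo that shift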
+             if (format == DocValueFormat.UNSIGNED_LONG_SHIFTED) {
+                 return format.format(term);
+             } else {
+                 return term;
+             }
+         }
+ 
+         @Override
+         public Number getKeyAsNumber() {
+             if (format == DocValueFormat.UNSIGNED_LONG_SHIFTED) {
+                 return (Number) format.format(term);
+             } else {
+                 return term;
+             }
+         }
+ 
+         @Override
+         public int compareKey(Bucket other) {
+             return term.compareTo(other.term);
+         }
+ 
+         @Override
+         protected final XContentBuilder keyToXContent(XContentBuilder builder) throws IOException {
+             if (format == DocValueFormat.UNSIGNED_LONG_SHIFTED) {
+                 builder.field(CommonFields.KEY.getPreferredName(), format.format(term));
+             } else {
+                 builder.field(CommonFields.KEY.getPreferredName(), term);
+             }
+             if (format != DocValueFormat.RAW && format != DocValueFormat.UNSIGNED_LONG_SHIFTED && format != DocValueFormat.UNSIGNED_LONG) {
+                 builder.field(CommonFields.KEY_AS_STRING.getPreferredName(), format.format(term).toString());
+             }
+             return builder;
+         }
+ 
+         @Override
+         public boolean equals(Object obj) {
+             return super.equals(obj) && Objects.equals(term, ((Bucket) obj).term);
+         }
+ 
+         @Override
+         public int hashCode() {
+             return Objects.hash(super.hashCode(), term);
+         }
+     }
+ 
+     public UnsignedLongTerms(
+         String name,
+         BucketOrder reduceOrder,
+         BucketOrder order,
+         Map<String, Object> metadata,
+         DocValueFormat format,
+         int shardSize,
+         boolean showTermDocCountError,
+         long otherDocCount,
+         List<Bucket> buckets,
+         long docCountError,
+         TermsAggregator.BucketCountThresholds bucketCountThresholds
+     ) {
+         super(
+             name,
+             reduceOrder,
+             order,
+             metadata,
+             format,
+             shardSize,
+             showTermDocCountError,
+             otherDocCount,
+             buckets,
+             docCountError,
+             bucketCountThresholds
+         );
+     }
+ 
+     /**
+      * Read from a stream.
+      */
+     public UnsignedLongTerms(StreamInput in) throws IOException {
+         super(in, Bucket::new);
+     }
+ 
+     @Override
+     public String getWriteableName() {
+         return NAME;
+     }
+ 
+     @Override
+     public UnsignedLongTerms create(List<Bucket> buckets) {
+         return new UnsignedLongTerms(
+             name,
+             reduceOrder,
+             order,
+             metadata,
+             format,
+             shardSize,
+             showTermDocCountError,
+             otherDocCount,
+             buckets,
+             docCountError,
+             bucketCountThresholds
+         );
+     }
+ 
+     @Override
+     public Bucket createBucket(InternalAggregations aggregations, Bucket prototype) {
+         return new Bucket(
+             prototype.term,
+             prototype.docCount,
+             aggregations,
+             prototype.showDocCountError,
+             prototype.docCountError,
+             prototype.format
+         );
+     }
+ 
+     @Override
+     protected UnsignedLongTerms create(String name, List<Bucket> buckets, BucketOrder reduceOrder, long docCountError, long otherDocCount) {
+         return new UnsignedLongTerms(
+             name,
+             reduceOrder,
+             order,
+             getMetadata(),
+             format,
+             shardSize,
+             showTermDocCountError,
+             otherDocCount,
+             buckets,
+             docCountError,
+             bucketCountThresholds
+         );
+     }
+ 
+     @Override
+     protected Bucket[] createBucketsArray(int size) {
+         return new Bucket[size];
+     }
+ 
+     @Override
+     public InternalAggregation reduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
+         boolean unsignedLongFormat = false;
+         boolean rawFormat = false;
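+         // If any shard produced DoubleTerms (e.g. the same field is mapped as a floating
+         // point type in another index), delegate the whole reduce to the double variant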
+         for (InternalAggregation agg : aggregations) {
+             if (agg instanceof DoubleTerms) {
+                 return agg.reduce(aggregations, reduceContext);
+             }
+             if (agg instanceof UnsignedLongTerms) {
+                 if (((UnsignedLongTerms) agg).format == DocValueFormat.RAW) {
+                     rawFormat = true;
+                 } else if (((UnsignedLongTerms) agg).format == DocValueFormat.UNSIGNED_LONG_SHIFTED) {
+                     unsignedLongFormat = true;
+                 } else if (((UnsignedLongTerms) agg).format == DocValueFormat.UNSIGNED_LONG) {
+                     unsignedLongFormat = true;
+                 }
+             }
+         }
+         if (rawFormat && unsignedLongFormat) { // if we have mixed formats, convert results to double format
+             List<InternalAggregation> newAggs = new ArrayList<>(aggregations.size());
+             for (InternalAggregation agg : aggregations) {
+                 if (agg instanceof UnsignedLongTerms) {
+                     DoubleTerms dTerms = UnsignedLongTerms.convertUnsignedLongTermsToDouble((UnsignedLongTerms) agg, format);
+                     newAggs.add(dTerms);
+                 } else {
+                     newAggs.add(agg);
+                 }
+             }
+             return newAggs.get(0).reduce(newAggs, reduceContext);
+         }
+         return super.reduce(aggregations, reduceContext);
+     }
+ 
+     @Override
+     Bucket createBucket(long docCount, InternalAggregations aggs, long docCountError, UnsignedLongTerms.Bucket prototype) {
+         return new Bucket(prototype.term, docCount, aggs, prototype.showDocCountError, docCountError, format);
+     }
+ 
+     /**
+      * Converts a {@link UnsignedLongTerms} into a {@link DoubleTerms}, returning the values of the unsigned long terms as doubles.
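+      * Note that values above 2^53 cannot be represented exactly as a double, so the
+      * conversion can lose precision for very large terms.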
+      */
+     static DoubleTerms convertUnsignedLongTermsToDouble(UnsignedLongTerms unsignedLongTerms, DocValueFormat decimalFormat) {
+         List<UnsignedLongTerms.Bucket> buckets = unsignedLongTerms.getBuckets();
+         List<DoubleTerms.Bucket> newBuckets = new ArrayList<>();
+         for (Terms.Bucket bucket : buckets) {
+             newBuckets.add(
+                 new DoubleTerms.Bucket(
+                     bucket.getKeyAsNumber().doubleValue(),
+                     bucket.getDocCount(),
+                     (InternalAggregations) bucket.getAggregations(),
+                     unsignedLongTerms.showTermDocCountError,
+                     unsignedLongTerms.showTermDocCountError ? bucket.getDocCountError() : 0,
+                     decimalFormat
+                 )
+             );
+         }
+         return new DoubleTerms(
+             unsignedLongTerms.getName(),
+             unsignedLongTerms.reduceOrder,
+             unsignedLongTerms.order,
+             unsignedLongTerms.metadata,
+             unsignedLongTerms.format,
+             unsignedLongTerms.shardSize,
+             unsignedLongTerms.showTermDocCountError,
+             unsignedLongTerms.otherDocCount,
+             newBuckets,
+             unsignedLongTerms.docCountError,
+             unsignedLongTerms.bucketCountThresholds
+         );
+     }
+ }
+
+
+
diff --git a/htmlReport/ns-1/sources/source-38.html b/htmlReport/ns-1/sources/source-38.html
new file mode 100644
index 0000000000000..12df7e8e6c4aa
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-38.html
@@ -0,0 +1,131 @@
+ Coverage Report > RareTermsAggregatorSupplier

Coverage Summary for Class: RareTermsAggregatorSupplier (org.opensearch.search.aggregations.bucket.terms)

+ Class
+ RareTermsAggregatorSupplier
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.opensearch.search.DocValueFormat;
+ import org.opensearch.search.aggregations.Aggregator;
+ import org.opensearch.search.aggregations.AggregatorFactories;
+ import org.opensearch.search.aggregations.CardinalityUpperBound;
+ import org.opensearch.search.aggregations.support.ValuesSource;
+ import org.opensearch.search.internal.SearchContext;
+ 
+ import java.io.IOException;
+ import java.util.Map;
+ 
+ /**
+  * Aggregator supplier interface for rare_terms agg
+  *
+  * @opensearch.internal
+  */
+ interface RareTermsAggregatorSupplier {
+     Aggregator build(
+         String name,
+         AggregatorFactories factories,
+         ValuesSource valuesSource,
+         DocValueFormat format,
+         int maxDocCount,
+         double precision,
+         IncludeExclude includeExclude,
+         SearchContext context,
+         Aggregator parent,
+         CardinalityUpperBound cardinality,
+         Map<String, Object> metadata
+     ) throws IOException;
+ }
+
+
+
diff --git a/htmlReport/ns-1/sources/source-39.html b/htmlReport/ns-1/sources/source-39.html
new file mode 100644
index 0000000000000..fef131e878d52
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-39.html
@@ -0,0 +1,166 @@
+ Coverage Report > SignificantTerms

Coverage Summary for Class: SignificantTerms (org.opensearch.search.aggregations.bucket.terms)

+ Class
+ SignificantTerms$Bucket
+ Total
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.opensearch.search.aggregations.bucket.MultiBucketsAggregation;
+ 
+ import java.util.List;
+ 
+ /**
+  * An aggregation that collects significant terms in comparison to a background set.
+  *
+  * @opensearch.internal
+  */
+ public interface SignificantTerms extends MultiBucketsAggregation, Iterable<SignificantTerms.Bucket> {
+ 
+     /**
+      * Bucket for significant terms
+      *
+      * @opensearch.internal
+      */
+     interface Bucket extends MultiBucketsAggregation.Bucket {
+ 
+         /**
+          * @return The significant score for the subset
+          */
+         double getSignificanceScore();
+ 
+         /**
+          * @return The number of docs in the subset containing a particular term.
+          * This number is equal to the document count of the bucket.
+          */
+         long getSubsetDf();
+ 
+         /**
+          * @return The number of docs in the subset (also known as the "foreground set").
+          * This number is equal to the document count of the containing aggregation.
+          */
+         long getSubsetSize();
+ 
+         /**
+          * @return The number of docs in the superset containing a particular term (also
+          * known as the "background count" of the bucket)
+          */
+         long getSupersetDf();
+ 
+         /**
+          * @return The number of docs in the superset (ordinarily the background count
+          * of the containing aggregation).
+          */
+         long getSupersetSize();
+ 
+         /**
+          * @return The key, expressed as a number
+          */
+         Number getKeyAsNumber();
+     }
+ 
+     @Override
+     List<? extends Bucket> getBuckets();
+ 
+     /**
+      * Get the bucket for the given term, or null if there is no such bucket.
+      */
+     Bucket getBucketByKey(String term);
+ 
+ }
+
+
+
diff --git a/htmlReport/ns-1/sources/source-3a.html b/htmlReport/ns-1/sources/source-3a.html
new file mode 100644
index 0000000000000..7ee6601e8277d
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-3a.html
@@ -0,0 +1,135 @@
+ Coverage Report > TermsAggregatorSupplier

Coverage Summary for Class: TermsAggregatorSupplier (org.opensearch.search.aggregations.bucket.terms)

+ Class
+ TermsAggregatorSupplier
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.opensearch.search.DocValueFormat;
+ import org.opensearch.search.aggregations.Aggregator;
+ import org.opensearch.search.aggregations.AggregatorFactories;
+ import org.opensearch.search.aggregations.BucketOrder;
+ import org.opensearch.search.aggregations.CardinalityUpperBound;
+ import org.opensearch.search.aggregations.support.ValuesSource;
+ import org.opensearch.search.internal.SearchContext;
+ 
+ import java.io.IOException;
+ import java.util.Map;
+ 
+ /**
+  * Aggregator supplier interface for terms agg
+  *
+  * @opensearch.internal
+  */
+ interface TermsAggregatorSupplier {
+     Aggregator build(
+         String name,
+         AggregatorFactories factories,
+         ValuesSource valuesSource,
+         BucketOrder order,
+         DocValueFormat format,
+         TermsAggregator.BucketCountThresholds bucketCountThresholds,
+         IncludeExclude includeExclude,
+         String executionHint,
+         SearchContext context,
+         Aggregator parent,
+         Aggregator.SubAggCollectionMode subAggCollectMode,
+         boolean showTermDocCountError,
+         CardinalityUpperBound cardinality,
+         Map<String, Object> metadata
+     ) throws IOException;
+ }
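TermsAggregatorSupplier declares a single abstract method, so inside this package a strategy can be written as a lambda. A shape-only sketch (the interface is package-private, and a real supplier would construct a concrete aggregator such as GlobalOrdinalsStringTermsAggregator; this placeholder merely rejects the request):

    // Hedged sketch; the body is a stand-in, not real registration wiring.
    TermsAggregatorSupplier rejectAll = (name, factories, valuesSource, order, format,
            bucketCountThresholds, includeExclude, executionHint, context, parent,
            subAggCollectMode, showTermDocCountError, cardinality, metadata) -> {
        throw new IllegalArgumentException("no terms aggregator available for this values source");
    };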
diff --git a/htmlReport/ns-1/sources/source-3b.html b/htmlReport/ns-1/sources/source-3b.html
new file mode 100644
index 0000000000000..a8f01e424856d
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-3b.html
@@ -0,0 +1,150 @@
+ [HTML report boilerplate omitted]
+ Coverage Summary for Class: Terms (org.opensearch.search.aggregations.bucket.terms)
+ Rows: Terms$Bucket, Total (coverage figures not recoverable)
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.opensearch.search.aggregations.bucket.MultiBucketsAggregation;
+ 
+ import java.util.List;
+ 
+ /**
+  * A {@code terms} aggregation. Defines multiple buckets, each associated with a unique term for a specific field.
+  * All documents in a bucket have the bucket's term in that field.
+  *
+  * @opensearch.internal
+  */
+ public interface Terms extends MultiBucketsAggregation {
+ 
+     /**
+      * A bucket that is associated with a single term
+      *
+      * @opensearch.internal
+      */
+     interface Bucket extends MultiBucketsAggregation.Bucket {
+ 
+         Number getKeyAsNumber();
+ 
+         long getDocCountError();
+     }
+ 
+     /**
+      * Return the sorted list of the buckets in this terms aggregation.
+      */
+     @Override
+     List<? extends Bucket> getBuckets();
+ 
+     /**
+      * Get the bucket for the given term, or null if there is no such bucket.
+      */
+     Bucket getBucketByKey(String term);
+ 
+     /**
+      * Get an upper bound of the error on document counts in this aggregation.
+      */
+     long getDocCountError();
+ 
+     /**
+      * Return the sum of the document counts of all buckets that did not make
+      * it to the top buckets.
+      */
+     long getSumOfOtherDocCounts();
+ }
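A minimal consumption sketch for the Terms result type, assuming a hypothetical searchResponse whose request contained a terms aggregation named "by_category":

    // Hedged sketch: iterating a Terms result and reading its error-bound accessors.
    Terms byCategory = searchResponse.getAggregations().get("by_category");
    long otherDocs = byCategory.getSumOfOtherDocCounts(); // docs that fell outside the top buckets
    long maxError = byCategory.getDocCountError();        // upper bound on any bucket's count error
    for (Terms.Bucket bucket : byCategory.getBuckets()) {
        System.out.println(bucket.getKeyAsString() + " -> " + bucket.getDocCount());
    }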
diff --git a/htmlReport/ns-1/sources/source-3c.html b/htmlReport/ns-1/sources/source-3c.html
new file mode 100644
index 0000000000000..56ab735c4d300
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-3c.html
@@ -0,0 +1,137 @@
+ [HTML report boilerplate omitted]
+ Coverage Summary for Class: RareTerms (org.opensearch.search.aggregations.bucket.terms)
+ Rows: RareTerms$Bucket, Total (coverage figures not recoverable)
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.opensearch.search.aggregations.bucket.MultiBucketsAggregation;
+ 
+ import java.util.List;
+ 
+ /**
+  * Rare terms interface
+  *
+  * @opensearch.internal
+  */
+ public interface RareTerms extends MultiBucketsAggregation {
+ 
+     /**
+      * A bucket that is associated with a single term
+      *
+      * @opensearch.internal
+      */
+     interface Bucket extends MultiBucketsAggregation.Bucket {
+ 
+         Number getKeyAsNumber();
+     }
+ 
+     /**
+      * Return the sorted list of the buckets in this terms aggregation.
+      */
+     @Override
+     List<? extends Bucket> getBuckets();
+ 
+     /**
+      * Get the bucket for the given term, or null if there is no such bucket.
+      */
+     Bucket getBucketByKey(String term);
+ 
+ }
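Unlike Terms, a RareTerms bucket carries no error statistics, and key lookups return null when the term was not rare. A small sketch under the same assumptions as above (hypothetical aggregation name "rare_tags"):

    // Hedged sketch: point lookup in a RareTerms result.
    RareTerms rareTags = searchResponse.getAggregations().get("rare_tags");
    RareTerms.Bucket bucket = rareTags.getBucketByKey("deprecated");
    if (bucket != null) {
        System.out.println("docs with rare tag: " + bucket.getDocCount());
    }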
diff --git a/htmlReport/ns-1/sources/source-3d.html b/htmlReport/ns-1/sources/source-3d.html
new file mode 100644
index 0000000000000..2ada00b8d01fd
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-3d.html
@@ -0,0 +1,134 @@
+ [HTML report boilerplate omitted]
+ Coverage Summary for Class: SignificantTermsAggregatorSupplier (org.opensearch.search.aggregations.bucket.terms)
+ Class: SignificantTermsAggregatorSupplier (coverage figures not recoverable)
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.opensearch.search.DocValueFormat;
+ import org.opensearch.search.aggregations.Aggregator;
+ import org.opensearch.search.aggregations.AggregatorFactories;
+ import org.opensearch.search.aggregations.CardinalityUpperBound;
+ import org.opensearch.search.aggregations.bucket.terms.heuristic.SignificanceHeuristic;
+ import org.opensearch.search.aggregations.support.ValuesSource;
+ import org.opensearch.search.internal.SearchContext;
+ 
+ import java.io.IOException;
+ import java.util.Map;
+ 
+ /**
+  * Aggregator supplier interface for significant_terms agg
+  *
+  * @opensearch.internal
+  */
+ interface SignificantTermsAggregatorSupplier {
+     Aggregator build(
+         String name,
+         AggregatorFactories factories,
+         ValuesSource valuesSource,
+         DocValueFormat format,
+         TermsAggregator.BucketCountThresholds bucketCountThresholds,
+         IncludeExclude includeExclude,
+         String executionHint,
+         SearchContext context,
+         Aggregator parent,
+         SignificanceHeuristic significanceHeuristic,
+         SignificanceLookup lookup,
+         CardinalityUpperBound cardinality,
+         Map<String, Object> metadata
+     ) throws IOException;
+ }
diff --git a/htmlReport/ns-1/sources/source-4.html b/htmlReport/ns-1/sources/source-4.html
new file mode 100644
index 0000000000000..8d8db5fb92ab2
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-4.html
@@ -0,0 +1,156 @@
+ [HTML report boilerplate omitted]
+ Coverage Summary for Class: BucketSignificancePriorityQueue (org.opensearch.search.aggregations.bucket.terms)
+
+ Class                            | Class, %  | Method, % | Line, %
+ BucketSignificancePriorityQueue  | 0% (0/1)  | 0% (0/2)  | 0% (0/2)
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ 
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.apache.lucene.util.PriorityQueue;
+ 
+ /**
+  * Priority queue for computing bucket significance
+  *
+  * @opensearch.internal
+  */
+ public class BucketSignificancePriorityQueue<B extends SignificantTerms.Bucket> extends PriorityQueue<B> {
+ 
+     public BucketSignificancePriorityQueue(int size) {
+         super(size);
+     }
+ 
+     @Override
+     protected boolean lessThan(SignificantTerms.Bucket o1, SignificantTerms.Bucket o2) {
+         return o1.getSignificanceScore() < o2.getSignificanceScore();
+     }
+ }
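Because lessThan() orders by significance score, inserting into a bounded queue evicts the least significant bucket first, which is exactly the top-N selection a reduce phase needs. A small usage sketch; candidateBuckets is a hypothetical collection of SignificantStringTerms.Bucket instances:

    // Hedged sketch: keep only the 10 most significant buckets.
    BucketSignificancePriorityQueue<SignificantStringTerms.Bucket> queue =
        new BucketSignificancePriorityQueue<>(10);
    for (SignificantStringTerms.Bucket candidate : candidateBuckets) {
        queue.insertWithOverflow(candidate); // evicts the current lowest score when full
    }
    while (queue.size() > 0) {
        SignificantStringTerms.Bucket b = queue.pop(); // drains in ascending score order
        System.out.println(b.getKeyAsString() + " score=" + b.getSignificanceScore());
    }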
diff --git a/htmlReport/ns-1/sources/source-5.html b/htmlReport/ns-1/sources/source-5.html
new file mode 100644
index 0000000000000..feda12e8725ff
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-5.html
@@ -0,0 +1,467 @@
+ [HTML report boilerplate omitted]
+ Coverage Summary for Class: BytesKeyedBucketOrds (org.opensearch.search.aggregations.bucket.terms)
+
+ Class                                  | Method, %  | Line, %
+ BytesKeyedBucketOrds                   | 0% (0/2)   | 0% (0/2)
+ BytesKeyedBucketOrds$BucketOrdsEnum    | 0% (0/1)   | 0% (0/1)
+ BytesKeyedBucketOrds$BucketOrdsEnum$1  | 0% (0/4)   | 0% (0/4)
+ BytesKeyedBucketOrds$FromMany          | 0% (0/6)   | 0% (0/12)
+ BytesKeyedBucketOrds$FromMany$1        | 0% (0/4)   | 0% (0/4)
+ BytesKeyedBucketOrds$FromSingle        | 0% (0/7)   | 0% (0/9)
+ BytesKeyedBucketOrds$FromSingle$1      | 0% (0/4)   | 0% (0/6)
+ Total                                  | 0% (0/28)  | 0% (0/38)
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ 
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.apache.lucene.util.BytesRef;
+ import org.opensearch.common.lease.Releasable;
+ import org.opensearch.common.lease.Releasables;
+ import org.opensearch.common.util.BigArrays;
+ import org.opensearch.common.util.BytesRefHash;
+ import org.opensearch.search.aggregations.CardinalityUpperBound;
+ 
+ /**
+  * Maps {@link BytesRef} bucket keys to bucket ordinals.
+  *
+  * @opensearch.internal
+  */
+ public abstract class BytesKeyedBucketOrds implements Releasable {
+     /**
+      * Build a {@link BytesKeyedBucketOrds}.
+      */
+     public static BytesKeyedBucketOrds build(BigArrays bigArrays, CardinalityUpperBound cardinality) {
+         return cardinality.map(estimate -> estimate < 2 ? new FromSingle(bigArrays) : new FromMany(bigArrays));
+     }
+ 
+     private BytesKeyedBucketOrds() {}
+ 
+     /**
+      * Add the {@code owningBucketOrd, value} pair. Return the ord for
+      * their bucket if the pair had not been added before, or {@code -1-ord}
+      * if it was already present.
+      */
+     public abstract long add(long owningBucketOrd, BytesRef value);
+ 
+     /**
+      * Count the buckets in {@code owningBucketOrd}.
+      */
+     public abstract long bucketsInOrd(long owningBucketOrd);
+ 
+     /**
+      * The number of collected buckets.
+      */
+     public abstract long size();
+ 
+     /**
+      * Build an iterator for buckets inside {@code owningBucketOrd} in order
+      * of increasing ord.
+      * <p>
+      * When first returned it is "unpositioned" and you must call
+      * {@link BucketOrdsEnum#next()} to move it to the first value.
+      */
+     public abstract BucketOrdsEnum ordsEnum(long owningBucketOrd);
+ 
+     /**
+      * An iterator for buckets inside a particular {@code owningBucketOrd}.
+      *
+      * @opensearch.internal
+      */
+     public interface BucketOrdsEnum {
+         /**
+          * Advance to the next value.
+          * @return {@code true} if there *is* a next value,
+          *         {@code false} if there isn't
+          */
+         boolean next();
+ 
+         /**
+          * The ordinal of the current value.
+          */
+         long ord();
+ 
+         /**
+          * Read the current value.
+          */
+         void readValue(BytesRef dest);
+ 
+         /**
+          * An {@linkplain BucketOrdsEnum} that is empty.
+          */
+         BucketOrdsEnum EMPTY = new BucketOrdsEnum() {
+             @Override
+             public boolean next() {
+                 return false;
+             }
+ 
+             @Override
+             public long ord() {
+                 return 0;
+             }
+ 
+             @Override
+             public void readValue(BytesRef dest) {}
+         };
+     }
+ 
+     /**
+      * Implementation that only works if it is collecting from a single bucket.
+      *
+      * @opensearch.internal
+      */
+     private static class FromSingle extends BytesKeyedBucketOrds {
+         private final BytesRefHash ords;
+ 
+         private FromSingle(BigArrays bigArrays) {
+             ords = new BytesRefHash(bigArrays);
+         }
+ 
+         @Override
+         public long add(long owningBucketOrd, BytesRef value) {
+             assert owningBucketOrd == 0;
+             return ords.add(value);
+         }
+ 
+         @Override
+         public long bucketsInOrd(long owningBucketOrd) {
+             return ords.size();
+         }
+ 
+         @Override
+         public long size() {
+             return ords.size();
+         }
+ 
+         @Override
+         public BucketOrdsEnum ordsEnum(long owningBucketOrd) {
+             return new BucketOrdsEnum() {
+                 private int ord = -1;
+ 
+                 @Override
+                 public boolean next() {
+                     ord++;
+                     return ord < ords.size();
+                 }
+ 
+                 @Override
+                 public long ord() {
+                     return ord;
+                 }
+ 
+                 @Override
+                 public void readValue(BytesRef dest) {
+                     ords.get(ord, dest);
+                 }
+             };
+         }
+ 
+         @Override
+         public void close() {
+             ords.close();
+         }
+     }
+ 
+     /**
+      * Implementation that works properly when collecting from many buckets.
+      *
+      * @opensearch.internal
+      */
+     private static class FromMany extends BytesKeyedBucketOrds {
+         // TODO we can almost certainly do better here by building something fit for purpose rather than trying to lego together stuff
+         private final BytesRefHash bytesToLong;
+         private final LongKeyedBucketOrds longToBucketOrds;
+ 
+         private FromMany(BigArrays bigArrays) {
+             bytesToLong = new BytesRefHash(bigArrays);
+             longToBucketOrds = LongKeyedBucketOrds.build(bigArrays, CardinalityUpperBound.MANY);
+         }
+ 
+         @Override
+         public long add(long owningBucketOrd, BytesRef value) {
+             long l = bytesToLong.add(value);
+             if (l < 0) {
+                 l = -1 - l;
+             }
+             return longToBucketOrds.add(owningBucketOrd, l);
+         }
+ 
+         @Override
+         public long bucketsInOrd(long owningBucketOrd) {
+             return longToBucketOrds.bucketsInOrd(owningBucketOrd);
+         }
+ 
+         @Override
+         public long size() {
+             return longToBucketOrds.size();
+         }
+ 
+         @Override
+         public BucketOrdsEnum ordsEnum(long owningBucketOrd) {
+             LongKeyedBucketOrds.BucketOrdsEnum delegate = longToBucketOrds.ordsEnum(owningBucketOrd);
+             return new BucketOrdsEnum() {
+                 @Override
+                 public boolean next() {
+                     return delegate.next();
+                 }
+ 
+                 @Override
+                 public long ord() {
+                     return delegate.ord();
+                 }
+ 
+                 @Override
+                 public void readValue(BytesRef dest) {
+                     bytesToLong.get(delegate.value(), dest);
+                 }
+             };
+         }
+ 
+         @Override
+         public void close() {
+             Releasables.close(bytesToLong, longToBucketOrds);
+         }
+     }
+ }
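The add() contract is easiest to see in isolation: a new key returns its fresh ordinal, while a duplicate returns -1-ord of the existing bucket. A self-contained sketch using the non-recycling BigArrays instance:

    // Hedged sketch of the ord-encoding contract; not part of this patch.
    try (BytesKeyedBucketOrds ords = BytesKeyedBucketOrds.build(
            BigArrays.NON_RECYCLING_INSTANCE, CardinalityUpperBound.ONE)) {
        long first = ords.add(0, new BytesRef("alpha")); // >= 0: newly created bucket
        long again = ords.add(0, new BytesRef("alpha")); // < 0: key already present
        assert first == -1 - again;                      // recover the original ordinal
        BytesKeyedBucketOrds.BucketOrdsEnum e = ords.ordsEnum(0); // unpositioned until next()
        BytesRef scratch = new BytesRef();
        while (e.next()) {
            e.readValue(scratch);
            System.out.println(e.ord() + " -> " + scratch.utf8ToString());
        }
    }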
diff --git a/htmlReport/ns-1/sources/source-6.html b/htmlReport/ns-1/sources/source-6.html
new file mode 100644
index 0000000000000..7534d4c5305e6
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-6.html
@@ -0,0 +1,396 @@
+ [HTML report boilerplate omitted]
+ Coverage Summary for Class: DoubleTerms (org.opensearch.search.aggregations.bucket.terms)
+
+ Class              | Method, %  | Line, %
+ DoubleTerms        | 0% (0/9)   | 0% (0/32)
+ DoubleTerms$Bucket | 0% (0/10)  | 0% (0/15)
+ Total              | 0% (0/19)  | 0% (0/47)
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.opensearch.core.common.io.stream.StreamInput;
+ import org.opensearch.core.common.io.stream.StreamOutput;
+ import org.opensearch.core.xcontent.XContentBuilder;
+ import org.opensearch.search.DocValueFormat;
+ import org.opensearch.search.aggregations.BucketOrder;
+ import org.opensearch.search.aggregations.InternalAggregation;
+ import org.opensearch.search.aggregations.InternalAggregations;
+ 
+ import java.io.IOException;
+ import java.util.ArrayList;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Objects;
+ 
+ /**
+  * Result of the {@link TermsAggregator} when the field is some kind of decimal number like a float, double, or distance.
+  *
+  * @opensearch.internal
+  */
+ public class DoubleTerms extends InternalMappedTerms<DoubleTerms, DoubleTerms.Bucket> {
+     public static final String NAME = "dterms";
+ 
+     /**
+      * Bucket for a double terms agg
+      *
+      * @opensearch.internal
+      */
+     static class Bucket extends InternalTerms.Bucket<Bucket> {
+         double term;
+ 
+         Bucket(
+             double term,
+             long docCount,
+             InternalAggregations aggregations,
+             boolean showDocCountError,
+             long docCountError,
+             DocValueFormat format
+         ) {
+             super(docCount, aggregations, showDocCountError, docCountError, format);
+             this.term = term;
+         }
+ 
+         /**
+          * Read from a stream.
+          */
+         Bucket(StreamInput in, DocValueFormat format, boolean showDocCountError) throws IOException {
+             super(in, format, showDocCountError);
+             term = in.readDouble();
+         }
+ 
+         @Override
+         protected void writeTermTo(StreamOutput out) throws IOException {
+             out.writeDouble(term);
+         }
+ 
+         @Override
+         public String getKeyAsString() {
+             return format.format(term).toString();
+         }
+ 
+         @Override
+         public Object getKey() {
+             return term;
+         }
+ 
+         @Override
+         public Number getKeyAsNumber() {
+             return term;
+         }
+ 
+         @Override
+         public int compareKey(Bucket other) {
+             return Double.compare(term, other.term);
+         }
+ 
+         @Override
+         protected final XContentBuilder keyToXContent(XContentBuilder builder) throws IOException {
+             builder.field(CommonFields.KEY.getPreferredName(), term);
+             if (format != DocValueFormat.RAW) {
+                 builder.field(CommonFields.KEY_AS_STRING.getPreferredName(), format.format(term).toString());
+             }
+             return builder;
+         }
+ 
+         @Override
+         public boolean equals(Object obj) {
+             return super.equals(obj) && Objects.equals(term, ((Bucket) obj).term);
+         }
+ 
+         @Override
+         public int hashCode() {
+             return Objects.hash(super.hashCode(), term);
+         }
+     }
+ 
+     public DoubleTerms(
+         String name,
+         BucketOrder reduceOrder,
+         BucketOrder order,
+         Map<String, Object> metadata,
+         DocValueFormat format,
+         int shardSize,
+         boolean showTermDocCountError,
+         long otherDocCount,
+         List<Bucket> buckets,
+         long docCountError,
+         TermsAggregator.BucketCountThresholds bucketCountThresholds
+     ) {
+         super(
+             name,
+             reduceOrder,
+             order,
+             metadata,
+             format,
+             shardSize,
+             showTermDocCountError,
+             otherDocCount,
+             buckets,
+             docCountError,
+             bucketCountThresholds
+         );
+     }
+ 
+     /**
+      * Read from a stream.
+      */
+     public DoubleTerms(StreamInput in) throws IOException {
+         super(in, Bucket::new);
+     }
+ 
+     @Override
+     public String getWriteableName() {
+         return NAME;
+     }
+ 
+     @Override
+     public DoubleTerms create(List<Bucket> buckets) {
+         return new DoubleTerms(
+             name,
+             reduceOrder,
+             order,
+             metadata,
+             format,
+             shardSize,
+             showTermDocCountError,
+             otherDocCount,
+             buckets,
+             docCountError,
+             bucketCountThresholds
+         );
+     }
+ 
+     @Override
+     public Bucket createBucket(InternalAggregations aggregations, Bucket prototype) {
+         return new Bucket(
+             prototype.term,
+             prototype.docCount,
+             aggregations,
+             prototype.showDocCountError,
+             prototype.docCountError,
+             prototype.format
+         );
+     }
+ 
+     @Override
+     protected DoubleTerms create(String name, List<Bucket> buckets, BucketOrder reduceOrder, long docCountError, long otherDocCount) {
+         return new DoubleTerms(
+             name,
+             reduceOrder,
+             order,
+             getMetadata(),
+             format,
+             shardSize,
+             showTermDocCountError,
+             otherDocCount,
+             buckets,
+             docCountError,
+             bucketCountThresholds
+         );
+     }
+ 
+     @Override
+     protected Bucket[] createBucketsArray(int size) {
+         return new Bucket[size];
+     }
+ 
+     @Override
+     public InternalAggregation reduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
+         boolean promoteToDouble = false;
+         for (InternalAggregation agg : aggregations) {
+             if (agg instanceof LongTerms
+                 && (((LongTerms) agg).format == DocValueFormat.RAW || ((LongTerms) agg).format == DocValueFormat.UNSIGNED_LONG_SHIFTED)) {
+                 /*
+                  * this terms agg mixes longs and doubles, we must promote longs to doubles to make the internal aggs
+                  * compatible
+                  */
+                 promoteToDouble = true;
+                 break;
+             } else if (agg instanceof UnsignedLongTerms
+                 && (((UnsignedLongTerms) agg).format == DocValueFormat.RAW
+                     || ((UnsignedLongTerms) agg).format == DocValueFormat.UNSIGNED_LONG_SHIFTED
+                     || ((UnsignedLongTerms) agg).format == DocValueFormat.UNSIGNED_LONG)) {
+                         /*
+                          * this terms agg mixes unsigned longs and doubles, we must promote unsigned longs to doubles to make the internal aggs
+                          * compatible
+                          */
+                         promoteToDouble = true;
+                         break;
+                     }
+         }
+         if (promoteToDouble == false) {
+             return super.reduce(aggregations, reduceContext);
+         }
+         List<InternalAggregation> newAggs = new ArrayList<>(aggregations.size());
+         for (InternalAggregation agg : aggregations) {
+             if (agg instanceof LongTerms) {
+                 DoubleTerms dTerms = LongTerms.convertLongTermsToDouble((LongTerms) agg, format);
+                 newAggs.add(dTerms);
+             } else if (agg instanceof UnsignedLongTerms) {
+                 DoubleTerms dTerms = UnsignedLongTerms.convertUnsignedLongTermsToDouble((UnsignedLongTerms) agg, format);
+                 newAggs.add(dTerms);
+             } else {
+                 newAggs.add(agg);
+             }
+         }
+         return newAggs.get(0).reduce(newAggs, reduceContext);
+     }
+ 
+     @Override
+     Bucket createBucket(long docCount, InternalAggregations aggs, long docCountError, DoubleTerms.Bucket prototype) {
+         return new Bucket(prototype.term, docCount, aggs, prototype.showDocCountError, docCountError, format);
+     }
+ }
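The promotion branch in reduce() exists because shards can disagree on key type: a raw-format shard result keyed by longs cannot be merged with double keys until it is converted. The helper invoked in reduce() can also be called directly; longTermsShardResult below is a hypothetical LongTerms instance:

    // Hedged illustration of the promotion step used in reduce().
    DoubleTerms promoted = LongTerms.convertLongTermsToDouble(longTermsShardResult, DocValueFormat.RAW);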
diff --git a/htmlReport/ns-1/sources/source-7.html b/htmlReport/ns-1/sources/source-7.html
new file mode 100644
index 0000000000000..812ea279b4299
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-7.html
@@ -0,0 +1,1507 @@
+ [HTML report boilerplate omitted]
+ Coverage Summary for Class: GlobalOrdinalsStringTermsAggregator (org.opensearch.search.aggregations.bucket.terms)
+
+ Class                                                         | Method, %      | Line, %
+ GlobalOrdinalsStringTermsAggregator                           | 76.9% (10/13)  | 72.5% (58/80)
+ GlobalOrdinalsStringTermsAggregator$1                         | 0% (0/2)       | 0% (0/4)
+ GlobalOrdinalsStringTermsAggregator$2                         | 100% (2/2)     | 100% (5/5)
+ GlobalOrdinalsStringTermsAggregator$3                         | 0% (0/2)       | 0% (0/4)
+ GlobalOrdinalsStringTermsAggregator$4                         | 100% (2/2)     | 100% (6/6)
+ GlobalOrdinalsStringTermsAggregator$BucketInfoConsumer        | -              | -
+ GlobalOrdinalsStringTermsAggregator$BucketUpdater             | -              | -
+ GlobalOrdinalsStringTermsAggregator$CollectionStrategy        | 100% (1/1)     | 100% (1/1)
+ GlobalOrdinalsStringTermsAggregator$DenseGlobalOrds           | 62.5% (5/8)    | 75% (12/16)
+ GlobalOrdinalsStringTermsAggregator$GlobalOrdLookupFunction   | -              | -
+ GlobalOrdinalsStringTermsAggregator$LowCardinality            | 0% (0/7)       | 0% (0/32)
+ GlobalOrdinalsStringTermsAggregator$LowCardinality$1          | 0% (0/2)       | 0% (0/6)
+ GlobalOrdinalsStringTermsAggregator$LowCardinality$2          | 0% (0/2)       | 0% (0/6)
+ GlobalOrdinalsStringTermsAggregator$OrdBucket                 | 28.6% (2/7)    | 28.6% (2/7)
+ GlobalOrdinalsStringTermsAggregator$RemapGlobalOrds           | 62.5% (5/8)    | 44.4% (12/27)
+ GlobalOrdinalsStringTermsAggregator$ResultStrategy            | 100% (2/2)     | 80.8% (21/26)
+ GlobalOrdinalsStringTermsAggregator$ResultStrategy$1          | 100% (2/2)     | 100% (8/8)
+ GlobalOrdinalsStringTermsAggregator$SignificantTermsResults   | 0% (0/17)      | 0% (0/36)
+ GlobalOrdinalsStringTermsAggregator$SignificantTermsResults$1 | 0% (0/2)       | 0% (0/4)
+ GlobalOrdinalsStringTermsAggregator$StandardTermsResults      | 80% (12/15)    | 82.8% (24/29)
+ Total                                                         | 45.7% (43/94)  | 50.2% (149/297)
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ 
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.apache.lucene.index.DocValues;
+ import org.apache.lucene.index.IndexReader;
+ import org.apache.lucene.index.LeafReaderContext;
+ import org.apache.lucene.index.NumericDocValues;
+ import org.apache.lucene.index.SortedDocValues;
+ import org.apache.lucene.index.SortedSetDocValues;
+ import org.apache.lucene.index.Terms;
+ import org.apache.lucene.index.TermsEnum;
+ import org.apache.lucene.search.Weight;
+ import org.apache.lucene.util.ArrayUtil;
+ import org.apache.lucene.util.BytesRef;
+ import org.apache.lucene.util.PriorityQueue;
+ import org.opensearch.common.lease.Releasable;
+ import org.opensearch.common.lease.Releasables;
+ import org.opensearch.common.util.LongArray;
+ import org.opensearch.common.util.LongHash;
+ import org.opensearch.core.common.io.stream.StreamOutput;
+ import org.opensearch.core.xcontent.XContentBuilder;
+ import org.opensearch.index.mapper.DocCountFieldMapper;
+ import org.opensearch.search.DocValueFormat;
+ import org.opensearch.search.aggregations.AggregationExecutionException;
+ import org.opensearch.search.aggregations.Aggregator;
+ import org.opensearch.search.aggregations.AggregatorFactories;
+ import org.opensearch.search.aggregations.BucketOrder;
+ import org.opensearch.search.aggregations.CardinalityUpperBound;
+ import org.opensearch.search.aggregations.InternalAggregation;
+ import org.opensearch.search.aggregations.InternalMultiBucketAggregation;
+ import org.opensearch.search.aggregations.InternalOrder;
+ import org.opensearch.search.aggregations.LeafBucketCollector;
+ import org.opensearch.search.aggregations.LeafBucketCollectorBase;
+ import org.opensearch.search.aggregations.bucket.LocalBucketCountThresholds;
+ import org.opensearch.search.aggregations.bucket.terms.SignificanceLookup.BackgroundFrequencyForBytes;
+ import org.opensearch.search.aggregations.bucket.terms.heuristic.SignificanceHeuristic;
+ import org.opensearch.search.aggregations.support.ValuesSource;
+ import org.opensearch.search.internal.SearchContext;
+ 
+ import java.io.IOException;
+ import java.util.Arrays;
+ import java.util.Map;
+ import java.util.function.BiConsumer;
+ import java.util.function.Function;
+ import java.util.function.LongPredicate;
+ import java.util.function.LongUnaryOperator;
+ 
+ import static org.opensearch.search.aggregations.InternalOrder.isKeyOrder;
+ import static org.apache.lucene.index.SortedSetDocValues.NO_MORE_ORDS;
+ import static org.apache.lucene.search.DocIdSetIterator.NO_MORE_DOCS;
+ 
+ /**
+  * An aggregator of string values that relies on global ordinals in order to build buckets.
+  *
+  * @opensearch.internal
+  */
+ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggregator {
+     protected final ResultStrategy<?, ?, ?> resultStrategy;
+     protected final ValuesSource.Bytes.WithOrdinals valuesSource;
+ 
+     private final LongPredicate acceptedGlobalOrdinals;
+     private final long valueCount;
+     private final String fieldName;
+     private Weight weight;
+     private final GlobalOrdLookupFunction lookupGlobalOrd;
+     protected final CollectionStrategy collectionStrategy;
+     protected int segmentsWithSingleValuedOrds = 0;
+     protected int segmentsWithMultiValuedOrds = 0;
+ 
+     /**
+      * Lookup global ordinals
+      *
+      * @opensearch.internal
+      */
+     public interface GlobalOrdLookupFunction {
+         BytesRef apply(long ord) throws IOException;
+     }
+ 
+     public GlobalOrdinalsStringTermsAggregator(
+         String name,
+         AggregatorFactories factories,
+         Function<GlobalOrdinalsStringTermsAggregator, ResultStrategy<?, ?, ?>> resultStrategy,
+         ValuesSource.Bytes.WithOrdinals valuesSource,
+         BucketOrder order,
+         DocValueFormat format,
+         BucketCountThresholds bucketCountThresholds,
+         IncludeExclude.OrdinalsFilter includeExclude,
+         SearchContext context,
+         Aggregator parent,
+         boolean remapGlobalOrds,
+         SubAggCollectionMode collectionMode,
+         boolean showTermDocCountError,
+         CardinalityUpperBound cardinality,
+         Map<String, Object> metadata
+     ) throws IOException {
+         super(name, factories, context, parent, order, format, bucketCountThresholds, collectionMode, showTermDocCountError, metadata);
+         this.resultStrategy = resultStrategy.apply(this); // ResultStrategy needs a reference to the Aggregator to do its job.
+         this.valuesSource = valuesSource;
+         final IndexReader reader = context.searcher().getIndexReader();
+         final SortedSetDocValues values = reader.leaves().size() > 0
+             ? valuesSource.globalOrdinalsValues(context.searcher().getIndexReader().leaves().get(0))
+             : DocValues.emptySortedSet();
+         this.valueCount = values.getValueCount();
+         this.lookupGlobalOrd = values::lookupOrd;
+         this.acceptedGlobalOrdinals = includeExclude == null ? ALWAYS_TRUE : includeExclude.acceptedGlobalOrdinals(values)::get;
+         if (remapGlobalOrds) {
+             this.collectionStrategy = new RemapGlobalOrds(cardinality);
+         } else {
+             this.collectionStrategy = cardinality.map(estimate -> {
+                 if (estimate > 1) {
+                     throw new AggregationExecutionException("Dense ords don't know how to collect from many buckets");
+                 }
+                 return new DenseGlobalOrds();
+             });
+         }
+         this.fieldName = (valuesSource instanceof ValuesSource.Bytes.WithOrdinals.FieldData)
+             ? ((ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource).getIndexFieldName()
+             : null;
+     }
+ 
+     String descriptCollectionStrategy() {
+         return collectionStrategy.describe();
+     }
+ 
+     public void setWeight(Weight weight) {
+         this.weight = weight;
+     }
+ 
+     /**
+      * Collects term frequencies for a given field from a LeafReaderContext directly from stored segment terms.
+      *
+      * @param ctx The LeafReaderContext to collect terms from
+      * @param globalOrds The SortedSetDocValues for the field's ordinals
+      * @param ordCountConsumer A consumer to accept collected term frequencies
+      * @return A no-operation LeafBucketCollector implementation, since collection is complete
+      * @throws IOException If an I/O error occurs during reading
+      */
+     LeafBucketCollector termDocFreqCollector(
+         LeafReaderContext ctx,
+         SortedSetDocValues globalOrds,
+         BiConsumer<Long, Integer> ordCountConsumer
+     ) throws IOException {
+         if (weight == null) {
+             // Weight not assigned - cannot use this optimization
+             return null;
+         } else {
+             if (weight.count(ctx) == 0) {
+                 // No documents match the top-level query on this segment, so we can skip it entirely
+                 return LeafBucketCollector.NO_OP_COLLECTOR;
+             } else if (weight.count(ctx) != ctx.reader().maxDoc()) {
+                 // The optimization requires weight.count(ctx) == ctx.reader().maxDoc(), i.e. no
+                 // deleted documents and a top-level query that matches every doc in the segment.
+                 return null;
+             }
+         }
+ 
+         Terms segmentTerms = ctx.reader().terms(this.fieldName);
+         if (segmentTerms == null) {
+             // Field is not indexed.
+             return null;
+         }
+ 
+         NumericDocValues docCountValues = DocValues.getNumeric(ctx.reader(), DocCountFieldMapper.NAME);
+         if (docCountValues.nextDoc() != NO_MORE_DOCS) {
+             // This segment has at least one document with the _doc_count field, so index
+             // docFreq need not equal the bucket doc count and this optimization cannot be used.
+             return null;
+         }
+ 
+         TermsEnum indexTermsEnum = segmentTerms.iterator();
+         BytesRef indexTerm = indexTermsEnum.next();
+         TermsEnum globalOrdinalTermsEnum = globalOrds.termsEnum();
+         BytesRef ordinalTerm = globalOrdinalTermsEnum.next();
+ 
+         // Iterate over the terms in the segment, look for matches in the global ordinal terms,
+         // and increment bucket count when segment terms match global ordinal terms.
+         while (indexTerm != null && ordinalTerm != null) {
+             int compare = indexTerm.compareTo(ordinalTerm);
+             if (compare == 0) {
+                 if (acceptedGlobalOrdinals.test(globalOrdinalTermsEnum.ord())) {
+                     ordCountConsumer.accept(globalOrdinalTermsEnum.ord(), indexTermsEnum.docFreq());
+                 }
+                 indexTerm = indexTermsEnum.next();
+                 ordinalTerm = globalOrdinalTermsEnum.next();
+             } else if (compare < 0) {
+                 indexTerm = indexTermsEnum.next();
+             } else {
+                 ordinalTerm = globalOrdinalTermsEnum.next();
+             }
+         }
+         return LeafBucketCollector.NO_OP_COLLECTOR;
+     }
+ 
+     @Override
+     public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCollector sub) throws IOException {
+         SortedSetDocValues globalOrds = valuesSource.globalOrdinalsValues(ctx);
+         collectionStrategy.globalOrdsReady(globalOrds);
+ 
+         if (collectionStrategy instanceof DenseGlobalOrds
+             && this.resultStrategy instanceof StandardTermsResults
+             && sub == LeafBucketCollector.NO_OP_COLLECTOR) {
+             LeafBucketCollector termDocFreqCollector = termDocFreqCollector(
+                 ctx,
+                 globalOrds,
+                 (ord, docCount) -> incrementBucketDocCount(collectionStrategy.globalOrdToBucketOrd(0, ord), docCount)
+             );
+             if (termDocFreqCollector != null) {
+                 return termDocFreqCollector;
+             }
+         }
+ 
+         SortedDocValues singleValues = DocValues.unwrapSingleton(globalOrds);
+         if (singleValues != null) {
+             segmentsWithSingleValuedOrds++;
+             if (acceptedGlobalOrdinals == ALWAYS_TRUE) {
+                 /*
+                  * Optimize when there isn't a filter because that is very
+                  * common and marginally faster.
+                  */
+                 return resultStrategy.wrapCollector(new LeafBucketCollectorBase(sub, globalOrds) {
+                     @Override
+                     public void collect(int doc, long owningBucketOrd) throws IOException {
+                         if (false == singleValues.advanceExact(doc)) {
+                             return;
+                         }
+                         int globalOrd = singleValues.ordValue();
+                         collectionStrategy.collectGlobalOrd(owningBucketOrd, doc, globalOrd, sub);
+                     }
+                 });
+             }
+             return resultStrategy.wrapCollector(new LeafBucketCollectorBase(sub, globalOrds) {
+                 @Override
+                 public void collect(int doc, long owningBucketOrd) throws IOException {
+                     if (false == singleValues.advanceExact(doc)) {
+                         return;
+                     }
+                     int globalOrd = singleValues.ordValue();
+                     if (false == acceptedGlobalOrdinals.test(globalOrd)) {
+                         return;
+                     }
+                     collectionStrategy.collectGlobalOrd(owningBucketOrd, doc, globalOrd, sub);
+                 }
+             });
+         }
+         segmentsWithMultiValuedOrds++;
+         if (acceptedGlobalOrdinals == ALWAYS_TRUE) {
+             /*
+              * Optimize when there isn't a filter because that is very
+              * common and marginally faster.
+              */
+             return resultStrategy.wrapCollector(new LeafBucketCollectorBase(sub, globalOrds) {
+                 @Override
+                 public void collect(int doc, long owningBucketOrd) throws IOException {
+                     if (false == globalOrds.advanceExact(doc)) {
+                         return;
+                     }
+                     for (long globalOrd = globalOrds.nextOrd(); globalOrd != NO_MORE_ORDS; globalOrd = globalOrds.nextOrd()) {
+                         collectionStrategy.collectGlobalOrd(owningBucketOrd, doc, globalOrd, sub);
+                     }
+                 }
+             });
+         }
+         return resultStrategy.wrapCollector(new LeafBucketCollectorBase(sub, globalOrds) {
+             @Override
+             public void collect(int doc, long owningBucketOrd) throws IOException {
+                 if (false == globalOrds.advanceExact(doc)) {
+                     return;
+                 }
+                 for (long globalOrd = globalOrds.nextOrd(); globalOrd != NO_MORE_ORDS; globalOrd = globalOrds.nextOrd()) {
+                     if (false == acceptedGlobalOrdinals.test(globalOrd)) {
+                         continue;
+                     }
+                     collectionStrategy.collectGlobalOrd(owningBucketOrd, doc, globalOrd, sub);
+                 }
+             }
+         });
+     }
+ 
+     @Override
+     public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException {
+         return resultStrategy.buildAggregations(owningBucketOrds);
+     }
+ 
+     @Override
+     public InternalAggregation buildEmptyAggregation() {
+         return resultStrategy.buildEmptyResult();
+     }
+ 
+     @Override
+     public void collectDebugInfo(BiConsumer<String, Object> add) {
+         super.collectDebugInfo(add);
+         add.accept("collection_strategy", collectionStrategy.describe());
+         collectionStrategy.collectDebugInfo(add);
+         add.accept("result_strategy", resultStrategy.describe());
+         add.accept("segments_with_single_valued_ords", segmentsWithSingleValuedOrds);
+         add.accept("segments_with_multi_valued_ords", segmentsWithMultiValuedOrds);
+         add.accept("has_filter", acceptedGlobalOrdinals != ALWAYS_TRUE);
+     }
+ 
+     /**
+      * This is used internally only, to compare by global ordinal instead of term bytes in the PQ
+      *
+      * @opensearch.internal
+      */
+     static class OrdBucket extends InternalTerms.Bucket<OrdBucket> {
+         long globalOrd;
+ 
+         OrdBucket(boolean showDocCountError, DocValueFormat format) {
+             super(0, null, showDocCountError, 0, format);
+         }
+ 
+         @Override
+         public int compareKey(OrdBucket other) {
+             return Long.compare(globalOrd, other.globalOrd);
+         }
+ 
+         @Override
+         public String getKeyAsString() {
+             throw new UnsupportedOperationException();
+         }
+ 
+         @Override
+         public Object getKey() {
+             throw new UnsupportedOperationException();
+         }
+ 
+         @Override
+         public Number getKeyAsNumber() {
+             throw new UnsupportedOperationException();
+         }
+ 
+         @Override
+         protected void writeTermTo(StreamOutput out) throws IOException {
+             throw new UnsupportedOperationException();
+         }
+ 
+         @Override
+         protected final XContentBuilder keyToXContent(XContentBuilder builder) throws IOException {
+             throw new UnsupportedOperationException();
+         }
+     }
+ 
+     @Override
+     protected void doClose() {
+         Releasables.close(resultStrategy, collectionStrategy);
+     }
+ 
+     /**
+      * Variant of {@link GlobalOrdinalsStringTermsAggregator} that
+      * resolves global ordinals after segment collection instead of on the fly
+      * for each match. This is beneficial for low-cardinality fields, because
+      * it can reduce the number of lookups significantly.
+      * <p>
+      * This is only supported for the standard {@code terms} aggregation and
+      * doesn't support {@code significant_terms} so this forces
+      * {@link StandardTermsResults}.
+      *
+      * @opensearch.internal
+      */
+     static class LowCardinality extends GlobalOrdinalsStringTermsAggregator {
+ 
+         private LongUnaryOperator mapping;
+         private LongArray segmentDocCounts;
+ 
+         LowCardinality(
+             String name,
+             AggregatorFactories factories,
+             Function<GlobalOrdinalsStringTermsAggregator, ResultStrategy<?, ?, ?>> resultStrategy,
+             ValuesSource.Bytes.WithOrdinals valuesSource,
+             BucketOrder order,
+             DocValueFormat format,
+             BucketCountThresholds bucketCountThresholds,
+             SearchContext context,
+             Aggregator parent,
+             boolean remapGlobalOrds,
+             SubAggCollectionMode collectionMode,
+             boolean showTermDocCountError,
+             Map<String, Object> metadata
+         ) throws IOException {
+             super(
+                 name,
+                 factories,
+                 resultStrategy,
+                 valuesSource,
+                 order,
+                 format,
+                 bucketCountThresholds,
+                 null,
+                 context,
+                 parent,
+                 remapGlobalOrds,
+                 collectionMode,
+                 showTermDocCountError,
+                 CardinalityUpperBound.ONE,
+                 metadata
+             );
+             assert factories == null || factories.countAggregators() == 0;
+             this.segmentDocCounts = context.bigArrays().newLongArray(1, true);
+         }
+ 
+         @Override
+         public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCollector sub) throws IOException {
+             if (mapping != null) {
+                 mapSegmentCountsToGlobalCounts(mapping);
+             }
+             final SortedSetDocValues segmentOrds = valuesSource.ordinalsValues(ctx);
+             segmentDocCounts = context.bigArrays().grow(segmentDocCounts, 1 + segmentOrds.getValueCount());
+             assert sub == LeafBucketCollector.NO_OP_COLLECTOR;
+             mapping = valuesSource.globalOrdinalsMapping(ctx);
+ 
+             if (this.resultStrategy instanceof StandardTermsResults) {
+                 LeafBucketCollector termDocFreqCollector = this.termDocFreqCollector(
+                     ctx,
+                     segmentOrds,
+                     (ord, docCount) -> incrementBucketDocCount(mapping.applyAsLong(ord), docCount)
+                 );
+                 if (termDocFreqCollector != null) {
+                     return termDocFreqCollector;
+                 }
+             }
+ 
+             final SortedDocValues singleValues = DocValues.unwrapSingleton(segmentOrds);
+             if (singleValues != null) {
+                 segmentsWithSingleValuedOrds++;
+                 return resultStrategy.wrapCollector(new LeafBucketCollectorBase(sub, segmentOrds) {
+                     @Override
+                     public void collect(int doc, long owningBucketOrd) throws IOException {
+                         assert owningBucketOrd == 0;
+                         if (false == singleValues.advanceExact(doc)) {
+                             return;
+                         }
+                         int ord = singleValues.ordValue();
+                         long docCount = docCountProvider.getDocCount(doc);
+                         segmentDocCounts.increment(ord + 1, docCount);
+                     }
+                 });
+             }
+             segmentsWithMultiValuedOrds++;
+             return resultStrategy.wrapCollector(new LeafBucketCollectorBase(sub, segmentOrds) {
+                 @Override
+                 public void collect(int doc, long owningBucketOrd) throws IOException {
+                     assert owningBucketOrd == 0;
+                     if (false == segmentOrds.advanceExact(doc)) {
+                         return;
+                     }
+                     for (long segmentOrd = segmentOrds.nextOrd(); segmentOrd != NO_MORE_ORDS; segmentOrd = segmentOrds.nextOrd()) {
+                         long docCount = docCountProvider.getDocCount(doc);
+                         segmentDocCounts.increment(segmentOrd + 1, docCount);
+                     }
+                 }
+             });
+         }
+ 
+         @Override
+         protected void doPostCollection() throws IOException {
+             if (mapping != null) {
+                 mapSegmentCountsToGlobalCounts(mapping);
+                 mapping = null;
+             }
+         }
+ 
+         @Override
+         protected void doClose() {
+             Releasables.close(resultStrategy, segmentDocCounts, collectionStrategy);
+         }
+ 
+         private void mapSegmentCountsToGlobalCounts(LongUnaryOperator mapping) throws IOException {
+             for (long i = 1; i < segmentDocCounts.size(); i++) {
+                 // We use set(...) here, because we need to reset the slot to 0.
+                 // segmentDocCounts gets reused across segments and otherwise counts would be too high.
+                 long inc = segmentDocCounts.set(i, 0);
+                 if (inc == 0) {
+                     continue;
+                 }
+                 long ord = i - 1; // remember we do +1 when counting
+                 long globalOrd = mapping.applyAsLong(ord);
+                 incrementBucketDocCount(collectionStrategy.globalOrdToBucketOrd(0, globalOrd), inc);
+             }
+         }
+     }
+ 
+     /**
+      * Strategy for collecting global ordinals.
+      * <p>
+      * The {@link GlobalOrdinalsStringTermsAggregator} uses one of these
+      * to collect the global ordinals by calling
+      * {@link CollectionStrategy#collectGlobalOrd} for each global ordinal
+      * that it hits and then calling {@link CollectionStrategy#forEach}
+      * once to iterate on the results.
+      */
+     abstract class CollectionStrategy implements Releasable {
+         /**
+          * Short description of the collection mechanism added to the profile
+          * output to help with debugging.
+          */
+         abstract String describe();
+ 
+         /**
+          * Collect debug information to add to the profiling results. This will
+          * only be called if the aggregation is being profiled.
+          */
+         abstract void collectDebugInfo(BiConsumer<String, Object> add);
+ 
+         /**
+          * Called when the global ordinals are ready.
+          */
+         abstract void globalOrdsReady(SortedSetDocValues globalOrds);
+ 
+         /**
+          * Called once per unique document, global ordinal combination to
+          * collect the bucket.
+          *
+          * @param owningBucketOrd the ordinal of the bucket that owns this collection
+          * @param doc the doc id to collect
+          * @param globalOrd the global ordinal to collect
+          * @param sub the sub-aggregators that will collect the bucket data
+          */
+         abstract void collectGlobalOrd(long owningBucketOrd, int doc, long globalOrd, LeafBucketCollector sub) throws IOException;
+ 
+         /**
+          * Convert a global ordinal into a bucket ordinal.
+          */
+         abstract long globalOrdToBucketOrd(long owningBucketOrd, long globalOrd);
+ 
+         /**
+          * Iterate all of the buckets. Implementations take into account
+          * the {@link BucketCountThresholds}. In particular,
+          * if the {@link BucketCountThresholds#getMinDocCount()} is 0 then
+          * they'll make sure to iterate a bucket even if it was never
+          * {@link #collectGlobalOrd collected}.
+          * If {@link BucketCountThresholds#getMinDocCount()} is not 0 then
+          * they'll skip all global ords that weren't collected.
+          */
+         abstract void forEach(long owningBucketOrd, BucketInfoConsumer consumer) throws IOException;
+     }
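+ 
+     // Illustrative only, not part of the original class: the aggregator drives a
+     // CollectionStrategy roughly as
+     //   strategy.globalOrdsReady(globalOrds);                                    // once the global ords are built
+     //   strategy.collectGlobalOrd(owningBucketOrd, doc, globalOrd, sub);         // per unique doc/ordinal hit
+     //   strategy.forEach(owningBucketOrd, (globalOrd, bucketOrd, count) -> ...); // once, when building results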
+ 
+     interface BucketInfoConsumer {
+         void accept(long globalOrd, long bucketOrd, long docCount) throws IOException;
+     }
+ 
+     /**
+      * {@linkplain CollectionStrategy} that just uses the global ordinal as the
+      * bucket ordinal.
+      */
+     class DenseGlobalOrds extends CollectionStrategy {
+         @Override
+         String describe() {
+             return "dense";
+         }
+ 
+         @Override
+         void collectDebugInfo(BiConsumer<String, Object> add) {}
+ 
+         @Override
+         void globalOrdsReady(SortedSetDocValues globalOrds) {
+             grow(globalOrds.getValueCount());
+         }
+ 
+         @Override
+         void collectGlobalOrd(long owningBucketOrd, int doc, long globalOrd, LeafBucketCollector sub) throws IOException {
+             assert owningBucketOrd == 0;
+             collectExistingBucket(sub, doc, globalOrd);
+         }
+ 
+         @Override
+         long globalOrdToBucketOrd(long owningBucketOrd, long globalOrd) {
+             assert owningBucketOrd == 0;
+             return globalOrd;
+         }
+ 
+         @Override
+         void forEach(long owningBucketOrd, BucketInfoConsumer consumer) throws IOException {
+             assert owningBucketOrd == 0;
+             for (long globalOrd = 0; globalOrd < valueCount; globalOrd++) {
+                 if (false == acceptedGlobalOrdinals.test(globalOrd)) {
+                     continue;
+                 }
+                 long docCount = bucketDocCount(globalOrd);
+                 if (bucketCountThresholds.getMinDocCount() == 0 || docCount > 0) {
+                     consumer.accept(globalOrd, globalOrd, docCount);
+                 }
+             }
+         }
+ 
+         @Override
+         public void close() {}
+     }
+ 
+     /**
+      * {@linkplain CollectionStrategy} that uses a {@link LongHash} to map the
+      * global ordinal into bucket ordinals. This uses more memory than
+      * {@link DenseGlobalOrds} when collecting every ordinal, but significantly
+      * less when collecting only a few.
+      */
+     private class RemapGlobalOrds extends CollectionStrategy {
+         private final LongKeyedBucketOrds bucketOrds;
+ 
+         private RemapGlobalOrds(CardinalityUpperBound cardinality) {
+             bucketOrds = LongKeyedBucketOrds.build(context.bigArrays(), cardinality);
+         }
+ 
+         @Override
+         String describe() {
+             return "remap";
+         }
+ 
+         @Override
+         void collectDebugInfo(BiConsumer<String, Object> add) {
+             add.accept("total_buckets", bucketOrds.size());
+         }
+ 
+         @Override
+         void globalOrdsReady(SortedSetDocValues globalOrds) {}
+ 
+         @Override
+         void collectGlobalOrd(long owningBucketOrd, int doc, long globalOrd, LeafBucketCollector sub) throws IOException {
+             long bucketOrd = bucketOrds.add(owningBucketOrd, globalOrd);
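+             // add(...) follows the LongHash convention: a negative return of -1 - ord means
+             // the (owningBucketOrd, globalOrd) key was already present under ordinal ord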
+             if (bucketOrd < 0) {
+                 bucketOrd = -1 - bucketOrd;
+                 collectExistingBucket(sub, doc, bucketOrd);
+             } else {
+                 collectBucket(sub, doc, bucketOrd);
+             }
+         }
+ 
+         @Override
+         long globalOrdToBucketOrd(long owningBucketOrd, long globalOrd) {
+             return bucketOrds.find(owningBucketOrd, globalOrd);
+         }
+ 
+         @Override
+         void forEach(long owningBucketOrd, BucketInfoConsumer consumer) throws IOException {
+             if (bucketCountThresholds.getMinDocCount() == 0) {
+                 for (long globalOrd = 0; globalOrd < valueCount; globalOrd++) {
+                     if (false == acceptedGlobalOrdinals.test(globalOrd)) {
+                         continue;
+                     }
+                     /*
+                      * Use `add` instead of `find` here to assign an ordinal
+                      * even if the global ord wasn't found so we can build
+                      * sub-aggregations without trouble even though we haven't
+                      * hit any documents for them. This is wasteful, but
+                      * setting minDocCount == 0 is wasteful in general.
+                      */
+                     long bucketOrd = bucketOrds.add(owningBucketOrd, globalOrd);
+                     long docCount;
+                     if (bucketOrd < 0) {
+                         bucketOrd = -1 - bucketOrd;
+                         docCount = bucketDocCount(bucketOrd);
+                     } else {
+                         docCount = 0;
+                     }
+                     consumer.accept(globalOrd, bucketOrd, docCount);
+                 }
+             } else {
+                 LongKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrd);
+                 while (ordsEnum.next()) {
+                     if (false == acceptedGlobalOrdinals.test(ordsEnum.value())) {
+                         continue;
+                     }
+                     consumer.accept(ordsEnum.value(), ordsEnum.ord(), bucketDocCount(ordsEnum.ord()));
+                 }
+             }
+         }
+ 
+         @Override
+         public void close() {
+             bucketOrds.close();
+         }
+     }
+ 
+     /**
+      * Strategy for building results.
+      */
+     abstract class ResultStrategy<
+         R extends InternalAggregation,
+         B extends InternalMultiBucketAggregation.InternalBucket,
+         TB extends InternalMultiBucketAggregation.InternalBucket> implements Releasable {
+ 
+         private InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException {
+             LocalBucketCountThresholds localBucketCountThresholds = context.asLocalBucketCountThresholds(bucketCountThresholds);
+             if (valueCount == 0) { // no values for the field in this reader
+                 InternalAggregation[] results = new InternalAggregation[owningBucketOrds.length];
+                 for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) {
+                     results[ordIdx] = buildNoValuesResult(owningBucketOrds[ordIdx]);
+                 }
+                 return results;
+             }
+ 
+             B[][] topBucketsPreOrd = buildTopBucketsPerOrd(owningBucketOrds.length);
+             long[] otherDocCount = new long[owningBucketOrds.length];
+             for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) {
+                 final int size;
+                 if (localBucketCountThresholds.getMinDocCount() == 0) {
+                     // if minDocCount == 0 then we can end up with more buckets than maxBucketOrd() returns
+                     size = (int) Math.min(valueCount, localBucketCountThresholds.getRequiredSize());
+                 } else {
+                     size = (int) Math.min(maxBucketOrd(), localBucketCountThresholds.getRequiredSize());
+                 }
+                 PriorityQueue<TB> ordered = buildPriorityQueue(size);
+                 final int finalOrdIdx = ordIdx;
+                 BucketUpdater<TB> updater = bucketUpdater(owningBucketOrds[ordIdx]);
+                 collectionStrategy.forEach(owningBucketOrds[ordIdx], new BucketInfoConsumer() {
+                     TB spare = null;
+ 
+                     @Override
+                     public void accept(long globalOrd, long bucketOrd, long docCount) throws IOException {
+                         otherDocCount[finalOrdIdx] += docCount;
+                         if (docCount >= localBucketCountThresholds.getMinDocCount()) {
+                             if (spare == null) {
+                                 spare = buildEmptyTemporaryBucket();
+                             }
+                             updater.updateBucket(spare, globalOrd, bucketOrd, docCount);
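+                             // insertWithOverflow returns the displaced (or rejected) element, which is reused as the next spare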
+                             spare = ordered.insertWithOverflow(spare);
+                         }
+                     }
+                 });
+ 
+                 // Get the top buckets
+                 topBucketsPreOrd[ordIdx] = buildBuckets(ordered.size());
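+                 // pop() yields the least competitive bucket first, so the array is filled from the back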
+                 for (int i = ordered.size() - 1; i >= 0; --i) {
+                     topBucketsPreOrd[ordIdx][i] = convertTempBucketToRealBucket(ordered.pop());
+                     otherDocCount[ordIdx] -= topBucketsPreOrd[ordIdx][i].getDocCount();
+                 }
+             }
+ 
+             buildSubAggs(topBucketsPreOrd);
+ 
+             InternalAggregation[] results = new InternalAggregation[owningBucketOrds.length];
+             for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) {
+                 results[ordIdx] = buildResult(owningBucketOrds[ordIdx], otherDocCount[ordIdx], topBucketsPreOrd[ordIdx]);
+             }
+             return results;
+         }
+ 
+         /**
+          * Short description of the collection mechanism added to the profile
+          * output to help with debugging.
+          */
+         abstract String describe();
+ 
+         /**
+          * Wrap the "standard" terms collector to collect any additional
+          * information that this result type may need.
+          */
+         abstract LeafBucketCollector wrapCollector(LeafBucketCollector primary);
+ 
+         /**
+          * Build an empty temporary bucket.
+          */
+         abstract TB buildEmptyTemporaryBucket();
+ 
+         /**
+          * Update fields in {@code spare} to reflect information collected for
+          * this bucket ordinal.
+          */
+         abstract BucketUpdater<TB> bucketUpdater(long owningBucketOrd) throws IOException;
+ 
+         /**
+          * Build a {@link PriorityQueue} to sort the buckets. After we've
+          * collected all of the buckets we'll drain all entries from the queue.
+          */
+         abstract PriorityQueue<TB> buildPriorityQueue(int size);
+ 
+         /**
+          * Build an array to hold the "top" buckets for each ordinal.
+          */
+         abstract B[][] buildTopBucketsPerOrd(int size);
+ 
+         /**
+          * Build an array of buckets for a particular ordinal to collect the
+          * results. The populated array is passed to {@link #buildResult}.
+          */
+         abstract B[] buildBuckets(int size);
+ 
+         /**
+          * Convert a temporary bucket into a real bucket.
+          */
+         abstract B convertTempBucketToRealBucket(TB temp) throws IOException;
+ 
+         /**
+          * Build the sub-aggregations into the buckets. This will usually
+          * delegate to {@link #buildSubAggsForAllBuckets}.
+          */
+         abstract void buildSubAggs(B[][] topBucketsPreOrd) throws IOException;
+ 
+         /**
+          * Turn the buckets into an aggregation result.
+          */
+         abstract R buildResult(long owningBucketOrd, long otherDocCount, B[] topBuckets);
+ 
+         /**
+          * Build an "empty" result. Only called if there isn't any data on this
+          * shard.
+          */
+         abstract R buildEmptyResult();
+ 
+         /**
+          * Build an "empty" result for a particular bucket ordinal. Called when
+          * there aren't any values for the field on this shard.
+          */
+         abstract R buildNoValuesResult(long owningBucketOrdinal);
+     }
+ 
+     interface BucketUpdater<TB extends InternalMultiBucketAggregation.InternalBucket> {
+         void updateBucket(TB spare, long globalOrd, long bucketOrd, long docCount) throws IOException;
+     }
+ 
+     /**
+      * Builds results for the standard {@code terms} aggregation.
+      */
+     class StandardTermsResults extends ResultStrategy<StringTerms, StringTerms.Bucket, OrdBucket> {
+         @Override
+         String describe() {
+             return "terms";
+         }
+ 
+         @Override
+         LeafBucketCollector wrapCollector(LeafBucketCollector primary) {
+             return primary;
+         }
+ 
+         @Override
+         StringTerms.Bucket[][] buildTopBucketsPerOrd(int size) {
+             return new StringTerms.Bucket[size][];
+         }
+ 
+         @Override
+         StringTerms.Bucket[] buildBuckets(int size) {
+             return new StringTerms.Bucket[size];
+         }
+ 
+         @Override
+         OrdBucket buildEmptyTemporaryBucket() {
+             return new OrdBucket(showTermDocCountError, format);
+         }
+ 
+         @Override
+         BucketUpdater<OrdBucket> bucketUpdater(long owningBucketOrd) throws IOException {
+             return (spare, globalOrd, bucketOrd, docCount) -> {
+                 spare.globalOrd = globalOrd;
+                 spare.bucketOrd = bucketOrd;
+                 spare.docCount = docCount;
+             };
+         }
+ 
+         @Override
+         PriorityQueue<OrdBucket> buildPriorityQueue(int size) {
+             return new BucketPriorityQueue<>(size, partiallyBuiltBucketComparator);
+         }
+ 
+         @Override
+         StringTerms.Bucket convertTempBucketToRealBucket(OrdBucket temp) throws IOException {
+             BytesRef term = BytesRef.deepCopyOf(lookupGlobalOrd.apply(temp.globalOrd));
+             StringTerms.Bucket result = new StringTerms.Bucket(term, temp.docCount, null, showTermDocCountError, 0, format);
+             result.bucketOrd = temp.bucketOrd;
+             result.docCountError = 0;
+             return result;
+         }
+ 
+         @Override
+         void buildSubAggs(StringTerms.Bucket[][] topBucketsPreOrd) throws IOException {
+             buildSubAggsForAllBuckets(topBucketsPreOrd, b -> b.bucketOrd, (b, aggs) -> b.aggregations = aggs);
+         }
+ 
+         @Override
+         StringTerms buildResult(long owningBucketOrd, long otherDocCount, StringTerms.Bucket[] topBuckets) {
+             final BucketOrder reduceOrder;
+             if (isKeyOrder(order) == false) {
+                 reduceOrder = InternalOrder.key(true);
+                 Arrays.sort(topBuckets, reduceOrder.comparator());
+             } else {
+                 reduceOrder = order;
+             }
+             return new StringTerms(
+                 name,
+                 reduceOrder,
+                 order,
+                 metadata(),
+                 format,
+                 bucketCountThresholds.getShardSize(),
+                 showTermDocCountError,
+                 otherDocCount,
+                 Arrays.asList(topBuckets),
+                 0,
+                 bucketCountThresholds
+             );
+         }
+ 
+         @Override
+         StringTerms buildEmptyResult() {
+             return buildEmptyTermsAggregation();
+         }
+ 
+         @Override
+         StringTerms buildNoValuesResult(long owningBucketOrdinal) {
+             return buildEmptyResult();
+         }
+ 
+         @Override
+         public void close() {}
+     }
+ 
+     /**
+      * Builds results for the {@code significant_terms} aggregation.
+      */
+     class SignificantTermsResults extends ResultStrategy<
+         SignificantStringTerms,
+         SignificantStringTerms.Bucket,
+         SignificantStringTerms.Bucket> {
+ 
+         private final BackgroundFrequencyForBytes backgroundFrequencies;
+         private final long supersetSize;
+         private final SignificanceHeuristic significanceHeuristic;
+ 
+         private LongArray subsetSizes = context.bigArrays().newLongArray(1, true);
+ 
+         SignificantTermsResults(
+             SignificanceLookup significanceLookup,
+             SignificanceHeuristic significanceHeuristic,
+             CardinalityUpperBound cardinality
+         ) {
+             backgroundFrequencies = significanceLookup.bytesLookup(context.bigArrays(), cardinality);
+             supersetSize = significanceLookup.supersetSize();
+             this.significanceHeuristic = significanceHeuristic;
+         }
+ 
+         @Override
+         String describe() {
+             return "significant_terms";
+         }
+ 
+         @Override
+         LeafBucketCollector wrapCollector(LeafBucketCollector primary) {
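+             // count every collected doc per owning bucket so subset sizes are available for significance scoring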
+             return new LeafBucketCollectorBase(primary, null) {
+                 @Override
+                 public void collect(int doc, long owningBucketOrd) throws IOException {
+                     super.collect(doc, owningBucketOrd);
+                     subsetSizes = context.bigArrays().grow(subsetSizes, owningBucketOrd + 1);
+                     subsetSizes.increment(owningBucketOrd, 1);
+                 }
+             };
+         }
+ 
+         @Override
+         SignificantStringTerms.Bucket[][] buildTopBucketsPerOrd(int size) {
+             return new SignificantStringTerms.Bucket[size][];
+         }
+ 
+         @Override
+         SignificantStringTerms.Bucket[] buildBuckets(int size) {
+             return new SignificantStringTerms.Bucket[size];
+         }
+ 
+         @Override
+         SignificantStringTerms.Bucket buildEmptyTemporaryBucket() {
+             return new SignificantStringTerms.Bucket(new BytesRef(), 0, 0, 0, 0, null, format, 0);
+         }
+ 
+         private long subsetSize(long owningBucketOrd) {
+             // if the owningBucketOrd is not in the array that means the bucket is empty so the size has to be 0
+             return owningBucketOrd < subsetSizes.size() ? subsetSizes.get(owningBucketOrd) : 0;
+         }
+ 
+         @Override
+         BucketUpdater<SignificantStringTerms.Bucket> bucketUpdater(long owningBucketOrd) throws IOException {
+             long subsetSize = subsetSize(owningBucketOrd);
+             return (spare, globalOrd, bucketOrd, docCount) -> {
+                 spare.bucketOrd = bucketOrd;
+                 oversizedCopy(lookupGlobalOrd.apply(globalOrd), spare.termBytes);
+                 spare.subsetDf = docCount;
+                 spare.subsetSize = subsetSize;
+                 spare.supersetDf = backgroundFrequencies.freq(spare.termBytes);
+                 spare.supersetSize = supersetSize;
+                 /*
+                  * During shard-local down-selection we use subset/superset stats
+                  * that are for this shard only. Back at the central reducer these
+                  * properties will be updated with global stats.
+                  */
+                 spare.updateScore(significanceHeuristic);
+             };
+         }
+ 
+         @Override
+         PriorityQueue<SignificantStringTerms.Bucket> buildPriorityQueue(int size) {
+             return new BucketSignificancePriorityQueue<>(size);
+         }
+ 
+         @Override
+         SignificantStringTerms.Bucket convertTempBucketToRealBucket(SignificantStringTerms.Bucket temp) throws IOException {
+             return temp;
+         }
+ 
+         @Override
+         void buildSubAggs(SignificantStringTerms.Bucket[][] topBucketsPreOrd) throws IOException {
+             buildSubAggsForAllBuckets(topBucketsPreOrd, b -> b.bucketOrd, (b, aggs) -> b.aggregations = aggs);
+         }
+ 
+         @Override
+         SignificantStringTerms buildResult(long owningBucketOrd, long otherDocCount, SignificantStringTerms.Bucket[] topBuckets) {
+             return new SignificantStringTerms(
+                 name,
+                 metadata(),
+                 format,
+                 subsetSize(owningBucketOrd),
+                 supersetSize,
+                 significanceHeuristic,
+                 Arrays.asList(topBuckets),
+                 bucketCountThresholds
+             );
+         }
+ 
+         @Override
+         SignificantStringTerms buildEmptyResult() {
+             return buildEmptySignificantTermsAggregation(0, significanceHeuristic);
+         }
+ 
+         @Override
+         SignificantStringTerms buildNoValuesResult(long owningBucketOrdinal) {
+             return buildEmptySignificantTermsAggregation(subsetSizes.get(owningBucketOrdinal), significanceHeuristic);
+         }
+ 
+         @Override
+         public void close() {
+             Releasables.close(backgroundFrequencies, subsetSizes);
+         }
+ 
+         /**
+          * Copies the bytes from {@code from} into {@code to}, oversizing
+          * the destination array if the bytes won't fit into the array.
+          * <p>
+          * This is fairly similar in spirit to
+          * {@link BytesRef#deepCopyOf(BytesRef)} in that it is a way to read
+          * bytes from a mutable {@link BytesRef} into
+          * <strong>something</strong> that won't mutate out from under you.
+          * Unlike {@linkplain BytesRef#deepCopyOf(BytesRef)} it's designed to
+          * be run over and over again into the same destination. In particular,
+          * oversizing the destination bytes helps to keep from allocating
+          * a bunch of little arrays again and again.
+          */
+         private void oversizedCopy(BytesRef from, BytesRef to) {
+             if (to.bytes.length < from.length) {
+                 to.bytes = new byte[ArrayUtil.oversize(from.length, 1)];
+             }
+             to.offset = 0;
+             to.length = from.length;
+             System.arraycopy(from.bytes, from.offset, to.bytes, 0, from.length);
+         }
+     }
+ 
+     /**
+      * Predicate used for {@link #acceptedGlobalOrdinals} if there is no filter.
+      */
+     private static final LongPredicate ALWAYS_TRUE = l -> true;
+ }
+
+
+
+ 
diff --git a/htmlReport/ns-1/sources/source-8.html b/htmlReport/ns-1/sources/source-8.html
new file mode 100644
index 0000000000000..f1ad6467fbe71
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-8.html
@@ -0,0 +1,1176 @@
+ Coverage Report > IncludeExclude

Coverage Summary for Class: IncludeExclude (org.opensearch.search.aggregations.bucket.terms)

+ Class                                           Method, %         Line, %
+ IncludeExclude                                  36.7%  (11/30)    19.1%  (48/251)
+ IncludeExclude$AutomatonBackedOrdinalsFilter    100%   (2/2)      100%   (8/8)
+ IncludeExclude$AutomatonBackedStringFilter      0%     (0/2)      0%     (0/3)
+ IncludeExclude$DocValuesTerms                   20%    (2/10)     27.3%  (3/11)
+ IncludeExclude$Filter                           100%   (1/1)      100%   (1/1)
+ IncludeExclude$LongFilter                       0%     (0/1)      0%     (0/1)
+ IncludeExclude$OrdinalsFilter                   100%   (1/1)      100%   (1/1)
+ IncludeExclude$PartitionedLongFilter            0%     (0/2)      0%     (0/3)
+ IncludeExclude$PartitionedOrdinalsFilter        0%     (0/2)      0%     (0/11)
+ IncludeExclude$PartitionedStringFilter          0%     (0/2)      0%     (0/2)
+ IncludeExclude$SetBackedLongFilter              0%     (0/4)      0%     (0/8)
+ IncludeExclude$StringFilter                     0%     (0/1)      0%     (0/1)
+ IncludeExclude$TermListBackedOrdinalsFilter     100%   (2/2)      100%   (19/19)
+ IncludeExclude$TermListBackedStringFilter       0%     (0/2)      0%     (0/4)
+ Total                                           30.6%  (19/62)    24.7%  (80/324)
+ 
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.apache.lucene.index.SortedSetDocValues;
+ import org.apache.lucene.index.Terms;
+ import org.apache.lucene.index.TermsEnum;
+ import org.apache.lucene.util.BytesRef;
+ import org.apache.lucene.util.LongBitSet;
+ import org.apache.lucene.util.NumericUtils;
+ import org.apache.lucene.util.StringHelper;
+ import org.apache.lucene.util.automaton.Automata;
+ import org.apache.lucene.util.automaton.Automaton;
+ import org.apache.lucene.util.automaton.ByteRunAutomaton;
+ import org.apache.lucene.util.automaton.CompiledAutomaton;
+ import org.apache.lucene.util.automaton.Operations;
+ import org.apache.lucene.util.automaton.RegExp;
+ import org.opensearch.OpenSearchParseException;
+ import org.opensearch.common.util.BitMixer;
+ import org.opensearch.core.ParseField;
+ import org.opensearch.core.common.io.stream.StreamInput;
+ import org.opensearch.core.common.io.stream.StreamOutput;
+ import org.opensearch.core.common.io.stream.Writeable;
+ import org.opensearch.core.xcontent.ToXContentFragment;
+ import org.opensearch.core.xcontent.XContentBuilder;
+ import org.opensearch.core.xcontent.XContentParser;
+ import org.opensearch.index.IndexSettings;
+ import org.opensearch.search.DocValueFormat;
+ 
+ import java.io.IOException;
+ import java.util.HashSet;
+ import java.util.Objects;
+ import java.util.Set;
+ import java.util.SortedSet;
+ import java.util.TreeSet;
+ 
+ /**
+  * Defines the include/exclude regular expression filtering for string terms aggregation. In this filtering logic,
+  * exclusion has precedence, where the {@code include} is evaluated first and then the {@code exclude}.
+  *
+  * @opensearch.internal
+  */
+ public class IncludeExclude implements Writeable, ToXContentFragment {
+     public static final ParseField INCLUDE_FIELD = new ParseField("include");
+     public static final ParseField EXCLUDE_FIELD = new ParseField("exclude");
+     public static final ParseField PARTITION_FIELD = new ParseField("partition");
+     public static final ParseField NUM_PARTITIONS_FIELD = new ParseField("num_partitions");
+     // Needed to add this seed for a deterministic term hashing policy
+     // otherwise tests fail to get expected results and worse, shards
+     // can disagree on which terms hash to the required partition.
+     private static final int HASH_PARTITIONING_SEED = 31;
+ 
+     /**
+      * The default length limit for a reg-ex string. The value is derived from {@link IndexSettings#MAX_REGEX_LENGTH_SETTING}.
+      * For context, see:
+      * https://github.com/opensearch-project/OpenSearch/issues/1992
+      * https://github.com/opensearch-project/OpenSearch/issues/2858
+      */
+     private static final int DEFAULT_MAX_REGEX_LENGTH = 1000;
+ 
+     // for parsing purposes only
+     // TODO: move all aggs to the same package so that this stuff could be pkg-private
+     public static IncludeExclude merge(IncludeExclude include, IncludeExclude exclude) {
+         if (include == null) {
+             return exclude;
+         }
+         if (exclude == null) {
+             return include;
+         }
+         if (include.isPartitionBased()) {
+             throw new IllegalArgumentException("Cannot specify any excludes when using a partition-based include");
+         }
+         String includeMethod = include.isRegexBased() ? "regex" : "set";
+         String excludeMethod = exclude.isRegexBased() ? "regex" : "set";
+         if (includeMethod.equals(excludeMethod) == false) {
+             throw new IllegalArgumentException(
+                 "Cannot mix a " + includeMethod + "-based include with a " + excludeMethod + "-based exclude"
+             );
+         }
+         if (include.isRegexBased()) {
+             return new IncludeExclude(include.include, exclude.exclude);
+         } else {
+             return new IncludeExclude(include.includeValues, exclude.excludeValues);
+         }
+     }
+ 
+     public static IncludeExclude parseInclude(XContentParser parser) throws IOException {
+         XContentParser.Token token = parser.currentToken();
+         if (token == XContentParser.Token.VALUE_STRING) {
+             return new IncludeExclude(parser.text(), null);
+         } else if (token == XContentParser.Token.START_ARRAY) {
+             return new IncludeExclude(new TreeSet<>(parseArrayToSet(parser)), null);
+         } else if (token == XContentParser.Token.START_OBJECT) {
+             String currentFieldName = null;
+             Integer partition = null, numPartitions = null;
+             while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+                 if (token == XContentParser.Token.FIELD_NAME) {
+                     currentFieldName = parser.currentName();
+                 } else if (NUM_PARTITIONS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
+                     numPartitions = parser.intValue();
+                 } else if (PARTITION_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
+                     partition = parser.intValue();
+                 } else {
+                     throw new OpenSearchParseException("Unknown parameter in Include/Exclude clause: " + currentFieldName);
+                 }
+             }
+             if (partition == null) {
+                 throw new IllegalArgumentException(
+                     "Missing [" + PARTITION_FIELD.getPreferredName() + "] parameter for partition-based include"
+                 );
+             }
+             if (numPartitions == null) {
+                 throw new IllegalArgumentException(
+                     "Missing [" + NUM_PARTITIONS_FIELD.getPreferredName() + "] parameter for partition-based include"
+                 );
+             }
+             return new IncludeExclude(partition, numPartitions);
+         } else {
+             throw new IllegalArgumentException("Unrecognized token for an include [" + token + "]");
+         }
+     }
+ 
+     public static IncludeExclude parseExclude(XContentParser parser) throws IOException {
+         XContentParser.Token token = parser.currentToken();
+         if (token == XContentParser.Token.VALUE_STRING) {
+             return new IncludeExclude(null, parser.text());
+         } else if (token == XContentParser.Token.START_ARRAY) {
+             return new IncludeExclude(null, new TreeSet<>(parseArrayToSet(parser)));
+         } else {
+             throw new IllegalArgumentException("Unrecognized token for an exclude [" + token + "]");
+         }
+     }
+ 
+     /**
+      * Base filter class
+      *
+      * @opensearch.internal
+      */
+     public abstract static class Filter {}
+ 
+     /**
+      * The includeValues and excludeValues BytesRefs, which are the result of the
+      * parsing process, are converted into a LongFilter when used on numeric fields
+      * in the index.
+      *
+      * @opensearch.internal
+      */
+     public abstract static class LongFilter extends Filter {
+         public abstract boolean accept(long value);
+     }
+ 
+     /**
+      * Long filter that is partitioned
+      *
+      * @opensearch.internal
+      */
+     public class PartitionedLongFilter extends LongFilter {
+         @Override
+         public boolean accept(long value) {
+             // hash the value to keep even distributions
+             final long hashCode = BitMixer.mix64(value);
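+             // floorMod, unlike %, is never negative, so every hash maps into [0, incNumPartitions)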
+             return Math.floorMod(hashCode, incNumPartitions) == incZeroBasedPartition;
+         }
+     }
+ 
+     /**
+      * Long filter backed by valid values
+      *
+      * @opensearch.internal
+      */
+     public static class SetBackedLongFilter extends LongFilter {
+         private Set<Long> valids;
+         private Set<Long> invalids;
+ 
+         private SetBackedLongFilter(int numValids, int numInvalids) {
+             if (numValids > 0) {
+                 valids = new HashSet<>(numValids);
+             }
+             if (numInvalids > 0) {
+                 invalids = new HashSet<>(numInvalids);
+             }
+         }
+ 
+         @Override
+         public boolean accept(long value) {
+             return ((valids == null) || (valids.contains(value))) && ((invalids == null) || (!invalids.contains(value)));
+         }
+ 
+         private void addAccept(long val) {
+             valids.add(val);
+         }
+ 
+         private void addReject(long val) {
+             invalids.add(val);
+         }
+     }
+ 
+     /**
+      * Only used for the 'map' execution mode (i.e. scripts)
+      *
+      * @opensearch.internal
+      */
+     public abstract static class StringFilter extends Filter {
+         public abstract boolean accept(BytesRef value);
+     }
+ 
+     class PartitionedStringFilter extends StringFilter {
+         @Override
+         public boolean accept(BytesRef value) {
+             return Math.floorMod(StringHelper.murmurhash3_x86_32(value, HASH_PARTITIONING_SEED), incNumPartitions) == incZeroBasedPartition;
+         }
+     }
+ 
+     /**
+      * String filter backed by an automaton
+      *
+      * @opensearch.internal
+      */
+     static class AutomatonBackedStringFilter extends StringFilter {
+ 
+         private final ByteRunAutomaton runAutomaton;
+ 
+         private AutomatonBackedStringFilter(Automaton automaton) {
+             this.runAutomaton = new ByteRunAutomaton(automaton);
+         }
+ 
+         /**
+          * Returns whether the given value is accepted based on the {@code include} &amp; {@code exclude} patterns.
+          */
+         @Override
+         public boolean accept(BytesRef value) {
+             return runAutomaton.run(value.bytes, value.offset, value.length);
+         }
+     }
+ 
+     /**
+      * String filter backed by a term list
+      *
+      * @opensearch.internal
+      */
+     static class TermListBackedStringFilter extends StringFilter {
+ 
+         private final Set<BytesRef> valids;
+         private final Set<BytesRef> invalids;
+ 
+         TermListBackedStringFilter(Set<BytesRef> includeValues, Set<BytesRef> excludeValues) {
+             this.valids = includeValues;
+             this.invalids = excludeValues;
+         }
+ 
+         /**
+          * Returns whether the given value is accepted based on the
+          * {@code include} &amp; {@code exclude} sets.
+          */
+         @Override
+         public boolean accept(BytesRef value) {
+             return ((valids == null) || (valids.contains(value))) && ((invalids == null) || (!invalids.contains(value)));
+         }
+     }
+ 
+     /**
+      * An ordinals filter
+      *
+      * @opensearch.internal
+      */
+     public abstract static class OrdinalsFilter extends Filter {
+         public abstract LongBitSet acceptedGlobalOrdinals(SortedSetDocValues globalOrdinals) throws IOException;
+ 
+     }
+ 
+     class PartitionedOrdinalsFilter extends OrdinalsFilter {
+ 
+         @Override
+         public LongBitSet acceptedGlobalOrdinals(SortedSetDocValues globalOrdinals) throws IOException {
+             final long numOrds = globalOrdinals.getValueCount();
+             final LongBitSet acceptedGlobalOrdinals = new LongBitSet(numOrds);
+             final TermsEnum termEnum = globalOrdinals.termsEnum();
+ 
+             BytesRef term = termEnum.next();
+             while (term != null) {
+                 if (Math.floorMod(
+                     StringHelper.murmurhash3_x86_32(term, HASH_PARTITIONING_SEED),
+                     incNumPartitions
+                 ) == incZeroBasedPartition) {
+                     acceptedGlobalOrdinals.set(termEnum.ord());
+                 }
+                 term = termEnum.next();
+             }
+             return acceptedGlobalOrdinals;
+         }
+     }
+ 
+     /**
+      * An ordinals filter backed by an automaton
+      *
+      * @opensearch.internal
+      */
+     static class AutomatonBackedOrdinalsFilter extends OrdinalsFilter {
+ 
+         private final CompiledAutomaton compiled;
+ 
+         private AutomatonBackedOrdinalsFilter(Automaton automaton) {
+             this.compiled = new CompiledAutomaton(automaton);
+         }
+ 
+         /**
+          * Computes which global ordinals are accepted by this IncludeExclude instance.
+          */
+         @Override
+         public LongBitSet acceptedGlobalOrdinals(SortedSetDocValues globalOrdinals) throws IOException {
+             LongBitSet acceptedGlobalOrdinals = new LongBitSet(globalOrdinals.getValueCount());
+             TermsEnum globalTermsEnum;
+             Terms globalTerms = new DocValuesTerms(globalOrdinals);
+             // TODO: specialize based on compiled.type: for ALL and prefixes (sinkState >= 0 ) we can avoid i/o and just set bits.
+             globalTermsEnum = compiled.getTermsEnum(globalTerms);
+             for (BytesRef term = globalTermsEnum.next(); term != null; term = globalTermsEnum.next()) {
+                 acceptedGlobalOrdinals.set(globalTermsEnum.ord());
+             }
+             return acceptedGlobalOrdinals;
+         }
+ 
+     }
+ 
+     /**
+      * An ordinals filter backed by a terms list
+      *
+      * @opensearch.internal
+      */
+     static class TermListBackedOrdinalsFilter extends OrdinalsFilter {
+ 
+         private final SortedSet<BytesRef> includeValues;
+         private final SortedSet<BytesRef> excludeValues;
+ 
+         TermListBackedOrdinalsFilter(SortedSet<BytesRef> includeValues, SortedSet<BytesRef> excludeValues) {
+             this.includeValues = includeValues;
+             this.excludeValues = excludeValues;
+         }
+ 
+         @Override
+         public LongBitSet acceptedGlobalOrdinals(SortedSetDocValues globalOrdinals) throws IOException {
+             LongBitSet acceptedGlobalOrdinals = new LongBitSet(globalOrdinals.getValueCount());
+             if (includeValues != null) {
+                 for (BytesRef term : includeValues) {
+                     long ord = globalOrdinals.lookupTerm(term);
+                     if (ord >= 0) {
+                         acceptedGlobalOrdinals.set(ord);
+                     }
+                 }
+             } else if (acceptedGlobalOrdinals.length() > 0) {
+                 // default to all terms being acceptable
+                 acceptedGlobalOrdinals.set(0, acceptedGlobalOrdinals.length());
+             }
+             if (excludeValues != null) {
+                 for (BytesRef term : excludeValues) {
+                     long ord = globalOrdinals.lookupTerm(term);
+                     if (ord >= 0) {
+                         acceptedGlobalOrdinals.clear(ord);
+                     }
+                 }
+             }
+             return acceptedGlobalOrdinals;
+         }
+ 
+     }
+ 
+     private final String include, exclude;
+     private final SortedSet<BytesRef> includeValues, excludeValues;
+     private final int incZeroBasedPartition;
+     private final int incNumPartitions;
+ 
+     /**
+      * @param include   The string or regular expression pattern for the terms to be included
+      * @param exclude   The string or regular expression pattern for the terms to be excluded
+      */
+     public IncludeExclude(String include, String exclude) {
+         this.include = include;
+         this.exclude = exclude;
+         this.includeValues = null;
+         this.excludeValues = null;
+         this.incZeroBasedPartition = 0;
+         this.incNumPartitions = 0;
+     }
+ 
+     /**
+      * @param includeValues   The terms to be included
+      * @param excludeValues   The terms to be excluded
+      */
+     public IncludeExclude(SortedSet<BytesRef> includeValues, SortedSet<BytesRef> excludeValues) {
+         if (includeValues == null && excludeValues == null) {
+             throw new IllegalArgumentException();
+         }
+         this.include = null;
+         this.exclude = null;
+         this.incZeroBasedPartition = 0;
+         this.incNumPartitions = 0;
+         this.includeValues = includeValues;
+         this.excludeValues = excludeValues;
+     }
+ 
+     public IncludeExclude(String[] includeValues, String[] excludeValues) {
+         this(convertToBytesRefSet(includeValues), convertToBytesRefSet(excludeValues));
+     }
+ 
+     public IncludeExclude(double[] includeValues, double[] excludeValues) {
+         this(convertToBytesRefSet(includeValues), convertToBytesRefSet(excludeValues));
+     }
+ 
+     public IncludeExclude(long[] includeValues, long[] excludeValues) {
+         this(convertToBytesRefSet(includeValues), convertToBytesRefSet(excludeValues));
+     }
+ 
+     public IncludeExclude(int partition, int numPartitions) {
+         if (partition < 0 || partition >= numPartitions) {
+             throw new IllegalArgumentException("Partition must be >= 0 and < numPartitions, which is " + numPartitions);
+         }
+         this.incZeroBasedPartition = partition;
+         this.incNumPartitions = numPartitions;
+         this.include = null;
+         this.exclude = null;
+         this.includeValues = null;
+         this.excludeValues = null;
+ 
+     }
+ 
+     /**
+      * Read from a stream.
+      */
+     public IncludeExclude(StreamInput in) throws IOException {
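+         // wire format mirrors writeTo: a leading boolean selects the regex form (two optional
+         // strings); otherwise the explicit value sets and partition settings follow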
+         if (in.readBoolean()) {
+             includeValues = null;
+             excludeValues = null;
+             incZeroBasedPartition = 0;
+             incNumPartitions = 0;
+             include = in.readOptionalString();
+             exclude = in.readOptionalString();
+             return;
+         }
+         include = null;
+         exclude = null;
+         if (in.readBoolean()) {
+             int size = in.readVInt();
+             includeValues = new TreeSet<>();
+             for (int i = 0; i < size; i++) {
+                 includeValues.add(in.readBytesRef());
+             }
+         } else {
+             includeValues = null;
+         }
+         if (in.readBoolean()) {
+             int size = in.readVInt();
+             excludeValues = new TreeSet<>();
+             for (int i = 0; i < size; i++) {
+                 excludeValues.add(in.readBytesRef());
+             }
+         } else {
+             excludeValues = null;
+         }
+         incNumPartitions = in.readVInt();
+         incZeroBasedPartition = in.readVInt();
+     }
+ 
+     @Override
+     public void writeTo(StreamOutput out) throws IOException {
+         boolean regexBased = isRegexBased();
+         out.writeBoolean(regexBased);
+         if (regexBased) {
+             out.writeOptionalString(include);
+             out.writeOptionalString(exclude);
+         } else {
+             boolean hasIncludes = includeValues != null;
+             out.writeBoolean(hasIncludes);
+             if (hasIncludes) {
+                 out.writeVInt(includeValues.size());
+                 for (BytesRef value : includeValues) {
+                     out.writeBytesRef(value);
+                 }
+             }
+             boolean hasExcludes = excludeValues != null;
+             out.writeBoolean(hasExcludes);
+             if (hasExcludes) {
+                 out.writeVInt(excludeValues.size());
+                 for (BytesRef value : excludeValues) {
+                     out.writeBytesRef(value);
+                 }
+             }
+             out.writeVInt(incNumPartitions);
+             out.writeVInt(incZeroBasedPartition);
+         }
+     }
+ 
+     private static SortedSet<BytesRef> convertToBytesRefSet(String[] values) {
+         SortedSet<BytesRef> returnSet = null;
+         if (values != null) {
+             returnSet = new TreeSet<>();
+             for (String value : values) {
+                 returnSet.add(new BytesRef(value));
+             }
+         }
+         return returnSet;
+     }
+ 
+     private static SortedSet<BytesRef> convertToBytesRefSet(double[] values) {
+         SortedSet<BytesRef> returnSet = null;
+         if (values != null) {
+             returnSet = new TreeSet<>();
+             for (double value : values) {
+                 returnSet.add(new BytesRef(String.valueOf(value)));
+             }
+         }
+         return returnSet;
+     }
+ 
+     private static SortedSet<BytesRef> convertToBytesRefSet(long[] values) {
+         SortedSet<BytesRef> returnSet = null;
+         if (values != null) {
+             returnSet = new TreeSet<>();
+             for (long value : values) {
+                 returnSet.add(new BytesRef(String.valueOf(value)));
+             }
+         }
+         return returnSet;
+     }
+ 
+     /**
+      * Terms adapter around doc values.
+      *
+      * @opensearch.internal
+      */
+     private static class DocValuesTerms extends Terms {
+ 
+         private final SortedSetDocValues values;
+ 
+         DocValuesTerms(SortedSetDocValues values) {
+             this.values = values;
+         }
+ 
+         @Override
+         public TermsEnum iterator() throws IOException {
+             return values.termsEnum();
+         }
+ 
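+         // per the Lucene Terms contract, -1 from the stats methods below means "not available"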
+         @Override
+         public long size() throws IOException {
+             return -1;
+         }
+ 
+         @Override
+         public long getSumTotalTermFreq() throws IOException {
+             return -1;
+         }
+ 
+         @Override
+         public long getSumDocFreq() throws IOException {
+             return -1;
+         }
+ 
+         @Override
+         public int getDocCount() throws IOException {
+             return -1;
+         }
+ 
+         @Override
+         public boolean hasFreqs() {
+             return false;
+         }
+ 
+         @Override
+         public boolean hasOffsets() {
+             return false;
+         }
+ 
+         @Override
+         public boolean hasPositions() {
+             return false;
+         }
+ 
+         @Override
+         public boolean hasPayloads() {
+             return false;
+         }
+ 
+     }
+ 
+     private static Set<BytesRef> parseArrayToSet(XContentParser parser) throws IOException {
+         final Set<BytesRef> set = new HashSet<>();
+         if (parser.currentToken() != XContentParser.Token.START_ARRAY) {
+             throw new OpenSearchParseException("Missing start of array in include/exclude clause");
+         }
+         while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
+             if (!parser.currentToken().isValue()) {
+                 throw new OpenSearchParseException("Array elements in include/exclude clauses should be string values");
+             }
+             set.add(new BytesRef(parser.text()));
+         }
+         return set;
+     }
+ 
+     public boolean isRegexBased() {
+         return include != null || exclude != null;
+     }
+ 
+     public boolean isPartitionBased() {
+         return incNumPartitions > 0;
+     }
+ 
+     private Automaton toAutomaton(int maxRegExLength) {
+         Automaton a;
+         if (include != null) {
+             validateRegExpStringLength(include, maxRegExLength);
+             a = new RegExp(include).toAutomaton();
+         } else if (includeValues != null) {
+             a = Automata.makeStringUnion(includeValues);
+         } else {
+             a = Automata.makeAnyString();
+         }
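+         // the exclude automaton is subtracted last, so exclusion always takes precedence over inclusion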
+         if (exclude != null) {
+             validateRegExpStringLength(exclude, maxRegExLength);
+             Automaton excludeAutomaton = new RegExp(exclude).toAutomaton();
+             a = Operations.minus(a, excludeAutomaton, Operations.DEFAULT_DETERMINIZE_WORK_LIMIT);
+         } else if (excludeValues != null) {
+             a = Operations.minus(a, Automata.makeStringUnion(excludeValues), Operations.DEFAULT_DETERMINIZE_WORK_LIMIT);
+         }
+         return a;
+     }
+ 
+     private static void validateRegExpStringLength(String source, int maxRegexLength) {
+         if (maxRegexLength > 0 && source.length() > maxRegexLength) {
+             throw new IllegalArgumentException(
+                 "The length of regex ["
+                     + source.length()
+                     + "] used in the request has exceeded "
+                     + "the allowed maximum of ["
+                     + maxRegexLength
+                     + "]. "
+                     + "This maximum can be set by changing the ["
+                     + IndexSettings.MAX_REGEX_LENGTH_SETTING.getKey()
+                     + "] index level setting."
+             );
+         }
+     }
+ 
+     /**
+      * Wrapper method that imposes a default regex limit.
+      * See https://github.com/opensearch-project/OpenSearch/issues/2858
+      */
+     public StringFilter convertToStringFilter(DocValueFormat format) {
+         return convertToStringFilter(format, DEFAULT_MAX_REGEX_LENGTH);
+     }
+ 
+     public StringFilter convertToStringFilter(DocValueFormat format, int maxRegexLength) {
+         if (isRegexBased()) {
+             return new AutomatonBackedStringFilter(toAutomaton(maxRegexLength));
+         }
+         if (isPartitionBased()) {
+             return new PartitionedStringFilter();
+         }
+         return new TermListBackedStringFilter(parseForDocValues(includeValues, format), parseForDocValues(excludeValues, format));
+     }
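+ 
+     // Illustrative usage only; the literal values below are assumptions, not taken from this file:
+     //   IncludeExclude ie = new IncludeExclude("warn.*", null);             // regex include, no exclude
+     //   StringFilter filter = ie.convertToStringFilter(DocValueFormat.RAW); // uses DEFAULT_MAX_REGEX_LENGTH
+     //   boolean kept = filter.accept(new BytesRef("warning"));              // true: "warning" matches warn.*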
+ 
+     private static SortedSet<BytesRef> parseForDocValues(SortedSet<BytesRef> endUserFormattedValues, DocValueFormat format) {
+         SortedSet<BytesRef> result = endUserFormattedValues;
+         if (endUserFormattedValues != null) {
+             if (format != DocValueFormat.RAW) {
+                 result = new TreeSet<>();
+                 for (BytesRef formattedVal : endUserFormattedValues) {
+                     result.add(format.parseBytesRef(formattedVal.utf8ToString()));
+                 }
+             }
+         }
+         return result;
+     }
+ 
+     /**
+      * Wrapper method that imposes a default regex limit.
+      * See https://github.com/opensearch-project/OpenSearch/issues/2858
+      */
+     public OrdinalsFilter convertToOrdinalsFilter(DocValueFormat format) {
+         return convertToOrdinalsFilter(format, DEFAULT_MAX_REGEX_LENGTH);
+     }
+ 
+     public OrdinalsFilter convertToOrdinalsFilter(DocValueFormat format, int maxRegexLength) {
+ 
+         if (isRegexBased()) {
+             return new AutomatonBackedOrdinalsFilter(toAutomaton(maxRegexLength));
+         }
+         if (isPartitionBased()) {
+             return new PartitionedOrdinalsFilter();
+         }
+ 
+         return new TermListBackedOrdinalsFilter(parseForDocValues(includeValues, format), parseForDocValues(excludeValues, format));
+     }
+ 
+     public LongFilter convertToLongFilter(DocValueFormat format) {
+ 
+         if (isPartitionBased()) {
+             return new PartitionedLongFilter();
+         }
+ 
+         int numValids = includeValues == null ? 0 : includeValues.size();
+         int numInvalids = excludeValues == null ? 0 : excludeValues.size();
+         SetBackedLongFilter result = new SetBackedLongFilter(numValids, numInvalids);
+         if (includeValues != null) {
+             for (BytesRef val : includeValues) {
+                 result.addAccept(format.parseLong(val.utf8ToString(), false, null));
+             }
+         }
+         if (excludeValues != null) {
+             for (BytesRef val : excludeValues) {
+                 result.addReject(format.parseLong(val.utf8ToString(), false, null));
+             }
+         }
+         return result;
+     }
+ 
+     public LongFilter convertToDoubleFilter() {
+         if (isPartitionBased()) {
+             return new PartitionedLongFilter();
+         }
+ 
+         int numValids = includeValues == null ? 0 : includeValues.size();
+         int numInvalids = excludeValues == null ? 0 : excludeValues.size();
+         SetBackedLongFilter result = new SetBackedLongFilter(numValids, numInvalids);
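+         // doubles are mapped through their sortable-long encoding so they can share SetBackedLongFilter with longs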
+         if (includeValues != null) {
+             for (BytesRef val : includeValues) {
+                 double dval = Double.parseDouble(val.utf8ToString());
+                 result.addAccept(NumericUtils.doubleToSortableLong(dval));
+             }
+         }
+         if (excludeValues != null) {
+             for (BytesRef val : excludeValues) {
+                 double dval = Double.parseDouble(val.utf8ToString());
+                 result.addReject(NumericUtils.doubleToSortableLong(dval));
+             }
+         }
+         return result;
+     }
+ 
+     @Override
+     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+         if (include != null) {
+             builder.field(INCLUDE_FIELD.getPreferredName(), include);
+         } else if (includeValues != null) {
+             builder.startArray(INCLUDE_FIELD.getPreferredName());
+             for (BytesRef value : includeValues) {
+                 builder.value(value.utf8ToString());
+             }
+             builder.endArray();
+         } else if (isPartitionBased()) {
+             builder.startObject(INCLUDE_FIELD.getPreferredName());
+             builder.field(PARTITION_FIELD.getPreferredName(), incZeroBasedPartition);
+             builder.field(NUM_PARTITIONS_FIELD.getPreferredName(), incNumPartitions);
+             builder.endObject();
+         }
+         if (exclude != null) {
+             builder.field(EXCLUDE_FIELD.getPreferredName(), exclude);
+         } else if (excludeValues != null) {
+             builder.startArray(EXCLUDE_FIELD.getPreferredName());
+             for (BytesRef value : excludeValues) {
+                 builder.value(value.utf8ToString());
+             }
+             builder.endArray();
+         }
+         return builder;
+     }
+ 
+     @Override
+     public int hashCode() {
+         return Objects.hash(include, exclude, includeValues, excludeValues, incZeroBasedPartition, incNumPartitions);
+     }
+ 
+     @Override
+     public boolean equals(Object obj) {
+         if (obj == null) {
+             return false;
+         }
+         if (getClass() != obj.getClass()) {
+             return false;
+         }
+         IncludeExclude other = (IncludeExclude) obj;
+         return Objects.equals(include, other.include)
+             && Objects.equals(exclude, other.exclude)
+             && Objects.equals(includeValues, other.includeValues)
+             && Objects.equals(excludeValues, other.excludeValues)
+             && Objects.equals(incZeroBasedPartition, other.incZeroBasedPartition)
+             && Objects.equals(incNumPartitions, other.incNumPartitions);
+     }
+ 
+ }
+
+
+
diff --git a/htmlReport/ns-1/sources/source-9.html b/htmlReport/ns-1/sources/source-9.html
new file mode 100644
index 0000000000000..703ee9280815d
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-9.html
@@ -0,0 +1,310 @@
+ Coverage Report > InternalMappedRareTerms
+ + +

Coverage Summary for Class: InternalMappedRareTerms (org.opensearch.search.aggregations.bucket.terms)

+ + + + + + + + + + + + + + + +
Class + Class, % + + Method, % + + Line, % +
InternalMappedRareTerms + + 0% + + + (0/1) + + + + 0% + + + (0/11) + + + + 0% + + + (0/59) + +
+ +
+
+ + +
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.apache.logging.log4j.LogManager;
+ import org.apache.logging.log4j.Logger;
+ import org.apache.lucene.util.CollectionUtil;
+ import org.opensearch.common.Randomness;
+ import org.opensearch.common.util.SetBackedScalingCuckooFilter;
+ import org.opensearch.core.common.io.stream.StreamInput;
+ import org.opensearch.core.common.io.stream.StreamOutput;
+ import org.opensearch.core.xcontent.XContentBuilder;
+ import org.opensearch.search.DocValueFormat;
+ import org.opensearch.search.aggregations.AggregationExecutionException;
+ import org.opensearch.search.aggregations.BucketOrder;
+ import org.opensearch.search.aggregations.InternalAggregation;
+ 
+ import java.io.IOException;
+ import java.util.ArrayList;
+ import java.util.HashMap;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Objects;
+ import java.util.function.Function;
+ import java.util.stream.Collectors;
+ 
+ /**
+  * Implementation of mapped rare terms
+  *
+  * @opensearch.internal
+  */
+ public abstract class InternalMappedRareTerms<A extends InternalRareTerms<A, B>, B extends InternalRareTerms.Bucket<B>> extends
+     InternalRareTerms<A, B> {
+ 
+     protected DocValueFormat format;
+     protected List<B> buckets;
+     protected Map<String, B> bucketMap;
+ 
+     final SetBackedScalingCuckooFilter filter;
+ 
+     protected final Logger logger = LogManager.getLogger(getClass());
+ 
+     InternalMappedRareTerms(
+         String name,
+         BucketOrder order,
+         Map<String, Object> metadata,
+         DocValueFormat format,
+         List<B> buckets,
+         long maxDocCount,
+         SetBackedScalingCuckooFilter filter
+     ) {
+         super(name, order, maxDocCount, metadata);
+         this.format = format;
+         this.buckets = buckets;
+         this.filter = filter;
+     }
+ 
+     public long getMaxDocCount() {
+         return maxDocCount;
+     }
+ 
+     SetBackedScalingCuckooFilter getFilter() {
+         return filter;
+     }
+ 
+     /**
+      * Read from a stream.
+      */
+     InternalMappedRareTerms(StreamInput in, Bucket.Reader<B> bucketReader) throws IOException {
+         super(in);
+         format = in.readNamedWriteable(DocValueFormat.class);
+         buckets = in.readList(stream -> bucketReader.read(stream, format));
+         filter = new SetBackedScalingCuckooFilter(in, Randomness.get());
+     }
+ 
+     @Override
+     protected void writeTermTypeInfoTo(StreamOutput out) throws IOException {
+         out.writeNamedWriteable(format);
+         out.writeList(buckets);
+         filter.writeTo(out);
+     }
+ 
+     @Override
+     public InternalAggregation reduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
+         Map<Object, List<B>> buckets = new HashMap<>();
+         InternalRareTerms<A, B> referenceTerms = null;
+         SetBackedScalingCuckooFilter filter = null;
+ 
+         for (InternalAggregation aggregation : aggregations) {
+             // Unmapped rare terms don't have a cuckoo filter so we'll skip all this work
+             // and save some type casting headaches later.
+             if (aggregation.isMapped() == false) {
+                 continue;
+             }
+ 
+             @SuppressWarnings("unchecked")
+             InternalRareTerms<A, B> terms = (InternalRareTerms<A, B>) aggregation;
+             if (referenceTerms == null && aggregation.getClass().equals(UnmappedRareTerms.class) == false) {
+                 referenceTerms = terms;
+             }
+             if (referenceTerms != null
+                 && referenceTerms.getClass().equals(terms.getClass()) == false
+                 && terms.getClass().equals(UnmappedRareTerms.class) == false) {
+                 // control reaches this block when the same field name that the query executes against
+                 // is mapped to different types in different indices.
+                 throw new AggregationExecutionException(
+                     "Merging/Reducing the aggregations failed when computing the aggregation ["
+                         + referenceTerms.getName()
+                         + "] because the field you gave in the aggregation query existed as two different "
+                         + "types in two different indices"
+                 );
+             }
+             for (B bucket : terms.getBuckets()) {
+                 List<B> bucketList = buckets.computeIfAbsent(bucket.getKey(), k -> new ArrayList<>());
+                 bucketList.add(bucket);
+             }
+ 
+             SetBackedScalingCuckooFilter otherFilter = ((InternalMappedRareTerms) aggregation).getFilter();
+             if (filter == null) {
+                 filter = new SetBackedScalingCuckooFilter(otherFilter);
+             } else {
+                 filter.merge(otherFilter);
+             }
+         }
+ 
+         final List<B> rare = new ArrayList<>();
+         for (List<B> sameTermBuckets : buckets.values()) {
+             final B b = reduceBucket(sameTermBuckets, reduceContext);
+             if (b.getDocCount() <= maxDocCount && containsTerm(filter, b) == false) {
+                 rare.add(b);
+                 reduceContext.consumeBucketsAndMaybeBreak(1);
+             } else if (b.getDocCount() > maxDocCount) {
+                 // this term has gone over threshold while merging, so add it to the filter.
+                 // Note this may happen during incremental reductions too
+                 addToFilter(filter, b);
+             }
+         }
+         CollectionUtil.introSort(rare, order.comparator());
+         return createWithFilter(name, rare, filter);
+     }
+ 
+     public abstract boolean containsTerm(SetBackedScalingCuckooFilter filter, B bucket);
+ 
+     public abstract void addToFilter(SetBackedScalingCuckooFilter filter, B bucket);
+ 
+     @Override
+     public List<B> getBuckets() {
+         return buckets;
+     }
+ 
+     @Override
+     public B getBucketByKey(String term) {
+         if (bucketMap == null) {
+             bucketMap = buckets.stream().collect(Collectors.toMap(InternalRareTerms.Bucket::getKeyAsString, Function.identity()));
+         }
+         return bucketMap.get(term);
+     }
+ 
+     @Override
+     public boolean equals(Object obj) {
+         if (this == obj) return true;
+         if (obj == null || getClass() != obj.getClass()) return false;
+         if (super.equals(obj) == false) return false;
+         InternalMappedRareTerms<?, ?> that = (InternalMappedRareTerms<?, ?>) obj;
+         return Objects.equals(buckets, that.buckets) && Objects.equals(format, that.format) && Objects.equals(filter, that.filter);
+     }
+ 
+     @Override
+     public int hashCode() {
+         return Objects.hash(super.hashCode(), buckets, format, filter);
+     }
+ 
+     @Override
+     public final XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
+         return doXContentCommon(builder, params, buckets);
+     }
+ }
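+ 
+ // Editor's note: a minimal sketch (hypothetical class, illustrative names) of the reduce()
+ // above. A HashSet stands in for the SetBackedScalingCuckooFilter: per-term doc counts from
+ // each shard are merged, terms that cross maxDocCount are remembered as rejected (the real
+ // filter also persists rejections across incremental reductions), and only terms that stay
+ // rare and were never rejected survive.
+ class RareTermsReduceSketch {
+     static java.util.List<String> reduce(java.util.List<java.util.Map<String, Long>> shardCounts, long maxDocCount) {
+         java.util.Map<String, Long> merged = new java.util.HashMap<>();
+         java.util.Set<String> rejected = new java.util.HashSet<>(); // stand-in for the cuckoo filter
+         for (java.util.Map<String, Long> shard : shardCounts) {
+             shard.forEach((term, count) -> merged.merge(term, count, Long::sum));
+         }
+         java.util.List<String> rare = new java.util.ArrayList<>();
+         for (java.util.Map.Entry<String, Long> e : merged.entrySet()) {
+             if (e.getValue() > maxDocCount) {
+                 rejected.add(e.getKey()); // went over the threshold while merging
+             } else if (rejected.contains(e.getKey()) == false) {
+                 rare.add(e.getKey());
+             }
+         }
+         return rare;
+     }
+ }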
+
+
+
diff --git a/htmlReport/ns-1/sources/source-a.html b/htmlReport/ns-1/sources/source-a.html
new file mode 100644
index 0000000000000..38079b0635f9a
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-a.html
@@ -0,0 +1,273 @@
+ Coverage Report > InternalMappedSignificantTerms
+ 
+ Coverage Summary for Class: InternalMappedSignificantTerms (org.opensearch.search.aggregations.bucket.terms)
+ 
+ Class                             Class, %     Method, %     Line, %
+ InternalMappedSignificantTerms    0% (0/1)     0% (0/12)     0% (0/43)
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ 
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.opensearch.core.common.io.stream.StreamInput;
+ import org.opensearch.core.common.io.stream.StreamOutput;
+ import org.opensearch.core.xcontent.XContentBuilder;
+ import org.opensearch.search.DocValueFormat;
+ import org.opensearch.search.aggregations.bucket.terms.heuristic.SignificanceHeuristic;
+ 
+ import java.io.IOException;
+ import java.util.Iterator;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Objects;
+ import java.util.function.Function;
+ import java.util.stream.Collectors;
+ 
+ /**
+  * Implementation of mapped significant terms
+  *
+  * @opensearch.internal
+  */
+ public abstract class InternalMappedSignificantTerms<
+     A extends InternalMappedSignificantTerms<A, B>,
+     B extends InternalSignificantTerms.Bucket<B>> extends InternalSignificantTerms<A, B> {
+ 
+     protected final DocValueFormat format;
+     protected final long subsetSize;
+     protected final long supersetSize;
+     protected final SignificanceHeuristic significanceHeuristic;
+     protected final List<B> buckets;
+     protected Map<String, B> bucketMap;
+ 
+     protected InternalMappedSignificantTerms(
+         String name,
+         Map<String, Object> metadata,
+         DocValueFormat format,
+         long subsetSize,
+         long supersetSize,
+         SignificanceHeuristic significanceHeuristic,
+         List<B> buckets,
+         TermsAggregator.BucketCountThresholds bucketCountThresholds
+     ) {
+         super(name, bucketCountThresholds, metadata);
+         this.format = format;
+         this.buckets = buckets;
+         this.subsetSize = subsetSize;
+         this.supersetSize = supersetSize;
+         this.significanceHeuristic = significanceHeuristic;
+     }
+ 
+     protected InternalMappedSignificantTerms(StreamInput in, Bucket.Reader<B> bucketReader) throws IOException {
+         super(in);
+         format = in.readNamedWriteable(DocValueFormat.class);
+         subsetSize = in.readVLong();
+         supersetSize = in.readVLong();
+         significanceHeuristic = in.readNamedWriteable(SignificanceHeuristic.class);
+         buckets = in.readList(stream -> bucketReader.read(stream, subsetSize, supersetSize, format));
+     }
+ 
+     @Override
+     protected final void writeTermTypeInfoTo(StreamOutput out) throws IOException {
+         out.writeNamedWriteable(format);
+         out.writeVLong(subsetSize);
+         out.writeVLong(supersetSize);
+         out.writeNamedWriteable(significanceHeuristic);
+         out.writeList(buckets);
+     }
+ 
+     @Override
+     public Iterator<SignificantTerms.Bucket> iterator() {
+         return buckets.stream().map(bucket -> (SignificantTerms.Bucket) bucket).collect(Collectors.toList()).iterator();
+     }
+ 
+     @Override
+     public List<B> getBuckets() {
+         return buckets;
+     }
+ 
+     @Override
+     public B getBucketByKey(String term) {
+         if (bucketMap == null) {
+             bucketMap = buckets.stream().collect(Collectors.toMap(InternalSignificantTerms.Bucket::getKeyAsString, Function.identity()));
+         }
+         return bucketMap.get(term);
+     }
+ 
+     @Override
+     protected long getSubsetSize() {
+         return subsetSize;
+     }
+ 
+     @Override
+     protected long getSupersetSize() {
+         return supersetSize;
+     }
+ 
+     @Override
+     protected SignificanceHeuristic getSignificanceHeuristic() {
+         return significanceHeuristic;
+     }
+ 
+     @Override
+     public boolean equals(Object obj) {
+         if (this == obj) return true;
+         if (obj == null || getClass() != obj.getClass()) return false;
+         if (super.equals(obj) == false) return false;
+ 
+         InternalMappedSignificantTerms<?, ?> that = (InternalMappedSignificantTerms<?, ?>) obj;
+         return Objects.equals(format, that.format)
+             && subsetSize == that.subsetSize
+             && supersetSize == that.supersetSize
+             && Objects.equals(significanceHeuristic, that.significanceHeuristic)
+             && Objects.equals(buckets, that.buckets)
+             && Objects.equals(bucketMap, that.bucketMap);
+     }
+ 
+     @Override
+     public int hashCode() {
+         return Objects.hash(super.hashCode(), format, subsetSize, supersetSize, significanceHeuristic, buckets, bucketMap);
+     }
+ 
+     @Override
+     public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
+         builder.field(CommonFields.DOC_COUNT.getPreferredName(), subsetSize);
+         builder.field(BG_COUNT, supersetSize);
+         builder.startArray(CommonFields.BUCKETS.getPreferredName());
+         for (Bucket bucket : buckets) {
+             // There is a condition (presumably when only one shard has a bucket?) where reduce is not called,
+             // leaving buckets that contravene the user's min_doc_count criteria in the reducer
+             if (bucket.subsetDf >= minDocCount) {
+                 bucket.toXContent(builder, params);
+             }
+         }
+         builder.endArray();
+         return builder;
+     }
+ }
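+ 
+ // Editor's note: scoring is delegated to a pluggable SignificanceHeuristic. The sketch below
+ // (hypothetical class, NOT the formula of any built-in heuristic) only illustrates what the
+ // four counts carried by this class measure: foreground vs. background term frequency.
+ class SignificanceRatioSketch {
+     static double score(long subsetDf, long subsetSize, long supersetDf, long supersetSize) {
+         double foreground = (double) subsetDf / subsetSize;     // frequency within the query results
+         double background = (double) supersetDf / supersetSize; // frequency within the whole index
+         return background == 0 ? 0 : foreground / background;   // > 1 means over-represented in the foreground
+     }
+ }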
+
+
+
diff --git a/htmlReport/ns-1/sources/source-b.html b/htmlReport/ns-1/sources/source-b.html
new file mode 100644
index 0000000000000..71935e8deb867
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-b.html
@@ -0,0 +1,269 @@
+ Coverage Report > InternalMappedTerms
+ 
+ Coverage Summary for Class: InternalMappedTerms (org.opensearch.search.aggregations.bucket.terms)
+ 
+ Class                  Class, %      Method, %     Line, %
+ InternalMappedTerms    100% (1/1)    50% (6/12)    30% (12/40)
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ 
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.opensearch.core.common.io.stream.StreamInput;
+ import org.opensearch.core.common.io.stream.StreamOutput;
+ import org.opensearch.core.xcontent.XContentBuilder;
+ import org.opensearch.search.DocValueFormat;
+ import org.opensearch.search.aggregations.BucketOrder;
+ 
+ import java.io.IOException;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Objects;
+ import java.util.function.Function;
+ import java.util.stream.Collectors;
+ 
+ /**
+  * Common superclass for results of the terms aggregation on mapped fields.
+  *
+  * @opensearch.internal
+  */
+ public abstract class InternalMappedTerms<A extends InternalTerms<A, B>, B extends InternalTerms.Bucket<B>> extends InternalTerms<A, B> {
+     protected final DocValueFormat format;
+     protected final int shardSize;
+     protected final boolean showTermDocCountError;
+     protected final long otherDocCount;
+     protected final List<B> buckets;
+     protected Map<String, B> bucketMap;
+ 
+     protected long docCountError;
+ 
+     protected InternalMappedTerms(
+         String name,
+         BucketOrder reduceOrder,
+         BucketOrder order,
+         Map<String, Object> metadata,
+         DocValueFormat format,
+         int shardSize,
+         boolean showTermDocCountError,
+         long otherDocCount,
+         List<B> buckets,
+         long docCountError,
+         TermsAggregator.BucketCountThresholds bucketCountThresholds
+     ) {
+         super(name, reduceOrder, order, bucketCountThresholds, metadata);
+         this.format = format;
+         this.shardSize = shardSize;
+         this.showTermDocCountError = showTermDocCountError;
+         this.otherDocCount = otherDocCount;
+         this.docCountError = docCountError;
+         this.buckets = buckets;
+     }
+ 
+     /**
+      * Read from a stream.
+      */
+     protected InternalMappedTerms(StreamInput in, Bucket.Reader<B> bucketReader) throws IOException {
+         super(in);
+         docCountError = in.readZLong();
+         format = in.readNamedWriteable(DocValueFormat.class);
+         shardSize = readSize(in);
+         showTermDocCountError = in.readBoolean();
+         otherDocCount = in.readVLong();
+         buckets = in.readList(stream -> bucketReader.read(stream, format, showTermDocCountError));
+     }
+ 
+     @Override
+     protected final void writeTermTypeInfoTo(StreamOutput out) throws IOException {
+         out.writeZLong(docCountError);
+         out.writeNamedWriteable(format);
+         writeSize(shardSize, out);
+         out.writeBoolean(showTermDocCountError);
+         out.writeVLong(otherDocCount);
+         out.writeList(buckets);
+     }
+ 
+     @Override
+     protected void setDocCountError(long docCountError) {
+         this.docCountError = docCountError;
+     }
+ 
+     @Override
+     protected int getShardSize() {
+         return shardSize;
+     }
+ 
+     @Override
+     public long getDocCountError() {
+         return docCountError;
+     }
+ 
+     @Override
+     public long getSumOfOtherDocCounts() {
+         return otherDocCount;
+     }
+ 
+     @Override
+     public List<B> getBuckets() {
+         return buckets;
+     }
+ 
+     @Override
+     public B getBucketByKey(String term) {
+         if (bucketMap == null) {
+             bucketMap = buckets.stream().collect(Collectors.toMap(Bucket::getKeyAsString, Function.identity()));
+         }
+         return bucketMap.get(term);
+     }
+ 
+     @Override
+     public boolean equals(Object obj) {
+         if (this == obj) return true;
+         if (obj == null || getClass() != obj.getClass()) return false;
+         if (super.equals(obj) == false) return false;
+ 
+         InternalMappedTerms<?, ?> that = (InternalMappedTerms<?, ?>) obj;
+         return Objects.equals(buckets, that.buckets)
+             && Objects.equals(format, that.format)
+             && Objects.equals(otherDocCount, that.otherDocCount)
+             && Objects.equals(showTermDocCountError, that.showTermDocCountError)
+             && Objects.equals(shardSize, that.shardSize)
+             && Objects.equals(docCountError, that.docCountError);
+     }
+ 
+     @Override
+     public int hashCode() {
+         return Objects.hash(super.hashCode(), buckets, format, otherDocCount, showTermDocCountError, shardSize);
+     }
+ 
+     @Override
+     public final XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
+         return doXContentCommon(builder, params, docCountError, otherDocCount, buckets);
+     }
+ }
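+ 
+ // Editor's note: docCountError is written with writeZLong above because it can legitimately be
+ // negative (-1 is used as an "unknown" sentinel in these classes). A minimal sketch of the
+ // zig-zag mapping that such encodings rely on, assuming 64-bit longs (illustrative, not the
+ // actual StreamOutput implementation):
+ class ZigZagSketch {
+     static long encode(long n) { return (n << 1) ^ (n >> 63); } // -1 -> 1, 0 -> 0, 1 -> 2
+     static long decode(long z) { return (z >>> 1) ^ -(z & 1); } // inverse mapping
+ }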
+
+
+
diff --git a/htmlReport/ns-1/sources/source-c.html b/htmlReport/ns-1/sources/source-c.html
new file mode 100644
index 0000000000000..8031a729465be
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-c.html
@@ -0,0 +1,592 @@
+ Coverage Report > InternalMultiTerms
+ 
+ Coverage Summary for Class: InternalMultiTerms (org.opensearch.search.aggregations.bucket.terms)
+ 
+ Class                                        Method, %     Line, %
+ InternalMultiTerms                           0% (0/19)     0% (0/65)
+ InternalMultiTerms$Bucket                    0% (0/17)     0% (0/54)
+ InternalMultiTerms$Bucket$BucketComparator   0% (0/2)      0% (0/10)
+ Total                                        0% (0/38)     0% (0/129)
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.apache.lucene.util.BytesRef;
+ import org.opensearch.core.common.io.stream.StreamInput;
+ import org.opensearch.core.common.io.stream.StreamOutput;
+ import org.opensearch.core.xcontent.XContentBuilder;
+ import org.opensearch.search.DocValueFormat;
+ import org.opensearch.search.aggregations.AggregationExecutionException;
+ import org.opensearch.search.aggregations.Aggregations;
+ import org.opensearch.search.aggregations.BucketOrder;
+ import org.opensearch.search.aggregations.InternalAggregations;
+ import org.opensearch.search.aggregations.KeyComparable;
+ 
+ import java.io.IOException;
+ import java.util.ArrayList;
+ import java.util.Comparator;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Objects;
+ import java.util.function.Function;
+ import java.util.stream.Collectors;
+ 
+ /**
+  * Result of the {@link MultiTermsAggregator}.
+  *
+  * @opensearch.internal
+  */
+ public class InternalMultiTerms extends InternalTerms<InternalMultiTerms, InternalMultiTerms.Bucket> {
+     /**
+      * Internal Multi Terms Bucket.
+      *
+      * @opensearch.internal
+      */
+     public static class Bucket extends InternalTerms.AbstractInternalBucket implements KeyComparable<Bucket> {
+ 
+         protected long bucketOrd;
+         /**
+          * list of terms values.
+          */
+         protected List<Object> termValues;
+         protected long docCount;
+         protected InternalAggregations aggregations;
+         protected boolean showDocCountError;
+         protected long docCountError;
+         /**
+          * A list of term's {@link DocValueFormat}.
+          */
+         protected final List<DocValueFormat> termFormats;
+ 
+         private static final String PIPE = "|";
+ 
+         /**
+          * Create default {@link Bucket}.
+          */
+         public static Bucket EMPTY(boolean showTermDocCountError, List<DocValueFormat> formats) {
+             return new Bucket(null, 0, null, showTermDocCountError, 0, formats);
+         }
+ 
+         public Bucket(
+             List<Object> values,
+             long docCount,
+             InternalAggregations aggregations,
+             boolean showDocCountError,
+             long docCountError,
+             List<DocValueFormat> formats
+         ) {
+             this.termValues = values;
+             this.docCount = docCount;
+             this.aggregations = aggregations;
+             this.showDocCountError = showDocCountError;
+             this.docCountError = docCountError;
+             this.termFormats = formats;
+         }
+ 
+         public Bucket(StreamInput in, List<DocValueFormat> formats, boolean showDocCountError) throws IOException {
+             this.termValues = in.readList(StreamInput::readGenericValue);
+             this.docCount = in.readVLong();
+             this.aggregations = InternalAggregations.readFrom(in);
+             this.showDocCountError = showDocCountError;
+             this.docCountError = -1;
+             if (showDocCountError) {
+                 this.docCountError = in.readLong();
+             }
+             this.termFormats = formats;
+         }
+ 
+         @Override
+         public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+             builder.startObject();
+             builder.field(CommonFields.KEY.getPreferredName(), getKey());
+             builder.field(CommonFields.KEY_AS_STRING.getPreferredName(), getKeyAsString());
+             builder.field(CommonFields.DOC_COUNT.getPreferredName(), getDocCount());
+             if (showDocCountError) {
+                 builder.field(DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME.getPreferredName(), getDocCountError());
+             }
+             aggregations.toXContentInternal(builder, params);
+             builder.endObject();
+             return builder;
+         }
+ 
+         @Override
+         public void writeTo(StreamOutput out) throws IOException {
+             out.writeCollection(termValues, StreamOutput::writeGenericValue);
+             out.writeVLong(docCount);
+             aggregations.writeTo(out);
+             if (showDocCountError) {
+                 out.writeLong(docCountError);
+             }
+         }
+ 
+         @Override
+         public List<Object> getKey() {
+             List<Object> keys = new ArrayList<>(termValues.size());
+             for (int i = 0; i < termValues.size(); i++) {
+                 keys.add(formatObject(termValues.get(i), termFormats.get(i)));
+             }
+             return keys;
+         }
+ 
+         @Override
+         public String getKeyAsString() {
+             return getKey().stream().map(Object::toString).collect(Collectors.joining(PIPE));
+         }
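+         // Editor's note: with termValues ["error", 503], getKeyAsString() above yields "error|503".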
+ 
+         @Override
+         public long getDocCount() {
+             return docCount;
+         }
+ 
+         @Override
+         public Aggregations getAggregations() {
+             return aggregations;
+         }
+ 
+         @Override
+         void setDocCountError(long docCountError) {
+             this.docCountError = docCountError;
+         }
+ 
+         @Override
+         public void setDocCountError(Function<Long, Long> updater) {
+             this.docCountError = updater.apply(this.docCountError);
+         }
+ 
+         @Override
+         public boolean showDocCountError() {
+             return showDocCountError;
+         }
+ 
+         @Override
+         public Number getKeyAsNumber() {
+             throw new IllegalArgumentException("getKeyAsNumber is not supported by [" + MultiTermsAggregationBuilder.NAME + "]");
+         }
+ 
+         @Override
+         public long getDocCountError() {
+             if (!showDocCountError) {
+                 throw new IllegalStateException("show_terms_doc_count_error is false");
+             }
+             return docCountError;
+         }
+ 
+         @Override
+         public boolean equals(Object obj) {
+             if (obj == null || getClass() != obj.getClass()) {
+                 return false;
+             }
+             Bucket other = (Bucket) obj;
+             if (showDocCountError && docCountError != other.docCountError) {
+                 return false;
+             }
+             return termValues.equals(other.termValues)
+                 && docCount == other.docCount
+                 && aggregations.equals(other.aggregations)
+                 && showDocCountError == other.showDocCountError;
+         }
+ 
+         @Override
+         public int hashCode() {
+             return Objects.hash(termValues, docCount, aggregations, showDocCountError, showDocCountError ? docCountError : 0);
+         }
+ 
+         @Override
+         public int compareKey(Bucket other) {
+             return new BucketComparator().compare(this.termValues, other.termValues);
+         }
+ 
+         /**
+          * Visible for testing.
+          *
+          * @opensearch.internal
+          */
+         protected static class BucketComparator implements Comparator<List<Object>> {
+             @SuppressWarnings({ "unchecked" })
+             @Override
+             public int compare(List<Object> thisObjects, List<Object> thatObjects) {
+                 if (thisObjects.size() != thatObjects.size()) {
+                     throw new AggregationExecutionException(
+                         "[" + MultiTermsAggregationBuilder.NAME + "] aggregations failed due to terms" + " size is different"
+                     );
+                 }
+                 for (int i = 0; i < thisObjects.size(); i++) {
+                     final Object thisObject = thisObjects.get(i);
+                     final Object thatObject = thatObjects.get(i);
+                     int ret = ((Comparable) thisObject).compareTo(thatObject);
+                     if (ret != 0) {
+                         return ret;
+                     }
+                 }
+                 return 0;
+             }
+         }
+     }
+ 
+     private final int shardSize;
+     private final boolean showTermDocCountError;
+     private final long otherDocCount;
+     private final List<DocValueFormat> termFormats;
+     private final List<Bucket> buckets;
+     private Map<String, Bucket> bucketMap;
+ 
+     private long docCountError;
+ 
+     public InternalMultiTerms(
+         String name,
+         BucketOrder reduceOrder,
+         BucketOrder order,
+         Map<String, Object> metadata,
+         int shardSize,
+         boolean showTermDocCountError,
+         long otherDocCount,
+         long docCountError,
+         List<DocValueFormat> formats,
+         List<Bucket> buckets,
+         TermsAggregator.BucketCountThresholds bucketCountThresholds
+     ) {
+         super(name, reduceOrder, order, bucketCountThresholds, metadata);
+         this.shardSize = shardSize;
+         this.showTermDocCountError = showTermDocCountError;
+         this.otherDocCount = otherDocCount;
+         this.termFormats = formats;
+         this.buckets = buckets;
+         this.docCountError = docCountError;
+     }
+ 
+     public InternalMultiTerms(StreamInput in) throws IOException {
+         super(in);
+         this.docCountError = in.readZLong();
+         this.termFormats = in.readList(stream -> stream.readNamedWriteable(DocValueFormat.class));
+         this.shardSize = readSize(in);
+         this.showTermDocCountError = in.readBoolean();
+         this.otherDocCount = in.readVLong();
+         this.buckets = in.readList(stream -> new Bucket(stream, termFormats, showTermDocCountError));
+     }
+ 
+     @Override
+     public String getWriteableName() {
+         return MultiTermsAggregationBuilder.NAME;
+     }
+ 
+     @Override
+     public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
+         return doXContentCommon(builder, params, docCountError, otherDocCount, buckets);
+     }
+ 
+     @Override
+     public InternalMultiTerms create(List<Bucket> buckets) {
+         return new InternalMultiTerms(
+             name,
+             reduceOrder,
+             order,
+             metadata,
+             shardSize,
+             showTermDocCountError,
+             otherDocCount,
+             docCountError,
+             termFormats,
+             buckets,
+             bucketCountThresholds
+         );
+     }
+ 
+     @Override
+     public Bucket createBucket(InternalAggregations aggregations, Bucket prototype) {
+         return new Bucket(
+             prototype.termValues,
+             prototype.docCount,
+             aggregations,
+             prototype.showDocCountError,
+             prototype.docCountError,
+             prototype.termFormats
+         );
+     }
+ 
+     @Override
+     protected void writeTermTypeInfoTo(StreamOutput out) throws IOException {
+         out.writeZLong(docCountError);
+         out.writeCollection(termFormats, StreamOutput::writeNamedWriteable);
+         writeSize(shardSize, out);
+         out.writeBoolean(showTermDocCountError);
+         out.writeVLong(otherDocCount);
+         out.writeList(buckets);
+     }
+ 
+     @Override
+     public List<Bucket> getBuckets() {
+         return buckets;
+     }
+ 
+     @Override
+     public Bucket getBucketByKey(String term) {
+         if (bucketMap == null) {
+             bucketMap = buckets.stream().collect(Collectors.toMap(InternalMultiTerms.Bucket::getKeyAsString, Function.identity()));
+         }
+         return bucketMap.get(term);
+     }
+ 
+     @Override
+     public long getDocCountError() {
+         return docCountError;
+     }
+ 
+     @Override
+     public long getSumOfOtherDocCounts() {
+         return otherDocCount;
+     }
+ 
+     @Override
+     protected void setDocCountError(long docCountError) {
+         this.docCountError = docCountError;
+     }
+ 
+     @Override
+     protected int getShardSize() {
+         return shardSize;
+     }
+ 
+     @Override
+     protected InternalMultiTerms create(
+         String name,
+         List<Bucket> buckets,
+         BucketOrder reduceOrder,
+         long docCountError,
+         long otherDocCount
+     ) {
+         return new InternalMultiTerms(
+             name,
+             reduceOrder,
+             order,
+             metadata,
+             shardSize,
+             showTermDocCountError,
+             otherDocCount,
+             docCountError,
+             termFormats,
+             buckets,
+             bucketCountThresholds
+         );
+     }
+ 
+     @Override
+     protected Bucket[] createBucketsArray(int size) {
+         return new Bucket[size];
+     }
+ 
+     @Override
+     Bucket createBucket(long docCount, InternalAggregations aggs, long docCountError, Bucket prototype) {
+         return new Bucket(
+             prototype.termValues,
+             docCount,
+             aggs,
+             prototype.showDocCountError,
+             prototype.docCountError,
+             prototype.termFormats
+         );
+     }
+ 
+     @Override
+     public boolean equals(Object obj) {
+         if (this == obj) return true;
+         if (obj == null || getClass() != obj.getClass()) return false;
+         if (super.equals(obj) == false) return false;
+         InternalMultiTerms that = (InternalMultiTerms) obj;
+ 
+         if (showTermDocCountError && docCountError != that.docCountError) {
+             return false;
+         }
+         return Objects.equals(buckets, that.buckets)
+             && Objects.equals(otherDocCount, that.otherDocCount)
+             && Objects.equals(showTermDocCountError, that.showTermDocCountError)
+             && Objects.equals(shardSize, that.shardSize)
+             && Objects.equals(docCountError, that.docCountError);
+     }
+ 
+     @Override
+     public int hashCode() {
+         return Objects.hash(super.hashCode(), buckets, otherDocCount, showTermDocCountError, shardSize);
+     }
+ 
+     /**
+      * Copy from InternalComposite
+      * <p>
+      * Format {@code obj} using the provided {@link DocValueFormat}.
+      * If the format equals {@link DocValueFormat#RAW}, the object is returned as-is
+      * for numbers and a string for {@link BytesRef}s.
+      */
+     static Object formatObject(Object obj, DocValueFormat format) {
+         if (obj == null) {
+             return null;
+         }
+         if (obj.getClass() == BytesRef.class) {
+             BytesRef value = (BytesRef) obj;
+             if (format == DocValueFormat.RAW) {
+                 return value.utf8ToString();
+             } else {
+                 return format.format(value);
+             }
+         } else if (obj.getClass() == Long.class) {
+             long value = (long) obj;
+             if (format == DocValueFormat.RAW) {
+                 return value;
+             } else {
+                 return format.format(value);
+             }
+         } else if (obj.getClass() == Double.class) {
+             double value = (double) obj;
+             if (format == DocValueFormat.RAW) {
+                 return value;
+             } else {
+                 return format.format(value);
+             }
+         }
+         return obj;
+     }
+ }
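+ 
+ // Editor's note: a minimal sketch (hypothetical class) of the element-wise comparison that
+ // Bucket.BucketComparator performs above: compare tuple elements left to right and return the
+ // first non-zero result, so multi-term buckets order like rows under a multi-column sort.
+ class TupleCompareSketch {
+     @SuppressWarnings({ "unchecked", "rawtypes" })
+     static int compare(java.util.List<Object> a, java.util.List<Object> b) {
+         for (int i = 0; i < a.size(); i++) {
+             int ret = ((Comparable) a.get(i)).compareTo(b.get(i));
+             if (ret != 0) {
+                 return ret; // the first differing element decides the order
+             }
+         }
+         return 0; // all elements equal
+     }
+ }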
+
+
+
diff --git a/htmlReport/ns-1/sources/source-d.html b/htmlReport/ns-1/sources/source-d.html
new file mode 100644
index 0000000000000..a4d927b5f8ac0
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-d.html
@@ -0,0 +1,365 @@
+ Coverage Report > InternalRareTerms
+ 
+ Coverage Summary for Class: InternalRareTerms (org.opensearch.search.aggregations.bucket.terms)
+ 
+ Class                             Method, %     Line, %
+ InternalRareTerms                 0% (0/9)      0% (0/32)
+ InternalRareTerms$Bucket          0% (0/8)      0% (0/24)
+ InternalRareTerms$Bucket$Reader   -             -
+ Total                             0% (0/17)     0% (0/56)
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.opensearch.common.util.SetBackedScalingCuckooFilter;
+ import org.opensearch.core.common.io.stream.StreamInput;
+ import org.opensearch.core.common.io.stream.StreamOutput;
+ import org.opensearch.core.xcontent.XContentBuilder;
+ import org.opensearch.search.DocValueFormat;
+ import org.opensearch.search.aggregations.Aggregations;
+ import org.opensearch.search.aggregations.BucketOrder;
+ import org.opensearch.search.aggregations.InternalAggregation;
+ import org.opensearch.search.aggregations.InternalAggregations;
+ import org.opensearch.search.aggregations.InternalMultiBucketAggregation;
+ import org.opensearch.search.aggregations.InternalOrder;
+ import org.opensearch.search.aggregations.KeyComparable;
+ 
+ import java.io.IOException;
+ import java.util.ArrayList;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Objects;
+ 
+ /**
+  * Implementation of rare terms
+  *
+  * @opensearch.internal
+  */
+ public abstract class InternalRareTerms<A extends InternalRareTerms<A, B>, B extends InternalRareTerms.Bucket<B>> extends
+     InternalMultiBucketAggregation<A, B>
+     implements
+         RareTerms {
+ 
+     /**
+      * Bucket for a rare terms agg
+      *
+      * @opensearch.internal
+      */
+     public abstract static class Bucket<B extends Bucket<B>> extends InternalMultiBucketAggregation.InternalBucket
+         implements
+             RareTerms.Bucket,
+             KeyComparable<B> {
+         /**
+          * Reads a bucket. Should be a constructor reference.
+          *
+          * @opensearch.internal
+          */
+         @FunctionalInterface
+         public interface Reader<B extends Bucket<B>> {
+             B read(StreamInput in, DocValueFormat format) throws IOException;
+         }
+ 
+         long bucketOrd;
+ 
+         protected long docCount;
+         protected InternalAggregations aggregations;
+         protected final DocValueFormat format;
+ 
+         protected Bucket(long docCount, InternalAggregations aggregations, DocValueFormat formatter) {
+             this.format = formatter;
+             this.docCount = docCount;
+             this.aggregations = aggregations;
+         }
+ 
+         /**
+          * Read from a stream.
+          */
+         protected Bucket(StreamInput in, DocValueFormat formatter) throws IOException {
+             this.format = formatter;
+             docCount = in.readVLong();
+             aggregations = InternalAggregations.readFrom(in);
+         }
+ 
+         @Override
+         public final void writeTo(StreamOutput out) throws IOException {
+             out.writeVLong(getDocCount());
+             aggregations.writeTo(out);
+             writeTermTo(out);
+         }
+ 
+         protected abstract void writeTermTo(StreamOutput out) throws IOException;
+ 
+         @Override
+         public long getDocCount() {
+             return docCount;
+         }
+ 
+         @Override
+         public Aggregations getAggregations() {
+             return aggregations;
+         }
+ 
+         @Override
+         public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+             builder.startObject();
+             keyToXContent(builder);
+             builder.field(CommonFields.DOC_COUNT.getPreferredName(), getDocCount());
+             aggregations.toXContentInternal(builder, params);
+             builder.endObject();
+             return builder;
+         }
+ 
+         protected abstract XContentBuilder keyToXContent(XContentBuilder builder) throws IOException;
+ 
+         @Override
+         public boolean equals(Object obj) {
+             if (obj == null || getClass() != obj.getClass()) {
+                 return false;
+             }
+             Bucket<?> that = (Bucket<?>) obj;
+             return Objects.equals(docCount, that.docCount) && Objects.equals(aggregations, that.aggregations);
+         }
+ 
+         @Override
+         public int hashCode() {
+             return Objects.hash(getClass(), docCount, aggregations);
+         }
+     }
+ 
+     protected final BucketOrder order;
+     protected final long maxDocCount;
+ 
+     protected InternalRareTerms(String name, BucketOrder order, long maxDocCount, Map<String, Object> metadata) {
+         super(name, metadata);
+         this.order = order;
+         this.maxDocCount = maxDocCount;
+     }
+ 
+     /**
+      * Read from a stream.
+      */
+     protected InternalRareTerms(StreamInput in) throws IOException {
+         super(in);
+         order = InternalOrder.Streams.readOrder(in);
+         maxDocCount = in.readVLong();
+     }
+ 
+     @Override
+     protected final void doWriteTo(StreamOutput out) throws IOException {
+         order.writeTo(out);
+         out.writeVLong(maxDocCount);
+         writeTermTypeInfoTo(out);
+     }
+ 
+     protected abstract void writeTermTypeInfoTo(StreamOutput out) throws IOException;
+ 
+     @Override
+     public abstract List<B> getBuckets();
+ 
+     @Override
+     public abstract B getBucketByKey(String term);
+ 
+     @Override
+     public InternalAggregation reduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
+         throw new UnsupportedOperationException();
+     }
+ 
+     abstract B createBucket(long docCount, InternalAggregations aggs, B prototype);
+ 
+     @Override
+     protected B reduceBucket(List<B> buckets, ReduceContext context) {
+         assert buckets.size() > 0;
+         long docCount = 0;
+         List<InternalAggregations> aggregationsList = new ArrayList<>(buckets.size());
+         for (B bucket : buckets) {
+             docCount += bucket.docCount;
+             aggregationsList.add(bucket.aggregations);
+         }
+         InternalAggregations aggs = InternalAggregations.reduce(aggregationsList, context);
+         return createBucket(docCount, aggs, buckets.get(0));
+     }
+ 
+     protected abstract A createWithFilter(String name, List<B> buckets, SetBackedScalingCuckooFilter filter);
+ 
+     /**
+      * Create an array to hold some buckets. Used in collecting the results.
+      */
+     protected abstract B[] createBucketsArray(int size);
+ 
+     @Override
+     public boolean equals(Object obj) {
+         if (this == obj) return true;
+         if (obj == null || getClass() != obj.getClass()) return false;
+         if (super.equals(obj) == false) return false;
+         InternalRareTerms<?, ?> that = (InternalRareTerms<?, ?>) obj;
+         return Objects.equals(maxDocCount, that.maxDocCount) && Objects.equals(order, that.order);
+     }
+ 
+     @Override
+     public int hashCode() {
+         return Objects.hash(super.hashCode(), maxDocCount, order);
+     }
+ 
+     protected static XContentBuilder doXContentCommon(XContentBuilder builder, Params params, List<? extends Bucket> buckets)
+         throws IOException {
+         builder.startArray(CommonFields.BUCKETS.getPreferredName());
+         for (Bucket bucket : buckets) {
+             bucket.toXContent(builder, params);
+         }
+         builder.endArray();
+         return builder;
+     }
+ }
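+ 
+ // Editor's note: a minimal sketch (hypothetical class, sub-aggregation reduction omitted) of
+ // reduceBucket() above: same-term buckets from different shards merge by summing doc counts,
+ // with the first bucket serving as the prototype for the merged result.
+ class ReduceBucketSketch {
+     static final class Bucket {
+         final String term;
+         final long docCount;
+         Bucket(String term, long docCount) { this.term = term; this.docCount = docCount; }
+     }
+ 
+     static Bucket reduceBucket(java.util.List<Bucket> sameTermBuckets) {
+         assert sameTermBuckets.size() > 0;
+         long docCount = 0;
+         for (Bucket b : sameTermBuckets) {
+             docCount += b.docCount;
+         }
+         return new Bucket(sameTermBuckets.get(0).term, docCount);
+     }
+ }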
+
+
+
diff --git a/htmlReport/ns-1/sources/source-e.html b/htmlReport/ns-1/sources/source-e.html
new file mode 100644
index 0000000000000..be20892d3afa4
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-e.html
@@ -0,0 +1,501 @@
+ Coverage Report > InternalSignificantTerms
+ 
+ Coverage Summary for Class: InternalSignificantTerms (org.opensearch.search.aggregations.bucket.terms)
+ 
+ Class                                    Method, %     Line, %
+ InternalSignificantTerms                 0% (0/8)      0% (0/79)
+ InternalSignificantTerms$Bucket          0% (0/13)     0% (0/37)
+ InternalSignificantTerms$Bucket$Reader   -             -
+ Total                                    0% (0/21)     0% (0/116)
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.opensearch.core.common.io.stream.StreamInput;
+ import org.opensearch.core.common.io.stream.StreamOutput;
+ import org.opensearch.core.xcontent.XContentBuilder;
+ import org.opensearch.search.DocValueFormat;
+ import org.opensearch.search.aggregations.Aggregations;
+ import org.opensearch.search.aggregations.InternalAggregation;
+ import org.opensearch.search.aggregations.InternalAggregations;
+ import org.opensearch.search.aggregations.InternalMultiBucketAggregation;
+ import org.opensearch.search.aggregations.bucket.LocalBucketCountThresholds;
+ import org.opensearch.search.aggregations.bucket.terms.heuristic.SignificanceHeuristic;
+ 
+ import java.io.IOException;
+ import java.util.ArrayList;
+ import java.util.Arrays;
+ import java.util.HashMap;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Objects;
+ 
+ /**
+  * Result of the significant terms aggregation.
+  *
+  * @opensearch.internal
+  */
+ public abstract class InternalSignificantTerms<A extends InternalSignificantTerms<A, B>, B extends InternalSignificantTerms.Bucket<B>>
+     extends InternalMultiBucketAggregation<A, B>
+     implements
+         SignificantTerms {
+ 
+     public static final String SCORE = "score";
+     public static final String BG_COUNT = "bg_count";
+ 
+     /**
+      * Bucket for a significant terms agg
+      *
+      * @opensearch.internal
+      */
+     @SuppressWarnings("PMD.ConstructorCallsOverridableMethod")
+     public abstract static class Bucket<B extends Bucket<B>> extends InternalMultiBucketAggregation.InternalBucket
+         implements
+             SignificantTerms.Bucket {
+         /**
+          * Reads a bucket. Should be a constructor reference.
+          *
+          * @opensearch.internal
+          */
+         @FunctionalInterface
+         public interface Reader<B extends Bucket<B>> {
+             B read(StreamInput in, long subsetSize, long supersetSize, DocValueFormat format) throws IOException;
+         }
+ 
+         long subsetDf;
+         long subsetSize;
+         long supersetDf;
+         long supersetSize;
+         long bucketOrd;
+         double score;
+         protected InternalAggregations aggregations;
+         final transient DocValueFormat format;
+ 
+         protected Bucket(
+             long subsetDf,
+             long subsetSize,
+             long supersetDf,
+             long supersetSize,
+             InternalAggregations aggregations,
+             DocValueFormat format
+         ) {
+             this.subsetSize = subsetSize;
+             this.supersetSize = supersetSize;
+             this.subsetDf = subsetDf;
+             this.supersetDf = supersetDf;
+             this.aggregations = aggregations;
+             this.format = format;
+         }
+ 
+         /**
+          * Read from a stream.
+          */
+         protected Bucket(StreamInput in, long subsetSize, long supersetSize, DocValueFormat format) {
+             this.subsetSize = subsetSize;
+             this.supersetSize = supersetSize;
+             this.format = format;
+         }
+ 
+         @Override
+         public long getSubsetDf() {
+             return subsetDf;
+         }
+ 
+         @Override
+         public long getSupersetDf() {
+             return supersetDf;
+         }
+ 
+         @Override
+         public long getSupersetSize() {
+             return supersetSize;
+         }
+ 
+         @Override
+         public long getSubsetSize() {
+             return subsetSize;
+         }
+ 
+         // TODO we should refactor to remove this, since buckets should be immutable after they are generated.
+         // This can lead to confusing bugs if the bucket is re-created (via createBucket() or similar) without
+         // the score
+         void updateScore(SignificanceHeuristic significanceHeuristic) {
+             score = significanceHeuristic.getScore(subsetDf, subsetSize, supersetDf, supersetSize);
+         }
+ 
+         @Override
+         public long getDocCount() {
+             return subsetDf;
+         }
+ 
+         @Override
+         public Aggregations getAggregations() {
+             return aggregations;
+         }
+ 
+         @Override
+         public double getSignificanceScore() {
+             return score;
+         }
+ 
+         @Override
+         public boolean equals(Object o) {
+             if (this == o) {
+                 return true;
+             }
+             if (o == null || getClass() != o.getClass()) {
+                 return false;
+             }
+ 
+             Bucket<?> that = (Bucket<?>) o;
+             return bucketOrd == that.bucketOrd
+                 && Double.compare(that.score, score) == 0
+                 && Objects.equals(aggregations, that.aggregations)
+                 && Objects.equals(format, that.format);
+         }
+ 
+         @Override
+         public int hashCode() {
+             return Objects.hash(getClass(), bucketOrd, aggregations, score, format);
+         }
+ 
+         @Override
+         public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+             builder.startObject();
+             keyToXContent(builder);
+             builder.field(CommonFields.DOC_COUNT.getPreferredName(), getDocCount());
+             builder.field(SCORE, score);
+             builder.field(BG_COUNT, supersetDf);
+             aggregations.toXContentInternal(builder, params);
+             builder.endObject();
+             return builder;
+         }
+ 
+         protected abstract XContentBuilder keyToXContent(XContentBuilder builder) throws IOException;
+     }
+ 
+     protected final int requiredSize;
+     protected final long minDocCount;
+     protected final TermsAggregator.BucketCountThresholds bucketCountThresholds;
+ 
+     protected InternalSignificantTerms(
+         String name,
+         TermsAggregator.BucketCountThresholds bucketCountThresholds,
+         Map<String, Object> metadata
+     ) {
+         super(name, metadata);
+         this.requiredSize = bucketCountThresholds.getRequiredSize();
+         this.minDocCount = bucketCountThresholds.getMinDocCount();
+         this.bucketCountThresholds = bucketCountThresholds;
+     }
+ 
+     /**
+      * Read from a stream.
+      */
+     protected InternalSignificantTerms(StreamInput in) throws IOException {
+         super(in);
+         requiredSize = readSize(in);
+         minDocCount = in.readVLong();
+         // shardMinDocCount and shardSize are not used on the coordinator, so they are not deserialized. We use
+         // CoordinatorBucketCountThresholds which will throw an exception if they are accessed.
+         bucketCountThresholds = new TermsAggregator.CoordinatorBucketCountThresholds(minDocCount, -1, requiredSize, -1);
+     }
+ 
+     protected final void doWriteTo(StreamOutput out) throws IOException {
+         writeSize(requiredSize, out);
+         out.writeVLong(minDocCount);
+         writeTermTypeInfoTo(out);
+     }
+ 
+     protected abstract void writeTermTypeInfoTo(StreamOutput out) throws IOException;
+ 
+     @Override
+     public abstract List<B> getBuckets();
+ 
+     @Override
+     public InternalAggregation reduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
+         LocalBucketCountThresholds localBucketCountThresholds = reduceContext.asLocalBucketCountThresholds(bucketCountThresholds);
+         long globalSubsetSize = 0;
+         long globalSupersetSize = 0;
+         // Compute the overall result set size and the corpus size using the
+         // top-level Aggregations from each shard
+         for (InternalAggregation aggregation : aggregations) {
+             @SuppressWarnings("unchecked")
+             InternalSignificantTerms<A, B> terms = (InternalSignificantTerms<A, B>) aggregation;
+             globalSubsetSize += terms.getSubsetSize();
+             // supersetSize is a shard-level count; summing it across slices would produce
+             // num_slices_with_bucket * supersetSize, where num_slices_with_bucket is the number of
+             // segment slices that have collected a bucket for the key
+             if (reduceContext.isSliceLevel()) {
+                 globalSupersetSize = terms.getSupersetSize();
+             } else {
+                 globalSupersetSize += terms.getSupersetSize();
+             }
+         }
+         Map<String, List<B>> buckets = new HashMap<>();
+         for (InternalAggregation aggregation : aggregations) {
+             @SuppressWarnings("unchecked")
+             InternalSignificantTerms<A, B> terms = (InternalSignificantTerms<A, B>) aggregation;
+             for (B bucket : terms.getBuckets()) {
+                 List<B> existingBuckets = buckets.computeIfAbsent(
+                     bucket.getKeyAsString(),
+                     key -> new ArrayList<>(aggregations.size())
+                 );
+                 // Adjust the buckets with the global stats representing the
+                 // total size of the pots from which the stats are drawn
+                 existingBuckets.add(
+                     createBucket(
+                         bucket.getSubsetDf(),
+                         globalSubsetSize,
+                         bucket.getSupersetDf(),
+                         globalSupersetSize,
+                         bucket.aggregations,
+                         bucket
+                     )
+                 );
+             }
+         }
+         SignificanceHeuristic heuristic = getSignificanceHeuristic().rewrite(reduceContext);
+         boolean isCoordinatorPartialReduce = reduceContext.isFinalReduce() == false && reduceContext.isSliceLevel() == false;
+         // Do not apply the size threshold on a coordinator partial reduce
+         final int size = isCoordinatorPartialReduce
+             ? buckets.size()
+             : Math.min(localBucketCountThresholds.getRequiredSize(), buckets.size());
+         BucketSignificancePriorityQueue<B> ordered = new BucketSignificancePriorityQueue<>(size);
+         for (Map.Entry<String, List<B>> entry : buckets.entrySet()) {
+             List<B> sameTermBuckets = entry.getValue();
+             final B b = reduceBucket(sameTermBuckets, reduceContext);
+             b.updateScore(heuristic);
+             // For the concurrent search case we do not apply bucket count thresholds in buildAggregation;
+             // instead they are applied here during reduce. However, the bucket score is only evaluated
+             // at the final coordinator reduce.
+             boolean meetsThresholds = (b.subsetDf >= localBucketCountThresholds.getMinDocCount())
+                 && ((b.score > 0) || reduceContext.isSliceLevel());
+             if (isCoordinatorPartialReduce || meetsThresholds) {
+                 B removed = ordered.insertWithOverflow(b);
+                 if (removed == null) {
+                     reduceContext.consumeBucketsAndMaybeBreak(1);
+                 } else {
+                     reduceContext.consumeBucketsAndMaybeBreak(-countInnerBucket(removed));
+                 }
+             } else {
+                 reduceContext.consumeBucketsAndMaybeBreak(-countInnerBucket(b));
+             }
+         }
+         B[] list = createBucketsArray(ordered.size());
+         for (int i = ordered.size() - 1; i >= 0; i--) {
+             list[i] = ordered.pop();
+         }
+         return create(globalSubsetSize, globalSupersetSize, Arrays.asList(list));
+     }
+ 
+     @Override
+     protected B reduceBucket(List<B> buckets, ReduceContext context) {
+         assert buckets.size() > 0;
+         long subsetDf = 0;
+         long supersetDf = 0;
+         List<InternalAggregations> aggregationsList = new ArrayList<>(buckets.size());
+         for (B bucket : buckets) {
+             subsetDf += bucket.subsetDf;
+             // supersetDf is a shard-level count; summing it across slices would produce
+             // num_slices_with_bucket * supersetDf, where num_slices_with_bucket is the number of
+             // segment slices that have collected a bucket for the key
+             if (context.isSliceLevel()) {
+                 supersetDf = bucket.supersetDf;
+             } else {
+                 supersetDf += bucket.supersetDf;
+             }
+             aggregationsList.add(bucket.aggregations);
+         }
+         InternalAggregations aggs = InternalAggregations.reduce(aggregationsList, context);
+         return createBucket(subsetDf, buckets.get(0).subsetSize, supersetDf, buckets.get(0).supersetSize, aggs, buckets.get(0));
+     }
+ 
+     abstract B createBucket(
+         long subsetDf,
+         long subsetSize,
+         long supersetDf,
+         long supersetSize,
+         InternalAggregations aggregations,
+         B prototype
+     );
+ 
+     protected abstract A create(long subsetSize, long supersetSize, List<B> buckets);
+ 
+     /**
+      * Create an array to hold some buckets. Used in collecting the results.
+      */
+     protected abstract B[] createBucketsArray(int size);
+ 
+     protected abstract long getSubsetSize();
+ 
+     protected abstract long getSupersetSize();
+ 
+     protected abstract SignificanceHeuristic getSignificanceHeuristic();
+ 
+     @Override
+     public int hashCode() {
+         return Objects.hash(super.hashCode(), minDocCount, requiredSize);
+     }
+ 
+     @Override
+     public boolean equals(Object obj) {
+         if (this == obj) return true;
+         if (obj == null || getClass() != obj.getClass()) return false;
+         if (super.equals(obj) == false) return false;
+ 
+         InternalSignificantTerms<?, ?> that = (InternalSignificantTerms<?, ?>) obj;
+         return Objects.equals(minDocCount, that.minDocCount) && Objects.equals(requiredSize, that.requiredSize);
+     }
+ }
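A minimal usage sketch of how the pieces above combine: the reduce flow accumulates the four counts (subsetDf, subsetSize, supersetDf, supersetSize) and hands them to a significance heuristic such as JLHScore from the heuristic package covered later in this patch. Class usage, counts, and printout below are illustrative only, not part of the patch.

    import org.opensearch.search.aggregations.bucket.terms.heuristic.JLHScore;

    public class SignificanceScoreSketch {
        public static void main(String[] args) {
            long subsetDf = 12;        // docs containing the term in the foreground set (summed over shards)
            long subsetSize = 100;     // foreground set size
            long supersetDf = 40;      // docs containing the term in the background set
            long supersetSize = 10_000;
            double score = new JLHScore().getScore(subsetDf, subsetSize, supersetDf, supersetSize);
            System.out.println("JLH significance score: " + score);
        }
    }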
+
+
+
diff --git a/htmlReport/ns-1/sources/source-f.html b/htmlReport/ns-1/sources/source-f.html
new file mode 100644
index 0000000000000..6fa828061b1d8
--- /dev/null
+++ b/htmlReport/ns-1/sources/source-f.html
@@ -0,0 +1,765 @@
+ Coverage Report > InternalTerms
+
+ Coverage Summary for Class: InternalTerms (org.opensearch.search.aggregations.bucket.terms)
+
+ Class                                  Method, %        Line, %
+ InternalTerms                          53.8% (7/13)     58.6% (112/191)
+ InternalTerms$1                        50%   (1/2)      50%   (1/2)
+ InternalTerms$AbstractInternalBucket   100%  (1/1)      100%  (1/1)
+ InternalTerms$Bucket                   41.7% (5/12)     23.8% (10/42)
+ InternalTerms$Bucket$Reader            -                -
+ Total                                  50%   (14/28)    52.5% (124/236)
+
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms;
+ 
+ import org.apache.lucene.util.PriorityQueue;
+ import org.opensearch.core.ParseField;
+ import org.opensearch.core.common.io.stream.StreamInput;
+ import org.opensearch.core.common.io.stream.StreamOutput;
+ import org.opensearch.core.xcontent.XContentBuilder;
+ import org.opensearch.search.DocValueFormat;
+ import org.opensearch.search.aggregations.AggregationExecutionException;
+ import org.opensearch.search.aggregations.Aggregations;
+ import org.opensearch.search.aggregations.BucketOrder;
+ import org.opensearch.search.aggregations.InternalAggregation;
+ import org.opensearch.search.aggregations.InternalAggregations;
+ import org.opensearch.search.aggregations.InternalMultiBucketAggregation;
+ import org.opensearch.search.aggregations.InternalOrder;
+ import org.opensearch.search.aggregations.KeyComparable;
+ import org.opensearch.search.aggregations.bucket.IteratorAndCurrent;
+ import org.opensearch.search.aggregations.bucket.LocalBucketCountThresholds;
+ import org.opensearch.search.aggregations.bucket.MultiBucketsAggregation;
+ 
+ import java.io.IOException;
+ import java.util.ArrayList;
+ import java.util.Arrays;
+ import java.util.Comparator;
+ import java.util.HashMap;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Objects;
+ import java.util.function.Function;
+ 
+ import static org.opensearch.search.aggregations.InternalOrder.isKeyAsc;
+ import static org.opensearch.search.aggregations.InternalOrder.isKeyOrder;
+ 
+ /**
+  * Implementation of terms
+  *
+  * @opensearch.internal
+  */
+ public abstract class InternalTerms<A extends InternalTerms<A, B>, B extends InternalTerms.AbstractInternalBucket> extends
+     InternalMultiBucketAggregation<A, B>
+     implements
+         Terms {
+ 
+     protected static final ParseField DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME = new ParseField("doc_count_error_upper_bound");
+     protected static final ParseField SUM_OF_OTHER_DOC_COUNTS = new ParseField("sum_other_doc_count");
+ 
+     /**
+      * Base internal multi bucket
+      *
+      * @opensearch.internal
+      */
+     public abstract static class AbstractInternalBucket extends InternalMultiBucketAggregation.InternalBucket implements Terms.Bucket {
+         abstract void setDocCountError(long docCountError);
+ 
+         abstract void setDocCountError(Function<Long, Long> updater);
+ 
+         abstract boolean showDocCountError();
+     }
+ 
+     /**
+      * Base bucket class
+      *
+      * @opensearch.internal
+      */
+     public abstract static class Bucket<B extends Bucket<B>> extends AbstractInternalBucket implements KeyComparable<B> {
+         /**
+          * Reads a bucket. Should be a constructor reference.
+          *
+          * @opensearch.internal
+          */
+         @FunctionalInterface
+         public interface Reader<B extends Bucket<B>> {
+             B read(StreamInput in, DocValueFormat format, boolean showDocCountError) throws IOException;
+         }
+ 
+         long bucketOrd;
+ 
+         protected long docCount;
+         protected long docCountError;
+         protected InternalAggregations aggregations;
+         protected final boolean showDocCountError;
+         protected final DocValueFormat format;
+ 
+         protected Bucket(
+             long docCount,
+             InternalAggregations aggregations,
+             boolean showDocCountError,
+             long docCountError,
+             DocValueFormat formatter
+         ) {
+             this.showDocCountError = showDocCountError;
+             this.format = formatter;
+             this.docCount = docCount;
+             this.aggregations = aggregations;
+             this.docCountError = docCountError;
+         }
+ 
+         /**
+          * Read from a stream.
+          */
+         protected Bucket(StreamInput in, DocValueFormat formatter, boolean showDocCountError) throws IOException {
+             this.showDocCountError = showDocCountError;
+             this.format = formatter;
+             docCount = in.readVLong();
+             docCountError = -1;
+             if (showDocCountError) {
+                 docCountError = in.readLong();
+             }
+             aggregations = InternalAggregations.readFrom(in);
+         }
+ 
+         @Override
+         public final void writeTo(StreamOutput out) throws IOException {
+             out.writeVLong(getDocCount());
+             if (showDocCountError) {
+                 out.writeLong(docCountError);
+             }
+             aggregations.writeTo(out);
+             writeTermTo(out);
+         }
+ 
+         protected abstract void writeTermTo(StreamOutput out) throws IOException;
+ 
+         @Override
+         public long getDocCount() {
+             return docCount;
+         }
+ 
+         @Override
+         public long getDocCountError() {
+             if (!showDocCountError) {
+                 throw new IllegalStateException("show_terms_doc_count_error is false");
+             }
+             return docCountError;
+         }
+ 
+         @Override
+         public void setDocCountError(long docCountError) {
+             this.docCountError = docCountError;
+         }
+ 
+         @Override
+         public void setDocCountError(Function<Long, Long> updater) {
+             this.docCountError = updater.apply(this.docCountError);
+         }
+ 
+         @Override
+         public boolean showDocCountError() {
+             return showDocCountError;
+         }
+ 
+         @Override
+         public Aggregations getAggregations() {
+             return aggregations;
+         }
+ 
+         @Override
+         public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+             builder.startObject();
+             keyToXContent(builder);
+             builder.field(CommonFields.DOC_COUNT.getPreferredName(), getDocCount());
+             if (showDocCountError) {
+                 builder.field(InternalTerms.DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME.getPreferredName(), getDocCountError());
+             }
+             aggregations.toXContentInternal(builder, params);
+             builder.endObject();
+             return builder;
+         }
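+ 
+         // Rendered shape, with illustrative (hypothetical) values:
+         // { <key fields>, "doc_count": 42, "doc_count_error_upper_bound": 3, <sub-aggregation output> }
+         // (doc_count_error_upper_bound is emitted only when showDocCountError is true)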
+ 
+         protected abstract XContentBuilder keyToXContent(XContentBuilder builder) throws IOException;
+ 
+         @Override
+         public boolean equals(Object obj) {
+             if (obj == null || getClass() != obj.getClass()) {
+                 return false;
+             }
+             Bucket<?> that = (Bucket<?>) obj;
+             // No need to compare format and showDocCountError; they are attributes
+             // of the parent terms aggregation object that are only copied here
+             // for serialization purposes
+             return Objects.equals(docCount, that.docCount)
+                 && Objects.equals(docCountError, that.docCountError)
+                 && Objects.equals(aggregations, that.aggregations);
+         }
+ 
+         @Override
+         public int hashCode() {
+             return Objects.hash(getClass(), docCount, docCountError, aggregations);
+         }
+     }
+ 
+     protected final BucketOrder reduceOrder;
+     protected final BucketOrder order;
+     protected final int requiredSize;
+     protected final long minDocCount;
+     protected final TermsAggregator.BucketCountThresholds bucketCountThresholds;
+     private boolean hasSliceLevelDocCountError = false;
+ 
+     /**
+      * Creates a new {@link InternalTerms}
+      * @param name The name of the aggregation
+      * @param reduceOrder The {@link BucketOrder} that should be used to merge shard results.
+      * @param order The {@link BucketOrder} that should be used to sort the final reduce.
+      * @param bucketCountThresholds Object containing values for minDocCount, shardMinDocCount, size, shardSize.
+      * @param metadata The metadata associated with the aggregation.
+      */
+     protected InternalTerms(
+         String name,
+         BucketOrder reduceOrder,
+         BucketOrder order,
+         TermsAggregator.BucketCountThresholds bucketCountThresholds,
+         Map<String, Object> metadata
+     ) {
+         super(name, metadata);
+         this.reduceOrder = reduceOrder;
+         this.order = order;
+         this.bucketCountThresholds = bucketCountThresholds;
+         this.requiredSize = bucketCountThresholds.getRequiredSize();
+         this.minDocCount = bucketCountThresholds.getMinDocCount();
+     }
+ 
+     /**
+      * Read from a stream.
+      */
+     protected InternalTerms(StreamInput in) throws IOException {
+         super(in);
+         reduceOrder = InternalOrder.Streams.readOrder(in);
+         order = InternalOrder.Streams.readOrder(in);
+         requiredSize = readSize(in);
+         minDocCount = in.readVLong();
+         // shardMinDocCount and shardSize are not used on the coordinator, so they are not deserialized. We use
+         // CoordinatorBucketCountThresholds which will throw an exception if they are accessed.
+         bucketCountThresholds = new TermsAggregator.CoordinatorBucketCountThresholds(minDocCount, -1, requiredSize, getShardSize());
+     }
+ 
+     @Override
+     protected final void doWriteTo(StreamOutput out) throws IOException {
+         reduceOrder.writeTo(out);
+         order.writeTo(out);
+         writeSize(requiredSize, out);
+         out.writeVLong(minDocCount);
+         writeTermTypeInfoTo(out);
+     }
+ 
+     protected abstract void writeTermTypeInfoTo(StreamOutput out) throws IOException;
+ 
+     @Override
+     public abstract List<B> getBuckets();
+ 
+     @Override
+     public abstract B getBucketByKey(String term);
+ 
+     private BucketOrder getReduceOrder(List<InternalAggregation> aggregations) {
+         BucketOrder thisReduceOrder = null;
+         for (InternalAggregation aggregation : aggregations) {
+             @SuppressWarnings("unchecked")
+             InternalTerms<A, B> terms = (InternalTerms<A, B>) aggregation;
+             if (terms.getBuckets().isEmpty()) {
+                 continue;
+             }
+             if (thisReduceOrder == null) {
+                 thisReduceOrder = terms.reduceOrder;
+             } else if (thisReduceOrder.equals(terms.reduceOrder) == false) {
+                 return order;
+             }
+         }
+         return thisReduceOrder != null ? thisReduceOrder : order;
+     }
+ 
+     private long getDocCountError(InternalTerms<?, ?> terms, ReduceContext reduceContext) {
+         int size = terms.getBuckets().size();
+         if (size == 0 || size < terms.getShardSize() || isKeyOrder(terms.order)) {
+             return 0;
+         } else if (InternalOrder.isCountDesc(terms.order)) {
+             if (terms.getDocCountError() > 0) {
+                 // If there is an existing docCountError for this agg then
+                 // use this as the error for this aggregation
+                 return terms.getDocCountError();
+             } else {
+                 // otherwise use the doc count of the last term in the
+                 // aggregation
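+                 // e.g. (hypothetical) if this shard's agg returned its full shardSize worth of
+                 // buckets ordered by descending count and the smallest returned doc count is 7,
+                 // any omitted term can have at most 7 docs on this shard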
+                 return terms.getBuckets().stream().mapToLong(MultiBucketsAggregation.Bucket::getDocCount).min().getAsLong();
+             }
+         } else {
+             return -1;
+         }
+     }
+ 
+     private List<B> reduceMergeSort(List<InternalAggregation> aggregations, BucketOrder thisReduceOrder, ReduceContext reduceContext) {
+         assert isKeyOrder(thisReduceOrder);
+         final Comparator<MultiBucketsAggregation.Bucket> cmp = thisReduceOrder.comparator();
+         final PriorityQueue<IteratorAndCurrent<B>> pq = new PriorityQueue<IteratorAndCurrent<B>>(aggregations.size()) {
+             @Override
+             protected boolean lessThan(IteratorAndCurrent<B> a, IteratorAndCurrent<B> b) {
+                 return cmp.compare(a.current(), b.current()) < 0;
+             }
+         };
+         for (InternalAggregation aggregation : aggregations) {
+             @SuppressWarnings("unchecked")
+             InternalTerms<A, B> terms = (InternalTerms<A, B>) aggregation;
+             if (terms.getBuckets().isEmpty() == false) {
+                 assert terms.reduceOrder.equals(reduceOrder) : "expected a common reduce order across shard results";
+                 pq.add(new IteratorAndCurrent<>(terms.getBuckets().iterator()));
+             }
+         }
+         List<B> reducedBuckets = new ArrayList<>();
+         // list of buckets coming from different shards that have the same key
+         List<B> currentBuckets = new ArrayList<>();
+         B lastBucket = null;
+         while (pq.size() > 0) {
+             final IteratorAndCurrent<B> top = pq.top();
+             assert lastBucket == null || cmp.compare(top.current(), lastBucket) >= 0;
+             if (lastBucket != null && cmp.compare(top.current(), lastBucket) != 0) {
+                 // the key changes, reduce what we already buffered and reset the buffer for current buckets
+                 final B reduced = reduceBucket(currentBuckets, reduceContext);
+                 reducedBuckets.add(reduced);
+                 currentBuckets.clear();
+             }
+             lastBucket = top.current();
+             currentBuckets.add(top.current());
+             if (top.hasNext()) {
+                 top.next();
+                 assert cmp.compare(top.current(), lastBucket) > 0 : "shards must return data sorted by key";
+                 pq.updateTop();
+             } else {
+                 pq.pop();
+             }
+         }
+ 
+         if (currentBuckets.isEmpty() == false) {
+             final B reduced = reduceBucket(currentBuckets, reduceContext);
+             reducedBuckets.add(reduced);
+         }
+         return reducedBuckets;
+     }
+ 
+     private List<B> reduceLegacy(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
+         Map<Object, List<B>> bucketMap = new HashMap<>();
+         for (InternalAggregation aggregation : aggregations) {
+             @SuppressWarnings("unchecked")
+             InternalTerms<A, B> terms = (InternalTerms<A, B>) aggregation;
+             if (terms.getBuckets().isEmpty() == false) {
+                 for (B bucket : terms.getBuckets()) {
+                     List<B> bucketList = bucketMap.computeIfAbsent(bucket.getKey(), key -> new ArrayList<>());
+                     bucketList.add(bucket);
+                 }
+             }
+         }
+         List<B> reducedBuckets = new ArrayList<>();
+         for (List<B> sameTermBuckets : bucketMap.values()) {
+             final B b = reduceBucket(sameTermBuckets, reduceContext);
+             reducedBuckets.add(b);
+         }
+         return reducedBuckets;
+     }
+ 
+     @Override
+     public InternalAggregation reduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
+         LocalBucketCountThresholds localBucketCountThresholds = reduceContext.asLocalBucketCountThresholds(bucketCountThresholds);
+         long sumDocCountError = 0;
+         long otherDocCount = 0;
+         InternalTerms<A, B> referenceTerms = null;
+         for (InternalAggregation aggregation : aggregations) {
+             @SuppressWarnings("unchecked")
+             InternalTerms<A, B> terms = (InternalTerms<A, B>) aggregation;
+             // For concurrent segment search the aggregation arrives with a doc count error already
+             // computed on the shards. We use the presence of that error to determine whether it
+             // originated at the slice level; if so, we preserve the doc count error for the
+             // single-shard case at the coordinator level.
+             if (aggregations.size() == 1 && terms.getDocCountError() > 0) {
+                 hasSliceLevelDocCountError = true;
+             }
+             if (referenceTerms == null && aggregation.getClass().equals(UnmappedTerms.class) == false) {
+                 referenceTerms = terms;
+             }
+             if (referenceTerms != null
+                 && referenceTerms.getClass().equals(terms.getClass()) == false
+                 && terms.getClass().equals(UnmappedTerms.class) == false) {
+                 // control reaches this branch when the field the query is executed against
+                 // has different types in different indices.
+                 throw new AggregationExecutionException(
+                     "Merging/Reducing the aggregations failed when computing the aggregation ["
+                         + referenceTerms.getName()
+                         + "] because the field you gave in the aggregation query existed as two different "
+                         + "types in two different indices"
+                 );
+             }
+             otherDocCount += terms.getSumOfOtherDocCounts();
+             final long thisAggDocCountError = getDocCountError(terms, reduceContext);
+             if (sumDocCountError != -1) {
+                 if (thisAggDocCountError == -1) {
+                     sumDocCountError = -1;
+                 } else {
+                     sumDocCountError += thisAggDocCountError;
+                 }
+             }
+             setDocCountError(thisAggDocCountError);
+             for (B bucket : terms.getBuckets()) {
+                 // If there is already a doc count error for this bucket,
+                 // subtract this agg's doc count error from it to make the
+                 // new value for the bucket. This then means that when the
+                 // final error for the bucket is calculated below we account
+                 // for the existing error calculated in a previous reduce.
+                 // Note that if the error is unbounded (-1) this will be fixed
+                 // later in this method.
+                 bucket.setDocCountError(docCountError -> docCountError - thisAggDocCountError);
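+                 // e.g. (hypothetical) with two aggs whose errors are e1 = 2 and e2 = 3, a bucket seen
+                 // only in agg 1 with error b is stored as b - 2 here and becomes b - 2 + (2 + 3) = b + 3
+                 // in the final pass below, i.e. its own error plus the error of the agg that missed it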
+             }
+         }
+ 
+         final List<B> reducedBuckets;
+         /*
+           Buckets returned by a partial reduce or a shard response are sorted by key.
+           That allows us to perform a merge sort when reducing multiple aggregations together.
+           For backward compatibility, we disable the merge sort and use {@link InternalTerms#reduceLegacy} if any of
+           the provided aggregations use a different {@link InternalTerms#reduceOrder}.
+          */
+         BucketOrder thisReduceOrder = getReduceOrder(aggregations);
+         if (isKeyOrder(thisReduceOrder)) {
+             // extract the primary sort in case this is a compound order.
+             thisReduceOrder = InternalOrder.key(isKeyAsc(thisReduceOrder));
+             reducedBuckets = reduceMergeSort(aggregations, thisReduceOrder, reduceContext);
+         } else {
+             reducedBuckets = reduceLegacy(aggregations, reduceContext);
+         }
+         final B[] list;
+         if (reduceContext.isFinalReduce() || reduceContext.isSliceLevel()) {
+             final int size = Math.min(localBucketCountThresholds.getRequiredSize(), reducedBuckets.size());
+             // the final sort uses the user-requested order's comparator
+             final BucketPriorityQueue<B> ordered = new BucketPriorityQueue<>(size, order.comparator());
+             for (B bucket : reducedBuckets) {
+                 if (sumDocCountError == -1) {
+                     bucket.setDocCountError(-1);
+                 } else {
+                     final long finalSumDocCountError = sumDocCountError;
+                     bucket.setDocCountError(docCountError -> docCountError + finalSumDocCountError);
+                 }
+                 if (bucket.getDocCount() >= localBucketCountThresholds.getMinDocCount()) {
+                     B removed = ordered.insertWithOverflow(bucket);
+                     if (removed != null) {
+                         otherDocCount += removed.getDocCount();
+                         reduceContext.consumeBucketsAndMaybeBreak(-countInnerBucket(removed));
+                     } else {
+                         reduceContext.consumeBucketsAndMaybeBreak(1);
+                     }
+                 } else {
+                     reduceContext.consumeBucketsAndMaybeBreak(-countInnerBucket(bucket));
+                 }
+             }
+             list = createBucketsArray(ordered.size());
+             for (int i = ordered.size() - 1; i >= 0; i--) {
+                 list[i] = ordered.pop();
+             }
+         } else {
+             // we can prune the list on partial reduce if the aggregation is ordered by key
+             // and not filtered (minDocCount == 0)
+             int size = isKeyOrder(order) && localBucketCountThresholds.getMinDocCount() == 0
+                 ? Math.min(localBucketCountThresholds.getRequiredSize(), reducedBuckets.size())
+                 : reducedBuckets.size();
+             list = createBucketsArray(size);
+             for (int i = 0; i < size; i++) {
+                 reduceContext.consumeBucketsAndMaybeBreak(1);
+                 list[i] = reducedBuckets.get(i);
+                 if (sumDocCountError == -1) {
+                     list[i].setDocCountError(-1);
+                 } else {
+                     final long fSumDocCountError = sumDocCountError;
+                     list[i].setDocCountError(docCountError -> docCountError + fSumDocCountError);
+                 }
+             }
+         }
+         long docCountError;
+         if (sumDocCountError == -1) {
+             docCountError = -1;
+         } else {
+             if (hasSliceLevelDocCountError) {
+                 docCountError = sumDocCountError;
+             } else {
+                 docCountError = aggregations.size() == 1 ? 0 : sumDocCountError;
+             }
+         }
+ 
+         // Shards must return buckets sorted by key, so we apply the sort here in shard level reduce
+         if (reduceContext.isSliceLevel()) {
+             Arrays.sort(list, thisReduceOrder.comparator());
+         }
+         return create(name, Arrays.asList(list), reduceContext.isFinalReduce() ? order : thisReduceOrder, docCountError, otherDocCount);
+     }
+ 
+     @Override
+     protected B reduceBucket(List<B> buckets, ReduceContext context) {
+         assert !buckets.isEmpty();
+         long docCount = 0;
+         // For the per-term doc count error we add up the errors from the
+         // shards that did not respond with the term. To do this we add up
+         // the errors from the shards that did respond with the term and
+         // subtract that from the sum of the errors from all shards
+         long docCountError = 0;
+         List<InternalAggregations> aggregationsList = new ArrayList<>(buckets.size());
+         for (B bucket : buckets) {
+             docCount += bucket.getDocCount();
+             if (docCountError != -1) {
+                 if (bucket.showDocCountError() == false) {
+                     docCountError = -1;
+                 } else {
+                     docCountError += bucket.getDocCountError();
+                 }
+             }
+             aggregationsList.add((InternalAggregations) bucket.getAggregations());
+         }
+         InternalAggregations aggs = InternalAggregations.reduce(aggregationsList, context);
+         return createBucket(docCount, aggs, docCountError, buckets.get(0));
+     }
+ 
+     protected abstract void setDocCountError(long docCountError);
+ 
+     protected abstract int getShardSize();
+ 
+     protected abstract A create(String name, List<B> buckets, BucketOrder reduceOrder, long docCountError, long otherDocCount);
+ 
+     /**
+      * Create an array to hold some buckets. Used in collecting the results.
+      */
+     protected abstract B[] createBucketsArray(int size);
+ 
+     abstract B createBucket(long docCount, InternalAggregations aggs, long docCountError, B prototype);
+ 
+     @Override
+     public boolean equals(Object obj) {
+         if (this == obj) return true;
+         if (obj == null || getClass() != obj.getClass()) return false;
+         if (super.equals(obj) == false) return false;
+ 
+         InternalTerms<?, ?> that = (InternalTerms<?, ?>) obj;
+         return Objects.equals(minDocCount, that.minDocCount)
+             && Objects.equals(reduceOrder, that.reduceOrder)
+             && Objects.equals(order, that.order)
+             && Objects.equals(requiredSize, that.requiredSize);
+     }
+ 
+     @Override
+     public int hashCode() {
+         return Objects.hash(super.hashCode(), minDocCount, reduceOrder, order, requiredSize);
+     }
+ 
+     protected static XContentBuilder doXContentCommon(
+         XContentBuilder builder,
+         Params params,
+         long docCountError,
+         long otherDocCount,
+         List<? extends AbstractInternalBucket> buckets
+     ) throws IOException {
+         builder.field(DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME.getPreferredName(), docCountError);
+         builder.field(SUM_OF_OTHER_DOC_COUNTS.getPreferredName(), otherDocCount);
+         builder.startArray(CommonFields.BUCKETS.getPreferredName());
+         for (AbstractInternalBucket bucket : buckets) {
+             bucket.toXContent(builder, params);
+         }
+         builder.endArray();
+         return builder;
+     }
+ }
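As an aside on the reduceMergeSort path above: each shard contributes a key-sorted bucket list, and a priority queue repeatedly surfaces the smallest current key, so equal keys arrive adjacently and can be reduced together. A self-contained sketch of the same k-way merge over plain JDK types (string keys stand in for buckets; all values hypothetical):

    import java.util.ArrayList;
    import java.util.Comparator;
    import java.util.Iterator;
    import java.util.List;
    import java.util.PriorityQueue;

    public class KeySortedMergeSketch {
        // pairs an iterator with its current element, as IteratorAndCurrent does above
        record Head(String current, Iterator<String> rest) {}

        public static void main(String[] args) {
            List<List<String>> shardBuckets = List.of(
                List.of("apple", "kiwi", "pear"),
                List.of("apple", "banana", "pear")
            );
            PriorityQueue<Head> pq = new PriorityQueue<>(Comparator.comparing(Head::current));
            for (List<String> shard : shardBuckets) {
                Iterator<String> it = shard.iterator();
                if (it.hasNext()) {
                    pq.add(new Head(it.next(), it));
                }
            }
            List<String> merged = new ArrayList<>();
            while (!pq.isEmpty()) {
                Head top = pq.poll();
                // buckets with equal keys surface consecutively, so they can be reduced together
                if (merged.isEmpty() || !merged.get(merged.size() - 1).equals(top.current())) {
                    merged.add(top.current());
                }
                if (top.rest().hasNext()) {
                    pq.add(new Head(top.rest().next(), top.rest()));
                }
            }
            System.out.println(merged); // [apple, banana, kiwi, pear]
        }
    }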
+
+
+
diff --git a/htmlReport/ns-2/index.html b/htmlReport/ns-2/index.html
new file mode 100644
index 0000000000000..8bcc811ad8b3c
--- /dev/null
+++ b/htmlReport/ns-2/index.html
@@ -0,0 +1,332 @@
+ Coverage Report > org.opensearch.search.aggregations.bucket.terms.heuristic
+
+ Coverage Summary for Package: org.opensearch.search.aggregations.bucket.terms.heuristic
+
+ Package                                                     Class, %        Method, %        Line, %
+ org.opensearch.search.aggregations.bucket.terms.heuristic   47.1% (8/17)    11.6% (11/95)    8.1% (22/270)
+
+ Class                      Class, %       Method, %       Line, %
+ ChiSquare                  50%   (1/2)    10%   (1/10)    12%   (3/25)
+ GND                        50%   (1/2)    8.3%  (1/12)    5.7%  (2/35)
+ JLHScore                   50%   (1/2)    18.2% (2/11)    7.7%  (2/26)
+ MutualInformation          50%   (1/2)    9.1%  (1/11)    11.1% (4/36)
+ NXYSignificanceHeuristic   33.3% (1/3)    20%   (3/15)    10.3% (7/68)
+ PercentageScore            50%   (1/2)    8.3%  (1/12)    4.5%  (1/22)
+ ScriptHeuristic            33.3% (1/3)    5%    (1/20)    4.1%  (2/49)
+ SignificanceHeuristic      100%  (1/1)    25%   (1/4)     11.1% (1/9)
diff --git a/htmlReport/ns-2/sources/source-1.html b/htmlReport/ns-2/sources/source-1.html
new file mode 100644
index 0000000000000..c05ba942bcbf0
--- /dev/null
+++ b/htmlReport/ns-2/sources/source-1.html
@@ -0,0 +1,259 @@
+ Coverage Report > ChiSquare
+
+ Coverage Summary for Class: ChiSquare (org.opensearch.search.aggregations.bucket.terms.heuristic)
+
+ Class                        Method, %       Line, %
+ ChiSquare                    12.5% (1/8)     15% (3/20)
+ ChiSquare$ChiSquareBuilder   0%    (0/2)     0%  (0/5)
+ Total                        10%   (1/10)    12% (3/25)
+
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ 
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms.heuristic;
+ 
+ import org.opensearch.core.common.io.stream.StreamInput;
+ import org.opensearch.core.xcontent.ConstructingObjectParser;
+ import org.opensearch.core.xcontent.XContentBuilder;
+ 
+ import java.io.IOException;
+ 
+ /**
+  * ChiSquare significance heuristic for significant terms agg
+  *
+  * @opensearch.internal
+  */
+ public class ChiSquare extends NXYSignificanceHeuristic {
+     public static final String NAME = "chi_square";
+     public static final ConstructingObjectParser<ChiSquare, Void> PARSER = new ConstructingObjectParser<>(
+         NAME,
+         buildFromParsedArgs(ChiSquare::new)
+     );
+     static {
+         NXYSignificanceHeuristic.declareParseFields(PARSER);
+     }
+ 
+     public ChiSquare(boolean includeNegatives, boolean backgroundIsSuperset) {
+         super(includeNegatives, backgroundIsSuperset);
+     }
+ 
+     /**
+      * Read from a stream.
+      */
+     public ChiSquare(StreamInput in) throws IOException {
+         super(in);
+     }
+ 
+     @Override
+     public boolean equals(Object other) {
+         if (!(other instanceof ChiSquare)) {
+             return false;
+         }
+         return super.equals(other);
+     }
+ 
+     @Override
+     public int hashCode() {
+         int result = NAME.hashCode();
+         result = 31 * result + super.hashCode();
+         return result;
+     }
+ 
+     /**
+      * Calculates Chi^2
+      * see "Information Retrieval", Manning et al., Eq. 13.19
+      */
+     @Override
+     public double getScore(long subsetFreq, long subsetSize, long supersetFreq, long supersetSize) {
+         Frequencies frequencies = computeNxys(subsetFreq, subsetSize, supersetFreq, supersetSize, "ChiSquare");
+ 
+         // here we check whether the term appears more often in the subset than in the background without the subset.
+         if (!includeNegatives && frequencies.N11 / frequencies.N_1 < frequencies.N10 / frequencies.N_0) {
+             return Double.NEGATIVE_INFINITY;
+         }
+         return (frequencies.N * Math.pow((frequencies.N11 * frequencies.N00 - frequencies.N01 * frequencies.N10), 2.0) / ((frequencies.N_1)
+             * (frequencies.N1_) * (frequencies.N0_) * (frequencies.N_0)));
+     }
+ 
+     @Override
+     public String getWriteableName() {
+         return NAME;
+     }
+ 
+     @Override
+     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+         builder.startObject(NAME);
+         super.build(builder);
+         builder.endObject();
+         return builder;
+     }
+ 
+     /**
+      * Builder for a chi squared heuristic
+      *
+      * @opensearch.internal
+      */
+     public static class ChiSquareBuilder extends NXYSignificanceHeuristic.NXYBuilder {
+         public ChiSquareBuilder(boolean includeNegatives, boolean backgroundIsSuperset) {
+             super(includeNegatives, backgroundIsSuperset);
+         }
+ 
+         @Override
+         public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+             builder.startObject(NAME);
+             super.build(builder);
+             builder.endObject();
+             return builder;
+         }
+     }
+ }
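A minimal usage sketch for the class above; the constructor flags and counts are hypothetical:

    import org.opensearch.search.aggregations.bucket.terms.heuristic.ChiSquare;

    public class ChiSquareSketch {
        public static void main(String[] args) {
            // includeNegatives = false, backgroundIsSuperset = true
            ChiSquare chiSquare = new ChiSquare(false, true);
            // term in 30 of 100 foreground docs vs 200 of 10,000 background docs
            double score = chiSquare.getScore(30, 100, 200, 10_000);
            System.out.println("chi^2 score: " + score);
        }
    }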
+
+
+
diff --git a/htmlReport/ns-2/sources/source-2.html b/htmlReport/ns-2/sources/source-2.html
new file mode 100644
index 0000000000000..dbe95bc6624b7
--- /dev/null
+++ b/htmlReport/ns-2/sources/source-2.html
@@ -0,0 +1,278 @@
+ Coverage Report > GND
+
+ Coverage Summary for Class: GND (org.opensearch.search.aggregations.bucket.terms.heuristic)
+
+ Class            Method, %       Line, %
+ GND              10%  (1/10)     6.7% (2/30)
+ GND$GNDBuilder   0%   (0/2)      0%   (0/5)
+ Total            8.3% (1/12)     5.7% (2/35)
+
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ 
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms.heuristic;
+ 
+ import org.opensearch.core.common.io.stream.StreamInput;
+ import org.opensearch.core.common.io.stream.StreamOutput;
+ import org.opensearch.core.xcontent.ConstructingObjectParser;
+ import org.opensearch.core.xcontent.XContentBuilder;
+ 
+ import java.io.IOException;
+ 
+ import static org.opensearch.core.xcontent.ConstructingObjectParser.optionalConstructorArg;
+ 
+ /**
+  * GND significance heuristic for significant terms agg
+  *
+  * @opensearch.internal
+  */
+ public class GND extends NXYSignificanceHeuristic {
+     public static final String NAME = "gnd";
+     public static final ConstructingObjectParser<GND, Void> PARSER = new ConstructingObjectParser<>(NAME, args -> {
+         boolean backgroundIsSuperset = args[0] == null ? true : (boolean) args[0];
+         return new GND(backgroundIsSuperset);
+     });
+     static {
+         PARSER.declareBoolean(optionalConstructorArg(), BACKGROUND_IS_SUPERSET);
+     }
+ 
+     public GND(boolean backgroundIsSuperset) {
+         super(true, backgroundIsSuperset);
+     }
+ 
+     /**
+      * Read from a stream.
+      */
+     public GND(StreamInput in) throws IOException {
+         super(true, in.readBoolean());
+     }
+ 
+     @Override
+     public void writeTo(StreamOutput out) throws IOException {
+         out.writeBoolean(backgroundIsSuperset);
+     }
+ 
+     @Override
+     public boolean equals(Object other) {
+         if (!(other instanceof GND)) {
+             return false;
+         }
+         return super.equals(other);
+     }
+ 
+     @Override
+     public int hashCode() {
+         int result = NAME.hashCode();
+         result = 31 * result + super.hashCode();
+         return result;
+     }
+ 
+     /**
+      * Calculates Google Normalized Distance, as described in "The Google Similarity Distance", Cilibrasi and Vitanyi, 2007
+      * link: http://arxiv.org/pdf/cs/0412098v3.pdf
+      */
+     @Override
+     public double getScore(long subsetFreq, long subsetSize, long supersetFreq, long supersetSize) {
+ 
+         Frequencies frequencies = computeNxys(subsetFreq, subsetSize, supersetFreq, supersetSize, "GND");
+         double fx = frequencies.N1_;
+         double fy = frequencies.N_1;
+         double fxy = frequencies.N11;
+         double N = frequencies.N;
+         if (fxy == 0) {
+             // no co-occurrence
+             return 0.0;
+         }
+         if ((fx == fy) && (fx == fxy)) {
+             // perfect co-occurrence
+             return 1.0;
+         }
+         double score = (Math.max(Math.log(fx), Math.log(fy)) - Math.log(fxy)) / (Math.log(N) - Math.min(Math.log(fx), Math.log(fy)));
+ 
+         // we must invert the order of terms because GND scores relevant terms low
+         score = Math.exp(-1.0d * score);
+         return score;
+     }
+ 
+     @Override
+     public String getWriteableName() {
+         return NAME;
+     }
+ 
+     @Override
+     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+         builder.startObject(NAME);
+         builder.field(BACKGROUND_IS_SUPERSET.getPreferredName(), backgroundIsSuperset);
+         builder.endObject();
+         return builder;
+     }
+ 
+     /**
+      * Builder for a GND heuristic
+      *
+      * @opensearch.internal
+      */
+     public static class GNDBuilder extends NXYBuilder {
+         public GNDBuilder(boolean backgroundIsSuperset) {
+             super(true, backgroundIsSuperset);
+         }
+ 
+         @Override
+         public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+             builder.startObject(NAME);
+             builder.field(BACKGROUND_IS_SUPERSET.getPreferredName(), backgroundIsSuperset);
+             builder.endObject();
+             return builder;
+         }
+     }
+ }
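To make the log formula in getScore() above concrete, a small hypothetical sketch with invented frequencies (the names and values are illustrative only):

    // Hypothetical standalone sketch of the GND formula from getScore() above.
    public class GNDSketch {
        public static void main(String[] args) {
            double fx = 200;   // N1_: docs containing the term in the background
            double fy = 300;   // N_1: docs in the subset
            double fxy = 150;  // N11: subset docs containing the term
            double n = 10000;  // N: all docs
            double distance = (Math.max(Math.log(fx), Math.log(fy)) - Math.log(fxy))
                / (Math.log(n) - Math.min(Math.log(fx), Math.log(fy)));
            // GND scores relevant terms low, so the order is inverted via exp(-distance).
            System.out.println("gnd score = " + Math.exp(-1.0d * distance)); // ~0.84
        }
    }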
+
+
+
diff --git a/htmlReport/ns-2/sources/source-3.html b/htmlReport/ns-2/sources/source-3.html
new file mode 100644
index 0000000000000..00ca492d8a315
--- /dev/null
+++ b/htmlReport/ns-2/sources/source-3.html
@@ -0,0 +1,275 @@
+ Coverage Report > JLHScore
+ 
+ Coverage Summary for Class: JLHScore (org.opensearch.search.aggregations.bucket.terms.heuristic)
+ 
+ Class                     Method, %       Line, %
+ JLHScore                  22.2% (2/9)     8.7% (2/23)
+ JLHScore$JLHScoreBuilder  0%    (0/2)     0%   (0/3)
+ Total                     18.2% (2/11)    7.7% (2/26)
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ 
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms.heuristic;
+ 
+ import org.opensearch.core.common.io.stream.StreamInput;
+ import org.opensearch.core.common.io.stream.StreamOutput;
+ import org.opensearch.core.xcontent.ObjectParser;
+ import org.opensearch.core.xcontent.XContentBuilder;
+ 
+ import java.io.IOException;
+ 
+ /**
+  * JLHScore heuristic for significant terms agg
+  *
+  * @opensearch.internal
+  */
+ public class JLHScore extends SignificanceHeuristic {
+     public static final String NAME = "jlh";
+     public static final ObjectParser<JLHScore, Void> PARSER = new ObjectParser<>(NAME, JLHScore::new);
+ 
+     public JLHScore() {}
+ 
+     /**
+      * Read from a stream.
+      */
+     public JLHScore(StreamInput in) {
+         // Nothing to read.
+     }
+ 
+     @Override
+     public void writeTo(StreamOutput out) throws IOException {}
+ 
+     @Override
+     public String getWriteableName() {
+         return NAME;
+     }
+ 
+     /**
+      * Calculates the significance of a term in a sample against a background of
+      * normal distributions by comparing the changes in frequency. This is the heart
+      * of the significant terms feature.
+      */
+     @Override
+     public double getScore(long subsetFreq, long subsetSize, long supersetFreq, long supersetSize) {
+         checkFrequencyValidity(subsetFreq, subsetSize, supersetFreq, supersetSize, "JLHScore");
+         if ((subsetSize == 0) || (supersetSize == 0)) {
+             // avoid any divide by zero issues
+             return 0;
+         }
+         if (supersetFreq == 0) {
+             // If we are using a background context that is not a strict superset, a foreground
+             // term may be missing from the background, so for the purposes of this calculation
+             // we assume a value of 1 for our calculations which avoids returning an "infinity" result
+             supersetFreq = 1;
+         }
+         double subsetProbability = (double) subsetFreq / (double) subsetSize;
+         double supersetProbability = (double) supersetFreq / (double) supersetSize;
+ 
+         // Using absoluteProbabilityChange alone favours very common words, e.g. you and we,
+         // because a doubling in popularity of a common term is a big percent difference
+         // whereas a rare term would have to achieve a hundred-fold increase in popularity to
+         // achieve the same difference measure.
+         // In favouring common words as suggested features for search we would get high
+         // recall but low precision.
+         double absoluteProbabilityChange = subsetProbability - supersetProbability;
+         if (absoluteProbabilityChange <= 0) {
+             return 0;
+         }
+         // Using relativeProbabilityChange tends to favour rarer terms, e.g. mis-spellings or
+         // unique URLs.
+         // A very low-probability term can very easily double in popularity due to the low
+         // numbers required to do so whereas a high-probability term would have to add many
+         // extra individual sightings to achieve the same shift.
+         // In favouring rare words as suggested features for search we would get high
+         // precision but low recall.
+         double relativeProbabilityChange = (subsetProbability / supersetProbability);
+ 
+         // A blend of the above metrics - favours medium-rare terms to strike a useful
+         // balance between precision and recall.
+         return absoluteProbabilityChange * relativeProbabilityChange;
+     }
+ 
+     @Override
+     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+         builder.startObject(NAME).endObject();
+         return builder;
+     }
+ 
+     @Override
+     public boolean equals(Object obj) {
+         if (obj == null || obj.getClass() != getClass()) {
+             return false;
+         }
+         return true;
+     }
+ 
+     @Override
+     public int hashCode() {
+         return getClass().hashCode();
+     }
+ 
+     /**
+      * Builder for a JLH Score heuristic
+      *
+      * @opensearch.internal
+      */
+     public static class JLHScoreBuilder implements SignificanceHeuristicBuilder {
+ 
+         @Override
+         public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+             builder.startObject(NAME).endObject();
+             return builder;
+         }
+     }
+ }
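A brief numeric sketch of the blend computed above may help; the frequencies and the class name are invented for illustration:

    // Hypothetical sketch of the JLH blend: absolute change * relative change.
    public class JLHSketch {
        public static void main(String[] args) {
            long subsetFreq = 50, subsetSize = 1000, supersetFreq = 200, supersetSize = 100000;
            double subsetProbability = (double) subsetFreq / subsetSize;       // 0.05
            double supersetProbability = (double) supersetFreq / supersetSize; // 0.002
            double absoluteChange = subsetProbability - supersetProbability;   // favours common terms
            double relativeChange = subsetProbability / supersetProbability;   // favours rare terms
            // The product favours medium-rare terms, balancing precision and recall.
            double score = absoluteChange <= 0 ? 0 : absoluteChange * relativeChange;
            System.out.println("jlh = " + score); // 0.048 * 25 = 1.2
        }
    }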
+
+
+
diff --git a/htmlReport/ns-2/sources/source-4.html b/htmlReport/ns-2/sources/source-4.html
new file mode 100644
index 0000000000000..e23a885702382
--- /dev/null
+++ b/htmlReport/ns-2/sources/source-4.html
@@ -0,0 +1,298 @@
+ Coverage Report > MutualInformation
+ 
+ Coverage Summary for Class: MutualInformation (org.opensearch.search.aggregations.bucket.terms.heuristic)
+ 
+ Class                                       Method, %      Line, %
+ MutualInformation                           11.1% (1/9)    12.9% (4/31)
+ MutualInformation$MutualInformationBuilder  0%    (0/2)    0%    (0/5)
+ Total                                       9.1%  (1/11)   11.1% (4/36)
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ 
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms.heuristic;
+ 
+ import org.opensearch.core.common.io.stream.StreamInput;
+ import org.opensearch.core.xcontent.ConstructingObjectParser;
+ import org.opensearch.core.xcontent.XContentBuilder;
+ 
+ import java.io.IOException;
+ 
+ /**
+  * Mutual Information significance heuristic for significant terms agg
+  *
+  * @opensearch.internal
+  */
+ public class MutualInformation extends NXYSignificanceHeuristic {
+     public static final String NAME = "mutual_information";
+     public static final ConstructingObjectParser<MutualInformation, Void> PARSER = new ConstructingObjectParser<>(
+         NAME,
+         buildFromParsedArgs(MutualInformation::new)
+     );
+     static {
+         NXYSignificanceHeuristic.declareParseFields(PARSER);
+     }
+ 
+     private static final double log2 = Math.log(2.0);
+ 
+     public MutualInformation(boolean includeNegatives, boolean backgroundIsSuperset) {
+         super(includeNegatives, backgroundIsSuperset);
+     }
+ 
+     /**
+      * Read from a stream.
+      */
+     public MutualInformation(StreamInput in) throws IOException {
+         super(in);
+     }
+ 
+     @Override
+     public boolean equals(Object other) {
+         if (!(other instanceof MutualInformation)) {
+             return false;
+         }
+         return super.equals(other);
+     }
+ 
+     @Override
+     public int hashCode() {
+         int result = NAME.hashCode();
+         result = 31 * result + super.hashCode();
+         return result;
+     }
+ 
+     /**
+      * Calculates mutual information
+      * see "Information Retrieval", Manning et al., Eq. 13.17
+      */
+     @Override
+     public double getScore(long subsetFreq, long subsetSize, long supersetFreq, long supersetSize) {
+         Frequencies frequencies = computeNxys(subsetFreq, subsetSize, supersetFreq, supersetSize, "MutualInformation");
+ 
+         double score = (getMITerm(frequencies.N00, frequencies.N0_, frequencies.N_0, frequencies.N) + getMITerm(
+             frequencies.N01,
+             frequencies.N0_,
+             frequencies.N_1,
+             frequencies.N
+         ) + getMITerm(frequencies.N10, frequencies.N1_, frequencies.N_0, frequencies.N) + getMITerm(
+             frequencies.N11,
+             frequencies.N1_,
+             frequencies.N_1,
+             frequencies.N
+         )) / log2;
+ 
+         if (Double.isNaN(score)) {
+             score = Double.NEGATIVE_INFINITY;
+         }
+         // here we check if the term appears more often in subset than in background without subset.
+         if (!includeNegatives && frequencies.N11 / frequencies.N_1 < frequencies.N10 / frequencies.N_0) {
+             score = Double.NEGATIVE_INFINITY;
+         }
+         return score;
+     }
+ 
+     /*  make sure that
+         0 * log(0/0) = 0
+         0 * log(0) = 0
+         Else, this would be the score:
+         double score =
+                   N11 / N * Math.log((N * N11) / (N1_ * N_1))
+                 + N01 / N * Math.log((N * N01) / (N0_ * N_1))
+                 + N10 / N * Math.log((N * N10) / (N1_ * N_0))
+                 + N00 / N * Math.log((N * N00) / (N0_ * N_0));
+ 
+         but we get many NaNs if we do not take care of the 0s */
+ 
+     double getMITerm(double Nxy, double Nx_, double N_y, double N) {
+         double numerator = Math.abs(N * Nxy);
+         double denominator = Math.abs(Nx_ * N_y);
+         double factor = Math.abs(Nxy / N);
+         if (numerator < 1.e-7 && factor < 1.e-7) {
+             return 0.0;
+         } else {
+             return factor * Math.log(numerator / denominator);
+         }
+     }
+ 
+     @Override
+     public String getWriteableName() {
+         return NAME;
+     }
+ 
+     @Override
+     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+         builder.startObject(NAME);
+         super.build(builder);
+         builder.endObject();
+         return builder;
+     }
+ 
+     /**
+      * Builder for a Mutual Information heuristic
+      *
+      * @opensearch.internal
+      */
+     public static class MutualInformationBuilder extends NXYBuilder {
+         public MutualInformationBuilder(boolean includeNegatives, boolean backgroundIsSuperset) {
+             super(includeNegatives, backgroundIsSuperset);
+         }
+ 
+         @Override
+         public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+             builder.startObject(NAME);
+             super.build(builder);
+             builder.endObject();
+             return builder;
+         }
+     }
+ }
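The zero guard in getMITerm() above is the load-bearing detail; this hypothetical standalone sketch shows the four-term sum with invented counts (all names here are illustrative, not part of the class above):

    // Hypothetical sketch of the guarded mutual-information sum used above.
    public class MutualInformationSketch {
        static double miTerm(double nxy, double nxDot, double nDotY, double n) {
            double numerator = Math.abs(n * nxy);
            double factor = Math.abs(nxy / n);
            if (numerator < 1.e-7 && factor < 1.e-7) {
                return 0.0; // defines 0 * log(0/...) as 0 so the sum is never NaN
            }
            return factor * Math.log(numerator / Math.abs(nxDot * nDotY));
        }

        public static void main(String[] args) {
            // Invented contingency counts; the real heuristic derives these in computeNxys().
            double n11 = 40, n10 = 60, n01 = 160, n00 = 740;
            double n1Dot = n11 + n10, n0Dot = n01 + n00, nDot1 = n11 + n01, nDot0 = n10 + n00;
            double n = n11 + n10 + n01 + n00;
            double score = (miTerm(n00, n0Dot, nDot0, n) + miTerm(n01, n0Dot, nDot1, n)
                + miTerm(n10, n1Dot, nDot0, n) + miTerm(n11, n1Dot, nDot1, n)) / Math.log(2.0);
            System.out.println("mutual information = " + score);
        }
    }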
+
+
+
diff --git a/htmlReport/ns-2/sources/source-5.html b/htmlReport/ns-2/sources/source-5.html
new file mode 100644
index 0000000000000..a2ac465abf9ba
--- /dev/null
+++ b/htmlReport/ns-2/sources/source-5.html
@@ -0,0 +1,370 @@
+ Coverage Report > NXYSignificanceHeuristic
+ 
+ Coverage Summary for Class: NXYSignificanceHeuristic (org.opensearch.search.aggregations.bucket.terms.heuristic)
+ 
+ Class                                 Method, %     Line, %
+ NXYSignificanceHeuristic              25% (3/12)    11.7% (7/60)
+ NXYSignificanceHeuristic$Frequencies  0%  (0/1)     0%    (0/1)
+ NXYSignificanceHeuristic$NXYBuilder   0%  (0/2)     0%    (0/7)
+ Total                                 20% (3/15)    10.3% (7/68)
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ 
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms.heuristic;
+ 
+ import org.opensearch.core.ParseField;
+ import org.opensearch.core.common.io.stream.StreamInput;
+ import org.opensearch.core.common.io.stream.StreamOutput;
+ import org.opensearch.core.xcontent.ConstructingObjectParser;
+ import org.opensearch.core.xcontent.XContentBuilder;
+ 
+ import java.io.IOException;
+ import java.util.function.BiFunction;
+ import java.util.function.Function;
+ 
+ import static org.opensearch.core.xcontent.ConstructingObjectParser.optionalConstructorArg;
+ 
+ /**
+  * NXY significance heuristic for significant terms agg
+  *
+  * @opensearch.internal
+  */
+ public abstract class NXYSignificanceHeuristic extends SignificanceHeuristic {
+ 
+     protected static final ParseField BACKGROUND_IS_SUPERSET = new ParseField("background_is_superset");
+ 
+     protected static final ParseField INCLUDE_NEGATIVES_FIELD = new ParseField("include_negatives");
+ 
+     protected static final String SCORE_ERROR_MESSAGE = ", does your background filter not include all documents in the bucket? "
+         + "If so and it is intentional, set \""
+         + BACKGROUND_IS_SUPERSET.getPreferredName()
+         + "\": false";
+ 
+     protected final boolean backgroundIsSuperset;
+ 
+     /**
+      * Some heuristics do not differentiate between terms that are descriptive for subset or for
+      * the background without the subset. We might want to filter out the terms that appear much less often
+      * in the subset than in the background without the subset.
+      */
+     protected final boolean includeNegatives;
+ 
+     protected NXYSignificanceHeuristic(boolean includeNegatives, boolean backgroundIsSuperset) {
+         this.includeNegatives = includeNegatives;
+         this.backgroundIsSuperset = backgroundIsSuperset;
+     }
+ 
+     /**
+      * Read from a stream.
+      */
+     protected NXYSignificanceHeuristic(StreamInput in) throws IOException {
+         includeNegatives = in.readBoolean();
+         backgroundIsSuperset = in.readBoolean();
+     }
+ 
+     @Override
+     public void writeTo(StreamOutput out) throws IOException {
+         out.writeBoolean(includeNegatives);
+         out.writeBoolean(backgroundIsSuperset);
+     }
+ 
+     @Override
+     public boolean equals(Object obj) {
+         if (this == obj) return true;
+         if (obj == null) return false;
+         if (getClass() != obj.getClass()) return false;
+         NXYSignificanceHeuristic other = (NXYSignificanceHeuristic) obj;
+         if (backgroundIsSuperset != other.backgroundIsSuperset) return false;
+         if (includeNegatives != other.includeNegatives) return false;
+         return true;
+     }
+ 
+     @Override
+     public int hashCode() {
+         int result = (includeNegatives ? 1 : 0);
+         result = 31 * result + (backgroundIsSuperset ? 1 : 0);
+         return result;
+     }
+ 
+     /**
+      * Frequencies for an NXY significance heuristic
+      *
+      * @opensearch.internal
+      */
+     protected static class Frequencies {
+         double N00, N01, N10, N11, N0_, N1_, N_0, N_1, N;
+     }
+ 
+     protected Frequencies computeNxys(long subsetFreq, long subsetSize, long supersetFreq, long supersetSize, String scoreFunctionName) {
+         checkFrequencies(subsetFreq, subsetSize, supersetFreq, supersetSize, scoreFunctionName);
+         Frequencies frequencies = new Frequencies();
+         if (backgroundIsSuperset) {
+             // documents not in class and do not contain term
+             frequencies.N00 = supersetSize - supersetFreq - (subsetSize - subsetFreq);
+             // documents in class and do not contain term
+             frequencies.N01 = (subsetSize - subsetFreq);
+             // documents not in class and do contain term
+             frequencies.N10 = supersetFreq - subsetFreq;
+             // documents in class and do contain term
+             frequencies.N11 = subsetFreq;
+             // documents that do not contain term
+             frequencies.N0_ = supersetSize - supersetFreq;
+             // documents that contain term
+             frequencies.N1_ = supersetFreq;
+             // documents that are not in class
+             frequencies.N_0 = supersetSize - subsetSize;
+             // documents that are in class
+             frequencies.N_1 = subsetSize;
+             // all docs
+             frequencies.N = supersetSize;
+         } else {
+             // documents not in class and do not contain term
+             frequencies.N00 = supersetSize - supersetFreq;
+             // documents in class and do not contain term
+             frequencies.N01 = subsetSize - subsetFreq;
+             // documents not in class and do contain term
+             frequencies.N10 = supersetFreq;
+             // documents in class and do contain term
+             frequencies.N11 = subsetFreq;
+             // documents that do not contain term
+             frequencies.N0_ = supersetSize - supersetFreq + subsetSize - subsetFreq;
+             // documents that contain term
+             frequencies.N1_ = supersetFreq + subsetFreq;
+             // documents that are not in class
+             frequencies.N_0 = supersetSize;
+             // documents that are in class
+             frequencies.N_1 = subsetSize;
+             // all docs
+             frequencies.N = supersetSize + subsetSize;
+         }
+         return frequencies;
+     }
+ 
+     protected void checkFrequencies(long subsetFreq, long subsetSize, long supersetFreq, long supersetSize, String scoreFunctionName) {
+         checkFrequencyValidity(subsetFreq, subsetSize, supersetFreq, supersetSize, scoreFunctionName);
+         if (backgroundIsSuperset) {
+             if (subsetFreq > supersetFreq) {
+                 throw new IllegalArgumentException("subsetFreq > supersetFreq" + SCORE_ERROR_MESSAGE);
+             }
+             if (subsetSize > supersetSize) {
+                 throw new IllegalArgumentException("subsetSize > supersetSize" + SCORE_ERROR_MESSAGE);
+             }
+             if (supersetFreq - subsetFreq > supersetSize - subsetSize) {
+                 throw new IllegalArgumentException("supersetFreq - subsetFreq > supersetSize - subsetSize" + SCORE_ERROR_MESSAGE);
+             }
+         }
+     }
+ 
+     protected void build(XContentBuilder builder) throws IOException {
+         builder.field(INCLUDE_NEGATIVES_FIELD.getPreferredName(), includeNegatives)
+             .field(BACKGROUND_IS_SUPERSET.getPreferredName(), backgroundIsSuperset);
+     }
+ 
+     /**
+      * Set up an {@linkplain ConstructingObjectParser} to accept the standard arguments for an {@linkplain NXYSignificanceHeuristic}.
+      */
+     protected static void declareParseFields(ConstructingObjectParser<? extends NXYSignificanceHeuristic, ?> parser) {
+         parser.declareBoolean(optionalConstructorArg(), INCLUDE_NEGATIVES_FIELD);
+         parser.declareBoolean(optionalConstructorArg(), BACKGROUND_IS_SUPERSET);
+     }
+ 
+     /**
+      * Adapt a standard two argument ctor into one that consumes a {@linkplain ConstructingObjectParser}'s fields.
+      */
+     protected static <T> Function<Object[], T> buildFromParsedArgs(BiFunction<Boolean, Boolean, T> ctor) {
+         return args -> {
+             boolean includeNegatives = args[0] == null ? false : (boolean) args[0];
+             boolean backgroundIsSuperset = args[1] == null ? true : (boolean) args[1];
+             return ctor.apply(includeNegatives, backgroundIsSuperset);
+         };
+     }
+ 
+     /**
+      * Builder for a NXY Significance heuristic
+      *
+      * @opensearch.internal
+      */
+     protected abstract static class NXYBuilder implements SignificanceHeuristicBuilder {
+         protected boolean includeNegatives = true;
+         protected boolean backgroundIsSuperset = true;
+ 
+         public NXYBuilder(boolean includeNegatives, boolean backgroundIsSuperset) {
+             this.includeNegatives = includeNegatives;
+             this.backgroundIsSuperset = backgroundIsSuperset;
+         }
+ 
+         protected void build(XContentBuilder builder) throws IOException {
+             builder.field(INCLUDE_NEGATIVES_FIELD.getPreferredName(), includeNegatives)
+                 .field(BACKGROUND_IS_SUPERSET.getPreferredName(), backgroundIsSuperset);
+         }
+     }
+ }
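The two branches of computeNxys() above differ only in whether the superset counts already include the subset; this hypothetical sketch traces the backgroundIsSuperset = true branch with invented counts:

    // Hypothetical walk-through of the backgroundIsSuperset contingency cells.
    public class FrequenciesSketch {
        public static void main(String[] args) {
            long subsetFreq = 40, subsetSize = 200, supersetFreq = 100, supersetSize = 1000;
            double n11 = subsetFreq;                // in subset, contains term
            double n01 = subsetSize - subsetFreq;   // in subset, no term
            double n10 = supersetFreq - subsetFreq; // outside subset, contains term
            double n00 = supersetSize - supersetFreq - (subsetSize - subsetFreq); // neither
            // N11=40, N01=160, N10=60, N00=740; the four cells sum to N = supersetSize = 1000.
            System.out.printf("N11=%.0f N01=%.0f N10=%.0f N00=%.0f N=%d%n", n11, n01, n10, n00, supersetSize);
        }
    }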
+
+
+
diff --git a/htmlReport/ns-2/sources/source-6.html b/htmlReport/ns-2/sources/source-6.html
new file mode 100644
index 0000000000000..c6e52d72f2503
--- /dev/null
+++ b/htmlReport/ns-2/sources/source-6.html
@@ -0,0 +1,255 @@
+ Coverage Report > PercentageScore
+ 
+ Coverage Summary for Class: PercentageScore (org.opensearch.search.aggregations.bucket.terms.heuristic)
+ 
+ Class                                   Method, %      Line, %
+ PercentageScore                         10%  (1/10)    5.3% (1/19)
+ PercentageScore$PercentageScoreBuilder  0%   (0/2)     0%   (0/3)
+ Total                                   8.3% (1/12)    4.5% (1/22)
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ 
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms.heuristic;
+ 
+ import org.opensearch.OpenSearchParseException;
+ import org.opensearch.core.common.io.stream.StreamInput;
+ import org.opensearch.core.common.io.stream.StreamOutput;
+ import org.opensearch.core.xcontent.ObjectParser;
+ import org.opensearch.core.xcontent.XContentBuilder;
+ import org.opensearch.core.xcontent.XContentParser;
+ import org.opensearch.index.query.QueryShardException;
+ 
+ import java.io.IOException;
+ 
+ /**
+  * Percentage score significance heuristic for significant terms agg
+  *
+  * @opensearch.internal
+  */
+ public class PercentageScore extends SignificanceHeuristic {
+     public static final String NAME = "percentage";
+     public static final ObjectParser<PercentageScore, Void> PARSER = new ObjectParser<>(NAME, PercentageScore::new);
+ 
+     public PercentageScore() {}
+ 
+     public PercentageScore(StreamInput in) {
+         // Nothing to read.
+     }
+ 
+     @Override
+     public void writeTo(StreamOutput out) throws IOException {}
+ 
+     @Override
+     public String getWriteableName() {
+         return NAME;
+     }
+ 
+     @Override
+     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+         builder.startObject(NAME).endObject();
+         return builder;
+     }
+ 
+     public static SignificanceHeuristic parse(XContentParser parser) throws IOException, QueryShardException {
+         // move to the closing bracket
+         if (!parser.nextToken().equals(XContentParser.Token.END_OBJECT)) {
+             throw new OpenSearchParseException(
+                 "failed to parse [percentage] significance heuristic. expected an empty object, " + "but got [{}] instead",
+                 parser.currentToken()
+             );
+         }
+         return new PercentageScore();
+     }
+ 
+     /**
+      * Indicates the significance of a term in a sample by determining what percentage
+      * of all occurrences of a term are found in the sample.
+      */
+     @Override
+     public double getScore(long subsetFreq, long subsetSize, long supersetFreq, long supersetSize) {
+         checkFrequencyValidity(subsetFreq, subsetSize, supersetFreq, supersetSize, "PercentageScore");
+         if (supersetFreq == 0) {
+             // avoid a divide by zero issue
+             return 0;
+         }
+         return (double) subsetFreq / (double) supersetFreq;
+     }
+ 
+     @Override
+     public boolean equals(Object obj) {
+         if (obj == null || obj.getClass() != getClass()) {
+             return false;
+         }
+         return true;
+     }
+ 
+     @Override
+     public int hashCode() {
+         return getClass().hashCode();
+     }
+ 
+     /**
+      * Builder for a Percentage Score heuristic
+      *
+      * @opensearch.internal
+      */
+     public static class PercentageScoreBuilder implements SignificanceHeuristicBuilder {
+ 
+         @Override
+         public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+             builder.startObject(NAME).endObject();
+             return builder;
+         }
+     }
+ }
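Since the score above is simply the in-sample share of all occurrences of the term, a short hypothetical example suffices (names and values invented):

    // Hypothetical numeric example of the percentage score above.
    public class PercentageSketch {
        public static void main(String[] args) {
            long subsetFreq = 30, supersetFreq = 120;
            double score = supersetFreq == 0 ? 0 : (double) subsetFreq / (double) supersetFreq;
            System.out.println("percentage = " + score); // 0.25: a quarter of all occurrences are in the sample
        }
    }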
+
+
+
diff --git a/htmlReport/ns-2/sources/source-7.html b/htmlReport/ns-2/sources/source-7.html
new file mode 100644
index 0000000000000..6ccf3b01a100b
--- /dev/null
+++ b/htmlReport/ns-2/sources/source-7.html
@@ -0,0 +1,367 @@
+ Coverage Report > ScriptHeuristic
+ 
+ Coverage Summary for Class: ScriptHeuristic (org.opensearch.search.aggregations.bucket.terms.heuristic)
+ 
+ Class                                      Method, %      Line, %
+ ScriptHeuristic                            8.3% (1/12)    7.7% (2/26)
+ ScriptHeuristic$ExecutableScriptHeuristic  0%   (0/2)     0%   (0/17)
+ ScriptHeuristic$LongAccessor               0%   (0/6)     0%   (0/6)
+ Total                                      5%   (1/20)    4.1% (2/49)
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ 
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms.heuristic;
+ 
+ import org.opensearch.core.common.io.stream.StreamInput;
+ import org.opensearch.core.common.io.stream.StreamOutput;
+ import org.opensearch.core.xcontent.ConstructingObjectParser;
+ import org.opensearch.core.xcontent.XContentBuilder;
+ import org.opensearch.index.query.QueryShardContext;
+ import org.opensearch.script.Script;
+ import org.opensearch.script.SignificantTermsHeuristicScoreScript;
+ import org.opensearch.search.aggregations.InternalAggregation;
+ 
+ import java.io.IOException;
+ import java.util.HashMap;
+ import java.util.Map;
+ import java.util.Objects;
+ 
+ import static org.opensearch.core.xcontent.ConstructingObjectParser.constructorArg;
+ 
+ /**
+  * Script significance heuristic for significant terms agg
+  *
+  * @opensearch.internal
+  */
+ public class ScriptHeuristic extends SignificanceHeuristic {
+     public static final String NAME = "script_heuristic";
+     public static final ConstructingObjectParser<ScriptHeuristic, Void> PARSER = new ConstructingObjectParser<>(
+         NAME,
+         args -> new ScriptHeuristic((Script) args[0])
+     );
+     static {
+         Script.declareScript(PARSER, constructorArg());
+     }
+ 
+     private final Script script;
+ 
+     /**
+      * This class holds an executable form of the script with private variables ready for execution
+      * on a single search thread.
+      *
+      * @opensearch.internal
+      */
+     static class ExecutableScriptHeuristic extends ScriptHeuristic {
+         private final LongAccessor subsetSizeHolder;
+         private final LongAccessor supersetSizeHolder;
+         private final LongAccessor subsetDfHolder;
+         private final LongAccessor supersetDfHolder;
+         private final SignificantTermsHeuristicScoreScript executableScript;
+         private final Map<String, Object> params = new HashMap<>();
+ 
+         ExecutableScriptHeuristic(Script script, SignificantTermsHeuristicScoreScript executableScript) {
+             super(script);
+             subsetSizeHolder = new LongAccessor();
+             supersetSizeHolder = new LongAccessor();
+             subsetDfHolder = new LongAccessor();
+             supersetDfHolder = new LongAccessor();
+             this.executableScript = executableScript;
+             params.putAll(script.getParams());
+             params.put("_subset_freq", subsetDfHolder);
+             params.put("_subset_size", subsetSizeHolder);
+             params.put("_superset_freq", supersetDfHolder);
+             params.put("_superset_size", supersetSizeHolder);
+         }
+ 
+         @Override
+         public double getScore(long subsetFreq, long subsetSize, long supersetFreq, long supersetSize) {
+             subsetSizeHolder.value = subsetSize;
+             supersetSizeHolder.value = supersetSize;
+             subsetDfHolder.value = subsetFreq;
+             supersetDfHolder.value = supersetFreq;
+             return executableScript.execute(params);
+         }
+     }
+ 
+     public ScriptHeuristic(Script script) {
+         this.script = script;
+     }
+ 
+     /**
+      * Read from a stream.
+      */
+     public ScriptHeuristic(StreamInput in) throws IOException {
+         this(new Script(in));
+     }
+ 
+     @Override
+     public void writeTo(StreamOutput out) throws IOException {
+         script.writeTo(out);
+     }
+ 
+     @Override
+     public SignificanceHeuristic rewrite(InternalAggregation.ReduceContext context) {
+         SignificantTermsHeuristicScoreScript.Factory factory = context.scriptService()
+             .compile(script, SignificantTermsHeuristicScoreScript.CONTEXT);
+         return new ExecutableScriptHeuristic(script, factory.newInstance());
+     }
+ 
+     @Override
+     public SignificanceHeuristic rewrite(QueryShardContext queryShardContext) {
+         SignificantTermsHeuristicScoreScript.Factory compiledScript = queryShardContext.compile(
+             script,
+             SignificantTermsHeuristicScoreScript.CONTEXT
+         );
+         return new ExecutableScriptHeuristic(script, compiledScript.newInstance());
+     }
+ 
+     /**
+      * Calculates score with a script
+      *
+      * @param subsetFreq   The frequency of the term in the selected sample
+      * @param subsetSize   The size of the selected sample (typically number of docs)
+      * @param supersetFreq The frequency of the term in the superset from which the sample was taken
+      * @param supersetSize The size of the superset from which the sample was taken  (typically number of docs)
+      * @return a "significance" score
+      */
+     @Override
+     public double getScore(long subsetFreq, long subsetSize, long supersetFreq, long supersetSize) {
+         throw new UnsupportedOperationException(
+             "This scoring heuristic must have 'rewrite' called on it to provide a version ready " + "for use"
+         );
+     }
+ 
+     @Override
+     public String getWriteableName() {
+         return NAME;
+     }
+ 
+     @Override
+     public XContentBuilder toXContent(XContentBuilder builder, Params builderParams) throws IOException {
+         builder.startObject(NAME);
+         builder.field(Script.SCRIPT_PARSE_FIELD.getPreferredName());
+         script.toXContent(builder, builderParams);
+         builder.endObject();
+         return builder;
+     }
+ 
+     @Override
+     public int hashCode() {
+         return Objects.hash(script);
+     }
+ 
+     @Override
+     public boolean equals(Object obj) {
+         if (obj == null) {
+             return false;
+         }
+         if (getClass() != obj.getClass()) {
+             return false;
+         }
+         ScriptHeuristic other = (ScriptHeuristic) obj;
+         return Objects.equals(script, other.script);
+     }
+ 
+     /**
+      * Accesses long values
+      *
+      * @opensearch.internal
+      */
+     public final class LongAccessor extends Number {
+         public long value;
+ 
+         @Override
+         public int intValue() {
+             return (int) value;
+         }
+ 
+         @Override
+         public long longValue() {
+             return value;
+         }
+ 
+         @Override
+         public float floatValue() {
+             return value;
+         }
+ 
+         @Override
+         public double doubleValue() {
+             return value;
+         }
+ 
+         @Override
+         public String toString() {
+             return Long.toString(value);
+         }
+     }
+ }
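The parameter plumbing in ExecutableScriptHeuristic above defines the contract a user script sees; this hypothetical sketch mimics it with a plain map, with the inline arithmetic standing in for a real compiled SignificantTermsHeuristicScoreScript:

    // Hypothetical sketch of the parameter names exposed to a heuristic script.
    import java.util.HashMap;
    import java.util.Map;

    public class ScriptHeuristicSketch {
        public static void main(String[] args) {
            Map<String, Object> params = new HashMap<>();
            params.put("_subset_freq", 40L);
            params.put("_subset_size", 200L);
            params.put("_superset_freq", 100L);
            params.put("_superset_size", 1000L);
            // Equivalent of a script body such as "params._subset_freq / params._subset_size".
            double score = ((Number) params.get("_subset_freq")).doubleValue()
                / ((Number) params.get("_subset_size")).doubleValue();
            System.out.println("script score = " + score); // 0.2
        }
    }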
+
+
+
diff --git a/htmlReport/ns-2/sources/source-8.html b/htmlReport/ns-2/sources/source-8.html
new file mode 100644
index 0000000000000..18a0bb0e9b8b9
--- /dev/null
+++ b/htmlReport/ns-2/sources/source-8.html
@@ -0,0 +1,199 @@
+ Coverage Report > SignificanceHeuristic
+ 
+ Coverage Summary for Class: SignificanceHeuristic (org.opensearch.search.aggregations.bucket.terms.heuristic)
+ 
+ Class                  Class, %      Method, %    Line, %
+ SignificanceHeuristic  100% (1/1)    25% (1/4)    11.1% (1/9)
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ 
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms.heuristic;
+ 
+ import org.opensearch.core.common.io.stream.NamedWriteable;
+ import org.opensearch.core.xcontent.ToXContentFragment;
+ import org.opensearch.index.query.QueryShardContext;
+ import org.opensearch.search.aggregations.InternalAggregation;
+ import org.opensearch.search.aggregations.bucket.terms.SignificantTerms;
+ 
+ /**
+  * Heuristic that {@link SignificantTerms} uses to pick out significant terms.
+  *
+  * @opensearch.internal
+  */
+ public abstract class SignificanceHeuristic implements NamedWriteable, ToXContentFragment {
+     /**
+      * @param subsetFreq   The frequency of the term in the selected sample
+      * @param subsetSize   The size of the selected sample (typically number of docs)
+      * @param supersetFreq The frequency of the term in the superset from which the sample was taken
+      * @param supersetSize The size of the superset from which the sample was taken  (typically number of docs)
+      * @return a "significance" score
+      */
+     public abstract double getScore(long subsetFreq, long subsetSize, long supersetFreq, long supersetSize);
+ 
+     protected void checkFrequencyValidity(
+         long subsetFreq,
+         long subsetSize,
+         long supersetFreq,
+         long supersetSize,
+         String scoreFunctionName
+     ) {
+         if (subsetFreq < 0 || subsetSize < 0 || supersetFreq < 0 || supersetSize < 0) {
+             throw new IllegalArgumentException(
+                 "Frequencies of subset and superset must be positive in " + scoreFunctionName + ".getScore()"
+             );
+         }
+         if (subsetFreq > subsetSize) {
+             throw new IllegalArgumentException("subsetFreq > subsetSize, in " + scoreFunctionName);
+         }
+         if (supersetFreq > supersetSize) {
+             throw new IllegalArgumentException("supersetFreq > supersetSize, in " + scoreFunctionName);
+         }
+     }
+ 
+     /**
+      * Provides a hook for subclasses to provide a version of the heuristic
+      * prepared for execution on data on the coordinating node.
+      * @param reduceContext the reduce context on the coordinating node
+      * @return a version of this heuristic suitable for execution
+      */
+     public SignificanceHeuristic rewrite(InternalAggregation.ReduceContext reduceContext) {
+         return this;
+     }
+ 
+     /**
+      * Provides a hook for subclasses to provide a version of the heuristic
+      * prepared for execution on data on a shard.
+      * @param queryShardContext the shard context on the data node
+      * @return a version of this heuristic suitable for execution
+      */
+     public SignificanceHeuristic rewrite(QueryShardContext queryShardContext) {
+         return this;
+     }
+ }
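To show the contract this base class defines, here is a hypothetical minimal subclass; the "simple_ratio" name and the score formula are invented for illustration, and a real implementation would also need to be registered as a NamedWriteable:

    // Hypothetical minimal SignificanceHeuristic subclass (illustrative only).
    public class SimpleRatioHeuristic extends SignificanceHeuristic {
        @Override
        public double getScore(long subsetFreq, long subsetSize, long supersetFreq, long supersetSize) {
            checkFrequencyValidity(subsetFreq, subsetSize, supersetFreq, supersetSize, "SimpleRatioHeuristic");
            if (subsetSize == 0 || supersetSize == 0 || supersetFreq == 0) {
                return 0;
            }
            // Ratio of in-sample probability to background probability.
            return ((double) subsetFreq / subsetSize) / ((double) supersetFreq / supersetSize);
        }

        @Override
        public String getWriteableName() {
            return "simple_ratio";
        }

        @Override
        public void writeTo(org.opensearch.core.common.io.stream.StreamOutput out) {}

        @Override
        public org.opensearch.core.xcontent.XContentBuilder toXContent(
            org.opensearch.core.xcontent.XContentBuilder builder,
            Params params
        ) throws java.io.IOException {
            return builder.startObject("simple_ratio").endObject();
        }
    }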
+
+
+
diff --git a/htmlReport/ns-2/sources/source-9.html b/htmlReport/ns-2/sources/source-9.html
new file mode 100644
index 0000000000000..79315caeb62f3
--- /dev/null
+++ b/htmlReport/ns-2/sources/source-9.html
@@ -0,0 +1,110 @@
+ Coverage Report > SignificanceHeuristicBuilder
+ 
+ Coverage Summary for Class: SignificanceHeuristicBuilder (org.opensearch.search.aggregations.bucket.terms.heuristic)
+ 
+ Class
+ SignificanceHeuristicBuilder
+ /*
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * The OpenSearch Contributors require contributions made to
+  * this file be licensed under the Apache-2.0 license or a
+  * compatible open source license.
+  */
+ 
+ /*
+  * Licensed to Elasticsearch under one or more contributor
+  * license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright
+  * ownership. Elasticsearch licenses this file to you under
+  * the Apache License, Version 2.0 (the "License"); you may
+  * not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *    http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ 
+ /*
+  * Modifications Copyright OpenSearch Contributors. See
+  * GitHub history for details.
+  */
+ 
+ package org.opensearch.search.aggregations.bucket.terms.heuristic;
+ 
+ import org.opensearch.core.xcontent.ToXContentFragment;
+ 
+ /**
+  * Base builder class for significance heuristics
+  *
+  * @opensearch.internal
+  */
+ public interface SignificanceHeuristicBuilder extends ToXContentFragment {}
+
+
+
+ + + + + + diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java index 2a83710117e74..15e538f01e632 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java @@ -40,6 +40,7 @@ import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; +import org.apache.lucene.search.CollectionTerminatedException; import org.apache.lucene.search.Weight; import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.BytesRef; @@ -162,7 +163,7 @@ public void setWeight(Weight weight) { @param ctx The LeafReaderContext to collect terms from @param globalOrds The SortedSetDocValues for the field's ordinals @param ordCountConsumer A consumer to accept collected term frequencies - @return A no-operation LeafBucketCollector implementation, since collection is complete + @return A LeafBucketCollector implementation with collection termination, since collection is complete @throws IOException If an I/O error occurs during reading */ LeafBucketCollector termDocFreqCollector( @@ -217,7 +218,12 @@ LeafBucketCollector termDocFreqCollector( ordinalTerm = globalOrdinalTermsEnum.next(); } } - return LeafBucketCollector.NO_OP_COLLECTOR; + return new LeafBucketCollector() { + @Override + public void collect(int doc, long owningBucketOrd) throws IOException { + throw new CollectionTerminatedException(); + } + }; } @Override diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/KeywordTermsAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/KeywordTermsAggregatorTests.java index 5d1e02116f189..753644dce81d5 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/KeywordTermsAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/KeywordTermsAggregatorTests.java @@ -32,7 +32,6 @@ package org.opensearch.search.aggregations.bucket.terms; import org.apache.lucene.document.Document; -import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.search.IndexSearcher; @@ -41,11 +40,9 @@ import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; import org.apache.lucene.tests.index.RandomIndexWriter; -import org.apache.lucene.util.BytesRef; import org.opensearch.common.TriConsumer; import org.opensearch.index.mapper.KeywordFieldMapper; import org.opensearch.index.mapper.MappedFieldType; -import org.opensearch.search.aggregations.Aggregator; import org.opensearch.search.aggregations.AggregatorTestCase; import org.opensearch.search.aggregations.support.ValueType; @@ -59,6 +56,8 @@ public class KeywordTermsAggregatorTests extends AggregatorTestCase { private static final String KEYWORD_FIELD = "keyword"; + private static final Consumer CONFIGURE_KEYWORD_FIELD = agg -> agg.field(KEYWORD_FIELD); + private static final List dataset; static { List d = new ArrayList<>(45); @@ -70,7 +69,7 @@ public class KeywordTermsAggregatorTests extends AggregatorTestCase { dataset = d; } - private static Consumer VERIFY_MATCH_ALL_DOCS = agg -> { + private 
static final Consumer VERIFY_MATCH_ALL_DOCS = agg -> { assertEquals(9, agg.getBuckets().size()); for (int i = 0; i < 9; i++) { StringTerms.Bucket bucket = (StringTerms.Bucket) agg.getBuckets().get(i); @@ -79,77 +78,49 @@ public class KeywordTermsAggregatorTests extends AggregatorTestCase { } }; - private static Query MATCH_ALL_DOCS_QUERY = new MatchAllDocsQuery(); + private static final Consumer VERIFY_MATCH_NO_DOCS = agg -> { assertEquals(0, agg.getBuckets().size()); }; + + private static final Query MATCH_ALL_DOCS_QUERY = new MatchAllDocsQuery(); - private static Query MATCH_NO_DOCS_QUERY = new MatchNoDocsQuery(); + private static final Query MATCH_NO_DOCS_QUERY = new MatchNoDocsQuery(); public void testMatchNoDocs() throws IOException { testSearchCase( - ADD_SORTED_FIELD_NO_STORE, + ADD_SORTED_SET_FIELD_NOT_INDEXED, MATCH_NO_DOCS_QUERY, dataset, - aggregation -> aggregation.field(KEYWORD_FIELD), - agg -> assertEquals(0, agg.getBuckets().size()), - null, // without type hint - DEFAULT_POST_COLLECTION + CONFIGURE_KEYWORD_FIELD, + VERIFY_MATCH_NO_DOCS, + null // without type hint ); testSearchCase( - ADD_SORTED_FIELD_NO_STORE, + ADD_SORTED_SET_FIELD_NOT_INDEXED, MATCH_NO_DOCS_QUERY, dataset, - aggregation -> aggregation.field(KEYWORD_FIELD), - agg -> assertEquals(0, agg.getBuckets().size()), - ValueType.STRING, // with type hint - DEFAULT_POST_COLLECTION + CONFIGURE_KEYWORD_FIELD, + VERIFY_MATCH_NO_DOCS, + ValueType.STRING // with type hint ); } public void testMatchAllDocs() throws IOException { testSearchCase( - ADD_SORTED_FIELD_NO_STORE, - MATCH_ALL_DOCS_QUERY, - dataset, - aggregation -> aggregation.field(KEYWORD_FIELD), - VERIFY_MATCH_ALL_DOCS, - null, // without type hint - DEFAULT_POST_COLLECTION - ); - - testSearchCase( - ADD_SORTED_FIELD_NO_STORE, - MATCH_ALL_DOCS_QUERY, - dataset, - aggregation -> aggregation.field(KEYWORD_FIELD), - VERIFY_MATCH_ALL_DOCS, - ValueType.STRING, // with type hint - DEFAULT_POST_COLLECTION - ); - } - - public void testMatchAllDocsWithStoredValues() throws IOException { - // aggregator.postCollection() is not required when LeafBucketCollector#termDocFreqCollector optimization is used, - // therefore using NOOP_POST_COLLECTION - // This also verifies that the bucket count is completed without running postCollection() - - testSearchCase( - ADD_SORTED_FIELD_STORE, + ADD_SORTED_SET_FIELD_NOT_INDEXED, MATCH_ALL_DOCS_QUERY, dataset, - aggregation -> aggregation.field(KEYWORD_FIELD), + CONFIGURE_KEYWORD_FIELD, VERIFY_MATCH_ALL_DOCS, - null, // without type hint - NOOP_POST_COLLECTION + null // without type hint ); testSearchCase( - ADD_SORTED_FIELD_STORE, + ADD_SORTED_SET_FIELD_NOT_INDEXED, MATCH_ALL_DOCS_QUERY, dataset, - aggregation -> aggregation.field(KEYWORD_FIELD), + CONFIGURE_KEYWORD_FIELD, VERIFY_MATCH_ALL_DOCS, - ValueType.STRING, // with type hint - NOOP_POST_COLLECTION + ValueType.STRING // with type hint ); } @@ -159,15 +130,13 @@ private void testSearchCase( List dataset, Consumer configure, Consumer verify, - ValueType valueType, - Consumer postCollectionConsumer + ValueType valueType ) throws IOException { try (Directory directory = newDirectory()) { try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { Document document = new Document(); for (String value : dataset) { addField.apply(document, KEYWORD_FIELD, value); - document.add(new SortedSetDocValuesField(KEYWORD_FIELD, new BytesRef(value))); indexWriter.addDocument(document); document.clear(); } diff --git 
a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorTests.java index 93939657b6981..b8a08068f76a3 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorTests.java @@ -44,6 +44,7 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.Term; import org.apache.lucene.search.DocValuesFieldExistsQuery; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; @@ -121,6 +122,7 @@ import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BiFunction; import java.util.function.Consumer; import java.util.function.Function; @@ -137,9 +139,6 @@ import static org.mockito.Mockito.when; public class TermsAggregatorTests extends AggregatorTestCase { - - private boolean randomizeAggregatorImpl = true; - // Constants for a script that returns a string private static final String STRING_SCRIPT_NAME = "string_script"; private static final String STRING_SCRIPT_OUTPUT = "Orange"; @@ -172,9 +171,22 @@ protected ScriptService getMockScriptService() { return new ScriptService(Settings.EMPTY, engines, ScriptModule.CORE_CONTEXTS); } + protected CountingAggregator createCountingAggregator( + AggregationBuilder aggregationBuilder, + IndexSearcher indexSearcher, + boolean randomizeAggregatorImpl, + MappedFieldType... fieldTypes + ) throws IOException { + return new CountingAggregator( + new AtomicInteger(), + createAggregator(aggregationBuilder, indexSearcher, randomizeAggregatorImpl, fieldTypes) + ); + } + protected A createAggregator( AggregationBuilder aggregationBuilder, IndexSearcher indexSearcher, + boolean randomizeAggregatorImpl, MappedFieldType... fieldTypes ) throws IOException { try { @@ -189,6 +201,14 @@ protected A createAggregator( } } + protected A createAggregator( + AggregationBuilder aggregationBuilder, + IndexSearcher indexSearcher, + MappedFieldType... 
fieldTypes + ) throws IOException { + return createAggregator(aggregationBuilder, indexSearcher, true, fieldTypes); + } + @Override protected AggregationBuilder createAggBuilderForTypeTest(MappedFieldType fieldType, String fieldName) { return new TermsAggregationBuilder("foo").field(fieldName); @@ -208,8 +228,6 @@ protected List getSupportedValuesSourceTypes() { } public void testUsesGlobalOrdinalsByDefault() throws Exception { - randomizeAggregatorImpl = false; - Directory directory = newDirectory(); RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory); indexWriter.close(); @@ -221,7 +239,7 @@ public void testUsesGlobalOrdinalsByDefault() throws Exception { .field("string"); MappedFieldType fieldType = new KeywordFieldMapper.KeywordFieldType("string"); - TermsAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType); + TermsAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, false, fieldType); assertThat(aggregator, instanceOf(GlobalOrdinalsStringTermsAggregator.class)); GlobalOrdinalsStringTermsAggregator globalAgg = (GlobalOrdinalsStringTermsAggregator) aggregator; assertThat(globalAgg.descriptCollectionStrategy(), equalTo("dense")); @@ -259,30 +277,55 @@ public void testUsesGlobalOrdinalsByDefault() throws Exception { } /** - * This test case utilizes the low cardinality implementation of GlobalOrdinalsStringTermsAggregator. - * In this case, the segment terms will not get initialized and will run without LeafBucketCollector#termDocFreqCollector optimization + * This test case utilizes the default implementation of GlobalOrdinalsStringTermsAggregator. */ public void testSimpleAggregation() throws Exception { - testSimple(ADD_SORTED_FIELD_NO_STORE, DEFAULT_POST_COLLECTION); + // Fields not indexed: cannot use LeafBucketCollector#termDocFreqCollector - all documents are visited + testSimple(ADD_SORTED_SET_FIELD_NOT_INDEXED, false, false, TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS, 4); + + // Fields indexed, deleted documents in segment: cannot use LeafBucketCollector#termDocFreqCollector - all documents are visited + testSimple(ADD_SORTED_SET_FIELD_INDEXED, true, false, TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS, 4); + + // Fields indexed, no deleted documents in segment: will use LeafBucketCollector#termDocFreqCollector - no documents are visited + testSimple(ADD_SORTED_SET_FIELD_INDEXED, false, false, TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS, 0); + } + + /** + * This test case utilizes the LowCardinality implementation of GlobalOrdinalsStringTermsAggregator. + */ + public void testSimpleAggregationLowCardinality() throws Exception { + // Fields not indexed: cannot use LeafBucketCollector#termDocFreqCollector - all documents are visited + testSimple(ADD_SORTED_SET_FIELD_NOT_INDEXED, false, true, TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS, 4); + + // Fields indexed, deleted documents in segment: cannot use LeafBucketCollector#termDocFreqCollector - all documents are visited + testSimple(ADD_SORTED_SET_FIELD_INDEXED, true, true, TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS, 4); + + // Fields indexed, no deleted documents in segment: will use LeafBucketCollector#termDocFreqCollector - no documents are visited + testSimple(ADD_SORTED_SET_FIELD_INDEXED, false, true, TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS, 0); } /** - * This test case utilizes the low cardinality implementation of GlobalOrdinalsStringTermsAggregator. 
     /**
-     * This test case utilizes the low cardinality implementation of GlobalOrdinalsStringTermsAggregator.
-     * In this case, the segment terms will get initialized and will use LeafBucketCollector#termDocFreqCollector optimization
+     * This test case utilizes the MapStringTermsAggregator.
      */
-    public void testSimpleAggregationWithStoredValues() throws Exception {
-        // aggregator.postCollection() is not required when LeafBucketCollector#termDocFreqCollector optimization is used,
-        // therefore using NOOP_POST_COLLECTION
-        // This also verifies that the bucket count is completed without running postCollection()
-        testSimple(ADD_SORTED_FIELD_STORE, NOOP_POST_COLLECTION);
+    public void testSimpleMapStringAggregation() throws Exception {
+        testSimple(ADD_SORTED_SET_FIELD_INDEXED, randomBoolean(), randomBoolean(), TermsAggregatorFactory.ExecutionMode.MAP, 4);
     }
 
     /**
      * This is a utility method to test out string terms aggregation
      * @param addFieldConsumer a function that determines how a field is added to the document
+     * @param includeDeletedDocumentsInSegment whether the segment should also contain a deleted document
+     * @param collectSegmentOrds whether to collect segment ordinals; set to true to exercise the LowCardinality implementation of GlobalOrdinalsStringTermsAggregator
+     * @param executionMode the execution mode, MAP or GLOBAL_ORDINALS
+     * @param expectedCollectCount the expected number of documents visited through collect() invocations
      */
-    private void testSimple(TriConsumer<Document, String, String> addFieldConsumer, Consumer<Aggregator> postCollectionConsumer)
-        throws Exception {
+    private void testSimple(
+        TriConsumer<Document, String, String> addFieldConsumer,
+        final boolean includeDeletedDocumentsInSegment,
+        boolean collectSegmentOrds,
+        TermsAggregatorFactory.ExecutionMode executionMode,
+        final int expectedCollectCount
+    ) throws Exception {
         try (Directory directory = newDirectory()) {
             try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) {
                 Document document = new Document();
@@ -301,94 +350,86 @@ private void testSimple(TriConsumer<Document, String, String> addFieldConsumer,
                 document = new Document();
                 addFieldConsumer.apply(document, "string", "");
                 indexWriter.addDocument(document);
+
+                if (includeDeletedDocumentsInSegment) {
+                    document = new Document();
+                    ADD_SORTED_SET_FIELD_INDEXED.apply(document, "string", "e");
+                    indexWriter.addDocument(document);
+                    indexWriter.deleteDocuments(new Term("string", "e"));
+                    assertEquals(5, indexWriter.getDocStats().maxDoc); // the deleted document is still part of the segment
+                }
+                assertEquals(4, indexWriter.getDocStats().numDocs); // numDocs counts live documents only
+
                 try (IndexReader indexReader = maybeWrapReaderEs(indexWriter.getReader())) {
                     IndexSearcher indexSearcher = newIndexSearcher(indexReader);
-                    for (TermsAggregatorFactory.ExecutionMode executionMode : TermsAggregatorFactory.ExecutionMode.values()) {
-                        TermsAggregationBuilder aggregationBuilder = new TermsAggregationBuilder("_name").userValueTypeHint(
-                            ValueType.STRING
-                        ).executionHint(executionMode.toString()).field("string").order(BucketOrder.key(true));
-                        MappedFieldType fieldType = new KeywordFieldMapper.KeywordFieldType("string");
-                        TermsAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType);
-                        aggregator.preCollection();
-                        indexSearcher.search(new MatchAllDocsQuery(), aggregator);
-                        postCollectionConsumer.accept(aggregator);
-                        Terms result = reduce(aggregator);
-                        assertEquals(5, result.getBuckets().size());
-                        assertEquals("", result.getBuckets().get(0).getKeyAsString());
-                        assertEquals(2L, result.getBuckets().get(0).getDocCount());
-                        assertEquals("a", result.getBuckets().get(1).getKeyAsString());
-                        assertEquals(2L, result.getBuckets().get(1).getDocCount());
-                        assertEquals("b", result.getBuckets().get(2).getKeyAsString());
-                        assertEquals(2L, result.getBuckets().get(2).getDocCount());
-                        assertEquals("c", result.getBuckets().get(3).getKeyAsString());
-                        assertEquals(1L, result.getBuckets().get(3).getDocCount());
-                        assertEquals("d", result.getBuckets().get(4).getKeyAsString());
-                        assertEquals(1L, result.getBuckets().get(4).getDocCount());
-                        assertTrue(AggregationInspectionHelper.hasValue((InternalTerms) result));
-                    }
+                    TermsAggregationBuilder aggregationBuilder = new TermsAggregationBuilder("_name").userValueTypeHint(ValueType.STRING)
+                        .executionHint(executionMode.toString())
+                        .field("string")
+                        .order(BucketOrder.key(true));
+                    MappedFieldType fieldType = new KeywordFieldMapper.KeywordFieldType("string");
+
+                    // Test hooks on TermsAggregatorFactory: collectSegmentOrds=true exercises the LowCardinality
+                    // implementation; REMAP_GLOBAL_ORDS=false avoids remapping global ordinals.
+                    TermsAggregatorFactory.COLLECT_SEGMENT_ORDS = collectSegmentOrds;
+                    TermsAggregatorFactory.REMAP_GLOBAL_ORDS = false;
+                    CountingAggregator aggregator = createCountingAggregator(aggregationBuilder, indexSearcher, false, fieldType);
+
+                    aggregator.preCollection();
+                    indexSearcher.search(new MatchAllDocsQuery(), aggregator);
+                    aggregator.postCollection();
+                    Terms result = reduce(aggregator);
+                    assertEquals(5, result.getBuckets().size());
+                    assertEquals("", result.getBuckets().get(0).getKeyAsString());
+                    assertEquals(2L, result.getBuckets().get(0).getDocCount());
+                    assertEquals("a", result.getBuckets().get(1).getKeyAsString());
+                    assertEquals(2L, result.getBuckets().get(1).getDocCount());
+                    assertEquals("b", result.getBuckets().get(2).getKeyAsString());
+                    assertEquals(2L, result.getBuckets().get(2).getDocCount());
+                    assertEquals("c", result.getBuckets().get(3).getKeyAsString());
+                    assertEquals(1L, result.getBuckets().get(3).getDocCount());
+                    assertEquals("d", result.getBuckets().get(4).getKeyAsString());
+                    assertEquals(1L, result.getBuckets().get(4).getDocCount());
+                    assertTrue(AggregationInspectionHelper.hasValue((InternalTerms) result));
+
+                    assertEquals(expectedCollectCount, aggregator.getCollectCount().get());
                 }
             }
         }
     }
 
-    /**
-     * This test case utilizes the default implementation of GlobalOrdinalsStringTermsAggregator.
-     * In this case, the segment terms will not get initialized and will run without LeafBucketCollector#termDocFreqCollector optimization
-     */
     public void testStringIncludeExclude() throws Exception {
-        testStringIncludeExclude(
-            (document, field, value) -> document.add(new SortedSetDocValuesField(field, new BytesRef(value))),
-            DEFAULT_POST_COLLECTION
-        );
-    }
-
-    /**
-     * This test case utilizes the default implementation of GlobalOrdinalsStringTermsAggregator.
-     * In this case, the segment terms will get initialized and will use LeafBucketCollector#termDocFreqCollector optimization
-     */
-    public void testStringIncludeExcludeWithStoredValues() throws Exception {
-        // aggregator.postCollection() is not required when LeafBucketCollector#termDocFreqCollector optimization is used
-        // This also verifies that the bucket count is completed without running postCollection()
-        testStringIncludeExclude((document, field, value) -> {
-            document.add(new SortedSetDocValuesField(field, new BytesRef(value)));
-            document.add(new StringField(field, value, Field.Store.NO));
-        }, NOOP_POST_COLLECTION);
-    }
-
-    private void testStringIncludeExclude(TriConsumer<Document, String, String> addField, Consumer<Aggregator> postCollectionConsumer)
-        throws Exception {
         try (Directory directory = newDirectory()) {
             try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) {
                 Document document = new Document();
-                addField.apply(document, "mv_field", "val000");
-                addField.apply(document, "mv_field", "val001");
-                addField.apply(document, "sv_field", "val001");
+                document.add(new SortedSetDocValuesField("mv_field", new BytesRef("val000")));
+                document.add(new SortedSetDocValuesField("mv_field", new BytesRef("val001")));
+                document.add(new SortedDocValuesField("sv_field", new BytesRef("val001")));
                 indexWriter.addDocument(document);
                 document = new Document();
-                addField.apply(document, "mv_field", "val002");
-                addField.apply(document, "mv_field", "val003");
-                addField.apply(document, "sv_field", "val003");
+                document.add(new SortedSetDocValuesField("mv_field", new BytesRef("val002")));
+                document.add(new SortedSetDocValuesField("mv_field", new BytesRef("val003")));
+                document.add(new SortedDocValuesField("sv_field", new BytesRef("val003")));
                 indexWriter.addDocument(document);
                 document = new Document();
-                addField.apply(document, "mv_field", "val004");
-                addField.apply(document, "mv_field", "val005");
-                addField.apply(document, "sv_field", "val005");
+                document.add(new SortedSetDocValuesField("mv_field", new BytesRef("val004")));
+                document.add(new SortedSetDocValuesField("mv_field", new BytesRef("val005")));
+                document.add(new SortedDocValuesField("sv_field", new BytesRef("val005")));
                 indexWriter.addDocument(document);
                 document = new Document();
-                addField.apply(document, "mv_field", "val006");
-                addField.apply(document, "mv_field", "val007");
-                addField.apply(document, "sv_field", "val007");
+                document.add(new SortedSetDocValuesField("mv_field", new BytesRef("val006")));
+                document.add(new SortedSetDocValuesField("mv_field", new BytesRef("val007")));
+                document.add(new SortedDocValuesField("sv_field", new BytesRef("val007")));
                 indexWriter.addDocument(document);
                 document = new Document();
-                addField.apply(document, "mv_field", "val008");
-                addField.apply(document, "mv_field", "val009");
-                addField.apply(document, "sv_field", "val009");
+                document.add(new SortedSetDocValuesField("mv_field", new BytesRef("val008")));
+                document.add(new SortedSetDocValuesField("mv_field", new BytesRef("val009")));
+                document.add(new SortedDocValuesField("sv_field", new BytesRef("val009")));
                 indexWriter.addDocument(document);
                 document = new Document();
-                addField.apply(document, "mv_field", "val010");
-                addField.apply(document, "mv_field", "val011");
-                addField.apply(document, "sv_field", "val011");
+                document.add(new SortedSetDocValuesField("mv_field", new BytesRef("val010")));
+                document.add(new SortedSetDocValuesField("mv_field", new BytesRef("val011")));
+                document.add(new SortedDocValuesField("sv_field", new BytesRef("val011")));
                 indexWriter.addDocument(document);
                 try (IndexReader indexReader = maybeWrapReaderEs(indexWriter.getReader())) {
                     IndexSearcher indexSearcher = newIndexSearcher(indexReader);
@@ -405,7 +446,7 @@ private void testStringIncludeExclude(TriConsumer<Document, String, String> addF
                     TermsAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType);
                     aggregator.preCollection();
                     indexSearcher.search(new MatchAllDocsQuery(), aggregator);
-                    postCollectionConsumer.accept(aggregator);
+                    aggregator.postCollection();
                     Terms result = reduce(aggregator);
                     assertEquals(10, result.getBuckets().size());
                     assertEquals("val000", result.getBuckets().get(0).getKeyAsString());
@@ -440,7 +481,7 @@ private void testStringIncludeExclude(TriConsumer<Document, String, String> addF
                     aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType2);
                     aggregator.preCollection();
                     indexSearcher.search(new MatchAllDocsQuery(), aggregator);
-                    postCollectionConsumer.accept(aggregator);
+                    aggregator.postCollection();
                     result = reduce(aggregator);
                     assertEquals(5, result.getBuckets().size());
                     assertEquals("val001", result.getBuckets().get(0).getKeyAsString());
@@ -464,7 +505,7 @@ private void testStringIncludeExclude(TriConsumer<Document, String, String> addF
                     aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType);
                     aggregator.preCollection();
                     indexSearcher.search(new MatchAllDocsQuery(), aggregator);
-                    postCollectionConsumer.accept(aggregator);
+                    aggregator.postCollection();
                     result = reduce(aggregator);
                     assertEquals(8, result.getBuckets().size());
                     assertEquals("val002", result.getBuckets().get(0).getKeyAsString());
@@ -493,7 +534,7 @@ private void testStringIncludeExclude(TriConsumer<Document, String, String> addF
                     aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType);
                     aggregator.preCollection();
                     indexSearcher.search(new MatchAllDocsQuery(), aggregator);
-                    postCollectionConsumer.accept(aggregator);
+                    aggregator.postCollection();
                     result = reduce(aggregator);
                     assertEquals(2, result.getBuckets().size());
                     assertEquals("val010", result.getBuckets().get(0).getKeyAsString());
@@ -510,7 +551,7 @@ private void testStringIncludeExclude(TriConsumer<Document, String, String> addF
                     aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType);
                     aggregator.preCollection();
                     indexSearcher.search(new MatchAllDocsQuery(), aggregator);
-                    postCollectionConsumer.accept(aggregator);
+                    aggregator.postCollection();
                     result = reduce(aggregator);
                     assertEquals(2, result.getBuckets().size());
                     assertEquals("val000", result.getBuckets().get(0).getKeyAsString());
@@ -542,7 +583,7 @@ private void testStringIncludeExclude(TriConsumer<Document, String, String> addF
                     aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType);
                     aggregator.preCollection();
                     indexSearcher.search(new MatchAllDocsQuery(), aggregator);
-                    postCollectionConsumer.accept(aggregator);
+                    aggregator.postCollection();
                     result = reduce(aggregator);
                     assertEquals(2, result.getBuckets().size());
                     assertEquals("val000", result.getBuckets().get(0).getKeyAsString());
diff --git a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java
index 9fdae80bd1ada..4eb49ebb42241 100644
--- a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java
@@ -124,6 +124,7 @@
 import org.opensearch.search.aggregations.AggregatorFactories.Builder;
 import org.opensearch.search.aggregations.MultiBucketConsumerService.MultiBucketConsumer;
 import org.opensearch.search.aggregations.bucket.nested.NestedAggregationBuilder;
+import org.opensearch.search.aggregations.bucket.terms.TermsAggregator;
 import org.opensearch.search.aggregations.metrics.MetricsAggregator;
 import org.opensearch.search.aggregations.pipeline.PipelineAggregator;
 import org.opensearch.search.aggregations.pipeline.PipelineAggregator.PipelineTree;
@@ -150,6 +151,7 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Objects;
+import java.util.concurrent.atomic.AtomicInteger;
 import java.util.function.Consumer;
 import java.util.function.Function;
 import java.util.function.Supplier;
@@ -181,23 +183,10 @@ public abstract class AggregatorTestCase extends OpenSearchTestCase {
     // A list of field types that should not be tested, or are not currently supported
     private static List<String> TYPE_TEST_DENYLIST;
 
-    protected static final Consumer<Aggregator> DEFAULT_POST_COLLECTION = termsAggregator -> {
-        try {
-            termsAggregator.postCollection();
-        } catch (IOException e) {
-            throw new RuntimeException(e);
-        }
-    };
-
-    // aggregator.postCollection() is not required when LeafBucketCollector#termDocFreqCollector optimization is used.
-    // using NOOP_POST_COLLECTION_CONSUMER ensures that the bucket count in aggregation is completed before/without running postCollection()
-    protected static final Consumer<Aggregator> NOOP_POST_COLLECTION = termsAggregator -> {};
+    protected static final TriConsumer<Document, String, String> ADD_SORTED_SET_FIELD_NOT_INDEXED = (document, field, value) -> document
+        .add(new SortedSetDocValuesField(field, new BytesRef(value)));
 
-    protected static final TriConsumer<Document, String, String> ADD_SORTED_FIELD_NO_STORE = (document, field, value) -> document.add(
-        new SortedSetDocValuesField(field, new BytesRef(value))
-    );
-
-    protected static final TriConsumer<Document, String, String> ADD_SORTED_FIELD_STORE = (document, field, value) -> {
+    protected static final TriConsumer<Document, String, String> ADD_SORTED_SET_FIELD_INDEXED = (document, field, value) -> {
         document.add(new SortedSetDocValuesField(field, new BytesRef(value)));
         document.add(new StringField(field, value, Field.Store.NO));
     };
@@ -457,7 +446,6 @@ protected QueryShardContext queryShardContextMock(
         CircuitBreakerService circuitBreakerService,
         BigArrays bigArrays
     ) {
-
         return new QueryShardContext(
             0,
             indexSettings,
@@ -508,16 +496,6 @@ protected <A extends InternalAggregation> A searchAndReduc
         return searchAndReduce(createIndexSettings(), searcher, query, builder, DEFAULT_MAX_BUCKETS, fieldTypes);
     }
 
-    protected <A extends InternalAggregation> A searchAndReduce(
-        IndexSearcher searcher,
-        Query query,
-        AggregationBuilder builder,
-        Consumer<Aggregator> postCollectionConsumer,
-        MappedFieldType... fieldTypes
-    ) throws IOException {
-        return searchAndReduce(createIndexSettings(), searcher, query, builder, DEFAULT_MAX_BUCKETS, postCollectionConsumer, fieldTypes);
-    }
-
     protected <A extends InternalAggregation> A searchAndReduce(
         IndexSettings indexSettings,
         IndexSearcher searcher,
@@ -538,17 +516,6 @@ protected <A extends InternalAggregation> A searchAndReduc
         return searchAndReduce(createIndexSettings(), searcher, query, builder, maxBucket, fieldTypes);
     }
 
-    protected <A extends InternalAggregation> A searchAndReduce(
-        IndexSettings indexSettings,
-        IndexSearcher searcher,
-        Query query,
-        AggregationBuilder builder,
-        int maxBucket,
-        MappedFieldType... fieldTypes
-    ) throws IOException {
-        return searchAndReduce(indexSettings, searcher, query, builder, maxBucket, DEFAULT_POST_COLLECTION, fieldTypes);
-    }
-
     /**
      * Collects all documents that match the provided query {@link Query} and
     * returns the reduced {@link InternalAggregation}.
@@ -563,7 +530,6 @@ protected <A extends InternalAggregation> A searchAndReduc
         Query query,
         AggregationBuilder builder,
         int maxBucket,
-        Consumer<Aggregator> postCollectionConsumer,
         MappedFieldType... fieldTypes
     ) throws IOException {
         final IndexReaderContext ctx = searcher.getTopReaderContext();
@@ -594,13 +560,13 @@ protected <A extends InternalAggregation> A searchAndReduc
                 a.preCollection();
                 Weight weight = subSearcher.createWeight(rewritten, ScoreMode.COMPLETE, 1f);
                 subSearcher.search(weight, a);
-                a.postCollection();
+                a.postCollection();
                 aggs.add(a.buildTopLevel());
             }
         } else {
             root.preCollection();
             searcher.search(rewritten, root);
-            postCollectionConsumer.accept(root);
+            root.postCollection();
             aggs.add(root.buildTopLevel());
         }
 
@@ -1142,6 +1108,99 @@ protected void doWriteTo(StreamOutput out) throws IOException {
         }
     }
 
+    /**
+     * Wrapper around the Aggregator class.
+     * Maintains a count of the number of times collect() is invoked, i.e. the number of documents visited.
+     */
+    protected static class CountingAggregator extends Aggregator {
+        private final AtomicInteger collectCounter;
+        public final Aggregator delegate;
+
+        public CountingAggregator(AtomicInteger collectCounter, TermsAggregator delegate) {
+            this.collectCounter = collectCounter;
+            this.delegate = delegate;
+        }
+
+        public AtomicInteger getCollectCount() {
+            return collectCounter;
+        }
+
+        @Override
+        public void close() {
+            delegate.close();
+        }
+
+        @Override
+        public String name() {
+            return delegate.name();
+        }
+
+        @Override
+        public SearchContext context() {
+            return delegate.context();
+        }
+
+        @Override
+        public Aggregator parent() {
+            return delegate.parent();
+        }
+
+        @Override
+        public Aggregator subAggregator(String name) {
+            return delegate.subAggregator(name);
+        }
+
+        @Override
+        public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException {
+            return delegate.buildAggregations(owningBucketOrds);
+        }
+
+        @Override
+        public InternalAggregation buildEmptyAggregation() {
+            return delegate.buildEmptyAggregation();
+        }
+
+        @Override
+        public LeafBucketCollector getLeafCollector(LeafReaderContext ctx) throws IOException {
+            return new LeafBucketCollector() {
+                @Override
+                public void collect(int doc, long bucket) throws IOException {
+                    delegate.getLeafCollector(ctx).collect(doc, bucket); // delegate the real collection, then count the visited document
+                    collectCounter.incrementAndGet();
+                }
+            };
+        }
+
+        @Override
+        public ScoreMode scoreMode() {
+            return delegate.scoreMode();
+        }
+
+        @Override
+        public void preCollection() throws IOException {
+            delegate.preCollection();
+        }
+
+        @Override
+        public void postCollection() throws IOException {
+            delegate.postCollection();
+        }
+
+        public void setWeight(Weight weight) {
+            this.delegate.setWeight(weight);
+        }
+    }
+
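+    // Usage sketch (mirrors TermsAggregatorTests#testSimple; the names below come from that test and
+    // are shown for illustration only): wrap the real aggregator, run the search, then assert on how
+    // many documents collect() visited.
+    //
+    //   CountingAggregator aggregator = createCountingAggregator(aggregationBuilder, indexSearcher, false, fieldType);
+    //   aggregator.preCollection();
+    //   indexSearcher.search(new MatchAllDocsQuery(), aggregator);
+    //   aggregator.postCollection();
+    //   assertEquals(expectedCollectCount, aggregator.getCollectCount().get());
+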
     public static class InternalAggCardinality extends InternalAggregation {
         private final CardinalityUpperBound cardinality;