From bff3a808d24ef049a7107e16c43e3f8292503e11 Mon Sep 17 00:00:00 2001 From: Dawid Weiss Date: Wed, 14 Jan 2026 22:36:10 +0100 Subject: [PATCH] Replace prettyprint with code tag for prism. --- .../plugins/java/RenderJavadocPlugin.java | 8 +-- .../prettify/inject-javadocs.js | 27 -------- .../render-javadoc/prettify/prettify.css | 17 ----- .../render-javadoc/prettify/prettify.js | 46 ------------- .../ar/ArabicNormalizationFilterFactory.java | 4 +- .../analysis/ar/ArabicStemFilterFactory.java | 4 +- .../bg/BulgarianStemFilterFactory.java | 4 +- .../bn/BengaliNormalizationFilterFactory.java | 4 +- .../analysis/bn/BengaliStemFilterFactory.java | 4 +- .../DelimitedBoostTokenFilterFactory.java | 4 +- .../br/BrazilianStemFilterFactory.java | 4 +- .../HTMLStripCharFilterFactory.java | 4 +- .../charfilter/MappingCharFilterFactory.java | 4 +- .../analysis/cjk/CJKBigramFilterFactory.java | 4 +- .../analysis/cjk/CJKWidthFilterFactory.java | 4 +- .../ckb/SoraniNormalizationFilterFactory.java | 4 +- .../analysis/ckb/SoraniStemFilterFactory.java | 4 +- .../classic/ClassicFilterFactory.java | 4 +- .../classic/ClassicTokenizerFactory.java | 4 +- .../commongrams/CommonGramsFilterFactory.java | 4 +- .../CommonGramsQueryFilterFactory.java | 4 +- ...tionaryCompoundWordTokenFilterFactory.java | 4 +- ...enationCompoundWordTokenFilterFactory.java | 4 +- .../analysis/compound/package-info.java | 4 +- .../core/DecimalDigitFilterFactory.java | 4 +- .../core/KeywordTokenizerFactory.java | 4 +- .../analysis/core/LetterTokenizerFactory.java | 4 +- .../analysis/core/LowerCaseFilterFactory.java | 4 +- .../analysis/core/StopFilterFactory.java | 4 +- .../analysis/core/TypeTokenFilterFactory.java | 4 +- .../analysis/core/UpperCaseFilterFactory.java | 4 +- .../core/WhitespaceTokenizerFactory.java | 4 +- .../analysis/custom/CustomAnalyzer.java | 12 ++-- .../analysis/cz/CzechStemFilterFactory.java | 4 +- .../de/GermanLightStemFilterFactory.java | 4 +- .../de/GermanMinimalStemFilterFactory.java | 4 +- 
.../de/GermanNormalizationFilterFactory.java | 4 +- .../analysis/de/GermanStemFilterFactory.java | 4 +- .../el/GreekLowerCaseFilterFactory.java | 4 +- .../analysis/el/GreekStemFilterFactory.java | 4 +- .../email/UAX29URLEmailTokenizerFactory.java | 4 +- .../en/EnglishMinimalStemFilterFactory.java | 4 +- .../en/EnglishPossessiveFilterFactory.java | 4 +- .../analysis/en/KStemFilterFactory.java | 4 +- .../lucene/analysis/en/PorterStemFilter.java | 4 +- .../analysis/en/PorterStemFilterFactory.java | 4 +- .../es/SpanishLightStemFilterFactory.java | 4 +- .../es/SpanishMinimalStemFilterFactory.java | 4 +- .../es/SpanishPluralStemFilterFactory.java | 4 +- .../analysis/fa/PersianCharFilterFactory.java | 4 +- .../fa/PersianNormalizationFilterFactory.java | 4 +- .../fi/FinnishLightStemFilterFactory.java | 4 +- .../fr/FrenchLightStemFilterFactory.java | 4 +- .../fr/FrenchMinimalStemFilterFactory.java | 4 +- .../ga/IrishLowerCaseFilterFactory.java | 4 +- .../gl/GalicianMinimalStemFilterFactory.java | 4 +- .../gl/GalicianStemFilterFactory.java | 4 +- .../hi/HindiNormalizationFilterFactory.java | 4 +- .../analysis/hi/HindiStemFilterFactory.java | 4 +- .../hu/HungarianLightStemFilterFactory.java | 4 +- .../hunspell/HunspellStemFilterFactory.java | 4 +- .../id/IndonesianStemFilterFactory.java | 4 +- .../in/IndicNormalizationFilterFactory.java | 4 +- .../it/ItalianLightStemFilterFactory.java | 4 +- .../analysis/lv/LatvianStemFilterFactory.java | 4 +- .../ASCIIFoldingFilterFactory.java | 4 +- .../CapitalizationFilterFactory.java | 4 +- .../CodepointCountFilterFactory.java | 4 +- .../DateRecognizerFilterFactory.java | 4 +- ...imitedTermFrequencyTokenFilterFactory.java | 4 +- .../DropIfFlaggedFilterFactory.java | 4 +- .../FingerprintFilterFactory.java | 9 +-- .../miscellaneous/HyphenatedWordsFilter.java | 4 +- .../HyphenatedWordsFilterFactory.java | 4 +- .../miscellaneous/KeepWordFilterFactory.java | 4 +- .../KeywordMarkerFilterFactory.java | 4 +- 
.../miscellaneous/LengthFilterFactory.java | 4 +- .../LimitTokenCountFilterFactory.java | 4 +- .../LimitTokenOffsetFilterFactory.java | 4 +- .../LimitTokenPositionFilterFactory.java | 4 +- .../PerFieldAnalyzerWrapper.java | 45 ++++++------- .../ProtectedTermFilterFactory.java | 12 ++-- .../RemoveDuplicatesTokenFilterFactory.java | 4 +- .../ScandinavianFoldingFilterFactory.java | 4 +- ...candinavianNormalizationFilterFactory.java | 4 +- .../StemmerOverrideFilterFactory.java | 4 +- .../miscellaneous/TrimFilterFactory.java | 4 +- .../TruncateTokenFilterFactory.java | 4 +- .../TypeAsSynonymFilterFactory.java | 4 +- .../WordDelimiterFilterFactory.java | 4 +- .../WordDelimiterGraphFilterFactory.java | 4 +- .../ngram/EdgeNGramFilterFactory.java | 4 +- .../ngram/EdgeNGramTokenizerFactory.java | 4 +- .../analysis/ngram/NGramFilterFactory.java | 4 +- .../analysis/ngram/NGramTokenizerFactory.java | 4 +- .../no/NorwegianLightStemFilterFactory.java | 4 +- .../no/NorwegianMinimalStemFilterFactory.java | 4 +- .../path/PathHierarchyTokenizerFactory.java | 8 +-- .../PatternCaptureGroupFilterFactory.java | 4 +- .../PatternReplaceCharFilterFactory.java | 4 +- .../pattern/PatternReplaceFilterFactory.java | 4 +- .../pattern/PatternTokenizerFactory.java | 4 +- .../pattern/PatternTypingFilterFactory.java | 4 +- .../SimplePatternSplitTokenizerFactory.java | 4 +- .../SimplePatternTokenizerFactory.java | 4 +- .../DelimitedPayloadTokenFilterFactory.java | 4 +- .../NumericPayloadTokenFilterFactory.java | 4 +- .../TokenOffsetPayloadTokenFilterFactory.java | 4 +- .../TypeAsPayloadTokenFilterFactory.java | 4 +- .../pt/PortugueseLightStemFilterFactory.java | 4 +- .../PortugueseMinimalStemFilterFactory.java | 4 +- .../pt/PortugueseStemFilterFactory.java | 4 +- .../reverse/ReverseStringFilterFactory.java | 4 +- .../ru/RussianLightStemFilterFactory.java | 4 +- .../shingle/ShingleFilterFactory.java | 4 +- .../analysis/sinks/TeeSinkTokenFilter.java | 4 +- .../snowball/SnowballPorterFilterFactory.java 
| 4 +- .../sr/SerbianNormalizationFilterFactory.java | 4 +- .../sv/SwedishLightStemFilterFactory.java | 4 +- .../sv/SwedishMinimalStemFilterFactory.java | 4 +- .../synonym/SynonymFilterFactory.java | 4 +- .../synonym/SynonymGraphFilterFactory.java | 4 +- .../te/TeluguNormalizationFilterFactory.java | 4 +- .../analysis/te/TeluguStemFilterFactory.java | 4 +- .../analysis/th/ThaiTokenizerFactory.java | 4 +- .../analysis/tr/ApostropheFilterFactory.java | 4 +- .../tr/TurkishLowerCaseFilterFactory.java | 4 +- .../lucene/analysis/util/CharTokenizer.java | 20 +++--- .../analysis/util/ElisionFilterFactory.java | 4 +- .../wikipedia/WikipediaTokenizerFactory.java | 4 +- .../apache/lucene/collation/package-info.java | 12 ++-- .../analysis/icu/ICUFoldingFilterFactory.java | 4 +- .../icu/segmentation/ICUTokenizerFactory.java | 8 +-- lucene/analysis/icu/src/java/overview.html | 40 ++++++------ ...apaneseIterationMarkCharFilterFactory.java | 4 +- .../ja/JapaneseKatakanaStemFilterFactory.java | 4 +- .../ja/JapaneseNumberFilterFactory.java | 4 +- ...JapanesePartOfSpeechStopFilterFactory.java | 4 +- .../ja/JapaneseReadingFormFilterFactory.java | 4 +- .../analysis/ja/JapaneseTokenizerFactory.java | 4 +- .../morfologik/MorfologikFilterFactory.java | 4 +- .../ko/KoreanNumberFilterFactory.java | 4 +- .../KoreanPartOfSpeechStopFilterFactory.java | 4 +- .../ko/KoreanReadingFormFilterFactory.java | 4 +- .../analysis/ko/KoreanTokenizerFactory.java | 4 +- .../opennlp/OpenNLPChunkerFilterFactory.java | 4 +- .../OpenNLPLemmatizerFilterFactory.java | 4 +- .../opennlp/OpenNLPPOSFilterFactory.java | 4 +- .../opennlp/OpenNLPTokenizerFactory.java | 4 +- .../phonetic/BeiderMorseFilterFactory.java | 4 +- .../DaitchMokotoffSoundexFilterFactory.java | 4 +- .../DoubleMetaphoneFilterFactory.java | 4 +- .../phonetic/PhoneticFilterFactory.java | 4 +- .../lucene50/Lucene50StoredFieldsFormat.java | 4 +- .../lucene87/Lucene87StoredFieldsFormat.java | 4 +- .../packed/LegacyDirectReader.java | 4 +- 
.../packed/LegacyDirectWriter.java | 4 +- .../benchmark/quality/package-info.java | 4 +- .../org/apache/lucene/analysis/Analyzer.java | 4 +- .../apache/lucene/analysis/package-info.java | 64 +++++++++---------- .../standard/StandardTokenizerFactory.java | 4 +- .../TermToBytesRefAttribute.java | 4 +- .../org/apache/lucene/codecs/CodecUtil.java | 4 +- .../org/apache/lucene/codecs/FilterCodec.java | 4 +- .../lucene90/Lucene90StoredFieldsFormat.java | 4 +- .../apache/lucene/codecs/package-info.java | 4 +- .../lucene/document/BinaryDocValuesField.java | 4 +- .../document/NumericDocValuesField.java | 4 +- .../lucene/document/SortedDocValuesField.java | 4 +- .../document/SortedNumericDocValuesField.java | 8 +-- .../document/SortedSetDocValuesField.java | 4 +- .../org/apache/lucene/index/IndexReader.java | 12 ++-- .../lucene/index/IndexWriterConfig.java | 4 +- .../apache/lucene/index/MergeScheduler.java | 4 +- .../apache/lucene/index/NumericDocValues.java | 8 +-- .../org/apache/lucene/index/PointValues.java | 4 +- .../org/apache/lucene/index/PostingsEnum.java | 4 +- .../lucene/index/UpgradeIndexMergePolicy.java | 4 +- .../org/apache/lucene/index/package-info.java | 20 +++--- .../apache/lucene/search/BooleanQuery.java | 2 +- .../org/apache/lucene/search/BulkScorer.java | 4 +- .../lucene/search/DocIdSetIterator.java | 8 +-- .../org/apache/lucene/search/HitQueue.java | 4 +- .../lucene/search/IndexOrDocValuesQuery.java | 4 +- .../apache/lucene/search/IndexSearcher.java | 4 +- ...xSortSortedNumericDocValuesRangeQuery.java | 4 +- .../apache/lucene/search/LRUQueryCache.java | 4 +- .../apache/lucene/search/LeafCollector.java | 4 +- .../org/apache/lucene/search/PhraseQuery.java | 8 +-- .../java/org/apache/lucene/search/Scorer.java | 4 +- .../apache/lucene/search/SearcherFactory.java | 4 +- .../search/SearcherLifetimeManager.java | 16 ++--- .../apache/lucene/search/SearcherManager.java | 4 +- .../apache/lucene/search/TermInSetQuery.java | 4 +- 
.../java/org/apache/lucene/search/Weight.java | 4 +- .../apache/lucene/search/package-info.java | 16 ++--- .../search/similarities/BM25Similarity.java | 4 +- .../IndriDirichletSimilarity.java | 4 +- .../search/similarities/Similarity.java | 4 +- .../search/similarities/TFIDFSimilarity.java | 4 +- .../java/org/apache/lucene/store/Lock.java | 4 +- .../lucene/store/NRTCachingDirectory.java | 4 +- .../org/apache/lucene/util/AttributeImpl.java | 4 +- .../src/java/org/apache/lucene/util/Bits.java | 4 +- .../java/org/apache/lucene/util/LongHeap.java | 8 +-- .../org/apache/lucene/util/PriorityQueue.java | 12 ++-- .../org/apache/lucene/util/QueryBuilder.java | 4 +- .../apache/lucene/util/SentinelIntSet.java | 4 +- .../apache/lucene/util/TernaryLongHeap.java | 8 +-- .../org/apache/lucene/util/VectorUtil.java | 4 +- .../org/apache/lucene/util/VirtualMethod.java | 8 +-- .../apache/lucene/util/fst/FSTCompiler.java | 8 +-- .../java/org/apache/lucene/util/fst/Util.java | 4 +- .../apache/lucene/util/fst/package-info.java | 20 +++--- .../lucene/util/packed/DirectReader.java | 4 +- .../lucene/util/packed/DirectWriter.java | 4 +- .../OptimizedScalarQuantizer.java | 4 +- .../util/quantization/ScalarQuantizer.java | 12 ++-- .../apache/lucene/expressions/Expression.java | 8 +-- .../lucene/expressions/SimpleBindings.java | 4 +- .../expressions/js/JavascriptCompiler.java | 8 +-- .../taxonomy/OrdinalMappingLeafReader.java | 4 +- .../lucene/search/grouping/package-info.java | 20 +++--- .../lucene/search/highlight/package-info.java | 4 +- .../search/uhighlight/UnifiedHighlighter.java | 8 +-- .../SingleFragListBuilder.java | 4 +- .../search/vectorhighlight/package-info.java | 12 ++-- ...iversifyingChildrenByteKnnVectorQuery.java | 4 +- ...versifyingChildrenFloatKnnVectorQuery.java | 4 +- .../lucene/search/join/package-info.java | 8 +-- .../lucene/index/memory/MemoryIndex.java | 12 ++-- .../lucene/misc/index/BPIndexReorderer.java | 4 +- .../lucene/monitor/ConcurrentQueryLoader.java | 4 +- 
.../lucene/queries/mlt/MoreLikeThis.java | 4 +- .../queries/spans/FieldMaskingSpanQuery.java | 4 +- .../spans/SpanMultiTermQueryWrapper.java | 6 +- .../lucene/queries/spans/package-info.java | 8 +-- .../classic/MultiFieldQueryParser.java | 12 ++-- .../flexible/core/nodes/PathQueryNode.java | 4 +- .../flexible/messages/package-info.java | 8 +-- .../flexible/standard/QueryParserUtil.java | 12 ++-- .../standard/StandardQueryParser.java | 8 +-- .../lucene/search/spell/SpellChecker.java | 4 +- .../analyzing/SuggestStopFilterFactory.java | 4 +- .../suggest/document/CompletionQuery.java | 4 +- .../search/suggest/document/ContextQuery.java | 4 +- .../suggest/document/ContextSuggestField.java | 4 +- .../document/FuzzyCompletionQuery.java | 4 +- .../document/PrefixCompletionQuery.java | 4 +- .../document/RegexCompletionQuery.java | 4 +- .../search/suggest/document/SuggestField.java | 4 +- .../BaseTokenStreamFactoryTestCase.java | 4 +- 252 files changed, 679 insertions(+), 789 deletions(-) delete mode 100644 gradle/documentation/render-javadoc/prettify/inject-javadocs.js delete mode 100644 gradle/documentation/render-javadoc/prettify/prettify.css delete mode 100644 gradle/documentation/render-javadoc/prettify/prettify.js diff --git a/build-tools/build-infra/src/main/java/org/apache/lucene/gradle/plugins/java/RenderJavadocPlugin.java b/build-tools/build-infra/src/main/java/org/apache/lucene/gradle/plugins/java/RenderJavadocPlugin.java index 6a7dd40aa982..61ddde49b9ef 100644 --- a/build-tools/build-infra/src/main/java/org/apache/lucene/gradle/plugins/java/RenderJavadocPlugin.java +++ b/build-tools/build-infra/src/main/java/org/apache/lucene/gradle/plugins/java/RenderJavadocPlugin.java @@ -566,17 +566,11 @@ public void render() throws IOException { getTaskResources(), "table_padding.css", "custom_styles.css", - "prettify/prettify.css", "prettify/prism.css"); // append prettify to scripts Provider customScript = getOutputDir().file("script-files/lucene-script.js"); - concat( - 
customScript, - getTaskResources().dir("prettify"), - "prettify.js", - "inject-javadocs.js", - "prism.js"); + concat(customScript, getTaskResources().dir("prettify"), "prism.js"); opts.add(List.of("--add-script", customScript.get().getAsFile().toString())); opts.add(List.of("--add-stylesheet", customCss.get().getAsFile().toString())); diff --git a/gradle/documentation/render-javadoc/prettify/inject-javadocs.js b/gradle/documentation/render-javadoc/prettify/inject-javadocs.js deleted file mode 100644 index 77f6a4b04636..000000000000 --- a/gradle/documentation/render-javadoc/prettify/inject-javadocs.js +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -!function(){ - var oldonload = window.onload; - if (typeof oldonload != 'function') { - window.onload = prettyPrint; - } else { - window.onload = function() { - oldonload(); - prettyPrint(); - } - } -}(); diff --git a/gradle/documentation/render-javadoc/prettify/prettify.css b/gradle/documentation/render-javadoc/prettify/prettify.css deleted file mode 100644 index d1a82584a65b..000000000000 --- a/gradle/documentation/render-javadoc/prettify/prettify.css +++ /dev/null @@ -1,17 +0,0 @@ -/* - - Copyright (C) 2006 Google Inc. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ -.pln{color:#000}@media screen{.str{color:#080}.kwd{color:#008}.com{color:#800}.typ{color:#606}.lit{color:#066}.pun,.opn,.clo{color:#660}.tag{color:#008}.atn{color:#606}.atv{color:#080}.dec,.var{color:#606}.fun{color:red}}@media print,projection{.str{color:#060}.kwd{color:#006;font-weight:bold}.com{color:#600;font-style:italic}.typ{color:#404;font-weight:bold}.lit{color:#044}.pun,.opn,.clo{color:#440}.tag{color:#006;font-weight:bold}.atn{color:#404}.atv{color:#060}}pre.prettyprint{padding:2px;border:1px solid #888}ol.linenums{margin-top:0;margin-bottom:0}li.L0,li.L1,li.L2,li.L3,li.L5,li.L6,li.L7,li.L8{list-style-type:none}li.L1,li.L3,li.L5,li.L7,li.L9{background:#eee} diff --git a/gradle/documentation/render-javadoc/prettify/prettify.js b/gradle/documentation/render-javadoc/prettify/prettify.js deleted file mode 100644 index 3b74b5bdaa3d..000000000000 --- a/gradle/documentation/render-javadoc/prettify/prettify.js +++ /dev/null @@ -1,46 +0,0 @@ -!function(){/* - - Copyright (C) 2006 Google Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. -*/ -window.PR_SHOULD_USE_CONTINUATION=!0; -(function(){function T(a){function d(e){var b=e.charCodeAt(0);if(92!==b)return b;var a=e.charAt(1);return(b=w[a])?b:"0"<=a&&"7">=a?parseInt(e.substring(1),8):"u"===a||"x"===a?parseInt(e.substring(2),16):e.charCodeAt(1)}function f(e){if(32>e)return(16>e?"\\x0":"\\x")+e.toString(16);e=String.fromCharCode(e);return"\\"===e||"-"===e||"]"===e||"^"===e?"\\"+e:e}function b(e){var b=e.substring(1,e.length-1).match(/\\u[0-9A-Fa-f]{4}|\\x[0-9A-Fa-f]{2}|\\[0-3][0-7]{0,2}|\\[0-7]{1,2}|\\[\s\S]|-|[^-\\]/g);e= -[];var a="^"===b[0],c=["["];a&&c.push("^");for(var a=a?1:0,g=b.length;ak||122k||90k||122h[0]&&(h[1]+1>h[0]&&c.push("-"),c.push(f(h[1])));c.push("]");return c.join("")}function v(e){for(var a=e.source.match(/(?:\[(?:[^\x5C\x5D]|\\[\s\S])*\]|\\u[A-Fa-f0-9]{4}|\\x[A-Fa-f0-9]{2}|\\[0-9]+|\\[^ux0-9]|\(\?[:!=]|[\(\)\^]|[^\x5B\x5C\(\)\^]+)/g),c=a.length,d=[],g=0,h=0;g/,null])):d.push(["com",/^#[^\r\n]*/,null,"#"]));a.cStyleComments&&(f.push(["com",/^\/\/[^\r\n]*/,null]),f.push(["com",/^\/\*[\s\S]*?(?:\*\/|$)/,null]));if(b=a.regexLiterals){var v=(b=1|\\/=?|::?|<>?>?=?|,|;|\\?|@|\\[|~|{|\\^\\^?=?|\\|\\|?=?|break|case|continue|delete|do|else|finally|instanceof|return|throw|try|typeof)\\s*("+ -("/(?=[^/*"+b+"])(?:[^/\\x5B\\x5C"+b+"]|\\x5C"+v+"|\\x5B(?:[^\\x5C\\x5D"+b+"]|\\x5C"+v+")*(?:\\x5D|$))+/")+")")])}(b=a.types)&&f.push(["typ",b]);b=(""+a.keywords).replace(/^ | $/g,"");b.length&&f.push(["kwd",new RegExp("^(?:"+b.replace(/[\s,]+/g,"|")+")\\b"),null]);d.push(["pln",/^\s+/,null," \r\n\t\u00a0"]);b="^.[^\\s\\w.$@'\"`/\\\\]*";a.regexLiterals&&(b+="(?!s*/)");f.push(["lit",/^@[a-z_$][a-z_$@0-9]*/i,null],["typ",/^(?:[@_]?[A-Z]+[a-z][A-Za-z_$@0-9]*|\w+_t\b)/,null],["pln",/^[a-z_$][a-z_$@0-9]*/i, -null],["lit",/^(?:0x[a-f0-9]+|(?:\d(?:_\d+)*\d*(?:\.\d*)?|\.\d\+)(?:e[+\-]?\d+)?)[a-z]*/i,null,"0123456789"],["pln",/^\\[\s\S]?/,null],["pun",new 
RegExp(b),null]);return G(d,f)}function L(a,d,f){function b(a){var c=a.nodeType;if(1==c&&!A.test(a.className))if("br"===a.nodeName)v(a),a.parentNode&&a.parentNode.removeChild(a);else for(a=a.firstChild;a;a=a.nextSibling)b(a);else if((3==c||4==c)&&f){var d=a.nodeValue,q=d.match(n);q&&(c=d.substring(0,q.index),a.nodeValue=c,(d=d.substring(q.index+q[0].length))&& -a.parentNode.insertBefore(l.createTextNode(d),a.nextSibling),v(a),c||a.parentNode.removeChild(a))}}function v(a){function b(a,c){var d=c?a.cloneNode(!1):a,k=a.parentNode;if(k){var k=b(k,1),e=a.nextSibling;k.appendChild(d);for(var f=e;f;f=e)e=f.nextSibling,k.appendChild(f)}return d}for(;!a.nextSibling;)if(a=a.parentNode,!a)return;a=b(a.nextSibling,0);for(var d;(d=a.parentNode)&&1===d.nodeType;)a=d;c.push(a)}for(var A=/(?:^|\s)nocode(?:\s|$)/,n=/\r\n?|\n/,l=a.ownerDocument,m=l.createElement("li");a.firstChild;)m.appendChild(a.firstChild); -for(var c=[m],p=0;p=+v[1],d=/\n/g,A=a.a,n=A.length,f=0,l=a.c,m=l.length,b=0,c=a.g,p=c.length,w=0;c[p]=n;var r,e;for(e=r=0;e=h&&(b+=2);f>=k&&(w+=2)}}finally{g&&(g.style.display=a)}}catch(x){E.console&&console.log(x&&x.stack||x)}}var E=window,C=["break,continue,do,else,for,if,return,while"], -F=[[C,"auto,case,char,const,default,double,enum,extern,float,goto,inline,int,long,register,restrict,short,signed,sizeof,static,struct,switch,typedef,union,unsigned,void,volatile"],"catch,class,delete,false,import,new,operator,private,protected,public,this,throw,true,try,typeof"],H=[F,"alignas,alignof,align_union,asm,axiom,bool,concept,concept_map,const_cast,constexpr,decltype,delegate,dynamic_cast,explicit,export,friend,generic,late_check,mutable,namespace,noexcept,noreturn,nullptr,property,reinterpret_cast,static_assert,static_cast,template,typeid,typename,using,virtual,where"], 
-O=[F,"abstract,assert,boolean,byte,extends,finally,final,implements,import,instanceof,interface,null,native,package,strictfp,super,synchronized,throws,transient"],P=[F,"abstract,add,alias,as,ascending,async,await,base,bool,by,byte,checked,decimal,delegate,descending,dynamic,event,finally,fixed,foreach,from,get,global,group,implicit,in,interface,internal,into,is,join,let,lock,null,object,out,override,orderby,params,partial,readonly,ref,remove,sbyte,sealed,select,set,stackalloc,string,select,uint,ulong,unchecked,unsafe,ushort,value,var,virtual,where,yield"], -F=[F,"abstract,async,await,constructor,debugger,enum,eval,export,function,get,implements,instanceof,interface,let,null,set,undefined,var,with,yield,Infinity,NaN"],Q=[C,"and,as,assert,class,def,del,elif,except,exec,finally,from,global,import,in,is,lambda,nonlocal,not,or,pass,print,raise,try,with,yield,False,True,None"],R=[C,"alias,and,begin,case,class,def,defined,elsif,end,ensure,false,in,module,next,nil,not,or,redo,rescue,retry,self,super,then,true,undef,unless,until,when,yield,BEGIN,END"],C=[C,"case,done,elif,esac,eval,fi,function,in,local,set,then,until"], -S=/^(DIR|FILE|array|vector|(de|priority_)?queue|(forward_)?list|stack|(const_)?(reverse_)?iterator|(unordered_)?(multi)?(set|map)|bitset|u?(int|float)\d*)\b/,W=/\S/,X=y({keywords:[H,P,O,F,"caller,delete,die,do,dump,elsif,eval,exit,foreach,for,goto,if,import,last,local,my,next,no,our,print,package,redo,require,sub,undef,unless,until,use,wantarray,while,BEGIN,END",Q,R,C],hashComments:!0,cStyleComments:!0,multiLineStrings:!0,regexLiterals:!0}),I={};t(X,["default-code"]);t(G([],[["pln",/^[^]*(?:>|$)/],["com",/^<\!--[\s\S]*?(?:-\->|$)/],["lang-",/^<\?([\s\S]+?)(?:\?>|$)/],["lang-",/^<%([\s\S]+?)(?:%>|$)/],["pun",/^(?:<[%?]|[%?]>)/],["lang-",/^]*>([\s\S]+?)<\/xmp\b[^>]*>/i],["lang-js",/^]*>([\s\S]*?)(<\/script\b[^>]*>)/i],["lang-css",/^]*>([\s\S]*?)(<\/style\b[^>]*>)/i],["lang-in.tag",/^(<\/?[a-z][^<>]*>)/i]]),"default-markup htm html mxml xhtml xml xsl".split(" 
"));t(G([["pln",/^[\s]+/,null," \t\r\n"],["atv",/^(?:\"[^\"]*\"?|\'[^\']*\'?)/,null, -"\"'"]],[["tag",/^^<\/?[a-z](?:[\w.:-]*\w)?|\/?>$/i],["atn",/^(?!style[\s=]|on)[a-z](?:[\w:-]*\w)?/i],["lang-uq.val",/^=\s*([^>\'\"\s]*(?:[^>\'\"\s\/]|\/(?=\s)))/],["pun",/^[=<>\/]+/],["lang-js",/^on\w+\s*=\s*\"([^\"]+)\"/i],["lang-js",/^on\w+\s*=\s*\'([^\']+)\'/i],["lang-js",/^on\w+\s*=\s*([^\"\'>\s]+)/i],["lang-css",/^style\s*=\s*\"([^\"]+)\"/i],["lang-css",/^style\s*=\s*\'([^\']+)\'/i],["lang-css",/^style\s*=\s*([^\"\'>\s]+)/i]]),["in.tag"]);t(G([],[["atv",/^[\s\S]+/]]),["uq.val"]);t(y({keywords:H, -hashComments:!0,cStyleComments:!0,types:S}),"c cc cpp cxx cyc m".split(" "));t(y({keywords:"null,true,false"}),["json"]);t(y({keywords:P,hashComments:!0,cStyleComments:!0,verbatimStrings:!0,types:S}),["cs"]);t(y({keywords:O,cStyleComments:!0}),["java"]);t(y({keywords:C,hashComments:!0,multiLineStrings:!0}),["bash","bsh","csh","sh"]);t(y({keywords:Q,hashComments:!0,multiLineStrings:!0,tripleQuotedStrings:!0}),["cv","py","python"]);t(y({keywords:"caller,delete,die,do,dump,elsif,eval,exit,foreach,for,goto,if,import,last,local,my,next,no,our,print,package,redo,require,sub,undef,unless,until,use,wantarray,while,BEGIN,END", -hashComments:!0,multiLineStrings:!0,regexLiterals:2}),["perl","pl","pm"]);t(y({keywords:R,hashComments:!0,multiLineStrings:!0,regexLiterals:!0}),["rb","ruby"]);t(y({keywords:F,cStyleComments:!0,regexLiterals:!0}),["javascript","js","ts","typescript"]);t(y({keywords:"all,and,by,catch,class,else,extends,false,finally,for,if,in,is,isnt,loop,new,no,not,null,of,off,on,or,return,super,then,throw,true,try,unless,until,when,while,yes",hashComments:3,cStyleComments:!0,multilineStrings:!0,tripleQuotedStrings:!0, -regexLiterals:!0}),["coffee"]);t(G([],[["str",/^[\s\S]+/]]),["regex"]);var 
Y=E.PR={createSimpleLexer:G,registerLangHandler:t,sourceDecorator:y,PR_ATTRIB_NAME:"atn",PR_ATTRIB_VALUE:"atv",PR_COMMENT:"com",PR_DECLARATION:"dec",PR_KEYWORD:"kwd",PR_LITERAL:"lit",PR_NOCODE:"nocode",PR_PLAIN:"pln",PR_PUNCTUATION:"pun",PR_SOURCE:"src",PR_STRING:"str",PR_TAG:"tag",PR_TYPE:"typ",prettyPrintOne:E.prettyPrintOne=function(a,d,f){f=f||!1;d=d||null;var b=document.createElement("div");b.innerHTML="
"+a+"
"; -b=b.firstChild;f&&L(b,f,!0);M({j:d,m:f,h:b,l:1,a:null,i:null,c:null,g:null});return b.innerHTML},prettyPrint:E.prettyPrint=function(a,d){function f(){for(var b=E.PR_SHOULD_USE_CONTINUATION?c.now()+250:Infinity;p + *

  * <fieldType name="text_arnormal" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.StandardTokenizerFactory"/>
  *     <filter class="solr.ArabicNormalizationFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType> * * @since 3.1 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/ar/ArabicStemFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/ar/ArabicStemFilterFactory.java index 9f2e70270c7f..f9dae8b9a542 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/ar/ArabicStemFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/ar/ArabicStemFilterFactory.java @@ -23,14 +23,14 @@ /** * Factory for {@link ArabicStemFilter}. * - *
+ * 

  * <fieldType name="text_arstem" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.StandardTokenizerFactory"/>
  *     <filter class="solr.ArabicNormalizationFilterFactory"/>
  *     <filter class="solr.ArabicStemFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/bg/BulgarianStemFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/bg/BulgarianStemFilterFactory.java index 8aebc1e0726e..1152128fba95 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/bg/BulgarianStemFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/bg/BulgarianStemFilterFactory.java @@ -23,14 +23,14 @@ /** * Factory for {@link BulgarianStemFilter}. * - *
+ * 

  * <fieldType name="text_bgstem" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.StandardTokenizerFactory"/>
  *     <filter class="solr.LowerCaseFilterFactory"/>
  *     <filter class="solr.BulgarianStemFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/bn/BengaliNormalizationFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/bn/BengaliNormalizationFilterFactory.java index b95cbfbb93a5..6090db2336f2 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/bn/BengaliNormalizationFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/bn/BengaliNormalizationFilterFactory.java @@ -23,13 +23,13 @@ /** * Factory for {@link BengaliNormalizationFilter}. * - *
+ * 

  * <fieldType name="text_bnnormal" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.StandardTokenizerFactory"/>
  *     <filter class="solr.BengaliNormalizationFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 7.1.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/bn/BengaliStemFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/bn/BengaliStemFilterFactory.java index a8711b4df53e..f238d4537296 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/bn/BengaliStemFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/bn/BengaliStemFilterFactory.java @@ -23,13 +23,13 @@ /** * Factory for {@link BengaliStemFilter}. * - *
+ * 

  * <fieldType name="text_histem" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.StandardTokenizerFactory"/>
  *     <filter class="solr.BengaliStemFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 7.1.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/boost/DelimitedBoostTokenFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/boost/DelimitedBoostTokenFilterFactory.java index 71f42bd587b2..fe997448c793 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/boost/DelimitedBoostTokenFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/boost/DelimitedBoostTokenFilterFactory.java @@ -23,13 +23,13 @@ /** * Factory for {@link DelimitedBoostTokenFilter}. * - *
+ * 

  * <fieldType name="text_dlmtd" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.WhitespaceTokenizerFactory"/>
  *     <filter class="solr.DelimitedBoostTokenFilterFactory" delimiter="|"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @lucene.spi {@value #NAME} */ diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/br/BrazilianStemFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/br/BrazilianStemFilterFactory.java index 634e1cc1b7ee..b9b43aec1b9a 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/br/BrazilianStemFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/br/BrazilianStemFilterFactory.java @@ -23,14 +23,14 @@ /** * Factory for {@link BrazilianStemFilter}. * - *
+ * 

  * <fieldType name="text_brstem" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.StandardTokenizerFactory"/>
  *     <filter class="solr.LowerCaseFilterFactory"/>
  *     <filter class="solr.BrazilianStemFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilterFactory.java index 84fc1cf7cfcd..453cae98cef0 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilterFactory.java @@ -25,13 +25,13 @@ /** * Factory for {@link HTMLStripCharFilter}. * - *
+ * 

  * <fieldType name="text_html" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <charFilter class="solr.HTMLStripCharFilterFactory" escapedTags="a, title" />
  *     <tokenizer class="solr.WhitespaceTokenizerFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/MappingCharFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/MappingCharFilterFactory.java index 6ad3c3be39f2..21fdc5caa4a6 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/MappingCharFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/MappingCharFilterFactory.java @@ -30,13 +30,13 @@ /** * Factory for {@link MappingCharFilter}. * - *
+ * 

  * <fieldType name="text_map" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <charFilter class="solr.MappingCharFilterFactory" mapping="mapping.txt"/>
  *     <tokenizer class="solr.WhitespaceTokenizerFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since Solr 1.4 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/cjk/CJKBigramFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/cjk/CJKBigramFilterFactory.java index 1c8699a92e3a..7a99a2077131 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/cjk/CJKBigramFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/cjk/CJKBigramFilterFactory.java @@ -23,7 +23,7 @@ /** * Factory for {@link CJKBigramFilter}. * - *
+ * 

  * <fieldType name="text_cjk" class="solr.TextField">
  *   <analyzer>
  *     <tokenizer class="solr.StandardTokenizerFactory"/>
@@ -33,7 +33,7 @@
  *       han="true" hiragana="true"
  *       katakana="true" hangul="true" outputUnigrams="false" />
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.6.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/cjk/CJKWidthFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/cjk/CJKWidthFilterFactory.java index 8e464fd7432d..b9ee825d1fbc 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/cjk/CJKWidthFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/cjk/CJKWidthFilterFactory.java @@ -23,7 +23,7 @@ /** * Factory for {@link CJKWidthFilter}. * - *
+ * 

  * <fieldType name="text_cjk" class="solr.TextField">
  *   <analyzer>
  *     <tokenizer class="solr.StandardTokenizerFactory"/>
@@ -31,7 +31,7 @@
  *     <filter class="solr.LowerCaseFilterFactory"/>
  *     <filter class="solr.CJKBigramFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.6.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/ckb/SoraniNormalizationFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/ckb/SoraniNormalizationFilterFactory.java index 59409f74888c..7e6451e6c904 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/ckb/SoraniNormalizationFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/ckb/SoraniNormalizationFilterFactory.java @@ -23,13 +23,13 @@ /** * Factory for {@link SoraniNormalizationFilter}. * - *
+ * 

  * <fieldType name="text_ckbnormal" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.StandardTokenizerFactory"/>
  *     <filter class="solr.SoraniNormalizationFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 4.7.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/ckb/SoraniStemFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/ckb/SoraniStemFilterFactory.java index 00ce270a8859..8df056edf810 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/ckb/SoraniStemFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/ckb/SoraniStemFilterFactory.java @@ -23,14 +23,14 @@ /** * Factory for {@link SoraniStemFilter}. * - *
+ * 

  * <fieldType name="text_ckbstem" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.StandardTokenizerFactory"/>
  *     <filter class="solr.SoraniNormalizationFilterFactory"/>
  *     <filter class="solr.SoraniStemFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 4.7.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/classic/ClassicFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/classic/ClassicFilterFactory.java index 4c153bfdad64..dfb5bb4806ac 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/classic/ClassicFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/classic/ClassicFilterFactory.java @@ -24,13 +24,13 @@ /** * Factory for {@link ClassicFilter}. * - *
+ * 

  * <fieldType name="text_clssc" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.ClassicTokenizerFactory"/>
  *     <filter class="solr.ClassicFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/classic/ClassicTokenizerFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/classic/ClassicTokenizerFactory.java index d62dbf9cdbae..1ecf84c7e920 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/classic/ClassicTokenizerFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/classic/ClassicTokenizerFactory.java @@ -24,12 +24,12 @@ /** * Factory for {@link ClassicTokenizer}. * - *
+ * 

  * <fieldType name="text_clssc" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.ClassicTokenizerFactory" maxTokenLength="120"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/commongrams/CommonGramsFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/commongrams/CommonGramsFilterFactory.java index 939b712a75fc..052d421a01d5 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/commongrams/CommonGramsFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/commongrams/CommonGramsFilterFactory.java @@ -26,13 +26,13 @@ /** * Constructs a {@link CommonGramsFilter}. * - *
+ * 

  * <fieldType name="text_cmmngrms" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.WhitespaceTokenizerFactory"/>
  *     <filter class="solr.CommonGramsFilterFactory" words="commongramsstopwords.txt" ignoreCase="false"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/commongrams/CommonGramsQueryFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/commongrams/CommonGramsQueryFilterFactory.java index 0e19d787f535..0f911b9b12c5 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/commongrams/CommonGramsQueryFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/commongrams/CommonGramsQueryFilterFactory.java @@ -23,13 +23,13 @@ /** * Construct {@link CommonGramsQueryFilter}. * - *
+ * 

  * <fieldType name="text_cmmngrmsqry" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.WhitespaceTokenizerFactory"/>
  *     <filter class="solr.CommonGramsQueryFilterFactory" words="commongramsquerystopwords.txt" ignoreCase="false"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/DictionaryCompoundWordTokenFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/DictionaryCompoundWordTokenFilterFactory.java index 13071aec38d7..7102b65d3532 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/DictionaryCompoundWordTokenFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/DictionaryCompoundWordTokenFilterFactory.java @@ -28,14 +28,14 @@ /** * Factory for {@link DictionaryCompoundWordTokenFilter}. * - *
+ * 

  * <fieldType name="text_dictcomp" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.WhitespaceTokenizerFactory"/>
  *     <filter class="solr.DictionaryCompoundWordTokenFilterFactory" dictionary="dictionary.txt"
  *         minWordSize="5" minSubwordSize="2" maxSubwordSize="15" onlyLongestMatch="true"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/HyphenationCompoundWordTokenFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/HyphenationCompoundWordTokenFilterFactory.java index 8a40de23fb8c..54428b23a695 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/HyphenationCompoundWordTokenFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/HyphenationCompoundWordTokenFilterFactory.java @@ -49,14 +49,14 @@ * *
* - *
+ * 

  * <fieldType name="text_hyphncomp" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.WhitespaceTokenizerFactory"/>
  *     <filter class="solr.HyphenationCompoundWordTokenFilterFactory" hyphenator="hyphenator.xml" encoding="UTF-8"
  *         dictionary="dictionary.txt" minWordSize="5" minSubwordSize="2" maxSubwordSize="15" onlyLongestMatch="false"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @see HyphenationCompoundWordTokenFilter * @since 3.1.0 diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/package-info.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/package-info.java index 0f89337b1dac..bcfbd1d79658 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/package-info.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/package-info.java @@ -138,7 +138,7 @@ * *

Examples

* - *
+ * 

  *   public void testHyphenationCompoundWordsDE() throws Exception {
  *     String[] dict = { "Rind", "Fleisch", "Draht", "Schere", "Gesetz",
  *         "Aufgabe", "Überwachung" };
@@ -192,6 +192,6 @@
  *        System.out.println(t);
  *     }
  *   }
- * 
+ *
*/ package org.apache.lucene.analysis.compound; diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/DecimalDigitFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/DecimalDigitFilterFactory.java index b2c7b9a3fb12..37194237ec52 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/DecimalDigitFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/DecimalDigitFilterFactory.java @@ -23,13 +23,13 @@ /** * Factory for {@link DecimalDigitFilter}. * - *
+ * 

  * <fieldType name="text_lwrcase" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.WhitespaceTokenizerFactory"/>
  *     <filter class="solr.DecimalDigitFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 5.4.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/KeywordTokenizerFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/KeywordTokenizerFactory.java index 0f8a8c6a7d71..959e4e493922 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/KeywordTokenizerFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/KeywordTokenizerFactory.java @@ -25,12 +25,12 @@ /** * Factory for {@link KeywordTokenizer}. * - *
+ * 

  * <fieldType name="text_keyword" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.KeywordTokenizerFactory" maxTokenLen="256"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * Options: * diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/LetterTokenizerFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/LetterTokenizerFactory.java index 3741fd811d3e..04748eee104f 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/LetterTokenizerFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/LetterTokenizerFactory.java @@ -26,12 +26,12 @@ /** * Factory for {@link LetterTokenizer}. * - *
+ * 

  * <fieldType name="text_letter" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.LetterTokenizerFactory" maxTokenLen="256"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * Options: * diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/LowerCaseFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/LowerCaseFilterFactory.java index 8558e7c3cac0..e78618759c9b 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/LowerCaseFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/LowerCaseFilterFactory.java @@ -23,13 +23,13 @@ /** * Factory for {@link LowerCaseFilter}. * - *
+ * 

  * <fieldType name="text_lwrcase" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.WhitespaceTokenizerFactory"/>
  *     <filter class="solr.LowerCaseFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/StopFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/StopFilterFactory.java index b7ea4615f1df..4c0a8c71f66d 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/StopFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/StopFilterFactory.java @@ -26,14 +26,14 @@ /** * Factory for {@link StopFilter}. * - *
+ * 

  * <fieldType name="text_stop" class="solr.TextField" positionIncrementGap="100" autoGeneratePhraseQueries="true">
  *   <analyzer>
  *     <tokenizer class="solr.WhitespaceTokenizerFactory"/>
  *     <filter class="solr.StopFilterFactory" ignoreCase="true"
  *             words="stopwords.txt" format="wordset"
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* *

All attributes are optional: * diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/TypeTokenFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/TypeTokenFilterFactory.java index e1baa25c9d33..c58f206533ad 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/TypeTokenFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/TypeTokenFilterFactory.java @@ -29,14 +29,14 @@ /** * Factory class for {@link TypeTokenFilter}. * - *

+ * 

  * <fieldType name="chars" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.StandardTokenizerFactory"/>
  *     <filter class="solr.TypeTokenFilterFactory" types="stoptypes.txt"
  *                   useWhitelist="false"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.6.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/UpperCaseFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/UpperCaseFilterFactory.java index 0f86ca5495d1..a6af42519e11 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/UpperCaseFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/UpperCaseFilterFactory.java @@ -23,13 +23,13 @@ /** * Factory for {@link UpperCaseFilter}. * - *
+ * 

  * <fieldType name="text_uppercase" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.WhitespaceTokenizerFactory"/>
  *     <filter class="solr.UpperCaseFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* *

NOTE: In Unicode, this transformation may lose information when the upper case * character represents more than one lower case character. Use this filter when you require diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/WhitespaceTokenizerFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/WhitespaceTokenizerFactory.java index e62e5a3761c4..647800ff7d10 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/WhitespaceTokenizerFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/WhitespaceTokenizerFactory.java @@ -29,12 +29,12 @@ /** * Factory for {@link WhitespaceTokenizer}. * - *

+ * 

  * <fieldType name="text_ws" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.WhitespaceTokenizerFactory" rule="unicode"  maxTokenLen="256"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * Options: * diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/custom/CustomAnalyzer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/custom/CustomAnalyzer.java index a6a41e94c0c6..1ed08f7b288e 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/custom/CustomAnalyzer.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/custom/CustomAnalyzer.java @@ -55,13 +55,13 @@ *

You can create an instance of this Analyzer using the builder by passing the SPI names (as * defined by {@link java.util.ServiceLoader} interface) to it: * - *

+ * 

  * Analyzer ana = CustomAnalyzer.builder(Paths.get("/path/to/config/dir"))
  *   .withTokenizer(StandardTokenizerFactory.NAME)
  *   .addTokenFilter(LowerCaseFilterFactory.NAME)
  *   .addTokenFilter(StopFilterFactory.NAME, "ignoreCase", "false", "words", "stopwords.txt", "format", "wordset")
  *   .build();
- * 
+ *
* * The parameters passed to components are also used by Apache Solr and are documented on their * corresponding factory classes. Refer to documentation of subclasses of {@link TokenizerFactory}, @@ -69,13 +69,13 @@ * *

This is the same as the above: * - *

+ * 

  * Analyzer ana = CustomAnalyzer.builder(Paths.get("/path/to/config/dir"))
  *   .withTokenizer("standard")
  *   .addTokenFilter("lowercase")
  *   .addTokenFilter("stop", "ignoreCase", "false", "words", "stopwords.txt", "format", "wordset")
  *   .build();
- * 
+ *
* *

The list of names to be used for components can be looked up through: {@link * TokenizerFactory#availableTokenizers()}, {@link TokenFilterFactory#availableTokenFilters()}, and @@ -84,7 +84,7 @@ *

You can create conditional branches in the analyzer by using {@link Builder#when(String, * String...)} and {@link Builder#whenTerm(Predicate)}: * - *

+ * 

  * Analyzer ana = CustomAnalyzer.builder()
  *    .withTokenizer("standard")
  *    .addTokenFilter("lowercase")
@@ -92,7 +92,7 @@
  *      .addTokenFilter("reversestring")
  *    .endwhen()
  *    .build();
- * 
+ *
* * @since 5.0.0 */ diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/cz/CzechStemFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/cz/CzechStemFilterFactory.java index 5b5c4ac66a30..6b9f2faac0a4 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/cz/CzechStemFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/cz/CzechStemFilterFactory.java @@ -23,14 +23,14 @@ /** * Factory for {@link CzechStemFilter}. * - *
+ * 

  * <fieldType name="text_czstem" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.StandardTokenizerFactory"/>
  *     <filter class="solr.LowerCaseFilterFactory"/>
  *     <filter class="solr.CzechStemFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/de/GermanLightStemFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/de/GermanLightStemFilterFactory.java index 1f4a357ffcaf..66d3e8482794 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/de/GermanLightStemFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/de/GermanLightStemFilterFactory.java @@ -23,14 +23,14 @@ /** * Factory for {@link GermanLightStemFilter}. * - *
+ * 

  * <fieldType name="text_delgtstem" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.StandardTokenizerFactory"/>
  *     <filter class="solr.LowerCaseFilterFactory"/>
  *     <filter class="solr.GermanLightStemFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/de/GermanMinimalStemFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/de/GermanMinimalStemFilterFactory.java index 1183a00048a9..0270c2479fa1 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/de/GermanMinimalStemFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/de/GermanMinimalStemFilterFactory.java @@ -23,14 +23,14 @@ /** * Factory for {@link GermanMinimalStemFilter}. * - *
+ * 

  * <fieldType name="text_deminstem" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.StandardTokenizerFactory"/>
  *     <filter class="solr.LowerCaseFilterFactory"/>
  *     <filter class="solr.GermanMinimalStemFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/de/GermanNormalizationFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/de/GermanNormalizationFilterFactory.java index f55e26812b94..93ddf39a7877 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/de/GermanNormalizationFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/de/GermanNormalizationFilterFactory.java @@ -23,14 +23,14 @@ /** * Factory for {@link GermanNormalizationFilter}. * - *
+ * 

  * <fieldType name="text_denorm" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.StandardTokenizerFactory"/>
  *     <filter class="solr.LowerCaseFilterFactory"/>
  *     <filter class="solr.GermanNormalizationFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.6.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/de/GermanStemFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/de/GermanStemFilterFactory.java index 4bf9b74650ef..3768630d65b9 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/de/GermanStemFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/de/GermanStemFilterFactory.java @@ -23,14 +23,14 @@ /** * Factory for {@link GermanStemFilter}. * - *
+ * 

  * <fieldType name="text_destem" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.StandardTokenizerFactory"/>
  *     <filter class="solr.LowerCaseFilterFactory"/>
  *     <filter class="solr.GermanStemFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/el/GreekLowerCaseFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/el/GreekLowerCaseFilterFactory.java index 54789ccde16a..5cfa11ec8fd1 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/el/GreekLowerCaseFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/el/GreekLowerCaseFilterFactory.java @@ -23,13 +23,13 @@ /** * Factory for {@link GreekLowerCaseFilter}. * - *
+ * 

  * <fieldType name="text_glc" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.StandardTokenizerFactory"/>
  *     <filter class="solr.GreekLowerCaseFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/el/GreekStemFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/el/GreekStemFilterFactory.java index c6dc20031d92..6c4980cd14fa 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/el/GreekStemFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/el/GreekStemFilterFactory.java @@ -23,14 +23,14 @@ /** * Factory for {@link GreekStemFilter}. * - *
+ * 

  * <fieldType name="text_gstem" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.StandardTokenizerFactory"/>
  *     <filter class="solr.GreekLowerCaseFilterFactory"/>
  *     <filter class="solr.GreekStemFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/email/UAX29URLEmailTokenizerFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/email/UAX29URLEmailTokenizerFactory.java index 566cd6723501..d46f05f05bc1 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/email/UAX29URLEmailTokenizerFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/email/UAX29URLEmailTokenizerFactory.java @@ -24,12 +24,12 @@ /** * Factory for {@link UAX29URLEmailTokenizer}. * - *
+ * 

  * <fieldType name="text_urlemail" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.UAX29URLEmailTokenizerFactory" maxTokenLength="255"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/en/EnglishMinimalStemFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/en/EnglishMinimalStemFilterFactory.java index 68f230126314..66a0809e7ae6 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/en/EnglishMinimalStemFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/en/EnglishMinimalStemFilterFactory.java @@ -23,14 +23,14 @@ /** * Factory for {@link EnglishMinimalStemFilter}. * - *
+ * 

  * <fieldType name="text_enminstem" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.StandardTokenizerFactory"/>
  *     <filter class="solr.LowerCaseFilterFactory"/>
  *     <filter class="solr.EnglishMinimalStemFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/en/EnglishPossessiveFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/en/EnglishPossessiveFilterFactory.java index 4eb6f13e9f1a..4a1f5b4e20f2 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/en/EnglishPossessiveFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/en/EnglishPossessiveFilterFactory.java @@ -23,14 +23,14 @@ /** * Factory for {@link EnglishPossessiveFilter}. * - *
+ * 

  * <fieldType name="text_enpossessive" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.StandardTokenizerFactory"/>
  *     <filter class="solr.LowerCaseFilterFactory"/>
  *     <filter class="solr.EnglishPossessiveFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/en/KStemFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/en/KStemFilterFactory.java index 536ae0ff6b6c..64a982ac05a6 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/en/KStemFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/en/KStemFilterFactory.java @@ -24,14 +24,14 @@ /** * Factory for {@link KStemFilter}. * - *
+ * 

  * <fieldType name="text_kstem" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.StandardTokenizerFactory"/>
  *     <filter class="solr.LowerCaseFilterFactory"/>
  *     <filter class="solr.KStemFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.3.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/en/PorterStemFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/en/PorterStemFilter.java index 1d7c4170ceb6..4b20623da9eb 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/en/PorterStemFilter.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/en/PorterStemFilter.java @@ -31,7 +31,7 @@ * the TokenStream chain as you want it. To use this with LowerCaseTokenizer, for example, you'd * write an analyzer like this:
* - *
+ * 

  * class MyAnalyzer extends Analyzer {
  *   {@literal @Override}
  *   protected TokenStreamComponents createComponents(String fieldName) {
@@ -39,7 +39,7 @@
  *     return new TokenStreamComponents(source, new PorterStemFilter(source));
  *   }
  * }
- * 
+ *
* *

Note: This filter is aware of the {@link KeywordAttribute}. To prevent certain terms from * being passed to the stemmer {@link KeywordAttribute#isKeyword()} should be set to true diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/en/PorterStemFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/en/PorterStemFilterFactory.java index aa7c4fd630a6..a73aeac3254f 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/en/PorterStemFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/en/PorterStemFilterFactory.java @@ -23,14 +23,14 @@ /** * Factory for {@link PorterStemFilter}. * - *

+ * 

  * <fieldType name="text_porterstem" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.StandardTokenizerFactory"/>
  *     <filter class="solr.LowerCaseFilterFactory"/>
  *     <filter class="solr.PorterStemFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/es/SpanishLightStemFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/es/SpanishLightStemFilterFactory.java index a2040f2cd51c..4eb3bcf7a177 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/es/SpanishLightStemFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/es/SpanishLightStemFilterFactory.java @@ -23,14 +23,14 @@ /** * Factory for {@link SpanishLightStemFilter}. * - *
+ * 

  * <fieldType name="text_eslgtstem" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.StandardTokenizerFactory"/>
  *     <filter class="solr.LowerCaseFilterFactory"/>
  *     <filter class="solr.SpanishLightStemFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/es/SpanishMinimalStemFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/es/SpanishMinimalStemFilterFactory.java index 9526704a2da8..da929afb5e2e 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/es/SpanishMinimalStemFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/es/SpanishMinimalStemFilterFactory.java @@ -24,14 +24,14 @@ /** * Factory for {@link SpanishMinimalStemFilter}. * - *
+ * 

  * <fieldType name="text_eslgtstem" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.StandardTokenizerFactory"/>
  *     <filter class="solr.LowerCaseFilterFactory"/>
  *     <filter class="solr.SpanishMinimalStemFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @lucene.spi {@value #NAME} * @deprecated Use {@link SpanishPluralStemFilterFactory} instead diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/es/SpanishPluralStemFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/es/SpanishPluralStemFilterFactory.java index 6063601b1523..e995333a2d56 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/es/SpanishPluralStemFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/es/SpanishPluralStemFilterFactory.java @@ -23,14 +23,14 @@ /** * Factory for {@link SpanishPluralStemFilterFactory}. * - *
+ * 

  * <fieldType name="text_eslgtstem" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.StandardTokenizerFactory"/>
  *     <filter class="solr.LowerCaseFilterFactory"/>
  *     <filter class="solr.SpanishPluralStemFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @lucene.spi {@value #NAME} */ diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/fa/PersianCharFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/fa/PersianCharFilterFactory.java index 0b4fbde18a27..80c35db5cfdf 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/fa/PersianCharFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/fa/PersianCharFilterFactory.java @@ -23,13 +23,13 @@ /** * Factory for {@link PersianCharFilter}. * - *
+ * 

  * <fieldType name="text_fa" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <charFilter class="solr.PersianCharFilterFactory"/>
  *     <tokenizer class="solr.StandardTokenizerFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/fa/PersianNormalizationFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/fa/PersianNormalizationFilterFactory.java index d086caf9d564..ec6161252b52 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/fa/PersianNormalizationFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/fa/PersianNormalizationFilterFactory.java @@ -23,14 +23,14 @@ /** * Factory for {@link PersianNormalizationFilter}. * - *
+ * 

  * <fieldType name="text_fanormal" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <charFilter class="solr.PersianCharFilterFactory"/>
  *     <tokenizer class="solr.StandardTokenizerFactory"/>
  *     <filter class="solr.PersianNormalizationFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/fi/FinnishLightStemFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/fi/FinnishLightStemFilterFactory.java index 67589c4dbd8a..bc8159fe283a 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/fi/FinnishLightStemFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/fi/FinnishLightStemFilterFactory.java @@ -23,14 +23,14 @@ /** * Factory for {@link FinnishLightStemFilter}. * - *
+ * 

  * <fieldType name="text_filgtstem" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.StandardTokenizerFactory"/>
  *     <filter class="solr.LowerCaseFilterFactory"/>
  *     <filter class="solr.FinnishLightStemFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/fr/FrenchLightStemFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/fr/FrenchLightStemFilterFactory.java index ee0c7ceb5f4a..71bb605b7183 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/fr/FrenchLightStemFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/fr/FrenchLightStemFilterFactory.java @@ -23,7 +23,7 @@ /** * Factory for {@link FrenchLightStemFilter}. * - *
+ * 

  * <fieldType name="text_frlgtstem" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.StandardTokenizerFactory"/>
@@ -31,7 +31,7 @@
  *     <filter class="solr.ElisionFilterFactory"/>
  *     <filter class="solr.FrenchLightStemFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/fr/FrenchMinimalStemFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/fr/FrenchMinimalStemFilterFactory.java index de7399911f57..c309be23cf38 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/fr/FrenchMinimalStemFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/fr/FrenchMinimalStemFilterFactory.java @@ -23,7 +23,7 @@ /** * Factory for {@link FrenchMinimalStemFilter}. * - *
+ * 

  * <fieldType name="text_frminstem" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.StandardTokenizerFactory"/>
@@ -31,7 +31,7 @@
  *     <filter class="solr.ElisionFilterFactory"/>
  *     <filter class="solr.FrenchMinimalStemFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/ga/IrishLowerCaseFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/ga/IrishLowerCaseFilterFactory.java index 5cbe67db6c39..489d50e61f84 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/ga/IrishLowerCaseFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/ga/IrishLowerCaseFilterFactory.java @@ -23,13 +23,13 @@ /** * Factory for {@link IrishLowerCaseFilter}. * - *
+ * 

  * <fieldType name="text_ga" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.StandardTokenizerFactory"/>
  *     <filter class="solr.IrishLowerCaseFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.6.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/gl/GalicianMinimalStemFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/gl/GalicianMinimalStemFilterFactory.java index c0bc05e636ce..08b83e328f5f 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/gl/GalicianMinimalStemFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/gl/GalicianMinimalStemFilterFactory.java @@ -23,14 +23,14 @@ /** * Factory for {@link GalicianMinimalStemFilter}. * - *
+ * 

  * <fieldType name="text_glplural" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.StandardTokenizerFactory"/>
  *     <filter class="solr.LowerCaseFilterFactory"/>
  *     <filter class="solr.GalicianMinimalStemFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.6.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/gl/GalicianStemFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/gl/GalicianStemFilterFactory.java index 825101a185dd..3f7288873597 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/gl/GalicianStemFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/gl/GalicianStemFilterFactory.java @@ -23,14 +23,14 @@ /** * Factory for {@link GalicianStemFilter}. * - *
+ * 

  * <fieldType name="text_glstem" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.StandardTokenizerFactory"/>
  *     <filter class="solr.LowerCaseFilterFactory"/>
  *     <filter class="solr.GalicianStemFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hi/HindiNormalizationFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hi/HindiNormalizationFilterFactory.java index a6b39cd4d44a..1f757d861f72 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hi/HindiNormalizationFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hi/HindiNormalizationFilterFactory.java @@ -23,13 +23,13 @@ /** * Factory for {@link HindiNormalizationFilter}. * - *
+ * 

  * <fieldType name="text_hinormal" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.StandardTokenizerFactory"/>
  *     <filter class="solr.HindiNormalizationFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hi/HindiStemFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hi/HindiStemFilterFactory.java index c20659c8e4bc..fd27b88814f0 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hi/HindiStemFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hi/HindiStemFilterFactory.java @@ -23,13 +23,13 @@ /** * Factory for {@link HindiStemFilter}. * - *
+ * 

  * <fieldType name="text_histem" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.StandardTokenizerFactory"/>
  *     <filter class="solr.HindiStemFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hu/HungarianLightStemFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hu/HungarianLightStemFilterFactory.java index b115ce9dce45..7955bc474715 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hu/HungarianLightStemFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hu/HungarianLightStemFilterFactory.java @@ -23,14 +23,14 @@ /** * Factory for {@link HungarianLightStemFilter}. * - *
+ * 

  * <fieldType name="text_hulgtstem" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.StandardTokenizerFactory"/>
  *     <filter class="solr.LowerCaseFilterFactory"/>
  *     <filter class="solr.HungarianLightStemFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/HunspellStemFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/HunspellStemFilterFactory.java index e4bab11874f8..63a775b5a8ce 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/HunspellStemFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/HunspellStemFilterFactory.java @@ -36,12 +36,12 @@ * TokenFilterFactory that creates instances of {@link HunspellStemFilter}. Example config for * British English: * - *
+ * 

  * <filter class="solr.HunspellStemFilterFactory"
  *         dictionary="en_GB.dic,my_custom.dic"
  *         affix="en_GB.aff"
  *         ignoreCase="false"
- *         longestOnly="false" />
+ * longestOnly="false" />
* * Both parameters dictionary and affix are mandatory. Dictionaries for many languages are available * through the OpenOffice project. diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/id/IndonesianStemFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/id/IndonesianStemFilterFactory.java index e36d115b7087..bc577b6fc3bf 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/id/IndonesianStemFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/id/IndonesianStemFilterFactory.java @@ -23,14 +23,14 @@ /** * Factory for {@link IndonesianStemFilter}. * - *
+ * 

  * <fieldType name="text_idstem" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.StandardTokenizerFactory"/>
  *     <filter class="solr.LowerCaseFilterFactory"/>
  *     <filter class="solr.IndonesianStemFilterFactory" stemDerivational="true"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/in/IndicNormalizationFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/in/IndicNormalizationFilterFactory.java index a8cec4852770..0af2b5fead3b 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/in/IndicNormalizationFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/in/IndicNormalizationFilterFactory.java @@ -23,13 +23,13 @@ /** * Factory for {@link IndicNormalizationFilter}. * - *
+ * 

  * <fieldType name="text_innormal" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.StandardTokenizerFactory"/>
  *     <filter class="solr.IndicNormalizationFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/it/ItalianLightStemFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/it/ItalianLightStemFilterFactory.java index df570336946c..0c42fec01a5b 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/it/ItalianLightStemFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/it/ItalianLightStemFilterFactory.java @@ -23,14 +23,14 @@ /** * Factory for {@link ItalianLightStemFilter}. * - *
+ * 

  * <fieldType name="text_itlgtstem" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.StandardTokenizerFactory"/>
  *     <filter class="solr.LowerCaseFilterFactory"/>
  *     <filter class="solr.ItalianLightStemFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/lv/LatvianStemFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/lv/LatvianStemFilterFactory.java index 8810753626ee..61c51cf61ecc 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/lv/LatvianStemFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/lv/LatvianStemFilterFactory.java @@ -23,14 +23,14 @@ /** * Factory for {@link LatvianStemFilter}. * - *
+ * 

  * <fieldType name="text_lvstem" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.StandardTokenizerFactory"/>
  *     <filter class="solr.LowerCaseFilterFactory"/>
  *     <filter class="solr.LatvianStemFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.2.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilterFactory.java index 53038cdaa4ed..0bcadfa5a46a 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilterFactory.java @@ -23,13 +23,13 @@ /** * Factory for {@link ASCIIFoldingFilter}. * - *
+ * 

  * <fieldType name="text_ascii" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.WhitespaceTokenizerFactory"/>
  *     <filter class="solr.ASCIIFoldingFilterFactory" preserveOriginal="false"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/CapitalizationFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/CapitalizationFilterFactory.java index 7f2f81fd6370..43756eb8ebef 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/CapitalizationFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/CapitalizationFilterFactory.java @@ -46,7 +46,7 @@ * assumed to be correct. * * - *
+ * 

  * <fieldType name="text_cptlztn" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.WhitespaceTokenizerFactory"/>
@@ -54,7 +54,7 @@
  *           keep="java solr lucene" keepIgnoreCase="false"
  *           okPrefix="McK McD McA"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since solr 1.3 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/CodepointCountFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/CodepointCountFilterFactory.java index 6211ff581660..e92ed7e82e70 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/CodepointCountFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/CodepointCountFilterFactory.java @@ -23,13 +23,13 @@ /** * Factory for {@link CodepointCountFilter}. * - *
+ * 

  * <fieldType name="text_lngth" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.WhitespaceTokenizerFactory"/>
  *     <filter class="solr.CodepointCountFilterFactory" min="0" max="1" />
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 4.5.1 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/DateRecognizerFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/DateRecognizerFilterFactory.java index aa6a40b0157b..b5926116e66e 100755 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/DateRecognizerFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/DateRecognizerFilterFactory.java @@ -26,14 +26,14 @@ /** * Factory for {@link DateRecognizerFilter}. * - *
+ * 

  * <fieldType name="text_filter_none_date" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.WhitespaceTokenizerFactory"/>
  *     <filter class="solr.DateRecognizerFilterFactory" datePattern="yyyy/mm/dd" locale="en-US" />
  *   </analyzer>
  * </fieldType>
- * 
+ *
* *

The {@code datePattern} is optional. If omitted, {@link DateRecognizerFilter} will be created * with the default date format of the system. The {@code locale} is optional and if omitted the diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/DelimitedTermFrequencyTokenFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/DelimitedTermFrequencyTokenFilterFactory.java index 5fb58398d6d1..ab07cfcaf755 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/DelimitedTermFrequencyTokenFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/DelimitedTermFrequencyTokenFilterFactory.java @@ -25,13 +25,13 @@ * Factory for {@link DelimitedTermFrequencyTokenFilter}. The field must have {@code * omitPositions=true}. * - *

+ * 

  * <fieldType name="text_tfdl" class="solr.TextField" omitPositions="true">
  *   <analyzer>
  *     <tokenizer class="solr.WhitespaceTokenizerFactory"/>
  *     <filter class="solr.DelimitedTermFrequencyTokenFilterFactory" delimiter="|"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 7.0.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/DropIfFlaggedFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/DropIfFlaggedFilterFactory.java index efd9da2df707..f63b166c4ba0 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/DropIfFlaggedFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/DropIfFlaggedFilterFactory.java @@ -29,13 +29,13 @@ *
* In Solr this might be configured such as * - *
+ * 

  *     <analyzer type="index">
  *       <tokenizer class="solr.WhitespaceTokenizerFactory"/>
  *       <-- other filters -->
  *       <filter class="solr.DropIfFlaggedFilterFactory" dropFlags="9"/>
  *     </analyzer>
- * 
+ *
* * The above would drop any token that had the first and fourth bit set. * diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/FingerprintFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/FingerprintFilterFactory.java index e34d0a2a2ac7..76e4c29a8481 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/FingerprintFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/FingerprintFilterFactory.java @@ -23,12 +23,9 @@ /** * Factory for {@link FingerprintFilter}. * - *
- * The {@code maxOutputTokenSize} property is optional and defaults to {@code 1024}.
- * The {@code separator} property is optional and defaults to the space character.
- * See
- * {@link FingerprintFilter} for an explanation of its use.
- * 
+ *

The {@code maxOutputTokenSize} property is optional and defaults to {@code 1024}. The {@code + * separator} property is optional and defaults to the space character. See {@link + * FingerprintFilter} for an explanation of its use. * * @since 5.4.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/HyphenatedWordsFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/HyphenatedWordsFilter.java index 50649698518e..38d376b6b9dc 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/HyphenatedWordsFilter.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/HyphenatedWordsFilter.java @@ -30,7 +30,7 @@ * broken into two lines back together. This filter should be used on indexing time only. Example * field definition in schema.xml: * - *

+ * 

  * <fieldtype name="text" class="solr.TextField" positionIncrementGap="100">
  *  <analyzer type="index">
  *    <tokenizer class="solr.WhitespaceTokenizerFactory"/>
@@ -50,7 +50,7 @@
  *      <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
  *  </analyzer>
  * </fieldtype>
- * 
+ *
*/ @IgnoreRandomChains( reason = "TODO: doesn't handle graph inputs (or even look at positionIncrement)") diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/HyphenatedWordsFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/HyphenatedWordsFilterFactory.java index 130241c6de1b..c25fbcba2c0e 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/HyphenatedWordsFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/HyphenatedWordsFilterFactory.java @@ -23,13 +23,13 @@ /** * Factory for {@link HyphenatedWordsFilter}. * - *
+ * 

  * <fieldType name="text_hyphn" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.WhitespaceTokenizerFactory"/>
  *     <filter class="solr.HyphenatedWordsFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/KeepWordFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/KeepWordFilterFactory.java index 324c4ca9a43c..af792b39bd9c 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/KeepWordFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/KeepWordFilterFactory.java @@ -24,13 +24,13 @@ /** * Factory for {@link KeepWordFilter}. * - *
+ * 

  * <fieldType name="text_keepword" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.WhitespaceTokenizerFactory"/>
  *     <filter class="solr.KeepWordFilterFactory" words="keepwords.txt" ignoreCase="false"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/KeywordMarkerFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/KeywordMarkerFilterFactory.java index 712668d2caf6..dbf17d0cdd74 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/KeywordMarkerFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/KeywordMarkerFilterFactory.java @@ -28,13 +28,13 @@ /** * Factory for {@link KeywordMarkerFilter}. * - *
+ * 

  * <fieldType name="text_keyword" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.WhitespaceTokenizerFactory"/>
  *     <filter class="solr.KeywordMarkerFilterFactory" protected="protectedkeyword.txt" pattern="^.+er$" ignoreCase="false"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/LengthFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/LengthFilterFactory.java index e7b62bd65c60..10d909df43dc 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/LengthFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/LengthFilterFactory.java @@ -23,13 +23,13 @@ /** * Factory for {@link LengthFilter}. * - *
+ * 

  * <fieldType name="text_lngth" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.WhitespaceTokenizerFactory"/>
  *     <filter class="solr.LengthFilterFactory" min="0" max="1" />
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/LimitTokenCountFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/LimitTokenCountFilterFactory.java index 8f5f6c039e4f..0e2f91262bad 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/LimitTokenCountFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/LimitTokenCountFilterFactory.java @@ -23,13 +23,13 @@ /** * Factory for {@link LimitTokenCountFilter}. * - *
+ * 

  * <fieldType name="text_lngthcnt" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.WhitespaceTokenizerFactory"/>
  *     <filter class="solr.LimitTokenCountFilterFactory" maxTokenCount="10" consumeAllTokens="false" />
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* *

The {@code consumeAllTokens} property is optional and defaults to {@code false}. See {@link * LimitTokenCountFilter} for an explanation of its use. diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/LimitTokenOffsetFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/LimitTokenOffsetFilterFactory.java index ddd3027bd82b..71e1b1893529 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/LimitTokenOffsetFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/LimitTokenOffsetFilterFactory.java @@ -23,13 +23,13 @@ /** * Factory for {@link LimitTokenOffsetFilter}. * - *

+ * 

  * <fieldType name="text_limit_pos" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.WhitespaceTokenizerFactory"/>
  *     <filter class="solr.LimitTokenOffsetFilter" maxStartOffset="100000" consumeAllTokens="false" />
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* *

The {@code consumeAllTokens} property is optional and defaults to {@code false}. * diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/LimitTokenPositionFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/LimitTokenPositionFilterFactory.java index 75c8c646a77f..23dba2b79304 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/LimitTokenPositionFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/LimitTokenPositionFilterFactory.java @@ -23,13 +23,13 @@ /** * Factory for {@link LimitTokenPositionFilter}. * - *

+ * 

  * <fieldType name="text_limit_pos" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.WhitespaceTokenizerFactory"/>
  *     <filter class="solr.LimitTokenPositionFilterFactory" maxTokenPosition="3" consumeAllTokens="false" />
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* *

The {@code consumeAllTokens} property is optional and defaults to {@code false}. See {@link * LimitTokenPositionFilter} for an explanation of its use. diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/PerFieldAnalyzerWrapper.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/PerFieldAnalyzerWrapper.java index c8606079b051..c1232197ed23 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/PerFieldAnalyzerWrapper.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/PerFieldAnalyzerWrapper.java @@ -21,30 +21,27 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.DelegatingAnalyzerWrapper; -/** - * This analyzer is used to facilitate scenarios where different fields require different analysis - * techniques. Use the Map argument in {@link #PerFieldAnalyzerWrapper(Analyzer, java.util.Map)} to - * add non-default analyzers for fields. - * - *

Example usage: - * - *

{@code
- * Map analyzerPerField = new HashMap<>();
- * analyzerPerField.put("firstname", new KeywordAnalyzer());
- * analyzerPerField.put("lastname", new KeywordAnalyzer());
- *
- * PerFieldAnalyzerWrapper aWrapper =
- *   new PerFieldAnalyzerWrapper(new StandardAnalyzer(version), analyzerPerField);
- * }
- * - *

In this example, StandardAnalyzer will be used for all fields except "firstname" and - * "lastname", for which KeywordAnalyzer will be used. - * - *

A PerFieldAnalyzerWrapper can be used like any other analyzer, for both indexing and query - * parsing. - * - * @since 3.1 - */ +/// This analyzer is used to facilitate scenarios where different fields require different analysis +/// techniques. Use the Map argument in [#PerFieldAnalyzerWrapper(Analyzer, java.util.Map)] to +/// add non-default analyzers for fields. +/// +/// Example usage: +/// ```java +/// Map analyzerPerField = new HashMap<>(); +/// analyzerPerField.put("firstname", new KeywordAnalyzer()); +/// analyzerPerField.put("lastname", new KeywordAnalyzer()); +/// +/// PerFieldAnalyzerWrapper aWrapper = +/// new PerFieldAnalyzerWrapper(new StandardAnalyzer(version), analyzerPerField); +/// ``` +/// +/// In this example, StandardAnalyzer will be used for all fields except "firstname" and +/// "lastname", for which KeywordAnalyzer will be used. +/// +/// A PerFieldAnalyzerWrapper can be used like any other analyzer, for both indexing and query +/// parsing. +/// +/// @since 3.1 public final class PerFieldAnalyzerWrapper extends DelegatingAnalyzerWrapper { private final Analyzer defaultAnalyzer; private final Map fieldAnalyzers; diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ProtectedTermFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ProtectedTermFilterFactory.java index 9d134784b36a..8787813c0173 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ProtectedTermFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ProtectedTermFilterFactory.java @@ -37,7 +37,7 @@ * *

CustomAnalyzer example: * - *

+ * 

  * Analyzer ana = CustomAnalyzer.builder()
  *   .withTokenizer("standard")
  *   .when("protectedterm", "ignoreCase", "true", "protected", "protectedTerms.txt")
@@ -45,26 +45,26 @@
  *     .addTokenFilter("lowercase")
  *   .endwhen()
  *   .build();
- * 
+ *
* *

Solr example, in which conditional filters are specified via the wrappedFilters * parameter - a comma-separated list of case-insensitive TokenFilter SPI names - and conditional * filter args are specified via filterName.argName parameters: * - *

+ * 

  * <fieldType name="reverse_lower_with_exceptions" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.WhitespaceTokenizerFactory"/>
  *     <filter class="solr.ProtectedTermFilterFactory" ignoreCase="true" protected="protectedTerms.txt"
  *             wrappedFilters="truncate,lowercase" truncate.prefixLength="4" />
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* *

When using the wrappedFilters parameter, each filter name must be unique, so if * you need to specify the same filter more than once, you must add case-insensitive unique '-id' * suffixes (note that the '-id' suffix is stripped prior to SPI lookup), e.g.: * - *

+ * 

  * <fieldType name="double_synonym_with_exceptions" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.WhitespaceTokenizerFactory"/>
@@ -73,7 +73,7 @@
  *             synonymgraph-A.synonyms="synonyms-1.txt"
  *             synonymgraph-B.synonyms="synonyms-2.txt"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* *

See related {@link * org.apache.lucene.analysis.custom.CustomAnalyzer.Builder#whenTerm(Predicate)} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/RemoveDuplicatesTokenFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/RemoveDuplicatesTokenFilterFactory.java index 9d54ac8e5d47..b8cdcbf9b5e0 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/RemoveDuplicatesTokenFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/RemoveDuplicatesTokenFilterFactory.java @@ -23,13 +23,13 @@ /** * Factory for {@link RemoveDuplicatesTokenFilter}. * - *

+ * 

  * <fieldType name="text_rmdup" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.WhitespaceTokenizerFactory"/>
  *     <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ScandinavianFoldingFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ScandinavianFoldingFilterFactory.java index 12b24d6f5c3a..dd9b42e7a997 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ScandinavianFoldingFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ScandinavianFoldingFilterFactory.java @@ -23,13 +23,13 @@ /** * Factory for {@link ScandinavianFoldingFilter}. * - *
+ * 

  * <fieldType name="text_scandfold" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.WhitespaceTokenizerFactory"/>
  *     <filter class="solr.ScandinavianFoldingFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 4.4.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ScandinavianNormalizationFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ScandinavianNormalizationFilterFactory.java index 6f65fffe658b..489b0bc453bd 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ScandinavianNormalizationFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ScandinavianNormalizationFilterFactory.java @@ -23,13 +23,13 @@ /** * Factory for {@link org.apache.lucene.analysis.miscellaneous.ScandinavianNormalizationFilter}. * - *
+ * 

  * <fieldType name="text_scandnorm" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.WhitespaceTokenizerFactory"/>
  *     <filter class="solr.ScandinavianNormalizationFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 4.4.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/StemmerOverrideFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/StemmerOverrideFilterFactory.java index b0b33a9b3fa0..f633499d4eed 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/StemmerOverrideFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/StemmerOverrideFilterFactory.java @@ -28,13 +28,13 @@ /** * Factory for {@link StemmerOverrideFilter}. * - *
+ * 

  * <fieldType name="text_dicstem" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.WhitespaceTokenizerFactory"/>
  *     <filter class="solr.StemmerOverrideFilterFactory" dictionary="dictionary.txt" ignoreCase="false"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/TrimFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/TrimFilterFactory.java index d3e12d9444d8..dd11a64d6d61 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/TrimFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/TrimFilterFactory.java @@ -23,13 +23,13 @@ /** * Factory for {@link TrimFilter}. * - *
+ * 

  * <fieldType name="text_trm" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.NGramTokenizerFactory"/>
  *     <filter class="solr.TrimFilterFactory" />
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @see TrimFilter * @since 3.1 diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/TruncateTokenFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/TruncateTokenFilterFactory.java index ba57a675b6fd..11ae978532ff 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/TruncateTokenFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/TruncateTokenFilterFactory.java @@ -24,7 +24,7 @@ * Factory for {@link org.apache.lucene.analysis.miscellaneous.TruncateTokenFilter}. The following * type is recommended for "diacritics-insensitive search" for Turkish. * - *
+ * 

  * <fieldType name="text_tr_ascii_f5" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.StandardTokenizerFactory"/>
@@ -35,7 +35,7 @@
  *     <filter class="solr.TruncateTokenFilterFactory" prefixLength="5"/>
  *     <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 4.8.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/TypeAsSynonymFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/TypeAsSynonymFilterFactory.java index e648a5139f94..628b492df2a9 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/TypeAsSynonymFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/TypeAsSynonymFilterFactory.java @@ -27,13 +27,13 @@ * *

In Solr this might be used as such * - *

+ * 

  * <fieldType name="text_type_as_synonym" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.UAX29URLEmailTokenizerFactory"/>
  *     <filter class="solr.TypeAsSynonymFilterFactory" prefix="_type_" synFlagsMask="5" ignore="foo,bar"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* *

If the optional {@code prefix} parameter is used, the specified value will be prepended to the * type, e.g. with prefix="_type_", for a token "example.com" with type "<URL>", the emitted diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterFilterFactory.java index d2a05833ca0b..f4c397db14f7 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterFilterFactory.java @@ -35,7 +35,7 @@ /** * Factory for {@link WordDelimiterFilter}. * - *

+ * 

  * <fieldType name="text_wd" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.WhitespaceTokenizerFactory"/>
@@ -45,7 +45,7 @@
  *             generateWordParts="1" generateNumberParts="1" stemEnglishPossessive="1"
  *             types="wdfftypes.txt" />
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @deprecated Use {@link WordDelimiterGraphFilterFactory} instead: it produces a correct token * graph so that e.g. {@link PhraseQuery} works correctly when it's used in the search time diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterGraphFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterGraphFilterFactory.java index 31fdf35fb024..cbadad173dbc 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterGraphFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterGraphFilterFactory.java @@ -51,7 +51,7 @@ /** * Factory for {@link WordDelimiterGraphFilter}. * - *
+ * 

  * <fieldType name="text_wd" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.WhitespaceTokenizerFactory"/>
@@ -61,7 +61,7 @@
  *             generateWordParts="1" generateNumberParts="1" stemEnglishPossessive="1"
  *             types="wdfftypes.txt" ignoreKeywords="0" />
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 6.5.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramFilterFactory.java index 26341f8e45c7..b232075fe4cd 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramFilterFactory.java @@ -24,13 +24,13 @@ /** * Creates new instances of {@link EdgeNGramTokenFilter}. * - *
+ * 

  * <fieldType name="text_edgngrm" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.WhitespaceTokenizerFactory"/>
  *     <filter class="solr.EdgeNGramFilterFactory" minGramSize="1" maxGramSize="2" preserveOriginal="true"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerFactory.java index 2eb84c334c1d..52b46f174282 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerFactory.java @@ -24,12 +24,12 @@ /** * Creates new instances of {@link EdgeNGramTokenizer}. * - *
+ * 

  * <fieldType name="text_edgngrm" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.EdgeNGramTokenizerFactory" minGramSize="1" maxGramSize="1"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/NGramFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/NGramFilterFactory.java index 7d2c57c02808..01cd1c27c8b4 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/NGramFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/NGramFilterFactory.java @@ -24,13 +24,13 @@ /** * Factory for {@link NGramTokenFilter}. * - *
+ * 

  * <fieldType name="text_ngrm" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.WhitespaceTokenizerFactory"/>
  *     <filter class="solr.NGramFilterFactory" minGramSize="1" maxGramSize="2" preserveOriginal="true"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/NGramTokenizerFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/NGramTokenizerFactory.java index 400451f2eea9..a23db511a547 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/NGramTokenizerFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/NGramTokenizerFactory.java @@ -26,12 +26,12 @@ /** * Factory for {@link NGramTokenizer}. * - *
+ * 

  * <fieldType name="text_ngrm" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.NGramTokenizerFactory" minGramSize="1" maxGramSize="2"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/no/NorwegianLightStemFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/no/NorwegianLightStemFilterFactory.java index 81061f49db51..76ef0c65f5a8 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/no/NorwegianLightStemFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/no/NorwegianLightStemFilterFactory.java @@ -26,14 +26,14 @@ /** * Factory for {@link NorwegianLightStemFilter}. * - *
+ * 

  * <fieldType name="text_svlgtstem" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.StandardTokenizerFactory"/>
  *     <filter class="solr.LowerCaseFilterFactory"/>
  *     <filter class="solr.NorwegianLightStemFilterFactory" variant="nb"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.6.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/no/NorwegianMinimalStemFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/no/NorwegianMinimalStemFilterFactory.java index 3c8d28c9b290..873f722dc946 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/no/NorwegianMinimalStemFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/no/NorwegianMinimalStemFilterFactory.java @@ -26,14 +26,14 @@ /** * Factory for {@link NorwegianMinimalStemFilter}. * - *
+ * 

  * <fieldType name="text_svlgtstem" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.StandardTokenizerFactory"/>
  *     <filter class="solr.LowerCaseFilterFactory"/>
  *     <filter class="solr.NorwegianMinimalStemFilterFactory" variant="nb"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.6.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/path/PathHierarchyTokenizerFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/path/PathHierarchyTokenizerFactory.java index b4c01ea072f9..f02e648713d6 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/path/PathHierarchyTokenizerFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/path/PathHierarchyTokenizerFactory.java @@ -32,7 +32,7 @@ * Books/NonFic/Science/Physics, etc. But it will not match documents indexed with * values like Books, or Books/Fic... * - *
+ * 

  * <fieldType name="descendent_path" class="solr.TextField">
  *   <analyzer type="index">
  *     <tokenizer class="solr.PathHierarchyTokenizerFactory" delimiter="/" />
@@ -41,14 +41,14 @@
  *     <tokenizer class="solr.KeywordTokenizerFactory" />
  *   </analyzer>
  * </fieldType>
- * 
+ *
* *

In this example however we see the oposite configuration, so that a query for * Books/NonFic/Science/Physics would match documents containing Books/NonFic, * Books/NonFic/Science, or Books/NonFic/Science/Physics, but not * Books/NonFic/Science/Physics/Theory or Books/NonFic/Law. * - *

+ * 

  * <fieldType name="descendent_path" class="solr.TextField">
  *   <analyzer type="index">
  *     <tokenizer class="solr.KeywordTokenizerFactory" />
@@ -57,7 +57,7 @@
  *     <tokenizer class="solr.PathHierarchyTokenizerFactory" delimiter="/" />
  *   </analyzer>
  * </fieldType>
- * 
+ *
* * @since 3.1 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternCaptureGroupFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternCaptureGroupFilterFactory.java index fc6c78ba7833..754b19351b91 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternCaptureGroupFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternCaptureGroupFilterFactory.java @@ -24,13 +24,13 @@ /** * Factory for {@link PatternCaptureGroupTokenFilter}. * - *
+ * 

  * <fieldType name="text_ptncapturegroup" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.KeywordTokenizerFactory"/>
  *     <filter class="solr.PatternCaptureGroupFilterFactory" pattern="([^a-z])" preserve_original="true"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @see PatternCaptureGroupTokenFilter * @since 4.4.0 diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternReplaceCharFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternReplaceCharFilterFactory.java index 9ddd8c176f7d..208632ea9a9d 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternReplaceCharFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternReplaceCharFilterFactory.java @@ -24,14 +24,14 @@ /** * Factory for {@link PatternReplaceCharFilter}. * - *
+ * 

  * <fieldType name="text_ptnreplace" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <charFilter class="solr.PatternReplaceCharFilterFactory"
  *                    pattern="([^a-z])" replacement=""/>
  *     <tokenizer class="solr.KeywordTokenizerFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since Solr 3.1 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternReplaceFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternReplaceFilterFactory.java index 93845a6294dd..3672209af75e 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternReplaceFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternReplaceFilterFactory.java @@ -25,14 +25,14 @@ /** * Factory for {@link PatternReplaceFilter}. * - *
+ * 

  * <fieldType name="text_ptnreplace" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.KeywordTokenizerFactory"/>
  *     <filter class="solr.PatternReplaceFilterFactory" pattern="([^a-z])" replacement=""
  *             replace="all"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @see PatternReplaceFilter * @since 3.1 diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternTokenizerFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternTokenizerFactory.java index 6f5852c196ea..07fe9e6d2244 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternTokenizerFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternTokenizerFactory.java @@ -46,12 +46,12 @@ * *

NOTE: This Tokenizer does not output tokens that are of zero length. * - *

+ * 

  * <fieldType name="text_ptn" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.PatternTokenizerFactory" pattern="\'([^\']+)\'" group="1"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @see PatternTokenizer * @since solr1.2 diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternTypingFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternTypingFilterFactory.java index 4c379679f7f7..a187e689714b 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternTypingFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternTypingFilterFactory.java @@ -33,7 +33,7 @@ * itself this filter is not very useful. Normally it is combined with a filter that reacts to types * or flags. * - *
+ * 

  * <fieldType name="text_taf" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.WhitespaceTokenizerFactory"/>
@@ -42,7 +42,7 @@
  *     <filter class="solr.TypeAsSynonymFilterFactory" prefix="__TAS__"
  *               ignore="word,&lt;ALPHANUM&gt;,&lt;NUM&gt;,&lt;SOUTHEAST_ASIAN&gt;,&lt;IDEOGRAPHIC&gt;,&lt;HIRAGANA&gt;,&lt;KATAKANA&gt;,&lt;HANGUL&gt;,&lt;EMOJI&gt;"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* *

Note that a configuration such as above may interfere with multi-word synonyms. The patterns * file has the format: diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/SimplePatternSplitTokenizerFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/SimplePatternSplitTokenizerFactory.java index 7472bba5848b..b6ab82408794 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/SimplePatternSplitTokenizerFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/SimplePatternSplitTokenizerFactory.java @@ -46,12 +46,12 @@ * *

For example, to match tokens delimited by simple whitespace characters: * - *

+ * 

  * <fieldType name="text_ptn" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.SimplePatternSplitTokenizerFactory" pattern="[ \t\r\n]+"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @lucene.experimental * @see SimplePatternSplitTokenizer diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/SimplePatternTokenizerFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/SimplePatternTokenizerFactory.java index ea790345abd5..39261eef158a 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/SimplePatternTokenizerFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/SimplePatternTokenizerFactory.java @@ -44,12 +44,12 @@ * *

For example, to match tokens delimited by simple whitespace characters: * - *

+ * 

  * <fieldType name="text_ptn" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.SimplePatternTokenizerFactory" pattern="[^ \t\r\n]+"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @lucene.experimental * @see SimplePatternTokenizer diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/DelimitedPayloadTokenFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/DelimitedPayloadTokenFilterFactory.java index 74131f166608..74a8bd026a19 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/DelimitedPayloadTokenFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/DelimitedPayloadTokenFilterFactory.java @@ -25,13 +25,13 @@ /** * Factory for {@link DelimitedPayloadTokenFilter}. * - *
+ * 

  * <fieldType name="text_dlmtd" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.WhitespaceTokenizerFactory"/>
  *     <filter class="solr.DelimitedPayloadTokenFilterFactory" encoder="float" delimiter="|"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/NumericPayloadTokenFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/NumericPayloadTokenFilterFactory.java index 646dba1d88c9..7539935a9293 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/NumericPayloadTokenFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/NumericPayloadTokenFilterFactory.java @@ -23,13 +23,13 @@ /** * Factory for {@link NumericPayloadTokenFilter}. * - *
+ * 

  * <fieldType name="text_numpayload" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.WhitespaceTokenizerFactory"/>
  *     <filter class="solr.NumericPayloadTokenFilterFactory" payload="24" typeMatch="word"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/TokenOffsetPayloadTokenFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/TokenOffsetPayloadTokenFilterFactory.java index 5321ce282f61..ef0a6c597003 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/TokenOffsetPayloadTokenFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/TokenOffsetPayloadTokenFilterFactory.java @@ -23,13 +23,13 @@ /** * Factory for {@link TokenOffsetPayloadTokenFilter}. * - *
+ * 

  * <fieldType name="text_tokenoffset" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.WhitespaceTokenizerFactory"/>
  *     <filter class="solr.TokenOffsetPayloadTokenFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/TypeAsPayloadTokenFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/TypeAsPayloadTokenFilterFactory.java index 8c5b2c1948d4..e02217d187ef 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/TypeAsPayloadTokenFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/TypeAsPayloadTokenFilterFactory.java @@ -23,13 +23,13 @@ /** * Factory for {@link TypeAsPayloadTokenFilter}. * - *
+ * 

  * <fieldType name="text_typeaspayload" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.WhitespaceTokenizerFactory"/>
  *     <filter class="solr.TypeAsPayloadTokenFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/pt/PortugueseLightStemFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/pt/PortugueseLightStemFilterFactory.java index bd579801d9b3..b2d802ace5b9 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/pt/PortugueseLightStemFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/pt/PortugueseLightStemFilterFactory.java @@ -23,14 +23,14 @@ /** * Factory for {@link PortugueseLightStemFilter}. * - *
+ * 

  * <fieldType name="text_ptlgtstem" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.StandardTokenizerFactory"/>
  *     <filter class="solr.LowerCaseFilterFactory"/>
  *     <filter class="solr.PortugueseLightStemFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/pt/PortugueseMinimalStemFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/pt/PortugueseMinimalStemFilterFactory.java index 052bc7675fcc..2895910ddc2b 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/pt/PortugueseMinimalStemFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/pt/PortugueseMinimalStemFilterFactory.java @@ -23,14 +23,14 @@ /** * Factory for {@link PortugueseMinimalStemFilter}. * - *
+ * 

  * <fieldType name="text_ptminstem" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.StandardTokenizerFactory"/>
  *     <filter class="solr.LowerCaseFilterFactory"/>
  *     <filter class="solr.PortugueseMinimalStemFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/pt/PortugueseStemFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/pt/PortugueseStemFilterFactory.java index f28bcf7ec5a0..77f1c793af10 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/pt/PortugueseStemFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/pt/PortugueseStemFilterFactory.java @@ -23,14 +23,14 @@ /** * Factory for {@link PortugueseStemFilter}. * - *
+ * 

  * <fieldType name="text_ptstem" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.StandardTokenizerFactory"/>
  *     <filter class="solr.LowerCaseFilterFactory"/>
  *     <filter class="solr.PortugueseStemFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/reverse/ReverseStringFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/reverse/ReverseStringFilterFactory.java index 9b2e43ddd787..e4a5e7667ae0 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/reverse/ReverseStringFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/reverse/ReverseStringFilterFactory.java @@ -23,13 +23,13 @@ /** * Factory for {@link ReverseStringFilter}. * - *
+ * 

  * <fieldType name="text_rvsstr" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.WhitespaceTokenizerFactory"/>
  *     <filter class="solr.ReverseStringFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since solr 1.4 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/ru/RussianLightStemFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/ru/RussianLightStemFilterFactory.java index 0f76f95bb365..692f57aaab66 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/ru/RussianLightStemFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/ru/RussianLightStemFilterFactory.java @@ -23,14 +23,14 @@ /** * Factory for {@link RussianLightStemFilter}. * - *
+ * 

  * <fieldType name="text_rulgtstem" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.StandardTokenizerFactory"/>
  *     <filter class="solr.LowerCaseFilterFactory"/>
  *     <filter class="solr.RussianLightStemFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/shingle/ShingleFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/shingle/ShingleFilterFactory.java index 008f8460da34..25f0d3c81190 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/shingle/ShingleFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/shingle/ShingleFilterFactory.java @@ -23,14 +23,14 @@ /** * Factory for {@link ShingleFilter}. * - *
+ * 

  * <fieldType name="text_shingle" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.WhitespaceTokenizerFactory"/>
  *     <filter class="solr.ShingleFilterFactory" minShingleSize="2" maxShingleSize="2"
  *             outputUnigrams="true" outputUnigramsIfNoShingles="false" tokenSeparator=" " fillerToken="_"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/sinks/TeeSinkTokenFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/sinks/TeeSinkTokenFilter.java index 57088983fa87..7d2a5fdf27d6 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/sinks/TeeSinkTokenFilter.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/sinks/TeeSinkTokenFilter.java @@ -32,7 +32,7 @@ *

It is also useful for doing things like entity extraction or proper noun analysis as part of * the analysis workflow and saving off those tokens for use in another field. * - *

+ * 

  * TeeSinkTokenFilter source1 = new TeeSinkTokenFilter(new WhitespaceTokenizer());
  * TeeSinkTokenFilter.SinkTokenStream sink1 = source1.newSinkTokenStream();
  * TeeSinkTokenFilter.SinkTokenStream sink2 = source1.newSinkTokenStream();
@@ -44,7 +44,7 @@
  * d.add(new TextField("f1", final1));
  * d.add(new TextField("f2", final2));
  * d.add(new TextField("f3", final3));
- * 
+ *
* *

In this example, {@code sink1} and {@code sink2} will both get tokens from {@code source1} * after whitespace tokenization, and will further do additional token filtering, e.g. detect diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/snowball/SnowballPorterFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/snowball/SnowballPorterFilterFactory.java index 2b31d2fe1fa8..3997ea9d46f0 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/snowball/SnowballPorterFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/snowball/SnowballPorterFilterFactory.java @@ -32,14 +32,14 @@ * *

Note: Use of the "Lovins" stemmer is not recommended, as it is implemented with reflection. * - *

+ * 

  * <fieldType name="text_snowballstem" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.StandardTokenizerFactory"/>
  *     <filter class="solr.LowerCaseFilterFactory"/>
  *     <filter class="solr.SnowballPorterFilterFactory" protected="protectedkeyword.txt" language="English"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/sr/SerbianNormalizationFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/sr/SerbianNormalizationFilterFactory.java index f801b8f4a71d..cfdfddf8b8c4 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/sr/SerbianNormalizationFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/sr/SerbianNormalizationFilterFactory.java @@ -24,7 +24,7 @@ /** * Factory for {@link SerbianNormalizationFilter}. * - *
+ * 

  * <fieldType name="text_srnorm" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.StandardTokenizerFactory"/>
@@ -32,7 +32,7 @@
  *     <filter class="solr.SerbianNormalizationFilterFactory"
  *       haircut="bald"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 5.0.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/sv/SwedishLightStemFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/sv/SwedishLightStemFilterFactory.java index e4e9834a509d..3f61546f32f2 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/sv/SwedishLightStemFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/sv/SwedishLightStemFilterFactory.java @@ -23,14 +23,14 @@ /** * Factory for {@link SwedishLightStemFilter}. * - *
+ * 

  * <fieldType name="text_svlgtstem" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.StandardTokenizerFactory"/>
  *     <filter class="solr.LowerCaseFilterFactory"/>
  *     <filter class="solr.SwedishLightStemFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/sv/SwedishMinimalStemFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/sv/SwedishMinimalStemFilterFactory.java index 49edfa4e8b60..c0454c712461 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/sv/SwedishMinimalStemFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/sv/SwedishMinimalStemFilterFactory.java @@ -23,14 +23,14 @@ /** * Factory for {@link SwedishMinimalStemFilter}. * - *
+ * 

  * <fieldType name="text_svminstem" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.StandardTokenizerFactory"/>
  *     <filter class="solr.LowerCaseFilterFactory"/>
  *     <filter class="solr.SwedishMinimalStemFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 9.0.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilterFactory.java index 81b017bf6075..fae3224e6ec0 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilterFactory.java @@ -41,7 +41,7 @@ /** * Factory for {@link SynonymFilter}. * - *
+ * 

  * <fieldType name="text_synonym" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.WhitespaceTokenizerFactory"/>
@@ -50,7 +50,7 @@
  *             tokenizerFactory="solr.WhitespaceTokenizerFactory"
  *             [optional tokenizer factory parameters]/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* *

An optional param name prefix of "tokenizerFactory." may be used for any init params that the * SynonymFilterFactory needs to pass to the specified TokenizerFactory. If the TokenizerFactory diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymGraphFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymGraphFilterFactory.java index f2c5c42c18cf..7b34caf78f15 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymGraphFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymGraphFilterFactory.java @@ -40,7 +40,7 @@ /** * Factory for {@link SynonymGraphFilter}. * - *

+ * 

  * <fieldType name="text_synonym" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.WhitespaceTokenizerFactory"/>
@@ -49,7 +49,7 @@
  *             tokenizerFactory="solr.WhitespaceTokenizerFactory"
  *             [optional tokenizer factory parameters]/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* *

An optional param name prefix of "tokenizerFactory." may be used for any init params that the * SynonymGraphFilterFactory needs to pass to the specified TokenizerFactory. If the diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/te/TeluguNormalizationFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/te/TeluguNormalizationFilterFactory.java index 8fa05efc7f9e..bf23b5e13b29 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/te/TeluguNormalizationFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/te/TeluguNormalizationFilterFactory.java @@ -23,13 +23,13 @@ /** * Factory for {@link TeluguNormalizationFilter}. * - *

+ * 

  * <fieldType name="text_tenormal" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.StandardTokenizerFactory"/>
  *     <filter class="solr.TeluguNormalizationFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 9.0.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/te/TeluguStemFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/te/TeluguStemFilterFactory.java index 0e26ba115cd3..bda48538c87a 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/te/TeluguStemFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/te/TeluguStemFilterFactory.java @@ -23,13 +23,13 @@ /** * Factory for {@link TeluguStemFilter}. * - *
+ * 

  * <fieldType name="text_testem" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.StandardTokenizerFactory"/>
  *     <filter class="solr.TeluguStemFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 9.0.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/th/ThaiTokenizerFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/th/ThaiTokenizerFactory.java index bf0b7f8f9370..ef0969681a31 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/th/ThaiTokenizerFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/th/ThaiTokenizerFactory.java @@ -24,12 +24,12 @@ /** * Factory for {@link ThaiTokenizer}. * - *
+ * 

  * <fieldType name="text_thai" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.ThaiTokenizerFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 4.10.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/tr/ApostropheFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/tr/ApostropheFilterFactory.java index 44bb65449d25..33884c165c49 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/tr/ApostropheFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/tr/ApostropheFilterFactory.java @@ -23,14 +23,14 @@ /** * Factory for {@link ApostropheFilter}. * - *
+ * 

  * <fieldType name="text_tr_lower_apostrophes" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.StandardTokenizerFactory"/>
  *     <filter class="solr.ApostropheFilterFactory"/>
  *     <filter class="solr.TurkishLowerCaseFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 4.8.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/tr/TurkishLowerCaseFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/tr/TurkishLowerCaseFilterFactory.java index 3b6a57838010..44b159b76ad3 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/tr/TurkishLowerCaseFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/tr/TurkishLowerCaseFilterFactory.java @@ -23,13 +23,13 @@ /** * Factory for {@link TurkishLowerCaseFilter}. * - *
+ * 

  * <fieldType name="text_trlwr" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.StandardTokenizerFactory"/>
  *     <filter class="solr.TurkishLowerCaseFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/CharTokenizer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/CharTokenizer.java index 753b936b53ae..73acb521605b 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/CharTokenizer.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/CharTokenizer.java @@ -37,9 +37,9 @@ * 8 lambdas or method references. It is possible to create an instance which behaves exactly like * {@link LetterTokenizer}: * - *
+ * 

  * Tokenizer tok = CharTokenizer.fromTokenCharPredicate(Character::isLetter);
- * 
+ *
*/ public abstract class CharTokenizer extends Tokenizer { @@ -85,9 +85,9 @@ public CharTokenizer(AttributeFactory factory, int maxTokenLen) { *

This factory is intended to be used with lambdas or method references. E.g., an elegant way * to create an instance which behaves exactly as {@link LetterTokenizer} is: * - *

+   * 

    * Tokenizer tok = CharTokenizer.fromTokenCharPredicate(Character::isLetter);
-   * 
+ *
*/ public static CharTokenizer fromTokenCharPredicate(final IntPredicate tokenCharPredicate) { return fromTokenCharPredicate(DEFAULT_TOKEN_ATTRIBUTE_FACTORY, tokenCharPredicate); @@ -101,9 +101,9 @@ public static CharTokenizer fromTokenCharPredicate(final IntPredicate tokenCharP *

This factory is intended to be used with lambdas or method references. E.g., an elegant way * to create an instance which behaves exactly as {@link LetterTokenizer} is: * - *

+   * 

    * Tokenizer tok = CharTokenizer.fromTokenCharPredicate(factory, Character::isLetter);
-   * 
+ *
*/ public static CharTokenizer fromTokenCharPredicate( AttributeFactory factory, final IntPredicate tokenCharPredicate) { @@ -125,9 +125,9 @@ protected boolean isTokenChar(int c) { *

This factory is intended to be used with lambdas or method references. E.g., an elegant way * to create an instance which behaves exactly as {@link WhitespaceTokenizer} is: * - *

+   * 

    * Tokenizer tok = CharTokenizer.fromSeparatorCharPredicate(Character::isWhitespace);
-   * 
+ *
*/ public static CharTokenizer fromSeparatorCharPredicate( final IntPredicate separatorCharPredicate) { @@ -142,9 +142,9 @@ public static CharTokenizer fromSeparatorCharPredicate( *

This factory is intended to be used with lambdas or method references. E.g., an elegant way * to create an instance which behaves exactly as {@link WhitespaceTokenizer} is: * - *

+   * 

    * Tokenizer tok = CharTokenizer.fromSeparatorCharPredicate(factory, Character::isWhitespace);
-   * 
+ *
*/ public static CharTokenizer fromSeparatorCharPredicate( AttributeFactory factory, final IntPredicate separatorCharPredicate) { diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/ElisionFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/ElisionFilterFactory.java index c8caec9f32b3..19c8095dfbcd 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/ElisionFilterFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/ElisionFilterFactory.java @@ -28,7 +28,7 @@ /** * Factory for {@link ElisionFilter}. * - *
+ * 

  * <fieldType name="text_elsn" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.StandardTokenizerFactory"/>
@@ -36,7 +36,7 @@
  *     <filter class="solr.ElisionFilterFactory"
  *       articles="stopwordarticles.txt" ignoreCase="true"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerFactory.java index 71b7b7a1aeac..5b13f5dce29c 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerFactory.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerFactory.java @@ -25,12 +25,12 @@ /** * Factory for {@link WikipediaTokenizer}. * - *
+ * 

  * <fieldType name="text_wiki" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.WikipediaTokenizerFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/common/src/java/org/apache/lucene/collation/package-info.java b/lucene/analysis/common/src/java/org/apache/lucene/collation/package-info.java index 821cdc007a50..bfc76356d9fb 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/collation/package-info.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/collation/package-info.java @@ -38,7 +38,7 @@ * *

Farsi Range Queries

* - *
+ * 

  *   // "fa" Locale is not supported by Sun JDK 1.4 or 1.5
  *   Collator collator = Collator.getInstance(new Locale("ar"));
  *   CollationKeyAnalyzer analyzer = new CollationKeyAnalyzer(collator);
@@ -63,11 +63,11 @@
  *   ScoreDoc[] result
  *     = is.search(aqp.parse("[ \u062F TO \u0698 ]"), null, 1000).scoreDocs;
  *   assertEquals("The index Term should not be included.", 0, result.length);
- * 
+ *
* *

Danish Sorting

* - *
+ * 

  *   Analyzer analyzer
  *     = new CollationKeyAnalyzer(Collator.getInstance(new Locale("da", "dk")));
  *   Path dirPath = Files.createTempDirectory("tempIndex");
@@ -93,11 +93,11 @@
  *     Document doc = searcher.doc(result[i].doc);
  *     assertEquals(sortedTracerOrder[i], doc.getValues("tracer")[0]);
  *   }
- * 
+ *
* *

Turkish Case Normalization

* - *
+ * 

  *   Collator collator = Collator.getInstance(new Locale("tr", "TR"));
  *   collator.setStrength(Collator.PRIMARY);
  *   Analyzer analyzer = new CollationKeyAnalyzer(collator);
@@ -114,7 +114,7 @@
  *   Query query = parser.parse("d\u0131gy");   // U+0131: dotless i
  *   ScoreDoc[] result = is.search(query, null, 1000).scoreDocs;
  *   assertEquals("The index Term should be included.", 1, result.length);
- * 
+ *
* *

Caveats and Comparisons

* diff --git a/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/ICUFoldingFilterFactory.java b/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/ICUFoldingFilterFactory.java index f8e84e9d8ac5..dfa5e44d7979 100644 --- a/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/ICUFoldingFilterFactory.java +++ b/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/ICUFoldingFilterFactory.java @@ -26,13 +26,13 @@ /** * Factory for {@link ICUFoldingFilter}. * - *
+ * 

  * <fieldType name="text_folded" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.WhitespaceTokenizerFactory"/>
  *     <filter class="solr.ICUFoldingFilterFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/ICUTokenizerFactory.java b/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/ICUTokenizerFactory.java index 0545db0ba19d..d3085ab17816 100644 --- a/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/ICUTokenizerFactory.java +++ b/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/ICUTokenizerFactory.java @@ -39,12 +39,12 @@ * *

To use the default set of per-script rules: * - *

+ * 

  * <fieldType name="text_icu" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.ICUTokenizerFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* *

You can customize this tokenizer's behavior by specifying per-script rule files, which are * compiled by the ICU RuleBasedBreakIterator. See the + *


  * <fieldType name="text_icu_custom" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.ICUTokenizerFactory" cjkAsWords="true"
  *                rulefiles="Latn:my.Latin.rules.rbbi,Cyrl:my.Cyrillic.rules.rbbi"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType> * * @since 3.1 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/icu/src/java/overview.html b/lucene/analysis/icu/src/java/overview.html index 7b9c2c88a7fb..5f7112c501bb 100644 --- a/lucene/analysis/icu/src/java/overview.html +++ b/lucene/analysis/icu/src/java/overview.html @@ -70,10 +70,10 @@

Use Cases

Example Usages

Tokenizing multilanguage text

-
+

   // This tokenizer will work well in general for most languages.
   Tokenizer tokenizer = new ICUTokenizer(reader);
-
+

Collation

@@ -110,7 +110,7 @@

Use Cases

Example Usages

Farsi Range Queries

-
+

   Collator collator = Collator.getInstance(new ULocale("ar"));
   ICUCollationKeyAnalyzer analyzer = new ICUCollationKeyAnalyzer(collator);
   Path indexPath = Files.createTempDirectory("tempIndex");
@@ -134,10 +134,10 @@ 

Farsi Range Queries

ScoreDoc[] result = is.search(aqp.parse("[ \u062F TO \u0698 ]"), null, 1000).scoreDocs; assertEquals("The index Term should not be included.", 0, result.length); -
+

Danish Sorting

-
+

   Analyzer analyzer
     = new ICUCollationKeyAnalyzer(Collator.getInstance(new ULocale("da", "dk")));
   Path indexPath = Files.createTempDirectory("tempIndex");
@@ -162,10 +162,10 @@ 

Danish Sorting

Document doc = searcher.doc(result[i].doc); assertEquals(sortedTracerOrder[i], doc.getValues("tracer")[0]); } -
+

Turkish Case Normalization

-
+

   Collator collator = Collator.getInstance(new ULocale("tr", "TR"));
   collator.setStrength(Collator.PRIMARY);
   Analyzer analyzer = new ICUCollationKeyAnalyzer(collator);
@@ -181,7 +181,7 @@ 

Turkish Case Normalization

Query query = parser.parse("d\u0131gy"); // U+0131: dotless i ScoreDoc[] result = is.search(query, null, 1000).scoreDocs; assertEquals("The index Term should be included.", 1, result.length); -
+

Caveats and Comparisons

@@ -241,12 +241,12 @@

Use Cases

Example Usages

Normalizing text to NFC

-
+

   // Normalizer2 objects are unmodifiable and immutable.
   Normalizer2 normalizer = Normalizer2.getInstance(null, "nfc", Normalizer2.Mode.COMPOSE);
   // This filter will normalize to NFC.
   TokenStream tokenstream = new ICUNormalizer2Filter(tokenizer, normalizer);
-
+

Case Folding

@@ -276,10 +276,10 @@

Use Cases

Example Usages

Lowercasing text

-
+

   // This filter will case-fold and normalize to NFKC.
   TokenStream tokenstream = new ICUNormalizer2Filter(tokenizer);
-
+

Search Term Folding

@@ -301,11 +301,11 @@

Use Cases

Example Usages

Removing accents

-
+

   // This filter will case-fold, remove accents and other distinctions, and
   // normalize to NFKC.
   TokenStream tokenstream = new ICUFoldingFilter(tokenizer);
-
+

Text Transformation

@@ -329,16 +329,16 @@

Use Cases

Example Usages

Convert Traditional to Simplified

-
+

   // This filter will map Traditional Chinese to Simplified Chinese
   TokenStream tokenstream = new ICUTransformFilter(tokenizer, Transliterator.getInstance("Traditional-Simplified"));
-
+

Transliterate Serbian Cyrillic to Serbian Latin

-
+

   //
   This filter will map Serbian Cyrillic to Serbian Latin according to BGN rules
   TokenStream tokenstream = new ICUTransformFilter(tokenizer, Transliterator.getInstance("Serbian-Latin/BGN"));
-
+

Backwards Compatibility

@@ -350,7 +350,7 @@

Backwards Compatibility

Example Usages

Restricting normalization to Unicode 5.0

-
+

   // This filter will do NFC normalization, but will ignore any characters that
   // did not exist as of Unicode 5.0. Because of the normalization stability policy
   // of Unicode, this is an easy way to force normalization to a specific version.
@@ -360,6 +360,6 @@ 

Restricting normalization to Unicode 5.0

set.freeze(); FilteredNormalizer2 unicode50 = new FilteredNormalizer2(normalizer, set); TokenStream tokenstream = new ICUNormalizer2Filter(tokenizer, unicode50); -
+
diff --git a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseIterationMarkCharFilterFactory.java b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseIterationMarkCharFilterFactory.java index c300cc06b0ac..743b85eb025a 100644 --- a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseIterationMarkCharFilterFactory.java +++ b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseIterationMarkCharFilterFactory.java @@ -23,13 +23,13 @@ /** * Factory for {@link org.apache.lucene.analysis.ja.JapaneseIterationMarkCharFilter}. * - *
+ * 

  * <fieldType name="text_ja" class="solr.TextField" positionIncrementGap="100" autoGeneratePhraseQueries="false">
  *   <analyzer>
  *     <charFilter class="solr.JapaneseIterationMarkCharFilterFactory normalizeKanji="true" normalizeKana="true"/>
  *     <tokenizer class="solr.JapaneseTokenizerFactory"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 4.0.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseKatakanaStemFilterFactory.java b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseKatakanaStemFilterFactory.java index 5ec89222f590..ead84b632759 100644 --- a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseKatakanaStemFilterFactory.java +++ b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseKatakanaStemFilterFactory.java @@ -23,7 +23,7 @@ /** * Factory for {@link JapaneseKatakanaStemFilter}. * - *
+ * 

  * <fieldType name="text_ja" class="solr.TextField">
  *   <analyzer>
  *     <tokenizer class="solr.JapaneseTokenizerFactory"/>
@@ -31,7 +31,7 @@
  *             minimumLength="4"/>
  *   </analyzer>
  * </fieldType>
- * 
+ *
* * @since 3.6.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseNumberFilterFactory.java b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseNumberFilterFactory.java index 5b1b92e6fb22..1ddf79436538 100644 --- a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseNumberFilterFactory.java +++ b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseNumberFilterFactory.java @@ -23,14 +23,14 @@ /** * Factory for {@link JapaneseNumberFilter}.
* - *
+ * 

  * <fieldType name="text_ja" class="solr.TextField">
  *   <analyzer>
  *     <tokenizer class="solr.JapaneseTokenizerFactory" discardPunctuation="false"/>
  *     <filter class="solr.JapaneseNumberFilterFactory"/>
  *   </analyzer>
  * </fieldType>
- * 
+ *
* *

It is important that punctuation is not discarded by the tokenizer so use {@code * discardPunctuation="false"} in your {@link JapaneseTokenizerFactory}. diff --git a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapanesePartOfSpeechStopFilterFactory.java b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapanesePartOfSpeechStopFilterFactory.java index 3e9b981258f1..0d6024c294cf 100644 --- a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapanesePartOfSpeechStopFilterFactory.java +++ b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapanesePartOfSpeechStopFilterFactory.java @@ -29,7 +29,7 @@ /** * Factory for {@link org.apache.lucene.analysis.ja.JapanesePartOfSpeechStopFilter}. * - *

+ * 

  * <fieldType name="text_ja" class="solr.TextField">
  *   <analyzer>
  *     <tokenizer class="solr.JapaneseTokenizerFactory"/>
@@ -37,7 +37,7 @@
  *             tags="stopTags.txt"/>
  *   </analyzer>
  * </fieldType>
- * 
+ *
* * @since 3.6.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseReadingFormFilterFactory.java b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseReadingFormFilterFactory.java index f8ccc0f3996d..fa5d3b15923f 100644 --- a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseReadingFormFilterFactory.java +++ b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseReadingFormFilterFactory.java @@ -23,7 +23,7 @@ /** * Factory for {@link org.apache.lucene.analysis.ja.JapaneseReadingFormFilter}. * - *
+ * 

  * <fieldType name="text_ja" class="solr.TextField">
  *   <analyzer>
  *     <tokenizer class="solr.JapaneseTokenizerFactory"/>
@@ -31,7 +31,7 @@
  *             useRomaji="false"/>
  *   </analyzer>
  * </fieldType>
- * 
+ *
* * @since 3.6.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseTokenizerFactory.java b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseTokenizerFactory.java index 911df4eb0107..cb61aaedc56c 100644 --- a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseTokenizerFactory.java +++ b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseTokenizerFactory.java @@ -36,7 +36,7 @@ /** * Factory for {@link org.apache.lucene.analysis.ja.JapaneseTokenizer}. * - *
+ * 

  * <fieldType name="text_ja" class="solr.TextField">
  *   <analyzer>
  *     <tokenizer class="solr.JapaneseTokenizerFactory"
@@ -49,7 +49,7 @@
  *     <filter class="solr.JapaneseBaseFormFilterFactory"/>
  *   </analyzer>
  * </fieldType>
- * 
+ *
* *

Additional expert user parameters nBestCost and nBestExamples can be used to include * additional searchable tokens that those most likely according to the statistical model. A typical diff --git a/lucene/analysis/morfologik/src/java/org/apache/lucene/analysis/morfologik/MorfologikFilterFactory.java b/lucene/analysis/morfologik/src/java/org/apache/lucene/analysis/morfologik/MorfologikFilterFactory.java index 2822769c828c..ab95b6b6c5d6 100644 --- a/lucene/analysis/morfologik/src/java/org/apache/lucene/analysis/morfologik/MorfologikFilterFactory.java +++ b/lucene/analysis/morfologik/src/java/org/apache/lucene/analysis/morfologik/MorfologikFilterFactory.java @@ -34,13 +34,13 @@ *

An explicit resource name of the dictionary ({@code ".dict"}) can be provided via the * dictionary attribute, as the example below demonstrates: * - *

+ * 

  * <fieldType name="text_mylang" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.WhitespaceTokenizerFactory"/>
  *     <filter class="solr.MorfologikFilterFactory" dictionary="mylang.dict" />
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* *

If the dictionary attribute is not provided, the Polish dictionary is loaded and used by * default. diff --git a/lucene/analysis/nori/src/java/org/apache/lucene/analysis/ko/KoreanNumberFilterFactory.java b/lucene/analysis/nori/src/java/org/apache/lucene/analysis/ko/KoreanNumberFilterFactory.java index cbb65b12bd60..bad47555f07d 100644 --- a/lucene/analysis/nori/src/java/org/apache/lucene/analysis/ko/KoreanNumberFilterFactory.java +++ b/lucene/analysis/nori/src/java/org/apache/lucene/analysis/ko/KoreanNumberFilterFactory.java @@ -23,14 +23,14 @@ /** * Factory for {@link KoreanNumberFilter}. * - *

+ * 

  * <fieldType name="text_ko" class="solr.TextField">
  *   <analyzer>
  *     <tokenizer class="solr.KoreanTokenizerFactory" discardPunctuation="false"/>
  *     <filter class="solr.KoreanNumberFilterFactory"/>
  *   </analyzer>
  * </fieldType>
- * 
+ *
* *

It is important that punctuation is not discarded by the tokenizer so use {@code * discardPunctuation="false"} in your {@link KoreanTokenizerFactory}. diff --git a/lucene/analysis/nori/src/java/org/apache/lucene/analysis/ko/KoreanPartOfSpeechStopFilterFactory.java b/lucene/analysis/nori/src/java/org/apache/lucene/analysis/ko/KoreanPartOfSpeechStopFilterFactory.java index 65c18f8d931a..4725c74afa40 100644 --- a/lucene/analysis/nori/src/java/org/apache/lucene/analysis/ko/KoreanPartOfSpeechStopFilterFactory.java +++ b/lucene/analysis/nori/src/java/org/apache/lucene/analysis/ko/KoreanPartOfSpeechStopFilterFactory.java @@ -25,7 +25,7 @@ /** * Factory for {@link KoreanPartOfSpeechStopFilter}. * - *

+ * 

  * <fieldType name="text_ko" class="solr.TextField">
  *    <analyzer>
  *      <tokenizer class="solr.KoreanTokenizerFactory"/>
@@ -33,7 +33,7 @@
  *              tags="E,J"/>
  *    </analyzer>
  * </fieldType>
- * 
+ *
* *

Supports the following attributes: * diff --git a/lucene/analysis/nori/src/java/org/apache/lucene/analysis/ko/KoreanReadingFormFilterFactory.java b/lucene/analysis/nori/src/java/org/apache/lucene/analysis/ko/KoreanReadingFormFilterFactory.java index b2c7ae466e4e..5022558a3a67 100644 --- a/lucene/analysis/nori/src/java/org/apache/lucene/analysis/ko/KoreanReadingFormFilterFactory.java +++ b/lucene/analysis/nori/src/java/org/apache/lucene/analysis/ko/KoreanReadingFormFilterFactory.java @@ -23,14 +23,14 @@ /** * Factory for {@link KoreanReadingFormFilter}. * - *

+ * 

  * <fieldType name="text_ko" class="solr.TextField">
  *   <analyzer>
  *     <tokenizer class="solr.KoreanTokenizerFactory"/>
  *     <filter class="solr.KoreanReadingFormFilterFactory"/>
  *   </analyzer>
  * </fieldType>
- * 
+ *
* * @lucene.experimental * @since 7.4.0 diff --git a/lucene/analysis/nori/src/java/org/apache/lucene/analysis/ko/KoreanTokenizerFactory.java b/lucene/analysis/nori/src/java/org/apache/lucene/analysis/ko/KoreanTokenizerFactory.java index f7cdc1ed76de..399012fb52b8 100644 --- a/lucene/analysis/nori/src/java/org/apache/lucene/analysis/ko/KoreanTokenizerFactory.java +++ b/lucene/analysis/nori/src/java/org/apache/lucene/analysis/ko/KoreanTokenizerFactory.java @@ -36,7 +36,7 @@ /** * Factory for {@link KoreanTokenizer}. * - *
+ * 

  * <fieldType name="text_ko" class="solr.TextField">
  *   <analyzer>
  *     <tokenizer class="solr.KoreanTokenizerFactory"
@@ -48,7 +48,7 @@
  *     />
  *  </analyzer>
  * </fieldType>
- * 
+ *
* *

Supports the following attributes: * diff --git a/lucene/analysis/opennlp/src/java/org/apache/lucene/analysis/opennlp/OpenNLPChunkerFilterFactory.java b/lucene/analysis/opennlp/src/java/org/apache/lucene/analysis/opennlp/OpenNLPChunkerFilterFactory.java index 95188dd38236..9388450de2b6 100644 --- a/lucene/analysis/opennlp/src/java/org/apache/lucene/analysis/opennlp/OpenNLPChunkerFilterFactory.java +++ b/lucene/analysis/opennlp/src/java/org/apache/lucene/analysis/opennlp/OpenNLPChunkerFilterFactory.java @@ -29,14 +29,14 @@ /** * Factory for {@link OpenNLPChunkerFilter}. * - *

+ * 

  * <fieldType name="text_opennlp_chunked" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.OpenNLPTokenizerFactory" sentenceModel="filename" tokenizerModel="filename"/>
  *     <filter class="solr.OpenNLPPOSFilterFactory" posTaggerModel="filename"/>
  *     <filter class="solr.OpenNLPChunkerFilterFactory" chunkerModel="filename"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 7.3.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/opennlp/src/java/org/apache/lucene/analysis/opennlp/OpenNLPLemmatizerFilterFactory.java b/lucene/analysis/opennlp/src/java/org/apache/lucene/analysis/opennlp/OpenNLPLemmatizerFilterFactory.java index ae78d868634b..0a230cf5f06a 100644 --- a/lucene/analysis/opennlp/src/java/org/apache/lucene/analysis/opennlp/OpenNLPLemmatizerFilterFactory.java +++ b/lucene/analysis/opennlp/src/java/org/apache/lucene/analysis/opennlp/OpenNLPLemmatizerFilterFactory.java @@ -29,7 +29,7 @@ /** * Factory for {@link OpenNLPLemmatizerFilter}. * - *
+ * 

  * <fieldType name="text_opennlp_lemma" class="solr.TextField" positionIncrementGap="100"
  *   <analyzer>
  *     <tokenizer class="solr.OpenNLPTokenizerFactory"
@@ -40,7 +40,7 @@
  *             dictionary="filename"
  *             lemmatizerModel="filename"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 7.3.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/opennlp/src/java/org/apache/lucene/analysis/opennlp/OpenNLPPOSFilterFactory.java b/lucene/analysis/opennlp/src/java/org/apache/lucene/analysis/opennlp/OpenNLPPOSFilterFactory.java index a897daad8109..f94f3d36ae5e 100644 --- a/lucene/analysis/opennlp/src/java/org/apache/lucene/analysis/opennlp/OpenNLPPOSFilterFactory.java +++ b/lucene/analysis/opennlp/src/java/org/apache/lucene/analysis/opennlp/OpenNLPPOSFilterFactory.java @@ -28,13 +28,13 @@ /** * Factory for {@link OpenNLPPOSFilter}. * - *
+ * 

  * <fieldType name="text_opennlp_pos" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.OpenNLPTokenizerFactory" sentenceModel="filename" tokenizerModel="filename"/>
  *     <filter class="solr.OpenNLPPOSFilterFactory" posTaggerModel="filename"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 7.3.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/opennlp/src/java/org/apache/lucene/analysis/opennlp/OpenNLPTokenizerFactory.java b/lucene/analysis/opennlp/src/java/org/apache/lucene/analysis/opennlp/OpenNLPTokenizerFactory.java index 859a0dde4d8a..ee9b3d592a03 100644 --- a/lucene/analysis/opennlp/src/java/org/apache/lucene/analysis/opennlp/OpenNLPTokenizerFactory.java +++ b/lucene/analysis/opennlp/src/java/org/apache/lucene/analysis/opennlp/OpenNLPTokenizerFactory.java @@ -30,12 +30,12 @@ /** * Factory for {@link OpenNLPTokenizer}. * - *
+ * 

  * <fieldType name="text_opennlp" class="solr.TextField" positionIncrementGap="100"
  *   <analyzer>
  *     <tokenizer class="solr.OpenNLPTokenizerFactory" sentenceModel="filename" tokenizerModel="filename"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 7.3.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/phonetic/src/java/org/apache/lucene/analysis/phonetic/BeiderMorseFilterFactory.java b/lucene/analysis/phonetic/src/java/org/apache/lucene/analysis/phonetic/BeiderMorseFilterFactory.java index 8dfc75b4a2e5..fa6a372606a4 100644 --- a/lucene/analysis/phonetic/src/java/org/apache/lucene/analysis/phonetic/BeiderMorseFilterFactory.java +++ b/lucene/analysis/phonetic/src/java/org/apache/lucene/analysis/phonetic/BeiderMorseFilterFactory.java @@ -28,7 +28,7 @@ /** * Factory for {@link BeiderMorseFilter}. * - *
+ * 

  * <fieldType name="text_bm" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.StandardTokenizerFactory"/>
@@ -37,7 +37,7 @@
  *        concat="true" languageSet="auto"
  *     </filter>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.6.0 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/phonetic/src/java/org/apache/lucene/analysis/phonetic/DaitchMokotoffSoundexFilterFactory.java b/lucene/analysis/phonetic/src/java/org/apache/lucene/analysis/phonetic/DaitchMokotoffSoundexFilterFactory.java index 1b6ccaab9d2d..6a47db632d46 100644 --- a/lucene/analysis/phonetic/src/java/org/apache/lucene/analysis/phonetic/DaitchMokotoffSoundexFilterFactory.java +++ b/lucene/analysis/phonetic/src/java/org/apache/lucene/analysis/phonetic/DaitchMokotoffSoundexFilterFactory.java @@ -32,13 +32,13 @@ *
(default=true) add tokens to the stream with the offset=0 * * - *
+ * 

  * <fieldType name="text_phonetic" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.WhitespaceTokenizerFactory"/>
  *     <filter class="solr.DaitchMokotoffSoundexFilterFactory" inject="true"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @see DaitchMokotoffSoundexFilter * @lucene.experimental diff --git a/lucene/analysis/phonetic/src/java/org/apache/lucene/analysis/phonetic/DoubleMetaphoneFilterFactory.java b/lucene/analysis/phonetic/src/java/org/apache/lucene/analysis/phonetic/DoubleMetaphoneFilterFactory.java index 71e36640b4f7..0f4096c4caff 100644 --- a/lucene/analysis/phonetic/src/java/org/apache/lucene/analysis/phonetic/DoubleMetaphoneFilterFactory.java +++ b/lucene/analysis/phonetic/src/java/org/apache/lucene/analysis/phonetic/DoubleMetaphoneFilterFactory.java @@ -23,13 +23,13 @@ /** * Factory for {@link DoubleMetaphoneFilter}. * - *
+ * 

  * <fieldType name="text_dblmtphn" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.WhitespaceTokenizerFactory"/>
  *     <filter class="solr.DoubleMetaphoneFilterFactory" inject="true" maxCodeLength="4"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1 * @lucene.spi {@value #NAME} diff --git a/lucene/analysis/phonetic/src/java/org/apache/lucene/analysis/phonetic/PhoneticFilterFactory.java b/lucene/analysis/phonetic/src/java/org/apache/lucene/analysis/phonetic/PhoneticFilterFactory.java index e1df2e3408aa..15a091690f4f 100644 --- a/lucene/analysis/phonetic/src/java/org/apache/lucene/analysis/phonetic/PhoneticFilterFactory.java +++ b/lucene/analysis/phonetic/src/java/org/apache/lucene/analysis/phonetic/PhoneticFilterFactory.java @@ -56,13 +56,13 @@ * support this then specifying this is an error. * * - *
+ * 

  * <fieldType name="text_phonetic" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.WhitespaceTokenizerFactory"/>
  *     <filter class="solr.PhoneticFilterFactory" encoder="DoubleMetaphone" inject="true"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @see PhoneticFilter * @since 3.1 diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene50/Lucene50StoredFieldsFormat.java b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene50/Lucene50StoredFieldsFormat.java index c208f1252eba..560bdb2d1f33 100644 --- a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene50/Lucene50StoredFieldsFormat.java +++ b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene50/Lucene50StoredFieldsFormat.java @@ -55,12 +55,12 @@ * href="http://en.wikipedia.org/wiki/DEFLATE">DEFLATE algorithm with 60KB blocks for a better * ratio at the expense of slower performance. These two options can be configured like this: * - *
+ * 

  *   // the default: for high performance
  *   indexWriterConfig.setCodec(new Lucene54Codec(Mode.BEST_SPEED));
  *   // instead for higher performance (but slower):
  *   // indexWriterConfig.setCodec(new Lucene54Codec(Mode.BEST_COMPRESSION));
- * 
+ *
* *

File formats * diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene87/Lucene87StoredFieldsFormat.java b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene87/Lucene87StoredFieldsFormat.java index 570e50b94a68..988b979354fb 100644 --- a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene87/Lucene87StoredFieldsFormat.java +++ b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene87/Lucene87StoredFieldsFormat.java @@ -47,12 +47,12 @@ * dictionaries for a better ratio at the expense of slower performance. These two options can be * configured like this: * - *

+ * 

  *   // the default: for high performance
  *   indexWriterConfig.setCodec(new Lucene87Codec(Mode.BEST_SPEED));
  *   // instead for higher performance (but slower):
  *   // indexWriterConfig.setCodec(new Lucene87Codec(Mode.BEST_COMPRESSION));
- * 
+ *
* *

File formats * diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/packed/LegacyDirectReader.java b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/packed/LegacyDirectReader.java index d738e74953e0..b7b68ce6fed0 100644 --- a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/packed/LegacyDirectReader.java +++ b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/packed/LegacyDirectReader.java @@ -25,14 +25,14 @@ * *

Example usage: * - *

+ * 

  *   int bitsPerValue = 100;
  *   IndexInput in = dir.openInput("packed", IOContext.DEFAULT);
  *   LongValues values = LegacyDirectReader.getInstance(in.randomAccessSlice(start, end), bitsPerValue);
  *   for (int i = 0; i < numValues; i++) {
  *     long value = values.get(i);
  *   }
- * 
+ *
* * @see LegacyDirectWriter */ diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/packed/LegacyDirectWriter.java b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/packed/LegacyDirectWriter.java index 124bd7feacb2..2c5af1f43385 100644 --- a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/packed/LegacyDirectWriter.java +++ b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/packed/LegacyDirectWriter.java @@ -29,7 +29,7 @@ *

Unlike PackedInts, it optimizes for read i/o operations and supports > 2B values. Example * usage: * - *

+ * 

  *   int bitsPerValue = LegacyDirectWriter.bitsRequired(100); // values up to and including 100
  *   IndexOutput output = dir.createOutput("packed", IOContext.DEFAULT);
  *   DirectWriter writer = LegacyDirectWriter.getInstance(output, numberOfValues, bitsPerValue);
@@ -38,7 +38,7 @@
  *   }
  *   writer.finish();
  *   output.close();
- * 
+ *
* * @see LegacyDirectReader */ diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/package-info.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/package-info.java index 19aad3d0f0fd..807e37e5104a 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/package-info.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/package-info.java @@ -35,7 +35,7 @@ * *

Here is a sample code used to run the TREC 2006 queries 701-850 on the .Gov2 collection: * - *

+ * 

  *     File topicsFile = new File("topics-701-850.txt");
  *     File qrelsFile = new File("qrels-701-850.txt");
  *     IndexReader ir = DirectoryReader.open(directory):
@@ -67,7 +67,7 @@
  *     // print an average sum of the results
  *     QualityStats avg = QualityStats.average(stats);
  *     avg.log("SUMMARY",2,logger, "  ");
- * 
+ *
* *

Some immediate ways to modify this program to your needs are: * diff --git a/lucene/core/src/java/org/apache/lucene/analysis/Analyzer.java b/lucene/core/src/java/org/apache/lucene/analysis/Analyzer.java index 17f98abb5185..86057ebbb636 100644 --- a/lucene/core/src/java/org/apache/lucene/analysis/Analyzer.java +++ b/lucene/core/src/java/org/apache/lucene/analysis/Analyzer.java @@ -41,7 +41,7 @@ * *

Simple example: * - *

+ * 

  * Analyzer analyzer = new Analyzer() {
  *  {@literal @Override}
  *   protected TokenStreamComponents createComponents(String fieldName) {
@@ -57,7 +57,7 @@
  *     return new FooFilter(in);
  *   }
  * };
- * 
+ *
* * For more examples, see the {@link org.apache.lucene.analysis Analysis package documentation}. * diff --git a/lucene/core/src/java/org/apache/lucene/analysis/package-info.java b/lucene/core/src/java/org/apache/lucene/analysis/package-info.java index 256d2e00053a..4dfe81038f42 100644 --- a/lucene/core/src/java/org/apache/lucene/analysis/package-info.java +++ b/lucene/core/src/java/org/apache/lucene/analysis/package-info.java @@ -178,7 +178,7 @@ *

However an application might invoke Analysis of any text for testing or for any other purpose, * something like: * - *

+ * 

  *     Version matchVersion = Version.LUCENE_XY; // Substitute desired Lucene version for XY
  *     Analyzer analyzer = new StandardAnalyzer(matchVersion); // or any other analyzer
  *     TokenStream ts = analyzer.tokenStream("myfield", new StringReader("some text goes here"));
@@ -200,7 +200,7 @@
  *     } finally {
  *       ts.close(); // Release resources associated with this stream.
  *     }
- * 
+ *
* *

Indexing Analysis vs. Search Analysis

* @@ -251,25 +251,25 @@ * search and proximity search to seamlessly cross boundaries between these "sections". In other * words, if a certain field "f" is added like this: * - *
+ * 

  *     document.add(new Field("f","first ends",...);
  *     document.add(new Field("f","starts two",...);
  *     indexWriter.addDocument(document);
- * 
+ *
* *

Then, a phrase search for "ends starts" would find that document. Where desired, this behavior * can be modified by introducing a "position gap" between consecutive field "sections", simply by * overriding {@link org.apache.lucene.analysis.Analyzer#getPositionIncrementGap(java.lang.String) * Analyzer.getPositionIncrementGap(fieldName)}: * - *

+ * 

  *   Version matchVersion = Version.LUCENE_XY; // Substitute desired Lucene version for XY
  *   Analyzer myAnalyzer = new StandardAnalyzer(matchVersion) {
  *     public int getPositionIncrementGap(String fieldName) {
  *       return 10;
  *     }
  *   };
- * 
+ *
* *

End of Input Cleanup

* @@ -304,7 +304,7 @@ * the position increment in order not to generate corrupt tokenstream graphs. Here is the logic * used by StopFilter to increment positions when filtering out tokens: * - *
+ * 

  *   public TokenStream tokenStream(final String fieldName, Reader reader) {
  *     final TokenStream ts = someAnalyzer.tokenStream(fieldName, reader);
  *     TokenStream res = new TokenStream() {
@@ -330,7 +330,7 @@
  *     };
  *     return res;
  *   }
- * 
+ *
* *

A few more use cases for modifying position increments are: * @@ -537,7 +537,7 @@ * selected logic to another tokenizer) must also set the reader to the delegate in the overridden * {@link org.apache.lucene.analysis.Tokenizer#reset()} method, e.g.: * - *

+ * 

  *     public class ForwardingTokenizer extends Tokenizer {
  *        private Tokenizer delegate;
  *        ...
@@ -548,7 +548,7 @@
  *           delegate.reset();
  *        }
  *     }
- * 
+ *
* *

Testing Your Analysis Component

* @@ -603,7 +603,7 @@ * *

Whitespace tokenization

* - *
+ * 

  * public class MyAnalyzer extends Analyzer {
  *
  *   private Version matchVersion;
@@ -642,7 +642,7 @@
  *     }
  *   }
  * }
- * 
+ *
* * In this easy example a simple white space tokenization is performed. In main() a loop consumes * the stream and prints the term text of the tokens by accessing the CharTermAttribute that the @@ -666,14 +666,14 @@ * LengthFilter to the chain. Only the createComponents() method in our analyzer needs * to be changed: * - *
+ * 

  *   {@literal @Override}
  *   protected TokenStreamComponents createComponents(String fieldName) {
  *     final Tokenizer source = new WhitespaceTokenizer(matchVersion);
  *     TokenStream result = new LengthFilter(true, source, 3, Integer.MAX_VALUE);
  *     return new TokenStreamComponents(source, result);
  *   }
- * 
+ *
* * Note how now only words with 3 or more characters are contained in the output: * @@ -688,7 +688,7 @@ * * Now let's take a look at how the LengthFilter is implemented: * - *
+ * 

  * public final class LengthFilter extends FilteringTokenFilter {
  *
  *   private final int min;
@@ -718,7 +718,7 @@
  *   }
  *
  * }
- * 
+ *
* *

In LengthFilter, the CharTermAttribute is added and stored in the instance variable * termAtt. Remember that there can only be a single instance of CharTermAttribute in the @@ -734,7 +734,7 @@ * *

LengthFilter extends FilteringTokenFilter: * - *

+ * 

  * public abstract class FilteringTokenFilter extends TokenFilter {
  *
  *   private final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
@@ -772,7 +772,7 @@
  *   }
  *
  * }
- * 
+ *
* *

Adding a custom Attribute

* @@ -780,7 +780,7 @@ * consequently PartOfSpeechAttribute. First we need to define the interface of the new * Attribute: * - *
+ * 

  *   public interface PartOfSpeechAttribute extends Attribute {
  *     public enum PartOfSpeech {
  *       Noun, Verb, Adjective, Adverb, Pronoun, Preposition, Conjunction, Article, Unknown
@@ -790,7 +790,7 @@
  *
  *     public PartOfSpeech getPartOfSpeech();
  *   }
- * 
+ *
* *

Now we also need to write the implementing class. The name of that class is important here: By * default, Lucene checks if there is a class with the name of the Attribute with the suffix 'Impl'. @@ -805,7 +805,7 @@ *

Now here is the actual class that implements our new Attribute. Notice that the class has to * extend {@link org.apache.lucene.util.AttributeImpl}: * - *

+ * 

  * public final class PartOfSpeechAttributeImpl extends AttributeImpl
  *                                   implements PartOfSpeechAttribute {
  *
@@ -829,7 +829,7 @@
  *     ((PartOfSpeechAttribute) target).setPartOfSpeech(pos);
  *   }
  * }
- * 
+ *
* *

This is a simple Attribute implementation has only a single variable that stores the * part-of-speech of a token. It extends the AttributeImpl class and therefore @@ -838,7 +838,7 @@ * very naive filter that tags every word with a leading upper-case letter as a 'Noun' and all other * words as 'Unknown'. * - *

+ * 

  *   public static class PartOfSpeechTaggingFilter extends TokenFilter {
  *     PartOfSpeechAttribute posAtt = addAttribute(PartOfSpeechAttribute.class);
  *     CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
@@ -862,7 +862,7 @@
  *       return PartOfSpeech.Unknown;
  *     }
  *   }
- * 
+ *
* *

Just like the LengthFilter, this new filter stores references to the attributes it needs in * instance variables. Notice how you only need to pass in the interface of the new Attribute and @@ -870,7 +870,7 @@ * *

Now we need to add the filter to the chain in MyAnalyzer: * - *

+ * 

  *   {@literal @Override}
  *   protected TokenStreamComponents createComponents(String fieldName) {
  *     final Tokenizer source = new WhitespaceTokenizer(matchVersion);
@@ -878,7 +878,7 @@
  *     result = new PartOfSpeechTaggingFilter(result);
  *     return new TokenStreamComponents(source, result);
  *   }
- * 
+ *
* * Now let's look at the output: * @@ -895,7 +895,7 @@ * chain does not affect any existing consumers, simply because they don't know the new Attribute. * Now let's change the consumer to make use of the new PartOfSpeechAttribute and print it out: * - *
+ * 

  *   public static void main(String[] args) throws IOException {
  *     // text to tokenize
  *     final String text = "This is a demo of the TokenStream API";
@@ -922,7 +922,7 @@
  *       stream.close();
  *     }
  *   }
- * 
+ *
* * The change that was made is to get the PartOfSpeechAttribute from the TokenStream and print out * its contents in the while loop that consumes the stream. Here is the new output: @@ -945,7 +945,7 @@ * nouns if not the first word of a sentence (we know, this is still not a correct behavior, but * hey, it's a good exercise). As a small hint, this is how the new Attribute class could begin: * - *
+ * 

  *   public class FirstTokenOfSentenceAttributeImpl extends AttributeImpl
  *                               implements FirstTokenOfSentenceAttribute {
  *
@@ -965,7 +965,7 @@
  *     }
  *
  *   ...
- * 
+ *
* *

Adding a CharFilter chain

* @@ -980,7 +980,7 @@ * *

Example: * - *

+ * 

  * public class MyAnalyzer extends Analyzer {
  *
  *   {@literal @Override}
@@ -994,6 +994,6 @@
  *     return new SecondCharFilter(new FirstCharFilter(reader));
  *   }
  * }
- * 
+ *
*/ package org.apache.lucene.analysis; diff --git a/lucene/core/src/java/org/apache/lucene/analysis/standard/StandardTokenizerFactory.java b/lucene/core/src/java/org/apache/lucene/analysis/standard/StandardTokenizerFactory.java index ea8273a2758d..97f60a424c0e 100644 --- a/lucene/core/src/java/org/apache/lucene/analysis/standard/StandardTokenizerFactory.java +++ b/lucene/core/src/java/org/apache/lucene/analysis/standard/StandardTokenizerFactory.java @@ -23,12 +23,12 @@ /** * Factory for {@link StandardTokenizer}. * - *
+ * 

  * <fieldType name="text_stndrd" class="solr.TextField" positionIncrementGap="100">
  *   <analyzer>
  *     <tokenizer class="solr.StandardTokenizerFactory" maxTokenLength="255"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* * @since 3.1 * @lucene.spi {@value #NAME} diff --git a/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/TermToBytesRefAttribute.java b/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/TermToBytesRefAttribute.java index 19b0b9fb7efe..72cd82489d55 100644 --- a/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/TermToBytesRefAttribute.java +++ b/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/TermToBytesRefAttribute.java @@ -25,7 +25,7 @@ * *

Consumers of this attribute call {@link #getBytesRef()} for each term. Example: * - *

+ * 

  *   final TermToBytesRefAttribute termAtt = tokenStream.getAttribute(TermToBytesRefAttribute.class);
  *
  *   while (tokenStream.incrementToken() {
@@ -41,7 +41,7 @@
  *     }
  *   }
  *   ...
- * 
+ *
* * @lucene.internal This is a very expert and internal API, please use {@link CharTermAttribute} and * its implementation for UTF-8 terms; to index binary terms, use {@link BytesTermAttribute} and diff --git a/lucene/core/src/java/org/apache/lucene/codecs/CodecUtil.java b/lucene/core/src/java/org/apache/lucene/codecs/CodecUtil.java index bdfa78af87f1..91c1c045247d 100644 --- a/lucene/core/src/java/org/apache/lucene/codecs/CodecUtil.java +++ b/lucene/core/src/java/org/apache/lucene/codecs/CodecUtil.java @@ -454,7 +454,7 @@ public static long checkFooter(ChecksumIndexInput in) throws IOException { * *

Example usage: * - *

+   * 

    * try (ChecksumIndexInput input = ...) {
    *   Throwable priorE = null;
    *   try {
@@ -465,7 +465,7 @@ public static long checkFooter(ChecksumIndexInput in) throws IOException {
    *     CodecUtil.checkFooter(input, priorE);
    *   }
    * }
-   * 
+ *
*/ public static void checkFooter(ChecksumIndexInput in, Throwable priorException) throws IOException { diff --git a/lucene/core/src/java/org/apache/lucene/codecs/FilterCodec.java b/lucene/core/src/java/org/apache/lucene/codecs/FilterCodec.java index e3e83e588de7..4da7610343e9 100644 --- a/lucene/core/src/java/org/apache/lucene/codecs/FilterCodec.java +++ b/lucene/core/src/java/org/apache/lucene/codecs/FilterCodec.java @@ -22,7 +22,7 @@ *

Extend this class when you need to reuse the functionality of an existing codec. For example, * if you want to build a codec that redefines LuceneMN's {@link LiveDocsFormat}: * - *

+ * 

  *   public final class CustomCodec extends FilterCodec {
  *
  *     public CustomCodec() {
@@ -34,7 +34,7 @@
  *     }
  *
  *   }
- * 
+ *
* *

Please note: Don't call {@link Codec#forName} from the no-arg constructor of your own * codec. When the SPI framework loads your own Codec as SPI component, SPI has not yet fully diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90StoredFieldsFormat.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90StoredFieldsFormat.java index 9e367a3d9d82..e8f328a5ca30 100644 --- a/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90StoredFieldsFormat.java +++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90StoredFieldsFormat.java @@ -47,12 +47,12 @@ * dictionaries for a better ratio at the expense of slower performance. These two options can be * configured like this: * - *

+ * 

  *   // the default: for high performance
  *   indexWriterConfig.setCodec(new Lucene100Codec(Mode.BEST_SPEED));
  *   // instead for higher performance (but slower):
  *   // indexWriterConfig.setCodec(new Lucene100Codec(Mode.BEST_COMPRESSION));
- * 
+ *
* *

File formats * diff --git a/lucene/core/src/java/org/apache/lucene/codecs/package-info.java b/lucene/core/src/java/org/apache/lucene/codecs/package-info.java index e0cdd23ba729..ce585593c1f3 100644 --- a/lucene/core/src/java/org/apache/lucene/codecs/package-info.java +++ b/lucene/core/src/java/org/apache/lucene/codecs/package-info.java @@ -41,7 +41,7 @@ * codec, extend {@link org.apache.lucene.codecs.Codec} and pass the new codec's name to the super() * constructor: * - *

+ * 

  * public class MyCodec extends Codec {
  *
  *     public MyCodec() {
@@ -50,7 +50,7 @@
  *
  *     ...
  * }
- * 
+ *
* * You will need to register the Codec class so that the {@link java.util.ServiceLoader * ServiceLoader} can find it, by including a META-INF/services/org.apache.lucene.codecs.Codec file diff --git a/lucene/core/src/java/org/apache/lucene/document/BinaryDocValuesField.java b/lucene/core/src/java/org/apache/lucene/document/BinaryDocValuesField.java index ffebe97bcb07..2f83f6b1cc56 100644 --- a/lucene/core/src/java/org/apache/lucene/document/BinaryDocValuesField.java +++ b/lucene/core/src/java/org/apache/lucene/document/BinaryDocValuesField.java @@ -27,9 +27,9 @@ * share (many) values, such as a title field. If values may be shared and sorted it's better to use * {@link SortedDocValuesField}. Here's an example usage: * - *
+ * 

  *   document.add(new BinaryDocValuesField(name, new BytesRef("hello")));
- * 
+ *
* *

If you also need to store the value, you should add a separate {@link StoredField} instance. * diff --git a/lucene/core/src/java/org/apache/lucene/document/NumericDocValuesField.java b/lucene/core/src/java/org/apache/lucene/document/NumericDocValuesField.java index 6f1fb78e70c7..052ecc56e123 100644 --- a/lucene/core/src/java/org/apache/lucene/document/NumericDocValuesField.java +++ b/lucene/core/src/java/org/apache/lucene/document/NumericDocValuesField.java @@ -25,9 +25,9 @@ * Field that stores a per-document long value for scoring, sorting or value retrieval. * Here's an example usage: * - *

+ * 

  *   document.add(new NumericDocValuesField(name, 22L));
- * 
+ *
* *

If you also need to store the value, you should add a separate {@link StoredField} instance. */ diff --git a/lucene/core/src/java/org/apache/lucene/document/SortedDocValuesField.java b/lucene/core/src/java/org/apache/lucene/document/SortedDocValuesField.java index 746f65ae5647..e8e18cdd1e53 100644 --- a/lucene/core/src/java/org/apache/lucene/document/SortedDocValuesField.java +++ b/lucene/core/src/java/org/apache/lucene/document/SortedDocValuesField.java @@ -29,9 +29,9 @@ * Field that stores a per-document {@link BytesRef} value, indexed for sorting. Here's an example * usage: * - *

+ * 

  *   document.add(new SortedDocValuesField(name, new BytesRef("hello")));
- * 
+ *
* *

If you also need to store the value, you should add a separate {@link StoredField} instance. * diff --git a/lucene/core/src/java/org/apache/lucene/document/SortedNumericDocValuesField.java b/lucene/core/src/java/org/apache/lucene/document/SortedNumericDocValuesField.java index 0efb6f463939..d9db2a65595f 100644 --- a/lucene/core/src/java/org/apache/lucene/document/SortedNumericDocValuesField.java +++ b/lucene/core/src/java/org/apache/lucene/document/SortedNumericDocValuesField.java @@ -25,17 +25,17 @@ * Field that stores a per-document long values for scoring, sorting or value * retrieval. Here's an example usage: * - *

+ * 

  *   document.add(new SortedNumericDocValuesField(name, 5L));
  *   document.add(new SortedNumericDocValuesField(name, 14L));
- * 
+ *
* *

Note that if you want to encode doubles or floats with proper sort order, you will need to * encode them with {@link org.apache.lucene.util.NumericUtils}: * - *

+ * 

  *   document.add(new SortedNumericDocValuesField(name, NumericUtils.floatToSortableInt(-5.3f)));
- * 
+ *
* *

If you also need to store the value, you should add a separate {@link StoredField} instance. */ diff --git a/lucene/core/src/java/org/apache/lucene/document/SortedSetDocValuesField.java b/lucene/core/src/java/org/apache/lucene/document/SortedSetDocValuesField.java index 02e5a82d6a6f..014ca27416fb 100644 --- a/lucene/core/src/java/org/apache/lucene/document/SortedSetDocValuesField.java +++ b/lucene/core/src/java/org/apache/lucene/document/SortedSetDocValuesField.java @@ -29,10 +29,10 @@ * Field that stores a set of per-document {@link BytesRef} values, indexed for * faceting,grouping,joining. Here's an example usage: * - *

+ * 

  *   document.add(new SortedSetDocValuesField(name, new BytesRef("hello")));
  *   document.add(new SortedSetDocValuesField(name, new BytesRef("world")));
- * 
+ *
* *

If you also need to store the value, you should add a separate {@link StoredField} instance. * diff --git a/lucene/core/src/java/org/apache/lucene/index/IndexReader.java b/lucene/core/src/java/org/apache/lucene/index/IndexReader.java index f14b36ac89af..676d2db0ee3d 100644 --- a/lucene/core/src/java/org/apache/lucene/index/IndexReader.java +++ b/lucene/core/src/java/org/apache/lucene/index/IndexReader.java @@ -74,7 +74,7 @@ public abstract sealed class IndexReader implements Closeable permits CompositeR * *

Example: cache the number of documents that match a query per reader. * - *

+   * 

    * public class QueryCountCache {
    *
    *   private final Query query;
@@ -105,7 +105,7 @@ public abstract sealed class IndexReader implements Closeable permits CompositeR
    *   }
    *
    * }
-   * 
+ *
* * @lucene.experimental */ @@ -275,13 +275,13 @@ public final int hashCode() { * *

Example: * - *

+   * 

    * TopDocs hits = searcher.search(query, 10);
    * TermVectors termVectors = reader.termVectors();
    * for (ScoreDoc hit : hits.scoreDocs) {
    *   Fields vector = termVectors.get(hit.doc);
    * }
-   * 
+ *
* * @throws IOException If there is a low-level IO error */ @@ -319,13 +319,13 @@ public final int numDeletedDocs() { * *

Example: * - *

+   * 

    * TopDocs hits = searcher.search(query, 10);
    * StoredFields storedFields = reader.storedFields();
    * for (ScoreDoc hit : hits.scoreDocs) {
    *   Document doc = storedFields.document(hit.doc);
    * }
-   * 
+ *
* * @throws IOException If there is a low-level IO error */ diff --git a/lucene/core/src/java/org/apache/lucene/index/IndexWriterConfig.java b/lucene/core/src/java/org/apache/lucene/index/IndexWriterConfig.java index 7c1a9cb92866..a1e1b7eaad9e 100644 --- a/lucene/core/src/java/org/apache/lucene/index/IndexWriterConfig.java +++ b/lucene/core/src/java/org/apache/lucene/index/IndexWriterConfig.java @@ -43,10 +43,10 @@ *

All setter methods return {@link IndexWriterConfig} to allow chaining settings conveniently, * for example: * - *

+ * 

  * IndexWriterConfig conf = new IndexWriterConfig(analyzer);
  * conf.setter1().setter2();
- * 
+ *
* * @see IndexWriter#getConfig() * @since 3.1 diff --git a/lucene/core/src/java/org/apache/lucene/index/MergeScheduler.java b/lucene/core/src/java/org/apache/lucene/index/MergeScheduler.java index fb8286b31bd6..c426c94faddb 100644 --- a/lucene/core/src/java/org/apache/lucene/index/MergeScheduler.java +++ b/lucene/core/src/java/org/apache/lucene/index/MergeScheduler.java @@ -84,11 +84,11 @@ void initialize(InfoStream infoStream, Directory directory) throws IOException { * Returns true if infoStream messages are enabled. This method is usually used in conjunction * with {@link #message(String)}: * - *
+   * 

    * if (verbose()) {
    *   message("your message");
    * }
-   * 
+ *
*/ protected boolean verbose() { return infoStream != null && infoStream.isEnabled("MS"); diff --git a/lucene/core/src/java/org/apache/lucene/index/NumericDocValues.java b/lucene/core/src/java/org/apache/lucene/index/NumericDocValues.java index 4e89f3b3d807..250860fcebbc 100644 --- a/lucene/core/src/java/org/apache/lucene/index/NumericDocValues.java +++ b/lucene/core/src/java/org/apache/lucene/index/NumericDocValues.java @@ -41,7 +41,7 @@ protected NumericDocValues() {} * *

This API behaves as if implemented as below, which is the default implementation: * - *

+   * 

    * public void longValues(int size, int[] docs, long[] values, long defaultValue) throws IOException {
    *   for (int i = 0; i < size; ++i) {
    *     int doc = docs[i];
@@ -54,7 +54,7 @@ protected NumericDocValues() {}
    *     values[i] = value;
    *   }
    * }
-   * 
+ *
* *

NOTE: The {@code docs} array is required to be sorted in ascending order with no * duplicates. @@ -65,13 +65,13 @@ protected NumericDocValues() {} * using {@link #advanceExact} and {@link #longValue()} on ranges of doc IDs that may not be * dense, e.g. * - *

+   * 

    * if (size > 0 && values.advannceExact(docs[0]) && values.docIDRunEnd() > docs[size - 1]) {
    *   // use values#longValues to retrieve values
    * } else {
    *   // some docs may not have a value, use #advanceExact and #longValue
    * }
-   * 
+ *
* * @param size the number of values to retrieve * @param docs the buffer of doc IDs whose values should be looked up diff --git a/lucene/core/src/java/org/apache/lucene/index/PointValues.java b/lucene/core/src/java/org/apache/lucene/index/PointValues.java index c77eec0e5ffd..328bdedb0574 100644 --- a/lucene/core/src/java/org/apache/lucene/index/PointValues.java +++ b/lucene/core/src/java/org/apache/lucene/index/PointValues.java @@ -65,7 +65,7 @@ * addition to indexing support, point classes also contain static methods (such as {@link * IntPoint#newRangeQuery(String, int, int)}) for creating common queries. For example: * - *
+ * 

  *   // add year 1970 to document
  *   document.add(new IntPoint("year", 1970));
  *   // index document
@@ -74,7 +74,7 @@
  *   // issue range query of 1960-1980
  *   Query query = IntPoint.newRangeQuery("year", 1960, 1980);
  *   TopDocs docs = searcher.search(query, ...);
- * 
+ *
* *

Geospatial Point Types

* diff --git a/lucene/core/src/java/org/apache/lucene/index/PostingsEnum.java b/lucene/core/src/java/org/apache/lucene/index/PostingsEnum.java index 8fd9d8c9a373..0b09186e7f21 100644 --- a/lucene/core/src/java/org/apache/lucene/index/PostingsEnum.java +++ b/lucene/core/src/java/org/apache/lucene/index/PostingsEnum.java @@ -114,7 +114,7 @@ protected PostingsEnum() {} * *

The default implementation is provided below: * - *

+   * 

    * int batchSize = 16; // arbitrary
    * buffer.growNoCopy(batchSize);
    * int size = 0;
@@ -124,7 +124,7 @@ protected PostingsEnum() {}
    *   ++size;
    * }
    * buffer.size = size;
-   * 
+ *
* *

NOTE: The provided {@link DocAndFloatFeatureBuffer} should not hold references to * internal data structures. diff --git a/lucene/core/src/java/org/apache/lucene/index/UpgradeIndexMergePolicy.java b/lucene/core/src/java/org/apache/lucene/index/UpgradeIndexMergePolicy.java index 944fd780d8c1..836b62be8a0b 100644 --- a/lucene/core/src/java/org/apache/lucene/index/UpgradeIndexMergePolicy.java +++ b/lucene/core/src/java/org/apache/lucene/index/UpgradeIndexMergePolicy.java @@ -33,13 +33,13 @@ *

In general one would use {@link IndexUpgrader}, but for a fully customizeable upgrade, you can * use this like any other {@code MergePolicy} and call {@link IndexWriter#forceMerge(int)}: * - *

+ * 

  *  IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_XX, new KeywordAnalyzer());
  *  iwc.setMergePolicy(new UpgradeIndexMergePolicy(iwc.getMergePolicy()));
  *  IndexWriter w = new IndexWriter(dir, iwc);
  *  w.forceMerge(1);
  *  w.close();
- * 
+ *
* *

Warning: This merge policy may reorder documents if the index was partially upgraded * before calling forceMerge (e.g., documents were added). If your application relies on diff --git a/lucene/core/src/java/org/apache/lucene/index/package-info.java b/lucene/core/src/java/org/apache/lucene/index/package-info.java index c554e081af7d..4a1684ebaf15 100644 --- a/lucene/core/src/java/org/apache/lucene/index/package-info.java +++ b/lucene/core/src/java/org/apache/lucene/index/package-info.java @@ -155,7 +155,7 @@ *

{@link org.apache.lucene.index.Terms} represents the collection of terms within a field, * exposes some metadata and statistics, and an API for enumeration. * - *

+ * 

  * Terms terms = leafReader.terms("body");
  * // metadata about the field
  * System.out.println("positions? " + terms.hasPositions());
@@ -167,13 +167,13 @@
  * while ((term = termsEnum.next()) != null) {
  *   doSomethingWith(term);
  * }
- * 
+ *
* * {@link org.apache.lucene.index.TermsEnum} provides an iterator over the list of terms within a * field, some statistics about the term, and methods to access the term's * documents and positions. * - *
+ * 

  * // seek to a specific term
  * boolean found = termsEnum.seekExact(new BytesRef("foobar"));
  * if (found) {
@@ -184,7 +184,7 @@
  *   // enumerate through documents and positions
  *   PostingsEnum docsAndPositions = termsEnum.postings(null, PostingsEnum.POSITIONS);
  * }
- * 
+ *
* * * @@ -194,13 +194,13 @@ * org.apache.lucene.search.DocIdSetIterator} that iterates over the list of documents for a term, * along with the term frequency within that document. * - *
+ * 

  * int docid;
  * while ((docid = docsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
  *   System.out.println(docid);
  *   System.out.println(docsEnum.freq());
  * }
- * 
+ *
* * * @@ -210,7 +210,7 @@ * any additional per-position information (offsets and payload). The information available is * controlled by flags passed to TermsEnum#postings * - *
+ * 

  * int docid;
  * PostingsEnum postings = termsEnum.postings(null, PostingsEnum.PAYLOADS | PostingsEnum.OFFSETS);
  * while ((docid = postings.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
@@ -223,7 +223,7 @@
  *     System.out.println(postings.getPayload());
  *   }
  * }
- * 
+ *
* *

Impacts

* @@ -233,7 +233,7 @@ * these blocks of postings, so that they can be skipped if they cannot possibly produce a * competitive hit. * - *
+ * 

  * int docid;
  * ImpactsEnum impactsEnum = termsEnum.impacts(PostingsEnum.FREQS);
  * int targetDocID = 420;
@@ -245,7 +245,7 @@
  *   // List of pareto-optimal (termFreq, lengthNorm) tuples between targetDocID inclusive and docIdUpTo inclusive.
  *   List<Impact> perLevelImpacts = impacts.getImpacts(level);
  * }
- * 
+ *
* * * diff --git a/lucene/core/src/java/org/apache/lucene/search/BooleanQuery.java b/lucene/core/src/java/org/apache/lucene/search/BooleanQuery.java index 3ab0bf72038f..ae5b159c1d58 100644 --- a/lucene/core/src/java/org/apache/lucene/search/BooleanQuery.java +++ b/lucene/core/src/java/org/apache/lucene/search/BooleanQuery.java @@ -203,7 +203,7 @@ Query[] rewriteTwoClauseDisjunctionWithTermsForCount(IndexSearcher indexSearcher * Returns an iterator on the clauses in this query. It implements the {@link Iterable} interface * to make it possible to do: * - *
for (BooleanClause clause : booleanQuery) {}
+ *
for (BooleanClause clause : booleanQuery) {}
*/ @Override public final Iterator iterator() { diff --git a/lucene/core/src/java/org/apache/lucene/search/BulkScorer.java b/lucene/core/src/java/org/apache/lucene/search/BulkScorer.java index eecf0164a0f1..29234921bc42 100644 --- a/lucene/core/src/java/org/apache/lucene/search/BulkScorer.java +++ b/lucene/core/src/java/org/apache/lucene/search/BulkScorer.java @@ -47,7 +47,7 @@ public abstract class BulkScorer { * *

For instance, a {@link Scorer}-based implementation could look like below: * - *

+   * 

    * private final Scorer scorer; // set via constructor
    *
    * public int score(LeafCollector collector, Bits acceptDocs, int min, int max) throws IOException {
@@ -64,7 +64,7 @@ public abstract class BulkScorer {
    *   }
    *   return doc;
    * }
-   * 
+ *
* * @param collector The collector to which all matching documents are passed. * @param acceptDocs {@link Bits} that represents the allowed documents to match, or {@code null} diff --git a/lucene/core/src/java/org/apache/lucene/search/DocIdSetIterator.java b/lucene/core/src/java/org/apache/lucene/search/DocIdSetIterator.java index cfed223d165b..9443985c20fd 100644 --- a/lucene/core/src/java/org/apache/lucene/search/DocIdSetIterator.java +++ b/lucene/core/src/java/org/apache/lucene/search/DocIdSetIterator.java @@ -143,14 +143,14 @@ public int docIDRunEnd() throws IOException { * *

When target > current it behaves as if written: * - *

+   * 

    * int advance(int target) {
    *   int doc;
    *   while ((doc = nextDoc()) < target) {
    *   }
    *   return doc;
    * }
-   * 
+ *
* * Some implementations are considerably more efficient than that. * @@ -187,11 +187,11 @@ protected final int slowAdvance(int target) throws IOException { * Load doc IDs into a {@link FixedBitSet}. This should behave exactly as if implemented as below, * which is the default implementation: * - *
+   * 

    * for (int doc = docID(); doc < upTo; doc = nextDoc()) {
    *   bitSet.set(doc - offset);
    * }
-   * 
+ *
* *

Note: {@code offset} must be less than or equal to the {@link #docID() current doc * ID}. Behaviour is undefined if this iterator is unpositioned. diff --git a/lucene/core/src/java/org/apache/lucene/search/HitQueue.java b/lucene/core/src/java/org/apache/lucene/search/HitQueue.java index a2032991f4bd..e77c756b3b62 100644 --- a/lucene/core/src/java/org/apache/lucene/search/HitQueue.java +++ b/lucene/core/src/java/org/apache/lucene/search/HitQueue.java @@ -33,7 +33,7 @@ public final class HitQueue extends PriorityQueue { * NOTE: in case prePopulate is true, you should pop elements from the queue * using the following code example: * - *

+   * 

    * PriorityQueue<ScoreDoc> pq = new HitQueue(10, true); // pre-populate.
    * ScoreDoc top = pq.top();
    *
@@ -52,7 +52,7 @@ public final class HitQueue extends PriorityQueue {
    * for (int i = totalHits - 1; i >= 0; i--) {
    *   results[i] = (ScoreDoc) pq.pop();
    * }
-   * 
+ *
* *

NOTE: This class pre-allocate a full array of length size. * diff --git a/lucene/core/src/java/org/apache/lucene/search/IndexOrDocValuesQuery.java b/lucene/core/src/java/org/apache/lucene/search/IndexOrDocValuesQuery.java index a26b07967c5a..57b9af2289aa 100644 --- a/lucene/core/src/java/org/apache/lucene/search/IndexOrDocValuesQuery.java +++ b/lucene/core/src/java/org/apache/lucene/search/IndexOrDocValuesQuery.java @@ -29,13 +29,13 @@ * SortedNumericDocValuesField}s with the same values, an efficient range query could be created by * doing: * - *

+ * 

  *   String field;
  *   long minValue, maxValue;
  *   Query pointQuery = LongPoint.newRangeQuery(field, minValue, maxValue);
  *   Query dvQuery = SortedNumericDocValuesField.newSlowRangeQuery(field, minValue, maxValue);
  *   Query query = new IndexOrDocValuesQuery(pointQuery, dvQuery);
- * 
+ *
* * The above query will be efficient as it will use points in the case that they perform better, ie. * when we need a good lead iterator that will be almost entirely consumed; and doc values diff --git a/lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java b/lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java index d1079b69089a..ed595af30f40 100644 --- a/lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java +++ b/lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java @@ -458,13 +458,13 @@ public IndexReader getIndexReader() { * *

Example: * - *

+   * 

    * TopDocs hits = searcher.search(query, 10);
    * StoredFields storedFields = searcher.storedFields();
    * for (ScoreDoc hit : hits.scoreDocs) {
    *   Document doc = storedFields.document(hit.doc);
    * }
-   * 
+ *
* * @throws IOException If there is a low-level IO error * @see IndexReader#storedFields() diff --git a/lucene/core/src/java/org/apache/lucene/search/IndexSortSortedNumericDocValuesRangeQuery.java b/lucene/core/src/java/org/apache/lucene/search/IndexSortSortedNumericDocValuesRangeQuery.java index ac85f3068826..0f781a1a5127 100644 --- a/lucene/core/src/java/org/apache/lucene/search/IndexSortSortedNumericDocValuesRangeQuery.java +++ b/lucene/core/src/java/org/apache/lucene/search/IndexSortSortedNumericDocValuesRangeQuery.java @@ -56,13 +56,13 @@ * give constant scores. As an example, an {@link IndexSortSortedNumericDocValuesRangeQuery} might * be constructed as follows: * - *
+ * 

  *   String field = "field";
  *   long lowerValue = 0, long upperValue = 10;
  *   Query fallbackQuery = LongPoint.newRangeQuery(field, lowerValue, upperValue);
  *   Query rangeQuery = new IndexSortSortedNumericDocValuesRangeQuery(
  *       field, lowerValue, upperValue, fallbackQuery);
- * 
+ *
* * @lucene.experimental */ diff --git a/lucene/core/src/java/org/apache/lucene/search/LRUQueryCache.java b/lucene/core/src/java/org/apache/lucene/search/LRUQueryCache.java index bab2be625774..dd9b91ba139d 100644 --- a/lucene/core/src/java/org/apache/lucene/search/LRUQueryCache.java +++ b/lucene/core/src/java/org/apache/lucene/search/LRUQueryCache.java @@ -59,7 +59,7 @@ *

A default query cache and policy instance is used in IndexSearcher. If you want to replace * those defaults it is typically done like this: * - *

+ * 

  *   final int maxNumberOfCachedQueries = 256;
  *   final long maxRamBytesUsed = 50 * 1024L * 1024L; // 50MB
  *   // these cache and policy instances can be shared across several queries and readers
@@ -68,7 +68,7 @@
  *   final QueryCachingPolicy defaultCachingPolicy = new UsageTrackingQueryCachingPolicy();
  *   indexSearcher.setQueryCache(queryCache);
  *   indexSearcher.setQueryCachingPolicy(defaultCachingPolicy);
- * 
+ *
* * This cache exposes some global statistics ({@link #getHitCount() hit count}, {@link * #getMissCount() miss count}, {@link #getCacheSize() number of cache entries}, {@link diff --git a/lucene/core/src/java/org/apache/lucene/search/LeafCollector.java b/lucene/core/src/java/org/apache/lucene/search/LeafCollector.java index f8765c18372d..7e4ee528da5d 100644 --- a/lucene/core/src/java/org/apache/lucene/search/LeafCollector.java +++ b/lucene/core/src/java/org/apache/lucene/search/LeafCollector.java @@ -32,7 +32,7 @@ * it by recording the docBase from the most recent setNextReader call. Here's a simple example * showing how to collect docIDs into a BitSet: * - *
+ * 

  * IndexSearcher searcher = new IndexSearcher(indexReader);
  * final BitSet bits = new BitSet(indexReader.maxDoc());
  * searcher.search(query, new Collector() {
@@ -54,7 +54,7 @@
  *   }
  *
  * });
- * 
+ *
* *

Not all collectors will need to rebase the docID. For example, a collector that simply counts * the total number of hits would skip it. diff --git a/lucene/core/src/java/org/apache/lucene/search/PhraseQuery.java b/lucene/core/src/java/org/apache/lucene/search/PhraseQuery.java index 122554747394..16fc86610c35 100644 --- a/lucene/core/src/java/org/apache/lucene/search/PhraseQuery.java +++ b/lucene/core/src/java/org/apache/lucene/search/PhraseQuery.java @@ -52,21 +52,21 @@ * Also, Leading holes don't have any particular meaning for this query and will be ignored. For * instance this query: * - *

+ * 

  * PhraseQuery.Builder builder = new PhraseQuery.Builder();
  * builder.add(new Term("body", "one"), 4);
  * builder.add(new Term("body", "two"), 5);
  * PhraseQuery pq = builder.build();
- * 
+ *
* * is equivalent to the below query: * - *
+ * 

  * PhraseQuery.Builder builder = new PhraseQuery.Builder();
  * builder.add(new Term("body", "one"), 0);
  * builder.add(new Term("body", "two"), 1);
  * PhraseQuery pq = builder.build();
- * 
+ *
*/ public class PhraseQuery extends Query { diff --git a/lucene/core/src/java/org/apache/lucene/search/Scorer.java b/lucene/core/src/java/org/apache/lucene/search/Scorer.java index fc540c30cc42..0fc5b61ff508 100644 --- a/lucene/core/src/java/org/apache/lucene/search/Scorer.java +++ b/lucene/core/src/java/org/apache/lucene/search/Scorer.java @@ -92,7 +92,7 @@ public int advanceShallow(int target) throws IOException { * *

The default implementation is provided below: * - *

+   * 

    * int batchSize = 64; // arbitrary
    * buffer.growNoCopy(batchSize);
    * int size = 0;
@@ -105,7 +105,7 @@ public int advanceShallow(int target) throws IOException {
    *   }
    * }
    * buffer.size = size;
-   * 
+ *
* *

NOTE: The provided {@link DocAndFloatFeatureBuffer} should not hold references to * internal data structures. diff --git a/lucene/core/src/java/org/apache/lucene/search/SearcherFactory.java b/lucene/core/src/java/org/apache/lucene/search/SearcherFactory.java index 325c15fe37a9..5d164cda55fc 100644 --- a/lucene/core/src/java/org/apache/lucene/search/SearcherFactory.java +++ b/lucene/core/src/java/org/apache/lucene/search/SearcherFactory.java @@ -27,11 +27,11 @@ * Factory class used by {@link SearcherManager} to create new IndexSearchers. The default * implementation just creates an IndexSearcher with no custom behavior: * - *

+ * 

  *   public IndexSearcher newSearcher(IndexReader r) throws IOException {
  *     return new IndexSearcher(r);
  *   }
- * 
+ *
* * You can pass your own factory instead if you want custom behavior, such as: * diff --git a/lucene/core/src/java/org/apache/lucene/search/SearcherLifetimeManager.java b/lucene/core/src/java/org/apache/lucene/search/SearcherLifetimeManager.java index 08978a1293ea..b0bd73acfaaf 100644 --- a/lucene/core/src/java/org/apache/lucene/search/SearcherLifetimeManager.java +++ b/lucene/core/src/java/org/apache/lucene/search/SearcherLifetimeManager.java @@ -31,24 +31,24 @@ * *

Use it like this: * - *

+ * 

  *   SearcherLifetimeManager mgr = new SearcherLifetimeManager();
- * 
+ *
* * Per search-request, if it's a "new" search request, then obtain the latest searcher you have (for * example, by using {@link SearcherManager}), and then record this searcher: * - *
+ * 

  *   // Record the current searcher, and save the returend
  *   // token into user's search results (eg as a  hidden
  *   // HTML form field):
  *   long token = mgr.record(searcher);
- * 
+ *
* * When a follow-up search arrives, for example the user clicks next page, drills down/up, etc., * take the token that you saved from the previous search and: * - *
+ * 

  *   // If possible, obtain the same searcher as the last
  *   // search:
  *   IndexSearcher searcher = mgr.acquire(token);
@@ -65,14 +65,14 @@
  *     // Searcher was pruned -- notify user session timed
  *     // out, or, pull fresh searcher again
  *   }
- * 
+ *
* * Finally, in a separate thread, ideally the same thread that's periodically reopening your * searchers, you should periodically prune old searchers: * - *
+ * 

  *   mgr.prune(new PruneByAge(600.0));
- * 
+ *
* *

NOTE: keeping many searchers around means you'll use more resources (open files, RAM) * than a single searcher. However, as long as you are using {@link diff --git a/lucene/core/src/java/org/apache/lucene/search/SearcherManager.java b/lucene/core/src/java/org/apache/lucene/search/SearcherManager.java index 862e62a8c5bb..3a84fef4d741 100644 --- a/lucene/core/src/java/org/apache/lucene/search/SearcherManager.java +++ b/lucene/core/src/java/org/apache/lucene/search/SearcherManager.java @@ -31,7 +31,7 @@ *

Use {@link #acquire} to obtain the current searcher, and {@link #release} to release it, like * this: * - *

+ * 

  * IndexSearcher s = manager.acquire();
  * try {
  *   // Do searching, doc retrieval, etc. with s
@@ -40,7 +40,7 @@
  * }
  * // Do not use s after this!
  * s = null;
- * 
+ *
* *

In addition you should periodically call {@link #maybeRefresh}. While it's possible to call * this just before running each query, this is discouraged since it penalizes the unlucky queries diff --git a/lucene/core/src/java/org/apache/lucene/search/TermInSetQuery.java b/lucene/core/src/java/org/apache/lucene/search/TermInSetQuery.java index 9c80972d3d70..1c1a633f44ab 100644 --- a/lucene/core/src/java/org/apache/lucene/search/TermInSetQuery.java +++ b/lucene/core/src/java/org/apache/lucene/search/TermInSetQuery.java @@ -47,14 +47,14 @@ *

For instance in the following example, both {@code q1} and {@code q2} would yield the same * scores: * - *

+ * 

  * Query q1 = new TermInSetQuery("field", new BytesRef("foo"), new BytesRef("bar"));
  *
  * BooleanQuery bq = new BooleanQuery();
  * bq.add(new TermQuery(new Term("field", "foo")), Occur.SHOULD);
  * bq.add(new TermQuery(new Term("field", "bar")), Occur.SHOULD);
  * Query q2 = new ConstantScoreQuery(bq);
- * 
+ *
* *

Unless a custom {@link MultiTermQuery.RewriteMethod} is provided, this query executes like a * regular disjunction where there are few terms. However, when there are many terms, instead of diff --git a/lucene/core/src/java/org/apache/lucene/search/Weight.java b/lucene/core/src/java/org/apache/lucene/search/Weight.java index 341dd3cadf6a..c0add319ac5c 100644 --- a/lucene/core/src/java/org/apache/lucene/search/Weight.java +++ b/lucene/core/src/java/org/apache/lucene/search/Weight.java @@ -153,7 +153,7 @@ public final Scorer scorer(LeafReaderContext context) throws IOException { * Helper method that delegates to {@link #scorerSupplier(LeafReaderContext)}. It is implemented * as * - *

+   * 

    * ScorerSupplier scorerSupplier = scorerSupplier(context);
    * if (scorerSupplier == null) {
    *   // No docs match
@@ -162,7 +162,7 @@ public final Scorer scorer(LeafReaderContext context) throws IOException {
    *
    * scorerSupplier.setTopLevelScoringClause();
    * return scorerSupplier.bulkScorer();
-   * 
+ *
* * A bulk scorer for the same {@link LeafReaderContext} instance may be requested multiple times * as part of a single search call. diff --git a/lucene/core/src/java/org/apache/lucene/search/package-info.java b/lucene/core/src/java/org/apache/lucene/search/package-info.java index b01de702cc1f..d974a31dd9ab 100644 --- a/lucene/core/src/java/org/apache/lucene/search/package-info.java +++ b/lucene/core/src/java/org/apache/lucene/search/package-info.java @@ -74,9 +74,9 @@ * org.apache.lucene.document.Field Field} with the specified string in it. Constructing a {@link * org.apache.lucene.search.TermQuery TermQuery} is as simple as: * - *
+ * 

  * TermQuery tq = new TermQuery(new Term("fieldName", "term"));
- * 
+ *
* * In this example, the {@link org.apache.lucene.search.Query Query} identifies all {@link * org.apache.lucene.document.Document Document}s that have the {@link @@ -290,7 +290,7 @@ *

Here is an example that constructs a query on "apache OR lucene" on fields "title" with a * boost of 10, and "body" with a boost of 1: * - *

+ * 

  * BooleanQuery.Builder builder = new BooleanQuery.Builder();
  * for (String term : new String[] { "apache", "lucene" }) {
  *   Query query = new CombinedFieldQuery(term)
@@ -300,7 +300,7 @@
  *   builder.add(query, Occur.SHOULD);
  * }
  * Query query = builder.build();
- * 
+ *
* *

Integrating field values into the score

* @@ -311,7 +311,7 @@ * linear combination. For instance the below query matches the same documents as {@code * originalQuery} and computes scores as {@code similarityScore + 0.7 * featureScore}: * - *
+ * 

  * Query originalQuery = new BooleanQuery.Builder()
  *     .add(new TermQuery(new Term("body", "apache")), Occur.SHOULD)
  *     .add(new TermQuery(new Term("body", "lucene")), Occur.SHOULD)
@@ -321,7 +321,7 @@
  *     .add(originalQuery, Occur.MUST)
  *     .add(new BoostQuery(featureQuery, 0.7f), Occur.SHOULD)
  *     .build();
- * 
+ *
* *

A less efficient yet more flexible way of modifying scores is to index scoring features into * doc-value fields and then combine them with the similarity score using a + *


  *   // compile an expression:
  *   Expression expr = JavascriptCompiler.compile("_score * ln(popularity)");
  *
@@ -346,7 +346,7 @@
  *   Query query = new FunctionScoreQuery(
  *       originalQuery,
  *       expr.getDoubleValuesSource(bindings));
- * 
+ * * * * diff --git a/lucene/core/src/java/org/apache/lucene/search/similarities/BM25Similarity.java b/lucene/core/src/java/org/apache/lucene/search/similarities/BM25Similarity.java index 78e89f515160..5c5d85d665e8 100644 --- a/lucene/core/src/java/org/apache/lucene/search/similarities/BM25Similarity.java +++ b/lucene/core/src/java/org/apache/lucene/search/similarities/BM25Similarity.java @@ -122,9 +122,9 @@ protected float avgFieldLength(CollectionStatistics collectionStats) { * *

The default implementation uses: * - *

+   * 

    * idf(docFreq, docCount);
-   * 
+ *
* * Note that {@link CollectionStatistics#docCount()} is used instead of {@link * org.apache.lucene.index.IndexReader#numDocs() IndexReader#numDocs()} because also {@link diff --git a/lucene/core/src/java/org/apache/lucene/search/similarities/IndriDirichletSimilarity.java b/lucene/core/src/java/org/apache/lucene/search/similarities/IndriDirichletSimilarity.java index b3994c5dc46e..91fb1395cb16 100644 --- a/lucene/core/src/java/org/apache/lucene/search/similarities/IndriDirichletSimilarity.java +++ b/lucene/core/src/java/org/apache/lucene/search/similarities/IndriDirichletSimilarity.java @@ -24,10 +24,10 @@ * Bayesian smoothing using Dirichlet priors as implemented in the Indri Search engine * (http://www.lemurproject.org/indri.php). Indri Dirichelet Smoothing! * - *
+ * 

  * tf_E + mu*P(t|D) P(t|E)= documentLength + documentMu
  * mu*P(t|C) + tf_D where P(t|D)= doclen + mu
- * 
+ *
* *

A larger value for mu, produces more smoothing. Smoothing is most important for short * documents where the probabilities are more granular. diff --git a/lucene/core/src/java/org/apache/lucene/search/similarities/Similarity.java b/lucene/core/src/java/org/apache/lucene/search/similarities/Similarity.java index 721ce2a73113..d7f73975b159 100644 --- a/lucene/core/src/java/org/apache/lucene/search/similarities/Similarity.java +++ b/lucene/core/src/java/org/apache/lucene/search/similarities/Similarity.java @@ -242,11 +242,11 @@ public interface BulkSimScorer { * Bulk computation of scores. For each index {@code i} in [0, size), scores[i] is computed as * score(freqs[i], norms[i]). The default implementation does the following: * - *

+     * 

      * for (int i = 0; i < size; ++i) {
      *   scores[i] = score(freqs[i], norms[i]);
      * }
-     * 
+ *
* *

NOTE: It is legal to pass the same {@code freqs} and {@code scores} arrays. */ diff --git a/lucene/core/src/java/org/apache/lucene/search/similarities/TFIDFSimilarity.java b/lucene/core/src/java/org/apache/lucene/search/similarities/TFIDFSimilarity.java index 9af5b4877c38..47385de65833 100644 --- a/lucene/core/src/java/org/apache/lucene/search/similarities/TFIDFSimilarity.java +++ b/lucene/core/src/java/org/apache/lucene/search/similarities/TFIDFSimilarity.java @@ -353,9 +353,9 @@ public TFIDFSimilarity(boolean discountOverlaps) { * *

The default implementation uses: * - *

+   * 

    * idf(docFreq, docCount);
-   * 
+ *
* * Note that {@link CollectionStatistics#docCount()} is used instead of {@link * org.apache.lucene.index.IndexReader#numDocs() IndexReader#numDocs()} because also {@link diff --git a/lucene/core/src/java/org/apache/lucene/store/Lock.java b/lucene/core/src/java/org/apache/lucene/store/Lock.java index cdacdb5c5559..504043b15eca 100644 --- a/lucene/core/src/java/org/apache/lucene/store/Lock.java +++ b/lucene/core/src/java/org/apache/lucene/store/Lock.java @@ -24,11 +24,11 @@ * *

Typical use might look like: * - *

+ * 

  *   try (final Lock lock = directory.obtainLock("my.lock")) {
  *     // ... code to execute while locked ...
  *   }
- * 
+ *
* * @see Directory#obtainLock(String) * @lucene.internal diff --git a/lucene/core/src/java/org/apache/lucene/store/NRTCachingDirectory.java b/lucene/core/src/java/org/apache/lucene/store/NRTCachingDirectory.java index 8bbb4564285f..22010024fd8e 100644 --- a/lucene/core/src/java/org/apache/lucene/store/NRTCachingDirectory.java +++ b/lucene/core/src/java/org/apache/lucene/store/NRTCachingDirectory.java @@ -49,12 +49,12 @@ * *

Here's a simple example usage: * - *

+ * 

  *   Directory fsDir = FSDirectory.open(new File("/path/to/index").toPath());
  *   NRTCachingDirectory cachedFSDir = new NRTCachingDirectory(fsDir, 5.0, 60.0);
  *   IndexWriterConfig conf = new IndexWriterConfig(analyzer);
  *   IndexWriter writer = new IndexWriter(cachedFSDir, conf);
- * 
+ *
* *

This will cache all newly flushed segments, all merges whose expected segment size is {@code * <= 5 MB}, unless the net cached bytes exceed 60 MB at which point all writes will not be cached diff --git a/lucene/core/src/java/org/apache/lucene/util/AttributeImpl.java b/lucene/core/src/java/org/apache/lucene/util/AttributeImpl.java index 3a34c55b72b2..b2beaefee2a3 100644 --- a/lucene/core/src/java/org/apache/lucene/util/AttributeImpl.java +++ b/lucene/core/src/java/org/apache/lucene/util/AttributeImpl.java @@ -76,12 +76,12 @@ public final String reflectAsString(final boolean prependAttClass) { * *

Implementations look like this (e.g. for a combined attribute implementation): * - *

+   * 

    *   public void reflectWith(AttributeReflector reflector) {
    *     reflector.reflect(CharTermAttribute.class, "term", term());
    *     reflector.reflect(PositionIncrementAttribute.class, "positionIncrement", getPositionIncrement());
    *   }
-   * 
+ *
* *

If you implement this method, make sure that for each invocation, the same set of {@link * Attribute} interfaces and keys are passed to {@link AttributeReflector#reflect} in the same diff --git a/lucene/core/src/java/org/apache/lucene/util/Bits.java b/lucene/core/src/java/org/apache/lucene/util/Bits.java index 61757a1a34e4..a312a0fd4572 100644 --- a/lucene/core/src/java/org/apache/lucene/util/Bits.java +++ b/lucene/core/src/java/org/apache/lucene/util/Bits.java @@ -42,7 +42,7 @@ public interface Bits { * *

This should behave the same way as the default implementation, which does the following: * - *

+   * 

    * for (int i = bitSet.nextSetBit(0);
    *     i != DocIdSetIterator.NO_MORE_DOCS;
    *     i = i + 1 >= bitSet.length() ? DocIdSetIterator.NO_MORE_DOCS : bitSet.nextSetBit(i + 1)) {
@@ -50,7 +50,7 @@ public interface Bits {
    *     bitSet.clear(i);
    *   }
    * }
-   * 
+ *
*/ default void applyMask(FixedBitSet bitSet, int offset) { for (int i = bitSet.nextSetBit(0); diff --git a/lucene/core/src/java/org/apache/lucene/util/LongHeap.java b/lucene/core/src/java/org/apache/lucene/util/LongHeap.java index 3f1115e09076..9d467ed35bd9 100644 --- a/lucene/core/src/java/org/apache/lucene/util/LongHeap.java +++ b/lucene/core/src/java/org/apache/lucene/util/LongHeap.java @@ -132,16 +132,16 @@ public long pop() { * Replace the top of the pq with {@code newTop}. Should be called when the top value changes. * Still log(n) worst case, but it's at least twice as fast to * - *
+   * 

    * pq.updateTop(value);
-   * 
+ *
* * instead of * - *
+   * 

    * pq.pop();
    * pq.push(value);
-   * 
+ *
* * Calling this method on an empty LongHeap has no visible effect. * diff --git a/lucene/core/src/java/org/apache/lucene/util/PriorityQueue.java b/lucene/core/src/java/org/apache/lucene/util/PriorityQueue.java index 799db0700f43..ec89bb1b2696 100644 --- a/lucene/core/src/java/org/apache/lucene/util/PriorityQueue.java +++ b/lucene/core/src/java/org/apache/lucene/util/PriorityQueue.java @@ -91,7 +91,7 @@ public PriorityQueue(int maxSize, LessThan lessThan) { *

If this method is extended to return a non-null value, then the following usage pattern is * recommended: * - *

+   * 

    * PriorityQueue<MyObject> pq = new PriorityQueue<MyObject>(numHits, lessThan);
    * // save the 'top' element, which is guaranteed to not be null.
    * MyObject pqTop = pq.top();
@@ -100,7 +100,7 @@ public PriorityQueue(int maxSize, LessThan lessThan) {
    * // you've verified it is better), it is as simple as:
    * pqTop.change().
    * pqTop = pq.updateTop();
-   * 
+ *
* * NOTE: the given supplier will be called {@code maxSize} times, relying on a new object * to be returned and will not check if it's null again. Therefore you should ensure any call to @@ -236,18 +236,18 @@ public final T pop() { * Should be called when the Object at top changes values. Still log(n) worst case, but it's at * least twice as fast to * - *
+   * 

    * pq.top().change();
    * pq.updateTop();
-   * 
+ *
* * instead of * - *
+   * 

    * o = pq.pop();
    * o.change();
    * pq.push(o);
-   * 
+ *
* * @return the new 'top' element. */ diff --git a/lucene/core/src/java/org/apache/lucene/util/QueryBuilder.java b/lucene/core/src/java/org/apache/lucene/util/QueryBuilder.java index b95ba73f449c..b7805df5f07f 100644 --- a/lucene/core/src/java/org/apache/lucene/util/QueryBuilder.java +++ b/lucene/core/src/java/org/apache/lucene/util/QueryBuilder.java @@ -45,12 +45,12 @@ * *

Example usage: * - *

+ * 

  *   QueryBuilder builder = new QueryBuilder(analyzer);
  *   Query a = builder.createBooleanQuery("body", "just a test");
  *   Query b = builder.createPhraseQuery("body", "another test");
  *   Query c = builder.createMinShouldMatchQuery("body", "another test", 0.5f);
- * 
+ *
* *

This can also be used as a subclass for query parsers to make it easier to interact with the * analysis chain. Factory methods such as {@code newTermQuery} are provided so that the generated diff --git a/lucene/core/src/java/org/apache/lucene/util/SentinelIntSet.java b/lucene/core/src/java/org/apache/lucene/util/SentinelIntSet.java index 29a214752e18..a765c50f2abd 100644 --- a/lucene/core/src/java/org/apache/lucene/util/SentinelIntSet.java +++ b/lucene/core/src/java/org/apache/lucene/util/SentinelIntSet.java @@ -28,13 +28,13 @@ * *

To iterate over the integers held in this set, simply use code like this: * - *

+ * 

  * SentinelIntSet set = ...
  * for (int v : set.keys) {
  *   if (v == set.emptyVal)
  *     continue;
  *   //use v...
- * }
+ * }
* * @lucene.internal */ diff --git a/lucene/core/src/java/org/apache/lucene/util/TernaryLongHeap.java b/lucene/core/src/java/org/apache/lucene/util/TernaryLongHeap.java index e10bfaecb8ae..75c3e7f0d117 100644 --- a/lucene/core/src/java/org/apache/lucene/util/TernaryLongHeap.java +++ b/lucene/core/src/java/org/apache/lucene/util/TernaryLongHeap.java @@ -132,16 +132,16 @@ public long pop() { * Replace the top of the pq with {@code newTop}. Should be called when the top value changes. * Still log(n) worst case, but it's at least twice as fast to * - *
+   * 

    * pq.updateTop(value);
-   * 
+ *
* *

instead of * - *

+   * 

    * pq.pop();
    * pq.push(value);
-   * 
+ *
* *

Calling this method on an empty TernaryLongHeap has no visible effect. * diff --git a/lucene/core/src/java/org/apache/lucene/util/VectorUtil.java b/lucene/core/src/java/org/apache/lucene/util/VectorUtil.java index 3428899bb25c..061e253bde90 100644 --- a/lucene/core/src/java/org/apache/lucene/util/VectorUtil.java +++ b/lucene/core/src/java/org/apache/lucene/util/VectorUtil.java @@ -245,12 +245,12 @@ public static int int4DotProduct(byte[] a, byte[] b) { * Dot product computed over uint4 (values between [0,15]) bytes. The second vector is considered * "packed" (i.e. every byte representing two values). The following packing is assumed: * - *

+   * 

    *   packed[0] = (raw[0] * 16) | raw[packed.length];
    *   packed[1] = (raw[1] * 16) | raw[packed.length + 1];
    *   ...
    *   packed[packed.length - 1] = (raw[packed.length - 1] * 16) | raw[2 * packed.length - 1];
-   * 
+ *
* * @param unpacked the unpacked vector, of even length * @param packed the packed vector, of length {@code (unpacked.length + 1) / 2} diff --git a/lucene/core/src/java/org/apache/lucene/util/VirtualMethod.java b/lucene/core/src/java/org/apache/lucene/util/VirtualMethod.java index 7c6c357862e9..32ee94a22994 100644 --- a/lucene/core/src/java/org/apache/lucene/util/VirtualMethod.java +++ b/lucene/core/src/java/org/apache/lucene/util/VirtualMethod.java @@ -33,12 +33,12 @@ *

Define static final fields in the base class ({@code BaseClass}), where the * old and new method are declared: * - *

+ * 

  *  static final VirtualMethod<BaseClass> newMethod =
  *   new VirtualMethod<BaseClass>(BaseClass.class, "newName", parameters...);
  *  static final VirtualMethod<BaseClass> oldMethod =
  *   new VirtualMethod<BaseClass>(BaseClass.class, "oldName", parameters...);
- * 
+ *
* *

This enforces the singleton status of these objects, as the maintenance of the cache would be * too costly else. If you try to create a second instance of for the same method/{@code baseClass} @@ -47,7 +47,7 @@ *

To detect if e.g. the old method was overridden by a more far subclass on the inheritance path * to the current instance's class, use a non-static field: * - *

+ * 

  *  final boolean isDeprecatedMethodOverridden =
  *   AccessController.doPrivileged((PrivilegedAction<Boolean>) () ->
  *    (oldMethod.getImplementationDistance(this.getClass()) > newMethod.getImplementationDistance(this.getClass())));
@@ -56,7 +56,7 @@
  *  final boolean isDeprecatedMethodOverridden =
  *   AccessController.doPrivileged((PrivilegedAction<Boolean>) () ->
  *    VirtualMethod.compareImplementationDistance(this.getClass(), oldMethod, newMethod) > 0);
- * 
+ *
* *

{@link #getImplementationDistance} returns the distance of the subclass that overrides this * method. The one with the larger distance should be used preferable. This way also more diff --git a/lucene/core/src/java/org/apache/lucene/util/fst/FSTCompiler.java b/lucene/core/src/java/org/apache/lucene/util/fst/FSTCompiler.java index 43a3bfc62631..8c9b2ad0c80c 100644 --- a/lucene/core/src/java/org/apache/lucene/util/fst/FSTCompiler.java +++ b/lucene/core/src/java/org/apache/lucene/util/fst/FSTCompiler.java @@ -978,20 +978,20 @@ private boolean validOutput(T output) { *

- If a FSTReader DataOutput was used, such as the one returned by {@link * #getOnHeapReaderWriter(int)} * - *

+   * 

    *     fstMetadata = fstCompiler.compile();
    *     fst = FST.fromFSTReader(fstMetadata, fstCompiler.getFSTReader());
-   * 
+ *
* *

- If a non-FSTReader DataOutput was used, such as {@link * org.apache.lucene.store.IndexOutput}, you need to first create the corresponding {@link * org.apache.lucene.store.DataInput}, such as {@link org.apache.lucene.store.IndexInput} then * pass it to the FST construct * - *

+   * 

    *     fstMetadata = fstCompiler.compile();
    *     fst = new FST<>(fstMetadata, dataInput, new OffHeapFSTStore());
-   * 
+ *
*/ public FST.FSTMetadata compile() throws IOException { diff --git a/lucene/core/src/java/org/apache/lucene/util/fst/Util.java b/lucene/core/src/java/org/apache/lucene/util/fst/Util.java index c0f4bfeb5726..70ae6703be6b 100644 --- a/lucene/core/src/java/org/apache/lucene/util/fst/Util.java +++ b/lucene/core/src/java/org/apache/lucene/util/fst/Util.java @@ -476,11 +476,11 @@ public static TopResults shortestPaths( * Dumps an {@link FST} to a GraphViz's dot language description for visualization. * Example of use: * - *
+   * 

    * PrintWriter pw = new PrintWriter("out.dot");
    * Util.toDot(fst, pw, true, true);
    * pw.close();
-   * 
+ *
* * and then, from command line: * diff --git a/lucene/core/src/java/org/apache/lucene/util/fst/package-info.java b/lucene/core/src/java/org/apache/lucene/util/fst/package-info.java index 1f80869f6995..f75a6084de95 100644 --- a/lucene/core/src/java/org/apache/lucene/util/fst/package-info.java +++ b/lucene/core/src/java/org/apache/lucene/util/fst/package-info.java @@ -34,7 +34,7 @@ * *

FST Construction example: * - *

+ * 

  *     // Input values (keys). These must be provided to Builder in Unicode code point (UTF8 or UTF32) sorted order.
  *     // Note that sorting by Java's String.compareTo, which is UTF16 sorted order, is not correct and can lead to
  *     // exceptions while building the FST:
@@ -50,26 +50,26 @@
  *       fstCompiler.add(Util.toIntsRef(scratchBytes.toBytesRef(), scratchInts), outputValues[i]);
  *     }
  *     FST<Long> fst = FST.fromFSTReader(fstCompiler.compile(), fstCompiler.getFSTReader());
- * 
+ *
* * Retrieval by key: * - *
+ * 

  *     Long value = Util.get(fst, new BytesRef("dog"));
  *     System.out.println(value); // 7
- * 
+ *
* * Retrieval by value: * - *
+ * 

  *     // Only works because outputs are also in sorted order
  *     IntsRef key = Util.getByOutput(fst, 12);
  *     System.out.println(Util.toBytesRef(key, scratchBytes).utf8ToString()); // dogs
- * 
+ *
* * Iterate over key-value pairs in sorted order: * - *
+ * 

  *     // Like TermsEnum, this also supports seeking (advance)
  *     BytesRefFSTEnum<Long> iterator = new BytesRefFSTEnum<Long>(fst);
  *     while (iterator.next() != null) {
@@ -77,11 +77,11 @@
  *       System.out.println(mapEntry.input.utf8ToString());
  *       System.out.println(mapEntry.output);
  *     }
- * 
+ *
* * N-shortest paths by weight: * - *
+ * 

  *     Comparator<Long> comparator = new Comparator<Long>() {
  *       public int compare(Long left, Long right) {
  *         return left.compareTo(right);
@@ -93,6 +93,6 @@
  *     System.out.println(paths.topN.get(0).output); // 5
  *     System.out.println(Util.toBytesRef(paths.topN.get(1).input, scratchBytes).utf8ToString()); // dog
  *     System.out.println(paths.topN.get(1).output); // 7
- * 
+ *
*/ package org.apache.lucene.util.fst; diff --git a/lucene/core/src/java/org/apache/lucene/util/packed/DirectReader.java b/lucene/core/src/java/org/apache/lucene/util/packed/DirectReader.java index c4390e4d9695..6fede3a35f4d 100644 --- a/lucene/core/src/java/org/apache/lucene/util/packed/DirectReader.java +++ b/lucene/core/src/java/org/apache/lucene/util/packed/DirectReader.java @@ -26,14 +26,14 @@ * *

Example usage: * - *

+ * 

  *   int bitsPerValue = DirectWriter.bitsRequired(100);
  *   IndexInput in = dir.openInput("packed", IOContext.DEFAULT);
  *   LongValues values = DirectReader.getInstance(in.randomAccessSlice(start, end), bitsPerValue);
  *   for (int i = 0; i < numValues; i++) {
  *     long value = values.get(i);
  *   }
- * 
+ *
* * @see DirectWriter */ diff --git a/lucene/core/src/java/org/apache/lucene/util/packed/DirectWriter.java b/lucene/core/src/java/org/apache/lucene/util/packed/DirectWriter.java index c9d2cbe52db6..d2a2e1ab2346 100644 --- a/lucene/core/src/java/org/apache/lucene/util/packed/DirectWriter.java +++ b/lucene/core/src/java/org/apache/lucene/util/packed/DirectWriter.java @@ -29,7 +29,7 @@ *

Unlike PackedInts, it optimizes for read i/o operations and supports > 2B values. Example * usage: * - *

+ * 

  *   int bitsPerValue = DirectWriter.bitsRequired(100); // values up to and including 100
  *   IndexOutput output = dir.createOutput("packed", IOContext.DEFAULT);
  *   DirectWriter writer = DirectWriter.getInstance(output, numberOfValues, bitsPerValue);
@@ -38,7 +38,7 @@
  *   }
  *   writer.finish();
  *   output.close();
- * 
+ *
* * @see DirectReader */ diff --git a/lucene/core/src/java/org/apache/lucene/util/quantization/OptimizedScalarQuantizer.java b/lucene/core/src/java/org/apache/lucene/util/quantization/OptimizedScalarQuantizer.java index 23e2e7ab19f1..179799ff83af 100644 --- a/lucene/core/src/java/org/apache/lucene/util/quantization/OptimizedScalarQuantizer.java +++ b/lucene/core/src/java/org/apache/lucene/util/quantization/OptimizedScalarQuantizer.java @@ -370,11 +370,11 @@ public static int discretize(int value, int bucket) { * *

This bit decomposition for fast bitwise SIMD operations was first proposed in: * - *

+   * 

    *   Gao, Jianyang, and Cheng Long. "RaBitQ: Quantizing High-
    *   Dimensional Vectors with a Theoretical Error Bound for Approximate Nearest Neighbor Search."
    *   Proceedings of the ACM on Management of Data 2, no. 3 (2024): 1-27.
-   *   
+ *
* * @param q the query vector, assumed to be half-byte quantized with values between 0 and 15 * @param quantQueryByte the byte array to store the transposed query vector diff --git a/lucene/core/src/java/org/apache/lucene/util/quantization/ScalarQuantizer.java b/lucene/core/src/java/org/apache/lucene/util/quantization/ScalarQuantizer.java index cacc86e2ef1e..c4eb6bf2c776 100644 --- a/lucene/core/src/java/org/apache/lucene/util/quantization/ScalarQuantizer.java +++ b/lucene/core/src/java/org/apache/lucene/util/quantization/ScalarQuantizer.java @@ -46,30 +46,30 @@ * normalization. Given a float vector `v` and a confidenceInterval `q` we can calculate the * quantiles of the vector values [minQuantile, maxQuantile]. * - *
+ * 

  *   byte = (float - minQuantile) * 127/(maxQuantile - minQuantile)
  *   float = (maxQuantile - minQuantile)/127 * byte + minQuantile
- * 
+ *
* *

This then means to multiply two float values together (e.g. dot_product) we can do the * following: * - *

+ * 

  *   float1 * float2 ~= (byte1 * (maxQuantile - minQuantile)/127 + minQuantile) * (byte2 * (maxQuantile - minQuantile)/127 + minQuantile)
  *   float1 * float2 ~= (byte1 * byte2 * (maxQuantile - minQuantile)^2)/(127^2) + (byte1 * minQuantile * (maxQuantile - minQuantile)/127) + (byte2 * minQuantile * (maxQuantile - minQuantile)/127) + minQuantile^2
  *   let alpha = (maxQuantile - minQuantile)/127
  *   float1 * float2 ~= (byte1 * byte2 * alpha^2) + (byte1 * minQuantile * alpha) + (byte2 * minQuantile * alpha) + minQuantile^2
- * 
+ *
* *

The expansion for square distance is much simpler: * - *

+ * 

  *  square_distance = (float1 - float2)^2
  *  (float1 - float2)^2 ~= (byte1 * alpha + minQuantile - byte2 * alpha - minQuantile)^2
  *  = (alpha*byte1 + minQuantile)^2 + (alpha*byte2 + minQuantile)^2 - 2*(alpha*byte1 + minQuantile)(alpha*byte2 + minQuantile)
  *  this can be simplified to:
  *  = alpha^2 (byte1 - byte2)^2
- * 
+ *
*/ public class ScalarQuantizer { diff --git a/lucene/expressions/src/java/org/apache/lucene/expressions/Expression.java b/lucene/expressions/src/java/org/apache/lucene/expressions/Expression.java index c7710305f335..e32e58d4304b 100644 --- a/lucene/expressions/src/java/org/apache/lucene/expressions/Expression.java +++ b/lucene/expressions/src/java/org/apache/lucene/expressions/Expression.java @@ -28,7 +28,7 @@ * *

Example that sorts based on an expression: * - *

+ * 

  *   // compile an expression:
  *   Expression expr = JavascriptCompiler.compile("sqrt(_score) + ln(popularity)");
  *
@@ -41,11 +41,11 @@
  *   Sort sort = new Sort(expr.getSortField(bindings, true));
  *   Query query = new TermQuery(new Term("body", "contents"));
  *   searcher.search(query, 10, sort);
- * 
+ *
* *

Example that modifies the scores produced by the query: * - *

+ * 

  *   // compile an expression:
  *   Expression expr = JavascriptCompiler.compile("sqrt(_score) + ln(popularity)");
  *
@@ -60,7 +60,7 @@
  *       new TermQuery(new Term("body", "contents")),
  *       expr.getDoubleValuesSource(bindings));
  *   searcher.search(query, 10);
- * 
+ *
* * @see JavascriptCompiler#compile * @lucene.experimental diff --git a/lucene/expressions/src/java/org/apache/lucene/expressions/SimpleBindings.java b/lucene/expressions/src/java/org/apache/lucene/expressions/SimpleBindings.java index dd24285e662a..42947dfe9f59 100644 --- a/lucene/expressions/src/java/org/apache/lucene/expressions/SimpleBindings.java +++ b/lucene/expressions/src/java/org/apache/lucene/expressions/SimpleBindings.java @@ -29,7 +29,7 @@ * *

Example usage: * - *

+ * 

  *   SimpleBindings bindings = new SimpleBindings();
  *   // document's text relevance score
  *   bindings.add("_score", DoubleValuesSource.SCORES);
@@ -40,7 +40,7 @@
  *
  *   // create a sort field in reverse order
  *   Sort sort = new Sort(expr.getSortField(bindings, true));
- * 
+ *
* * @lucene.experimental */ diff --git a/lucene/expressions/src/java/org/apache/lucene/expressions/js/JavascriptCompiler.java b/lucene/expressions/src/java/org/apache/lucene/expressions/js/JavascriptCompiler.java index dfd53f0caa64..d8c1c6a663eb 100644 --- a/lucene/expressions/src/java/org/apache/lucene/expressions/js/JavascriptCompiler.java +++ b/lucene/expressions/src/java/org/apache/lucene/expressions/js/JavascriptCompiler.java @@ -73,9 +73,9 @@ * *

Example: * - *

+ * 

  *   Expression foo = JavascriptCompiler.compile("((0.3*popularity)/10.0)+(0.7*score)");
- * 
+ *
* *

See the {@link org.apache.lucene.expressions.js package documentation} for the supported * syntax and default functions. @@ -83,7 +83,7 @@ *

You can compile with an alternate set of functions via {@link #compile(String, Map)}. For * example: * - *

+ * 

  *   Map<String,MethodHandle> functions = new HashMap<>();
  *   // add all the default functions
  *   functions.putAll(JavascriptCompiler.DEFAULT_FUNCTIONS);
@@ -93,7 +93,7 @@
  *   // call compile with customized function map
  *   Expression foo = JavascriptCompiler.compile("cbrt(score)+ln(popularity)",
  *                                               functions);
- * 
+ *
* *

It is possible to pass any {@link MethodHandle} as function that only takes {@code double} * parameters and returns a {@code double}. The method does not need to be public, it just needs to diff --git a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/OrdinalMappingLeafReader.java b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/OrdinalMappingLeafReader.java index 13be69535772..80edfaf12dc7 100644 --- a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/OrdinalMappingLeafReader.java +++ b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/OrdinalMappingLeafReader.java @@ -39,7 +39,7 @@ * *

For re-mapping the ordinals during index merge, do the following: * - *

+ * 

  * // merge the old taxonomy with the new one.
  * OrdinalMap map = new MemoryOrdinalMap();
  * DirectoryTaxonomyWriter.addTaxonomy(srcTaxoDir, map);
@@ -56,7 +56,7 @@
  * }
  * writer.addIndexes(new MultiReader(wrappedLeaves));
  * writer.commit();
- * 
+ *
* * @lucene.experimental */ diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/package-info.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/package-info.java index 5c1b1247e0a5..72bdc9dcc5eb 100644 --- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/package-info.java +++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/package-info.java @@ -78,7 +78,7 @@ *

Typical usage for the generic two-pass grouping search looks like this using the grouping * convenience utility (optionally using caching for the second pass search): * - *

+ * 

  *   GroupingSearch groupingSearch = new GroupingSearch("author");
  *   groupingSearch.setGroupSort(groupSort);
  *   groupingSearch.setFillSortFields(fillFields);
@@ -99,13 +99,13 @@
  *   if (requiredTotalGroupCount) {
  *     int totalGroupCount = result.totalGroupCount;
  *   }
- * 
+ *
* *

To use the single-pass BlockGroupingCollector, first, at indexing time, you must * ensure all docs in each group are added as a block, and you have some way to find the last * document of each group. One simple way to do this is to add a marker binary field: * - *

+ * 

  *   // Create Documents from your source:
  *   List<Document> oneGroup = ...;
  *
@@ -119,22 +119,22 @@
  *   // example, each group could have a "groupID" field, with the same
  *   // value for all docs in this group:
  *   writer.addDocuments(oneGroup);
- * 
+ *
* * Then, at search time: * - *
+ * 

  *   Query groupEndDocs = new TermQuery(new Term("groupEnd", "x"));
  *   BlockGroupingCollector c = new BlockGroupingCollector(groupSort, groupOffset+topNGroups, needsScores, groupEndDocs);
  *   s.search(new TermQuery(new Term("content", searchTerm)), c);
  *   TopGroups groupsResult = c.getTopGroups(withinGroupSort, groupOffset, docOffset, docOffset+docsPerGroup, fillFields);
  *
  *   // Render groupsResult...
- * 
+ *
* * Or alternatively use the GroupingSearch convenience utility: * - *
+ * 

  *   // Per search:
  *   GroupingSearch groupingSearch = new GroupingSearch(groupEndDocs);
  *   groupingSearch.setGroupSort(groupSort);
@@ -143,7 +143,7 @@
  *   TopGroups groupsResult = groupingSearch.search(indexSearcher, query, groupOffset, groupLimit);
  *
  *   // Render groupsResult...
- * 
+ *
* * Note that the groupValue of each GroupDocs will be null, * so if you need to present this value you'll have to separately retrieve it (for example using @@ -156,7 +156,7 @@ * the GroupingSearch convenience utility, but when if one only wants to compute the * most relevant documents per group it is better to just use the collector as done here below. * - *
+ * 

  *   TermGroupSelector grouper = new TermGroupSelector(groupField);
  *   AllGroupHeadsCollector c = AllGroupHeadsCollector.newCollector(grouper, sortWithinGroup);
  *   s.search(new TermQuery(new Term("content", searchTerm)), c);
@@ -165,6 +165,6 @@
  *   // Return all group heads as FixedBitSet.
  *   int maxDoc = s.maxDoc();
  *   FixedBitSet groupHeadsBitSet = c.retrieveGroupHeads(maxDoc)
- * 
+ *
*/ package org.apache.lucene.search.grouping; diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/package-info.java b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/package-info.java index a9ab1cafcbdf..4ba005164a7d 100644 --- a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/package-info.java +++ b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/package-info.java @@ -25,7 +25,7 @@ * *

Example Usage

* - *
+ * 

  * //... Above, create documents with two fields, one with term vectors (tv) and one without (notv)
  * IndexSearcher searcher = new IndexSearcher(directory);
  * QueryParser parser = new QueryParser("notv", analyzer);
@@ -57,7 +57,7 @@
  *     }
  *     System.out.println("-------------");
  *   }
- * 
+ *
* *

New features 06/02/2005

* diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/UnifiedHighlighter.java b/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/UnifiedHighlighter.java index 0e24931b4bd7..2abfaceccda5 100644 --- a/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/UnifiedHighlighter.java +++ b/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/UnifiedHighlighter.java @@ -775,13 +775,13 @@ public String[] highlight(String field, Query query, TopDocs topDocs, int maxPas * *

Conceptually, this behaves as a more efficient form of: * - *

+   * 

    * Map m = new HashMap();
    * for (String field : fields) {
    * m.put(field, highlight(field, query, topDocs));
    * }
    * return m;
-   * 
+ *
* * @param fields field names to highlight. Must have a stored string value. * @param query query to highlight. @@ -805,13 +805,13 @@ public Map highlightFields(String[] fields, Query query, TopDo * *

Conceptually, this behaves as a more efficient form of: * - *

+   * 

    * Map m = new HashMap();
    * for (String field : fields) {
    * m.put(field, highlight(field, query, topDocs, maxPassages));
    * }
    * return m;
-   * 
+ *
* * @param fields field names to highlight. Must have a stored string value. * @param query query to highlight. diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/SingleFragListBuilder.java b/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/SingleFragListBuilder.java index 06ae1ee7775d..10436504a809 100644 --- a/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/SingleFragListBuilder.java +++ b/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/SingleFragListBuilder.java @@ -27,10 +27,10 @@ * object. Typical use case of this class is that you can get an entire field contents by using both * of this class and {@link SimpleFragmentsBuilder}. * - *
+ * 

  * FastVectorHighlighter h = new FastVectorHighlighter( true, true,
  *   new SingleFragListBuilder(), new SimpleFragmentsBuilder() );
- * 
+ *
*/ public class SingleFragListBuilder implements FragListBuilder { diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/package-info.java b/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/package-info.java index fa659b5d6fd9..95d63ee6113a 100644 --- a/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/package-info.java +++ b/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/package-info.java @@ -71,14 +71,14 @@ * org.apache.lucene.search.vectorhighlight.FieldQuery.QueryPhraseMap} from the user query. * QueryPhraseMap consists of the following members: * - *
+ * 

  * public class QueryPhraseMap {
  *   boolean terminal;
  *   int slop;   // valid if terminal == true and phraseHighlight == true
  *   float boost;  // valid if terminal == true
  *   Map<String, QueryPhraseMap> subMap;
  * }
- * 
+ *
* *

QueryPhraseMap has subMap. The key of the subMap is a term text in the user query * and the value is a subsequent QueryPhraseMap. If the query is a term (not phrase), @@ -157,7 +157,7 @@ *

The calculation for each FieldFragList.WeightedFragInfo.totalBoost (weight) * depends on the implementation of FieldFragList.add( ... ): * - *

+ * 

  *   public void add( int startOffset, int endOffset, List<WeightedPhraseInfo> phraseInfoList ) {
  *     float totalBoost = 0;
  *     List<SubInfo> subInfos = new ArrayList<SubInfo>();
@@ -168,16 +168,16 @@
  *     getFragInfos().add( new WeightedFragInfo( startOffset, endOffset, subInfos, totalBoost ) );
  *   }
  *
- * 
+ *
* * The used implementation of FieldFragList is noted in * BaseFragListBuilder.createFieldFragList( ... ): * - *
+ * 

  *   public FieldFragList createFieldFragList( FieldPhraseList fieldPhraseList, int fragCharSize ){
  *     return createFieldFragList( fieldPhraseList, new SimpleFieldFragList( fragCharSize ), fragCharSize );
  *   }
- * 
+ *
* *

Currently there are basically two approaches available: * diff --git a/lucene/join/src/java/org/apache/lucene/search/join/DiversifyingChildrenByteKnnVectorQuery.java b/lucene/join/src/java/org/apache/lucene/search/join/DiversifyingChildrenByteKnnVectorQuery.java index b77cf14bb739..877d004d5c78 100644 --- a/lucene/join/src/java/org/apache/lucene/search/join/DiversifyingChildrenByteKnnVectorQuery.java +++ b/lucene/join/src/java/org/apache/lucene/search/join/DiversifyingChildrenByteKnnVectorQuery.java @@ -45,13 +45,13 @@ * top documents returned are the child document ids and the calculated scores. Here is how to use * this in conjunction with {@link ToParentBlockJoinQuery}. * - *

+ * 

  *   Query knnQuery = new DiversifyingChildrenByteKnnVectorQuery(fieldName, queryVector, ...);
  *   // Rewrite executes kNN search and collects nearest children docIds and their scores
  *   Query rewrittenKnnQuery = searcher.rewrite(knnQuery);
  *   // Join the scored children docs with their parents and score the parents
  *   Query childrenToParents = new ToParentBlockJoinQuery(rewrittenKnnQuery, parentsFilter, ScoreMode.MAX);
- * 
+ *
*/ public class DiversifyingChildrenByteKnnVectorQuery extends KnnByteVectorQuery { private static final TopDocs NO_RESULTS = TopDocsCollector.EMPTY_TOPDOCS; diff --git a/lucene/join/src/java/org/apache/lucene/search/join/DiversifyingChildrenFloatKnnVectorQuery.java b/lucene/join/src/java/org/apache/lucene/search/join/DiversifyingChildrenFloatKnnVectorQuery.java index 22f5333102e9..e2d6179ec426 100644 --- a/lucene/join/src/java/org/apache/lucene/search/join/DiversifyingChildrenFloatKnnVectorQuery.java +++ b/lucene/join/src/java/org/apache/lucene/search/join/DiversifyingChildrenFloatKnnVectorQuery.java @@ -45,13 +45,13 @@ * The top documents returned are the child document ids and the calculated scores. Here is how to * use this in conjunction with {@link ToParentBlockJoinQuery}. * - *
+ * 

  *   Query knnQuery = new DiversifyingChildrenFloatKnnVectorQuery(fieldName, queryVector, ...);
  *   // Rewrite executes kNN search and collects nearest children docIds and their scores
  *   Query rewrittenKnnQuery = searcher.rewrite(knnQuery);
  *   // Join the scored children docs with their parents and score the parents
  *   Query childrenToParents = new ToParentBlockJoinQuery(rewrittenKnnQuery, parentsFilter, ScoreMode.MAX);
- * 
+ *
*/ public class DiversifyingChildrenFloatKnnVectorQuery extends KnnFloatVectorQuery { private static final TopDocs NO_RESULTS = TopDocsCollector.EMPTY_TOPDOCS; diff --git a/lucene/join/src/java/org/apache/lucene/search/join/package-info.java b/lucene/join/src/java/org/apache/lucene/search/join/package-info.java index 949ca19a58ae..72a5c03838bb 100644 --- a/lucene/join/src/java/org/apache/lucene/search/join/package-info.java +++ b/lucene/join/src/java/org/apache/lucene/search/join/package-info.java @@ -43,7 +43,7 @@ * should be used after your main query has been executed. For each hit execute the {@link * org.apache.lucene.search.join.ParentChildrenBlockJoinQuery} query * - *
+ * 

  *   TopDocs results = searcher.search(mainQuery, 10);
  *   for (int i = 0; i < results.scoreDocs.length; i++) {
  *     ScoreDoc scoreDoc = results.scoreDocs[i];
@@ -54,7 +54,7 @@
  *     TopDocs topChildResults = searcher.search(parentChildrenBlockJoinQuery, 3);
  *     // Process top child hits...
  *   }
- * 
+ *
* *

To map/join in the opposite direction, use {@link * org.apache.lucene.search.join.ToChildBlockJoinQuery}. This wraps any query matching parent @@ -89,7 +89,7 @@ * org.apache.lucene.search.Query, org.apache.lucene.search.IndexSearcher, * org.apache.lucene.search.join.ScoreMode) JoinUtil.createJoinQuery()} : * - *

+ * 

  *   String fromField = "from"; // Name of the from field
  *   boolean multipleValuesPerDocument = false; // Set only to true in the case when your fromField has multiple values per document in your index
  *   String toField = "to"; // Name of the to field
@@ -99,6 +99,6 @@
  *   Query joinQuery = JoinUtil.createJoinQuery(fromField, multipleValuesPerDocument, toField, fromQuery, fromSearcher, scoreMode);
  *   TopDocs topDocs = toSearcher.search(joinQuery, 10); // Note: toSearcher can be the same as the fromSearcher
  *   // Render topDocs...
- * 
+ *
*/ package org.apache.lucene.search.join; diff --git a/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java b/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java index 145a6c8ce6f0..62548cb6ca17 100644 --- a/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java +++ b/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java @@ -110,9 +110,9 @@ * search), this class targets fulltext search of huge numbers of queries over comparatively small * transient realtime data (prospective search). For example as in * - *
+ * 

  * float score = search(String text, Query query)
- * 
+ *
* *

Each instance can hold at most one Lucene "document", with a document containing zero or more * "fields", each field having a name and a fulltext value. The fulltext value is tokenized (split @@ -141,7 +141,7 @@ * *

Example Usage
* - *

+ * 

  * Analyzer analyzer = new SimpleAnalyzer(version);
  * MemoryIndex index = new MemoryIndex();
  * index.addField("content", "Readings about Salmons and other select Alaska fishing Manuals", analyzer);
@@ -154,11 +154,11 @@
  *     System.out.println("no match found");
  * }
  * System.out.println("indexData=" + index.toString());
- * 
+ *
* *

Example XQuery Usage * - *

+ * 

  * (: An XQuery that finds all books authored by James that have something to do with "salmon fishing manuals", sorted by relevance :)
  * declare namespace lucene = "java:nux.xom.pool.FullTextUtil";
  * declare variable $query := "+salmon~ +fish* manual~"; (: any arbitrary Lucene query can go here :)
@@ -167,7 +167,7 @@
  * let $score := lucene:match($book/abstract, $query)
  * order by $score descending
  * return $book
- * 
+ *
* *

Thread safety guarantees * diff --git a/lucene/misc/src/java/org/apache/lucene/misc/index/BPIndexReorderer.java b/lucene/misc/src/java/org/apache/lucene/misc/index/BPIndexReorderer.java index 4d2bab216b9b..773391ccd7d8 100644 --- a/lucene/misc/src/java/org/apache/lucene/misc/index/BPIndexReorderer.java +++ b/lucene/misc/src/java/org/apache/lucene/misc/index/BPIndexReorderer.java @@ -72,7 +72,7 @@ * *

Typical usage would look like this: * - *

+ * 

  * LeafReader reader; // reader to reorder
  * Directory targetDir; // Directory where to write the reordered index
  *
@@ -87,7 +87,7 @@
  *   w.addIndexes(reorderedReaderView);
  * }
  * DirectoryReader reorderedReader = DirectoryReader.open(targetDir);
- * 
+ *
* *

Note: This is a slow operation that consumes O(maxDoc + numTerms * numThreads) memory. */ diff --git a/lucene/monitor/src/java/org/apache/lucene/monitor/ConcurrentQueryLoader.java b/lucene/monitor/src/java/org/apache/lucene/monitor/ConcurrentQueryLoader.java index 1a2f8bcce3e7..e3101bc83800 100644 --- a/lucene/monitor/src/java/org/apache/lucene/monitor/ConcurrentQueryLoader.java +++ b/lucene/monitor/src/java/org/apache/lucene/monitor/ConcurrentQueryLoader.java @@ -39,14 +39,14 @@ * *

Use as follows: * - *

+ * 

  *     List<QueryError> errors = new ArrayList<>();
  *     try (ConcurrentQueryLoader loader = new ConcurrentQueryLoader(monitor, errors)) {
  *         for (MonitorQuery mq : getQueries()) {
  *             loader.add(mq);
  *         }
  *     }
- * 
+ *
* *

The Monitor's MonitorQueryParser must be thread-safe for this to work correctly. */ diff --git a/lucene/queries/src/java/org/apache/lucene/queries/mlt/MoreLikeThis.java b/lucene/queries/src/java/org/apache/lucene/queries/mlt/MoreLikeThis.java index f6d9cce75671..6fea095939b4 100644 --- a/lucene/queries/src/java/org/apache/lucene/queries/mlt/MoreLikeThis.java +++ b/lucene/queries/src/java/org/apache/lucene/queries/mlt/MoreLikeThis.java @@ -87,7 +87,7 @@ *

This class has lots of options to try to make it efficient and flexible. The simplest possible * usage is as follows. The bold fragment is specific to this class.
* - *

+ * 

  * IndexReader ir = ...
  * IndexSearcher is = ...
  *
@@ -99,7 +99,7 @@
  * // now the usual iteration thru 'topDocs' - the only thing to watch for is to make sure
  * // you ignore the doc if it matches your 'target' document, as it should be similar to itself
  *
- * 
+ *
* *

Thus you: * diff --git a/lucene/queries/src/java/org/apache/lucene/queries/spans/FieldMaskingSpanQuery.java b/lucene/queries/src/java/org/apache/lucene/queries/spans/FieldMaskingSpanQuery.java index 038a2e3742de..554a3669fc19 100644 --- a/lucene/queries/src/java/org/apache/lucene/queries/spans/FieldMaskingSpanQuery.java +++ b/lucene/queries/src/java/org/apache/lucene/queries/spans/FieldMaskingSpanQuery.java @@ -50,12 +50,12 @@ *

a SpanNearQuery with a slop of 0 can be applied across two {@link SpanTermQuery} objects as * follows: * - *

+ * 

  *    SpanQuery q1  = new SpanTermQuery(new Term("studentfirstname", "james"));
  *    SpanQuery q2  = new SpanTermQuery(new Term("studentsurname", "jones"));
  *    SpanQuery q2m = new FieldMaskingSpanQuery(q2, "studentfirstname");
  *    Query q = new SpanNearQuery(new SpanQuery[]{q1, q2m}, -1, false);
- * 
+ *
* * to search for 'studentfirstname:james studentsurname:jones' and find teacherid 1 without matching * teacherid 2 (which has a 'james' in position 0 and 'jones' in position 1). diff --git a/lucene/queries/src/java/org/apache/lucene/queries/spans/SpanMultiTermQueryWrapper.java b/lucene/queries/src/java/org/apache/lucene/queries/spans/SpanMultiTermQueryWrapper.java index b4f4b88dc5c5..3cb2c640596b 100644 --- a/lucene/queries/src/java/org/apache/lucene/queries/spans/SpanMultiTermQueryWrapper.java +++ b/lucene/queries/src/java/org/apache/lucene/queries/spans/SpanMultiTermQueryWrapper.java @@ -42,11 +42,11 @@ * *
* - *
{@code
+ * 

  * WildcardQuery wildcard = new WildcardQuery(new Term("field", "bro?n"));
- * SpanQuery spanWildcard = new SpanMultiTermQueryWrapper(wildcard);
+ * SpanQuery spanWildcard = new SpanMultiTermQueryWrapper<WildcardQuery>(wildcard);
  * // do something with spanWildcard, such as use it in a SpanFirstQuery
- * }
+ *
* *
*/ diff --git a/lucene/queries/src/java/org/apache/lucene/queries/spans/package-info.java b/lucene/queries/src/java/org/apache/lucene/queries/spans/package-info.java index 1039b1b72ce6..96f554c98d0c 100644 --- a/lucene/queries/src/java/org/apache/lucene/queries/spans/package-info.java +++ b/lucene/queries/src/java/org/apache/lucene/queries/spans/package-info.java @@ -55,7 +55,7 @@ *

For example, a span query which matches "John Kerry" within ten words of "George Bush" within * the first 100 words of the document could be constructed with: * - *

+ * 

  * SpanQuery john   = new SpanTermQuery(new Term("content", "john"));
  * SpanQuery kerry  = new SpanTermQuery(new Term("content", "kerry"));
  * SpanQuery george = new SpanTermQuery(new Term("content", "george"));
@@ -72,15 +72,15 @@
  *
  * SpanQuery johnKerryNearGeorgeBushAtStart =
  *    new SpanFirstQuery(johnKerryNearGeorgeBush, 100);
- * 
+ *
* *

Span queries may be freely intermixed with other Lucene queries. So, for example, the above * query can be restricted to documents which also use the word "iraq" with: * - *

+ * 

  * Query query = new BooleanQuery();
  * query.add(johnKerryNearGeorgeBushAtStart, true, false);
  * query.add(new TermQuery("content", "iraq"), true, false);
- * 
+ *
*/ package org.apache.lucene.queries.spans; diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/MultiFieldQueryParser.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/MultiFieldQueryParser.java index 7e5db828c6d3..2a75dfedefac 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/MultiFieldQueryParser.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/MultiFieldQueryParser.java @@ -325,15 +325,13 @@ public static Query parse(String[] queries, String[] fields, Analyzer analyzer) * *

Usage: * - *

-   * 
+   * 

    * String[] fields = {"filename", "contents", "description"};
    * BooleanClause.Occur[] flags = {BooleanClause.Occur.SHOULD,
    *                BooleanClause.Occur.MUST,
    *                BooleanClause.Occur.MUST_NOT};
    * MultiFieldQueryParser.parse("query", fields, flags, analyzer);
-   * 
-   * 
+ *
* *

The code above would construct a query: * @@ -375,16 +373,14 @@ public static Query parse( * *

Usage: * - *

-   * 
+   * 

    * String[] query = {"query1", "query2", "query3"};
    * String[] fields = {"filename", "contents", "description"};
    * BooleanClause.Occur[] flags = {BooleanClause.Occur.SHOULD,
    *                BooleanClause.Occur.MUST,
    *                BooleanClause.Occur.MUST_NOT};
    * MultiFieldQueryParser.parse(query, fields, flags, analyzer);
-   * 
-   * 
+ *
* *

The code above would construct a query: * diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/PathQueryNode.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/PathQueryNode.java index 620f0d2d680b..08dcb924d34e 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/PathQueryNode.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/PathQueryNode.java @@ -29,13 +29,13 @@ * *

Example how the text parser creates these objects: * - *

+ * 

 * List values = new ArrayList();
  * values.add(new PathQueryNode.QueryText("company", 1, 7));
  * values.add(new PathQueryNode.QueryText("USA", 9, 12));
  * values.add(new PathQueryNode.QueryText("California", 14, 23));
  * QueryNode q = new PathQueryNode(values);
- * 
+ *
*/ public class PathQueryNode extends QueryNodeImpl { diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/messages/package-info.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/messages/package-info.java index 066e55c1a6a2..9f855c7f8fa8 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/messages/package-info.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/messages/package-info.java @@ -39,7 +39,7 @@ * *

Lazy loading of Message Strings * - *

+ * 

  *   public class MessagesTestBundle extends NLS {
  *
  *     private static final String BUNDLE_NAME = MessagesTestBundle.class.getName();
@@ -71,17 +71,17 @@
  *
  *     String message1 = invalidSyntax.getLocalizedMessage();
  *     String message2 = invalidSyntax.getLocalizedMessage(Locale.JAPANESE);
- * 
+ *
* *
*
* *

Normal loading of Message Strings * - *

+ * 

  *   String message1 = NLS.getLocalizedMessage(MessagesTestBundle.Q0004E_INVALID_SYNTAX_ESCAPE_UNICODE_TRUNCATION);
  *   String message2 = NLS.getLocalizedMessage(MessagesTestBundle.Q0004E_INVALID_SYNTAX_ESCAPE_UNICODE_TRUNCATION, Locale.JAPANESE);
- * 
+ *
* *

The org.apache.lucene.messages.TestNLS junit contains several other examples. The TestNLS java * code is available from the Apache Lucene code repository. diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/QueryParserUtil.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/QueryParserUtil.java index de1c814924a2..c3d7ac01946b 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/QueryParserUtil.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/QueryParserUtil.java @@ -67,15 +67,13 @@ public static Query parse(String[] queries, String[] fields, Analyzer analyzer) * *

Usage: * - *

-   * 
+   * 

    * String[] fields = {"filename", "contents", "description"};
    * BooleanClause.Occur[] flags = {BooleanClause.Occur.SHOULD,
    *                BooleanClause.Occur.MUST,
    *                BooleanClause.Occur.MUST_NOT};
    * MultiFieldQueryParser.parse("query", fields, flags, analyzer);
-   * 
-   * 
+ *
* *

The code above would construct a query: * @@ -118,16 +116,14 @@ public static Query parse( * *

Usage: * - *

-   * 
+   * 

    * String[] query = {"query1", "query2", "query3"};
    * String[] fields = {"filename", "contents", "description"};
    * BooleanClause.Occur[] flags = {BooleanClause.Occur.SHOULD,
    *                BooleanClause.Occur.MUST,
    *                BooleanClause.Occur.MUST_NOT};
    * MultiFieldQueryParser.parse(query, fields, flags, analyzer);
-   * 
-   * 
+ *
* *

The code above would construct a query: * diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/StandardQueryParser.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/StandardQueryParser.java index f72ba10f4cd6..431a7b1a0caa 100644 --- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/StandardQueryParser.java +++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/StandardQueryParser.java @@ -50,13 +50,13 @@ * *

Typical usage, including configuration tweaks: * - *

{@code
+ * 

  * StandardQueryParser qpHelper = new StandardQueryParser();
  * StandardQueryConfigHandler config =  qpHelper.getQueryConfigHandler();
  * config.setAllowLeadingWildcard(true);
  * config.setAnalyzer(new WhitespaceAnalyzer());
  * Query query = qpHelper.parse("apache AND lucene", "defaultField");
- * }
+ *
* *

Supported query syntax

* @@ -245,10 +245,10 @@ public StandardQueryParser() { * Constructs a {@link StandardQueryParser} object and sets an {@link Analyzer} to it. The same * as: * - *
+   * 

    * StandardQueryParser qp = new StandardQueryParser();
    * qp.getQueryConfigHandler().setAnalyzer(analyzer);
-   * 
+ *
* * @param analyzer the analyzer to be used by this query parser helper */ diff --git a/lucene/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java b/lucene/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java index 60202bcb10af..643b7469b7e0 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java @@ -53,14 +53,14 @@ * *

Example Usage: * - *

+ * 

  *  SpellChecker spellchecker = new SpellChecker(spellIndexDirectory);
  *  // To index a field of a user index:
  *  spellchecker.indexDictionary(new LuceneDictionary(my_lucene_reader, a_field));
  *  // To index a file containing words:
  *  spellchecker.indexDictionary(new PlainTextDictionary(new File("myfile.txt")));
  *  String[] suggestions = spellchecker.suggestSimilar("misspelt", 5);
- * 
+ *
*/ public class SpellChecker implements java.io.Closeable { diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/SuggestStopFilterFactory.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/SuggestStopFilterFactory.java index 9d8f6c189b6a..eb9a6a9059b3 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/SuggestStopFilterFactory.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/SuggestStopFilterFactory.java @@ -29,7 +29,7 @@ /** * Factory for {@link SuggestStopFilter}. * - *
+ * 

  * <fieldType name="autosuggest" class="solr.TextField"
  *            positionIncrementGap="100" autoGeneratePhraseQueries="true">
  *   <analyzer>
@@ -38,7 +38,7 @@
  *     <filter class="solr.SuggestStopFilterFactory" ignoreCase="true"
  *             words="stopwords.txt" format="wordset"/>
  *   </analyzer>
- * </fieldType>
+ * </fieldType>
* *

All attributes are optional: * diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionQuery.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionQuery.java index 4a63ad7e7945..ebf76a84fb5d 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionQuery.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionQuery.java @@ -37,10 +37,10 @@ * that provides a concrete implementation of this query. Example below shows using this query to * retrieve the top 5 documents. * - *

+ * 

  *  SuggestIndexSearcher searcher = new SuggestIndexSearcher(reader);
  *  TopSuggestDocs suggestDocs = searcher.suggest(query, 5);
- * 
+ *
* * This query rewrites to an appropriate {@link CompletionQuery} depending on the type ({@link * SuggestField} or {@link ContextSuggestField}) of the field the query is run against. diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/ContextQuery.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/ContextQuery.java index b9e8d6213fe9..953534c6dea2 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/ContextQuery.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/ContextQuery.java @@ -47,12 +47,12 @@ * *

Example of using a {@link CompletionQuery} with boosted contexts: * - *

+ * 

  *  CompletionQuery completionQuery = ...;
  *  ContextQuery query = new ContextQuery(completionQuery);
  *  query.addContext("context1", 2);
  *  query.addContext("context2", 1);
- * 
+ *
* *

NOTE: * diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/ContextSuggestField.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/ContextSuggestField.java index bd25d3ef3afc..6f9a9209f50e 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/ContextSuggestField.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/ContextSuggestField.java @@ -32,10 +32,10 @@ * {@link SuggestField} which additionally takes in a set of contexts. Example usage of adding a * suggestion with contexts is as follows: * - *

+ * 

  *  document.add(
  *   new ContextSuggestField(name, "suggestion", Arrays.asList("context1", "context2"),  4));
- * 
+ *
* * Use {@link ContextQuery} to boost and/or filter suggestions at query-time. Use {@link * PrefixCompletionQuery}, {@link RegexCompletionQuery} or {@link FuzzyCompletionQuery} if context diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/FuzzyCompletionQuery.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/FuzzyCompletionQuery.java index 91bbe98f885b..d20db3353563 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/FuzzyCompletionQuery.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/FuzzyCompletionQuery.java @@ -46,9 +46,9 @@ *

Example usage of querying an analyzed prefix within an edit distance of 1 of 'subg' against a * field 'suggest_field' is as follows: * - *

+ * 

  *  CompletionQuery query = new FuzzyCompletionQuery(analyzer, new Term("suggest_field", "subg"));
- * 
+ *
* * @lucene.experimental */ diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/PrefixCompletionQuery.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/PrefixCompletionQuery.java index 09b1f0b19cd0..a66d4cf41203 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/PrefixCompletionQuery.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/PrefixCompletionQuery.java @@ -32,9 +32,9 @@ *

Example usage of querying an analyzed prefix 'sugg' against a field 'suggest_field' is as * follows: * - *

+ * 

  *  CompletionQuery query = new PrefixCompletionQuery(analyzer, new Term("suggest_field", "sugg"));
- * 
+ *
* * @lucene.experimental */ diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/RegexCompletionQuery.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/RegexCompletionQuery.java index d3cc80623a78..89bcd258f30c 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/RegexCompletionQuery.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/RegexCompletionQuery.java @@ -34,9 +34,9 @@ *

Example usage of querying a prefix of 'sug' and 'sub' as a regular expression against a * suggest field 'suggest_field': * - *

+ * 

  *  CompletionQuery query = new RegexCompletionQuery(new Term("suggest_field", "su[g|b]"));
- * 
+ *
* *

See {@link RegExp} for the supported regular expression syntax * diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/SuggestField.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/SuggestField.java index a595197ca79c..664f1a4c2243 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/SuggestField.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/SuggestField.java @@ -38,9 +38,9 @@ * *

Example indexing usage: * - *

+ * 

  * document.add(new SuggestField(name, "suggestion", 4));
- * 
+ *
* * To perform document suggestions based on the field, use {@link * SuggestIndexSearcher#suggest(CompletionQuery, int, boolean)} diff --git a/lucene/test-framework/src/java/org/apache/lucene/tests/analysis/BaseTokenStreamFactoryTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/tests/analysis/BaseTokenStreamFactoryTestCase.java index 5013b1ac8956..f9f6b4ab9e3a 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/tests/analysis/BaseTokenStreamFactoryTestCase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/tests/analysis/BaseTokenStreamFactoryTestCase.java @@ -33,14 +33,14 @@ * *

Example usage: * - *

+ * 

  *   Reader reader = new StringReader("Some Text to Analyze");
  *   reader = charFilterFactory("htmlstrip").create(reader);
  *   TokenStream stream = tokenizerFactory("standard").create(reader);
  *   stream = tokenFilterFactory("lowercase").create(stream);
  *   stream = tokenFilterFactory("asciifolding").create(stream);
  *   assertTokenStreamContents(stream, new String[] { "some", "text", "to", "analyze" });
- * 
+ *
*/ public abstract class BaseTokenStreamFactoryTestCase extends BaseTokenStreamTestCase {