lucenenet-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From nightowl...@apache.org
Subject [07/11] lucenenet git commit: Lucene.Net.Analysis SWEEP: fix for various broken XML comments
Date Fri, 03 Feb 2017 17:51:13 GMT
Lucene.Net.Analysis SWEEP: fix for various broken XML comments


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/d4b9c00e
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/d4b9c00e
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/d4b9c00e

Branch: refs/heads/api-work
Commit: d4b9c00efe9b96c15c683e7f757842dfd250d95f
Parents: 46b02fb
Author: Shad Storhaug <shad@shadstorhaug.com>
Authored: Fri Feb 3 22:56:15 2017 +0700
Committer: Shad Storhaug <shad@shadstorhaug.com>
Committed: Fri Feb 3 22:56:15 2017 +0700

----------------------------------------------------------------------
 .../Analysis/Ar/ArabicLetterTokenizer.cs        |  3 +--
 .../Analysis/CharFilter/HTMLStripCharFilter.cs  |  6 +++---
 .../Analysis/CharFilter/NormalizeCharMap.cs     |  2 +-
 .../Analysis/Cjk/CJKTokenizer.cs                | 14 ++++++-------
 .../Analysis/Cn/ChineseTokenizer.cs             |  2 +-
 .../Analysis/CommonGrams/CommonGramsFilter.cs   |  1 +
 .../CommonGrams/CommonGramsQueryFilter.cs       |  2 +-
 .../Compound/Hyphenation/HyphenationTree.cs     | 10 ++++-----
 .../Analysis/Core/LetterTokenizer.cs            |  2 +-
 .../Analysis/Core/StopFilter.cs                 |  4 ++--
 .../Analysis/Core/StopFilterFactory.cs          |  2 +-
 .../Analysis/Core/WhitespaceTokenizer.cs        |  2 +-
 .../Analysis/El/GreekStemmer.cs                 | 22 ++++++++++----------
 .../Analysis/En/EnglishAnalyzer.cs              |  1 +
 .../Analysis/En/EnglishPossessiveFilter.cs      |  2 +-
 .../Analysis/En/PorterStemmer.cs                |  6 +++---
 .../Analysis/Hu/HungarianAnalyzer.cs            |  1 +
 .../Analysis/Hunspell/Dictionary.cs             |  2 +-
 .../Analysis/Lv/LatvianAnalyzer.cs              |  2 +-
 .../Analysis/Miscellaneous/KeepWordFilter.cs    |  2 +-
 .../Analysis/Miscellaneous/PatternAnalyzer.cs   |  6 +++---
 .../Miscellaneous/PatternKeywordMarkerFilter.cs |  2 +-
 .../Analysis/Util/CharArrayMap.cs               |  6 +++---
 .../Analysis/Util/CharArraySet.cs               | 20 +++++++++---------
 24 files changed, 60 insertions(+), 62 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d4b9c00e/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicLetterTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicLetterTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicLetterTokenizer.cs
index c698d5c..ae875e4 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicLetterTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicLetterTokenizer.cs
@@ -31,7 +31,6 @@ namespace Lucene.Net.Analysis.Ar
     /// Handling similar to this is necessary for Indic Scripts, Hebrew, Thaana, etc.
     /// </para>
     /// <para>
-    /// <paramref name="matchVersion"/>
     /// You must specify the required <see cref="LuceneVersion"/> compatibility when
creating
     /// <see cref="ArabicLetterTokenizer"/>:
     /// <list type="bullet">
@@ -62,7 +61,7 @@ namespace Lucene.Net.Analysis.Ar
         /// <see cref="AttributeSource.AttributeFactory"/>. 
         /// </summary>
         /// <param name="matchVersion">
-        ///         matchVersion Lucene version to match See
+        ///         Lucene version to match - See
         ///         <see cref="LuceneVersion"/>.
         /// </param>
         /// <param name="factory">

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d4b9c00e/src/Lucene.Net.Analysis.Common/Analysis/CharFilter/HTMLStripCharFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/CharFilter/HTMLStripCharFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/CharFilter/HTMLStripCharFilter.cs
index d60080e..7dba4f6 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/CharFilter/HTMLStripCharFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/CharFilter/HTMLStripCharFilter.cs
@@ -30659,13 +30659,13 @@ namespace Lucene.Net.Analysis.CharFilters
         private int yycolumn;
 
         /// <summary>
-        /// zzAtBOL == true <=> the scanner is currently at the beginning of a line
+        /// zzAtBOL == true &lt;=&gt; the scanner is currently at the beginning of
a line
         /// </summary>
         private bool zzAtBOL = true;
 
 #pragma warning restore 169, 414
 
-        /// <summary>zzAtEOF == true <=> the scanner is at the EOF</summary>
+        /// <summary>zzAtEOF == true &lt;=&gt; the scanner is at the EOF</summary>
         private bool zzAtEOF;
 
         /// <summary>denotes if the user-EOF-code has already been executed</summary>
@@ -31061,7 +31061,7 @@ namespace Lucene.Net.Analysis.CharFilters
         /// <para/>
         /// All internal variables are reset, the old input stream
         /// <b>cannot</b> be reused (internal buffer is discarded and lost).
-        /// Lexical state is set to <see cref="ZZ_INITIAL"/>.
+        /// Lexical state is set to <see cref="YYINITIAL"/>.
         /// <para/>
         /// Internal scan buffer is resized down to its initial length, if it has grown.
         /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d4b9c00e/src/Lucene.Net.Analysis.Common/Analysis/CharFilter/NormalizeCharMap.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/CharFilter/NormalizeCharMap.cs b/src/Lucene.Net.Analysis.Common/Analysis/CharFilter/NormalizeCharMap.cs
index 110790f..6678922 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/CharFilter/NormalizeCharMap.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/CharFilter/NormalizeCharMap.cs
@@ -91,7 +91,7 @@ namespace Lucene.Net.Analysis.CharFilters
             /// </summary>
             /// <param name="match"> input String to be replaced </param>
             /// <param name="replacement"> output String </param>
-            /// <exception cref="IllegalArgumentException"> if
+            /// <exception cref="ArgumentException"> if
             /// <code>match</code> is the empty string, or was
             /// already previously added </exception>
             public virtual void Add(string match, string replacement)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d4b9c00e/src/Lucene.Net.Analysis.Common/Analysis/Cjk/CJKTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Cjk/CJKTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Cjk/CJKTokenizer.cs
index 160306d..901320b 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Cjk/CJKTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Cjk/CJKTokenizer.cs
@@ -85,13 +85,13 @@ namespace Lucene.Net.Analysis.Cjk
         private int dataLen = 0;
 
         /// <summary>
-        /// character buffer, store the characters which are used to compose <br>
+        /// character buffer, store the characters which are used to compose 
         /// the returned Token
         /// </summary>
         private readonly char[] buffer = new char[MAX_WORD_LEN];
 
         /// <summary>
-        /// I/O buffer, used to store the content of the input(one of the <br>
+        /// I/O buffer, used to store the content of the input(one of the
         /// members of Tokenizer)
         /// </summary>
         private readonly char[] ioBuffer = new char[IO_BUFFER_SIZE];
@@ -151,22 +151,20 @@ namespace Lucene.Net.Analysis.Cjk
         public override bool IncrementToken()
         {
             ClearAttributes();
-            /// <summary>
-            /// how many character(s) has been stored in buffer </summary>
+
+            // how many character(s) has been stored in buffer 
 
             while (true) // loop until we find a non-empty token
             {
 
                 int length = 0;
 
-                /// <summary>
-                /// the position used to create Token </summary>
+                // the position used to create Token 
                 int start = offset;
 
                 while (true) // loop until we've found a full token
                 {
-                    /// <summary>
-                    /// current character </summary>
+                    // current character
                     char c;
 
                     offset++;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d4b9c00e/src/Lucene.Net.Analysis.Common/Analysis/Cn/ChineseTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Cn/ChineseTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Cn/ChineseTokenizer.cs
index eb500bb..cd98aca 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Cn/ChineseTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Cn/ChineseTokenizer.cs
@@ -39,7 +39,7 @@ namespace Lucene.Net.Analysis.Cn
     /// </list>
     /// </para>
     /// <para>
-    /// Therefore the index created by <see cref="CJKTokenizer"/> is much larger.
+    /// Therefore the index created by <see cref="Cjk.CJKTokenizer"/> is much larger.
     /// </para>
     /// <para>
     /// The problem is that when searching for C1, C1C2, C1C3,

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d4b9c00e/src/Lucene.Net.Analysis.Common/Analysis/CommonGrams/CommonGramsFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/CommonGrams/CommonGramsFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/CommonGrams/CommonGramsFilter.cs
index e7578be..fcd9b7a 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/CommonGrams/CommonGramsFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/CommonGrams/CommonGramsFilter.cs
@@ -69,6 +69,7 @@ namespace Lucene.Net.Analysis.CommonGrams
         /// bigrams with position increment 0 type=gram where one or both of the words
         /// in a potential bigram are in the set of common words .
         /// </summary>
+        /// <param name="matchVersion"> lucene compatibility version </param>
         /// <param name="input"> <see cref="TokenStream"/> input in filter chain
</param>
         /// <param name="commonWords"> The set of common words. </param>
         public CommonGramsFilter(LuceneVersion matchVersion, TokenStream input, CharArraySet
commonWords)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d4b9c00e/src/Lucene.Net.Analysis.Common/Analysis/CommonGrams/CommonGramsQueryFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/CommonGrams/CommonGramsQueryFilter.cs
b/src/Lucene.Net.Analysis.Common/Analysis/CommonGrams/CommonGramsQueryFilter.cs
index 32039ca..07e7b53 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/CommonGrams/CommonGramsQueryFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/CommonGrams/CommonGramsQueryFilter.cs
@@ -65,7 +65,7 @@ namespace Lucene.Net.Analysis.CommonGrams
         /// <para/>
         /// If you override this method, always call <c>base.Reset()</c>, otherwise
         /// some internal state will not be correctly reset (e.g., <see cref="Tokenizer"/>
will
-        /// throw <see cref="InvalidOperationException"/> on further usage).
+        /// throw <see cref="System.InvalidOperationException"/> on further usage).
         /// </summary>
         /// <remarks>
         /// <b>NOTE:</b>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d4b9c00e/src/Lucene.Net.Analysis.Common/Analysis/Compound/Hyphenation/HyphenationTree.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Compound/Hyphenation/HyphenationTree.cs
b/src/Lucene.Net.Analysis.Common/Analysis/Compound/Hyphenation/HyphenationTree.cs
index c4dfe8b..6f2af07 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Compound/Hyphenation/HyphenationTree.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Compound/Hyphenation/HyphenationTree.cs
@@ -117,7 +117,7 @@ namespace Lucene.Net.Analysis.Compound.Hyphenation
         /// <summary>
         /// Read hyphenation patterns from an XML file.
         /// </summary>
-        /// <param name="f"> the filename </param>
+        /// <param name="filename"> the filename </param>
         /// <exception cref="IOException"> In case the parsing fails </exception>
         public virtual void LoadPatterns(string filename)
         {
@@ -127,7 +127,7 @@ namespace Lucene.Net.Analysis.Compound.Hyphenation
         /// <summary>
         /// Read hyphenation patterns from an XML file.
         /// </summary>
-        /// <param name="f"> the filename </param>
+        /// <param name="filename"> the filename </param>
         /// <param name="encoding">The character encoding to use</param>
         /// <exception cref="IOException"> In case the parsing fails </exception>
         public virtual void LoadPatterns(string filename, Encoding encoding)
@@ -359,10 +359,8 @@ namespace Lucene.Net.Analysis.Compound.Hyphenation
                         {
                             q = m_lo[q];
 
-                            /// <summary>
-                            /// actually the code should be: q = sc[q] < 0 ? hi[q] : lo[q];
but
-                            /// java chars are unsigned
-                            /// </summary>
+                            // actually the code should be: q = sc[q] < 0 ? hi[q] : lo[q];
but
+                            // java chars are unsigned
                         }
                     }
                 }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d4b9c00e/src/Lucene.Net.Analysis.Common/Analysis/Core/LetterTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Core/LetterTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Core/LetterTokenizer.cs
index 9ef19a6..1be2e65 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Core/LetterTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Core/LetterTokenizer.cs
@@ -25,7 +25,7 @@ namespace Lucene.Net.Analysis.Core
     /// <summary>
     /// A <see cref="LetterTokenizer"/> is a tokenizer that divides text at non-letters.
That's to
     /// say, it defines tokens as maximal strings of adjacent letters, as defined by
-    /// <see cref="char.IsLetter"/> predicate.
+    /// <see cref="char.IsLetter(char)"/> predicate.
     /// <para>
     /// Note: this does a decent job for most European languages, but does a terrible
     /// job for some Asian languages, where words are not separated by spaces.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d4b9c00e/src/Lucene.Net.Analysis.Common/Analysis/Core/StopFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Core/StopFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Core/StopFilter.cs
index 1e5e2a0..2515426 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Core/StopFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Core/StopFilter.cs
@@ -79,7 +79,7 @@ namespace Lucene.Net.Analysis.Core
         /// an <see cref="Analyzer"/> is constructed.
         /// </summary>
         /// <param name="matchVersion"> <see cref="LuceneVersion"/> to enable
correct Unicode 4.0 behavior in the returned set if Version > 3.0 </param>
-        /// <param name="stopWords"> A List of <see cref="string"/>s or <see
cref="char[]"/> or any other ToString()-able list representing the stopwords </param>
+        /// <param name="stopWords"> A List of <see cref="string"/>s or <see
cref="T:char[]"/> or any other ToString()-able list representing the stopwords </param>
         /// <returns> A Set (<see cref="CharArraySet"/>) containing the words
</returns>
         /// <seealso cref="MakeStopSet(LuceneVersion, string[], bool)"/> passing false
to ignoreCase
         public static CharArraySet MakeStopSet<T1>(LuceneVersion matchVersion, IList<T1>
stopWords)
@@ -104,7 +104,7 @@ namespace Lucene.Net.Analysis.Core
         /// <summary>
         /// Creates a stopword set from the given stopword list. </summary>
         /// <param name="matchVersion"> <see cref="LuceneVersion"/> to enable
correct Unicode 4.0 behavior in the returned set if Version > 3.0 </param>
-        /// <param name="stopWords"> A List of <see cref="string"/>s or <see
cref="char[]"/> or any other ToString()-able list representing the stopwords </param>
+        /// <param name="stopWords"> A List of <see cref="string"/>s or <see
cref="T:char[]"/> or any other ToString()-able list representing the stopwords </param>
         /// <param name="ignoreCase"> if true, all words are lower cased first </param>
         /// <returns> A Set (<see cref="CharArraySet"/>) containing the words
</returns>
         public static CharArraySet MakeStopSet<T1>(LuceneVersion matchVersion, IList<T1>
stopWords, bool ignoreCase)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d4b9c00e/src/Lucene.Net.Analysis.Common/Analysis/Core/StopFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Core/StopFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Core/StopFilterFactory.cs
index 9466549..1f0c5db 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Core/StopFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Core/StopFilterFactory.cs
@@ -60,7 +60,7 @@ namespace Lucene.Net.Analysis.Core
     ///  <item><c>snowball</c> - This format allows for multiple words
specified on each 
     ///      line, and trailing comments may be specified using the vertical line ("&#124;").

     ///      Blank lines are ignored.  See 
-    ///      <see cref="WordlistLoader.GetSnowballWordSet"/> 
+    ///      <see cref="WordlistLoader.GetSnowballWordSet(System.IO.TextReader, Net.Util.LuceneVersion)"/>

     ///      for details.
     ///  </item>
     /// </list>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d4b9c00e/src/Lucene.Net.Analysis.Common/Analysis/Core/WhitespaceTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Core/WhitespaceTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Core/WhitespaceTokenizer.cs
index a60a679..98db5e7 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Core/WhitespaceTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Core/WhitespaceTokenizer.cs
@@ -62,7 +62,7 @@ namespace Lucene.Net.Analysis.Core
 
         /// <summary>
         /// Collects only characters which do not satisfy
-        /// <see cref="char.IsWhitespace(char)"/>.
+        /// <see cref="char.IsWhiteSpace(char)"/>.
         /// </summary>
         protected override bool IsTokenChar(int c)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d4b9c00e/src/Lucene.Net.Analysis.Common/Analysis/El/GreekStemmer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/El/GreekStemmer.cs b/src/Lucene.Net.Analysis.Common/Analysis/El/GreekStemmer.cs
index 1a5e8b3..4934410 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/El/GreekStemmer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/El/GreekStemmer.cs
@@ -34,11 +34,11 @@ namespace Lucene.Net.Analysis.El
     public class GreekStemmer
     {
         /// <summary>
-        /// Stems a word contained in a leading portion of a <see cref="char[]"/> array.
+        /// Stems a word contained in a leading portion of a <see cref="T:char[]"/>
array.
         /// The word is passed through a number of rules that modify it's length.
         /// </summary>
-        /// <param name="s"> A <see cref="char[]"/> array that contains the word
to be stemmed. </param>
-        /// <param name="len"> The length of the <see cref="char[]"/> array.
</param>
+        /// <param name="s"> A <see cref="T:char[]"/> array that contains the
word to be stemmed. </param>
+        /// <param name="len"> The length of the <see cref="T:char[]"/> array.
</param>
         /// <returns> The new length of the stemmed word. </returns>
         public virtual int Stem(char[] s, int len)
         {
@@ -1021,12 +1021,12 @@ namespace Lucene.Net.Analysis.El
         }
 
         /// <summary>
-        /// Checks if the word contained in the leading portion of <see cref="char[]"/>
array , 
+        /// Checks if the word contained in the leading portion of <see cref="T:char[]"/>
array , 
         /// ends with a Greek vowel.
         /// </summary>
-        /// <param name="s"> A <see cref="char[]"/> array that represents a word.
</param>
-        /// <param name="len"> The length of the <see cref="char[]"/> array.
</param>
-        /// <returns> True if the word contained in the leading portion of <see
cref="char[]"/> array , 
+        /// <param name="s"> A <see cref="T:char[]"/> array that represents a
word. </param>
+        /// <param name="len"> The length of the <see cref="T:char[]"/> array.
</param>
+        /// <returns> True if the word contained in the leading portion of <see
cref="T:char[]"/> array , 
         /// ends with a vowel , false otherwise. </returns>
         private bool EndsWithVowel(char[] s, int len)
         {
@@ -1050,12 +1050,12 @@ namespace Lucene.Net.Analysis.El
         }
 
         /// <summary>
-        /// Checks if the word contained in the leading portion of <see cref="char[]"/>
array , 
+        /// Checks if the word contained in the leading portion of <see cref="T:char[]"/>
array , 
         /// ends with a Greek vowel.
         /// </summary>
-        /// <param name="s"> A <see cref="char[]"/> array that represents a word.
</param>
-        /// <param name="len"> The length of the <see cref="char[]"/> array.
</param>
-        /// <returns> True if the word contained in the leading portion of <see
cref="char[]"/> array , 
+        /// <param name="s"> A <see cref="T:char[]"/> array that represents a
word. </param>
+        /// <param name="len"> The length of the <see cref="T:char[]"/> array.
</param>
+        /// <returns> True if the word contained in the leading portion of <see
cref="T:char[]"/> array , 
         /// ends with a vowel , false otherwise. </returns>
         private bool EndsWithVowelNoY(char[] s, int len)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d4b9c00e/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishAnalyzer.cs
index eec5878..3c6e0ff 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishAnalyzer.cs
@@ -55,6 +55,7 @@ namespace Lucene.Net.Analysis.En
         /// <summary>
         /// Builds an analyzer with the default stop words: <see cref="DefaultStopSet"/>.
         /// </summary>
+        /// <param name="matchVersion"> lucene compatibility version </param>
         public EnglishAnalyzer(LuceneVersion matchVersion)
               : this(matchVersion, DefaultSetHolder.DEFAULT_STOP_SET)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d4b9c00e/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishPossessiveFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishPossessiveFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishPossessiveFilter.cs
index 9e22c3d..888e7a8 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishPossessiveFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishPossessiveFilter.cs
@@ -37,7 +37,7 @@ namespace Lucene.Net.Analysis.En
         private readonly ICharTermAttribute termAtt;
         private LuceneVersion matchVersion;
 
-        /// @deprecated Use <see cref="#EnglishPossessiveFilter(Version, TokenStream)"/>
instead. 
+        /// @deprecated Use <see cref="EnglishPossessiveFilter(LuceneVersion, TokenStream)"/>
instead. 
         [Obsolete(@"Use <see cref=""#EnglishPossessiveFilter(org.apache.lucene.util.Version,
org.apache.lucene.analysis.TokenStream)""/> instead.")]
         public EnglishPossessiveFilter(TokenStream input) 
             : this(LuceneVersion.LUCENE_35, input)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d4b9c00e/src/Lucene.Net.Analysis.Common/Analysis/En/PorterStemmer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/En/PorterStemmer.cs b/src/Lucene.Net.Analysis.Common/Analysis/En/PorterStemmer.cs
index 2c2c3e9..d1119c4 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/En/PorterStemmer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/En/PorterStemmer.cs
@@ -741,7 +741,7 @@ namespace Lucene.Net.Analysis.En
         }
 
         /// <summary>
-        /// Stem a word contained in a <see cref="char[]"/>.  Returns true if the stemming
process
+        /// Stem a word contained in a <see cref="T:char[]"/>.  Returns true if the
stemming process
         /// resulted in a word different from the input.  You can retrieve the
         /// result with <see cref="ResultLength"/>/<see cref="ResultBuffer"/>
or <see cref="ToString"/>.
         /// </summary>
@@ -751,7 +751,7 @@ namespace Lucene.Net.Analysis.En
         }
 
         /// <summary>
-        /// Stem a word contained in a portion of a <see cref="char[]"/> array.  Returns
+        /// Stem a word contained in a portion of a <see cref="T:char[]"/> array. 
Returns
         /// true if the stemming process resulted in a word different from
         /// the input.  You can retrieve the result with
         /// <see cref="ResultLength"/>/<see cref="ResultBuffer"/> or <see
cref="ToString"/>.
@@ -769,7 +769,7 @@ namespace Lucene.Net.Analysis.En
         }
 
         /// <summary>
-        /// Stem a word contained in a leading portion of a <see cref="char[]"/> array.
+        /// Stem a word contained in a leading portion of a <see cref="T:char[]"/>
array.
         /// Returns true if the stemming process resulted in a word different
         /// from the input.  You can retrieve the result with
         /// <see cref="ResultLength"/>/<see cref="ResultBuffer"/> or <see
cref="ToString"/>.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d4b9c00e/src/Lucene.Net.Analysis.Common/Analysis/Hu/HungarianAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Hu/HungarianAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Hu/HungarianAnalyzer.cs
index 9d9cdd5..6951d32 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Hu/HungarianAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Hu/HungarianAnalyzer.cs
@@ -81,6 +81,7 @@ namespace Lucene.Net.Analysis.Hu
         /// <summary>
         /// Builds an analyzer with the default stop words: <see cref="DEFAULT_STOPWORD_FILE"/>.
         /// </summary>
+        /// <param name="matchVersion"> lucene compatibility version </param>
         public HungarianAnalyzer(LuceneVersion matchVersion)
               : this(matchVersion, DefaultSetHolder.DEFAULT_STOP_SET)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d4b9c00e/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/Dictionary.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/Dictionary.cs b/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/Dictionary.cs
index 780fe1c..29938e5 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/Dictionary.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/Dictionary.cs
@@ -393,7 +393,7 @@ namespace Lucene.Net.Analysis.Hunspell
         /// <summary>
         /// Parses a specific affix rule putting the result into the provided affix map
         /// </summary>
-        /// <param name="affixes"> <see cref="SortedDictionary{string, IList{char?}}"/>
where the result of the parsing will be put </param>
+        /// <param name="affixes"> <see cref="SortedDictionary{TKey, TValue}"/>
where the result of the parsing will be put </param>
         /// <param name="header"> Header line of the affix rule </param>
         /// <param name="reader"> <see cref="TextReader"/> to read the content
of the rule from </param>
         /// <param name="conditionPattern"> <see cref="string.Format(string, object[])"/>
pattern to be used to generate the condition regex

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d4b9c00e/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianAnalyzer.cs
index e466f12..7b1828f 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianAnalyzer.cs
@@ -112,7 +112,7 @@ namespace Lucene.Net.Analysis.Lv
         /// <summary>
         /// Creates a
         /// <see cref="Analyzer.TokenStreamComponents"/>
-        /// which tokenizes all the text in the provided <see cref="Reader"/>.
+        /// which tokenizes all the text in the provided <see cref="TextReader"/>.
         /// </summary>
         /// <returns> A
         ///         <see cref="Analyzer.TokenStreamComponents"/>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d4b9c00e/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeepWordFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeepWordFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeepWordFilter.cs
index 3aa0978..a7972e0 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeepWordFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeepWordFilter.cs
@@ -24,7 +24,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
 
     /// <summary>
     /// A <see cref="TokenFilter"/> that only keeps tokens with text contained in the
-    /// required words.  This filter behaves like the inverse of <see cref="StopFilter"/>.
+    /// required words.  This filter behaves like the inverse of <see cref="Core.StopFilter"/>.
     /// 
     /// @since solr 1.3
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d4b9c00e/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PatternAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PatternAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PatternAnalyzer.cs
index a01625b..f943762 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PatternAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PatternAnalyzer.cs
@@ -39,7 +39,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
     /// <para>
     /// If you are unsure how exactly a regular expression should look like, consider 
     /// prototyping by simply trying various expressions on some test texts via
-    /// <see cref="string.Split(string)"/>. Once you are satisfied, give that regex
to 
+    /// <see cref="string.Split(char[])"/>. Once you are satisfied, give that regex
to 
     /// <see cref="PatternAnalyzer"/>. Also see <a target="_blank" 
     /// href="http://www.regular-expressions.info/">Regular Expression Tutorial</a>.
     /// </para>
@@ -48,7 +48,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
     /// It can also serve as a building block in a compound Lucene
     /// <see cref="TokenFilter"/> chain. For example as in this 
     /// stemming example:
-    /// <pre>
+    /// <code>
     /// PatternAnalyzer pat = ...
     /// TokenStream tokenStream = new SnowballFilter(
     ///     pat.GetTokenStream("content", "James is running round in the woods"), 
@@ -151,7 +151,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
         ///            if non-null, ignores all tokens that are contained in the
         ///            given stop set (after previously having applied toLowerCase()
         ///            if applicable). For example, created via
-        ///            <see cref="StopFilter#makeStopSet(Version, String[])"/>and/or
+        ///            <see cref="StopFilter.MakeStopSet(LuceneVersion, string[])"/> and/or
         ///            <see cref="WordlistLoader"/>as in
         ///            <code>WordlistLoader.getWordSet(new File("samples/fulltext/stopwords.txt")</code>
         ///            or <a href="http://www.unine.ch/info/clef/">other stop words

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d4b9c00e/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PatternKeywordMarkerFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PatternKeywordMarkerFilter.cs
b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PatternKeywordMarkerFilter.cs
index f9c8898..10ea4a4 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PatternKeywordMarkerFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PatternKeywordMarkerFilter.cs
@@ -23,7 +23,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
     /// <summary>
     /// Marks terms as keywords via the <see cref="KeywordAttribute"/>. Each token
     /// that matches the provided pattern is marked as a keyword by setting
-    /// <see cref="KeywordAttribute#setKeyword(boolean)"/> to <code>true</code>.
+    /// <see cref="KeywordAttribute.IsKeyword"/> to <c>true</c>.
     /// </summary>
     public sealed class PatternKeywordMarkerFilter : KeywordMarkerFilter
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d4b9c00e/src/Lucene.Net.Analysis.Common/Analysis/Util/CharArrayMap.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/CharArrayMap.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/CharArrayMap.cs
index 5cade2d..b6e2e36 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/CharArrayMap.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/CharArrayMap.cs
@@ -29,12 +29,12 @@ namespace Lucene.Net.Analysis.Util
 	 */
 
     /// <summary>
-    /// A simple class that stores key <see cref="string"/>s as <see cref="char[]"/>'s in a
+    /// A simple class that stores key <see cref="string"/>s as <see cref="T:char[]"/>'s in a
     /// hash table. Note that this is not a general purpose
     /// class.  For example, it cannot remove items from the
     /// map, nor does it resize its hash table to be smaller,
     /// etc.  It is designed to be quick to retrieve items
-    /// by <see cref="char[]"/> keys without the necessity of converting
+    /// by <see cref="T:char[]"/> keys without the necessity of converting
     /// to a <see cref="string"/> first.
     /// 
     /// <a name="version"></a>
@@ -249,7 +249,7 @@ namespace Lucene.Net.Analysis.Util
 
         /// <summary>
         /// <c>true</c> if the entire <see cref="KeySet"/> is the same as the 
-        /// <paramref name="text"/> <see cref="char[]"/> being passed in; 
+        /// <paramref name="text"/> <see cref="T:char[]"/> being passed in; 
         /// otherwise <c>false</c>.
         /// </summary>
         public virtual bool ContainsKey(char[] text)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d4b9c00e/src/Lucene.Net.Analysis.Common/Analysis/Util/CharArraySet.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/CharArraySet.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/CharArraySet.cs
index b715768..9ac42ba 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/CharArraySet.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/CharArraySet.cs
@@ -28,11 +28,11 @@ namespace Lucene.Net.Analysis.Util
 	 */
 
     /// <summary>
-    /// A simple class that stores <see cref="string"/>s as <see cref="char[]"/>'s in a
+    /// A simple class that stores <see cref="string"/>s as <see cref="T:char[]"/>'s in a
     /// hash table.  Note that this is not a general purpose
     /// class.  For example, it cannot remove items from the
     /// set, nor does it resize its hash table to be smaller,
-    /// etc.  It is designed to be quick to test if a <see cref="char[]"/>
+    /// etc.  It is designed to be quick to test if a <see cref="T:char[]"/>
     /// is in the set without the necessity of converting it
     /// to a <see cref="string"/> first.
     /// 
@@ -54,9 +54,9 @@ namespace Lucene.Net.Analysis.Util
     /// does not behave like it should in all cases. The generic type is
     /// <see cref="string"/>, because you can add any object to it,
     /// that has a string representation (which is converted to a string). The add methods will use
-    /// <see cref="object.ToString()"/> and store the result using a <see cref="char[]"/>
+    /// <see cref="object.ToString()"/> and store the result using a <see cref="T:char[]"/>
     /// buffer. The same behavior have the <see cref="Contains(string)"/> methods.
-    /// The <see cref="GetEnumerator()"/> returns an <see cref="IEnumerator{Char[]}"/>
+    /// The <see cref="GetEnumerator()"/> returns an <see cref="T:IEnumerator{char[]}"/>
     /// </para>
     /// </summary>
     public class CharArraySet : ISet<string>
@@ -125,7 +125,7 @@ namespace Lucene.Net.Analysis.Util
         }
 
         /// <summary>
-        /// <c>true</c> if the <see cref="char[]"/>s 
+        /// <c>true</c> if the <see cref="T:char[]"/>s 
         /// are in the set 
         /// </summary>
         public virtual bool Contains(char[] text)
@@ -189,8 +189,8 @@ namespace Lucene.Net.Analysis.Util
         }
 
         /// <summary>
-        /// Add this <see cref="char[]"/> directly to the set.
-        /// If <see cref="ignoreCase"/> is true for this <see cref="CharArraySet"/>, the text array will be directly modified.
+        /// Add this <see cref="T:char[]"/> directly to the set.
+        /// If <c>ignoreCase</c> is true for this <see cref="CharArraySet"/>, the text array will be directly modified.
         /// The user should never modify this text array after calling this method.
         /// </summary>
         /// <returns><c>true</c> if <paramref name="o"/> was added to the set; <c>false</c> if it already existed prior to this call</returns>
@@ -293,7 +293,7 @@ namespace Lucene.Net.Analysis.Util
         }
 
         /// <summary>
-        /// Returns an <see cref="IEnumerator"/> for <see cref="char[]"/> instances in this set.
+        /// Returns an <see cref="IEnumerator"/> for <see cref="T:char[]"/> instances in this set.
         /// </summary>
         public virtual IEnumerator GetEnumerator()
         {
@@ -327,10 +327,10 @@ namespace Lucene.Net.Analysis.Util
         #region LUCENENET specific members
 
         /// <summary>
-        /// Copies the entire <see cref="CharArraySet"/> to a one-dimensional <see cref="string[]"/> array, 
+        /// Copies the entire <see cref="CharArraySet"/> to a one-dimensional <see cref="T:string[]"/> array, 
         /// starting at the specified index of the target array.
         /// </summary>
-        /// <param name="array">The one-dimensional <see cref="string[]"/> Array that is the destination of the 
+        /// <param name="array">The one-dimensional <see cref="T:string[]"/> Array that is the destination of the 
         /// elements copied from <see cref="CharArraySet"/>. The Array must have zero-based indexing.</param>
         /// <param name="arrayIndex">The zero-based index in array at which copying begins.</param>
         public void CopyTo(string[] array, int arrayIndex)


Mime
View raw message