lucenenet-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From nightowl...@apache.org
Subject [03/13] lucenenet git commit: Lucene.Net.Analysis.Common: find and replace for document comments - <pre class="prettyprint"> > <code>, </pre> > </code>, <seealso cref=" > <see cref=", org.apache.lucene.analysis.Analyzer.TokenStreamComponents > Analyzer.T
Date Fri, 03 Feb 2017 04:41:50 GMT
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Util/ElisionFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/ElisionFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/ElisionFilter.cs
index d5fad67..bfa7751 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/ElisionFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/ElisionFilter.cs
@@ -20,7 +20,7 @@ namespace Lucene.Net.Analysis.Util
      */
 
     /// <summary>
-    /// Removes elisions from a <seealso cref="TokenStream"/>. For example, "l'avion"
(the plane) will be
+    /// Removes elisions from a <see cref="TokenStream"/>. For example, "l'avion" (the
plane) will be
     /// tokenized as "avion" (plane).
     /// </summary>
     /// <seealso cref= <a href="http://fr.wikipedia.org/wiki/%C3%89lision">Elision
in Wikipedia</a> </seealso>
@@ -31,7 +31,7 @@ namespace Lucene.Net.Analysis.Util
 
         /// <summary>
         /// Constructs an elision filter with a Set of stop words </summary>
-        /// <param name="input"> the source <seealso cref="TokenStream"/> </param>
+        /// <param name="input"> the source <see cref="TokenStream"/> </param>
         /// <param name="articles"> a set of stopword articles </param>
         public ElisionFilter(TokenStream input, CharArraySet articles)
             : base(input)
@@ -41,7 +41,7 @@ namespace Lucene.Net.Analysis.Util
         }
 
         /// <summary>
-        /// Increments the <seealso cref="TokenStream"/> with a <seealso cref="CharTermAttribute"/>
without elisioned start
+        /// Increments the <see cref="TokenStream"/> with a <see cref="CharTermAttribute"/>
without elisioned start
         /// </summary>
         public override bool IncrementToken()
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Util/ElisionFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/ElisionFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/ElisionFilterFactory.cs
index 71c240e..f12f57b 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/ElisionFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/ElisionFilterFactory.cs
@@ -21,8 +21,8 @@ namespace Lucene.Net.Analysis.Util
 	 */
 
     /// <summary>
-    /// Factory for <seealso cref="ElisionFilter"/>.
-    /// <pre class="prettyprint">
+    /// Factory for <see cref="ElisionFilter"/>.
+    /// <code>
     /// &lt;fieldType name="text_elsn" class="solr.TextField" positionIncrementGap="100"&gt;
     ///   &lt;analyzer&gt;
     ///     &lt;tokenizer class="solr.StandardTokenizerFactory"/&gt;
@@ -30,7 +30,7 @@ namespace Lucene.Net.Analysis.Util
     ///     &lt;filter class="solr.ElisionFilterFactory" 
     ///       articles="stopwordarticles.txt" ignoreCase="true"/&gt;
     ///   &lt;/analyzer&gt;
-    /// &lt;/fieldType&gt;</pre>
+    /// &lt;/fieldType&gt;</code>
     /// </summary>
     public class ElisionFilterFactory : TokenFilterFactory, IResourceLoaderAware, IMultiTermAwareComponent
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Util/FilesystemResourceLoader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/FilesystemResourceLoader.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/FilesystemResourceLoader.cs
index fb4d438..09aab01 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/FilesystemResourceLoader.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/FilesystemResourceLoader.cs
@@ -21,14 +21,14 @@ namespace Lucene.Net.Analysis.Util
 	 */
 
     /// <summary>
-    /// Simple <seealso cref="ResourceLoader"/> that opens resource files
+    /// Simple <see cref="ResourceLoader"/> that opens resource files
     /// from the local file system, optionally resolving against
     /// a base directory.
     /// 
-    /// <para>This loader wraps a delegate <seealso cref="ResourceLoader"/>
+    /// <para>This loader wraps a delegate <see cref="ResourceLoader"/>
     /// that is used to resolve all files, the current base directory
-    /// does not contain. <seealso cref="#newInstance"/> is always resolved
-    /// against the delegate, as a <seealso cref="ClassLoader"/> is needed.
+    /// does not contain. <see cref="#newInstance"/> is always resolved
+    /// against the delegate, as a <see cref="ClassLoader"/> is needed.
     /// 
     /// </para>
     /// <para>You can chain several {@code FilesystemResourceLoader}s
@@ -65,7 +65,7 @@ namespace Lucene.Net.Analysis.Util
         /// Creates a resource loader that resolves resources against the given
         /// base directory (may be {@code null} to refer to CWD).
         /// Files not found in file system and class lookups are delegated
-        /// to the given delegate <seealso cref="ResourceLoader"/>.
+        /// to the given delegate <see cref="ResourceLoader"/>.
         /// </summary>
         public FilesystemResourceLoader(DirectoryInfo baseDirectory, IResourceLoader @delegate)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Util/FilteringTokenFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/FilteringTokenFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/FilteringTokenFilter.cs
index 688c890..241c8da 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/FilteringTokenFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/FilteringTokenFilter.cs
@@ -23,11 +23,11 @@ namespace Lucene.Net.Analysis.Util
 
     /// <summary>
     /// Abstract base class for TokenFilters that may remove tokens.
-    /// You have to implement <seealso cref="#accept"/> and return a boolean if the
current
-    /// token should be preserved. <seealso cref="#incrementToken"/> uses this method
+    /// You have to implement <see cref="#accept"/> and return a boolean if the current
+    /// token should be preserved. <see cref="#incrementToken"/> uses this method
     /// to decide if a token should be passed to the caller.
     /// <para><a name="lucene_match_version" />As of Lucene 4.4, an
-    /// <seealso cref="IllegalArgumentException"/> is thrown when trying to disable
position
+    /// <see cref="IllegalArgumentException"/> is thrown when trying to disable position
     /// increments when filtering terms.
     /// </para>
     /// </summary>
@@ -52,7 +52,7 @@ namespace Lucene.Net.Analysis.Util
         private int skippedPositions;
 
         /// <summary>
-        /// Create a new <seealso cref="FilteringTokenFilter"/>. </summary>
+        /// Create a new <see cref="FilteringTokenFilter"/>. </summary>
         /// <param name="version">                  the <a href="#lucene_match_version">Lucene
match version</a> </param>
         /// <param name="enablePositionIncrements"> whether to increment position increments
when filtering out terms </param>
         /// <param name="input">                    the input to consume </param>
@@ -66,9 +66,9 @@ namespace Lucene.Net.Analysis.Util
         }
 
         /// <summary>
-        /// Create a new <seealso cref="FilteringTokenFilter"/>. </summary>
+        /// Create a new <see cref="FilteringTokenFilter"/>. </summary>
         /// <param name="version"> the Lucene match version </param>
-        /// <param name="in">      the <seealso cref="TokenStream"/> to consume
</param>
+        /// <param name="in">      the <see cref="TokenStream"/> to consume </param>
         public FilteringTokenFilter(LuceneVersion version, TokenStream @in)
             : base(@in)
         {
@@ -78,7 +78,7 @@ namespace Lucene.Net.Analysis.Util
         }
 
         /// <summary>
-        /// Override this method and return if the current input token should be returned
by <seealso cref="#incrementToken"/>. </summary>
+        /// Override this method and return if the current input token should be returned
by <see cref="#incrementToken"/>. </summary>
         protected abstract bool Accept();
 
         public override sealed bool IncrementToken()

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Util/ResourceLoaderAware.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/ResourceLoaderAware.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/ResourceLoaderAware.cs
index 6f12908..f9c0506 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/ResourceLoaderAware.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/ResourceLoaderAware.cs
@@ -19,7 +19,7 @@
 
     /// <summary>
     /// Interface for a component that needs to be initialized by
-    /// an implementation of <seealso cref="ResourceLoader"/>.
+    /// an implementation of <see cref="ResourceLoader"/>.
     /// </summary>
     /// <seealso cref= ResourceLoader </seealso>
     public interface IResourceLoaderAware

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Util/RollingCharBuffer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/RollingCharBuffer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/RollingCharBuffer.cs
index 073c10b..6bddce4 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/RollingCharBuffer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/RollingCharBuffer.cs
@@ -29,7 +29,7 @@ namespace Lucene.Net.Analysis.Util
     ///  characters that haven't been freed yet.  This is like a
     ///  PushbackReader, except you don't have to specify
     ///  up-front the max size of the buffer, but you do have to
-    ///  periodically call <seealso cref="#freeBefore"/>. 
+    ///  periodically call <see cref="#freeBefore"/>. 
     /// </summary>
 
     public sealed class RollingCharBuffer

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Util/StopwordAnalyzerBase.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/StopwordAnalyzerBase.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/StopwordAnalyzerBase.cs
index 0a9ad07..c19ace3 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/StopwordAnalyzerBase.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/StopwordAnalyzerBase.cs
@@ -79,7 +79,7 @@ namespace Lucene.Net.Analysis.Util
 
         /// <summary>
         /// Creates a CharArraySet from a file resource associated with a class. (See
-        /// <seealso cref="Class#getResourceAsStream(String)"/>).
+        /// <see cref="Class#getResourceAsStream(String)"/>).
         /// </summary>
         /// <param name="ignoreCase">
         ///          <code>true</code> if the set should ignore the case of the
@@ -93,7 +93,7 @@ namespace Lucene.Net.Analysis.Util
         /// <returns> a CharArraySet containing the distinct stopwords from the given
         ///         file </returns>
         /// <exception cref="IOException">
-        ///           if loading the stopwords throws an <seealso cref="IOException"/>
</exception>
+        ///           if loading the stopwords throws an <see cref="IOException"/>
</exception>
         protected internal static CharArraySet LoadStopwordSet(bool ignoreCase, Type aClass,
string resource, string comment)
         {
             TextReader reader = null;
@@ -124,7 +124,7 @@ namespace Lucene.Net.Analysis.Util
         /// <returns> a CharArraySet containing the distinct stopwords from the given
         ///         file </returns>
         /// <exception cref="IOException">
-        ///           if loading the stopwords throws an <seealso cref="IOException"/>
</exception>
+        ///           if loading the stopwords throws an <see cref="IOException"/>
</exception>
         protected internal static CharArraySet LoadStopwordSet(FileInfo stopwords, LuceneVersion
matchVersion)
         {
             TextReader reader = null;
@@ -150,7 +150,7 @@ namespace Lucene.Net.Analysis.Util
         /// <returns> a CharArraySet containing the distinct stopwords from the given
         ///         reader </returns>
         /// <exception cref="IOException">
-        ///           if loading the stopwords throws an <seealso cref="IOException"/>
</exception>
+        ///           if loading the stopwords throws an <see cref="IOException"/>
</exception>
         protected internal static CharArraySet LoadStopwordSet(TextReader stopwords, LuceneVersion
matchVersion)
         {
             try

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Util/TokenFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/TokenFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/TokenFilterFactory.cs
index afa0557..b2822d2 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/TokenFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/TokenFilterFactory.cs
@@ -21,7 +21,7 @@ namespace Lucene.Net.Analysis.Util
      */
 
     /// <summary>
-    /// Abstract parent class for analysis factories that create <seealso cref="TokenFilter"/>
+    /// Abstract parent class for analysis factories that create <see cref="TokenFilter"/>
     /// instances.
     /// </summary>
     public abstract class TokenFilterFactory : AbstractAnalysisFactory
@@ -50,9 +50,9 @@ namespace Lucene.Net.Analysis.Util
         }
 
         /// <summary>
-        /// Reloads the factory list from the given <seealso cref="ClassLoader"/>.
+        /// Reloads the factory list from the given <see cref="ClassLoader"/>.
         /// Changes to the factories are visible after the method ends, all
-        /// iterators (<seealso cref="#availableTokenFilters()"/>,...) stay consistent.

+        /// iterators (<see cref="#availableTokenFilters()"/>,...) stay consistent.

         /// 
         /// <para><b>NOTE:</b> Only new factories are added, existing ones
are
         /// never removed or replaced.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Util/TokenizerFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/TokenizerFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/TokenizerFactory.cs
index 127be40..285f090 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/TokenizerFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/TokenizerFactory.cs
@@ -23,7 +23,7 @@ namespace Lucene.Net.Analysis.Util
      */
 
     /// <summary>
-    /// Abstract parent class for analysis factories that create <seealso cref="Tokenizer"/>
+    /// Abstract parent class for analysis factories that create <see cref="Tokenizer"/>
     /// instances.
     /// </summary>
     public abstract class TokenizerFactory : AbstractAnalysisFactory
@@ -53,9 +53,9 @@ namespace Lucene.Net.Analysis.Util
         }
 
         /// <summary>
-        /// Reloads the factory list from the given <seealso cref="ClassLoader"/>.
+        /// Reloads the factory list from the given <see cref="ClassLoader"/>.
         /// Changes to the factories are visible after the method ends, all
-        /// iterators (<seealso cref="#availableTokenizers()"/>,...) stay consistent.

+        /// iterators (<see cref="#availableTokenizers()"/>,...) stay consistent. 
         /// 
         /// <para><b>NOTE:</b> Only new factories are added, existing ones
are
         /// never removed or replaced.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Util/WordlistLoader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/WordlistLoader.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/WordlistLoader.cs
index 4005c55..d091f87 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/WordlistLoader.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/WordlistLoader.cs
@@ -27,7 +27,7 @@ namespace Lucene.Net.Analysis.Util
     /// <summary>
     /// Loader for text files that represent a list of stopwords.
     /// </summary>
-    /// <seealso cref= IOUtils to obtain <seealso cref="Reader"/> instances
+    /// <seealso cref= IOUtils to obtain <see cref="Reader"/> instances
     /// @lucene.internal </seealso>
     public class WordlistLoader
     {
@@ -49,8 +49,8 @@ namespace Lucene.Net.Analysis.Util
         /// Analyzer which uses LowerCaseFilter (like StandardAnalyzer).
         /// </summary>
         /// <param name="reader"> TextReader containing the wordlist </param>
-        /// <param name="result"> the <seealso cref="CharArraySet"/> to fill
with the readers words </param>
-        /// <returns> the given <seealso cref="CharArraySet"/> with the reader's
words </returns>
+        /// <param name="result"> the <see cref="CharArraySet"/> to fill with
the readers words </param>
+        /// <returns> the given <see cref="CharArraySet"/> with the reader's
words </returns>
         public static CharArraySet GetWordSet(TextReader reader, CharArraySet result)
         {
             try
@@ -76,8 +76,8 @@ namespace Lucene.Net.Analysis.Util
         /// Analyzer which uses LowerCaseFilter (like StandardAnalyzer).
         /// </summary>
         /// <param name="reader"> TextReader containing the wordlist </param>
-        /// <param name="matchVersion"> the <seealso cref="LuceneVersion"/> </param>
-        /// <returns> A <seealso cref="CharArraySet"/> with the reader's words
</returns>
+        /// <param name="matchVersion"> the <see cref="LuceneVersion"/> </param>
+        /// <returns> A <see cref="CharArraySet"/> with the reader's words </returns>
         public static CharArraySet GetWordSet(TextReader reader, LuceneVersion matchVersion)
         {
             return GetWordSet(reader, new CharArraySet(matchVersion, INITIAL_CAPACITY, false));
@@ -91,7 +91,7 @@ namespace Lucene.Net.Analysis.Util
         /// </summary>
         /// <param name="reader"> TextReader containing the wordlist </param>
         /// <param name="comment"> The string representing a comment. </param>
-        /// <param name="matchVersion"> the <seealso cref="LuceneVersion"/> </param>
+        /// <param name="matchVersion"> the <see cref="LuceneVersion"/> </param>
         /// <returns> A CharArraySet with the reader's words </returns>
         public static CharArraySet GetWordSet(TextReader reader, string comment, LuceneVersion
matchVersion)
         {
@@ -106,8 +106,8 @@ namespace Lucene.Net.Analysis.Util
         /// </summary>
         /// <param name="reader"> TextReader containing the wordlist </param>
         /// <param name="comment"> The string representing a comment. </param>
-        /// <param name="result"> the <seealso cref="CharArraySet"/> to fill
with the readers words </param>
-        /// <returns> the given <seealso cref="CharArraySet"/> with the reader's
words </returns>
+        /// <param name="result"> the <see cref="CharArraySet"/> to fill with
the readers words </param>
+        /// <returns> the given <see cref="CharArraySet"/> with the reader's
words </returns>
         public static CharArraySet GetWordSet(TextReader reader, string comment, CharArraySet
result)
         {
             try
@@ -141,8 +141,8 @@ namespace Lucene.Net.Analysis.Util
         /// </para>
         /// </summary>
         /// <param name="reader"> TextReader containing a Snowball stopword list </param>
-        /// <param name="result"> the <seealso cref="CharArraySet"/> to fill
with the readers words </param>
-        /// <returns> the given <seealso cref="CharArraySet"/> with the reader's
words </returns>
+        /// <param name="result"> the <see cref="CharArraySet"/> to fill with
the readers words </param>
+        /// <returns> the given <see cref="CharArraySet"/> with the reader's
words </returns>
         public static CharArraySet GetSnowballWordSet(TextReader reader, CharArraySet result)
         {
             try
@@ -184,8 +184,8 @@ namespace Lucene.Net.Analysis.Util
         /// </para>
         /// </summary>
         /// <param name="reader"> TextReader containing a Snowball stopword list </param>
-        /// <param name="matchVersion"> the Lucene <seealso cref="Version"/>
</param>
-        /// <returns> A <seealso cref="CharArraySet"/> with the reader's words
</returns>
+        /// <param name="matchVersion"> the Lucene <see cref="LuceneVersion"/>
</param>
+        /// <returns> A <see cref="CharArraySet"/> with the reader's words </returns>
         public static CharArraySet GetSnowballWordSet(TextReader reader, LuceneVersion matchVersion)
         {
             return GetSnowballWordSet(reader, new CharArraySet(matchVersion, INITIAL_CAPACITY,
false));
@@ -194,7 +194,7 @@ namespace Lucene.Net.Analysis.Util
 
         /// <summary>
         /// Reads a stem dictionary. Each line contains:
-        /// <pre>word<b>\t</b>stem</pre>
+        /// <code>word<b>\t</b>stem</code>
         /// (i.e. two tab separated words)
         /// </summary>
         /// <returns> stem dictionary that overrules the stemming algorithm </returns>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Wikipedia/WikipediaTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Wikipedia/WikipediaTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Wikipedia/WikipediaTokenizer.cs
index 8cfc982..f815db4 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Wikipedia/WikipediaTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Wikipedia/WikipediaTokenizer.cs
@@ -81,7 +81,7 @@ namespace Lucene.Net.Analysis.Wikipedia
         /// </summary>
         public const int BOTH = 2;
         /// <summary>
-        /// This flag is used to indicate that the produced "Token" would, if <seealso
cref="#TOKENS_ONLY"/> was used, produce multiple tokens.
+        /// This flag is used to indicate that the produced "Token" would, if <see cref="#TOKENS_ONLY"/>
was used, produce multiple tokens.
         /// </summary>
         public const int UNTOKENIZED_TOKEN_FLAG = 1;
         /// <summary>
@@ -102,7 +102,7 @@ namespace Lucene.Net.Analysis.Wikipedia
         private bool first;
 
         /// <summary>
-        /// Creates a new instance of the <seealso cref="WikipediaTokenizer"/>. Attaches
the
+        /// Creates a new instance of the <see cref="WikipediaTokenizer"/>. Attaches
the
         /// <code>input</code> to a newly created JFlex scanner.
         /// </summary>
         /// <param name="input"> The Input TextReader </param>
@@ -112,11 +112,11 @@ namespace Lucene.Net.Analysis.Wikipedia
         }
 
         /// <summary>
-        /// Creates a new instance of the <seealso cref="org.apache.lucene.analysis.wikipedia.WikipediaTokenizer"/>.
 Attaches the
+        /// Creates a new instance of the <see cref="org.apache.lucene.analysis.wikipedia.WikipediaTokenizer"/>.
 Attaches the
         /// <code>input</code> to a the newly created JFlex scanner.
         /// </summary>
         /// <param name="input"> The input </param>
-        /// <param name="tokenOutput"> One of <seealso cref="#TOKENS_ONLY"/>,
<seealso cref="#UNTOKENIZED_ONLY"/>, <seealso cref="#BOTH"/> </param>
+        /// <param name="tokenOutput"> One of <see cref="#TOKENS_ONLY"/>, <see
cref="#UNTOKENIZED_ONLY"/>, <see cref="#BOTH"/> </param>
         public WikipediaTokenizer(TextReader input, int tokenOutput, ICollection<string>
untokenizedTypes)
               : base(input)
         {
@@ -125,11 +125,11 @@ namespace Lucene.Net.Analysis.Wikipedia
         }
 
         /// <summary>
-        /// Creates a new instance of the <seealso cref="org.apache.lucene.analysis.wikipedia.WikipediaTokenizer"/>.
 Attaches the
-        /// <code>input</code> to a the newly created JFlex scanner. Uses the
given <seealso cref="org.apache.lucene.util.AttributeSource.AttributeFactory"/>.
+        /// Creates a new instance of the <see cref="org.apache.lucene.analysis.wikipedia.WikipediaTokenizer"/>.
 Attaches the
+        /// <code>input</code> to the newly created JFlex scanner. Uses the
given <see cref="org.apache.lucene.util.AttributeSource.AttributeFactory"/>.
         /// </summary>
         /// <param name="input"> The input </param>
-        /// <param name="tokenOutput"> One of <seealso cref="#TOKENS_ONLY"/>,
<seealso cref="#UNTOKENIZED_ONLY"/>, <seealso cref="#BOTH"/> </param>
+        /// <param name="tokenOutput"> One of <see cref="#TOKENS_ONLY"/>, <see
cref="#UNTOKENIZED_ONLY"/>, <see cref="#BOTH"/> </param>
         public WikipediaTokenizer(AttributeFactory factory, TextReader input, int tokenOutput,
ICollection<string> untokenizedTypes)
               : base(factory, input)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Wikipedia/WikipediaTokenizerFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Wikipedia/WikipediaTokenizerFactory.cs
b/src/Lucene.Net.Analysis.Common/Analysis/Wikipedia/WikipediaTokenizerFactory.cs
index 4a88289..d63e61a 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Wikipedia/WikipediaTokenizerFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Wikipedia/WikipediaTokenizerFactory.cs
@@ -27,13 +27,13 @@ namespace Lucene.Net.Analysis.Wikipedia
 	 */
 
 	/// <summary>
-	/// Factory for <seealso cref="WikipediaTokenizer"/>.
-	/// <pre class="prettyprint">
+	/// Factory for <see cref="WikipediaTokenizer"/>.
+	/// <code>
 	/// &lt;fieldType name="text_wiki" class="solr.TextField" positionIncrementGap="100"&gt;
 	///   &lt;analyzer&gt;
 	///     &lt;tokenizer class="solr.WikipediaTokenizerFactory"/&gt;
 	///   &lt;/analyzer&gt;
-	/// &lt;/fieldType&gt;</pre>
+	/// &lt;/fieldType&gt;</code>
 	/// </summary>
 	public class WikipediaTokenizerFactory : TokenizerFactory
 	{

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Collation/CollationAttributeFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Collation/CollationAttributeFactory.cs b/src/Lucene.Net.Analysis.Common/Collation/CollationAttributeFactory.cs
index d3a7b1f..b057e7d 100644
--- a/src/Lucene.Net.Analysis.Common/Collation/CollationAttributeFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Collation/CollationAttributeFactory.cs
@@ -24,13 +24,13 @@ namespace Lucene.Net.Collation
 
     /// <summary>
     /// <para>
-    ///   Converts each token into its <seealso cref="CollationKey"/>, and then
+    ///   Converts each token into its <see cref="CollationKey"/>, and then
     ///   encodes the bytes as an index term.
     /// </para>
     /// <para>
     ///   <strong>WARNING:</strong> Make sure you use exactly the same Collator
at
     ///   index and query time -- CollationKeys are only comparable when produced by
-    ///   the same Collator.  Since <seealso cref="RuleBasedCollator"/>s are not
+    ///   the same Collator.  Since <see cref="RuleBasedCollator"/>s are not
     ///   independently versioned, it is unsafe to search against stored
     ///   CollationKeys unless the following are exactly the same (best practice is
     ///   to store this information with the index and check that they remain the
@@ -42,10 +42,10 @@ namespace Lucene.Net.Collation
     ///   <li>
     ///     The language (and country and variant, if specified) of the Locale
     ///     used when constructing the collator via
-    ///     <seealso cref="Collator#getInstance(Locale)"/>.
+    ///     <see cref="Collator#getInstance(Locale)"/>.
     ///   </li>
     ///   <li>
-    ///     The collation strength used - see <seealso cref="Collator#setStrength(int)"/>
+    ///     The collation strength used - see <see cref="Collator#setStrength(int)"/>
     ///   </li>
     /// </ol> 
     /// <para>
@@ -76,7 +76,7 @@ namespace Lucene.Net.Collation
 
 		/// <summary>
 		/// Create a CollationAttributeFactory, using 
-		/// <seealso cref="AttributeSource.AttributeFactory#DEFAULT_ATTRIBUTE_FACTORY"/>
as the
+		/// <see cref="AttributeSource.AttributeFactory#DEFAULT_ATTRIBUTE_FACTORY"/> as the
 		/// factory for all other attributes. </summary>
 		/// <param name="collator"> CollationKey generator </param>
 		public CollationAttributeFactory(Collator collator) : this(AttributeSource.AttributeFactory.DEFAULT_ATTRIBUTE_FACTORY,
collator)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Collation/CollationKeyAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Collation/CollationKeyAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Collation/CollationKeyAnalyzer.cs
index b76e520..4eabd4d 100644
--- a/src/Lucene.Net.Analysis.Common/Collation/CollationKeyAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Collation/CollationKeyAnalyzer.cs
@@ -26,18 +26,18 @@ namespace Lucene.Net.Collation
 
     /// <summary>
     /// <para>
-    ///   Configures <seealso cref="KeywordTokenizer"/> with <seealso cref="CollationAttributeFactory"/>.
+    ///   Configures <see cref="KeywordTokenizer"/> with <see cref="CollationAttributeFactory"/>.
     /// </para>
     /// <para>
-    ///   Converts the token into its <seealso cref="java.text.CollationKey"/>, and
then
+    ///   Converts the token into its <see cref="java.text.CollationKey"/>, and then
     ///   encodes the CollationKey either directly or with 
-    ///   <seealso cref="IndexableBinaryStringTools"/> (see <a href="#version">below</a>),
to allow 
+    ///   <see cref="IndexableBinaryStringTools"/> (see <a href="#version">below</a>),
to allow 
     ///   it to be stored as an index term.
     /// </para>
     /// <para>
     ///   <strong>WARNING:</strong> Make sure you use exactly the same Collator
at
     ///   index and query time -- CollationKeys are only comparable when produced by
-    ///   the same Collator.  Since <seealso cref="java.text.RuleBasedCollator"/>s
are not
+    ///   the same Collator.  Since <see cref="java.text.RuleBasedCollator"/>s are
not
     ///   independently versioned, it is unsafe to search against stored
     ///   CollationKeys unless the following are exactly the same (best practice is
     ///   to store this information with the index and check that they remain the
@@ -49,10 +49,10 @@ namespace Lucene.Net.Collation
     ///   <li>
     ///     The language (and country and variant, if specified) of the Locale
     ///     used when constructing the collator via
-    ///     <seealso cref="Collator#getInstance(java.util.Locale)"/>.
+    ///     <see cref="Collator#getInstance(java.util.Locale)"/>.
     ///   </li>
     ///   <li>
-    ///     The collation strength used - see <seealso cref="Collator#setStrength(int)"/>
+    ///     The collation strength used - see <see cref="Collator#setStrength(int)"/>
     ///   </li>
     /// </ol> 
     /// <para>
@@ -73,11 +73,11 @@ namespace Lucene.Net.Collation
     ///   ICUCollationKeyAnalyzer on the query side, or vice versa.
     /// </para>
     /// <a name="version"/>
-    /// <para>You must specify the required <seealso cref="Version"/>
+    /// <para>You must specify the required <see cref="LuceneVersion"/>
     /// compatibility when creating CollationKeyAnalyzer:
     /// <ul>
     ///   <li> As of 4.0, Collation Keys are directly encoded as bytes. Previous
-    ///   versions will encode the bytes with <seealso cref="IndexableBinaryStringTools"/>.
+    ///   versions will encode the bytes with <see cref="IndexableBinaryStringTools"/>.
     /// </ul>
     /// </para>
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Collation/CollationKeyFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Collation/CollationKeyFilter.cs b/src/Lucene.Net.Analysis.Common/Collation/CollationKeyFilter.cs
index 6e684c1..477e524 100644
--- a/src/Lucene.Net.Analysis.Common/Collation/CollationKeyFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Collation/CollationKeyFilter.cs
@@ -25,14 +25,14 @@ namespace Lucene.Net.Collation
 
 	/// <summary>
 	/// <para>
-	///   Converts each token into its <seealso cref="java.text.CollationKey"/>, and then
-	///   encodes the CollationKey with <seealso cref="IndexableBinaryStringTools"/>, to allow 
+	///   Converts each token into its <see cref="java.text.CollationKey"/>, and then
+	///   encodes the CollationKey with <see cref="IndexableBinaryStringTools"/>, to allow 
 	///   it to be stored as an index term.
 	/// </para>
 	/// <para>
 	///   <strong>WARNING:</strong> Make sure you use exactly the same Collator at
 	///   index and query time -- CollationKeys are only comparable when produced by
-	///   the same Collator.  Since <seealso cref="java.text.RuleBasedCollator"/>s are not
+	///   the same Collator.  Since <see cref="java.text.RuleBasedCollator"/>s are not
 	///   independently versioned, it is unsafe to search against stored
 	///   CollationKeys unless the following are exactly the same (best practice is
 	///   to store this information with the index and check that they remain the
@@ -44,10 +44,10 @@ namespace Lucene.Net.Collation
 	///   <li>
 	///     The language (and country and variant, if specified) of the Locale
 	///     used when constructing the collator via
-	///     <seealso cref="Collator#getInstance(CultureInfo)"/>.
+	///     <see cref="Collator#getInstance(CultureInfo)"/>.
 	///   </li>
 	///   <li>
-	///     The collation strength used - see <seealso cref="Collator#setStrength(int)"/>
+	///     The collation strength used - see <see cref="Collator#setStrength(int)"/>
 	///   </li>
 	/// </ol> 
 	/// <para>
@@ -67,7 +67,7 @@ namespace Lucene.Net.Collation
 	///   CollationKeyFilter to generate index terms, do not use
 	///   ICUCollationKeyFilter on the query side, or vice versa.
 	/// </para> </summary>
-	/// @deprecated Use <seealso cref="CollationAttributeFactory"/> instead, which encodes
+	/// @deprecated Use <see cref="CollationAttributeFactory"/> instead, which encodes
 	///  terms directly as bytes. This filter will be removed in Lucene 5.0 
 	[Obsolete("Use <seealso cref=\"CollationAttributeFactory\"/> instead, which encodes")]
     // LUCENENET TODO: A better option would be to contribute to the icu.net library and

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Collation/CollationKeyFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Collation/CollationKeyFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Collation/CollationKeyFilterFactory.cs
index 4d5ab2a..d5e53a1 100644
--- a/src/Lucene.Net.Analysis.Common/Collation/CollationKeyFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Collation/CollationKeyFilterFactory.cs
@@ -30,7 +30,7 @@ namespace Lucene.Net.Collation
      */
 
 	/// <summary>
-	/// Factory for <seealso cref="CollationKeyFilter"/>.
+	/// Factory for <see cref="CollationKeyFilter"/>.
 	/// <para>
 	/// This factory can be created in two ways: 
 	/// <ul>
@@ -62,15 +62,15 @@ namespace Lucene.Net.Collation
 	///     &lt;tokenizer class="solr.KeywordTokenizerFactory"/&gt;
 	///     &lt;filter class="solr.CollationKeyFilterFactory" language="ja" country="JP"/&gt;
 	///   &lt;/analyzer&gt;
-	/// &lt;/fieldType&gt;</pre>
+	/// &lt;/fieldType&gt;</code>
 	/// 
 	/// </para>
 	/// </summary>
-	/// <seealso cref="Collator"></seealso>
-	/// <seealso cref="CultureInfo"></seealso>
-	/// <seealso cref="RuleBasedCollator">
+	/// <see cref="Collator"/>
+	/// <see cref="CultureInfo"/>
+	/// <see cref="RuleBasedCollator"/>
 	/// @since solr 3.1
-	/// @deprecated use <seealso cref="CollationKeyAnalyzer"/> instead. 
+	/// @deprecated use <see cref="CollationKeyAnalyzer"/> instead. 
 	[Obsolete("use <seealso cref=\"CollationKeyAnalyzer\"/> instead.")]
 	public class CollationKeyFilterFactory : TokenFilterFactory, IMultiTermAwareComponent, IResourceLoaderAware
 	{

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Collation/TokenAttributes/CollatedTermAttributeImpl.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Collation/TokenAttributes/CollatedTermAttributeImpl.cs b/src/Lucene.Net.Analysis.Common/Collation/TokenAttributes/CollatedTermAttributeImpl.cs
index a29a5e8..ceebafb 100644
--- a/src/Lucene.Net.Analysis.Common/Collation/TokenAttributes/CollatedTermAttributeImpl.cs
+++ b/src/Lucene.Net.Analysis.Common/Collation/TokenAttributes/CollatedTermAttributeImpl.cs
@@ -22,7 +22,7 @@ namespace Lucene.Net.Collation.TokenAttributes
      */
 
     /// <summary>
-    /// Extension of <seealso cref="CharTermAttribute"/> that encodes the term
+    /// Extension of <see cref="CharTermAttribute"/> that encodes the term
     /// text as a binary Unicode collation key instead of as UTF-8 bytes.
     /// </summary>
     // LUCENENET TODO: A better option would be to contribute to the icu.net library and


Mime
View raw message