lucenenet-commits mailing list archives

From nightowl...@apache.org
Subject [07/13] lucenenet git commit: Lucene.Net.Analysis.Common: find and replace for document comments - <pre class="prettyprint"> > <code>, </pre> > </code>, <seealso cref=" > <see cref=", org.apache.lucene.analysis.Analyzer.TokenStreamComponents > Analyzer.T
Date Fri, 03 Feb 2017 04:41:54 GMT
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchStemmer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchStemmer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchStemmer.cs
index 0d0a357..ce3cc41 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchStemmer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchStemmer.cs
@@ -29,7 +29,7 @@ namespace Lucene.Net.Analysis.Fr
     /// refer to http://snowball.sourceforge.net/french/stemmer.html<br>
     /// (French stemming algorithm) for details
     /// </para> </summary>
-    /// @deprecated Use <seealso cref="org.tartarus.snowball.ext.FrenchStemmer"/> instead, 
+    /// @deprecated Use <see cref="org.tartarus.snowball.ext.FrenchStemmer"/> instead, 
     /// which has the same functionality. This filter will be removed in Lucene 4.0 
     [Obsolete("Use FrenchStemmer instead, which has the same functionality.")]
     public class FrenchStemmer

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Ga/IrishAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ga/IrishAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ga/IrishAnalyzer.cs
index 5dfd573..e1e7e6e 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ga/IrishAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ga/IrishAnalyzer.cs
@@ -29,7 +29,7 @@ namespace Lucene.Net.Analysis.Ga
 	 */
 
     /// <summary>
-    /// <seealso cref="Analyzer"/> for Irish.
+    /// <see cref="Analyzer"/> for Irish.
     /// </summary>
     public sealed class IrishAnalyzer : StopwordAnalyzerBase
     {
@@ -92,7 +92,7 @@ namespace Lucene.Net.Analysis.Ga
         }
 
         /// <summary>
-        /// Builds an analyzer with the default stop words: <seealso cref="#DEFAULT_STOPWORD_FILE"/>.
+        /// Builds an analyzer with the default stop words: <see cref="#DEFAULT_STOPWORD_FILE"/>.
         /// </summary>
         public IrishAnalyzer(LuceneVersion matchVersion)
               : this(matchVersion, DefaultSetHolder.DEFAULT_STOP_SET)
@@ -111,7 +111,7 @@ namespace Lucene.Net.Analysis.Ga
 
         /// <summary>
         /// Builds an analyzer with the given stop words. If a non-empty stem exclusion set is
-        /// provided this analyzer will add a <seealso cref="SetKeywordMarkerFilter"/> before
+        /// provided this analyzer will add a <see cref="SetKeywordMarkerFilter"/> before
         /// stemming.
         /// </summary>
         /// <param name="matchVersion"> lucene compatibility version </param>
@@ -125,15 +125,15 @@ namespace Lucene.Net.Analysis.Ga
 
         /// <summary>
         /// Creates a
-        /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
-        /// which tokenizes all the text in the provided <seealso cref="Reader"/>.
+        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// which tokenizes all the text in the provided <see cref="Reader"/>.
         /// </summary>
         /// <returns> A
-        ///         <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
-        ///         built from an <seealso cref="StandardTokenizer"/> filtered with
-        ///         <seealso cref="StandardFilter"/>, <seealso cref="IrishLowerCaseFilter"/>, <seealso cref="StopFilter"/>
-        ///         , <seealso cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
-        ///         provided and <seealso cref="SnowballFilter"/>. </returns>
+        ///         <see cref="Analyzer.TokenStreamComponents"/>
+        ///         built from an <see cref="StandardTokenizer"/> filtered with
+        ///         <see cref="StandardFilter"/>, <see cref="IrishLowerCaseFilter"/>, <see cref="StopFilter"/>
+        ///         , <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
+        ///         provided and <see cref="SnowballFilter"/>. </returns>
         protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
         {
             Tokenizer source = new StandardTokenizer(m_matchVersion, reader);
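For readers following the TokenStreamComponents chain described in the doc comment above, the method body continues roughly as follows. This is an illustrative sketch only, not the committed code: the m_stopwords field name and the IrishStemmer type are assumptions, and the analyzer's elision/hyphenation handling is omitted.

    TokenStream result = new StandardFilter(m_matchVersion, source);      // post-tokenization cleanup
    result = new IrishLowerCaseFilter(result);                            // Irish-specific lowercasing
    result = new StopFilter(m_matchVersion, result, m_stopwords);         // drop Irish stop words (field name assumed)
    if (stemExclusionSet.Count > 0)
    {
        result = new SetKeywordMarkerFilter(result, stemExclusionSet);    // protect excluded terms from stemming
    }
    result = new SnowballFilter(result, new IrishStemmer());              // Snowball-based stemming (stemmer type assumed)
    return new TokenStreamComponents(source, result);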

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Ga/IrishLowerCaseFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ga/IrishLowerCaseFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ga/IrishLowerCaseFilterFactory.cs
index d6ae608..c292cd5 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ga/IrishLowerCaseFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ga/IrishLowerCaseFilterFactory.cs
@@ -21,14 +21,14 @@ namespace Lucene.Net.Analysis.Ga
 	 */
 
     /// <summary>
-    /// Factory for <seealso cref="IrishLowerCaseFilter"/>. 
-    /// <pre class="prettyprint">
+    /// Factory for <see cref="IrishLowerCaseFilter"/>. 
+    /// <code>
     /// &lt;fieldType name="text_ga" class="solr.TextField" positionIncrementGap="100"&gt;
     ///   &lt;analyzer&gt;
     ///     &lt;tokenizer class="solr.StandardTokenizerFactory"/&gt;
     ///     &lt;filter class="solr.IrishLowerCaseFilterFactory"/&gt;
     ///   &lt;/analyzer&gt;
-    /// &lt;/fieldType&gt;</pre>
+    /// &lt;/fieldType&gt;</code>
     /// </summary>
     public class IrishLowerCaseFilterFactory : TokenFilterFactory, IMultiTermAwareComponent
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianAnalyzer.cs
index c996ef4..9b471bc 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianAnalyzer.cs
@@ -27,7 +27,7 @@ namespace Lucene.Net.Analysis.Gl
 	 */
 
     /// <summary>
-    /// <seealso cref="Analyzer"/> for Galician.
+    /// <see cref="Analyzer"/> for Galician.
     /// </summary>
     public sealed class GalicianAnalyzer : StopwordAnalyzerBase
     {
@@ -77,7 +77,7 @@ namespace Lucene.Net.Analysis.Gl
         }
 
         /// <summary>
-        /// Builds an analyzer with the default stop words: <seealso cref="#DEFAULT_STOPWORD_FILE"/>.
+        /// Builds an analyzer with the default stop words: <see cref="#DEFAULT_STOPWORD_FILE"/>.
         /// </summary>
         public GalicianAnalyzer(LuceneVersion matchVersion)
               : this(matchVersion, DefaultSetHolder.DEFAULT_STOP_SET)
@@ -96,7 +96,7 @@ namespace Lucene.Net.Analysis.Gl
 
         /// <summary>
         /// Builds an analyzer with the given stop words. If a non-empty stem exclusion set is
-        /// provided this analyzer will add a <seealso cref="SetKeywordMarkerFilter"/> before
+        /// provided this analyzer will add a <see cref="SetKeywordMarkerFilter"/> before
         /// stemming.
         /// </summary>
         /// <param name="matchVersion"> lucene compatibility version </param>
@@ -110,15 +110,15 @@ namespace Lucene.Net.Analysis.Gl
 
         /// <summary>
         /// Creates a
-        /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
-        /// which tokenizes all the text in the provided <seealso cref="Reader"/>.
+        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// which tokenizes all the text in the provided <see cref="Reader"/>.
         /// </summary>
         /// <returns> A
-        ///         <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
-        ///         built from an <seealso cref="StandardTokenizer"/> filtered with
-        ///         <seealso cref="StandardFilter"/>, <seealso cref="LowerCaseFilter"/>, <seealso cref="StopFilter"/>
-        ///         , <seealso cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
-        ///         provided and <seealso cref="GalicianStemFilter"/>. </returns>
+        ///         <see cref="Analyzer.TokenStreamComponents"/>
+        ///         built from an <see cref="StandardTokenizer"/> filtered with
+        ///         <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>
+        ///         , <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
+        ///         provided and <see cref="GalicianStemFilter"/>. </returns>
         protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
         {
             Tokenizer source = new StandardTokenizer(m_matchVersion, reader);

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianMinimalStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianMinimalStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianMinimalStemFilter.cs
index e1952ad..2b67926 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianMinimalStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianMinimalStemFilter.cs
@@ -20,12 +20,12 @@ namespace Lucene.Net.Analysis.Gl
 	 */
 
     /// <summary>
-    /// A <seealso cref="TokenFilter"/> that applies <seealso cref="GalicianMinimalStemmer"/> to stem 
+    /// A <see cref="TokenFilter"/> that applies <see cref="GalicianMinimalStemmer"/> to stem 
     /// Galician words.
     /// <para>
     /// To prevent terms from being stemmed use an instance of
-    /// <seealso cref="SetKeywordMarkerFilter"/> or a custom <seealso cref="TokenFilter"/> that sets
-    /// the <seealso cref="KeywordAttribute"/> before this <seealso cref="TokenStream"/>.
+    /// <see cref="SetKeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
+    /// the <see cref="KeywordAttribute"/> before this <see cref="TokenStream"/>.
     /// </para>
     /// </summary>
     public sealed class GalicianMinimalStemFilter : TokenFilter
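The keyword-marker pattern referenced throughout these stem filter docs looks roughly like this in practice. A minimal sketch, assuming a TextReader named reader, the LUCENE_48 version constant, and an arbitrary protected-term list; types come from Lucene.Net.Analysis.Util, Lucene.Net.Analysis.Miscellaneous, and Lucene.Net.Analysis.Standard.

    CharArraySet protectedTerms = new CharArraySet(LuceneVersion.LUCENE_48, new[] { "galego" }, true);
    TokenStream stream = new StandardTokenizer(LuceneVersion.LUCENE_48, reader);
    stream = new SetKeywordMarkerFilter(stream, protectedTerms);   // sets KeywordAttribute on matching terms
    stream = new GalicianMinimalStemFilter(stream);                // keyword-marked terms pass through unstemmed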

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianMinimalStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianMinimalStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianMinimalStemFilterFactory.cs
index 3954829..3fb2221 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianMinimalStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianMinimalStemFilterFactory.cs
@@ -21,15 +21,15 @@ namespace Lucene.Net.Analysis.Gl
 	 */
 
     /// <summary>
-    /// Factory for <seealso cref="GalicianMinimalStemFilter"/>. 
-    /// <pre class="prettyprint">
+    /// Factory for <see cref="GalicianMinimalStemFilter"/>. 
+    /// <code>
     /// &lt;fieldType name="text_glplural" class="solr.TextField" positionIncrementGap="100"&gt;
     ///   &lt;analyzer&gt;
     ///     &lt;tokenizer class="solr.StandardTokenizerFactory"/&gt;
     ///     &lt;filter class="solr.LowerCaseFilterFactory"/&gt;
     ///     &lt;filter class="solr.GalicianMinimalStemFilterFactory"/&gt;
     ///   &lt;/analyzer&gt;
-    /// &lt;/fieldType&gt;</pre>
+    /// &lt;/fieldType&gt;</code>
     /// </summary>
     public class GalicianMinimalStemFilterFactory : TokenFilterFactory
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianStemFilter.cs
index ed11084..3db897e 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianStemFilter.cs
@@ -20,12 +20,12 @@ namespace Lucene.Net.Analysis.Gl
 	 */
 
     /// <summary>
-    /// A <seealso cref="TokenFilter"/> that applies <seealso cref="GalicianStemmer"/> to stem 
+    /// A <see cref="TokenFilter"/> that applies <see cref="GalicianStemmer"/> to stem 
     /// Galician words.
     /// <para>
     /// To prevent terms from being stemmed use an instance of
-    /// <seealso cref="SetKeywordMarkerFilter"/> or a custom <seealso cref="TokenFilter"/> that sets
-    /// the <seealso cref="KeywordAttribute"/> before this <seealso cref="TokenStream"/>.
+    /// <see cref="SetKeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
+    /// the <see cref="KeywordAttribute"/> before this <see cref="TokenStream"/>.
     /// </para>
     /// </summary>
     public sealed class GalicianStemFilter : TokenFilter

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianStemFilterFactory.cs
index 36fe800..2c47784 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianStemFilterFactory.cs
@@ -21,15 +21,15 @@ namespace Lucene.Net.Analysis.Gl
 	 */
 
     /// <summary>
-    /// Factory for <seealso cref="GalicianStemFilter"/>. 
-    /// <pre class="prettyprint">
+    /// Factory for <see cref="GalicianStemFilter"/>. 
+    /// <code>
     /// &lt;fieldType name="text_glstem" class="solr.TextField" positionIncrementGap="100"&gt;
     ///   &lt;analyzer&gt;
     ///     &lt;tokenizer class="solr.StandardTokenizerFactory"/&gt;
     ///     &lt;filter class="solr.LowerCaseFilterFactory"/&gt;
     ///     &lt;filter class="solr.GalicianStemFilterFactory"/&gt;
     ///   &lt;/analyzer&gt;
-    /// &lt;/fieldType&gt;</pre>
+    /// &lt;/fieldType&gt;</code>
     /// </summary>
     public class GalicianStemFilterFactory : TokenFilterFactory
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiAnalyzer.cs
index 3888b73..49a1d01 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiAnalyzer.cs
@@ -31,7 +31,7 @@ namespace Lucene.Net.Analysis.Hi
     /// <para>
     /// <a name="version"/>
     /// </para>
-    /// <para>You must specify the required <seealso cref="Version"/>
+    /// <para>You must specify the required <see cref="LuceneVersion"/>
     /// compatibility when creating HindiAnalyzer:
     /// <ul>
     ///   <li> As of 3.6, StandardTokenizer is used for tokenization
@@ -110,7 +110,7 @@ namespace Lucene.Net.Analysis.Hi
 
         /// <summary>
         /// Builds an analyzer with the default stop words:
-        /// <seealso cref="#DEFAULT_STOPWORD_FILE"/>.
+        /// <see cref="#DEFAULT_STOPWORD_FILE"/>.
         /// </summary>
         public HindiAnalyzer(LuceneVersion version)
               : this(version, DefaultSetHolder.DEFAULT_STOP_SET)
@@ -119,14 +119,14 @@ namespace Lucene.Net.Analysis.Hi
 
         /// <summary>
         /// Creates
-        /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
-        /// used to tokenize all the text in the provided <seealso cref="Reader"/>.
+        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// used to tokenize all the text in the provided <see cref="Reader"/>.
         /// </summary>
-        /// <returns> <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
-        ///         built from a <seealso cref="StandardTokenizer"/> filtered with
-        ///         <seealso cref="LowerCaseFilter"/>, <seealso cref="IndicNormalizationFilter"/>,
-        ///         <seealso cref="HindiNormalizationFilter"/>, <seealso cref="SetKeywordMarkerFilter"/>
-        ///         if a stem exclusion set is provided, <seealso cref="HindiStemFilter"/>, and
+        /// <returns> <see cref="Analyzer.TokenStreamComponents"/>
+        ///         built from a <see cref="StandardTokenizer"/> filtered with
+        ///         <see cref="LowerCaseFilter"/>, <see cref="IndicNormalizationFilter"/>,
+        ///         <see cref="HindiNormalizationFilter"/>, <see cref="SetKeywordMarkerFilter"/>
+        ///         if a stem exclusion set is provided, <see cref="HindiStemFilter"/>, and
         ///         Hindi Stop words </returns>
         protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
         {
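As the version-compatibility note in this hunk indicates, callers construct the analyzer with an explicit LuceneVersion; a minimal sketch using the single-argument constructor shown above (LUCENE_48 is only an example constant):

    Analyzer analyzer = new HindiAnalyzer(LuceneVersion.LUCENE_48);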

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiNormalizationFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiNormalizationFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiNormalizationFilter.cs
index 7502b65..8600e0d 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiNormalizationFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiNormalizationFilter.cs
@@ -21,13 +21,13 @@ namespace Lucene.Net.Analysis.Hi
 	 */
 
     /// <summary>
-    /// A <seealso cref="TokenFilter"/> that applies <seealso cref="HindiNormalizer"/> to normalize the
+    /// A <see cref="TokenFilter"/> that applies <see cref="HindiNormalizer"/> to normalize the
     /// orthography.
     /// <para>
     /// In some cases the normalization may cause unrelated terms to conflate, so
     /// to prevent terms from being normalized use an instance of
-    /// <seealso cref="SetKeywordMarkerFilter"/> or a custom <seealso cref="TokenFilter"/> that sets
-    /// the <seealso cref="KeywordAttribute"/> before this <seealso cref="TokenStream"/>.
+    /// <see cref="SetKeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
+    /// the <see cref="KeywordAttribute"/> before this <see cref="TokenStream"/>.
     /// </para> </summary>
     /// <seealso cref= HindiNormalizer </seealso>
     public sealed class HindiNormalizationFilter : TokenFilter

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiNormalizationFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiNormalizationFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiNormalizationFilterFactory.cs
index 4351770..70f7175 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiNormalizationFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiNormalizationFilterFactory.cs
@@ -22,14 +22,14 @@ namespace Lucene.Net.Analysis.Hi
 	 */
 
     /// <summary>
-    /// Factory for <seealso cref="HindiNormalizationFilter"/>. 
-    /// <pre class="prettyprint">
+    /// Factory for <see cref="HindiNormalizationFilter"/>. 
+    /// <code>
     /// &lt;fieldType name="text_hinormal" class="solr.TextField" positionIncrementGap="100"&gt;
     ///   &lt;analyzer&gt;
     ///     &lt;tokenizer class="solr.StandardTokenizerFactory"/&gt;
     ///     &lt;filter class="solr.HindiNormalizationFilterFactory"/&gt;
     ///   &lt;/analyzer&gt;
-    /// &lt;/fieldType&gt;</pre>
+    /// &lt;/fieldType&gt;</code>
     /// </summary>
     public class HindiNormalizationFilterFactory : TokenFilterFactory, IMultiTermAwareComponent
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiStemFilter.cs
index ff9981f..ac11063 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiStemFilter.cs
@@ -21,7 +21,7 @@ namespace Lucene.Net.Analysis.Hi
 	 */
 
     /// <summary>
-    /// A <seealso cref="TokenFilter"/> that applies <seealso cref="HindiStemmer"/> to stem Hindi words.
+    /// A <see cref="TokenFilter"/> that applies <see cref="HindiStemmer"/> to stem Hindi words.
     /// </summary>
     public sealed class HindiStemFilter : TokenFilter
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiStemFilterFactory.cs
index 1a037c1..cdd897e 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiStemFilterFactory.cs
@@ -22,14 +22,14 @@ namespace Lucene.Net.Analysis.Hi
 	 */
 
     /// <summary>
-    /// Factory for <seealso cref="HindiStemFilter"/>. 
-    /// <pre class="prettyprint">
+    /// Factory for <see cref="HindiStemFilter"/>. 
+    /// <code>
     /// &lt;fieldType name="text_histem" class="solr.TextField" positionIncrementGap="100"&gt;
     ///   &lt;analyzer&gt;
     ///     &lt;tokenizer class="solr.StandardTokenizerFactory"/&gt;
     ///     &lt;filter class="solr.HindiStemFilterFactory"/&gt;
     ///   &lt;/analyzer&gt;
-    /// &lt;/fieldType&gt;</pre>
+    /// &lt;/fieldType&gt;</code>
     /// </summary>
     public class HindiStemFilterFactory : TokenFilterFactory
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Hu/HungarianAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Hu/HungarianAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Hu/HungarianAnalyzer.cs
index 57169e9..46fc2ec 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Hu/HungarianAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Hu/HungarianAnalyzer.cs
@@ -29,7 +29,7 @@ namespace Lucene.Net.Analysis.Hu
 	 */
 
     /// <summary>
-    /// <seealso cref="Analyzer"/> for Hungarian.
+    /// <see cref="Analyzer"/> for Hungarian.
     /// </summary>
     public sealed class HungarianAnalyzer : StopwordAnalyzerBase
     {
@@ -79,7 +79,7 @@ namespace Lucene.Net.Analysis.Hu
         }
 
         /// <summary>
-        /// Builds an analyzer with the default stop words: <seealso cref="#DEFAULT_STOPWORD_FILE"/>.
+        /// Builds an analyzer with the default stop words: <see cref="#DEFAULT_STOPWORD_FILE"/>.
         /// </summary>
         public HungarianAnalyzer(LuceneVersion matchVersion)
               : this(matchVersion, DefaultSetHolder.DEFAULT_STOP_SET)
@@ -98,7 +98,7 @@ namespace Lucene.Net.Analysis.Hu
 
         /// <summary>
         /// Builds an analyzer with the given stop words. If a non-empty stem exclusion set is
-        /// provided this analyzer will add a <seealso cref="SetKeywordMarkerFilter"/> before
+        /// provided this analyzer will add a <see cref="SetKeywordMarkerFilter"/> before
         /// stemming.
         /// </summary>
         /// <param name="matchVersion"> lucene compatibility version </param>
@@ -112,15 +112,15 @@ namespace Lucene.Net.Analysis.Hu
 
         /// <summary>
         /// Creates a
-        /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
-        /// which tokenizes all the text in the provided <seealso cref="Reader"/>.
+        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// which tokenizes all the text in the provided <see cref="Reader"/>.
         /// </summary>
         /// <returns> A
-        ///         <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
-        ///         built from an <seealso cref="StandardTokenizer"/> filtered with
-        ///         <seealso cref="StandardFilter"/>, <seealso cref="LowerCaseFilter"/>, <seealso cref="StopFilter"/>
-        ///         , <seealso cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
-        ///         provided and <seealso cref="SnowballFilter"/>. </returns>
+        ///         <see cref="Analyzer.TokenStreamComponents"/>
+        ///         built from an <see cref="StandardTokenizer"/> filtered with
+        ///         <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>
+        ///         , <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
+        ///         provided and <see cref="SnowballFilter"/>. </returns>
         protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
         {
             Tokenizer source = new StandardTokenizer(m_matchVersion, reader);

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Hu/HungarianLightStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Hu/HungarianLightStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Hu/HungarianLightStemFilter.cs
index 5dd0e49..43f33a1 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Hu/HungarianLightStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Hu/HungarianLightStemFilter.cs
@@ -21,12 +21,12 @@ namespace Lucene.Net.Analysis.Hu
 	 */
 
     /// <summary>
-    /// A <seealso cref="TokenFilter"/> that applies <seealso cref="HungarianLightStemmer"/> to stem
+    /// A <see cref="TokenFilter"/> that applies <see cref="HungarianLightStemmer"/> to stem
     /// Hungarian words.
     /// <para>
     /// To prevent terms from being stemmed use an instance of
-    /// <seealso cref="SetKeywordMarkerFilter"/> or a custom <seealso cref="TokenFilter"/> that sets
-    /// the <seealso cref="KeywordAttribute"/> before this <seealso cref="TokenStream"/>.
+    /// <see cref="SetKeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
+    /// the <see cref="KeywordAttribute"/> before this <see cref="TokenStream"/>.
     /// </para>
     /// </summary>
     public sealed class HungarianLightStemFilter : TokenFilter

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Hu/HungarianLightStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Hu/HungarianLightStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Hu/HungarianLightStemFilterFactory.cs
index d8f7c54..2664d63 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Hu/HungarianLightStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Hu/HungarianLightStemFilterFactory.cs
@@ -22,15 +22,15 @@ namespace Lucene.Net.Analysis.Hu
 	 */
 
     /// <summary>
-    /// Factory for <seealso cref="HungarianLightStemFilter"/>. 
-    /// <pre class="prettyprint">
+    /// Factory for <see cref="HungarianLightStemFilter"/>. 
+    /// <code>
     /// &lt;fieldType name="text_hulgtstem" class="solr.TextField" positionIncrementGap="100"&gt;
     ///   &lt;analyzer&gt;
     ///     &lt;tokenizer class="solr.StandardTokenizerFactory"/&gt;
     ///     &lt;filter class="solr.LowerCaseFilterFactory"/&gt;
     ///     &lt;filter class="solr.HungarianLightStemFilterFactory"/&gt;
     ///   &lt;/analyzer&gt;
-    /// &lt;/fieldType&gt;</pre>
+    /// &lt;/fieldType&gt;</code>
     /// </summary>
     public class HungarianLightStemFilterFactory : TokenFilterFactory
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/Dictionary.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/Dictionary.cs b/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/Dictionary.cs
index af966d1..8795529 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/Dictionary.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/Dictionary.cs
@@ -396,7 +396,7 @@ namespace Lucene.Net.Analysis.Hunspell
         /// <param name="affixes"> Map where the result of the parsing will be put </param>
         /// <param name="header"> Header line of the affix rule </param>
         /// <param name="reader"> BufferedReader to read the content of the rule from </param>
-        /// <param name="conditionPattern"> <seealso cref="String#format(String, Object...)"/> pattern to be used to generate the condition regex
+        /// <param name="conditionPattern"> <see cref="String#format(String, Object...)"/> pattern to be used to generate the condition regex
         ///                         pattern </param>
         /// <param name="seenPatterns"> map from condition -> index of patterns, for deduplication. </param>
         /// <exception cref="IOException"> Can be thrown while reading the rule </exception>
@@ -675,7 +675,7 @@ namespace Lucene.Net.Analysis.Hunspell
 
 
         /// <summary>
-        /// Determines the appropriate <seealso cref="FlagParsingStrategy"/> based on the FLAG definition line taken from the affix file
+        /// Determines the appropriate <see cref="FlagParsingStrategy"/> based on the FLAG definition line taken from the affix file
         /// </summary>
         /// <param name="flagLine"> Line containing the flag information </param>
         /// <returns> FlagParsingStrategy that handles parsing flags in the way specified in the FLAG definition </returns>
@@ -1036,7 +1036,7 @@ namespace Lucene.Net.Analysis.Hunspell
         }
 
         /// <summary>
-        /// Simple implementation of <seealso cref="FlagParsingStrategy"/> that treats the chars in each String as a individual flags.
+        /// Simple implementation of <see cref="FlagParsingStrategy"/> that treats the chars in each String as a individual flags.
         /// Can be used with both the ASCII and UTF-8 flag types.
         /// </summary>
         private class SimpleFlagParsingStrategy : FlagParsingStrategy
@@ -1048,7 +1048,7 @@ namespace Lucene.Net.Analysis.Hunspell
         }
 
         /// <summary>
-        /// Implementation of <seealso cref="FlagParsingStrategy"/> that assumes each flag is encoded in its numerical form.  In the case
+        /// Implementation of <see cref="FlagParsingStrategy"/> that assumes each flag is encoded in its numerical form.  In the case
         /// of multiple flags, each number is separated by a comma.
         /// </summary>
         private class NumFlagParsingStrategy : FlagParsingStrategy
@@ -1080,7 +1080,7 @@ namespace Lucene.Net.Analysis.Hunspell
         }
 
         /// <summary>
-        /// Implementation of <seealso cref="FlagParsingStrategy"/> that assumes each flag is encoded as two ASCII characters whose codes
+        /// Implementation of <see cref="FlagParsingStrategy"/> that assumes each flag is encoded as two ASCII characters whose codes
         /// must be combined into a single character.
         /// 
         /// TODO (rmuir) test

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/HunspellStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/HunspellStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/HunspellStemFilter.cs
index 1d9c70f..da38ef8 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/HunspellStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/HunspellStemFilter.cs
@@ -26,13 +26,13 @@ namespace Lucene.Net.Analysis.Hunspell
     /// stems, this filter can emit multiple tokens for each consumed token
     /// 
     /// <para>
-    /// Note: This filter is aware of the <seealso cref="KeywordAttribute"/>. To prevent
+    /// Note: This filter is aware of the <see cref="KeywordAttribute"/>. To prevent
     /// certain terms from being passed to the stemmer
-    /// <seealso cref="KeywordAttribute#isKeyword()"/> should be set to <code>true</code>
-    /// in a previous <seealso cref="TokenStream"/>.
+    /// <see cref="KeywordAttribute#isKeyword()"/> should be set to <code>true</code>
+    /// in a previous <see cref="TokenStream"/>.
     /// 
     /// Note: For including the original term as well as the stemmed version, see
-    /// <seealso cref="org.apache.lucene.analysis.miscellaneous.KeywordRepeatFilterFactory"/>
+    /// <see cref="org.apache.lucene.analysis.miscellaneous.KeywordRepeatFilterFactory"/>
     /// </para>
     /// 
     /// @lucene.experimental
@@ -52,7 +52,7 @@ namespace Lucene.Net.Analysis.Hunspell
         private readonly bool longestOnly;
 
         /// <summary>
-        /// Create a <seealso cref="HunspellStemFilter"/> outputting all possible stems. </summary>
+        /// Create a <see cref="HunspellStemFilter"/> outputting all possible stems. </summary>
         ///  <seealso cref= #HunspellStemFilter(TokenStream, Dictionary, boolean)  </seealso>
         public HunspellStemFilter(TokenStream input, Dictionary dictionary)
               : this(input, dictionary, true)
@@ -60,7 +60,7 @@ namespace Lucene.Net.Analysis.Hunspell
         }
 
         /// <summary>
-        /// Create a <seealso cref="HunspellStemFilter"/> outputting all possible stems. </summary>
+        /// Create a <see cref="HunspellStemFilter"/> outputting all possible stems. </summary>
         ///  <seealso cref= #HunspellStemFilter(TokenStream, Dictionary, boolean, boolean)  </seealso>
         public HunspellStemFilter(TokenStream input, Dictionary dictionary, bool dedup)
               : this(input, dictionary, dedup, false)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/HunspellStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/HunspellStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/HunspellStemFilterFactory.cs
index 4d720f4..4615260 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/HunspellStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Hunspell/HunspellStemFilterFactory.cs
@@ -24,14 +24,14 @@ namespace Lucene.Net.Analysis.Hunspell
 	 */
 
     /// <summary>
-    /// TokenFilterFactory that creates instances of <seealso cref="HunspellStemFilter"/>.
+    /// TokenFilterFactory that creates instances of <see cref="HunspellStemFilter"/>.
     /// Example config for British English:
-    /// <pre class="prettyprint">
+    /// <code>
     /// &lt;filter class=&quot;solr.HunspellStemFilterFactory&quot;
     ///         dictionary=&quot;en_GB.dic,my_custom.dic&quot;
     ///         affix=&quot;en_GB.aff&quot; 
     ///         ignoreCase=&quot;false&quot;
-    ///         longestOnly=&quot;false&quot; /&gt;</pre>
+    ///         longestOnly=&quot;false&quot; /&gt;</code>
     /// Both parameters dictionary and affix are mandatory.
     /// Dictionaries for many languages are available through the OpenOffice project.
     /// 
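To connect the factory configuration above with direct API use, a hypothetical construction sketch follows. The Dictionary(Stream, Stream) constructor signature, the file names, and the surrounding reader variable are assumptions; the HunspellStemFilter constructor is the one shown in the previous hunk.

    using (var affix = File.OpenRead("en_GB.aff"))
    using (var dic = File.OpenRead("en_GB.dic"))
    {
        var dictionary = new Dictionary(affix, dic);                         // Lucene.Net.Analysis.Hunspell.Dictionary
        TokenStream stream = new StandardTokenizer(LuceneVersion.LUCENE_48, reader);
        stream = new HunspellStemFilter(stream, dictionary);                 // emits all possible stems; dedup defaults to true
    }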

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Hy/ArmenianAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Hy/ArmenianAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Hy/ArmenianAnalyzer.cs
index 911705b..bec70b7 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Hy/ArmenianAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Hy/ArmenianAnalyzer.cs
@@ -28,7 +28,7 @@ namespace Lucene.Net.Analysis.Hy
 	 */
 
     /// <summary>
-    /// <seealso cref="Analyzer"/> for Armenian.
+    /// <see cref="Analyzer"/> for Armenian.
     /// </summary>
     public sealed class ArmenianAnalyzer : StopwordAnalyzerBase
     {
@@ -74,7 +74,7 @@ namespace Lucene.Net.Analysis.Hy
         }
 
         /// <summary>
-        /// Builds an analyzer with the default stop words: <seealso cref="#DEFAULT_STOPWORD_FILE"/>.
+        /// Builds an analyzer with the default stop words: <see cref="#DEFAULT_STOPWORD_FILE"/>.
         /// </summary>
         public ArmenianAnalyzer(LuceneVersion matchVersion)
               : this(matchVersion, DefaultSetHolder.DEFAULT_STOP_SET)
@@ -93,7 +93,7 @@ namespace Lucene.Net.Analysis.Hy
 
         /// <summary>
         /// Builds an analyzer with the given stop words. If a non-empty stem exclusion set is
-        /// provided this analyzer will add a <seealso cref="SetKeywordMarkerFilter"/> before
+        /// provided this analyzer will add a <see cref="SetKeywordMarkerFilter"/> before
         /// stemming.
         /// </summary>
         /// <param name="matchVersion"> lucene compatibility version </param>
@@ -107,15 +107,15 @@ namespace Lucene.Net.Analysis.Hy
 
         /// <summary>
         /// Creates a
-        /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
-        /// which tokenizes all the text in the provided <seealso cref="Reader"/>.
+        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// which tokenizes all the text in the provided <see cref="Reader"/>.
         /// </summary>
         /// <returns> A
-        ///         <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
-        ///         built from an <seealso cref="StandardTokenizer"/> filtered with
-        ///         <seealso cref="StandardFilter"/>, <seealso cref="LowerCaseFilter"/>, <seealso cref="StopFilter"/>
-        ///         , <seealso cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
-        ///         provided and <seealso cref="SnowballFilter"/>. </returns>
+        ///         <see cref="Analyzer.TokenStreamComponents"/>
+        ///         built from an <see cref="StandardTokenizer"/> filtered with
+        ///         <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>
+        ///         , <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
+        ///         provided and <see cref="SnowballFilter"/>. </returns>
         protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
         {
             Tokenizer source = new StandardTokenizer(m_matchVersion, reader);

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Id/IndonesianAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Id/IndonesianAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Id/IndonesianAnalyzer.cs
index c483c64..44d6c11 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Id/IndonesianAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Id/IndonesianAnalyzer.cs
@@ -72,7 +72,7 @@ namespace Lucene.Net.Analysis.Id
         private readonly CharArraySet stemExclusionSet;
 
         /// <summary>
-        /// Builds an analyzer with the default stop words: <seealso cref="#DEFAULT_STOPWORD_FILE"/>.
+        /// Builds an analyzer with the default stop words: <see cref="#DEFAULT_STOPWORD_FILE"/>.
         /// </summary>
         public IndonesianAnalyzer(LuceneVersion matchVersion)
               : this(matchVersion, DefaultSetHolder.DEFAULT_STOP_SET)
@@ -93,8 +93,8 @@ namespace Lucene.Net.Analysis.Id
 
         /// <summary>
         /// Builds an analyzer with the given stop word. If a none-empty stem exclusion set is
-        /// provided this analyzer will add a <seealso cref="SetKeywordMarkerFilter"/> before
-        /// <seealso cref="IndonesianStemFilter"/>.
+        /// provided this analyzer will add a <see cref="SetKeywordMarkerFilter"/> before
+        /// <see cref="IndonesianStemFilter"/>.
         /// </summary>
         /// <param name="matchVersion">
         ///          lucene compatibility version </param>
@@ -110,14 +110,14 @@ namespace Lucene.Net.Analysis.Id
 
         /// <summary>
         /// Creates
-        /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
-        /// used to tokenize all the text in the provided <seealso cref="Reader"/>.
+        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// used to tokenize all the text in the provided <see cref="Reader"/>.
         /// </summary>
-        /// <returns> <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
-        ///         built from an <seealso cref="StandardTokenizer"/> filtered with
-        ///         <seealso cref="StandardFilter"/>, <seealso cref="LowerCaseFilter"/>,
-        ///         <seealso cref="StopFilter"/>, <seealso cref="SetKeywordMarkerFilter"/>
-        ///         if a stem exclusion set is provided and <seealso cref="IndonesianStemFilter"/>. </returns>
+        /// <returns> <see cref="Analyzer.TokenStreamComponents"/>
+        ///         built from an <see cref="StandardTokenizer"/> filtered with
+        ///         <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>,
+        ///         <see cref="StopFilter"/>, <see cref="SetKeywordMarkerFilter"/>
+        ///         if a stem exclusion set is provided and <see cref="IndonesianStemFilter"/>. </returns>
         protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
         {
             Tokenizer source = new StandardTokenizer(m_matchVersion, reader);

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Id/IndonesianStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Id/IndonesianStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Id/IndonesianStemFilter.cs
index 84e1e61..a2ac74d 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Id/IndonesianStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Id/IndonesianStemFilter.cs
@@ -21,7 +21,7 @@ namespace Lucene.Net.Analysis.Id
 	 */
 
     /// <summary>
-    /// A <seealso cref="TokenFilter"/> that applies <seealso cref="IndonesianStemmer"/> to stem Indonesian words.
+    /// A <see cref="TokenFilter"/> that applies <see cref="IndonesianStemmer"/> to stem Indonesian words.
     /// </summary>
     public sealed class IndonesianStemFilter : TokenFilter
     {
@@ -31,7 +31,7 @@ namespace Lucene.Net.Analysis.Id
         private readonly bool stemDerivational;
 
         /// <summary>
-        /// Calls <seealso cref="#IndonesianStemFilter(TokenStream, boolean) IndonesianStemFilter(input, true)"/>
+        /// Calls <see cref="#IndonesianStemFilter(TokenStream, boolean) IndonesianStemFilter(input, true)"/>
         /// </summary>
         public IndonesianStemFilter(TokenStream input)
               : this(input, true)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Id/IndonesianStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Id/IndonesianStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Id/IndonesianStemFilterFactory.cs
index 0773391..2944496 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Id/IndonesianStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Id/IndonesianStemFilterFactory.cs
@@ -22,15 +22,15 @@ namespace Lucene.Net.Analysis.Id
 	 */
 
     /// <summary>
-    /// Factory for <seealso cref="IndonesianStemFilter"/>. 
-    /// <pre class="prettyprint">
+    /// Factory for <see cref="IndonesianStemFilter"/>. 
+    /// <code>
     /// &lt;fieldType name="text_idstem" class="solr.TextField" positionIncrementGap="100"&gt;
     ///   &lt;analyzer&gt;
     ///     &lt;tokenizer class="solr.StandardTokenizerFactory"/&gt;
     ///     &lt;filter class="solr.LowerCaseFilterFactory"/&gt;
     ///     &lt;filter class="solr.IndonesianStemFilterFactory" stemDerivational="true"/&gt;
     ///   &lt;/analyzer&gt;
-    /// &lt;/fieldType&gt;</pre>
+    /// &lt;/fieldType&gt;</code>
     /// </summary>
     public class IndonesianStemFilterFactory : TokenFilterFactory
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/In/IndicNormalizationFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/In/IndicNormalizationFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/In/IndicNormalizationFilter.cs
index 5128b92..412714a 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/In/IndicNormalizationFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/In/IndicNormalizationFilter.cs
@@ -21,7 +21,7 @@ namespace Lucene.Net.Analysis.In
 	 */
 
     /// <summary>
-    /// A <seealso cref="TokenFilter"/> that applies <seealso cref="IndicNormalizer"/> to normalize text
+    /// A <see cref="TokenFilter"/> that applies <see cref="IndicNormalizer"/> to normalize text
     /// in Indian Languages.
     /// </summary>
     public sealed class IndicNormalizationFilter : TokenFilter

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/In/IndicNormalizationFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/In/IndicNormalizationFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/In/IndicNormalizationFilterFactory.cs
index a96d5b7..9026c7c 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/In/IndicNormalizationFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/In/IndicNormalizationFilterFactory.cs
@@ -22,14 +22,14 @@ namespace Lucene.Net.Analysis.In
 	 */
 
     /// <summary>
-    /// Factory for <seealso cref="IndicNormalizationFilter"/>. 
-    /// <pre class="prettyprint">
+    /// Factory for <see cref="IndicNormalizationFilter"/>. 
+    /// <code>
     /// &lt;fieldType name="text_innormal" class="solr.TextField" positionIncrementGap="100"&gt;
     ///   &lt;analyzer&gt;
     ///     &lt;tokenizer class="solr.StandardTokenizerFactory"/&gt;
     ///     &lt;filter class="solr.IndicNormalizationFilterFactory"/&gt;
     ///   &lt;/analyzer&gt;
-    /// &lt;/fieldType&gt;</pre>
+    /// &lt;/fieldType&gt;</code>
     /// </summary>
     public class IndicNormalizationFilterFactory : TokenFilterFactory, IMultiTermAwareComponent
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/In/IndicTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/In/IndicTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/In/IndicTokenizer.cs
index 2de7baa..d492ff6 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/In/IndicTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/In/IndicTokenizer.cs
@@ -26,7 +26,7 @@ namespace Lucene.Net.Analysis.In
 
     /// <summary>
     /// Simple Tokenizer for text in Indian Languages. </summary>
-    /// @deprecated (3.6) Use <seealso cref="StandardTokenizer"/> instead. 
+    /// @deprecated (3.6) Use <see cref="StandardTokenizer"/> instead. 
     [Obsolete("(3.6) Use StandardTokenizer instead.")]
     public sealed class IndicTokenizer : CharTokenizer
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/It/ItalianAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/It/ItalianAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/It/ItalianAnalyzer.cs
index cf00799..be81d75 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/It/ItalianAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/It/ItalianAnalyzer.cs
@@ -30,11 +30,11 @@ namespace Lucene.Net.Analysis.It
 	 */
 
     /// <summary>
-    /// <seealso cref="Analyzer"/> for Italian.
+    /// <see cref="Analyzer"/> for Italian.
     /// <para>
     /// <a name="version"/>
     /// </para>
-    /// <para>You must specify the required <seealso cref="Version"/>
+    /// <para>You must specify the required <see cref="LuceneVersion"/>
     /// compatibility when creating ItalianAnalyzer:
     /// <ul>
     ///   <li> As of 3.6, ItalianLightStemFilter is used for less aggressive stemming.
@@ -98,7 +98,7 @@ namespace Lucene.Net.Analysis.It
         }
 
         /// <summary>
-        /// Builds an analyzer with the default stop words: <seealso cref="#DEFAULT_STOPWORD_FILE"/>.
+        /// Builds an analyzer with the default stop words: <see cref="#DEFAULT_STOPWORD_FILE"/>.
         /// </summary>
         public ItalianAnalyzer(LuceneVersion matchVersion)
               : this(matchVersion, DefaultSetHolder.DEFAULT_STOP_SET)
@@ -117,7 +117,7 @@ namespace Lucene.Net.Analysis.It
 
         /// <summary>
         /// Builds an analyzer with the given stop words. If a non-empty stem exclusion set is
-        /// provided this analyzer will add a <seealso cref="SetKeywordMarkerFilter"/> before
+        /// provided this analyzer will add a <see cref="SetKeywordMarkerFilter"/> before
         /// stemming.
         /// </summary>
         /// <param name="matchVersion"> lucene compatibility version </param>
@@ -131,15 +131,15 @@ namespace Lucene.Net.Analysis.It
 
         /// <summary>
         /// Creates a
-        /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
-        /// which tokenizes all the text in the provided <seealso cref="Reader"/>.
+        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// which tokenizes all the text in the provided <see cref="Reader"/>.
         /// </summary>
         /// <returns> A
-        ///         <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
-        ///         built from an <seealso cref="StandardTokenizer"/> filtered with
-        ///         <seealso cref="StandardFilter"/>, <seealso cref="ElisionFilter"/>, <seealso cref="LowerCaseFilter"/>, <seealso cref="StopFilter"/>
-        ///         , <seealso cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
-        ///         provided and <seealso cref="ItalianLightStemFilter"/>. </returns>
+        ///         <see cref="Analyzer.TokenStreamComponents"/>
+        ///         built from an <see cref="StandardTokenizer"/> filtered with
+        ///         <see cref="StandardFilter"/>, <see cref="ElisionFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>
+        ///         , <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
+        ///         provided and <see cref="ItalianLightStemFilter"/>. </returns>
         protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
         {
             Tokenizer source = new StandardTokenizer(m_matchVersion, reader);

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/It/ItalianLightStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/It/ItalianLightStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/It/ItalianLightStemFilter.cs
index b772db3..f86d45d 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/It/ItalianLightStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/It/ItalianLightStemFilter.cs
@@ -21,12 +21,12 @@ namespace Lucene.Net.Analysis.It
 	 */
 
     /// <summary>
-    /// A <seealso cref="TokenFilter"/> that applies <seealso cref="ItalianLightStemmer"/> to stem Italian
+    /// A <see cref="TokenFilter"/> that applies <see cref="ItalianLightStemmer"/> to stem Italian
     /// words.
     /// <para>
     /// To prevent terms from being stemmed use an instance of
-    /// <seealso cref="SetKeywordMarkerFilter"/> or a custom <seealso cref="TokenFilter"/> that sets
-    /// the <seealso cref="KeywordAttribute"/> before this <seealso cref="TokenStream"/>.
+    /// <see cref="SetKeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
+    /// the <see cref="KeywordAttribute"/> before this <see cref="TokenStream"/>.
     /// </para>
     /// </summary>
     public sealed class ItalianLightStemFilter : TokenFilter

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/It/ItalianLightStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/It/ItalianLightStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/It/ItalianLightStemFilterFactory.cs
index 1caa912..1ea8ee5 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/It/ItalianLightStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/It/ItalianLightStemFilterFactory.cs
@@ -22,15 +22,15 @@ namespace Lucene.Net.Analysis.It
 	 */
 
     /// <summary>
-    /// Factory for <seealso cref="ItalianLightStemFilter"/>. 
-    /// <pre class="prettyprint">
+    /// Factory for <see cref="ItalianLightStemFilter"/>. 
+    /// <code>
     /// &lt;fieldType name="text_itlgtstem" class="solr.TextField" positionIncrementGap="100"&gt;
     ///   &lt;analyzer&gt;
     ///     &lt;tokenizer class="solr.StandardTokenizerFactory"/&gt;
     ///     &lt;filter class="solr.LowerCaseFilterFactory"/&gt;
     ///     &lt;filter class="solr.ItalianLightStemFilterFactory"/&gt;
     ///   &lt;/analyzer&gt;
-    /// &lt;/fieldType&gt;</pre> 
+    /// &lt;/fieldType&gt;</code> 
     /// </summary>
     public class ItalianLightStemFilterFactory : TokenFilterFactory
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianAnalyzer.cs
index 2ecfd82..26b5074 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianAnalyzer.cs
@@ -27,7 +27,7 @@ namespace Lucene.Net.Analysis.Lv
 	 */
 
     /// <summary>
-    /// <seealso cref="Analyzer"/> for Latvian.
+    /// <see cref="Analyzer"/> for Latvian.
     /// </summary>
     public sealed class LatvianAnalyzer : StopwordAnalyzerBase
     {
@@ -77,7 +77,7 @@ namespace Lucene.Net.Analysis.Lv
         }
 
         /// <summary>
-        /// Builds an analyzer with the default stop words: <seealso cref="#DEFAULT_STOPWORD_FILE"/>.
+        /// Builds an analyzer with the default stop words: <see cref="#DEFAULT_STOPWORD_FILE"/>.
         /// </summary>
         public LatvianAnalyzer(LuceneVersion matchVersion)
               : this(matchVersion, DefaultSetHolder.DEFAULT_STOP_SET)
@@ -96,7 +96,7 @@ namespace Lucene.Net.Analysis.Lv
 
         /// <summary>
         /// Builds an analyzer with the given stop words. If a non-empty stem exclusion set is
-        /// provided this analyzer will add a <seealso cref="SetKeywordMarkerFilter"/> before
+        /// provided this analyzer will add a <see cref="SetKeywordMarkerFilter"/> before
         /// stemming.
         /// </summary>
         /// <param name="matchVersion"> lucene compatibility version </param>
@@ -110,15 +110,15 @@ namespace Lucene.Net.Analysis.Lv
 
         /// <summary>
         /// Creates a
-        /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
-        /// which tokenizes all the text in the provided <seealso cref="Reader"/>.
+        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// which tokenizes all the text in the provided <see cref="Reader"/>.
         /// </summary>
         /// <returns> A
-        ///         <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
-        ///         built from an <seealso cref="StandardTokenizer"/> filtered with
-        ///         <seealso cref="StandardFilter"/>, <seealso cref="LowerCaseFilter"/>, <seealso cref="StopFilter"/>
-        ///         , <seealso cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
-        ///         provided and <seealso cref="LatvianStemFilter"/>. </returns>
+        ///         <see cref="Analyzer.TokenStreamComponents"/>
+        ///         built from an <see cref="StandardTokenizer"/> filtered with
+        ///         <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>
+        ///         , <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
+        ///         provided and <see cref="LatvianStemFilter"/>. </returns>
         protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
         {
             Tokenizer source = new StandardTokenizer(m_matchVersion, reader);
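
    [Editorial note, not part of this commit: a minimal C# sketch of how the analyzer documented in the hunk above is typically consumed. The LuceneVersion.LUCENE_48 constant, the GetTokenStream overload, and the field name "body" are assumptions about the Lucene.Net 4.8-style API, not something this diff establishes.]

    using System;
    using System.IO;
    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.Lv;
    using Lucene.Net.Analysis.TokenAttributes;
    using Lucene.Net.Util;

    // Sketch: run Latvian text through the chain built by CreateComponents
    // (StandardTokenizer -> StandardFilter -> LowerCaseFilter -> StopFilter ->
    //  optional SetKeywordMarkerFilter -> LatvianStemFilter).
    Analyzer analyzer = new LatvianAnalyzer(LuceneVersion.LUCENE_48);
    using (TokenStream ts = analyzer.GetTokenStream("body", new StringReader("mājas mājai mājā")))
    {
        ICharTermAttribute term = ts.AddAttribute<ICharTermAttribute>();
        ts.Reset();
        while (ts.IncrementToken())
        {
            Console.WriteLine(term.ToString());   // stemmed, lower-cased Latvian terms
        }
        ts.End();
    }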

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianStemFilter.cs
index 22ea386..8a373fa 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianStemFilter.cs
@@ -21,12 +21,12 @@ namespace Lucene.Net.Analysis.Lv
 	 */
 
     /// <summary>
-    /// A <seealso cref="TokenFilter"/> that applies <seealso cref="LatvianStemmer"/> to stem Latvian
+    /// A <see cref="TokenFilter"/> that applies <see cref="LatvianStemmer"/> to stem Latvian
     /// words.
     /// <para>
     /// To prevent terms from being stemmed use an instance of
-    /// <seealso cref="SetKeywordMarkerFilter"/> or a custom <seealso cref="TokenFilter"/> that sets
-    /// the <seealso cref="KeywordAttribute"/> before this <seealso cref="TokenStream"/>.
+    /// <see cref="SetKeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
+    /// the <see cref="KeywordAttribute"/> before this <see cref="TokenStream"/>.
     /// </para>
     /// </summary>
     public sealed class LatvianStemFilter : TokenFilter
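
    [Editorial note, not part of this commit: the summary above points at SetKeywordMarkerFilter for protecting terms from stemming; this sketch shows where that filter sits in a hand-built chain. WhitespaceTokenizer, LowerCaseFilter, the CharArraySet constructor, and LuceneVersion.LUCENE_48 are assumed 4.8-style APIs.]

    using System.IO;
    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.Core;
    using Lucene.Net.Analysis.Lv;
    using Lucene.Net.Analysis.Miscellaneous;
    using Lucene.Net.Analysis.Util;
    using Lucene.Net.Util;

    // Sketch: protect "rīga" from stemming while stemming everything else.
    LuceneVersion version = LuceneVersion.LUCENE_48;
    CharArraySet keywords = new CharArraySet(version, new[] { "rīga" }, true /* ignoreCase */);

    Tokenizer source = new WhitespaceTokenizer(version, new StringReader("Rīga mājas"));
    TokenStream chain = new LowerCaseFilter(version, source);
    chain = new SetKeywordMarkerFilter(chain, keywords);  // sets KeywordAttribute on "rīga"
    chain = new LatvianStemFilter(chain);                 // leaves keyword-marked terms unstemmed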

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianStemFilterFactory.cs
index d953f94..33b3789 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianStemFilterFactory.cs
@@ -22,15 +22,15 @@ namespace Lucene.Net.Analysis.Lv
 	 */
 
     /// <summary>
-    /// Factory for <seealso cref="LatvianStemFilter"/>. 
-    /// <pre class="prettyprint">
+    /// Factory for <see cref="LatvianStemFilter"/>. 
+    /// <code>
     /// &lt;fieldType name="text_lvstem" class="solr.TextField" positionIncrementGap="100"&gt;
     ///   &lt;analyzer&gt;
     ///     &lt;tokenizer class="solr.StandardTokenizerFactory"/&gt;
     ///     &lt;filter class="solr.LowerCaseFilterFactory"/&gt;
     ///     &lt;filter class="solr.LatvianStemFilterFactory"/&gt;
     ///   &lt;/analyzer&gt;
-    /// &lt;/fieldType&gt;</pre>
+    /// &lt;/fieldType&gt;</code>
     /// </summary>
     public class LatvianStemFilterFactory : TokenFilterFactory
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ASCIIFoldingFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ASCIIFoldingFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ASCIIFoldingFilter.cs
index db59e0c..76bb80a 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ASCIIFoldingFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ASCIIFoldingFilter.cs
@@ -67,7 +67,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
         }
 
         /// <summary>
-        /// Create a new <seealso cref="ASCIIFoldingFilter"/>.
+        /// Create a new <see cref="ASCIIFoldingFilter"/>.
         /// </summary>
         /// <param name="input">
         ///          TokenStream to filter </param>
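
    [Editorial note, not part of this commit: a sketch of the constructor documented above, plus the preserveOriginal overload. The overload and the WhitespaceTokenizer/StringReader plumbing are assumptions about the 4.8-style API.]

    using System.IO;
    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.Core;
    using Lucene.Net.Analysis.Miscellaneous;
    using Lucene.Net.Util;

    // Sketch: fold accented terms to their ASCII equivalents.
    TokenStream ts = new WhitespaceTokenizer(LuceneVersion.LUCENE_48, new StringReader("Crème Brûlée"));
    ts = new ASCIIFoldingFilter(ts);            // emits "Creme", "Brulee"
    // ts = new ASCIIFoldingFilter(ts, true);   // alternative: also emit the original, unfolded term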

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ASCIIFoldingFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ASCIIFoldingFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ASCIIFoldingFilterFactory.cs
index 5155908..74a0d33 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ASCIIFoldingFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ASCIIFoldingFilterFactory.cs
@@ -21,14 +21,14 @@ namespace Lucene.Net.Analysis.Miscellaneous
      */
 
     /// <summary>
-    /// Factory for <seealso cref="ASCIIFoldingFilter"/>.
-    /// <pre class="prettyprint">
+    /// Factory for <see cref="ASCIIFoldingFilter"/>.
+    /// <code>
     /// &lt;fieldType name="text_ascii" class="solr.TextField" positionIncrementGap="100"&gt;
     ///   &lt;analyzer&gt;
     ///     &lt;tokenizer class="solr.WhitespaceTokenizerFactory"/&gt;
     ///     &lt;filter class="solr.ASCIIFoldingFilterFactory" preserveOriginal="false"/&gt;
     ///   &lt;/analyzer&gt;
-    /// &lt;/fieldType&gt;</pre>
+    /// &lt;/fieldType&gt;</code>
     /// </summary>
     public class ASCIIFoldingFilterFactory : TokenFilterFactory, IMultiTermAwareComponent
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/CapitalizationFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/CapitalizationFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/CapitalizationFilterFactory.cs
index d68f881..236d6da 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/CapitalizationFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/CapitalizationFilterFactory.cs
@@ -22,7 +22,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
      */
 
     /// <summary>
-    /// Factory for <seealso cref="CapitalizationFilter"/>.
+    /// Factory for <see cref="CapitalizationFilter"/>.
     /// <p/>
     /// The factory takes parameters:<br/>
     /// "onlyFirstWord" - should each word be capitalized or all of the words?<br/>
@@ -37,7 +37,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
     /// "maxWordCount" - if the token contains more then maxWordCount words, the capitalization is
     /// assumed to be correct.<br/>
     /// 
-    /// <pre class="prettyprint">
+    /// <code>
     /// &lt;fieldType name="text_cptlztn" class="solr.TextField" positionIncrementGap="100"&gt;
     ///   &lt;analyzer&gt;
     ///     &lt;tokenizer class="solr.WhitespaceTokenizerFactory"/&gt;
@@ -45,7 +45,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
     ///           keep="java solr lucene" keepIgnoreCase="false"
     ///           okPrefix="McK McD McA"/&gt;   
     ///   &lt;/analyzer&gt;
-    /// &lt;/fieldType&gt;</pre>
+    /// &lt;/fieldType&gt;</code>
     /// 
     /// @since solr 1.3
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/CodepointCountFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/CodepointCountFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/CodepointCountFilter.cs
index 2b6f70b..1c12925 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/CodepointCountFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/CodepointCountFilter.cs
@@ -37,11 +37,11 @@ namespace Lucene.Net.Analysis.Miscellaneous
         private readonly ICharTermAttribute termAtt;
 
         /// <summary>
-        /// Create a new <seealso cref="CodepointCountFilter"/>. This will filter out tokens whose
-        /// <seealso cref="CharTermAttribute"/> is either too short (<seealso cref="Character#CodePointCount(char[], int, int)"/>
-        /// &lt; min) or too long (<seealso cref="Character#codePointCount(char[], int, int)"/> &gt; max). </summary>
+        /// Create a new <see cref="CodepointCountFilter"/>. This will filter out tokens whose
+        /// <see cref="CharTermAttribute"/> is either too short (<see cref="Character#CodePointCount(char[], int, int)"/>
+        /// &lt; min) or too long (<see cref="Character#codePointCount(char[], int, int)"/> &gt; max). </summary>
         /// <param name="version"> the Lucene match version </param>
-        /// <param name="in">      the <seealso cref="TokenStream"/> to consume </param>
+        /// <param name="in">      the <see cref="TokenStream"/> to consume </param>
         /// <param name="min">     the minimum length </param>
         /// <param name="max">     the maximum length </param>
         public CodepointCountFilter(LuceneVersion version, TokenStream @in, int min, int max)
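
    [Editorial note, not part of this commit: a sketch of the constructor shown in this hunk, illustrating why the filter counts code points rather than UTF-16 chars. The tokenizer and version constant are assumed.]

    using System.IO;
    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.Core;
    using Lucene.Net.Analysis.Miscellaneous;
    using Lucene.Net.Util;

    // Sketch: keep only tokens of 2..10 code points.
    TokenStream ts = new WhitespaceTokenizer(LuceneVersion.LUCENE_48, new StringReader("a 𠀀 abc"));
    ts = new CodepointCountFilter(LuceneVersion.LUCENE_48, ts, 2, 10);
    // "a"   -> dropped (1 code point)
    // "𠀀"  -> also dropped: 2 UTF-16 chars but only 1 code point, which is what is counted
    // "abc" -> kept (3 code points)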

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/CodepointCountFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/CodepointCountFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/CodepointCountFilterFactory.cs
index 23c678f..4163aec 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/CodepointCountFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/CodepointCountFilterFactory.cs
@@ -21,14 +21,14 @@ namespace Lucene.Net.Analysis.Miscellaneous
 	 */
 
     /// <summary>
-    /// Factory for <seealso cref="CodepointCountFilter"/>. 
-    /// <pre class="prettyprint">
+    /// Factory for <see cref="CodepointCountFilter"/>. 
+    /// <code>
     /// &lt;fieldType name="text_lngth" class="solr.TextField" positionIncrementGap="100"&gt;
     ///   &lt;analyzer&gt;
     ///     &lt;tokenizer class="solr.WhitespaceTokenizerFactory"/&gt;
     ///     &lt;filter class="solr.CodepointCountFilterFactory" min="0" max="1" /&gt;
     ///   &lt;/analyzer&gt;
-    /// &lt;/fieldType&gt;</pre>
+    /// &lt;/fieldType&gt;</code>
     /// </summary>
     public class CodepointCountFilterFactory : TokenFilterFactory
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/HyphenatedWordsFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/HyphenatedWordsFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/HyphenatedWordsFilter.cs
index 3b8f7d9..c5da204 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/HyphenatedWordsFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/HyphenatedWordsFilter.cs
@@ -27,7 +27,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
     /// In order to increase search efficiency, this filter puts hyphenated words that were broken across two lines back together.
     /// This filter should be used at index time only.
     /// Example field definition in schema.xml:
-    /// <pre class="prettyprint">
+    /// <code>
     /// &lt;fieldtype name="text" class="solr.TextField" positionIncrementGap="100"&gt;
     ///  &lt;analyzer type="index"&gt;
     ///    &lt;tokenizer class="solr.WhitespaceTokenizerFactory"/&gt;
@@ -47,7 +47,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
     ///      &lt;filter class="solr.RemoveDuplicatesTokenFilterFactory"/&gt;
     ///  &lt;/analyzer&gt;
     /// &lt;/fieldtype&gt;
-    /// </pre>
+    /// </code>
     /// 
     /// </summary>
     public sealed class HyphenatedWordsFilter : TokenFilter
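
    [Editorial note, not part of this commit: a sketch of the index-time behaviour described above. The HyphenatedWordsFilter(TokenStream) constructor and the rest of the plumbing are assumptions about the 4.8-style API.]

    using System.IO;
    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.Core;
    using Lucene.Net.Analysis.Miscellaneous;
    using Lucene.Net.Util;

    // Sketch: rejoin words that were hyphenated across a line break.
    TokenStream ts = new WhitespaceTokenizer(LuceneVersion.LUCENE_48,
        new StringReader("ecologi-\ncal develop-\nment"));
    ts = new HyphenatedWordsFilter(ts);   // emits "ecological", "development"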

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/HyphenatedWordsFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/HyphenatedWordsFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/HyphenatedWordsFilterFactory.cs
index 526885c..6c4d375 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/HyphenatedWordsFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/HyphenatedWordsFilterFactory.cs
@@ -21,14 +21,14 @@ namespace Lucene.Net.Analysis.Miscellaneous
 	 */
 
     /// <summary>
-    /// Factory for <seealso cref="HyphenatedWordsFilter"/>.
-    /// <pre class="prettyprint">
+    /// Factory for <see cref="HyphenatedWordsFilter"/>.
+    /// <code>
     /// &lt;fieldType name="text_hyphn" class="solr.TextField" positionIncrementGap="100"&gt;
     ///   &lt;analyzer&gt;
     ///     &lt;tokenizer class="solr.WhitespaceTokenizerFactory"/&gt;
     ///     &lt;filter class="solr.HyphenatedWordsFilterFactory"/&gt;
     ///   &lt;/analyzer&gt;
-    /// &lt;/fieldType&gt;</pre>
+    /// &lt;/fieldType&gt;</code>
     /// </summary>
     public class HyphenatedWordsFilterFactory : TokenFilterFactory
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeepWordFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeepWordFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeepWordFilter.cs
index 82ec1bc..0ff278c 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeepWordFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeepWordFilter.cs
@@ -43,13 +43,13 @@ namespace Lucene.Net.Analysis.Miscellaneous
         }
 
         /// <summary>
-        /// Create a new <seealso cref="KeepWordFilter"/>.
+        /// Create a new <see cref="KeepWordFilter"/>.
         /// <para><b>NOTE</b>: The words set passed to this constructor will be directly
         /// used by this filter and should not be modified.
         /// </para>
         /// </summary>
         /// <param name="version"> the Lucene match version </param>
-        /// <param name="in">      the <seealso cref="TokenStream"/> to consume </param>
+        /// <param name="in">      the <see cref="TokenStream"/> to consume </param>
         /// <param name="words">   the words to keep </param>
         public KeepWordFilter(LuceneVersion version, TokenStream @in, CharArraySet words)
             : base(version, @in)
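
    [Editorial note, not part of this commit: a sketch of the constructor shown above. The CharArraySet construction and WhitespaceTokenizer are assumed 4.8-style APIs. Per the NOTE in the summary, the set is used directly and must not be modified afterwards.]

    using System.IO;
    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.Core;
    using Lucene.Net.Analysis.Miscellaneous;
    using Lucene.Net.Analysis.Util;
    using Lucene.Net.Util;

    // Sketch: keep only whitelisted terms, drop everything else.
    CharArraySet keep = new CharArraySet(LuceneVersion.LUCENE_48, new[] { "lucene", "solr" }, true);
    TokenStream ts = new WhitespaceTokenizer(LuceneVersion.LUCENE_48, new StringReader("java lucene solr"));
    ts = new KeepWordFilter(LuceneVersion.LUCENE_48, ts, keep);   // emits "lucene", "solr"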

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeepWordFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeepWordFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeepWordFilterFactory.cs
index 99c3fc9..39f61bf 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeepWordFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeepWordFilterFactory.cs
@@ -21,14 +21,14 @@ namespace Lucene.Net.Analysis.Miscellaneous
      */
 
     /// <summary>
-    /// Factory for <seealso cref="KeepWordFilter"/>. 
-    /// <pre class="prettyprint">
+    /// Factory for <see cref="KeepWordFilter"/>. 
+    /// <code>
     /// &lt;fieldType name="text_keepword" class="solr.TextField" positionIncrementGap="100"&gt;
     ///   &lt;analyzer&gt;
     ///     &lt;tokenizer class="solr.WhitespaceTokenizerFactory"/&gt;
     ///     &lt;filter class="solr.KeepWordFilterFactory" words="keepwords.txt" ignoreCase="false"/&gt;
     ///   &lt;/analyzer&gt;
-    /// &lt;/fieldType&gt;</pre>
+    /// &lt;/fieldType&gt;</code>
     /// </summary>
     public class KeepWordFilterFactory : TokenFilterFactory, IResourceLoaderAware
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeywordMarkerFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeywordMarkerFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeywordMarkerFilter.cs
index 722ce4b..ab8a884 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeywordMarkerFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeywordMarkerFilter.cs
@@ -20,7 +20,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
      * limitations under the License.
      */
     /// <summary>
-    /// Marks terms as keywords via the <seealso cref="KeywordAttribute"/>.
+    /// Marks terms as keywords via the <see cref="KeywordAttribute"/>.
     /// </summary>
     /// <seealso cref= KeywordAttribute </seealso>
     public abstract class KeywordMarkerFilter : TokenFilter
@@ -29,7 +29,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
         private readonly IKeywordAttribute keywordAttr;
 
         /// <summary>
-        /// Creates a new <seealso cref="KeywordMarkerFilter"/> </summary>
+        /// Creates a new <see cref="KeywordMarkerFilter"/> </summary>
         /// <param name="in"> the input stream </param>
         protected internal KeywordMarkerFilter(TokenStream @in)
             : base(@in)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeywordMarkerFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeywordMarkerFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeywordMarkerFilterFactory.cs
index 9705ff6..0070b74 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeywordMarkerFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeywordMarkerFilterFactory.cs
@@ -22,14 +22,14 @@ namespace Lucene.Net.Analysis.Miscellaneous
      */
 
     /// <summary>
-    /// Factory for <seealso cref="KeywordMarkerFilter"/>.
-    /// <pre class="prettyprint">
+    /// Factory for <see cref="KeywordMarkerFilter"/>.
+    /// <code>
     /// &lt;fieldType name="text_keyword" class="solr.TextField" positionIncrementGap="100"&gt;
     ///   &lt;analyzer&gt;
     ///     &lt;tokenizer class="solr.WhitespaceTokenizerFactory"/&gt;
     ///     &lt;filter class="solr.KeywordMarkerFilterFactory" protected="protectedkeyword.txt" pattern="^.+er$" ignoreCase="false"/&gt;
     ///   &lt;/analyzer&gt;
-    /// &lt;/fieldType&gt;</pre>
+    /// &lt;/fieldType&gt;</code>
     /// </summary>
     public class KeywordMarkerFilterFactory : TokenFilterFactory, IResourceLoaderAware
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeywordRepeatFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeywordRepeatFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeywordRepeatFilter.cs
index e770a56..4cfe3e8 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeywordRepeatFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeywordRepeatFilter.cs
@@ -21,8 +21,8 @@ namespace Lucene.Net.Analysis.Miscellaneous
      */
     /// <summary>
     /// This TokenFilter emits each incoming token twice, once as keyword and once as non-keyword; in other words, once with
-    /// <seealso cref="KeywordAttribute#setKeyword(boolean)"/> set to <code>true</code> and once set to <code>false</code>.
-    /// This is useful if used with a stem filter that respects the <seealso cref="KeywordAttribute"/> to index the stemmed and the
+    /// <see cref="KeywordAttribute#setKeyword(boolean)"/> set to <code>true</code> and once set to <code>false</code>.
+    /// This is useful if used with a stem filter that respects the <see cref="KeywordAttribute"/> to index the stemmed and the
     /// un-stemmed version of a term into the same field.
     /// </summary>
     public sealed class KeywordRepeatFilter : TokenFilter
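
    [Editorial note, not part of this commit: a sketch of the stemmed-plus-original pattern described above. PorterStemFilter is an assumed stand-in for any KeywordAttribute-aware stemmer, and RemoveDuplicatesTokenFilter follows the advice given for the factory in the next hunk.]

    using System.IO;
    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.Core;
    using Lucene.Net.Analysis.En;
    using Lucene.Net.Analysis.Miscellaneous;
    using Lucene.Net.Util;

    // Sketch: index both the stemmed and the original form of each term at the same position.
    TokenStream ts = new WhitespaceTokenizer(LuceneVersion.LUCENE_48, new StringReader("running runs"));
    ts = new KeywordRepeatFilter(ts);            // each token emitted twice: keyword + non-keyword
    ts = new PorterStemFilter(ts);               // keyword copies stay as-is, the others are stemmed
    ts = new RemoveDuplicatesTokenFilter(ts);    // collapse cases where the stem equals the original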

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeywordRepeatFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeywordRepeatFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeywordRepeatFilterFactory.cs
index 842ab95..c34561a 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeywordRepeatFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/KeywordRepeatFilterFactory.cs
@@ -21,11 +21,11 @@ namespace Lucene.Net.Analysis.Miscellaneous
      */
 
     /// <summary>
-    /// Factory for <seealso cref="KeywordRepeatFilter"/>.
+    /// Factory for <see cref="KeywordRepeatFilter"/>.
     /// 
-    /// Since <seealso cref="KeywordRepeatFilter"/> emits two tokens for every input token, and any tokens that aren't transformed
+    /// Since <see cref="KeywordRepeatFilter"/> emits two tokens for every input token, any tokens that aren't transformed
     /// later in the analysis chain will be in the document twice. Therefore, consider adding
-    /// <seealso cref="RemoveDuplicatesTokenFilterFactory"/> later in the analysis chain.
+    /// <see cref="RemoveDuplicatesTokenFilterFactory"/> later in the analysis chain.
     /// </summary>
     public sealed class KeywordRepeatFilterFactory : TokenFilterFactory
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LengthFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LengthFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LengthFilter.cs
index e02fd24..ab19c3a 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LengthFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LengthFilter.cs
@@ -54,11 +54,11 @@ namespace Lucene.Net.Analysis.Miscellaneous
         }
 
         /// <summary>
-        /// Create a new <seealso cref="LengthFilter"/>. This will filter out tokens whose
-        /// <seealso cref="CharTermAttribute"/> is either too short (<seealso cref="CharTermAttribute#length()"/>
-        /// &lt; min) or too long (<seealso cref="CharTermAttribute#length()"/> &gt; max). </summary>
+        /// Create a new <see cref="LengthFilter"/>. This will filter out tokens whose
+        /// <see cref="CharTermAttribute"/> is either too short (<see cref="CharTermAttribute#length()"/>
+        /// &lt; min) or too long (<see cref="CharTermAttribute#length()"/> &gt; max). </summary>
         /// <param name="version"> the Lucene match version </param>
-        /// <param name="in">      the <seealso cref="TokenStream"/> to consume </param>
+        /// <param name="in">      the <see cref="TokenStream"/> to consume </param>
         /// <param name="min">     the minimum length </param>
         /// <param name="max">     the maximum length </param>
         public LengthFilter(LuceneVersion version, TokenStream @in, int min, int max)
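
    [Editorial note, not part of this commit: a sketch of the constructor shown in this hunk; the tokenizer and version constant are assumed.]

    using System.IO;
    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.Core;
    using Lucene.Net.Analysis.Miscellaneous;
    using Lucene.Net.Util;

    // Sketch: drop terms shorter than 3 or longer than 10 chars.
    TokenStream ts = new WhitespaceTokenizer(LuceneVersion.LUCENE_48, new StringReader("a to analysis"));
    ts = new LengthFilter(LuceneVersion.LUCENE_48, ts, 3, 10);   // keeps only "analysis"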

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LengthFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LengthFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LengthFilterFactory.cs
index 019e611..f206b4b 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LengthFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LengthFilterFactory.cs
@@ -21,14 +21,14 @@ namespace Lucene.Net.Analysis.Miscellaneous
      */
 
     /// <summary>
-    /// Factory for <seealso cref="LengthFilter"/>. 
-    /// <pre class="prettyprint">
+    /// Factory for <see cref="LengthFilter"/>. 
+    /// <code>
     /// &lt;fieldType name="text_lngth" class="solr.TextField" positionIncrementGap="100"&gt;
     ///   &lt;analyzer&gt;
     ///     &lt;tokenizer class="solr.WhitespaceTokenizerFactory"/&gt;
     ///     &lt;filter class="solr.LengthFilterFactory" min="0" max="1" /&gt;
     ///   &lt;/analyzer&gt;
-    /// &lt;/fieldType&gt;</pre>
+    /// &lt;/fieldType&gt;</code>
     /// </summary>
     public class LengthFilterFactory : TokenFilterFactory
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenCountAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenCountAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenCountAnalyzer.cs
index 0e51d49..3bafb19 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenCountAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenCountAnalyzer.cs
@@ -19,7 +19,7 @@
 
     /// <summary>
     /// This Analyzer limits the number of tokens while indexing. It is
-    /// a replacement for the maximum field length setting inside <seealso cref="org.apache.lucene.index.IndexWriter"/>. </summary>
+    /// a replacement for the maximum field length setting inside <see cref="org.apache.lucene.index.IndexWriter"/>. </summary>
     /// <seealso cref= LimitTokenCountFilter </seealso>
     public sealed class LimitTokenCountAnalyzer : AnalyzerWrapper
     {
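
    [Editorial note, not part of this commit: a sketch of wrapping an analyzer so that only the first N tokens per field are indexed, in place of the old IndexWriter maximum-field-length setting mentioned above. The two-argument constructor and StandardAnalyzer are assumed 4.8-style APIs.]

    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.Miscellaneous;
    using Lucene.Net.Analysis.Standard;
    using Lucene.Net.Util;

    // Sketch: index at most the first 100 tokens of each field.
    Analyzer inner = new StandardAnalyzer(LuceneVersion.LUCENE_48);
    Analyzer limited = new LimitTokenCountAnalyzer(inner, 100);
    // Pass "limited" to IndexWriterConfig instead of tuning a maximum field length on IndexWriter.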

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenCountFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenCountFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenCountFilter.cs
index 8202f8c..9e4a37c 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenCountFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenCountFilter.cs
@@ -19,7 +19,7 @@
 
     /// <summary>
     /// This TokenFilter limits the number of tokens while indexing. It is
-    /// a replacement for the maximum field length setting inside <seealso cref="org.apache.lucene.index.IndexWriter"/>.
+    /// a replacement for the maximum field length setting inside <see cref="org.apache.lucene.index.IndexWriter"/>.
     /// <para>
     /// By default, this filter ignores any tokens in the wrapped {@code TokenStream}
     /// once the limit has been reached, which can result in {@code reset()} being 
@@ -28,7 +28,7 @@
     /// then consuming the full stream. If you are wrapping a {@code TokenStream} 
     /// which requires that the full stream of tokens be exhausted in order to 
     /// function properly, use the 
-    /// <seealso cref="#LimitTokenCountFilter(TokenStream,int,boolean) consumeAllTokens"/> 
+    /// <see cref="#LimitTokenCountFilter(TokenStream,int,boolean) consumeAllTokens"/> 
     /// option.
     /// </para>
     /// </summary>
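
    [Editorial note, not part of this commit: a sketch of the consumeAllTokens option discussed above. The three-argument signature follows the cref in this hunk; the tokenizer plumbing is assumed.]

    using System.IO;
    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.Core;
    using Lucene.Net.Analysis.Miscellaneous;
    using Lucene.Net.Util;

    // Sketch: emit at most 10 tokens, but keep consuming the wrapped stream so that
    // filters which must see the whole stream still function correctly.
    TokenStream ts = new WhitespaceTokenizer(LuceneVersion.LUCENE_48,
        new StringReader("one two three four five six seven eight nine ten eleven twelve"));
    ts = new LimitTokenCountFilter(ts, 10, true);   // true = consumeAllTokens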

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenCountFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenCountFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenCountFilterFactory.cs
index 3644202..25b980d 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenCountFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenCountFilterFactory.cs
@@ -21,17 +21,17 @@ namespace Lucene.Net.Analysis.Miscellaneous
      */
 
     /// <summary>
-    /// Factory for <seealso cref="LimitTokenCountFilter"/>. 
-    /// <pre class="prettyprint">
+    /// Factory for <see cref="LimitTokenCountFilter"/>. 
+    /// <code>
     /// &lt;fieldType name="text_lngthcnt" class="solr.TextField" positionIncrementGap="100"&gt;
     ///   &lt;analyzer&gt;
     ///     &lt;tokenizer class="solr.WhitespaceTokenizerFactory"/&gt;
     ///     &lt;filter class="solr.LimitTokenCountFilterFactory" maxTokenCount="10" consumeAllTokens="false" /&gt;
     ///   &lt;/analyzer&gt;
-    /// &lt;/fieldType&gt;</pre>
+    /// &lt;/fieldType&gt;</code>
     /// <para>
     /// The {@code consumeAllTokens} property is optional and defaults to {@code false}.  
-    /// See <seealso cref="LimitTokenCountFilter"/> for an explanation of it's use.
+    /// See <see cref="LimitTokenCountFilter"/> for an explanation of its use.
     /// </para>
     /// </summary>
     public class LimitTokenCountFilterFactory : TokenFilterFactory

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenPositionFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenPositionFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenPositionFilter.cs
index d43d23c..008ff97 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenPositionFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenPositionFilter.cs
@@ -30,7 +30,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
     /// then consuming the full stream. If you are wrapping a {@code TokenStream}
     /// which requires that the full stream of tokens be exhausted in order to 
     /// function properly, use the 
-    /// <seealso cref="#LimitTokenPositionFilter(TokenStream,int,boolean) consumeAllTokens"/>
+    /// <see cref="#LimitTokenPositionFilter(TokenStream,int,boolean) consumeAllTokens"/>
     /// option.
     /// </para>
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenPositionFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenPositionFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenPositionFilterFactory.cs
index 22fb345..e2d7692 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenPositionFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenPositionFilterFactory.cs
@@ -21,17 +21,17 @@ namespace Lucene.Net.Analysis.Miscellaneous
      */
 
     /// <summary>
-    /// Factory for <seealso cref="LimitTokenPositionFilter"/>. 
-    /// <pre class="prettyprint">
+    /// Factory for <see cref="LimitTokenPositionFilter"/>. 
+    /// <code>
     /// &lt;fieldType name="text_limit_pos" class="solr.TextField" positionIncrementGap="100"&gt;
     ///   &lt;analyzer&gt;
     ///     &lt;tokenizer class="solr.WhitespaceTokenizerFactory"/&gt;
     ///     &lt;filter class="solr.LimitTokenPositionFilterFactory" maxTokenPosition="3" consumeAllTokens="false" /&gt;
     ///   &lt;/analyzer&gt;
-    /// &lt;/fieldType&gt;</pre>
+    /// &lt;/fieldType&gt;</code>
     /// <para>
     /// The {@code consumeAllTokens} property is optional and defaults to {@code false}.  
-    /// See <seealso cref="LimitTokenPositionFilter"/> for an explanation of its use.
+    /// See <see cref="LimitTokenPositionFilter"/> for an explanation of its use.
     /// </para>
     /// </summary>
     public class LimitTokenPositionFilterFactory : TokenFilterFactory

