lucenenet-commits mailing list archives

From nightowl...@apache.org
Subject [14/39] lucenenet git commit: Lucene.Net.Analysis.Util (CharArraySet - WordlistLoader) refactor: member accessibility and documentation comments
Date Sat, 04 Feb 2017 20:32:33 GMT
Lucene.Net.Analysis.Util (CharArraySet - WordlistLoader) refactor: member accessibility and documentation comments


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/f934cebe
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/f934cebe
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/f934cebe

Branch: refs/heads/api-work
Commit: f934cebea3707a46224a2a1a7092a62ee017990f
Parents: b19aee5
Author: Shad Storhaug <shad@shadstorhaug.com>
Authored: Sat Feb 4 21:20:08 2017 +0700
Committer: Shad Storhaug <shad@shadstorhaug.com>
Committed: Sat Feb 4 23:08:19 2017 +0700

----------------------------------------------------------------------
 .../Analysis/Util/CharFilterFactory.cs          | 13 ++--
 .../Analysis/Util/CharTokenizer.cs              | 27 ++++----
 .../Analysis/Util/ClasspathResourceLoader.cs    | 14 +++--
 .../Analysis/Util/ElisionFilter.cs              |  7 ++-
 .../Analysis/Util/ElisionFilterFactory.cs       |  2 +-
 .../Analysis/Util/FilesystemResourceLoader.cs   | 20 +++---
 .../Analysis/Util/FilteringTokenFilter.cs       | 17 +++--
 .../Analysis/Util/MultiTermAwareComponent.cs    |  2 +-
 .../Analysis/Util/OpenStringBuilder.cs          | 14 +++--
 .../Analysis/Util/ResourceLoader.cs             |  2 +-
 .../Analysis/Util/ResourceLoaderAware.cs        |  8 +--
 .../Analysis/Util/RollingCharBuffer.cs          | 32 +++++-----
 .../Analysis/Util/SegmentingTokenizerBase.cs    | 16 ++---
 .../Analysis/Util/StemmerUtil.cs                |  9 ++-
 .../Analysis/Util/StopwordAnalyzerBase.cs       | 50 ++++++++-------
 .../Analysis/Util/TokenFilterFactory.cs         | 17 ++---
 .../Analysis/Util/TokenizerFactory.cs           | 21 +++----
 .../Analysis/Util/TypeExtensions.cs             |  4 +-
 .../Analysis/Util/WordlistLoader.cs             | 65 ++++++++++----------
 19 files changed, 171 insertions(+), 169 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/f934cebe/src/Lucene.Net.Analysis.Common/Analysis/Util/CharFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/CharFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/CharFilterFactory.cs
index a6a1efe..9da80b3 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/CharFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/CharFilterFactory.cs
@@ -27,18 +27,17 @@ namespace Lucene.Net.Analysis.Util
     /// </summary>
     public abstract class CharFilterFactory : AbstractAnalysisFactory
     {
-
         private static readonly AnalysisSPILoader<CharFilterFactory> loader = new AnalysisSPILoader<CharFilterFactory>();
 
         /// <summary>
-        /// looks up a charfilter by name from context classpath </summary>
+        /// looks up a charfilter by name from the host project's dependent assemblies </summary>
         public static CharFilterFactory ForName(string name, IDictionary<string, string> args)
         {
             return loader.NewInstance(name, args);
         }
 
         /// <summary>
-        /// looks up a charfilter class by name from context classpath </summary>
+        /// looks up a charfilter class by name from the host project's dependent assemblies </summary>
         public static Type LookupClass(string name)
         {
             return loader.LookupClass(name);
@@ -52,9 +51,9 @@ namespace Lucene.Net.Analysis.Util
         }
 
         /// <summary>
-        /// Reloads the factory list from the given <see cref="ClassLoader"/>.
+        /// Reloads the factory list.
         /// Changes to the factories are visible after the method ends, all
-        /// iterators (<see cref="#availableCharFilters()"/>,...) stay consistent. 
+        /// iterators (<see cref="AvailableCharFilters"/>,...) stay consistent. 
         /// 
         /// <para><b>NOTE:</b> Only new factories are added, existing ones are
         /// never removed or replaced.
@@ -72,13 +71,13 @@ namespace Lucene.Net.Analysis.Util
         /// <summary>
         /// Initialize this factory via a set of key-value pairs.
         /// </summary>
-        protected internal CharFilterFactory(IDictionary<string, string> args)
+        protected CharFilterFactory(IDictionary<string, string> args)
             : base(args)
         {
         }
 
         /// <summary>
-        /// Wraps the given TextReader with a CharFilter. </summary>
+        /// Wraps the given <see cref="TextReader"/> with a <see cref="CharFilter"/>. </summary>
         public abstract TextReader Create(TextReader input);
     }
 }
\ No newline at end of file
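
A minimal usage sketch of the factory lookup described above; the "HTMLStrip" factory name and the argument map are assumptions for illustration, not part of this commit:

    using System.Collections.Generic;
    using System.IO;
    using Lucene.Net.Analysis.Util;

    var args = new Dictionary<string, string> { { "luceneMatchVersion", "LUCENE_48" } };
    CharFilterFactory factory = CharFilterFactory.ForName("HTMLStrip", args);      // assumed name
    TextReader filtered = factory.Create(new StringReader("<b>hello</b> world"));  // wraps the reader with a CharFilter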

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/f934cebe/src/Lucene.Net.Analysis.Common/Analysis/Util/CharTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/CharTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/CharTokenizer.cs
index 58cc255..9ef33c4 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/CharTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/CharTokenizer.cs
@@ -26,13 +26,13 @@ namespace Lucene.Net.Analysis.Util
     /// <summary>
     /// An abstract base class for simple, character-oriented tokenizers. 
     /// <para>
-    /// <a name="version">You must specify the required <see cref="LuceneVersion"/> compatibility
+    /// You must specify the required <see cref="LuceneVersion"/> compatibility
     /// when creating <see cref="CharTokenizer"/>:
-    /// <ul>
-    /// <li>As of 3.1, <see cref="CharTokenizer"/> uses an int based API to normalize and
-    /// detect token codepoints. See <see cref="#isTokenChar(int)"/> and
-    /// <see cref="#normalize(int)"/> for details.</li>
-    /// </ul>
+    /// <list type="bullet">
+    ///     <item>As of 3.1, <see cref="CharTokenizer"/> uses an int based API to normalize and
+    ///         detect token codepoints. See <see cref="IsTokenChar(int)"/> and
+    ///         <see cref="Normalize(int)"/> for details.</item>
+    /// </list>
     /// </para>
     /// <para>
     /// A new <see cref="CharTokenizer"/> API has been introduced with Lucene 3.1. This API
@@ -41,26 +41,25 @@ namespace Lucene.Net.Analysis.Util
     /// "http://java.sun.com/j2se/1.5.0/docs/api/java/lang/Character.html#supplementary"
     /// >supplementary characters</a>. The old <i>char</i> based API has been
     /// deprecated and should be replaced with the <i>int</i> based methods
-    /// <see cref="#isTokenChar(int)"/> and <see cref="#normalize(int)"/>.
+    /// <see cref="IsTokenChar(int)"/> and <see cref="Normalize(int)"/>.
     /// </para>
     /// <para>
     /// As of Lucene 3.1 each <see cref="CharTokenizer"/> - constructor expects a
     /// <see cref="LuceneVersion"/> argument. Based on the given <see cref="LuceneVersion"/> either the new
     /// API or a backwards compatibility layer is used at runtime. For
-    /// <see cref="LuceneVersion"/> < 3.1 the backwards compatibility layer ensures correct
+    /// <see cref="LuceneVersion"/> &lt; 3.1 the backwards compatibility layer ensures correct
     /// behavior even for indexes build with previous versions of Lucene. If a
     /// <see cref="LuceneVersion"/> >= 3.1 is used <see cref="CharTokenizer"/> requires the new API to
     /// be implemented by the instantiated class. Yet, the old <i>char</i> based API
     /// is not required anymore even if backwards compatibility must be preserved.
     /// <see cref="CharTokenizer"/> subclasses implementing the new API are fully backwards
-    /// compatible if instantiated with <see cref="LuceneVersion"/> < 3.1.
+    /// compatible if instantiated with <see cref="LuceneVersion"/> &lt; 3.1.
     /// </para>
     /// <para>
     /// <strong>Note:</strong> If you use a subclass of <see cref="CharTokenizer"/> with <see cref="LuceneVersion"/> >=
-    /// 3.1 on an index build with a version < 3.1, created tokens might not be
+    /// 3.1 on an index build with a version &lt; 3.1, created tokens might not be
     /// compatible with the terms in your index.
     /// </para>
-    /// 
     /// </summary>
     public abstract class CharTokenizer : Tokenizer
     {
@@ -71,7 +70,7 @@ namespace Lucene.Net.Analysis.Util
         ///          Lucene version to match </param>
         /// <param name="input">
         ///          the input to split up into tokens </param>
-        protected CharTokenizer(LuceneVersion matchVersion, TextReader input)
+        public CharTokenizer(LuceneVersion matchVersion, TextReader input)
             : base(input)
         {
             Init(matchVersion);
@@ -86,14 +85,14 @@ namespace Lucene.Net.Analysis.Util
         ///          the attribute factory to use for this <see cref="Tokenizer"/> </param>
         /// <param name="input">
         ///          the input to split up into tokens </param>
-        protected CharTokenizer(LuceneVersion matchVersion, AttributeFactory factory, TextReader input)
+        public CharTokenizer(LuceneVersion matchVersion, AttributeFactory factory, TextReader input)
             : base(factory, input)
         {
             Init(matchVersion);
         }
 
         /// <summary>
-        /// LUCENENET Added in the .NET version to assist with setting the attributes
+        /// LUCENENET specific - Added in the .NET version to assist with setting the attributes
         /// from multiple constructors.
         /// </summary>
         /// <param name="matchVersion"></param>
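
For reference, a minimal sketch (not from this commit) of a subclass built on the int-based API mentioned above; the cast to char is a simplification that ignores supplementary code points:

    using System.IO;
    using Lucene.Net.Analysis.Util;
    using Lucene.Net.Util;

    public sealed class SimpleLetterTokenizer : CharTokenizer
    {
        public SimpleLetterTokenizer(LuceneVersion matchVersion, TextReader input)
            : base(matchVersion, input)
        {
        }

        // Emit runs of letters as tokens; any other code point ends the current token.
        protected override bool IsTokenChar(int c)
        {
            return char.IsLetter((char)c); // simplified: ignores supplementary characters
        }
    }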

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/f934cebe/src/Lucene.Net.Analysis.Common/Analysis/Util/ClasspathResourceLoader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/ClasspathResourceLoader.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/ClasspathResourceLoader.cs
index aa425c7..40434f3 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/ClasspathResourceLoader.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/ClasspathResourceLoader.cs
@@ -23,9 +23,9 @@ namespace Lucene.Net.Analysis.Util
      */
 
     /// <summary>
-    /// Simple <see cref="ResourceLoader"/> that uses <see cref="ClassLoader#getResourceAsStream(String)"/>
-    /// and <see cref="Class#forName(String,boolean,ClassLoader)"/> to open resources and
-    /// classes, respectively.
+    /// Simple <see cref="IResourceLoader"/> that uses <see cref="Assembly.GetManifestResourceStream(string)"/>
+    /// and <see cref="Assembly.GetType(string)"/> to open resources and
+    /// <see cref="Type"/>s, respectively.
     /// </summary>
     public sealed class ClasspathResourceLoader : IResourceLoader
     {
@@ -48,7 +48,7 @@ namespace Lucene.Net.Analysis.Util
         /// Resource names are relative to the resourcePrefix.
         /// </summary>
         /// <param name="clazz">The class type</param>
-        /// <param name="namespacePrefixToExclude">Removes the part of the namespace of the class that matches the regex. 
+        /// <param name="namespaceExcludeRegex">Removes the part of the namespace of the class that matches the regex. 
         /// This is useful to get to the resource if the assembly name and namespace name don't happen to match.
         /// If provided, the assembly name will be concatnated with the namespace name (excluding the part tha matches the regex)
         /// to provide the complete path to the embedded resource in the assembly. Note you can view the entire path to all of 
@@ -110,8 +110,12 @@ namespace Lucene.Net.Analysis.Util
         /// </summary>
         /// <param name="resource"></param>
         /// <returns></returns>
-        private string GetQualifiedResourceName(string resource)
+        private string GetQualifiedResourceName(string resource) 
         {
+            // LUCENENET TODO: Need to ensure this works in .NET Core (and perhaps refactor to make it more reliable).
+            // Perhaps it would make more sense to use Assembly.GetManifestResourceStream(Type, string), which allows
+            // you to filter by the namespace of a Type.
+
             var namespaceName = this.clazz.Namespace;
             var assemblyName = clazz.GetTypeInfo().Assembly.GetName().Name;
             if (string.IsNullOrEmpty(this.namespaceExcludeRegex) && (assemblyName.Equals(namespaceName, StringComparison.OrdinalIgnoreCase)))
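
A usage sketch under assumptions: the resource name "stopwords.txt" is hypothetical, and the IResourceLoader.OpenResource(string) member is assumed to return a readable Stream:

    using System.IO;
    using Lucene.Net.Analysis.Util;

    IResourceLoader loader = new ClasspathResourceLoader(typeof(ClasspathResourceLoader));
    using (Stream stream = loader.OpenResource("stopwords.txt"))   // hypothetical embedded resource
    using (TextReader reader = new StreamReader(stream))
    {
        string firstWord = reader.ReadLine();
    }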

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/f934cebe/src/Lucene.Net.Analysis.Common/Analysis/Util/ElisionFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/ElisionFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/ElisionFilter.cs
index bfa7751..4209310 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/ElisionFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/ElisionFilter.cs
@@ -22,15 +22,16 @@ namespace Lucene.Net.Analysis.Util
     /// <summary>
     /// Removes elisions from a <see cref="TokenStream"/>. For example, "l'avion" (the plane) will be
     /// tokenized as "avion" (plane).
+    /// <para/>
+    /// <a href="http://fr.wikipedia.org/wiki/%C3%89lision">Elision in Wikipedia</a>
     /// </summary>
-    /// <seealso cref= <a href="http://fr.wikipedia.org/wiki/%C3%89lision">Elision in Wikipedia</a> </seealso>
     public sealed class ElisionFilter : TokenFilter
     {
         private readonly CharArraySet articles;
         private readonly ICharTermAttribute termAtt;
 
         /// <summary>
-        /// Constructs an elision filter with a Set of stop words </summary>
+        /// Constructs an elision filter with a <see cref="CharArraySet"/> of stop words </summary>
         /// <param name="input"> the source <see cref="TokenStream"/> </param>
         /// <param name="articles"> a set of stopword articles </param>
         public ElisionFilter(TokenStream input, CharArraySet articles)
@@ -43,7 +44,7 @@ namespace Lucene.Net.Analysis.Util
         /// <summary>
         /// Increments the <see cref="TokenStream"/> with a <see cref="CharTermAttribute"/> without elisioned start
         /// </summary>
-        public override bool IncrementToken()
+        public override sealed bool IncrementToken()
         {
             if (m_input.IncrementToken())
             {
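
A short usage sketch (the article set and tokenizer choice are illustrative, not part of this commit):

    using System.IO;
    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.Core;
    using Lucene.Net.Analysis.Util;
    using Lucene.Net.Util;

    var articles = new CharArraySet(LuceneVersion.LUCENE_48, 4, true);  // ignoreCase = true
    articles.Add("l");
    articles.Add("d");

    TokenStream stream = new WhitespaceTokenizer(LuceneVersion.LUCENE_48, new StringReader("l'avion"));
    stream = new ElisionFilter(stream, articles);  // "l'avion" -> "avion"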

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/f934cebe/src/Lucene.Net.Analysis.Common/Analysis/Util/ElisionFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/ElisionFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/ElisionFilterFactory.cs
index f12f57b..e591916 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/ElisionFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/ElisionFilterFactory.cs
@@ -39,7 +39,7 @@ namespace Lucene.Net.Analysis.Util
         private CharArraySet articles;
 
         /// <summary>
-        /// Creates a new ElisionFilterFactory </summary>
+        /// Creates a new <see cref="ElisionFilterFactory"/> </summary>
         public ElisionFilterFactory(IDictionary<string, string> args) : base(args)
         {
             articlesFile = Get(args, "articles");

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/f934cebe/src/Lucene.Net.Analysis.Common/Analysis/Util/FilesystemResourceLoader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/FilesystemResourceLoader.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/FilesystemResourceLoader.cs
index 09aab01..275d335 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/FilesystemResourceLoader.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/FilesystemResourceLoader.cs
@@ -21,17 +21,17 @@ namespace Lucene.Net.Analysis.Util
 	 */
 
     /// <summary>
-    /// Simple <see cref="ResourceLoader"/> that opens resource files
+    /// Simple <see cref="IResourceLoader"/> that opens resource files
     /// from the local file system, optionally resolving against
     /// a base directory.
     /// 
-    /// <para>This loader wraps a delegate <see cref="ResourceLoader"/>
+    /// <para>This loader wraps a delegate <see cref="IResourceLoader"/>
     /// that is used to resolve all files, the current base directory
-    /// does not contain. <see cref="#newInstance"/> is always resolved
-    /// against the delegate, as a <see cref="ClassLoader"/> is needed.
+    /// does not contain. <see cref="NewInstance"/> is always resolved
+    /// against the delegate, as an <see cref="T:System.Assembly"/> is needed.
     /// 
     /// </para>
-    /// <para>You can chain several {@code FilesystemResourceLoader}s
+    /// <para>You can chain several <see cref="FilesystemResourceLoader"/>s
     /// to allow lookup of files in more than one base directory.
     /// </para>
     /// </summary>
@@ -46,26 +46,26 @@ namespace Lucene.Net.Analysis.Util
         /// are delegated to context classloader.
         /// </summary>
         public FilesystemResourceLoader()
-              : this((DirectoryInfo)null)
+            : this((DirectoryInfo)null)
         {
         }
 
         /// <summary>
         /// Creates a resource loader that resolves resources against the given
-        /// base directory (may be {@code null} to refer to CWD).
+        /// base directory (may be <c>null</c> to refer to CWD).
         /// Files not found in file system and class lookups are delegated to context
         /// classloader.
         /// </summary>
         public FilesystemResourceLoader(DirectoryInfo baseDirectory)
-              : this(baseDirectory, new ClasspathResourceLoader(typeof(FilesystemResourceLoader)))
+            : this(baseDirectory, new ClasspathResourceLoader(typeof(FilesystemResourceLoader)))
         {
         }
 
         /// <summary>
         /// Creates a resource loader that resolves resources against the given
-        /// base directory (may be {@code null} to refer to CWD).
+        /// base directory (may be <c>null</c> to refer to CWD).
         /// Files not found in file system and class lookups are delegated
-        /// to the given delegate <see cref="ResourceLoader"/>.
+        /// to the given delegate <see cref="IResourceLoader"/>.
         /// </summary>
         public FilesystemResourceLoader(DirectoryInfo baseDirectory, IResourceLoader @delegate)
         {
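
A usage sketch of the chaining described above (the "conf" directory is an assumed path): files are resolved against the base directory first, and anything not found there falls back to the embedded-resource loader.

    using System.IO;
    using Lucene.Net.Analysis.Util;

    IResourceLoader loader = new FilesystemResourceLoader(
        new DirectoryInfo("conf"),                                        // assumed base directory
        new ClasspathResourceLoader(typeof(FilesystemResourceLoader)));   // fallback delegate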

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/f934cebe/src/Lucene.Net.Analysis.Common/Analysis/Util/FilteringTokenFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/FilteringTokenFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/FilteringTokenFilter.cs
index 241c8da..c829ac0 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/FilteringTokenFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/FilteringTokenFilter.cs
@@ -23,17 +23,17 @@ namespace Lucene.Net.Analysis.Util
 
     /// <summary>
     /// Abstract base class for TokenFilters that may remove tokens.
-    /// You have to implement <see cref="#accept"/> and return a boolean if the current
-    /// token should be preserved. <see cref="#incrementToken"/> uses this method
+    /// You have to implement <see cref="Accept"/> and return a boolean if the current
+    /// token should be preserved. <see cref="IncrementToken"/> uses this method
     /// to decide if a token should be passed to the caller.
-    /// <para><a name="lucene_match_version" />As of Lucene 4.4, an
-    /// <see cref="IllegalArgumentException"/> is thrown when trying to disable position
+    /// <para>
+    /// As of Lucene 4.4, an
+    /// <see cref="ArgumentException"/> is thrown when trying to disable position
     /// increments when filtering terms.
     /// </para>
     /// </summary>
     public abstract class FilteringTokenFilter : TokenFilter
     {
-
         private static void CheckPositionIncrement(LuceneVersion version, bool enablePositionIncrements)
         {
             if (!enablePositionIncrements &&
@@ -45,7 +45,7 @@ namespace Lucene.Net.Analysis.Util
             }
         }
 
-        protected internal readonly LuceneVersion m_version;
+        protected readonly LuceneVersion m_version;
         private readonly IPositionIncrementAttribute posIncrAtt;
         private bool enablePositionIncrements; // no init needed, as ctor enforces setting value!
         private bool first = true;
@@ -78,7 +78,7 @@ namespace Lucene.Net.Analysis.Util
         }
 
         /// <summary>
-        /// Override this method and return if the current input token should be returned by <see cref="#incrementToken"/>. </summary>
+        /// Override this method and return if the current input token should be returned by <see cref="IncrementToken"/>. </summary>
         protected abstract bool Accept();
 
         public override sealed bool IncrementToken()
@@ -129,7 +129,6 @@ namespace Lucene.Net.Analysis.Util
             skippedPositions = 0;
         }
 
-        /// <seealso cref= #setEnablePositionIncrements(boolean) </seealso>
         public virtual bool EnablePositionIncrements
         {
             get
@@ -139,7 +138,7 @@ namespace Lucene.Net.Analysis.Util
             // LUCENENET TODO:
             // deprecated enablePositionIncrements=false is not supported anymore as of Lucene 4.4
             // There doesn't appear to be a way to apply [Obsolete] on a property setter only. The only way
-            // to make it show the obsolete warning is to change this back to separate Get and Set methods.
+            // to make it show the obsolete warning is to change this back to a separate Set method.
             set
             {
                 CheckPositionIncrement(m_version, value);
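
A minimal sketch (not from this commit) of a subclass that implements Accept() as described above, keeping only tokens of a minimum length:

    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.TokenAttributes;
    using Lucene.Net.Analysis.Util;
    using Lucene.Net.Util;

    public sealed class MinLengthFilter : FilteringTokenFilter
    {
        private readonly ICharTermAttribute termAtt;
        private readonly int minLength;

        public MinLengthFilter(LuceneVersion version, TokenStream input, int minLength)
            : base(version, input)
        {
            this.termAtt = AddAttribute<ICharTermAttribute>();
            this.minLength = minLength;
        }

        // Return true to keep the current token, false to drop it.
        protected override bool Accept()
        {
            return termAtt.Length >= minLength;
        }
    }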

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/f934cebe/src/Lucene.Net.Analysis.Common/Analysis/Util/MultiTermAwareComponent.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/MultiTermAwareComponent.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/MultiTermAwareComponent.cs
index 9f32238..397b110 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/MultiTermAwareComponent.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/MultiTermAwareComponent.cs
@@ -28,7 +28,7 @@
     {
         /// <summary>
         /// Returns an analysis component to handle analysis if multi-term queries.
-        /// The returned component must be a TokenizerFactory, TokenFilterFactory or CharFilterFactory.
+        /// The returned component must be a <see cref="TokenizerFactory"/>, <see cref="TokenFilterFactory"/> or <see cref="CharFilterFactory"/>.
         /// </summary>
         AbstractAnalysisFactory MultiTermComponent { get; } // LUCENENET TODO: Change to GetMultiTermComponent() ? Some implementations return new instance.
     }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/f934cebe/src/Lucene.Net.Analysis.Common/Analysis/Util/OpenStringBuilder.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/OpenStringBuilder.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/OpenStringBuilder.cs
index c97d9f0..fc73055 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/OpenStringBuilder.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/OpenStringBuilder.cs
@@ -26,8 +26,8 @@ namespace Lucene.Net.Analysis.Util
     /// </summary>
     public class OpenStringBuilder : ICharSequence
     {
-        protected internal char[] m_buf;
-        protected internal int m_len;
+        protected char[] m_buf;
+        protected int m_len;
 
         public OpenStringBuilder() 
             : this(32)
@@ -68,6 +68,8 @@ namespace Lucene.Net.Analysis.Util
                 return m_buf;
             }
         }
+
+        // LUCENENET TODO: Change to Length (StringBuilder uses Length in .NET)
         public virtual int Count // LUCENENET NOTE: This was size() in Lucene.
         {
             get{ return m_len; }
@@ -78,12 +80,12 @@ namespace Lucene.Net.Analysis.Util
             get { return m_buf.Length; }
         }
 
-        public virtual OpenStringBuilder Append(string csq)
+        public virtual OpenStringBuilder Append(string csq) // LUCENENET TODO: Add overloads for ICharSequence and StringBuilder
         {
             return Append(csq, 0, csq.Length);
         }
 
-        public virtual OpenStringBuilder Append(string csq, int start, int end)
+        public virtual OpenStringBuilder Append(string csq, int start, int end) // LUCENENET TODO: Add overloads for ICharSequence and StringBuilder
         {
             Reserve(end - start);
             for (int i = start; i < end; i++)
@@ -137,7 +139,7 @@ namespace Lucene.Net.Analysis.Util
             this.m_len += len;
         }
 
-        protected internal virtual void Resize(int len)
+        protected virtual void Resize(int len)
         {
             char[] newbuf = new char[Math.Max(m_buf.Length << 1, len)];
             System.Array.Copy(m_buf, 0, newbuf, 0, Count);
@@ -177,7 +179,7 @@ namespace Lucene.Net.Analysis.Util
             UnsafeWrite(b, off, len);
         }
 
-        public void Write(OpenStringBuilder arr)
+        public void Write(OpenStringBuilder arr) // LUCENENET TODO: Add overload for StringBuilder
         {
             Write(arr.m_buf, 0, m_len);
         }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/f934cebe/src/Lucene.Net.Analysis.Common/Analysis/Util/ResourceLoader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/ResourceLoader.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/ResourceLoader.cs
index 301a3d3..7c472bc 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/ResourceLoader.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/ResourceLoader.cs
@@ -33,7 +33,7 @@ namespace Lucene.Net.Analysis.Util
         /// <summary>
         /// Finds class of the name
         /// </summary>
-        Type FindClass(string cname);
+        Type FindClass(string cname); // LUCENENET TODO: Rename FindType ?
 
         /// <summary>
         /// Creates an instance of the name and expected type

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/f934cebe/src/Lucene.Net.Analysis.Common/Analysis/Util/ResourceLoaderAware.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/ResourceLoaderAware.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/ResourceLoaderAware.cs
index f9c0506..6d8fe1c 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/ResourceLoaderAware.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/ResourceLoaderAware.cs
@@ -19,14 +19,14 @@
 
     /// <summary>
     /// Interface for a component that needs to be initialized by
-    /// an implementation of <see cref="ResourceLoader"/>.
+    /// an implementation of <see cref="IResourceLoader"/>.
     /// </summary>
-    /// <seealso cref= ResourceLoader </seealso>
+    /// <seealso cref="IResourceLoader"/>
     public interface IResourceLoaderAware
     {
         /// <summary>
-        /// Initializes this component with the provided ResourceLoader
-        /// (used for loading classes, files, etc).
+        /// Initializes this component with the provided <see cref="IResourceLoader"/>
+        /// (used for loading types, embedded resources, files, etc).
         /// </summary>
         void Inform(IResourceLoader loader);
     }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/f934cebe/src/Lucene.Net.Analysis.Common/Analysis/Util/RollingCharBuffer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/RollingCharBuffer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/RollingCharBuffer.cs
index 6bddce4..3cf6f12 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/RollingCharBuffer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/RollingCharBuffer.cs
@@ -5,7 +5,6 @@ using Lucene.Net.Util;
 
 namespace Lucene.Net.Analysis.Util
 {
-
     /*
      * Licensed to the Apache Software Foundation (ASF) under one or more
      * contributor license agreements.  See the NOTICE file distributed with
@@ -22,19 +21,18 @@ namespace Lucene.Net.Analysis.Util
      * See the License for the specific language governing permissions and
      * limitations under the License.
      */
+
     /// <summary>
-    /// Acts like a forever growing char[] as you read
-    ///  characters into it from the provided reader, but
-    ///  internally it uses a circular buffer to only hold the
-    ///  characters that haven't been freed yet.  This is like a
-    ///  PushbackReader, except you don't have to specify
-    ///  up-front the max size of the buffer, but you do have to
-    ///  periodically call <see cref="#freeBefore"/>. 
+    /// Acts like a forever growing <see cref="T:char[]"/> as you read
+    /// characters into it from the provided reader, but
+    /// internally it uses a circular buffer to only hold the
+    /// characters that haven't been freed yet.  This is like a
+    /// PushbackReader, except you don't have to specify
+    /// up-front the max size of the buffer, but you do have to
+    /// periodically call <see cref="FreeBefore"/>. 
     /// </summary>
-
     public sealed class RollingCharBuffer
     {
-
         private TextReader reader;
 
         private char[] buffer = new char[512];
@@ -62,11 +60,13 @@ namespace Lucene.Net.Analysis.Util
             end = false;
         }
 
-        /* Absolute position read.  NOTE: pos must not jump
-         * ahead by more than 1!  Ie, it's OK to read arbitarily
-         * far back (just not prior to the last {@link
-         * #freeBefore}), but NOT ok to read arbitrarily far
-         * ahead.  Returns -1 if you hit EOF. */
+        /// <summary>
+        /// Absolute position read.  NOTE: pos must not jump
+        /// ahead by more than 1!  Ie, it's OK to read arbitarily
+        /// far back (just not prior to the last <see cref="FreeBefore(int)"/>, 
+        /// but NOT ok to read arbitrarily far
+        /// ahead.  Returns -1 if you hit EOF.
+        /// </summary>
         public int Get(int pos)
         {
             //System.out.println("    Get pos=" + pos + " nextPos=" + nextPos + " count=" + count);
@@ -161,7 +161,7 @@ namespace Lucene.Net.Analysis.Util
 
         /// <summary>
         /// Call this to notify us that no chars before this
-        ///  absolute position are needed anymore. 
+        /// absolute position are needed anymore. 
         /// </summary>
         public void FreeBefore(int pos)
         {
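
A usage sketch of the Get/FreeBefore contract described above; the input text and the freeing interval are arbitrary:

    using System.IO;
    using Lucene.Net.Analysis.Util;

    var buffer = new RollingCharBuffer();
    buffer.Reset(new StringReader("some text to scan"));

    int pos = 0;
    int ch;
    while ((ch = buffer.Get(pos)) != -1)   // -1 signals end of input
    {
        pos++;
        if (pos % 256 == 0)
        {
            buffer.FreeBefore(pos);        // characters before pos are no longer needed
        }
    }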

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/f934cebe/src/Lucene.Net.Analysis.Common/Analysis/Util/SegmentingTokenizerBase.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/SegmentingTokenizerBase.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/SegmentingTokenizerBase.cs
index ca0b994..a0a63c5 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/SegmentingTokenizerBase.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/SegmentingTokenizerBase.cs
@@ -59,22 +59,22 @@ namespace Lucene.Net.Analysis.Util
 
         /// <summary>
         /// Construct a new SegmenterBase, using
-        /// the provided BreakIterator for sentence segmentation.
+        /// the provided <see cref="BreakIterator"/> for sentence segmentation.
         /// <para>
-        /// Note that you should never share BreakIterators across different
-        /// TokenStreams, instead a newly created or cloned one should always
+        /// Note that you should never share <see cref="BreakIterator"/>s across different
+        /// <see cref="TokenStream"/>s, instead a newly created or cloned one should always
         /// be provided to this constructor.
         /// </para>
         /// </summary>
-        protected SegmentingTokenizerBase(TextReader reader, BreakIterator iterator)
+        public SegmentingTokenizerBase(TextReader reader, BreakIterator iterator)
             : this(AttributeFactory.DEFAULT_ATTRIBUTE_FACTORY, reader, iterator)
         {
         }
 
         /// <summary>
-        /// Construct a new SegmenterBase, also supplying the AttributeFactory
+        /// Construct a new SegmenterBase, also supplying the <see cref="Lucene.Net.Util.AttributeSource.AttributeFactory"/>
         /// </summary>
-        protected SegmentingTokenizerBase(AttributeFactory factory, TextReader reader, BreakIterator iterator)
+        public SegmentingTokenizerBase(AttributeFactory factory, TextReader reader, BreakIterator iterator)
             : base(factory, reader)
         {
             offsetAtt = AddAttribute<IOffsetAttribute>();
@@ -106,7 +106,7 @@ namespace Lucene.Net.Analysis.Util
             length = usableLength = m_offset = 0;
         }
 
-        public override void End()
+        public override sealed void End()
         {
             base.End();
             int finalOffset = CorrectOffset(length < 0 ? m_offset : m_offset + length);
@@ -129,7 +129,7 @@ namespace Lucene.Net.Analysis.Util
 
         /// <summary>
         /// For sentence tokenization, these are the unambiguous break positions. </summary>
-        protected internal virtual bool IsSafeEnd(char ch)
+        protected virtual bool IsSafeEnd(char ch)
         {
             switch ((int)ch)
             {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/f934cebe/src/Lucene.Net.Analysis.Common/Analysis/Util/StemmerUtil.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/StemmerUtil.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/StemmerUtil.cs
index 145c064..9164e95 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/StemmerUtil.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/StemmerUtil.cs
@@ -3,7 +3,6 @@ using System.Diagnostics;
 
 namespace Lucene.Net.Analysis.Util
 {
-
     /*
      * Licensed to the Apache Software Foundation (ASF) under one or more
      * contributor license agreements.  See the NOTICE file distributed with
@@ -35,12 +34,12 @@ namespace Lucene.Net.Analysis.Util
         }
 
         /// <summary>
-        /// Returns true if the character array starts with the suffix.
+        /// Returns true if the character array starts with the prefix.
         /// </summary>
         /// <param name="s"> Input Buffer </param>
         /// <param name="len"> length of input buffer </param>
         /// <param name="prefix"> Prefix string to test </param>
-        /// <returns> true if <code>s</code> starts with <code>prefix</code> </returns>
+        /// <returns> <c>true</c> if <paramref name="s"/> starts with <paramref name="prefix"/> </returns>
         public static bool StartsWith(char[] s, int len, string prefix)
         {
             int prefixLen = prefix.Length;
@@ -64,7 +63,7 @@ namespace Lucene.Net.Analysis.Util
         /// <param name="s"> Input Buffer </param>
         /// <param name="len"> length of input buffer </param>
         /// <param name="suffix"> Suffix string to test </param>
-        /// <returns> true if <code>s</code> ends with <code>suffix</code> </returns>
+        /// <returns> <c>true</c> if <paramref name="s"/> ends with <paramref name="suffix"/> </returns>
         public static bool EndsWith(char[] s, int len, string suffix)
         {
             int suffixLen = suffix.Length;
@@ -89,7 +88,7 @@ namespace Lucene.Net.Analysis.Util
         /// <param name="s"> Input Buffer </param>
         /// <param name="len"> length of input buffer </param>
         /// <param name="suffix"> Suffix string to test </param>
-        /// <returns> true if <code>s</code> ends with <code>suffix</code> </returns>
+        /// <returns> <c>true</c> if <paramref name="s"/> ends with <paramref name="suffix"/> </returns>
         public static bool EndsWith(char[] s, int len, char[] suffix)
         {
             int suffixLen = suffix.Length;
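
A brief usage sketch of the prefix/suffix helpers shown above:

    using Lucene.Net.Analysis.Util;

    char[] term = "playing".ToCharArray();
    bool hasPrefix = StemmerUtil.StartsWith(term, term.Length, "play");  // true
    bool hasSuffix = StemmerUtil.EndsWith(term, term.Length, "ing");     // true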

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/f934cebe/src/Lucene.Net.Analysis.Common/Analysis/Util/StopwordAnalyzerBase.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/StopwordAnalyzerBase.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/StopwordAnalyzerBase.cs
index c19ace3..98d71c9 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/StopwordAnalyzerBase.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/StopwordAnalyzerBase.cs
@@ -1,8 +1,8 @@
-using System;
+using Lucene.Net.Util;
+using System;
 using System.IO;
-using System.Text;
-using Lucene.Net.Util;
 using System.Reflection;
+using System.Text;
 
 namespace Lucene.Net.Analysis.Util
 {
@@ -24,18 +24,16 @@ namespace Lucene.Net.Analysis.Util
      */
 
     /// <summary>
-    /// Base class for Analyzers that need to make use of stopword sets. 
-    /// 
+    /// Base class for <see cref="Analyzer"/>s that need to make use of stopword sets. 
     /// </summary>
     public abstract class StopwordAnalyzerBase : Analyzer
     {
-
         /// <summary>
         /// An immutable stopword set
         /// </summary>
-        protected internal readonly CharArraySet m_stopwords;
+        protected readonly CharArraySet m_stopwords;
 
-        protected internal readonly LuceneVersion m_matchVersion;
+        protected readonly LuceneVersion m_matchVersion;
 
         /// <summary>
         /// Returns the analyzer's stopword set or an empty set if the analyzer has no
@@ -58,7 +56,7 @@ namespace Lucene.Net.Analysis.Util
         ///          the Lucene version for cross version compatibility </param>
         /// <param name="stopwords">
         ///          the analyzer's stopword set </param>
-        protected internal StopwordAnalyzerBase(LuceneVersion version, CharArraySet stopwords)
+        protected StopwordAnalyzerBase(LuceneVersion version, CharArraySet stopwords)
         {
             m_matchVersion = version;
             // analyzers should use char array set for stopwords!
@@ -66,40 +64,40 @@ namespace Lucene.Net.Analysis.Util
         }
 
         /// <summary>
-        /// Creates a new Analyzer with an empty stopword set
+        /// Creates a new <see cref="Analyzer"/> with an empty stopword set
         /// </summary>
         /// <param name="version">
         ///          the Lucene version for cross version compatibility </param>
-        protected internal StopwordAnalyzerBase(LuceneVersion version)
+        protected StopwordAnalyzerBase(LuceneVersion version)
             : this(version, null)
         {
         }
 
-        // LUCENENET TODO: If this works, need to update the documentation for the .NET version of the story.
-
         /// <summary>
-        /// Creates a CharArraySet from a file resource associated with a class. (See
-        /// <see cref="Class#getResourceAsStream(String)"/>).
+        /// Creates a <see cref="CharArraySet"/> from an embedded resource associated with a class. (See
+        /// <see cref="Assembly.GetManifestResourceStream(string)"/>).
         /// </summary>
         /// <param name="ignoreCase">
-        ///          <code>true</code> if the set should ignore the case of the
-        ///          stopwords, otherwise <code>false</code> </param>
+        ///          <c>true</c> if the set should ignore the case of the
+        ///          stopwords, otherwise <c>false</c> </param>
         /// <param name="aClass">
         ///          a class that is associated with the given stopwordResource </param>
         /// <param name="resource">
         ///          name of the resource file associated with the given class </param>
         /// <param name="comment">
         ///          comment string to ignore in the stopword file </param>
-        /// <returns> a CharArraySet containing the distinct stopwords from the given
+        /// <returns> a <see cref="CharArraySet"/> containing the distinct stopwords from the given
         ///         file </returns>
         /// <exception cref="IOException">
         ///           if loading the stopwords throws an <see cref="IOException"/> </exception>
-        protected internal static CharArraySet LoadStopwordSet(bool ignoreCase, Type aClass, string resource, string comment)
+        protected static CharArraySet LoadStopwordSet(bool ignoreCase, Type aClass, string resource, string comment)
         {
             TextReader reader = null;
             try
             {
-                var resourceNames = aClass.GetTypeInfo().Assembly.GetManifestResourceNames();
+                //var resourceNames = aClass.GetTypeInfo().Assembly.GetManifestResourceNames();
+                // LUCENENET TODO: Maybe it would make more sense to use this overload?
+                //var resourceStream = aClass.GetTypeInfo().Assembly.GetManifestResourceStream(aClass, resource);
                 var resourceStream = aClass.GetTypeInfo().Assembly.GetManifestResourceStream(resource);
                 reader = IOUtils.GetDecodingReader(resourceStream, Encoding.UTF8);
                 return WordlistLoader.GetWordSet(reader, comment, new CharArraySet(
@@ -114,18 +112,18 @@ namespace Lucene.Net.Analysis.Util
         }
 
         /// <summary>
-        /// Creates a CharArraySet from a file.
+        /// Creates a <see cref="CharArraySet"/> from a file.
         /// </summary>
         /// <param name="stopwords">
         ///          the stopwords file to load
         /// </param>
         /// <param name="matchVersion">
         ///          the Lucene version for cross version compatibility </param>
-        /// <returns> a CharArraySet containing the distinct stopwords from the given
+        /// <returns> a <see cref="CharArraySet"/> containing the distinct stopwords from the given
         ///         file </returns>
         /// <exception cref="IOException">
         ///           if loading the stopwords throws an <see cref="IOException"/> </exception>
-        protected internal static CharArraySet LoadStopwordSet(FileInfo stopwords, LuceneVersion matchVersion)
+        protected static CharArraySet LoadStopwordSet(FileInfo stopwords, LuceneVersion matchVersion)
         {
             TextReader reader = null;
             try
@@ -140,18 +138,18 @@ namespace Lucene.Net.Analysis.Util
         }
 
         /// <summary>
-        /// Creates a CharArraySet from a file.
+        /// Creates a <see cref="CharArraySet"/> from a file.
         /// </summary>
         /// <param name="stopwords">
         ///          the stopwords reader to load
         /// </param>
         /// <param name="matchVersion">
         ///          the Lucene version for cross version compatibility </param>
-        /// <returns> a CharArraySet containing the distinct stopwords from the given
+        /// <returns> a <see cref="CharArraySet"/> containing the distinct stopwords from the given
         ///         reader </returns>
         /// <exception cref="IOException">
         ///           if loading the stopwords throws an <see cref="IOException"/> </exception>
-        protected internal static CharArraySet LoadStopwordSet(TextReader stopwords, LuceneVersion matchVersion)
+        protected static CharArraySet LoadStopwordSet(TextReader stopwords, LuceneVersion matchVersion)
         {
             try
             {
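
A minimal sketch of an analyzer built on this base class, using the protected m_matchVersion and m_stopwords fields from the diff above; the StandardTokenizer/LowerCaseFilter/StopFilter chain is an illustrative assumption, not part of this commit:

    using System.IO;
    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.Core;
    using Lucene.Net.Analysis.Standard;
    using Lucene.Net.Analysis.Util;
    using Lucene.Net.Util;

    public sealed class SimpleStopAnalyzer : StopwordAnalyzerBase
    {
        public SimpleStopAnalyzer(LuceneVersion version, CharArraySet stopwords)
            : base(version, stopwords)
        {
        }

        protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
        {
            var source = new StandardTokenizer(m_matchVersion, reader);
            TokenStream result = new LowerCaseFilter(m_matchVersion, source);
            result = new StopFilter(m_matchVersion, result, m_stopwords);
            return new TokenStreamComponents(source, result);
        }
    }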

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/f934cebe/src/Lucene.Net.Analysis.Common/Analysis/Util/TokenFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/TokenFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/TokenFilterFactory.cs
index b2822d2..beb9b22 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/TokenFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/TokenFilterFactory.cs
@@ -26,33 +26,34 @@ namespace Lucene.Net.Analysis.Util
     /// </summary>
     public abstract class TokenFilterFactory : AbstractAnalysisFactory
     {
-        private static readonly AnalysisSPILoader<TokenFilterFactory> loader = new AnalysisSPILoader<TokenFilterFactory>(new string[] { "TokenFilterFactory", "FilterFactory" });
+        private static readonly AnalysisSPILoader<TokenFilterFactory> loader = 
+            new AnalysisSPILoader<TokenFilterFactory>(new string[] { "TokenFilterFactory", "FilterFactory" });
 
         /// <summary>
-        /// looks up a tokenfilter by name from context classpath </summary>
+        /// looks up a tokenfilter by name from the host project's referenced assemblies </summary>
         public static TokenFilterFactory ForName(string name, IDictionary<string, string> args)
         {
             return loader.NewInstance(name, args);
         }
 
         /// <summary>
-        /// looks up a tokenfilter class by name from context classpath </summary>
+        /// looks up a tokenfilter class by name from  the host project's referenced assemblies </summary>
         public static Type LookupClass(string name)
         {
             return loader.LookupClass(name);
         }
 
         /// <summary>
-        /// returns a list of all available tokenfilter names from context classpath </summary>
+        /// returns a list of all available tokenfilter names from the host project's referenced assemblies </summary>
         public static ICollection<string> AvailableTokenFilters
         {
             get { return loader.AvailableServices; }
         }
 
         /// <summary>
-        /// Reloads the factory list from the given <see cref="ClassLoader"/>.
+        /// Reloads the factory list.
         /// Changes to the factories are visible after the method ends, all
-        /// iterators (<see cref="#availableTokenFilters()"/>,...) stay consistent. 
+        /// iterators (<see cref="AvailableTokenFilters"/>,...) stay consistent. 
         /// 
         /// <para><b>NOTE:</b> Only new factories are added, existing ones are
         /// never removed or replaced.
@@ -70,13 +71,13 @@ namespace Lucene.Net.Analysis.Util
         /// <summary>
         /// Initialize this factory via a set of key-value pairs.
         /// </summary>
-        protected internal TokenFilterFactory(IDictionary<string, string> args)
+        protected TokenFilterFactory(IDictionary<string, string> args)
             : base(args)
         {
         }
 
         /// <summary>
-        /// Transform the specified input TokenStream </summary>
+        /// Transform the specified input <see cref="TokenStream"/> </summary>
         public abstract TokenStream Create(TokenStream input);
     }
 }
\ No newline at end of file
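
A usage sketch of the lookup described above; the "Lowercase" name and the argument map are assumptions for illustration:

    using System.Collections.Generic;
    using System.IO;
    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.Core;
    using Lucene.Net.Analysis.Util;
    using Lucene.Net.Util;

    var args = new Dictionary<string, string> { { "luceneMatchVersion", "LUCENE_48" } };
    TokenFilterFactory factory = TokenFilterFactory.ForName("Lowercase", args);  // assumed name

    TokenStream stream = new WhitespaceTokenizer(LuceneVersion.LUCENE_48, new StringReader("Hello World"));
    stream = factory.Create(stream);  // wraps the stream with the configured filter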

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/f934cebe/src/Lucene.Net.Analysis.Common/Analysis/Util/TokenizerFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/TokenizerFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/TokenizerFactory.cs
index 285f090..e2916eb 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/TokenizerFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/TokenizerFactory.cs
@@ -1,7 +1,7 @@
-using System;
+using Lucene.Net.Util;
+using System;
 using System.Collections.Generic;
 using System.IO;
-using Lucene.Net.Util;
 
 namespace Lucene.Net.Analysis.Util
 {
@@ -28,34 +28,33 @@ namespace Lucene.Net.Analysis.Util
     /// </summary>
     public abstract class TokenizerFactory : AbstractAnalysisFactory
     {
-
         private static readonly AnalysisSPILoader<TokenizerFactory> loader = new AnalysisSPILoader<TokenizerFactory>();
 
         /// <summary>
-        /// looks up a tokenizer by name from context classpath </summary>
+        /// looks up a tokenizer by name from the host project's referenced assemblies </summary>
         public static TokenizerFactory ForName(string name, IDictionary<string, string> args)
         {
             return loader.NewInstance(name, args);
         }
 
         /// <summary>
-        /// looks up a tokenizer class by name from context classpath </summary>
+        /// looks up a tokenizer class by name from the host project's referenced assemblies </summary>
         public static Type LookupClass(string name)
         {
             return loader.LookupClass(name);
         }
 
         /// <summary>
-        /// returns a list of all available tokenizer names from context classpath </summary>
+        /// returns a list of all available tokenizer names from the host project's referenced assemblies </summary>
         public static ICollection<string> AvailableTokenizers
         {
             get { return loader.AvailableServices; }
         }
 
         /// <summary>
-        /// Reloads the factory list from the given <see cref="ClassLoader"/>.
+        /// Reloads the factory list.
         /// Changes to the factories are visible after the method ends, all
-        /// iterators (<see cref="#availableTokenizers()"/>,...) stay consistent. 
+        /// iterators (<see cref="AvailableTokenizers"/>,...) stay consistent. 
         /// 
         /// <para><b>NOTE:</b> Only new factories are added, existing ones are
         /// never removed or replaced.
@@ -73,20 +72,20 @@ namespace Lucene.Net.Analysis.Util
         /// <summary>
         /// Initialize this factory via a set of key-value pairs.
         /// </summary>
-        protected internal TokenizerFactory(IDictionary<string, string> args)
+        protected TokenizerFactory(IDictionary<string, string> args)
             : base(args)
         {
         }
 
         /// <summary>
-        /// Creates a TokenStream of the specified input using the default attribute factory. </summary>
+        /// Creates a <see cref="TokenStream"/> of the specified input using the default attribute factory. </summary>
         public Tokenizer Create(TextReader input)
         {
             return Create(AttributeSource.AttributeFactory.DEFAULT_ATTRIBUTE_FACTORY, input);
         }
 
         /// <summary>
-        /// Creates a TokenStream of the specified input using the given AttributeFactory </summary>
+        /// Creates a <see cref="TokenStream"/> of the specified input using the given <see cref="AttributeSource.AttributeFactory"/> </summary>
         public abstract Tokenizer Create(AttributeSource.AttributeFactory factory, TextReader input);
     }
 }
\ No newline at end of file
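
A usage sketch of the lookup and the Create overload described above; the "Whitespace" name is an assumption for illustration:

    using System.Collections.Generic;
    using System.IO;
    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.Util;

    var args = new Dictionary<string, string> { { "luceneMatchVersion", "LUCENE_48" } };
    TokenizerFactory factory = TokenizerFactory.ForName("Whitespace", args);   // assumed name
    Tokenizer tokenizer = factory.Create(new StringReader("hello world"));     // default attribute factory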

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/f934cebe/src/Lucene.Net.Analysis.Common/Analysis/Util/TypeExtensions.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/TypeExtensions.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/TypeExtensions.cs
index 79cc6c7..2c53dc4 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/TypeExtensions.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/TypeExtensions.cs
@@ -1,10 +1,12 @@
 using System;
-using System.Reflection;
 
 namespace Lucene.Net.Analysis.Util
 {
     internal static class TypeExtensions
     {
+        // LUCENENET TODO: Try to eliminate this class by using Assembly.GetManifestResourceStream(Type, string), if possible.
+        // If not possible, perhaps we should move this and BufferedCharFilter into a Support namespace here in Analysis.Common ?
+
         /// <summary>
         /// LUCENENET specific:
         /// In .NET Core, resources are embedded with the namespace based on

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/f934cebe/src/Lucene.Net.Analysis.Common/Analysis/Util/WordlistLoader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/WordlistLoader.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/WordlistLoader.cs
index d091f87..5687823 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/WordlistLoader.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/WordlistLoader.cs
@@ -26,12 +26,12 @@ namespace Lucene.Net.Analysis.Util
 
     /// <summary>
     /// Loader for text files that represent a list of stopwords.
+    /// <para/>
+    /// <see cref="IOUtils"/> to obtain <see cref="TextReader"/> instances.
+    /// @lucene.internal
     /// </summary>
-    /// <seealso cref= IOUtils to obtain <see cref="Reader"/> instances
-    /// @lucene.internal </seealso>
     public class WordlistLoader
     {
-
         private const int INITIAL_CAPACITY = 16;
 
         /// <summary>
@@ -43,12 +43,12 @@ namespace Lucene.Net.Analysis.Util
         // LUCENENET TODO: Add .NET overloads that accept a file name? Or at least a FileInfo object as was done in 3.0.3?
 
         /// <summary>
-        /// Reads lines from a TextReader and adds every line as an entry to a CharArraySet (omitting
-        /// leading and trailing whitespace). Every line of the TextReader should contain only
+        /// Reads lines from a <see cref="TextReader"/> and adds every line as an entry to a <see cref="CharArraySet"/> (omitting
+        /// leading and trailing whitespace). Every line of the <see cref="TextReader"/> should contain only
         /// one word. The words need to be in lowercase if you make use of an
-        /// Analyzer which uses LowerCaseFilter (like StandardAnalyzer).
+        /// <see cref="Analyzer"/> which uses <see cref="Core.LowerCaseFilter"/> (like <see cref="Standard.StandardAnalyzer"/>).
         /// </summary>
-        /// <param name="reader"> TextReader containing the wordlist </param>
+        /// <param name="reader"> <see cref="TextReader"/> containing the wordlist </param>
         /// <param name="result"> the <see cref="CharArraySet"/> to fill with the readers words </param>
         /// <returns> the given <see cref="CharArraySet"/> with the reader's words </returns>
         public static CharArraySet GetWordSet(TextReader reader, CharArraySet result)
@@ -70,12 +70,12 @@ namespace Lucene.Net.Analysis.Util
         }
 
         /// <summary>
-        /// Reads lines from a TextReader and adds every line as an entry to a CharArraySet (omitting
-        /// leading and trailing whitespace). Every line of the TextReader should contain only
+        /// Reads lines from a <see cref="TextReader"/> and adds every line as an entry to a <see cref="CharArraySet"/> (omitting
+        /// leading and trailing whitespace). Every line of the <see cref="TextReader"/> should contain only
         /// one word. The words need to be in lowercase if you make use of an
-        /// Analyzer which uses LowerCaseFilter (like StandardAnalyzer).
+        /// <see cref="Analyzer"/> which uses <see cref="Core.LowerCaseFilter"/> (like <see cref="Standard.StandardAnalyzer"/>).
         /// </summary>
-        /// <param name="reader"> TextReader containing the wordlist </param>
+        /// <param name="reader"> <see cref="TextReader"/> containing the wordlist </param>
         /// <param name="matchVersion"> the <see cref="LuceneVersion"/> </param>
         /// <returns> A <see cref="CharArraySet"/> with the reader's words </returns>
         public static CharArraySet GetWordSet(TextReader reader, LuceneVersion matchVersion)
@@ -84,12 +84,12 @@ namespace Lucene.Net.Analysis.Util
         }
 
         /// <summary>
-        /// Reads lines from a TextReader and adds every non-comment line as an entry to a CharArraySet (omitting
-        /// leading and trailing whitespace). Every line of the TextReader should contain only
+        /// Reads lines from a <see cref="TextReader"/> and adds every non-comment line as an entry to a <see cref="CharArraySet"/> (omitting
+        /// leading and trailing whitespace). Every line of the <see cref="TextReader"/> should contain only
         /// one word. The words need to be in lowercase if you make use of an
-        /// Analyzer which uses LowerCaseFilter (like StandardAnalyzer).
+        /// <see cref="Analyzer"/> which uses <see cref="Core.LowerCaseFilter"/> (like <see cref="Standard.StandardAnalyzer"/>).
         /// </summary>
-        /// <param name="reader"> TextReader containing the wordlist </param>
+        /// <param name="reader"> <see cref="TextReader"/> containing the wordlist </param>
         /// <param name="comment"> The string representing a comment. </param>
         /// <param name="matchVersion"> the <see cref="LuceneVersion"/> </param>
         /// <returns> A CharArraySet with the reader's words </returns>
@@ -99,12 +99,12 @@ namespace Lucene.Net.Analysis.Util
         }
 
         /// <summary>
-        /// Reads lines from a TextReader and adds every non-comment line as an entry to a CharArraySet (omitting
-        /// leading and trailing whitespace). Every line of the TextReader should contain only
+        /// Reads lines from a <see cref="TextReader"/> and adds every non-comment line as an entry to a <see cref="CharArraySet"/> (omitting
+        /// leading and trailing whitespace). Every line of the <see cref="TextReader"/> should contain only
         /// one word. The words need to be in lowercase if you make use of an
-        /// Analyzer which uses LowerCaseFilter (like StandardAnalyzer).
+        /// <see cref="Analyzer"/> which uses <see cref="Core.LowerCaseFilter"/> (like <see cref="Standard.StandardAnalyzer"/>).
         /// </summary>
-        /// <param name="reader"> TextReader containing the wordlist </param>
+        /// <param name="reader"> <see cref="TextReader"/> containing the wordlist </param>
         /// <param name="comment"> The string representing a comment. </param>
         /// <param name="result"> the <see cref="CharArraySet"/> to fill with the readers words </param>
         /// <returns> the given <see cref="CharArraySet"/> with the reader's words </returns>
@@ -133,14 +133,14 @@ namespace Lucene.Net.Analysis.Util
         /// Reads stopwords from a stopword list in Snowball format.
         /// <para>
         /// The snowball format is the following:
-        /// <ul>
-        /// <li>Lines may contain multiple words separated by whitespace.
-        /// <li>The comment character is the vertical line (&#124;).
-        /// <li>Lines may contain trailing comments.
-        /// </ul>
+        /// <list type="bullet">
+        ///     <item>Lines may contain multiple words separated by whitespace.</item>
+        ///     <item>The comment character is the vertical line (&#124;).</item>
+        ///     <item>Lines may contain trailing comments.</item>
+        /// </list>
         /// </para>
         /// </summary>
-        /// <param name="reader"> TextReader containing a Snowball stopword list </param>
+        /// <param name="reader"> <see cref="TextReader"/> containing a Snowball stopword list </param>
         /// <param name="result"> the <see cref="CharArraySet"/> to fill with the readers words </param>
         /// <returns> the given <see cref="CharArraySet"/> with the reader's words </returns>
         public static CharArraySet GetSnowballWordSet(TextReader reader, CharArraySet result)
@@ -176,14 +176,14 @@ namespace Lucene.Net.Analysis.Util
         /// Reads stopwords from a stopword list in Snowball format.
         /// <para>
         /// The snowball format is the following:
-        /// <ul>
-        /// <li>Lines may contain multiple words separated by whitespace.
-        /// <li>The comment character is the vertical line (&#124;).
-        /// <li>Lines may contain trailing comments.
-        /// </ul>
+        /// <list type="bullet">
+        ///     <item>Lines may contain multiple words separated by whitespace.</item>
+        ///     <item>The comment character is the vertical line (&#124;).</item>
+        ///     <item>Lines may contain trailing comments.</item>
+        /// </list>
         /// </para>
         /// </summary>
-        /// <param name="reader"> TextReader containing a Snowball stopword list </param>
+        /// <param name="reader"> <see cref="TextReader"/> containing a Snowball stopword list </param>
         /// <param name="matchVersion"> the Lucene <see cref="LuceneVersion"/> </param>
         /// <returns> A <see cref="CharArraySet"/> with the reader's words </returns>
         public static CharArraySet GetSnowballWordSet(TextReader reader, LuceneVersion matchVersion)
@@ -194,7 +194,7 @@ namespace Lucene.Net.Analysis.Util
 
         /// <summary>
         /// Reads a stem dictionary. Each line contains:
-        /// <pre>word<b>\t</b>stem</code>
+        /// <code>word<b>\t</b>stem</code>
         /// (i.e. two tab separated words)
         /// </summary>
         /// <returns> stem dictionary that overrules the stemming algorithm </returns>
@@ -220,7 +220,6 @@ namespace Lucene.Net.Analysis.Util
         /// <summary>
         /// Accesses a resource by name and returns the (non comment) lines containing
         /// data using the given character encoding.
-        /// 
         /// <para>
         /// A comment line is any line that starts with the character "#"
         /// </para>
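
A usage sketch of the two loading styles shown above; the inline word lists are illustrative only:

    using System.IO;
    using Lucene.Net.Analysis.Util;
    using Lucene.Net.Util;

    // Plain word list: one word per line.
    CharArraySet stopwords = WordlistLoader.GetWordSet(
        new StringReader("the\nand\nof"), LuceneVersion.LUCENE_48);

    // Snowball format: multiple words per line, '|' starts a comment.
    CharArraySet snowball = WordlistLoader.GetSnowballWordSet(
        new StringReader("le la les | French articles"), LuceneVersion.LUCENE_48);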

