lucenenet-commits mailing list archives

From nightowl...@apache.org
Subject [4/4] lucenenet git commit: Lucene.Net.Core.Index: documentation comments (types starting with letter D)
Date Sat, 01 Apr 2017 15:02:55 GMT
Lucene.Net.Core.Index: documentation comments (types starting with letter D)


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/b8d797ea
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/b8d797ea
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/b8d797ea

Branch: refs/heads/api-work
Commit: b8d797eaa157215e48c951ad53a47663d4a079cd
Parents: 6d2dec4
Author: Shad Storhaug <shad@shadstorhaug.com>
Authored: Sat Apr 1 22:02:35 2017 +0700
Committer: Shad Storhaug <shad@shadstorhaug.com>
Committed: Sat Apr 1 22:02:35 2017 +0700

----------------------------------------------------------------------
 src/Lucene.Net.Core/Index/DirectoryReader.cs    | 303 +++++++++----------
 src/Lucene.Net.Core/Index/DocFieldConsumer.cs   |   4 +-
 src/Lucene.Net.Core/Index/DocFieldProcessor.cs  |   6 +-
 src/Lucene.Net.Core/Index/DocInverter.cs        |   6 +-
 .../Index/DocInverterPerField.cs                |   6 +-
 src/Lucene.Net.Core/Index/DocTermOrds.cs        | 175 ++++++-----
 src/Lucene.Net.Core/Index/DocValues.cs          |  20 +-
 .../Index/DocValuesFieldUpdates.cs              |  18 +-
 src/Lucene.Net.Core/Index/DocValuesUpdate.cs    |  11 +-
 .../Index/DocsAndPositionsEnum.cs               |   6 +-
 src/Lucene.Net.Core/Index/DocsEnum.cs           |  16 +-
 src/Lucene.Net.Core/Index/DocumentsWriter.cs    |  57 ++--
 .../Index/DocumentsWriterDeleteQueue.cs         |  34 +--
 .../Index/DocumentsWriterFlushControl.cs        |  32 +-
 .../Index/DocumentsWriterFlushQueue.cs          |   6 +-
 .../Index/DocumentsWriterPerThread.cs           |  37 ++-
 .../Index/DocumentsWriterPerThreadPool.cs       |  97 +++---
 .../Index/DocumentsWriterStallControl.cs        |  26 +-
 18 files changed, 435 insertions(+), 425 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b8d797ea/src/Lucene.Net.Core/Index/DirectoryReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/DirectoryReader.cs b/src/Lucene.Net.Core/Index/DirectoryReader.cs
index 0398816..d3cf4de 100644
--- a/src/Lucene.Net.Core/Index/DirectoryReader.cs
+++ b/src/Lucene.Net.Core/Index/DirectoryReader.cs
@@ -26,26 +26,25 @@ namespace Lucene.Net.Index
     using Directory = Lucene.Net.Store.Directory;
 
     /// <summary>
-    /// DirectoryReader is an implementation of <seealso cref="CompositeReader"/>
-    /// that can read indexes in a <seealso cref="Directory"/>.
+    /// <see cref="DirectoryReader"/> is an implementation of <see cref="CompositeReader"/>
+    /// that can read indexes in a <see cref="Store.Directory"/>.
     ///
-    /// <p>DirectoryReader instances are usually constructed with a call to
-    /// one of the static <code>open()</code> methods, e.g. {@link
-    /// #open(Directory)}.
+    /// <para/><see cref="DirectoryReader"/> instances are usually constructed with a call to
+    /// one of the static <c>Open()</c> methods, e.g. <see cref="Open(Directory)"/>.
     ///
-    /// <p> For efficiency, in this API documents are often referred to via
+    /// <para/> For efficiency, in this API documents are often referred to via
     /// <i>document numbers</i>, non-negative integers which each name a unique
     /// document in the index.  These document numbers are ephemeral -- they may change
     /// as documents are added to and deleted from an index.  Clients should thus not
     /// rely on a given document having the same number between sessions.
     ///
-    /// <p>
-    /// <a name="thread-safety"></a><p><b>NOTE</b>: {@link
-    /// IndexReader} instances are completely thread
+    /// <para/>
+    /// <b>NOTE</b>:
+    /// <see cref="IndexReader"/> instances are completely thread
     /// safe, meaning multiple threads can call any of its methods,
     /// concurrently.  If your application requires external
     /// synchronization, you should <b>not</b> synchronize on the
-    /// <code>IndexReader</code> instance; use your own
+    /// <see cref="IndexReader"/> instance; use your own
     /// (non-Lucene) objects instead.
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -62,8 +61,8 @@ namespace Lucene.Net.Index
         protected readonly Directory m_directory;
 
         /// <summary>
-        /// Returns a IndexReader reading the index in the given
-        ///  Directory </summary>
+        /// Returns a <see cref="IndexReader"/> reading the index in the given
+        /// <see cref="Store.Directory"/> </summary>
         /// <param name="directory"> the index directory </param>
         /// <exception cref="IOException"> if there is a low-level IO error </exception>
         new public static DirectoryReader Open(Directory directory)
@@ -72,22 +71,22 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Expert: Returns a IndexReader reading the index in the given
-        ///  Directory with the given termInfosIndexDivisor. </summary>
+        /// Expert: Returns a <see cref="IndexReader"/> reading the index in the given
+        /// <see cref="Store.Directory"/> with the given termInfosIndexDivisor. </summary>
         /// <param name="directory"> the index directory </param>
         /// <param name="termInfosIndexDivisor"> Subsamples which indexed
-        ///  terms are loaded into RAM. this has the same effect as {@link
-        ///  IndexWriterConfig#setTermIndexInterval} except that setting
-        ///  must be done at indexing time while this setting can be
-        ///  set per reader.  When set to N, then one in every
-        ///  N*termIndexInterval terms in the index is loaded into
-        ///  memory.  By setting this to a value > 1 you can reduce
-        ///  memory usage, at the expense of higher latency when
-        ///  loading a TermInfo.  The default value is 1.  Set this
-        ///  to -1 to skip loading the terms index entirely.
-        ///  <b>NOTE:</b> divisor settings &gt; 1 do not apply to all PostingsFormat
-        ///  implementations, including the default one in this release. It only makes
-        ///  sense for terms indexes that can efficiently re-sample terms at load time. </param>
+        /// terms are loaded into RAM. This has the same effect as setting
+        /// <see cref="LiveIndexWriterConfig.TermIndexInterval"/> (on <see cref="IndexWriterConfig"/>) except that setting
+        /// must be done at indexing time while this setting can be
+        /// set per reader.  When set to N, then one in every
+        /// N*termIndexInterval terms in the index is loaded into
+        /// memory.  By setting this to a value &gt; 1 you can reduce
+        /// memory usage, at the expense of higher latency when
+        /// loading a TermInfo.  The default value is 1.  Set this
+        /// to -1 to skip loading the terms index entirely.
+        /// <b>NOTE:</b> divisor settings &gt; 1 do not apply to all <see cref="Codecs.PostingsFormat"/>
+        /// implementations, including the default one in this release. It only makes
+        /// sense for terms indexes that can efficiently re-sample terms at load time. </param>
         /// <exception cref="IOException"> if there is a low-level IO error </exception>
         new public static DirectoryReader Open(Directory directory, int termInfosIndexDivisor)
         {
@@ -95,31 +94,31 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Open a near real time IndexReader from the <seealso cref="Lucene.Net.Index.IndexWriter"/>.
+        /// Open a near real time <see cref="IndexReader"/> from the <see cref="IndexWriter"/>.
+        /// <para/>
+        /// @lucene.experimental 
         /// </summary>
-        /// <param name="writer"> The IndexWriter to open from </param>
-        /// <param name="applyAllDeletes"> If true, all buffered deletes will
+        /// <param name="writer"> The <see cref="IndexWriter"/> to open from </param>
+        /// <param name="applyAllDeletes"> If <c>true</c>, all buffered deletes will
         /// be applied (made visible) in the returned reader.  If
-        /// false, the deletes are not applied but remain buffered
+        /// <c>false</c>, the deletes are not applied but remain buffered
         /// (in IndexWriter) so that they will be applied in the
         /// future.  Applying deletes can be costly, so if your app
         /// can tolerate deleted documents being returned you might
-        /// gain some performance by passing false. </param>
-        /// <returns> The new IndexReader </returns>
+        /// gain some performance by passing <c>false</c>. </param>
+        /// <returns> The new <see cref="IndexReader"/> </returns>
         /// <exception cref="CorruptIndexException"> if the index is corrupt </exception>
         /// <exception cref="IOException"> if there is a low-level IO error
         /// </exception>
-        /// <seealso cref= #openIfChanged(DirectoryReader,IndexWriter,boolean)
-        ///
-        /// @lucene.experimental </seealso>
+        /// <seealso cref="OpenIfChanged(DirectoryReader, IndexWriter, bool)"/>
         new public static DirectoryReader Open(IndexWriter writer, bool applyAllDeletes)
         {
             return writer.GetReader(applyAllDeletes);
         }
 
         /// <summary>
-        /// Expert: returns an IndexReader reading the index in the given
-        ///  <seealso cref="IndexCommit"/>. </summary>
+        /// Expert: returns an <see cref="IndexReader"/> reading the index in the given
+        /// <see cref="Index.IndexCommit"/>. </summary>
         /// <param name="commit"> the commit point to open </param>
         /// <exception cref="IOException"> if there is a low-level IO error </exception>
         new public static DirectoryReader Open(IndexCommit commit)
@@ -128,22 +127,22 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Expert: returns an IndexReader reading the index in the given
-        ///  <seealso cref="IndexCommit"/> and termInfosIndexDivisor. </summary>
+        /// Expert: returns an <see cref="IndexReader"/> reading the index in the given
+        /// <see cref="Index.IndexCommit"/> and <paramref name="termInfosIndexDivisor"/>. </summary>
         /// <param name="commit"> the commit point to open </param>
         /// <param name="termInfosIndexDivisor"> Subsamples which indexed
-        ///  terms are loaded into RAM. this has the same effect as {@link
-        ///  IndexWriterConfig#setTermIndexInterval} except that setting
-        ///  must be done at indexing time while this setting can be
-        ///  set per reader.  When set to N, then one in every
-        ///  N*termIndexInterval terms in the index is loaded into
-        ///  memory.  By setting this to a value > 1 you can reduce
-        ///  memory usage, at the expense of higher latency when
-        ///  loading a TermInfo.  The default value is 1.  Set this
-        ///  to -1 to skip loading the terms index entirely.
-        ///  <b>NOTE:</b> divisor settings &gt; 1 do not apply to all PostingsFormat
-        ///  implementations, including the default one in this release. It only makes
-        ///  sense for terms indexes that can efficiently re-sample terms at load time. </param>
+        /// terms are loaded into RAM. This has the same effect as setting
+        /// <see cref="LiveIndexWriterConfig.TermIndexInterval"/> (on <see cref="IndexWriterConfig"/>) except that setting
+        /// must be done at indexing time while this setting can be
+        /// set per reader.  When set to N, then one in every
+        /// N*termIndexInterval terms in the index is loaded into
+        /// memory.  By setting this to a value &gt; 1 you can reduce
+        /// memory usage, at the expense of higher latency when
+        /// loading a TermInfo.  The default value is 1.  Set this
+        /// to -1 to skip loading the terms index entirely.
+        /// <b>NOTE:</b> divisor settings &gt; 1 do not apply to all <see cref="Codecs.PostingsFormat"/>
+        /// implementations, including the default one in this release. It only makes
+        /// sense for terms indexes that can efficiently re-sample terms at load time. </param>
         /// <exception cref="IOException"> if there is a low-level IO error </exception>
         new public static DirectoryReader Open(IndexCommit commit, int termInfosIndexDivisor)
         {
@@ -153,26 +152,26 @@ namespace Lucene.Net.Index
         /// <summary>
         /// If the index has changed since the provided reader was
         /// opened, open and return a new reader; else, return
-        /// null.  The new reader, if not null, will be the same
-        /// type of reader as the previous one, ie an NRT reader
-        /// will open a new NRT reader, a MultiReader will open a
-        /// new MultiReader,  etc.
+        /// <c>null</c>.  The new reader, if not <c>null</c>, will be the same
+        /// type of reader as the previous one, ie a near-real-time (NRT) reader
+        /// will open a new NRT reader, a <see cref="MultiReader"/> will open a
+        /// new <see cref="MultiReader"/>,  etc.
         ///
-        /// <p>this method is typically far less costly than opening a
-        /// fully new <code>DirectoryReader</code> as it shares
+        /// <para/>This method is typically far less costly than opening a
+        /// fully new <see cref="DirectoryReader"/> as it shares
         /// resources (for example sub-readers) with the provided
-        /// <code>DirectoryReader</code>, when possible.
+        /// <see cref="DirectoryReader"/>, when possible.
         ///
-        /// <p>The provided reader is not closed (you are responsible
+        /// <para/>The provided reader is not disposed (you are responsible
         /// for doing so); if a new reader is returned you also
-        /// must eventually close it.  Be sure to never close a
+        /// must eventually dispose it.  Be sure to never dispose a
         /// reader while other threads are still using it; see
-        /// <seealso cref="SearcherManager"/> to simplify managing this.
+        /// <see cref="Search.SearcherManager"/> to simplify managing this.
         /// </summary>
         /// <exception cref="CorruptIndexException"> if the index is corrupt </exception>
         /// <exception cref="IOException"> if there is a low-level IO error </exception>
-        /// <returns> null if there are no changes; else, a new
-        /// DirectoryReader instance which you must eventually close </returns>
+        /// <returns> <c>null</c> if there are no changes; else, a new
+        /// <see cref="DirectoryReader"/> instance which you must eventually dispose </returns>
         public static DirectoryReader OpenIfChanged(DirectoryReader oldReader)
         {
             DirectoryReader newReader = oldReader.DoOpenIfChanged();
@@ -181,11 +180,11 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// If the IndexCommit differs from what the
+        /// If the <see cref="Index.IndexCommit"/> differs from what the
         /// provided reader is searching, open and return a new
-        /// reader; else, return null.
+        /// reader; else, return <c>null</c>.
         /// </summary>
-        /// <seealso cref= #openIfChanged(DirectoryReader) </seealso>
+        /// <seealso cref="OpenIfChanged(DirectoryReader)"/>
         public static DirectoryReader OpenIfChanged(DirectoryReader oldReader, IndexCommit commit)
         {
             DirectoryReader newReader = oldReader.DoOpenIfChanged(commit);
@@ -195,64 +194,64 @@ namespace Lucene.Net.Index
 
         /// <summary>
         /// Expert: If there changes (committed or not) in the
-        /// <seealso cref="IndexWriter"/> versus what the provided reader is
+        /// <see cref="IndexWriter"/> versus what the provided reader is
         /// searching, then open and return a new
-        /// IndexReader searching both committed and uncommitted
-        /// changes from the writer; else, return null (though, the
-        /// current implementation never returns null).
+        /// <see cref="IndexReader"/> searching both committed and uncommitted
+        /// changes from the writer; else, return <c>null</c> (though, the
+        /// current implementation never returns <c>null</c>).
         ///
-        /// <p>this provides "near real-time" searching, in that
-        /// changes made during an <seealso cref="IndexWriter"/> session can be
+        /// <para/>This provides "near real-time" searching, in that
+        /// changes made during an <see cref="IndexWriter"/> session can be
         /// quickly made available for searching without closing
-        /// the writer nor calling <seealso cref="IndexWriter#commit"/>.
+        /// the writer nor calling <see cref="IndexWriter.Commit()"/>.
         ///
-        /// <p>It's <i>near</i> real-time because there is no hard
+        /// <para>It's <i>near</i> real-time because there is no hard
         /// guarantee on how quickly you can get a new reader after
-        /// making changes with IndexWriter.  You'll have to
+        /// making changes with <see cref="IndexWriter"/>.  You'll have to
         /// experiment in your situation to determine if it's
         /// fast enough.  As this is a new and experimental
         /// feature, please report back on your findings so we can
-        /// learn, improve and iterate.</p>
+        /// learn, improve and iterate.</para>
         ///
-        /// <p>The very first time this method is called, this
+        /// <para>The very first time this method is called, this
         /// writer instance will make every effort to pool the
         /// readers that it opens for doing merges, applying
-        /// deletes, etc.  this means additional resources (RAM,
-        /// file descriptors, CPU time) will be consumed.</p>
+        /// deletes, etc.  This means additional resources (RAM,
+        /// file descriptors, CPU time) will be consumed.</para>
         ///
-        /// <p>For lower latency on reopening a reader, you should
-        /// call <seealso cref="IndexWriterConfig#setMergedSegmentWarmer"/> to
+        /// <para>For lower latency on reopening a reader, you should
+        /// call <see cref="LiveIndexWriterConfig.MergedSegmentWarmer"/> (on <see cref="IndexWriterConfig"/>) to
         /// pre-warm a newly merged segment before it's committed
-        /// to the index.  this is important for minimizing
-        /// index-to-search delay after a large merge.  </p>
+        /// to the index.  This is important for minimizing
+        /// index-to-search delay after a large merge.  </para>
         ///
-        /// <p>If an addIndexes* call is running in another thread,
+        /// <para>If an AddIndexes* call is running in another thread,
         /// then this reader will only search those segments from
         /// the foreign index that have been successfully copied
-        /// over, so far.</p>
+        /// over, so far.</para>
         ///
-        /// <p><b>NOTE</b>: Once the writer is closed, any
+        /// <para><b>NOTE</b>: Once the writer is disposed, any
         /// outstanding readers may continue to be used.  However,
         /// if you attempt to reopen any of those readers, you'll
-        /// hit an <seealso cref="System.ObjectDisposedException"/>.</p>
+        /// hit an <see cref="System.ObjectDisposedException"/>.</para>
+        /// 
+        /// @lucene.experimental
         /// </summary>
-        /// <returns> DirectoryReader that covers entire index plus all
-        /// changes made so far by this IndexWriter instance, or
-        /// null if there are no new changes
+        /// <returns> <see cref="DirectoryReader"/> that covers entire index plus all
+        /// changes made so far by this <see cref="IndexWriter"/> instance, or
+        /// <c>null</c> if there are no new changes
         /// </returns>
-        /// <param name="writer"> The IndexWriter to open from
+        /// <param name="writer"> The <see cref="IndexWriter"/> to open from
         /// </param>
-        /// <param name="applyAllDeletes"> If true, all buffered deletes will
+        /// <param name="applyAllDeletes"> If <c>true</c>, all buffered deletes will
         /// be applied (made visible) in the returned reader.  If
-        /// false, the deletes are not applied but remain buffered
-        /// (in IndexWriter) so that they will be applied in the
+        /// <c>false</c>, the deletes are not applied but remain buffered
+        /// (in <see cref="IndexWriter"/>) so that they will be applied in the
         /// future.  Applying deletes can be costly, so if your app
         /// can tolerate deleted documents being returned you might
-        /// gain some performance by passing false.
+        /// gain some performance by passing <c>false</c>.
         /// </param>
-        /// <exception cref="IOException"> if there is a low-level IO error
-        ///
-        /// @lucene.experimental </exception>
+        /// <exception cref="IOException"> if there is a low-level IO error </exception>
         public static DirectoryReader OpenIfChanged(DirectoryReader oldReader, IndexWriter writer, bool applyAllDeletes)
         {
             DirectoryReader newReader = oldReader.DoOpenIfChanged(writer, applyAllDeletes);
@@ -261,21 +260,21 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Returns all commit points that exist in the Directory.
-        ///  Normally, because the default is {@link
-        ///  KeepOnlyLastCommitDeletionPolicy}, there would be only
-        ///  one commit point.  But if you're using a custom {@link
-        ///  IndexDeletionPolicy} then there could be many commits.
-        ///  Once you have a given commit, you can open a reader on
-        ///  it by calling <seealso cref="DirectoryReader#open(IndexCommit)"/>
-        ///  There must be at least one commit in
-        ///  the Directory, else this method throws {@link
-        ///  IndexNotFoundException}.  Note that if a commit is in
-        ///  progress while this method is running, that commit
-        ///  may or may not be returned.
+        /// Returns all commit points that exist in the <see cref="Store.Directory"/>.
+        /// Normally, because the default is 
+        /// <see cref="KeepOnlyLastCommitDeletionPolicy"/>, there would be only
+        /// one commit point.  But if you're using a custom
+        /// <see cref="IndexDeletionPolicy"/> then there could be many commits.
+        /// Once you have a given commit, you can open a reader on
+        /// it by calling <see cref="DirectoryReader.Open(IndexCommit)"/>.
+        /// There must be at least one commit in
+        /// the <see cref="Store.Directory"/>, else this method throws 
+        /// <see cref="IndexNotFoundException"/>.  Note that if a commit is in
+        /// progress while this method is running, that commit
+        /// may or may not be returned.
         /// </summary>
-        ///  <returns> a sorted list of <seealso cref="IndexCommit"/>s, from oldest
-        ///  to latest.  </returns>
+        /// <returns> a sorted list of <see cref="Index.IndexCommit"/>s, from oldest
+        /// to latest. </returns>
         public static IList<IndexCommit> ListCommits(Directory dir)
         {
             string[] files = dir.ListAll();
@@ -345,11 +344,11 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Returns <code>true</code> if an index likely exists at
+        /// Returns <c>true</c> if an index likely exists at
         /// the specified directory.  Note that if a corrupt index
         /// exists, or if an index in the process of committing </summary>
         /// <param name="directory"> the directory to check for an index </param>
-        /// <returns> <code>true</code> if an index exists; <code>false</code> otherwise </returns>
+        /// <returns> <c>true</c> if an index exists; <c>false</c> otherwise </returns>
         public static bool IndexExists(Directory directory)
         {
             // LUCENE-2812, LUCENE-2727, LUCENE-4738: this logic will
@@ -395,13 +394,13 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Expert: Constructs a {@code DirectoryReader} on the given subReaders. </summary>
-        /// <param name="segmentReaders"> the wrapped atomic index segment readers. this array is
-        /// returned by <seealso cref="#getSequentialSubReaders"/> and used to resolve the correct
+        /// Expert: Constructs a <see cref="DirectoryReader"/> on the given <paramref name="segmentReaders"/>. </summary>
+        /// <param name="segmentReaders"> the wrapped atomic index segment readers. This array is
+        /// returned by <see cref="CompositeReader.GetSequentialSubReaders"/> and used to resolve the correct
         /// subreader for docID-based methods. <b>Please note:</b> this array is <b>not</b>
         /// cloned and not protected for modification outside of this reader.
-        /// Subclasses of {@code DirectoryReader} should take care to not allow
-        /// modification of this internal array, e.g. <seealso cref="#doOpenIfChanged()"/>. </param>
+        /// Subclasses of <see cref="DirectoryReader"/> should take care to not allow
+        /// modification of this internal array, e.g. <see cref="DoOpenIfChanged()"/>. </param>
         protected DirectoryReader(Directory directory, AtomicReader[] segmentReaders)
             : base(segmentReaders)
         {
@@ -422,40 +421,40 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Implement this method to support <seealso cref="#openIfChanged(DirectoryReader)"/>.
-        /// If this reader does not support reopen, return {@code null}, so
-        /// client code is happy. this should be consistent with <seealso cref="#isCurrent"/>
-        /// (should always return {@code true}) if reopen is not supported. </summary>
+        /// Implement this method to support <see cref="OpenIfChanged(DirectoryReader)"/>.
+        /// If this reader does not support reopen, return <c>null</c>, so
+        /// client code is happy. This should be consistent with <see cref="IsCurrent()"/>
+        /// (should always return <c>true</c>) if reopen is not supported. </summary>
         /// <exception cref="IOException"> if there is a low-level IO error </exception>
-        /// <returns> null if there are no changes; else, a new
-        /// DirectoryReader instance. </returns>
+        /// <returns> <c>null</c> if there are no changes; else, a new
+        /// <see cref="DirectoryReader"/> instance. </returns>
         protected internal abstract DirectoryReader DoOpenIfChanged();
 
         /// <summary>
-        /// Implement this method to support <seealso cref="#openIfChanged(DirectoryReader,IndexCommit)"/>.
-        /// If this reader does not support reopen from a specific <seealso cref="IndexCommit"/>,
-        /// throw <seealso cref="UnsupportedOperationException"/>. </summary>
+        /// Implement this method to support <see cref="OpenIfChanged(DirectoryReader, IndexCommit)"/>.
+        /// If this reader does not support reopen from a specific <see cref="Index.IndexCommit"/>,
+        /// throw <see cref="NotSupportedException"/>. </summary>
         /// <exception cref="IOException"> if there is a low-level IO error </exception>
-        /// <returns> null if there are no changes; else, a new
-        /// DirectoryReader instance. </returns>
+        /// <returns> <c>null</c> if there are no changes; else, a new
+        /// <see cref="DirectoryReader"/> instance. </returns>
         protected internal abstract DirectoryReader DoOpenIfChanged(IndexCommit commit);
 
         /// <summary>
-        /// Implement this method to support <seealso cref="#openIfChanged(DirectoryReader,IndexWriter,boolean)"/>.
-        /// If this reader does not support reopen from <seealso cref="IndexWriter"/>,
-        /// throw <seealso cref="UnsupportedOperationException"/>. </summary>
+        /// Implement this method to support <see cref="OpenIfChanged(DirectoryReader, IndexWriter, bool)"/>.
+        /// If this reader does not support reopen from <see cref="IndexWriter"/>,
+        /// throw <see cref="NotSupportedException"/>. </summary>
         /// <exception cref="IOException"> if there is a low-level IO error </exception>
-        /// <returns> null if there are no changes; else, a new
-        /// DirectoryReader instance. </returns>
+        /// <returns> <c>null</c> if there are no changes; else, a new
+        /// <see cref="DirectoryReader"/> instance. </returns>
         protected internal abstract DirectoryReader DoOpenIfChanged(IndexWriter writer, bool applyAllDeletes);
 
         /// <summary>
-        /// Version number when this IndexReader was opened.
+        /// Version number when this <see cref="IndexReader"/> was opened.
         ///
-        /// <p>this method
+        /// <para>This method
         /// returns the version recorded in the commit that the
-        /// reader opened.  this version is advanced every time
-        /// a change is made with <seealso cref="IndexWriter"/>.</p>
+        /// reader opened.  This version is advanced every time
+        /// a change is made with <see cref="IndexWriter"/>.</para>
         /// </summary>
         public abstract long Version { get; }
 
@@ -463,30 +462,30 @@ namespace Lucene.Net.Index
         /// Check whether any new changes have occurred to the
         /// index since this reader was opened.
         ///
-        /// <p>If this reader was created by calling <seealso cref="#open"/>,
+        /// <para>If this reader was created by calling <see cref="Open"/>,
         /// then this method checks if any further commits
-        /// (see <seealso cref="IndexWriter#commit"/>) have occurred in the
-        /// directory.</p>
+        /// (see <see cref="IndexWriter.Commit()"/>) have occurred in the
+        /// directory.</para>
         ///
-        /// <p>If instead this reader is a near real-time reader
-        /// (ie, obtained by a call to {@link
-        /// DirectoryReader#open(IndexWriter,boolean)}, or by calling <seealso cref="#openIfChanged"/>
+        /// <para>If instead this reader is a near real-time reader
+        /// (ie, obtained by a call to 
+        /// <see cref="DirectoryReader.Open(IndexWriter, bool)"/>, or by calling <see cref="OpenIfChanged"/>
         /// on a near real-time reader), then this method checks if
         /// either a new commit has occurred, or any new
         /// uncommitted changes have taken place via the writer.
         /// Note that even if the writer has only performed
-        /// merging, this method will still return false.</p>
+        /// merging, this method will still return <c>false</c>.</para>
         ///
-        /// <p>In any event, if this returns false, you should call
-        /// <seealso cref="#openIfChanged"/> to get a new reader that sees the
-        /// changes.</p>
+        /// <para>In any event, if this returns <c>false</c>, you should call
+        /// <see cref="OpenIfChanged"/> to get a new reader that sees the
+        /// changes.</para>
         /// </summary>
-        /// <exception cref="IOException">           if there is a low-level IO error </exception>
+        /// <exception cref="IOException"> if there is a low-level IO error </exception>
         public abstract bool IsCurrent();
 
         /// <summary>
-        /// Expert: return the IndexCommit that this reader has opened.
-        /// <p/>
+        /// Expert: return the <see cref="Index.IndexCommit"/> that this reader has opened.
+        /// <para/>
         /// @lucene.experimental
         /// </summary>
         public abstract IndexCommit IndexCommit { get; }

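For context, the near-real-time pattern documented above for Open() and OpenIfChanged() looks roughly like the sketch below. This is illustrative only and not part of the commit; the index location, analyzer, and "id" field are assumptions (StandardAnalyzer is assumed to come from the analysis-common package).

    using Lucene.Net.Analysis.Standard;
    using Lucene.Net.Documents;
    using Lucene.Net.Index;
    using Lucene.Net.Store;
    using Lucene.Net.Util;

    public static class NrtReaderExample
    {
        public static void Main()
        {
            // Hypothetical index location; any Lucene.Net.Store.Directory works.
            using (var dir = FSDirectory.Open(new System.IO.DirectoryInfo("example-index")))
            using (var analyzer = new StandardAnalyzer(LuceneVersion.LUCENE_48))
            using (var writer = new IndexWriter(dir, new IndexWriterConfig(LuceneVersion.LUCENE_48, analyzer)))
            {
                // Near-real-time reader opened from the writer; applyAllDeletes = true
                // makes buffered deletes visible at some extra cost.
                DirectoryReader reader = DirectoryReader.Open(writer, applyAllDeletes: true);

                var doc = new Document();
                doc.Add(new StringField("id", "1", Field.Store.YES));
                writer.AddDocument(doc);

                // OpenIfChanged returns null when nothing changed; otherwise the caller
                // disposes the old reader and switches to the new one.
                DirectoryReader newReader = DirectoryReader.OpenIfChanged(reader, writer, applyAllDeletes: true);
                if (newReader != null)
                {
                    reader.Dispose();
                    reader = newReader;
                }

                reader.Dispose();
            }
        }
    }
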
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b8d797ea/src/Lucene.Net.Core/Index/DocFieldConsumer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/DocFieldConsumer.cs b/src/Lucene.Net.Core/Index/DocFieldConsumer.cs
index 52f134e..283704b 100644
--- a/src/Lucene.Net.Core/Index/DocFieldConsumer.cs
+++ b/src/Lucene.Net.Core/Index/DocFieldConsumer.cs
@@ -25,8 +25,8 @@ namespace Lucene.Net.Index
     internal abstract class DocFieldConsumer
     {
         /// <summary>
-        /// Called when DocumentsWriterPerThread decides to create a new
-        ///  segment
+        /// Called when <see cref="DocumentsWriterPerThread"/> decides to create a new
+        /// segment
         /// </summary>
         internal abstract void Flush(IDictionary<string, DocFieldConsumerPerField> fieldsToFlush, SegmentWriteState state);
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b8d797ea/src/Lucene.Net.Core/Index/DocFieldProcessor.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/DocFieldProcessor.cs b/src/Lucene.Net.Core/Index/DocFieldProcessor.cs
index 51f152e..b3c288c 100644
--- a/src/Lucene.Net.Core/Index/DocFieldProcessor.cs
+++ b/src/Lucene.Net.Core/Index/DocFieldProcessor.cs
@@ -29,11 +29,11 @@ namespace Lucene.Net.Index
     using IOContext = Lucene.Net.Store.IOContext;
 
     /// <summary>
-    /// this is a DocConsumer that gathers all fields under the
+    /// This is a <see cref="DocConsumer"/> that gathers all fields under the
     /// same name, and calls per-field consumers to process field
-    /// by field.  this class doesn't doesn't do any "real" work
+    /// by field.  This class doesn't do any "real" work
     /// of its own: it just forwards the fields to a
-    /// DocFieldConsumer.
+    /// <see cref="DocFieldConsumer"/>.
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b8d797ea/src/Lucene.Net.Core/Index/DocInverter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/DocInverter.cs b/src/Lucene.Net.Core/Index/DocInverter.cs
index 7ce08ea..e03a117 100644
--- a/src/Lucene.Net.Core/Index/DocInverter.cs
+++ b/src/Lucene.Net.Core/Index/DocInverter.cs
@@ -21,9 +21,9 @@ namespace Lucene.Net.Index
      */
 
     /// <summary>
-    /// this is a DocFieldConsumer that inverts each field,
-    ///  separately, from a Document, and accepts a
-    ///  InvertedTermsConsumer to process those terms.
+    /// This is a <see cref="DocFieldConsumer"/> that inverts each field,
+    /// separately, from a <see cref="Documents.Document"/>, and accepts a
+    /// <see cref="InvertedDocConsumer"/> to process those terms.
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b8d797ea/src/Lucene.Net.Core/Index/DocInverterPerField.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/DocInverterPerField.cs b/src/Lucene.Net.Core/Index/DocInverterPerField.cs
index 4c6c3df..8b6b8d4 100644
--- a/src/Lucene.Net.Core/Index/DocInverterPerField.cs
+++ b/src/Lucene.Net.Core/Index/DocInverterPerField.cs
@@ -25,11 +25,11 @@ namespace Lucene.Net.Index
 
     /// <summary>
     /// Holds state for inverting all occurrences of a single
-    /// field in the document.  this class doesn't do anything
+    /// field in the document.  This class doesn't do anything
     /// itself; instead, it forwards the tokens produced by
     /// analysis to its own consumer
-    /// (InvertedDocConsumerPerField).  It also interacts with an
-    /// endConsumer (InvertedDocEndConsumerPerField).
+    /// (<see cref="InvertedDocConsumerPerField"/>).  It also interacts with an
+    /// endConsumer (<see cref="InvertedDocEndConsumerPerField"/>).
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b8d797ea/src/Lucene.Net.Core/Index/DocTermOrds.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/DocTermOrds.cs b/src/Lucene.Net.Core/Index/DocTermOrds.cs
index 691a077..c9a1930 100644
--- a/src/Lucene.Net.Core/Index/DocTermOrds.cs
+++ b/src/Lucene.Net.Core/Index/DocTermOrds.cs
@@ -31,84 +31,90 @@ namespace Lucene.Net.Index
     using StringHelper = Lucene.Net.Util.StringHelper;
 
     /// <summary>
-    /// this class enables fast access to multiple term ords for
+    /// This class enables fast access to multiple term ords for
     /// a specified field across all docIDs.
-    ///
-    /// Like FieldCache, it uninverts the index and holds a
+    /// <para/>
+    /// Like <see cref="Search.IFieldCache"/>, it uninverts the index and holds a
     /// packed data structure in RAM to enable fast access.
-    /// Unlike FieldCache, it can handle multi-valued fields,
+    /// Unlike <see cref="Search.IFieldCache"/>, it can handle multi-valued fields,
     /// and, it does not hold the term bytes in RAM.  Rather, you
-    /// must obtain a TermsEnum from the <seealso cref="#getOrdTermsEnum"/>
+    /// must obtain a <see cref="TermsEnum"/> from the <see cref="GetOrdTermsEnum"/>
     /// method, and then seek-by-ord to get the term's bytes.
-    ///
-    /// While normally term ords are type long, in this API they are
-    /// int as the internal representation here cannot address
-    /// more than MAX_INT32 unique terms.  Also, typically this
+    /// <para/>
+    /// While normally term ords are type <see cref="long"/>, in this API they are
+    /// <see cref="int"/> as the internal representation here cannot address
+    /// more than <see cref="BufferedUpdates.MAX_INT32"/> unique terms.  Also, typically this
     /// class is used on fields with relatively few unique terms
     /// vs the number of documents.  In addition, there is an
     /// internal limit (16 MB) on how many bytes each chunk of
     /// documents may consume.  If you trip this limit you'll hit
-    /// an InvalidOperationException.
-    ///
+    /// an <see cref="InvalidOperationException"/>.
+    /// <para/>
     /// Deleted documents are skipped during uninversion, and if
     /// you look them up you'll get 0 ords.
-    ///
+    /// <para/>
     /// The returned per-document ords do not retain their
     /// original order in the document.  Instead they are returned
-    /// in sorted (by ord, ie term's BytesRef comparer) order.  They
+    /// in sorted (by ord, ie term's <see cref="BytesRef"/> comparer) order.  They
     /// are also de-dup'd (ie if doc has same term more than once
     /// in this field, you'll only get that ord back once).
-    ///
-    /// this class tests whether the provided reader is able to
+    /// <para/>
+    /// This class tests whether the provided reader is able to
     /// retrieve terms by ord (ie, it's single segment, and it
     /// uses an ord-capable terms index).  If not, this class
     /// will create its own term index internally, allowing to
-    /// create a wrapped TermsEnum that can handle ord.  The
-    /// <seealso cref="#getOrdTermsEnum"/> method then provides this
+    /// create a wrapped <see cref="TermsEnum"/> that can handle ord.  The
+    /// <see cref="GetOrdTermsEnum"/> method then provides this
     /// wrapped enum, if necessary.
-    ///
+    /// <para/>
     /// The RAM consumption of this class can be high!
-    ///
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
-
-    // LUCENENET TODO: Make remarks section
-    /*
-     * Final form of the un-inverted field:
-     *   Each document points to a list of term numbers that are contained in that document.
-     *
-     *   Term numbers are in sorted order, and are encoded as variable-length deltas from the
-     *   previous term number.  Real term numbers start at 2 since 0 and 1 are reserved.  A
-     *   term number of 0 signals the end of the termNumber list.
-     *
-     *   There is a single int[maxDoc()] which either contains a pointer into a byte[] for
-     *   the termNumber lists, or directly contains the termNumber list if it fits in the 4
-     *   bytes of an integer.  If the first byte in the integer is 1, the next 3 bytes
-     *   are a pointer into a byte[] where the termNumber list starts.
-     *
-     *   There are actually 256 byte arrays, to compensate for the fact that the pointers
-     *   into the byte arrays are only 3 bytes long.  The correct byte array for a document
-     *   is a function of it's id.
-     *
-     *   To save space and speed up faceting, any term that matches enough documents will
-     *   not be un-inverted... it will be skipped while building the un-inverted field structure,
-     *   and will use a set intersection method during faceting.
-     *
-     *   To further save memory, the terms (the actual string values) are not all stored in
-     *   memory, but a TermIndex is used to convert term numbers to term values only
-     *   for the terms needed after faceting has completed.  Only every 128th term value
-     *   is stored, along with it's corresponding term number, and this is used as an
-     *   index to find the closest term and iterate until the desired number is hit (very
-     *   much like Lucene's own internal term index).
-     *
-     */
+    /// <remarks>
+    /// Final form of the un-inverted field:
+    /// <list type="bullet">
+    ///     <item>Each document points to a list of term numbers that are contained in that document.</item>
+    ///     <item>
+    ///         Term numbers are in sorted order, and are encoded as variable-length deltas from the
+    ///         previous term number.  Real term numbers start at 2 since 0 and 1 are reserved.  A
+    ///         term number of 0 signals the end of the termNumber list.
+    ///     </item>
+    ///     <item>
+    ///         There is a single int[maxDoc()] which either contains a pointer into a byte[] for
+    ///         the termNumber lists, or directly contains the termNumber list if it fits in the 4
+    ///         bytes of an integer.  If the first byte in the integer is 1, the next 3 bytes
+    ///         are a pointer into a byte[] where the termNumber list starts.
+    ///     </item>
+    ///     <item>
+    ///         There are actually 256 byte arrays, to compensate for the fact that the pointers
+    ///         into the byte arrays are only 3 bytes long.  The correct byte array for a document
+    ///         is a function of its id.
+    ///     </item>
+    ///     <item>
+    ///         To save space and speed up faceting, any term that matches enough documents will
+    ///         not be un-inverted... it will be skipped while building the un-inverted field structure,
+    ///         and will use a set intersection method during faceting.
+    ///     </item>
+    ///     <item>
+    ///         To further save memory, the terms (the actual string values) are not all stored in
+    ///         memory, but a TermIndex is used to convert term numbers to term values only
+    ///         for the terms needed after faceting has completed.  Only every 128th term value
+    ///         is stored, along with its corresponding term number, and this is used as an
+    ///         index to find the closest term and iterate until the desired number is hit (very
+    ///         much like Lucene's own internal term index).
+    ///     </item>
+    /// </list>
+    /// </remarks>
 #if FEATURE_SERIALIZABLE
     [Serializable]
 #endif
     public class DocTermOrds
     {
-        // Term ords are shifted by this, internally, to reserve
-        // values 0 (end term) and 1 (index is a pointer into byte array)
+        /// <summary>
+        /// Term ords are shifted by this, internally, to reserve
+        /// values 0 (end term) and 1 (index is a pointer into byte array)
+        /// </summary>
         private static readonly int TNUM_OFFSET = 2;
 
         /// <summary>
@@ -164,14 +170,14 @@ namespace Lucene.Net.Index
 
         /// <summary>
         /// If non-null, only terms matching this prefix were
-        ///  indexed.
+        /// indexed.
         /// </summary>
         protected BytesRef m_prefix;
 
         /// <summary>
         /// Ordinal of the first term in the field, or 0 if the
-        ///  <seealso cref="PostingsFormat"/> does not implement {@link
-        ///  TermsEnum#ord}.
+        /// <see cref="PostingsFormat"/> does not implement 
+        /// <see cref="TermsEnum.Ord"/>.
         /// </summary>
         protected int m_ordBase;
 
@@ -217,8 +223,8 @@ namespace Lucene.Net.Index
 
         /// <summary>
         /// Inverts only terms starting w/ prefix, and only terms
-        ///  whose docFreq (not taking deletions into account) is
-        ///  <=  maxTermDocFreq
+        /// whose docFreq (not taking deletions into account) is
+        /// &lt;= <paramref name="maxTermDocFreq"/>
         /// </summary>
         public DocTermOrds(AtomicReader reader, IBits liveDocs, string field, BytesRef termPrefix, int maxTermDocFreq)
             : this(reader, liveDocs, field, termPrefix, maxTermDocFreq, DEFAULT_INDEX_INTERVAL_BITS)
@@ -227,9 +233,9 @@ namespace Lucene.Net.Index
 
         /// <summary>
         /// Inverts only terms starting w/ prefix, and only terms
-        ///  whose docFreq (not taking deletions into account) is
-        ///  <=  maxTermDocFreq, with a custom indexing interval
-        ///  (default is every 128nd term).
+        /// whose docFreq (not taking deletions into account) is
+        /// &lt;=  <paramref name="maxTermDocFreq"/>, with a custom indexing interval
+        /// (default is every 128th term).
         /// </summary>
         public DocTermOrds(AtomicReader reader, IBits liveDocs, string field, BytesRef termPrefix, int maxTermDocFreq, int indexIntervalBits)
             : this(field, maxTermDocFreq, indexIntervalBits)
@@ -239,7 +245,7 @@ namespace Lucene.Net.Index
 
         /// <summary>
         /// Subclass inits w/ this, but be sure you then call
-        ///  uninvert, only once
+        /// uninvert, only once
         /// </summary>
         protected DocTermOrds(string field, int maxTermDocFreq, int indexIntervalBits)
         {
@@ -252,16 +258,16 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Returns a TermsEnum that implements ord.  If the
-        ///  provided reader supports ord, we just return its
-        ///  TermsEnum; if it does not, we build a "private" terms
-        ///  index internally (WARNING: consumes RAM) and use that
-        ///  index to implement ord.  this also enables ord on top
-        ///  of a composite reader.  The returned TermsEnum is
-        ///  unpositioned.  this returns null if there are no terms.
+        /// Returns a <see cref="TermsEnum"/> that implements <see cref="TermsEnum.Ord"/>.  If the
+        /// provided <paramref name="reader"/> supports <see cref="TermsEnum.Ord"/>, we just return its
+        /// <see cref="TermsEnum"/>; if it does not, we build a "private" terms
+        /// index internally (WARNING: consumes RAM) and use that
+        /// index to implement <see cref="TermsEnum.Ord"/>.  This also enables <see cref="TermsEnum.Ord"/> on top
+        /// of a composite reader.  The returned <see cref="TermsEnum"/> is
+        /// unpositioned.  This returns <c>null</c> if there are no terms.
         ///
-        ///  <p><b>NOTE</b>: you must pass the same reader that was
-        ///  used when creating this class
+        /// <para/><b>NOTE</b>: you must pass the same reader that was
+        /// used when creating this class
         /// </summary>
         public virtual TermsEnum GetOrdTermsEnum(AtomicReader reader)
         {
@@ -299,7 +305,7 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Returns {@code true} if no terms were indexed.
+        /// Returns <c>true</c> if no terms were indexed.
         /// </summary>
         public virtual bool IsEmpty
         {
@@ -316,9 +322,9 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Invoked during <seealso cref="#uninvert(AtomicReader,Bits,BytesRef)"/>
-        ///  to record the document frequency for each uninverted
-        ///  term.
+        /// Invoked during <see cref="Uninvert(AtomicReader, IBits, BytesRef)"/>
+        /// to record the document frequency for each uninverted
+        /// term.
         /// </summary>
         protected virtual void SetActualDocFreq(int termNum, int df)
         {
@@ -729,10 +735,11 @@ namespace Lucene.Net.Index
             return pos;
         }
 
-        /* Only used if original IndexReader doesn't implement
-         * ord; in this case we "wrap" our own terms index
-         * around it. */
-
+        /// <summary>
+        /// Only used if original <see cref="IndexReader"/> doesn't implement
+        /// <see cref="TermsEnum.Ord"/>; in this case we "wrap" our own terms index
+        /// around it.
+        /// </summary>
 #if FEATURE_SERIALIZABLE
         [Serializable]
 #endif
@@ -929,8 +936,8 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Returns the term (<seealso cref="BytesRef"/>) corresponding to
-        ///  the provided ordinal.
+        /// Returns the term (<see cref="BytesRef"/>) corresponding to
+        /// the provided ordinal.
         /// </summary>
         public virtual BytesRef LookupTerm(TermsEnum termsEnum, int ord)
         {
@@ -939,7 +946,7 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Returns a SortedSetDocValues view of this instance </summary>
+        /// Returns a <see cref="SortedSetDocValues"/> view of this instance </summary>
         public virtual SortedSetDocValues GetIterator(AtomicReader reader)
         {
             if (IsEmpty)
@@ -997,9 +1004,9 @@ namespace Lucene.Net.Index
             }
 
             /// <summary>
-            /// Buffer must be at least 5 ints long.  Returns number
-            ///  of term ords placed into buffer; if this count is
-            ///  less than buffer.length then that is the end.
+            /// Buffer must be at least 5 <see cref="int"/>s long.  Returns number
+            /// of term ords placed into buffer; if this count is
+            /// less than buffer.Length then that is the end.
             /// </summary>
             internal virtual int Read(int[] buffer)
             {

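The DocTermOrds comments above describe uninverting a multi-valued field and reading back per-document term ords; a hedged usage sketch follows (the "tags" field name and the already-open DirectoryReader are assumptions, and the five-argument constructor from the diff above is used with no prefix filter or docFreq cap):

    using System;
    using Lucene.Net.Index;
    using Lucene.Net.Util;

    public static class DocTermOrdsExample
    {
        // "tags" is a hypothetical multi-valued string field.
        public static void PrintTermOrds(DirectoryReader directoryReader)
        {
            AtomicReader atomic = SlowCompositeReaderWrapper.Wrap(directoryReader);

            // null termPrefix = uninvert all terms; int.MaxValue = no docFreq cap.
            var docTermOrds = new DocTermOrds(atomic, atomic.LiveDocs, "tags", null, int.MaxValue);

            // Ords come back sorted and de-duplicated, per the class comments.
            SortedSetDocValues ords = docTermOrds.GetIterator(atomic);
            var scratch = new BytesRef();
            for (int docId = 0; docId < atomic.MaxDoc; docId++)
            {
                ords.SetDocument(docId);
                long ord;
                while ((ord = ords.NextOrd()) != SortedSetDocValues.NO_MORE_ORDS)
                {
                    ords.LookupOrd(ord, scratch);   // resolve ord back to term bytes
                    Console.WriteLine("doc {0}: {1}", docId, scratch.Utf8ToString());
                }
            }
        }
    }
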
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b8d797ea/src/Lucene.Net.Core/Index/DocValues.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/DocValues.cs b/src/Lucene.Net.Core/Index/DocValues.cs
index 5f9a38e..2cf3502 100644
--- a/src/Lucene.Net.Core/Index/DocValues.cs
+++ b/src/Lucene.Net.Core/Index/DocValues.cs
@@ -23,7 +23,7 @@ namespace Lucene.Net.Index
     using BytesRef = Lucene.Net.Util.BytesRef;
 
     /// <summary>
-    /// this class contains utility methods and constants for DocValues
+    /// This class contains utility methods and constants for <see cref="DocValues"/>
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
@@ -37,7 +37,7 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// An empty BinaryDocValues which returns <seealso cref="BytesRef#EMPTY_BYTES"/> for every document
+        /// An empty <see cref="BinaryDocValues"/> which returns <see cref="BytesRef.EMPTY_BYTES"/> for every document
         /// </summary>
         public static readonly BinaryDocValues EMPTY_BINARY = new BinaryDocValuesAnonymousInnerClassHelper();
 
@@ -59,7 +59,7 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// An empty NumericDocValues which returns zero for every document
+        /// An empty <see cref="NumericDocValues"/> which returns zero for every document
         /// </summary>
         public static readonly NumericDocValues EMPTY_NUMERIC = new NumericDocValuesAnonymousInnerClassHelper();
 
@@ -79,7 +79,7 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// An empty SortedDocValues which returns <seealso cref="BytesRef#EMPTY_BYTES"/> for every document
+        /// An empty <see cref="SortedDocValues"/> which returns <see cref="BytesRef.EMPTY_BYTES"/> for every document
         /// </summary>
         public static readonly SortedDocValues EMPTY_SORTED = new SortedDocValuesAnonymousInnerClassHelper();
 
@@ -114,7 +114,7 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// An empty SortedDocValues which returns <seealso cref="SortedSetDocValues#NO_MORE_ORDS"/> for every document
+        /// An empty <see cref="SortedDocValues"/> which returns <see cref="SortedSetDocValues.NO_MORE_ORDS"/> for every document
         /// </summary>
         public static readonly SortedSetDocValues EMPTY_SORTED_SET = new RandomAccessOrdsAnonymousInnerClassHelper();
 
@@ -161,7 +161,7 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Returns a multi-valued view over the provided SortedDocValues
+        /// Returns a multi-valued view over the provided <see cref="SortedDocValues"/>
         /// </summary>
         public static SortedSetDocValues Singleton(SortedDocValues dv)
         {
@@ -169,8 +169,8 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Returns a single-valued view of the SortedSetDocValues, if it was previously
-        /// wrapped with <seealso cref="#singleton"/>, or null.
+        /// Returns a single-valued view of the <see cref="SortedSetDocValues"/>, if it was previously
+        /// wrapped with <see cref="Singleton"/>, or <c>null</c>.
         /// </summary>
         public static SortedDocValues UnwrapSingleton(SortedSetDocValues dv)
         {
@@ -185,7 +185,7 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Returns a Bits representing all documents from <code>dv</code> that have a value.
+        /// Returns a <see cref="IBits"/> representing all documents from <paramref name="dv"/> that have a value.
         /// </summary>
         public static IBits DocsWithValue(SortedDocValues dv, int maxDoc)
         {
@@ -218,7 +218,7 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Returns a Bits representing all documents from <code>dv</code> that have a value.
+        /// Returns a <see cref="IBits"/> representing all documents from <paramref name="dv"/> that have a value.
         /// </summary>
         public static IBits DocsWithValue(SortedSetDocValues dv, int maxDoc)
         {

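The DocValues helpers touched above (Singleton, UnwrapSingleton, DocsWithValue) bridge single-valued and multi-valued views; a small sketch, assuming the reader has a single-valued SORTED doc values field named "category":

    using System;
    using Lucene.Net.Index;
    using Lucene.Net.Util;

    public static class DocValuesHelpersExample
    {
        public static void UseHelpers(AtomicReader reader)
        {
            // Assumes "category" exists as a single-valued SORTED doc values field.
            SortedDocValues sorted = reader.GetSortedDocValues("category");

            // Expose the single-valued field through the multi-valued API...
            SortedSetDocValues multiView = DocValues.Singleton(sorted);

            // ...and recover the original instance (null if not a singleton wrapper).
            SortedDocValues unwrapped = DocValues.UnwrapSingleton(multiView);

            // Bit set of documents that actually have a value for the field.
            IBits docsWithValue = DocValues.DocsWithValue(sorted, reader.MaxDoc);
            Console.WriteLine("unwrapped == sorted: {0}, bits length: {1}",
                ReferenceEquals(unwrapped, sorted), docsWithValue.Length);
        }
    }
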
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b8d797ea/src/Lucene.Net.Core/Index/DocValuesFieldUpdates.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/DocValuesFieldUpdates.cs b/src/Lucene.Net.Core/Index/DocValuesFieldUpdates.cs
index 44fff8d..81e5965 100644
--- a/src/Lucene.Net.Core/Index/DocValuesFieldUpdates.cs
+++ b/src/Lucene.Net.Core/Index/DocValuesFieldUpdates.cs
@@ -23,8 +23,8 @@ namespace Lucene.Net.Index
      */
 
     /// <summary>
-    /// Holds updates of a single DocValues field, for a set of documents.
-    ///
+    /// Holds updates of a single <see cref="DocValues"/> field, for a set of documents.
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -46,7 +46,7 @@ namespace Lucene.Net.Index
         {
             /// <summary>
             /// Returns the next document which has an update, or
-            /// <seealso cref="DocIdSetIterator#NO_MORE_DOCS"/> if there are no more documents to
+            /// <see cref="Search.DocIdSetIterator.NO_MORE_DOCS"/> if there are no more documents to
             /// return.
             /// </summary>
             public abstract int NextDoc();
@@ -56,13 +56,13 @@ namespace Lucene.Net.Index
             public abstract int Doc { get; }
 
             /// <summary>
-            /// Returns the value of the document returned from <seealso cref="#nextDoc()"/>. A
-            /// {@code null} value means that it was unset for this document.
+            /// Returns the value of the document returned from <see cref="NextDoc()"/>. A
+            /// <c>null</c> value means that it was unset for this document.
             /// </summary>
             public abstract object Value { get; }
 
             /// <summary>
-            /// Reset the iterator's state. Should be called before <seealso cref="#nextDoc()"/>
+            /// Reset the iterator's state. Should be called before <see cref="NextDoc()"/>
             /// and <seealso cref="#value()"/>.
             /// </summary>
             public abstract void Reset();
@@ -159,18 +159,18 @@ namespace Lucene.Net.Index
 
         /// <summary>
         /// Add an update to a document. For unsetting a value you should pass
-        /// {@code null}.
+        /// <c>null</c>.
         /// </summary>
         public abstract void Add(int doc, object value);
 
         /// <summary>
-        /// Returns an <seealso cref="Iterator"/> over the updated documents and their
+        /// Returns an <see cref="Iterator"/> over the updated documents and their
         /// values.
         /// </summary>
         public abstract Iterator GetIterator();
 
         /// <summary>
-        /// Merge with another <seealso cref="DocValuesFieldUpdates"/>. this is called for a
+        /// Merge with another <see cref="DocValuesFieldUpdates"/>. This is called for a
         /// segment which received updates while it was being merged. The given updates
         /// should override whatever updates are in that instance.
         /// </summary>

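The iteration contract above, as a short sketch. DocValuesFieldUpdates is internal to the indexing chain, so 'updates' is simply assumed to be an existing instance reachable from that code.

    // 'updates' is an existing DocValuesFieldUpdates instance (internal API).
    DocValuesFieldUpdates.Iterator iter = updates.GetIterator();
    int doc;
    while ((doc = iter.NextDoc()) != Lucene.Net.Search.DocIdSetIterator.NO_MORE_DOCS)
    {
        object value = iter.Value; // null means the value was unset for this document
        // ... apply 'value' to document 'doc' ...
    }
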
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b8d797ea/src/Lucene.Net.Core/Index/DocValuesUpdate.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/DocValuesUpdate.cs b/src/Lucene.Net.Core/Index/DocValuesUpdate.cs
index df46698..c6f562b 100644
--- a/src/Lucene.Net.Core/Index/DocValuesUpdate.cs
+++ b/src/Lucene.Net.Core/Index/DocValuesUpdate.cs
@@ -25,7 +25,7 @@ namespace Lucene.Net.Index
     using RamUsageEstimator = Lucene.Net.Util.RamUsageEstimator;
 
     /// <summary>
-    /// An in-place update to a DocValues field. </summary>
+    /// An in-place update to a <see cref="DocValues"/> field. </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
 #endif
@@ -49,8 +49,9 @@ namespace Lucene.Net.Index
         /// <summary>
         /// Constructor.
         /// </summary>
-        /// <param name="term"> the <seealso cref="term"/> which determines the documents that will be updated </param>
-        /// <param name="field"> the <seealso cref="NumericDocValuesField"/> to update </param>
+        /// <param name="type"> the <see cref="DocValuesFieldUpdatesType"/> </param>
+        /// <param name="term"> the <see cref="Term"/> which determines the documents that will be updated </param>
+        /// <param name="field"> the <see cref="NumericDocValuesField"/> to update </param>
         /// <param name="value"> the updated value </param>
         protected DocValuesUpdate(DocValuesFieldUpdatesType type, Term term, string field, object value)
         {
@@ -78,7 +79,7 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// An in-place update to a binary DocValues field </summary>
+        /// An in-place update to a binary <see cref="DocValues"/> field </summary>
 #if FEATURE_SERIALIZABLE
         [Serializable]
 #endif
@@ -101,7 +102,7 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// An in-place update to a numeric DocValues field </summary>
+        /// An in-place update to a numeric <see cref="DocValues"/> field </summary>
 #if FEATURE_SERIALIZABLE
         [Serializable]
 #endif

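These update objects are created inside the indexing chain; from the caller's side the corresponding entry points are the IndexWriter update methods. A hedged sketch ('writer', the term, and the field names are assumptions):

    // Update the numeric doc-values field "popularity" on every document matching the term.
    writer.UpdateNumericDocValue(new Term("id", "42"), "popularity", 7L);

    // Update a binary doc-values field the same way.
    writer.UpdateBinaryDocValue(new Term("id", "42"), "payload", new BytesRef(new byte[] { 1, 2, 3 }));
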
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b8d797ea/src/Lucene.Net.Core/Index/DocsAndPositionsEnum.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/DocsAndPositionsEnum.cs b/src/Lucene.Net.Core/Index/DocsAndPositionsEnum.cs
index 8ccb546..17fb5e4 100644
--- a/src/Lucene.Net.Core/Index/DocsAndPositionsEnum.cs
+++ b/src/Lucene.Net.Core/Index/DocsAndPositionsEnum.cs
@@ -87,10 +87,10 @@ namespace Lucene.Net.Index
         public abstract int EndOffset { get; }
 
         /// <summary>
-        /// Returns the payload at this position, or null if no
+        /// Returns the payload at this position, or <c>null</c> if no
         /// payload was indexed. You should not modify anything
-        /// (neither members of the returned BytesRef nor bytes
-        /// in the byte[]).
+        /// (neither members of the returned <see cref="BytesRef"/> nor bytes
+        /// in the <see cref="T:byte[]"/>).
         /// </summary>
         public abstract BytesRef GetPayload();
     }

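A minimal sketch of the access pattern implied above. Assumed setup: 'dpe' was obtained from TermsEnum.DocsAndPositions(...) and is non-null (it is null when positions were not indexed).

    int doc;
    while ((doc = dpe.NextDoc()) != DocIdSetIterator.NO_MORE_DOCS)
    {
        int freq = dpe.Freq;                     // number of positions in this document
        for (int i = 0; i < freq; i++)
        {
            int position = dpe.NextPosition();
            BytesRef payload = dpe.GetPayload(); // may be null; treat it as read-only
        }
    }
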
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b8d797ea/src/Lucene.Net.Core/Index/DocsEnum.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/DocsEnum.cs b/src/Lucene.Net.Core/Index/DocsEnum.cs
index 7eec082..32b022e 100644
--- a/src/Lucene.Net.Core/Index/DocsEnum.cs
+++ b/src/Lucene.Net.Core/Index/DocsEnum.cs
@@ -42,8 +42,8 @@ namespace Lucene.Net.Index
 
     /// <summary>
     /// Iterates through the documents and term freqs.
-    ///  NOTE: you must first call <seealso cref="#nextDoc"/> before using
-    ///  any of the per-doc methods.
+    /// NOTE: you must first call <see cref="DocIdSetIterator.NextDoc()"/> before using
+    /// any of the per-doc methods.
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
@@ -56,7 +56,7 @@ namespace Lucene.Net.Index
 
         /// <summary>
         /// Sole constructor. (For invocation by subclass
-        ///  constructors, typically implicit.)
+        /// constructors, typically implicit.)
         /// </summary>
         protected DocsEnum()
         {
@@ -64,12 +64,12 @@ namespace Lucene.Net.Index
 
         /// <summary>
         /// Returns term frequency in the current document, or 1 if the field was
-        /// indexed with <seealso cref="IndexOptions#DOCS_ONLY"/>. Do not call this before
-        /// <seealso cref="#nextDoc"/> is first called, nor after <seealso cref="#nextDoc"/> returns
-        /// <seealso cref="DocIdSetIterator#NO_MORE_DOCS"/>.
+        /// indexed with <see cref="IndexOptions.DOCS_ONLY"/>. Do not call this before
+        /// <see cref="DocIdSetIterator.NextDoc()"/> is first called, nor after <see cref="DocIdSetIterator.NextDoc()"/> returns
+        /// <see cref="DocIdSetIterator.NO_MORE_DOCS"/>.
         ///
-        /// <p>
-        /// <b>NOTE:</b> if the <seealso cref="DocsEnum"/> was obtain with <seealso cref="#FLAG_NONE"/>,
+        /// <para/>
+        /// <b>NOTE:</b> if the <see cref="DocsEnum"/> was obtained with <see cref="DocsFlags.NONE"/>,
         /// the result of this method is undefined.
         /// </summary>
         public abstract int Freq { get; }

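The NextDoc-before-Freq contract in a short sketch. Assumed setup: 'termsEnum' is already positioned on a term; null live docs and no reuse are passed for brevity.

    DocsEnum docsEnum = termsEnum.Docs(null, null); // default flags request freqs
    int doc;
    while ((doc = docsEnum.NextDoc()) != DocIdSetIterator.NO_MORE_DOCS)
    {
        int freq = docsEnum.Freq; // 1 if the field was indexed with IndexOptions.DOCS_ONLY
    }
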
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b8d797ea/src/Lucene.Net.Core/Index/DocumentsWriter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/DocumentsWriter.cs b/src/Lucene.Net.Core/Index/DocumentsWriter.cs
index 4b79ce3..3577487 100644
--- a/src/Lucene.Net.Core/Index/DocumentsWriter.cs
+++ b/src/Lucene.Net.Core/Index/DocumentsWriter.cs
@@ -38,49 +38,49 @@ namespace Lucene.Net.Index
     using ThreadState = Lucene.Net.Index.DocumentsWriterPerThreadPool.ThreadState;
 
     /// <summary>
-    /// this class accepts multiple added documents and directly
+    /// This class accepts multiple added documents and directly
     /// writes segment files.
-    ///
-    /// Each added document is passed to the <seealso cref="DocConsumer"/>,
+    /// <para/>
+    /// Each added document is passed to the <see cref="DocConsumer"/>,
     /// which in turn processes the document and interacts with
     /// other consumers in the indexing chain.  Certain
-    /// consumers, like <seealso cref="StoredFieldsConsumer"/> and {@link
-    /// TermVectorsConsumer}, digest a document and
+    /// consumers, like <see cref="StoredFieldsConsumer"/> and 
+    /// <see cref="TermVectorsConsumer"/>, digest a document and
     /// immediately write bytes to the "doc store" files (ie,
     /// they do not consume RAM per document, except while they
     /// are processing the document).
-    ///
-    /// Other consumers, eg <seealso cref="FreqProxTermsWriter"/> and
-    /// <seealso cref="NormsConsumer"/>, buffer bytes in RAM and flush only
+    /// <para/>
+    /// Other consumers, eg <see cref="FreqProxTermsWriter"/> and
+    /// <see cref="NormsConsumer"/>, buffer bytes in RAM and flush only
     /// when a new segment is produced.
-    ///
+    /// <para/>
     /// Once we have used our allowed RAM buffer, or the number
     /// of added docs is large enough (in the case we are
     /// flushing by doc count instead of RAM usage), we create a
     /// real segment and flush it to the Directory.
-    ///
+    /// <para/>
     /// Threads:
-    ///
-    /// Multiple threads are allowed into addDocument at once.
-    /// There is an initial synchronized call to getThreadState
-    /// which allocates a ThreadState for this thread.  The same
-    /// thread will get the same ThreadState over time (thread
+    /// <para/>
+    /// Multiple threads are allowed into AddDocument at once.
+    /// There is an initial synchronized call to <see cref="DocumentsWriterPerThreadPool.GetThreadState(int)"/>
+    /// which allocates a <see cref="ThreadState"/> for this thread.  The same
+    /// thread will get the same <see cref="ThreadState"/> over time (thread
     /// affinity) so that if there are consistent patterns (for
     /// example each thread is indexing a different content
     /// source) then we make better use of RAM.  Then
-    /// processDocument is called on that ThreadState without
+    /// ProcessDocument is called on that <see cref="ThreadState"/> without
     /// synchronization (most of the "heavy lifting" is in this
     /// call).  Finally the synchronized "finishDocument" is
     /// called to flush changes to the directory.
-    ///
-    /// When flush is called by IndexWriter we forcefully idle
+    /// <para/>
+    /// When flush is called by <see cref="IndexWriter"/> we forcefully idle
     /// all threads and flush only once they are all idle.  this
     /// means you can call flush with a given thread even while
     /// other threads are actively adding/deleting documents.
-    ///
+    /// <para/>
     ///
     /// Exceptions:
-    ///
+    /// <para/>
     /// Because this class directly updates in-memory posting
     /// lists, and flushes stored fields and term vectors
     /// directly to files in the directory, there are certain
@@ -90,8 +90,8 @@ namespace Lucene.Net.Index
     /// exception while appending to the in-memory posting lists
     /// can corrupt that posting list.  We call such exceptions
     /// "aborting exceptions".  In these cases we must call
-    /// abort() to discard all docs added since the last flush.
-    ///
+    /// <see cref="Abort(IndexWriter)"/> to discard all docs added since the last flush.
+    /// <para/>
     /// All other exceptions ("non-aborting exceptions") can
     /// still partially update the index structures.  These
     /// updates are consistent, but, they represent only a part
@@ -119,12 +119,13 @@ namespace Lucene.Net.Index
         internal volatile DocumentsWriterDeleteQueue deleteQueue = new DocumentsWriterDeleteQueue();
 
         private readonly DocumentsWriterFlushQueue ticketQueue = new DocumentsWriterFlushQueue();
-        /*
-         * we preserve changes during a full flush since IW might not checkout before
-         * we release all changes. NRT Readers otherwise suddenly return true from
-         * isCurrent while there are actually changes currently committed. See also
-         * #anyChanges() & #flushAllThreads
-         */
+
+        /// <summary>
+        /// We preserve changes during a full flush since IW might not checkout before
+        /// we release all changes. NRT Readers otherwise suddenly return true from
+        /// IsCurrent() while there are actually changes currently committed. See also
+        /// <see cref="AnyChanges()"/> &amp; <see cref="FlushAllThreads(IndexWriter)"/>
+        /// </summary>
         private volatile bool pendingChangesInCurrentFullFlush;
 
         internal readonly DocumentsWriterPerThreadPool perThreadPool;

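From the caller's side the threading model above reduces to the sketch below (assumed public API; 'writer' is an already-configured IndexWriter). Each worker thread is transparently bound to its own ThreadState, and the commit drives the flush path described above.

    using System.Threading.Tasks;
    using Lucene.Net.Documents;

    Parallel.For(0, 4, worker =>
    {
        for (int i = 0; i < 1000; i++)
        {
            var doc = new Document();
            doc.Add(new StringField("worker", worker.ToString(), Field.Store.NO));
            doc.Add(new TextField("body", "sample content " + i, Field.Store.NO));
            writer.AddDocument(doc); // thread-affine ThreadState inside DocumentsWriter
        }
    });
    writer.Commit(); // idles all ThreadStates and flushes pending changes
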
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b8d797ea/src/Lucene.Net.Core/Index/DocumentsWriterDeleteQueue.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/DocumentsWriterDeleteQueue.cs b/src/Lucene.Net.Core/Index/DocumentsWriterDeleteQueue.cs
index 661e8dc..640d6b9 100644
--- a/src/Lucene.Net.Core/Index/DocumentsWriterDeleteQueue.cs
+++ b/src/Lucene.Net.Core/Index/DocumentsWriterDeleteQueue.cs
@@ -27,42 +27,42 @@ namespace Lucene.Net.Index
     using Query = Lucene.Net.Search.Query;
 
     /// <summary>
-    /// <seealso cref="DocumentsWriterDeleteQueue"/> is a non-blocking linked pending deletes
+    /// <see cref="DocumentsWriterDeleteQueue"/> is a non-blocking linked pending deletes
     /// queue. In contrast to other queue implementation we only maintain the
     /// tail of the queue. A delete queue is always used in a context of a set of
     /// DWPTs and a global delete pool. Each of the DWPT and the global pool need to
-    /// maintain their 'own' head of the queue (as a DeleteSlice instance per DWPT).
+    /// maintain their 'own' head of the queue (as a <see cref="DeleteSlice"/> instance per DWPT).
     /// The difference between the DWPT and the global pool is that the DWPT starts
     /// maintaining a head once it has added its first document since for its segments
     /// private deletes only the deletes after that document are relevant. The global
     /// pool instead starts maintaining the head once this instance is created by
     /// taking the sentinel instance as its initial head.
-    /// <p>
-    /// Since each <seealso cref="DeleteSlice"/> maintains its own head and the list is only
+    /// <para/>
+    /// Since each <see cref="DeleteSlice"/> maintains its own head and the list is only
     /// single linked the garbage collector takes care of pruning the list for us.
     /// All nodes in the list that are still relevant should be either directly or
-    /// indirectly referenced by one of the DWPT's private <seealso cref="DeleteSlice"/> or by
-    /// the global <seealso cref="BufferedUpdates"/> slice.
-    /// <p>
+    /// indirectly referenced by one of the DWPT's private <see cref="DeleteSlice"/> or by
+    /// the global <see cref="BufferedUpdates"/> slice.
+    /// <para/>
     /// Each DWPT as well as the global delete pool maintain their private
     /// DeleteSlice instance. In the DWPT case updating a slice is equivalent to
     /// atomically finishing the document. The slice update guarantees a "happens
     /// before" relationship to all other updates in the same indexing session. When a
     /// DWPT updates a document it:
     ///
-    /// <ol>
-    /// <li>consumes a document and finishes its processing</li>
-    /// <li>updates its private <seealso cref="DeleteSlice"/> either by calling
-    /// <seealso cref="#updateSlice(DeleteSlice)"/> or <seealso cref="#add(Term, DeleteSlice)"/> (if the
-    /// document has a delTerm)</li>
-    /// <li>applies all deletes in the slice to its private <seealso cref="BufferedUpdates"/>
-    /// and resets it</li>
-    /// <li>increments its internal document id</li>
-    /// </ol>
+    /// <list type="number">
+    ///     <item>consumes a document and finishes its processing</item>
+    ///     <item>updates its private <see cref="DeleteSlice"/> either by calling
+    ///     <see cref="UpdateSlice(DeleteSlice)"/> or <see cref="Add(Term, DeleteSlice)"/> (if the
+    ///         document has a delTerm)</item>
+    ///     <item>applies all deletes in the slice to its private <see cref="BufferedUpdates"/>
+    ///         and resets it</item>
+    ///     <item>increments its internal document id</item>
+    /// </list>
     ///
     /// The DWPT also doesn't apply its current documents delete term until it has
     /// updated its delete slice which ensures the consistency of the update. If the
-    /// update fails before the DeleteSlice could have been updated the deleteTerm
+    /// update fails before the <see cref="DeleteSlice"/> could have been updated, the deleteTerm
     /// will also not be added to its private deletes neither to the global deletes.
     ///
     /// </summary>

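The queue itself is internal, but the operations that feed it are ordinary IndexWriter calls. A hedged caller-side sketch ('writer' and 'newDoc' are assumptions):

    // UpdateDocument carries a delTerm: the DWPT adds the new document and enqueues
    // the delete for the old one within the same slice update.
    writer.UpdateDocument(new Term("id", "42"), newDoc);

    // Plain deletes go through the global pool and become visible to every DWPT slice.
    writer.DeleteDocuments(new Term("category", "stale"));
    writer.DeleteDocuments(new TermQuery(new Term("status", "expired")));
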
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b8d797ea/src/Lucene.Net.Core/Index/DocumentsWriterFlushControl.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/DocumentsWriterFlushControl.cs b/src/Lucene.Net.Core/Index/DocumentsWriterFlushControl.cs
index d22c6d1..88438ef 100644
--- a/src/Lucene.Net.Core/Index/DocumentsWriterFlushControl.cs
+++ b/src/Lucene.Net.Core/Index/DocumentsWriterFlushControl.cs
@@ -27,15 +27,15 @@ namespace Lucene.Net.Index
     using ThreadState = Lucene.Net.Index.DocumentsWriterPerThreadPool.ThreadState;
 
     /// <summary>
-    /// this class controls <seealso cref="DocumentsWriterPerThread"/> flushing during
+    /// This class controls <see cref="DocumentsWriterPerThread"/> flushing during
     /// indexing. It tracks the memory consumption per
-    /// <seealso cref="DocumentsWriterPerThread"/> and uses a configured <seealso cref="flushPolicy"/> to
-    /// decide if a <seealso cref="DocumentsWriterPerThread"/> must flush.
-    /// <p>
-    /// In addition to the <seealso cref="flushPolicy"/> the flush control might set certain
-    /// <seealso cref="DocumentsWriterPerThread"/> as flush pending iff a
-    /// <seealso cref="DocumentsWriterPerThread"/> exceeds the
-    /// <seealso cref="IndexWriterConfig#getRAMPerThreadHardLimitMB()"/> to prevent address
+    /// <see cref="DocumentsWriterPerThread"/> and uses a configured <see cref="flushPolicy"/> to
+    /// decide if a <see cref="DocumentsWriterPerThread"/> must flush.
+    /// <para/>
+    /// In addition to the <see cref="flushPolicy"/> the flush control might set certain
+    /// <see cref="DocumentsWriterPerThread"/> as flush pending iff a
+    /// <see cref="DocumentsWriterPerThread"/> exceeds the
+    /// <see cref="IndexWriterConfig.RAMPerThreadHardLimitMB"/> to prevent address
     /// space exhaustion.
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -327,8 +327,8 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Sets flush pending state on the given <seealso cref="ThreadState"/>. The
-        /// <seealso cref="ThreadState"/> must have indexed at least on Document and must not be
+        /// Sets flush pending state on the given <see cref="ThreadState"/>. The
+        /// <see cref="ThreadState"/> must have indexed at least on <see cref="Documents.Document"/> and must not be
         /// already pending.
         /// </summary>
         public void SetFlushPending(ThreadState perThread)
@@ -491,7 +491,7 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Returns an iterator that provides access to all currently active <seealso cref="ThreadState"/>s
+        /// Returns an iterator that provides access to all currently active <see cref="ThreadState"/>s
         /// </summary>
         public IEnumerator<ThreadState> AllActiveThreadStates()
         {
@@ -865,7 +865,7 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Returns <code>true</code> if a full flush is currently running
+        /// Returns <c>true</c> if a full flush is currently running
         /// </summary>
         internal bool IsFullFlush
         {
@@ -895,7 +895,7 @@ namespace Lucene.Net.Index
 
         /// <summary>
         /// Returns the number of flushes that are checked out but not yet available
-        /// for flushing. this only applies during a full flush if a DWPT needs
+        /// for flushing. This only applies during a full flush if a DWPT needs
         /// flushing but must not be flushed until the full flush has finished.
         /// </summary>
         internal int NumBlockedFlushes
@@ -926,7 +926,7 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// this method will block if too many DWPT are currently flushing and no
+        /// This method will block if too many DWPT are currently flushing and no
         /// checked out DWPT are available
         /// </summary>
         internal void WaitIfStalled()
@@ -939,7 +939,7 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Returns <code>true</code> iff stalled
+        /// Returns <c>true</c> iff stalled
         /// </summary>
         internal bool AnyStalledThreads()
         {
@@ -947,7 +947,7 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Returns the <seealso cref="IndexWriter"/> <seealso cref="InfoStream"/>
+        /// Returns the <see cref="IndexWriter"/> <see cref="Util.InfoStream"/>
         /// </summary>
         public InfoStream InfoStream
         {

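The limits that drive this control are configured on IndexWriterConfig. A minimal configuration sketch (assumed setup: 'analyzer' and 'directory' already exist; the values are illustrative):

    var config = new IndexWriterConfig(LuceneVersion.LUCENE_48, analyzer)
    {
        RAMBufferSizeMB = 64.0,          // global trigger for flushing by RAM usage
        RAMPerThreadHardLimitMB = 512    // per-DWPT hard cap against address space exhaustion
    };

    using (var writer = new IndexWriter(directory, config))
    {
        // add/update documents; the flush control decides when each DWPT flushes
    }
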
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b8d797ea/src/Lucene.Net.Core/Index/DocumentsWriterFlushQueue.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/DocumentsWriterFlushQueue.cs b/src/Lucene.Net.Core/Index/DocumentsWriterFlushQueue.cs
index 03e7e85..72b4e7d 100644
--- a/src/Lucene.Net.Core/Index/DocumentsWriterFlushQueue.cs
+++ b/src/Lucene.Net.Core/Index/DocumentsWriterFlushQueue.cs
@@ -244,9 +244,9 @@ namespace Lucene.Net.Index
 
             /// <summary>
             /// Publishes the flushed segment, segment private deletes (if any) and its
-            /// associated global delete (if present) to IndexWriter.  The actual
-            /// publishing operation is synced on IW -> BDS so that the <seealso cref="SegmentInfo"/>'s
-            /// delete generation is always GlobalPacket_deleteGeneration + 1
+            /// associated global delete (if present) to <see cref="IndexWriter"/>.  The actual
+            /// publishing operation is synced on IW -> BDS so that the <see cref="SegmentInfo"/>'s
+            /// delete generation is always <see cref="FrozenBufferedUpdates.DelGen"/> (<paramref name="globalPacket"/>) + 1
             /// </summary>
             protected void PublishFlushedSegment(IndexWriter indexWriter, FlushedSegment newSegment, FrozenBufferedUpdates globalPacket)
             {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b8d797ea/src/Lucene.Net.Core/Index/DocumentsWriterPerThread.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/DocumentsWriterPerThread.cs b/src/Lucene.Net.Core/Index/DocumentsWriterPerThread.cs
index 09cdbb0..a516d36 100644
--- a/src/Lucene.Net.Core/Index/DocumentsWriterPerThread.cs
+++ b/src/Lucene.Net.Core/Index/DocumentsWriterPerThread.cs
@@ -48,8 +48,8 @@ namespace Lucene.Net.Index
     internal class DocumentsWriterPerThread
     {
         /// <summary>
-        /// The IndexingChain must define the <seealso cref="#getChain(DocumentsWriterPerThread)"/> method
-        /// which returns the DocConsumer that the DocumentsWriter calls to process the
+        /// The <see cref="IndexingChain"/> must define the <see cref="GetChain(DocumentsWriterPerThread)"/> method
+        /// which returns the <see cref="DocConsumer"/> that the <see cref="DocumentsWriter"/> calls to process the
         /// documents.
         /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -172,9 +172,9 @@ namespace Lucene.Net.Index
 
         /// <summary>
         /// Called if we hit an exception at a bad time (when
-        ///  updating the index files) and must discard all
-        ///  currently buffered docs.  this resets our state,
-        ///  discarding any docs added since last flush.
+        /// updating the index files) and must discard all
+        /// currently buffered docs.  This resets our state,
+        /// discarding any docs added since last flush.
         /// </summary>
         internal virtual void Abort(ISet<string> createdFiles)
         {
@@ -482,7 +482,7 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Returns the number of delete terms in this <seealso cref="DocumentsWriterPerThread"/>
+        /// Returns the number of delete terms in this <see cref="DocumentsWriterPerThread"/>
         /// </summary>
         public virtual int NumDeleteTerms
         {
@@ -494,7 +494,7 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Returns the number of RAM resident documents in this <seealso cref="DocumentsWriterPerThread"/>
+        /// Returns the number of RAM resident documents in this <see cref="DocumentsWriterPerThread"/>
         /// </summary>
         public virtual int NumDocsInRAM
         {
@@ -507,7 +507,7 @@ namespace Lucene.Net.Index
 
         /// <summary>
         /// Prepares this DWPT for flushing. this method will freeze and return the
-        /// <seealso cref="DocumentsWriterDeleteQueue"/>s global buffer and apply all pending
+        /// <see cref="DocumentsWriterDeleteQueue"/>s global buffer and apply all pending
         /// deletes to this DWPT.
         /// </summary>
         internal virtual FrozenBufferedUpdates PrepareFlush()
@@ -624,8 +624,8 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Seals the <seealso cref="SegmentInfo"/> for the new flushed segment and persists
-        /// the deleted documents <seealso cref="IMutableBits"/>.
+        /// Seals the <see cref="Index.SegmentInfo"/> for the new flushed segment and persists
+        /// the deleted documents <see cref="IMutableBits"/>.
         /// </summary>
         internal virtual void SealFlushedSegment(FlushedSegment flushedSegment)
         {
@@ -712,12 +712,16 @@ namespace Lucene.Net.Index
             get { return bytesUsed.Get() + pendingUpdates.bytesUsed.Get(); }
         }
 
-        /* Initial chunks size of the shared byte[] blocks used to
-           store postings data */
+        /// <summary>
+        /// Initial chunk size of the shared byte[] blocks used to
+        /// store postings data
+        /// </summary>
         internal static readonly int BYTE_BLOCK_NOT_MASK = ~ByteBlockPool.BYTE_BLOCK_MASK;
 
-        /* if you increase this, you must fix field cache impl for
-         * getTerms/getTermsIndex requires <= 32768 */
+        /// <summary>
+        /// If you increase this, you must fix the field cache impl for
+        /// getTerms/getTermsIndex, which requires &lt;= 32768
+        /// </summary>
         internal static readonly int MAX_TERM_LENGTH_UTF8 = ByteBlockPool.BYTE_BLOCK_SIZE - 2;
 
         /// <summary>
@@ -736,8 +740,9 @@ namespace Lucene.Net.Index
                 this.bytesUsed = bytesUsed;
             }
 
-            /* Allocate another int[] from the shared pool */
-
+            /// <summary>
+            /// Allocate another int[] from the shared pool
+            /// </summary>
             public override int[] GetInt32Block()
             {
                 int[] b = new int[Int32BlockPool.INT32_BLOCK_SIZE];

