Author: digy
Date: Mon Dec 14 19:05:31 2009
New Revision: 890443
URL: http://svn.apache.org/viewvc?rev=890443&view=rev
Log:
Some XML documentation clean up
Modified:
incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/ASCIIFoldingFilter.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/NumericTokenStream.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Standard/StandardAnalyzer.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Standard/StandardTokenizer.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Token.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/TokenStream.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Tokenattributes/PositionIncrementAttribute.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Tokenattributes/PositionIncrementAttributeImpl.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/Document.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/Field.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/Fieldable.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/NumericField.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/CompoundFileWriter.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/DirectoryReader.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/FieldInvertState.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/FilterIndexReader.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexCommit.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexDeletionPolicy.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexModifier.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexReader.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexWriter.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/LogByteSizeMergePolicy.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/LogMergePolicy.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/MergePolicy.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/MergeScheduler.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/MultiReader.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/ParallelReader.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/Payload.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/SegmentInfo.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/SegmentInfos.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/SegmentReader.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/SnapshotDeletionPolicy.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/TermPositions.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/QueryParser/MultiFieldQueryParser.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/QueryParser/QueryParser.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/BooleanQuery.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/BooleanScorer2.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Collector.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/ComplexExplanation.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/DefaultSimilarity.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/DisjunctionMaxQuery.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/DisjunctionSumScorer.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/DocIdSetIterator.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Explanation.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/FieldCache.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/FieldComparator.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Function/ByteFieldSource.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Function/DocValues.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Function/FieldCacheSource.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Function/FloatFieldSource.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Function/IntFieldSource.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Function/MultiValueSource.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Function/OrdFieldSource.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Function/ReverseOrdFieldSource.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Function/ShortFieldSource.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/FuzzyTermEnum.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/HitCollector.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/HitQueue.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Hits.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/IndexSearcher.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/NumericRangeQuery.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/PhraseScorer.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Query.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/ReqExclScorer.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/ReqOptSumScorer.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/ScoreCachingWrappingScorer.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/ScoreDocComparator.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Scorer.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Searchable.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Searcher.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Similarity.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/SloppyPhraseScorer.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Sort.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Spans/FieldMaskingSpanQuery.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Spans/NearSpansOrdered.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Spans/Spans.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/TermScorer.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/TimeLimitedCollector.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/TimeLimitingCollector.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/TopDocsCollector.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/TopFieldCollector.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Search/Weight.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Store/Directory.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Store/FSDirectory.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Store/FileSwitchDirectory.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Store/LockFactory.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Store/NativeFSLockFactory.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Store/SimpleFSLockFactory.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Util/FieldCacheSanityChecker.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Util/PriorityQueue.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Util/SmallFloat.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Util/SortedVIntList.cs
incubator/lucene.net/trunk/C#/src/Lucene.Net/Util/UnicodeUtil.cs
incubator/lucene.net/trunk/C#/src/Test/Search/CheckHits.cs
incubator/lucene.net/trunk/C#/src/Test/Search/TestDisjunctionMaxQuery.cs
incubator/lucene.net/trunk/C#/src/Test/Search/TestExplanations.cs
incubator/lucene.net/trunk/C#/src/Test/Util/LocalizedTestCase.cs
incubator/lucene.net/trunk/C#/src/Test/Util/LuceneTestCase.cs
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/ASCIIFoldingFilter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Analysis/ASCIIFoldingFilter.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/ASCIIFoldingFilter.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/ASCIIFoldingFilter.cs Mon Dec 14 19:05:31 2009
@@ -31,29 +31,29 @@
/// those characters with reasonable ASCII alternatives are converted:
///
///
///
/// See: http://en.wikipedia.org/wiki/Latin_characters_in_Unicode
///
/// The set of character conversions supported by this class is a superset of
/// those supported by Lucene's {@link ISOLatin1AccentFilter} which strips
- /// accents from Latin1 characters. For example, 'à' will be replaced by
+ /// accents from Latin1 characters. For example, 'à' will be replaced by
/// 'a'.
///
public sealed class ASCIIFoldingFilter:TokenFilter
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/NumericTokenStream.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Analysis/NumericTokenStream.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/NumericTokenStream.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/NumericTokenStream.cs Mon Dec 14 19:05:31 2009
@@ -44,7 +44,7 @@
/// should use this class.
///
/// See {@link NumericField} for capabilities of fields
- /// indexed numerically.
+ /// indexed numerically.
///
/// Here's an example usage, for an int field:
///
@@ -74,17 +74,17 @@
///
/// This stream is not intended to be used in analyzers;
/// it's more for iterating the different precisions during
- /// indexing a specific numeric value.
+ /// indexing a specific numeric value.
///
/// NOTE: as token streams are only consumed once
/// the document is added to the index, if you index more
/// than one numeric field, use a separate NumericTokenStream
- /// instance for each.
+ /// instance for each.
///
/// See {@link NumericRangeQuery} for more details on the
/// precisionStep
- /// parameter as well as how numeric fields work under the hood.
+ /// parameter as well as how numeric fields work under the hood.
///
/// NOTE: This API is experimental and
/// might change in incompatible ways in the next release.
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Standard/StandardAnalyzer.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Analysis/Standard/StandardAnalyzer.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Standard/StandardAnalyzer.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Standard/StandardAnalyzer.cs Mon Dec 14 19:05:31 2009
@@ -32,9 +32,9 @@
/// You must specify the required {@link Version} compatibility when creating
/// StandardAnalyzer:
///
- /// - As of 2.9, StopFilter preserves position increments
+ /// - As of 2.9, StopFilter preserves position increments
/// - As of 2.4, Tokens incorrectly identified as acronyms are corrected (see
- /// LUCENE-1608
+ /// LUCENE-1608
///
///
///
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Standard/StandardTokenizer.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Analysis/Standard/StandardTokenizer.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Standard/StandardTokenizer.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Standard/StandardTokenizer.cs Mon Dec 14 19:05:31 2009
@@ -36,10 +36,10 @@
///
///
/// - Splits words at punctuation characters, removing punctuation. However, a
- /// dot that's not followed by whitespace is considered part of a token.
+ /// dot that's not followed by whitespace is considered part of a token.
/// - Splits words at hyphens, unless there's a number in the token, in which case
- /// the whole token is interpreted as a product number and is not split.
- /// - Recognizes email addresses and internet hostnames as one token.
+ /// the whole token is interpreted as a product number and is not split.
+ /// - Recognizes email addresses and internet hostnames as one token.
///
///
/// Many applications have specific tokenizer needs. If this tokenizer does
@@ -52,7 +52,7 @@
/// StandardAnalyzer:
///
/// - As of 2.4, Tokens incorrectly identified as acronyms are corrected (see
- /// LUCENE-1608
+ /// LUCENE-1608
///
///
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Token.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Analysis/Token.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Token.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Token.cs Mon Dec 14 19:05:31 2009
@@ -50,7 +50,7 @@
/// length byte array. Use {@link TermPositions#GetPayloadLength()} and
/// {@link TermPositions#GetPayload(byte[], int)} to retrieve the payloads from the index.
///
- ///
+ ///
///
/// NOTE: As of 2.9, Token implements all {@link Attribute} interfaces
/// that are part of core Lucene and can be found in the {@code tokenattributes} subpackage.
@@ -58,7 +58,7 @@
/// be used as convenience class that implements all {@link Attribute}s, which is especially useful
/// to easily switch from the old to the new TokenStream API.
///
- ///
+ ///
/// NOTE: As of 2.3, Token stores the term text
/// internally as a malleable char[] termBuffer instead of
/// String termText. The indexing code and core tokenizers
@@ -69,7 +69,7 @@
/// String for every term. The APIs that accept String
/// termText are still available but a warning about the
/// associated performance cost has been added (below). The
- /// {@link #TermText()} method has been deprecated.
+ /// {@link #TermText()} method has been deprecated.
///
/// Tokenizers and TokenFilters should try to re-use a Token instance when
/// possible for best performance, by implementing the
@@ -86,7 +86,7 @@
/// or with {@link System#arraycopy(Object, int, Object, int, int)}, and finally call {@link #SetTermLength(int)} to
/// set the length of the term text. See LUCENE-969
- /// for details.
+ /// for details.
/// Typical Token reuse patterns:
///
- ///
+ ///
///
///
///
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/TokenStream.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Analysis/TokenStream.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/TokenStream.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/TokenStream.cs Mon Dec 14 19:05:31 2009
@@ -38,9 +38,9 @@
///
/// This is an abstract class. Concrete subclasses are:
///
- /// - {@link Tokenizer}, a TokenStream whose input is a Reader; and
+ /// - {@link Tokenizer}, a TokenStream whose input is a Reader; and
/// - {@link TokenFilter}, a TokenStream whose input is another
- /// TokenStream.
+ /// TokenStream.
///
/// A new TokenStream API has been introduced with Lucene 2.9. This API
/// has moved from being {@link Token} based to {@link Attribute} based. While
@@ -57,16 +57,16 @@
/// The workflow of the new TokenStream API is as follows:
///
/// - Instantiation of TokenStream/{@link TokenFilter}s which add/get
- /// attributes to/from the {@link AttributeSource}.
- /// - The consumer calls {@link TokenStream#Reset()}.
+ /// attributes to/from the {@link AttributeSource}.
+ /// - The consumer calls {@link TokenStream#Reset()}.
/// - The consumer retrieves attributes from the stream and stores local
- /// references to all attributes it wants to access
+ /// references to all attributes it wants to access
/// - The consumer calls {@link #IncrementToken()} until it returns false and
- /// consumes the attributes after each call.
+ /// consumes the attributes after each call.
/// - The consumer calls {@link #End()} so that any end-of-stream operations
- /// can be performed.
+ /// can be performed.
/// - The consumer calls {@link #Close()} to release any resource when finished
- /// using the TokenStream
+ /// using the TokenStream
///
/// To make sure that filters and consumers know which attributes are available,
/// the attributes must be added during instantiation. Filters and consumers are
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Tokenattributes/PositionIncrementAttribute.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Analysis/Tokenattributes/PositionIncrementAttribute.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Tokenattributes/PositionIncrementAttribute.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Tokenattributes/PositionIncrementAttribute.cs Mon Dec 14 19:05:31 2009
@@ -35,14 +35,14 @@
/// including either stem will match. In this case, all but the first stem's
/// increment should be set to zero: the increment of the first instance
/// should be one. Repeating a token with an increment of zero can also be
- /// used to boost the scores of matches on that token.
+ /// used to boost the scores of matches on that token.
///
/// Set it to values greater than one to inhibit exact phrase matches.
/// If, for example, one does not want phrases to match across removed stop
/// words, then one could build a stop word filter that removes stop words and
/// also sets the increment to the number of stop words removed before each
/// non-stop word. Then exact phrase queries will only match when the terms
- /// occur with no intervening stop words.
+ /// occur with no intervening stop words.
///
///
///
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Tokenattributes/PositionIncrementAttributeImpl.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Analysis/Tokenattributes/PositionIncrementAttributeImpl.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Tokenattributes/PositionIncrementAttributeImpl.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Analysis/Tokenattributes/PositionIncrementAttributeImpl.cs Mon Dec 14 19:05:31 2009
@@ -36,14 +36,14 @@
/// including either stem will match. In this case, all but the first stem's
/// increment should be set to zero: the increment of the first instance
/// should be one. Repeating a token with an increment of zero can also be
- /// used to boost the scores of matches on that token.
+ /// used to boost the scores of matches on that token.
///
/// Set it to values greater than one to inhibit exact phrase matches.
/// If, for example, one does not want phrases to match across removed stop
/// words, then one could build a stop word filter that removes stop words and
/// also sets the increment to the number of stop words removed before each
/// non-stop word. Then exact phrase queries will only match when the terms
- /// occur with no intervening stop words.
+ /// occur with no intervening stop words.
///
///
///
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/Document.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Document/Document.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/Document.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/Document.cs Mon Dec 14 19:05:31 2009
@@ -140,12 +140,12 @@
/// Adds a field to a document. Several fields may be added with
/// the same name. In this case, if the fields are indexed, their text is
- /// treated as though appended for the purposes of search.
+ /// treated as though appended for the purposes of search.
/// Note that add like the removeField(s) methods only makes sense
/// prior to adding a document to an index. These methods cannot
/// be used to change the content of an existing index! In order to achieve this,
/// a document has to be deleted from an index and a new changed version of that
- /// document has to be added.
+ /// document has to be added.
///
public void Add(Fieldable field)
{
@@ -154,12 +154,12 @@
/// Removes field with the specified name from the document.
/// If multiple fields exist with this name, this method removes the first field that has been added.
- /// If there is no field with the specified name, the document remains unchanged.
+ /// If there is no field with the specified name, the document remains unchanged.
/// Note that the removeField(s) methods like the add method only make sense
/// prior to adding a document to an index. These methods cannot
/// be used to change the content of an existing index! In order to achieve this,
/// a document has to be deleted from an index and a new changed version of that
- /// document has to be added.
+ /// document has to be added.
///
public void RemoveField(System.String name)
{
@@ -176,12 +176,12 @@
}
/// Removes all fields with the given name from the document.
- /// If there is no field with the specified name, the document remains unchanged.
+ /// If there is no field with the specified name, the document remains unchanged.
/// Note that the removeField(s) methods like the add method only make sense
/// prior to adding a document to an index. These methods cannot
/// be used to change the content of an existing index! In order to achieve this,
/// a document has to be deleted from an index and a new changed version of that
- /// document has to be added.
+ /// document has to be added.
///
public void RemoveFields(System.String name)
{
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/Field.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Document/Field.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/Field.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/Field.cs Mon Dec 14 19:05:31 2009
@@ -243,12 +243,12 @@
/// instance to improve indexing speed by avoiding GC cost
/// of new'ing and reclaiming Field instances. Typically
/// a single {@link Document} instance is re-used as
- /// well. This helps most on small documents.
+ /// well. This helps most on small documents.
///
/// Each Field instance should only be used once
/// within a single {@link Document} instance. See ImproveIndexingSpeed
- /// for details.
+ /// for details.
///
public void SetValue(System.String value_Renamed)
{
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/Fieldable.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Document/Fieldable.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/Fieldable.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/Fieldable.cs Mon Dec 14 19:05:31 2009
@@ -29,7 +29,7 @@
/// This means new methods may be added from version to version. This change only affects the Fieldable API; other backwards
/// compatibility promises remain intact. For example, Lucene can still
/// read and write indices created within the same major version.
- ///
+ ///
///
///
///
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/NumericField.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Document/NumericField.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/NumericField.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Document/NumericField.cs Mon Dec 14 19:05:31 2009
@@ -64,7 +64,7 @@
/// value, either by dividing the result of
/// {@link java.util.Date#getTime} or using the separate getters
/// (for year, month, etc.) to construct an int or
- /// long value.
+ /// long value.
///
/// To perform range querying or filtering against a
/// NumericField, use {@link NumericRangeQuery} or {@link
@@ -72,25 +72,25 @@
/// NumericField, use the normal numeric sort types, eg
/// {@link SortField#INT} (note that {@link SortField#AUTO}
/// will not work with these fields). NumericField values
- /// can also be loaded directly from {@link FieldCache}.
+ /// can also be loaded directly from {@link FieldCache}.
///
/// By default, a NumericField's value is not stored but
/// is indexed for range filtering and sorting. You can use
/// the {@link #NumericField(String,Field.Store,boolean)}
- /// constructor if you need to change these defaults.
+ /// constructor if you need to change these defaults.
///
/// You may add the same field name as a NumericField to
/// the same document more than once. Range querying and
/// filtering will be the logical OR of all values; so a range query
/// will hit all documents that have at least one value in
/// the range. However sort behavior is not defined. If you need to sort,
- /// you should separately index a single-valued NumericField.
+ /// you should separately index a single-valued NumericField.
///
/// A NumericField will consume somewhat more disk space
/// in the index than an ordinary single-valued field.
/// However, for a typical index that includes substantial
/// textual content per document, this increase will likely
- /// be in the noise.
+ /// be in the noise.
///
/// Within Lucene, each numeric value is indexed as a
/// trie structure, where each term is logically
@@ -122,12 +122,12 @@
/// If you only need to sort by numeric value, and never
/// run range querying/filtering, you can index using a
/// precisionStep of {@link Integer#MAX_VALUE}.
- /// This will minimize disk space consumed.
+ /// This will minimize disk space consumed.
///
/// More advanced users can instead use {@link
/// NumericTokenStream} directly, when indexing numbers. This
/// class is a wrapper around this token stream type for
- /// easier, more intuitive usage.
+ /// easier, more intuitive usage.
///
/// NOTE: This class is only used during
/// indexing. When retrieving the stored field value from a
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/CompoundFileWriter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/CompoundFileWriter.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/CompoundFileWriter.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/CompoundFileWriter.cs Mon Dec 14 19:05:31 2009
@@ -26,7 +26,7 @@
/// Combines multiple files into a single compound file.
- /// The file format:
+ /// The file format:
///
/// - VInt fileCount
/// - {Directory}
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/DirectoryReader.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/DirectoryReader.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/DirectoryReader.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/DirectoryReader.cs Mon Dec 14 19:05:31 2009
@@ -1086,7 +1086,7 @@
/// Expert: return the IndexCommit that this reader has opened.
///
- /// WARNING: this API is new and experimental and may suddenly change.
+ /// WARNING: this API is new and experimental and may suddenly change.
///
public override IndexCommit GetIndexCommit()
{
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/FieldInvertState.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/FieldInvertState.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/FieldInvertState.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/FieldInvertState.cs Mon Dec 14 19:05:31 2009
@@ -27,7 +27,7 @@
/// also used to calculate the normalization factor for a field.
///
/// WARNING: This API is new and experimental, and may suddenly
- /// change.
+ /// change.
///
public sealed class FieldInvertState
{
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/FilterIndexReader.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/FilterIndexReader.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/FilterIndexReader.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/FilterIndexReader.cs Mon Dec 14 19:05:31 2009
@@ -143,8 +143,8 @@
/// Construct a FilterIndexReader based on the specified base reader.
/// Directory locking for delete, undeleteAll, and setNorm operations is
- /// left to the base reader.
- /// Note that base reader is closed if this FilterIndexReader is closed.
+ /// left to the base reader.
+ /// Note that base reader is closed if this FilterIndexReader is closed.
///
/// specified base reader.
///
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexCommit.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/IndexCommit.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexCommit.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexCommit.cs Mon Dec 14 19:05:31 2009
@@ -23,21 +23,21 @@
{
/// Expert: represents a single commit into an index as seen by the
- /// {@link IndexDeletionPolicy} or {@link IndexReader}.
+ /// {@link IndexDeletionPolicy} or {@link IndexReader}.
///
/// Changes to the content of an index are made visible
/// only after the writer who made that change commits by
/// writing a new segments file
/// (segments_N). This point in time, when the
/// action of writing of a new segments file to the directory
- /// is completed, is an index commit.
+ /// is completed, is an index commit.
///
/// Each index commit point has a unique segments file
/// associated with it. The segments file associated with a
- /// later index commit point would have a larger N.
+ /// later index commit point would have a larger N.
///
/// WARNING: This API is a new and experimental and
- /// may suddenly change.
+ /// may suddenly change.
///
public abstract class IndexCommit : IndexCommitPoint
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexDeletionPolicy.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/IndexDeletionPolicy.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexDeletionPolicy.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexDeletionPolicy.cs Mon Dec 14 19:05:31 2009
@@ -29,7 +29,7 @@
/// are deleted from the index directory. The default deletion policy
/// is {@link KeepOnlyLastCommitDeletionPolicy}, which always
/// removes old commits as soon as a new commit is done (this
- /// matches the behavior before 2.2).
+ /// matches the behavior before 2.2).
///
/// One expected use case for this (and the reason why it
/// was first created) is to work around problems with an
@@ -44,7 +44,7 @@
/// increase the storage requirements of the index. See LUCENE-710
- /// for details.
+ /// for details.
///
public interface IndexDeletionPolicy
@@ -52,13 +52,13 @@
/// This is called once when a writer is first
/// instantiated to give the policy a chance to remove old
- /// commit points.
+ /// commit points.
///
/// The writer locates all index commits present in the
/// index directory and calls this method. The policy may
/// choose to delete some of the commit points, doing so by
/// calling method {@link IndexCommit#delete delete()}
- /// of {@link IndexCommit}.
+ /// of {@link IndexCommit}.
///
/// Note: the last CommitPoint is the most recent one,
/// i.e. the "front index state". Be careful not to delete it,
@@ -74,11 +74,11 @@
/// This is called each time the writer completed a commit.
/// This gives the policy a chance to remove old commit points
- /// with each commit.
+ /// with each commit.
///
/// The policy may now choose to delete old commit points
/// by calling method {@link IndexCommit#delete delete()}
- /// of {@link IndexCommit}.
+ /// of {@link IndexCommit}.
///
/// If writer has autoCommit = true
then
/// this method will in general be called many times during
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexModifier.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/IndexModifier.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexModifier.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexModifier.cs Mon Dec 14 19:05:31 2009
@@ -29,7 +29,7 @@
/// [Note that as of 2.1, all but one of the
/// methods in this class are available via {@link
/// IndexWriter}. The one method that is not available is
- /// {@link #DeleteDocument(int)}.]
+ /// {@link #DeleteDocument(int)}.]
///
/// A class to modify an index, i.e. to delete and add documents. This
/// class hides {@link IndexReader} and {@link IndexWriter} so that you
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexReader.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/IndexReader.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexReader.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexReader.cs Mon Dec 14 19:05:31 2009
@@ -44,7 +44,7 @@
/// instead always throw UnsupportedOperationException. Subclasses are
/// strongly encouraged to override these methods, but in many cases may not
/// need to.
- ///
+ ///
///
/// NOTE: as of 2.4, it's possible to open a read-only
/// IndexReader using one of the static open methods that
@@ -55,7 +55,7 @@
/// read/write IndexReader. But in 3.0 this default will
/// change to true, meaning you must explicitly specify false
/// if you want to make changes with the resulting IndexReader.
- ///
+ ///
/// NOTE: {@link
/// IndexReader
} instances are completely thread
/// safe, meaning multiple threads can call any of its methods,
@@ -589,7 +589,7 @@
///
/// If the index has not changed since this instance was (re)opened, then this
/// call is a NOOP and returns this instance. Otherwise, a new instance is
- /// returned. The old instance is not closed and remains usable.
+ /// returned. The old instance is not closed and remains usable.
///
/// If the reader is reopened, even though they share
/// resources internally, it's safe to make changes
@@ -860,7 +860,7 @@
/// this method returns the version recorded in the commit that the reader
/// opened. This version is advanced every time {@link IndexWriter#Commit} is
/// called.
- ///
+ ///
///
///
/// If instead this reader is a near real-time reader (ie, obtained by a call
@@ -870,7 +870,7 @@
/// with the writer, the version will not changed until a commit is
/// completed. Thus, you should not rely on this method to determine when a
/// near real-time reader should be opened. Use {@link #IsCurrent} instead.
- ///
+ ///
///
///
/// UnsupportedOperationException
@@ -904,7 +904,7 @@
/// N*termIndexInterval terms in the index is loaded into
/// memory. By setting this to a value > 1 you can reduce
/// memory usage, at the expense of higher latency when
- /// loading a TermInfo. The default value is 1.
+ /// loading a TermInfo. The default value is 1.
///
/// NOTE: you must call this before the term
/// index is loaded. If the index is already loaded,
@@ -937,7 +937,7 @@
/// {@link #open}, or {@link #reopen} on a reader based on a Directory), then
/// this method checks if any further commits (see {@link IndexWriter#commit}
/// have occurred in that directory).
- ///
+ ///
///
///
/// If instead this reader is a near real-time reader (ie, obtained by a call
@@ -946,12 +946,12 @@
/// occurred, or any new uncommitted changes have taken place via the writer.
/// Note that even if the writer has only performed merging, this method will
/// still return false.
- ///
+ ///
///
///
/// In any event, if this returns false, you should call {@link #reopen} to
/// get a new reader that sees the changes.
- ///
+ ///
///
///
/// CorruptIndexException if the index is corrupt
@@ -1626,7 +1626,7 @@
/// segments_N file.
///
/// WARNING: this API is new and experimental and
- /// may suddenly change.
+ /// may suddenly change.
///
public virtual IndexCommit GetIndexCommit()
{
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexWriter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/IndexWriter.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexWriter.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IndexWriter.cs Mon Dec 14 19:05:31 2009
@@ -44,13 +44,13 @@
/// also {@link #IndexWriter(Directory, Analyzer) constructors}
/// with no create
argument which will create a new index
/// if there is not already an index at the provided path and otherwise
- /// open the existing index.
+ /// open the existing index.
/// In either case, documents are added with {@link #AddDocument(Document)
/// addDocument} and removed with {@link #DeleteDocuments(Term)} or {@link
/// #DeleteDocuments(Query)}. A document can be updated with {@link
/// #UpdateDocument(Term, Document) updateDocument} (which just deletes
/// and then adds the entire document). When finished adding, deleting
- /// and updating documents, {@link #Close() close} should be called.
+ /// and updating documents, {@link #Close() close} should be called.
///
/// These changes are buffered in memory and periodically
/// flushed to the {@link Directory} (during the above method
@@ -69,7 +69,7 @@
/// also trigger one or more segment merges which by default
/// run with a background thread so as not to block the
/// addDocument calls (see below
- /// for changing the {@link MergeScheduler}).
+ /// for changing the {@link MergeScheduler}).
///
/// The optional autoCommit
argument to the {@link
/// #IndexWriter(Directory, boolean, Analyzer) constructors}
@@ -96,7 +96,7 @@
/// followed by {@link #Commit()}. This is necessary when
/// Lucene is working with an external resource (for example,
/// a database) and both must either commit or rollback the
- /// transaction.
+ /// transaction.
/// When autoCommit
is true
then
/// the writer will periodically commit on its own. [Deprecated: Note that in 3.0, IndexWriter will
/// no longer accept autoCommit=true (it will be hardwired to
@@ -110,22 +110,22 @@
/// see the changes to the index as of that commit. When
/// running in this mode, be careful not to refresh your
/// readers while optimize or segment merges are taking place
- /// as this can tie up substantial disk space.
+ /// as this can tie up substantial disk space.
///
/// Regardless of autoCommit
, an {@link
/// IndexReader} or {@link Lucene.Net.Search.IndexSearcher} will only see the
/// index as of the "point in time" that it was opened. Any
/// changes committed to the index after the reader was opened
- /// are not visible until the reader is re-opened.
+ /// are not visible until the reader is re-opened.
/// If an index will not have more documents added for a while and optimal search
/// performance is desired, then either the full {@link #Optimize() optimize}
/// method or partial {@link #Optimize(int)} method should be
- /// called before the index is closed.
+ /// called before the index is closed.
/// Opening an IndexWriter
creates a lock file for the directory in use. Trying to open
/// another IndexWriter
on the same directory will lead to a
/// {@link LockObtainFailedException}. The {@link LockObtainFailedException}
/// is also thrown if an IndexReader on the same directory is used to delete documents
- /// from the index.
+ /// from the index.
///
///
/// Expert: IndexWriter
allows an optional
@@ -141,7 +141,7 @@
/// deleted out from under them. This is necessary on
/// filesystems like NFS that do not support "delete on last
/// close" semantics, which Lucene's "point in time" search
- /// normally relies on.
+ /// normally relies on.
/// Expert:
/// IndexWriter
allows you to separately change
/// the {@link MergePolicy} and the {@link MergeScheduler}.
@@ -153,7 +153,7 @@
/// {@link LogByteSizeMergePolicy}. Then, the {@link
/// MergeScheduler} is invoked with the requested merges and
/// it decides when and how to run the merges. The default is
- /// {@link ConcurrentMergeScheduler}.
+ /// {@link ConcurrentMergeScheduler}.
/// NOTE: if you hit an
/// OutOfMemoryError then IndexWriter will quietly record this
/// fact and block all future segment commits. This is a
@@ -165,7 +165,7 @@
/// #Rollback()}, to undo any changes to the index since the
/// last commit. If you opened the writer with autoCommit
/// false you can also just call {@link #Rollback()}
- /// directly.
+ /// directly.
/// NOTE: {@link
/// IndexWriter
} instances are completely thread
/// safe, meaning multiple threads can call any of its
@@ -173,7 +173,7 @@
/// external synchronization, you should not
/// synchronize on the IndexWriter
instance as
/// this may cause deadlock; use your own (non-Lucene) objects
- /// instead.
+ /// instead.
///
/*
@@ -372,36 +372,36 @@
/// experiment in your situation to determine if it's
/// faster enough. As this is a new and experimental
/// feature, please report back on your findings so we can
- /// learn, improve and iterate.
+ /// learn, improve and iterate.
///
/// The resulting reader suppports {@link
/// IndexReader#reopen}, but that call will simply forward
/// back to this method (though this may change in the
- /// future).
+ /// future).
///
/// The very first time this method is called, this
/// writer instance will make every effort to pool the
/// readers that it opens for doing merges, applying
/// deletes, etc. This means additional resources (RAM,
- /// file descriptors, CPU time) will be consumed.
+ /// file descriptors, CPU time) will be consumed.
///
/// For lower latency on reopening a reader, you may
/// want to call {@link #setMergedSegmentWarmer} to
/// pre-warm a newly merged segment before it's committed
- /// to the index.
+ /// to the index.
///
/// If an addIndexes* call is running in another thread,
/// then this reader will only search those segments from
/// the foreign index that have been successfully copied
- /// over, so far.
+ /// over, so far.
///
/// NOTE: Once the writer is closed, any
/// outstanding readers may continue to be used. However,
/// if you attempt to reopen any of those readers, you'll
- /// hit an {@link AlreadyClosedException}.
+ /// hit an {@link AlreadyClosedException}.
///
/// NOTE: This API is experimental and might
- /// change in incompatible ways in the next release.
+ /// change in incompatible ways in the next release.
///
///
/// IndexReader that covers entire index plus all
@@ -962,12 +962,12 @@
/// this just returns the value previously set with
/// setUseCompoundFile(boolean), or the default value
/// (true). You cannot use this to query the status of
- /// previously flushed segments.
+ /// previously flushed segments.
///
/// Note that this method is a convenience method: it
/// just calls mergePolicy.getUseCompoundFile as long as
/// mergePolicy is an instance of {@link LogMergePolicy}.
- /// Otherwise an IllegalArgumentException is thrown.
+ /// Otherwise an IllegalArgumentException is thrown.
///
///
///
@@ -979,12 +979,12 @@
/// Setting to turn on usage of a compound file. When on,
/// multiple files for each segment are merged into a
- /// single file when a new segment is flushed.
+ /// single file when a new segment is flushed.
///
/// Note that this method is a convenience method: it
/// just calls mergePolicy.setUseCompoundFile as long as
/// mergePolicy is an instance of {@link LogMergePolicy}.
- /// Otherwise an IllegalArgumentException is thrown.
+ /// Otherwise an IllegalArgumentException is thrown.
///
public virtual void SetUseCompoundFile(bool value_Renamed)
{
@@ -2001,19 +2001,19 @@
/// interactive indexing, as this limits the length of
/// pauses while indexing to a few seconds. Larger values
/// are best for batched indexing and speedier
- /// searches.
+ /// searches.
///
- /// The default value is {@link Integer#MAX_VALUE}.
+ /// The default value is {@link Integer#MAX_VALUE}.
///
/// Note that this method is a convenience method: it
/// just calls mergePolicy.setMaxMergeDocs as long as
/// mergePolicy is an instance of {@link LogMergePolicy}.
- /// Otherwise an IllegalArgumentException is thrown.
+ /// Otherwise an IllegalArgumentException is thrown.
///
/// The default merge policy ({@link
/// LogByteSizeMergePolicy}) also allows you to set this
/// limit by net size (in MB) of the segment, using {@link
- /// LogByteSizeMergePolicy#setMaxMergeMB}.
+ /// LogByteSizeMergePolicy#setMaxMergeMB}.
///
public virtual void SetMaxMergeDocs(int maxMergeDocs)
{
@@ -2021,12 +2021,12 @@
}
/// Returns the largest segment (measured by document
- /// count) that may be merged with other segments.
+ /// count) that may be merged with other segments.
///
/// Note that this method is a convenience method: it
/// just calls mergePolicy.getMaxMergeDocs as long as
/// mergePolicy is an instance of {@link LogMergePolicy}.
- /// Otherwise an IllegalArgumentException is thrown.
+ /// Otherwise an IllegalArgumentException is thrown.
///
///
///
@@ -2079,9 +2079,9 @@
/// #DISABLE_AUTO_FLUSH} to prevent triggering a flush due
/// to number of buffered documents. Note that if flushing
/// by RAM usage is also enabled, then the flush will be
- /// triggered by whichever comes first.
+ /// triggered by whichever comes first.
///
- /// Disabled by default (writer flushes by RAM usage).
+ /// Disabled by default (writer flushes by RAM usage).
///
///
/// IllegalArgumentException if maxBufferedDocs is
@@ -2149,7 +2149,7 @@
/// Pass in {@link #DISABLE_AUTO_FLUSH} to prevent
/// triggering a flush due to RAM usage. Note that if
/// flushing by document count is also enabled, then the
- /// flush will be triggered by whichever comes first.
+ /// flush will be triggered by whichever comes first.
///
/// NOTE: the account of RAM usage for pending
/// deletions is only approximate. Specifically, if you
@@ -2167,9 +2167,9 @@
/// less than 2048 MB. The precise limit depends on various factors, such as
/// how large your documents are, how many fields have norms, etc., so it's
/// best to set this value comfortably under 2048.
- ///
+ ///
///
- /// The default value is {@link #DEFAULT_RAM_BUFFER_SIZE_MB}.
+ /// The default value is {@link #DEFAULT_RAM_BUFFER_SIZE_MB}.
///
///
/// IllegalArgumentException if ramBufferSize is
@@ -2200,8 +2200,8 @@
/// Determines the minimal number of delete terms required before the buffered
/// in-memory delete terms are applied and flushed. If there are documents
/// buffered in memory at the time, they are merged and a new segment is
- /// created.
- /// Disabled by default (writer flushes by RAM usage).
+ /// created.
+ /// Disabled by default (writer flushes by RAM usage).
///
///
/// IllegalArgumentException if maxBufferedDeleteTerms
@@ -2241,7 +2241,7 @@
/// Note that this method is a convenience method: it
/// just calls mergePolicy.setMergeFactor as long as
/// mergePolicy is an instance of {@link LogMergePolicy}.
- /// Otherwise an IllegalArgumentException is thrown.
+ /// Otherwise an IllegalArgumentException is thrown.
///
/// This must never be less than 2. The default value is 10.
///
@@ -2252,12 +2252,12 @@
/// Returns the number of segments that are merged at
/// once and also controls the total number of segments
- /// allowed to accumulate in the index.
+ /// allowed to accumulate in the index.
///
/// Note that this method is a convenience method: it
/// just calls mergePolicy.getMergeFactor as long as
/// mergePolicy is an instance of {@link LogMergePolicy}.
- /// Otherwise an IllegalArgumentException is thrown.
+ /// Otherwise an IllegalArgumentException is thrown.
///
///
///
@@ -2400,14 +2400,14 @@
/// be consistent. However, the close will not be complete
/// even though part of it (flushing buffered documents)
/// may have succeeded, so the write lock will still be
- /// held.
+ /// held.
///
/// If you can correct the underlying cause (eg free up
/// some disk space) then you can call close() again.
/// Failing that, if you want to force the write lock to be
/// released (dangerous, because you may then lose buffered
/// docs in the IndexWriter instance) then you can do
- /// something like this:
+ /// something like this:
///
///
/// try {
@@ -2420,11 +2420,11 @@
///
///
/// after which, you must be certain not to use the writer
- /// instance anymore.
+ /// instance anymore.
///
/// NOTE: if this method hits an OutOfMemoryError
/// you should immediately close the writer, again. See above for details.
+ /// href="#OOME">above for details.
///
///
/// CorruptIndexException if the index is corrupt
@@ -2441,14 +2441,14 @@
///
/// NOTE: if this method hits an OutOfMemoryError
/// you should immediately close the writer, again. See above for details.
+ /// href="#OOME">above for details.
///
/// NOTE: it is dangerous to always call
/// close(false), especially when IndexWriter is not open
/// for very long, because this can result in "merge
/// starvation" whereby long merges will never have a
/// chance to finish. This will cause too many segments in
- /// your index over time.
+ /// your index over time.
///
///
/// if true, this call will block
@@ -2790,12 +2790,12 @@
/// may not have been added. Furthermore, it's possible
/// the index will have one segment in non-compound format
/// even when using compound files (when a merge has
- /// partially succeeded).
+ /// partially succeeded).
///
/// This method periodically flushes pending documents
/// to the Directory (see above), and
/// also periodically triggers segment merges in the index
- /// according to the {@link MergePolicy} in use.
+ /// according to the {@link MergePolicy} in use.
///
/// Merges temporarily consume space in the
/// directory. The amount of space required is up to 1X the
@@ -2809,17 +2809,17 @@
///
/// Note that each term in the document can be no longer
/// than 16383 characters, otherwise an
- /// IllegalArgumentException will be thrown.
+ /// IllegalArgumentException will be thrown.
///
/// Note that it's possible to create an invalid Unicode
/// string in java if a UTF16 surrogate pair is malformed.
/// In this case, the invalid characters are silently
/// replaced with the Unicode replacement character
- /// U+FFFD.
+ /// U+FFFD.
///
/// NOTE: if this method hits an OutOfMemoryError
/// you should immediately close the writer. See above for details.
+ /// href="#OOME">above for details.
///
///
/// CorruptIndexException if the index is corrupt
@@ -2836,11 +2836,11 @@
///
/// See {@link #AddDocument(Document)} for details on
/// index and IndexWriter state after an Exception, and
- /// flushing/merging temporary free space requirements.
+ /// flushing/merging temporary free space requirements.
///
/// NOTE: if this method hits an OutOfMemoryError
/// you should immediately close the writer. See above for details.
+ /// href="#OOME">above for details.
///
///
/// CorruptIndexException if the index is corrupt
@@ -2891,7 +2891,7 @@
///
/// NOTE: if this method hits an OutOfMemoryError
/// you should immediately close the writer. See above for details.
+ /// href="#OOME">above for details.
///
///
/// the term to identify the documents to be deleted
@@ -2918,7 +2918,7 @@
///
/// NOTE: if this method hits an OutOfMemoryError
/// you should immediately close the writer. See above for details.
+ /// href="#OOME">above for details.
///
///
/// array of terms to identify the documents
@@ -2945,7 +2945,7 @@
///
/// NOTE: if this method hits an OutOfMemoryError
/// you should immediately close the writer. See above for details.
+ /// href="#OOME">above for details.
///
///
/// the query to identify the documents to be deleted
@@ -2965,7 +2965,7 @@
///
/// NOTE: if this method hits an OutOfMemoryError
/// you should immediately close the writer. See above for details.
+ /// href="#OOME">above for details.
///
///
/// array of queries to identify the documents
@@ -2989,7 +2989,7 @@
///
/// NOTE: if this method hits an OutOfMemoryError
/// you should immediately close the writer. See above for details.
+ /// href="#OOME">above for details.
///
///
/// the term to identify the document(s) to be
@@ -3013,7 +3013,7 @@
///
/// NOTE: if this method hits an OutOfMemoryError
/// you should immediately close the writer. See above for details.
+ /// href="#OOME">above for details.
///
///
/// the term to identify the document(s) to be
@@ -3145,28 +3145,28 @@
/// It is recommended that this method be called upon completion of indexing. In
/// environments with frequent updates, optimize is best done during low volume times, if at all.
///
- ///
- /// See http://www.gossamer-threads.com/lists/lucene/java-dev/47895 for more discussion.
+ ///
+ /// See http://www.gossamer-threads.com/lists/lucene/java-dev/47895 for more discussion.
///
/// Note that optimize requires 2X the index size free
/// space in your Directory. For example, if your index
/// size is 10 MB then you need 20 MB free for optimize to
- /// complete.
+ /// complete.
///
/// If some but not all readers re-open while an
/// optimize is underway, this will cause > 2X temporary
/// space to be consumed as those new readers will then
/// hold open the partially optimized segments at that
/// time. It is best not to re-open readers while optimize
- /// is running.
+ /// is running.
///
/// The actual temporary usage could be much less than
- /// these figures (it depends on many factors).
+ /// these figures (it depends on many factors).
///
/// In general, once the optimize completes, the total size of the
/// index will be less than the size of the starting index.
/// It could be quite a bit smaller (if there were many
- /// pending deletes) or just slightly smaller.
+ /// pending deletes) or just slightly smaller.
///
/// If an Exception is hit during optimize(), for example
/// due to disk full, the index will not be corrupt and no
@@ -3176,17 +3176,17 @@
/// the index will be in non-compound format even when
/// using compound file format. This will occur when the
/// Exception is hit during conversion of the segment into
- /// compound format.
+ /// compound format.
///
/// This call will optimize those segments present in
/// the index when the call started. If other threads are
/// still adding documents and flushing segments, those
/// newly created segments will not be optimized unless you
- /// call optimize again.
+ /// call optimize again.
///
/// NOTE: if this method hits an OutOfMemoryError
/// you should immediately close the writer. See above for details.
+ /// href="#OOME">above for details.
///
///
/// CorruptIndexException if the index is corrupt
@@ -3204,7 +3204,7 @@
///
/// NOTE: if this method hits an OutOfMemoryError
/// you should immediately close the writer. See above for details.
+ /// href="#OOME">above for details.
///
///
/// maximum number of segments left
@@ -3223,7 +3223,7 @@
///
/// NOTE: if this method hits an OutOfMemoryError
/// you should immediately close the writer. See above for details.
+ /// href="#OOME">above for details.
///
public virtual void Optimize(bool doWait)
{
@@ -3238,7 +3238,7 @@
///
/// NOTE: if this method hits an OutOfMemoryError
/// you should immediately close the writer. See above for details.
+ /// href="#OOME">above for details.
///
public virtual void Optimize(int maxNumSegments, bool doWait)
{
@@ -3368,7 +3368,7 @@
///
/// NOTE: if this method hits an OutOfMemoryError
/// you should immediately close the writer. See above for details.
+ /// href="#OOME">above for details.
///
public virtual void ExpungeDeletes(bool doWait)
{
@@ -3455,7 +3455,7 @@
///
/// NOTE: if this method hits an OutOfMemoryError
/// you should immediately close the writer. See above for details.
+ /// href="#OOME">above for details.
///
public virtual void ExpungeDeletes()
{
@@ -3473,7 +3473,7 @@
///
/// NOTE: if this method hits an OutOfMemoryError
/// you should immediately close the writer. See above for details.
+ /// href="#OOME">above for details.
///
public void MaybeMerge()
{
@@ -3905,9 +3905,9 @@
/// This method will drop all buffered documents and will
/// remove all segments from the index. This change will not be
/// visible until a {@link #Commit()} has been called. This method
- /// can be rolled back using {@link #Rollback()}.
+ /// can be rolled back using {@link #Rollback()}.
///
- /// NOTE: this method is much faster than using deleteDocuments( new MatchAllDocsQuery() ).
+ /// NOTE: this method is much faster than using deleteDocuments( new MatchAllDocsQuery() ).
///
/// NOTE: this method will forcefully abort all merges
/// in progress. If other threads are running {@link
@@ -4028,7 +4028,7 @@
/// Wait for any currently outstanding merges to finish.
///
/// It is guaranteed that any merges started prior to calling this method
- /// will have completed once this method completes.
+ /// will have completed once this method completes.
///
public virtual void WaitForMerges()
{
@@ -4097,7 +4097,7 @@
///
/// NOTE: if this method hits an OutOfMemoryError
/// you should immediately close the writer. See above for details.
+ /// href="#OOME">above for details.
///
///
/// Use {@link #addIndexesNoOptimize} instead,
@@ -4229,7 +4229,7 @@
/// handled: it does not commit a new segments_N file until
/// all indexes are added. This means if an Exception
/// occurs (for example disk full), then either no indexes
- /// will have been added or they all will have been.
+ /// will have been added or they all will have been.
///
/// Note that this requires temporary free space in the
/// Directory up to 2X the sum of all input indexes
@@ -4237,20 +4237,20 @@
/// are open against the starting index, then temporary
/// free space required will be higher by the size of the
/// starting index (see {@link #Optimize()} for details).
- ///
+ ///
///
/// Once this completes, the final size of the index
/// will be less than the sum of all input index sizes
/// (including the starting index). It could be quite a
/// bit smaller (if there were many pending deletes) or
- /// just slightly smaller.
+ /// just slightly smaller.
///
///
/// This requires this index not be among those to be added.
///
/// NOTE: if this method hits an OutOfMemoryError
/// you should immediately close the writer. See above for details.
+ /// href="#OOME">above for details.
///
///
/// CorruptIndexException if the index is corrupt
@@ -4434,8 +4434,8 @@
}
/// Merges the provided indexes into this index.
- /// After this completes, the index is optimized.
- /// The provided IndexReaders are not closed.
+ /// After this completes, the index is optimized.
+ /// The provided IndexReaders are not closed.
///
/// NOTE: while this is running, any attempts to
/// add or delete documents (with another thread) will be
@@ -4444,11 +4444,11 @@
/// See {@link #AddIndexesNoOptimize(Directory[])} for
/// details on transactional semantics, temporary free
/// space required in the Directory, and non-CFS segments
- /// on an Exception.
+ /// on an Exception.
///
/// NOTE: if this method hits an OutOfMemoryError
/// you should immediately close the writer. See above for details.
+ /// href="#OOME">above for details.
///
///
/// CorruptIndexException if the index is corrupt
@@ -4644,7 +4644,7 @@
///
/// NOTE: if this method hits an OutOfMemoryError
/// you should immediately close the writer. See above for details.
+ /// href="#OOME">above for details.
///
///
/// please call {@link #Commit()}) instead
@@ -4667,7 +4667,7 @@
///
/// NOTE: if this method hits an OutOfMemoryError
/// you should immediately close the writer. See above for details.
+ /// href="#OOME">above for details.
///
///
///
@@ -4688,7 +4688,7 @@
/// After calling this you must call either {@link
/// #Commit()} to finish the commit, or {@link
/// #Rollback()} to revert the commit and undo all changes
- /// done since the writer was opened.
+ /// done since the writer was opened.
///
/// You can also just call {@link #Commit(Map)} directly
/// without prepareCommit first in which case that method
@@ -4696,7 +4696,7 @@
///
/// NOTE: if this method hits an OutOfMemoryError
/// you should immediately close the writer. See above for details.
+ /// href="#OOME">above for details.
///
///
/// Opaque Map (String->String)
@@ -4753,7 +4753,7 @@
/// crash or power loss. Note that this does not wait for
/// any running background merges to finish. This may be a
/// costly operation, so you should test the cost in your
- /// application and do it only when really necessary.
+ /// application and do it only when really necessary.
///
/// Note that this operation calls Directory.sync on
/// the index files. That call should not return until the
@@ -4765,11 +4765,11 @@
/// performance. If you have such a device, and it does
/// not have a battery backup (for example) then on power
/// loss it may still lose data. Lucene cannot guarantee
- /// consistency on such devices.
+ /// consistency on such devices.
///
/// NOTE: if this method hits an OutOfMemoryError
/// you should immediately close the writer. See above for details.
+ /// href="#OOME">above for details.
///
///
///
@@ -4788,7 +4788,7 @@
///
/// NOTE: if this method hits an OutOfMemoryError
/// you should immediately close the writer. See above for details.
+ /// href="#OOME">above for details.
///
public void Commit(System.Collections.Generic.IDictionary commitUserData)
{
@@ -6618,7 +6618,7 @@
/// new near real-time reader after a merge completes.
///
/// NOTE: This API is experimental and might
- /// change in incompatible ways in the next release.
+ /// change in incompatible ways in the next release.
///
/// NOTE: warm is called before any deletes have
/// been carried over to the merged segment.
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/LogByteSizeMergePolicy.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/LogByteSizeMergePolicy.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/LogByteSizeMergePolicy.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/LogByteSizeMergePolicy.cs Mon Dec 14 19:05:31 2009
@@ -53,11 +53,11 @@
/// than 50 MB) are best for interactive indexing, as this
/// limits the length of pauses while indexing to a few
/// seconds. Larger values are best for batched indexing
- /// and speedier searches.
+ /// and speedier searches.
///
/// Note that {@link #setMaxMergeDocs} is also
/// used to check whether a segment is too large for
- /// merging (it's either or).
+ /// merging (it's either or).
///
public virtual void SetMaxMergeMB(double mb)
{
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/LogMergePolicy.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/LogMergePolicy.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/LogMergePolicy.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/LogMergePolicy.cs Mon Dec 14 19:05:31 2009
@@ -27,7 +27,7 @@
/// (beyond the merge factor upper bound) are encountered,
/// all segments within the level are merged. You can get or
/// set the merge factor using {@link #GetMergeFactor()} and
- /// {@link #SetMergeFactor(int)} respectively.
+ /// {@link #SetMergeFactor(int)} respectively.
///
/// This class is abstract and requires a subclass to
/// define the {@link #size} method which specifies how a
@@ -35,7 +35,7 @@
/// is one subclass that measures size by document count in
/// the segment. {@link LogByteSizeMergePolicy} is another
/// subclass that measures size as the total byte size of the
- /// file(s) for the segment.
+ /// file(s) for the segment.
///
public abstract class LogMergePolicy:MergePolicy
@@ -87,7 +87,7 @@
/// Returns the number of segments that are merged at
/// once and also controls the total number of segments
- /// allowed to accumulate in the index.
+ /// allowed to accumulate in the index.
///
public virtual int GetMergeFactor()
{
@@ -520,14 +520,14 @@
/// interactive indexing, as this limits the length of
/// pauses while indexing to a few seconds. Larger values
/// are best for batched indexing and speedier
- /// searches.
+ /// searches.
///
- /// The default value is {@link Integer#MAX_VALUE}.
+ /// The default value is {@link Integer#MAX_VALUE}.
///
/// The default merge policy ({@link
/// LogByteSizeMergePolicy}) also allows you to set this
/// limit by net size (in MB) of the segment, using {@link
- /// LogByteSizeMergePolicy#setMaxMergeMB}.
+ /// LogByteSizeMergePolicy#setMaxMergeMB}.
///
public virtual void SetMaxMergeDocs(int maxMergeDocs)
{
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/MergePolicy.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/MergePolicy.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/MergePolicy.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/MergePolicy.cs Mon Dec 14 19:05:31 2009
@@ -24,7 +24,7 @@
/// Expert: a MergePolicy determines the sequence of
/// primitive merge operations to be used for overall merge
- /// and optimize operations.
+ /// and optimize operations.
///
/// Whenever the segments in an index have been altered by
/// {@link IndexWriter}, either the addition of a newly
@@ -37,19 +37,19 @@
/// merges that should be done, or null if no merges are
/// necessary. When IndexWriter.optimize is called, it calls
/// {@link #findMergesForOptimize} and the MergePolicy should
- /// then return the necessary merges.
+ /// then return the necessary merges.
///
/// Note that the policy can return more than one merge at
/// a time. In this case, if the writer is using {@link
/// SerialMergeScheduler}, the merges will be run
/// sequentially but if it is using {@link
- /// ConcurrentMergeScheduler} they will be run concurrently.
+ /// ConcurrentMergeScheduler} they will be run concurrently.
///
/// The default MergePolicy is {@link
- /// LogByteSizeMergePolicy}.
+ /// LogByteSizeMergePolicy}.
///
/// NOTE: This API is new and still experimental
- /// (subject to change suddenly in the next release)
+ /// (subject to change suddenly in the next release)
///
/// NOTE: This class typically requires access to
/// package-private APIs (e.g. SegmentInfos) to do its job;
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/MergeScheduler.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/MergeScheduler.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/MergeScheduler.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/MergeScheduler.cs Mon Dec 14 19:05:31 2009
@@ -23,10 +23,10 @@
/// Expert: {@link IndexWriter} uses an instance
/// implementing this interface to execute the merges
/// selected by a {@link MergePolicy}. The default
- /// MergeScheduler is {@link ConcurrentMergeScheduler}.
+ /// MergeScheduler is {@link ConcurrentMergeScheduler}.
///
/// NOTE: This API is new and still experimental
- /// (subject to change suddenly in the next release)
+ /// (subject to change suddenly in the next release)
///
/// NOTE: This class typically requires access to
/// package-private APIs (eg, SegmentInfos) to do its job;
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/MultiReader.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/MultiReader.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/MultiReader.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/MultiReader.cs Mon Dec 14 19:05:31 2009
@@ -44,8 +44,8 @@
/// Construct a MultiReader aggregating the named set of (sub)readers.
/// Directory locking for delete, undeleteAll, and setNorm operations is
- /// left to the subreaders.
- /// Note that all subreaders are closed if this Multireader is closed.
+ /// left to the subreaders.
+ /// Note that all subreaders are closed if this MultiReader is closed.
///
/// set of (sub)readers
///
@@ -57,7 +57,7 @@
/// Construct a MultiReader aggregating the named set of (sub)readers.
/// Directory locking for delete, undeleteAll, and setNorm operations is
- /// left to the subreaders.
+ /// left to the subreaders.
///
/// indicates whether the subreaders should be closed
/// when this MultiReader is closed
@@ -98,7 +98,7 @@
}
/// Tries to reopen the subreaders.
- ///
+ ///
/// If one or more subreaders could be re-opened (i. e. subReader.reopen()
/// returned a new instance != subReader), then a new MultiReader instance
/// is returned, otherwise this instance is returned.
@@ -125,7 +125,7 @@
/// Clones the subreaders.
/// (see {@link IndexReader#clone()}).
- ///
+ ///
///
/// If subreaders are shared, then the reference count of those
/// readers is increased to ensure that the subreaders remain open
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/ParallelReader.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/ParallelReader.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/ParallelReader.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/ParallelReader.cs Mon Dec 14 19:05:31 2009
@@ -57,7 +57,7 @@
private bool hasDeletions;
/// Construct a ParallelReader.
- /// Note that all subreaders are closed if this ParallelReader is closed.
+ /// Note that all subreaders are closed if this ParallelReader is closed.
///
public ParallelReader():this(true)
{
@@ -144,7 +144,7 @@
}
/// Tries to reopen the subreaders.
- ///
+ ///
/// If one or more subreaders could be re-opened (i. e. subReader.reopen()
/// returned a new instance != subReader), then a new ParallelReader instance
/// is returned, otherwise this instance is returned.
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/Payload.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/Payload.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/Payload.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/Payload.cs Mon Dec 14 19:05:31 2009
@@ -31,7 +31,7 @@
/// produces payload data.
///
/// Use {@link TermPositions#GetPayloadLength()} and {@link TermPositions#GetPayload(byte[], int)}
- /// to retrieve the payloads from the index.
+ /// to retrieve the payloads from the index.
///
///
[Serializable]
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/SegmentInfo.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/SegmentInfo.cs?rev=890443&r1=890442&r2=890443&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/SegmentInfo.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/SegmentInfo.cs Mon Dec 14 19:05:31 2009
@@ -29,7 +29,7 @@
/// to the segment.
///
/// * NOTE: This API is new and still experimental
- /// (subject to change suddenly in the next release)
+ /// (subject to change suddenly in the next release)
///
public sealed class SegmentInfo : System.ICloneable
{