lucenenet-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From ccurr...@apache.org
Subject svn commit: r1303294 [2/6] - in /incubator/lucene.net/trunk: src/contrib/Analyzers/AR/ src/contrib/Analyzers/BR/ src/contrib/Analyzers/CJK/ src/contrib/Analyzers/Cn/ src/contrib/Analyzers/Compound/ src/contrib/Analyzers/Cz/ src/contrib/Analyzers/De/ sr...
Date Wed, 21 Mar 2012 06:04:35 GMT
Modified: incubator/lucene.net/trunk/src/core/Analysis/PorterStemmer.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Analysis/PorterStemmer.cs?rev=1303294&r1=1303293&r2=1303294&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Analysis/PorterStemmer.cs (original)
+++ incubator/lucene.net/trunk/src/core/Analysis/PorterStemmer.cs Wed Mar 21 06:04:26 2012
@@ -96,23 +96,23 @@ namespace Lucene.Net.Analysis
 		{
 			return new System.String(b, 0, i);
 		}
-		
-		/// <summary> Returns the length of the word resulting from the stemming process.</summary>
-		public virtual int GetResultLength()
-		{
-			return i;
-		}
-		
-		/// <summary> Returns a reference to a character buffer containing the results of
-		/// the stemming process.  You also need to consult getResultLength()
-		/// to determine the length of the result.
-		/// </summary>
-		public virtual char[] GetResultBuffer()
-		{
-			return b;
-		}
-		
-		/* cons(i) is true <=> b[i] is a consonant. */
+
+	    /// <summary> Returns the length of the word resulting from the stemming process.</summary>
+	    public virtual int ResultLength
+	    {
+	        get { return i; }
+	    }
+
+	    /// <summary> Returns a reference to a character buffer containing the results of
+	    /// the stemming process.  You also need to consult getResultLength()
+	    /// to determine the length of the result.
+	    /// </summary>
+	    public virtual char[] ResultBuffer
+	    {
+	        get { return b; }
+	    }
+
+	    /* cons(i) is true <=> b[i] is a consonant. */
 		
 		private bool Cons(int i)
 		{

Modified: incubator/lucene.net/trunk/src/core/Analysis/SimpleAnalyzer.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Analysis/SimpleAnalyzer.cs?rev=1303294&r1=1303293&r2=1303294&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Analysis/SimpleAnalyzer.cs (original)
+++ incubator/lucene.net/trunk/src/core/Analysis/SimpleAnalyzer.cs Wed Mar 21 06:04:26 2012
@@ -33,11 +33,11 @@ namespace Lucene.Net.Analysis
 		
 		public override TokenStream ReusableTokenStream(System.String fieldName, System.IO.TextReader reader)
 		{
-			Tokenizer tokenizer = (Tokenizer) GetPreviousTokenStream();
+			Tokenizer tokenizer = (Tokenizer) PreviousTokenStream;
 			if (tokenizer == null)
 			{
 				tokenizer = new LowerCaseTokenizer(reader);
-				SetPreviousTokenStream(tokenizer);
+				PreviousTokenStream = tokenizer;
 			}
 			else
 				tokenizer.Reset(reader);

Modified: incubator/lucene.net/trunk/src/core/Analysis/Standard/StandardAnalyzer.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Analysis/Standard/StandardAnalyzer.cs?rev=1303294&r1=1303293&r2=1303294&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Analysis/Standard/StandardAnalyzer.cs (original)
+++ incubator/lucene.net/trunk/src/core/Analysis/Standard/StandardAnalyzer.cs Wed Mar 21 06:04:26 2012
@@ -145,11 +145,11 @@ namespace Lucene.Net.Analysis.Standard
 				// tokenStream but not reusableTokenStream
 				return TokenStream(fieldName, reader);
 			}
-			SavedStreams streams = (SavedStreams) GetPreviousTokenStream();
+			SavedStreams streams = (SavedStreams) PreviousTokenStream;
 			if (streams == null)
 			{
 				streams = new SavedStreams();
-				SetPreviousTokenStream(streams);
+				PreviousTokenStream = streams;
 				streams.tokenStream = new StandardTokenizer(matchVersion, reader);
 				streams.filteredTokenStream = new StandardFilter(streams.tokenStream);
 				streams.filteredTokenStream = new LowerCaseFilter(streams.filteredTokenStream);

Modified: incubator/lucene.net/trunk/src/core/Analysis/Standard/StandardFilter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Analysis/Standard/StandardFilter.cs?rev=1303294&r1=1303293&r2=1303294&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Analysis/Standard/StandardFilter.cs (original)
+++ incubator/lucene.net/trunk/src/core/Analysis/Standard/StandardFilter.cs Wed Mar 21 06:04:26 2012
@@ -59,7 +59,7 @@ namespace Lucene.Net.Analysis.Standard
 			
 			char[] buffer = termAtt.TermBuffer();
 			int bufferLength = termAtt.TermLength();
-			System.String type = typeAtt.Type();
+			System.String type = typeAtt.Type;
 			
 			if ((System.Object) type == (System.Object) APOSTROPHE_TYPE && bufferLength >= 2 && buffer[bufferLength - 2] == '\'' && (buffer[bufferLength - 1] == 's' || buffer[bufferLength - 1] == 'S'))
 			{

Modified: incubator/lucene.net/trunk/src/core/Analysis/Standard/StandardTokenizer.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Analysis/Standard/StandardTokenizer.cs?rev=1303294&r1=1303293&r2=1303294&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Analysis/Standard/StandardTokenizer.cs (original)
+++ incubator/lucene.net/trunk/src/core/Analysis/Standard/StandardTokenizer.cs Wed Mar 21 06:04:26 2012
@@ -187,17 +187,17 @@ namespace Lucene.Net.Analysis.Standard
 					{
 						if (replaceInvalidAcronym)
 						{
-							typeAtt.SetType(StandardTokenizerImpl.TOKEN_TYPES[StandardTokenizerImpl.HOST]);
+							typeAtt.Type = StandardTokenizerImpl.TOKEN_TYPES[StandardTokenizerImpl.HOST];
 							termAtt.SetTermLength(termAtt.TermLength() - 1); // remove extra '.'
 						}
 						else
 						{
-							typeAtt.SetType(StandardTokenizerImpl.TOKEN_TYPES[StandardTokenizerImpl.ACRONYM]);
+							typeAtt.Type = StandardTokenizerImpl.TOKEN_TYPES[StandardTokenizerImpl.ACRONYM];
 						}
 					}
 					else
 					{
-						typeAtt.SetType(StandardTokenizerImpl.TOKEN_TYPES[tokenType]);
+						typeAtt.Type = StandardTokenizerImpl.TOKEN_TYPES[tokenType];
 					}
 					return true;
 				}

Modified: incubator/lucene.net/trunk/src/core/Analysis/StopAnalyzer.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Analysis/StopAnalyzer.cs?rev=1303294&r1=1303293&r2=1303294&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Analysis/StopAnalyzer.cs (original)
+++ incubator/lucene.net/trunk/src/core/Analysis/StopAnalyzer.cs Wed Mar 21 06:04:26 2012
@@ -118,13 +118,13 @@ namespace Lucene.Net.Analysis
 		
 		public override TokenStream ReusableTokenStream(System.String fieldName, System.IO.TextReader reader)
 		{
-			SavedStreams streams = (SavedStreams) GetPreviousTokenStream();
+			SavedStreams streams = (SavedStreams) PreviousTokenStream;
 			if (streams == null)
 			{
 				streams = new SavedStreams(this);
 				streams.source = new LowerCaseTokenizer(reader);
 				streams.result = new StopFilter(enablePositionIncrements, streams.source, stopWords);
-				SetPreviousTokenStream(streams);
+				PreviousTokenStream = streams;
 			}
 			else
 				streams.source.Reset(reader);

Modified: incubator/lucene.net/trunk/src/core/Analysis/TeeSinkTokenFilter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Analysis/TeeSinkTokenFilter.cs?rev=1303294&r1=1303293&r2=1303294&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Analysis/TeeSinkTokenFilter.cs (original)
+++ incubator/lucene.net/trunk/src/core/Analysis/TeeSinkTokenFilter.cs Wed Mar 21 06:04:26 2012
@@ -111,7 +111,7 @@ namespace Lucene.Net.Analysis
 				throw new System.ArgumentException("The supplied sink is not compatible to this tee");
 			}
 			// add eventually missing attribute impls to the existing sink
-            foreach (AttributeImpl impl in this.CloneAttributes().AttributeImpls)
+            foreach (AttributeImpl impl in this.CloneAttributes().GetAttributeImplsIterator())
             {
                 sink.AddAttributeImpl(impl);
             }

Modified: incubator/lucene.net/trunk/src/core/Analysis/Token.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Analysis/Token.cs?rev=1303294&r1=1303293&r2=1303294&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Analysis/Token.cs (original)
+++ incubator/lucene.net/trunk/src/core/Analysis/Token.cs Wed Mar 21 06:04:26 2012
@@ -122,7 +122,7 @@ namespace Lucene.Net.Analysis
 	/// <seealso cref="Lucene.Net.Index.Payload">
 	/// </seealso>
 	[Serializable]
-	public class Token:AttributeImpl, System.ICloneable, TermAttribute, TypeAttribute, PositionIncrementAttribute, FlagsAttribute, OffsetAttribute, PayloadAttribute
+	public class Token : AttributeImpl, TermAttribute, TypeAttribute, PositionIncrementAttribute, FlagsAttribute, OffsetAttribute, PayloadAttribute
 	{
 		public const System.String DEFAULT_TYPE = "word";
 		
@@ -441,44 +441,30 @@ namespace Lucene.Net.Analysis
 				throw new System.ArgumentException("length " + length + " exceeds the size of the termBuffer (" + termBuffer.Length + ")");
 			termLength = length;
 		}
-		
-		/// <summary>Returns this Token's starting offset, the position of the first character
-		/// corresponding to this token in the source text.
-		/// Note that the difference between endOffset() and startOffset() may not be
-		/// equal to <see cref="TermLength"/>, as the term text may have been altered by a
-		/// stemmer or some other filter. 
-		/// </summary>
-		public int StartOffset()
-		{
-			return startOffset;
-		}
-		
-		/// <summary>Set the starting offset.</summary>
-		/// <seealso cref="StartOffset()">
-		/// </seealso>
-		public virtual void  SetStartOffset(int offset)
-		{
-			this.startOffset = offset;
-		}
-		
-		/// <summary>Returns this Token's ending offset, one greater than the position of the
-		/// last character corresponding to this token in the source text. The length
-		/// of the token in the source text is (endOffset - startOffset). 
-		/// </summary>
-		public int EndOffset()
-		{
-			return endOffset;
-		}
-		
-		/// <summary>Set the ending offset.</summary>
-		/// <seealso cref="EndOffset()">
-		/// </seealso>
-		public virtual void  SetEndOffset(int offset)
-		{
-			this.endOffset = offset;
-		}
-		
-		/// <summary>Set the starting and ending offset.
+
+	    /// <summary>Gets or sets this Token's starting offset, the position of the first character
+	    /// corresponding to this token in the source text.
+	    /// Note that the difference between endOffset() and startOffset() may not be
+	    /// equal to <see cref="TermLength"/>, as the term text may have been altered by a
+	    /// stemmer or some other filter. 
+	    /// </summary>
+	    public virtual int StartOffset
+	    {
+	        get { return startOffset; }
+	        set { this.startOffset = value; }
+	    }
+
+	    /// <summary>Gets or sets this Token's ending offset, one greater than the position of the
+	    /// last character corresponding to this token in the source text. The length
+	    /// of the token in the source text is (endOffset - startOffset). 
+	    /// </summary>
+	    public virtual int EndOffset
+	    {
+	        get { return endOffset; }
+	        set { this.endOffset = value; }
+	    }
+
+	    /// <summary>Set the starting and ending offset.
 		/// See StartOffset() and EndOffset()
 		/// </summary>
 		public virtual void  SetOffset(int startOffset, int endOffset)
@@ -486,20 +472,13 @@ namespace Lucene.Net.Analysis
 			this.startOffset = startOffset;
 			this.endOffset = endOffset;
 		}
-		
-		/// <summary>Returns this Token's lexical type.  Defaults to "word". </summary>
-		public System.String Type()
-		{
-			return type;
-		}
-		
-		/// <summary>Set the lexical type.</summary>
-		/// <seealso cref="Type()">
-		/// </seealso>
-		public void  SetType(System.String type)
-		{
-			this.type = type;
-		}
+
+	    /// <summary>Returns this Token's lexical type.  Defaults to "word". </summary>
+	    public string Type
+	    {
+	        get { return type; }
+	        set { this.type = value; }
+	    }
 
 	    /// <summary> EXPERIMENTAL:  While we think this is here to stay, we may want to change it to be a long.
 	    /// <p/>
@@ -827,7 +806,7 @@ namespace Lucene.Net.Analysis
 				((PositionIncrementAttribute) target).PositionIncrement = positionIncrement;
 				((PayloadAttribute) target).Payload = (payload == null)?null:(Payload) payload.Clone();
 				((FlagsAttribute) target).Flags = flags;
-				((TypeAttribute) target).SetType(type);
+				((TypeAttribute) target).Type = type;
 			}
 		}
        

Modified: incubator/lucene.net/trunk/src/core/Analysis/Tokenattributes/OffsetAttribute.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Analysis/Tokenattributes/OffsetAttribute.cs?rev=1303294&r1=1303293&r2=1303294&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Analysis/Tokenattributes/OffsetAttribute.cs (original)
+++ incubator/lucene.net/trunk/src/core/Analysis/Tokenattributes/OffsetAttribute.cs Wed Mar 21 06:04:26 2012
@@ -23,27 +23,27 @@ namespace Lucene.Net.Analysis.Tokenattri
 {
 	
 	/// <summary> The start and end character offset of a Token. </summary>
-	public interface OffsetAttribute:Attribute
+	public interface OffsetAttribute : Attribute
 	{
-		/// <summary>Returns this Token's starting offset, the position of the first character
-		/// corresponding to this token in the source text.
-		/// Note that the difference between endOffset() and startOffset() may not be
-		/// equal to termText.length(), as the term text may have been altered by a
-		/// stemmer or some other filter. 
-		/// </summary>
-		int StartOffset();
-		
-		
-		/// <summary>Set the starting and ending offset.
+	    /// <summary>Returns this Token's starting offset, the position of the first character
+	    /// corresponding to this token in the source text.
+	    /// Note that the difference between endOffset() and startOffset() may not be
+	    /// equal to termText.length(), as the term text may have been altered by a
+	    /// stemmer or some other filter. 
+	    /// </summary>
+	    int StartOffset { get; }
+
+
+	    /// <summary>Set the starting and ending offset.
         /// See StartOffset() and EndOffset()
         /// </summary>
 		void  SetOffset(int startOffset, int endOffset);
-		
-		
-		/// <summary>Returns this Token's ending offset, one greater than the position of the
-		/// last character corresponding to this token in the source text. The length
-		/// of the token in the source text is (endOffset - startOffset). 
-		/// </summary>
-		int EndOffset();
+
+
+	    /// <summary>Returns this Token's ending offset, one greater than the position of the
+	    /// last character corresponding to this token in the source text. The length
+	    /// of the token in the source text is (endOffset - startOffset). 
+	    /// </summary>
+	    int EndOffset { get; }
 	}
 }
\ No newline at end of file

Modified: incubator/lucene.net/trunk/src/core/Analysis/Tokenattributes/OffsetAttributeImpl.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Analysis/Tokenattributes/OffsetAttributeImpl.cs?rev=1303294&r1=1303293&r2=1303294&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Analysis/Tokenattributes/OffsetAttributeImpl.cs (original)
+++ incubator/lucene.net/trunk/src/core/Analysis/Tokenattributes/OffsetAttributeImpl.cs Wed Mar 21 06:04:26 2012
@@ -28,20 +28,20 @@ namespace Lucene.Net.Analysis.Tokenattri
 	{
 		private int startOffset;
 		private int endOffset;
-		
-		/// <summary>Returns this Token's starting offset, the position of the first character
-		/// corresponding to this token in the source text.
-		/// Note that the difference between endOffset() and startOffset() may not be
-		/// equal to termText.length(), as the term text may have been altered by a
-		/// stemmer or some other filter. 
-		/// </summary>
-		public virtual int StartOffset()
-		{
-			return startOffset;
-		}
+
+	    /// <summary>Returns this Token's starting offset, the position of the first character
+	    /// corresponding to this token in the source text.
+	    /// Note that the difference between endOffset() and startOffset() may not be
+	    /// equal to termText.length(), as the term text may have been altered by a
+	    /// stemmer or some other filter. 
+	    /// </summary>
+	    public virtual int StartOffset
+	    {
+	        get { return startOffset; }
+	    }
 
 
-        /// <summary>Set the starting and ending offset.
+	    /// <summary>Set the starting and ending offset.
         /// See StartOffset() and EndOffset()
         /// </summary>
 		public virtual void  SetOffset(int startOffset, int endOffset)
@@ -49,19 +49,19 @@ namespace Lucene.Net.Analysis.Tokenattri
 			this.startOffset = startOffset;
 			this.endOffset = endOffset;
 		}
-		
-		
-		/// <summary>Returns this Token's ending offset, one greater than the position of the
-		/// last character corresponding to this token in the source text. The length
-		/// of the token in the source text is (endOffset - startOffset). 
-		/// </summary>
-		public virtual int EndOffset()
-		{
-			return endOffset;
-		}
-		
-		
-		public override void  Clear()
+
+
+	    /// <summary>Returns this Token's ending offset, one greater than the position of the
+	    /// last character corresponding to this token in the source text. The length
+	    /// of the token in the source text is (endOffset - startOffset). 
+	    /// </summary>
+	    public virtual int EndOffset
+	    {
+	        get { return endOffset; }
+	    }
+
+
+	    public override void  Clear()
 		{
 			startOffset = 0;
 			endOffset = 0;

Modified: incubator/lucene.net/trunk/src/core/Analysis/Tokenattributes/TypeAttribute.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Analysis/Tokenattributes/TypeAttribute.cs?rev=1303294&r1=1303293&r2=1303294&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Analysis/Tokenattributes/TypeAttribute.cs (original)
+++ incubator/lucene.net/trunk/src/core/Analysis/Tokenattributes/TypeAttribute.cs Wed Mar 21 06:04:26 2012
@@ -25,12 +25,7 @@ namespace Lucene.Net.Analysis.Tokenattri
 	/// <summary> A Token's lexical type. The Default value is "word". </summary>
 	public interface TypeAttribute:Attribute
 	{
-		/// <summary>Returns this Token's lexical type.  Defaults to "word". </summary>
-		System.String Type();
-		
-		/// <summary>Set the lexical type.</summary>
-		/// <seealso cref="Type()">
-		/// </seealso>
-		void  SetType(System.String type);
+	    /// <summary>Gets or sets this Token's lexical type.  Defaults to "word". </summary>
+	    string Type { get; set; }
 	}
 }
\ No newline at end of file

Modified: incubator/lucene.net/trunk/src/core/Analysis/Tokenattributes/TypeAttributeImpl.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Analysis/Tokenattributes/TypeAttributeImpl.cs?rev=1303294&r1=1303293&r2=1303294&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Analysis/Tokenattributes/TypeAttributeImpl.cs (original)
+++ incubator/lucene.net/trunk/src/core/Analysis/Tokenattributes/TypeAttributeImpl.cs Wed Mar 21 06:04:26 2012
@@ -37,22 +37,15 @@ namespace Lucene.Net.Analysis.Tokenattri
 		{
 			this.type = type;
 		}
-		
-		/// <summary>Returns this Token's lexical type.  Defaults to "word". </summary>
-		public virtual System.String Type()
-		{
-			return type;
-		}
-		
-		/// <summary>Set the lexical type.</summary>
-		/// <seealso cref="Type()">
-		/// </seealso>
-		public virtual void  SetType(System.String type)
-		{
-			this.type = type;
-		}
-		
-		public override void  Clear()
+
+	    /// <summary>Returns this Token's lexical type.  Defaults to "word". </summary>
+	    public virtual string Type
+	    {
+	        get { return type; }
+	        set { this.type = value; }
+	    }
+
+	    public override void  Clear()
 		{
 			type = DEFAULT_TYPE;
 		}
@@ -80,7 +73,7 @@ namespace Lucene.Net.Analysis.Tokenattri
 		public override void  CopyTo(AttributeImpl target)
 		{
 			TypeAttribute t = (TypeAttribute) target;
-			t.SetType(type);
+			t.Type = type;
 		}
 		
 		override public System.Object Clone()

Modified: incubator/lucene.net/trunk/src/core/Analysis/WhitespaceAnalyzer.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Analysis/WhitespaceAnalyzer.cs?rev=1303294&r1=1303293&r2=1303294&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Analysis/WhitespaceAnalyzer.cs (original)
+++ incubator/lucene.net/trunk/src/core/Analysis/WhitespaceAnalyzer.cs Wed Mar 21 06:04:26 2012
@@ -31,11 +31,11 @@ namespace Lucene.Net.Analysis
 		
 		public override TokenStream ReusableTokenStream(System.String fieldName, System.IO.TextReader reader)
 		{
-			Tokenizer tokenizer = (Tokenizer) GetPreviousTokenStream();
+			Tokenizer tokenizer = (Tokenizer) PreviousTokenStream;
 			if (tokenizer == null)
 			{
 				tokenizer = new WhitespaceTokenizer(reader);
-				SetPreviousTokenStream(tokenizer);
+				PreviousTokenStream = tokenizer;
 			}
 			else
 				tokenizer.Reset(reader);

Modified: incubator/lucene.net/trunk/src/core/Document/AbstractField.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Document/AbstractField.cs?rev=1303294&r1=1303293&r2=1303294&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Document/AbstractField.cs (original)
+++ incubator/lucene.net/trunk/src/core/Document/AbstractField.cs Wed Mar 21 06:04:26 2012
@@ -171,10 +171,10 @@ namespace Lucene.Net.Documents
 	    /// to know which range of bytes in this
 	    /// returned array belong to the field.
 	    /// </summary>
-	    /// <value> reference to the Field value as byte[]. </value>
-	    public virtual byte[] BinaryValue
+	    /// <returns> reference to the Field value as byte[]. </returns>
+	    public virtual byte[] GetBinaryValue()
 	    {
-	        get { return GetBinaryValue(null); }
+	        return GetBinaryValue(null);
 	    }
 
 	    public virtual byte[] GetBinaryValue(byte[] result)

Modified: incubator/lucene.net/trunk/src/core/Document/Document.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Document/Document.cs?rev=1303294&r1=1303293&r2=1303294&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Document/Document.cs (original)
+++ incubator/lucene.net/trunk/src/core/Document/Document.cs Wed Mar 21 06:04:26 2012
@@ -100,27 +100,12 @@ namespace Lucene.Net.Documents
 		public Document()
 		{
 		}
-		
-		
-		/// <summary>Sets a boost factor for hits on any field of this document.  This value
-		/// will be multiplied into the score of all hits on this document.
-		/// 
-		/// <p/>The default value is 1.0.
-		/// 
-		/// <p/>Values are multiplied into the value of <see cref="IFieldable.GetBoost()" /> of
-		/// each field in this document.  Thus, this method in effect sets a default
-		/// boost for the fields of this document.
-		/// 
-		/// </summary>
-		/// <seealso cref="IFieldable.SetBoost(float)">
-		/// </seealso>
-		public void  SetBoost(float boost)
-		{
-			this.boost = boost;
-		}
 
-	    /// <summary>Returns, at indexing time, the boost factor as set by <see cref="SetBoost(float)" />. 
-	    /// 
+
+	    /// <summary>Gets or sets, at indexing time, the boost factor. 
+	    /// <para>
+	    /// The default is 1.0
+	    /// </para>
 	    /// <p/>Note that once a document is indexed this value is no longer available
 	    /// from the index.  At search time, for retrieved documents, this method always 
 	    /// returns 1. This however does not mean that the boost value set at  indexing 
@@ -128,13 +113,11 @@ namespace Lucene.Net.Documents
 	    /// stored elsewhere, for better indexing and search performance. (For more 
 	    /// information see the "norm(t,d)" part of the scoring formula in 
 	    /// <see cref="Lucene.Net.Search.Similarity">Similarity</see>.)
-	    /// 
 	    /// </summary>
-	    /// <seealso cref="SetBoost(float)">
-	    /// </seealso>
 	    public float Boost
 	    {
 	        get { return boost; }
+	        set { this.boost = value; }
 	    }
 
 	    /// <summary> <p/>Adds a field to a document.  Several fields may be added with
@@ -346,7 +329,7 @@ namespace Lucene.Net.Documents
 			foreach(IFieldable field in fields)
 			{
 				if (field.Name.Equals(name) && (field.IsBinary))
-					result.Add(field.BinaryValue);
+					result.Add(field.GetBinaryValue());
 			}
 			
 			if (result.Count == 0)
@@ -370,7 +353,7 @@ namespace Lucene.Net.Documents
 			foreach(IFieldable field in fields)
 			{
 				if (field.Name.Equals(name) && (field.IsBinary))
-					return field.BinaryValue;
+					return field.GetBinaryValue();
 			}
 			return null;
 		}

Modified: incubator/lucene.net/trunk/src/core/Document/Fieldable.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Document/Fieldable.cs?rev=1303294&r1=1303293&r2=1303294&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Document/Fieldable.cs (original)
+++ incubator/lucene.net/trunk/src/core/Document/Fieldable.cs Wed Mar 21 06:04:26 2012
@@ -165,8 +165,8 @@ namespace Lucene.Net.Documents
 	    /// to know which range of bytes in this
 	    /// returned array belong to the field.
 	    /// </summary>
-	    /// <value> reference to the Field value as byte[]. </value>
-	    byte[] BinaryValue { get; }
+	    /// <returns> reference to the Field value as byte[]. </returns>
+	    byte[] GetBinaryValue();
 
 	    /// <summary> Return the raw byte[] for the binary field.  Note that
         /// you must also call <see cref="BinaryLength" /> and <see cref="BinaryOffset" />

Modified: incubator/lucene.net/trunk/src/core/Index/AbstractAllTermDocs.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Index/AbstractAllTermDocs.cs?rev=1303294&r1=1303293&r2=1303294&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Index/AbstractAllTermDocs.cs (original)
+++ incubator/lucene.net/trunk/src/core/Index/AbstractAllTermDocs.cs Wed Mar 21 06:04:26 2012
@@ -33,7 +33,7 @@ namespace Lucene.Net.Index
     public abstract class AbstractAllTermDocs : TermDocs
     {
         protected int maxDoc;
-        protected int doc = -1;
+        protected int _doc = -1;
 
         protected AbstractAllTermDocs(int maxDoc)
         {
@@ -44,7 +44,7 @@ namespace Lucene.Net.Index
         {
             if (term == null)
             {
-                doc = -1;
+                _doc = -1;
             }
             else
             {
@@ -59,7 +59,7 @@ namespace Lucene.Net.Index
 
         public int Doc
         {
-            get { return doc; }
+            get { return _doc; }
         }
 
         public int Freq
@@ -69,36 +69,36 @@ namespace Lucene.Net.Index
 
         public bool Next()
         {
-            return SkipTo(doc + 1);
+            return SkipTo(_doc + 1);
         }
 
         public int Read(int[] docs, int[] freqs)
         {
             int length = docs.Length;
             int i = 0;
-            while (i < length && doc < maxDoc)
+            while (i < length && _doc < maxDoc)
             {
-                if (!IsDeleted(doc))
+                if (!IsDeleted(_doc))
                 {
-                    docs[i] = doc;
+                    docs[i] = _doc;
                     freqs[i] = 1;
                     ++i;
                 }
-                doc++;
+                _doc++;
             }
             return i;
         }
 
         public bool SkipTo(int target)
         {
-            doc = target;
-            while (doc < maxDoc)
+            _doc = target;
+            while (_doc < maxDoc)
             {
-                if (!IsDeleted(doc))
+                if (!IsDeleted(_doc))
                 {
                     return true;
                 }
-                doc++;
+                _doc++;
             }
             return false;
         }

Modified: incubator/lucene.net/trunk/src/core/Index/CheckIndex.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Index/CheckIndex.cs?rev=1303294&r1=1303293&r2=1303294&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Index/CheckIndex.cs (original)
+++ incubator/lucene.net/trunk/src/core/Index/CheckIndex.cs Wed Mar 21 06:04:26 2012
@@ -467,8 +467,8 @@ namespace Lucene.Net.Index
 				
 				try
 				{
-					Msg("    compound=" + info.UseCompoundFile);
-					segInfoStat.compound = info.UseCompoundFile;
+					Msg("    compound=" + info.GetUseCompoundFile());
+					segInfoStat.compound = info.GetUseCompoundFile();
 					Msg("    hasProx=" + info.HasProx);
 					segInfoStat.hasProx = info.HasProx;
 					Msg("    numFiles=" + info.Files().Count);
@@ -510,7 +510,7 @@ namespace Lucene.Net.Index
 					
 					segInfoStat.openReaderPassed = true;
 					
-					int numDocs = reader.NumDocs;
+					int numDocs = reader.GetNumDocs();
 					toLoseDocCount = numDocs;
 					if (reader.HasDeletions)
 					{
@@ -781,7 +781,7 @@ namespace Lucene.Net.Index
 				}
 				
 				// Validate docCount
-				if (status.docCount != reader.NumDocs)
+				if (status.docCount != reader.GetNumDocs())
 				{
 					throw new System.SystemException("docCount=" + status.docCount + " but saw " + status.docCount + " undeleted docs");
 				}
@@ -818,7 +818,7 @@ namespace Lucene.Net.Index
 					if (!reader.IsDeleted(j))
 					{
 						status.docCount++;
-						TermFreqVector[] tfv = reader.GetTermFreqVectors(j);
+						ITermFreqVector[] tfv = reader.GetTermFreqVectors(j);
 						if (tfv != null)
 						{
 							status.totVectors += tfv.Length;

Modified: incubator/lucene.net/trunk/src/core/Index/CompoundFileReader.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Index/CompoundFileReader.cs?rev=1303294&r1=1303293&r2=1303294&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Index/CompoundFileReader.cs (original)
+++ incubator/lucene.net/trunk/src/core/Index/CompoundFileReader.cs Wed Mar 21 06:04:26 2012
@@ -116,23 +116,11 @@ namespace Lucene.Net.Index
 	        get { return directory; }
 	    }
 
-        [Obsolete("Use Directory property instead.")]
-        public virtual Directory GetDirectory()
-        {
-            return Directory;
-        }
-
 	    public virtual string Name
 	    {
 	        get { return fileName; }
 	    }
 
-        [Obsolete("Use Name property instead.")]
-        public virtual System.String GetName()
-        {
-            return fileName;
-        }
-
 	    protected override void Dispose(bool disposing)
         {
             lock (this)

Modified: incubator/lucene.net/trunk/src/core/Index/CompoundFileWriter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Index/CompoundFileWriter.cs?rev=1303294&r1=1303293&r2=1303294&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Index/CompoundFileWriter.cs (original)
+++ incubator/lucene.net/trunk/src/core/Index/CompoundFileWriter.cs Wed Mar 21 06:04:26 2012
@@ -94,26 +94,12 @@ namespace Lucene.Net.Index
 	        get { return directory; }
 	    }
 
-        /// <summary>Returns the directory of the compound file. </summary>
-        [Obsolete("Use Directory property instead")]
-        public Directory GetDirectory()
-        {
-            return Directory;
-        }
-
 	    /// <summary>Returns the name of the compound file. </summary>
 	    public string Name
 	    {
 	        get { return fileName; }
 	    }
 
-        /// <summary>Returns the name of the compound file. </summary>
-        [Obsolete("Use Name property instead")]
-        public System.String GetName()
-        {
-            return Name;
-        }
-
 	    /// <summary>Add a source stream. <c>file</c> is the string by which the 
 		/// sub-stream will be known in the compound stream.
 		/// 

Modified: incubator/lucene.net/trunk/src/core/Index/ConcurrentMergeScheduler.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Index/ConcurrentMergeScheduler.cs?rev=1303294&r1=1303293&r2=1303294&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Index/ConcurrentMergeScheduler.cs (original)
+++ incubator/lucene.net/trunk/src/core/Index/ConcurrentMergeScheduler.cs Wed Mar 21 06:04:26 2012
@@ -79,7 +79,8 @@ namespace Lucene.Net.Index
 		/// slightly higher priority than) the first thread that
 		/// calls merge. 
 		/// </summary>
-		public virtual int GetMergeThreadPriority()
+        [System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Design", "CA1024:UsePropertiesWhereAppropriate")]
+        public virtual int GetMergeThreadPriority()
 		{
 			lock (this)
 			{
@@ -108,7 +109,7 @@ namespace Lucene.Net.Index
 		
 		private bool Verbose()
 		{
-			return writer != null && writer.Verbose();
+			return writer != null && writer.Verbose;
 		}
 		
 		private void  Message(System.String message)
@@ -197,7 +198,7 @@ namespace Lucene.Net.Index
 			
 			InitMergeThreadPriority();
 			
-			dir = writer.GetDirectory();
+			dir = writer.Directory;
 			
 			// First, quickly run through the newly proposed merges
 			// and add any orthogonal merges (ie a merge not

Modified: incubator/lucene.net/trunk/src/core/Index/DirectoryReader.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Index/DirectoryReader.cs?rev=1303294&r1=1303293&r2=1303294&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Index/DirectoryReader.cs (original)
+++ incubator/lucene.net/trunk/src/core/Index/DirectoryReader.cs Wed Mar 21 06:04:26 2012
@@ -173,7 +173,7 @@ namespace Lucene.Net.Index
         // Used by near real-time search
         internal DirectoryReader(IndexWriter writer, SegmentInfos infos, int termInfosIndexDivisor)
         {
-            this.directory = writer.GetDirectory();
+            this.directory = writer.Directory;
             this.readOnly = true;
             segmentInfos = infos;
             segmentInfosStart = (SegmentInfos) infos.Clone();
@@ -190,7 +190,7 @@ namespace Lucene.Net.Index
             // no need to process segments in reverse order
             int numSegments = infos.Count;
             SegmentReader[] readers = new SegmentReader[numSegments];
-            Directory dir = writer.GetDirectory();
+            Directory dir = writer.Directory;
             int upto = 0;
             
             for (int i = 0; i < numSegments; i++)
@@ -290,7 +290,7 @@ namespace Lucene.Net.Index
                 try
                 {
                     SegmentReader newReader;
-                    if (newReaders[i] == null || infos.Info(i).UseCompoundFile != newReaders[i].SegmentInfo.UseCompoundFile)
+                    if (newReaders[i] == null || infos.Info(i).GetUseCompoundFile() != newReaders[i].SegmentInfo.GetUseCompoundFile())
                     {
                         
                         // We should never see a totally new segment during cloning
@@ -524,7 +524,7 @@ namespace Lucene.Net.Index
                         System.Diagnostics.Debug.Assert(writeLock != null);
                         // so no other writer holds the write lock, which
                         // means no changes could have been done to the index:
-                        System.Diagnostics.Debug.Assert(IsCurrent);
+                        System.Diagnostics.Debug.Assert(IsCurrent());
 
                         if (openReadOnly)
                         {
@@ -535,7 +535,7 @@ namespace Lucene.Net.Index
                             return this;
                         }
                     }
-                    else if (IsCurrent)
+                    else if (IsCurrent())
                     {
                         if (openReadOnly != readOnly)
                         {
@@ -618,14 +618,14 @@ namespace Lucene.Net.Index
             }
         }
 
-        public override TermFreqVector[] GetTermFreqVectors(int n)
+        public override ITermFreqVector[] GetTermFreqVectors(int n)
         {
             EnsureOpen();
             int i = ReaderIndex(n); // find segment num
             return subReaders[i].GetTermFreqVectors(n - starts[i]); // dispatch to segment
         }
         
-        public override TermFreqVector GetTermFreqVector(int n, System.String field)
+        public override ITermFreqVector GetTermFreqVector(int n, System.String field)
         {
             EnsureOpen();
             int i = ReaderIndex(n); // find segment num
@@ -648,33 +648,27 @@ namespace Lucene.Net.Index
         }
 
         /// <summary> Checks if the index is optimized (if it has a single segment and no deletions)</summary>
-        /// <value> &lt;c&gt;true&lt;/c&gt; if the index is optimized; &lt;c&gt;false&lt;/c&gt; otherwise </value>
-        public override bool IsOptimized
+        /// <returns> &lt;c&gt;true&lt;/c&gt; if the index is optimized; &lt;c&gt;false&lt;/c&gt; otherwise </returns>
+        public override bool IsOptimized()
         {
-            get
-            {
-                EnsureOpen();
-                return segmentInfos.Count == 1 && !HasDeletions;
-            }
+            EnsureOpen();
+            return segmentInfos.Count == 1 && !HasDeletions;
         }
 
-        public override int NumDocs
+        public override int GetNumDocs()
         {
-            get
+            // Don't call ensureOpen() here (it could affect performance)
+            // NOTE: multiple threads may wind up init'ing
+            // numDocs... but that's harmless
+            if (numDocs == - 1)
             {
-                // Don't call ensureOpen() here (it could affect performance)
-                // NOTE: multiple threads may wind up init'ing
-                // numDocs... but that's harmless
-                if (numDocs == - 1)
-                {
-                    // check cache
-                    int n = 0; // cache miss--recompute
-                    for (int i = 0; i < subReaders.Length; i++)
-                        n += subReaders[i].NumDocs; // sum from readers
-                    numDocs = n;
-                }
-                return numDocs;
+                // check cache
+                int n = 0; // cache miss--recompute
+                for (int i = 0; i < subReaders.Length; i++)
+                    n += subReaders[i].GetNumDocs(); // sum from readers
+                numDocs = n;
             }
+            return numDocs;
         }
 
         public override int MaxDoc
@@ -1018,20 +1012,17 @@ namespace Lucene.Net.Index
             }
         }
 
-        public override bool IsCurrent
+        public override bool IsCurrent()
         {
-            get
+            EnsureOpen();
+            if (writer == null || writer.IsClosed())
             {
-                EnsureOpen();
-                if (writer == null || writer.IsClosed())
-                {
-                    // we loaded SegmentInfos from the directory
-                    return SegmentInfos.ReadCurrentVersion(directory) == segmentInfos.Version;
-                }
-                else
-                {
-                    return writer.NrtIsCurrent(segmentInfosStart);
-                }
+                // we loaded SegmentInfos from the directory
+                return SegmentInfos.ReadCurrentVersion(directory) == segmentInfos.Version;
+            }
+            else
+            {
+                return writer.NrtIsCurrent(segmentInfosStart);
             }
         }
 
@@ -1083,9 +1074,9 @@ namespace Lucene.Net.Index
             return fieldSet;
         }
 
-        public override IndexReader[] SequentialSubReaders
+        public override IndexReader[] GetSequentialSubReaders()
         {
-            get { return subReaders; }
+            return subReaders;
         }
 
         /// <summary>Returns the directory this index resides in. </summary>

Modified: incubator/lucene.net/trunk/src/core/Index/DocInverterPerField.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Index/DocInverterPerField.cs?rev=1303294&r1=1303293&r2=1303294&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Index/DocInverterPerField.cs (original)
+++ incubator/lucene.net/trunk/src/core/Index/DocInverterPerField.cs Wed Mar 21 06:04:26 2012
@@ -197,7 +197,7 @@ namespace Lucene.Net.Index
 										docState.docWriter.SetAborting();
 								}
 								fieldState.position++;
-								offsetEnd = fieldState.offset + offsetAttribute.EndOffset();
+								offsetEnd = fieldState.offset + offsetAttribute.EndOffset;
 								if (++fieldState.length >= maxFieldLength)
 								{
 									if (docState.infoStream != null)
@@ -210,7 +210,7 @@ namespace Lucene.Net.Index
 							// trigger streams to perform end-of-stream operations
 							stream.End();
 							
-							fieldState.offset += offsetAttribute.EndOffset();
+							fieldState.offset += offsetAttribute.EndOffset;
 							anyToken = fieldState.length > startLength;
 						}
 						finally

Modified: incubator/lucene.net/trunk/src/core/Index/DocumentsWriter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Index/DocumentsWriter.cs?rev=1303294&r1=1303293&r2=1303294&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Index/DocumentsWriter.cs (original)
+++ incubator/lucene.net/trunk/src/core/Index/DocumentsWriter.cs Wed Mar 21 06:04:26 2012
@@ -348,7 +348,7 @@ namespace Lucene.Net.Index
 			this.directory = directory;
 			this.writer = writer;
 			this.similarity = writer.Similarity;
-			flushedDocCount = writer.MaxDoc;
+			flushedDocCount = writer.MaxDoc();
 			
 			consumer = indexingChain.GetChain(this);
 			if (consumer is DocFieldProcessor)
@@ -1248,18 +1248,14 @@ namespace Lucene.Net.Index
 				return (bufferIsFull || DeletesFull()) && SetFlushPending();
 			}
 		}
-		
-		internal void  SetMaxBufferedDeleteTerms(int maxBufferedDeleteTerms)
-		{
-			this.maxBufferedDeleteTerms = maxBufferedDeleteTerms;
-		}
-		
-		internal int GetMaxBufferedDeleteTerms()
-		{
-			return maxBufferedDeleteTerms;
-		}
-		
-		internal bool HasDeletes()
+
+	    internal int MaxBufferedDeleteTerms
+	    {
+	        set { this.maxBufferedDeleteTerms = value; }
+	        get { return maxBufferedDeleteTerms; }
+	    }
+
+	    internal bool HasDeletes()
 		{
 			lock (this)
 			{

Modified: incubator/lucene.net/trunk/src/core/Index/FieldsWriter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Index/FieldsWriter.cs?rev=1303294&r1=1303293&r2=1303294&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Index/FieldsWriter.cs (original)
+++ incubator/lucene.net/trunk/src/core/Index/FieldsWriter.cs Wed Mar 21 06:04:26 2012
@@ -243,7 +243,7 @@ namespace Lucene.Net.Index
 				byte[] data;
 				int len;
 				int offset;
-				data = field.BinaryValue;
+				data = field.GetBinaryValue();
 				len = field.BinaryLength;
 				offset = field.BinaryOffset;
 					

Modified: incubator/lucene.net/trunk/src/core/Index/FilterIndexReader.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Index/FilterIndexReader.cs?rev=1303294&r1=1303293&r2=1303294&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Index/FilterIndexReader.cs (original)
+++ incubator/lucene.net/trunk/src/core/Index/FilterIndexReader.cs Wed Mar 21 06:04:26 2012
@@ -182,13 +182,13 @@ namespace Lucene.Net.Index
 			return in_Renamed.Directory();
 		}
 		
-		public override TermFreqVector[] GetTermFreqVectors(int docNumber)
+		public override ITermFreqVector[] GetTermFreqVectors(int docNumber)
 		{
 			EnsureOpen();
 			return in_Renamed.GetTermFreqVectors(docNumber);
 		}
 		
-		public override TermFreqVector GetTermFreqVector(int docNumber, System.String field)
+		public override ITermFreqVector GetTermFreqVector(int docNumber, System.String field)
 		{
 			EnsureOpen();
 			return in_Renamed.GetTermFreqVector(docNumber, field);
@@ -207,13 +207,10 @@ namespace Lucene.Net.Index
 			in_Renamed.GetTermFreqVector(docNumber, mapper);
 		}
 
-	    public override int NumDocs
+	    public override int GetNumDocs()
 	    {
-	        get
-	        {
-	            // Don't call ensureOpen() here (it could affect performance)
-	            return in_Renamed.NumDocs;
-	        }
+	        // Don't call ensureOpen() here (it could affect performance)
+	        return in_Renamed.GetNumDocs();
 	    }
 
 	    public override int MaxDoc
@@ -345,27 +342,21 @@ namespace Lucene.Net.Index
 	        }
 	    }
 
-	    public override bool IsCurrent
+	    public override bool IsCurrent()
 	    {
-	        get
-	        {
-	            EnsureOpen();
-	            return in_Renamed.IsCurrent;
-	        }
+	        EnsureOpen();
+	        return in_Renamed.IsCurrent();
 	    }
 
-	    public override bool IsOptimized
+	    public override bool IsOptimized()
 	    {
-	        get
-	        {
-	            EnsureOpen();
-	            return in_Renamed.IsOptimized;
-	        }
+	        EnsureOpen();
+	        return in_Renamed.IsOptimized();
 	    }
 
-	    public override IndexReader[] SequentialSubReaders
+	    public override IndexReader[] GetSequentialSubReaders()
 	    {
-	        get { return in_Renamed.SequentialSubReaders; }
+	        return in_Renamed.GetSequentialSubReaders();
 	    }
 
 	    override public System.Object Clone()

Modified: incubator/lucene.net/trunk/src/core/Index/IndexReader.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Index/IndexReader.cs?rev=1303294&r1=1303293&r2=1303294&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Index/IndexReader.cs (original)
+++ incubator/lucene.net/trunk/src/core/Index/IndexReader.cs Wed Mar 21 06:04:26 2012
@@ -617,26 +617,26 @@ namespace Lucene.Net.Index
 	    /// <throws>  CorruptIndexException if the index is corrupt </throws>
 	    /// <throws>  IOException if there is a low-level IO error </throws>
 	    /// <throws>  UnsupportedOperationException unless overridden in subclass </throws>
-	    public virtual bool IsCurrent
+	    public virtual bool IsCurrent()
 	    {
-	        get { throw new System.NotSupportedException("This reader does not support this method."); }
+	        throw new System.NotSupportedException("This reader does not support this method.");
 	    }
 
 	    /// <summary> Checks if the index is optimized (if it has a single segment and 
 	    /// no deletions).  Not implemented in the IndexReader base class.
 	    /// </summary>
-	    /// <value> &lt;c&gt;true&lt;/c&gt; if the index is optimized; &lt;c&gt;false&lt;/c&gt; otherwise </value>
+	    /// <returns> &lt;c&gt;true&lt;/c&gt; if the index is optimized; &lt;c&gt;false&lt;/c&gt; otherwise </returns>
 	    /// <throws>  UnsupportedOperationException unless overridden in subclass </throws>
-	    public virtual bool IsOptimized
+	    public virtual bool IsOptimized()
 	    {
-	        get { throw new System.NotSupportedException("This reader does not support this method."); }
+	        throw new System.NotSupportedException("This reader does not support this method.");
 	    }
 
 	    /// <summary> Return an array of term frequency vectors for the specified document.
 		/// The array contains a vector for each vectorized field in the document.
 		/// Each vector contains terms and frequencies for all terms in a given vectorized field.
 		/// If no such fields existed, the method returns null. The term vectors that are
-		/// returned may either be of type <see cref="TermFreqVector" />
+		/// returned may either be of type <see cref="ITermFreqVector" />
 		/// or of type <see cref="TermPositionVector" /> if
 		/// positions or offsets have been stored.
 		/// 
@@ -649,7 +649,7 @@ namespace Lucene.Net.Index
 		/// <throws>  IOException if index cannot be accessed </throws>
 		/// <seealso cref="Lucene.Net.Documents.Field.TermVector">
 		/// </seealso>
-		abstract public TermFreqVector[] GetTermFreqVectors(int docNumber);
+		abstract public ITermFreqVector[] GetTermFreqVectors(int docNumber);
 		
 		
 		/// <summary> Return a term frequency vector for the specified document and field. The
@@ -669,10 +669,10 @@ namespace Lucene.Net.Index
 		/// <throws>  IOException if index cannot be accessed </throws>
 		/// <seealso cref="Lucene.Net.Documents.Field.TermVector">
 		/// </seealso>
-		abstract public TermFreqVector GetTermFreqVector(int docNumber, System.String field);
+		abstract public ITermFreqVector GetTermFreqVector(int docNumber, System.String field);
 		
 		/// <summary> Load the Term Vector into a user-defined data structure instead of relying on the parallel arrays of
-		/// the <see cref="TermFreqVector" />.
+		/// the <see cref="ITermFreqVector" />.
 		/// </summary>
 		/// <param name="docNumber">The number of the document to load the vector for
 		/// </param>
@@ -707,7 +707,8 @@ namespace Lucene.Net.Index
 		}
 
 	    /// <summary>Returns the number of documents in this index. </summary>
-	    public abstract int NumDocs { get; }
+        [System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Design", "CA1024:UsePropertiesWhereAppropriate")]
+        public abstract int GetNumDocs();
 
 	    /// <summary>Returns one greater than the largest possible document number.
 	    /// This may be used to, e.g., determine how big to allocate an array which
@@ -718,7 +719,7 @@ namespace Lucene.Net.Index
 	    /// <summary>Returns the number of deleted documents. </summary>
 	    public virtual int NumDeletedDocs
 	    {
-	        get { return MaxDoc - NumDocs; }
+	        get { return MaxDoc - GetNumDocs(); }
 	    }
 
 	    /// <summary> Returns the stored fields of the <c>n</c><sup>th</sup>
@@ -1331,9 +1332,9 @@ namespace Lucene.Net.Index
 	    /// corruption for other readers (like DirectoryReader obtained
 	    /// through <see cref="IndexReader.Open(Lucene.Net.Store.Directory,bool)" />. Use the parent reader directly. 
 	    /// </summary>
-	    public virtual IndexReader[] SequentialSubReaders
+	    public virtual IndexReader[] GetSequentialSubReaders()
 	    {
-	        get { return null; }
+	        return null;
 	    }
 
 	    /// <summary>Expert</summary>

Modified: incubator/lucene.net/trunk/src/core/Index/IndexWriter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Index/IndexWriter.cs?rev=1303294&r1=1303293&r2=1303294&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Index/IndexWriter.cs (original)
+++ incubator/lucene.net/trunk/src/core/Index/IndexWriter.cs Wed Mar 21 06:04:26 2012
@@ -17,6 +17,7 @@
 
 using System;
 using System.Collections.Generic;
+using System.IO;
 using Lucene.Net.Support;
 using Analyzer = Lucene.Net.Analysis.Analyzer;
 using Document = Lucene.Net.Documents.Document;
@@ -885,17 +886,21 @@ namespace Lucene.Net.Index
 				this.infoStream = infoStream;
 			}
 		}
-		
-		/// <summary> Casts current mergePolicy to LogMergePolicy, and throws
-		/// an exception if the mergePolicy is not a LogMergePolicy.
-		/// </summary>
-		private LogMergePolicy GetLogMergePolicy()
-		{
-			if (mergePolicy is LogMergePolicy)
-				return (LogMergePolicy) mergePolicy;
-			else
-				throw new System.ArgumentException("this method can only be called when the merge policy is the default LogMergePolicy");
-		}
+
+	    /// <summary> Casts current mergePolicy to LogMergePolicy, and throws
+	    /// an exception if the mergePolicy is not a LogMergePolicy.
+	    /// </summary>
+	    private LogMergePolicy LogMergePolicy
+	    {
+	        get
+	        {
+	            if (mergePolicy is LogMergePolicy)
+	                return (LogMergePolicy) mergePolicy;
+
+	            throw new System.ArgumentException(
+	                "this method can only be called when the merge policy is the default LogMergePolicy");
+	        }
+	    }
 
 	    /// <summary><p/>Gets or sets the current setting of whether newly flushed
 	    /// segments will use the compound file format.  Note that
@@ -912,19 +917,16 @@ namespace Lucene.Net.Index
 	    /// </summary>
 	    public virtual bool UseCompoundFile
 	    {
-	        get { return GetLogMergePolicy().GetUseCompoundFile(); }
+	        get { return LogMergePolicy.GetUseCompoundFile(); }
 	        set
 	        {
-	            GetLogMergePolicy().SetUseCompoundFile(value);
-	            GetLogMergePolicy().SetUseCompoundDocStore(value);
+	            LogMergePolicy.SetUseCompoundFile(value);
+	            LogMergePolicy.SetUseCompoundDocStore(value);
 	        }
 	    }
 
 	    /// <summary>Expert: Set the Similarity implementation used by this IndexWriter.
-		/// 
 		/// </summary>
-		/// <seealso cref="Similarity.SetDefault(Similarity)">
-		/// </seealso>
 		public virtual void  SetSimilarity(Similarity similarity)
 		{
 			EnsureOpen();
@@ -1013,7 +1015,7 @@ namespace Lucene.Net.Index
 		public IndexWriter(Directory d, Analyzer a, bool create, MaxFieldLength mfl)
 		{
 			InitBlock();
-			Init(d, a, create, null, mfl.GetLimit(), null, null);
+			Init(d, a, create, null, mfl.Limit, null, null);
 		}
 		
 		/// <summary> Constructs an IndexWriter for the index in
@@ -1040,7 +1042,7 @@ namespace Lucene.Net.Index
 		public IndexWriter(Directory d, Analyzer a, MaxFieldLength mfl)
 		{
 			InitBlock();
-			Init(d, a, null, mfl.GetLimit(), null, null);
+			Init(d, a, null, mfl.Limit, null, null);
 		}
 		
 		/// <summary> Expert: constructs an IndexWriter with a custom <see cref="IndexDeletionPolicy" />
@@ -1069,7 +1071,7 @@ namespace Lucene.Net.Index
 		public IndexWriter(Directory d, Analyzer a, IndexDeletionPolicy deletionPolicy, MaxFieldLength mfl)
 		{
 			InitBlock();
-			Init(d, a, deletionPolicy, mfl.GetLimit(), null, null);
+			Init(d, a, deletionPolicy, mfl.Limit, null, null);
 		}
 		
 		/// <summary> Expert: constructs an IndexWriter with a custom <see cref="IndexDeletionPolicy" />
@@ -1105,7 +1107,7 @@ namespace Lucene.Net.Index
 		public IndexWriter(Directory d, Analyzer a, bool create, IndexDeletionPolicy deletionPolicy, MaxFieldLength mfl)
 		{
 			InitBlock();
-			Init(d, a, create, deletionPolicy, mfl.GetLimit(), null, null);
+			Init(d, a, create, deletionPolicy, mfl.Limit, null, null);
 		}
 		
 		/// <summary> Expert: constructs an IndexWriter with a custom <see cref="IndexDeletionPolicy" />
@@ -1147,7 +1149,7 @@ namespace Lucene.Net.Index
 		internal IndexWriter(Directory d, Analyzer a, bool create, IndexDeletionPolicy deletionPolicy, MaxFieldLength mfl, IndexingChain indexingChain, IndexCommit commit)
 		{
 			InitBlock();
-			Init(d, a, create, deletionPolicy, mfl.GetLimit(), indexingChain, commit);
+			Init(d, a, create, deletionPolicy, mfl.Limit, indexingChain, commit);
 		}
 		
 		/// <summary> Expert: constructs an IndexWriter on specific commit
@@ -1191,7 +1193,7 @@ namespace Lucene.Net.Index
 		public IndexWriter(Directory d, Analyzer a, IndexDeletionPolicy deletionPolicy, MaxFieldLength mfl, IndexCommit commit)
 		{
 			InitBlock();
-			Init(d, a, false, deletionPolicy, mfl.GetLimit(), null, commit);
+			Init(d, a, false, deletionPolicy, mfl.Limit, null, commit);
 		}
 		
 		private void  Init(Directory d, Analyzer a, IndexDeletionPolicy deletionPolicy, int maxFieldLength, IndexingChain indexingChain, IndexCommit commit)
@@ -1443,8 +1445,8 @@ namespace Lucene.Net.Index
 	    /// </seealso>
 	    public virtual int MaxMergeDocs
 	    {
-	        get { return GetLogMergePolicy().GetMaxMergeDocs(); }
-	        set { GetLogMergePolicy().SetMaxMergeDocs(value); }
+	        get { return LogMergePolicy.MaxMergeDocs; }
+	        set { LogMergePolicy.MaxMergeDocs = value; }
 	    }
 
 	    /// <summary> The maximum number of terms that will be indexed for a single field in a
@@ -1474,7 +1476,8 @@ namespace Lucene.Net.Index
 		/// </summary>
 		/// <seealso cref="SetMaxFieldLength">
 		/// </seealso>
-		public virtual int GetMaxFieldLength()
+        [System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Design", "CA1024:UsePropertiesWhereAppropriate")]
+        public virtual int GetMaxFieldLength()
 		{
 			EnsureOpen();
 			return maxFieldLength;
@@ -1571,7 +1574,8 @@ namespace Lucene.Net.Index
 		/// </summary>
 		/// <seealso cref="SetMaxBufferedDocs">
 		/// </seealso>
-		public virtual int GetMaxBufferedDocs()
+        [System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Design", "CA1024:UsePropertiesWhereAppropriate")]
+        public virtual int GetMaxBufferedDocs()
 		{
 			EnsureOpen();
 			return docWriter.MaxBufferedDocs;
@@ -1632,7 +1636,8 @@ namespace Lucene.Net.Index
 		}
 		
 		/// <summary> Returns the value set by <see cref="SetRAMBufferSizeMB" /> if enabled.</summary>
-		public virtual double GetRAMBufferSizeMB()
+        [System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Design", "CA1024:UsePropertiesWhereAppropriate")]
+        public virtual double GetRAMBufferSizeMB()
 		{
 			return docWriter.GetRAMBufferSizeMB();
 		}
@@ -1654,7 +1659,7 @@ namespace Lucene.Net.Index
 			EnsureOpen();
 			if (maxBufferedDeleteTerms != DISABLE_AUTO_FLUSH && maxBufferedDeleteTerms < 1)
 				throw new System.ArgumentException("maxBufferedDeleteTerms must at least be 1 when enabled");
-			docWriter.SetMaxBufferedDeleteTerms(maxBufferedDeleteTerms);
+			docWriter.MaxBufferedDeleteTerms = maxBufferedDeleteTerms;
 			if (infoStream != null)
 				Message("setMaxBufferedDeleteTerms " + maxBufferedDeleteTerms);
 		}
@@ -1664,70 +1669,50 @@ namespace Lucene.Net.Index
 		/// </summary>
 		/// <seealso cref="SetMaxBufferedDeleteTerms">
 		/// </seealso>
-		public virtual int GetMaxBufferedDeleteTerms()
+        [System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Design", "CA1024:UsePropertiesWhereAppropriate")]
+        public virtual int GetMaxBufferedDeleteTerms()
 		{
 			EnsureOpen();
-			return docWriter.GetMaxBufferedDeleteTerms();
+			return docWriter.MaxBufferedDeleteTerms;
 		}
-		
-		/// <summary>Determines how often segment indices are merged by addDocument().  With
-		/// smaller values, less RAM is used while indexing, and searches on
-		/// unoptimized indices are faster, but indexing speed is slower.  With larger
-		/// values, more RAM is used during indexing, and while searches on unoptimized
-		/// indices are slower, indexing is faster.  Thus larger values (> 10) are best
-        /// for batch index creation, and smaller values (&lt; 10) for indices that are
-		/// interactively maintained.
-		/// 
-		/// <p/>Note that this method is a convenience method: it
-		/// just calls mergePolicy.setMergeFactor as long as
-		/// mergePolicy is an instance of <see cref="LogMergePolicy" />.
-		/// Otherwise an IllegalArgumentException is thrown.<p/>
-		/// 
-		/// <p/>This must never be less than 2.  The default value is 10.
-		/// </summary>
-		public virtual void  SetMergeFactor(int mergeFactor)
-		{
-			GetLogMergePolicy().MergeFactor = mergeFactor;
-		}
-		
-		/// <summary> <p/>Returns the number of segments that are merged at
-		/// once and also controls the total number of segments
-		/// allowed to accumulate in the index.<p/>
-		/// 
-		/// <p/>Note that this method is a convenience method: it
-		/// just calls mergePolicy.getMergeFactor as long as
-		/// mergePolicy is an instance of <see cref="LogMergePolicy" />.
-		/// Otherwise an IllegalArgumentException is thrown.<p/>
-		/// 
-		/// </summary>
-		/// <seealso cref="SetMergeFactor">
-		/// </seealso>
-		public virtual int GetMergeFactor()
-		{
-			return GetLogMergePolicy().MergeFactor;
-		}
-		
-		/// <summary>If non-null, this will be the default infoStream used
-		/// by a newly instantiated IndexWriter.
-		/// </summary>
-		/// <seealso cref="SetInfoStream">
-		/// </seealso>
-		public static void  SetDefaultInfoStream(System.IO.StreamWriter infoStream)
-		{
-			IndexWriter.defaultInfoStream = infoStream;
-		}
-		
-		/// <summary> Returns the current default infoStream for newly
-		/// instantiated IndexWriters.
-		/// </summary>
-		/// <seealso cref="SetDefaultInfoStream">
-		/// </seealso>
-		public static System.IO.StreamWriter GetDefaultInfoStream()
-		{
-			return IndexWriter.defaultInfoStream;
-		}
-		
-		/// <summary>If non-null, information about merges, deletes and a
+
+	    /// <summary>Gets or sets the number of segments that are merged at
+	    /// once and also controls the total number of segments
+	    /// allowed to accumulate in the index.
+	    /// <p/>Determines how often segment indices are merged by addDocument().  With
+	    /// smaller values, less RAM is used while indexing, and searches on
+	    /// unoptimized indices are faster, but indexing speed is slower.  With larger
+	    /// values, more RAM is used during indexing, and while searches on unoptimized
+	    /// indices are slower, indexing is faster.  Thus larger values (> 10) are best
+	    /// for batch index creation, and smaller values (&lt; 10) for indices that are
+	    /// interactively maintained.
+	    /// 
+	    /// <p/>Note that this method is a convenience method: it
+	    /// just calls mergePolicy.setMergeFactor as long as
+	    /// mergePolicy is an instance of <see cref="LogMergePolicy" />.
+	    /// Otherwise an IllegalArgumentException is thrown.<p/>
+	    /// 
+	    /// <p/>This must never be less than 2.  The default value is 10.
+	    /// </summary>
+	    public virtual int MergeFactor
+	    {
+	        set { LogMergePolicy.MergeFactor = value; }
+	        get { return LogMergePolicy.MergeFactor; }
+	    }
+
+	    /// <summary>Gets or sets the default info stream.
+	    /// If non-null, this will be the default infoStream used
+	    /// by a newly instantiated IndexWriter.
+	    /// </summary>
+	    /// <seealso cref="SetInfoStream">
+	    /// </seealso>
+	    public static StreamWriter DefaultInfoStream
+	    {
+	        set { IndexWriter.defaultInfoStream = value; }
+	        get { return IndexWriter.defaultInfoStream; }
+	    }
+
+	    /// <summary>If non-null, information about merges, deletes and a
 		/// message when maxFieldLength is reached will be printed
 		/// to this.
 		/// </summary>
@@ -1748,62 +1733,54 @@ namespace Lucene.Net.Index
                     " mergeScheduler=" + mergeScheduler +
 		            " ramBufferSizeMB=" + docWriter.GetRAMBufferSizeMB() + 
                     " maxBufferedDocs=" +  docWriter.MaxBufferedDocs +
-                    " maxBuffereDeleteTerms=" + docWriter.GetMaxBufferedDeleteTerms() +
+                    " maxBuffereDeleteTerms=" + docWriter.MaxBufferedDeleteTerms +
 		            " maxFieldLength=" + maxFieldLength + 
                     " index=" + SegString());
 		}
-		
-		/// <summary> Returns the current infoStream in use by this writer.</summary>
-		/// <seealso cref="SetInfoStream">
-		/// </seealso>
-		public virtual System.IO.StreamWriter GetInfoStream()
-		{
-			EnsureOpen();
-			return infoStream;
-		}
-		
-		/// <summary>Returns true if verbosing is enabled (i.e., infoStream != null). </summary>
-		public virtual bool Verbose()
-		{
-			return infoStream != null;
-		}
-		
-		/// <seealso cref="SetDefaultWriteLockTimeout"> to change the default value for all instances of IndexWriter.
-		/// </seealso>
-		public virtual void  SetWriteLockTimeout(long writeLockTimeout)
-		{
-			EnsureOpen();
-			this.writeLockTimeout = writeLockTimeout;
-		}
-		
-		/// <summary> Returns allowed timeout when acquiring the write lock.</summary>
-		/// <seealso cref="SetWriteLockTimeout">
-		/// </seealso>
-		public virtual long GetWriteLockTimeout()
-		{
-			EnsureOpen();
-			return writeLockTimeout;
-		}
-		
-		/// <summary> Sets the default (for any instance of IndexWriter) maximum time to wait for a write lock (in
-		/// milliseconds).
-		/// </summary>
-		public static void  SetDefaultWriteLockTimeout(long writeLockTimeout)
-		{
-			IndexWriter.WRITE_LOCK_TIMEOUT = writeLockTimeout;
-		}
-		
-		/// <summary> Returns default write lock timeout for newly
-		/// instantiated IndexWriters.
-		/// </summary>
-		/// <seealso cref="SetDefaultWriteLockTimeout">
-		/// </seealso>
-		public static long GetDefaultWriteLockTimeout()
-		{
-			return IndexWriter.WRITE_LOCK_TIMEOUT;
-		}
-		
-		/// <summary> Commits all changes to an index and closes all
+
+	    /// <summary> Returns the current infoStream in use by this writer.</summary>
+	    /// <seealso cref="SetInfoStream">
+	    /// </seealso>
+	    public virtual StreamWriter InfoStream
+	    {
+	        get
+	        {
+	            EnsureOpen();
+	            return infoStream;
+	        }
+	    }
+
+	    /// <summary>Returns true if verbosing is enabled (i.e., infoStream != null). </summary>
+	    public virtual bool Verbose
+	    {
+	        get { return infoStream != null; }
+	    }
+
+	    /// <summary>Gets or sets allowed timeout when acquiring the write lock.</summary>
+	    public virtual long WriteLockTimeout
+	    {
+	        get
+	        {
+	            EnsureOpen();
+	            return writeLockTimeout;
+	        }
+	        set
+	        {
+	            EnsureOpen();
+	            this.writeLockTimeout = value;
+	        }
+	    }
+
+	    /// <summary> Gets or sets the default (for any instance of IndexWriter) maximum time to wait for a write lock (in
+	    /// milliseconds).
+	    /// </summary>
+	    public static long DefaultWriteLockTimeout
+	    {
+	        set { IndexWriter.WRITE_LOCK_TIMEOUT = value; }
+	        get { return IndexWriter.WRITE_LOCK_TIMEOUT; }
+	    }
+
+	    /// <summary> Commits all changes to an index and closes all
 		/// associated files.  Note that this may be a costly
 		/// operation, so, try to re-use a single writer instead of
 		/// closing and opening a new one.  See <see cref="Commit()" /> for
@@ -2177,48 +2154,51 @@ namespace Lucene.Net.Index
 				return useCompoundDocStore;
 			}
 		}
-		
-		/// <summary>Returns the Directory used by this index. </summary>
-		public virtual Directory GetDirectory()
-		{
-			// Pass false because the flush during closing calls getDirectory
-			EnsureOpen(false);
-			return directory;
-		}
-		
-		/// <summary>Returns the analyzer used by this index. </summary>
-		public virtual Analyzer GetAnalyzer()
-		{
-			EnsureOpen();
-			return analyzer;
-		}
-		
-		/// <summary>Returns total number of docs in this index, including
-		/// docs not yet flushed (still in the RAM buffer),
-		/// not counting deletions.
-		/// </summary>
-		/// <seealso cref="NumDocs">
-		/// </seealso>
-		public virtual int MaxDoc
-		{
-            get
-            {
-                lock (this)
-                {
-                    int count;
-                    if (docWriter != null)
-                        count = docWriter.NumDocsInRAM;
-                    else
-                        count = 0;
 
-                    for (int i = 0; i < segmentInfos.Count; i++)
-                        count += segmentInfos.Info(i).docCount;
-                    return count;
-                }
-            }
-		}
-		
-		/// <summary>Returns total number of docs in this index, including
+	    /// <summary>Returns the Directory used by this index. </summary>
+	    public virtual Directory Directory
+	    {
+	        get
+	        {
+	            // Pass false because the flush during closing calls getDirectory
+	            EnsureOpen(false);
+	            return directory;
+	        }
+	    }
+
+	    /// <summary>Returns the analyzer used by this index. </summary>
+	    public virtual Analyzer Analyzer
+	    {
+	        get
+	        {
+	            EnsureOpen();
+	            return analyzer;
+	        }
+	    }
+
+	    /// <summary>Returns total number of docs in this index, including
+	    /// docs not yet flushed (still in the RAM buffer),
+	    /// not counting deletions.
+	    /// </summary>
+	    /// <seealso cref="NumDocs">
+	    /// </seealso>
+	    public virtual int MaxDoc()
+	    {
+	        lock (this)
+	        {
+	            int count;
+	            if (docWriter != null)
+	                count = docWriter.NumDocsInRAM;
+	            else
+	                count = 0;
+
+	            for (int i = 0; i < segmentInfos.Count; i++)
+	                count += segmentInfos.Info(i).docCount;
+	            return count;
+	        }
+	    }
+
+	    /// <summary>Returns total number of docs in this index, including
 		/// docs not yet flushed (still in the RAM buffer), and
 		/// including deletions.  <b>NOTE:</b> buffered deletions
 		/// are not counted.  If you really need these to be
@@ -2496,7 +2476,7 @@ namespace Lucene.Net.Index
 		public virtual void  UpdateDocument(Term term, Document doc)
 		{
 			EnsureOpen();
-			UpdateDocument(term, doc, GetAnalyzer());
+			UpdateDocument(term, doc, Analyzer);
 		}
 		
 		/// <summary> Updates a document by first deleting the document(s)
@@ -2559,7 +2539,7 @@ namespace Lucene.Net.Index
 		}
 		
 		// for test purpose
-		public /*internal*/ int GetSegmentCount()
+		internal int GetSegmentCount()
 		{
 			lock (this)
 			{
@@ -2568,7 +2548,7 @@ namespace Lucene.Net.Index
 		}
 		
 		// for test purpose
-		public /*internal*/ int GetNumBufferedDocuments()
+		internal int GetNumBufferedDocuments()
 		{
 			lock (this)
 			{
@@ -2593,7 +2573,7 @@ namespace Lucene.Net.Index
 		}
 		
 		// for test purpose
-		public /*internal*/ int GetFlushCount()
+		internal int GetFlushCount()
 		{
 			lock (this)
 			{
@@ -2602,7 +2582,7 @@ namespace Lucene.Net.Index
 		}
 		
 		// for test purpose
-		public /*internal*/ int GetFlushDeletesCount()
+		internal int GetFlushDeletesCount()
 		{
 			lock (this)
 			{
@@ -3022,11 +3002,6 @@ namespace Lucene.Net.Index
 				}
 			}
 		}
-
-        public virtual MergePolicy.OneMerge GetNextMerge_forNUnit()
-        {
-            return GetNextMerge();
-        }
 		
 		/// <summary>Expert: the <see cref="MergeScheduler" /> calls this method
 		/// to retrieve the next merge requested by the
@@ -3940,7 +3915,7 @@ namespace Lucene.Net.Index
 							merger.CreateCompoundFile(mergedName + ".cfs");
 							lock (this)
 							{
-								info.UseCompoundFile = true;
+								info.SetUseCompoundFile(true);
 							}
 							
 							success = true;
@@ -4389,7 +4364,7 @@ namespace Lucene.Net.Index
 							}
 						}
 						
-						newSegment.UseCompoundFile = true;
+						newSegment.SetUseCompoundFile(true);
 						Checkpoint();
 					}
 					
@@ -5186,7 +5161,7 @@ namespace Lucene.Net.Index
                         currentDSSMerged |= currentDocStoreSegment.Equals(info.DocStoreSegment);
                     }
 
-                    totDocCount += clone.NumDocs;
+                    totDocCount += clone.GetNumDocs();
                 }
 
                 if (infoStream != null)
@@ -5300,7 +5275,7 @@ namespace Lucene.Net.Index
                         }
                     }
 
-                    merge.info.UseCompoundFile = true;
+                    merge.info.SetUseCompoundFile(true);
                 }
 
                 int termsIndexDivisor;
@@ -5405,7 +5380,7 @@ namespace Lucene.Net.Index
 		}
 		
 		// For test purposes.
-		public /*internal*/ int GetBufferedDeleteTermsSize()
+		internal int GetBufferedDeleteTermsSize()
 		{
 			lock (this)
 			{
@@ -5414,7 +5389,7 @@ namespace Lucene.Net.Index
 		}
 		
 		// For test purposes.
-		public /*internal*/ int GetNumBufferedDeleteTerms()
+		internal int GetNumBufferedDeleteTerms()
 		{
 			lock (this)
 			{
@@ -5850,13 +5825,13 @@ namespace Lucene.Net.Index
 			public MaxFieldLength(int limit):this("User-specified", limit)
 			{
 			}
-			
-			public int GetLimit()
-			{
-				return limit;
-			}
-			
-			public override System.String ToString()
+
+		    public int Limit
+		    {
+		        get { return limit; }
+		    }
+
+		    public override System.String ToString()
 			{
 				return name + ":" + limit;
 			}
@@ -5895,24 +5870,17 @@ namespace Lucene.Net.Index
 		}
 		
 		private IndexReaderWarmer mergedSegmentWarmer;
-		
-		/// <summary>Set the merged segment warmer.  See <see cref="IndexReaderWarmer" />
-		///. 
-		/// </summary>
-		public virtual void  SetMergedSegmentWarmer(IndexReaderWarmer warmer)
-		{
-			mergedSegmentWarmer = warmer;
-		}
-		
-		/// <summary>Returns the current merged segment warmer.  See <see cref="IndexReaderWarmer" />
-		///. 
-		/// </summary>
-		public virtual IndexReaderWarmer GetMergedSegmentWarmer()
-		{
-			return mergedSegmentWarmer;
-		}
-		
-		private void  HandleOOM(System.OutOfMemoryException oom, System.String location)
+
+	    /// <summary>Gets or sets the merged segment warmer.  See <see cref="IndexReaderWarmer" />
+	    ///. 
+	    /// </summary>
+	    public virtual IndexReaderWarmer MergedSegmentWarmer
+	    {
+	        set { mergedSegmentWarmer = value; }
+	        get { return mergedSegmentWarmer; }
+	    }
+
+	    private void  HandleOOM(System.OutOfMemoryException oom, System.String location)
 		{
 			if (infoStream != null)
 			{
@@ -5969,6 +5937,7 @@ namespace Lucene.Net.Index
 				return closed;
 			}
 		}
+
 		static IndexWriter()
 		{
 			MAX_TERM_LENGTH = DocumentsWriter.MAX_TERM_LENGTH;

Modified: incubator/lucene.net/trunk/src/core/Index/LogMergePolicy.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Index/LogMergePolicy.cs?rev=1303294&r1=1303293&r2=1303294&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Index/LogMergePolicy.cs (original)
+++ incubator/lucene.net/trunk/src/core/Index/LogMergePolicy.cs Wed Mar 21 06:04:26 2012
@@ -85,7 +85,7 @@ namespace Lucene.Net.Index
 		
 		protected internal virtual bool Verbose()
 		{
-			return writer != null && writer.Verbose();
+			return writer != null && writer.Verbose;
 		}
 
 	    public double NoCFSRatio
@@ -146,8 +146,9 @@ namespace Lucene.Net.Index
 		{
 			this.useCompoundFile = useCompoundFile;
 		}
-		
-		public virtual bool GetUseCompoundFile()
+
+        [System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Design", "CA1024:UsePropertiesWhereAppropriate")]
+        public virtual bool GetUseCompoundFile()
 		{
 			return useCompoundFile;
 		}
@@ -172,28 +173,22 @@ namespace Lucene.Net.Index
 		/// </summary>
         /// <seealso cref="SetUseCompoundDocStore ">
 		/// </seealso>
-		public virtual bool GetUseCompoundDocStore()
+        [System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Design", "CA1024:UsePropertiesWhereAppropriate")]
+        public virtual bool GetUseCompoundDocStore()
 		{
 			return useCompoundDocStore;
 		}
-		
-		/// <summary>Sets whether the segment size should be calibrated by
-		/// the number of deletes when choosing segments for merge. 
-		/// </summary>
-		public virtual void  SetCalibrateSizeByDeletes(bool calibrateSizeByDeletes)
-		{
-			this.calibrateSizeByDeletes = calibrateSizeByDeletes;
-		}
-		
-		/// <summary>Returns true if the segment size should be calibrated 
-		/// by the number of deletes when choosing segments for merge. 
-		/// </summary>
-		public virtual bool GetCalibrateSizeByDeletes()
-		{
-			return calibrateSizeByDeletes;
-		}
-		
-		abstract protected internal long Size(SegmentInfo info);
+
+	    /// <summary>Gets or sets whether the segment size should be calibrated by
+	    /// the number of deletes when choosing segments for merge. 
+	    /// </summary>
+	    public virtual bool CalibrateSizeByDeletes
+	    {
+	        set { this.calibrateSizeByDeletes = value; }
+	        get { return calibrateSizeByDeletes; }
+	    }
+
+	    abstract protected internal long Size(SegmentInfo info);
 		
 		protected internal virtual long SizeDocs(SegmentInfo info)
 		{
@@ -248,8 +243,8 @@ namespace Lucene.Net.Index
 		private bool IsOptimized(SegmentInfo info)
 		{
 			bool hasDeletions = writer.NumDeletedDocs(info) > 0;
-			return !hasDeletions && !info.HasSeparateNorms() && info.dir == writer.GetDirectory() &&
-                (info.UseCompoundFile == useCompoundFile || noCFSRatio < 1.0);
+			return !hasDeletions && !info.HasSeparateNorms() && info.dir == writer.Directory &&
+                (info.GetUseCompoundFile() == useCompoundFile || noCFSRatio < 1.0);
 		}
 		
 		/// <summary>Returns the merges necessary to optimize the index.
@@ -557,35 +552,29 @@ namespace Lucene.Net.Index
 
             return new OneMerge(infosToMerge, doCFS);
         }
-		
-		/// <summary><p/>Determines the largest segment (measured by
-		/// document count) that may be merged with other segments.
-		/// Small values (e.g., less than 10,000) are best for
-		/// interactive indexing, as this limits the length of
-		/// pauses while indexing to a few seconds.  Larger values
-		/// are best for batched indexing and speedier
-		/// searches.<p/>
-		/// 
-		/// <p/>The default value is <see cref="int.MaxValue" />.<p/>
-		/// 
-		/// <p/>The default merge policy (<see cref="LogByteSizeMergePolicy" />)
-		/// also allows you to set this
-		/// limit by net size (in MB) of the segment, using 
-		/// <see cref="LogByteSizeMergePolicy.SetMaxMergeMB" />.<p/>
-		/// </summary>
-		public virtual void  SetMaxMergeDocs(int maxMergeDocs)
-		{
-			this.maxMergeDocs = maxMergeDocs;
-		}
-		
-		/// <summary>Returns the largest segment (measured by document
-		/// count) that may be merged with other segments.
-		/// </summary>
-		/// <seealso cref="SetMaxMergeDocs">
-		/// </seealso>
-		public virtual int GetMaxMergeDocs()
-		{
-			return maxMergeDocs;
-		}
+
+	    /// <summary>
+	    /// Gets or sets the largest segment (measured by document
+	    /// count) that may be merged with other segments.
+	    /// <p/>Determines the largest segment (measured by
+	    /// document count) that may be merged with other segments.
+	    /// Small values (e.g., less than 10,000) are best for
+	    /// interactive indexing, as this limits the length of
+	    /// pauses while indexing to a few seconds.  Larger values
+	    /// are best for batched indexing and speedier
+	    /// searches.<p/>
+	    /// 
+	    /// <p/>The default value is <see cref="int.MaxValue" />.<p/>
+	    /// 
+	    /// <p/>The default merge policy (<see cref="LogByteSizeMergePolicy" />)
+	    /// also allows you to set this
+	    /// limit by net size (in MB) of the segment, using 
+	    /// <see cref="LogByteSizeMergePolicy.MaxMergeMB" />.<p/>
+	    /// </summary>
+	    public virtual int MaxMergeDocs
+	    {
+	        set { this.maxMergeDocs = value; }
+	        get { return maxMergeDocs; }
+	    }
 	}
 }
\ No newline at end of file

Modified: incubator/lucene.net/trunk/src/core/Index/MultiReader.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Index/MultiReader.cs?rev=1303294&r1=1303293&r2=1303294&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Index/MultiReader.cs (original)
+++ incubator/lucene.net/trunk/src/core/Index/MultiReader.cs Wed Mar 21 06:04:26 2012
@@ -217,14 +217,14 @@ namespace Lucene.Net.Index
             }
         }
         
-        public override TermFreqVector[] GetTermFreqVectors(int n)
+        public override ITermFreqVector[] GetTermFreqVectors(int n)
         {
             EnsureOpen();
             int i = ReaderIndex(n); // find segment num
             return subReaders[i].GetTermFreqVectors(n - starts[i]); // dispatch to segment
         }
         
-        public override TermFreqVector GetTermFreqVector(int n, System.String field)
+        public override ITermFreqVector GetTermFreqVector(int n, System.String field)
         {
             EnsureOpen();
             int i = ReaderIndex(n); // find segment num
@@ -246,28 +246,25 @@ namespace Lucene.Net.Index
             subReaders[i].GetTermFreqVector(docNumber - starts[i], mapper);
         }
 
-        public override bool IsOptimized
+        public override bool IsOptimized()
         {
-            get { return false; }
+            return false;
         }
 
-        public override int NumDocs
+        public override int GetNumDocs()
         {
-            get
+            // Don't call ensureOpen() here (it could affect performance)
+            // NOTE: multiple threads may wind up init'ing
+            // numDocs... but that's harmless
+            if (numDocs == - 1)
             {
-                // Don't call ensureOpen() here (it could affect performance)
-                // NOTE: multiple threads may wind up init'ing
-                // numDocs... but that's harmless
-                if (numDocs == - 1)
-                {
-                    // check cache
-                    int n = 0; // cache miss--recompute
-                    for (int i = 0; i < subReaders.Length; i++)
-                        n += subReaders[i].NumDocs; // sum from readers
-                    numDocs = n;
-                }
-                return numDocs;
+                // check cache
+                int n = 0; // cache miss--recompute
+                for (int i = 0; i < subReaders.Length; i++)
+                    n += subReaders[i].GetNumDocs(); // sum from readers
+                numDocs = n;
             }
+            return numDocs;
         }
 
         public override int MaxDoc
@@ -468,21 +465,18 @@ namespace Lucene.Net.Index
         }
 
         /// <summary> Checks recursively if all subreaders are up to date. </summary>
-        public override bool IsCurrent
+        public override bool IsCurrent()
         {
-            get
+            for (int i = 0; i < subReaders.Length; i++)
             {
-                for (int i = 0; i < subReaders.Length; i++)
+                if (!subReaders[i].IsCurrent())
                 {
-                    if (!subReaders[i].IsCurrent)
-                    {
-                        return false;
-                    }
+                    return false;
                 }
-
-                // all subreaders are up to date
-                return true;
             }
+
+            // all subreaders are up to date
+            return true;
         }
 
         /// <summary>Not implemented.</summary>
@@ -492,9 +486,9 @@ namespace Lucene.Net.Index
             get { throw new System.NotSupportedException("MultiReader does not support this method."); }
         }
 
-        public override IndexReader[] SequentialSubReaders
+        public override IndexReader[] GetSequentialSubReaders()
         {
-            get { return subReaders; }
+            return subReaders;
         }
     }
 }
\ No newline at end of file

Modified: incubator/lucene.net/trunk/src/core/Index/ParallelReader.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Index/ParallelReader.cs?rev=1303294&r1=1303293&r2=1303294&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Index/ParallelReader.cs (original)
+++ incubator/lucene.net/trunk/src/core/Index/ParallelReader.cs Wed Mar 21 06:04:26 2012
@@ -98,15 +98,15 @@ namespace Lucene.Net.Index
 			if (readers.Count == 0)
 			{
 				this.maxDoc = reader.MaxDoc;
-				this.numDocs = reader.NumDocs;
+				this.numDocs = reader.GetNumDocs();
 				this.hasDeletions = reader.HasDeletions;
 			}
 			
 			if (reader.MaxDoc != maxDoc)
 			// check compatibility
 				throw new System.ArgumentException("All readers must have same maxDoc: " + maxDoc + "!=" + reader.MaxDoc);
-			if (reader.NumDocs != numDocs)
-				throw new System.ArgumentException("All readers must have same numDocs: " + numDocs + "!=" + reader.NumDocs);
+			if (reader.GetNumDocs() != numDocs)
+				throw new System.ArgumentException("All readers must have same numDocs: " + numDocs + "!=" + reader.GetNumDocs());
 			
 			ICollection<string> fields = reader.GetFieldNames(IndexReader.FieldOption.ALL);
 			readerToFields[reader] = fields;
@@ -254,13 +254,10 @@ namespace Lucene.Net.Index
 		}
 
 
-	    public override int NumDocs
+	    public override int GetNumDocs()
 	    {
-	        get
-	        {
-	            // Don't call ensureOpen() here (it could affect performance)
-	            return numDocs;
-	        }
+	        // Don't call ensureOpen() here (it could affect performance)
+	        return numDocs;
 	    }
 
 	    public override int MaxDoc
@@ -343,23 +340,23 @@ namespace Lucene.Net.Index
 		}
 		
 		// get all vectors
-		public override TermFreqVector[] GetTermFreqVectors(int n)
+		public override ITermFreqVector[] GetTermFreqVectors(int n)
 		{
 			EnsureOpen();
-			IList<TermFreqVector> results = new List<TermFreqVector>();
+			IList<ITermFreqVector> results = new List<ITermFreqVector>();
             foreach(var e in fieldToReader)
 			{
 				System.String field = e.Key;
 				IndexReader reader = e.Value;
 
-				TermFreqVector vector = reader.GetTermFreqVector(n, field);
+				ITermFreqVector vector = reader.GetTermFreqVector(n, field);
 				if (vector != null)
 					results.Add(vector);
 			}
 			return results.ToArray();
 		}
 		
-		public override TermFreqVector GetTermFreqVector(int n, System.String field)
+		public override ITermFreqVector GetTermFreqVector(int n, System.String field)
 		{
 			EnsureOpen();
 			IndexReader reader = (fieldToReader[field]);
@@ -462,39 +459,33 @@ namespace Lucene.Net.Index
 		}
 
 	    /// <summary> Checks recursively if all subreaders are up to date. </summary>
-	    public override bool IsCurrent
+	    public override bool IsCurrent()
 	    {
-	        get
+	        foreach (var reader in readers)
 	        {
-	            foreach (var reader in readers)
+	            if (!reader.IsCurrent())
 	            {
-	                if (!reader.IsCurrent)
-	                {
-	                    return false;
-	                }
+	                return false;
 	            }
-
-	            // all subreaders are up to date
-	            return true;
 	        }
+
+	        // all subreaders are up to date
+	        return true;
 	    }
 
 	    /// <summary> Checks recursively if all subindexes are optimized </summary>
-	    public override bool IsOptimized
+	    public override bool IsOptimized()
 	    {
-	        get
+	        foreach (var reader in readers)
 	        {
-	            foreach (var reader in readers)
+	            if (!reader.IsOptimized())
 	            {
-	                if (!reader.IsOptimized)
-	                {
-	                    return false;
-	                }
+	                return false;
 	            }
-
-	            // all subindexes are optimized
-	            return true;
 	        }
+
+	        // all subindexes are optimized
+	        return true;
 	    }
 
 

Modified: incubator/lucene.net/trunk/src/core/Index/PositionBasedTermVectorMapper.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Index/PositionBasedTermVectorMapper.cs?rev=1303294&r1=1303293&r2=1303294&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Index/PositionBasedTermVectorMapper.cs (original)
+++ incubator/lucene.net/trunk/src/core/Index/PositionBasedTermVectorMapper.cs Wed Mar 21 06:04:26 2012
@@ -42,16 +42,15 @@ namespace Lucene.Net.Index
 		public PositionBasedTermVectorMapper(bool ignoringOffsets):base(false, ignoringOffsets)
 		{
 		}
-		
-		/// <summary> Never ignores positions.  This mapper doesn't make much sense unless there are positions</summary>
-		/// <returns> false
-		/// </returns>
-		public override bool IsIgnoringPositions()
-		{
-			return false;
-		}
-		
-		/// <summary> Callback for the TermVectorReader. </summary>
+
+	    /// <summary> Never ignores positions.  This mapper doesn't make much sense unless there are positions</summary>
+	    /// <value> false </value>
+	    public override bool IsIgnoringPositions
+	    {
+	        get { return false; }
+	    }
+
+	    /// <summary> Callback for the TermVectorReader. </summary>
 		/// <param name="term">
 		/// </param>
 		/// <param name="frequency">



Mime
View raw message