lucenenet-commits mailing list archives

From: ccurr...@apache.org
Subject: svn commit: r1299911 [3/14] - in /incubator/lucene.net/trunk: src/core/ src/core/Analysis/ src/core/Analysis/Standard/ src/core/Analysis/Tokenattributes/ src/core/Document/ src/core/Index/ src/core/Messages/ src/core/QueryParser/ src/core/Search/ src/c...
Date: Mon, 12 Mar 2012 22:29:37 GMT
Modified: incubator/lucene.net/trunk/src/core/Index/FieldsWriter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Index/FieldsWriter.cs?rev=1299911&r1=1299910&r2=1299911&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Index/FieldsWriter.cs (original)
+++ incubator/lucene.net/trunk/src/core/Index/FieldsWriter.cs Mon Mar 12 22:29:26 2012
@@ -16,10 +16,9 @@
  */
 
 using System;
-
+using Lucene.Net.Documents;
 using CompressionTools = Lucene.Net.Documents.CompressionTools;
 using Document = Lucene.Net.Documents.Document;
-using Fieldable = Lucene.Net.Documents.Fieldable;
 using Directory = Lucene.Net.Store.Directory;
 using IndexInput = Lucene.Net.Store.IndexInput;
 using IndexOutput = Lucene.Net.Store.IndexOutput;
@@ -152,14 +151,14 @@ namespace Lucene.Net.Index
 		// in the correct fields format.
 		internal void  FlushDocument(int numStoredFields, RAMOutputStream buffer)
 		{
-			indexStream.WriteLong(fieldsStream.GetFilePointer());
+			indexStream.WriteLong(fieldsStream.FilePointer);
 			fieldsStream.WriteVInt(numStoredFields);
 			buffer.WriteTo(fieldsStream);
 		}
 		
 		internal void  SkipDocument()
 		{
-			indexStream.WriteLong(fieldsStream.GetFilePointer());
+			indexStream.WriteLong(fieldsStream.FilePointer);
 			fieldsStream.WriteVInt(0);
 		}
 		
@@ -227,33 +226,33 @@ namespace Lucene.Net.Index
 			}
 		}
 		
-		internal void  WriteField(FieldInfo fi, Fieldable field)
+		internal void  WriteField(FieldInfo fi, IFieldable field)
 		{
 			fieldsStream.WriteVInt(fi.number);
 			byte bits = 0;
-			if (field.IsTokenized())
+			if (field.IsTokenized)
 				bits |= FieldsWriter.FIELD_IS_TOKENIZED;
-			if (field.IsBinary())
+			if (field.IsBinary)
 				bits |= FieldsWriter.FIELD_IS_BINARY;
 			
 			fieldsStream.WriteByte(bits);
 			
 			// compression is disabled for the current field
-			if (field.IsBinary())
+			if (field.IsBinary)
 			{
 				byte[] data;
 				int len;
 				int offset;
-				data = field.GetBinaryValue();
-				len = field.GetBinaryLength();
-				offset = field.GetBinaryOffset();
+				data = field.BinaryValue;
+				len = field.BinaryLength;
+				offset = field.BinaryOffset;
 					
 				fieldsStream.WriteVInt(len);
 				fieldsStream.WriteBytes(data, offset, len);
 			}
 			else
 			{
-				fieldsStream.WriteString(field.StringValue());
+				fieldsStream.WriteString(field.StringValue);
 			}
 		}
 		
@@ -265,7 +264,7 @@ namespace Lucene.Net.Index
 		/// </summary>
 		internal void  AddRawDocuments(IndexInput stream, int[] lengths, int numDocs)
 		{
-			long position = fieldsStream.GetFilePointer();
+			long position = fieldsStream.FilePointer;
 			long start = position;
 			for (int i = 0; i < numDocs; i++)
 			{
@@ -273,26 +272,26 @@ namespace Lucene.Net.Index
 				position += lengths[i];
 			}
 			fieldsStream.CopyBytes(stream, position - start);
-			System.Diagnostics.Debug.Assert(fieldsStream.GetFilePointer() == position);
+			System.Diagnostics.Debug.Assert(fieldsStream.FilePointer == position);
 		}
 		
 		internal void  AddDocument(Document doc)
 		{
-			indexStream.WriteLong(fieldsStream.GetFilePointer());
+			indexStream.WriteLong(fieldsStream.FilePointer);
 			
 			int storedCount = 0;
-		    System.Collections.Generic.IList<Fieldable> fields = doc.GetFields();
-			foreach(Fieldable field in fields)
+		    System.Collections.Generic.IList<IFieldable> fields = doc.GetFields();
+			foreach(IFieldable field in fields)
 			{
-				if (field.IsStored())
+				if (field.IsStored)
 					storedCount++;
 			}
 			fieldsStream.WriteVInt(storedCount);
 			
-			foreach(Fieldable field in fields)
+			foreach(IFieldable field in fields)
 			{
-				if (field.IsStored())
-					WriteField(fieldInfos.FieldInfo(field.Name()), field);
+				if (field.IsStored)
+					WriteField(fieldInfos.FieldInfo(field.Name), field);
 			}
 		}
 	}

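The FieldsWriter changes show the pattern this patch applies across the codebase: Java-style accessor methods (GetFilePointer(), IsBinary(), Name()) become .NET properties, and the Fieldable interface is renamed IFieldable per .NET naming conventions. For reference, a minimal sketch of what the accessor change means for calling code; the helper class here is hypothetical, but IndexOutput and FilePointer are the type and member used above:

    using Lucene.Net.Store;

    internal static class FilePointerSketch
    {
        // Illustrative only: returns the current write position of any IndexOutput
        // (such as fieldsStream above). Before this commit the call was
        // output.GetFilePointer(); the same value is now exposed as a property.
        internal static long CurrentPosition(IndexOutput output)
        {
            return output.FilePointer;
        }
    }
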
Modified: incubator/lucene.net/trunk/src/core/Index/FilterIndexReader.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Index/FilterIndexReader.cs?rev=1299911&r1=1299910&r2=1299911&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Index/FilterIndexReader.cs (original)
+++ incubator/lucene.net/trunk/src/core/Index/FilterIndexReader.cs Mon Mar 12 22:29:26 2012
@@ -106,23 +106,24 @@ namespace Lucene.Net.Index
 			{
 				return ((TermPositions) this.in_Renamed).NextPosition();
 			}
-			
-			public virtual int GetPayloadLength()
-			{
-				return ((TermPositions) this.in_Renamed).GetPayloadLength();
-			}
-			
-			public virtual byte[] GetPayload(byte[] data, int offset)
+
+		    public virtual int PayloadLength
+		    {
+		        get { return ((TermPositions) this.in_Renamed).PayloadLength; }
+		    }
+
+		    public virtual byte[] GetPayload(byte[] data, int offset)
 			{
 				return ((TermPositions) this.in_Renamed).GetPayload(data, offset);
 			}
 			
 			
 			// TODO: Remove warning after API has been finalized
-			public virtual bool IsPayloadAvailable()
-			{
-				return ((TermPositions) this.in_Renamed).IsPayloadAvailable();
-			}
+
+		    public virtual bool IsPayloadAvailable
+		    {
+		        get { return ((TermPositions) this.in_Renamed).IsPayloadAvailable; }
+		    }
 		}
 		
 		/// <summary>Base class for filtering <see cref="TermEnum" /> implementations. </summary>
@@ -202,20 +203,26 @@ namespace Lucene.Net.Index
 			EnsureOpen();
 			in_Renamed.GetTermFreqVector(docNumber, mapper);
 		}
-		
-		public override int NumDocs()
-		{
-			// Don't call ensureOpen() here (it could affect performance)
-			return in_Renamed.NumDocs();
-		}
-		
-		public override int MaxDoc()
-		{
-			// Don't call ensureOpen() here (it could affect performance)
-			return in_Renamed.MaxDoc();
-		}
-		
-		public override Document Document(int n, FieldSelector fieldSelector)
+
+	    public override int NumDocs
+	    {
+	        get
+	        {
+	            // Don't call ensureOpen() here (it could affect performance)
+	            return in_Renamed.NumDocs;
+	        }
+	    }
+
+	    public override int MaxDoc
+	    {
+	        get
+	        {
+	            // Don't call ensureOpen() here (it could affect performance)
+	            return in_Renamed.MaxDoc;
+	        }
+	    }
+
+	    public override Document Document(int n, FieldSelector fieldSelector)
 		{
 			EnsureOpen();
 			return in_Renamed.Document(n, fieldSelector);
@@ -226,14 +233,17 @@ namespace Lucene.Net.Index
 			// Don't call ensureOpen() here (it could affect performance)
 			return in_Renamed.IsDeleted(n);
 		}
-		
-		public override bool HasDeletions()
-		{
-			// Don't call ensureOpen() here (it could affect performance)
-			return in_Renamed.HasDeletions();
-		}
-		
-		protected internal override void  DoUndeleteAll()
+
+	    public override bool HasDeletions
+	    {
+	        get
+	        {
+	            // Don't call ensureOpen() here (it could affect performance)
+	            return in_Renamed.HasDeletions;
+	        }
+	    }
+
+	    protected internal override void  DoUndeleteAll()
 		{
 			in_Renamed.UndeleteAll();
 		}
@@ -322,54 +332,63 @@ namespace Lucene.Net.Index
 			EnsureOpen();
 			return in_Renamed.GetFieldNames(fieldNames);
 		}
-		
-		public override long GetVersion()
-		{
-			EnsureOpen();
-			return in_Renamed.GetVersion();
-		}
-		
-		public override bool IsCurrent()
-		{
-			EnsureOpen();
-			return in_Renamed.IsCurrent();
-		}
-		
-		public override bool IsOptimized()
-		{
-			EnsureOpen();
-			return in_Renamed.IsOptimized();
-		}
-		
-		public override IndexReader[] GetSequentialSubReaders()
-		{
-			return in_Renamed.GetSequentialSubReaders();
-		}
-		
-		override public System.Object Clone()
+
+	    public override long Version
+	    {
+	        get
+	        {
+	            EnsureOpen();
+	            return in_Renamed.Version;
+	        }
+	    }
+
+	    public override bool IsCurrent
+	    {
+	        get
+	        {
+	            EnsureOpen();
+	            return in_Renamed.IsCurrent;
+	        }
+	    }
+
+	    public override bool IsOptimized
+	    {
+	        get
+	        {
+	            EnsureOpen();
+	            return in_Renamed.IsOptimized;
+	        }
+	    }
+
+	    public override IndexReader[] SequentialSubReaders
+	    {
+	        get { return in_Renamed.SequentialSubReaders; }
+	    }
+
+	    override public System.Object Clone()
 		{
             System.Diagnostics.Debug.Fail("Port issue:", "Lets see if we need this FilterIndexReader.Clone()"); // {{Aroush-2.9}}
 			return null;
 		}
 
-        /// <summary>
-        /// If the subclass of FilteredIndexReader modifies the
-        /// contents of the FieldCache, you must override this
-        /// method to provide a different key.
-        /// </summary>
-        public override object GetFieldCacheKey() 
-        {
-            return in_Renamed.GetFieldCacheKey();
-        }
-
-        /// <summary>
-        /// If the subclass of FilteredIndexReader modifies the
-        /// deleted docs, you must override this method to provide
-        /// a different key.
-        /// </summary>
-        public override object GetDeletesCacheKey() 
-        {
-            return in_Renamed.GetDeletesCacheKey();
-        }
+	    /// <summary>
+	    /// If the subclass of FilteredIndexReader modifies the
+	    /// contents of the FieldCache, you must override this
+	    /// property to provide a different key.
+	    /// </summary>
+	    public override object FieldCacheKey
+	    {
+	        get { return in_Renamed.FieldCacheKey; }
+	    }
+
+	    /// <summary>
+	    /// If the subclass of FilteredIndexReader modifies the
+	    /// deleted docs, you must override this property to provide
+	    /// a different key.
+	    /// </summary>
+	    public override object DeletesCacheKey
+	    {
+	        get { return in_Renamed.DeletesCacheKey; }
+	    }
 	}
 }
\ No newline at end of file

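Because FilterIndexReader's delegating methods are now properties, subclasses override them with property syntax. A hedged sketch of the new override shape; the subclass and its caching key are hypothetical:

    using Lucene.Net.Index;

    public class CachingFilterReader : FilterIndexReader
    {
        private readonly object cacheKey = new object();

        public CachingFilterReader(IndexReader inner) : base(inner) { }

        // Formerly 'public override object GetFieldCacheKey()'. Per the doc
        // comment above, a subclass that modifies FieldCache contents must
        // override this member, now as a read-only property.
        public override object FieldCacheKey
        {
            get { return cacheKey; }
        }
    }
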
Modified: incubator/lucene.net/trunk/src/core/Index/FormatPostingsTermsWriter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Index/FormatPostingsTermsWriter.cs?rev=1299911&r1=1299910&r2=1299911&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Index/FormatPostingsTermsWriter.cs (original)
+++ incubator/lucene.net/trunk/src/core/Index/FormatPostingsTermsWriter.cs Mon Mar 12 22:29:26 2012
@@ -55,9 +55,9 @@ namespace Lucene.Net.Index
 			// TODO: this is abstraction violation -- ideally this
 			// terms writer is not so "invasive", looking for file
 			// pointers in its child consumers.
-			freqStart = docsWriter.out_Renamed.GetFilePointer();
+			freqStart = docsWriter.out_Renamed.FilePointer;
 			if (docsWriter.posWriter.out_Renamed != null)
-				proxStart = docsWriter.posWriter.out_Renamed.GetFilePointer();
+				proxStart = docsWriter.posWriter.out_Renamed.FilePointer;
 			
 			parent.skipListWriter.ResetSkip();
 			

Modified: incubator/lucene.net/trunk/src/core/Index/FreqProxTermsWriterPerField.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Index/FreqProxTermsWriterPerField.cs?rev=1299911&r1=1299910&r2=1299911&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Index/FreqProxTermsWriterPerField.cs (original)
+++ incubator/lucene.net/trunk/src/core/Index/FreqProxTermsWriterPerField.cs Mon Mar 12 22:29:26 2012
@@ -16,9 +16,8 @@
  */
 
 using System;
-
+using Lucene.Net.Documents;
 using PayloadAttribute = Lucene.Net.Analysis.Tokenattributes.PayloadAttribute;
-using Fieldable = Lucene.Net.Documents.Fieldable;
 
 namespace Lucene.Net.Index
 {
@@ -78,15 +77,15 @@ namespace Lucene.Net.Index
 			payloadAttribute = null;
 		}
 		
-		internal override bool Start(Fieldable[] fields, int count)
+		internal override bool Start(IFieldable[] fields, int count)
 		{
 			for (int i = 0; i < count; i++)
-				if (fields[i].IsIndexed())
+				if (fields[i].IsIndexed)
 					return true;
 			return false;
 		}
 		
-		internal override void  Start(Fieldable f)
+		internal override void  Start(IFieldable f)
 		{
             if (fieldState.attributeSource.HasAttribute<PayloadAttribute>())
 			{
@@ -107,7 +106,7 @@ namespace Lucene.Net.Index
 			}
 			else
 			{
-				payload = payloadAttribute.GetPayload();
+				payload = payloadAttribute.Payload;
 			}
 			
 			if (payload != null && payload.length > 0)

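The payload accessor on PayloadAttribute becomes a Payload property as well. A small illustrative helper; HasAttribute<T> is the lookup used in this file, and GetAttribute<T> is assumed to be its companion accessor at this revision:

    using Lucene.Net.Analysis.Tokenattributes;
    using Lucene.Net.Util;

    internal static class PayloadSketch
    {
        // True when the current token carries a payload.
        internal static bool HasPayload(AttributeSource source)
        {
            if (!source.HasAttribute<PayloadAttribute>())
                return false;
            PayloadAttribute attr = source.GetAttribute<PayloadAttribute>();
            return attr.Payload != null;   // property form; was attr.GetPayload()
        }
    }
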
Modified: incubator/lucene.net/trunk/src/core/Index/IndexCommit.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Index/IndexCommit.cs?rev=1299911&r1=1299910&r2=1299911&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Index/IndexCommit.cs (original)
+++ incubator/lucene.net/trunk/src/core/Index/IndexCommit.cs Mon Mar 12 22:29:26 2012
@@ -17,6 +17,7 @@
 
 using System;
 using System.Collections.Generic;
+using Lucene.Net.Store;
 using Directory = Lucene.Net.Store.Directory;
 
 namespace Lucene.Net.Index
@@ -42,19 +43,18 @@ namespace Lucene.Net.Index
 	
 	public abstract class IndexCommit
 	{
-		
-		/// <summary> Get the segments file (<c>segments_N</c>) associated 
-		/// with this commit point.
-		/// </summary>
-		public abstract System.String GetSegmentsFileName();
-		
-		/// <summary> Returns all index files referenced by this commit point.</summary>
-		public abstract ICollection<string> GetFileNames();
-		
-		/// <summary> Returns the <see cref="Directory" /> for the index.</summary>
-		public abstract Directory GetDirectory();
-		
-		/// <summary> Delete this commit point.  This only applies when using
+	    /// <summary> Get the segments file (<c>segments_N</c>) associated 
+	    /// with this commit point.
+	    /// </summary>
+	    public abstract string SegmentsFileName { get; }
+
+	    /// <summary> Returns all index files referenced by this commit point.</summary>
+	    public abstract ICollection<string> FileNames { get; }
+
+	    /// <summary> Returns the <see cref="Store.Directory" /> for the index.</summary>
+	    public abstract Directory Directory { get; }
+
+	    /// <summary> Delete this commit point.  This only applies when using
 		/// the commit point in the context of IndexWriter's
 		/// IndexDeletionPolicy.
 		/// <p/>
@@ -67,9 +67,9 @@ namespace Lucene.Net.Index
 		/// </summary>
         public abstract void Delete();
 
-        public abstract bool IsDeleted();
-		
-		/// <summary> Returns true if this commit is an optimized index.</summary>
+	    public abstract bool IsDeleted { get; }
+
+	    /// <summary> Returns true if this commit is an optimized index.</summary>
         public abstract bool IsOptimized();
 
         /// <summary> Two IndexCommits are equal if both their Directory and versions are equal.</summary>
@@ -78,7 +78,7 @@ namespace Lucene.Net.Index
 			if (other is IndexCommit)
 			{
 				IndexCommit otherCommit = (IndexCommit) other;
-				return otherCommit.GetDirectory().Equals(GetDirectory()) && otherCommit.GetVersion() == GetVersion();
+				return otherCommit.Directory.Equals(Directory) && otherCommit.Version == Version;
 			}
 			else
 				return false;
@@ -86,34 +86,34 @@ namespace Lucene.Net.Index
 		
 		public override int GetHashCode()
 		{
-			return (int)(GetDirectory().GetHashCode() + GetVersion());
-		}
-		
-		/// <summary>Returns the version for this IndexCommit.  This is the
-		/// same value that <see cref="IndexReader.GetVersion" /> would
-		/// return if it were opened on this commit. 
-		/// </summary>
-        public abstract long GetVersion();
-		
-		/// <summary>Returns the generation (the _N in segments_N) for this
-		/// IndexCommit 
-		/// </summary>
-        public abstract long GetGeneration();
-		
-		/// <summary>Convenience method that returns the last modified time
-		/// of the segments_N file corresponding to this index
-		/// commit, equivalent to
-		/// getDirectory().fileModified(getSegmentsFileName()). 
-		/// </summary>
-		public virtual long GetTimestamp()
-		{
-			return GetDirectory().FileModified(GetSegmentsFileName());
+			return (int)(Directory.GetHashCode() + Version);
 		}
 
-        /// <summary>Returns userData, previously passed to 
-        /// <see cref="IndexWriter.Commit(System.Collections.Generic.IDictionary{string, string})" />
-		/// for this commit.  IDictionary is String -> String. 
-		/// </summary>
-        public abstract IDictionary<string, string> GetUserData();
+	    /// <summary>Returns the version for this IndexCommit.  This is the
+	    /// same value that <see cref="IndexReader.GetVersion" /> would
+	    /// return if it were opened on this commit. 
+	    /// </summary>
+	    public abstract long Version { get; }
+
+	    /// <summary>Returns the generation (the _N in segments_N) for this
+	    /// IndexCommit 
+	    /// </summary>
+	    public abstract long Generation { get; }
+
+	    /// <summary>Convenience method that returns the last modified time
+	    /// of the segments_N file corresponding to this index
+	    /// commit, equivalent to
+	    /// getDirectory().fileModified(getSegmentsFileName()). 
+	    /// </summary>
+	    public virtual long Timestamp
+	    {
+	        get { return Directory.FileModified(SegmentsFileName); }
+	    }
+
+	    /// <summary>Returns userData, previously passed to 
+	    /// <see cref="IndexWriter.Commit(System.Collections.Generic.IDictionary{string, string})" />
+	    /// for this commit.  IDictionary is String -> String. 
+	    /// </summary>
+	    public abstract IDictionary<string, string> UserData { get; }
 	}
 }
\ No newline at end of file

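With IndexCommit's getters converted, commit inspection reads as below. A rough sketch only; IndexReader.ListCommits keeps its method form in this patch, and its exact return collection type is assumed:

    using System;
    using Lucene.Net.Index;
    using Lucene.Net.Store;

    internal static class CommitSketch
    {
        internal static void PrintCommits(Directory dir)
        {
            // Each accessor below replaces a GetXxx() method removed by this commit.
            foreach (IndexCommit commit in IndexReader.ListCommits(dir))
            {
                Console.WriteLine(commit.SegmentsFileName + " v" + commit.Version
                                  + " gen=" + commit.Generation);
            }
        }
    }
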
Modified: incubator/lucene.net/trunk/src/core/Index/IndexFileDeleter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Index/IndexFileDeleter.cs?rev=1299911&r1=1299910&r2=1299911&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Index/IndexFileDeleter.cs (original)
+++ incubator/lucene.net/trunk/src/core/Index/IndexFileDeleter.cs Mon Mar 12 22:29:26 2012
@@ -140,8 +140,8 @@ namespace Lucene.Net.Index
 			
 			// First pass: walk the files and initialize our ref
 			// counts:
-			long currentGen = segmentInfos.GetGeneration();
-			IndexFileNameFilter filter = IndexFileNameFilter.GetFilter();
+			long currentGen = segmentInfos.Generation;
+			IndexFileNameFilter filter = IndexFileNameFilter.Filter;
 			
 			System.String[] files = directory.ListAll();
 			
@@ -205,14 +205,14 @@ namespace Lucene.Net.Index
                         if (sis != null)
                         {
                             CommitPoint commitPoint = new CommitPoint(this, commitsToDelete, directory, sis);
-                            if (sis.GetGeneration() == segmentInfos.GetGeneration())
+                            if (sis.Generation == segmentInfos.Generation)
                             {
                                 currentCommitPoint = commitPoint;
                             }
                             commits.Add(commitPoint);
                             IncRef(sis, true);
 
-                            if (lastSegmentInfos == null || sis.GetGeneration() > lastSegmentInfos.GetGeneration())
+                            if (lastSegmentInfos == null || sis.Generation > lastSegmentInfos.Generation)
                             {
                                 lastSegmentInfos = sis;
                             }
@@ -274,17 +274,17 @@ namespace Lucene.Net.Index
 			// sometime it may not be the most recent commit
 			Checkpoint(segmentInfos, false);
 			
-			startingCommitDeleted = currentCommitPoint.IsDeleted();
+			startingCommitDeleted = currentCommitPoint.IsDeleted;
 			
 			DeleteCommits();
 		}
 
-        public SegmentInfos GetLastSegmentInfos()
+        public SegmentInfos LastSegmentInfos
         {
-            return lastSegmentInfos;
+            get { return lastSegmentInfos; }
         }
-		
-		/// <summary> Remove the CommitPoints in the commitsToDelete List by
+
+        /// <summary> Remove the CommitPoints in the commitsToDelete List by
 		/// DecRef'ing all files from each SegmentInfos.
 		/// </summary>
 		private void  DeleteCommits()
@@ -302,7 +302,7 @@ namespace Lucene.Net.Index
 					CommitPoint commit = commitsToDelete[i];
 					if (infoStream != null)
 					{
-						Message("deleteCommits: now decRef commit \"" + commit.GetSegmentsFileName() + "\"");
+						Message("deleteCommits: now decRef commit \"" + commit.SegmentsFileName + "\"");
 					}
 					foreach(string file in commit.files)
 					{
@@ -347,7 +347,7 @@ namespace Lucene.Net.Index
 		public void  Refresh(System.String segmentName)
 		{
 			System.String[] files = directory.ListAll();
-			IndexFileNameFilter filter = IndexFileNameFilter.GetFilter();
+			IndexFileNameFilter filter = IndexFileNameFilter.Filter;
 			System.String segmentPrefix1;
 			System.String segmentPrefix2;
 			if (segmentName != null)
@@ -720,12 +720,12 @@ namespace Lucene.Net.Index
 				InitBlock(enclosingInstance);
 				this.directory = directory;
 				this.commitsToDelete = commitsToDelete;
-				userData = segmentInfos.GetUserData();
+				userData = segmentInfos.UserData;
 				segmentsFileName = segmentInfos.GetCurrentSegmentFileName();
-				version = segmentInfos.GetVersion();
-				generation = segmentInfos.GetGeneration();
+				version = segmentInfos.Version;
+				generation = segmentInfos.Generation;
                 files = segmentInfos.Files(directory, true);
-				gen = segmentInfos.GetGeneration();
+				gen = segmentInfos.Generation;
 				isOptimized = segmentInfos.Count == 1 && !segmentInfos.Info(0).HasDeletions();
 				
 				System.Diagnostics.Debug.Assert(!segmentInfos.HasExternalSegments(directory));
@@ -740,38 +740,38 @@ namespace Lucene.Net.Index
 			{
 				return isOptimized;
 			}
-			
-			public override System.String GetSegmentsFileName()
-			{
-				return segmentsFileName;
-			}
 
-            public override ICollection<string> GetFileNames()
-			{
-				return files;
-			}
-			
-			public override Directory GetDirectory()
-			{
-				return directory;
-			}
-			
-			public override long GetVersion()
-			{
-				return version;
-			}
-			
-			public override long GetGeneration()
-			{
-				return generation;
-			}
+		    public override string SegmentsFileName
+		    {
+		        get { return segmentsFileName; }
+		    }
+
+		    public override ICollection<string> FileNames
+		    {
+		        get { return files; }
+		    }
+
+		    public override Directory Directory
+		    {
+		        get { return directory; }
+		    }
+
+		    public override long Version
+		    {
+		        get { return version; }
+		    }
+
+		    public override long Generation
+		    {
+		        get { return generation; }
+		    }
+
+		    public override IDictionary<string, string> UserData
+		    {
+		        get { return userData; }
+		    }
 
-            public override IDictionary<string, string> GetUserData()
-			{
-				return userData;
-			}
-			
-			/// <summary> Called only by the deletion policy, to remove this
+		    /// <summary> Called only by the deletion policy, to remove this
 			/// commit point from the index.
 			/// </summary>
 			public override void  Delete()
@@ -782,13 +782,13 @@ namespace Lucene.Net.Index
 					Enclosing_Instance.commitsToDelete.Add(this);
 				}
 			}
-			
-			public override bool IsDeleted()
-			{
-				return deleted;
-			}
 
-            public int CompareTo(CommitPoint commit)
+		    public override bool IsDeleted
+		    {
+		        get { return deleted; }
+		    }
+
+		    public int CompareTo(CommitPoint commit)
 			{
 				if (gen < commit.gen)
 				{

Modified: incubator/lucene.net/trunk/src/core/Index/IndexFileNameFilter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Index/IndexFileNameFilter.cs?rev=1299911&r1=1299910&r2=1299911&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Index/IndexFileNameFilter.cs (original)
+++ incubator/lucene.net/trunk/src/core/Index/IndexFileNameFilter.cs Mon Mar 12 22:29:26 2012
@@ -98,10 +98,10 @@ namespace Lucene.Net.Index
 			}
 			return false;
 		}
-		
-		public static IndexFileNameFilter GetFilter()
-		{
-			return singleton;
-		}
+
+	    public static IndexFileNameFilter Filter
+	    {
+	        get { return singleton; }
+	    }
 	}
 }
\ No newline at end of file

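The singleton accessor becomes a static property, matching the call sites updated in IndexFileDeleter above. A trivial, purely illustrative wrapper:

    using Lucene.Net.Index;

    internal static class FilterSketch
    {
        // Was IndexFileNameFilter.GetFilter().
        internal static IndexFileNameFilter Current()
        {
            return IndexFileNameFilter.Filter;
        }
    }
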
Modified: incubator/lucene.net/trunk/src/core/Index/IndexReader.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Index/IndexReader.cs?rev=1299911&r1=1299910&r2=1299911&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Index/IndexReader.cs (original)
+++ incubator/lucene.net/trunk/src/core/Index/IndexReader.cs Mon Mar 12 22:29:26 2012
@@ -16,7 +16,8 @@
  */
 
 using System;
-
+using System.Collections.Generic;
+using Lucene.Net.Documents;
 using Document = Lucene.Net.Documents.Document;
 using FieldSelector = Lucene.Net.Documents.FieldSelector;
 using Lucene.Net.Store;
@@ -127,17 +128,20 @@ namespace Lucene.Net.Index
 		private int refCount;
 		
 		protected internal static int DEFAULT_TERMS_INDEX_DIVISOR = 1;
-		
-		/// <summary>Expert: returns the current refCount for this reader </summary>
-		public virtual int GetRefCount()
-		{
-			lock (this)
-			{
-				return refCount;
-			}
-		}
-		
-		/// <summary> Expert: increments the refCount of this IndexReader
+
+	    /// <summary>Expert: returns the current refCount for this reader </summary>
+	    public virtual int RefCount
+	    {
+	        get
+	        {
+	            lock (this)
+	            {
+	                return refCount;
+	            }
+	        }
+	    }
+
+	    /// <summary> Expert: increments the refCount of this IndexReader
 		/// instance.  RefCounts are used to determine when a
 		/// reader can be closed safely, i.e. as soon as there are
 		/// no more references.  Be sure to always call a
@@ -242,7 +246,7 @@ namespace Lucene.Net.Index
 		/// <throws>  IOException if there is a low-level IO error </throws>
 		public static IndexReader Open(IndexCommit commit, bool readOnly)
 		{
-			return Open(commit.GetDirectory(), null, commit, readOnly, DEFAULT_TERMS_INDEX_DIVISOR);
+			return Open(commit.Directory, null, commit, readOnly, DEFAULT_TERMS_INDEX_DIVISOR);
 		}
 		
 		/// <summary>Expert: returns an IndexReader reading the index in
@@ -321,7 +325,7 @@ namespace Lucene.Net.Index
 		/// <throws>  IOException if there is a low-level IO error </throws>
 		public static IndexReader Open(IndexCommit commit, IndexDeletionPolicy deletionPolicy, bool readOnly)
 		{
-			return Open(commit.GetDirectory(), deletionPolicy, commit, readOnly, DEFAULT_TERMS_INDEX_DIVISOR);
+			return Open(commit.Directory, deletionPolicy, commit, readOnly, DEFAULT_TERMS_INDEX_DIVISOR);
 		}
 		
 		/// <summary>Expert: returns an IndexReader reading the index in
@@ -356,7 +360,7 @@ namespace Lucene.Net.Index
 		/// <throws>  IOException if there is a low-level IO error </throws>
 		public static IndexReader Open(IndexCommit commit, IndexDeletionPolicy deletionPolicy, bool readOnly, int termInfosIndexDivisor)
 		{
-			return Open(commit.GetDirectory(), deletionPolicy, commit, readOnly, termInfosIndexDivisor);
+			return Open(commit.Directory, deletionPolicy, commit, readOnly, termInfosIndexDivisor);
 		}
 		
 		private static IndexReader Open(Directory directory, IndexDeletionPolicy deletionPolicy, IndexCommit commit, bool readOnly, int termInfosIndexDivisor)
@@ -540,96 +544,95 @@ namespace Lucene.Net.Index
 		{
 			return SegmentInfos.ReadCurrentUserData(directory);
 		}
-		
-		/// <summary> Version number when this IndexReader was opened. Not implemented in the
-		/// IndexReader base class.
-		/// 
-		/// <p/>
-		/// If this reader is based on a Directory (ie, was created by calling
-		/// <see cref="Open(Lucene.Net.Store.Directory)" />, or <see cref="Reopen()" /> 
-		/// on a reader based on a Directory), then
-		/// this method returns the version recorded in the commit that the reader
-		/// opened. This version is advanced every time <see cref="IndexWriter.Commit()" /> is
-		/// called.
-		/// <p/>
-		/// 
-		/// <p/>
-		/// If instead this reader is a near real-time reader (ie, obtained by a call
-		/// to <see cref="IndexWriter.GetReader()" />, or by calling <see cref="Reopen()" /> on a near
-		/// real-time reader), then this method returns the version of the last
-		/// commit done by the writer. Note that even as further changes are made
-		/// with the writer, the version will not change until a commit is
-		/// completed. Thus, you should not rely on this method to determine when a
-		/// near real-time reader should be opened. Use <see cref="IsCurrent" /> instead.
-		/// <p/>
-		/// 
-		/// </summary>
-		/// <throws>  UnsupportedOperationException unless overridden in subclass </throws>
-		public virtual long GetVersion()
-		{
-			throw new System.NotSupportedException("This reader does not support this method.");
-		}
-		
-		/// <summary> Retrieve the String userData optionally passed to
-        /// <see cref="IndexWriter.Commit(System.Collections.Generic.IDictionary{string, string})" />.  
-        /// This will return null if 
-        /// <see cref="IndexWriter.Commit(System.Collections.Generic.IDictionary{string, string})" />
-		/// has never been called for this index.
-		/// </summary>
-		/// <seealso cref="GetCommitUserData(Directory)">
-		/// </seealso>
-        public virtual System.Collections.Generic.IDictionary<string, string> GetCommitUserData()
-		{
-			throw new System.NotSupportedException("This reader does not support this method.");
-		}
-		
-		/// <summary> Check whether any new changes have occurred to the index since this
-		/// reader was opened.
-		/// 
-		/// <p/>
-		/// If this reader is based on a Directory (ie, was created by calling
-		/// <see cref="Open(Store.Directory)" />, or <see cref="Reopen()" /> on a reader based on a Directory), then
-		/// this method checks if any further commits (see <see cref="IndexWriter.Commit()" />
-		/// have occurred in that directory).
-		/// <p/>
-		/// 
-		/// <p/>
-		/// If instead this reader is a near real-time reader (ie, obtained by a call
-		/// to <see cref="IndexWriter.GetReader()" />, or by calling <see cref="Reopen()" /> on a near
-		/// real-time reader), then this method checks if either a new commit has
-		/// occurred, or any new uncommitted changes have taken place via the writer.
-		/// Note that even if the writer has only performed merging, this method will
-		/// still return false.
-		/// <p/>
-		/// 
-		/// <p/>
-		/// In any event, if this returns false, you should call <see cref="Reopen()" /> to
-		/// get a new reader that sees the changes.
-		/// <p/>
-		/// 
-		/// </summary>
-		/// <throws>  CorruptIndexException if the index is corrupt </throws>
-		/// <throws>  IOException if there is a low-level IO error </throws>
-		/// <throws>  UnsupportedOperationException unless overridden in subclass </throws>
-		public virtual bool IsCurrent()
-		{
-			throw new System.NotSupportedException("This reader does not support this method.");
-		}
-		
-		/// <summary> Checks if the index is optimized (if it has a single segment and
-		/// no deletions).  Not implemented in the IndexReader base class.
-		/// </summary>
-		/// <returns> <c>true</c> if the index is optimized; <c>false</c> otherwise
-		/// </returns>
-		/// <throws>  UnsupportedOperationException unless overridden in subclass </throws>
-		public virtual bool IsOptimized()
-		{
-			throw new System.NotSupportedException("This reader does not support this method.");
-		}
-		
-		/// <summary> Return an array of term frequency vectors for the specified document.
+
+	    /// <summary> Version number when this IndexReader was opened. Not implemented in the
+	    /// IndexReader base class.
+	    /// 
+	    /// <p/>
+	    /// If this reader is based on a Directory (ie, was created by calling
+	    /// <see cref="Open(Lucene.Net.Store.Directory)" />, or <see cref="Reopen()" /> 
+	    /// on a reader based on a Directory), then
+	    /// this method returns the version recorded in the commit that the reader
+	    /// opened. This version is advanced every time <see cref="IndexWriter.Commit()" /> is
+	    /// called.
+	    /// <p/>
+	    /// 
+	    /// <p/>
+	    /// If instead this reader is a near real-time reader (ie, obtained by a call
+	    /// to <see cref="IndexWriter.GetReader()" />, or by calling <see cref="Reopen()" /> on a near
+	    /// real-time reader), then this method returns the version of the last
+	    /// commit done by the writer. Note that even as further changes are made
+	    /// with the writer, the version will not change until a commit is
+	    /// completed. Thus, you should not rely on this method to determine when a
+	    /// near real-time reader should be opened. Use <see cref="IsCurrent" /> instead.
+	    /// <p/>
+	    /// 
+	    /// </summary>
+	    /// <throws>  UnsupportedOperationException unless overridden in subclass </throws>
+	    public virtual long Version
+	    {
+	        get { throw new System.NotSupportedException("This reader does not support this method."); }
+	    }
+
+	    /// <summary> Retrieve the String userData optionally passed to
+	    /// <see cref="IndexWriter.Commit(System.Collections.Generic.IDictionary{string, string})" />.  
+	    /// This will return null if 
+	    /// <see cref="IndexWriter.Commit(System.Collections.Generic.IDictionary{string, string})" />
+	    /// has never been called for this index.
+	    /// </summary>
+	    /// <seealso cref="GetCommitUserData(Directory)">
+	    /// </seealso>
+	    public virtual IDictionary<string, string> CommitUserData
+	    {
+	        get { throw new System.NotSupportedException("This reader does not support this method."); }
+	    }
+
+	    /// <summary> Check whether any new changes have occurred to the index since this
+	    /// reader was opened.
+	    /// 
+	    /// <p/>
+	    /// If this reader is based on a Directory (ie, was created by calling
+	    /// <see cref="Open(Store.Directory)" />, or <see cref="Reopen()" /> on a reader based on a Directory), then
+	    /// this method checks if any further commits (see <see cref="IndexWriter.Commit()" />
+	    /// have occurred in that directory).
+	    /// <p/>
+	    /// 
+	    /// <p/>
+	    /// If instead this reader is a near real-time reader (ie, obtained by a call
+	    /// to <see cref="IndexWriter.GetReader()" />, or by calling <see cref="Reopen()" /> on a near
+	    /// real-time reader), then this method checks if either a new commit has
+	    /// occurred, or any new uncommitted changes have taken place via the writer.
+	    /// Note that even if the writer has only performed merging, this method will
+	    /// still return false.
+	    /// <p/>
+	    /// 
+	    /// <p/>
+	    /// In any event, if this returns false, you should call <see cref="Reopen()" /> to
+	    /// get a new reader that sees the changes.
+	    /// <p/>
+	    /// 
+	    /// </summary>
+	    /// <throws>  CorruptIndexException if the index is corrupt </throws>
+	    /// <throws>  IOException if there is a low-level IO error </throws>
+	    /// <throws>  UnsupportedOperationException unless overridden in subclass </throws>
+	    public virtual bool IsCurrent
+	    {
+	        get { throw new System.NotSupportedException("This reader does not support this method."); }
+	    }
+
+	    /// <summary> Checks if the index is optimized (if it has a single segment and
+	    /// no deletions).  Not implemented in the IndexReader base class.
+	    /// </summary>
+	    /// <value> <c>true</c> if the index is optimized; <c>false</c> otherwise </value>
+	    /// <throws>  UnsupportedOperationException unless overridden in subclass </throws>
+	    public virtual bool IsOptimized
+	    {
+	        get { throw new System.NotSupportedException("This reader does not support this method."); }
+	    }
+
+	    /// <summary> Return an array of term frequency vectors for the specified document.
 		/// The array contains a vector for each vectorized field in the document.
 		/// Each vector contains terms and frequencies for all terms in a given vectorized field.
 		/// If no such fields existed, the method returns null. The term vectors that are
@@ -702,23 +705,23 @@ namespace Lucene.Net.Index
 		{
 			return SegmentInfos.GetCurrentSegmentGeneration(directory) != - 1;
 		}
-		
-		/// <summary>Returns the number of documents in this index. </summary>
-		public abstract int NumDocs();
-		
-		/// <summary>Returns one greater than the largest possible document number.
-		/// This may be used to, e.g., determine how big to allocate an array which
-		/// will have an element for every document number in an index.
-		/// </summary>
-		public abstract int MaxDoc();
-		
-		/// <summary>Returns the number of deleted documents. </summary>
-		public virtual int NumDeletedDocs()
-		{
-			return MaxDoc() - NumDocs();
-		}
-		
-		/// <summary> Returns the stored fields of the <c>n</c><sup>th</sup>
+
+	    /// <summary>Returns the number of documents in this index. </summary>
+	    public abstract int NumDocs { get; }
+
+	    /// <summary>Returns one greater than the largest possible document number.
+	    /// This may be used to, e.g., determine how big to allocate an array which
+	    /// will have an element for every document number in an index.
+	    /// </summary>
+	    public abstract int MaxDoc { get; }
+
+	    /// <summary>Returns the number of deleted documents. </summary>
+	    public virtual int NumDeletedDocs
+	    {
+	        get { return MaxDoc - NumDocs; }
+	    }
+
+	    /// <summary> Returns the stored fields of the <c>n</c><sup>th</sup>
 		/// <c>Document</c> in this index.
 		/// <p/>
 		/// <b>NOTE:</b> for performance reasons, this method does not check if the
@@ -735,6 +738,23 @@ namespace Lucene.Net.Index
 			EnsureOpen();
 			return Document(n, null);
 		}
+
+        /// <summary> Returns the stored fields of the <c>n</c><sup>th</sup>
+        /// <c>Document</c> in this index.
+        /// <p/>
+        /// <b>NOTE:</b> for performance reasons, this method does not check if the
+        /// requested document is deleted, and therefore asking for a deleted document
+        /// may yield unspecified results. Usually this is not required, however you
+        /// can call <see cref="IsDeleted(int)" /> with the requested document ID to verify
+        /// the document is not deleted.
+        /// 
+        /// </summary>
+        /// <throws>  CorruptIndexException if the index is corrupt </throws>
+        /// <throws>  IOException if there is a low-level IO error </throws>
+	    public Document this[int doc]
+	    {
+	        get { return Document(doc); }
+	    }
 		
 		/// <summary> Get the <see cref="Lucene.Net.Documents.Document" /> at the <c>n</c>
 		/// <sup>th</sup> position. The <see cref="FieldSelector" /> may be used to determine
@@ -764,7 +784,7 @@ namespace Lucene.Net.Index
 		/// </returns>
 		/// <throws>  CorruptIndexException if the index is corrupt </throws>
 		/// <throws>  IOException if there is a low-level IO error </throws>
-		/// <seealso cref="Lucene.Net.Documents.Fieldable">
+		/// <seealso cref="IFieldable">
 		/// </seealso>
 		/// <seealso cref="Lucene.Net.Documents.FieldSelector">
 		/// </seealso>
@@ -777,11 +797,11 @@ namespace Lucene.Net.Index
 		
 		/// <summary>Returns true if document <i>n</i> has been deleted </summary>
 		public abstract bool IsDeleted(int n);
-		
-		/// <summary>Returns true if any documents have been deleted </summary>
-		public abstract bool HasDeletions();
-		
-		/// <summary>Returns true if there are norms stored for this field. </summary>
+
+	    /// <summary>Returns true if any documents have been deleted </summary>
+	    public abstract bool HasDeletions { get; }
+
+	    /// <summary>Returns true if there are norms stored for this field. </summary>
 		public virtual bool HasNorms(System.String field)
 		{
 			// backward compatible implementation.
@@ -807,7 +827,7 @@ namespace Lucene.Net.Index
 		public abstract void  Norms(System.String field, byte[] bytes, int offset);
 		
 		/// <summary>Expert: Resets the normalization factor for the named field of the named
-		/// document.  The norm represents the product of the field's <see cref="Lucene.Net.Documents.Fieldable.SetBoost(float)">boost</see>
+		/// document.  The norm represents the product of the field's <see cref="IFieldable.SetBoost(float)">boost</see>
         /// and its <see cref="Similarity.LengthNorm(String,int)">length normalization</see>.  Thus, to preserve the length normalization
 		/// values when resetting this, one should base the new value upon the old.
 		/// 
@@ -1169,21 +1189,21 @@ namespace Lucene.Net.Index
 		/// <seealso cref="IndexReader.FieldOption">
 		/// </seealso>
 		public abstract System.Collections.Generic.ICollection<string> GetFieldNames(FieldOption fldOption);
-		
-		/// <summary> Expert: return the IndexCommit that this reader has
-		/// opened.  This method is only implemented by those
-		/// readers that correspond to a Directory with its own
-		/// segments_N file.
-		/// 
-		/// <p/><b>WARNING</b>: this API is new and experimental and
-		/// may suddenly change.<p/>
-		/// </summary>
-		public virtual IndexCommit GetIndexCommit()
-		{
-			throw new System.NotSupportedException("This reader does not support this method.");
-		}
-		
-		/// <summary> Prints the filename and size of each file within a given compound file.
+
+	    /// <summary> Expert: return the IndexCommit that this reader has
+	    /// opened.  This method is only implemented by those
+	    /// readers that correspond to a Directory with its own
+	    /// segments_N file.
+	    /// 
+	    /// <p/><b>WARNING</b>: this API is new and experimental and
+	    /// may suddenly change.<p/>
+	    /// </summary>
+	    public virtual IndexCommit IndexCommit
+	    {
+	        get { throw new System.NotSupportedException("This reader does not support this method."); }
+	    }
+
+	    /// <summary> Prints the filename and size of each file within a given compound file.
 		/// Add the -extract flag to extract files to the current working directory.
 		/// In order to make the extracted version of the index work, you have to copy
 		/// the segments file from the compound index into the directory where the extracted files are stored.
@@ -1294,75 +1314,71 @@ namespace Lucene.Net.Index
 		{
 			return DirectoryReader.ListCommits(dir);
 		}
-		
-		/// <summary>Expert: returns the sequential sub readers that this
-		/// reader is logically composed of.  For example,
-		/// IndexSearcher uses this API to drive searching by one
-		/// sub reader at a time.  If this reader is not composed
-		/// of sequential child readers, it should return null.
-		/// If this method returns an empty array, that means this
-		/// reader is a null reader (for example a MultiReader
-		/// that has no sub readers).
-		/// <p/>
-		/// NOTE: You should not try using sub-readers returned by
-		/// this method to make any changes (setNorm, deleteDocument,
-		/// etc.). While this might succeed for one composite reader
-		/// (like MultiReader), it will most likely lead to index
-		/// corruption for other readers (like DirectoryReader obtained
-		/// through <see cref="IndexReader.Open(Lucene.Net.Store.Directory,bool)" />. Use the parent reader directly. 
-		/// </summary>
-		public virtual IndexReader[] GetSequentialSubReaders()
-		{
-			return null;
-		}
-		
-		/// <summary>Expert</summary>
-		public virtual System.Object GetFieldCacheKey()
-		{
-			return this;
-		}
 
-        /** Expert.  Warning: this returns null if the reader has
+	    /// <summary>Expert: returns the sequential sub readers that this
+	    /// reader is logically composed of.  For example,
+	    /// IndexSearcher uses this API to drive searching by one
+	    /// sub reader at a time.  If this reader is not composed
+	    /// of sequential child readers, it should return null.
+	    /// If this method returns an empty array, that means this
+	    /// reader is a null reader (for example a MultiReader
+	    /// that has no sub readers).
+	    /// <p/>
+	    /// NOTE: You should not try using sub-readers returned by
+	    /// this method to make any changes (setNorm, deleteDocument,
+	    /// etc.). While this might succeed for one composite reader
+	    /// (like MultiReader), it will most likely lead to index
+	    /// corruption for other readers (like DirectoryReader obtained
+	    /// through <see cref="IndexReader.Open(Lucene.Net.Store.Directory,bool)" />. Use the parent reader directly. 
+	    /// </summary>
+	    public virtual IndexReader[] SequentialSubReaders
+	    {
+	        get { return null; }
+	    }
+
+	    /// <summary>Expert</summary>
+	    public virtual object FieldCacheKey
+	    {
+	        get { return this; }
+	    }
+
+	    /** Expert.  Warning: this returns null if the reader has
           *  no deletions 
           */
-        public virtual object GetDeletesCacheKey()
-        {
-            return this;
-        }
-		
-		/// <summary>Returns the number of unique terms (across all fields)
-		/// in this reader.
-		/// 
-		/// This method returns long, even though internally
-		/// Lucene cannot handle more than 2^31 unique terms, for
-		/// a possible future when this limitation is removed.
-		/// 
-		/// </summary>
-		/// <throws>  UnsupportedOperationException if this count
-		/// cannot be easily determined (eg Multi*Readers).
-		/// Instead, you should call <see cref="GetSequentialSubReaders" />
-		/// and ask each sub reader for
-		/// its unique term count. </throws>
-		public virtual long GetUniqueTermCount()
-		{
-			throw new System.NotSupportedException("this reader does not implement getUniqueTermCount()");
-		}
 
-        /// <summary>
-        /// For IndexReader implementations that use
-        /// TermInfosReader to read terms, this returns the
-        /// current indexDivisor as specified when the reader was
-        /// opened.
-        /// </summary>
-        public virtual int GetTermInfosIndexDivisor()
-        {
-            throw new NotSupportedException("This reader does not support this method.");
-        }
+	    public virtual object DeletesCacheKey
+	    {
+	        get { return this; }
+	    }
 
-        public bool hasChanges_ForNUnit
-        {
-            get { return hasChanges; }
-        }
+	    /// <summary>Returns the number of unique terms (across all fields)
+	    /// in this reader.
+	    /// 
+	    /// This method returns long, even though internally
+	    /// Lucene cannot handle more than 2^31 unique terms, for
+	    /// a possible future when this limitation is removed.
+	    /// 
+	    /// </summary>
+	    /// <throws>  UnsupportedOperationException if this count
+	    /// cannot be easily determined (eg Multi*Readers).
+	    /// Instead, you should call <see cref="SequentialSubReaders" />
+	    /// and ask each sub reader for
+	    /// its unique term count. </throws>
+	    public virtual long UniqueTermCount
+	    {
+	        get { throw new System.NotSupportedException("this reader does not implement getUniqueTermCount()"); }
+	    }
+
+	    /// <summary>
+	    /// For IndexReader implementations that use
+	    /// TermInfosReader to read terms, this returns the
+	    /// current indexDivisor as specified when the reader was
+	    /// opened.
+	    /// </summary>
+	    public virtual int TermInfosIndexDivisor
+	    {
+	        get { throw new NotSupportedException("This reader does not support this method."); }
+	    }
 	}
 }
\ No newline at end of file

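Besides the property conversions, the IndexReader hunks above add an indexer so stored fields can be fetched as reader[n]. A hedged end-to-end sketch: the read-only Open overload is documented in this file, and Close() is assumed to remain the disposal path at this revision:

    using System;
    using Lucene.Net.Documents;
    using Lucene.Net.Index;
    using Lucene.Net.Store;

    internal static class ReaderSketch
    {
        internal static void DumpFirstDocument(Directory dir)
        {
            IndexReader reader = IndexReader.Open(dir, true);   // read-only open
            try
            {
                // NumDocs, MaxDoc and HasDeletions are properties after this commit.
                if (reader.NumDocs > 0 && !reader.HasDeletions)
                {
                    Document doc = reader[0];   // new indexer, equivalent to Document(0)
                    Console.WriteLine(doc.ToString());
                }
            }
            finally
            {
                reader.Close();
            }
        }
    }
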
Modified: incubator/lucene.net/trunk/src/core/Index/IndexWriter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Index/IndexWriter.cs?rev=1299911&r1=1299910&r2=1299911&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Index/IndexWriter.cs (original)
+++ incubator/lucene.net/trunk/src/core/Index/IndexWriter.cs Mon Mar 12 22:29:26 2012
@@ -160,7 +160,7 @@ namespace Lucene.Net.Index
 	{
 		private void  InitBlock()
 		{
-			similarity = Similarity.GetDefault();
+			similarity = Search.Similarity.Default;
 			mergePolicy = new LogByteSizeMergePolicy(this);
 			readerPool = new ReaderPool(this);
 		}
@@ -502,15 +502,15 @@ namespace Lucene.Net.Index
 				lock (this)
 				{
 					
-					bool pooled = readerMap.ContainsKey(sr.GetSegmentInfo());
+					bool pooled = readerMap.ContainsKey(sr.SegmentInfo);
 
-                    System.Diagnostics.Debug.Assert(!pooled || readerMap[sr.GetSegmentInfo()] == sr);
+                    System.Diagnostics.Debug.Assert(!pooled || readerMap[sr.SegmentInfo] == sr);
 
                     // Drop caller's ref; for an external reader (not
                     // pooled), this decRef will close it
 					sr.DecRef();
 					
-					if (pooled && (drop || (!Enclosing_Instance.poolReaders && sr.GetRefCount() == 1)))
+					if (pooled && (drop || (!Enclosing_Instance.poolReaders && sr.RefCount == 1)))
 					{
 
                         // We invoke deleter.checkpoint below, so we must be
@@ -532,7 +532,7 @@ namespace Lucene.Net.Index
 
                         // We are the last ref to this reader; since we're
                         // not pooling readers, we release it:
-                        readerMap.Remove(sr.GetSegmentInfo());
+                        readerMap.Remove(sr.SegmentInfo);
 
                         if (hasChanges)
                         {
@@ -569,7 +569,7 @@ namespace Lucene.Net.Index
                             SegmentReader sr = ent.Value;
                             if (sr.hasChanges)
                             {
-                                System.Diagnostics.Debug.Assert(InfoIsLive(sr.GetSegmentInfo()));
+                                System.Diagnostics.Debug.Assert(InfoIsLive(sr.SegmentInfo));
                                 sr.DoCommit(null);
                                 // Must checkpoint w/ deleter, because this
                                 // segment reader will have created new _X_N.del
@@ -607,7 +607,7 @@ namespace Lucene.Net.Index
 						SegmentReader sr = ent.Value;
 						if (sr.hasChanges)
 						{
-							System.Diagnostics.Debug.Assert(InfoIsLive(sr.GetSegmentInfo()));
+							System.Diagnostics.Debug.Assert(InfoIsLive(sr.SegmentInfo));
 							sr.DoCommit(null);
                             // Must checkpoint w/ deleter, because this
                             // segment reader will have created new _X_N.del
@@ -746,7 +746,7 @@ namespace Lucene.Net.Index
 			{
 				if (reader != null)
 				{
-					return reader.NumDeletedDocs();
+					return reader.NumDeletedDocs;
 				}
 				else
 				{
@@ -896,43 +896,31 @@ namespace Lucene.Net.Index
 			else
 				throw new System.ArgumentException("this method can only be called when the merge policy is the default LogMergePolicy");
 		}
-		
-		/// <summary><p/>Get the current setting of whether newly flushed
-		/// segments will use the compound file format.  Note that
-		/// this just returns the value previously set with
-		/// setUseCompoundFile(boolean), or the default value
-		/// (true).  You cannot use this to query the status of
-		/// previously flushed segments.<p/>
-		/// 
-		/// <p/>Note that this method is a convenience method: it
-		/// just calls mergePolicy.getUseCompoundFile as long as
-		/// mergePolicy is an instance of <see cref="LogMergePolicy" />.
-		/// Otherwise an IllegalArgumentException is thrown.<p/>
-		/// 
-		/// </summary>
-        /// <seealso cref="SetUseCompoundFile(bool)">
-		/// </seealso>
-		public virtual bool GetUseCompoundFile()
-		{
-			return GetLogMergePolicy().GetUseCompoundFile();
-		}
-		
-		/// <summary><p/>Setting to turn on usage of a compound file. When on,
-		/// multiple files for each segment are merged into a
-		/// single file when a new segment is flushed.<p/>
-		/// 
-		/// <p/>Note that this method is a convenience method: it
-		/// just calls mergePolicy.setUseCompoundFile as long as
-		/// mergePolicy is an instance of <see cref="LogMergePolicy" />.
-		/// Otherwise an IllegalArgumentException is thrown.<p/>
-		/// </summary>
-		public virtual void  SetUseCompoundFile(bool value_Renamed)
-		{
-			GetLogMergePolicy().SetUseCompoundFile(value_Renamed);
-			GetLogMergePolicy().SetUseCompoundDocStore(value_Renamed);
-		}
-		
-		/// <summary>Expert: Set the Similarity implementation used by this IndexWriter.
+
+	    /// <summary><p/>Gets or sets the current setting of whether newly flushed
+	    /// segments will use the compound file format.  Note that
+	    /// this just returns the value previously set with
+	    /// this property, or the default value
+	    /// (true).  You cannot use this to query the status of
+	    /// previously flushed segments.<p/>
+	    /// 
+	    /// <p/>Note that this property is a convenience: it
+	    /// just calls mergePolicy.getUseCompoundFile as long as
+	    /// mergePolicy is an instance of <see cref="LogMergePolicy" />.
+	    /// Otherwise an IllegalArgumentException is thrown.<p/>
+	    /// 
+	    /// </summary>
+	    public virtual bool UseCompoundFile
+	    {
+	        get { return GetLogMergePolicy().GetUseCompoundFile(); }
+	        set
+	        {
+	            GetLogMergePolicy().SetUseCompoundFile(value);
+	            GetLogMergePolicy().SetUseCompoundDocStore(value);
+	        }
+	    }
+
+	    /// <summary>Expert: Set the Similarity implementation used by this IndexWriter.
 		/// 
 		/// </summary>
 		/// <seealso cref="Similarity.SetDefault(Similarity)">
@@ -943,58 +931,59 @@ namespace Lucene.Net.Index
 			this.similarity = similarity;
 			docWriter.SetSimilarity(similarity);
 		}
-		
-		/// <summary>Expert: Return the Similarity implementation used by this IndexWriter.
-		/// 
-		/// <p/>This defaults to the current value of <see cref="Similarity.GetDefault()" />.
-		/// </summary>
-		public virtual Similarity GetSimilarity()
-		{
-			EnsureOpen();
-			return this.similarity;
-		}
-		
-		/// <summary>Expert: Set the interval between indexed terms.  Large values cause less
-		/// memory to be used by IndexReader, but slow random-access to terms.  Small
-		/// values cause more memory to be used by an IndexReader, and speed
-		/// random-access to terms.
-		/// 
-		/// This parameter determines the amount of computation required per query
-		/// term, regardless of the number of documents that contain that term.  In
-		/// particular, it is the maximum number of other terms that must be
-		/// scanned before a term is located and its frequency and position information
-		/// may be processed.  In a large index with user-entered query terms, query
-		/// processing time is likely to be dominated not by term lookup but rather
-		/// by the processing of frequency and positional data.  In a small index
-		/// or when many uncommon query terms are generated (e.g., by wildcard
-		/// queries) term lookup may become a dominant cost.
-		/// 
-		/// In particular, <c>numUniqueTerms/interval</c> terms are read into
-		/// memory by an IndexReader, and, on average, <c>interval/2</c> terms
-		/// must be scanned for each random term access.
-		/// 
-		/// </summary>
-		/// <seealso cref="DEFAULT_TERM_INDEX_INTERVAL">
-		/// </seealso>
-		public virtual void  SetTermIndexInterval(int interval)
-		{
-			EnsureOpen();
-			this.termIndexInterval = interval;
-		}
-		
-		/// <summary>Expert: Return the interval between indexed terms.
-		/// 
-		/// </summary>
-		/// <seealso cref="SetTermIndexInterval(int)">
-		/// </seealso>
-		public virtual int GetTermIndexInterval()
-		{
-			// We pass false because this method is called by SegmentMerger while we are in the process of closing
-			EnsureOpen(false);
-			return termIndexInterval;
-		}
-		
-		/// <summary> Constructs an IndexWriter for the index in <c>d</c>.
+
+	    /// <summary>Expert: Return the Similarity implementation used by this IndexWriter.
+	    /// 
+	    /// <p/>This defaults to the current value of <see cref="Similarity.GetDefault()" />.
+	    /// </summary>
+	    public virtual Similarity Similarity
+	    {
+	        get
+	        {
+	            EnsureOpen();
+	            return this.similarity;
+	        }
+	    }
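
The setter stays a method while the getter becomes a property. Continuing with a writer assumed open, as in the sketch above:

    writer.SetSimilarity(new Lucene.Net.Search.DefaultSimilarity());
    Lucene.Net.Search.Similarity sim = writer.Similarity; // the getter calls EnsureOpen() first
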
+
+
+        /// <summary>Expert: Gets or sets the interval between indexed terms.  Large values cause less
+        /// memory to be used by IndexReader, but slow random-access to terms.  Small
+        /// values cause more memory to be used by an IndexReader, and speed
+        /// random-access to terms.
+        /// 
+        /// This parameter determines the amount of computation required per query
+        /// term, regardless of the number of documents that contain that term.  In
+        /// particular, it is the maximum number of other terms that must be
+        /// scanned before a term is located and its frequency and position information
+        /// may be processed.  In a large index with user-entered query terms, query
+        /// processing time is likely to be dominated not by term lookup but rather
+        /// by the processing of frequency and positional data.  In a small index
+        /// or when many uncommon query terms are generated (e.g., by wildcard
+        /// queries) term lookup may become a dominant cost.
+        /// 
+        /// In particular, <c>numUniqueTerms/interval</c> terms are read into
+        /// memory by an IndexReader, and, on average, <c>interval/2</c> terms
+        /// must be scanned for each random term access.
+        /// 
+        /// </summary>
+        /// <seealso cref="DEFAULT_TERM_INDEX_INTERVAL">
+        /// </seealso>
+	    public virtual int TermIndexInterval
+	    {
+	        get
+	        {
+	            // We pass false because this method is called by SegmentMerger while we are in the process of closing
+	            EnsureOpen(false);
+	            return termIndexInterval;
+	        }
+	        set
+	        {
+	            EnsureOpen();
+	            this.termIndexInterval = value;
+	        }
+	    }
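
A sketch of the trade-off described above; the term counts are illustrative only:

    // With roughly 12,800,000 unique terms and the default interval of 128,
    // an IndexReader holds numUniqueTerms/interval = ~100,000 terms in memory
    // and scans interval/2 = ~64 terms, on average, per random term access.
    writer.TermIndexInterval = 256; // halves reader memory, doubles the average scan cost
    int interval = writer.TermIndexInterval;
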
+
+	    /// <summary> Constructs an IndexWriter for the index in <c>d</c>.
 		/// Text will be analyzed with <c>a</c>.  If <c>create</c>
 		/// is true, then a new, empty index will be created in
 		/// <c>d</c>, replacing the index already there, if any.
@@ -1288,14 +1277,14 @@ namespace Lucene.Net.Index
 						// preserve write-once.  This is important if
 						// readers are open against the future commit
 						// points.
-						if (commit.GetDirectory() != directory)
+						if (commit.Directory != directory)
 							throw new System.ArgumentException("IndexCommit's directory doesn't match my directory");
 						SegmentInfos oldInfos = new SegmentInfos();
-						oldInfos.Read(directory, commit.GetSegmentsFileName());
+						oldInfos.Read(directory, commit.SegmentsFileName);
 						segmentInfos.Replace(oldInfos);
 						changeCount++;
 						if (infoStream != null)
-							Message("init: loaded commit \"" + commit.GetSegmentsFileName() + "\"");
+							Message("init: loaded commit \"" + commit.SegmentsFileName + "\"");
 					}
 					
 					// We assume that this segments_N was previously
@@ -1380,17 +1369,20 @@ namespace Lucene.Net.Index
 				Message("setMergePolicy " + mp);
 			}
 		}
-		
-		/// <summary> Expert: returns the current MergePolicy in use by this writer.</summary>
-		/// <seealso cref="SetMergePolicy">
-		/// </seealso>
-		public virtual MergePolicy GetMergePolicy()
-		{
-			EnsureOpen();
-			return mergePolicy;
-		}
-		
-		/// <summary> Expert: set the merge scheduler used by this writer.</summary>
+
+	    /// <summary> Expert: returns the current MergePolicy in use by this writer.</summary>
+	    /// <seealso cref="SetMergePolicy">
+	    /// </seealso>
+	    public virtual MergePolicy MergePolicy
+	    {
+	        get
+	        {
+	            EnsureOpen();
+	            return mergePolicy;
+	        }
+	    }
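
Reading the policy back through the new property (writer assumed open):

    MergePolicy mp = writer.MergePolicy; // calls EnsureOpen() before returning
    if (mp is LogByteSizeMergePolicy)
    {
        // the default policy, which sizes segments by bytes rather than document count
    }
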
+
+	    /// <summary> Expert: set the merge scheduler used by this writer.</summary>
 		public virtual void  SetMergeScheduler(MergeScheduler mergeScheduler)
 		{
 			lock (this)
@@ -1411,60 +1403,51 @@ namespace Lucene.Net.Index
 				}
 			}
 		}
-		
-		/// <summary> Expert: returns the current MergePolicy in use by this
-		/// writer.
-		/// </summary>
-		/// <seealso cref="SetMergePolicy">
-		/// </seealso>
-		public virtual MergeScheduler GetMergeScheduler()
-		{
-			EnsureOpen();
-			return mergeScheduler;
-		}
-		
-		/// <summary><p/>Determines the largest segment (measured by
-		/// document count) that may be merged with other segments.
-		/// Small values (e.g., less than 10,000) are best for
-		/// interactive indexing, as this limits the length of
-		/// pauses while indexing to a few seconds.  Larger values
-		/// are best for batched indexing and speedier
-		/// searches.<p/>
-		/// 
-		/// <p/>The default value is <see cref="int.MaxValue" />.<p/>
-		/// 
-		/// <p/>Note that this method is a convenience method: it
-		/// just calls mergePolicy.setMaxMergeDocs as long as
-		/// mergePolicy is an instance of <see cref="LogMergePolicy" />.
-		/// Otherwise an IllegalArgumentException is thrown.<p/>
-		/// 
-		/// <p/>The default merge policy (<see cref="LogByteSizeMergePolicy" />)
-		/// also allows you to set this
-		/// limit by net size (in MB) of the segment, using 
-		/// <see cref="LogByteSizeMergePolicy.SetMaxMergeMB" />.<p/>
-		/// </summary>
-		public virtual void  SetMaxMergeDocs(int maxMergeDocs)
-		{
-			GetLogMergePolicy().SetMaxMergeDocs(maxMergeDocs);
-		}
-		
-		/// <summary> <p/>Returns the largest segment (measured by document
-		/// count) that may be merged with other segments.<p/>
-		/// 
-		/// <p/>Note that this method is a convenience method: it
-		/// just calls mergePolicy.getMaxMergeDocs as long as
-		/// mergePolicy is an instance of <see cref="LogMergePolicy" />.
-		/// Otherwise an IllegalArgumentException is thrown.<p/>
-		/// 
-		/// </summary>
-		/// <seealso cref="SetMaxMergeDocs">
-		/// </seealso>
-		public virtual int GetMaxMergeDocs()
-		{
-			return GetLogMergePolicy().GetMaxMergeDocs();
-		}
-		
-		/// <summary> The maximum number of terms that will be indexed for a single field in a
+
+	    /// <summary> Expert: returns the current MergeScheduler in use by this
+	    /// writer.
+	    /// </summary>
+	    /// <seealso cref="SetMergeScheduler">
+	    /// </seealso>
+	    public virtual MergeScheduler MergeScheduler
+	    {
+	        get
+	        {
+	            EnsureOpen();
+	            return mergeScheduler;
+	        }
+	    }
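
As with MergePolicy, the scheduler is set through a method and read back through the property; SerialMergeScheduler below is just one of the shipped schedulers:

    writer.SetMergeScheduler(new SerialMergeScheduler()); // run merges on the calling thread
    MergeScheduler scheduler = writer.MergeScheduler;     // default is ConcurrentMergeScheduler
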
+
+	    /// <summary> <p/>Gets or sets the largest segment (measured by document
+        /// count) that may be merged with other segments.
+        /// <p/> 
+        /// Small values (e.g., less than 10,000) are best for
+        /// interactive indexing, as this limits the length of
+        /// pauses while indexing to a few seconds.  Larger values
+        /// are best for batched indexing and speedier
+        /// searches.
+        /// <p/>
+        /// The default value is <see cref="int.MaxValue" />.
+        /// <p/>
+        /// Note that this property is a convenience: it just
+        /// calls mergePolicy.GetMaxMergeDocs (and, when setting,
+        /// mergePolicy.SetMaxMergeDocs) as long as mergePolicy is an
+        /// instance of <see cref="LogMergePolicy" />.
+        /// Otherwise a System.ArgumentException is thrown.<p/>
+        /// 
+        /// The default merge policy (<see cref="LogByteSizeMergePolicy" />)
+        /// also allows you to set this
+        /// limit by net size (in MB) of the segment, using 
+        /// <see cref="LogByteSizeMergePolicy.SetMaxMergeMB" />.<p/>
+	    /// </summary>
+	    /// <seealso cref="LogMergePolicy.SetMaxMergeDocs">
+	    /// </seealso>
+	    public virtual int MaxMergeDocs
+	    {
+	        get { return GetLogMergePolicy().GetMaxMergeDocs(); }
+	        set { GetLogMergePolicy().SetMaxMergeDocs(value); }
+	    }
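
For example, capping merged segments for interactive indexing (again requires a LogMergePolicy):

    writer.MaxMergeDocs = 10000;   // keep indexing pauses short for interactive use
    int cap = writer.MaxMergeDocs; // default is int.MaxValue
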
+
+	    /// <summary> The maximum number of terms that will be indexed for a single field in a
 		/// document.  This limits the amount of memory required for indexing, so that
 		/// collections with very large files will not crash the indexing process by
 		/// running out of memory.  This setting refers to the number of running terms,
@@ -1497,35 +1480,34 @@ namespace Lucene.Net.Index
 			return maxFieldLength;
 		}
 
-        /// Sets the termsIndexDivisor passed to any readers that
+        /// Gets or sets the termsIndexDivisor passed to any readers that
         /// IndexWriter opens, for example when applying deletes
         /// or creating a near-real-time reader in 
         /// <see cref="GetReader()"/>.  Default value is 
         /// <see cref="IndexReader.DEFAULT_TERMS_INDEX_DIVISOR"/>.
-        public void SetReaderTermsIndexDivisor(int divisor)
-        {
-            EnsureOpen();
-            if (divisor <= 0)
-            {
-                throw new System.ArgumentException("divisor must be >= 1 (got " + divisor + ")");
-            }
-            readerTermsIndexDivisor = divisor;
-            if (infoStream != null)
-            {
-                Message("setReaderTermsIndexDivisor " + readerTermsIndexDivisor);
-            }
-        }
+	    public int ReaderTermsIndexDivisor
+	    {
+	        get
+	        {
+	            EnsureOpen();
+	            return readerTermsIndexDivisor;
+	        }
+	        set
+	        {
+	            EnsureOpen();
+	            if (value <= 0)
+	            {
+	                throw new System.ArgumentException("divisor must be >= 1 (got " + value + ")");
+	            }
+	            readerTermsIndexDivisor = value;
+	            if (infoStream != null)
+	            {
+	                Message("setReaderTermsIndexDivisor " + readerTermsIndexDivisor);
+	            }
+	        }
+	    }
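
A sketch of the divisor's effect on readers the writer opens (values below 1 throw a System.ArgumentException, per the setter above):

    writer.ReaderTermsIndexDivisor = 2; // load every 2nd indexed term: roughly half the
                                        // term-index memory, at the cost of slower lookups
    int divisor = writer.ReaderTermsIndexDivisor;
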
 
-        /// <summary>
-        /// <see cref="SetReaderTermsIndexDivisor"/>
-        /// </summary>
-        public int GetReaderTermsIndexDivisor()
-        {
-            EnsureOpen();
-            return readerTermsIndexDivisor;
-        }
-		
-		/// <summary>Determines the minimal number of documents required
+	    /// <summary>Determines the minimal number of documents required
 		/// before the buffered in-memory documents are flushed as
 		/// a new Segment.  Large values generally gives faster
 		/// indexing.
@@ -1550,10 +1532,12 @@ namespace Lucene.Net.Index
 		{
 			EnsureOpen();
 			if (maxBufferedDocs != DISABLE_AUTO_FLUSH && maxBufferedDocs < 2)
-				throw new System.ArgumentException("maxBufferedDocs must at least be 2 when enabled");
-			if (maxBufferedDocs == DISABLE_AUTO_FLUSH && GetRAMBufferSizeMB() == DISABLE_AUTO_FLUSH)
-				throw new System.ArgumentException("at least one of ramBufferSize and maxBufferedDocs must be enabled");
-			docWriter.SetMaxBufferedDocs(maxBufferedDocs);
+				throw new ArgumentException("maxBufferedDocs must be at least 2 when enabled");
+
+			if (maxBufferedDocs == DISABLE_AUTO_FLUSH && (int)GetRAMBufferSizeMB() == DISABLE_AUTO_FLUSH)
+				throw new ArgumentException("at least one of ramBufferSize and maxBufferedDocs must be enabled");
+
+			docWriter.MaxBufferedDocs = maxBufferedDocs;
 			PushMaxBufferedDocs();
 			if (infoStream != null)
 				Message("setMaxBufferedDocs " + maxBufferedDocs);
@@ -1565,18 +1549,18 @@ namespace Lucene.Net.Index
 		/// </summary>
 		private void  PushMaxBufferedDocs()
 		{
-			if (docWriter.GetMaxBufferedDocs() != DISABLE_AUTO_FLUSH)
+			if (docWriter.MaxBufferedDocs != DISABLE_AUTO_FLUSH)
 			{
 				MergePolicy mp = mergePolicy;
 				if (mp is LogDocMergePolicy)
 				{
 					LogDocMergePolicy lmp = (LogDocMergePolicy) mp;
-					int maxBufferedDocs = docWriter.GetMaxBufferedDocs();
-					if (lmp.GetMinMergeDocs() != maxBufferedDocs)
+					int maxBufferedDocs = docWriter.MaxBufferedDocs;
+					if (lmp.MinMergeDocs != maxBufferedDocs)
 					{
 						if (infoStream != null)
 							Message("now push maxBufferedDocs " + maxBufferedDocs + " to LogDocMergePolicy");
-						lmp.SetMinMergeDocs(maxBufferedDocs);
+						lmp.MinMergeDocs = maxBufferedDocs;
 					}
 				}
 			}
@@ -1590,7 +1574,7 @@ namespace Lucene.Net.Index
 		public virtual int GetMaxBufferedDocs()
 		{
 			EnsureOpen();
-			return docWriter.GetMaxBufferedDocs();
+			return docWriter.MaxBufferedDocs;
 		}
 		
 		/// <summary>Determines the amount of RAM that may be used for
@@ -1703,7 +1687,7 @@ namespace Lucene.Net.Index
 		/// </summary>
 		public virtual void  SetMergeFactor(int mergeFactor)
 		{
-			GetLogMergePolicy().SetMergeFactor(mergeFactor);
+			GetLogMergePolicy().MergeFactor = mergeFactor;
 		}
 		
 		/// <summary> <p/>Returns the number of segments that are merged at
@@ -1720,7 +1704,7 @@ namespace Lucene.Net.Index
 		/// </seealso>
 		public virtual int GetMergeFactor()
 		{
-			return GetLogMergePolicy().GetMergeFactor();
+			return GetLogMergePolicy().MergeFactor;
 		}
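
Both accessors just delegate to the LogMergePolicy, as the diff shows; a sketch:

    writer.SetMergeFactor(20);                 // fewer, larger merges: faster batch indexing
    int mergeFactor = writer.GetMergeFactor(); // default is 10; the minimum allowed is 2
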
 		
 		/// <summary>If non-null, this will be the default infoStream used
@@ -1763,7 +1747,7 @@ namespace Lucene.Net.Index
                     " mergePolicy=" + mergePolicy + 
                     " mergeScheduler=" + mergeScheduler +
 		            " ramBufferSizeMB=" + docWriter.GetRAMBufferSizeMB() + 
-                    " maxBufferedDocs=" +  docWriter.GetMaxBufferedDocs() +
+                    " maxBufferedDocs=" +  docWriter.MaxBufferedDocs +
                     " maxBuffereDeleteTerms=" + docWriter.GetMaxBufferedDeleteTerms() +
 		            " maxFieldLength=" + maxFieldLength + 
                     " index=" + SegString());
@@ -2107,13 +2091,13 @@ namespace Lucene.Net.Index
 			{
                 if (infoStream != null)
                 {
-                    Message("flushDocStores segment=" + docWriter.GetDocStoreSegment());
+                    Message("flushDocStores segment=" + docWriter.DocStoreSegment);
                 }
 
 				bool useCompoundDocStore = false;
                 if (infoStream != null)
                 {
-                    Message("closeDocStores segment=" + docWriter.GetDocStoreSegment());
+                    Message("closeDocStores segment=" + docWriter.DocStoreSegment);
                 }
 
 				System.String docStoreSegment;
@@ -2179,8 +2163,8 @@ namespace Lucene.Net.Index
 					for (int i = 0; i < numSegments; i++)
 					{
 						SegmentInfo si = segmentInfos.Info(i);
-						if (si.GetDocStoreOffset() != - 1 && si.GetDocStoreSegment().Equals(docStoreSegment))
-							si.SetDocStoreIsCompoundFile(true);
+						if (si.DocStoreOffset != - 1 && si.DocStoreSegment.Equals(docStoreSegment))
+							si.DocStoreIsCompoundFile = true;
 					}
 					
 					Checkpoint();
@@ -2221,7 +2205,7 @@ namespace Lucene.Net.Index
 			{
 				int count;
 				if (docWriter != null)
-					count = docWriter.GetNumDocsInRAM();
+					count = docWriter.NumDocsInRAM;
 				else
 					count = 0;
 				
@@ -2245,7 +2229,7 @@ namespace Lucene.Net.Index
 			{
 				int count;
 				if (docWriter != null)
-					count = docWriter.GetNumDocsInRAM();
+					count = docWriter.NumDocsInRAM;
 				else
 					count = 0;
 				
@@ -2585,7 +2569,7 @@ namespace Lucene.Net.Index
 		{
 			lock (this)
 			{
-				return docWriter.GetNumDocsInRAM();
+				return docWriter.NumDocsInRAM;
 			}
 		}
 		
@@ -3118,8 +3102,8 @@ namespace Lucene.Net.Index
 					
 					System.Diagnostics.Debug.Assert(docWriter.GetNumBufferedDeleteTerms() == 0 , 
 						"calling startTransaction with buffered delete terms not supported: numBufferedDeleteTerms=" + docWriter.GetNumBufferedDeleteTerms());
-					System.Diagnostics.Debug.Assert(docWriter.GetNumDocsInRAM() == 0 , 
-						"calling startTransaction with buffered documents not supported: numDocsInRAM=" + docWriter.GetNumDocsInRAM());
+					System.Diagnostics.Debug.Assert(docWriter.NumDocsInRAM == 0 , 
+						"calling startTransaction with buffered documents not supported: numDocsInRAM=" + docWriter.NumDocsInRAM);
 					
 					EnsureOpen();
 					
@@ -3746,7 +3730,7 @@ namespace Lucene.Net.Index
 						if (info.dir != directory)
 						{
 							done = false;
-							MergePolicy.OneMerge newMerge = new MergePolicy.OneMerge(segmentInfos.Range(i, 1 + i), mergePolicy is LogMergePolicy && GetUseCompoundFile());
+							MergePolicy.OneMerge newMerge = new MergePolicy.OneMerge(segmentInfos.Range(i, 1 + i), mergePolicy is LogMergePolicy && UseCompoundFile);
 							
 							// Returns true if no running merge conflicts
 							// with this one (and, records this merge as
@@ -3923,7 +3907,7 @@ namespace Lucene.Net.Index
 					}
 				}
 				
-				if (mergePolicy is LogMergePolicy && GetUseCompoundFile())
+				if (mergePolicy is LogMergePolicy && UseCompoundFile)
 				{
 					
 					IList<string> files = null;
@@ -3953,7 +3937,7 @@ namespace Lucene.Net.Index
 							merger.CreateCompoundFile(mergedName + ".cfs");
 							lock (this)
 							{
-								info.SetUseCompoundFile(true);
+								info.UseCompoundFile = true;
 							}
 							
 							success = true;
@@ -4177,7 +4161,7 @@ namespace Lucene.Net.Index
 							Message("commit: wrote segments file \"" + pendingCommit.GetCurrentSegmentFileName() + "\"");
 						lastCommitChangeCount = pendingCommitChangeCount;
 						segmentInfos.UpdateGeneration(pendingCommit);
-						segmentInfos.SetUserData(pendingCommit.GetUserData());
+						segmentInfos.UserData = pendingCommit.UserData;
 						SetRollbackSegmentInfos(pendingCommit);
 						deleter.Checkpoint(pendingCommit, true);
 					}
@@ -4291,32 +4275,32 @@ namespace Lucene.Net.Index
 					
 					SegmentInfo newSegment = null;
 					
-					int numDocs = docWriter.GetNumDocsInRAM();
+					int numDocs = docWriter.NumDocsInRAM;
 					
 					// Always flush docs if there are any
 					bool flushDocs = numDocs > 0;
 					
-					System.String docStoreSegment = docWriter.GetDocStoreSegment();
+					System.String docStoreSegment = docWriter.DocStoreSegment;
 
                     System.Diagnostics.Debug.Assert(docStoreSegment != null || numDocs == 0, "dss=" + docStoreSegment + " numDocs=" + numDocs);
 					
 					if (docStoreSegment == null)
 						flushDocStores = false;
 					
-					int docStoreOffset = docWriter.GetDocStoreOffset();
+					int docStoreOffset = docWriter.DocStoreOffset;
 					
 					bool docStoreIsCompoundFile = false;
 					
 					if (infoStream != null)
 					{
-						Message("  flush: segment=" + docWriter.GetSegment() + " docStoreSegment=" + docWriter.GetDocStoreSegment() + " docStoreOffset=" + docStoreOffset + " flushDocs=" + flushDocs + " flushDeletes=" + flushDeletes + " flushDocStores=" + flushDocStores + " numDocs=" + numDocs + " numBufDelTerms=" + docWriter.GetNumBufferedDeleteTerms());
+						Message("  flush: segment=" + docWriter.Segment + " docStoreSegment=" + docWriter.DocStoreSegment + " docStoreOffset=" + docStoreOffset + " flushDocs=" + flushDocs + " flushDeletes=" + flushDeletes + " flushDocStores=" + flushDocStores + " numDocs=" + numDocs + " numBufDelTerms=" + docWriter.GetNumBufferedDeleteTerms());
 						Message("  index before flush " + SegString());
 					}
 					
 					// Check if the doc stores must be separately flushed
 					// because other segments, besides the one we are about
 					// to flush, reference it
-					if (flushDocStores && (!flushDocs || !docWriter.GetSegment().Equals(docWriter.GetDocStoreSegment())))
+					if (flushDocStores && (!flushDocs || !docWriter.Segment.Equals(docWriter.DocStoreSegment)))
 					{
 						// We must separately flush the doc store
 						if (infoStream != null)
@@ -4326,7 +4310,7 @@ namespace Lucene.Net.Index
 						flushDocStores = false;
 					}
 					
-					System.String segment = docWriter.GetSegment();
+					System.String segment = docWriter.Segment;
 					
 					// If we are flushing docs, segment must not be null:
 					System.Diagnostics.Debug.Assert(segment != null || !flushDocs);
@@ -4402,7 +4386,7 @@ namespace Lucene.Net.Index
 							}
 						}
 						
-						newSegment.SetUseCompoundFile(true);
+						newSegment.UseCompoundFile = true;
 						Checkpoint();
 					}
 					
@@ -4448,7 +4432,7 @@ namespace Lucene.Net.Index
 			lock (this)
 			{
 				EnsureOpen();
-				return docWriter.GetNumDocsInRAM();
+				return docWriter.NumDocsInRAM;
 			}
 		}
 		
@@ -4511,7 +4495,7 @@ namespace Lucene.Net.Index
 					int docCount = info.docCount;
 					SegmentReader previousReader = merge.readersClone[i];
 					SegmentReader currentReader = merge.readers[i];
-					if (previousReader.HasDeletions())
+					if (previousReader.HasDeletions)
 					{
 						
 						// There were deletes on this segment when the merge
@@ -4521,7 +4505,7 @@ namespace Lucene.Net.Index
 						// newly flushed deletes but mapping them to the new
 						// docIDs.
 						
-						if (currentReader.NumDeletedDocs() > previousReader.NumDeletedDocs())
+						if (currentReader.NumDeletedDocs > previousReader.NumDeletedDocs)
 						{
 							// This means this segment has had new deletes
 							// committed since we started the merge, so we
@@ -4545,10 +4529,10 @@ namespace Lucene.Net.Index
 						}
 						else
 						{
-							docUpto += docCount - previousReader.NumDeletedDocs();
+							docUpto += docCount - previousReader.NumDeletedDocs;
 						}
 					}
-					else if (currentReader.HasDeletions())
+					else if (currentReader.HasDeletions)
 					{
 						// This segment had no deletes before but now it
 						// does:
@@ -4567,7 +4551,7 @@ namespace Lucene.Net.Index
 						docUpto += info.docCount;
 				}
 				
-				System.Diagnostics.Debug.Assert(mergeReader.NumDeletedDocs() == delCount);
+				System.Diagnostics.Debug.Assert(mergeReader.NumDeletedDocs == delCount);
 				
 				mergeReader.hasChanges = delCount > 0;
 			}
@@ -4616,7 +4600,7 @@ namespace Lucene.Net.Index
                 // format as well:
                 SetMergeDocStoreIsCompoundFile(merge);
 				
-				merge.info.SetHasProx(merger.HasProx());
+				merge.info.HasProx = merger.HasProx();
 				
 				segmentInfos.RemoveRange(start, start + merge.segments.Count - start);
 				System.Diagnostics.Debug.Assert(!segmentInfos.Contains(merge.info));
@@ -4877,7 +4861,7 @@ namespace Lucene.Net.Index
 				
 				bool mergeDocStores = false;
 				bool doFlushDocStore = false;
-				System.String currentDocStoreSegment = docWriter.GetDocStoreSegment();
+				System.String currentDocStoreSegment = docWriter.DocStoreSegment;
 				
 				// Test each segment to be merged: check if we need to
 				// flush/merge doc stores
@@ -4891,12 +4875,12 @@ namespace Lucene.Net.Index
 					
 					// If it has its own (private) doc stores we must
 					// merge the doc stores
-					if (- 1 == si.GetDocStoreOffset())
+					if (- 1 == si.DocStoreOffset)
 						mergeDocStores = true;
 					
 					// If it has a different doc store segment than
 					// previous segments, we must merge the doc stores
-					System.String docStoreSegment = si.GetDocStoreSegment();
+					System.String docStoreSegment = si.DocStoreSegment;
 					if (docStoreSegment == null)
 						mergeDocStores = true;
 					else if (lastDocStoreSegment == null)
@@ -4909,11 +4893,11 @@ namespace Lucene.Net.Index
 					// this will always be the case but for an arbitrary
 					// merge policy this may not be the case
 					if (- 1 == next)
-						next = si.GetDocStoreOffset() + si.docCount;
-					else if (next != si.GetDocStoreOffset())
+						next = si.DocStoreOffset + si.docCount;
+					else if (next != si.DocStoreOffset)
 						mergeDocStores = true;
 					else
-						next = si.GetDocStoreOffset() + si.docCount;
+						next = si.DocStoreOffset + si.docCount;
 					
 					// If the segment comes from a different directory
 					// we must merge
@@ -4922,7 +4906,7 @@ namespace Lucene.Net.Index
 					
 					// If the segment is referencing the current "live"
 					// doc store outputs then we must merge
-					if (si.GetDocStoreOffset() != - 1 && currentDocStoreSegment != null && si.GetDocStoreSegment().Equals(currentDocStoreSegment))
+					if (si.DocStoreOffset != - 1 && currentDocStoreSegment != null && si.DocStoreSegment.Equals(currentDocStoreSegment))
 					{
 						doFlushDocStore = true;
 					}
@@ -4949,9 +4933,9 @@ namespace Lucene.Net.Index
 				else
 				{
 					SegmentInfo si = sourceSegments.Info(0);
-					docStoreOffset = si.GetDocStoreOffset();
-					docStoreSegment2 = si.GetDocStoreSegment();
-					docStoreIsCompoundFile = si.GetDocStoreIsCompoundFile();
+					docStoreOffset = si.DocStoreOffset;
+					docStoreSegment2 = si.DocStoreSegment;
+					docStoreIsCompoundFile = si.DocStoreIsCompoundFile;
 				}
 				
 				if (mergeDocStores && doFlushDocStore)
@@ -5015,7 +4999,7 @@ namespace Lucene.Net.Index
                     diagnostics[key] = details[key];
                 }
 			}
-			info.SetDiagnostics(diagnostics);
+			info.Diagnostics = diagnostics;
 		}
 
 		/// <summary>Does finishing for a merge, which is fast but holds
@@ -5051,19 +5035,19 @@ namespace Lucene.Net.Index
         {
             lock (this)
             {
-                string mergeDocStoreSegment = merge.info.GetDocStoreSegment();
-                if (mergeDocStoreSegment != null && !merge.info.GetDocStoreIsCompoundFile())
+                string mergeDocStoreSegment = merge.info.DocStoreSegment;
+                if (mergeDocStoreSegment != null && !merge.info.DocStoreIsCompoundFile)
                 {
                     int size = segmentInfos.Count;
                     for (int i = 0; i < size; i++)
                     {
                         SegmentInfo info = segmentInfos.Info(i);
-                        string docStoreSegment = info.GetDocStoreSegment();
+                        string docStoreSegment = info.DocStoreSegment;
                         if (docStoreSegment != null &&
                             docStoreSegment.Equals(mergeDocStoreSegment) &&
-                            info.GetDocStoreIsCompoundFile())
+                            info.DocStoreIsCompoundFile)
                         {
-                            merge.info.SetDocStoreIsCompoundFile(true);
+                            merge.info.DocStoreIsCompoundFile = true;
                             break;
                         }
                     }
@@ -5105,7 +5089,7 @@ namespace Lucene.Net.Index
                             }
                             // This was a private clone and we had the
                             // only reference
-                            System.Diagnostics.Debug.Assert(merge.readersClone[i].GetRefCount() == 0); //: "refCount should be 0 but is " + merge.readersClone[i].getRefCount();
+                            System.Diagnostics.Debug.Assert(merge.readersClone[i].RefCount == 0); //: "refCount should be 0 but is " + merge.readersClone[i].getRefCount();
                             merge.readersClone[i] = null;
                         }
                     }
@@ -5124,7 +5108,7 @@ namespace Lucene.Net.Index
                         {
                             merge.readersClone[i].Close();
                             // This was a private clone and we had the only reference
-                            System.Diagnostics.Debug.Assert(merge.readersClone[i].GetRefCount() == 0);
+                            System.Diagnostics.Debug.Assert(merge.readersClone[i].RefCount == 0);
                             merge.readersClone[i] = null;
                         }
                     }
@@ -5163,7 +5147,7 @@ namespace Lucene.Net.Index
 
             String currentDocStoreSegment;
             lock(this) {
-                currentDocStoreSegment = docWriter.GetDocStoreSegment();
+                currentDocStoreSegment = docWriter.DocStoreSegment;
             }
             bool currentDSSMerged = false;
 
@@ -5189,17 +5173,17 @@ namespace Lucene.Net.Index
                     SegmentReader clone = merge.readersClone[i] = (SegmentReader)reader.Clone(true);
                     merger.Add(clone);
 
-                    if (clone.HasDeletions())
+                    if (clone.HasDeletions)
                     {
                         mergeDocStores = true;
                     }
 
-                    if (info.GetDocStoreOffset() != -1 && currentDocStoreSegment != null)
+                    if (info.DocStoreOffset != -1 && currentDocStoreSegment != null)
                     {
-                        currentDSSMerged |= currentDocStoreSegment.Equals(info.GetDocStoreSegment());
+                        currentDSSMerged |= currentDocStoreSegment.Equals(info.DocStoreSegment);
                     }
 
-                    totDocCount += clone.NumDocs();
+                    totDocCount += clone.NumDocs;
                 }
 
                 if (infoStream != null)
@@ -5313,7 +5297,7 @@ namespace Lucene.Net.Index
                         }
                     }
 
-                    merge.info.SetUseCompoundFile(true);
+                    merge.info.UseCompoundFile = true;
                 }
 
                 int termsIndexDivisor;
@@ -5322,7 +5306,7 @@ namespace Lucene.Net.Index
                 // if the merged segment warmer was not installed when
                 // this merge was started, causing us to not force
                 // the docStores to close, we can't warm it now
-                bool canWarm = merge.info.GetDocStoreSegment() == null || currentDocStoreSegment == null || !merge.info.GetDocStoreSegment().Equals(currentDocStoreSegment);
+                bool canWarm = merge.info.DocStoreSegment == null || currentDocStoreSegment == null || !merge.info.DocStoreSegment.Equals(currentDocStoreSegment);
 
                 if (poolReaders && mergedSegmentWarmer != null && canWarm)
                 {
@@ -5639,12 +5623,12 @@ namespace Lucene.Net.Index
                         // SegmentInfos we are about to sync (the main
                         // SegmentInfos will keep them):
                         toSync = (SegmentInfos) segmentInfos.Clone();
-                        string dss = docWriter.GetDocStoreSegment();
+                        string dss = docWriter.DocStoreSegment;
                         if (dss != null)
                         {
                             while (true)
                             {
-                                String dss2 = toSync.Info(toSync.Count - 1).GetDocStoreSegment();
+                                String dss2 = toSync.Info(toSync.Count - 1).DocStoreSegment;
                                 if (dss2 == null || !dss2.Equals(dss))
                                 {
                                     break;
@@ -5655,7 +5639,7 @@ namespace Lucene.Net.Index
                         }
 						
 						if (commitUserData != null)
-							toSync.SetUserData(commitUserData);
+							toSync.UserData = commitUserData;
 						
 						deleter.IncRef(toSync, false);
 												
@@ -5745,7 +5729,7 @@ namespace Lucene.Net.Index
 							{
 								// My turn to commit
 								
-								if (segmentInfos.GetGeneration() > toSync.GetGeneration())
+								if (segmentInfos.Generation > toSync.Generation)
 									toSync.UpdateGeneration(segmentInfos);
 								
 								bool success = false;
@@ -5962,7 +5946,7 @@ namespace Lucene.Net.Index
 					// stale
 					return false;
                 }
-                else if (infos.GetGeneration() != segmentInfos.GetGeneration())
+                else if (infos.Generation != segmentInfos.Generation)
                 {
                     // if any commit took place since we were opened, we
                     // are stale
@@ -5970,7 +5954,7 @@ namespace Lucene.Net.Index
                 }
                 else
                 {
-                    return !docWriter.AnyChanges();
+                    return !docWriter.AnyChanges;
                 }
 			}
 		}

Modified: incubator/lucene.net/trunk/src/core/Index/InvertedDocConsumerPerField.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Index/InvertedDocConsumerPerField.cs?rev=1299911&r1=1299910&r2=1299911&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Index/InvertedDocConsumerPerField.cs (original)
+++ incubator/lucene.net/trunk/src/core/Index/InvertedDocConsumerPerField.cs Mon Mar 12 22:29:26 2012
@@ -16,8 +16,7 @@
  */
 
 using System;
-
-using Fieldable = Lucene.Net.Documents.Fieldable;
+using Lucene.Net.Documents;
 
 namespace Lucene.Net.Index
 {
@@ -29,10 +28,10 @@ namespace Lucene.Net.Index
 		// occurrences for this field in the document.  Return
 		// true if you wish to see inverted tokens for these
 		// fields:
-		internal abstract bool Start(Fieldable[] fields, int count);
+		internal abstract bool Start(IFieldable[] fields, int count);
 		
 		// Called before a field instance is being processed
-		internal abstract void  Start(Fieldable field);
+		internal abstract void  Start(IFieldable field);
 		
 		// Called once per inverted token
 		internal abstract void  Add();


