lucenenet-commits mailing list archives

From nightowl...@apache.org
Subject [05/62] [abbrv] [partial] lucenenet git commit: Renamed Lucene.Net.Core folder Lucene.Net because the dotnet.exe pack command doesn't allow creating a NuGet package with a different name than its folder. Working around it with the script was much more co
Date Tue, 04 Apr 2017 17:19:11 GMT
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Index/SegmentReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/SegmentReader.cs b/src/Lucene.Net.Core/Index/SegmentReader.cs
deleted file mode 100644
index a795209..0000000
--- a/src/Lucene.Net.Core/Index/SegmentReader.cs
+++ /dev/null
@@ -1,757 +0,0 @@
-using Lucene.Net.Support;
-using Lucene.Net.Util;
-using System;
-using System.Collections.Generic;
-using System.Diagnostics;
-using System.Globalization;
-
-namespace Lucene.Net.Index
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    using IBits = Lucene.Net.Util.IBits;
-    using Codec = Lucene.Net.Codecs.Codec;
-    using CompoundFileDirectory = Lucene.Net.Store.CompoundFileDirectory;
-    using Directory = Lucene.Net.Store.Directory;
-    using DocValuesFormat = Lucene.Net.Codecs.DocValuesFormat;
-    using DocValuesProducer = Lucene.Net.Codecs.DocValuesProducer;
-    using IOContext = Lucene.Net.Store.IOContext;
-    using IOUtils = Lucene.Net.Util.IOUtils;
-    using StoredFieldsReader = Lucene.Net.Codecs.StoredFieldsReader;
-    using TermVectorsReader = Lucene.Net.Codecs.TermVectorsReader;
-
-    /// <summary>
-    /// IndexReader implementation over a single segment.
-    /// <p>
-    /// Instances pointing to the same segment (but with different deletes, etc)
-    /// may share the same core data.
-    /// @lucene.experimental
-    /// </summary>
-#if FEATURE_SERIALIZABLE
-    [Serializable]
-#endif
-    public sealed class SegmentReader : AtomicReader
-    {
-        private readonly SegmentCommitInfo si;
-        private readonly IBits liveDocs;
-
-        // Normally set to si.docCount - si.delDocCount, unless we
-        // were created as an NRT reader from IW, in which case IW
-        // tells us the docCount:
-        private readonly int numDocs;
-
-        internal readonly SegmentCoreReaders core;
-        internal readonly SegmentDocValues segDocValues;
-
-        internal readonly DisposableThreadLocal<IDictionary<string, object>> docValuesLocal = new DisposableThreadLocalAnonymousInnerClassHelper();
-
-        private class DisposableThreadLocalAnonymousInnerClassHelper : DisposableThreadLocal<IDictionary<string, object>>
-        {
-            public DisposableThreadLocalAnonymousInnerClassHelper()
-            {
-            }
-
-            protected internal override IDictionary<string, object> InitialValue()
-            {
-                return new Dictionary<string, object>();
-            }
-        }
-
-        internal readonly DisposableThreadLocal<IDictionary<string, IBits>> docsWithFieldLocal = new DisposableThreadLocalAnonymousInnerClassHelper2();
-
-        private class DisposableThreadLocalAnonymousInnerClassHelper2 : DisposableThreadLocal<IDictionary<string, IBits>>
-        {
-            public DisposableThreadLocalAnonymousInnerClassHelper2()
-            {
-            }
-
-            protected internal override IDictionary<string, IBits> InitialValue()
-            {
-                return new Dictionary<string, IBits>();
-            }
-        }
-
-        internal readonly IDictionary<string, DocValuesProducer> dvProducersByField = new Dictionary<string, DocValuesProducer>();
-        internal readonly ISet<DocValuesProducer> dvProducers = new IdentityHashSet<DocValuesProducer>();
-
-        private readonly FieldInfos fieldInfos; // LUCENENET specific - since it is readonly, made all internal classes use property
-
-        private readonly IList<long?> dvGens = new List<long?>();
-
-        /// <summary>
-        /// Constructs a new SegmentReader with a new core. </summary>
-        /// <exception cref="CorruptIndexException"> if the index is corrupt </exception>
-        /// <exception cref="IOException"> if there is a low-level IO error </exception>
-        // TODO: why is this public?
-        public SegmentReader(SegmentCommitInfo si, int termInfosIndexDivisor, IOContext context)
-        {
-            this.si = si;
-            // TODO if the segment uses CFS, we may open the CFS file twice: once for
-            // reading the FieldInfos (if they are not gen'd) and second time by
-            // SegmentCoreReaders. We can open the CFS here and pass to SCR, but then it
-            // results in less readable code (resource not closed where it was opened).
-            // Best if we could somehow read FieldInfos in SCR but not keep it there, but
-            // constructors don't allow returning two things...
-            fieldInfos = ReadFieldInfos(si);
-            core = new SegmentCoreReaders(this, si.Info.Dir, si, context, termInfosIndexDivisor);
-            segDocValues = new SegmentDocValues();
-
-            bool success = false;
-            Codec codec = si.Info.Codec;
-            try
-            {
-                if (si.HasDeletions)
-                {
-                    // NOTE: the bitvector is stored using the regular directory, not cfs
-                    liveDocs = codec.LiveDocsFormat.ReadLiveDocs(Directory, si, IOContext.READ_ONCE);
-                }
-                else
-                {
-                    Debug.Assert(si.DelCount == 0);
-                    liveDocs = null;
-                }
-                numDocs = si.Info.DocCount - si.DelCount;
-
-                if (FieldInfos.HasDocValues)
-                {
-                    InitDocValuesProducers(codec);
-                }
-
-                success = true;
-            }
-            finally
-            {
-                // With lock-less commits, it's entirely possible (and
-                // fine) to hit a FileNotFound exception above.  In
-                // this case, we want to explicitly close any subset
-                // of things that were opened so that we don't have to
-                // wait for a GC to do so.
-                if (!success)
-                {
-                    DoClose();
-                }
-            }
-        }
-
-        /// <summary>
-        /// Create new SegmentReader sharing core from a previous
-        ///  SegmentReader and loading new live docs from a new
-        ///  deletes file.  Used by openIfChanged.
-        /// </summary>
-        internal SegmentReader(SegmentCommitInfo si, SegmentReader sr)
-            : this(si, sr, si.Info.Codec.LiveDocsFormat.ReadLiveDocs(si.Info.Dir, si, IOContext.READ_ONCE), si.Info.DocCount - si.DelCount)
-        {
-        }
-
-        /// <summary>
-        /// Create new SegmentReader sharing core from a previous
-        ///  SegmentReader and using the provided in-memory
-        ///  liveDocs.  Used by IndexWriter to provide a new NRT
-        ///  reader
-        /// </summary>
-        internal SegmentReader(SegmentCommitInfo si, SegmentReader sr, IBits liveDocs, int numDocs)
-        {
-            this.si = si;
-            this.liveDocs = liveDocs;
-            this.numDocs = numDocs;
-            this.core = sr.core;
-            core.IncRef();
-            this.segDocValues = sr.segDocValues;
-
-            //    System.out.println("[" + Thread.currentThread().getName() + "] SR.init: sharing reader: " + sr + " for gens=" + sr.genDVProducers.keySet());
-
-            // increment refCount of DocValuesProducers that are used by this reader
-            bool success = false;
-            try
-            {
-                Codec codec = si.Info.Codec;
-                if (si.FieldInfosGen == -1)
-                {
-                    fieldInfos = sr.FieldInfos;
-                }
-                else
-                {
-                    fieldInfos = ReadFieldInfos(si);
-                }
-
-                if (FieldInfos.HasDocValues)
-                {
-                    InitDocValuesProducers(codec);
-                }
-                success = true;
-            }
-            finally
-            {
-                if (!success)
-                {
-                    DoClose();
-                }
-            }
-        }
-
-        // initialize the per-field DocValuesProducer
-        private void InitDocValuesProducers(Codec codec)
-        {
-            Directory dir = core.cfsReader != null ? core.cfsReader : si.Info.Dir;
-            DocValuesFormat dvFormat = codec.DocValuesFormat;
-            IDictionary<long?, IList<FieldInfo>> genInfos = GetGenInfos();
-
-            //      System.out.println("[" + Thread.currentThread().getName() + "] SR.initDocValuesProducers: segInfo=" + si + "; gens=" + genInfos.keySet());
-
-            // TODO: can we avoid iterating over fieldinfos several times and creating maps of all this stuff if dv updates do not exist?
-
-            foreach (KeyValuePair<long?, IList<FieldInfo>> e in genInfos)
-            {
-                long? gen = e.Key;
-                IList<FieldInfo> infos = e.Value;
-                DocValuesProducer dvp = segDocValues.GetDocValuesProducer(gen, si, IOContext.READ, dir, dvFormat, infos, TermInfosIndexDivisor);
-                foreach (FieldInfo fi in infos)
-                {
-                    dvProducersByField[fi.Name] = dvp;
-                    dvProducers.Add(dvp);
-                }
-            }
-
-            dvGens.AddRange(genInfos.Keys);
-        }
-
-        /// <summary>
-        /// Reads the most recent <seealso cref="FieldInfos"/> of the given segment info.
-        ///
-        /// @lucene.internal
-        /// </summary>
-        internal static FieldInfos ReadFieldInfos(SegmentCommitInfo info)
-        {
-            Directory dir;
-            bool closeDir;
-            if (info.FieldInfosGen == -1 && info.Info.UseCompoundFile)
-            {
-                // no fieldInfos gen and segment uses a compound file
-                dir = new CompoundFileDirectory(info.Info.Dir, IndexFileNames.SegmentFileName(info.Info.Name, "", IndexFileNames.COMPOUND_FILE_EXTENSION), IOContext.READ_ONCE, false);
-                closeDir = true;
-            }
-            else
-            {
-                // gen'd FIS are read outside CFS, or the segment doesn't use a compound file
-                dir = info.Info.Dir;
-                closeDir = false;
-            }
-
-            try
-            {
-                string segmentSuffix = info.FieldInfosGen == -1 ? "" : info.FieldInfosGen.ToString(CultureInfo.InvariantCulture);//Convert.ToString(info.FieldInfosGen, Character.MAX_RADIX));
-                return info.Info.Codec.FieldInfosFormat.FieldInfosReader.Read(dir, info.Info.Name, segmentSuffix, IOContext.READ_ONCE);
-            }
-            finally
-            {
-                if (closeDir)
-                {
-                    dir.Dispose();
-                }
-            }
-        }
-
-        // returns a gen->List<FieldInfo> mapping. Fields without DV updates have gen=-1
-        private IDictionary<long?, IList<FieldInfo>> GetGenInfos()
-        {
-            IDictionary<long?, IList<FieldInfo>> genInfos = new Dictionary<long?, IList<FieldInfo>>();
-            foreach (FieldInfo fi in FieldInfos)
-            {
-                if (fi.DocValuesType == DocValuesType.NONE)
-                {
-                    continue;
-                }
-                long gen = fi.DocValuesGen;
-                IList<FieldInfo> infos;
-                genInfos.TryGetValue(gen, out infos);
-                if (infos == null)
-                {
-                    infos = new List<FieldInfo>();
-                    genInfos[gen] = infos;
-                }
-                infos.Add(fi);
-            }
-            return genInfos;
-        }
-
-        public override IBits LiveDocs
-        {
-            get
-            {
-                EnsureOpen();
-                return liveDocs;
-            }
-        }
-
-        protected internal override void DoClose()
-        {
-            //System.out.println("SR.close seg=" + si);
-            try
-            {
-                core.DecRef();
-            }
-            finally
-            {
-                dvProducersByField.Clear();
-                try
-                {
-                    IOUtils.Close(docValuesLocal, docsWithFieldLocal);
-                }
-                finally
-                {
-                    segDocValues.DecRef(dvGens);
-                }
-            }
-        }
-
-        public override FieldInfos FieldInfos
-        {
-            get
-            {
-                EnsureOpen();
-                return fieldInfos;
-            }
-        }
-
-        /// <summary>
-        /// Expert: retrieve thread-private {@link
-        ///  StoredFieldsReader}
-        ///  @lucene.internal
-        /// </summary>
-        public StoredFieldsReader FieldsReader
-        {
-            get
-            {
-                EnsureOpen();
-                return core.fieldsReaderLocal.Get();
-            }
-        }
-
-        public override void Document(int docID, StoredFieldVisitor visitor)
-        {
-            CheckBounds(docID);
-            FieldsReader.VisitDocument(docID, visitor);
-        }
-
-        public override Fields Fields
-        {
-            get
-            {
-                EnsureOpen();
-                return core.fields;
-            }
-        }
-
-        public override int NumDocs
-        {
-            get
-            {
-                // Don't call ensureOpen() here (it could affect performance)
-                return numDocs;
-            }
-        }
-
-        public override int MaxDoc
-        {
-            get
-            {
-                // Don't call ensureOpen() here (it could affect performance)
-                return si.Info.DocCount;
-            }
-        }
-
-        /// <summary>
-        /// Expert: retrieve thread-private {@link
-        ///  TermVectorsReader}
-        ///  @lucene.internal
-        /// </summary>
-        public TermVectorsReader TermVectorsReader
-        {
-            get
-            {
-                EnsureOpen();
-                return core.termVectorsLocal.Get();
-            }
-        }
-
-        public override Fields GetTermVectors(int docID)
-        {
-            TermVectorsReader termVectorsReader = TermVectorsReader;
-            if (termVectorsReader == null)
-            {
-                return null;
-            }
-            CheckBounds(docID);
-            return termVectorsReader.Get(docID);
-        }
-
-        private void CheckBounds(int docID)
-        {
-            if (docID < 0 || docID >= MaxDoc)
-            {
-                throw new System.IndexOutOfRangeException("docID must be >= 0 and < maxDoc=" + MaxDoc + " (got docID=" + docID + ")");
-            }
-        }
-
-        public override string ToString()
-        {
-            // SegmentInfo.toString takes dir and number of
-            // *pending* deletions; so we reverse compute that here:
-            return si.ToString(si.Info.Dir, si.Info.DocCount - numDocs - si.DelCount);
-        }
-
-        /// <summary>
-        /// Return the name of the segment this reader is reading.
-        /// </summary>
-        public string SegmentName
-        {
-            get
-            {
-                return si.Info.Name;
-            }
-        }
-
-        /// <summary>
-        /// Return the SegmentCommitInfo of the segment this reader is reading.
-        /// </summary>
-        public SegmentCommitInfo SegmentInfo
-        {
-            get
-            {
-                return si;
-            }
-        }
-
-        /// <summary>
-        /// Returns the directory this index resides in. </summary>
-        public Directory Directory
-        {
-            get
-            {
-                // Don't ensureOpen here -- in certain cases, when a
-                // cloned/reopened reader needs to commit, it may call
-                // this method on the closed original reader
-                return si.Info.Dir;
-            }
-        }
-
-        // this is necessary so that cloned SegmentReaders (which
-        // share the underlying postings data) will map to the
-        // same entry in the FieldCache.  See LUCENE-1579.
-        public override object CoreCacheKey
-        {
-            get
-            {
-                // NOTE: if this ever changes, be sure to fix
-                // SegmentCoreReader.notifyCoreClosedListeners to match!
-                // Today it passes "this" as its coreCacheKey:
-                return core;
-            }
-        }
-
-        public override object CombinedCoreAndDeletesKey
-        {
-            get
-            {
-                return this;
-            }
-        }
-
-        /// <summary>
-        /// Returns term infos index divisor originally passed to
-        ///  <seealso cref="#SegmentReader(SegmentCommitInfo, int, IOContext)"/>.
-        /// </summary>
-        public int TermInfosIndexDivisor
-        {
-            get
-            {
-                return core.termsIndexDivisor;
-            }
-        }
-
-        // Returns the FieldInfo that corresponds to the given field and type, or
-        // null if the field does not exist or was not indexed with the requested
-        // DocValuesType.
-        private FieldInfo GetDVField(string field, DocValuesType type)
-        {
-            FieldInfo fi = FieldInfos.FieldInfo(field);
-            if (fi == null)
-            {
-                // Field does not exist
-                return null;
-            }
-            if (fi.DocValuesType == DocValuesType.NONE)
-            {
-                // Field was not indexed with doc values
-                return null;
-            }
-            if (fi.DocValuesType != type)
-            {
-                // Field DocValues are different than requested type
-                return null;
-            }
-
-            return fi;
-        }
-
-        public override NumericDocValues GetNumericDocValues(string field)
-        {
-            EnsureOpen();
-            FieldInfo fi = GetDVField(field, DocValuesType.NUMERIC);
-            if (fi == null)
-            {
-                return null;
-            }
-
-            IDictionary<string, object> dvFields = docValuesLocal.Get();
-
-            NumericDocValues dvs;
-            object dvsDummy;
-            dvFields.TryGetValue(field, out dvsDummy);
-            dvs = (NumericDocValues)dvsDummy;
-            if (dvs == null)
-            {
-                DocValuesProducer dvProducer;
-                dvProducersByField.TryGetValue(field, out dvProducer);
-                Debug.Assert(dvProducer != null);
-                dvs = dvProducer.GetNumeric(fi);
-                dvFields[field] = dvs;
-            }
-
-            return dvs;
-        }
-
-        public override IBits GetDocsWithField(string field)
-        {
-            EnsureOpen();
-            FieldInfo fi = FieldInfos.FieldInfo(field);
-            if (fi == null)
-            {
-                // Field does not exist
-                return null;
-            }
-            if (fi.DocValuesType == DocValuesType.NONE)
-            {
-                // Field was not indexed with doc values
-                return null;
-            }
-
-            IDictionary<string, IBits> dvFields = docsWithFieldLocal.Get();
-
-            IBits dvs;
-            dvFields.TryGetValue(field, out dvs);
-            if (dvs == null)
-            {
-                DocValuesProducer dvProducer;
-                dvProducersByField.TryGetValue(field, out dvProducer);
-                Debug.Assert(dvProducer != null);
-                dvs = dvProducer.GetDocsWithField(fi);
-                dvFields[field] = dvs;
-            }
-
-            return dvs;
-        }
-
-        public override BinaryDocValues GetBinaryDocValues(string field)
-        {
-            EnsureOpen();
-            FieldInfo fi = GetDVField(field, DocValuesType.BINARY);
-            if (fi == null)
-            {
-                return null;
-            }
-
-            IDictionary<string, object> dvFields = docValuesLocal.Get();
-
-            object ret;
-            BinaryDocValues dvs;
-            dvFields.TryGetValue(field, out ret);
-            dvs = (BinaryDocValues)ret;
-            if (dvs == null)
-            {
-                DocValuesProducer dvProducer;
-                dvProducersByField.TryGetValue(field, out dvProducer);
-                Debug.Assert(dvProducer != null);
-                dvs = dvProducer.GetBinary(fi);
-                dvFields[field] = dvs;
-            }
-
-            return dvs;
-        }
-
-        public override SortedDocValues GetSortedDocValues(string field)
-        {
-            EnsureOpen();
-            FieldInfo fi = GetDVField(field, DocValuesType.SORTED);
-            if (fi == null)
-            {
-                return null;
-            }
-
-            IDictionary<string, object> dvFields = docValuesLocal.Get();
-
-            SortedDocValues dvs;
-            object ret;
-            dvFields.TryGetValue(field, out ret);
-            dvs = (SortedDocValues)ret;
-            if (dvs == null)
-            {
-                DocValuesProducer dvProducer;
-                dvProducersByField.TryGetValue(field, out dvProducer);
-                Debug.Assert(dvProducer != null);
-                dvs = dvProducer.GetSorted(fi);
-                dvFields[field] = dvs;
-            }
-
-            return dvs;
-        }
-
-        public override SortedSetDocValues GetSortedSetDocValues(string field)
-        {
-            EnsureOpen();
-            FieldInfo fi = GetDVField(field, DocValuesType.SORTED_SET);
-            if (fi == null)
-            {
-                return null;
-            }
-
-            IDictionary<string, object> dvFields = docValuesLocal.Get();
-
-            object ret;
-            SortedSetDocValues dvs;
-            dvFields.TryGetValue(field, out ret);
-            dvs = (SortedSetDocValues)ret;
-            if (dvs == null)
-            {
-                DocValuesProducer dvProducer;
-                dvProducersByField.TryGetValue(field, out dvProducer);
-                Debug.Assert(dvProducer != null);
-                dvs = dvProducer.GetSortedSet(fi);
-                dvFields[field] = dvs;
-            }
-
-            return dvs;
-        }
-
-        public override NumericDocValues GetNormValues(string field)
-        {
-            EnsureOpen();
-            FieldInfo fi = FieldInfos.FieldInfo(field);
-            if (fi == null || !fi.HasNorms)
-            {
-                // Field does not exist or does not index norms
-                return null;
-            }
-            return core.GetNormValues(fi);
-        }
-
-        /// <summary>
-        /// Called when the shared core for this SegmentReader
-        /// is closed.
-        /// <p>
-        /// this listener is called only once all SegmentReaders
-        /// sharing the same core are closed.  At this point it
-        /// is safe for apps to evict this reader from any caches
-        /// keyed on <seealso cref="#getCoreCacheKey"/>.  this is the same
-        /// interface that <seealso cref="IFieldCache"/> uses, internally,
-        /// to evict entries.</p>
-        ///
-        /// @lucene.experimental
-        /// </summary>
-        public interface ICoreClosedListener
-        {
-            /// <summary>
-            /// Invoked when the shared core of the original {@code
-            ///  SegmentReader} has closed.
-            /// </summary>
-            void OnClose(object ownerCoreCacheKey);
-        }
-
-        /// <summary>
-        /// Expert: adds a CoreClosedListener to this reader's shared core </summary>
-        public void AddCoreClosedListener(ICoreClosedListener listener)
-        {
-            EnsureOpen();
-            core.AddCoreClosedListener(listener);
-        }
-
-        /// <summary>
-        /// Expert: removes a CoreClosedListener from this reader's shared core </summary>
-        public void RemoveCoreClosedListener(ICoreClosedListener listener)
-        {
-            EnsureOpen();
-            core.RemoveCoreClosedListener(listener);
-        }
-
-        /// <summary>
-        /// Returns approximate RAM Bytes used </summary>
-        public long RamBytesUsed()
-        {
-            EnsureOpen();
-            long ramBytesUsed = 0;
-            if (dvProducers != null)
-            {
-                foreach (DocValuesProducer producer in dvProducers)
-                {
-                    ramBytesUsed += producer.RamBytesUsed();
-                }
-            }
-            if (core != null)
-            {
-                ramBytesUsed += core.RamBytesUsed();
-            }
-            return ramBytesUsed;
-        }
-
-        public override void CheckIntegrity()
-        {
-            EnsureOpen();
-
-            // stored fields
-            FieldsReader.CheckIntegrity();
-
-            // term vectors
-            TermVectorsReader termVectorsReader = TermVectorsReader;
-            if (termVectorsReader != null)
-            {
-                termVectorsReader.CheckIntegrity();
-            }
-
-            // terms/postings
-            if (core.fields != null)
-            {
-                core.fields.CheckIntegrity();
-            }
-
-            // norms
-            if (core.normsProducer != null)
-            {
-                core.normsProducer.CheckIntegrity();
-            }
-
-            // docvalues
-            if (dvProducers != null)
-            {
-                foreach (DocValuesProducer producer in dvProducers)
-                {
-                    producer.CheckIntegrity();
-                }
-            }
-        }
-    }
-}
\ No newline at end of file
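
For context on the removed SegmentReader: per-segment readers are normally reached through the leaves of a composite reader rather than constructed directly. A minimal sketch, assuming a typical Lucene.NET 4.8 setup (the index path is a placeholder):

    using Lucene.Net.Index;
    using Lucene.Net.Store;

    // Sketch: enumerate the per-segment (atomic) readers of an index.
    // Each leaf of a DirectoryReader is typically backed by a SegmentReader.
    using (Directory dir = FSDirectory.Open("/path/to/index"))      // placeholder path
    using (DirectoryReader reader = DirectoryReader.Open(dir))
    {
        foreach (AtomicReaderContext leaf in reader.Leaves)
        {
            var segment = leaf.AtomicReader as SegmentReader;
            if (segment != null)
            {
                System.Console.WriteLine(segment.SegmentName + ": " + segment.NumDocs + " live docs");
            }
        }
    }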

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Index/SegmentWriteState.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/SegmentWriteState.cs b/src/Lucene.Net.Core/Index/SegmentWriteState.cs
deleted file mode 100644
index 5a8ae5b..0000000
--- a/src/Lucene.Net.Core/Index/SegmentWriteState.cs
+++ /dev/null
@@ -1,142 +0,0 @@
-using System;
-
-namespace Lucene.Net.Index
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    using Directory = Lucene.Net.Store.Directory;
-    using InfoStream = Lucene.Net.Util.InfoStream;
-    using IOContext = Lucene.Net.Store.IOContext;
-    using IMutableBits = Lucene.Net.Util.IMutableBits;
-    using PerFieldPostingsFormat = Lucene.Net.Codecs.PerField.PerFieldPostingsFormat; // javadocs
-    using PostingsFormat = Lucene.Net.Codecs.PostingsFormat; // javadocs
-
-    /// <summary>
-    /// Holder class for common parameters used during write.
-    /// @lucene.experimental
-    /// </summary>
-#if FEATURE_SERIALIZABLE
-    [Serializable]
-#endif
-    public class SegmentWriteState
-    {
-        /// <summary>
-        /// <seealso cref="InfoStream"/> used for debugging messages. </summary>
-        public InfoStream InfoStream { get; private set; }
-
-        /// <summary>
-        /// <seealso cref="Directory"/> where this segment will be written
-        ///  to.
-        /// </summary>
-        public Directory Directory { get; private set; }
-
-        /// <summary>
-        /// <seealso cref="SegmentInfo"/> describing this segment. </summary>
-        public SegmentInfo SegmentInfo { get; private set; }
-
-        /// <summary>
-        /// <seealso cref="FieldInfos"/> describing all fields in this
-        ///  segment.
-        /// </summary>
-        public FieldInfos FieldInfos { get; private set; }
-
-        /// <summary>
-        /// Number of deleted documents set while flushing the
-        ///  segment.
-        /// </summary>
-        public int DelCountOnFlush { get; set; }
-
-        /// <summary>
-        /// Deletes and updates to apply while we are flushing the segment. A Term is
-        /// enrolled in here if it was deleted/updated at one point, and it's mapped to
-        /// the docIDUpto, meaning any docID &lt; docIDUpto containing this term should
-        /// be deleted/updated.
-        /// </summary>
-        public BufferedUpdates SegUpdates { get; private set; }
-
-        /// <summary>
-        /// <seealso cref="IMutableBits"/> recording live documents; this is
-        ///  only set if there is one or more deleted documents.
-        /// </summary>
-        public IMutableBits LiveDocs { get; set; }
-
-        /// <summary>
-        /// Unique suffix for any postings files written for this
-        ///  segment.  <seealso cref="PerFieldPostingsFormat"/> sets this for
-        ///  each of the postings formats it wraps.  If you create
-        ///  a new <seealso cref="PostingsFormat"/> then any files you
-        ///  write/read must be derived using this suffix (use
-        ///  <seealso cref="IndexFileNames#segmentFileName(String,String,String)"/>).
-        /// </summary>
-        public string SegmentSuffix { get; private set; }
-
-        /// <summary>
-        /// Expert: The fraction of terms in the "dictionary" which should be stored
-        /// in RAM.  Smaller values use more memory, but make searching slightly
-        /// faster, while larger values use less memory and make searching slightly
-        /// slower.  Searching is typically not dominated by dictionary lookup, so
-        /// tweaking this is rarely useful.
-        /// </summary>
-        public int TermIndexInterval { get; set; } // TODO: this should be private to the codec, not settable here or in IWC
-
-        /// <summary>
-        /// <seealso cref="IOContext"/> for all writes; you should pass this
-        ///  to <seealso cref="Directory#createOutput(String,IOContext)"/>.
-        /// </summary>
-        public IOContext Context { get; private set; }
-
-        /// <summary>
-        /// Sole constructor. </summary>
-        public SegmentWriteState(InfoStream infoStream, Directory directory, SegmentInfo segmentInfo, FieldInfos fieldInfos, int termIndexInterval, BufferedUpdates segUpdates, IOContext context)
-            : this(infoStream, directory, segmentInfo, fieldInfos, termIndexInterval, segUpdates, context, "")
-        {
-        }
-
-        /// <summary>
-        /// Constructor which takes segment suffix.
-        /// </summary>
-        /// <seealso cref= #SegmentWriteState(InfoStream, Directory, SegmentInfo, FieldInfos, int,
-        ///      BufferedUpdates, IOContext) </seealso>
-        public SegmentWriteState(InfoStream infoStream, Directory directory, SegmentInfo segmentInfo, FieldInfos fieldInfos, int termIndexInterval, BufferedUpdates segUpdates, IOContext context, string segmentSuffix)
-        {
-            this.InfoStream = infoStream;
-            this.SegUpdates = segUpdates;
-            this.Directory = directory;
-            this.SegmentInfo = segmentInfo;
-            this.FieldInfos = fieldInfos;
-            this.TermIndexInterval = termIndexInterval;
-            this.SegmentSuffix = segmentSuffix;
-            this.Context = context;
-        }
-
-        /// <summary>
-        /// Create a shallow copy of <seealso cref="SegmentWriteState"/> with a new segment suffix. </summary>
-        public SegmentWriteState(SegmentWriteState state, string segmentSuffix)
-        {
-            InfoStream = state.InfoStream;
-            Directory = state.Directory;
-            SegmentInfo = state.SegmentInfo;
-            FieldInfos = state.FieldInfos;
-            TermIndexInterval = state.TermIndexInterval;
-            Context = state.Context;
-            this.SegmentSuffix = segmentSuffix;
-            SegUpdates = state.SegUpdates;
-            DelCountOnFlush = state.DelCountOnFlush;
-        }
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Index/SerialMergeScheduler.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/SerialMergeScheduler.cs b/src/Lucene.Net.Core/Index/SerialMergeScheduler.cs
deleted file mode 100644
index c150594..0000000
--- a/src/Lucene.Net.Core/Index/SerialMergeScheduler.cs
+++ /dev/null
@@ -1,62 +0,0 @@
-using System;
-
-namespace Lucene.Net.Index
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    /// <summary>
-    /// A <seealso cref="MergeScheduler"/> that simply does each merge
-    ///  sequentially, using the current thread.
-    /// </summary>
-#if FEATURE_SERIALIZABLE
-    [Serializable]
-#endif
-    public class SerialMergeScheduler : MergeScheduler
-    {
-        /// <summary>
-        /// Sole constructor. </summary>
-        public SerialMergeScheduler()
-        {
-        }
-
-        /// <summary>
-        /// Just do the merges in sequence. We do this
-        /// "synchronized" so that even if the application is using
-        /// multiple threads, only one merge may run at a time.
-        /// </summary>
-        public override void Merge(IndexWriter writer, MergeTrigger trigger, bool newMergesFound) // LUCENENET NOTE: This was internal in the original, but the base class is public so there isn't much choice here
-        {
-            lock (this)
-            {
-                while (true)
-                {
-                    MergePolicy.OneMerge merge = writer.NextMerge();
-                    if (merge == null)
-                    {
-                        break;
-                    }
-                    writer.Merge(merge);
-                }
-            }
-        }
-
-        protected override void Dispose(bool disposing)
-        {
-        }
-    }
-}
\ No newline at end of file
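
The merge scheduler above is normally plugged into the writer configuration. A hedged sketch, assuming the property-style configuration surface of the Lucene.NET 4.8 port (analyzer and directory are placeholders):

    using Lucene.Net.Analysis.Standard;
    using Lucene.Net.Index;
    using Lucene.Net.Store;
    using Lucene.Net.Util;

    // Sketch: run every merge sequentially on the indexing thread.
    var config = new IndexWriterConfig(LuceneVersion.LUCENE_48,
                                       new StandardAnalyzer(LuceneVersion.LUCENE_48))
    {
        MergeScheduler = new SerialMergeScheduler()   // assumption: settable property in the ported config
    };
    using (var writer = new IndexWriter(new RAMDirectory(), config))
    {
        // add documents here; any merges triggered during indexing run inline
    }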

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Index/SimpleMergedSegmentWarmer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/SimpleMergedSegmentWarmer.cs b/src/Lucene.Net.Core/Index/SimpleMergedSegmentWarmer.cs
deleted file mode 100644
index 1090df8..0000000
--- a/src/Lucene.Net.Core/Index/SimpleMergedSegmentWarmer.cs
+++ /dev/null
@@ -1,102 +0,0 @@
-using System;
-using System.Diagnostics;
-
-namespace Lucene.Net.Index
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    using IndexReaderWarmer = Lucene.Net.Index.IndexWriter.IndexReaderWarmer;
-    using InfoStream = Lucene.Net.Util.InfoStream;
-
-    /// <summary>
-    /// A very simple merged segment warmer that just ensures
-    /// data structures are initialized.
-    /// </summary>
-#if FEATURE_SERIALIZABLE
-    [Serializable]
-#endif
-    public class SimpleMergedSegmentWarmer : IndexReaderWarmer
-    {
-        private readonly InfoStream infoStream;
-
-        /// <summary>
-        /// Creates a new SimpleMergedSegmentWarmer </summary>
-        /// <param name="infoStream"> InfoStream to log statistics about warming. </param>
-        public SimpleMergedSegmentWarmer(InfoStream infoStream)
-        {
-            this.infoStream = infoStream;
-        }
-
-        public override void Warm(AtomicReader reader)
-        {
-            long startTime = Environment.TickCount;
-            int indexedCount = 0;
-            int docValuesCount = 0;
-            int normsCount = 0;
-            foreach (FieldInfo info in reader.FieldInfos)
-            {
-                if (info.IsIndexed)
-                {
-                    reader.GetTerms(info.Name);
-                    indexedCount++;
-
-                    if (info.HasNorms)
-                    {
-                        reader.GetNormValues(info.Name);
-                        normsCount++;
-                    }
-                }
-
-                if (info.HasDocValues)
-                {
-                    switch (info.DocValuesType)
-                    {
-                        case DocValuesType.NUMERIC:
-                            reader.GetNumericDocValues(info.Name);
-                            break;
-
-                        case DocValuesType.BINARY:
-                            reader.GetBinaryDocValues(info.Name);
-                            break;
-
-                        case DocValuesType.SORTED:
-                            reader.GetSortedDocValues(info.Name);
-                            break;
-
-                        case DocValuesType.SORTED_SET:
-                            reader.GetSortedSetDocValues(info.Name);
-                            break;
-
-                        default:
-                            Debug.Assert(false); // unknown dv type
-                            break;
-                    }
-                    docValuesCount++;
-                }
-            }
-
-            reader.Document(0);
-            reader.GetTermVectors(0);
-
-            if (infoStream.IsEnabled("SMSW"))
-            {
-                infoStream.Message("SMSW", "Finished warming segment: " + reader + ", indexed=" + indexedCount + ", docValues=" + docValuesCount + ", norms=" + normsCount + ", time=" + (Environment.TickCount - startTime));
-            }
-        }
-    }
-}
\ No newline at end of file
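
The warmer above is typically registered on the writer configuration so that freshly merged segments are touched before being put into service. A hedged sketch; the MergedSegmentWarmer property and InfoStream.Default are assumed from the ported configuration API:

    using Lucene.Net.Analysis.Standard;
    using Lucene.Net.Index;
    using Lucene.Net.Util;

    var config = new IndexWriterConfig(LuceneVersion.LUCENE_48,
                                       new StandardAnalyzer(LuceneVersion.LUCENE_48))
    {
        // Assumption: mirrors Java's IndexWriterConfig.setMergedSegmentWarmer(...)
        MergedSegmentWarmer = new SimpleMergedSegmentWarmer(InfoStream.Default)
    };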

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Index/SingleTermsEnum.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/SingleTermsEnum.cs b/src/Lucene.Net.Core/Index/SingleTermsEnum.cs
deleted file mode 100644
index 7f35e75..0000000
--- a/src/Lucene.Net.Core/Index/SingleTermsEnum.cs
+++ /dev/null
@@ -1,57 +0,0 @@
-using System;
-
-namespace Lucene.Net.Index
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    // javadocs
-    using BytesRef = Lucene.Net.Util.BytesRef;
-
-    /// <summary>
-    /// Subclass of FilteredTermsEnum for enumerating a single term.
-    /// <para/>
-    /// For example, this can be used by <see cref="Search.MultiTermQuery"/>s
-    /// that need only visit one term, but want to preserve
-    /// MultiTermQuery semantics such as <see cref="Search.MultiTermQuery.MultiTermRewriteMethod"/>.
-    /// </summary>
-#if FEATURE_SERIALIZABLE
-    [Serializable]
-#endif
-    public sealed class SingleTermsEnum : FilteredTermsEnum
-    {
-        private readonly BytesRef singleRef;
-
-        /// <summary>
-        /// Creates a new <code>SingleTermsEnum</code>.
-        /// <p>
-        /// After calling the constructor the enumeration is already pointing to the term,
-        /// if it exists.
-        /// </summary>
-        public SingleTermsEnum(TermsEnum tenum, BytesRef termText)
-            : base(tenum)
-        {
-            singleRef = termText;
-            SetInitialSeekTerm(termText);
-        }
-
-        protected override AcceptStatus Accept(BytesRef term)
-        {
-            return term.Equals(singleRef) ? AcceptStatus.YES : AcceptStatus.END;
-        }
-    }
-}
\ No newline at end of file
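
As a usage illustration of the class above, a hypothetical helper (the helper name is invented for this sketch) that narrows an already-obtained TermsEnum to a single term:

    using Lucene.Net.Index;
    using Lucene.Net.Util;

    internal static class SingleTermHelper
    {
        // Wraps an existing TermsEnum so that MultiTermQuery-style rewriting
        // visits exactly one term while keeping the normal rewrite semantics.
        public static TermsEnum VisitOnly(TermsEnum tenum, string termText)
        {
            return new SingleTermsEnum(tenum, new BytesRef(termText));
        }
    }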

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Index/SingletonSortedSetDocValues.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/SingletonSortedSetDocValues.cs b/src/Lucene.Net.Core/Index/SingletonSortedSetDocValues.cs
deleted file mode 100644
index cb829fb..0000000
--- a/src/Lucene.Net.Core/Index/SingletonSortedSetDocValues.cs
+++ /dev/null
@@ -1,97 +0,0 @@
-using System;
-using System.Diagnostics;
-
-namespace Lucene.Net.Index
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    using BytesRef = Lucene.Net.Util.BytesRef;
-
-    /// <summary>
-    /// Exposes multi-valued view over a single-valued instance.
-    /// <p>
-    /// this can be used if you want to have one multi-valued implementation
-    /// against e.g. FieldCache.getDocTermOrds that also works for single-valued
-    /// fields.
-    /// </summary>
-#if FEATURE_SERIALIZABLE
-    [Serializable]
-#endif
-    internal sealed class SingletonSortedSetDocValues : SortedSetDocValues
-    {
-        private readonly SortedDocValues @in;
-        private int docID;
-        private bool set;
-
-        /// <summary>
-        /// Creates a multi-valued view over the provided SortedDocValues </summary>
-        public SingletonSortedSetDocValues(SortedDocValues @in)
-        {
-            this.@in = @in;
-            Debug.Assert(NO_MORE_ORDS == -1); // this allows our nextOrd() to work for missing values without a check
-        }
-
-        /// <summary>
-        /// Return the wrapped <seealso cref="SortedDocValues"/> </summary>
-        public SortedDocValues SortedDocValues
-        {
-            get
-            {
-                return @in;
-            }
-        }
-
-        public override long NextOrd()
-        {
-            if (set)
-            {
-                return NO_MORE_ORDS;
-            }
-            else
-            {
-                set = true;
-                return @in.GetOrd(docID);
-            }
-        }
-
-        public override void SetDocument(int docID)
-        {
-            this.docID = docID;
-            set = false;
-        }
-
-        public override void LookupOrd(long ord, BytesRef result)
-        {
-            // cast is ok: single-valued cannot exceed Integer.MAX_VALUE
-            @in.LookupOrd((int)ord, result);
-        }
-
-        public override long ValueCount
-        {
-            get
-            {
-                return @in.ValueCount;
-            }
-        }
-
-        public override long LookupTerm(BytesRef key)
-        {
-            return @in.LookupTerm(key);
-        }
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Index/SlowCompositeReaderWrapper.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/SlowCompositeReaderWrapper.cs b/src/Lucene.Net.Core/Index/SlowCompositeReaderWrapper.cs
deleted file mode 100644
index 8d80e63..0000000
--- a/src/Lucene.Net.Core/Index/SlowCompositeReaderWrapper.cs
+++ /dev/null
@@ -1,285 +0,0 @@
-using System;
-using System.Collections.Generic;
-using System.Diagnostics;
-
-namespace Lucene.Net.Index
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    using IBits = Lucene.Net.Util.IBits;
-    using MultiSortedDocValues = Lucene.Net.Index.MultiDocValues.MultiSortedDocValues;
-    using MultiSortedSetDocValues = Lucene.Net.Index.MultiDocValues.MultiSortedSetDocValues;
-    using OrdinalMap = Lucene.Net.Index.MultiDocValues.OrdinalMap;
-
-    /// <summary>
-    /// this class forces a composite reader (eg a {@link
-    /// MultiReader} or <seealso cref="DirectoryReader"/>) to emulate an
-    /// atomic reader.  this requires implementing the postings
-    /// APIs on-the-fly, using the static methods in {@link
-    /// MultiFields}, <seealso cref="MultiDocValues"/>, by stepping through
-    /// the sub-readers to merge fields/terms, appending docs, etc.
-    ///
-    /// <p><b>NOTE</b>: this class almost always results in a
-    /// performance hit.  If this is important to your use case,
-    /// you'll get better performance by gathering the sub readers using
-    /// <seealso cref="IndexReader#getContext()"/> to get the
-    /// atomic leaves and then operate per-AtomicReader,
-    /// instead of using this class.
-    /// </summary>
-#if FEATURE_SERIALIZABLE
-    [Serializable]
-#endif
-    public sealed class SlowCompositeReaderWrapper : AtomicReader
-    {
-        private readonly CompositeReader @in;
-        private readonly Fields fields;
-        private readonly IBits liveDocs;
-
-        /// <summary>
-        /// this method is sugar for getting an <seealso cref="AtomicReader"/> from
-        /// an <seealso cref="IndexReader"/> of any kind. If the reader is already atomic,
-        /// it is returned unchanged, otherwise wrapped by this class.
-        /// </summary>
-        public static AtomicReader Wrap(IndexReader reader)
-        {
-            CompositeReader compositeReader = reader as CompositeReader;
-            if (compositeReader != null)
-            {
-                return new SlowCompositeReaderWrapper(compositeReader);
-            }
-            else
-            {
-                Debug.Assert(reader is AtomicReader);
-                return (AtomicReader)reader;
-            }
-        }
-
-        private SlowCompositeReaderWrapper(CompositeReader reader)
-            : base()
-        {
-            @in = reader;
-            fields = MultiFields.GetFields(@in);
-            liveDocs = MultiFields.GetLiveDocs(@in);
-            @in.RegisterParentReader(this);
-        }
-
-        public override string ToString()
-        {
-            return "SlowCompositeReaderWrapper(" + @in + ")";
-        }
-
-        public override Fields Fields
-        {
-            get
-            {
-                EnsureOpen();
-                return fields;
-            }
-        }
-
-        public override NumericDocValues GetNumericDocValues(string field)
-        {
-            EnsureOpen();
-            return MultiDocValues.GetNumericValues(@in, field);
-        }
-
-        public override IBits GetDocsWithField(string field)
-        {
-            EnsureOpen();
-            return MultiDocValues.GetDocsWithField(@in, field);
-        }
-
-        public override BinaryDocValues GetBinaryDocValues(string field)
-        {
-            EnsureOpen();
-            return MultiDocValues.GetBinaryValues(@in, field);
-        }
-
-        public override SortedDocValues GetSortedDocValues(string field)
-        {
-            EnsureOpen();
-            OrdinalMap map = null;
-            lock (cachedOrdMaps)
-            {
-                if (!cachedOrdMaps.TryGetValue(field, out map))
-                {
-                    // uncached, or not a multi dv
-                    SortedDocValues dv = MultiDocValues.GetSortedValues(@in, field);
-                    MultiSortedDocValues docValues = dv as MultiSortedDocValues;
-                    if (docValues != null)
-                    {
-                        map = docValues.Mapping;
-                        if (map.owner == CoreCacheKey)
-                        {
-                            cachedOrdMaps[field] = map;
-                        }
-                    }
-                    return dv;
-                }
-            }
-            // cached ordinal map
-            if (FieldInfos.FieldInfo(field).DocValuesType != DocValuesType.SORTED)
-            {
-                return null;
-            }
-            int size = @in.Leaves.Count;
-            SortedDocValues[] values = new SortedDocValues[size];
-            int[] starts = new int[size + 1];
-            for (int i = 0; i < size; i++)
-            {
-                AtomicReaderContext context = @in.Leaves[i];
-                SortedDocValues v = context.AtomicReader.GetSortedDocValues(field) ?? DocValues.EMPTY_SORTED;
-                values[i] = v;
-                starts[i] = context.DocBase;
-            }
-            starts[size] = MaxDoc;
-            return new MultiSortedDocValues(values, starts, map);
-        }
-
-        public override SortedSetDocValues GetSortedSetDocValues(string field)
-        {
-            EnsureOpen();
-            OrdinalMap map = null;
-            lock (cachedOrdMaps)
-            {
-                if (!cachedOrdMaps.TryGetValue(field, out map))
-                {
-                    // uncached, or not a multi dv
-                    SortedSetDocValues dv = MultiDocValues.GetSortedSetValues(@in, field);
-                    MultiSortedSetDocValues docValues = dv as MultiSortedSetDocValues;
-                    if (docValues != null)
-                    {
-                        map = docValues.Mapping;
-                        if (map.owner == CoreCacheKey)
-                        {
-                            cachedOrdMaps[field] = map;
-                        }
-                    }
-                    return dv;
-                }
-            }
-            // cached ordinal map
-            if (FieldInfos.FieldInfo(field).DocValuesType != DocValuesType.SORTED_SET)
-            {
-                return null;
-            }
-            Debug.Assert(map != null);
-            int size = @in.Leaves.Count;
-            var values = new SortedSetDocValues[size];
-            int[] starts = new int[size + 1];
-            for (int i = 0; i < size; i++)
-            {
-                AtomicReaderContext context = @in.Leaves[i];
-                SortedSetDocValues v = context.AtomicReader.GetSortedSetDocValues(field) ?? DocValues.EMPTY_SORTED_SET;
-                values[i] = v;
-                starts[i] = context.DocBase;
-            }
-            starts[size] = MaxDoc;
-            return new MultiSortedSetDocValues(values, starts, map);
-        }
-
-        // TODO: this could really be a weak map somewhere else on the coreCacheKey,
-        // but do we really need to optimize slow-wrapper any more?
-        private readonly IDictionary<string, OrdinalMap> cachedOrdMaps = new Dictionary<string, OrdinalMap>();
-
-        public override NumericDocValues GetNormValues(string field)
-        {
-            EnsureOpen();
-            return MultiDocValues.GetNormValues(@in, field);
-        }
-
-        public override Fields GetTermVectors(int docID)
-        {
-            EnsureOpen();
-            return @in.GetTermVectors(docID);
-        }
-
-        public override int NumDocs
-        {
-            get
-            {
-                // Don't call ensureOpen() here (it could affect performance)
-                return @in.NumDocs;
-            }
-        }
-
-        public override int MaxDoc
-        {
-            get
-            {
-                // Don't call ensureOpen() here (it could affect performance)
-                return @in.MaxDoc;
-            }
-        }
-
-        public override void Document(int docID, StoredFieldVisitor visitor)
-        {
-            EnsureOpen();
-            @in.Document(docID, visitor);
-        }
-
-        public override IBits LiveDocs
-        {
-            get
-            {
-                EnsureOpen();
-                return liveDocs;
-            }
-        }
-
-        public override FieldInfos FieldInfos
-        {
-            get
-            {
-                EnsureOpen();
-                return MultiFields.GetMergedFieldInfos(@in);
-            }
-        }
-
-        public override object CoreCacheKey
-        {
-            get
-            {
-                return @in.CoreCacheKey;
-            }
-        }
-
-        public override object CombinedCoreAndDeletesKey
-        {
-            get
-            {
-                return @in.CombinedCoreAndDeletesKey;
-            }
-        }
-
-        protected internal override void DoClose()
-        {
-            // TODO: as this is a wrapper, should we really close the delegate?
-            @in.Dispose();
-        }
-
-        public override void CheckIntegrity()
-        {
-            EnsureOpen();
-            foreach (AtomicReaderContext ctx in @in.Leaves)
-            {
-                ctx.AtomicReader.CheckIntegrity();
-            }
-        }
-    }
-}
\ No newline at end of file
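
For context on the GetSortedSetDocValues/OrdinalMap caching removed above, here is a minimal consumer-side sketch of reading the merged sorted-set values from a composite reader. The reader instance and the "category" field name are illustrative assumptions; the MultiDocValues entry point is the same one the deleted code calls.

    using Lucene.Net.Index;
    using Lucene.Net.Util;

    internal static class SortedSetDocValuesExample
    {
        // Reads the merged (composite-reader-wide) sorted-set values for one field.
        public static void DumpOrds(IndexReader reader)
        {
            // Same entry point the deleted code uses to build the merged view.
            SortedSetDocValues dv = MultiDocValues.GetSortedSetValues(reader, "category");
            if (dv == null) return; // field has no sorted-set doc values

            var scratch = new BytesRef();
            for (int doc = 0; doc < reader.MaxDoc; doc++)
            {
                dv.SetDocument(doc);
                long ord;
                while ((ord = dv.NextOrd()) != SortedSetDocValues.NO_MORE_ORDS)
                {
                    dv.LookupOrd(ord, scratch); // resolve the global ordinal to its term bytes
                    System.Console.WriteLine($"doc {doc}: {scratch.Utf8ToString()}");
                }
            }
        }
    }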

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Index/SnapshotDeletionPolicy.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/SnapshotDeletionPolicy.cs b/src/Lucene.Net.Core/Index/SnapshotDeletionPolicy.cs
deleted file mode 100644
index 0cd7e80..0000000
--- a/src/Lucene.Net.Core/Index/SnapshotDeletionPolicy.cs
+++ /dev/null
@@ -1,374 +0,0 @@
-using System;
-using System.Collections.Generic;
-using System.Diagnostics;
-
-namespace Lucene.Net.Index
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    using Directory = Lucene.Net.Store.Directory;
-
-    /// <summary>
-    /// An <seealso cref="IndexDeletionPolicy"/> that wraps any other
-    /// <seealso cref="IndexDeletionPolicy"/> and adds the ability to hold and later release
-    /// snapshots of an index. While a snapshot is held, the <seealso cref="IndexWriter"/> will
-    /// not remove any files associated with it even if the index is otherwise being
-    /// actively, arbitrarily changed. Because we wrap another arbitrary
-    /// <seealso cref="IndexDeletionPolicy"/>, this gives you the freedom to continue using
-    /// whatever <seealso cref="IndexDeletionPolicy"/> you would normally want to use with your
-    /// index.
-    ///
-    /// <p>
-    /// this class maintains all snapshots in-memory, and so the information is not
-    /// persisted and not protected against system failures. If persistence is
-    /// important, you can use <seealso cref="PersistentSnapshotDeletionPolicy"/>.
-    ///
-    /// @lucene.experimental
-    /// </summary>
-#if FEATURE_SERIALIZABLE
-    [Serializable]
-#endif
-    public class SnapshotDeletionPolicy : IndexDeletionPolicy
-    {
-        /// <summary>
-        /// Records how many snapshots are held against each
-        ///  commit generation
-        /// </summary>
-        protected IDictionary<long, int> m_refCounts = new Dictionary<long, int>();
-
-        /// <summary>
-        /// Used to map gen to IndexCommit. </summary>
-        protected IDictionary<long?, IndexCommit> m_indexCommits = new Dictionary<long?, IndexCommit>();
-
-        /// <summary>
-        /// Wrapped <seealso cref="IndexDeletionPolicy"/> </summary>
-        private IndexDeletionPolicy primary;
-
-        /// <summary>
-        /// Most recently committed <seealso cref="IndexCommit"/>. </summary>
-        protected IndexCommit m_lastCommit;
-
-        /// <summary>
-        /// Used to detect misuse </summary>
-        private bool initCalled;
-
-        /// <summary>
-        /// Sole constructor, taking the incoming {@link
-        ///  IndexDeletionPolicy} to wrap.
-        /// </summary>
-        public SnapshotDeletionPolicy(IndexDeletionPolicy primary)
-        {
-            this.primary = primary;
-        }
-
-        public override void OnCommit<T>(IList<T> commits)
-        {
-            lock (this)
-            {
-                primary.OnCommit(WrapCommits(commits));
-                m_lastCommit = commits[commits.Count - 1];
-            }
-        }
-
-        public override void OnInit<T>(IList<T> commits)
-        {
-            lock (this)
-            {
-                initCalled = true;
-                primary.OnInit(WrapCommits(commits));
-                foreach (IndexCommit commit in commits)
-                {
-                    if (m_refCounts.ContainsKey(commit.Generation))
-                    {
-                        m_indexCommits[commit.Generation] = commit;
-                    }
-                }
-                if (commits.Count > 0)
-                {
-                    m_lastCommit = commits[commits.Count - 1];
-                }
-            }
-        }
-
-        /// <summary>
-        /// Release a snapshotted commit.
-        /// </summary>
-        /// <param name="commit">
-        ///          the commit previously returned by <seealso cref="#snapshot"/> </param>
-        public virtual void Release(IndexCommit commit)
-        {
-            lock (this)
-            {
-                long gen = commit.Generation;
-                ReleaseGen(gen);
-            }
-        }
-
-        /// <summary>
-        /// Release a snapshot by generation. </summary>
-        protected internal virtual void ReleaseGen(long gen)
-        {
-            if (!initCalled)
-            {
-                throw new InvalidOperationException("this instance is not being used by IndexWriter; be sure to use the instance returned from writer.getConfig().getIndexDeletionPolicy()");
-            }
-            // m_refCounts stores non-nullable ints, so a missing generation must be
-            // detected with TryGetValue rather than a null check (indexing would throw).
-            int refCount;
-            if (!m_refCounts.TryGetValue(gen, out refCount))
-            {
-                throw new System.ArgumentException("commit gen=" + gen + " is not currently snapshotted");
-            }
-            int refCountInt = refCount;
-            Debug.Assert(refCountInt > 0);
-            refCountInt--;
-            if (refCountInt == 0)
-            {
-                m_refCounts.Remove(gen);
-                m_indexCommits.Remove(gen);
-            }
-            else
-            {
-                m_refCounts[gen] = refCountInt;
-            }
-        }
-
-        /// <summary>
-        /// Increments the refCount for this <seealso cref="IndexCommit"/>. </summary>
-        protected internal virtual void IncRef(IndexCommit ic)
-        {
-            lock (this)
-            {
-                long gen = ic.Generation;
-                int refCount;
-                int refCountInt;
-                if (!m_refCounts.TryGetValue(gen, out refCount))
-                {
-                    m_indexCommits[gen] = m_lastCommit;
-                    refCountInt = 0;
-                }
-                else
-                {
-                    refCountInt = (int)refCount;
-                }
-                m_refCounts[gen] = refCountInt + 1;
-            }
-        }
-
-        /// <summary>
-        /// Snapshots the last commit and returns it. Once a commit is 'snapshotted,' it is protected
-        /// from deletion (as long as this <seealso cref="IndexDeletionPolicy"/> is used). The
-        /// snapshot can be removed by calling <seealso cref="#release(IndexCommit)"/> followed
-        /// by a call to <seealso cref="IndexWriter#deleteUnusedFiles()"/>.
-        ///
-        /// <p>
-        /// <b>NOTE:</b> while the snapshot is held, the files it references will not
-        /// be deleted, which will consume additional disk space in your index. If you
-        /// take a snapshot at a particularly bad time (say just before you call
-        /// forceMerge) then in the worst case this could consume an extra 1X of your
-        /// total index size, until you release the snapshot.
-        /// </summary>
-        /// <exception cref="IllegalStateException">
-        ///           if this index does not have any commits yet </exception>
-        /// <returns> the <seealso cref="IndexCommit"/> that was snapshotted. </returns>
-        public virtual IndexCommit Snapshot()
-        {
-            lock (this)
-            {
-                if (!initCalled)
-                {
-                    throw new InvalidOperationException("this instance is not being used by IndexWriter; be sure to use the instance returned from writer.getConfig().getIndexDeletionPolicy()");
-                }
-                if (m_lastCommit == null)
-                {
-                    // No commit yet, eg this is a new IndexWriter:
-                    throw new InvalidOperationException("No index commit to snapshot");
-                }
-
-                IncRef(m_lastCommit);
-
-                return m_lastCommit;
-            }
-        }
-
-        /// <summary>
-        /// Returns all IndexCommits held by at least one snapshot. </summary>
-        public virtual IList<IndexCommit> GetSnapshots()
-        {
-            lock (this)
-            {
-                return new List<IndexCommit>(m_indexCommits.Values);
-            }
-        }
-
-        /// <summary>
-        /// Returns the total number of snapshots currently held. </summary>
-        public virtual int SnapshotCount
-        {
-            get
-            {
-                lock (this)
-                {
-                    int total = 0;
-                    foreach (var refCount in m_refCounts.Values)
-                    {
-                        total += refCount;
-                    }
-
-                    return total;
-                }
-            }
-        }
-
-        /// <summary>
-        /// Retrieve an <seealso cref="IndexCommit"/> from its generation;
-        ///  returns null if this IndexCommit is not currently
-        ///  snapshotted
-        /// </summary>
-        public virtual IndexCommit GetIndexCommit(long gen)
-        {
-            lock (this)
-            {
-                // Dictionary indexing throws on a missing key; use TryGetValue so this
-                // returns null (as documented) when the generation is not snapshotted.
-                IndexCommit commit;
-                m_indexCommits.TryGetValue(gen, out commit);
-                return commit;
-            }
-        }
-
-        public override object Clone()
-        {
-            lock (this)
-            {
-                SnapshotDeletionPolicy other = (SnapshotDeletionPolicy)base.Clone();
-                other.primary = (IndexDeletionPolicy)this.primary.Clone();
-                other.m_lastCommit = null;
-                other.m_refCounts = new Dictionary<long, int>(m_refCounts);
-                other.m_indexCommits = new Dictionary<long?, IndexCommit>(m_indexCommits);
-                return other;
-            }
-        }
-
-        /// <summary>
-        /// Wraps each <seealso cref="IndexCommit"/> as a {@link
-        ///  SnapshotCommitPoint}.
-        /// </summary>
-        private IList<IndexCommit> WrapCommits<T>(IList<T> commits)
-            where T : IndexCommit
-        {
-            IList<IndexCommit> wrappedCommits = new List<IndexCommit>(commits.Count);
-            foreach (IndexCommit ic in commits)
-            {
-                wrappedCommits.Add(new SnapshotCommitPoint(this, ic));
-            }
-            return wrappedCommits;
-        }
-
-        /// <summary>
-        /// Wraps a provided <seealso cref="IndexCommit"/> and prevents it
-        ///  from being deleted.
-        /// </summary>
-        private class SnapshotCommitPoint : IndexCommit
-        {
-            private readonly SnapshotDeletionPolicy outerInstance;
-
-            /// <summary>
-            /// The <seealso cref="IndexCommit"/> we are preventing from deletion. </summary>
-            protected IndexCommit m_cp;
-
-            /// <summary>
-            /// Creates a {@code SnapshotCommitPoint} wrapping the provided
-            ///  <seealso cref="IndexCommit"/>.
-            /// </summary>
-            protected internal SnapshotCommitPoint(SnapshotDeletionPolicy outerInstance, IndexCommit cp)
-            {
-                this.outerInstance = outerInstance;
-                this.m_cp = cp;
-            }
-
-            public override string ToString()
-            {
-                return "SnapshotDeletionPolicy.SnapshotCommitPoint(" + m_cp + ")";
-            }
-
-            public override void Delete()
-            {
-                lock (outerInstance)
-                {
-                    // Suppress the delete request if this commit point is
-                    // currently snapshotted.
-                    if (!outerInstance.m_refCounts.ContainsKey(m_cp.Generation))
-                    {
-                        m_cp.Delete();
-                    }
-                }
-            }
-
-            public override Directory Directory
-            {
-                get
-                {
-                    return m_cp.Directory;
-                }
-            }
-
-            public override ICollection<string> FileNames
-            {
-                get
-                {
-                    return m_cp.FileNames;
-                }
-            }
-
-            public override long Generation
-            {
-                get
-                {
-                    return m_cp.Generation;
-                }
-            }
-
-            public override string SegmentsFileName
-            {
-                get
-                {
-                    return m_cp.SegmentsFileName;
-                }
-            }
-
-            public override IDictionary<string, string> UserData
-            {
-                get
-                {
-                    return m_cp.UserData;
-                }
-            }
-
-            public override bool IsDeleted
-            {
-                get
-                {
-                    return m_cp.IsDeleted;
-                }
-            }
-
-            public override int SegmentCount
-            {
-                get
-                {
-                    return m_cp.SegmentCount;
-                }
-            }
-        }
-    }
-}
\ No newline at end of file
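
A minimal usage sketch for the SnapshotDeletionPolicy removed above, assuming a local FSDirectory, a StandardAnalyzer, and LuceneVersion.LUCENE_48 (all illustrative choices). As the initCalled guard in the code indicates, the policy must be retrieved back from the writer's live config before calling Snapshot()/Release().

    using Lucene.Net.Analysis.Standard;
    using Lucene.Net.Documents;
    using Lucene.Net.Index;
    using Lucene.Net.Store;
    using Lucene.Net.Util;
    using System.IO;

    var dir = FSDirectory.Open(new DirectoryInfo("backup-demo-index"));
    var analyzer = new StandardAnalyzer(LuceneVersion.LUCENE_48);
    var config = new IndexWriterConfig(LuceneVersion.LUCENE_48, analyzer)
    {
        IndexDeletionPolicy = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy())
    };
    using (var writer = new IndexWriter(dir, config))
    {
        // Use the instance the writer actually holds (see the initCalled guard above).
        var sdp = (SnapshotDeletionPolicy)writer.Config.IndexDeletionPolicy;

        writer.AddDocument(new Document());
        writer.Commit();

        IndexCommit snapshot = sdp.Snapshot(); // files of this commit are now protected
        try
        {
            // copy snapshot.FileNames elsewhere to take a hot backup...
        }
        finally
        {
            sdp.Release(snapshot);      // drop the snapshot's refCount
            writer.DeleteUnusedFiles(); // let the writer delete now-unreferenced files
        }
    }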

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Index/SortedDocValues.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/SortedDocValues.cs b/src/Lucene.Net.Core/Index/SortedDocValues.cs
deleted file mode 100644
index 0ed7556..0000000
--- a/src/Lucene.Net.Core/Index/SortedDocValues.cs
+++ /dev/null
@@ -1,126 +0,0 @@
-using System;
-
-namespace Lucene.Net.Index
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    using BytesRef = Lucene.Net.Util.BytesRef;
-
-    /// <summary>
-    /// A per-document byte[] with presorted values.
-    /// <p>
-    /// Per-Document values in a SortedDocValues are deduplicated, dereferenced,
-    /// and sorted into a dictionary of unique values. A pointer to the
-    /// dictionary value (ordinal) can be retrieved for each document. Ordinals
-    /// are dense and in increasing sorted order.
-    /// </summary>
-#if FEATURE_SERIALIZABLE
-    [Serializable]
-#endif
-    public abstract class SortedDocValues : BinaryDocValues
-    {
-        /// <summary>
-        /// Sole constructor. (For invocation by subclass
-        /// constructors, typically implicit.)
-        /// </summary>
-        protected SortedDocValues()
-        {
-        }
-
-        /// <summary>
-        /// Returns the ordinal for the specified docID. </summary>
-        /// <param name="docID"> document ID to lookup </param>
-        /// <returns> ordinal for the document: this is dense, starts at 0, then
-        ///         increments by 1 for the next value in sorted order. Note that
-        ///         missing values are indicated by -1. </returns>
-        public abstract int GetOrd(int docID);
-
-        /// <summary>
-        /// Retrieves the value for the specified ordinal. </summary>
-        /// <param name="ord"> ordinal to lookup (must be &gt;= 0 and &lt; <seealso cref="#getValueCount()"/>) </param>
-        /// <param name="result"> will be populated with the ordinal's value </param>
-        /// <seealso cref= #getOrd(int)  </seealso>
-        public abstract void LookupOrd(int ord, BytesRef result);
-
-        /// <summary>
-        /// Returns the number of unique values. </summary>
-        /// <returns> number of unique values in this SortedDocValues. this is
-        ///         also equivalent to one plus the maximum ordinal. </returns>
-        public abstract int ValueCount { get; }
-
-        public override void Get(int docID, BytesRef result)
-        {
-            int ord = GetOrd(docID);
-            if (ord == -1)
-            {
-                result.Bytes = BytesRef.EMPTY_BYTES;
-                result.Length = 0;
-                result.Offset = 0;
-            }
-            else
-            {
-                LookupOrd(ord, result);
-            }
-        }
-
-        /// <summary>
-        /// If {@code key} exists, returns its ordinal, else
-        ///  returns {@code -insertionPoint-1}, like {@code
-        ///  Arrays.binarySearch}.
-        /// </summary>
-        ///  <param name="key"> Key to look up
-        ///  </param>
-        public virtual int LookupTerm(BytesRef key)
-        {
-            BytesRef spare = new BytesRef();
-            int low = 0;
-            int high = ValueCount - 1;
-
-            while (low <= high)
-            {
-                int mid = (int)((uint)(low + high) >> 1);
-                LookupOrd(mid, spare);
-                int cmp = spare.CompareTo(key);
-
-                if (cmp < 0)
-                {
-                    low = mid + 1;
-                }
-                else if (cmp > 0)
-                {
-                    high = mid - 1;
-                }
-                else
-                {
-                    return mid; // key found
-                }
-            }
-
-            return -(low + 1); // key not found.
-        }
-
-        /// <summary>
-        /// Returns a <seealso cref="TermsEnum"/> over the values.
-        /// The enum supports <seealso cref="TermsEnum#ord()"/> and <seealso cref="TermsEnum#seekExact(long)"/>.
-        /// </summary>
-        public virtual TermsEnum GetTermsEnum()
-        {
-            return new SortedDocValuesTermsEnum(this);
-        }
-    }
-}
\ No newline at end of file
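
A small sketch of how a caller might interpret LookupTerm's Arrays.binarySearch-style return value from the file removed above; the SortedDocValues instance and the lookup key are illustrative assumptions.

    using System;
    using Lucene.Net.Index;
    using Lucene.Net.Util;

    internal static class LookupTermExample
    {
        // Decodes the binarySearch-style result: >= 0 is an ordinal, < 0 encodes -insertionPoint-1.
        public static void Describe(SortedDocValues dv, string key)
        {
            int result = dv.LookupTerm(new BytesRef(key));
            if (result >= 0)
                Console.WriteLine($"'{key}' exists with ordinal {result}");
            else
                Console.WriteLine($"'{key}' is absent; it would sort at position {-result - 1} of {dv.ValueCount}");
        }
    }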

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a5dc68d0/src/Lucene.Net.Core/Index/SortedDocValuesTermsEnum.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/SortedDocValuesTermsEnum.cs b/src/Lucene.Net.Core/Index/SortedDocValuesTermsEnum.cs
deleted file mode 100644
index d9ff7ef..0000000
--- a/src/Lucene.Net.Core/Index/SortedDocValuesTermsEnum.cs
+++ /dev/null
@@ -1,166 +0,0 @@
-using System;
-using System.Collections.Generic;
-using System.Diagnostics;
-
-namespace Lucene.Net.Index
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    using IBits = Lucene.Net.Util.IBits;
-    using BytesRef = Lucene.Net.Util.BytesRef;
-
-    /// <summary>
-    /// Implements a <seealso cref="TermsEnum"/> wrapping a provided
-    /// <seealso cref="SortedDocValues"/>.
-    /// </summary>
-#if FEATURE_SERIALIZABLE
-    [Serializable]
-#endif
-    internal class SortedDocValuesTermsEnum : TermsEnum
-    {
-        private readonly SortedDocValues values;
-        private int currentOrd = -1;
-        private readonly BytesRef term = new BytesRef();
-
-        /// <summary>
-        /// Creates a new TermsEnum over the provided values </summary>
-        public SortedDocValuesTermsEnum(SortedDocValues values)
-        {
-            this.values = values;
-        }
-
-        public override SeekStatus SeekCeil(BytesRef text)
-        {
-            int ord = values.LookupTerm(text);
-            if (ord >= 0)
-            {
-                currentOrd = ord;
-                term.Offset = 0;
-                // TODO: is there a cleaner way?
-                // term.bytes may be pointing to codec-private byte[]
-                // storage, so we must force new byte[] allocation:
-                term.Bytes = new byte[text.Length];
-                term.CopyBytes(text);
-                return SeekStatus.FOUND;
-            }
-            else
-            {
-                currentOrd = -ord - 1;
-                if (currentOrd == values.ValueCount)
-                {
-                    return SeekStatus.END;
-                }
-                else
-                {
-                    // TODO: hmm can we avoid this "extra" lookup?:
-                    values.LookupOrd(currentOrd, term);
-                    return SeekStatus.NOT_FOUND;
-                }
-            }
-        }
-
-        public override bool SeekExact(BytesRef text)
-        {
-            int ord = values.LookupTerm(text);
-            if (ord >= 0)
-            {
-                term.Offset = 0;
-                // TODO: is there a cleaner way?
-                // term.bytes may be pointing to codec-private byte[]
-                // storage, so we must force new byte[] allocation:
-                term.Bytes = new byte[text.Length];
-                term.CopyBytes(text);
-                currentOrd = ord;
-                return true;
-            }
-            else
-            {
-                return false;
-            }
-        }
-
-        public override void SeekExact(long ord)
-        {
-            Debug.Assert(ord >= 0 && ord < values.ValueCount);
-            currentOrd = (int)ord;
-            values.LookupOrd(currentOrd, term);
-        }
-
-        public override BytesRef Next()
-        {
-            currentOrd++;
-            if (currentOrd >= values.ValueCount)
-            {
-                return null;
-            }
-            values.LookupOrd(currentOrd, term);
-            return term;
-        }
-
-        public override BytesRef Term
-        {
-            get { return term; }
-        }
-
-        public override long Ord
-        {
-            get { return currentOrd; }
-        }
-
-        public override int DocFreq
-        {
-            get { throw new System.NotSupportedException(); }
-        }
-
-        public override long TotalTermFreq
-        {
-            get { return -1; }
-        }
-
-        public override DocsEnum Docs(IBits liveDocs, DocsEnum reuse, DocsFlags flags)
-        {
-            throw new System.NotSupportedException();
-        }
-
-        public override DocsAndPositionsEnum DocsAndPositions(IBits liveDocs, DocsAndPositionsEnum reuse, DocsAndPositionsFlags flags)
-        {
-            throw new System.NotSupportedException();
-        }
-
-        public override IComparer<BytesRef> Comparer
-        {
-            get
-            {
-                return BytesRef.UTF8SortedAsUnicodeComparer;
-            }
-        }
-
-        public override void SeekExact(BytesRef term, TermState state)
-        {
-            Debug.Assert(state != null && state is OrdTermState);
-            this.SeekExact(((OrdTermState)state).Ord);
-        }
-
-        public override TermState GetTermState()
-        {
-            OrdTermState state = new OrdTermState();
-            state.Ord = currentOrd;
-            return state;
-        }
-    }
-}
\ No newline at end of file
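
A short sketch of iterating every unique value through the TermsEnum wrapper removed above; the SortedDocValues instance is an assumption, while the Next() and Ord members are those shown in the deleted file.

    using System;
    using Lucene.Net.Index;
    using Lucene.Net.Util;

    internal static class SortedDocValuesTermsEnumExample
    {
        // Walks every unique value in ordinal (sorted) order.
        public static void PrintAllValues(SortedDocValues dv)
        {
            TermsEnum termsEnum = dv.GetTermsEnum(); // returns the SortedDocValuesTermsEnum wrapper
            BytesRef term;
            while ((term = termsEnum.Next()) != null)
            {
                Console.WriteLine($"ord {termsEnum.Ord}: {term.Utf8ToString()}");
            }
        }
    }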

