lucenenet-commits mailing list archives

From: ccurr...@apache.org
Subject: svn commit: r1310635 [4/8] - in /incubator/lucene.net/trunk: build/vs2010/contrib/ build/vs2010/test/ src/contrib/FastVectorHighlighter/ src/contrib/Highlighter/ src/contrib/Memory/ src/contrib/Memory/Properties/ src/contrib/Queries/ src/contrib/Querie...
Date: Fri, 06 Apr 2012 23:37:54 GMT
Added: incubator/lucene.net/trunk/src/contrib/Memory/MemoryIndex.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/Memory/MemoryIndex.cs?rev=1310635&view=auto
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/Memory/MemoryIndex.cs (added)
+++ incubator/lucene.net/trunk/src/contrib/Memory/MemoryIndex.cs Fri Apr  6 23:37:48 2012
@@ -0,0 +1,1123 @@
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Linq;
+using System.Text;
+using Lucene.Net.Analysis;
+using Lucene.Net.Analysis.Tokenattributes;
+using Lucene.Net.Documents;
+using Lucene.Net.Search;
+using Lucene.Net.Support;
+
+namespace Lucene.Net.Index.Memory
+{
+    /// <summary>
+    /// High-performance single-document main memory Apache Lucene fulltext search index. 
+    /// 
+    /// <h4>Overview</h4>
+    /// 
+    /// This class is a replacement/substitute for a large subset of
+    /// {@link RAMDirectory} functionality. It is designed to
+    /// enable maximum efficiency for on-the-fly matchmaking combining structured and 
+    /// fuzzy fulltext search in realtime streaming applications such as Nux XQuery based XML 
+    /// message queues, publish-subscribe systems for Blogs/newsfeeds, text chat, data acquisition and 
+    /// distribution systems, application level routers, firewalls, classifiers, etc. 
+    /// Rather than targeting fulltext search of infrequent queries over huge persistent 
+    /// data archives (historic search), this class targets fulltext search of huge 
+    /// numbers of queries over comparatively small transient realtime data (prospective 
+    /// search). 
+    /// For example as in 
+    /// <pre>
+    /// float score = search(String text, Query query)
+    /// </pre>
+    /// <p/>
+    /// Each instance can hold at most one Lucene "document", with a document containing
+    /// zero or more "fields", each field having a name and a fulltext value. The
+    /// fulltext value is tokenized (split and transformed) into zero or more index terms 
+    /// (aka words) on <code>addField()</code>, according to the policy implemented by an
+    /// Analyzer. For example, Lucene analyzers can split on whitespace, normalize to lower case
+    /// for case insensitivity, ignore common terms with little discriminatory value such as "he", "in", "and" (stop
+    /// words), reduce the terms to their natural linguistic root form such as "fishing"
+    /// being reduced to "fish" (stemming), resolve synonyms/inflexions/thesauri 
+    /// (upon indexing and/or querying), etc. For details, see
+    /// <a target="_blank" href="http://today.java.net/pub/a/today/2003/07/30/LuceneIntro.html">Lucene Analyzer Intro</a>.
+    /// <p>
+    /// Arbitrary Lucene queries can be run against this class - see <a target="_blank" 
+    /// href="../../../../../../../queryparsersyntax.html">Lucene Query Syntax</a>
+    /// as well as <a target="_blank" 
+    /// href="http://today.java.net/pub/a/today/2003/11/07/QueryParserRules.html">Query Parser Rules</a>.
+    /// Note that a Lucene query selects on the field names and associated (indexed) 
+    /// tokenized terms, not on the original fulltext(s) - the latter are not stored 
+    /// but rather thrown away immediately after tokenization.
+    /// <p>
+    /// For some interesting background information on search technology, see Bob Wyman's
+    /// <a target="_blank" 
+    /// href="http://bobwyman.pubsub.com/main/2005/05/mary_hodder_poi.html">Prospective Search</a>, 
+    /// Jim Gray's
+    /// <a target="_blank" href="http://www.acmqueue.org/modules.php?name=Content&pa=showpage&pid=293&page=4">
+    /// A Call to Arms - Custom subscriptions</a>, and Tim Bray's
+    /// <a target="_blank" 
+    /// href="http://www.tbray.org/ongoing/When/200x/2003/07/30/OnSearchTOC">On Search, the Series</a>.
+    /// 
+    /// 
+    /// <h4>Example Usage</h4> 
+    /// 
+    /// <pre>
+    /// Analyzer analyzer = PatternAnalyzer.DEFAULT_ANALYZER;
+    /// //Analyzer analyzer = new SimpleAnalyzer();
+    /// MemoryIndex index = new MemoryIndex();
+    /// index.AddField("content", "Readings about Salmons and other select Alaska fishing Manuals", analyzer);
+    /// index.AddField("author", "Tales of James", analyzer);
+    /// QueryParser parser = new QueryParser("content", analyzer);
+    /// float score = index.Search(parser.Parse("+author:james +salmon~ +fish* manual~"));
+    /// if (score &gt; 0.0f) {
+    ///     Console.WriteLine("it's a match");
+    /// } else {
+    ///     Console.WriteLine("no match found");
+    /// }
+    /// Console.WriteLine("indexData=" + index.ToString());
+    /// </pre>
+    /// 
+    /// 
+    /// <h4>Example XQuery Usage</h4> 
+    /// 
+    /// <pre>
+    /// (: An XQuery that finds all books authored by James that have something to do with "salmon fishing manuals", sorted by relevance :)
+    /// declare namespace lucene = "java:nux.xom.pool.FullTextUtil";
+    /// declare variable $query := "+salmon~ +fish* manual~"; (: any arbitrary Lucene query can go here :)
+    /// 
+    /// for $book in /books/book[author="James" and lucene:match(abstract, $query) > 0.0]
+    /// let $score := lucene:match($book/abstract, $query)
+    /// order by $score descending
+    /// return $book
+    /// </pre>
+    /// 
+    /// 
+    /// <h4>No thread safety guarantees</h4>
+    /// 
+    /// An instance can be queried multiple times with the same or different queries,
+    /// but an instance is not thread-safe. If thread safety is required, synchronize
+    /// access externally, for example:
+    /// <pre>
+    /// MemoryIndex index = ...
+    /// lock (index) {
+    ///    // read and/or write index (i.e. add fields and/or query)
+    /// } 
+    /// </pre>
+    /// 
+    /// 
+    /// <h4>Performance Notes</h4>
+    /// 
+    /// Internally there's a new data structure geared towards efficient indexing 
+    /// and searching, plus the necessary support code to seamlessly plug into the Lucene 
+    /// framework.
+    /// <p/>
+    /// This class performs very well for very small texts (e.g. 10 chars) 
+    /// as well as for large texts (e.g. 10 MB) and everything in between. 
+    /// Typically, it is about 10-100 times faster than <code>RAMDirectory</code>.
+    /// Note that <code>RAMDirectory</code> has particularly 
+    /// large efficiency overheads for small to medium sized texts, both in time and space.
+    /// Indexing a field with N tokens takes O(N) in the best case, and O(N log N) in the worst 
+    /// case. Memory consumption is probably larger than for <code>RAMDirectory</code>.
+    /// <p/>
+    /// Example throughput of many simple term queries over a single MemoryIndex: 
+    /// ~500000 queries/sec on a MacBook Pro, jdk 1.5.0_06, server VM. 
+    /// As always, your mileage may vary.
+    /// <p/>
+    /// If you're curious about
+    /// the whereabouts of bottlenecks, run java 1.5 with the non-perturbing '-server
+    /// -agentlib:hprof=cpu=samples,depth=10' flags, then study the trace log and
+    /// correlate its hotspot trailer with its call stack headers (see <a
+    /// target="_blank" href="http://java.sun.com/developer/technicalArticles/Programming/HPROF.html">
+    /// hprof tracing </a>).
+    ///
+    ///</summary>
+    [Serializable]
+    public partial class MemoryIndex
+    {
+        /** info for each field: Map<String fieldName, Info field> */
+        private HashMap<String, Info> fields = new HashMap<String, Info>();
+
+        /** fields sorted ascending by fieldName; lazily computed on demand */
+        [NonSerialized] private KeyValuePair<String, Info>[] sortedFields;
+
+        /** pos: positions[3*i], startOffset: positions[3*i +1], endOffset: positions[3*i +2] */
+        private int stride;
+
+        /** Could be made configurable; See {@link Document#setBoost(float)} */
+        private static float docBoost = 1.0f;
+
+        private static long serialVersionUID = 2782195016849084649L;
+
+        private static bool DEBUG = false;
+
+        /**
+         * Constructs an empty instance.
+         */
+        public MemoryIndex()
+            : this(false)
+        {
+        }
+
+        /**
+         * Constructs an empty instance that can optionally store the start and end
+         * character offset of each token term in the text. This can be useful for
+         * highlighting of hit locations with the Lucene highlighter package.
+         * Private until the highlighter package matures, so that this can actually
+         * be meaningfully integrated.
+         * 
+         * @param storeOffsets
+         *            whether or not to store the start and end character offset of
+         *            each token term in the text
+         */
+
+        private MemoryIndex(bool storeOffsets)
+        {
+            this.stride = storeOffsets ? 3 : 1;
+        }
+
+        /**
+         * Convenience method; Tokenizes the given field text and adds the resulting
+         * terms to the index; Equivalent to adding an indexed non-keyword Lucene
+         * {@link org.apache.lucene.document.Field} that is
+         * {@link org.apache.lucene.document.Field.Index#ANALYZED tokenized},
+         * {@link org.apache.lucene.document.Field.Store#NO not stored},
+         * {@link org.apache.lucene.document.Field.TermVector#WITH_POSITIONS termVectorStored with positions} (or
+         * {@link org.apache.lucene.document.Field.TermVector#WITH_POSITIONS_OFFSETS termVectorStored with positions and offsets}),
+         * 
+         * @param fieldName
+         *            a name to be associated with the text
+         * @param text
+         *            the text to tokenize and index.
+         * @param analyzer
+         *            the analyzer to use for tokenization
+         */
+
+        public void AddField(String fieldName, String text, Analyzer analyzer)
+        {
+            if (fieldName == null)
+                throw new ArgumentException("fieldName must not be null");
+            if (text == null)
+                throw new ArgumentException("text must not be null");
+            if (analyzer == null)
+                throw new ArgumentException("analyzer must not be null");
+
+            TokenStream stream = analyzer.TokenStream(fieldName, new StringReader(text));
+
+            AddField(fieldName, stream);
+        }
+
+        /**
+         * Convenience method; Creates and returns a token stream that generates a
+         * token for each keyword in the given collection, "as is", without any
+         * transforming text analysis. The resulting token stream can be fed into
+         * {@link #addField(String, TokenStream)}, perhaps wrapped into another
+         * {@link org.apache.lucene.analysis.TokenFilter}, as desired.
+         * 
+         * @param keywords
+         *            the keywords to generate tokens for
+         * @return the corresponding token stream
+         */
+
+        public TokenStream CreateKeywordTokenStream<T>(ICollection<T> keywords)
+        {
+            // TODO: deprecate & move this method into AnalyzerUtil?
+            if (keywords == null)
+                throw new ArgumentException("keywords must not be null");
+
+            return new KeywordTokenStream<T>(keywords);
+        }
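+
+        // Example (sketch): indexing untokenized keywords via CreateKeywordTokenStream and
+        // querying them afterwards, assuming a MemoryIndex instance named "index"; the
+        // field name "tags" and its values are illustrative only.
+        //
+        //   index.AddField("tags", index.CreateKeywordTokenStream(new[] { "lucene", "memory" }));
+        //   float score = index.Search(new TermQuery(new Term("tags", "lucene")));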
+
+        /**
+         * Equivalent to <code>addField(fieldName, stream, 1.0f)</code>.
+         * 
+         * @param fieldName
+         *            a name to be associated with the text
+         * @param stream
+         *            the token stream to retrieve tokens from
+         */
+        public void AddField(String fieldName, TokenStream stream)
+        {
+            AddField(fieldName, stream, 1.0f);
+        }
+
+        /**
+         * Iterates over the given token stream and adds the resulting terms to the index;
+         * Equivalent to adding a tokenized, indexed, termVectorStored, unstored,
+         * Lucene {@link org.apache.lucene.document.Field}.
+         * Finally closes the token stream. Note that untokenized keywords can be added with this method via 
+         * {@link #CreateKeywordTokenStream(Collection)}, the Lucene contrib <code>KeywordTokenizer</code> or similar utilities.
+         * 
+         * @param fieldName
+         *            a name to be associated with the text
+         * @param stream
+         *            the token stream to retrieve tokens from.
+         * @param boost
+         *            the boost factor for hits for this field
+         * @see org.apache.lucene.document.Field#setBoost(float)
+         */
+        public void AddField(String fieldName, TokenStream stream, float boost)
+        {
+            try
+            {
+                if (fieldName == null)
+                    throw new ArgumentException("fieldName must not be null");
+                if (stream == null)
+                    throw new ArgumentException("token stream must not be null");
+                if (boost <= 0.0f)
+                    throw new ArgumentException("boost factor must be greater than 0.0");
+                if (fields[fieldName] != null)
+                    throw new ArgumentException("field must not be added more than once");
+
+                var terms = new HashMap<String, ArrayIntList>();
+                int numTokens = 0;
+                int numOverlapTokens = 0;
+                int pos = -1;
+
+                var termAtt = stream.AddAttribute<TermAttribute>();
+                var posIncrAttribute = stream.AddAttribute<PositionIncrementAttribute>();
+                var offsetAtt = stream.AddAttribute<OffsetAttribute>();
+
+                stream.Reset();
+                while (stream.IncrementToken())
+                {
+                    String term = termAtt.Term();
+                    if (term.Length == 0) continue; // nothing to do
+                    //        if (DEBUG) System.Diagnostics.Debug.WriteLine("token='" + term + "'");
+                    numTokens++;
+                    int posIncr = posIncrAttribute.PositionIncrement;
+                    if (posIncr == 0)
+                        numOverlapTokens++;
+                    pos += posIncr;
+
+                    ArrayIntList positions = terms[term];
+                    if (positions == null)
+                    {
+                        // term not seen before
+                        positions = new ArrayIntList(stride);
+                        terms[term] = positions;
+                    }
+                    if (stride == 1)
+                    {
+                        positions.Add(pos);
+                    }
+                    else
+                    {
+                        positions.Add(pos, offsetAtt.StartOffset, offsetAtt.EndOffset);
+                    }
+                }
+                stream.End();
+
+                // ensure infos.numTokens > 0 invariant; needed for correct operation of terms()
+                if (numTokens > 0)
+                {
+                    boost = boost*docBoost; // see DocumentWriter.addDocument(...)
+                    fields[fieldName] = new Info(terms, numTokens, numOverlapTokens, boost);
+                    sortedFields = null; // invalidate sorted view, if any
+                }
+            }
+            catch (IOException e)
+            {
+                // can never happen
+                throw new SystemException(string.Empty, e);
+            }
+            finally
+            {
+                try
+                {
+                    if (stream != null) stream.Close();
+                }
+                catch (IOException e2)
+                {
+                    throw new SystemException(string.Empty, e2);
+                }
+            }
+        }
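+
+        // Example (sketch): boosting one field relative to another at index time, assuming
+        // a MemoryIndex instance named "index" and an Analyzer named "analyzer"; the field
+        // names and boost values are illustrative only.
+        //
+        //   index.AddField("title", analyzer.TokenStream("title", new StringReader("Salmon Fishing")), 2.0f);
+        //   index.AddField("content", analyzer.TokenStream("content", new StringReader("fly fishing manual")), 1.0f);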
+
+        /**
+         * Creates and returns a searcher that can be used to execute arbitrary
+         * Lucene queries and to collect the resulting query results as hits.
+         * 
+         * @return a searcher
+         */
+
+        public IndexSearcher CreateSearcher()
+        {
+            MemoryIndexReader reader = new MemoryIndexReader(this);
+            IndexSearcher searcher = new IndexSearcher(reader); // ensures no auto-close !!
+            reader.SetSearcher(searcher); // to later get hold of searcher.getSimilarity()
+            return searcher;
+        }
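+
+        // Example (sketch): running an arbitrary Lucene query against the single in-memory
+        // document via the returned searcher, assuming a MemoryIndex instance named "index";
+        // the query, field name and result handling are illustrative only.
+        //
+        //   IndexSearcher searcher = index.CreateSearcher();
+        //   TopDocs hits = searcher.Search(new TermQuery(new Term("content", "fishing")), 1);
+        //   float score = hits.ScoreDocs.Length > 0 ? hits.ScoreDocs[0].Score : 0.0f;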
+
+        /**
+         * Convenience method that efficiently returns the relevance score by
+         * matching this index against the given Lucene query expression.
+         * 
+         * @param query
+         *            an arbitrary Lucene query to run against this index
+         * @return the relevance score of the matchmaking; A number in the range
+         *         [0.0 .. 1.0], with 0.0 indicating no match. The higher the number
+         *         the better the match.
+         *
+         */
+
+        public float Search(Query query)
+        {
+            if (query == null)
+                throw new ArgumentException("query must not be null");
+
+            Searcher searcher = CreateSearcher();
+            try
+            {
+                float[] scores = new float[1]; // inits to 0.0f (no match)
+                searcher.Search(query, new FillingCollector(scores));
+                float score = scores[0];
+                return score;
+            }
+            catch (IOException e)
+            {
+                // can never happen (RAMDirectory)
+                throw new SystemException(string.Empty, e);
+            }
+            finally
+            {
+                // searcher.close();
+                /*
+                 * Note that it is harmless and important for good performance to
+                 * NOT close the index reader!!! This avoids all sorts of
+                 * unnecessary baggage and locking in the Lucene IndexReader
+                 * superclass, all of which is completely unnecessary for this main
+                 * memory index data structure without thread-safety claims.
+                 * 
+                 * Wishing IndexReader would be an interface...
+                 * 
+                 * Actually with the new tight createSearcher() API auto-closing is now
+                 * made impossible, hence searcher.close() would be harmless and also 
+                 * would not degrade performance...
+                 */
+            }
+        }
+
+        /**
+         * Returns a reasonable approximation of the main memory [bytes] consumed by
+         * this instance. Useful for smart memory-sensitive caches/pools. Assumes
+         * fieldNames are interned, whereas tokenized terms are memory-overlaid.
+         * 
+         * @return the main memory consumption
+         */
+        public int GetMemorySize()
+        {
+            // for example usage in a smart cache see nux.xom.pool.Pool    
+            int PTR = VM.PTR;
+            int INT = VM.INT;
+            int size = 0;
+            size += VM.SizeOfObject(2*PTR + INT); // memory index
+            if (sortedFields != null) size += VM.SizeOfObjectArray(sortedFields.Length);
+
+            size += VM.SizeOfHashMap(fields.Count);
+            foreach (var entry in fields)
+            {
+                // for each Field Info
+                Info info = entry.Value;
+                size += VM.SizeOfObject(2*INT + 3*PTR); // Info instance vars
+                if (info.SortedTerms != null) size += VM.SizeOfObjectArray(info.SortedTerms.Length);
+
+                int len = info.Terms.Count;
+                size += VM.SizeOfHashMap(len);
+
+                var iter2 = info.Terms.GetEnumerator();
+                while (--len >= 0)
+                {
+                    iter2.MoveNext();
+                    // for each term
+                    KeyValuePair<String, ArrayIntList> e = iter2.Current;
+                    size += VM.SizeOfObject(PTR + 3*INT); // assumes substring() memory overlay
+//        size += STR + 2 * ((String) e.getKey()).length();
+                    ArrayIntList positions = e.Value;
+                    size += VM.SizeOfArrayIntList(positions.Size());
+                }
+            }
+            return size;
+        }
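+
+        // Example (sketch): a memory-sensitive cache could use the estimate to decide
+        // whether an index is worth keeping; the 1 MB threshold is illustrative only.
+        //
+        //   bool keep = index.GetMemorySize() < 1024 * 1024;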
+
+        private int NumPositions(ArrayIntList positions)
+        {
+            return positions.Size()/stride;
+        }
+
+        /** sorts into ascending order (on demand), reusing memory along the way */
+
+        private void SortFields()
+        {
+            if (sortedFields == null) sortedFields = Sort(fields);
+        }
+
+        /** returns a view of the given map's entries, sorted ascending by key */
+
+        private static KeyValuePair<TKey, TValue>[] Sort<TKey, TValue>(HashMap<TKey, TValue> map)
+            where TKey : class, IComparable<TKey>
+        {
+            int size = map.Count;
+
+            var entries = map.ToArray();
+
+            if (size > 1) Array.Sort(entries, TermComparer.KeyComparer);
+            return entries;
+        }
+
+        /**
+         * Returns a String representation of the index data for debugging purposes.
+         * 
+         * @return the string representation
+         */
+
+        public override String ToString()
+        {
+            StringBuilder result = new StringBuilder(256);
+            SortFields();
+            int sumChars = 0;
+            int sumPositions = 0;
+            int sumTerms = 0;
+
+            for (int i = 0; i < sortedFields.Length; i++)
+            {
+                KeyValuePair<String, Info> entry = sortedFields[i];
+                String fieldName = entry.Key;
+                Info info = entry.Value;
+                info.SortTerms();
+                result.Append(fieldName + ":\n");
+
+                int numChars = 0;
+                int numPos = 0;
+                for (int j = 0; j < info.SortedTerms.Length; j++)
+                {
+                    KeyValuePair<String, ArrayIntList> e = info.SortedTerms[j];
+                    String term = e.Key;
+                    ArrayIntList positions = e.Value;
+                    result.Append("\t'" + term + "':" + NumPositions(positions) + ":");
+                    result.Append(positions.ToString(stride)); // ignore offsets
+                    result.Append("\n");
+                    numPos += NumPositions(positions);
+                    numChars += term.Length;
+                }
+
+                result.Append("\tterms=" + info.SortedTerms.Length);
+                result.Append(", positions=" + numPos);
+                result.Append(", Kchars=" + (numChars/1000.0f));
+                result.Append("\n");
+                sumPositions += numPos;
+                sumChars += numChars;
+                sumTerms += info.SortedTerms.Length;
+            }
+
+            result.Append("\nfields=" + sortedFields.Length);
+            result.Append(", terms=" + sumTerms);
+            result.Append(", positions=" + sumPositions);
+            result.Append(", Kchars=" + (sumChars/1000.0f));
+            return result.ToString();
+        }
+
+
+        ///////////////////////////////////////////////////////////////////////////////
+        // Nested classes:
+        ///////////////////////////////////////////////////////////////////////////////
+        /**
+         * Index data structure for a field; Contains the tokenized term texts and
+         * their positions.
+         */
+
+        [Serializable]
+        private sealed class Info
+        {
+            public static readonly IComparer<KeyValuePair<string, Info>> InfoComparer = new TermComparer<Info>();
+            public static readonly IComparer<KeyValuePair<string, ArrayIntList>> ArrayIntListComparer = new TermComparer<ArrayIntList>(); 
+            /**
+             * Term strings and their positions for this field: Map <String
+             * termText, ArrayIntList positions>
+             */
+            private HashMap<String, ArrayIntList> terms;
+
+            /** Terms sorted ascending by term text; computed on demand */
+            [NonSerialized] private KeyValuePair<String, ArrayIntList>[] sortedTerms;
+
+            /** Number of added tokens for this field */
+            private int numTokens;
+
+            /** Number of overlapping tokens for this field */
+            private int numOverlapTokens;
+
+            /** Boost factor for hits for this field */
+            private float boost;
+
+            /** Term for this field's fieldName, lazily computed on demand */
+            [NonSerialized] public Term template;
+
+            private static long serialVersionUID = 2882195016849084649L;
+
+            public Info(HashMap<String, ArrayIntList> terms, int numTokens, int numOverlapTokens, float boost)
+            {
+                this.terms = terms;
+                this.numTokens = numTokens;
+                this.numOverlapTokens = numOverlapTokens;
+                this.boost = boost;
+            }
+
+            public HashMap<string, ArrayIntList> Terms
+            {
+                get { return terms; }
+            }
+
+            public int NumTokens
+            {
+                get { return numTokens; }
+            }
+
+            public int NumOverlapTokens
+            {
+                get { return numOverlapTokens; }
+                set { numOverlapTokens = value; }
+            }
+
+            public float Boost
+            {
+                get { return boost; }
+            }
+
+            public KeyValuePair<string, ArrayIntList>[] SortedTerms
+            {
+                get { return sortedTerms; }
+            }
+
+            /**
+             * Sorts hashed terms into ascending order, reusing memory along the
+             * way. Note that sorting is lazily delayed until required (often it's
+             * not required at all). If a sorted view is required then hashing +
+             * sort + binary search is still faster and smaller than TreeMap usage
+             * (which would be an alternative and somewhat more elegant approach,
+             * apart from more sophisticated Tries / prefix trees).
+             */
+
+            public void SortTerms()
+            {
+                if (SortedTerms == null) sortedTerms = Sort(Terms);
+            }
+
+            /** note that the frequency can be calculated as numPosition(getPositions(x)) */
+
+            public ArrayIntList GetPositions(String term)
+            {
+                return Terms[term];
+            }
+
+            /** note that the frequency can be calculated as numPosition(getPositions(x)) */
+
+            public ArrayIntList GetPositions(int pos)
+            {
+                return SortedTerms[pos].Value;
+            }
+
+            public float GetBoost()
+            {
+                return Boost;
+            }
+
+        }
+
+
+        ///////////////////////////////////////////////////////////////////////////////
+        // Nested classes:
+        ///////////////////////////////////////////////////////////////////////////////
+        /**
+         * Efficient resizable auto-expanding list holding <code>int</code> elements;
+         * implemented with arrays.
+         */
+
+        [Serializable]
+        private sealed class ArrayIntList
+        {
+
+            private int[] elements;
+            private int size = 0;
+
+            private static long serialVersionUID = 2282195016849084649L;
+
+            private ArrayIntList()
+                : this(10)
+            {
+
+            }
+
+            public ArrayIntList(int initialCapacity)
+            {
+                elements = new int[initialCapacity];
+            }
+
+            public void Add(int elem)
+            {
+                if (size == elements.Length) EnsureCapacity(size + 1);
+                elements[size++] = elem;
+            }
+
+            public void Add(int pos, int start, int end)
+            {
+                if (size + 3 > elements.Length) EnsureCapacity(size + 3);
+                elements[size] = pos;
+                elements[size + 1] = start;
+                elements[size + 2] = end;
+                size += 3;
+            }
+
+            public int Get(int index)
+            {
+                if (index >= size) ThrowIndex(index);
+                return elements[index];
+            }
+
+            public int Size()
+            {
+                return size;
+            }
+
+            public int[] ToArray(int stride)
+            {
+                int[] arr = new int[Size()/stride];
+                if (stride == 1)
+                {
+                    Array.Copy(elements, 0, arr, 0, size);
+                }
+                else
+                {
+                    for (int i = 0, j = 0; j < size; i++, j += stride) arr[i] = elements[j];
+                }
+                return arr;
+            }
+
+            private void EnsureCapacity(int minCapacity)
+            {
+                int newCapacity = Math.Max(minCapacity, (elements.Length*3)/2 + 1);
+                int[] newElements = new int[newCapacity];
+                Array.Copy(elements, 0, newElements, 0, size);
+                elements = newElements;
+            }
+
+            private void ThrowIndex(int index)
+            {
+                throw new IndexOutOfRangeException("index: " + index
+                                                   + ", size: " + size);
+            }
+
+            /** returns the first few positions (without offsets); debug only */
+
+            public string ToString(int stride)
+            {
+                int s = Size()/stride;
+                int len = Math.Min(10, s); // avoid printing huge lists
+                StringBuilder buf = new StringBuilder(4*len);
+                buf.Append("[");
+                for (int i = 0; i < len; i++)
+                {
+                    buf.Append(Get(i*stride));
+                    if (i < len - 1) buf.Append(", ");
+                }
+                if (len != s) buf.Append(", ..."); // and some more...
+                buf.Append("]");
+                return buf.ToString();
+            }
+        }
+
+
+        ///////////////////////////////////////////////////////////////////////////////
+        // Nested classes:
+        ///////////////////////////////////////////////////////////////////////////////
+        private static readonly Term MATCH_ALL_TERM = new Term("");
+
+        /**
+         * Search support for Lucene framework integration; implements all methods
+         * required by the Lucene IndexReader contracts.
+         */
+
+        private sealed partial class MemoryIndexReader : IndexReader
+        {
+            private readonly MemoryIndex _index;
+
+            private Searcher searcher; // needed to find searcher.getSimilarity() 
+
+            internal MemoryIndexReader(MemoryIndex index)
+            {
+                _index = index;
+            }
+
+            private Info GetInfo(String fieldName)
+            {
+                return _index.fields[fieldName];
+            }
+
+            private Info GetInfo(int pos)
+            {
+                return _index.sortedFields[pos].Value;
+            }
+
+            public override int DocFreq(Term term)
+            {
+                Info info = GetInfo(term.Field);
+                int freq = 0;
+                if (info != null) freq = info.GetPositions(term.Text) != null ? 1 : 0;
+                if (DEBUG) System.Diagnostics.Debug.WriteLine("MemoryIndexReader.docFreq: " + term + ", freq:" + freq);
+                return freq;
+            }
+
+            public override TermEnum Terms()
+            {
+                if (DEBUG) System.Diagnostics.Debug.WriteLine("MemoryIndexReader.terms()");
+                return Terms(MATCH_ALL_TERM);
+            }
+
+            public override TermEnum Terms(Term term)
+            {
+                if (DEBUG) System.Diagnostics.Debug.WriteLine("MemoryIndexReader.terms: " + term);
+
+                int i; // index into info.sortedTerms
+                int j; // index into sortedFields
+
+                _index.SortFields();
+                if (_index.sortedFields.Length == 1 && _index.sortedFields[0].Key == term.Field)
+                {
+                    j = 0; // fast path
+                }
+                else
+                {
+                    j = Array.BinarySearch(_index.sortedFields, new KeyValuePair<string, Info>(term.Field, null), Info.InfoComparer);
+                }
+
+                if (j < 0)
+                {
+                    // not found; choose successor
+                    j = -j - 1;
+                    i = 0;
+                    if (j < _index.sortedFields.Length) GetInfo(j).SortTerms();
+                }
+                else
+                {
+                    // found
+                    Info info = GetInfo(j);
+                    info.SortTerms();
+                    i = Array.BinarySearch(info.SortedTerms, new KeyValuePair<string, ArrayIntList>(term.Text, null), Info.ArrayIntListComparer);
+                    if (i < 0)
+                    {
+                        // not found; choose successor
+                        i = -i - 1;
+                        if (i >= info.SortedTerms.Length)
+                        {
+                            // move to next successor
+                            j++;
+                            i = 0;
+                            if (j < _index.sortedFields.Length) GetInfo(j).SortTerms();
+                        }
+                    }
+                }
+                int ix = i;
+                int jx = j;
+
+                return new MemoryTermEnum(_index, this, ix, jx);
+            }
+
+            public override TermPositions TermPositions()
+            {
+                if (DEBUG) System.Diagnostics.Debug.WriteLine("MemoryIndexReader.termPositions");
+
+                return new MemoryTermPositions(_index, this);
+            }
+
+
+            public override TermDocs TermDocs()
+            {
+                if (DEBUG) System.Diagnostics.Debug.WriteLine("MemoryIndexReader.termDocs");
+                return TermPositions();
+            }
+
+            public override ITermFreqVector[] GetTermFreqVectors(int docNumber)
+            {
+                if (DEBUG) System.Diagnostics.Debug.WriteLine("MemoryIndexReader.getTermFreqVectors");
+                // This is okay, ToArray() is as optimized as writing it by hand
+                return _index.fields.Keys.Select(k => GetTermFreqVector(docNumber, k)).ToArray();
+            }
+
+            public override void GetTermFreqVector(int docNumber, TermVectorMapper mapper)
+            {
+                if (DEBUG) System.Diagnostics.Debug.WriteLine("MemoryIndexReader.getTermFreqVectors");
+
+                //      if (vectors.length == 0) return null;
+                foreach (String fieldName in _index.fields.Keys)
+                {
+                    GetTermFreqVector(docNumber, fieldName, mapper);
+                }
+            }
+
+            public override void GetTermFreqVector(int docNumber, String field, TermVectorMapper mapper)
+            {
+                if (DEBUG) System.Diagnostics.Debug.WriteLine("MemoryIndexReader.getTermFreqVector");
+                Info info = GetInfo(field);
+                if (info == null)
+                {
+                    return;
+                }
+                info.SortTerms();
+                mapper.SetExpectations(field, info.SortedTerms.Length, _index.stride != 1, true);
+                for (int i = info.SortedTerms.Length; --i >= 0;)
+                {
+
+                    ArrayIntList positions = info.SortedTerms[i].Value;
+                    int size = positions.Size();
+                    var offsets = new TermVectorOffsetInfo[size/_index.stride];
+
+                    for (int k = 0, j = 1; j < size; k++, j += _index.stride)
+                    {
+                        int start = positions.Get(j);
+                        int end = positions.Get(j + 1);
+                        offsets[k] = new TermVectorOffsetInfo(start, end);
+                    }
+                    mapper.Map(info.SortedTerms[i].Key, _index.NumPositions(info.SortedTerms[i].Value), offsets,
+                               (info.SortedTerms[i].Value).ToArray(_index.stride));
+                }
+            }
+
+            public override ITermFreqVector GetTermFreqVector(int docNumber, String fieldName)
+            {
+                if (DEBUG) System.Diagnostics.Debug.WriteLine("MemoryIndexReader.getTermFreqVector");
+                Info info = GetInfo(fieldName);
+                if (info == null) return null; // TODO: or return empty vector impl???
+                info.SortTerms();
+
+                return new MemoryTermPositionVector(_index, info, fieldName);
+            }
+
+            private Similarity GetSimilarity()
+            {
+                if (searcher != null) return searcher.Similarity;
+                return Similarity.Default;
+            }
+
+            internal void SetSearcher(Searcher searcher)
+            {
+                this.searcher = searcher;
+            }
+
+            /** performance hack: cache norms to avoid repeated expensive calculations */
+            private byte[] cachedNorms;
+            private String cachedFieldName;
+            private Similarity cachedSimilarity;
+
+            public override byte[] Norms(String fieldName)
+            {
+                byte[] norms = cachedNorms;
+                Similarity sim = GetSimilarity();
+                if (fieldName != cachedFieldName || sim != cachedSimilarity)
+                {
+                    // not cached?
+                    Info info = GetInfo(fieldName);
+                    int numTokens = info != null ? info.NumTokens : 0;
+                    int numOverlapTokens = info != null ? info.NumOverlapTokens : 0;
+                    float boost = info != null ? info.GetBoost() : 1.0f;
+                    FieldInvertState invertState = new FieldInvertState(0, numTokens, numOverlapTokens, 0, boost);
+                    float n = sim.ComputeNorm(fieldName, invertState);
+                    byte norm = Similarity.EncodeNorm(n);
+                    norms = new byte[] {norm};
+
+                    // cache it for future reuse
+                    cachedNorms = norms;
+                    cachedFieldName = fieldName;
+                    cachedSimilarity = sim;
+                    if (DEBUG)
+                        System.Diagnostics.Debug.WriteLine("MemoryIndexReader.norms: " + fieldName + ":" + n + ":" +
+                                                           norm + ":" + numTokens);
+                }
+                return norms;
+            }
+
+            public override void Norms(String fieldName, byte[] bytes, int offset)
+            {
+                if (DEBUG) System.Diagnostics.Debug.WriteLine("MemoryIndexReader.norms*: " + fieldName);
+                byte[] norms = Norms(fieldName);
+                Buffer.BlockCopy(norms, 0, bytes, offset, norms.Length);
+            }
+
+            protected override void DoSetNorm(int doc, String fieldName, byte value)
+            {
+                throw new NotSupportedException();
+            }
+
+            public override int NumDocs()
+            {
+                if (DEBUG) System.Diagnostics.Debug.WriteLine("MemoryIndexReader.numDocs");
+                return _index.fields.Count > 0 ? 1 : 0;
+            }
+
+            public override int MaxDoc
+            {
+                get
+                {
+                    if (DEBUG) System.Diagnostics.Debug.WriteLine("MemoryIndexReader.maxDoc");
+                    return 1;
+                }
+            }
+
+            public override Document Document(int n)
+            {
+                if (DEBUG) System.Diagnostics.Debug.WriteLine("MemoryIndexReader.document");
+                return new Document(); // there are no stored fields
+            }
+
+            //When we convert to JDK 1.5 make this Set<String>
+            public override Document Document(int n, FieldSelector fieldSelector)
+            {
+                if (DEBUG) System.Diagnostics.Debug.WriteLine("MemoryIndexReader.document");
+                return new Document(); // there are no stored fields
+            }
+
+            public override bool IsDeleted(int n)
+            {
+                if (DEBUG) System.Diagnostics.Debug.WriteLine("MemoryIndexReader.isDeleted");
+                return false;
+            }
+
+            public override bool HasDeletions
+            {
+                get
+                {
+                    if (DEBUG) System.Diagnostics.Debug.WriteLine("MemoryIndexReader.hasDeletions");
+                    return false;
+                }
+            }
+
+            protected override void DoDelete(int docNum)
+            {
+                throw new NotSupportedException();
+            }
+
+            protected override void DoUndeleteAll()
+            {
+                throw new NotSupportedException();
+            }
+
+            protected override void DoCommit(IDictionary<String, String> commitUserData)
+            {
+                if (DEBUG) System.Diagnostics.Debug.WriteLine("MemoryIndexReader.doCommit");
+
+            }
+
+            protected override void DoClose()
+            {
+                if (DEBUG) System.Diagnostics.Debug.WriteLine("MemoryIndexReader.doClose");
+            }
+
+            // lucene >= 1.9 (remove this method for lucene-1.4.3)
+            public override ICollection<String> GetFieldNames(FieldOption fieldOption)
+            {
+                if (DEBUG) System.Diagnostics.Debug.WriteLine("MemoryIndexReader.getFieldNamesOption");
+                if (fieldOption == FieldOption.UNINDEXED)
+                    return CollectionsHelper<string>.EmptyList();
+                if (fieldOption == FieldOption.INDEXED_NO_TERMVECTOR)
+                    return CollectionsHelper<string>.EmptyList();
+                if (fieldOption == FieldOption.TERMVECTOR_WITH_OFFSET && _index.stride == 1)
+                    return CollectionsHelper<string>.EmptyList();
+                if (fieldOption == FieldOption.TERMVECTOR_WITH_POSITION_OFFSET && _index.stride == 1)
+                    return CollectionsHelper<string>.EmptyList();
+
+                return _index.fields.Keys.AsReadOnly();
+            }
+        }
+
+
+        ///////////////////////////////////////////////////////////////////////////////
+        // Nested classes:
+        ///////////////////////////////////////////////////////////////////////////////
+        private static class VM
+        {
+
+            public static readonly int PTR = Is64BitVM() ? 8 : 4;
+
+            // bytes occupied by primitive data types
+            public static readonly int BOOLEAN = 1;
+            public static readonly int BYTE = 1;
+            public static readonly int CHAR = 2;
+            public static readonly int SHORT = 2;
+            public static readonly int INT = 4;
+            public static readonly int LONG = 8;
+            public static readonly int FLOAT = 4;
+            public static readonly int DOUBLE = 8;
+
+            private static readonly int LOG_PTR = (int) Math.Round(Log2(PTR));
+
+            /**
+             * Object header of any heap allocated Java object. 
+             * ptr to class, info for monitor, gc, hash, etc.
+             */
+            private static readonly int OBJECT_HEADER = 2*PTR;
+
+            //  assumes n > 0
+            //  64 bit VM:
+            //    0     --> 0*PTR
+            //    1..8  --> 1*PTR
+            //    9..16 --> 2*PTR
+            private static int SizeOf(int n)
+            {
+                return (((n - 1) >> LOG_PTR) + 1) << LOG_PTR;
+            }
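+
+            // For example, on a 64-bit VM (PTR == 8, LOG_PTR == 3):
+            //   SizeOf(1) == 8, SizeOf(8) == 8, SizeOf(9) == 16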
+
+            public static int SizeOfObject(int n)
+            {
+                return SizeOf(OBJECT_HEADER + n);
+            }
+
+            public static int SizeOfObjectArray(int len)
+            {
+                return SizeOfObject(INT + PTR*len);
+            }
+
+            public static int SizeOfCharArray(int len)
+            {
+                return SizeOfObject(INT + CHAR*len);
+            }
+
+            public static int SizeOfIntArray(int len)
+            {
+                return SizeOfObject(INT + INT*len);
+            }
+
+            public static int SizeOfString(int len)
+            {
+                return SizeOfObject(3*INT + PTR) + SizeOfCharArray(len);
+            }
+
+            public static int SizeOfHashMap(int len)
+            {
+                return SizeOfObject(4*PTR + 4*INT) + SizeOfObjectArray(len)
+                       + len*SizeOfObject(3*PTR + INT); // entries
+            }
+
+            // note: does not include referenced objects
+            public static int SizeOfArrayList(int len)
+            {
+                return SizeOfObject(PTR + 2*INT) + SizeOfObjectArray(len);
+            }
+
+            public static int SizeOfArrayIntList(int len)
+            {
+                return SizeOfObject(PTR + INT) + SizeOfIntArray(len);
+            }
+
+            private static bool Is64BitVM()
+            {
+                return IntPtr.Size == 8;
+            }
+
+            /** logarithm to the base 2. Example: log2(4) == 2, log2(8) == 3 */
+
+            private static double Log2(double value)
+            {
+                return Math.Log(value, 2);
+                //return Math.Log(value) / Math.Log(2);
+            }
+        }
+
+    }
+}
\ No newline at end of file

Added: incubator/lucene.net/trunk/src/contrib/Memory/MemoryTermEnum.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/Memory/MemoryTermEnum.cs?rev=1310635&view=auto
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/Memory/MemoryTermEnum.cs (added)
+++ incubator/lucene.net/trunk/src/contrib/Memory/MemoryTermEnum.cs Fri Apr  6 23:37:48 2012
@@ -0,0 +1,82 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace Lucene.Net.Index.Memory
+{
+    public partial class MemoryIndex
+    {
+        private sealed partial class MemoryIndexReader
+        {
+            private class MemoryTermEnum : TermEnum
+            {
+                private readonly MemoryIndex _index;
+                private readonly MemoryIndexReader _reader;
+                private int _i; // index into info.sortedTerms
+                private int _j; // index into sortedFields
+
+                public MemoryTermEnum(MemoryIndex index, MemoryIndexReader reader, int ix, int jx)
+                {
+                    _index = index;
+                    _reader = reader;
+                    _i = ix; // index into info.sortedTerms
+                    _j = jx; // index into sortedFields
+                }
+
+                public override bool Next()
+                {
+                    if (DEBUG) System.Diagnostics.Debug.WriteLine("TermEnum.next");
+                    if (_j >= _index.sortedFields.Length) return false;
+                    Info info = _reader.GetInfo(_j);
+                    if (++_i < info.SortedTerms.Length) return true;
+
+                    // move to successor
+                    _j++;
+                    _i = 0;
+                    if (_j >= _index.sortedFields.Length) return false;
+                    _reader.GetInfo(_j).SortTerms();
+                    return true;
+                }
+
+                public override Term Term()
+                {
+                    if (DEBUG) System.Diagnostics.Debug.WriteLine("TermEnum.term: " + _i);
+                    if (_j >= _index.sortedFields.Length) return null;
+                    Info info = _reader.GetInfo(_j);
+                    if (_i >= info.SortedTerms.Length) return null;
+                    //          if (DEBUG) System.Diagnostics.Debug.WriteLine("TermEnum.term: " + i + ", " + info.sortedTerms[i].getKey());
+                    return CreateTerm(info, _j, info.SortedTerms[_i].Key);
+                }
+
+                public override int DocFreq()
+                {
+                    if (DEBUG) System.Diagnostics.Debug.WriteLine("TermEnum.docFreq");
+                    if (_j >= _index.sortedFields.Length) return 0;
+                    Info info = _reader.GetInfo(_j);
+                    if (_i >= info.SortedTerms.Length) return 0;
+                    return _index.NumPositions(info.GetPositions(_i));
+                }
+
+                protected override void Dispose(bool disposing)
+                {
+                    if (DEBUG) System.Diagnostics.Debug.WriteLine("TermEnum.close");
+                }
+
+                private Term CreateTerm(Info info, int pos, string text)
+                {
+                    // Assertion: sortFields has already been called before
+                    Term template = info.template;
+                    if (template == null)
+                    {
+                        // not yet cached?
+                        String fieldName = _index.sortedFields[pos].Key;
+                        template = new Term(fieldName);
+                        info.template = template;
+                    }
+
+                    return template.CreateTerm(text);
+                }
+            }
+        }
+    }
+}

Added: incubator/lucene.net/trunk/src/contrib/Memory/MemoryTermPositionVector.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/Memory/MemoryTermPositionVector.cs?rev=1310635&view=auto
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/Memory/MemoryTermPositionVector.cs (added)
+++ incubator/lucene.net/trunk/src/contrib/Memory/MemoryTermPositionVector.cs Fri Apr  6 23:37:48 2012
@@ -0,0 +1,96 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace Lucene.Net.Index.Memory
+{
+    public partial class MemoryIndex
+    {
+        private sealed partial class MemoryIndexReader
+        {
+            private class MemoryTermPositionVector : TermPositionVector
+            {
+                private readonly MemoryIndex _index;
+                private readonly string _fieldName;
+                private readonly KeyValuePair<String,ArrayIntList>[] sortedTerms;
+
+                public MemoryTermPositionVector(MemoryIndex index, Info info, string fieldName)
+                {
+                    _index = index;
+                    _fieldName = fieldName;
+                    sortedTerms = info.SortedTerms;
+                }
+
+                public string Field
+                {
+                    get { return _fieldName; }
+                }
+
+                public int Size
+                {
+                    get { return sortedTerms.Length; }
+                }
+
+                public string[] GetTerms()
+                {
+                    var terms = new String[sortedTerms.Length];
+                    for (int i = sortedTerms.Length; --i >= 0; )
+                    {
+                        terms[i] = sortedTerms[i].Key;
+                    }
+                    return terms;
+                }
+
+                public int[] GetTermFrequencies()
+                {
+                    int[] freqs = new int[sortedTerms.Length];
+                    for (int i = sortedTerms.Length; --i >= 0; )
+                    {
+                        freqs[i] = _index.NumPositions(sortedTerms[i].Value);
+                    }
+                    return freqs;
+                }
+
+                public int IndexOf(string term)
+                {
+                    int i = Array.BinarySearch(sortedTerms, new KeyValuePair<string, ArrayIntList>(term, null), Info.ArrayIntListComparer);
+                    return i >= 0 ? i : -1;
+                }
+
+                public int[] IndexesOf(string[] terms, int start, int len)
+                {
+                    int[] indexes = new int[len];
+                    for (int i = 0; i < len; i++)
+                    {
+                        indexes[i] = IndexOf(terms[start++]);
+                    }
+                    return indexes;
+                }
+
+                public int[] GetTermPositions(int index)
+                {
+                    return sortedTerms[index].Value.ToArray(_index.stride);
+                }
+
+                public TermVectorOffsetInfo[] GetOffsets(int index)
+                {
+                    if (_index.stride == 1) return null; // no offsets stored
+
+                    ArrayIntList positions = sortedTerms[index].Value;
+                    int size = positions.Size();
+                    TermVectorOffsetInfo[] offsets = new TermVectorOffsetInfo[size / _index.stride];
+
+                    for (int i = 0, j = 1; j < size; i++, j += _index.stride)
+                    {
+                        int start = positions.Get(j);
+                        int end = positions.Get(j + 1);
+                        offsets[i] = new TermVectorOffsetInfo(start, end);
+                    }
+                    return offsets;
+                }
+            }
+        }
+    }
+}

Added: incubator/lucene.net/trunk/src/contrib/Memory/MemoryTermPositions.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/Memory/MemoryTermPositions.cs?rev=1310635&view=auto
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/Memory/MemoryTermPositions.cs (added)
+++ incubator/lucene.net/trunk/src/contrib/Memory/MemoryTermPositions.cs Fri Apr  6 23:37:48 2012
@@ -0,0 +1,131 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace Lucene.Net.Index.Memory
+{
+    public partial class MemoryIndex
+    {
+        private sealed partial class MemoryIndexReader
+        {
+            private sealed class MemoryTermPositions : TermPositions
+            {
+                private readonly MemoryIndex _index;
+                private readonly MemoryIndexReader _reader;
+                private bool hasNext;
+                private int cursor = 0;
+                private ArrayIntList current;
+                private Term term;
+
+                public MemoryTermPositions(MemoryIndex index, MemoryIndexReader reader)
+                {
+                    _index = index;
+                    _reader = reader;
+                }
+
+                public void Seek(Term term)
+                {
+                    this.term = term;
+
+                    if (DEBUG) System.Diagnostics.Debug.WriteLine(".seek: " + term);
+
+                    if (term == null)
+                    {
+                        hasNext = true; // term==null means match all docs
+                    }
+                    else
+                    {
+                        Info info = _reader.GetInfo(term.Field);
+                        current = info == null ? null : info.GetPositions(term.Text);
+                        hasNext = (current != null);
+                        cursor = 0;
+                    }
+                }
+
+                public void Seek(TermEnum termEnum)
+                {
+                    if (DEBUG) System.Diagnostics.Debug.WriteLine(".seekEnum");
+                    Seek(termEnum.Term());
+                }
+
+                public int Doc
+                {
+                    get
+                    {
+                        if (DEBUG) System.Diagnostics.Debug.WriteLine(".doc");
+                        return 0;
+                    }
+                }
+
+                public int Freq
+                {
+                    get
+                    {
+                        int freq = current != null ? _index.NumPositions(current) : (term == null ? 1 : 0);
+                        if (DEBUG) System.Diagnostics.Debug.WriteLine(".freq: " + freq);
+                        return freq;
+                    }
+                }
+
+                public bool Next()
+                {
+                    if (DEBUG) System.Diagnostics.Debug.WriteLine(".next: " + current + ", oldHasNext=" + hasNext);
+                    bool next = hasNext;
+                    hasNext = false;
+                    return next;
+                }
+
+                public int Read(int[] docs, int[] freqs)
+                {
+                    if (DEBUG) System.Diagnostics.Debug.WriteLine(".read: " + docs.Length);
+                    if (!hasNext) return 0;
+                    hasNext = false;
+                    docs[0] = 0;
+                    freqs[0] = Freq;
+                    return 1;
+                }
+
+                public bool SkipTo(int target)
+                {
+                    if (DEBUG) System.Diagnostics.Debug.WriteLine(".skipTo: " + target);
+                    return Next();
+                }
+
+                public void Close()
+                {
+                    if (DEBUG) System.Diagnostics.Debug.WriteLine(".close");
+                }
+
+                public void Dispose()
+                {
+                    if (DEBUG) System.Diagnostics.Debug.WriteLine(".close");
+                }
+
+                public int NextPosition()
+                {
+                    int pos = current.Get(cursor);
+                    cursor += _index.stride;
+                    if (DEBUG) System.Diagnostics.Debug.WriteLine(".nextPosition: " + pos);
+                    return pos;
+                }
+
+                public int PayloadLength
+                {
+                    get { throw new NotSupportedException(); }
+                }
+
+                public byte[] GetPayload(byte[] data, int offset)
+                {
+                    throw new NotSupportedException();
+                }
+
+                public bool IsPayloadAvailable
+                {
+                    get { return false; }
+                }
+            }
+        }
+    }
+}
\ No newline at end of file
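
MemoryTermPositions implements the TermPositions contract over the single in-memory document, so Doc is always 0 and Next() succeeds at most once per Seek. A hedged sketch of how a caller might walk positions through that contract; the reader variable is assumed to be the IndexReader exposed by a MemoryIndex searcher and is not part of this patch:

    // Hedged sketch, not part of the commit: iterating positions for one term.
    // 'reader' is an assumed IndexReader backed by a MemoryIndex.
    TermPositions tp = reader.TermPositions();
    tp.Seek(new Term("content", "lucene"));
    while (tp.Next())                          // true at most once: single document
    {
        for (int i = 0; i < tp.Freq; i++)      // Freq property as defined above
        {
            Console.WriteLine("doc {0}, position {1}", tp.Doc, tp.NextPosition());
        }
    }
    tp.Close();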

Added: incubator/lucene.net/trunk/src/contrib/Memory/Properties/AssemblyInfo.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/Memory/Properties/AssemblyInfo.cs?rev=1310635&view=auto
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/Memory/Properties/AssemblyInfo.cs (added)
+++ incubator/lucene.net/trunk/src/contrib/Memory/Properties/AssemblyInfo.cs Fri Apr  6 23:37:48 2012
@@ -0,0 +1,36 @@
+using System.Reflection;
+using System.Runtime.CompilerServices;
+using System.Runtime.InteropServices;
+
+// General Information about an assembly is controlled through the following 
+// set of attributes. Change these attribute values to modify the information
+// associated with an assembly.
+[assembly: AssemblyTitle("Contrib.Memory")]
+[assembly: AssemblyDescription("")]
+[assembly: AssemblyConfiguration("")]
+[assembly: AssemblyCompany("")]
+[assembly: AssemblyProduct("Contrib.Memory")]
+[assembly: AssemblyCopyright("Copyright ©  2012")]
+[assembly: AssemblyTrademark("")]
+[assembly: AssemblyCulture("")]
+
+// Setting ComVisible to false makes the types in this assembly not visible 
+// to COM components.  If you need to access a type in this assembly from 
+// COM, set the ComVisible attribute to true on that type.
+[assembly: ComVisible(false)]
+
+// The following GUID is for the ID of the typelib if this project is exposed to COM
+[assembly: Guid("5d1e7f1d-ae69-4cf0-875e-64c3a5f3a53b")]
+
+// Version information for an assembly consists of the following four values:
+//
+//      Major Version
+//      Minor Version 
+//      Build Number
+//      Revision
+//
+// You can specify all the values or you can default the Build and Revision Numbers 
+// by using the '*' as shown below:
+// [assembly: AssemblyVersion("1.0.*")]
+[assembly: AssemblyVersion("1.0.0.0")]
+[assembly: AssemblyFileVersion("1.0.0.0")]

Added: incubator/lucene.net/trunk/src/contrib/Memory/TermComparer.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/Memory/TermComparer.cs?rev=1310635&view=auto
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/Memory/TermComparer.cs (added)
+++ incubator/lucene.net/trunk/src/contrib/Memory/TermComparer.cs Fri Apr  6 23:37:48 2012
@@ -0,0 +1,32 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace Lucene.Net.Index.Memory
+{
+    class TermComparer
+    {
+        /// <summary>
+        /// Sorts term entries into ascending order; also works for
+        /// Arrays.binarySearch() and Arrays.sort()
+        /// </summary>
+        public static int KeyComparer<TKey, TValue>(KeyValuePair<TKey, TValue> x, KeyValuePair<TKey, TValue> y)
+            where TKey : class, IComparable<TKey>
+        {
+            if (x.Key == y.Key) return 0;
+            return typeof (TKey) == typeof (string)
+                       ? string.Compare(x.Key as string, y.Key as string, StringComparison.Ordinal)
+                       : x.Key.CompareTo(y.Key);
+        }
+    }
+
+    sealed class TermComparer<T> : TermComparer, IComparer<KeyValuePair<string, T>>
+    {
+        public int Compare(KeyValuePair<string, T> x, KeyValuePair<string, T> y)
+        {
+            return KeyComparer(x, y);
+        }
+    }
+}
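
The doc comment above describes the comparer's purpose; a minimal usage sketch follows, with an int payload standing in for the real posting data (the class is internal, so this only applies inside the assembly):

    // Hedged sketch: keeping term entries in ascending key order with TermComparer<T>.
    // The int values are placeholders for whatever payload the index stores per term.
    var entries = new[]
    {
        new KeyValuePair<string, int>("zebra", 1),
        new KeyValuePair<string, int>("apple", 2),
        new KeyValuePair<string, int>("mango", 3),
    };
    Array.Sort(entries, new TermComparer<int>());
    // entries is now ordered "apple", "mango", "zebra" (ordinal string comparison)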

Modified: incubator/lucene.net/trunk/src/contrib/Queries/BooleanFilter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/Queries/BooleanFilter.cs?rev=1310635&r1=1310634&r2=1310635&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/Queries/BooleanFilter.cs (original)
+++ incubator/lucene.net/trunk/src/contrib/Queries/BooleanFilter.cs Fri Apr  6 23:37:48 2012
@@ -160,7 +160,7 @@ namespace Lucene.Net.Search
         /// <param name="filterClause">The clause to add.</param>
         public void Add(FilterClause filterClause)
         {
-            if (filterClause.GetOccur() == BooleanClause.Occur.MUST)
+            if (filterClause.GetOccur() == Occur.MUST)
             {
                 if (mustFilters == null)
                 {
@@ -168,7 +168,7 @@ namespace Lucene.Net.Search
                 }
                 mustFilters.Add(filterClause.GetFilter());
             }
-            if (filterClause.GetOccur() == BooleanClause.Occur.SHOULD)
+            if (filterClause.GetOccur() == Occur.SHOULD)
             {
                 if (shouldFilters == null)
                 {
@@ -176,7 +176,7 @@ namespace Lucene.Net.Search
                 }
                 shouldFilters.Add(filterClause.GetFilter());
             }
-            if (filterClause.GetOccur() == BooleanClause.Occur.MUST_NOT)
+            if (filterClause.GetOccur() == Occur.MUST_NOT)
             {
                 if (notFilters == null)
                 {

Modified: incubator/lucene.net/trunk/src/contrib/Queries/BoostingQuery.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/Queries/BoostingQuery.cs?rev=1310635&r1=1310634&r2=1310635&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/Queries/BoostingQuery.cs (original)
+++ incubator/lucene.net/trunk/src/contrib/Queries/BoostingQuery.cs Fri Apr  6 23:37:48 2012
@@ -61,8 +61,8 @@ namespace Lucene.Net.Search
         {
             BooleanQuery result = new AnonymousBooleanQuery(boost);
 
-            result.Add(match, BooleanClause.Occur.MUST);
-            result.Add(context, BooleanClause.Occur.SHOULD);
+            result.Add(match, Occur.MUST);
+            result.Add(context, Occur.SHOULD);
 
             return result;
         }

Modified: incubator/lucene.net/trunk/src/contrib/Queries/FilterClause.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/Queries/FilterClause.cs?rev=1310635&r1=1310634&r2=1310635&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/Queries/FilterClause.cs (original)
+++ incubator/lucene.net/trunk/src/contrib/Queries/FilterClause.cs Fri Apr  6 23:37:48 2012
@@ -31,7 +31,7 @@ namespace Lucene.Net.Search
     [Serializable]
     public class FilterClause
     {
-        BooleanClause.Occur occur;
+        Occur occur;
         Filter filter;
 
         /**
@@ -39,7 +39,7 @@ namespace Lucene.Net.Search
          * @param filter A Filter object containing a BitSet
          * @param occur A parameter implementation indicating SHOULD, MUST or MUST NOT
          */
-        public FilterClause(Filter filter, BooleanClause.Occur occur)
+        public FilterClause(Filter filter, Occur occur)
         {
             this.occur = occur;
             this.filter = filter;
@@ -58,7 +58,7 @@ namespace Lucene.Net.Search
          * Returns this FilterClause's occur parameter
          * @return An Occur object
          */
-        public BooleanClause.Occur GetOccur()
+        public Occur GetOccur()
         {
             return occur;
         }
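
The edits to FilterClause and BooleanFilter above track the move from the nested BooleanClause.Occur type to the top-level Occur enum. A hedged sketch of the call sites after the change; QueryWrapperFilter is used here only as a convenient way to obtain a Filter and is not part of this patch:

    // Hedged sketch of the post-change API: Occur is referenced directly
    // instead of through BooleanClause.
    var booleanFilter = new BooleanFilter();
    Filter must = new QueryWrapperFilter(new TermQuery(new Term("status", "active")));
    Filter mustNot = new QueryWrapperFilter(new TermQuery(new Term("status", "deleted")));
    booleanFilter.Add(new FilterClause(must, Occur.MUST));
    booleanFilter.Add(new FilterClause(mustNot, Occur.MUST_NOT));

    var query = new BooleanQuery();
    query.Add(new TermQuery(new Term("body", "lucene")), Occur.SHOULD);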

Modified: incubator/lucene.net/trunk/src/contrib/Queries/FuzzyLikeThisQuery.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/Queries/FuzzyLikeThisQuery.cs?rev=1310635&r1=1310634&r2=1310635&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/Queries/FuzzyLikeThisQuery.cs (original)
+++ incubator/lucene.net/trunk/src/contrib/Queries/FuzzyLikeThisQuery.cs Fri Apr  6 23:37:48 2012
@@ -296,7 +296,7 @@ namespace Lucene.Net.Search
                     ScoreTerm st = variants[0];
                     TermQuery tq = new FuzzyTermQuery(st.Term, ignoreTF);
                     tq.Boost = st.Score; // set the boost to a mix of IDF and score
-                    bq.Add(tq, BooleanClause.Occur.SHOULD);
+                    bq.Add(tq, Occur.SHOULD);
                 }
                 else
                 {
@@ -305,9 +305,9 @@ namespace Lucene.Net.Search
                     {
                         TermQuery tq = new FuzzyTermQuery(st.Term, ignoreTF);      // found a match
                         tq.Boost = st.Score; // set the boost using the ScoreTerm's score
-                        termVariants.Add(tq, BooleanClause.Occur.SHOULD);          // add to query                    
+                        termVariants.Add(tq, Occur.SHOULD);          // add to query                    
                     }
-                    bq.Add(termVariants, BooleanClause.Occur.SHOULD);          // add to query
+                    bq.Add(termVariants, Occur.SHOULD);          // add to query
                 }
             }
             //TODO possible alternative step 3 - organize above booleans into a new layer of field-based

Modified: incubator/lucene.net/trunk/src/contrib/Queries/Similar/MoreLikeThis.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/Queries/Similar/MoreLikeThis.cs?rev=1310635&r1=1310634&r2=1310635&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/Queries/Similar/MoreLikeThis.cs (original)
+++ incubator/lucene.net/trunk/src/contrib/Queries/Similar/MoreLikeThis.cs Fri Apr  6 23:37:48 2012
@@ -650,7 +650,7 @@ namespace Lucene.Net.Search.Similar
 
                 try
                 {
-                    query.Add(tq, BooleanClause.Occur.SHOULD);
+                    query.Add(tq, Occur.SHOULD);
                 }
                 catch (BooleanQuery.TooManyClauses ignore)
                 {

Modified: incubator/lucene.net/trunk/src/contrib/Queries/Similar/SimilarityQueries.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/Queries/Similar/SimilarityQueries.cs?rev=1310635&r1=1310634&r2=1310635&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/Queries/Similar/SimilarityQueries.cs (original)
+++ incubator/lucene.net/trunk/src/contrib/Queries/Similar/SimilarityQueries.cs Fri Apr  6 23:37:48 2012
@@ -17,6 +17,7 @@
 
 using System;
 using System.Collections.Generic;
+using Lucene.Net.Search;
 using Analyzer = Lucene.Net.Analysis.Analyzer;
 using TokenStream = Lucene.Net.Analysis.TokenStream;
 using Term = Lucene.Net.Index.Term;
@@ -104,7 +105,7 @@ namespace Similarity.Net
                 TermQuery tq = new TermQuery(new Term(field, word));
                 try
                 {
-                    tmp.Add(tq, BooleanClause.Occur.SHOULD);
+                    tmp.Add(tq, Occur.SHOULD);
                 }
                 catch (BooleanQuery.TooManyClauses)
                 {

Modified: incubator/lucene.net/trunk/src/contrib/Regex/SpanRegexQuery.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/Regex/SpanRegexQuery.cs?rev=1310635&r1=1310634&r2=1310635&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/Regex/SpanRegexQuery.cs (original)
+++ incubator/lucene.net/trunk/src/contrib/Regex/SpanRegexQuery.cs Fri Apr  6 23:37:48 2012
@@ -71,7 +71,7 @@ namespace Lucene.Net.Search.Regex
 				BooleanClause clause = clauses[i];
 
 				// Clauses from RegexQuery.Rewrite are always TermQuery's
-				TermQuery tq = (TermQuery) clause.GetQuery();
+				TermQuery tq = (TermQuery) clause.Query;
 
 				sqs[i] = new SpanTermQuery(tq.Term);
 				sqs[i].Boost = tq.Boost;

Modified: incubator/lucene.net/trunk/src/contrib/Similarity/Similar/MoreLikeThis.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/Similarity/Similar/MoreLikeThis.cs?rev=1310635&r1=1310634&r2=1310635&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/Similarity/Similar/MoreLikeThis.cs (original)
+++ incubator/lucene.net/trunk/src/contrib/Similarity/Similar/MoreLikeThis.cs Fri Apr  6 23:37:48 2012
@@ -586,7 +586,7 @@ namespace Similarity.Net
 				
                 try
                 {
-                    query.Add(tq, BooleanClause.Occur.SHOULD);
+                    query.Add(tq, Occur.SHOULD);
                 }
                 catch (BooleanQuery.TooManyClauses ignore)
                 {

Modified: incubator/lucene.net/trunk/src/contrib/Similarity/Similar/SimilarityQueries.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/Similarity/Similar/SimilarityQueries.cs?rev=1310635&r1=1310634&r2=1310635&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/Similarity/Similar/SimilarityQueries.cs (original)
+++ incubator/lucene.net/trunk/src/contrib/Similarity/Similar/SimilarityQueries.cs Fri Apr  6 23:37:48 2012
@@ -19,6 +19,7 @@ using System;
 using System.Collections.Generic;
 using System.IO;
 using Lucene.Net.Analysis.Tokenattributes;
+using Lucene.Net.Search;
 using Analyzer = Lucene.Net.Analysis.Analyzer;
 using TokenStream = Lucene.Net.Analysis.TokenStream;
 using Term = Lucene.Net.Index.Term;
@@ -108,7 +109,7 @@ namespace Similarity.Net
                 var tq = new TermQuery(new Term(field, word));
                 try
                 {
-                    tmp.Add(tq, BooleanClause.Occur.SHOULD);
+                    tmp.Add(tq, Occur.SHOULD);
                 }
                 catch (BooleanQuery.TooManyClauses too)
                 {

Modified: incubator/lucene.net/trunk/src/contrib/SpellChecker/Spell/LuceneDictionary.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/SpellChecker/Spell/LuceneDictionary.cs?rev=1310635&r1=1310634&r2=1310635&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/SpellChecker/Spell/LuceneDictionary.cs (original)
+++ incubator/lucene.net/trunk/src/contrib/SpellChecker/Spell/LuceneDictionary.cs Fri Apr  6 23:37:48 2012
@@ -15,6 +15,8 @@
  * limitations under the License.
  */
 
+using Lucene.Net.Documents;
+
 namespace SpellChecker.Net.Search.Spell
 {
     using System;

Modified: incubator/lucene.net/trunk/src/contrib/SpellChecker/Spell/SpellChecker.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/SpellChecker/Spell/SpellChecker.cs?rev=1310635&r1=1310634&r2=1310635&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/SpellChecker/Spell/SpellChecker.cs (original)
+++ incubator/lucene.net/trunk/src/contrib/SpellChecker/Spell/SpellChecker.cs Fri Apr  6 23:37:48 2012
@@ -317,14 +317,14 @@ namespace SpellChecker.Net.Search.Spell
         {
             Query tq = new TermQuery(new Term(k, v));
             tq.Boost = boost;
-            q.Add(new BooleanClause(tq, BooleanClause.Occur.SHOULD));
+            q.Add(new BooleanClause(tq, Occur.SHOULD));
         }
 
 
         /// <summary> Add a clause to a boolean query.</summary>
         private static void Add(BooleanQuery q, System.String k, System.String v)
         {
-            q.Add(new BooleanClause(new TermQuery(new Term(k, v)), BooleanClause.Occur.SHOULD));
+            q.Add(new BooleanClause(new TermQuery(new Term(k, v)), Occur.SHOULD));
         }
 
 

Modified: incubator/lucene.net/trunk/src/contrib/WordNet/SynExpand/SynExpand.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/WordNet/SynExpand/SynExpand.cs?rev=1310635&r1=1310634&r2=1310635&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/WordNet/SynExpand/SynExpand.cs (original)
+++ incubator/lucene.net/trunk/src/contrib/WordNet/SynExpand/SynExpand.cs Fri Apr  6 23:37:48 2012
@@ -120,7 +120,7 @@ namespace WorldNet.Net
 				// [2a] add to level words in
 				var word = (String) it.Current;
 				var tq = new TermQuery(new Term(field, word));
-				tmp.Add(tq, BooleanClause.Occur.SHOULD);
+				tmp.Add(tq, Occur.SHOULD);
 
 			    var c = new CollectorImpl(field, boost);
                 syns.Search(new TermQuery(new Term(Syns2Index.F_WORD, word)), c);
@@ -172,9 +172,9 @@ namespace WorldNet.Net
 
                     var tq = new TermQuery(new Term(field, syn));
                     if (boost > 0) // else keep normal 1.0
-                        tq.SetBoost(boost);
+                        tq.Boost = boost;
 
-                    tmp.Add(tq, BooleanClause.Occur.SHOULD);
+                    tmp.Add(tq, Occur.SHOULD);
                 }
             }
 
@@ -183,9 +183,9 @@ namespace WorldNet.Net
                 this.reader = reader;
             }
 
-            public override bool AcceptsDocsOutOfOrder()
+            public override bool AcceptsDocsOutOfOrder
             {
-                return true;
+                get { return true; }
             }
 
         }
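
AcceptsDocsOutOfOrder changes in these collectors from a method override to a read-only property, matching the Collector base class. A hedged sketch of a minimal collector written against the property-based shape; the class name and counting logic are invented for illustration:

    // Hedged sketch: a minimal Collector under the property-based API.
    // Override signatures follow those visible in the surrounding diff.
    internal sealed class HitCountingCollector : Collector
    {
        public int NumHits;

        public override void SetScorer(Scorer scorer) { }

        public override void Collect(int doc)
        {
            NumHits++;                       // order is irrelevant for counting
        }

        public override void SetNextReader(IndexReader reader, int docBase) { }

        public override bool AcceptsDocsOutOfOrder
        {
            get { return true; }
        }
    }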

Modified: incubator/lucene.net/trunk/src/contrib/WordNet/SynLookup/SynLookup.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/WordNet/SynLookup/SynLookup.cs?rev=1310635&r1=1310634&r2=1310635&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/WordNet/SynLookup/SynLookup.cs (original)
+++ incubator/lucene.net/trunk/src/contrib/WordNet/SynLookup/SynLookup.cs Fri Apr  6 23:37:48 2012
@@ -64,7 +64,7 @@ namespace WorldNet.Net
 					var hits = searcher.Search(query, countingCollector.numHits).ScoreDocs;
 
 					foreach (var v in
-						hits.Select(t => searcher.Doc(t.doc)).Select(doc => doc.GetValues(Syns2Index.F_SYN)).SelectMany(values => values))
+						hits.Select(t => searcher.Doc(t.Doc)).Select(doc => doc.GetValues(Syns2Index.F_SYN)).SelectMany(values => values))
 					{
 						Console.Out.WriteLine(v);
 					}
@@ -113,7 +113,7 @@ namespace WorldNet.Net
 				// [2a] add to level words in
 				var word = (String)it.Current;
 				var tq = new TermQuery(new Term(field, word));
-				tmp.Add(tq, BooleanClause.Occur.SHOULD);
+				tmp.Add(tq, Occur.SHOULD);
 
 				var c = new CollectorImpl(field, boost);
 				syns.Search(new TermQuery(new Term(Syns2Index.F_WORD, word)), c);
@@ -137,9 +137,9 @@ namespace WorldNet.Net
             public override void SetNextReader(IndexReader reader, int docBase)
             { }
 
-            public override bool AcceptsDocsOutOfOrder()
+            public override bool AcceptsDocsOutOfOrder
             {
-                return true;
+                get { return true; }
             }
         }
 
@@ -173,9 +173,9 @@ namespace WorldNet.Net
 
                     var tq = new TermQuery(new Term(field, syn));
                     if (boost > 0) // else keep normal 1.0
-                        tq.SetBoost(boost);
+                        tq.Boost = boost;
 
-                    tmp.Add(tq, BooleanClause.Occur.SHOULD);
+                    tmp.Add(tq, Occur.SHOULD);
                 }
             }
 
@@ -184,9 +184,9 @@ namespace WorldNet.Net
                 this.reader = reader;
             }
 
-            public override bool AcceptsDocsOutOfOrder()
+            public override bool AcceptsDocsOutOfOrder
             {
-                return true;
+                get { return true; }
             }
 
         }

Modified: incubator/lucene.net/trunk/src/contrib/WordNet/Syns2Index/Syns2Index.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/contrib/WordNet/Syns2Index/Syns2Index.cs?rev=1310635&r1=1310634&r2=1310635&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/contrib/WordNet/Syns2Index/Syns2Index.cs (original)
+++ incubator/lucene.net/trunk/src/contrib/WordNet/Syns2Index/Syns2Index.cs Fri Apr  6 23:37:48 2012
@@ -218,7 +218,7 @@ namespace WorldNet.Net
 			using (var dir = FSDirectory.Open(new DirectoryInfo(indexDir)))
 			{
 				var writer = new IndexWriter(dir, ana, true, IndexWriter.MaxFieldLength.LIMITED);
-				writer.SetUseCompoundFile(true); // why?
+				writer.UseCompoundFile = true; // why?
 
 				var i1 = word2Nums.Keys.GetEnumerator();
 				while (i1.MoveNext())

Modified: incubator/lucene.net/trunk/src/core/Index/DirectoryReader.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Index/DirectoryReader.cs?rev=1310635&r1=1310634&r2=1310635&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Index/DirectoryReader.cs (original)
+++ incubator/lucene.net/trunk/src/core/Index/DirectoryReader.cs Fri Apr  6 23:37:48 2012
@@ -1503,7 +1503,7 @@ namespace Lucene.Net.Index
             
             protected internal virtual TermDocs TermDocs(IndexReader reader)
             {
-                return term == null?reader.TermDocs(null):reader.TermDocs();
+                return term == null ? reader.TermDocs(null):reader.TermDocs();
             }
             
             public virtual void  Close()

Modified: incubator/lucene.net/trunk/src/core/Index/DocumentsWriter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Index/DocumentsWriter.cs?rev=1310635&r1=1310634&r2=1310635&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Index/DocumentsWriter.cs (original)
+++ incubator/lucene.net/trunk/src/core/Index/DocumentsWriter.cs Fri Apr  6 23:37:48 2012
@@ -1405,7 +1405,7 @@ namespace Lucene.Net.Index
 					num.SetNum(docIDUpto);
 				deletesInRAM.numTerms++;
 				
-				deletesInRAM.AddBytesUsed(BYTES_PER_DEL_TERM + term.text.Length * CHAR_NUM_BYTE);
+				deletesInRAM.AddBytesUsed(BYTES_PER_DEL_TERM + term.Text.Length * CHAR_NUM_BYTE);
 			}
 		}
 		

Modified: incubator/lucene.net/trunk/src/core/Index/ParallelReader.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Index/ParallelReader.cs?rev=1310635&r1=1310634&r2=1310635&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Index/ParallelReader.cs (original)
+++ incubator/lucene.net/trunk/src/core/Index/ParallelReader.cs Fri Apr  6 23:37:48 2012
@@ -681,7 +681,7 @@ namespace Lucene.Net.Index
 			public ParallelTermDocs(ParallelReader enclosingInstance, Term term)
 			{
 				InitBlock(enclosingInstance);
-                if (term == null)
+                if(term == null)
                     termDocs = (Enclosing_Instance.readers.Count == 0)
                                    ? null
                                    : Enclosing_Instance.readers[0].TermDocs(null);

Modified: incubator/lucene.net/trunk/src/core/Index/PositionBasedTermVectorMapper.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Index/PositionBasedTermVectorMapper.cs?rev=1310635&r1=1310634&r2=1310635&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Index/PositionBasedTermVectorMapper.cs (original)
+++ incubator/lucene.net/trunk/src/core/Index/PositionBasedTermVectorMapper.cs Fri Apr  6 23:37:48 2012
@@ -70,7 +70,7 @@ namespace Lucene.Net.Index
 					pos = new TVPositionInfo(positions[i], storeOffsets);
 					currentPositions[posVal] = pos;
 				}
-				pos.addTerm(term, offsets != null?offsets[i]:null);
+				pos.addTerm(term, offsets != null ? offsets[i] : TermVectorOffsetInfo.Null);
 			}
 		}
 		

Modified: incubator/lucene.net/trunk/src/core/Index/SegmentMerger.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Index/SegmentMerger.cs?rev=1310635&r1=1310634&r2=1310635&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Index/SegmentMerger.cs (original)
+++ incubator/lucene.net/trunk/src/core/Index/SegmentMerger.cs Fri Apr  6 23:37:48 2012
@@ -746,9 +746,9 @@ namespace Lucene.Net.Index
 					top = queue.Top();
 				}
 				
-				if ((System.Object) currentField != (System.Object) term.field)
+				if ((System.Object) currentField != (System.Object) term.Field)
 				{
-					currentField = term.field;
+                    currentField = term.Field;
 					if (termsConsumer != null)
 						termsConsumer.Finish();
 					FieldInfo fieldInfo = fieldInfos.FieldInfo(currentField);
@@ -800,7 +800,7 @@ namespace Lucene.Net.Index
 		private int AppendPostings(FormatPostingsTermsConsumer termsConsumer, SegmentMergeInfo[] smis, int n)
 		{
 			
-			FormatPostingsDocsConsumer docConsumer = termsConsumer.AddTerm(smis[0].term.text);
+			FormatPostingsDocsConsumer docConsumer = termsConsumer.AddTerm(smis[0].term.Text);
 			int df = 0;
 			for (int i = 0; i < n; i++)
 			{

Modified: incubator/lucene.net/trunk/src/core/Index/SegmentTermDocs.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Index/SegmentTermDocs.cs?rev=1310635&r1=1310634&r2=1310635&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Index/SegmentTermDocs.cs (original)
+++ incubator/lucene.net/trunk/src/core/Index/SegmentTermDocs.cs Fri Apr  6 23:37:48 2012
@@ -92,9 +92,9 @@ namespace Lucene.Net.Index
 		internal virtual void  Seek(TermInfo ti, Term term)
 		{
 			count = 0;
-			FieldInfo fi = parent.core.fieldInfos.FieldInfo(term.field);
-			currentFieldOmitTermFreqAndPositions = (fi != null)?fi.omitTermFreqAndPositions:false;
-			currentFieldStoresPayloads = (fi != null)?fi.storePayloads:false;
+			FieldInfo fi = parent.core.fieldInfos.FieldInfo(term.Field);
+			currentFieldOmitTermFreqAndPositions = (fi != null) && fi.omitTermFreqAndPositions;
+			currentFieldStoresPayloads = (fi != null) && fi.storePayloads;
 			if (ti == null)
 			{
 				df = 0;

Modified: incubator/lucene.net/trunk/src/core/Index/TermBuffer.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Index/TermBuffer.cs?rev=1310635&r1=1310634&r2=1310635&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Index/TermBuffer.cs (original)
+++ incubator/lucene.net/trunk/src/core/Index/TermBuffer.cs Fri Apr  6 23:37:48 2012
@@ -69,7 +69,7 @@ namespace Lucene.Net.Index
 		
 		public void  Read(IndexInput input, FieldInfos fieldInfos)
 		{
-			this.term = null; // invalidate cache
+            this.term = null; // invalidate cache
 			int start = input.ReadVInt();
 			int length = input.ReadVInt();
 			int totalLength = start + length;
@@ -129,7 +129,7 @@ namespace Lucene.Net.Index
 		{
 			field = null;
 			text.SetLength(0);
-			term = null;
+            term = null;
 			dirty = true;
 		}
 		
@@ -137,7 +137,7 @@ namespace Lucene.Net.Index
 		{
 			if (field == null)
 			// unset
-				return null;
+                return null;
 			
 			if (term == null)
 				term = new Term(field, new System.String(text.result, 0, text.length), false);

Modified: incubator/lucene.net/trunk/src/core/Index/TermInfosWriter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Index/TermInfosWriter.cs?rev=1310635&r1=1310634&r2=1310635&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Index/TermInfosWriter.cs (original)
+++ incubator/lucene.net/trunk/src/core/Index/TermInfosWriter.cs Fri Apr  6 23:37:48 2012
@@ -112,8 +112,8 @@ namespace Lucene.Net.Index
 		
 		internal void  Add(Term term, TermInfo ti)
 		{
-			UnicodeUtil.UTF16toUTF8(term.text, 0, term.text.Length, utf8Result);
-			Add(fieldInfos.FieldNumber(term.field), utf8Result.result, utf8Result.length, ti);
+			UnicodeUtil.UTF16toUTF8(term.Text, 0, term.Text.Length, utf8Result);
+			Add(fieldInfos.FieldNumber(term.Field), utf8Result.result, utf8Result.length, ti);
 		}
 		
 		// Currently used only by assert statements

Modified: incubator/lucene.net/trunk/src/core/Index/TermVectorEntry.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Index/TermVectorEntry.cs?rev=1310635&r1=1310634&r2=1310635&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Index/TermVectorEntry.cs (original)
+++ incubator/lucene.net/trunk/src/core/Index/TermVectorEntry.cs Fri Apr  6 23:37:48 2012
@@ -28,11 +28,11 @@ namespace Lucene.Net.Index
 		private int frequency;
 		private TermVectorOffsetInfo[] offsets;
 		private int[] positions;
-		
-		
-		public TermVectorEntry()
-		{
-		}
+
+
+        public TermVectorEntry()
+        {
+        }
 		
 		public TermVectorEntry(System.String field, System.String term, int frequency, TermVectorOffsetInfo[] offsets, int[] positions)
 		{


