lucenenet-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From nightowl...@apache.org
Subject [31/72] [abbrv] [partial] lucenenet git commit: Lucene.Net.Tests: Removed \core directory and put its contents in root directory
Date Sun, 26 Feb 2017 23:37:19 GMT
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/96822396/src/Lucene.Net.Tests/Index/TestIndexWriterCommit.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests/Index/TestIndexWriterCommit.cs b/src/Lucene.Net.Tests/Index/TestIndexWriterCommit.cs
new file mode 100644
index 0000000..4cd35f2
--- /dev/null
+++ b/src/Lucene.Net.Tests/Index/TestIndexWriterCommit.cs
@@ -0,0 +1,772 @@
+using System;
+using System.Collections.Generic;
+using Lucene.Net.Documents;
+
+namespace Lucene.Net.Index
+{
+    /*
+     * Licensed to the Apache Software Foundation (ASF) under one or more
+     * contributor license agreements.  See the NOTICE file distributed with
+     * this work for additional information regarding copyright ownership.
+     * The ASF licenses this file to You under the Apache License, Version 2.0
+     * (the "License"); you may not use this file except in compliance with
+     * the License.  You may obtain a copy of the License at
+     *
+     *     http://www.apache.org/licenses/LICENSE-2.0
+     *
+     * Unless required by applicable law or agreed to in writing, software
+     * distributed under the License is distributed on an "AS IS" BASIS,
+     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     * See the License for the specific language governing permissions and
+     * limitations under the License.
+     */
+
+    using Lucene.Net.Analysis;
+    using Lucene.Net.Randomized.Generators;
+    using Lucene.Net.Support;
+    using NUnit.Framework;
+    using System.IO;
+    using Directory = Lucene.Net.Store.Directory;
+    using Document = Documents.Document;
+    using Field = Field;
+    using IndexSearcher = Lucene.Net.Search.IndexSearcher;
+    using LuceneTestCase = Lucene.Net.Util.LuceneTestCase;
+    using MockDirectoryWrapper = Lucene.Net.Store.MockDirectoryWrapper;
+    using ScoreDoc = Lucene.Net.Search.ScoreDoc;
+    using TermQuery = Lucene.Net.Search.TermQuery;
+    using TestUtil = Lucene.Net.Util.TestUtil;
+
+    [TestFixture]
+    public class TestIndexWriterCommit : LuceneTestCase
+    {
+        private static readonly FieldType StoredTextType = new FieldType(TextField.TYPE_NOT_STORED);
+
+        /*
+         * Simple test for "commit on close": open writer then
+         * add a bunch of docs, making sure reader does not see
+         * these docs until writer is closed.
+         */
+
+        [Test]
+        public virtual void TestCommitOnClose()
+        {
+            Directory dir = NewDirectory();
+            IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())));
+            // Index 14 docs and close: close commits, so a new reader sees all 14.
+            for (int i = 0; i < 14; i++)
+            {
+                AddDoc(writer);
+            }
+            writer.Dispose();
+
+            Term searchTerm = new Term("content", "aaa");
+            DirectoryReader reader = DirectoryReader.Open(dir);
+            IndexSearcher searcher = NewSearcher(reader);
+            ScoreDoc[] hits = searcher.Search(new TermQuery(searchTerm), null, 1000).ScoreDocs;
+            Assert.AreEqual(14, hits.Length, "first number of hits");
+            reader.Dispose();
+
+            // Keep this reader open across the second writer's lifetime to
+            // verify it stays "current" until the writer commits on close.
+            reader = DirectoryReader.Open(dir);
+
+            writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())));
+            // Add 3 * 11 = 33 more docs; freshly opened readers must NOT see
+            // them (still 14 hits) because the writer has not committed yet.
+            for (int i = 0; i < 3; i++)
+            {
+                for (int j = 0; j < 11; j++)
+                {
+                    AddDoc(writer);
+                }
+                IndexReader r = DirectoryReader.Open(dir);
+                searcher = NewSearcher(r);
+                hits = searcher.Search(new TermQuery(searchTerm), null, 1000).ScoreDocs;
+                Assert.AreEqual(14, hits.Length, "reader incorrectly sees changes from writer");
+                r.Dispose();
+                Assert.IsTrue(reader.IsCurrent, "reader should have still been current");
+            }
+
+            // Now, close the writer:
+            writer.Dispose();
+            Assert.IsFalse(reader.IsCurrent, "reader should not be current now");
+
+            // After the commit-on-close, a new reader sees all 14 + 33 = 47 docs.
+            IndexReader ir = DirectoryReader.Open(dir);
+            searcher = NewSearcher(ir);
+            hits = searcher.Search(new TermQuery(searchTerm), null, 1000).ScoreDocs;
+            Assert.AreEqual(47, hits.Length, "reader did not see changes after writer was closed");
+            ir.Dispose();
+            reader.Dispose();
+            dir.Dispose();
+        }
+
+        /*
+         * Simple test for "commit on close": open writer, then
+         * add a bunch of docs, making sure reader does not see
+         * them until writer has closed.  Then instead of
+         * closing the writer, call abort and verify reader sees
+         * nothing was added.  Then verify we can open the index
+         * and add docs to it.
+         */
+
+        [Test]
+        public virtual void TestCommitOnCloseAbort()
+        {
+            Directory dir = NewDirectory();
+            IndexWriter writer = new IndexWriter(dir, (IndexWriterConfig)NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMaxBufferedDocs(10));
+            // Commit 14 docs via close so the index has a known baseline.
+            for (int i = 0; i < 14; i++)
+            {
+                AddDoc(writer);
+            }
+            writer.Dispose();
+
+            Term searchTerm = new Term("content", "aaa");
+            IndexReader reader = DirectoryReader.Open(dir);
+            IndexSearcher searcher = NewSearcher(reader);
+            ScoreDoc[] hits = searcher.Search(new TermQuery(searchTerm), null, 1000).ScoreDocs;
+            Assert.AreEqual(14, hits.Length, "first number of hits");
+            reader.Dispose();
+
+            writer = new IndexWriter(dir, (IndexWriterConfig)NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode.APPEND).SetMaxBufferedDocs(10));
+            for (int j = 0; j < 17; j++)
+            {
+                AddDoc(writer);
+            }
+            // Delete all docs:
+            writer.DeleteDocuments(searchTerm);
+
+            // Uncommitted adds + deletes must be invisible to a new reader.
+            reader = DirectoryReader.Open(dir);
+            searcher = NewSearcher(reader);
+            hits = searcher.Search(new TermQuery(searchTerm), null, 1000).ScoreDocs;
+            Assert.AreEqual(14, hits.Length, "reader incorrectly sees changes from writer");
+            reader.Dispose();
+
+            // Now, abort (rollback) the writer instead of closing it;
+            // all uncommitted changes above are discarded:
+            writer.Rollback();
+
+            TestIndexWriter.AssertNoUnreferencedFiles(dir, "unreferenced files remain after rollback()");
+
+            reader = DirectoryReader.Open(dir);
+            searcher = NewSearcher(reader);
+            hits = searcher.Search(new TermQuery(searchTerm), null, 1000).ScoreDocs;
+            Assert.AreEqual(14, hits.Length, "saw changes after writer.abort");
+            reader.Dispose();
+
+            // Now make sure we can re-open the index, add docs,
+            // and all is good:
+            writer = new IndexWriter(dir, (IndexWriterConfig)NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode.APPEND).SetMaxBufferedDocs(10));
+
+            // On abort, writer in fact may write to the same
+            // segments_N file:
+            if (dir is MockDirectoryWrapper)
+            {
+                ((MockDirectoryWrapper)dir).PreventDoubleWrite = false;
+            }
+
+            // 12 * 17 = 204 more docs, never visible until the final close.
+            for (int i = 0; i < 12; i++)
+            {
+                for (int j = 0; j < 17; j++)
+                {
+                    AddDoc(writer);
+                }
+                IndexReader r = DirectoryReader.Open(dir);
+                searcher = NewSearcher(r);
+                hits = searcher.Search(new TermQuery(searchTerm), null, 1000).ScoreDocs;
+                Assert.AreEqual(14, hits.Length, "reader incorrectly sees changes from writer");
+                r.Dispose();
+            }
+
+            writer.Dispose();
+            // 14 baseline + 204 new = 218 after commit-on-close.
+            IndexReader ir = DirectoryReader.Open(dir);
+            searcher = NewSearcher(ir);
+            hits = searcher.Search(new TermQuery(searchTerm), null, 1000).ScoreDocs;
+            Assert.AreEqual(218, hits.Length, "didn't see changes after close");
+            ir.Dispose();
+
+            dir.Dispose();
+        }
+
+        /*
+         * Verify that a writer with "commit on close" indeed
+         * cleans up the temp segments created after opening
+         * that are not referenced by the starting segments
+         * file.  We check this by using MockDirectoryWrapper to
+         * measure max temp disk space used.
+         */
+
+        [Test]
+        public virtual void TestCommitOnCloseDiskUsage()
+        {
+            // MemoryCodec, since it uses FST, is not necessarily
+            // "additive", ie if you add up N small FSTs, then merge
+            // them, the merged result can easily be larger than the
+            // sum because the merged FST may use array encoding for
+            // some arcs (which uses more space):
+
+            string idFormat = TestUtil.GetPostingsFormat("id");
+            string contentFormat = TestUtil.GetPostingsFormat("content");
+            AssumeFalse("this test cannot run with Memory codec", idFormat.Equals("Memory") || contentFormat.Equals("Memory"));
+            MockDirectoryWrapper dir = NewMockDirectory();
+            // Randomly pick an analyzer with or without fixed-length payloads
+            // so disk usage is measured under both token-stream shapes.
+            Analyzer analyzer;
+            if (Random().NextBoolean())
+            {
+                // no payloads
+                analyzer = new AnalyzerAnonymousInnerClassHelper(this);
+            }
+            else
+            {
+                // fixed length payloads
+                int length = Random().Next(200);
+                analyzer = new AnalyzerAnonymousInnerClassHelper2(this, length);
+            }
+
+            // Build a small 30-doc baseline index, then measure peak disk
+            // usage while appending 1470 docs and force-merging to 1 segment.
+            IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, analyzer).SetMaxBufferedDocs(10).SetReaderPooling(false).SetMergePolicy(NewLogMergePolicy(10)));
+            for (int j = 0; j < 30; j++)
+            {
+                AddDocWithIndex(writer, j);
+            }
+            writer.Dispose();
+            dir.ResetMaxUsedSizeInBytes();
+
+            dir.TrackDiskUsage = true;
+            long startDiskUsage = dir.MaxUsedSizeInBytes;
+            writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, analyzer).SetOpenMode(OpenMode.APPEND).SetMaxBufferedDocs(10).SetMergeScheduler(new SerialMergeScheduler()).SetReaderPooling(false).SetMergePolicy(NewLogMergePolicy(10)));
+            for (int j = 0; j < 1470; j++)
+            {
+                AddDocWithIndex(writer, j);
+            }
+            long midDiskUsage = dir.MaxUsedSizeInBytes;
+            dir.ResetMaxUsedSizeInBytes();
+            writer.ForceMerge(1);
+            writer.Dispose();
+
+            DirectoryReader.Open(dir).Dispose();
+
+            long endDiskUsage = dir.MaxUsedSizeInBytes;
+
+            // Ending index is 50X as large as starting index; due
+            // to 3X disk usage normally we allow 150X max
+            // transient usage.  If something is wrong w/ deleter
+            // and it doesn't delete intermediate segments then it
+            // will exceed this 150X:
+            // System.out.println("start " + startDiskUsage + "; mid " + midDiskUsage + ";end " + endDiskUsage);
+            Assert.IsTrue(midDiskUsage < 150 * startDiskUsage, "writer used too much space while adding documents: mid=" + midDiskUsage + " start=" + startDiskUsage + " end=" + endDiskUsage + " max=" + (startDiskUsage * 150));
+            Assert.IsTrue(endDiskUsage < 150 * startDiskUsage, "writer used too much space after close: endDiskUsage=" + endDiskUsage + " startDiskUsage=" + startDiskUsage + " max=" + (startDiskUsage * 150));
+            dir.Dispose();
+        }
+
+        /// <summary>
+        /// Whitespace-tokenizing analyzer with no payloads; ported from an
+        /// anonymous inner class in the original Java test.
+        /// </summary>
+        private class AnalyzerAnonymousInnerClassHelper : Analyzer
+        {
+            private readonly TestIndexWriterCommit OuterInstance;
+
+            public AnalyzerAnonymousInnerClassHelper(TestIndexWriterCommit outerInstance)
+            {
+                this.OuterInstance = outerInstance;
+            }
+
+            protected internal override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
+            {
+                return new TokenStreamComponents(new MockTokenizer(reader, MockTokenizer.WHITESPACE, true));
+            }
+        }
+
+        /// <summary>
+        /// Whitespace-tokenizing analyzer that attaches fixed-length payloads
+        /// (of the given <paramref name="length"/>) to every token; ported
+        /// from an anonymous inner class in the original Java test.
+        /// </summary>
+        private class AnalyzerAnonymousInnerClassHelper2 : Analyzer
+        {
+            private readonly TestIndexWriterCommit OuterInstance;
+
+            // Payload length in bytes, fixed at construction time.
+            private int Length;
+
+            public AnalyzerAnonymousInnerClassHelper2(TestIndexWriterCommit outerInstance, int length)
+            {
+                this.OuterInstance = outerInstance;
+                this.Length = length;
+            }
+
+            protected internal override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
+            {
+                Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, true);
+                return new TokenStreamComponents(tokenizer, new MockFixedLengthPayloadFilter(Random(), tokenizer, Length));
+            }
+        }
+
+        /*
+         * Verify that calling forceMerge when writer is open for
+         * "commit on close" works correctly both for rollback()
+         * and close().
+         */
+
+        [Test]
+        public virtual void TestCommitOnCloseForceMerge()
+        {
+            Directory dir = NewDirectory();
+            // Must disable throwing exc on double-write: this
+            // test uses IW.rollback which easily results in
+            // writing to same file more than once
+            if (dir is MockDirectoryWrapper)
+            {
+                ((MockDirectoryWrapper)dir).PreventDoubleWrite = false;
+            }
+            // Build a multi-segment index (17 docs, merge factor 10).
+            IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMaxBufferedDocs(10).SetMergePolicy(NewLogMergePolicy(10)));
+            for (int j = 0; j < 17; j++)
+            {
+                AddDocWithIndex(writer, j);
+            }
+            writer.Dispose();
+
+            writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode.APPEND));
+            writer.ForceMerge(1);
+
+            // Open a reader before closing (committing) the writer:
+            DirectoryReader reader = DirectoryReader.Open(dir);
+
+            // Reader should see index as multi-seg at this
+            // point:
+            Assert.IsTrue(reader.Leaves.Count > 1, "Reader incorrectly sees one segment");
+            reader.Dispose();
+
+            // Abort the writer: the uncommitted merge must be discarded.
+            writer.Rollback();
+            TestIndexWriter.AssertNoUnreferencedFiles(dir, "aborted writer after forceMerge");
+
+            // Open a reader after aborting writer:
+            reader = DirectoryReader.Open(dir);
+
+            // Reader should still see index as multi-segment
+            Assert.IsTrue(reader.Leaves.Count > 1, "Reader incorrectly sees one segment");
+            reader.Dispose();
+
+            if (VERBOSE)
+            {
+                Console.WriteLine("TEST: do real full merge");
+            }
+            // This time commit the merge by closing the writer normally.
+            writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode.APPEND));
+            writer.ForceMerge(1);
+            writer.Dispose();
+
+            if (VERBOSE)
+            {
+                Console.WriteLine("TEST: writer closed");
+            }
+            TestIndexWriter.AssertNoUnreferencedFiles(dir, "aborted writer after forceMerge");
+
+            // Open a reader after aborting writer:
+            reader = DirectoryReader.Open(dir);
+
+            // Reader should see index as one segment
+            Assert.AreEqual(1, reader.Leaves.Count, "Reader incorrectly sees more than one segment");
+            reader.Dispose();
+            dir.Dispose();
+        }
+
+        // LUCENE-2095: make sure with multiple threads commit
+        // doesn't return until all changes are in fact in the
+        // index
+        [Test]
+        public virtual void TestCommitThreadSafety()
+        {
+            const int NUM_THREADS = 5;
+            const double RUN_SEC = 0.5;
+            var dir = NewDirectory();
+            var w = new RandomIndexWriter(Random(), dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMergePolicy(NewLogMergePolicy()));
+            TestUtil.ReduceOpenFiles(w.w);
+            // Initial commit so worker threads can open a reader immediately.
+            w.Commit();
+            var failed = new AtomicBoolean();
+            var threads = new ThreadClass[NUM_THREADS];
+            // NOTE(review): Environment.TickCount is a 32-bit counter that
+            // wraps (~24.9 days of uptime); the Java original used
+            // System.currentTimeMillis(). Harmless for a 0.5s test window,
+            // but worth confirming if this pattern is copied elsewhere.
+            long endTime = Environment.TickCount + ((long)(RUN_SEC * 1000));
+            for (int i = 0; i < NUM_THREADS; i++)
+            {
+                int finalI = i;
+                threads[i] = new ThreadAnonymousInnerClassHelper(dir, w, failed, endTime, finalI, NewStringField);
+                threads[i].Start();
+            }
+            for (int i = 0; i < NUM_THREADS; i++)
+            {
+                threads[i].Join();
+            }
+            // Any worker that observed a stale reader sets 'failed'.
+            Assert.IsFalse(failed.Get());
+            w.Dispose();
+            dir.Dispose();
+        }
+
+        /// <summary>
+        /// Worker thread for <see cref="TestCommitThreadSafety"/>: repeatedly
+        /// adds a doc, commits, reopens its reader, and asserts the just-added
+        /// term is visible — verifying commit() does not return before changes
+        /// are searchable.
+        /// </summary>
+        private class ThreadAnonymousInnerClassHelper : ThreadClass
+        {
+            private readonly Func<string, string, Field.Store, Field> NewStringField;
+            private Directory Dir;
+            private RandomIndexWriter w;
+            private AtomicBoolean Failed;
+            private long EndTime;
+            // Per-thread id, used to make each indexed term value unique.
+            private int FinalI;
+
+            /// <param name="newStringField">
+            /// LUCENENET specific
+            /// This is passed in because <see cref="LuceneTestCase.NewStringField(string, string, Field.Store)"/>
+            /// is no longer static.
+            /// </param>
+            public ThreadAnonymousInnerClassHelper(Directory dir, RandomIndexWriter w, AtomicBoolean failed, long endTime, int finalI, Func<string, string, Field.Store, Field> newStringField)
+            {
+                NewStringField = newStringField;
+                this.Dir = dir;
+                this.w = w;
+                this.Failed = failed;
+                this.EndTime = endTime;
+                this.FinalI = finalI;
+            }
+
+            public override void Run()
+            {
+                try
+                {
+                    Document doc = new Document();
+                    DirectoryReader r = DirectoryReader.Open(Dir);
+                    Field f = NewStringField("f", "", Field.Store.NO);
+                    doc.Add(f);
+                    int count = 0;
+                    do
+                    {
+                        // Bail out early if another worker already failed.
+                        if (Failed.Get())
+                        {
+                            break;
+                        }
+                        for (int j = 0; j < 10; j++)
+                        {
+                            // Term value is "<threadId>_<counter>", unique per add.
+                            string s = FinalI + "_" + Convert.ToString(count++);
+                            f.SetStringValue(s);
+                            w.AddDocument(doc);
+                            w.Commit();
+                            // After commit, OpenIfChanged must return a new reader
+                            // in which the just-committed term is visible.
+                            DirectoryReader r2 = DirectoryReader.OpenIfChanged(r);
+                            Assert.IsNotNull(r2);
+                            Assert.IsTrue(!r2.Equals(r));
+                            r.Dispose();
+                            r = r2;
+                            Assert.AreEqual(1, r.DocFreq(new Term("f", s)), "term=f:" + s + "; r=" + r);
+                        }
+                    } while (Environment.TickCount < EndTime);
+                    r.Dispose();
+                }
+                catch (Exception t)
+                {
+                    // Flag the failure for the main thread, then rethrow.
+                    // NOTE(review): wrapping in a new Exception loses the original
+                    // exception type (only the message and InnerException survive).
+                    Failed.Set(true);
+                    throw new Exception(t.Message, t);
+                }
+            }
+        }
+
+        // LUCENE-1044: test writer.Commit() when ac=false
+        [Test]
+        public virtual void TestForceCommit()
+        {
+            Directory dir = NewDirectory();
+
+            IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMaxBufferedDocs(2).SetMergePolicy(NewLogMergePolicy(5)));
+            writer.Commit();
+
+            for (int i = 0; i < 23; i++)
+            {
+                AddDoc(writer);
+            }
+
+            // Reader opened against the (empty) initial commit sees 0 docs.
+            DirectoryReader reader = DirectoryReader.Open(dir);
+            Assert.AreEqual(0, reader.NumDocs);
+            writer.Commit();
+            // After the explicit commit, reopening picks up the 23 docs,
+            // while the old reader instance keeps its original point-in-time view.
+            DirectoryReader reader2 = DirectoryReader.OpenIfChanged(reader);
+            Assert.IsNotNull(reader2);
+            Assert.AreEqual(0, reader.NumDocs);
+            Assert.AreEqual(23, reader2.NumDocs);
+            reader.Dispose();
+
+            // 17 more uncommitted docs stay invisible to reader2 and to a
+            // freshly opened reader.
+            for (int i = 0; i < 17; i++)
+            {
+                AddDoc(writer);
+            }
+            Assert.AreEqual(23, reader2.NumDocs);
+            reader2.Dispose();
+            reader = DirectoryReader.Open(dir);
+            Assert.AreEqual(23, reader.NumDocs);
+            reader.Dispose();
+            writer.Commit();
+
+            // Second commit makes all 23 + 17 = 40 docs visible.
+            reader = DirectoryReader.Open(dir);
+            Assert.AreEqual(40, reader.NumDocs);
+            reader.Dispose();
+            writer.Dispose();
+            dir.Dispose();
+        }
+
+        [Test]
+        public virtual void TestFutureCommit()
+        {
+            Directory dir = NewDirectory();
+
+            // NoDeletionPolicy keeps every commit point alive so we can open
+            // the index at an older commit and verify newer ones survive.
+            IndexWriter w = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetIndexDeletionPolicy(NoDeletionPolicy.INSTANCE));
+            Document doc = new Document();
+            w.AddDocument(doc);
+
+            // commit to "first"
+            IDictionary<string, string> commitData = new Dictionary<string, string>();
+            commitData["tag"] = "first";
+            w.CommitData = commitData;
+            w.Commit();
+
+            // commit to "second"
+            w.AddDocument(doc);
+            commitData["tag"] = "second";
+            w.CommitData = commitData;
+            w.Dispose();
+
+            // open "first" with IndexWriter
+            IndexCommit commit = null;
+            foreach (IndexCommit c in DirectoryReader.ListCommits(dir))
+            {
+                if (c.UserData["tag"].Equals("first"))
+                {
+                    commit = c;
+                    break;
+                }
+            }
+
+            Assert.IsNotNull(commit);
+
+            w = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetIndexDeletionPolicy(NoDeletionPolicy.INSTANCE).SetIndexCommit(commit));
+
+            // Opened at "first", the writer sees only the 1 doc from that commit.
+            Assert.AreEqual(1, w.NumDocs);
+
+            // commit IndexWriter to "third"
+            w.AddDocument(doc);
+            commitData["tag"] = "third";
+            w.CommitData = commitData;
+            w.Dispose();
+
+            // make sure "second" commit is still there
+            commit = null;
+            foreach (IndexCommit c in DirectoryReader.ListCommits(dir))
+            {
+                if (c.UserData["tag"].Equals("second"))
+                {
+                    commit = c;
+                    break;
+                }
+            }
+
+            Assert.IsNotNull(commit);
+
+            dir.Dispose();
+        }
+
+        [Test]
+        public virtual void TestZeroCommits()
+        {
+            // Tests that if we don't call commit(), the directory has 0 commits. this has
+            // changed since LUCENE-2386, where before IW would always commit on a fresh
+            // new index.
+            Directory dir = NewDirectory();
+            IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())));
+            try
+            {
+                DirectoryReader.ListCommits(dir);
+                Assert.Fail("listCommits should have thrown an exception over empty index");
+            }
+#pragma warning disable 168
+            catch (IndexNotFoundException e)
+#pragma warning restore 168
+            {
+                // that's expected !
+            }
+            // Closing the writer should still produce a commit even with no
+            // changes, because this is a brand-new index.
+            writer.Dispose();
+            Assert.AreEqual(1, DirectoryReader.ListCommits(dir).Count, "expected 1 commits!");
+            dir.Dispose();
+        }
+
+        // LUCENE-1274: test writer.PrepareCommit()
+        [Test]
+        public virtual void TestPrepareCommit()
+        {
+            Directory dir = NewDirectory();
+
+            IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMaxBufferedDocs(2).SetMergePolicy(NewLogMergePolicy(5)));
+            writer.Commit();
+
+            for (int i = 0; i < 23; i++)
+            {
+                AddDoc(writer);
+            }
+
+            DirectoryReader reader = DirectoryReader.Open(dir);
+            Assert.AreEqual(0, reader.NumDocs);
+
+            // prepareCommit() alone (phase 1 of two-phase commit) must not
+            // make the 23 docs visible to readers.
+            writer.PrepareCommit();
+
+            IndexReader reader2 = DirectoryReader.Open(dir);
+            Assert.AreEqual(0, reader2.NumDocs);
+
+            // commit() finishes the two-phase commit; only now do readers
+            // (re)opened afterwards see the docs.
+            writer.Commit();
+
+            IndexReader reader3 = DirectoryReader.OpenIfChanged(reader);
+            Assert.IsNotNull(reader3);
+            Assert.AreEqual(0, reader.NumDocs);
+            Assert.AreEqual(0, reader2.NumDocs);
+            Assert.AreEqual(23, reader3.NumDocs);
+            reader.Dispose();
+            reader2.Dispose();
+
+            for (int i = 0; i < 17; i++)
+            {
+                AddDoc(writer);
+            }
+
+            Assert.AreEqual(23, reader3.NumDocs);
+            reader3.Dispose();
+            reader = DirectoryReader.Open(dir);
+            Assert.AreEqual(23, reader.NumDocs);
+            reader.Dispose();
+
+            // Same invariant for the second batch: prepared-but-uncommitted
+            // changes remain invisible.
+            writer.PrepareCommit();
+
+            reader = DirectoryReader.Open(dir);
+            Assert.AreEqual(23, reader.NumDocs);
+            reader.Dispose();
+
+            writer.Commit();
+            reader = DirectoryReader.Open(dir);
+            Assert.AreEqual(40, reader.NumDocs);
+            reader.Dispose();
+            writer.Dispose();
+            dir.Dispose();
+        }
+
+        // LUCENE-1274: test writer.PrepareCommit()
+        [Test]
+        public virtual void TestPrepareCommitRollback()
+        {
+            Directory dir = NewDirectory();
+            // rollback() may rewrite the same segments_N file; disable the
+            // MockDirectoryWrapper double-write check.
+            if (dir is MockDirectoryWrapper)
+            {
+                ((MockDirectoryWrapper)dir).PreventDoubleWrite = false;
+            }
+
+            IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMaxBufferedDocs(2).SetMergePolicy(NewLogMergePolicy(5)));
+            writer.Commit();
+
+            for (int i = 0; i < 23; i++)
+            {
+                AddDoc(writer);
+            }
+
+            DirectoryReader reader = DirectoryReader.Open(dir);
+            Assert.AreEqual(0, reader.NumDocs);
+
+            writer.PrepareCommit();
+
+            IndexReader reader2 = DirectoryReader.Open(dir);
+            Assert.AreEqual(0, reader2.NumDocs);
+
+            // Roll back the prepared (but not finished) commit: the 23 docs
+            // must be discarded, so the original reader is still current.
+            writer.Rollback();
+
+            IndexReader reader3 = DirectoryReader.OpenIfChanged(reader);
+            Assert.IsNull(reader3);
+            Assert.AreEqual(0, reader.NumDocs);
+            Assert.AreEqual(0, reader2.NumDocs);
+            reader.Dispose();
+            reader2.Dispose();
+
+            // Rollback closed the writer; open a fresh one and verify the
+            // index is still usable.
+            writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())));
+            for (int i = 0; i < 17; i++)
+            {
+                AddDoc(writer);
+            }
+
+            reader = DirectoryReader.Open(dir);
+            Assert.AreEqual(0, reader.NumDocs);
+            reader.Dispose();
+
+            writer.PrepareCommit();
+
+            reader = DirectoryReader.Open(dir);
+            Assert.AreEqual(0, reader.NumDocs);
+            reader.Dispose();
+
+            // Only the 17 post-rollback docs survive; the rolled-back 23 do not.
+            writer.Commit();
+            reader = DirectoryReader.Open(dir);
+            Assert.AreEqual(17, reader.NumDocs);
+            reader.Dispose();
+            writer.Dispose();
+            dir.Dispose();
+        }
+
+        // LUCENE-1274
+        [Test]
+        public virtual void TestPrepareCommitNoChanges()
+        {
+            Directory dir = NewDirectory();
+
+            // prepareCommit()/commit() on a brand-new writer with no documents
+            // must succeed and leave a readable, empty index.
+            IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())));
+            writer.PrepareCommit();
+            writer.Commit();
+            writer.Dispose();
+
+            IndexReader reader = DirectoryReader.Open(dir);
+            Assert.AreEqual(0, reader.NumDocs);
+            reader.Dispose();
+            dir.Dispose();
+        }
+
+        // LUCENE-1382
+        [Test]
+        public virtual void TestCommitUserData()
+        {
+            Directory dir = NewDirectory();
+            IndexWriter w = new IndexWriter(dir, (IndexWriterConfig)NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMaxBufferedDocs(2));
+            for (int j = 0; j < 17; j++)
+            {
+                AddDoc(w);
+            }
+            w.Dispose();
+
+            DirectoryReader r = DirectoryReader.Open(dir);
+            // commit(Map) never called for this index, so UserData is empty.
+            Assert.AreEqual(0, r.IndexCommit.UserData.Count);
+            r.Dispose();
+
+            // Attach user data via CommitData and verify it round-trips
+            // through the commit and back out of IndexCommit.UserData.
+            w = new IndexWriter(dir, (IndexWriterConfig)NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMaxBufferedDocs(2));
+            for (int j = 0; j < 17; j++)
+            {
+                AddDoc(w);
+            }
+            IDictionary<string, string> data = new Dictionary<string, string>();
+            data["label"] = "test1";
+            w.CommitData = data;
+            w.Dispose();
+
+            r = DirectoryReader.Open(dir);
+            Assert.AreEqual("test1", r.IndexCommit.UserData["label"]);
+            r.Dispose();
+
+            // A force-merge commit without new CommitData must not fail.
+            w = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())));
+            w.ForceMerge(1);
+            w.Dispose();
+
+            dir.Dispose();
+        }
+
+        /// <summary>
+        /// LUCENENET specific
+        /// Copied from <see cref="TestIndexWriter.AddDoc(IndexWriter)"/>
+        /// to remove inter-class dependency on <see cref="TestIndexWriter"/>
+        /// </summary>
+        private void AddDoc(IndexWriter writer)
+        {
+            // Every doc carries the same "content":"aaa" term, which the
+            // tests above search for to count visible documents.
+            Document doc = new Document();
+            doc.Add(NewTextField("content", "aaa", Field.Store.NO));
+            writer.AddDocument(doc);
+        }
+
+        /// <summary>
+        /// LUCENENET specific
+        /// Copied from <seealso cref="TestIndexWriter.AddDocWithIndex(IndexWriter, int)"/>
+        /// to remove inter-class dependency on <see cref="TestIndexWriter"/>.
+        /// </summary>
+        private void AddDocWithIndex(IndexWriter writer, int index)
+        {
+            // Indexes the given ordinal into both "content" and "id" fields;
+            // both use the shared unstored-text FieldType (StoredTextType).
+            Document doc = new Document();
+            doc.Add(NewField("content", "aaa " + index, StoredTextType));
+            doc.Add(NewField("id", "" + index, StoredTextType));
+            writer.AddDocument(doc);
+        }
+
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/96822396/src/Lucene.Net.Tests/Index/TestIndexWriterConfig.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests/Index/TestIndexWriterConfig.cs b/src/Lucene.Net.Tests/Index/TestIndexWriterConfig.cs
new file mode 100644
index 0000000..3dcb3ff
--- /dev/null
+++ b/src/Lucene.Net.Tests/Index/TestIndexWriterConfig.cs
@@ -0,0 +1,539 @@
+using Lucene.Net.Documents;
+using Lucene.Net.Util;
+using System.Collections.Generic;
+using System.Reflection;
+
+namespace Lucene.Net.Index
+{
+    //using AlreadySetException = Lucene.Net.Util.SetOnce.AlreadySetException;
+    using NUnit.Framework;
+    using Codec = Lucene.Net.Codecs.Codec;
+    using DefaultSimilarity = Lucene.Net.Search.Similarities.DefaultSimilarity;
+    using Directory = Lucene.Net.Store.Directory;
+    using Document = Documents.Document;
+    using IndexingChain = Lucene.Net.Index.DocumentsWriterPerThread.IndexingChain;
+    using IndexSearcher = Lucene.Net.Search.IndexSearcher;
+    using InfoStream = Lucene.Net.Util.InfoStream;
+    using LuceneTestCase = Lucene.Net.Util.LuceneTestCase;
+
+    /*
+         * Licensed to the Apache Software Foundation (ASF) under one or more
+         * contributor license agreements.  See the NOTICE file distributed with
+         * this work for additional information regarding copyright ownership.
+         * The ASF licenses this file to You under the Apache License, Version 2.0
+         * (the "License"); you may not use this file except in compliance with
+         * the License.  You may obtain a copy of the License at
+         *
+         *     http://www.apache.org/licenses/LICENSE-2.0
+         *
+         * Unless required by applicable law or agreed to in writing, software
+         * distributed under the License is distributed on an "AS IS" BASIS,
+         * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+         * See the License for the specific language governing permissions and
+         * limitations under the License.
+         */
+
+    using MockAnalyzer = Lucene.Net.Analysis.MockAnalyzer;
+    using Store = Field.Store;
+
+    [TestFixture]
+    public class TestIndexWriterConfig : LuceneTestCase
+    {
+        /// <summary>
+        /// Marker subclass used by <see cref="TestInvalidValues"/> to verify that
+        /// IndexWriterConfig.SetSimilarity round-trips a custom Similarity type.
+        /// </summary>
+        private sealed class MySimilarity : DefaultSimilarity
+        {
+            // Does not implement anything - used only for type checking on IndexWriterConfig.
+        }
+
+        /// <summary>
+        /// Marker indexing chain used by <see cref="TestInvalidValues"/> to verify that
+        /// IndexWriterConfig.SetIndexingChain round-trips a custom IndexingChain type.
+        /// GetChain deliberately returns null; it is never invoked by these tests.
+        /// </summary>
+        private sealed class MyIndexingChain : IndexingChain
+        {
+            // Does not implement anything - used only for type checking on IndexWriterConfig.
+            internal override DocConsumer GetChain(DocumentsWriterPerThread documentsWriter)
+            {
+                return null;
+            }
+        }
+
+        [Test]
+        public virtual void TestDefaults()
+        {
+            // Asserts the default value of every IndexWriterConfig setting
+            // immediately after construction, then uses reflection to make sure
+            // no getter was left out of the assertions above.
+            IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()));
+            Assert.AreEqual(typeof(MockAnalyzer), conf.Analyzer.GetType());
+            Assert.IsNull(conf.IndexCommit);
+            Assert.AreEqual(typeof(KeepOnlyLastCommitDeletionPolicy), conf.IndexDeletionPolicy.GetType());
+#if FEATURE_TASKMERGESCHEDULER
+            Assert.AreEqual(typeof(TaskMergeScheduler), conf.MergeScheduler.GetType());
+#else
+            Assert.AreEqual(typeof(ConcurrentMergeScheduler), conf.MergeScheduler.GetType());
+#endif
+            Assert.AreEqual(OpenMode.CREATE_OR_APPEND, conf.OpenMode);
+            // we don't need to assert this, it should be unspecified
+            Assert.IsTrue(IndexSearcher.DefaultSimilarity == conf.Similarity);
+            Assert.AreEqual(IndexWriterConfig.DEFAULT_TERM_INDEX_INTERVAL, conf.TermIndexInterval);
+            Assert.AreEqual(IndexWriterConfig.DefaultWriteLockTimeout, conf.WriteLockTimeout);
+            Assert.AreEqual(IndexWriterConfig.WRITE_LOCK_TIMEOUT, IndexWriterConfig.DefaultWriteLockTimeout);
+            Assert.AreEqual(IndexWriterConfig.DEFAULT_MAX_BUFFERED_DELETE_TERMS, conf.MaxBufferedDeleteTerms);
+            Assert.AreEqual(IndexWriterConfig.DEFAULT_RAM_BUFFER_SIZE_MB, conf.RAMBufferSizeMB, 0.0);
+            Assert.AreEqual(IndexWriterConfig.DEFAULT_MAX_BUFFERED_DOCS, conf.MaxBufferedDocs);
+            Assert.AreEqual(IndexWriterConfig.DEFAULT_READER_POOLING, conf.ReaderPooling);
+            Assert.IsTrue(DocumentsWriterPerThread.DefaultIndexingChain == conf.IndexingChain);
+            Assert.IsNull(conf.MergedSegmentWarmer);
+            Assert.AreEqual(IndexWriterConfig.DEFAULT_READER_TERMS_INDEX_DIVISOR, conf.ReaderTermsIndexDivisor);
+            Assert.AreEqual(typeof(TieredMergePolicy), conf.MergePolicy.GetType());
+            Assert.AreEqual(typeof(ThreadAffinityDocumentsWriterThreadPool), conf.IndexerThreadPool.GetType());
+            Assert.AreEqual(typeof(FlushByRamOrCountsPolicy), conf.FlushPolicy.GetType());
+            Assert.AreEqual(IndexWriterConfig.DEFAULT_RAM_PER_THREAD_HARD_LIMIT_MB, conf.RAMPerThreadHardLimitMB);
+            Assert.AreEqual(Codec.Default, conf.Codec);
+            Assert.AreEqual(InfoStream.Default, conf.InfoStream);
+            Assert.AreEqual(IndexWriterConfig.DEFAULT_USE_COMPOUND_FILE_SYSTEM, conf.UseCompoundFile);
+            // Sanity check - validate that all getters are covered.
+            HashSet<string> getters = new HashSet<string>();
+            getters.Add("getAnalyzer");
+            getters.Add("getIndexCommit");
+            getters.Add("getIndexDeletionPolicy");
+            getters.Add("getMaxFieldLength");
+            getters.Add("getMergeScheduler");
+            getters.Add("getOpenMode");
+            getters.Add("getSimilarity");
+            getters.Add("getTermIndexInterval");
+            getters.Add("getWriteLockTimeout");
+            getters.Add("getDefaultWriteLockTimeout");
+            getters.Add("getMaxBufferedDeleteTerms");
+            getters.Add("getRAMBufferSizeMB");
+            getters.Add("getMaxBufferedDocs");
+            getters.Add("getIndexingChain");
+            getters.Add("getMergedSegmentWarmer");
+            getters.Add("getMergePolicy");
+            getters.Add("getMaxThreadStates");
+            getters.Add("getReaderPooling");
+            getters.Add("getIndexerThreadPool");
+            getters.Add("getReaderTermsIndexDivisor");
+            getters.Add("getFlushPolicy");
+            getters.Add("getRAMPerThreadHardLimitMB");
+            getters.Add("getCodec");
+            getters.Add("getInfoStream");
+            getters.Add("getUseCompoundFile");
+
+            // NOTE(review): the set above holds Java-style "getXxx" names, but C#
+            // property getters are compiler-named "get_Xxx" and are excluded by the
+            // !StartsWith("get_") filter below — confirm this loop still matches
+            // any method at all after the port, or the sanity check is a no-op.
+            foreach (MethodInfo m in typeof(IndexWriterConfig).GetMethods())
+            {
+                if (m.DeclaringType == typeof(IndexWriterConfig) && m.Name.StartsWith("get") && !m.Name.StartsWith("get_"))
+                {
+                    Assert.IsTrue(getters.Contains(m.Name), "method " + m.Name + " is not tested for defaults");
+                }
+            }
+        }
+
+        [Test]
+        public virtual void TestSettersChaining()
+        {
+            // Ensures that every setter returns IndexWriterConfig to allow chaining.
+            // Reflection pass: collect every non-static Set* method; setters declared
+            // on the LiveIndexWriterConfig base are recorded separately and verified
+            // afterwards to also appear on IndexWriterConfig.
+            HashSet<string> liveSetters = new HashSet<string>();
+            HashSet<string> allSetters = new HashSet<string>();
+            foreach (MethodInfo m in typeof(IndexWriterConfig).GetMethods())
+            {
+                if (m.Name.StartsWith("Set") && !m.IsStatic)
+                {
+                    allSetters.Add(m.Name);
+                    // setters overridden from LiveIndexWriterConfig are returned twice, once with
+                    // IndexWriterConfig return type and second with LiveIndexWriterConfig. The ones
+                    // from LiveIndexWriterConfig are marked 'synthetic', so just collect them and
+                    // assert in the end that we also received them from IWC.
+                    // In C# we do not have them marked synthetic so we look at the declaring type instead.
+                    if (m.DeclaringType.Name == "LiveIndexWriterConfig")
+                    {
+                        liveSetters.Add(m.Name);
+                    }
+                    else
+                    {
+                        Assert.AreEqual(typeof(IndexWriterConfig), m.ReturnType, "method " + m.Name + " does not return IndexWriterConfig");
+                    }
+                }
+            }
+            foreach (string setter in liveSetters)
+            {
+                Assert.IsTrue(allSetters.Contains(setter), "setter method not overridden by IndexWriterConfig: " + setter);
+            }
+        }
+
+        [Test]
+        public virtual void TestReuse()
+        {
+            // An IndexWriterConfig instance may be attached to only one writer;
+            // reusing it (even after Dispose of the first writer) must throw
+            // AlreadySetException, while cloning BEFORE first use is allowed.
+            Directory dir = NewDirectory();
+            // test that IWC cannot be reused across two IWs
+            IndexWriterConfig conf = NewIndexWriterConfig(TEST_VERSION_CURRENT, null);
+            (new RandomIndexWriter(Random(), dir, conf)).Dispose();
+
+            // this should fail
+            try
+            {
+                Assert.IsNotNull(new RandomIndexWriter(Random(), dir, conf));
+                Assert.Fail("should have hit AlreadySetException");
+            }
+#pragma warning disable 168
+            catch (SetOnce<IndexWriter>.AlreadySetException e)
+#pragma warning restore 168
+            {
+                // expected
+            }
+
+            // also cloning it won't help, after it has been used already
+            try
+            {
+                Assert.IsNotNull(new RandomIndexWriter(Random(), dir, (IndexWriterConfig)conf.Clone()))
;
+                Assert.Fail("should have hit AlreadySetException");
+            }
+#pragma warning disable 168
+            catch (SetOnce<IndexWriter>.AlreadySetException e)
+#pragma warning restore 168
+            {
+                // expected
+            }
+
+            // if it's cloned in advance, it should be ok
+            conf = NewIndexWriterConfig(TEST_VERSION_CURRENT, null);
+            (new RandomIndexWriter(Random(), dir, (IndexWriterConfig)conf.Clone())).Dispose();
+            (new RandomIndexWriter(Random(), dir, (IndexWriterConfig)conf.Clone())).Dispose();
+
+            dir.Dispose();
+        }
+
+        [Test]
+        public virtual void TestOverrideGetters()
+        {
+            // Test that IndexWriterConfig overrides all getters, so that javadocs
+            // contain all methods for the users. Also, ensures that IndexWriterConfig
+            // doesn't declare getters that are not declared on LiveIWC.
+            HashSet<string> liveGetters = new HashSet<string>();
+            foreach (MethodInfo m in typeof(LiveIndexWriterConfig).GetMethods())
+            {
+                if (m.Name.StartsWith("get") && !m.IsStatic)
+                {
+                    liveGetters.Add(m.Name);
+                }
+            }
+
+            foreach (MethodInfo m in typeof(IndexWriterConfig).GetMethods())
+            {
+                if (m.Name.StartsWith("get") && !m.Name.StartsWith("get_") && !m.IsStatic)
+                {
+                    // Fixed assertion-message grammar: "overrided" -> "overridden".
+                    Assert.AreEqual(typeof(IndexWriterConfig), m.DeclaringType, "method " + m.Name + " not overridden by IndexWriterConfig");
+                    Assert.IsTrue(liveGetters.Contains(m.Name), "method " + m.Name + " not declared on LiveIndexWriterConfig");
+                }
+            }
+        }
+
+        [Test]
+        public virtual void TestConstants()
+        {
+            // Tests that the values of the constants does not change
+            // (they are part of the public API contract).
+            Assert.AreEqual(1000, IndexWriterConfig.WRITE_LOCK_TIMEOUT);
+            Assert.AreEqual(32, IndexWriterConfig.DEFAULT_TERM_INDEX_INTERVAL);
+            Assert.AreEqual(-1, IndexWriterConfig.DISABLE_AUTO_FLUSH);
+            Assert.AreEqual(IndexWriterConfig.DISABLE_AUTO_FLUSH, IndexWriterConfig.DEFAULT_MAX_BUFFERED_DELETE_TERMS);
+            Assert.AreEqual(IndexWriterConfig.DISABLE_AUTO_FLUSH, IndexWriterConfig.DEFAULT_MAX_BUFFERED_DOCS);
+            Assert.AreEqual(16.0, IndexWriterConfig.DEFAULT_RAM_BUFFER_SIZE_MB, 0.0);
+            Assert.AreEqual(false, IndexWriterConfig.DEFAULT_READER_POOLING);
+            Assert.AreEqual(true, IndexWriterConfig.DEFAULT_USE_COMPOUND_FILE_SYSTEM);
+            Assert.AreEqual(DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR, IndexWriterConfig.DEFAULT_READER_TERMS_INDEX_DIVISOR);
+        }
+
+        [Test]
+        public virtual void TestToString()
+        {
+            // Every non-static, non-excluded instance field of IndexWriterConfig
+            // must be mentioned by name in its ToString() output.
+            string str = (new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()))).ToString();
+            foreach (System.Reflection.FieldInfo f in (typeof(IndexWriterConfig).GetFields(
+                BindingFlags.Instance |
+                BindingFlags.NonPublic |
+                BindingFlags.Public |
+                BindingFlags.DeclaredOnly |
+                BindingFlags.Static)))
+            {
+                if (f.IsStatic)
+                {
+                    // Skip static final fields, they are only constants
+                    continue;
+                }
+                else if ("indexingChain".Equals(f.Name))
+                {
+                    // indexingChain is a package-private setting and thus is not output by
+                    // toString.
+                    continue;
+                }
+                // NOTE(review): presumably a LUCENENET-internal bookkeeping flag
+                // that is intentionally absent from ToString() — confirm against
+                // IndexWriterConfig's implementation.
+                if (f.Name.Equals("inUseByIndexWriter"))
+                {
+                    continue;
+                }
+                Assert.IsTrue(str.IndexOf(f.Name) != -1, f.Name + " not found in toString");
+            }
+        }
+
+        [Test]
+        public virtual void TestClone()
+        {
+            // Cloning an IndexWriterConfig must deep-copy the non-reusable
+            // components, and later mutations of the original must not leak
+            // into the clone.
+            IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()));
+            IndexWriterConfig clone = (IndexWriterConfig)conf.Clone();
+
+            // Make sure parameters that can't be reused are cloned
+            // Each check accepts either a distinct cloned instance, or — when the
+            // instance is shared — presumably a Clone() that returns the same
+            // reference each time (verify against the component's Clone()).
+            IndexDeletionPolicy delPolicy = conf.IndexDeletionPolicy;
+            IndexDeletionPolicy delPolicyClone = clone.IndexDeletionPolicy;
+            Assert.IsTrue(delPolicy.GetType() == delPolicyClone.GetType() && (delPolicy != delPolicyClone || delPolicy.Clone() == delPolicyClone.Clone()));
+
+            FlushPolicy flushPolicy = conf.FlushPolicy;
+            FlushPolicy flushPolicyClone = clone.FlushPolicy;
+            Assert.IsTrue(flushPolicy.GetType() == flushPolicyClone.GetType() && (flushPolicy != flushPolicyClone || flushPolicy.Clone() == flushPolicyClone.Clone()));
+
+            DocumentsWriterPerThreadPool pool = conf.IndexerThreadPool;
+            DocumentsWriterPerThreadPool poolClone = clone.IndexerThreadPool;
+            Assert.IsTrue(pool.GetType() == poolClone.GetType() && (pool != poolClone || pool.Clone() == poolClone.Clone()));
+
+            MergePolicy mergePolicy = conf.MergePolicy;
+            MergePolicy mergePolicyClone = clone.MergePolicy;
+            Assert.IsTrue(mergePolicy.GetType() == mergePolicyClone.GetType() && (mergePolicy != mergePolicyClone || mergePolicy.Clone() == mergePolicyClone.Clone()));
+
+            IMergeScheduler mergeSched = conf.MergeScheduler;
+            IMergeScheduler mergeSchedClone = clone.MergeScheduler;
+            Assert.IsTrue(mergeSched.GetType() == mergeSchedClone.GetType() && (mergeSched != mergeSchedClone || mergeSched.Clone() == mergeSchedClone.Clone()));
+
+            // Mutating the original after cloning must leave the clone's
+            // merge scheduler at the default.
+            conf.SetMergeScheduler(new SerialMergeScheduler());
+#if FEATURE_TASKMERGESCHEDULER
+            Assert.AreEqual(typeof(TaskMergeScheduler), clone.MergeScheduler.GetType());
+#else
+            Assert.AreEqual(typeof(ConcurrentMergeScheduler), clone.MergeScheduler.GetType());
+#endif
+        }
+
+        [Test]
+        public virtual void TestInvalidValues()
+        {
+            // Each section below first verifies a valid value round-trips through
+            // its setter, then verifies that an invalid value (null, zero, or an
+            // out-of-range number) is rejected with ArgumentException. The order
+            // of the buffered-docs / RAM-buffer checks matters: disabling both
+            // flush triggers at once must fail.
+            IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()));
+
+            // Test IndexDeletionPolicy
+            Assert.AreEqual(typeof(KeepOnlyLastCommitDeletionPolicy), conf.IndexDeletionPolicy.GetType());
+            conf.SetIndexDeletionPolicy(new SnapshotDeletionPolicy(null));
+            Assert.AreEqual(typeof(SnapshotDeletionPolicy), conf.IndexDeletionPolicy.GetType());
+            try
+            {
+                conf.SetIndexDeletionPolicy(null);
+                Assert.Fail();
+            }
+#pragma warning disable 168
+            catch (System.ArgumentException e)
+#pragma warning restore 168
+            {
+                // ok
+            }
+
+            // Test MergeScheduler
+#if FEATURE_TASKMERGESCHEDULER
+            Assert.AreEqual(typeof(TaskMergeScheduler), conf.MergeScheduler.GetType());
+#else
+            Assert.AreEqual(typeof(ConcurrentMergeScheduler), conf.MergeScheduler.GetType());
+#endif
+            conf.SetMergeScheduler(new SerialMergeScheduler());
+            Assert.AreEqual(typeof(SerialMergeScheduler), conf.MergeScheduler.GetType());
+            try
+            {
+                conf.SetMergeScheduler(null);
+                Assert.Fail();
+            }
+#pragma warning disable 168
+            catch (System.ArgumentException e)
+#pragma warning restore 168
+            {
+                // ok
+            }
+
+            // Test Similarity:
+            // we shouldnt assert what the default is, just that its not null.
+            Assert.IsTrue(IndexSearcher.DefaultSimilarity == conf.Similarity);
+            conf.SetSimilarity(new MySimilarity());
+            Assert.AreEqual(typeof(MySimilarity), conf.Similarity.GetType());
+            try
+            {
+                conf.SetSimilarity(null);
+                Assert.Fail();
+            }
+#pragma warning disable 168
+            catch (System.ArgumentException e)
+#pragma warning restore 168
+            {
+                // ok
+            }
+
+            // Test IndexingChain
+            Assert.IsTrue(DocumentsWriterPerThread.DefaultIndexingChain == conf.IndexingChain);
+            conf.SetIndexingChain(new MyIndexingChain());
+            Assert.AreEqual(typeof(MyIndexingChain), conf.IndexingChain.GetType());
+            try
+            {
+                conf.SetIndexingChain(null);
+                Assert.Fail();
+            }
+#pragma warning disable 168
+            catch (System.ArgumentException e)
+#pragma warning restore 168
+            {
+                // ok
+            }
+
+            try
+            {
+                conf.SetMaxBufferedDeleteTerms(0);
+                Assert.Fail("should not have succeeded to set maxBufferedDeleteTerms to 0");
+            }
+#pragma warning disable 168
+            catch (System.ArgumentException e)
+#pragma warning restore 168
+            {
+                // this is expected
+            }
+
+            try
+            {
+                conf.SetMaxBufferedDocs(1);
+                Assert.Fail("should not have succeeded to set maxBufferedDocs to 1");
+            }
+#pragma warning disable 168
+            catch (System.ArgumentException e)
+#pragma warning restore 168
+            {
+                // this is expected
+            }
+
+            try
+            {
+                // Disable both MAX_BUF_DOCS and RAM_SIZE_MB
+                conf.SetMaxBufferedDocs(4);
+                conf.SetRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
+                conf.SetMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
+                Assert.Fail("should not have succeeded to disable maxBufferedDocs when ramBufferSizeMB is disabled as well");
+            }
+#pragma warning disable 168
+            catch (System.ArgumentException e)
+#pragma warning restore 168
+            {
+                // this is expected
+            }
+
+            // Restore defaults before probing the opposite disable order.
+            conf.SetRAMBufferSizeMB(IndexWriterConfig.DEFAULT_RAM_BUFFER_SIZE_MB);
+            conf.SetMaxBufferedDocs(IndexWriterConfig.DEFAULT_MAX_BUFFERED_DOCS);
+            try
+            {
+                conf.SetRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
+                Assert.Fail("should not have succeeded to disable ramBufferSizeMB when maxBufferedDocs is disabled as well");
+            }
+#pragma warning disable 168
+            catch (System.ArgumentException e)
+#pragma warning restore 168
+            {
+                // this is expected
+            }
+
+            // Test setReaderTermsIndexDivisor
+            try
+            {
+                conf.SetReaderTermsIndexDivisor(0);
+                Assert.Fail("should not have succeeded to set termsIndexDivisor to 0");
+            }
+#pragma warning disable 168
+            catch (System.ArgumentException e)
+#pragma warning restore 168
+            {
+                // this is expected
+            }
+
+            // Setting to -1 is ok
+            conf.SetReaderTermsIndexDivisor(-1);
+            try
+            {
+                conf.SetReaderTermsIndexDivisor(-2);
+                Assert.Fail("should not have succeeded to set termsIndexDivisor to < -1");
+            }
+#pragma warning disable 168
+            catch (System.ArgumentException e)
+#pragma warning restore 168
+            {
+                // this is expected
+            }
+
+            try
+            {
+                conf.SetRAMPerThreadHardLimitMB(2048);
+                Assert.Fail("should not have succeeded to set RAMPerThreadHardLimitMB to >= 2048");
+            }
+#pragma warning disable 168
+            catch (System.ArgumentException e)
+#pragma warning restore 168
+            {
+                // this is expected
+            }
+
+            try
+            {
+                conf.SetRAMPerThreadHardLimitMB(0);
+                Assert.Fail("should not have succeeded to set RAMPerThreadHardLimitMB to 0");
+            }
+#pragma warning disable 168
+            catch (System.ArgumentException e)
+#pragma warning restore 168
+            {
+                // this is expected
+            }
+
+            // Test MergePolicy
+            Assert.AreEqual(typeof(TieredMergePolicy), conf.MergePolicy.GetType());
+            conf.SetMergePolicy(new LogDocMergePolicy());
+            Assert.AreEqual(typeof(LogDocMergePolicy), conf.MergePolicy.GetType());
+            try
+            {
+                conf.SetMergePolicy(null);
+                Assert.Fail();
+            }
+#pragma warning disable 168
+            catch (System.ArgumentException e)
+#pragma warning restore 168
+            {
+                // ok
+            }
+        }
+
+        [Test]
+        public virtual void TestLiveChangeToCFS()
+        {
+            // Verifies that UseCompoundFile can be toggled "live" on an open
+            // IndexWriter via w.Config, and that the merge policy's NoCFSRatio /
+            // MaxCFSSegmentSizeMB independently control CFS use during merges.
+            Directory dir = NewDirectory();
+            IndexWriterConfig iwc = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()));
+            iwc.SetMergePolicy(NewLogMergePolicy(true));
+            // Start false:
+            iwc.SetUseCompoundFile(false);
+            iwc.MergePolicy.NoCFSRatio = 0.0d;
+            IndexWriter w = new IndexWriter(dir, iwc);
+            // Change to true:
+            w.Config.SetUseCompoundFile(true);
+
+            Document doc = new Document();
+            doc.Add(NewStringField("field", "foo", Store.NO));
+            w.AddDocument(doc);
+            w.Commit();
+            Assert.IsTrue(w.NewestSegment().Info.UseCompoundFile, "Expected CFS after commit");
+
+            doc.Add(NewStringField("field", "foo", Store.NO));
+            w.AddDocument(doc);
+            w.Commit();
+            w.ForceMerge(1);
+            w.Commit();
+
+            // no compound files after merge
+            // (NoCFSRatio is still 0.0, so the merged segment is non-CFS)
+            Assert.IsFalse(w.NewestSegment().Info.UseCompoundFile, "Expected Non-CFS after merge");
+
+            MergePolicy lmp = w.Config.MergePolicy;
+            lmp.NoCFSRatio = 1.0;
+            lmp.MaxCFSSegmentSizeMB = double.PositiveInfinity;
+
+            w.AddDocument(doc);
+            w.ForceMerge(1);
+            w.Commit();
+            Assert.IsTrue(w.NewestSegment().Info.UseCompoundFile, "Expected CFS after merge");
+            w.Dispose();
+            dir.Dispose();
+        }
+    }
+}
\ No newline at end of file


Mime
View raw message