lucene-dev mailing list archives

From Policeman Jenkins Server <jenk...@thetaphi.de>
Subject [JENKINS] Lucene-Solr-7.x-Linux (32bit/jdk1.8.0_144) - Build # 263 - Still Unstable!
Date Tue, 15 Aug 2017 08:04:49 GMT
Build: https://jenkins.thetaphi.de/job/Lucene-Solr-7.x-Linux/263/
Java: 32bit/jdk1.8.0_144 -server -XX:+UseParallelGC

1 tests failed.
FAILED:  org.apache.solr.cloud.HttpPartitionTest.test

Error Message:
Doc with id=1 not found in http://127.0.0.1:39151/collMinRf_1x3 due to: Path not found: /id; rsp={doc=null}

Stack Trace:
java.lang.AssertionError: Doc with id=1 not found in http://127.0.0.1:39151/collMinRf_1x3 due to: Path not found: /id; rsp={doc=null}
	at __randomizedtesting.SeedInfo.seed([1C77D46E0FDE21C:8993429C4E018FE4]:0)
	at org.junit.Assert.fail(Assert.java:93)
	at org.junit.Assert.assertTrue(Assert.java:43)
	at org.apache.solr.cloud.HttpPartitionTest.assertDocExists(HttpPartitionTest.java:603)
	at org.apache.solr.cloud.HttpPartitionTest.assertDocsExistInAllReplicas(HttpPartitionTest.java:558)
	at org.apache.solr.cloud.HttpPartitionTest.testMinRf(HttpPartitionTest.java:249)
	at org.apache.solr.cloud.HttpPartitionTest.test(HttpPartitionTest.java:127)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:498)
	at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1713)
	at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:907)
	at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:943)
	at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:957)
	at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsFixedStatement.callStatement(BaseDistributedSearchTestCase.java:993)
	at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsStatement.evaluate(BaseDistributedSearchTestCase.java:968)
	at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
	at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
	at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
	at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
	at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
	at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
	at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
	at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
	at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817)
	at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468)
	at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:916)
	at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:802)
	at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:852)
	at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:863)
	at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
	at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
	at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
	at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
	at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41)
	at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
	at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
	at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
	at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
	at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
	at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
	at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
	at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
	at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54)
	at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
	at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
	at java.lang.Thread.run(Thread.java:748)
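For context on the assertion above: the failure means a real-time get against the replica at http://127.0.0.1:39151/collMinRf_1x3 returned no document ("rsp={doc=null}"). Below is a minimal SolrJ sketch of that kind of per-replica check. It is not the test's actual implementation; the class and helper name (ReplicaDocCheck, checkDocOnReplica) are hypothetical, and only the URL and id are taken from the failure report.

    import java.io.IOException;
    import org.apache.solr.client.solrj.SolrServerException;
    import org.apache.solr.client.solrj.impl.HttpSolrClient;
    import org.apache.solr.common.SolrDocument;

    public class ReplicaDocCheck {

        // Hypothetical helper: verify that a document with the given id is visible
        // on one specific replica by querying that core's URL directly.
        static void checkDocOnReplica(String coreUrl, String docId)
                throws SolrServerException, IOException {
            try (HttpSolrClient client = new HttpSolrClient.Builder(coreUrl).build()) {
                // getById issues a real-time get (/get?id=...); it returns null when
                // the replica has no copy of the document, which corresponds to the
                // "rsp={doc=null}" response seen in the failure above.
                SolrDocument doc = client.getById(docId);
                if (doc == null) {
                    throw new AssertionError("Doc with id=" + docId + " not found at " + coreUrl);
                }
            }
        }

        public static void main(String[] args) throws Exception {
            // Values from the failure report; the port is ephemeral and test-specific.
            checkDocOnReplica("http://127.0.0.1:39151/collMinRf_1x3", "1");
        }
    }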




Build Log:
[...truncated 11421 lines...]
   [junit4] Suite: org.apache.solr.cloud.HttpPartitionTest
   [junit4]   2> 578260 INFO  (SUITE-HttpPartitionTest-seed#[1C77D46E0FDE21C]-worker) [    ] o.a.s.SolrTestCaseJ4 SecureRandom sanity checks: test.solr.allowed.securerandom=null & java.security.egd=file:/dev/./urandom
   [junit4]   2> Creating dataDir: /home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.HttpPartitionTest_1C77D46E0FDE21C-001/init-core-data-001
   [junit4]   2> 578261 WARN  (SUITE-HttpPartitionTest-seed#[1C77D46E0FDE21C]-worker) [    ] o.a.s.SolrTestCaseJ4 startTrackingSearchers: numOpens=15 numCloses=15
   [junit4]   2> 578261 INFO  (SUITE-HttpPartitionTest-seed#[1C77D46E0FDE21C]-worker) [    ] o.a.s.SolrTestCaseJ4 Using PointFields (NUMERIC_POINTS_SYSPROP=true) w/NUMERIC_DOCVALUES_SYSPROP=true
   [junit4]   2> 578263 INFO  (SUITE-HttpPartitionTest-seed#[1C77D46E0FDE21C]-worker) [    ] o.a.s.SolrTestCaseJ4 Randomized ssl (false) and clientAuth (false) via: @org.apache.solr.SolrTestCaseJ4$SuppressSSL(bugUrl=https://issues.apache.org/jira/browse/SOLR-5776)
   [junit4]   2> 578263 INFO  (SUITE-HttpPartitionTest-seed#[1C77D46E0FDE21C]-worker) [    ] o.a.s.BaseDistributedSearchTestCase Setting hostContext system property: /
   [junit4]   2> 578271 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.c.ZkTestServer STARTING ZK TEST SERVER
   [junit4]   2> 578271 INFO  (Thread-1057) [    ] o.a.s.c.ZkTestServer client port:0.0.0.0/0.0.0.0:0
   [junit4]   2> 578271 INFO  (Thread-1057) [    ] o.a.s.c.ZkTestServer Starting server
   [junit4]   2> 578273 ERROR (Thread-1057) [    ] o.a.z.s.ZooKeeperServer ZKShutdownHandler is not registered, so ZooKeeper server won't take any action on ERROR or SHUTDOWN server state changes
   [junit4]   2> 578371 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.c.ZkTestServer start zk server on port:35621
   [junit4]   2> 578377 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.c.AbstractZkTestCase put /home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/core/src/test-files/solr/collection1/conf/solrconfig-tlog.xml to /configs/conf1/solrconfig.xml
   [junit4]   2> 578378 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.c.AbstractZkTestCase put /home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/core/src/test-files/solr/collection1/conf/schema.xml to /configs/conf1/schema.xml
   [junit4]   2> 578379 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.c.AbstractZkTestCase put /home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/core/src/test-files/solr/collection1/conf/solrconfig.snippet.randomindexconfig.xml to /configs/conf1/solrconfig.snippet.randomindexconfig.xml
   [junit4]   2> 578380 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.c.AbstractZkTestCase put /home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/core/src/test-files/solr/collection1/conf/stopwords.txt to /configs/conf1/stopwords.txt
   [junit4]   2> 578380 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.c.AbstractZkTestCase put /home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/core/src/test-files/solr/collection1/conf/protwords.txt to /configs/conf1/protwords.txt
   [junit4]   2> 578381 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.c.AbstractZkTestCase put /home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/core/src/test-files/solr/collection1/conf/currency.xml to /configs/conf1/currency.xml
   [junit4]   2> 578381 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.c.AbstractZkTestCase put /home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/core/src/test-files/solr/collection1/conf/enumsConfig.xml to /configs/conf1/enumsConfig.xml
   [junit4]   2> 578382 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.c.AbstractZkTestCase put /home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/core/src/test-files/solr/collection1/conf/open-exchange-rates.json to /configs/conf1/open-exchange-rates.json
   [junit4]   2> 578382 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.c.AbstractZkTestCase put /home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/core/src/test-files/solr/collection1/conf/mapping-ISOLatin1Accent.txt to /configs/conf1/mapping-ISOLatin1Accent.txt
   [junit4]   2> 578383 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.c.AbstractZkTestCase put /home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/core/src/test-files/solr/collection1/conf/old_synonyms.txt to /configs/conf1/old_synonyms.txt
   [junit4]   2> 578383 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.c.AbstractZkTestCase put /home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/core/src/test-files/solr/collection1/conf/synonyms.txt to /configs/conf1/synonyms.txt
   [junit4]   2> 578384 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.c.AbstractFullDistribZkTestBase Will use TLOG replicas unless explicitly asked otherwise
   [junit4]   2> 578476 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.e.j.s.Server jetty-9.3.14.v20161028
   [junit4]   2> 578476 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@9db57b{/,null,AVAILABLE}
   [junit4]   2> 578476 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.e.j.s.AbstractConnector Started ServerConnector@13fd23f{HTTP/1.1,[http/1.1]}{127.0.0.1:43713}
   [junit4]   2> 578477 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.e.j.s.Server Started @580088ms
   [junit4]   2> 578477 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.c.s.e.JettySolrRunner Jetty properties: {solr.data.dir=/home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.HttpPartitionTest_1C77D46E0FDE21C-001/tempDir-001/control/data, hostContext=/, hostPort=37207, coreRootDirectory=/home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.HttpPartitionTest_1C77D46E0FDE21C-001/control-001/cores}
   [junit4]   2> 578477 ERROR (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.s.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be missing or incomplete.
   [junit4]   2> 578477 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.s.SolrDispatchFilter  ___      _       Welcome to Apache Solr™ version 7.1.0
   [junit4]   2> 578477 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.s.SolrDispatchFilter / __| ___| |_ _   Starting in cloud mode on port null
   [junit4]   2> 578477 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_|  Install dir: null, Default config dir: null
   [junit4]   2> 578477 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.s.SolrDispatchFilter |___/\___/_|_|    Start time: 2017-08-15T07:15:54.454Z
   [junit4]   2> 578479 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in ZooKeeper)
   [junit4]   2> 578479 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.c.SolrXmlConfig Loading container configuration from /home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.HttpPartitionTest_1C77D46E0FDE21C-001/control-001/solr.xml
   [junit4]   2> 578482 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.c.SolrXmlConfig MBean server found: com.sun.jmx.mbeanserver.JmxMBeanServer@878a0a, but no JMX reporters were configured - adding default JMX reporter.
   [junit4]   2> 578484 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:35621/solr
   [junit4]   2> 578508 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [n:127.0.0.1:37207_    ] o.a.s.c.Overseer Overseer (id=null) closing
   [junit4]   2> 578508 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [n:127.0.0.1:37207_    ] o.a.s.c.OverseerElectionContext I am going to be the leader 127.0.0.1:37207_
   [junit4]   2> 578509 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [n:127.0.0.1:37207_    ] o.a.s.c.Overseer Overseer (id=98486278832062468-127.0.0.1:37207_-n_0000000000) starting
   [junit4]   2> 578510 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [n:127.0.0.1:37207_    ] o.a.s.c.ZkController Register node as live in ZooKeeper:/live_nodes/127.0.0.1:37207_
   [junit4]   2> 578511 INFO  (zkCallback-2766-thread-1-processing-n:127.0.0.1:37207_) [n:127.0.0.1:37207_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
   [junit4]   2> 578628 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [n:127.0.0.1:37207_    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.node' (registry 'solr.node') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@878a0a
   [junit4]   2> 578634 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [n:127.0.0.1:37207_    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jvm' (registry 'solr.jvm') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@878a0a
   [junit4]   2> 578634 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [n:127.0.0.1:37207_    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jetty' (registry 'solr.jetty') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@878a0a
   [junit4]   2> 578635 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [n:127.0.0.1:37207_    ] o.a.s.c.CorePropertiesLocator Found 0 core definitions underneath /home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.HttpPartitionTest_1C77D46E0FDE21C-001/control-001/cores
   [junit4]   2> 578649 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
   [junit4]   2> 578650 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:35621/solr ready
   [junit4]   2> 578650 INFO  (SocketProxy-Acceptor-37207) [    ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=37974,localport=37207], receiveBufferSize:531000
   [junit4]   2> 578658 INFO  (SocketProxy-Acceptor-37207) [    ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=43713,localport=42148], receiveBufferSize=530904
   [junit4]   2> 578658 INFO  (qtp10736164-12144) [n:127.0.0.1:37207_    ] o.a.s.h.a.CollectionsHandler Invoked Collection Action :create with params replicationFactor=1&collection.configName=conf1&name=control_collection&nrtReplicas=1&action=CREATE&numShards=1&createNodeSet=127.0.0.1:37207_&wt=javabin&version=2 and sendToOCPQueue=true
   [junit4]   2> 578660 INFO  (OverseerThreadFactory-2458-thread-1-processing-n:127.0.0.1:37207_) [n:127.0.0.1:37207_    ] o.a.s.c.CreateCollectionCmd Create collection control_collection
   [junit4]   2> 578762 INFO  (SocketProxy-Acceptor-37207) [    ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=37978,localport=37207], receiveBufferSize:531000
   [junit4]   2> 578762 INFO  (SocketProxy-Acceptor-37207) [    ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=43713,localport=42152], receiveBufferSize=530904
   [junit4]   2> 578763 INFO  (qtp10736164-12146) [n:127.0.0.1:37207_    ] o.a.s.h.a.CoreAdminOperation core create command qt=/admin/cores&collection.configName=conf1&newCollection=true&name=control_collection_shard1_replica_n1&action=CREATE&numShards=1&collection=control_collection&shard=shard1&wt=javabin&version=2&replicaType=NRT
   [junit4]   2> 578764 INFO  (qtp10736164-12146) [n:127.0.0.1:37207_    ] o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 4 transient cores
   [junit4]   2> 578866 INFO  (zkCallback-2766-thread-1-processing-n:127.0.0.1:37207_) [n:127.0.0.1:37207_    ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/control_collection/state.json] for collection [control_collection] has occurred - updating... (live nodes size: [1])
   [junit4]   2> 579773 INFO  (qtp10736164-12146) [n:127.0.0.1:37207_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.c.SolrConfig Using Lucene MatchVersion: 7.1.0
   [junit4]   2> 579782 INFO  (qtp10736164-12146) [n:127.0.0.1:37207_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.s.IndexSchema [control_collection_shard1_replica_n1] Schema name=test
   [junit4]   2> 579863 INFO  (qtp10736164-12146) [n:127.0.0.1:37207_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.s.IndexSchema Loaded schema test/1.0 with uniqueid field id
   [junit4]   2> 579874 INFO  (qtp10736164-12146) [n:127.0.0.1:37207_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.c.CoreContainer Creating SolrCore 'control_collection_shard1_replica_n1' using configuration from collection control_collection, trusted=true
   [junit4]   2> 579875 INFO  (qtp10736164-12146) [n:127.0.0.1:37207_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.core.control_collection.shard1.replica_n1' (registry 'solr.core.control_collection.shard1.replica_n1') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@878a0a
   [junit4]   2> 579875 INFO  (qtp10736164-12146) [n:127.0.0.1:37207_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.c.SolrCore solr.RecoveryStrategy.Builder
   [junit4]   2> 579875 INFO  (qtp10736164-12146) [n:127.0.0.1:37207_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.c.SolrCore [[control_collection_shard1_replica_n1] ] Opening new SolrCore at [/home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.HttpPartitionTest_1C77D46E0FDE21C-001/control-001/cores/control_collection_shard1_replica_n1], dataDir=[/home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.HttpPartitionTest_1C77D46E0FDE21C-001/control-001/cores/control_collection_shard1_replica_n1/data/]
   [junit4]   2> 579878 INFO  (qtp10736164-12146) [n:127.0.0.1:37207_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.LogDocMergePolicy: [LogDocMergePolicy: minMergeSize=1000, mergeFactor=47, maxMergeSize=9223372036854775807, maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=true, maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=1.0]
   [junit4]   2> 579879 WARN  (qtp10736164-12146) [n:127.0.0.1:37207_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type = requestHandler,name = /dump,class = DumpRequestHandler,attributes = {initParams=a, name=/dump, class=DumpRequestHandler},args = {defaults={a=A,b=B}}}
   [junit4]   2> 579924 INFO  (qtp10736164-12146) [n:127.0.0.1:37207_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.u.UpdateHandler Using UpdateLog implementation: org.apache.solr.update.UpdateLog
   [junit4]   2> 579924 INFO  (qtp10736164-12146) [n:127.0.0.1:37207_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 579925 INFO  (qtp10736164-12146) [n:127.0.0.1:37207_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.u.CommitTracker Hard AutoCommit: disabled
   [junit4]   2> 579925 INFO  (qtp10736164-12146) [n:127.0.0.1:37207_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.u.CommitTracker Soft AutoCommit: disabled
   [junit4]   2> 579926 INFO  (qtp10736164-12146) [n:127.0.0.1:37207_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy: maxMergeAtOnce=49, maxMergeAtOnceExplicit=39, maxMergedSegmentMB=18.330078125, floorSegmentMB=1.9404296875, forceMergeDeletesPctAllowed=12.174909348976747, segmentsPerTier=13.0, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.0
   [junit4]   2> 579927 INFO  (qtp10736164-12146) [n:127.0.0.1:37207_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.s.SolrIndexSearcher Opening [Searcher@1a02fcc[control_collection_shard1_replica_n1] main]
   [junit4]   2> 579928 INFO  (qtp10736164-12146) [n:127.0.0.1:37207_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
   [junit4]   2> 579928 INFO  (qtp10736164-12146) [n:127.0.0.1:37207_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 579928 INFO  (qtp10736164-12146) [n:127.0.0.1:37207_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.h.ReplicationHandler Commits will be reserved for  10000
   [junit4]   2> 579929 INFO  (searcherExecutor-2461-thread-1-processing-n:127.0.0.1:37207_ x:control_collection_shard1_replica_n1 s:shard1 c:control_collection) [n:127.0.0.1:37207_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.c.SolrCore [control_collection_shard1_replica_n1] Registered new searcher Searcher@1a02fcc[control_collection_shard1_replica_n1] main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 579929 INFO  (qtp10736164-12146) [n:127.0.0.1:37207_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1575780463050489856
   [junit4]   2> 579933 INFO  (qtp10736164-12146) [n:127.0.0.1:37207_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext Enough replicas found to continue.
   [junit4]   2> 579933 INFO  (qtp10736164-12146) [n:127.0.0.1:37207_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext I may be the new leader - try and sync
   [junit4]   2> 579933 INFO  (qtp10736164-12146) [n:127.0.0.1:37207_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.c.SyncStrategy Sync replicas to http://127.0.0.1:37207/control_collection_shard1_replica_n1/
   [junit4]   2> 579933 INFO  (qtp10736164-12146) [n:127.0.0.1:37207_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.c.SyncStrategy Sync Success - now sync replicas to me
   [junit4]   2> 579933 INFO  (qtp10736164-12146) [n:127.0.0.1:37207_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.c.SyncStrategy http://127.0.0.1:37207/control_collection_shard1_replica_n1/ has no replicas
   [junit4]   2> 579933 INFO  (qtp10736164-12146) [n:127.0.0.1:37207_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext Found all replicas participating in election, clear LIR
   [junit4]   2> 579934 INFO  (qtp10736164-12146) [n:127.0.0.1:37207_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext I am the new leader: http://127.0.0.1:37207/control_collection_shard1_replica_n1/ shard1
   [junit4]   2> 580035 INFO  (zkCallback-2766-thread-1-processing-n:127.0.0.1:37207_) [n:127.0.0.1:37207_    ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/control_collection/state.json] for collection [control_collection] has occurred - updating... (live nodes size: [1])
   [junit4]   2> 580084 INFO  (qtp10736164-12146) [n:127.0.0.1:37207_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.c.ZkController I am the leader, no recovery necessary
   [junit4]   2> 580085 INFO  (qtp10736164-12146) [n:127.0.0.1:37207_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores params={qt=/admin/cores&collection.configName=conf1&newCollection=true&name=control_collection_shard1_replica_n1&action=CREATE&numShards=1&collection=control_collection&shard=shard1&wt=javabin&version=2&replicaType=NRT} status=0 QTime=1322
   [junit4]   2> 580087 INFO  (qtp10736164-12144) [n:127.0.0.1:37207_    ] o.a.s.h.a.CollectionsHandler Wait for new collection to be active for at most 30 seconds. Check all shard replicas
   [junit4]   2> 580188 INFO  (zkCallback-2766-thread-1-processing-n:127.0.0.1:37207_) [n:127.0.0.1:37207_    ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/control_collection/state.json] for collection [control_collection] has occurred - updating... (live nodes size: [1])
   [junit4]   2> 580661 INFO  (OverseerCollectionConfigSetProcessor-98486278832062468-127.0.0.1:37207_-n_0000000000) [n:127.0.0.1:37207_    ] o.a.s.c.OverseerTaskQueue Response ZK path: /overseer/collection-queue-work/qnr-0000000000 doesn't exist.  Requestor may have disconnected from ZooKeeper
   [junit4]   2> 581088 INFO  (qtp10736164-12144) [n:127.0.0.1:37207_    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/collections params={replicationFactor=1&collection.configName=conf1&name=control_collection&nrtReplicas=1&action=CREATE&numShards=1&createNodeSet=127.0.0.1:37207_&wt=javabin&version=2} status=0 QTime=2429
   [junit4]   2> 581096 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
   [junit4]   2> 581097 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:35621/solr ready
   [junit4]   2> 581097 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.c.ChaosMonkey monkey: init - expire sessions:false cause connection loss:false
   [junit4]   2> 581097 INFO  (SocketProxy-Acceptor-37207) [    ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=38042,localport=37207], receiveBufferSize:531000
   [junit4]   2> 581097 INFO  (SocketProxy-Acceptor-37207) [    ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=43713,localport=42216], receiveBufferSize=530904
   [junit4]   2> 581098 INFO  (qtp10736164-12146) [n:127.0.0.1:37207_    ] o.a.s.h.a.CollectionsHandler Invoked Collection Action :create with params replicationFactor=1&collection.configName=conf1&name=collection1&nrtReplicas=1&action=CREATE&numShards=2&createNodeSet=&stateFormat=1&wt=javabin&version=2 and sendToOCPQueue=true
   [junit4]   2> 581099 INFO  (OverseerThreadFactory-2458-thread-2-processing-n:127.0.0.1:37207_) [n:127.0.0.1:37207_    ] o.a.s.c.CreateCollectionCmd Create collection collection1
   [junit4]   2> 581100 WARN  (OverseerThreadFactory-2458-thread-2-processing-n:127.0.0.1:37207_) [n:127.0.0.1:37207_    ] o.a.s.c.CreateCollectionCmd It is unusual to create a collection (collection1) without cores.
   [junit4]   2> 581302 INFO  (qtp10736164-12146) [n:127.0.0.1:37207_    ] o.a.s.h.a.CollectionsHandler Wait for new collection to be active for at most 30 seconds. Check all shard replicas
   [junit4]   2> 581302 INFO  (qtp10736164-12146) [n:127.0.0.1:37207_    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/collections params={replicationFactor=1&collection.configName=conf1&name=collection1&nrtReplicas=1&action=CREATE&numShards=2&createNodeSet=&stateFormat=1&wt=javabin&version=2} status=0 QTime=204
   [junit4]   2> 581386 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.c.AbstractFullDistribZkTestBase create jetty 1 in directory /home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.HttpPartitionTest_1C77D46E0FDE21C-001/shard-1-001 of type TLOG
   [junit4]   2> 581386 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.e.j.s.Server jetty-9.3.14.v20161028
   [junit4]   2> 581387 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@1383abd{/,null,AVAILABLE}
   [junit4]   2> 581387 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.e.j.s.AbstractConnector Started ServerConnector@9255c1{HTTP/1.1,[http/1.1]}{127.0.0.1:38825}
   [junit4]   2> 581388 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.e.j.s.Server Started @582999ms
   [junit4]   2> 581388 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.c.s.e.JettySolrRunner Jetty properties: {solr.data.dir=/home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.HttpPartitionTest_1C77D46E0FDE21C-001/tempDir-001/jetty1, replicaType=TLOG, solrconfig=solrconfig.xml, hostContext=/, hostPort=41105, coreRootDirectory=/home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.HttpPartitionTest_1C77D46E0FDE21C-001/shard-1-001/cores}
   [junit4]   2> 581388 ERROR (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.s.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be missing or incomplete.
   [junit4]   2> 581388 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.s.SolrDispatchFilter  ___      _       Welcome to Apache Solr™ version 7.1.0
   [junit4]   2> 581388 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.s.SolrDispatchFilter / __| ___| |_ _   Starting in cloud mode on port null
   [junit4]   2> 581388 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_|  Install dir: null, Default config dir: null
   [junit4]   2> 581388 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.s.SolrDispatchFilter |___/\___/_|_|    Start time: 2017-08-15T07:15:57.365Z
   [junit4]   2> 581390 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in ZooKeeper)
   [junit4]   2> 581390 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.c.SolrXmlConfig Loading container configuration from /home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.HttpPartitionTest_1C77D46E0FDE21C-001/shard-1-001/solr.xml
   [junit4]   2> 581392 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.c.SolrXmlConfig MBean server found: com.sun.jmx.mbeanserver.JmxMBeanServer@878a0a, but no JMX reporters were configured - adding default JMX reporter.
   [junit4]   2> 581394 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:35621/solr
   [junit4]   2> 581398 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [n:127.0.0.1:41105_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
   [junit4]   2> 581398 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [n:127.0.0.1:41105_    ] o.a.s.c.Overseer Overseer (id=null) closing
   [junit4]   2> 581399 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [n:127.0.0.1:41105_    ] o.a.s.c.ZkController Register node as live in ZooKeeper:/live_nodes/127.0.0.1:41105_
   [junit4]   2> 581400 INFO  (zkCallback-2778-thread-1-processing-n:127.0.0.1:41105_) [n:127.0.0.1:41105_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
   [junit4]   2> 581400 INFO  (zkCallback-2773-thread-1) [    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
   [junit4]   2> 581400 INFO  (zkCallback-2766-thread-1-processing-n:127.0.0.1:37207_) [n:127.0.0.1:37207_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
   [junit4]   2> 581540 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [n:127.0.0.1:41105_    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.node' (registry 'solr.node') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@878a0a
   [junit4]   2> 581547 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [n:127.0.0.1:41105_    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jvm' (registry 'solr.jvm') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@878a0a
   [junit4]   2> 581547 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [n:127.0.0.1:41105_    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jetty' (registry 'solr.jetty') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@878a0a
   [junit4]   2> 581548 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [n:127.0.0.1:41105_    ] o.a.s.c.CorePropertiesLocator Found 0 core definitions underneath /home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.HttpPartitionTest_1C77D46E0FDE21C-001/shard-1-001/cores
   [junit4]   2> 581566 INFO  (SocketProxy-Acceptor-41105) [    ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=43406,localport=41105], receiveBufferSize:531000
   [junit4]   2> 581566 INFO  (SocketProxy-Acceptor-41105) [    ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=38825,localport=55386], receiveBufferSize=530904
   [junit4]   2> 581567 INFO  (qtp1137578-12192) [n:127.0.0.1:41105_    ] o.a.s.h.a.CollectionsHandler Invoked Collection Action :addreplica with params node=127.0.0.1:41105_&action=ADDREPLICA&collection=collection1&shard=shard2&type=TLOG&wt=javabin&version=2 and sendToOCPQueue=true
   [junit4]   2> 581568 INFO  (OverseerThreadFactory-2458-thread-3-processing-n:127.0.0.1:37207_) [n:127.0.0.1:37207_    ] o.a.s.c.AddReplicaCmd Node Identified 127.0.0.1:41105_ for creating new replica
   [junit4]   2> 581568 INFO  (OverseerCollectionConfigSetProcessor-98486278832062468-127.0.0.1:37207_-n_0000000000) [n:127.0.0.1:37207_    ] o.a.s.c.OverseerTaskQueue Response ZK path: /overseer/collection-queue-work/qnr-0000000002 doesn't exist.  Requestor may have disconnected from ZooKeeper
   [junit4]   2> 581569 INFO  (SocketProxy-Acceptor-41105) [    ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=43410,localport=41105], receiveBufferSize:531000
   [junit4]   2> 581569 INFO  (SocketProxy-Acceptor-41105) [    ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=38825,localport=55390], receiveBufferSize=530904
   [junit4]   2> 581569 INFO  (qtp1137578-12194) [n:127.0.0.1:41105_    ] o.a.s.h.a.CoreAdminOperation core create command qt=/admin/cores&collection.configName=conf1&name=collection1_shard2_replica_t41&action=CREATE&collection=collection1&shard=shard2&wt=javabin&version=2&replicaType=TLOG
   [junit4]   2> 581570 INFO  (qtp1137578-12194) [n:127.0.0.1:41105_    ] o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 4 transient cores
   [junit4]   2> 582607 INFO  (qtp1137578-12194) [n:127.0.0.1:41105_ c:collection1 s:shard2  x:collection1_shard2_replica_t41] o.a.s.c.SolrConfig Using Lucene MatchVersion: 7.1.0
   [junit4]   2> 582623 INFO  (qtp1137578-12194) [n:127.0.0.1:41105_ c:collection1 s:shard2  x:collection1_shard2_replica_t41] o.a.s.s.IndexSchema [collection1_shard2_replica_t41] Schema name=test
   [junit4]   2> 582705 INFO  (qtp1137578-12194) [n:127.0.0.1:41105_ c:collection1 s:shard2  x:collection1_shard2_replica_t41] o.a.s.s.IndexSchema Loaded schema test/1.0 with uniqueid field id
   [junit4]   2> 582722 INFO  (qtp1137578-12194) [n:127.0.0.1:41105_ c:collection1 s:shard2  x:collection1_shard2_replica_t41] o.a.s.c.CoreContainer Creating SolrCore 'collection1_shard2_replica_t41' using configuration from collection collection1, trusted=true
   [junit4]   2> 582722 INFO  (qtp1137578-12194) [n:127.0.0.1:41105_ c:collection1 s:shard2  x:collection1_shard2_replica_t41] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.core.collection1.shard2.replica_t41' (registry 'solr.core.collection1.shard2.replica_t41') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@878a0a
   [junit4]   2> 582722 INFO  (qtp1137578-12194) [n:127.0.0.1:41105_ c:collection1 s:shard2  x:collection1_shard2_replica_t41] o.a.s.c.SolrCore solr.RecoveryStrategy.Builder
   [junit4]   2> 582722 INFO  (qtp1137578-12194) [n:127.0.0.1:41105_ c:collection1 s:shard2  x:collection1_shard2_replica_t41] o.a.s.c.SolrCore [[collection1_shard2_replica_t41] ] Opening new SolrCore at [/home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.HttpPartitionTest_1C77D46E0FDE21C-001/shard-1-001/cores/collection1_shard2_replica_t41], dataDir=[/home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.HttpPartitionTest_1C77D46E0FDE21C-001/shard-1-001/cores/collection1_shard2_replica_t41/data/]
   [junit4]   2> 582725 INFO  (qtp1137578-12194) [n:127.0.0.1:41105_ c:collection1 s:shard2  x:collection1_shard2_replica_t41] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.LogDocMergePolicy: [LogDocMergePolicy: minMergeSize=1000, mergeFactor=47, maxMergeSize=9223372036854775807, maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=true, maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=1.0]
   [junit4]   2> 582731 WARN  (qtp1137578-12194) [n:127.0.0.1:41105_ c:collection1 s:shard2  x:collection1_shard2_replica_t41] o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type = requestHandler,name = /dump,class = DumpRequestHandler,attributes = {initParams=a, name=/dump, class=DumpRequestHandler},args = {defaults={a=A,b=B}}}
   [junit4]   2> 582759 INFO  (qtp1137578-12194) [n:127.0.0.1:41105_ c:collection1 s:shard2  x:collection1_shard2_replica_t41] o.a.s.u.UpdateHandler Using UpdateLog implementation: org.apache.solr.update.UpdateLog
   [junit4]   2> 582759 INFO  (qtp1137578-12194) [n:127.0.0.1:41105_ c:collection1 s:shard2  x:collection1_shard2_replica_t41] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 582760 INFO  (qtp1137578-12194) [n:127.0.0.1:41105_ c:collection1 s:shard2  x:collection1_shard2_replica_t41] o.a.s.u.CommitTracker Hard AutoCommit: disabled
   [junit4]   2> 582760 INFO  (qtp1137578-12194) [n:127.0.0.1:41105_ c:collection1 s:shard2  x:collection1_shard2_replica_t41] o.a.s.u.CommitTracker Soft AutoCommit: disabled
   [junit4]   2> 582761 INFO  (qtp1137578-12194) [n:127.0.0.1:41105_ c:collection1 s:shard2  x:collection1_shard2_replica_t41] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy: maxMergeAtOnce=49, maxMergeAtOnceExplicit=39, maxMergedSegmentMB=18.330078125, floorSegmentMB=1.9404296875, forceMergeDeletesPctAllowed=12.174909348976747, segmentsPerTier=13.0, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.0
   [junit4]   2> 582761 INFO  (qtp1137578-12194) [n:127.0.0.1:41105_ c:collection1 s:shard2  x:collection1_shard2_replica_t41] o.a.s.s.SolrIndexSearcher Opening [Searcher@6cbe39[collection1_shard2_replica_t41] main]
   [junit4]   2> 582761 INFO  (qtp1137578-12194) [n:127.0.0.1:41105_ c:collection1 s:shard2  x:collection1_shard2_replica_t41] o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
   [junit4]   2> 582761 INFO  (qtp1137578-12194) [n:127.0.0.1:41105_ c:collection1 s:shard2  x:collection1_shard2_replica_t41] o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 582762 INFO  (qtp1137578-12194) [n:127.0.0.1:41105_ c:collection1 s:shard2  x:collection1_shard2_replica_t41] o.a.s.h.ReplicationHandler Commits will be reserved for  10000
   [junit4]   2> 582763 INFO  (searcherExecutor-2472-thread-1-processing-n:127.0.0.1:41105_ x:collection1_shard2_replica_t41 s:shard2 c:collection1) [n:127.0.0.1:41105_ c:collection1 s:shard2  x:collection1_shard2_replica_t41] o.a.s.c.SolrCore [collection1_shard2_replica_t41] Registered new searcher Searcher@6cbe39[collection1_shard2_replica_t41] main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 582763 INFO  (qtp1137578-12194) [n:127.0.0.1:41105_ c:collection1 s:shard2  x:collection1_shard2_replica_t41] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1575780466022154240
   [junit4]   2> 582766 INFO  (qtp1137578-12194) [n:127.0.0.1:41105_ c:collection1 s:shard2  x:collection1_shard2_replica_t41] o.a.s.c.ShardLeaderElectionContext Enough replicas found to continue.
   [junit4]   2> 582766 INFO  (qtp1137578-12194) [n:127.0.0.1:41105_ c:collection1 s:shard2  x:collection1_shard2_replica_t41] o.a.s.c.ShardLeaderElectionContext I may be the new leader - try and sync
   [junit4]   2> 582766 INFO  (qtp1137578-12194) [n:127.0.0.1:41105_ c:collection1 s:shard2  x:collection1_shard2_replica_t41] o.a.s.c.SyncStrategy Sync replicas to http://127.0.0.1:41105/collection1_shard2_replica_t41/
   [junit4]   2> 582766 INFO  (qtp1137578-12194) [n:127.0.0.1:41105_ c:collection1 s:shard2  x:collection1_shard2_replica_t41] o.a.s.c.SyncStrategy Sync Success - now sync replicas to me
   [junit4]   2> 582766 INFO  (qtp1137578-12194) [n:127.0.0.1:41105_ c:collection1 s:shard2  x:collection1_shard2_replica_t41] o.a.s.c.SyncStrategy http://127.0.0.1:41105/collection1_shard2_replica_t41/ has no replicas
   [junit4]   2> 582766 INFO  (qtp1137578-12194) [n:127.0.0.1:41105_ c:collection1 s:shard2  x:collection1_shard2_replica_t41] o.a.s.c.ShardLeaderElectionContext Found all replicas participating in election, clear LIR
   [junit4]   2> 582773 INFO  (qtp1137578-12194) [n:127.0.0.1:41105_ c:collection1 s:shard2  x:collection1_shard2_replica_t41] o.a.s.c.ZkController collection1_shard2_replica_t41 stopping background replication from leader
   [junit4]   2> 582775 INFO  (qtp1137578-12194) [n:127.0.0.1:41105_ c:collection1 s:shard2  x:collection1_shard2_replica_t41] o.a.s.c.ShardLeaderElectionContext I am the new leader: http://127.0.0.1:41105/collection1_shard2_replica_t41/ shard2
   [junit4]   2> 582925 INFO  (qtp1137578-12194) [n:127.0.0.1:41105_ c:collection1 s:shard2  x:collection1_shard2_replica_t41] o.a.s.c.ZkController I am the leader, no recovery necessary
   [junit4]   2> 582926 INFO  (qtp1137578-12194) [n:127.0.0.1:41105_ c:collection1 s:shard2  x:collection1_shard2_replica_t41] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores params={qt=/admin/cores&collection.configName=conf1&name=collection1_shard2_replica_t41&action=CREATE&collection=collection1&shard=shard2&wt=javabin&version=2&replicaType=TLOG} status=0 QTime=1356
   [junit4]   2> 582927 INFO  (qtp1137578-12192) [n:127.0.0.1:41105_    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/collections params={node=127.0.0.1:41105_&action=ADDREPLICA&collection=collection1&shard=shard2&type=TLOG&wt=javabin&version=2} status=0 QTime=1359
   [junit4]   2> 582997 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.c.AbstractFullDistribZkTestBase create jetty 2 in directory /home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.HttpPartitionTest_1C77D46E0FDE21C-001/shard-2-001 of type TLOG
   [junit4]   2> 582997 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.e.j.s.Server jetty-9.3.14.v20161028
   [junit4]   2> 582998 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@13d21e9{/,null,AVAILABLE}
   [junit4]   2> 582998 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.e.j.s.AbstractConnector Started ServerConnector@394511{HTTP/1.1,[http/1.1]}{127.0.0.1:43161}
   [junit4]   2> 582998 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.e.j.s.Server Started @584609ms
   [junit4]   2> 582998 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.c.s.e.JettySolrRunner Jetty properties: {solr.data.dir=/home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.HttpPartitionTest_1C77D46E0FDE21C-001/tempDir-001/jetty2, replicaType=TLOG, solrconfig=solrconfig.xml, hostContext=/, hostPort=39151, coreRootDirectory=/home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.HttpPartitionTest_1C77D46E0FDE21C-001/shard-2-001/cores}
   [junit4]   2> 582999 ERROR (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.s.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be missing or incomplete.
   [junit4]   2> 582999 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.s.SolrDispatchFilter  ___      _       Welcome to Apache Solr™ version 7.1.0
   [junit4]   2> 582999 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.s.SolrDispatchFilter / __| ___| |_ _   Starting in cloud mode on port null
   [junit4]   2> 582999 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_|  Install dir: null, Default config dir: null
   [junit4]   2> 582999 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.s.SolrDispatchFilter |___/\___/_|_|    Start time: 2017-08-15T07:15:58.976Z
   [junit4]   2> 583004 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in ZooKeeper)
   [junit4]   2> 583004 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.c.SolrXmlConfig Loading container configuration from /home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.HttpPartitionTest_1C77D46E0FDE21C-001/shard-2-001/solr.xml
   [junit4]   2> 583007 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.c.SolrXmlConfig MBean server found: com.sun.jmx.mbeanserver.JmxMBeanServer@878a0a, but no JMX reporters were configured - adding default JMX reporter.
   [junit4]   2> 583010 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:35621/solr
   [junit4]   2> 583014 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [n:127.0.0.1:39151_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (2)
   [junit4]   2> 583014 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [n:127.0.0.1:39151_    ] o.a.s.c.Overseer Overseer (id=null) closing
   [junit4]   2> 583015 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [n:127.0.0.1:39151_    ] o.a.s.c.ZkController Register node as live in ZooKeeper:/live_nodes/127.0.0.1:39151_
   [junit4]   2> 583015 INFO  (zkCallback-2766-thread-1-processing-n:127.0.0.1:37207_) [n:127.0.0.1:37207_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3)
   [junit4]   2> 583016 INFO  (zkCallback-2773-thread-1) [    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3)
   [junit4]   2> 583016 INFO  (zkCallback-2784-thread-1-processing-n:127.0.0.1:39151_) [n:127.0.0.1:39151_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3)
   [junit4]   2> 583018 INFO  (zkCallback-2778-thread-1-processing-n:127.0.0.1:41105_) [n:127.0.0.1:41105_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3)
   [junit4]   2> 583067 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [n:127.0.0.1:39151_    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.node' (registry 'solr.node') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@878a0a
   [junit4]   2> 583074 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [n:127.0.0.1:39151_    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jvm' (registry 'solr.jvm') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@878a0a
   [junit4]   2> 583074 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [n:127.0.0.1:39151_    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jetty' (registry 'solr.jetty') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@878a0a
   [junit4]   2> 583075 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [n:127.0.0.1:39151_    ] o.a.s.c.CorePropertiesLocator Found 0 core definitions underneath /home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.HttpPartitionTest_1C77D46E0FDE21C-001/shard-2-001/cores
   [junit4]   2> 583097 INFO  (SocketProxy-Acceptor-39151) [    ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=36506,localport=39151], receiveBufferSize:531000
   [junit4]   2> 583097 INFO  (SocketProxy-Acceptor-39151) [    ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=43161,localport=39304], receiveBufferSize=530904
   [junit4]   2> 583098 INFO  (qtp2570769-12226) [n:127.0.0.1:39151_    ] o.a.s.h.a.CollectionsHandler Invoked Collection Action :addreplica with params node=127.0.0.1:39151_&action=ADDREPLICA&collection=collection1&shard=shard1&type=TLOG&wt=javabin&version=2 and sendToOCPQueue=true
   [junit4]   2> 583099 INFO  (OverseerThreadFactory-2458-thread-4-processing-n:127.0.0.1:37207_) [n:127.0.0.1:37207_    ] o.a.s.c.AddReplicaCmd Node Identified 127.0.0.1:39151_ for creating new replica
   [junit4]   2> 583099 INFO  (OverseerCollectionConfigSetProcessor-98486278832062468-127.0.0.1:37207_-n_0000000000) [n:127.0.0.1:37207_    ] o.a.s.c.OverseerTaskQueue Response ZK path: /overseer/collection-queue-work/qnr-0000000004 doesn't exist.  Requestor may have disconnected from ZooKeeper
   [junit4]   2> 583100 INFO  (SocketProxy-Acceptor-39151) [    ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=36510,localport=39151], receiveBufferSize:531000
   [junit4]   2> 583100 INFO  (SocketProxy-Acceptor-39151) [    ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=43161,localport=39308], receiveBufferSize=530904
   [junit4]   2> 583100 INFO  (qtp2570769-12228) [n:127.0.0.1:39151_    ] o.a.s.h.a.CoreAdminOperation core create command qt=/admin/cores&collection.configName=conf1&name=collection1_shard1_replica_t43&action=CREATE&collection=collection1&shard=shard1&wt=javabin&version=2&replicaType=TLOG
   [junit4]   2> 583101 INFO  (qtp2570769-12228) [n:127.0.0.1:39151_    ] o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 4 transient cores
   [junit4]   2> 584110 INFO  (qtp2570769-12228) [n:127.0.0.1:39151_ c:collection1 s:shard1  x:collection1_shard1_replica_t43] o.a.s.c.SolrConfig Using Lucene MatchVersion: 7.1.0
   [junit4]   2> 584118 INFO  (qtp2570769-12228) [n:127.0.0.1:39151_ c:collection1 s:shard1  x:collection1_shard1_replica_t43] o.a.s.s.IndexSchema [collection1_shard1_replica_t43] Schema name=test
   [junit4]   2> 584179 INFO  (qtp2570769-12228) [n:127.0.0.1:39151_ c:collection1 s:shard1  x:collection1_shard1_replica_t43] o.a.s.s.IndexSchema Loaded schema test/1.0 with uniqueid field id
   [junit4]   2> 584186 INFO  (qtp2570769-12228) [n:127.0.0.1:39151_ c:collection1 s:shard1  x:collection1_shard1_replica_t43] o.a.s.c.CoreContainer Creating SolrCore 'collection1_shard1_replica_t43' using configuration from collection collection1, trusted=true
   [junit4]   2> 584186 INFO  (qtp2570769-12228) [n:127.0.0.1:39151_ c:collection1 s:shard1  x:collection1_shard1_replica_t43] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.core.collection1.shard1.replica_t43' (registry 'solr.core.collection1.shard1.replica_t43') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@878a0a
   [junit4]   2> 584187 INFO  (qtp2570769-12228) [n:127.0.0.1:39151_ c:collection1 s:shard1  x:collection1_shard1_replica_t43] o.a.s.c.SolrCore solr.RecoveryStrategy.Builder
   [junit4]   2> 584187 INFO  (qtp2570769-12228) [n:127.0.0.1:39151_ c:collection1 s:shard1  x:collection1_shard1_replica_t43] o.a.s.c.SolrCore [[collection1_shard1_replica_t43] ] Opening new SolrCore at [/home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.HttpPartitionTest_1C77D46E0FDE21C-001/shard-2-001/cores/collection1_shard1_replica_t43], dataDir=[/home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.HttpPartitionTest_1C77D46E0FDE21C-001/shard-2-001/cores/collection1_shard1_replica_t43/data/]
   [junit4]   2> 584189 INFO  (qtp2570769-12228) [n:127.0.0.1:39151_ c:collection1 s:shard1  x:collection1_shard1_replica_t43] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.LogDocMergePolicy: [LogDocMergePolicy: minMergeSize=1000, mergeFactor=47, maxMergeSize=9223372036854775807, maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=true, maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=1.0]
   [junit4]   2> 584190 WARN  (qtp2570769-12228) [n:127.0.0.1:39151_ c:collection1 s:shard1  x:collection1_shard1_replica_t43] o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type = requestHandler,name = /dump,class = DumpRequestHandler,attributes = {initParams=a, name=/dump, class=DumpRequestHandler},args = {defaults={a=A,b=B}}}
   [junit4]   2> 584219 INFO  (qtp2570769-12228) [n:127.0.0.1:39151_ c:collection1 s:shard1  x:collection1_shard1_replica_t43] o.a.s.u.UpdateHandler Using UpdateLog implementation: org.apache.solr.update.UpdateLog
   [junit4]   2> 584219 INFO  (qtp2570769-12228) [n:127.0.0.1:39151_ c:collection1 s:shard1  x:collection1_shard1_replica_t43] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 584219 INFO  (qtp2570769-12228) [n:127.0.0.1:39151_ c:collection1 s:shard1  x:collection1_shard1_replica_t43] o.a.s.u.CommitTracker Hard AutoCommit: disabled
   [junit4]   2> 584219 INFO  (qtp2570769-12228) [n:127.0.0.1:39151_ c:collection1 s:shard1  x:collection1_shard1_replica_t43] o.a.s.u.CommitTracker Soft AutoCommit: disabled
   [junit4]   2> 584220 INFO  (qtp2570769-12228) [n:127.0.0.1:39151_ c:collection1 s:shard1  x:collection1_shard1_replica_t43] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy: maxMergeAtOnce=49, maxMergeAtOnceExplicit=39, maxMergedSegmentMB=18.330078125, floorSegmentMB=1.9404296875, forceMergeDeletesPctAllowed=12.174909348976747, segmentsPerTier=13.0, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.0
   [junit4]   2> 584220 INFO  (qtp2570769-12228) [n:127.0.0.1:39151_ c:collection1 s:shard1  x:collection1_shard1_replica_t43] o.a.s.s.SolrIndexSearcher Opening [Searcher@b315bd[collection1_shard1_replica_t43] main]
   [junit4]   2> 584221 INFO  (qtp2570769-12228) [n:127.0.0.1:39151_ c:collection1 s:shard1  x:collection1_shard1_replica_t43] o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
   [junit4]   2> 584221 INFO  (qtp2570769-12228) [n:127.0.0.1:39151_ c:collection1 s:shard1  x:collection1_shard1_replica_t43] o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 584222 INFO  (qtp2570769-12228) [n:127.0.0.1:39151_ c:collection1 s:shard1  x:collection1_shard1_replica_t43] o.a.s.h.ReplicationHandler Commits will be reserved for  10000
   [junit4]   2> 584223 INFO  (searcherExecutor-2483-thread-1-processing-n:127.0.0.1:39151_ x:collection1_shard1_replica_t43 s:shard1 c:collection1) [n:127.0.0.1:39151_ c:collection1 s:shard1  x:collection1_shard1_replica_t43] o.a.s.c.SolrCore [collection1_shard1_replica_t43] Registered new searcher Searcher@b315bd[collection1_shard1_replica_t43] main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 584223 INFO  (qtp2570769-12228) [n:127.0.0.1:39151_ c:collection1 s:shard1  x:collection1_shard1_replica_t43] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1575780467553075200
   [junit4]   2> 584226 INFO  (qtp2570769-12228) [n:127.0.0.1:39151_ c:collection1 s:shard1  x:collection1_shard1_replica_t43] o.a.s.c.ShardLeaderElectionContext Enough replicas found to continue.
   [junit4]   2> 584226 INFO  (qtp2570769-12228) [n:127.0.0.1:39151_ c:collection1 s:shard1  x:collection1_shard1_replica_t43] o.a.s.c.ShardLeaderElectionContext I may be the new leader - try and sync
   [junit4]   2> 584226 INFO  (qtp2570769-12228) [n:127.0.0.1:39151_ c:collection1 s:shard1  x:collection1_shard1_replica_t43] o.a.s.c.SyncStrategy Sync replicas to http://127.0.0.1:39151/collection1_shard1_replica_t43/
   [junit4]   2> 584226 INFO  (qtp2570769-12228) [n:127.0.0.1:39151_ c:collection1 s:shard1  x:collection1_shard1_replica_t43] o.a.s.c.SyncStrategy Sync Success - now sync replicas to me
   [junit4]   2> 584226 INFO  (qtp2570769-12228) [n:127.0.0.1:39151_ c:collection1 s:shard1  x:collection1_shard1_replica_t43] o.a.s.c.SyncStrategy http://127.0.0.1:39151/collection1_shard1_replica_t43/ has no replicas
   [junit4]   2> 584226 INFO  (qtp2570769-12228) [n:127.0.0.1:39151_ c:collection1 s:shard1  x:collection1_shard1_replica_t43] o.a.s.c.ShardLeaderElectionContext Found all replicas participating in election, clear LIR
   [junit4]   2> 584226 INFO  (qtp2570769-12228) [n:127.0.0.1:39151_ c:collection1 s:shard1  x:collection1_shard1_replica_t43] o.a.s.c.ZkController collection1_shard1_replica_t43 stopping background replication from leader
   [junit4]   2> 584227 INFO  (qtp2570769-12228) [n:127.0.0.1:39151_ c:collection1 s:shard1  x:collection1_shard1_replica_t43] o.a.s.c.ShardLeaderElectionContext I am the new leader: http://127.0.0.1:39151/collection1_shard1_replica_t43/ shard1
   [junit4]   2> 584378 INFO  (qtp2570769-12228) [n:127.0.0.1:39151_ c:collection1 s:shard1  x:collection1_shard1_replica_t43] o.a.s.c.ZkController I am the leader, no recovery necessary
   [junit4]   2> 584379 INFO  (qtp2570769-12228) [n:127.0.0.1:39151_ c:collection1 s:shard1  x:collection1_shard1_replica_t43] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores params={qt=/admin/cores&collection.configName=conf1&name=collection1_shard1_replica_t43&action=CREATE&collection=collection1&shard=shard1&wt=javabin&version=2&replicaType=TLOG} status=0 QTime=1278
   [junit4]   2> 584383 INFO  (qtp2570769-12226) [n:127.0.0.1:39151_    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/collections params={node=127.0.0.1:39151_&action=ADDREPLICA&collection=collection1&shard=shard1&type=TLOG&wt=javabin&version=2} status=0 QTime=1284
   [junit4]   2> 584456 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.c.AbstractFullDistribZkTestBase create jetty 3 in directory /home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.HttpPartitionTest_1C77D46E0FDE21C-001/shard-3-001 of type TLOG
   [junit4]   2> 584457 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.e.j.s.Server jetty-9.3.14.v20161028
   [junit4]   2> 584457 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@18e7f6f{/,null,AVAILABLE}
   [junit4]   2> 584458 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.e.j.s.AbstractConnector Started ServerConnector@ad4ece{HTTP/1.1,[http/1.1]}{127.0.0.1:39175}
   [junit4]   2> 584458 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.e.j.s.Server Started @586069ms
   [junit4]   2> 584458 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.c.s.e.JettySolrRunner Jetty properties: {solr.data.dir=/home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.HttpPartitionTest_1C77D46E0FDE21C-001/tempDir-001/jetty3, replicaType=TLOG, solrconfig=solrconfig.xml, hostContext=/, hostPort=39657, coreRootDirectory=/home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.HttpPartitionTest_1C77D46E0FDE21C-001/shard-3-001/cores}
   [junit4]   2> 584458 ERROR (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.s.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be missing or incomplete.
   [junit4]   2> 584459 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.s.SolrDispatchFilter  ___      _       Welcome to Apache Solr™ version 7.1.0
   [junit4]   2> 584459 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.s.SolrDispatchFilter / __| ___| |_ _   Starting in cloud mode on port null
   [junit4]   2> 584459 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_|  Install dir: null, Default config dir: null
   [junit4]   2> 584459 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.s.SolrDispatchFilter |___/\___/_|_|    Start time: 2017-08-15T07:16:00.436Z
   [junit4]   2> 584464 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in ZooKeeper)
   [junit4]   2> 584464 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.c.SolrXmlConfig Loading container configuration from /home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.HttpPartitionTest_1C77D46E0FDE21C-001/shard-3-001/solr.xml
   [junit4]   2> 584474 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.c.SolrXmlConfig MBean server found: com.sun.jmx.mbeanserver.JmxMBeanServer@878a0a, but no JMX reporters were configured - adding default JMX reporter.
   [junit4]   2> 584476 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:35621/solr
   [junit4]   2> 584480 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [n:127.0.0.1:39657_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (3)
   [junit4]   2> 584481 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [n:127.0.0.1:39657_    ] o.a.s.c.Overseer Overseer (id=null) closing
   [junit4]   2> 584483 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [n:127.0.0.1:39657_    ] o.a.s.c.ZkController Register node as live in ZooKeeper:/live_nodes/127.0.0.1:39657_
   [junit4]   2> 584483 INFO  (zkCallback-2773-thread-1) [    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
   [junit4]   2> 584483 INFO  (zkCallback-2778-thread-1-processing-n:127.0.0.1:41105_) [n:127.0.0.1:41105_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
   [junit4]   2> 584484 INFO  (zkCallback-2766-thread-1-processing-n:127.0.0.1:37207_) [n:127.0.0.1:37207_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
   [junit4]   2> 584483 INFO  (zkCallback-2784-thread-1-processing-n:127.0.0.1:39151_) [n:127.0.0.1:39151_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
   [junit4]   2> 584490 INFO  (zkCallback-2790-thread-1-processing-n:127.0.0.1:39657_) [n:127.0.0.1:39657_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
   [junit4]   2> 584560 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [n:127.0.0.1:39657_    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.node' (registry 'solr.node') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@878a0a
   [junit4]   2> 584567 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [n:127.0.0.1:39657_    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jvm' (registry 'solr.jvm') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@878a0a
   [junit4]   2> 584568 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [n:127.0.0.1:39657_    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jetty' (registry 'solr.jetty') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@878a0a
   [junit4]   2> 584569 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [n:127.0.0.1:39657_    ] o.a.s.c.CorePropertiesLocator Found 0 core definitions underneath /home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.HttpPartitionTest_1C77D46E0FDE21C-001/shard-3-001/cores
   [junit4]   2> 584609 INFO  (qtp2570769-12227) [n:127.0.0.1:39151_    ] o.a.s.h.a.CollectionsHandler Invoked Collection Action :addreplica with params node=127.0.0.1:39657_&action=ADDREPLICA&collection=collection1&shard=shard2&type=TLOG&wt=javabin&version=2 and sendToOCPQueue=true
   [junit4]   2> 584610 INFO  (OverseerCollectionConfigSetProcessor-98486278832062468-127.0.0.1:37207_-n_0000000000) [n:127.0.0.1:37207_    ] o.a.s.c.OverseerTaskQueue Response ZK path: /overseer/collection-queue-work/qnr-0000000006 doesn't exist.  Requestor may have disconnected from ZooKeeper
   [junit4]   2> 584610 INFO  (OverseerThreadFactory-2458-thread-5-processing-n:127.0.0.1:37207_) [n:127.0.0.1:37207_    ] o.a.s.c.AddReplicaCmd Node Identified 127.0.0.1:39657_ for creating new replica
   [junit4]   2> 584611 INFO  (SocketProxy-Acceptor-39657) [    ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=46556,localport=39657], receiveBufferSize:531000
   [junit4]   2> 584612 INFO  (SocketProxy-Acceptor-39657) [    ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=39175,localport=37636], receiveBufferSize=530904
   [junit4]   2> 584614 INFO  (qtp15737351-12260) [n:127.0.0.1:39657_    ] o.a.s.h.a.CoreAdminOperation core create command qt=/admin/cores&collection.configName=conf1&name=collection1_shard2_replica_t45&action=CREATE&collection=collection1&shard=shard2&wt=javabin&version=2&replicaType=TLOG
   [junit4]   2> 584615 INFO  (qtp15737351-12260) [n:127.0.0.1:39657_    ] o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 4 transient cores
   [junit4]   2> 585624 INFO  (qtp15737351-12260) [n:127.0.0.1:39657_ c:collection1 s:shard2  x:collection1_shard2_replica_t45] o.a.s.c.SolrConfig Using Lucene MatchVersion: 7.1.0
   [junit4]   2> 585631 INFO  (qtp15737351-12260) [n:127.0.0.1:39657_ c:collection1 s:shard2  x:collection1_shard2_replica_t45] o.a.s.s.IndexSchema [collection1_shard2_replica_t45] Schema name=test
   [junit4]   2> 585679 INFO  (qtp15737351-12260) [n:127.0.0.1:39657_ c:collection1 s:shard2  x:collection1_shard2_replica_t45] o.a.s.s.IndexSchema Loaded schema test/1.0 with uniqueid field id
   [junit4]   2> 585685 INFO  (qtp15737351-12260) [n:127.0.0.1:39657_ c:collection1 s:shard2  x:collection1_shard2_replica_t45] o.a.s.c.CoreContainer Creating SolrCore 'collection1_shard2_replica_t45' using configuration from collection collection1, trusted=true
   [junit4]   2> 585685 INFO  (qtp15737351-12260) [n:127.0.0.1:39657_ c:collection1 s:shard2  x:collection1_shard2_replica_t45] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.core.collection1.shard2.replica_t45' (registry 'solr.core.collection1.shard2.replica_t45') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@878a0a
   [junit4]   2> 585685 INFO  (qtp15737351-12260) [n:127.0.0.1:39657_ c:collection1 s:shard2  x:collection1_shard2_replica_t45] o.a.s.c.SolrCore solr.RecoveryStrategy.Builder
   [junit4]   2> 585685 INFO  (qtp15737351-12260) [n:127.0.0.1:39657_ c:collection1 s:shard2  x:collection1_shard2_replica_t45] o.a.s.c.SolrCore [[collection1_shard2_replica_t45] ] Opening new SolrCore at [/home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.HttpPartitionTest_1C77D46E0FDE21C-001/shard-3-001/cores/collection1_shard2_replica_t45], dataDir=[/home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.HttpPartitionTest_1C77D46E0FDE21C-001/shard-3-001/cores/collection1_shard2_replica_t45/data/]
   [junit4]   2> 585687 INFO  (qtp15737351-12260) [n:127.0.0.1:39657_ c:collection1 s:shard2  x:collection1_shard2_replica_t45] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.LogDocMergePolicy: [LogDocMergePolicy: minMergeSize=1000, mergeFactor=47, maxMergeSize=9223372036854775807, maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=true, maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=1.0]
   [junit4]   2> 585688 WARN  (qtp15737351-12260) [n:127.0.0.1:39657_ c:collection1 s:shard2  x:collection1_shard2_replica_t45] o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type = requestHandler,name = /dump,class = DumpRequestHandler,attributes = {initParams=a, name=/dump, class=DumpRequestHandler},args = {defaults={a=A,b=B}}}
   [junit4]   2> 585714 INFO  (qtp15737351-12260) [n:127.0.0.1:39657_ c:collection1 s:shard2  x:collection1_shard2_replica_t45] o.a.s.u.UpdateHandler Using UpdateLog implementation: org.apache.solr.update.UpdateLog
   [junit4]   2> 585714 INFO  (qtp15737351-12260) [n:127.0.0.1:39657_ c:collection1 s:shard2  x:collection1_shard2_replica_t45] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 585715 INFO  (qtp15737351-12260) [n:127.0.0.1:39657_ c:collection1 s:shard2  x:collection1_shard2_replica_t45] o.a.s.u.CommitTracker Hard AutoCommit: disabled
   [junit4]   2> 585715 INFO  (qtp15737351-12260) [n:127.0.0.1:39657_ c:collection1 s:shard2  x:collection1_shard2_replica_t45] o.a.s.u.CommitTracker Soft AutoCommit: disabled
   [junit4]   2> 585715 INFO  (qtp15737351-12260) [n:127.0.0.1:39657_ c:collection1 s:shard2  x:collection1_shard2_replica_t45] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy: maxMergeAtOnce=49, maxMergeAtOnceExplicit=39, maxMergedSegmentMB=18.330078125, floorSegmentMB=1.9404296875, forceMergeDeletesPctAllowed=12.174909348976747, segmentsPerTier=13.0, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.0
   [junit4]   2> 585716 INFO  (qtp15737351-12260) [n:127.0.0.1:39657_ c:collection1 s:shard2  x:collection1_shard2_replica_t45] o.a.s.s.SolrIndexSearcher Opening [Searcher@9b0f57[collection1_shard2_replica_t45] main]
   [junit4]   2> 585717 INFO  (qtp15737351-12260) [n:127.0.0.1:39657_ c:collection1 s:shard2  x:collection1_shard2_replica_t45] o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
   [junit4]   2> 585717 INFO  (qtp15737351-12260) [n:127.0.0.1:39657_ c:collection1 s:shard2  x:collection1_shard2_replica_t45] o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 585718 INFO  (qtp15737351-12260) [n:127.0.0.1:39657_ c:collection1 s:shard2  x:collection1_shard2_replica_t45] o.a.s.h.ReplicationHandler Commits will be reserved for  10000
   [junit4]   2> 585718 INFO  (qtp15737351-12260) [n:127.0.0.1:39657_ c:collection1 s:shard2  x:collection1_shard2_replica_t45] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1575780469120696320
   [junit4]   2> 585718 INFO  (searcherExecutor-2494-thread-1-processing-n:127.0.0.1:39657_ x:collection1_shard2_replica_t45 s:shard2 c:collection1) [n:127.0.0.1:39657_ c:collection1 s:shard2  x:collection1_shard2_replica_t45] o.a.s.c.SolrCore [collection1_shard2_replica_t45] Registered new searcher Searcher@9b0f57[collection1_shard2_replica_t45] main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 585721 INFO  (qtp15737351-12260) [n:127.0.0.1:39657_ c:collection1 s:shard2  x:collection1_shard2_replica_t45] o.a.s.c.ZkController Core needs to recover:collection1_shard2_replica_t45
   [junit4]   2> 585721 INFO  (updateExecutor-2787-thread-1-processing-n:127.0.0.1:39657_ x:collection1_shard2_replica_t45 s:shard2 c:collection1) [n:127.0.0.1:39657_ c:collection1 s:shard2 r:core_node46 x:collection1_shard2_replica_t45] o.a.s.u.DefaultSolrCoreState Running recovery
   [junit4]   2> 585722 INFO  (recoveryExecutor-2788-thread-1-processing-n:127.0.0.1:39657_ x:collection1_shard2_replica_t45 s:shard2 c:collection1 r:core_node46) [n:127.0.0.1:39657_ c:collection1 s:shard2 r:core_node46 x:collection1_shard2_replica_t45] o.a.s.c.RecoveryStrategy Starting recovery process. recoveringAfterStartup=true
   [junit4]   2> 585722 INFO  (qtp15737351-12260) [n:127.0.0.1:39657_ c:collection1 s:shard2  x:collection1_shard2_replica_t45] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores params={qt=/admin/cores&collection.configName=conf1&name=collection1_shard2_replica_t45&action=CREATE&collection=collection1&shard=shard2&wt=javabin&version=2&replicaType=TLOG} status=0 QTime=1107
   [junit4]   2> 585722 INFO  (recoveryExecutor-2788-thread-1-processing-n:127.0.0.1:39657_ x:collection1_shard2_replica_t45 s:shard2 c:collection1 r:core_node46) [n:127.0.0.1:39657_ c:collection1 s:shard2 r:core_node46 x:collection1_shard2_replica_t45] o.a.s.c.RecoveryStrategy ###### startupVersions=[[]]
   [junit4]   2> 585722 INFO  (recoveryExecutor-2788-thread-1-processing-n:127.0.0.1:39657_ x:collection1_shard2_replica_t45 s:shard2 c:collection1 r:core_node46) [n:127.0.0.1:39657_ c:collection1 s:shard2 r:core_node46 x:collection1_shard2_replica_t45] o.a.s.c.ZkController collection1_shard2_replica_t45 stopping background replication from leader
   [junit4]   2> 585722 INFO  (recoveryExecutor-2788-thread-1-processing-n:127.0.0.1:39657_ x:collection1_shard2_replica_t45 s:shard2 c:collection1 r:core_node46) [n:127.0.0.1:39657_ c:collection1 s:shard2 r:core_node46 x:collection1_shard2_replica_t45] o.a.s.c.RecoveryStrategy Begin buffering updates. core=[collection1_shard2_replica_t45]
   [junit4]   2> 585722 INFO  (recoveryExecutor-2788-thread-1-processing-n:127.0.0.1:39657_ x:collection1_shard2_replica_t45 s:shard2 c:collection1 r:core_node46) [n:127.0.0.1:39657_ c:collection1 s:shard2 r:core_node46 x:collection1_shard2_replica_t45] o.a.s.u.UpdateLog Starting to buffer updates. FSUpdateLog{state=ACTIVE, tlog=null}
   [junit4]   2> 585722 INFO  (recoveryExecutor-2788-thread-1-processing-n:127.0.0.1:39657_ x:collection1_shard2_replica_t45 s:shard2 c:collection1 r:core_node46) [n:127.0.0.1:39657_ c:collection1 s:shard2 r:core_node46 x:collection1_shard2_replica_t45] o.a.s.c.RecoveryStrategy Publishing state of core [collection1_shard2_replica_t45] as recovering, leader is [http://127.0.0.1:41105/collection1_shard2_replica_t41/] and I am [http://127.0.0.1:39657/collection1_shard2_replica_t45/]
   [junit4]   2> 585723 INFO  (qtp2570769-12227) [n:127.0.0.1:39151_    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/collections params={node=127.0.0.1:39657_&action=ADDREPLICA&collection=collection1&shard=shard2&type=TLOG&wt=javabin&version=2} status=0 QTime=1113
   [junit4]   2> 585727 INFO  (recoveryExecutor-2788-thread-1-processing-n:127.0.0.1:39657_ x:collection1_shard2_replica_t45 s:shard2 c:collection1 r:core_node46) [n:127.0.0.1:39657_ c:collection1 s:shard2 r:core_node46 x:collection1_shard2_replica_t45] o.a.s.c.RecoveryStrategy Sending prep recovery command to [http://127.0.0.1:41105]; [WaitForState: action=PREPRECOVERY&core=collection1_shard2_replica_t41&nodeName=127.0.0.1:39657_&coreNodeName=core_node46&state=recovering&checkLive=true&onlyIfLeader=true&onlyIfLeaderActive=true]
   [junit4]   2> 585727 INFO  (SocketProxy-Acceptor-41105) [    ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=43904,localport=41105], receiveBufferSize:531000
   [junit4]   2> 585729 INFO  (SocketProxy-Acceptor-41105) [    ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=38825,localport=55884], receiveBufferSize=530904
   [junit4]   2> 585730 INFO  (qtp1137578-12188) [n:127.0.0.1:41105_    ] o.a.s.h.a.PrepRecoveryOp Going to wait for coreNodeName: core_node46, state: recovering, checkLive: true, onlyIfLeader: true, onlyIfLeaderActive: true, maxTime: 183 s
   [junit4]   2> 585730 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.SolrTestCaseJ4 ###Starting test
   [junit4]   2> 585730 INFO  (qtp1137578-12188) [n:127.0.0.1:41105_    ] o.a.s.h.a.PrepRecoveryOp In WaitForState(recovering): collection=collection1, shard=shard2, thisCore=collection1_shard2_replica_t41, leaderDoesNotNeedRecovery=false, isLeader? true, live=true, checkLive=true, currentState=down, localState=active, nodeName=127.0.0.1:39657_, coreNodeName=core_node46, onlyIfActiveCheckResult=false, nodeProps: core_node46:{"core":"collection1_shard2_replica_t45","base_url":"http://127.0.0.1:39657","node_name":"127.0.0.1:39657_","state":"down","type":"TLOG"}
   [junit4]   2> 585730 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.c.AbstractFullDistribZkTestBase Wait for recoveries to finish - wait 30000 for each attempt
   [junit4]   2> 585730 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.c.AbstractDistribZkTestBase Wait for recoveries to finish - collection: collection1 failOnTimeout:true timeout (sec):30000
   [junit4]   2> 586611 INFO  (OverseerCollectionConfigSetProcessor-98486278832062468-127.0.0.1:37207_-n_0000000000) [n:127.0.0.1:37207_    ] o.a.s.c.OverseerTaskQueue Response ZK path: /overseer/collection-queue-work/qnr-0000000008 doesn't exist.  Requestor may have disconnected from ZooKeeper
   [junit4]   2> 586730 INFO  (qtp1137578-12188) [n:127.0.0.1:41105_    ] o.a.s.h.a.PrepRecoveryOp In WaitForState(recovering): collection=collection1, shard=shard2, thisCore=collection1_shard2_replica_t41, leaderDoesNotNeedRecovery=false, isLeader? true, live=true, checkLive=true, currentState=recovering, localState=active, nodeName=127.0.0.1:39657_, coreNodeName=core_node46, onlyIfActiveCheckResult=false, nodeProps: core_node46:{"core":"collection1_shard2_replica_t45","base_url":"http://127.0.0.1:39657","node_name":"127.0.0.1:39657_","state":"recovering","type":"TLOG"}
   [junit4]   2> 586730 INFO  (qtp1137578-12188) [n:127.0.0.1:41105_    ] o.a.s.h.a.PrepRecoveryOp Waited coreNodeName: core_node46, state: recovering, checkLive: true, onlyIfLeader: true for: 1 seconds.
   [junit4]   2> 586730 INFO  (qtp1137578-12188) [n:127.0.0.1:41105_    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores params={nodeName=127.0.0.1:39657_&onlyIfLeaderActive=true&core=collection1_shard2_replica_t41&coreNodeName=core_node46&action=PREPRECOVERY&checkLive=true&state=recovering&onlyIfLeader=true&wt=javabin&version=2} status=0 QTime=1000
   [junit4]   2> 587231 INFO  (recoveryExecutor-2788-thread-1-processing-n:127.0.0.1:39657_ x:collection1_shard2_replica_t45 s:shard2 c:collection1 r:core_node46) [n:127.0.0.1:39657_ c:collection1 s:shard2 r:core_node46 x:collection1_shard2_replica_t45] o.a.s.c.RecoveryStrategy Starting Replication Recovery.
   [junit4]   2> 587231 INFO  (recoveryExecutor-2788-thread-1-processing-n:127.0.0.1:39657_ x:collection1_shard2_replica_t45 s:shard2 c:collection1 r:core_node46) [n:127.0.0.1:39657_ c:collection1 s:shard2 r:core_node46 x:collection1_shard2_replica_t45] o.a.s.c.RecoveryStrategy Attempting to replicate from [http://127.0.0.1:41105/collection1_shard2_replica_t41/].
   [junit4]   2> 587232 INFO  (SocketProxy-Acceptor-41105) [    ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=43926,localport=41105], receiveBufferSize:531000
   [junit4]   2> 587233 INFO  (SocketProxy-Acceptor-41105) [    ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=38825,localport=55906], receiveBufferSize=530904
   [junit4]   2> 587234 INFO  (qtp1137578-12188) [n:127.0.0.1:41105_ c:collection1 s:shard2 r:core_node42 x:collection1_shard2_replica_t41] o.a.s.u.DirectUpdateHandler2 start commit{_version_=1575780470710337536,optimize=false,openSearcher=false,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
   [junit4]   2> 587235 INFO  (qtp1137578-12188) [n:127.0.0.1:41105_ c:collection1 s:shard2 r:core_node42 x:collection1_shard2_replica_t41] o.a.s.u.DirectUpdateHandler2 No uncommitted changes. Skipping IW.commit.
   [junit4]   2> 587236 INFO  (qtp1137578-12188) [n:127.0.0.1:41105_ c:collection1 s:shard2 r:core_node42 x:collection1_shard2_replica_t41] o.a.s.u.DirectUpdateHandler2 end_commit_flush
   [junit4]   2> 587237 INFO  (qtp1137578-12188) [n:127.0.0.1:41105_ c:collection1 s:shard2 r:core_node42 x:collection1_shard2_replica_t41] o.a.s.u.p.LogUpdateProcessorFactory [collection1_shard2_replica_t41]  webapp= path=/update params={waitSearcher=true&openSearcher=false&commit=true&softCommit=false&commit_end_point=true&wt=javabin&version=2}{commit=} 0 2
   [junit4]   2> 587239 INFO  (SocketProxy-Acceptor-41105) [    ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=43930,localport=41105], receiveBufferSize:531000
   [junit4]   2> 587240 INFO  (SocketProxy-Acceptor-41105) [    ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=38825,localport=55910], receiveBufferSize=530904
   [junit4]   2> 587241 INFO  (qtp1137578-12188) [n:127.0.0.1:41105_ c:collection1 s:shard2 r:core_node42 x:collection1_shard2_replica_t41] o.a.s.c.S.Request [collection1_shard2_replica_t41]  webapp= path=/replication params={qt=/replication&wt=javabin&version=2&command=indexversion} status=0 QTime=0
   [junit4]   2> 587241 INFO  (recoveryExecutor-2788-thread-1-processing-n:127.0.0.1:39657_ x:collection1_shard2_replica_t45 s:shard2 c:collection1 r:core_node46) [n:127.0.0.1:39657_ c:collection1 s:shard2 r:core_node46 x:collection1_shard2_replica_t45] o.a.s.h.IndexFetcher Master's generation: 1
   [junit4]   2> 587241 INFO  (recoveryExecutor-2788-thread-1-processing-n:127.0.0.1:39657_ x:collection1_shard2_replica_t45 s:shard2 c:collection1 r:core_node46) [n:127.0.0.1:39657_ c:collection1 s:shard2 r:core_node46 x:collection1_shard2_replica_t45] o.a.s.h.IndexFetcher Master's version: 0
   [junit4]   2> 587241 INFO  (recoveryExecutor-2788-thread-1-processing-n:127.0.0.1:39657_ x:collection1_shard2_replica_t45 s:shard2 c:collection1 r:core_node46) [n:127.0.0.1:39657_ c:collection1 s:shard2 r:core_node46 x:collection1_shard2_replica_t45] o.a.s.h.IndexFetcher Slave's generation: 1
   [junit4]   2> 587241 INFO  (recoveryExecutor-2788-thread-1-processing-n:127.0.0.1:39657_ x:collection1_shard2_replica_t45 s:shard2 c:collection1 r:core_node46) [n:127.0.0.1:39657_ c:collection1 s:shard2 r:core_node46 x:collection1_shard2_replica_t45] o.a.s.h.IndexFetcher Slave's version: 0
   [junit4]   2> 587241 INFO  (recoveryExecutor-2788-thread-1-processing-n:127.0.0.1:39657_ x:collection1_shard2_replica_t45 s:shard2 c:collection1 r:core_node46) [n:127.0.0.1:39657_ c:collection1 s:shard2 r:core_node46 x:collection1_shard2_replica_t45] o.a.s.c.RecoveryStrategy Replication Recovery was successful.
   [junit4]   2> 587241 INFO  (recoveryExecutor-2788-thread-1-processing-n:127.0.0.1:39657_ x:collection1_shard2_replica_t45 s:shard2 c:collection1 r:core_node46) [n:127.0.0.1:39657_ c:collection1 s:shard2 r:core_node46 x:collection1_shard2_replica_t45] o.a.s.c.RecoveryStrategy Registering as Active after recovery.
   [junit4]   2> 587241 INFO  (recoveryExecutor-2788-thread-1-processing-n:127.0.0.1:39657_ x:collection1_shard2_replica_t45 s:shard2 c:collection1 r:core_node46) [n:127.0.0.1:39657_ c:collection1 s:shard2 r:core_node46 x:collection1_shard2_replica_t45] o.a.s.c.ZkController collection1_shard2_replica_t45 starting background replication from leader
   [junit4]   2> 587242 INFO  (recoveryExecutor-2788-thread-1-processing-n:127.0.0.1:39657_ x:collection1_shard2_replica_t45 s:shard2 c:collection1 r:core_node46) [n:127.0.0.1:39657_ c:collection1 s:shard2 r:core_node46 x:collection1_shard2_replica_t45] o.a.s.c.ReplicateFromLeader Will start replication from leader with poll interval: 00:00:03
   [junit4]   2> 587244 INFO  (recoveryExecutor-2788-thread-1-processing-n:127.0.0.1:39657_ x:collection1_shard2_replica_t45 s:shard2 c:collection1 r:core_node46) [n:127.0.0.1:39657_ c:collection1 s:shard2 r:core_node46 x:collection1_shard2_replica_t45] o.a.s.h.ReplicationHandler Poll scheduled at an interval of 3000ms
   [junit4]   2> 587245 INFO  (recoveryExecutor-2788-thread-1-processing-n:127.0.0.1:39657_ x:collection1_shard2_replica_t45 s:shard2 c:collection1 r:core_node46) [n:127.0.0.1:39657_ c:collection1 s:shard2 r:core_node46 x:collection1_shard2_replica_t45] o.a.s.c.RecoveryStrategy Updating version bucket highest from index after successful recovery.
   [junit4]   2> 587245 INFO  (recoveryExecutor-2788-thread-1-processing-n:127.0.0.1:39657_ x:collection1_shard2_replica_t45 s:shard2 c:collection1 r:core_node46) [n:127.0.0.1:39657_ c:collection1 s:shard2 r:core_node46 x:collection1_shard2_replica_t45] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1575780470721871872
   [junit4]   2> 587248 INFO  (recoveryExecutor-2788-thread-1-processing-n:127.0.0.1:39657_ x:collection1_shard2_replica_t45 s:shard2 c:collection1 r:core_node46) [n:127.0.0.1:39657_ c:collection1 s:shard2 r:core_node46 x:collection1_shard2_replica_t45] o.a.s.c.RecoveryStrategy Finished recovery process, successful=[true]
   [junit4]   2> 587730 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.c.AbstractDistribZkTestBase Recoveries finished - collection: collection1
   [junit4]   2> 587731 INFO  (qtp10736164-12140) [n:127.0.0.1:37207_ c:control_collection s:shard1 r:core_node2 x:control_collection_shard1_replica_n1] o.a.s.u.DirectUpdateHandler2 start commit{_version_=1575780471231479808,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
   [junit4]   2> 587731 INFO  (qtp10736164-12140) [n:127.0.0.1:37207_ c:control_collection s:shard1 r:core_node2 x:control_collection_shard1_replica_n1] o.a.s.u.DirectUpdateHandler2 No uncommitted changes. Skipping IW.commit.
   [junit4]   2> 587732 INFO  (qtp10736164-12140) [n:127.0.0.1:37207_ c:control_collection s:shard1 r:core_node2 x:control_collection_shard1_replica_n1] o.a.s.u.DirectUpdateHandler2 end_commit_flush
   [junit4]   2> 587732 INFO  (qtp10736164-12140) [n:127.0.0.1:37207_ c:control_collection s:shard1 r:core_node2 x:control_collection_shard1_replica_n1] o.a.s.u.p.LogUpdateProcessorFactory [control_collection_shard1_replica_n1]  webapp= path=/update params={waitSearcher=true&commit=true&softCommit=false&wt=javabin&version=2}{commit=} 0 1
   [junit4]   2> 587734 INFO  (SocketProxy-Acceptor-41105) [    ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=43938,localport=41105], receiveBufferSize:531000
   [junit4]   2> 587734 INFO  (SocketProxy-Acceptor-39151) [    ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=36904,localport=39151], receiveBufferSize:531000
   [junit4]   2> 587734 INFO  (SocketProxy-Acceptor-39657) [    ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=46598,localport=39657], receiveBufferSize:531000
   [junit4]   2> 587735 INFO  (SocketProxy-Acceptor-39151) [    ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=43161,localport=39706], receiveBufferSize=530904
   [junit4]   2> 587735 INFO  (SocketProxy-Acceptor-39657) [    ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=39175,localport=37682], receiveBufferSize=530904
   [junit4]   2> 587735 INFO  (qtp1137578-12190) [n:127.0.0.1:41105_ c:collection1 s:shard2 r:core_node42 x:collection1_shard2_replica_t41] o.a.s.u.DirectUpdateHandler2 start commit{_version_=1575780471235674112,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
   [junit4]   2> 587735 INFO  (qtp1137578-12190) [n:127.0.0.1:41105_ c:collection1 s:shard2 r:core_node42 x:collection1_shard2_replica_t41] o.a.s.u.DirectUpdateHandler2 No uncommitted changes. Skipping IW.commit.
   [junit4]   2> 587735 INFO  (qtp1137578-12190) [n:127.0.0.1:41105_ c:collection1 s:shard2 r:core_node42 x:collection1_shard2_replica_t41] o.a.s.u.DirectUpdateHandler2 end_commit_flush
   [junit4]   2> 587735 INFO  (qtp1137578-12190) [n:127.0.0.1:41105_ c:collection1 s:shard2 r:core_node42 x:collection1_shard2_replica_t41] o.a.s.u.p.LogUpdateProcessorFactory [collection1_shard2_replica_t41]  webapp= path=/update params={update.distrib=FROMLEADER&waitSearcher=true&openSearcher=true&commit=true&softCommit=false&distrib.from=http://127.0.0.1:39151/collection1_shard1_replica_t43/&commit_end_point=true&wt=javabin&version=2&expungeDeletes=false}{commit=} 0 0
   [junit4]   2> 5877

[...truncated too long message...]


   [junit4]   2> 604018 INFO  (coreCloseExecutor-2547-thread-2) [n:127.0.0.1:39657_ c:collMinRf_1x3 s:shard1 r:core_node6 x:collMinRf_1x3_shard1_replica_t3] o.a.s.m.SolrMetricManager Closing metric reporters for registry=solr.core.collMinRf_1x3.shard1.replica_t3, tag=14528989
   [junit4]   2> 604018 INFO  (coreCloseExecutor-2547-thread-2) [n:127.0.0.1:39657_ c:collMinRf_1x3 s:shard1 r:core_node6 x:collMinRf_1x3_shard1_replica_t3] o.a.s.m.r.SolrJmxReporter Closing reporter [org.apache.solr.metrics.reporters.SolrJmxReporter@662e1c: rootName = null, domain = solr.core.collMinRf_1x3.shard1.replica_t3, service url = null, agent id = null] for registry solr.core.collMinRf_1x3.shard1.replica_t3 / com.codahale.metrics.MetricRegistry@fd2c4d
   [junit4]   2> 604026 INFO  (coreCloseExecutor-2547-thread-2) [n:127.0.0.1:39657_ c:collMinRf_1x3 s:shard1 r:core_node6 x:collMinRf_1x3_shard1_replica_t3] o.a.s.m.SolrMetricManager Closing metric reporters for registry=solr.collection.collMinRf_1x3.shard1.leader, tag=14528989
   [junit4]   2> 604027 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.c.Overseer Overseer (id=98486278832062479-127.0.0.1:39657_-n_0000000003) closing
   [junit4]   2> 604027 INFO  (OverseerStateUpdate-98486278832062479-127.0.0.1:39657_-n_0000000003) [n:127.0.0.1:39657_    ] o.a.s.c.Overseer Overseer Loop exiting : 127.0.0.1:39657_
   [junit4]   2> 606421 WARN  (zkCallback-2790-thread-1-processing-n:127.0.0.1:39657_) [n:127.0.0.1:39657_ c:collection1 s:shard2 r:core_node46 x:collection1_shard2_replica_t45] o.a.s.c.SyncStrategy Closed, skipping sync up.
   [junit4]   2> 606421 INFO  (zkCallback-2790-thread-1-processing-n:127.0.0.1:39657_) [n:127.0.0.1:39657_ c:collection1 s:shard2 r:core_node46 x:collection1_shard2_replica_t45] o.a.s.c.ShardLeaderElectionContext We failed sync, but we have no versions - we can't sync in that case - we were active before, so become leader anyway
   [junit4]   2> 606421 INFO  (zkCallback-2790-thread-1-processing-n:127.0.0.1:39657_) [n:127.0.0.1:39657_ c:collection1 s:shard2 r:core_node46 x:collection1_shard2_replica_t45] o.a.s.c.SolrCore [collection1_shard2_replica_t45]  CLOSING SolrCore org.apache.solr.core.SolrCore@179f868
   [junit4]   2> 606422 INFO  (zkCallback-2790-thread-1-processing-n:127.0.0.1:39657_) [n:127.0.0.1:39657_ c:collection1 s:shard2 r:core_node46 x:collection1_shard2_replica_t45] o.a.s.m.SolrMetricManager Closing metric reporters for registry=solr.core.collection1.shard2.replica_t45, tag=24770664
   [junit4]   2> 606422 INFO  (zkCallback-2790-thread-1-processing-n:127.0.0.1:39657_) [n:127.0.0.1:39657_ c:collection1 s:shard2 r:core_node46 x:collection1_shard2_replica_t45] o.a.s.m.r.SolrJmxReporter Closing reporter [org.apache.solr.metrics.reporters.SolrJmxReporter@1078623: rootName = null, domain = solr.core.collection1.shard2.replica_t45, service url = null, agent id = null] for registry solr.core.collection1.shard2.replica_t45 / com.codahale.metrics.MetricRegistry@c0bce2
   [junit4]   2> 606432 INFO  (zkCallback-2790-thread-1-processing-n:127.0.0.1:39657_) [n:127.0.0.1:39657_ c:collection1 s:shard2 r:core_node46 x:collection1_shard2_replica_t45] o.a.s.m.SolrMetricManager Closing metric reporters for registry=solr.collection.collection1.shard2.leader, tag=24770664
   [junit4]   2> 606433 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.e.j.s.AbstractConnector Stopped ServerConnector@ad4ece{HTTP/1.1,[http/1.1]}{127.0.0.1:0}
   [junit4]   2> 606433 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.e.j.s.h.ContextHandler Stopped o.e.j.s.ServletContextHandler@18e7f6f{/,null,UNAVAILABLE}
   [junit4]   2> 606434 ERROR (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.z.s.ZooKeeperServer ZKShutdownHandler is not registered, so ZooKeeper server won't take any action on ERROR or SHUTDOWN server state changes
   [junit4]   2> 606434 INFO  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.c.ZkTestServer connecting to 127.0.0.1:35621 35621
   [junit4]   2> 611461 INFO  (Thread-1057) [    ] o.a.s.c.ZkTestServer connecting to 127.0.0.1:35621 35621
   [junit4]   2> 611461 WARN  (Thread-1057) [    ] o.a.s.c.ZkTestServer Watch limit violations: 
   [junit4]   2> Maximum concurrent create/delete watches above limit:
   [junit4]   2> 
   [junit4]   2> 	6	/solr/aliases.json
   [junit4]   2> 	4	/solr/security.json
   [junit4]   2> 	4	/solr/configs/conf1
   [junit4]   2> 	3	/solr/collections/collection1/state.json
   [junit4]   2> 	3	/solr/collections/collMinRf_1x3/state.json
   [junit4]   2> 	2	/solr/collections/c8n_crud_1x2/state.json
   [junit4]   2> 
   [junit4]   2> Maximum concurrent data watches above limit:
   [junit4]   2> 
   [junit4]   2> 	6	/solr/clusterstate.json
   [junit4]   2> 	6	/solr/clusterprops.json
   [junit4]   2> 	2	/solr/collections/collMinRf_1x3/leader_elect/shard1/election/98486278832062479-core_node6-n_0000000000
   [junit4]   2> 
   [junit4]   2> Maximum concurrent children watches above limit:
   [junit4]   2> 
   [junit4]   2> 	6	/solr/live_nodes
   [junit4]   2> 	6	/solr/collections
   [junit4]   2> 	4	/solr/overseer/queue
   [junit4]   2> 	4	/solr/overseer/collection-queue-work
   [junit4]   2> 
   [junit4]   2> 611461 WARN  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.c.SocketProxy Closing 3 connections to: http://127.0.0.1:41105/, target: http://127.0.0.1:38825/
   [junit4]   2> 611461 WARN  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.c.SocketProxy Closing 10 connections to: http://127.0.0.1:39657/, target: http://127.0.0.1:39175/
   [junit4]   2> 611461 WARN  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.c.SocketProxy Closing 8 connections to: http://127.0.0.1:39151/, target: http://127.0.0.1:43161/
   [junit4]   2> 611461 WARN  (TEST-HttpPartitionTest.test-seed#[1C77D46E0FDE21C]) [    ] o.a.s.c.SocketProxy Closing 4 connections to: http://127.0.0.1:37207/, target: http://127.0.0.1:43713/
   [junit4]   2> NOTE: reproduce with: ant test  -Dtestcase=HttpPartitionTest -Dtests.method=test -Dtests.seed=1C77D46E0FDE21C -Dtests.multiplier=3 -Dtests.slow=true -Dtests.locale=es-US -Dtests.timezone=Asia/Baku -Dtests.asserts=true -Dtests.file.encoding=ISO-8859-1
   [junit4] FAILURE 33.2s J1 | HttpPartitionTest.test <<<
   [junit4]    > Throwable #1: java.lang.AssertionError: Doc with id=1 not found in http://127.0.0.1:39151/collMinRf_1x3 due to: Path not found: /id; rsp={doc=null}
   [junit4]    > 	at __randomizedtesting.SeedInfo.seed([1C77D46E0FDE21C:8993429C4E018FE4]:0)
   [junit4]    > 	at org.apache.solr.cloud.HttpPartitionTest.assertDocExists(HttpPartitionTest.java:603)
   [junit4]    > 	at org.apache.solr.cloud.HttpPartitionTest.assertDocsExistInAllReplicas(HttpPartitionTest.java:558)
   [junit4]    > 	at org.apache.solr.cloud.HttpPartitionTest.testMinRf(HttpPartitionTest.java:249)
   [junit4]    > 	at org.apache.solr.cloud.HttpPartitionTest.test(HttpPartitionTest.java:127)
   [junit4]    > 	at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsFixedStatement.callStatement(BaseDistributedSearchTestCase.java:993)
   [junit4]    > 	at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsStatement.evaluate(BaseDistributedSearchTestCase.java:968)
   [junit4]    > 	at java.lang.Thread.run(Thread.java:748)
   [junit4]   2> NOTE: leaving temporary files on disk at: /home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.HttpPartitionTest_1C77D46E0FDE21C-001
   [junit4]   2> NOTE: test params are: codec=Lucene70, sim=RandomSimilarity(queryNorm=false): {}, locale=es-US, timezone=Asia/Baku
   [junit4]   2> NOTE: Linux 4.10.0-27-generic i386/Oracle Corporation 1.8.0_144 (32-bit)/cpus=8,threads=1,free=29064528,total=309592064
   [junit4]   2> NOTE: All tests run in this JVM: [TestPartialUpdateDeduplication, TestGraphMLResponseWriter, SchemaVersionSpecificBehaviorTest, CoreAdminHandlerTest, HighlighterTest, BlockJoinFacetDistribTest, TestSweetSpotSimilarityFactory, ZkCLITest, CoreMergeIndexesAdminHandlerTest, HdfsBasicDistributedZk2Test, UpdateRequestProcessorFactoryTest, TestCustomStream, ReplicationFactorTest, VersionInfoTest, TestSha256AuthenticationProvider, TestQueryTypes, TestStreamBody, TestCharFilters, TestManagedSchemaThreadSafety, TestSchemaVersionResource, DocValuesTest, StatelessScriptUpdateProcessorFactoryTest, VMParamsZkACLAndCredentialsProvidersTest, BlobRepositoryCloudTest, TestLRUCache, TestSmileRequest, AssignTest, TestTrackingShardHandlerFactory, TestBM25SimilarityFactory, AtomicUpdatesTest, TestInfoStreamLogging, TestFuzzyAnalyzedSuggestions, SuggestComponentContextFilterQueryTest, TestReloadAndDeleteDocs, IgnoreCommitOptimizeUpdateProcessorFactoryTest, SpatialFilterTest, BadComponentTest, ActionThrottleTest, TestBlobHandler, DistanceFunctionTest, SpellCheckComponentTest, DistributedIntervalFacetingTest, TestSolrConfigHandler, ZkSolrClientTest, TestConfigSetsAPIExclusivity, TestRTGBase, TestNumericTokenStream, TestCloudRecovery, HLLSerializationTest, TestDeleteCollectionOnDownNodes, TestSolrCloudWithHadoopAuthPlugin, SOLR749Test, TestSolrJ, TestSolrCloudWithSecureImpersonation, PrimUtilsTest, JsonLoaderTest, TestBackupRepositoryFactory, ClusterStateUpdateTest, TestAuthorizationFramework, SuggesterTSTTest, AddBlockUpdateTest, DistributedMLTComponentTest, LeaderFailureAfterFreshStartTest, MoreLikeThisHandlerTest, BadCopyFieldTest, TestCursorMarkWithoutUniqueKey, MigrateRouteKeyTest, ImplicitSnitchTest, TestStressLiveNodes, OverseerStatusTest, TestAtomicUpdateErrorCases, ConcurrentDeleteAndCreateCollectionTest, TestAnalyzeInfixSuggestions, TestComplexPhraseLeadingWildcard, TestLegacyFieldReuse, EnumFieldTest, DistributedFacetPivotLongTailTest, ScriptEngineTest, CollectionTooManyReplicasTest, HttpPartitionTest]
   [junit4] Completed [199/729 (1!)] on J1 in 33.22s, 1 test, 1 failure <<< FAILURES!

[...truncated 41220 lines...]
