lucene-dev mailing list archives

From: Policeman Jenkins Server <jenk...@thetaphi.de>
Subject: [JENKINS] Lucene-Solr-master-MacOSX (64bit/jdk1.8.0) - Build # 4046 - Still Unstable!
Date: Sat, 03 Jun 2017 17:47:10 GMT
Build: https://jenkins.thetaphi.de/job/Lucene-Solr-master-MacOSX/4046/
Java: 64bit/jdk1.8.0 -XX:-UseCompressedOops -XX:+UseConcMarkSweepGC

1 test failed.
FAILED:  org.apache.solr.cloud.HttpPartitionTest.test

Error Message:
Didn't see all replicas for shard shard1 in collMinRf_1x3 come up within 90000 ms! (ClusterState reproduced in full in the stack trace below)
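
For context, the ClusterState in the stack trace below shows collMinRf_1x3 as a single-shard collection backed entirely by TLOG replicas (nrtReplicas=0, tlogReplicas=3), with core_node1 still in the "recovering" state when the 90-second wait expires. The SolrJ sketch below shows roughly how a collection with that layout could be created. It is not the test's own setup code: the ZK address and config set name ("conf1") are placeholders taken from the log further down, and it assumes the CollectionAdminRequest.createCollection overload that takes separate NRT/TLOG/PULL replica counts introduced alongside replica types in 7.0.

import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.response.CollectionAdminResponse;

public class CreateTlogCollection {
  public static void main(String[] args) throws Exception {
    // Placeholder ZK address; the test starts its own ZkTestServer on a random port.
    try (CloudSolrClient client = new CloudSolrClient.Builder()
        .withZkHost("127.0.0.1:63096/solr").build()) {
      client.connect();
      // 1 shard, 0 NRT / 3 TLOG / 0 PULL replicas -- mirrors the collMinRf_1x3 layout
      // in the ClusterState below. Assumes the six-argument createCollection overload
      // added alongside replica types (an assumption, not verified against this build).
      CollectionAdminRequest.Create create =
          CollectionAdminRequest.createCollection("collMinRf_1x3", "conf1", 1, 0, 3, 0);
      create.setMaxShardsPerNode(1);
      CollectionAdminResponse rsp = create.process(client);
      System.out.println("CREATE status: " + rsp.getStatus());
    }
  }
}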

Stack Trace:
java.lang.AssertionError: Didn't see all replicas for shard shard1 in collMinRf_1x3 come up within 90000 ms! ClusterState: {
  "control_collection":{
    "pullReplicas":"0",
    "replicationFactor":"1",
    "shards":{"shard1":{
        "range":"80000000-7fffffff",
        "state":"active",
        "replicas":{"core_node1":{
            "core":"collection1",
            "base_url":"http://127.0.0.1:63101",
            "node_name":"127.0.0.1:63101_",
            "state":"active",
            "type":"NRT",
            "leader":"true"}}}},
    "router":{"name":"compositeId"},
    "maxShardsPerNode":"1",
    "autoAddReplicas":"false",
    "nrtReplicas":"1",
    "tlogReplicas":"0",
    "autoCreated":"true"},
  "collMinRf_1x3":{
    "pullReplicas":"0",
    "replicationFactor":"1",
    "shards":{"shard1":{
        "range":"80000000-7fffffff",
        "state":"active",
        "replicas":{
          "core_node1":{
            "core":"collMinRf_1x3_shard1_replica_t1",
            "base_url":"http://127.0.0.1:63101",
            "node_name":"127.0.0.1:63101_",
            "state":"recovering",
            "type":"TLOG"},
          "core_node2":{
            "core":"collMinRf_1x3_shard1_replica_t2",
            "base_url":"http://127.0.0.1:63108",
            "node_name":"127.0.0.1:63108_",
            "state":"active",
            "type":"TLOG"},
          "core_node3":{
            "core":"collMinRf_1x3_shard1_replica_t3",
            "base_url":"http://127.0.0.1:63157",
            "node_name":"127.0.0.1:63157_",
            "state":"active",
            "type":"TLOG",
            "leader":"true"}}}},
    "router":{"name":"compositeId"},
    "maxShardsPerNode":"1",
    "autoAddReplicas":"false",
    "nrtReplicas":"0",
    "tlogReplicas":"3"},
  "collection1":{
    "pullReplicas":"0",
    "replicationFactor":"1",
    "shards":{
      "shard1":{
        "range":"80000000-ffffffff",
        "state":"active",
        "replicas":{"core_node2":{
            "core":"collection1",
            "base_url":"http://127.0.0.1:63119",
            "node_name":"127.0.0.1:63119_",
            "state":"active",
            "type":"TLOG",
            "leader":"true"}}},
      "shard2":{
        "range":"0-7fffffff",
        "state":"active",
        "replicas":{
          "core_node1":{
            "core":"collection1",
            "base_url":"http://127.0.0.1:63108",
            "node_name":"127.0.0.1:63108_",
            "state":"active",
            "type":"TLOG",
            "leader":"true"},
          "core_node3":{
            "core":"collection1",
            "base_url":"http://127.0.0.1:63157",
            "node_name":"127.0.0.1:63157_",
            "state":"active",
            "type":"TLOG"}}}},
    "router":{"name":"compositeId"},
    "maxShardsPerNode":"1",
    "autoAddReplicas":"false",
    "nrtReplicas":"1",
    "tlogReplicas":"0",
    "autoCreated":"true"}}
	at __randomizedtesting.SeedInfo.seed([222A3860AFEC3C9:8A769C5CA402AE31]:0)
	at org.junit.Assert.fail(Assert.java:93)
	at org.apache.solr.cloud.AbstractFullDistribZkTestBase.ensureAllReplicasAreActive(AbstractFullDistribZkTestBase.java:1976)
	at org.apache.solr.cloud.HttpPartitionTest.testMinRf(HttpPartitionTest.java:245)
	at org.apache.solr.cloud.HttpPartitionTest.test(HttpPartitionTest.java:126)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:498)
	at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1713)
	at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:907)
	at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:943)
	at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:957)
	at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsFixedStatement.callStatement(BaseDistributedSearchTestCase.java:985)
	at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsStatement.evaluate(BaseDistributedSearchTestCase.java:960)
	at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
	at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
	at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
	at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
	at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
	at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
	at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
	at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
	at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817)
	at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468)
	at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:916)
	at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:802)
	at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:852)
	at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:863)
	at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
	at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
	at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
	at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
	at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41)
	at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
	at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
	at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
	at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
	at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
	at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
	at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
	at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
	at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54)
	at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
	at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
	at java.lang.Thread.run(Thread.java:748)
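
The assertion is thrown by AbstractFullDistribZkTestBase.ensureAllReplicasAreActive, which gives up after 90 seconds while core_node1 of collMinRf_1x3 is still recovering. As a rough illustration of what that wait amounts to (a minimal sketch, not the test's actual implementation), the SolrJ code below polls the ZooKeeper-backed ClusterState until every replica of a shard is ACTIVE and hosted on a live node, or a deadline passes; the ZK address, collection, shard, and timeout values are placeholders taken from this run.

import java.util.concurrent.TimeUnit;

import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.ZkStateReader;

public class WaitForActiveReplicas {
  public static void main(String[] args) throws Exception {
    String zkHost = "127.0.0.1:63096/solr";   // placeholder; the test uses a randomized ZK port
    String collection = "collMinRf_1x3";
    String shard = "shard1";
    long timeoutMs = TimeUnit.SECONDS.toMillis(90);

    try (CloudSolrClient client = new CloudSolrClient.Builder().withZkHost(zkHost).build()) {
      client.connect();
      ZkStateReader reader = client.getZkStateReader();
      long deadline = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(timeoutMs);

      while (true) {
        reader.forceUpdateCollection(collection);   // re-read state.json from ZooKeeper
        ClusterState state = reader.getClusterState();
        DocCollection coll = state.getCollection(collection);
        Slice slice = coll.getSlice(shard);

        // A replica only counts if it reports ACTIVE and its node is still in /live_nodes.
        boolean allActive = slice.getReplicas().stream().allMatch(r ->
            r.getState() == Replica.State.ACTIVE
                && state.getLiveNodes().contains(r.getNodeName()));
        if (allActive) {
          System.out.println("All replicas of " + collection + "/" + shard + " are active");
          return;
        }
        if (System.nanoTime() > deadline) {
          throw new AssertionError("Didn't see all replicas for shard " + shard
              + " in " + collection + " come up within " + timeoutMs + " ms");
        }
        Thread.sleep(1000);   // poll roughly once per second
      }
    }
  }
}

In this run, core_node1 never leaves "recovering" within the window, so a loop like this would hit the AssertionError branch just as the test did.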




Build Log:
[...truncated 11470 lines...]
   [junit4] Suite: org.apache.solr.cloud.HttpPartitionTest
   [junit4]   2> Creating dataDir: /Users/jenkins/workspace/Lucene-Solr-master-MacOSX/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_222A3860AFEC3C9-001/init-core-data-001
   [junit4]   2> 632452 WARN  (SUITE-HttpPartitionTest-seed#[222A3860AFEC3C9]-worker) [    ] o.a.s.SolrTestCaseJ4 startTrackingSearchers: numOpens=7 numCloses=7
   [junit4]   2> 632453 INFO  (SUITE-HttpPartitionTest-seed#[222A3860AFEC3C9]-worker) [    ] o.a.s.SolrTestCaseJ4 Using PointFields
   [junit4]   2> 632456 INFO  (SUITE-HttpPartitionTest-seed#[222A3860AFEC3C9]-worker) [    ] o.a.s.SolrTestCaseJ4 Randomized ssl (false) and clientAuth (false) via: @org.apache.solr.SolrTestCaseJ4$SuppressSSL(bugUrl=https://issues.apache.org/jira/browse/SOLR-5776) w/ MAC_OS_X suppressed clientAuth
   [junit4]   2> 632456 INFO  (SUITE-HttpPartitionTest-seed#[222A3860AFEC3C9]-worker) [    ] o.a.s.BaseDistributedSearchTestCase Setting hostContext system property: /
   [junit4]   2> 632457 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.c.ZkTestServer STARTING ZK TEST SERVER
   [junit4]   2> 632457 INFO  (Thread-1283) [    ] o.a.s.c.ZkTestServer client port:0.0.0.0/0.0.0.0:0
   [junit4]   2> 632457 INFO  (Thread-1283) [    ] o.a.s.c.ZkTestServer Starting server
   [junit4]   2> 632461 ERROR (Thread-1283) [    ] o.a.z.s.ZooKeeperServer ZKShutdownHandler is not registered, so ZooKeeper server won't take any action on ERROR or SHUTDOWN server state changes
   [junit4]   2> 632561 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.c.ZkTestServer start zk server on port:63096
   [junit4]   2> 632592 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.c.AbstractZkTestCase put /Users/jenkins/workspace/Lucene-Solr-master-MacOSX/solr/core/src/test-files/solr/collection1/conf/solrconfig-tlog.xml to /configs/conf1/solrconfig.xml
   [junit4]   2> 632596 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.c.AbstractZkTestCase put /Users/jenkins/workspace/Lucene-Solr-master-MacOSX/solr/core/src/test-files/solr/collection1/conf/schema.xml to /configs/conf1/schema.xml
   [junit4]   2> 632598 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.c.AbstractZkTestCase put /Users/jenkins/workspace/Lucene-Solr-master-MacOSX/solr/core/src/test-files/solr/collection1/conf/solrconfig.snippet.randomindexconfig.xml to /configs/conf1/solrconfig.snippet.randomindexconfig.xml
   [junit4]   2> 632601 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.c.AbstractZkTestCase put /Users/jenkins/workspace/Lucene-Solr-master-MacOSX/solr/core/src/test-files/solr/collection1/conf/stopwords.txt to /configs/conf1/stopwords.txt
   [junit4]   2> 632604 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.c.AbstractZkTestCase put /Users/jenkins/workspace/Lucene-Solr-master-MacOSX/solr/core/src/test-files/solr/collection1/conf/protwords.txt to /configs/conf1/protwords.txt
   [junit4]   2> 632606 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.c.AbstractZkTestCase put /Users/jenkins/workspace/Lucene-Solr-master-MacOSX/solr/core/src/test-files/solr/collection1/conf/currency.xml to /configs/conf1/currency.xml
   [junit4]   2> 632609 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.c.AbstractZkTestCase put /Users/jenkins/workspace/Lucene-Solr-master-MacOSX/solr/core/src/test-files/solr/collection1/conf/enumsConfig.xml to /configs/conf1/enumsConfig.xml
   [junit4]   2> 632612 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.c.AbstractZkTestCase put /Users/jenkins/workspace/Lucene-Solr-master-MacOSX/solr/core/src/test-files/solr/collection1/conf/open-exchange-rates.json to /configs/conf1/open-exchange-rates.json
   [junit4]   2> 632614 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.c.AbstractZkTestCase put /Users/jenkins/workspace/Lucene-Solr-master-MacOSX/solr/core/src/test-files/solr/collection1/conf/mapping-ISOLatin1Accent.txt to /configs/conf1/mapping-ISOLatin1Accent.txt
   [junit4]   2> 632617 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.c.AbstractZkTestCase put /Users/jenkins/workspace/Lucene-Solr-master-MacOSX/solr/core/src/test-files/solr/collection1/conf/old_synonyms.txt to /configs/conf1/old_synonyms.txt
   [junit4]   2> 632619 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.c.AbstractZkTestCase put /Users/jenkins/workspace/Lucene-Solr-master-MacOSX/solr/core/src/test-files/solr/collection1/conf/synonyms.txt to /configs/conf1/synonyms.txt
   [junit4]   2> 632622 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.c.AbstractFullDistribZkTestBase Will use TLOG replicas unless explicitly asked otherwise
   [junit4]   2> 632893 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.SolrTestCaseJ4 Writing core.properties file to /Users/jenkins/workspace/Lucene-Solr-master-MacOSX/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_222A3860AFEC3C9-001/control-001/cores/collection1
   [junit4]   2> 632896 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.e.j.s.Server jetty-9.3.14.v20161028
   [junit4]   2> 632897 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@7f0ddbfd{/,null,AVAILABLE}
   [junit4]   2> 632898 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.e.j.s.AbstractConnector Started ServerConnector@3a2f74c6{HTTP/1.1,[http/1.1]}{127.0.0.1:63102}
   [junit4]   2> 632898 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.e.j.s.Server Started @637947ms
   [junit4]   2> 632898 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.c.s.e.JettySolrRunner Jetty properties: {solr.data.dir=/Users/jenkins/workspace/Lucene-Solr-master-MacOSX/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_222A3860AFEC3C9-001/tempDir-001/control/data, hostContext=/, hostPort=63101, coreRootDirectory=/Users/jenkins/workspace/Lucene-Solr-master-MacOSX/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_222A3860AFEC3C9-001/control-001/cores}
   [junit4]   2> 632899 ERROR (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.s.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be missing or incomplete.
   [junit4]   2> 632899 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.s.SolrDispatchFilter  ___      _       Welcome to Apache Solr™ version 7.0.0
   [junit4]   2> 632899 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.s.SolrDispatchFilter / __| ___| |_ _   Starting in cloud mode on port null
   [junit4]   2> 632899 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_|  Install dir: null
   [junit4]   2> 632899 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.s.SolrDispatchFilter |___/\___/_|_|    Start time: 2017-06-03T16:45:13.899Z
   [junit4]   2> 632903 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in ZooKeeper)
   [junit4]   2> 632903 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.c.SolrXmlConfig Loading container configuration from /Users/jenkins/workspace/Lucene-Solr-master-MacOSX/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_222A3860AFEC3C9-001/control-001/solr.xml
   [junit4]   2> 632913 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.c.SolrXmlConfig MBean server found: com.sun.jmx.mbeanserver.JmxMBeanServer@1348c5aa, but no JMX reporters were configured - adding default JMX reporter.
   [junit4]   2> 632918 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:63096/solr
   [junit4]   2> 632958 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [n:127.0.0.1:63101_    ] o.a.s.c.Overseer Overseer (id=null) closing
   [junit4]   2> 632959 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [n:127.0.0.1:63101_    ] o.a.s.c.OverseerElectionContext I am going to be the leader 127.0.0.1:63101_
   [junit4]   2> 632960 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [n:127.0.0.1:63101_    ] o.a.s.c.Overseer Overseer (id=98075168830849028-127.0.0.1:63101_-n_0000000000) starting
   [junit4]   2> 632970 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [n:127.0.0.1:63101_    ] o.a.s.c.ZkController Register node as live in ZooKeeper:/live_nodes/127.0.0.1:63101_
   [junit4]   2> 632977 INFO  (zkCallback-1671-thread-1-processing-n:127.0.0.1:63101_) [n:127.0.0.1:63101_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
   [junit4]   2> 633058 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [n:127.0.0.1:63101_    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.node' (registry 'solr.node') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@1348c5aa
   [junit4]   2> 633064 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [n:127.0.0.1:63101_    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jvm' (registry 'solr.jvm') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@1348c5aa
   [junit4]   2> 633064 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [n:127.0.0.1:63101_    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jetty' (registry 'solr.jetty') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@1348c5aa
   [junit4]   2> 633066 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [n:127.0.0.1:63101_    ] o.a.s.c.CorePropertiesLocator Found 1 core definitions underneath /Users/jenkins/workspace/Lucene-Solr-master-MacOSX/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_222A3860AFEC3C9-001/control-001/cores
   [junit4]   2> 633066 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [n:127.0.0.1:63101_    ] o.a.s.c.CorePropertiesLocator Cores are: [collection1]
   [junit4]   2> 633067 INFO  (coreLoadExecutor-2712-thread-1-processing-n:127.0.0.1:63101_) [n:127.0.0.1:63101_ c:control_collection   x:collection1] o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 4 transient cores
   [junit4]   2> 633072 INFO  (OverseerStateUpdate-98075168830849028-127.0.0.1:63101_-n_0000000000) [n:127.0.0.1:63101_    ] o.a.s.c.o.ReplicaMutator Assigning new node to shard shard=shard1
   [junit4]   2> 634101 INFO  (coreLoadExecutor-2712-thread-1-processing-n:127.0.0.1:63101_) [n:127.0.0.1:63101_ c:control_collection   x:collection1] o.a.s.c.SolrConfig Using Lucene MatchVersion: 7.0.0
   [junit4]   2> 634122 INFO  (coreLoadExecutor-2712-thread-1-processing-n:127.0.0.1:63101_) [n:127.0.0.1:63101_ c:control_collection   x:collection1] o.a.s.s.IndexSchema [collection1] Schema name=test
   [junit4]   2> 634315 INFO  (coreLoadExecutor-2712-thread-1-processing-n:127.0.0.1:63101_) [n:127.0.0.1:63101_ c:control_collection   x:collection1] o.a.s.s.IndexSchema Loaded schema test/1.0 with uniqueid field id
   [junit4]   2> 634344 INFO  (coreLoadExecutor-2712-thread-1-processing-n:127.0.0.1:63101_) [n:127.0.0.1:63101_ c:control_collection   x:collection1] o.a.s.c.CoreContainer Creating SolrCore 'collection1' using configuration from collection control_collection, trusted=true
   [junit4]   2> 634345 INFO  (coreLoadExecutor-2712-thread-1-processing-n:127.0.0.1:63101_) [n:127.0.0.1:63101_ c:control_collection   x:collection1] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.core.control_collection.shard1.core_node1' (registry 'solr.core.control_collection.shard1.core_node1') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@1348c5aa
   [junit4]   2> 634345 INFO  (coreLoadExecutor-2712-thread-1-processing-n:127.0.0.1:63101_) [n:127.0.0.1:63101_ c:control_collection   x:collection1] o.a.s.c.SolrCore solr.RecoveryStrategy.Builder
   [junit4]   2> 634345 INFO  (coreLoadExecutor-2712-thread-1-processing-n:127.0.0.1:63101_) [n:127.0.0.1:63101_ c:control_collection   x:collection1] o.a.s.c.SolrCore [[collection1] ] Opening new SolrCore at [/Users/jenkins/workspace/Lucene-Solr-master-MacOSX/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_222A3860AFEC3C9-001/control-001/cores/collection1], dataDir=[/Users/jenkins/workspace/Lucene-Solr-master-MacOSX/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_222A3860AFEC3C9-001/control-001/cores/collection1/data/]
   [junit4]   2> 634347 INFO  (coreLoadExecutor-2712-thread-1-processing-n:127.0.0.1:63101_) [n:127.0.0.1:63101_ c:control_collection   x:collection1] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.AlcoholicMergePolicy: [AlcoholicMergePolicy: minMergeSize=0, mergeFactor=10, maxMergeSize=786670329, maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=true, maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.1]
   [junit4]   2> 634354 WARN  (coreLoadExecutor-2712-thread-1-processing-n:127.0.0.1:63101_) [n:127.0.0.1:63101_ c:control_collection   x:collection1] o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type = requestHandler,name = /dump,class = DumpRequestHandler,attributes = {initParams=a, name=/dump, class=DumpRequestHandler},args = {defaults={a=A,b=B}}}
   [junit4]   2> 634472 INFO  (coreLoadExecutor-2712-thread-1-processing-n:127.0.0.1:63101_) [n:127.0.0.1:63101_ c:control_collection   x:collection1] o.a.s.u.UpdateHandler Using UpdateLog implementation: org.apache.solr.update.UpdateLog
   [junit4]   2> 634472 INFO  (coreLoadExecutor-2712-thread-1-processing-n:127.0.0.1:63101_) [n:127.0.0.1:63101_ c:control_collection   x:collection1] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 634473 INFO  (coreLoadExecutor-2712-thread-1-processing-n:127.0.0.1:63101_) [n:127.0.0.1:63101_ c:control_collection   x:collection1] o.a.s.u.CommitTracker Hard AutoCommit: disabled
   [junit4]   2> 634473 INFO  (coreLoadExecutor-2712-thread-1-processing-n:127.0.0.1:63101_) [n:127.0.0.1:63101_ c:control_collection   x:collection1] o.a.s.u.CommitTracker Soft AutoCommit: disabled
   [junit4]   2> 634479 INFO  (coreLoadExecutor-2712-thread-1-processing-n:127.0.0.1:63101_) [n:127.0.0.1:63101_ c:control_collection   x:collection1] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy: maxMergeAtOnce=49, maxMergeAtOnceExplicit=39, maxMergedSegmentMB=87.08984375, floorSegmentMB=0.7314453125, forceMergeDeletesPctAllowed=3.8165469186865186, segmentsPerTier=20.0, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.0
   [junit4]   2> 634480 INFO  (coreLoadExecutor-2712-thread-1-processing-n:127.0.0.1:63101_) [n:127.0.0.1:63101_ c:control_collection   x:collection1] o.a.s.s.SolrIndexSearcher Opening [Searcher@324cb31c[collection1] main]
   [junit4]   2> 634481 INFO  (coreLoadExecutor-2712-thread-1-processing-n:127.0.0.1:63101_) [n:127.0.0.1:63101_ c:control_collection   x:collection1] o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
   [junit4]   2> 634482 INFO  (coreLoadExecutor-2712-thread-1-processing-n:127.0.0.1:63101_) [n:127.0.0.1:63101_ c:control_collection   x:collection1] o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 634483 INFO  (coreLoadExecutor-2712-thread-1-processing-n:127.0.0.1:63101_) [n:127.0.0.1:63101_ c:control_collection   x:collection1] o.a.s.h.ReplicationHandler Commits will be reserved for  10000
   [junit4]   2> 634483 INFO  (coreLoadExecutor-2712-thread-1-processing-n:127.0.0.1:63101_) [n:127.0.0.1:63101_ c:control_collection   x:collection1] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1569202703415902208
   [junit4]   2> 634485 INFO  (searcherExecutor-2713-thread-1-processing-n:127.0.0.1:63101_ x:collection1 c:control_collection) [n:127.0.0.1:63101_ c:control_collection   x:collection1] o.a.s.c.SolrCore [collection1] Registered new searcher Searcher@324cb31c[collection1] main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 634493 INFO  (coreZkRegister-2705-thread-1-processing-n:127.0.0.1:63101_ x:collection1 c:control_collection) [n:127.0.0.1:63101_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.c.ShardLeaderElectionContext Enough replicas found to continue.
   [junit4]   2> 634493 INFO  (coreZkRegister-2705-thread-1-processing-n:127.0.0.1:63101_ x:collection1 c:control_collection) [n:127.0.0.1:63101_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.c.ShardLeaderElectionContext I may be the new leader - try and sync
   [junit4]   2> 634493 INFO  (coreZkRegister-2705-thread-1-processing-n:127.0.0.1:63101_ x:collection1 c:control_collection) [n:127.0.0.1:63101_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.c.SyncStrategy Sync replicas to http://127.0.0.1:63101/collection1/
   [junit4]   2> 634493 INFO  (coreZkRegister-2705-thread-1-processing-n:127.0.0.1:63101_ x:collection1 c:control_collection) [n:127.0.0.1:63101_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.c.SyncStrategy Sync Success - now sync replicas to me
   [junit4]   2> 634493 INFO  (coreZkRegister-2705-thread-1-processing-n:127.0.0.1:63101_ x:collection1 c:control_collection) [n:127.0.0.1:63101_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.c.SyncStrategy http://127.0.0.1:63101/collection1/ has no replicas
   [junit4]   2> 634493 INFO  (coreZkRegister-2705-thread-1-processing-n:127.0.0.1:63101_ x:collection1 c:control_collection) [n:127.0.0.1:63101_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.c.ShardLeaderElectionContext Found all replicas participating in election, clear LIR
   [junit4]   2> 634499 INFO  (coreZkRegister-2705-thread-1-processing-n:127.0.0.1:63101_ x:collection1 c:control_collection) [n:127.0.0.1:63101_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.c.ShardLeaderElectionContext I am the new leader: http://127.0.0.1:63101/collection1/ shard1
   [junit4]   2> 634603 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
   [junit4]   2> 634610 INFO  (coreZkRegister-2705-thread-1-processing-n:127.0.0.1:63101_ x:collection1 c:control_collection) [n:127.0.0.1:63101_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.c.ZkController I am the leader, no recovery necessary
   [junit4]   2> 634612 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:63096/solr ready
   [junit4]   2> 634612 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.c.ChaosMonkey monkey: init - expire sessions:false cause connection loss:false
   [junit4]   2> 635054 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.SolrTestCaseJ4 Writing core.properties file to /Users/jenkins/workspace/Lucene-Solr-master-MacOSX/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_222A3860AFEC3C9-001/shard-1-001/cores/collection1
   [junit4]   2> 635056 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.c.AbstractFullDistribZkTestBase create jetty 1 in directory /Users/jenkins/workspace/Lucene-Solr-master-MacOSX/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_222A3860AFEC3C9-001/shard-1-001 of type TLOG
   [junit4]   2> 635056 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.e.j.s.Server jetty-9.3.14.v20161028
   [junit4]   2> 635058 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@3cecf470{/,null,AVAILABLE}
   [junit4]   2> 635058 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.e.j.s.AbstractConnector Started ServerConnector@53fa9aa2{HTTP/1.1,[http/1.1]}{127.0.0.1:63109}
   [junit4]   2> 635059 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.e.j.s.Server Started @640107ms
   [junit4]   2> 635059 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.c.s.e.JettySolrRunner Jetty properties: {solr.data.dir=/Users/jenkins/workspace/Lucene-Solr-master-MacOSX/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_222A3860AFEC3C9-001/tempDir-001/jetty1, replicaType=TLOG, solrconfig=solrconfig.xml, hostContext=/, hostPort=63108, coreRootDirectory=/Users/jenkins/workspace/Lucene-Solr-master-MacOSX/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_222A3860AFEC3C9-001/shard-1-001/cores}
   [junit4]   2> 635060 ERROR (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.s.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be missing or incomplete.
   [junit4]   2> 635060 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.s.SolrDispatchFilter  ___      _       Welcome to Apache Solr™ version 7.0.0
   [junit4]   2> 635060 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.s.SolrDispatchFilter / __| ___| |_ _   Starting in cloud mode on port null
   [junit4]   2> 635060 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_|  Install dir: null
   [junit4]   2> 635060 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.s.SolrDispatchFilter |___/\___/_|_|    Start time: 2017-06-03T16:45:16.060Z
   [junit4]   2> 635064 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in ZooKeeper)
   [junit4]   2> 635064 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.c.SolrXmlConfig Loading container configuration from /Users/jenkins/workspace/Lucene-Solr-master-MacOSX/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_222A3860AFEC3C9-001/shard-1-001/solr.xml
   [junit4]   2> 635077 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.c.SolrXmlConfig MBean server found: com.sun.jmx.mbeanserver.JmxMBeanServer@1348c5aa, but no JMX reporters were configured - adding default JMX reporter.
   [junit4]   2> 635081 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:63096/solr
   [junit4]   2> 635097 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [n:127.0.0.1:63108_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
   [junit4]   2> 635102 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [n:127.0.0.1:63108_    ] o.a.s.c.Overseer Overseer (id=null) closing
   [junit4]   2> 635106 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [n:127.0.0.1:63108_    ] o.a.s.c.ZkController Register node as live in ZooKeeper:/live_nodes/127.0.0.1:63108_
   [junit4]   2> 635108 INFO  (zkCallback-1675-thread-1) [    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
   [junit4]   2> 635109 INFO  (zkCallback-1671-thread-1-processing-n:127.0.0.1:63101_) [n:127.0.0.1:63101_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
   [junit4]   2> 635110 INFO  (zkCallback-1680-thread-1-processing-n:127.0.0.1:63108_) [n:127.0.0.1:63108_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
   [junit4]   2> 635354 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [n:127.0.0.1:63108_    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.node' (registry 'solr.node') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@1348c5aa
   [junit4]   2> 635368 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [n:127.0.0.1:63108_    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jvm' (registry 'solr.jvm') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@1348c5aa
   [junit4]   2> 635368 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [n:127.0.0.1:63108_    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jetty' (registry 'solr.jetty') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@1348c5aa
   [junit4]   2> 635370 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [n:127.0.0.1:63108_    ] o.a.s.c.CorePropertiesLocator Found 1 core definitions underneath /Users/jenkins/workspace/Lucene-Solr-master-MacOSX/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_222A3860AFEC3C9-001/shard-1-001/cores
   [junit4]   2> 635370 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [n:127.0.0.1:63108_    ] o.a.s.c.CorePropertiesLocator Cores are: [collection1]
   [junit4]   2> 635372 INFO  (coreLoadExecutor-2723-thread-1-processing-n:127.0.0.1:63108_) [n:127.0.0.1:63108_ c:collection1   x:collection1] o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 4 transient cores
   [junit4]   2> 635373 INFO  (OverseerStateUpdate-98075168830849028-127.0.0.1:63101_-n_0000000000) [n:127.0.0.1:63101_    ] o.a.s.c.o.ReplicaMutator Assigning new node to shard shard=shard2
   [junit4]   2> 636405 INFO  (coreLoadExecutor-2723-thread-1-processing-n:127.0.0.1:63108_) [n:127.0.0.1:63108_ c:collection1   x:collection1] o.a.s.c.SolrConfig Using Lucene MatchVersion: 7.0.0
   [junit4]   2> 636425 INFO  (coreLoadExecutor-2723-thread-1-processing-n:127.0.0.1:63108_) [n:127.0.0.1:63108_ c:collection1   x:collection1] o.a.s.s.IndexSchema [collection1] Schema name=test
   [junit4]   2> 636677 INFO  (coreLoadExecutor-2723-thread-1-processing-n:127.0.0.1:63108_) [n:127.0.0.1:63108_ c:collection1   x:collection1] o.a.s.s.IndexSchema Loaded schema test/1.0 with uniqueid field id
   [junit4]   2> 636709 INFO  (coreLoadExecutor-2723-thread-1-processing-n:127.0.0.1:63108_) [n:127.0.0.1:63108_ c:collection1   x:collection1] o.a.s.c.CoreContainer Creating SolrCore 'collection1' using configuration from collection collection1, trusted=true
   [junit4]   2> 636709 INFO  (coreLoadExecutor-2723-thread-1-processing-n:127.0.0.1:63108_) [n:127.0.0.1:63108_ c:collection1   x:collection1] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.core.collection1.shard2.core_node1' (registry 'solr.core.collection1.shard2.core_node1') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@1348c5aa
   [junit4]   2> 636709 INFO  (coreLoadExecutor-2723-thread-1-processing-n:127.0.0.1:63108_) [n:127.0.0.1:63108_ c:collection1   x:collection1] o.a.s.c.SolrCore solr.RecoveryStrategy.Builder
   [junit4]   2> 636709 INFO  (coreLoadExecutor-2723-thread-1-processing-n:127.0.0.1:63108_) [n:127.0.0.1:63108_ c:collection1   x:collection1] o.a.s.c.SolrCore [[collection1] ] Opening new SolrCore at [/Users/jenkins/workspace/Lucene-Solr-master-MacOSX/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_222A3860AFEC3C9-001/shard-1-001/cores/collection1], dataDir=[/Users/jenkins/workspace/Lucene-Solr-master-MacOSX/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_222A3860AFEC3C9-001/shard-1-001/cores/collection1/data/]
   [junit4]   2> 636712 INFO  (coreLoadExecutor-2723-thread-1-processing-n:127.0.0.1:63108_) [n:127.0.0.1:63108_ c:collection1   x:collection1] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.AlcoholicMergePolicy: [AlcoholicMergePolicy: minMergeSize=0, mergeFactor=10, maxMergeSize=786670329, maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=true, maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.1]
   [junit4]   2> 636715 WARN  (coreLoadExecutor-2723-thread-1-processing-n:127.0.0.1:63108_) [n:127.0.0.1:63108_ c:collection1   x:collection1] o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type = requestHandler,name = /dump,class = DumpRequestHandler,attributes = {initParams=a, name=/dump, class=DumpRequestHandler},args = {defaults={a=A,b=B}}}
   [junit4]   2> 636888 INFO  (coreLoadExecutor-2723-thread-1-processing-n:127.0.0.1:63108_) [n:127.0.0.1:63108_ c:collection1   x:collection1] o.a.s.u.UpdateHandler Using UpdateLog implementation: org.apache.solr.update.UpdateLog
   [junit4]   2> 636888 INFO  (coreLoadExecutor-2723-thread-1-processing-n:127.0.0.1:63108_) [n:127.0.0.1:63108_ c:collection1   x:collection1] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 636890 INFO  (coreLoadExecutor-2723-thread-1-processing-n:127.0.0.1:63108_) [n:127.0.0.1:63108_ c:collection1   x:collection1] o.a.s.u.CommitTracker Hard AutoCommit: disabled
   [junit4]   2> 636890 INFO  (coreLoadExecutor-2723-thread-1-processing-n:127.0.0.1:63108_) [n:127.0.0.1:63108_ c:collection1   x:collection1] o.a.s.u.CommitTracker Soft AutoCommit: disabled
   [junit4]   2> 636891 INFO  (coreLoadExecutor-2723-thread-1-processing-n:127.0.0.1:63108_) [n:127.0.0.1:63108_ c:collection1   x:collection1] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy: maxMergeAtOnce=49, maxMergeAtOnceExplicit=39, maxMergedSegmentMB=87.08984375, floorSegmentMB=0.7314453125, forceMergeDeletesPctAllowed=3.8165469186865186, segmentsPerTier=20.0, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.0
   [junit4]   2> 636891 INFO  (coreLoadExecutor-2723-thread-1-processing-n:127.0.0.1:63108_) [n:127.0.0.1:63108_ c:collection1   x:collection1] o.a.s.s.SolrIndexSearcher Opening [Searcher@791ce2b3[collection1] main]
   [junit4]   2> 636893 INFO  (coreLoadExecutor-2723-thread-1-processing-n:127.0.0.1:63108_) [n:127.0.0.1:63108_ c:collection1   x:collection1] o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
   [junit4]   2> 636893 INFO  (coreLoadExecutor-2723-thread-1-processing-n:127.0.0.1:63108_) [n:127.0.0.1:63108_ c:collection1   x:collection1] o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 636894 INFO  (coreLoadExecutor-2723-thread-1-processing-n:127.0.0.1:63108_) [n:127.0.0.1:63108_ c:collection1   x:collection1] o.a.s.h.ReplicationHandler Commits will be reserved for  10000
   [junit4]   2> 636894 INFO  (coreLoadExecutor-2723-thread-1-processing-n:127.0.0.1:63108_) [n:127.0.0.1:63108_ c:collection1   x:collection1] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1569202705944018944
   [junit4]   2> 636897 INFO  (searcherExecutor-2724-thread-1-processing-n:127.0.0.1:63108_ x:collection1 c:collection1) [n:127.0.0.1:63108_ c:collection1   x:collection1] o.a.s.c.SolrCore [collection1] Registered new searcher Searcher@791ce2b3[collection1] main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 636910 INFO  (coreZkRegister-2718-thread-1-processing-n:127.0.0.1:63108_ x:collection1 c:collection1) [n:127.0.0.1:63108_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.c.ShardLeaderElectionContext Enough replicas found to continue.
   [junit4]   2> 636910 INFO  (coreZkRegister-2718-thread-1-processing-n:127.0.0.1:63108_ x:collection1 c:collection1) [n:127.0.0.1:63108_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.c.ShardLeaderElectionContext I may be the new leader - try and sync
   [junit4]   2> 636910 INFO  (coreZkRegister-2718-thread-1-processing-n:127.0.0.1:63108_ x:collection1 c:collection1) [n:127.0.0.1:63108_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.c.SyncStrategy Sync replicas to http://127.0.0.1:63108/collection1/
   [junit4]   2> 636910 INFO  (coreZkRegister-2718-thread-1-processing-n:127.0.0.1:63108_ x:collection1 c:collection1) [n:127.0.0.1:63108_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.c.SyncStrategy Sync Success - now sync replicas to me
   [junit4]   2> 636910 INFO  (coreZkRegister-2718-thread-1-processing-n:127.0.0.1:63108_ x:collection1 c:collection1) [n:127.0.0.1:63108_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.c.SyncStrategy http://127.0.0.1:63108/collection1/ has no replicas
   [junit4]   2> 636910 INFO  (coreZkRegister-2718-thread-1-processing-n:127.0.0.1:63108_ x:collection1 c:collection1) [n:127.0.0.1:63108_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.c.ShardLeaderElectionContext Found all replicas participating in election, clear LIR
   [junit4]   2> 636911 INFO  (coreZkRegister-2718-thread-1-processing-n:127.0.0.1:63108_ x:collection1 c:collection1) [n:127.0.0.1:63108_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.c.ZkController collection1 stopping background replication from leader
   [junit4]   2> 636921 INFO  (coreZkRegister-2718-thread-1-processing-n:127.0.0.1:63108_ x:collection1 c:collection1) [n:127.0.0.1:63108_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.c.ShardLeaderElectionContext I am the new leader: http://127.0.0.1:63108/collection1/ shard2
   [junit4]   2> 637117 INFO  (coreZkRegister-2718-thread-1-processing-n:127.0.0.1:63108_ x:collection1 c:collection1) [n:127.0.0.1:63108_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.c.ZkController I am the leader, no recovery necessary
   [junit4]   2> 637702 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.SolrTestCaseJ4 Writing core.properties file to /Users/jenkins/workspace/Lucene-Solr-master-MacOSX/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_222A3860AFEC3C9-001/shard-2-001/cores/collection1
   [junit4]   2> 637711 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.c.AbstractFullDistribZkTestBase create jetty 2 in directory /Users/jenkins/workspace/Lucene-Solr-master-MacOSX/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_222A3860AFEC3C9-001/shard-2-001 of type TLOG
   [junit4]   2> 637728 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.e.j.s.Server jetty-9.3.14.v20161028
   [junit4]   2> 637731 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@57f49d2e{/,null,AVAILABLE}
   [junit4]   2> 637732 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.e.j.s.AbstractConnector Started ServerConnector@56b07fc9{HTTP/1.1,[http/1.1]}{127.0.0.1:63120}
   [junit4]   2> 637732 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.e.j.s.Server Started @642781ms
   [junit4]   2> 637732 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.c.s.e.JettySolrRunner Jetty properties: {solr.data.dir=/Users/jenkins/workspace/Lucene-Solr-master-MacOSX/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_222A3860AFEC3C9-001/tempDir-001/jetty2, replicaType=TLOG, solrconfig=solrconfig.xml, hostContext=/, hostPort=63119, coreRootDirectory=/Users/jenkins/workspace/Lucene-Solr-master-MacOSX/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_222A3860AFEC3C9-001/shard-2-001/cores}
   [junit4]   2> 637732 ERROR (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.s.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be missing or incomplete.
   [junit4]   2> 637732 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.s.SolrDispatchFilter  ___      _       Welcome to Apache Solr™ version 7.0.0
   [junit4]   2> 637732 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.s.SolrDispatchFilter / __| ___| |_ _   Starting in cloud mode on port null
   [junit4]   2> 637732 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_|  Install dir: null
   [junit4]   2> 637733 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.s.SolrDispatchFilter |___/\___/_|_|    Start time: 2017-06-03T16:45:18.733Z
   [junit4]   2> 637741 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in ZooKeeper)
   [junit4]   2> 637741 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.c.SolrXmlConfig Loading container configuration from /Users/jenkins/workspace/Lucene-Solr-master-MacOSX/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_222A3860AFEC3C9-001/shard-2-001/solr.xml
   [junit4]   2> 637774 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.c.SolrXmlConfig MBean server found: com.sun.jmx.mbeanserver.JmxMBeanServer@1348c5aa, but no JMX reporters were configured - adding default JMX reporter.
   [junit4]   2> 637787 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:63096/solr
   [junit4]   2> 637825 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [n:127.0.0.1:63119_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (2)
   [junit4]   2> 637828 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [n:127.0.0.1:63119_    ] o.a.s.c.Overseer Overseer (id=null) closing
   [junit4]   2> 637840 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [n:127.0.0.1:63119_    ] o.a.s.c.ZkController Register node as live in ZooKeeper:/live_nodes/127.0.0.1:63119_
   [junit4]   2> 637844 INFO  (zkCallback-1675-thread-1) [    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3)
   [junit4]   2> 637845 INFO  (zkCallback-1686-thread-1-processing-n:127.0.0.1:63119_) [n:127.0.0.1:63119_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3)
   [junit4]   2> 637844 INFO  (zkCallback-1680-thread-1-processing-n:127.0.0.1:63108_) [n:127.0.0.1:63108_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3)
   [junit4]   2> 637844 INFO  (zkCallback-1671-thread-2-processing-n:127.0.0.1:63101_) [n:127.0.0.1:63101_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3)
   [junit4]   2> 638036 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [n:127.0.0.1:63119_    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.node' (registry 'solr.node') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@1348c5aa
   [junit4]   2> 638046 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [n:127.0.0.1:63119_    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jvm' (registry 'solr.jvm') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@1348c5aa
   [junit4]   2> 638046 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [n:127.0.0.1:63119_    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jetty' (registry 'solr.jetty') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@1348c5aa
   [junit4]   2> 638049 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [n:127.0.0.1:63119_    ] o.a.s.c.CorePropertiesLocator Found 1 core definitions underneath /Users/jenkins/workspace/Lucene-Solr-master-MacOSX/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_222A3860AFEC3C9-001/shard-2-001/cores
   [junit4]   2> 638049 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [n:127.0.0.1:63119_    ] o.a.s.c.CorePropertiesLocator Cores are: [collection1]
   [junit4]   2> 638050 INFO  (coreLoadExecutor-2734-thread-1-processing-n:127.0.0.1:63119_) [n:127.0.0.1:63119_ c:collection1   x:collection1] o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 4 transient cores
   [junit4]   2> 638053 INFO  (OverseerStateUpdate-98075168830849028-127.0.0.1:63101_-n_0000000000) [n:127.0.0.1:63101_    ] o.a.s.c.o.ReplicaMutator Assigning new node to shard shard=shard1
   [junit4]   2> 639141 INFO  (coreLoadExecutor-2734-thread-1-processing-n:127.0.0.1:63119_) [n:127.0.0.1:63119_ c:collection1   x:collection1] o.a.s.c.SolrConfig Using Lucene MatchVersion: 7.0.0
   [junit4]   2> 639194 INFO  (coreLoadExecutor-2734-thread-1-processing-n:127.0.0.1:63119_) [n:127.0.0.1:63119_ c:collection1   x:collection1] o.a.s.s.IndexSchema [collection1] Schema name=test
   [junit4]   2> 639472 INFO  (coreLoadExecutor-2734-thread-1-processing-n:127.0.0.1:63119_) [n:127.0.0.1:63119_ c:collection1   x:collection1] o.a.s.s.IndexSchema Loaded schema test/1.0 with uniqueid field id
   [junit4]   2> 639497 INFO  (coreLoadExecutor-2734-thread-1-processing-n:127.0.0.1:63119_) [n:127.0.0.1:63119_ c:collection1   x:collection1] o.a.s.c.CoreContainer Creating SolrCore 'collection1' using configuration from collection collection1, trusted=true
   [junit4]   2> 639498 INFO  (coreLoadExecutor-2734-thread-1-processing-n:127.0.0.1:63119_) [n:127.0.0.1:63119_ c:collection1   x:collection1] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.core.collection1.shard1.core_node2' (registry 'solr.core.collection1.shard1.core_node2') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@1348c5aa
   [junit4]   2> 639498 INFO  (coreLoadExecutor-2734-thread-1-processing-n:127.0.0.1:63119_) [n:127.0.0.1:63119_ c:collection1   x:collection1] o.a.s.c.SolrCore solr.RecoveryStrategy.Builder
   [junit4]   2> 639498 INFO  (coreLoadExecutor-2734-thread-1-processing-n:127.0.0.1:63119_) [n:127.0.0.1:63119_ c:collection1   x:collection1] o.a.s.c.SolrCore [[collection1] ] Opening new SolrCore at [/Users/jenkins/workspace/Lucene-Solr-master-MacOSX/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_222A3860AFEC3C9-001/shard-2-001/cores/collection1], dataDir=[/Users/jenkins/workspace/Lucene-Solr-master-MacOSX/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_222A3860AFEC3C9-001/shard-2-001/cores/collection1/data/]
   [junit4]   2> 639509 INFO  (coreLoadExecutor-2734-thread-1-processing-n:127.0.0.1:63119_) [n:127.0.0.1:63119_ c:collection1   x:collection1] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.AlcoholicMergePolicy: [AlcoholicMergePolicy: minMergeSize=0, mergeFactor=10, maxMergeSize=786670329, maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=true, maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.1]
   [junit4]   2> 639517 WARN  (coreLoadExecutor-2734-thread-1-processing-n:127.0.0.1:63119_) [n:127.0.0.1:63119_ c:collection1   x:collection1] o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type = requestHandler,name = /dump,class = DumpRequestHandler,attributes = {initParams=a, name=/dump, class=DumpRequestHandler},args = {defaults={a=A,b=B}}}
   [junit4]   2> 639694 INFO  (coreLoadExecutor-2734-thread-1-processing-n:127.0.0.1:63119_) [n:127.0.0.1:63119_ c:collection1   x:collection1] o.a.s.u.UpdateHandler Using UpdateLog implementation: org.apache.solr.update.UpdateLog
   [junit4]   2> 639694 INFO  (coreLoadExecutor-2734-thread-1-processing-n:127.0.0.1:63119_) [n:127.0.0.1:63119_ c:collection1   x:collection1] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 639698 INFO  (coreLoadExecutor-2734-thread-1-processing-n:127.0.0.1:63119_) [n:127.0.0.1:63119_ c:collection1   x:collection1] o.a.s.u.CommitTracker Hard AutoCommit: disabled
   [junit4]   2> 639699 INFO  (coreLoadExecutor-2734-thread-1-processing-n:127.0.0.1:63119_) [n:127.0.0.1:63119_ c:collection1   x:collection1] o.a.s.u.CommitTracker Soft AutoCommit: disabled
   [junit4]   2> 639705 INFO  (coreLoadExecutor-2734-thread-1-processing-n:127.0.0.1:63119_) [n:127.0.0.1:63119_ c:collection1   x:collection1] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy: maxMergeAtOnce=49, maxMergeAtOnceExplicit=39, maxMergedSegmentMB=87.08984375, floorSegmentMB=0.7314453125, forceMergeDeletesPctAllowed=3.8165469186865186, segmentsPerTier=20.0, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.0
   [junit4]   2> 639705 INFO  (coreLoadExecutor-2734-thread-1-processing-n:127.0.0.1:63119_) [n:127.0.0.1:63119_ c:collection1   x:collection1] o.a.s.s.SolrIndexSearcher Opening [Searcher@5de6cf29[collection1] main]
   [junit4]   2> 639706 INFO  (coreLoadExecutor-2734-thread-1-processing-n:127.0.0.1:63119_) [n:127.0.0.1:63119_ c:collection1   x:collection1] o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
   [junit4]   2> 639707 INFO  (coreLoadExecutor-2734-thread-1-processing-n:127.0.0.1:63119_) [n:127.0.0.1:63119_ c:collection1   x:collection1] o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 639707 INFO  (coreLoadExecutor-2734-thread-1-processing-n:127.0.0.1:63119_) [n:127.0.0.1:63119_ c:collection1   x:collection1] o.a.s.h.ReplicationHandler Commits will be reserved for  10000
   [junit4]   2> 639708 INFO  (coreLoadExecutor-2734-thread-1-processing-n:127.0.0.1:63119_) [n:127.0.0.1:63119_ c:collection1   x:collection1] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1569202708894711808
   [junit4]   2> 639710 INFO  (searcherExecutor-2735-thread-1-processing-n:127.0.0.1:63119_ x:collection1 c:collection1) [n:127.0.0.1:63119_ c:collection1   x:collection1] o.a.s.c.SolrCore [collection1] Registered new searcher Searcher@5de6cf29[collection1] main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 639720 INFO  (coreZkRegister-2729-thread-1-processing-n:127.0.0.1:63119_ x:collection1 c:collection1) [n:127.0.0.1:63119_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.c.ShardLeaderElectionContext Enough replicas found to continue.
   [junit4]   2> 639720 INFO  (coreZkRegister-2729-thread-1-processing-n:127.0.0.1:63119_ x:collection1 c:collection1) [n:127.0.0.1:63119_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.c.ShardLeaderElectionContext I may be the new leader - try and sync
   [junit4]   2> 639720 INFO  (coreZkRegister-2729-thread-1-processing-n:127.0.0.1:63119_ x:collection1 c:collection1) [n:127.0.0.1:63119_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.c.SyncStrategy Sync replicas to http://127.0.0.1:63119/collection1/
   [junit4]   2> 639720 INFO  (coreZkRegister-2729-thread-1-processing-n:127.0.0.1:63119_ x:collection1 c:collection1) [n:127.0.0.1:63119_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.c.SyncStrategy Sync Success - now sync replicas to me
   [junit4]   2> 639720 INFO  (coreZkRegister-2729-thread-1-processing-n:127.0.0.1:63119_ x:collection1 c:collection1) [n:127.0.0.1:63119_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.c.SyncStrategy http://127.0.0.1:63119/collection1/ has no replicas
   [junit4]   2> 639720 INFO  (coreZkRegister-2729-thread-1-processing-n:127.0.0.1:63119_ x:collection1 c:collection1) [n:127.0.0.1:63119_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.c.ShardLeaderElectionContext Found all replicas participating in election, clear LIR
   [junit4]   2> 639721 INFO  (coreZkRegister-2729-thread-1-processing-n:127.0.0.1:63119_ x:collection1 c:collection1) [n:127.0.0.1:63119_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.c.ZkController collection1 stopping background replication from leader
   [junit4]   2> 639732 INFO  (coreZkRegister-2729-thread-1-processing-n:127.0.0.1:63119_ x:collection1 c:collection1) [n:127.0.0.1:63119_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.c.ShardLeaderElectionContext I am the new leader: http://127.0.0.1:63119/collection1/ shard1
   [junit4]   2> 639850 INFO  (coreZkRegister-2729-thread-1-processing-n:127.0.0.1:63119_ x:collection1 c:collection1) [n:127.0.0.1:63119_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.c.ZkController I am the leader, no recovery necessary
   [junit4]   2> 640966 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.SolrTestCaseJ4 Writing core.properties file to /Users/jenkins/workspace/Lucene-Solr-master-MacOSX/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_222A3860AFEC3C9-001/shard-3-001/cores/collection1
   [junit4]   2> 640968 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.c.AbstractFullDistribZkTestBase create jetty 3 in directory /Users/jenkins/workspace/Lucene-Solr-master-MacOSX/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_222A3860AFEC3C9-001/shard-3-001 of type TLOG
   [junit4]   2> 640968 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.e.j.s.Server jetty-9.3.14.v20161028
   [junit4]   2> 640971 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@14e6ca48{/,null,AVAILABLE}
   [junit4]   2> 640971 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.e.j.s.AbstractConnector Started ServerConnector@6d05fc54{HTTP/1.1,[http/1.1]}{127.0.0.1:63158}
   [junit4]   2> 640971 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.e.j.s.Server Started @646020ms
   [junit4]   2> 640971 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.c.s.e.JettySolrRunner Jetty properties: {solr.data.dir=/Users/jenkins/workspace/Lucene-Solr-master-MacOSX/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_222A3860AFEC3C9-001/tempDir-001/jetty3, replicaType=TLOG, solrconfig=solrconfig.xml, hostContext=/, hostPort=63157, coreRootDirectory=/Users/jenkins/workspace/Lucene-Solr-master-MacOSX/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_222A3860AFEC3C9-001/shard-3-001/cores}
   [junit4]   2> 640971 ERROR (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.s.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be missing or incomplete.
   [junit4]   2> 640984 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.s.SolrDispatchFilter  ___      _       Welcome to Apache Solr™ version 7.0.0
   [junit4]   2> 640984 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.s.SolrDispatchFilter / __| ___| |_ _   Starting in cloud mode on port null
   [junit4]   2> 640984 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_|  Install dir: null
   [junit4]   2> 640984 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.s.SolrDispatchFilter |___/\___/_|_|    Start time: 2017-06-03T16:45:21.984Z
   [junit4]   2> 640998 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in ZooKeeper)
   [junit4]   2> 640998 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.c.SolrXmlConfig Loading container configuration from /Users/jenkins/workspace/Lucene-Solr-master-MacOSX/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_222A3860AFEC3C9-001/shard-3-001/solr.xml
   [junit4]   2> 641013 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.c.SolrXmlConfig MBean server found: com.sun.jmx.mbeanserver.JmxMBeanServer@1348c5aa, but no JMX reporters were configured - adding default JMX reporter.
   [junit4]   2> 641018 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:63096/solr
   [junit4]   2> 641038 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [n:127.0.0.1:63157_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (3)
   [junit4]   2> 641051 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [n:127.0.0.1:63157_    ] o.a.s.c.Overseer Overseer (id=null) closing
   [junit4]   2> 641063 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [n:127.0.0.1:63157_    ] o.a.s.c.ZkController Register node as live in ZooKeeper:/live_nodes/127.0.0.1:63157_
   [junit4]   2> 641066 INFO  (zkCallback-1680-thread-1-processing-n:127.0.0.1:63108_) [n:127.0.0.1:63108_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
   [junit4]   2> 641067 INFO  (zkCallback-1692-thread-1-processing-n:127.0.0.1:63157_) [n:127.0.0.1:63157_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
   [junit4]   2> 641066 INFO  (zkCallback-1671-thread-2-processing-n:127.0.0.1:63101_) [n:127.0.0.1:63101_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
   [junit4]   2> 641066 INFO  (zkCallback-1675-thread-1) [    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
   [junit4]   2> 641066 INFO  (zkCallback-1686-thread-1-processing-n:127.0.0.1:63119_) [n:127.0.0.1:63119_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
   [junit4]   2> 641332 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [n:127.0.0.1:63157_    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.node' (registry 'solr.node') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@1348c5aa
   [junit4]   2> 641338 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [n:127.0.0.1:63157_    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jvm' (registry 'solr.jvm') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@1348c5aa
   [junit4]   2> 641338 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [n:127.0.0.1:63157_    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jetty' (registry 'solr.jetty') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@1348c5aa
   [junit4]   2> 641340 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [n:127.0.0.1:63157_    ] o.a.s.c.CorePropertiesLocator Found 1 core definitions underneath /Users/jenkins/workspace/Lucene-Solr-master-MacOSX/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_222A3860AFEC3C9-001/shard-3-001/cores
   [junit4]   2> 641340 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [n:127.0.0.1:63157_    ] o.a.s.c.CorePropertiesLocator Cores are: [collection1]
   [junit4]   2> 641341 INFO  (coreLoadExecutor-2745-thread-1-processing-n:127.0.0.1:63157_) [n:127.0.0.1:63157_ c:collection1   x:collection1] o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 4 transient cores
   [junit4]   2> 641343 INFO  (OverseerStateUpdate-98075168830849028-127.0.0.1:63101_-n_0000000000) [n:127.0.0.1:63101_    ] o.a.s.c.o.ReplicaMutator Assigning new node to shard shard=shard2
   [junit4]   2> 642382 INFO  (coreLoadExecutor-2745-thread-1-processing-n:127.0.0.1:63157_) [n:127.0.0.1:63157_ c:collection1   x:collection1] o.a.s.c.SolrConfig Using Lucene MatchVersion: 7.0.0
   [junit4]   2> 642414 INFO  (coreLoadExecutor-2745-thread-1-processing-n:127.0.0.1:63157_) [n:127.0.0.1:63157_ c:collection1   x:collection1] o.a.s.s.IndexSchema [collection1] Schema name=test
   [junit4]   2> 642576 INFO  (coreLoadExecutor-2745-thread-1-processing-n:127.0.0.1:63157_) [n:127.0.0.1:63157_ c:collection1   x:collection1] o.a.s.s.IndexSchema Loaded schema test/1.0 with uniqueid field id
   [junit4]   2> 642603 INFO  (coreLoadExecutor-2745-thread-1-processing-n:127.0.0.1:63157_) [n:127.0.0.1:63157_ c:collection1   x:collection1] o.a.s.c.CoreContainer Creating SolrCore 'collection1' using configuration from collection collection1, trusted=true
   [junit4]   2> 642604 INFO  (coreLoadExecutor-2745-thread-1-processing-n:127.0.0.1:63157_) [n:127.0.0.1:63157_ c:collection1   x:collection1] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.core.collection1.shard2.core_node3' (registry 'solr.core.collection1.shard2.core_node3') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@1348c5aa
   [junit4]   2> 642604 INFO  (coreLoadExecutor-2745-thread-1-processing-n:127.0.0.1:63157_) [n:127.0.0.1:63157_ c:collection1   x:collection1] o.a.s.c.SolrCore solr.RecoveryStrategy.Builder
   [junit4]   2> 642604 INFO  (coreLoadExecutor-2745-thread-1-processing-n:127.0.0.1:63157_) [n:127.0.0.1:63157_ c:collection1   x:collection1] o.a.s.c.SolrCore [[collection1] ] Opening new SolrCore at [/Users/jenkins/workspace/Lucene-Solr-master-MacOSX/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_222A3860AFEC3C9-001/shard-3-001/cores/collection1], dataDir=[/Users/jenkins/workspace/Lucene-Solr-master-MacOSX/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_222A3860AFEC3C9-001/shard-3-001/cores/collection1/data/]
   [junit4]   2> 642607 INFO  (coreLoadExecutor-2745-thread-1-processing-n:127.0.0.1:63157_) [n:127.0.0.1:63157_ c:collection1   x:collection1] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.AlcoholicMergePolicy: [AlcoholicMergePolicy: minMergeSize=0, mergeFactor=10, maxMergeSize=786670329, maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=true, maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.1]
   [junit4]   2> 642613 WARN  (coreLoadExecutor-2745-thread-1-processing-n:127.0.0.1:63157_) [n:127.0.0.1:63157_ c:collection1   x:collection1] o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type = requestHandler,name = /dump,class = DumpRequestHandler,attributes = {initParams=a, name=/dump, class=DumpRequestHandler},args = {defaults={a=A,b=B}}}
   [junit4]   2> 642752 INFO  (coreLoadExecutor-2745-thread-1-processing-n:127.0.0.1:63157_) [n:127.0.0.1:63157_ c:collection1   x:collection1] o.a.s.u.UpdateHandler Using UpdateLog implementation: org.apache.solr.update.UpdateLog
   [junit4]   2> 642752 INFO  (coreLoadExecutor-2745-thread-1-processing-n:127.0.0.1:63157_) [n:127.0.0.1:63157_ c:collection1   x:collection1] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 642754 INFO  (coreLoadExecutor-2745-thread-1-processing-n:127.0.0.1:63157_) [n:127.0.0.1:63157_ c:collection1   x:collection1] o.a.s.u.CommitTracker Hard AutoCommit: disabled
   [junit4]   2> 642754 INFO  (coreLoadExecutor-2745-thread-1-processing-n:127.0.0.1:63157_) [n:127.0.0.1:63157_ c:collection1   x:collection1] o.a.s.u.CommitTracker Soft AutoCommit: disabled
   [junit4]   2> 642778 INFO  (coreLoadExecutor-2745-thread-1-processing-n:127.0.0.1:63157_) [n:127.0.0.1:63157_ c:collection1   x:collection1] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy: maxMergeAtOnce=49, maxMergeAtOnceExplicit=39, maxMergedSegmentMB=87.08984375, floorSegmentMB=0.7314453125, forceMergeDeletesPctAllowed=3.8165469186865186, segmentsPerTier=20.0, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.0
   [junit4]   2> 642799 INFO  (coreLoadExecutor-2745-thread-1-processing-n:127.0.0.1:63157_) [n:127.0.0.1:63157_ c:collection1   x:collection1] o.a.s.s.SolrIndexSearcher Opening [Searcher@747a5f8c[collection1] main]
   [junit4]   2> 642805 INFO  (coreLoadExecutor-2745-thread-1-processing-n:127.0.0.1:63157_) [n:127.0.0.1:63157_ c:collection1   x:collection1] o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
   [junit4]   2> 642806 INFO  (coreLoadExecutor-2745-thread-1-processing-n:127.0.0.1:63157_) [n:127.0.0.1:63157_ c:collection1   x:collection1] o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 642807 INFO  (coreLoadExecutor-2745-thread-1-processing-n:127.0.0.1:63157_) [n:127.0.0.1:63157_ c:collection1   x:collection1] o.a.s.h.ReplicationHandler Commits will be reserved for  10000
   [junit4]   2> 642807 INFO  (coreLoadExecutor-2745-thread-1-processing-n:127.0.0.1:63157_) [n:127.0.0.1:63157_ c:collection1   x:collection1] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1569202712144248832
   [junit4]   2> 642811 INFO  (searcherExecutor-2746-thread-1-processing-n:127.0.0.1:63157_ x:collection1 c:collection1) [n:127.0.0.1:63157_ c:collection1   x:collection1] o.a.s.c.SolrCore [collection1] Registered new searcher Searcher@747a5f8c[collection1] main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 642817 INFO  (coreZkRegister-2740-thread-1-processing-n:127.0.0.1:63157_ x:collection1 c:collection1) [n:127.0.0.1:63157_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.ZkController Core needs to recover:collection1
   [junit4]   2> 642818 INFO  (updateExecutor-1689-thread-1-processing-n:127.0.0.1:63157_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:63157_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.DefaultSolrCoreState Running recovery
   [junit4]   2> 642818 INFO  (recoveryExecutor-1690-thread-1-processing-n:127.0.0.1:63157_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:63157_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Starting recovery process. recoveringAfterStartup=true
   [junit4]   2> 642819 INFO  (recoveryExecutor-1690-thread-1-processing-n:127.0.0.1:63157_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:63157_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy ###### startupVersions=[[]]
   [junit4]   2> 642819 INFO  (recoveryExecutor-1690-thread-1-processing-n:127.0.0.1:63157_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:63157_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.ZkController collection1 stopping background replication from leader
   [junit4]   2> 642819 INFO  (recoveryExecutor-1690-thread-1-processing-n:127.0.0.1:63157_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:63157_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Begin buffering updates. core=[collection1]
   [junit4]   2> 642819 INFO  (recoveryExecutor-1690-thread-1-processing-n:127.0.0.1:63157_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:63157_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.UpdateLog Starting to buffer updates. FSUpdateLog{state=ACTIVE, tlog=null}
   [junit4]   2> 642819 INFO  (recoveryExecutor-1690-thread-1-processing-n:127.0.0.1:63157_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:63157_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Publishing state of core [collection1] as recovering, leader is [http://127.0.0.1:63108/collection1/] and I am [http://127.0.0.1:63157/collection1/]
   [junit4]   2> 642824 INFO  (recoveryExecutor-1690-thread-1-processing-n:127.0.0.1:63157_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:63157_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Sending prep recovery command to [http://127.0.0.1:63108]; [WaitForState: action=PREPRECOVERY&core=collection1&nodeName=127.0.0.1:63157_&coreNodeName=core_node3&state=recovering&checkLive=true&onlyIfLeader=true&onlyIfLeaderActive=true]
   [junit4]   2> 642825 INFO  (SocketProxy-Acceptor-63108) [    ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=63171,localport=63108], receiveBufferSize:408300
   [junit4]   2> 642827 INFO  (SocketProxy-Acceptor-63108) [    ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=63109,localport=63172], receiveBufferSize=408300
   [junit4]   2> 642830 INFO  (qtp1798436630-8255) [n:127.0.0.1:63108_    ] o.a.s.h.a.PrepRecoveryOp Going to wait for coreNodeName: core_node3, state: recovering, checkLive: true, onlyIfLeader: true, onlyIfLeaderActive: true, maxTime: 183 s
   [junit4]   2> 642831 INFO  (qtp1798436630-8255) [n:127.0.0.1:63108_    ] o.a.s.h.a.PrepRecoveryOp In WaitForState(recovering): collection=collection1, shard=shard2, thisCore=collection1, leaderDoesNotNeedRecovery=false, isLeader? true, live=true, checkLive=true, currentState=down, localState=active, nodeName=127.0.0.1:63157_, coreNodeName=core_node3, onlyIfActiveCheckResult=false, nodeProps: core_node3:{"core":"collection1","base_url":"http://127.0.0.1:63157","node_name":"127.0.0.1:63157_","state":"down","type":"TLOG"}
   [junit4]   2> 642938 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.SolrTestCaseJ4 ###Starting test
   [junit4]   2> 642938 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.c.AbstractFullDistribZkTestBase Wait for recoveries to finish - wait 30000 for each attempt
   [junit4]   2> 642938 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.c.AbstractDistribZkTestBase Wait for recoveries to finish - collection: collection1 failOnTimeout:true timeout (sec):30000
   [junit4]   2> 643831 INFO  (qtp1798436630-8255) [n:127.0.0.1:63108_    ] o.a.s.h.a.PrepRecoveryOp In WaitForState(recovering): collection=collection1, shard=shard2, thisCore=collection1, leaderDoesNotNeedRecovery=false, isLeader? true, live=true, checkLive=true, currentState=recovering, localState=active, nodeName=127.0.0.1:63157_, coreNodeName=core_node3, onlyIfActiveCheckResult=false, nodeProps: core_node3:{"core":"collection1","base_url":"http://127.0.0.1:63157","node_name":"127.0.0.1:63157_","state":"recovering","type":"TLOG"}
   [junit4]   2> 643831 INFO  (qtp1798436630-8255) [n:127.0.0.1:63108_    ] o.a.s.h.a.PrepRecoveryOp Waited coreNodeName: core_node3, state: recovering, checkLive: true, onlyIfLeader: true for: 1 seconds.
   [junit4]   2> 643831 INFO  (qtp1798436630-8255) [n:127.0.0.1:63108_    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores params={nodeName=127.0.0.1:63157_&onlyIfLeaderActive=true&core=collection1&coreNodeName=core_node3&action=PREPRECOVERY&checkLive=true&state=recovering&onlyIfLeader=true&wt=javabin&version=2} status=0 QTime=1001
   [junit4]   2> 644332 INFO  (recoveryExecutor-1690-thread-1-processing-n:127.0.0.1:63157_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:63157_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Starting Replication Recovery.
   [junit4]   2> 644332 INFO  (recoveryExecutor-1690-thread-1-processing-n:127.0.0.1:63157_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:63157_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Attempting to replicate from [http://127.0.0.1:63108/collection1/].
   [junit4]   2> 644333 INFO  (SocketProxy-Acceptor-63108) [    ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=63175,localport=63108], receiveBufferSize:408300
   [junit4]   2> 644334 INFO  (SocketProxy-Acceptor-63108) [    ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=63109,localport=63176], receiveBufferSize=408300
   [junit4]   2> 644334 INFO  (qtp1798436630-8259) [n:127.0.0.1:63108_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.u.DirectUpdateHandler2 start commit{_version_=1569202713745424384,optimize=false,openSearcher=false,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
   [junit4]   2> 644335 INFO  (qtp1798436630-8259) [n:127.0.0.1:63108_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.u.DirectUpdateHandler2 No uncommitted changes. Skipping IW.commit.
   [junit4]   2> 644336 INFO  (qtp1798436630-8259) [n:127.0.0.1:63108_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.u.DirectUpdateHandler2 end_commit_flush
   [junit4]   2> 644336 INFO  (qtp1798436630-8259) [n:127.0.0.1:63108_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.u.p.LogUpdateProcessorFactory [collection1]  webapp= path=/update params={waitSearcher=true&openSearcher=false&commit=true&softCommit=false&commit_end_point=true&wt=javabin&version=2}{commit=} 0 2
   [junit4]   2> 644338 INFO  (SocketProxy-Acceptor-63108) [    ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=63177,localport=63108], receiveBufferSize:408300
   [junit4]   2> 644338 INFO  (SocketProxy-Acceptor-63108) [    ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=63109,localport=63178], receiveBufferSize=408300
   [junit4]   2> 644338 INFO  (qtp1798436630-8255) [n:127.0.0.1:63108_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.c.S.Request [collection1]  webapp= path=/replication params={qt=/replication&wt=javabin&version=2&command=indexversion} status=0 QTime=0
   [junit4]   2> 644339 INFO  (recoveryExecutor-1690-thread-1-processing-n:127.0.0.1:63157_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:63157_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.h.IndexFetcher Master's generation: 1
   [junit4]   2> 644339 INFO  (recoveryExecutor-1690-thread-1-processing-n:127.0.0.1:63157_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:63157_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.h.IndexFetcher Master's version: 0
   [junit4]   2> 644339 INFO  (recoveryExecutor-1690-thread-1-processing-n:127.0.0.1:63157_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:63157_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.h.IndexFetcher Slave's generation: 1
   [junit4]   2> 644339 INFO  (recoveryExecutor-1690-thread-1-processing-n:127.0.0.1:63157_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:63157_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.h.IndexFetcher Slave's version: 0
   [junit4]   2> 644339 INFO  (recoveryExecutor-1690-thread-1-processing-n:127.0.0.1:63157_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:63157_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Replication Recovery was successful.
   [junit4]   2> 644339 INFO  (recoveryExecutor-1690-thread-1-processing-n:127.0.0.1:63157_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:63157_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Registering as Active after recovery.
   [junit4]   2> 644339 INFO  (recoveryExecutor-1690-thread-1-processing-n:127.0.0.1:63157_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:63157_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.ZkController collection1 starting background replication from leader
   [junit4]   2> 644339 INFO  (recoveryExecutor-1690-thread-1-processing-n:127.0.0.1:63157_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:63157_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.ReplicateFromLeader Will start replication from leader with poll interval: 00:00:03
   [junit4]   2> 644340 INFO  (recoveryExecutor-1690-thread-1-processing-n:127.0.0.1:63157_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:63157_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.h.ReplicationHandler Poll scheduled at an interval of 3000ms
   [junit4]   2> 644340 WARN  (recoveryExecutor-1690-thread-1-processing-n:127.0.0.1:63157_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:63157_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.h.ReplicationHandler SolrCloud is enabled for core collection1 but so is old-style replication. Make sure you intend this behavior, it usually indicates a mis-configuration. Master setting is false and slave setting is true
   [junit4]   2> 644340 INFO  (indexFetcher-2752-thread-1) [n:127.0.0.1:63157_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.h.IndexFetcher Updated masterUrl to http://127.0.0.1:63108/collection1/
   [junit4]   2> 644341 INFO  (qtp1798436630-8256) [n:127.0.0.1:63108_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.c.S.Request [collection1]  webapp= path=/replication params={qt=/replication&wt=javabin&version=2&command=indexversion} status=0 QTime=0
   [junit4]   2> 644341 INFO  (indexFetcher-2752-thread-1) [n:127.0.0.1:63157_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.h.IndexFetcher Master's generation: 1
   [junit4]   2> 644341 INFO  (indexFetcher-2752-thread-1) [n:127.0.0.1:63157_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.h.IndexFetcher Master's version: 0
   [junit4]   2> 644341 INFO  (indexFetcher-2752-thread-1) [n:127.0.0.1:63157_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.h.IndexFetcher Slave's generation: 1
   [junit4]   2> 644341 INFO  (indexFetcher-2752-thread-1) [n:127.0.0.1:63157_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.h.IndexFetcher Slave's version: 0
   [junit4]   2> 644341 INFO  (recoveryExecutor-1690-thread-1-processing-n:127.0.0.1:63157_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:63157_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Updating version bucket highest from index after successful recovery.
   [junit4]   2> 644341 INFO  (recoveryExecutor-1690-thread-1-processing-n:127.0.0.1:63157_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:63157_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1569202713752764416
   [junit4]   2> 644343 INFO  (recoveryExecutor-1690-thread-1-processing-n:127.0.0.1:63157_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:63157_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Finished recovery process, successful=[true]
   [junit4]   2> 644938 INFO  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.c.AbstractDistribZkTestBase Recoveries finished - collection: collection1
   [junit4]   2> 644939 INFO  (SocketProxy-Acceptor-63101) [    ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=63179,localport=63101], receiveBufferSize:408300
   [junit4]   2> 644940 INFO  (SocketProxy-Acceptor-63101) [    ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=63102,localport=63180], receiveBufferSize=408300
   [junit4]   2> 644941 INFO  (qtp1485135750-8219) [n:127.0.0.1:63101_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.u.DirectUpdateHandler2 start commit{_version_=1569202714381910016,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
   [junit4]   2> 644942 INFO  (qtp1485135750-8219) [n:127.0.0.1:63101_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.u.DirectUpdateHandler2 No uncommitted changes. Skipping IW.commit.
   [junit4]   2> 644943 INFO  (qtp1485135750-8219) [n:127.0.0.1:63101_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.u.DirectUpdateHandler2 end_commit_flush
   [junit4]   2> 644943 INFO  (qtp1485135750-8219) [n:127.0.0.1:63101_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.u.p.LogUpdateProcessorFactory [collection1]  webapp= path=/update params={waitSearcher=true&commit=true&softCommit=false&wt=javabin&version=2}{commit=} 0 1
   [junit4]   2> 644944 INFO  (SocketProxy-Acceptor-63108) [    ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=63181,localport=63108], receiveBufferSize:408300
   [junit4]   2> 644944 INFO  (SocketProxy-Acceptor-63108) [    ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=63109,localport=63182], receiveBufferSize=408300
   [junit4]   2> 644946 INFO  (SocketProxy-Acceptor-63119) [    ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=63183,localport=63119], receiveBufferSize:408300
   [junit4]   2> 644947 INFO  (SocketProxy-Acceptor-63157) [    ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=63185,localport=63157], receiveBufferSize:408300
   [junit4]   2> 644947 INFO  (SocketProxy-Acceptor-63108) [    ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=63186,localport=63108], receiveBufferSize:408300
   [junit4]   2> 644947 INFO  (SocketProxy-Acceptor-63119) [    ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=63120,localport=63184], receiveBufferSize=408300
   [junit4]   2> 644948 INFO  (SocketProxy-Acceptor-63108) [    ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=63109,localport=63188], receiveBufferSize=408300
   [junit4]   2> 644949 INFO  (qtp1798436630-8259) [n:127.0.0.1:63108_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.u.DirectUpdateHandler2 start commit{_version_=1569202714390298624,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
   [junit4]   2> 644949 INFO  (qtp1798436630-8259) [n:127.0.0.1:63108_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.u.DirectUpdateHandler2 No uncommitted changes. Skipping IW.commit.
   [junit4]   2> 644949 INFO  (SocketProxy-Acceptor-63157) [    ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=63158,localport=63187], receiveBufferSize=408300
   [junit4]   2> 644950 INFO  (qtp1798436630-8259) [n:127.0.0.1:63108_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.u.DirectUpdateHandler2 end_commit_flush
   [junit4]   2> 644950 INFO  (qtp1798436630-8259) [n:127.0.0.1:63108_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.u.p.LogUpdateProcessorFactory [collection1]  webapp= path=/update params={update.distrib=FROMLEADER&waitSearcher=true&openSearcher=true&commit=true&softCommit=false&distrib.from=http://127.0.0.1:63108/collection1/&commit_end_point=true&wt=javabin&version=2&expungeDeletes=false}{commit=} 0 1
   [junit4]   2> 644951 INFO  (qtp1093328087-8285) [n:127.0.0.1:63119_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.u.DirectUpdateHandler2 start commit{_version_=1569202714392395776,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
   [junit4]   2> 644951 INFO  (qtp1092305419-8314) [n:127.0.0.1:63157_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.TestInjection Start waiting for replica in sync with leader
   [junit4]   2> 644951 INFO  (qtp1093328087-8285) [n:127.0.0.1:63119_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.u.DirectUpdateHandler2 No uncommitted changes. Skipping IW.commit.
   [junit4]   2> 644953 INFO  (qtp1093328087-8285) [n:127.0.0.1:63119_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.u.DirectUpdateHandler2 end_commit_flush
   [junit4]   2> 644953 INFO  (qtp1093328087-8285) [n:127.0.0.1:63119_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.u.p.LogUpdateProcessorFactory [collection1]  webapp= path=/update params={update.distrib=FROMLEADER&waitSearcher=true&openSearcher=true&commit=true&softCommit=false&distrib.from=http://127.0.0.1:63108/collection1/&commit_end_point=true&wt=javabin&version=2&expungeDeletes=false}{commit=} 0 2
   [junit4]   2> 644954 INFO  (SocketProxy-Acceptor-63108) [    ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=63189,localport=63108], receiveBufferSize:408300
   [junit4]   2> 644954 INFO  (SocketProxy-Acceptor-63108) [    ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=63109,localport=63190], receiveBufferSize=408300
   [junit4]   2> 644955 INFO  (qtp1798436630-8255) [n:127.0.0.1:63108_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.c.S.Request [collection1]  webapp= path=/replication params={qt=/replication&wt=javabin&version=2&command=details} status=0 QTime=0
   [junit4]   2> 644955 INFO  (qtp1092305419-8314) [n:127.0.0.1:63157_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.p.LogUpdateProcessorFactory [collection1]  webapp= path=/update params={update.distrib=FROMLEADE

[...truncated too long message...]

   [junit4]   2> 	2	/solr/collections/c8n_crud_1x2/state.json
   [junit4]   2> 
   [junit4]   2> Maximum concurrent data watches above limit:
   [junit4]   2> 
   [junit4]   2> 	5	/solr/clusterstate.json
   [junit4]   2> 
   [junit4]   2> Maximum concurrent children watches above limit:
   [junit4]   2> 
   [junit4]   2> 	5	/solr/live_nodes
   [junit4]   2> 	5	/solr/collections
   [junit4]   2> 	3	/solr/overseer/queue
   [junit4]   2> 	3	/solr/overseer/collection-queue-work
   [junit4]   2> 
   [junit4]   2> 1264252 WARN  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.c.SocketProxy Closing 6 connections to: http://127.0.0.1:63157/, target: http://127.0.0.1:63158/
   [junit4]   2> 1264252 WARN  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.c.SocketProxy Closing 3 connections to: http://127.0.0.1:63119/, target: http://127.0.0.1:63120/
   [junit4]   2> 1264253 WARN  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.c.SocketProxy Closing 11 connections to: http://127.0.0.1:63108/, target: http://127.0.0.1:63109/
   [junit4]   2> 1264253 WARN  (TEST-HttpPartitionTest.test-seed#[222A3860AFEC3C9]) [    ] o.a.s.c.SocketProxy Closing 1 connections to: http://127.0.0.1:63101/, target: http://127.0.0.1:63102/
   [junit4]   2> NOTE: reproduce with: ant test  -Dtestcase=HttpPartitionTest -Dtests.method=test -Dtests.seed=222A3860AFEC3C9 -Dtests.slow=true -Dtests.locale=ru-RU -Dtests.timezone=America/Inuvik -Dtests.asserts=true -Dtests.file.encoding=ISO-8859-1
   [junit4] FAILURE  632s J0 | HttpPartitionTest.test <<<
   [junit4]    > Throwable #1: java.lang.AssertionError: Didn't see all replicas for shard shard1 in collMinRf_1x3 come up within 90000 ms! ClusterState: {
   [junit4]    >   "control_collection":{
   [junit4]    >     "pullReplicas":"0",
   [junit4]    >     "replicationFactor":"1",
   [junit4]    >     "shards":{"shard1":{
   [junit4]    >         "range":"80000000-7fffffff",
   [junit4]    >         "state":"active",
   [junit4]    >         "replicas":{"core_node1":{
   [junit4]    >             "core":"collection1",
   [junit4]    >             "base_url":"http://127.0.0.1:63101",
   [junit4]    >             "node_name":"127.0.0.1:63101_",
   [junit4]    >             "state":"active",
   [junit4]    >             "type":"NRT",
   [junit4]    >             "leader":"true"}}}},
   [junit4]    >     "router":{"name":"compositeId"},
   [junit4]    >     "maxShardsPerNode":"1",
   [junit4]    >     "autoAddReplicas":"false",
   [junit4]    >     "nrtReplicas":"1",
   [junit4]    >     "tlogReplicas":"0",
   [junit4]    >     "autoCreated":"true"},
   [junit4]    >   "collMinRf_1x3":{
   [junit4]    >     "pullReplicas":"0",
   [junit4]    >     "replicationFactor":"1",
   [junit4]    >     "shards":{"shard1":{
   [junit4]    >         "range":"80000000-7fffffff",
   [junit4]    >         "state":"active",
   [junit4]    >         "replicas":{
   [junit4]    >           "core_node1":{
   [junit4]    >             "core":"collMinRf_1x3_shard1_replica_t1",
   [junit4]    >             "base_url":"http://127.0.0.1:63101",
   [junit4]    >             "node_name":"127.0.0.1:63101_",
   [junit4]    >             "state":"recovering",
   [junit4]    >             "type":"TLOG"},
   [junit4]    >           "core_node2":{
   [junit4]    >             "core":"collMinRf_1x3_shard1_replica_t2",
   [junit4]    >             "base_url":"http://127.0.0.1:63108",
   [junit4]    >             "node_name":"127.0.0.1:63108_",
   [junit4]    >             "state":"active",
   [junit4]    >             "type":"TLOG"},
   [junit4]    >           "core_node3":{
   [junit4]    >             "core":"collMinRf_1x3_shard1_replica_t3",
   [junit4]    >             "base_url":"http://127.0.0.1:63157",
   [junit4]    >             "node_name":"127.0.0.1:63157_",
   [junit4]    >             "state":"active",
   [junit4]    >             "type":"TLOG",
   [junit4]    >             "leader":"true"}}}},
   [junit4]    >     "router":{"name":"compositeId"},
   [junit4]    >     "maxShardsPerNode":"1",
   [junit4]    >     "autoAddReplicas":"false",
   [junit4]    >     "nrtReplicas":"0",
   [junit4]    >     "tlogReplicas":"3"},
   [junit4]    >   "collection1":{
   [junit4]    >     "pullReplicas":"0",
   [junit4]    >     "replicationFactor":"1",
   [junit4]    >     "shards":{
   [junit4]    >       "shard1":{
   [junit4]    >         "range":"80000000-ffffffff",
   [junit4]    >         "state":"active",
   [junit4]    >         "replicas":{"core_node2":{
   [junit4]    >             "core":"collection1",
   [junit4]    >             "base_url":"http://127.0.0.1:63119",
   [junit4]    >             "node_name":"127.0.0.1:63119_",
   [junit4]    >             "state":"active",
   [junit4]    >             "type":"TLOG",
   [junit4]    >             "leader":"true"}}},
   [junit4]    >       "shard2":{
   [junit4]    >         "range":"0-7fffffff",
   [junit4]    >         "state":"active",
   [junit4]    >         "replicas":{
   [junit4]    >           "core_node1":{
   [junit4]    >             "core":"collection1",
   [junit4]    >             "base_url":"http://127.0.0.1:63108",
   [junit4]    >             "node_name":"127.0.0.1:63108_",
   [junit4]    >             "state":"active",
   [junit4]    >             "type":"TLOG",
   [junit4]    >             "leader":"true"},
   [junit4]    >           "core_node3":{
   [junit4]    >             "core":"collection1",
   [junit4]    >             "base_url":"http://127.0.0.1:63157",
   [junit4]    >             "node_name":"127.0.0.1:63157_",
   [junit4]    >             "state":"active",
   [junit4]    >             "type":"TLOG"}}}},
   [junit4]    >     "router":{"name":"compositeId"},
   [junit4]    >     "maxShardsPerNode":"1",
   [junit4]    >     "autoAddReplicas":"false",
   [junit4]    >     "nrtReplicas":"1",
   [junit4]    >     "tlogReplicas":"0",
   [junit4]    >     "autoCreated":"true"}}
   [junit4]    > 	at __randomizedtesting.SeedInfo.seed([222A3860AFEC3C9:8A769C5CA402AE31]:0)
   [junit4]    > 	at org.apache.solr.cloud.AbstractFullDistribZkTestBase.ensureAllReplicasAreActive(AbstractFullDistribZkTestBase.java:1976)
   [junit4]    > 	at org.apache.solr.cloud.HttpPartitionTest.testMinRf(HttpPartitionTest.java:245)
   [junit4]    > 	at org.apache.solr.cloud.HttpPartitionTest.test(HttpPartitionTest.java:126)
   [junit4]    > 	at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsFixedStatement.callStatement(BaseDistributedSearchTestCase.java:985)
   [junit4]    > 	at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsStatement.evaluate(BaseDistributedSearchTestCase.java:960)
   [junit4]    > 	at java.lang.Thread.run(Thread.java:748)
   [junit4]   2> NOTE: leaving temporary files on disk at: /Users/jenkins/workspace/Lucene-Solr-master-MacOSX/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_222A3860AFEC3C9-001
   [junit4]   2> NOTE: test params are: codec=Asserting(Lucene70): {multiDefault=PostingsFormat(name=LuceneFixedGap), a_t=PostingsFormat(name=LuceneFixedGap), id=PostingsFormat(name=LuceneVarGapFixedInterval), text=BlockTreeOrds(blocksize=128)}, docValues:{range_facet_l_dv=DocValuesFormat(name=Lucene70), _version_=DocValuesFormat(name=Memory), intDefault=DocValuesFormat(name=Memory), range_facet_i_dv=DocValuesFormat(name=Lucene70), intDvoDefault=DocValuesFormat(name=Lucene70), range_facet_l=DocValuesFormat(name=Lucene70), timestamp=DocValuesFormat(name=Lucene70)}, maxPointsInLeafNode=20, maxMBSortInHeap=5.35455863106287, sim=RandomSimilarity(queryNorm=false): {}, locale=ru-RU, timezone=America/Inuvik
   [junit4]   2> NOTE: Mac OS X 10.11.6 x86_64/Oracle Corporation 1.8.0_131 (64-bit)/cpus=3,threads=1,free=40921016,total=233213952
   [junit4]   2> NOTE: All tests run in this JVM: [SolrCoreTest, DirectSolrSpellCheckerTest, TestElisionMultitermQuery, TestJmxIntegration, TestReplicaProperties, TestQuerySenderNoQuery, AnalyticsMergeStrategyTest, TestSolrConfigHandler, TestUseDocValuesAsStored, TestStressReorder, ShardRoutingTest, ConnectionManagerTest, TestFieldCollectionResource, TestBlendedInfixSuggestions, UniqFieldsUpdateProcessorFactoryTest, FullHLLTest, TestManagedSynonymGraphFilterFactory, TestTrackingShardHandlerFactory, TestGraphMLResponseWriter, TestRemoteStreaming, OverseerModifyCollectionTest, TestSQLHandlerNonCloud, DocValuesTest, TestMergePolicyConfig, TestFieldCacheVsDocValues, MigrateRouteKeyTest, CloudExitableDirectoryReaderTest, LeaderFailoverAfterPartitionTest, CdcrRequestHandlerTest, RollingRestartTest, PathHierarchyTokenizerFactoryTest, TestStressVersions, TestRestManager, CollectionStateFormat2Test, TestPerFieldSimilarityWithDefaultOverride, TestReloadDeadlock, AnalysisAfterCoreReloadTest, DistributedSpellCheckComponentTest, TestLeaderElectionWithEmptyReplica, DocumentAnalysisRequestHandlerTest, ClassificationUpdateProcessorFactoryTest, TestPushWriter, CopyFieldTest, TestJsonRequest, TestFreeTextSuggestions, TestFaceting, SimplePostToolTest, DateRangeFieldTest, SolrMetricsIntegrationTest, DocValuesNotIndexedTest, BlockCacheTest, CollectionsAPIAsyncDistributedZkTest, DateFieldTest, SolrSlf4jReporterTest, WrapperMergePolicyFactoryTest, TestIBSimilarityFactory, CachingDirectoryFactoryTest, TestDynamicFieldCollectionResource, DistributedFacetPivotSmallTest, TestDelegationWithHadoopAuth, TestPhraseSuggestions, TestExportWriter, QueryElevationComponentTest, DeleteLastCustomShardedReplicaTest, TestNamedUpdateProcessors, TestStressLiveNodes, InfoHandlerTest, SolrMetricManagerTest, SortSpecParsingTest, SolrPluginUtilsTest, HighlighterMaxOffsetTest, TestUpdate, TestDistributedStatsComponentCardinality, MoveReplicaTest, TestOnReconnectListenerSupport, TestDefaultStatsCache, TestGraphTermsQParserPlugin, HdfsWriteToMultipleCollectionsTest, HttpPartitionTest]
   [junit4] Completed [199/724 (1!)] on J0 in 631.84s, 1 test, 1 failure <<< FAILURES!

[...truncated 45075 lines...]
