usergrid-commits mailing list archives

From sfeld...@apache.org
Subject [63/68] [abbrv] incubator-usergrid git commit: Merge branch 'pr/225' into two-dot-o
Date Fri, 17 Apr 2015 22:48:08 GMT
Merge branch 'pr/225' into two-dot-o

Conflicts:
	stack/config/src/main/resources/usergrid-default.properties
	stack/core/src/main/resources/usergrid-core-context.xml


Project: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/commit/ee711316
Tree: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/tree/ee711316
Diff: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/diff/ee711316

Branch: refs/heads/two-dot-o-dev
Commit: ee7113160f04cf9853a1717694d003cbc8d14885
Parents: 7839871 db380df
Author: Dave Johnson <dmjohnson@apigee.com>
Authored: Thu Apr 16 16:16:35 2015 -0400
Committer: Dave Johnson <dmjohnson@apigee.com>
Committed: Thu Apr 16 16:16:35 2015 -0400

----------------------------------------------------------------------
 .../main/resources/usergrid-default.properties  | 37 +++++++++++++-----
 .../main/resources/usergrid-core-context.xml    | 41 +++++++++++---------
 2 files changed, 50 insertions(+), 28 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/ee711316/stack/config/src/main/resources/usergrid-default.properties
----------------------------------------------------------------------
diff --cc stack/config/src/main/resources/usergrid-default.properties
index d42d12c,b49742e..53b6445
--- a/stack/config/src/main/resources/usergrid-default.properties
+++ b/stack/config/src/main/resources/usergrid-default.properties
@@@ -97,16 -58,20 +97,28 @@@ cassandra.writecl=QUORUM
  #The maximum number of pending mutations allowed in ram before it is flushed to cassandra
  cassandra.mutation.flushsize=2000
  
--#Keyspace to use for locking
--#Note that if this is deployed in a production cluster, the RF on the keyspace MUST be updated to use an odd number for it's replication Factor.
--#Even numbers for RF can potentially case the locks to fail, via "split brain" when read at QUORUM on lock verification
++# Keyspace to use for locking
++# Note that if this is deployed in a production cluster, the RF on the keyspace
++# MUST be updated to use an odd number for its replication factor. Even numbers
++# for RF can potentially cause the locks to fail, via "split brain" when read at
++# QUORUM on lock verification
  cassandra.lock.keyspace=Locks
 -#locking read & write policies
 +
++# locking read & write policies
+ cassandra.lock.readcl=LOCAL_QUORUM
+ cassandra.lock.writecl=LOCAL_QUORUM
+ 
+ # Timeout in ms before hector considers a thrift socket dead
+ cassandra.thriftSocketTimeout=0
+ # If hector should use host TCP keep alive settings
+ cassandra.useSocketKeepalive=false
+ 
+ 
 +
 +###############################################################################
 +#
 +# General properties
 +
  # false to disable test features
  usergrid.test=false
  
@@@ -180,22 -127,21 +192,27 @@@ usergrid.temp.files=/tmp/usergrid
  #The timeout in locks from reading messages transactionally from a queue.  Number of seconds to wait
  usergrid.queue.lock.timeout=5
  
 -######
 -#Scheduler setup
 -######
  
 -#Time in milliseconds that a job can be started without a heartbeat before being considered dead.
 -#Note that this must be high enough so that jobs that are iteration based can run an iteration and update the heartbeat
 +###############################################################################
 +#
 +# Scheduler setup
 +
- #Time in milliseconds that a job can be started without a heartbeat before being considered dead.
- #Note that this must be high enough so that jobs that are iteration based can run an iteration and update the heartbeat
++# Time in milliseconds that a job can be started without a heartbeat before being considered dead.
++# Note that this must be high enough so that jobs that are iteration based can run an iteration and update the heartbeat
  usergrid.scheduler.job.timeout=120000
--#The path to the queue in the managment app to get jobs from
++
++# The path to the queue in the management app to get jobs from
  usergrid.scheduler.job.queueName=/jobs
--#The number of executor threads to allow
++
++# The number of executor threads to allow
  usergrid.scheduler.job.workers=4
--#Poll interval to check for new jobs in millseconds.  5 seconds is the default.  It will run all jobs up to current so this won't limit throughput
++
++# Poll interval to check for new jobs in milliseconds.  5 seconds is the default.
++# It will run all jobs up to current so this won't limit throughput
  usergrid.scheduler.job.interval=5000
--#The max number of times a job can fail before removing it permanently. Note that this count is INCLUSIVE.
--#If the value is 10, the 11th fail will mark the job as dead
++
++# The max number of times a job can fail before removing it permanently. Note that this count is INCLUSIVE.
++# If the value is 10, the 11th fail will mark the job as dead
  usergrid.scheduler.job.maxfail=10
  
  # Zookeeper instances

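For context, the two new lock consistency-level properties above are consumed through Hector's ConfigurableConsistencyLevel policy, wired up in usergrid-core-context.xml below. A minimal Java sketch of the equivalent programmatic setup, assuming the Hector client on the classpath and the ${cassandra.lock.readcl} / ${cassandra.lock.writecl} placeholders resolved to their new LOCAL_QUORUM defaults:

    import me.prettyprint.cassandra.model.ConfigurableConsistencyLevel;
    import me.prettyprint.hector.api.HConsistencyLevel;

    public class LockConsistencyExample {
        public static void main(String[] args) {
            // Equivalent of the hlockConsistencyLevelPolicy bean in the XML diff
            // below, with the cassandra.lock.readcl / cassandra.lock.writecl
            // placeholders resolved to the new LOCAL_QUORUM defaults.
            ConfigurableConsistencyLevel lockPolicy = new ConfigurableConsistencyLevel();
            lockPolicy.setDefaultReadConsistencyLevel(HConsistencyLevel.LOCAL_QUORUM);
            lockPolicy.setDefaultWriteConsistencyLevel(HConsistencyLevel.LOCAL_QUORUM);
            // HectorLockManagerImpl then receives this policy via its
            // consistencyLevelPolicy property.
        }
    }

LOCAL_QUORUM confines lock reads and writes to a quorum of the local datacenter's replicas, rather than a cluster-wide QUORUM.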
http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/ee711316/stack/core/src/main/resources/usergrid-core-context.xml
----------------------------------------------------------------------
diff --cc stack/core/src/main/resources/usergrid-core-context.xml
index cd40d6d,3e49455..4424896
--- a/stack/core/src/main/resources/usergrid-core-context.xml
+++ b/stack/core/src/main/resources/usergrid-core-context.xml
@@@ -43,25 -43,27 +43,24 @@@
  
  	<!-- The Time Resolution used for the cluster -->
 	<bean id="microsecondsTimeResolution" class="me.prettyprint.cassandra.service.clock.MicrosecondsClockResolution" />
--  <bean id="traceTagManager" class="org.apache.usergrid.persistence.cassandra.util.TraceTagManager"/>
--  <bean id="traceTagReporter" class="org.apache.usergrid.persistence.cassandra.util.Slf4jTraceTagReporter"/>
++    <bean id="traceTagManager" class="org.apache.usergrid.persistence.cassandra.util.TraceTagManager"/>
++    <bean id="traceTagReporter" class="org.apache.usergrid.persistence.cassandra.util.Slf4jTraceTagReporter"/>
  
--  <bean id="taggedOpTimer" class="org.apache.usergrid.persistence.cassandra.util.TaggedOpTimer">
--    <constructor-arg ref="traceTagManager"/>
--  </bean>
++    <bean id="taggedOpTimer" class="org.apache.usergrid.persistence.cassandra.util.TaggedOpTimer">
++      <constructor-arg ref="traceTagManager"/>
++    </bean>
  
  	<bean id="cassandraHostConfigurator" class="me.prettyprint.cassandra.service.CassandraHostConfigurator">
  		<constructor-arg value="${cassandra.url}" />
          <!-- set the pool size if it's available.  If not go with 50 -->
-         <property name="maxActive" value="${cassandra.connections:50}"/>
-         <!--<property orgAppName="clockResolution" ref="microsecondsTimeResolution" />-->
+         <property name="maxActive" value="${cassandra.connections:20}"/>
+         <property name="cassandraThriftSocketTimeout" value="${cassandra.thriftSocketTimeout:0}" />
+         <property name="useSocketKeepalive" value="${cassandra.useSocketKeepalive:false}" />
 -        <property name="clockResolution" ref="microsecondsTimeResolution" />
++        <!-- <property name="clockResolution" ref="microsecondsTimeResolution" /> -->
          <property name="opTimer" ref="taggedOpTimer"/>
          <property name="loadBalancingPolicy" ref="loadBalancingPolicy"/>
  	</bean>
  
--
--
--
  	<bean id="cassandraCluster" class="me.prettyprint.cassandra.service.ThriftCluster">
  		<constructor-arg value="${cassandra.cluster}" />
  		<constructor-arg ref="cassandraHostConfigurator" />
@@@ -70,34 -72,31 +69,38 @@@
      <bean id="loadBalancingPolicy" class="me.prettyprint.cassandra.connection.DynamicLoadBalancingPolicy"/>
  
  	<!--  locking for a single node -->
- <!--	<bean orgAppName="lockManager"
-         class="org.apache.usergrid.locking.singlenode.SingleNodeLockManagerImpl" />-->
++
+     <!-- <bean name="lockManager" class="org.apache.usergrid.locking.singlenode.SingleNodeLockManagerImpl" /> -->
  
  	<!--  hector based locks -->
- 	<!-- Note that if this is deployed in a production cluster, the RF on the keyspace
-     MUST be updated to use an odd number for it's replication Factor.  Even numbers can
-     potentially case the locks to fail, via "split brain" when read at QUORUM on lock verification-->
 -	<!-- Note that if this is deployed in a production cluster, the RF on the keyspace MUST be updated to use an odd number for it's replication Factor.
 -		  Even numbers can potentially case the locks to fail, via "split brain" when read at QUORUM on lock verification-->
++	<!-- Note that if this is deployed in a production cluster, the RF on the keyspace MUST
++	     be updated to use an odd number for its replication factor. Even numbers can potentially
++	     cause the locks to fail, via "split brain" when read at QUORUM on lock verification -->
  
- 	<bean name="lockManager" class="org.apache.usergrid.locking.cassandra.HectorLockManagerImpl" >
+ 	<bean name="lockManager" class="org.apache.usergrid.locking.cassandra.HectorLockManagerImpl">
  		<property name="cluster" ref="cassandraCluster"/>
  		<property name="keyspaceName" value="${cassandra.lock.keyspace}"/>
- 		<property name="consistencyLevelPolicy" ref="consistencyLevelPolicy"/>
+         <property name="consistencyLevelPolicy" ref="hlockConsistencyLevelPolicy" />
  	</bean>
 +
+     <bean name="hlockConsistencyLevelPolicy" class="me.prettyprint.cassandra.model.ConfigurableConsistencyLevel">
+         <property name="defaultReadConsistencyLevel" value="${cassandra.lock.readcl}"/>
+         <property name="defaultWriteConsistencyLevel" value="${cassandra.lock.writecl}"/>
+     </bean>
+ 
  	<!--  zookeeper locks -->
  	<!--
 -	<bean name="lockManager" class="org.apache.usergrid.locking.zookeeper.ZooKeeperLockManagerImpl" >
 -		<property name="hostPort" value="${zookeeper.url}"/>
 -		<property name="sessionTimeout" value="2000"/>
 -		<property name="maxAttempts" value="10"/>
 +	<bean orgAppName="lockManager" class="org.apache.usergrid.locking.zookeeper.ZooKeeperLockManagerImpl" >
 +		<property orgAppName="hostPort" value="${zookeeper.url}"/>
 +		<property orgAppName="sessionTimeout" value="2000"/>
 +		<property orgAppName="maxAttempts" value="10"/>
  	</bean>  -->
  
--
 +    <bean id="injector"
 +   		class="org.apache.usergrid.corepersistence.GuiceFactory">
 +   		<constructor-arg ref="cassandraHostConfigurator" />
 +        <constructor-arg ref="properties" />
 +    </bean>
  
  	<bean id="cassandraService"
 		class="org.apache.usergrid.persistence.cassandra.CassandraService" init-method="init" destroy-method="destroy">
@@@ -162,7 -161,11 +165,9 @@@
      	<constructor-arg value="${usergrid.index.defaultbucketsize}"/>
      </bean>
  
 -    <bean id="mailUtils" class="org.apache.usergrid.utils.MailUtils" />
 -
+     <bean id="entityManager" class="org.apache.usergrid.persistence.cassandra.EntityManagerImpl" scope="prototype"/>
+ 
 -    <bean id="relationManager" class="org.apache.usergrid.persistence.cassandra.RelationManagerImpl" scope="prototype"/>
 +    <bean id="mailUtils" class="org.apache.usergrid.utils.MailUtils" />
  
      <bean id="traceTagAspect" class="org.apache.usergrid.persistence.cassandra.util.TraceTagAspect"/>
  

