helix-commits mailing list archives

From kisho...@apache.org
Subject git commit: Minor documentation fixes
Date Wed, 19 Dec 2012 20:05:22 GMT
Updated Branches:
  refs/heads/master e0291692d -> 598c5e290


Minor documentation fixes


Project: http://git-wip-us.apache.org/repos/asf/incubator-helix/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-helix/commit/598c5e29
Tree: http://git-wip-us.apache.org/repos/asf/incubator-helix/tree/598c5e29
Diff: http://git-wip-us.apache.org/repos/asf/incubator-helix/diff/598c5e29

Branch: refs/heads/master
Commit: 598c5e2905d2d7b6dfeef33cb7e07f493f41f16e
Parents: e029169
Author: Kishore Gopalakrishna <g.kishore@gmail.com>
Authored: Wed Dec 19 12:05:12 2012 -0800
Committer: Kishore Gopalakrishna <g.kishore@gmail.com>
Committed: Wed Dec 19 12:05:12 2012 -0800

----------------------------------------------------------------------
 bump-up.command                            |    7 ++
 recipes/distributed-lock-manager/README.md |  123 ++++++++++++-----------
 src/site/markdown/recipes/lock_manager.md  |  123 ++++++++++++-----------
 3 files changed, 139 insertions(+), 114 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-helix/blob/598c5e29/bump-up.command
----------------------------------------------------------------------
diff --git a/bump-up.command b/bump-up.command
index ee73f55..8cd2486 100755
--- a/bump-up.command
+++ b/bump-up.command
@@ -94,6 +94,13 @@ cecho "bump up mockservice/pom.xml" $green
 sed -i "s/${version}/${new_version}/g" mockservice/pom.xml
 grep -C 1 "$new_version" mockservice/pom.xml
 
+for POM in recipes/pom.xml recipes/distributed-lock-manager/pom.xml recipes/rsync-replicated-file-system/pom.xml recipes/rabbitmq-consumer-group/pom.xml
+do
+  cecho "bump up $POM" $green
+  sed -i "s/${version}/${new_version}/g" $POM 
+  grep -C 1 "$new_version" $POM
+done
+
 cecho "bump up helix-core/src/main/resources/cluster-manager-version.properties" $green
 sed -i "s/${version}/${new_version}/g" helix-core/src/main/resources/cluster-manager-version.properties
 grep -C 1 "$new_version" helix-core/src/main/resources/cluster-manager-version.properties

http://git-wip-us.apache.org/repos/asf/incubator-helix/blob/598c5e29/recipes/distributed-lock-manager/README.md
----------------------------------------------------------------------
diff --git a/recipes/distributed-lock-manager/README.md b/recipes/distributed-lock-manager/README.md
index 0304fed..fdba382 100644
--- a/recipes/distributed-lock-manager/README.md
+++ b/recipes/distributed-lock-manager/README.md
@@ -24,9 +24,9 @@ The simplest way to model a lock using zookeeper is (See Zookeeper leader recipe
 
 * Each process tries to create an ephemeral node.
 * If it can successfully create it, then it acquires the lock.
-* Else it will watch on the znode and try to acquire the lock again.
+* Else it will watch on the znode and try to acquire the lock again if the current lock holder disappears.
 
-This is good enough if there is only one lock. But in practice, an application will have many such locks. Distributing and managing the locks among difference process becomes challenging. Extending such a solution to many locks will result in
+This is good enough if there is only one lock. But in practice, an application will need many such locks. Distributing and managing the locks among different processes becomes challenging. Extending such a solution to many locks will result in:
 
 * Uneven distribution of locks among nodes: the node that starts first will acquire all the locks, while nodes that start later will be idle.
 * When a node fails, how the locks will be redistributed among the remaining nodes is not predictable.

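For readers skimming the diff, here is a minimal sketch of the single-lock, ephemeral-node approach the bullets above describe, using the stock ZooKeeper Java client; the class name, znode path, and session timeout are illustrative, not part of the recipe. It makes the limitation concrete: every lock needs its own znode, watch, and re-acquire race, which is the bookkeeping the Helix-based recipe takes over.

```
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

// Illustrative single-lock sketch: whoever creates the ephemeral znode holds the lock.
public class SimpleZkLock implements Watcher {
  private final ZooKeeper zk;
  private final String lockPath; // e.g. "/locks/lock-group_0"

  public SimpleZkLock(String zkAddress, String lockPath) throws Exception {
    this.zk = new ZooKeeper(zkAddress, 30000, this);
    this.lockPath = lockPath;
  }

  // Try to create the ephemeral node; success means this process owns the lock.
  public boolean tryAcquire() throws Exception {
    try {
      zk.create(lockPath, new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
      return true;
    } catch (KeeperException.NodeExistsException e) {
      // Someone else holds it: leave a watch so we hear when the holder's
      // session dies. If the node vanished in the meantime, retry right away.
      if (zk.exists(lockPath, this) == null) {
        return tryAcquire();
      }
      return false;
    }
  }

  @Override
  public void process(WatchedEvent event) {
    if (event.getType() == Event.EventType.NodeDeleted) {
      try {
        tryAcquire(); // current lock holder disappeared, race to acquire again
      } catch (Exception ignored) {
      }
    }
  }
}
```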
@@ -78,40 +78,40 @@ localhost_12002 acquired lock:lock-group_11
 localhost_12000 acquired lock:lock-group_6
 localhost_12002 acquired lock:lock-group_0
 localhost_12000 acquired lock:lock-group_9
-lockName	acquired By
+lockName    acquired By
 ======================================
-lock-group_0	localhost_12002
-lock-group_1	localhost_12002
-lock-group_10	localhost_12002
-lock-group_11	localhost_12002
-lock-group_2	localhost_12001
-lock-group_3	localhost_12001
-lock-group_4	localhost_12001
-lock-group_5	localhost_12001
-lock-group_6	localhost_12000
-lock-group_7	localhost_12000
-lock-group_8	localhost_12000
-lock-group_9	localhost_12000
+lock-group_0    localhost_12002
+lock-group_1    localhost_12002
+lock-group_10    localhost_12002
+lock-group_11    localhost_12002
+lock-group_2    localhost_12001
+lock-group_3    localhost_12001
+lock-group_4    localhost_12001
+lock-group_5    localhost_12001
+lock-group_6    localhost_12000
+lock-group_7    localhost_12000
+lock-group_8    localhost_12000
+lock-group_9    localhost_12000
 Stopping localhost_12000
 localhost_12000Interrupted
 localhost_12001 acquired lock:lock-group_9
 localhost_12001 acquired lock:lock-group_8
 localhost_12002 acquired lock:lock-group_6
 localhost_12002 acquired lock:lock-group_7
-lockName	acquired By
+lockName    acquired By
 ======================================
-lock-group_0	localhost_12002
-lock-group_1	localhost_12002
-lock-group_10	localhost_12002
-lock-group_11	localhost_12002
-lock-group_2	localhost_12001
-lock-group_3	localhost_12001
-lock-group_4	localhost_12001
-lock-group_5	localhost_12001
-lock-group_6	localhost_12002
-lock-group_7	localhost_12002
-lock-group_8	localhost_12001
-lock-group_9	localhost_12001
+lock-group_0    localhost_12002
+lock-group_1    localhost_12002
+lock-group_10    localhost_12002
+lock-group_11    localhost_12002
+lock-group_2    localhost_12001
+lock-group_3    localhost_12001
+lock-group_4    localhost_12001
+lock-group_5    localhost_12001
+lock-group_6    localhost_12002
+lock-group_7    localhost_12002
+lock-group_8    localhost_12001
+lock-group_9    localhost_12001
 
 ```
 
@@ -135,6 +135,7 @@ This provides more details on how to setup the cluster and where to plugin appli
 ##### Create a lock group
 
 Create a lock group and specify the number of locks in the lock group. 
+
 ```
 ./helix-admin --zkSvr localhost:2199  --addResource lock-manager-demo lock-group 6 OnlineOffline AUTO_REBALANCE
 ```
@@ -172,11 +173,11 @@ LockFactory that creates the lock
  
 ```
 public class LockFactory extends StateModelFactory<Lock>{
-	
-	/* Instantiates the lock handler, one per lockName*/
+    
+    /* Instantiates the lock handler, one per lockName*/
     public Lock create(String lockName)
     {
-    	return new Lock(lockName);
+        return new Lock(lockName);
     }   
 }
 ```
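The Lock handler that this factory instantiates is not shown in the diff. As a rough sketch only, assuming the Helix participant API of this release (package and annotation names are assumptions to check against the recipe source), an OnlineOffline lock handler could look like:

```
import org.apache.helix.NotificationContext;
import org.apache.helix.model.Message;
import org.apache.helix.participant.statemachine.StateModel;
import org.apache.helix.participant.statemachine.StateModelInfo;
import org.apache.helix.participant.statemachine.Transition;

// Sketch of the per-lock handler returned by LockFactory.create(lockName);
// the transition callbacks are where the application acquires and releases the lock.
@StateModelInfo(initialState = "OFFLINE", states = { "ONLINE", "OFFLINE" })
public class Lock extends StateModel {
  private final String lockName;

  public Lock(String lockName) {
    this.lockName = lockName;
  }

  @Transition(from = "OFFLINE", to = "ONLINE")
  public void lock(Message m, NotificationContext context) {
    System.out.println(context.getManager().getInstanceName() + " acquired lock:" + lockName);
  }

  @Transition(from = "ONLINE", to = "OFFLINE")
  public void release(Message m, NotificationContext context) {
    System.out.println(context.getManager().getInstanceName() + " releasing lock:" + lockName);
  }
}
```

With a handler along these lines, the `modelFactory` referenced in the next hunk would simply be `new LockFactory()`.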
@@ -186,23 +187,27 @@ At node start up, simply join the cluster and helix will invoke the appropriate
 ```
 public class LockProcess{
 
-	public static void main(String args){
-	    String zkAddress= "localhost:2199";
-	    String clusterName = "lock-manager-demo";
-	    //Give a unique id to each process, most commonly used format hostname_port
-	    String instanceName ="localhost_12000";
-	    ZKHelixAdmin helixAdmin = new ZKHelixAdmin(zkAddress);
-	    //configure the instance and provide some metadata 
-	    InstanceConfig config = new InstanceConfig(instanceName);
-	    config.setHostName("localhost");
-	    config.setPort("12000");
-	    admin.addInstance(clusterName, config);
-	    //join the cluster
-		HelixManager manager = HelixManager.getHelixManager(clusterName,null,InstanceType.PARTICIPANT,zkAddress);
-		manager.getStateMachineEngine().registerStateModelFactory("OnlineOffline", modelFactory);
-		manager.connect();
-		Thread.currentThread.join();
-	}
+  public static void main(String[] args) throws Exception {
+    String zkAddress= "localhost:2199";
+    String clusterName = "lock-manager-demo";
+    //Give a unique id to each process, most commonly used format hostname_port
+    String instanceName ="localhost_12000";
+    ZKHelixAdmin helixAdmin = new ZKHelixAdmin(zkAddress);
+    //configure the instance and provide some metadata 
+    InstanceConfig config = new InstanceConfig(instanceName);
+    config.setHostName("localhost");
+    config.setPort("12000");
+    helixAdmin.addInstance(clusterName, config);
+    //join the cluster
+    HelixManager manager;
+    manager = HelixManagerFactory.getHelixManager(clusterName,
+                                                  instanceName,
+                                                  InstanceType.PARTICIPANT,
+                                                  zkAddress);
+    manager.getStateMachineEngine().registerStateModelFactory("OnlineOffline", modelFactory);
+    manager.connect();
+    Thread.currentThread().join();
+  }
 
 }
 ```
@@ -224,15 +229,19 @@ This is recommended when the number of nodes in the cluster is less than 100. To
 ```
 public class LockProcess{
 
-	public static void main(String args){
-	    String zkAddress= "localhost:2199";
-	    String clusterName = "lock-manager-demo";
-	    .
-	    .
-	    manager.connect();
-	    final HelixManager controller = HelixControllerMain.startHelixController(zkAddress, clusterName, "controller", HelixControllerMain.STANDALONE);
-	    Thread.currentThread.join();
-	}
+  public static void main(String[] args) throws Exception {
+    String zkAddress= "localhost:2199";
+    String clusterName = "lock-manager-demo";
+    .
+    .
+    manager.connect();
+    HelixManager controller;
+    controller = HelixControllerMain.startHelixController(zkAddress, 
+                                                          clusterName,
+                                                          "controller", 
+                                                          HelixControllerMain.STANDALONE);
+    Thread.currentThread().join();
+  }
 }
 ```
 

http://git-wip-us.apache.org/repos/asf/incubator-helix/blob/598c5e29/src/site/markdown/recipes/lock_manager.md
----------------------------------------------------------------------
diff --git a/src/site/markdown/recipes/lock_manager.md b/src/site/markdown/recipes/lock_manager.md
index 0304fed..fdba382 100644
--- a/src/site/markdown/recipes/lock_manager.md
+++ b/src/site/markdown/recipes/lock_manager.md
@@ -24,9 +24,9 @@ The simplest way to model a lock using zookeeper is (See Zookeeper leader recipe
 
 * Each process tries to create an ephemeral node.
 * If it can successfully create it, then it acquires the lock.
-* Else it will watch on the znode and try to acquire the lock again.
+* Else it will watch on the znode and try to acquire the lock again if the current lock holder disappears.
 
-This is good enough if there is only one lock. But in practice, an application will have many such locks. Distributing and managing the locks among difference process becomes challenging. Extending such a solution to many locks will result in
+This is good enough if there is only one lock. But in practice, an application will need many such locks. Distributing and managing the locks among different processes becomes challenging. Extending such a solution to many locks will result in:
 
 * Uneven distribution of locks among nodes: the node that starts first will acquire all the locks, while nodes that start later will be idle.
 * When a node fails, how the locks will be redistributed among the remaining nodes is not predictable.

@@ -78,40 +78,40 @@ localhost_12002 acquired lock:lock-group_11
 localhost_12000 acquired lock:lock-group_6
 localhost_12002 acquired lock:lock-group_0
 localhost_12000 acquired lock:lock-group_9
-lockName	acquired By
+lockName    acquired By
 ======================================
-lock-group_0	localhost_12002
-lock-group_1	localhost_12002
-lock-group_10	localhost_12002
-lock-group_11	localhost_12002
-lock-group_2	localhost_12001
-lock-group_3	localhost_12001
-lock-group_4	localhost_12001
-lock-group_5	localhost_12001
-lock-group_6	localhost_12000
-lock-group_7	localhost_12000
-lock-group_8	localhost_12000
-lock-group_9	localhost_12000
+lock-group_0    localhost_12002
+lock-group_1    localhost_12002
+lock-group_10    localhost_12002
+lock-group_11    localhost_12002
+lock-group_2    localhost_12001
+lock-group_3    localhost_12001
+lock-group_4    localhost_12001
+lock-group_5    localhost_12001
+lock-group_6    localhost_12000
+lock-group_7    localhost_12000
+lock-group_8    localhost_12000
+lock-group_9    localhost_12000
 Stopping localhost_12000
 localhost_12000Interrupted
 localhost_12001 acquired lock:lock-group_9
 localhost_12001 acquired lock:lock-group_8
 localhost_12002 acquired lock:lock-group_6
 localhost_12002 acquired lock:lock-group_7
-lockName	acquired By
+lockName    acquired By
 ======================================
-lock-group_0	localhost_12002
-lock-group_1	localhost_12002
-lock-group_10	localhost_12002
-lock-group_11	localhost_12002
-lock-group_2	localhost_12001
-lock-group_3	localhost_12001
-lock-group_4	localhost_12001
-lock-group_5	localhost_12001
-lock-group_6	localhost_12002
-lock-group_7	localhost_12002
-lock-group_8	localhost_12001
-lock-group_9	localhost_12001
+lock-group_0    localhost_12002
+lock-group_1    localhost_12002
+lock-group_10    localhost_12002
+lock-group_11    localhost_12002
+lock-group_2    localhost_12001
+lock-group_3    localhost_12001
+lock-group_4    localhost_12001
+lock-group_5    localhost_12001
+lock-group_6    localhost_12002
+lock-group_7    localhost_12002
+lock-group_8    localhost_12001
+lock-group_9    localhost_12001
 
 ```
 
@@ -135,6 +135,7 @@ This provides more details on how to setup the cluster and where to plugin appli
 ##### Create a lock group
 
 Create a lock group and specify the number of locks in the lock group. 
+
 ```
 ./helix-admin --zkSvr localhost:2199  --addResource lock-manager-demo lock-group 6 OnlineOffline AUTO_REBALANCE
 ```
@@ -172,11 +173,11 @@ LockFactory that creates the lock
  
 ```
 public class LockFactory extends StateModelFactory<Lock>{
-	
-	/* Instantiates the lock handler, one per lockName*/
+    
+    /* Instantiates the lock handler, one per lockName*/
     public Lock create(String lockName)
     {
-    	return new Lock(lockName);
+        return new Lock(lockName);
     }   
 }
 ```
@@ -186,23 +187,27 @@ At node start up, simply join the cluster and helix will invoke the appropriate
 ```
 public class LockProcess{
 
-	public static void main(String args){
-	    String zkAddress= "localhost:2199";
-	    String clusterName = "lock-manager-demo";
-	    //Give a unique id to each process, most commonly used format hostname_port
-	    String instanceName ="localhost_12000";
-	    ZKHelixAdmin helixAdmin = new ZKHelixAdmin(zkAddress);
-	    //configure the instance and provide some metadata 
-	    InstanceConfig config = new InstanceConfig(instanceName);
-	    config.setHostName("localhost");
-	    config.setPort("12000");
-	    admin.addInstance(clusterName, config);
-	    //join the cluster
-		HelixManager manager = HelixManager.getHelixManager(clusterName,null,InstanceType.PARTICIPANT,zkAddress);
-		manager.getStateMachineEngine().registerStateModelFactory("OnlineOffline", modelFactory);
-		manager.connect();
-		Thread.currentThread.join();
-	}
+  public static void main(String[] args) throws Exception {
+    String zkAddress= "localhost:2199";
+    String clusterName = "lock-manager-demo";
+    //Give a unique id to each process, most commonly used format hostname_port
+    String instanceName ="localhost_12000";
+    ZKHelixAdmin helixAdmin = new ZKHelixAdmin(zkAddress);
+    //configure the instance and provide some metadata 
+    InstanceConfig config = new InstanceConfig(instanceName);
+    config.setHostName("localhost");
+    config.setPort("12000");
+    helixAdmin.addInstance(clusterName, config);
+    //join the cluster
+    HelixManager manager;
+    manager = HelixManagerFactory.getHelixManager(clusterName,
+                                                  instanceName,
+                                                  InstanceType.PARTICIPANT,
+                                                  zkAddress);
+    manager.getStateMachineEngine().registerStateModelFactory("OnlineOffline", modelFactory);
+    manager.connect();
+    Thread.currentThread().join();
+  }
 
 }
 ```
@@ -224,15 +229,19 @@ This is recommended when the number of nodes in the cluster is less than 100. To
 ```
 public class LockProcess{
 
-	public static void main(String args){
-	    String zkAddress= "localhost:2199";
-	    String clusterName = "lock-manager-demo";
-	    .
-	    .
-	    manager.connect();
-	    final HelixManager controller = HelixControllerMain.startHelixController(zkAddress, clusterName, "controller", HelixControllerMain.STANDALONE);
-	    Thread.currentThread.join();
-	}
+  public static void main(String[] args) throws Exception {
+    String zkAddress= "localhost:2199";
+    String clusterName = "lock-manager-demo";
+    .
+    .
+    manager.connect();
+    HelixManager controller;
+    controller = HelixControllerMain.startHelixController(zkAddress, 
+                                                          clusterName,
+                                                          "controller", 
+                                                          HelixControllerMain.STANDALONE);
+    Thread.currentThread().join();
+  }
 }
 ```
 

