stratos-commits mailing list archives

From lahi...@apache.org
Subject [3/3] stratos git commit: Limiting the instance count to max in dependent scaling up
Date Fri, 26 Dec 2014 15:31:10 GMT
Limiting the instance count to max in dependent scaling up


Project: http://git-wip-us.apache.org/repos/asf/stratos/repo
Commit: http://git-wip-us.apache.org/repos/asf/stratos/commit/35f156d4
Tree: http://git-wip-us.apache.org/repos/asf/stratos/tree/35f156d4
Diff: http://git-wip-us.apache.org/repos/asf/stratos/diff/35f156d4

Branch: refs/heads/master
Commit: 35f156d484a1d3c013234b69a66b6bcacc404310
Parents: cf7b808
Author: Lahiru Sandaruwan <lahirus@apache.org>
Authored: Fri Dec 26 20:49:08 2014 +0530
Committer: Lahiru Sandaruwan <lahirus@apache.org>
Committed: Fri Dec 26 20:50:02 2014 +0530

----------------------------------------------------------------------
 .../src/main/conf/drools/dependent-scaling.drl  | 75 +++++++++++++-------
 1 file changed, 49 insertions(+), 26 deletions(-)
----------------------------------------------------------------------
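The change below caps dependent scale-up at the cluster's max instance count: the rule now binds the non-terminated member count once, adds members only up to the max, and delegates a scaling-over-max notification to the parent (for group scaling or application bursting) when the dependency-driven requirement cannot be met within that max. The following is a minimal Java sketch of that capping idea only; the class and method names (DependentScaleUpCap, additionalInstances, shouldNotifyParent) are illustrative placeholders, not the Stratos autoscaler API or the Drools rule itself.

    // Illustrative sketch only: condenses the capping behaviour described by the
    // commit message and the rule's log statements; not the actual Stratos code.
    public final class DependentScaleUpCap {

        /** How many members to add, after capping the target at clusterMax. */
        static int additionalInstances(int nonTerminatedMembers, int requiredInstances, int clusterMax) {
            if (nonTerminatedMembers >= clusterMax) {
                return 0;                              // already at max: do not grow the cluster itself
            }
            int target = Math.min(requiredInstances, clusterMax);
            return target - nonTerminatedMembers;      // grow only up to the capped target
        }

        /** Whether to escalate to the parent for group scaling / app bursting. */
        static boolean shouldNotifyParent(int nonTerminatedMembers, int requiredInstances, int clusterMax) {
            return requiredInstances > clusterMax || nonTerminatedMembers >= clusterMax;
        }

        public static void main(String[] args) {
            // e.g. 4 running members, dependency requires 9, max is 6 -> add 2 and notify the parent
            System.out.println(additionalInstances(4, 9, 6) + " " + shouldNotifyParent(4, 9, 6));
        }
    }
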


http://git-wip-us.apache.org/repos/asf/stratos/blob/35f156d4/products/stratos/modules/distribution/src/main/conf/drools/dependent-scaling.drl
----------------------------------------------------------------------
diff --git a/products/stratos/modules/distribution/src/main/conf/drools/dependent-scaling.drl b/products/stratos/modules/distribution/src/main/conf/drools/dependent-scaling.drl
index 0d98d0e..b225958 100644
--- a/products/stratos/modules/distribution/src/main/conf/drools/dependent-scaling.drl
+++ b/products/stratos/modules/distribution/src/main/conf/drools/dependent-scaling.drl
@@ -43,25 +43,42 @@ dialect "mvel"
         clusterInstanceContext : ClusterInstanceContext ()
         autoscaleAlgorithm : AutoscaleAlgorithm() from  delegator.getAutoscaleAlgorithm(algorithmName)
 
+        nonTerminatedMembers : Integer() from clusterInstanceContext.getNonTerminatedMemberCount()
+
         eval(log.debug("Running dependent scaling rule: [network-partition] " + clusterInstanceContext.getNetworkPartitionId() + " [cluster-instance] " + clusterInstanceContext.getId()))
-        scaleUp : Boolean() from (clusterInstanceContext.getNonTerminatedMemberCount() < roundedRequiredInstanceCount )
-        scaleDown : Boolean() from (clusterInstanceContext.getNonTerminatedMemberCount() > roundedRequiredInstanceCount )
+        scaleUp : Boolean() from (nonTerminatedMembers < roundedRequiredInstanceCount )
+        scaleDown : Boolean() from (nonTerminatedMembers > roundedRequiredInstanceCount )
 
 	then
 
         if(scaleUp) {
+
             int clusterMaxMembers = clusterInstanceContext.getMaxInstanceCount();
-            int currentMemberCount = clusterInstanceContext.getNonTerminatedMemberCount();
-            int additionalInstances = roundedRequiredInstanceCount - currentMemberCount;
-            int count = 0;
-            boolean partitionsAvailable = true;
+            if (nonTerminatedMembers < clusterInstanceContext.getMaxInstanceCount()) {
+
+                int additionalInstances = 0;
+                if(clusterInstanceContext.getMaxInstanceCount() < roundedRequiredInstanceCount){
+
+                    additionalInstances = clusterInstanceContext.getMaxInstanceCount() - nonTerminatedMembers;
+                } else {
+
+                    additionalInstances = roundedRequiredInstanceCount - nonTerminatedMembers;
+                    log.info("[scale-up] Required member count based on dependecy scaling is higher than max, hence
+                            notifying to parent for possible group scaling or app bursting.
+                            [cluster] " + clusterId + " [instance id]" + clusterInstanceContext.getId() +
+                            " [max] " + clusterMaxMembers);
+                    delegator.delegateScalingOverMaxNotification(clusterId, clusterInstanceContext.getNetworkPartitionId(), clusterInstanceContext.getId());
+                }
+
+                int count = 0;
+                boolean partitionsAvailable = true;
 
-            log.debug("[dependent-scale] is running for [cluster] " + clusterId +
-            " [cluster-instance] " + clusterInstanceContext.getId() + " max member count is: " +
-                clusterMaxMembers + " current member count is: " + currentMemberCount);
+                log.debug("[dependent-scale] is running for [cluster] " + clusterId +
+                " [cluster-instance] " + clusterInstanceContext.getId() + " max member count is: " +
+                    clusterMaxMembers + " current member count is: " + nonTerminatedMembers);
+
+                while(count != additionalInstances  && partitionsAvailable) {
 
-            while(count != additionalInstances  && partitionsAvailable) {
-                if(currentMemberCount < clusterMaxMembers) {
                     ClusterLevelPartitionContext partitionContext =  (ClusterLevelPartitionContext)autoscaleAlgorithm.getNextScaleUpPartitionContext(clusterInstanceContext.getPartitionCtxtsAsAnArray());
                     if(partitionContext != null) {
 
@@ -72,27 +89,33 @@ dialect "mvel"
                     } else {
                         partitionsAvailable = false;
                     }
-                } else {
-                    partitionsAvailable = false;
                 }
-            }
-            if(!partitionsAvailable) {
-                if(clusterInstanceContext.isInGroupScalingEnabledSubtree()){
-                    delegator.delegateScalingOverMaxNotification(clusterId,
-                                                    clusterInstanceContext.getNetworkPartitionId(),
-                                                    clusterInstanceContext.getId());
-                    log.info("[dependency-scale][dependent-max-notification] partition is not
-                    available for [scale-up]. Hence notifying the parent for group scaling" );
-                } else {
-                    log.warn("[dependency-scale][dependent-max-notification] partition is not
-                                    available for [scale-up]. All resources are exhausted.
-                                    Please enable group-scaling for further scaleup" );
+
+                if(!partitionsAvailable) {
+                    if(clusterInstanceContext.isInGroupScalingEnabledSubtree()){
+                        delegator.delegateScalingOverMaxNotification(clusterId,
+                                                        clusterInstanceContext.getNetworkPartitionId(),
+                                                        clusterInstanceContext.getId());
+                        log.info("[dependency-scale][dependent-max-notification] partition is not
+                        available for [scale-up]. Hence notifying the parent for group scaling" );
+                    } else {
+                        log.warn("[dependency-scale][dependent-max-notification] partition is not
+                                        available for [scale-up]. All resources are exhausted.
+                                        Please enable group-scaling for further scaleup" );
+                    }
+
                 }
+            } else {
 
+                 log.info("[scale-up] Trying to scale up over max, hence not scaling up cluster itself and
+                         notifying to parent for possible group scaling or app bursting.
+                         [cluster] " + clusterId + " [instance id]" + clusterInstanceContext.getId() +
+                         " [max] " + clusterMaxMembers);
+                 delegator.delegateScalingOverMaxNotification(clusterId, clusterInstanceContext.getNetworkPartitionId(), clusterInstanceContext.getId());
             }
         } else if (scaleDown) {
 
-            int redundantInstances = clusterInstanceContext.getNonTerminatedMemberCount() - roundedRequiredInstanceCount;
+            int redundantInstances = nonTerminatedMembers - roundedRequiredInstanceCount;
 
             int count = 0;
 


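For the when-clause and the scale-down branch, the rule now compares a single nonTerminatedMembers binding against roundedRequiredInstanceCount and derives the redundant member count from the same values. A compact, self-contained sketch of that decision follows; the class and its names (DependentScalingDecision, decide, redundantInstances) are illustrative only and not part of Stratos.

    // Mirrors the rule's scaleUp/scaleDown conditions and its redundant-instance
    // count outside of Drools; for illustration only.
    public final class DependentScalingDecision {

        enum Action { SCALE_UP, SCALE_DOWN, NO_CHANGE }

        static Action decide(int nonTerminatedMembers, int roundedRequiredInstanceCount) {
            if (nonTerminatedMembers < roundedRequiredInstanceCount) {
                return Action.SCALE_UP;
            } else if (nonTerminatedMembers > roundedRequiredInstanceCount) {
                return Action.SCALE_DOWN;
            }
            return Action.NO_CHANGE;
        }

        // Matches the diff: redundantInstances = nonTerminatedMembers - roundedRequiredInstanceCount
        static int redundantInstances(int nonTerminatedMembers, int roundedRequiredInstanceCount) {
            return nonTerminatedMembers - roundedRequiredInstanceCount;
        }

        public static void main(String[] args) {
            // e.g. 5 running members, dependency requires 3 -> SCALE_DOWN, 2 redundant members
            System.out.println(decide(5, 3) + " redundant=" + redundantInstances(5, 3));
        }
    }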