usergrid-commits mailing list archives

From: sfeld...@apache.org
Subject: incubator-usergrid git commit: add logging to shard allocation
Date: Tue, 07 Jul 2015 19:13:47 GMT
Repository: incubator-usergrid
Updated Branches:
  refs/heads/two-dot-o-dev e3724d7fa -> 28bc4ca8e


add logging to shard allocation


Project: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/commit/28bc4ca8
Tree: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/tree/28bc4ca8
Diff: http://git-wip-us.apache.org/repos/asf/incubator-usergrid/diff/28bc4ca8

Branch: refs/heads/two-dot-o-dev
Commit: 28bc4ca8e68248ccc3491202e08d3fd4b05bdc2a
Parents: e3724d7
Author: Shawn Feldman <sfeldman@apache.org>
Authored: Tue Jul 7 13:13:46 2015 -0600
Committer: Shawn Feldman <sfeldman@apache.org>
Committed: Tue Jul 7 13:13:46 2015 -0600

----------------------------------------------------------------------
 .../impl/shard/impl/ShardGroupCompactionImpl.java       | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-usergrid/blob/28bc4ca8/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/ShardGroupCompactionImpl.java
----------------------------------------------------------------------
diff --git a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/ShardGroupCompactionImpl.java b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/ShardGroupCompactionImpl.java
index 619e65d..2194400 100644
--- a/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/ShardGroupCompactionImpl.java
+++ b/stack/corepersistence/graph/src/main/java/org/apache/usergrid/persistence/graph/serialization/impl/shard/impl/ShardGroupCompactionImpl.java
@@ -32,6 +32,7 @@ import java.util.UUID;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.RejectedExecutionException;
+import java.util.concurrent.atomic.AtomicLong;
 
 import javax.annotation.Nullable;
 
@@ -79,6 +80,7 @@ import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
 public class ShardGroupCompactionImpl implements ShardGroupCompaction {
 
 
+    private final AtomicLong countAudits;
     private static final Logger LOG = LoggerFactory.getLogger( ShardGroupCompactionImpl.class );
 
 
@@ -109,6 +111,7 @@ public class ShardGroupCompactionImpl implements ShardGroupCompaction {
                                      final EdgeShardSerialization edgeShardSerialization) {
 
         this.timeService = timeService;
+        this.countAudits = new AtomicLong();
         this.graphFig = graphFig;
         this.nodeShardAllocation = nodeShardAllocation;
         this.shardedEdgeSerialization = shardedEdgeSerialization;
@@ -146,7 +149,9 @@ public class ShardGroupCompactionImpl implements ShardGroupCompaction {
         Preconditions
             .checkArgument( group.shouldCompact( startTime ), "Compaction cannot be run yet.  Ignoring compaction." );
 
-
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Compacting shard group. count is {} ", countAudits.get());
+        }
         final CompactionResult.CompactionBuilder resultBuilder = CompactionResult.builder();
 
         final Shard targetShard = group.getCompactionTarget();
@@ -299,6 +304,11 @@ public class ShardGroupCompactionImpl implements ShardGroupCompaction {
             return Futures.immediateFuture( AuditResult.NOT_CHECKED );
         }
 
+        countAudits.getAndIncrement();
+        
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Auditing shard group. count is {} ", countAudits.get());
+        }
         /**
          * Try and submit.  During back pressure, we may not be able to submit, that's ok.  Better to drop than to
          * hose the system

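For readers following along, the change above boils down to one pattern: a shared AtomicLong counts audits, and it is read inside an isDebugEnabled() guard so the counter read and argument boxing are skipped entirely when debug logging is off. Below is a minimal, self-contained sketch of that pattern; it assumes SLF4J on the classpath, and the class and method names are illustrative, not part of the Usergrid codebase.

import java.util.concurrent.atomic.AtomicLong;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class AuditCounterSketch {

    private static final Logger LOG = LoggerFactory.getLogger( AuditCounterSketch.class );

    // Thread-safe counter; concurrent audit tasks may increment it.
    private final AtomicLong countAudits = new AtomicLong();

    public void audit() {
        countAudits.getAndIncrement();

        // The guard skips the volatile read and boxing when debug is off.
        if ( LOG.isDebugEnabled() ) {
            LOG.debug( "Auditing shard group. count is {}", countAudits.get() );
        }
    }
}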

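The comment at the end of the last hunk ("better to drop than to hose the system") describes drop-on-backpressure submission, consistent with the RejectedExecutionException import in the first hunk. Here is a hedged sketch of that idea, assuming a bounded ThreadPoolExecutor whose default AbortPolicy rejects work once the queue fills; all names below are hypothetical, not taken from the Usergrid codebase.

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class DropOnBackpressureSketch {

    // Bounded queue; the default AbortPolicy rejects work when it is full.
    private final ThreadPoolExecutor executor = new ThreadPoolExecutor(
        2, 2, 0L, TimeUnit.MILLISECONDS,
        new ArrayBlockingQueue<Runnable>( 100 ) );

    public boolean trySubmit( final Runnable auditTask ) {
        try {
            executor.execute( auditTask );
            return true;
        }
        catch ( RejectedExecutionException ree ) {
            // Back pressure: the queue is full, so drop this audit rather
            // than block or overload the system.
            return false;
        }
    }
}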