helix-commits mailing list archives

From hu...@apache.org
Subject [helix] branch master updated: Fix TestZNRecordSizeLimit
Date Tue, 28 May 2019 18:25:19 GMT
This is an automated email from the ASF dual-hosted git repository.

hulee pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/helix.git


The following commit(s) were added to refs/heads/master by this push:
     new 04eefd5  Fix TestZNRecordSizeLimit
04eefd5 is described below

commit 04eefd5942e267667fa4161954f3a8f94656ec5f
Author: Hunter Lee <hulee@linkedin.com>
AuthorDate: Tue May 28 11:25:05 2019 -0700

    Fix TestZNRecordSizeLimit
    
    This diff fixes TestZNRecordSizeLimit so that it accounts for auto-compression of ZNodes being enabled by default.
---
 .../helix/manager/zk/TestZNRecordSizeLimit.java    | 71 ++++++++++++----------
 1 file changed, 38 insertions(+), 33 deletions(-)

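For context, a minimal standalone sketch (not part of the commit) of the behavior the
updated assertions rely on: a ZNRecord whose raw form exceeds ZooKeeper's default 1 MB
znode limit should be compressed by the serializer rather than rejected, so the write is
expected to succeed. ZNRecord, ZNRecordSerializer, and the field sizes mirror the test
below; the CompressionSketch class itself is hypothetical.

    import java.util.Arrays;

    import org.apache.helix.ZNRecord;
    import org.apache.helix.manager.zk.ZNRecordSerializer;

    public class CompressionSketch {
      public static void main(String[] args) {
        // Build a record whose raw serialized form exceeds 1 MB: 1024 simple fields
        // of 1024 'a' characters each, exactly as the test constructs largeRecord.
        ZNRecord largeRecord = new ZNRecord("large");
        char[] buf = new char[1024];
        Arrays.fill(buf, 'a');
        String bufStr = new String(buf);
        for (int i = 0; i < 1024; i++) {
          largeRecord.setSimpleField(i + "", bufStr);
        }

        // With auto-compression enabled by default (the premise of this fix), the
        // serializer is expected to compress the oversized payload instead of the
        // write failing, keeping the bytes handed to ZooKeeper under the limit.
        byte[] serialized = new ZNRecordSerializer().serialize(largeRecord);
        System.out.println("Serialized size: " + serialized.length + " bytes");
      }
    }

This is also why the test now asserts the inverse of its old expectations: writes of
largeRecord succeed, the data read back is non-null, and the bytes re-read after an
oversized overwrite no longer match the original record.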
diff --git a/helix-core/src/test/java/org/apache/helix/manager/zk/TestZNRecordSizeLimit.java b/helix-core/src/test/java/org/apache/helix/manager/zk/TestZNRecordSizeLimit.java
index 736ee5e..bccb425 100644
--- a/helix-core/src/test/java/org/apache/helix/manager/zk/TestZNRecordSizeLimit.java
+++ b/helix-core/src/test/java/org/apache/helix/manager/zk/TestZNRecordSizeLimit.java
@@ -31,10 +31,14 @@ import org.apache.helix.manager.zk.client.SharedZkClientFactory;
 import org.apache.helix.model.IdealState;
 import org.apache.helix.model.IdealState.RebalanceMode;
 import org.apache.helix.model.InstanceConfig;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.testng.Assert;
 import org.testng.annotations.Test;
 
 public class TestZNRecordSizeLimit extends ZkUnitTestBase {
+  private static Logger LOG = LoggerFactory.getLogger(TestZNRecordSizeLimit.class);
+
   @Test
   public void testZNRecordSizeLimitUseZNRecordSerializer() {
     String className = getShortClassName();
@@ -43,6 +47,7 @@ public class TestZNRecordSizeLimit extends ZkUnitTestBase {
 
     ZNRecordSerializer serializer = new ZNRecordSerializer();
 
+    String root = className;
     byte[] buf = new byte[1024];
     for (int i = 0; i < 1024; i++) {
       buf[i] = 'a';
@@ -58,7 +63,7 @@ public class TestZNRecordSizeLimit extends ZkUnitTestBase {
       smallRecord.setSimpleField(i + "", bufStr);
     }
 
-    String path1 = "/" + className + "/test1";
+    String path1 = "/" + root + "/test1";
     _gZkClient.createPersistent(path1, true);
     _gZkClient.writeData(path1, smallRecord);
 
@@ -72,29 +77,27 @@ public class TestZNRecordSizeLimit extends ZkUnitTestBase {
     for (int i = 0; i < 1024; i++) {
       largeRecord.setSimpleField(i + "", bufStr);
     }
-    String path2 = "/" + className + "/test2";
+    String path2 = "/" + root + "/test2";
     _gZkClient.createPersistent(path2, true);
     try {
       _gZkClient.writeData(path2, largeRecord);
-      Assert.fail("Should fail because data size is larger than 1M");
     } catch (HelixException e) {
-      // OK
+      Assert.fail("Should not fail because data size is larger than 1M since compression applied");
     }
     record = _gZkClient.readData(path2);
-    Assert.assertNull(record);
+    Assert.assertNotNull(record);
 
     // oversized write doesn't overwrite existing data on zk
     record = _gZkClient.readData(path1);
     try {
       _gZkClient.writeData(path1, largeRecord);
-      Assert.fail("Should fail because data size is larger than 1M");
     } catch (HelixException e) {
-      // OK
+      Assert.fail("Should not fail because data size is larger than 1M since compression applied");
     }
     ZNRecord recordNew = _gZkClient.readData(path1);
     byte[] arr = serializer.serialize(record);
     byte[] arrNew = serializer.serialize(recordNew);
-    Assert.assertTrue(Arrays.equals(arr, arrNew));
+    Assert.assertFalse(Arrays.equals(arr, arrNew));
 
     // test ZkDataAccessor
     ZKHelixAdmin admin = new ZKHelixAdmin(_gZkClient);
@@ -104,7 +107,7 @@ public class TestZNRecordSizeLimit extends ZkUnitTestBase {
 
     // oversized data should not create any new data on zk
     ZKHelixDataAccessor accessor =
-        new ZKHelixDataAccessor(className, new ZkBaseDataAccessor<>(_gZkClient));
+        new ZKHelixDataAccessor(className, new ZkBaseDataAccessor(_gZkClient));
     Builder keyBuilder = accessor.keyBuilder();
 
     IdealState idealState = new IdealState("currentState");
@@ -116,9 +119,10 @@ public class TestZNRecordSizeLimit extends ZkUnitTestBase {
       idealState.getRecord().setSimpleField(i + "", bufStr);
     }
     boolean succeed = accessor.setProperty(keyBuilder.idealStates("TestDB0"), idealState);
-    Assert.assertFalse(succeed);
-    HelixProperty property = accessor.getProperty(
-        keyBuilder.stateTransitionStatus("localhost_12918", "session_1", "partition_1"));
+    Assert.assertTrue(succeed);
+    HelixProperty property =
+        accessor.getProperty(keyBuilder.stateTransitionStatus("localhost_12918", "session_1",
+            "partition_1"));
     Assert.assertNull(property);
 
     // legal sized data gets written to zk
@@ -145,13 +149,12 @@ public class TestZNRecordSizeLimit extends ZkUnitTestBase {
     }
     // System.out.println("record: " + idealState.getRecord());
     succeed = accessor.updateProperty(keyBuilder.idealStates("TestDB1"), idealState);
-    Assert.assertFalse(succeed);
+    Assert.assertTrue(succeed);
     recordNew = accessor.getProperty(keyBuilder.idealStates("TestDB1")).getRecord();
     arr = serializer.serialize(record);
     arrNew = serializer.serialize(recordNew);
-    Assert.assertTrue(Arrays.equals(arr, arrNew));
+    Assert.assertFalse(Arrays.equals(arr, arrNew));
 
-    deleteCluster(className);
     System.out.println("END testZNRecordSizeLimitUseZNRecordSerializer at "
         + new Date(System.currentTimeMillis()));
   }
@@ -159,8 +162,8 @@ public class TestZNRecordSizeLimit extends ZkUnitTestBase {
   @Test
   public void testZNRecordSizeLimitUseZNRecordStreamingSerializer() {
     String className = getShortClassName();
-    System.out.println("START testZNRecordSizeLimitUseZNRecordStreamingSerializer at "
-        + new Date(System.currentTimeMillis()));
+    System.out.println("START testZNRecordSizeLimitUseZNRecordStreamingSerializer at " + new Date(
+        System.currentTimeMillis()));
 
     ZNRecordStreamingSerializer serializer = new ZNRecordStreamingSerializer();
     HelixZkClient zkClient = SharedZkClientFactory.getInstance()
@@ -168,6 +171,7 @@ public class TestZNRecordSizeLimit extends ZkUnitTestBase {
 
     try {
       zkClient.setZkSerializer(serializer);
+      String root = className;
       byte[] buf = new byte[1024];
       for (int i = 0; i < 1024; i++) {
         buf[i] = 'a';
@@ -183,7 +187,7 @@ public class TestZNRecordSizeLimit extends ZkUnitTestBase {
         smallRecord.setSimpleField(i + "", bufStr);
       }
 
-      String path1 = "/" + className + "/test1";
+      String path1 = "/" + root + "/test1";
       zkClient.createPersistent(path1, true);
       zkClient.writeData(path1, smallRecord);
 
@@ -197,29 +201,29 @@ public class TestZNRecordSizeLimit extends ZkUnitTestBase {
       for (int i = 0; i < 1024; i++) {
         largeRecord.setSimpleField(i + "", bufStr);
       }
-      String path2 = "/" + className + "/test2";
+      String path2 = "/" + root + "/test2";
       zkClient.createPersistent(path2, true);
       try {
         zkClient.writeData(path2, largeRecord);
-        Assert.fail("Should fail because data size is larger than 1M");
       } catch (HelixException e) {
-        // OK
+        Assert
+            .fail("Should not fail because data size is larger than 1M since compression applied");
       }
       record = zkClient.readData(path2);
-      Assert.assertNull(record);
+      Assert.assertNotNull(record);
 
       // oversized write doesn't overwrite existing data on zk
       record = zkClient.readData(path1);
       try {
         zkClient.writeData(path1, largeRecord);
-        Assert.fail("Should fail because data size is larger than 1M");
       } catch (HelixException e) {
-        // OK
+        Assert
+            .fail("Should not fail because data size is larger than 1M since compression applied");
       }
       ZNRecord recordNew = zkClient.readData(path1);
       byte[] arr = serializer.serialize(record);
       byte[] arrNew = serializer.serialize(recordNew);
-      Assert.assertTrue(Arrays.equals(arr, arrNew));
+      Assert.assertFalse(Arrays.equals(arr, arrNew));
 
       // test ZkDataAccessor
       ZKHelixAdmin admin = new ZKHelixAdmin(zkClient);
@@ -229,7 +233,7 @@ public class TestZNRecordSizeLimit extends ZkUnitTestBase {
 
       // oversized data should not create any new data on zk
       ZKHelixDataAccessor accessor =
-          new ZKHelixDataAccessor(className, new ZkBaseDataAccessor<>(zkClient));
+          new ZKHelixDataAccessor(className, new ZkBaseDataAccessor(zkClient));
       Builder keyBuilder = accessor.keyBuilder();
 
       // ZNRecord statusUpdates = new ZNRecord("statusUpdates");
@@ -242,9 +246,9 @@ public class TestZNRecordSizeLimit extends ZkUnitTestBase {
         idealState.getRecord().setSimpleField(i + "", bufStr);
       }
       boolean succeed = accessor.setProperty(keyBuilder.idealStates("TestDB_1"), idealState);
-      Assert.assertFalse(succeed);
+      Assert.assertTrue(succeed);
       HelixProperty property = accessor.getProperty(keyBuilder.idealStates("TestDB_1"));
-      Assert.assertNull(property);
+      Assert.assertNotNull(property);
 
       // legal sized data gets written to zk
       idealState.getRecord().getSimpleFields().clear();
@@ -271,17 +275,18 @@ public class TestZNRecordSizeLimit extends ZkUnitTestBase {
       }
       // System.out.println("record: " + idealState.getRecord());
       succeed = accessor.updateProperty(keyBuilder.idealStates("TestDB_2"), idealState);
-      Assert.assertFalse(succeed);
+      Assert.assertTrue(succeed);
       recordNew = accessor.getProperty(keyBuilder.idealStates("TestDB_2")).getRecord();
       arr = serializer.serialize(record);
       arrNew = serializer.serialize(recordNew);
-      Assert.assertTrue(Arrays.equals(arr, arrNew));
+      Assert.assertFalse(Arrays.equals(arr, arrNew));
+    } catch (HelixException ex) {
+      Assert.fail("Should not fail because data size is larger than 1M since compression applied");
     } finally {
       zkClient.close();
     }
 
-    deleteCluster(className);
-    System.out.println("END testZNRecordSizeLimitUseZNRecordStreamingSerializer at "
-        + new Date(System.currentTimeMillis()));
+    System.out.println("END testZNRecordSizeLimitUseZNRecordStreamingSerializer at " + new Date(
+        System.currentTimeMillis()));
   }
 }

