helix-commits mailing list archives

From hu...@apache.org
Subject [helix] branch master updated: Fix incorrect exception type and confusing assertion messages (#976)
Date Wed, 27 May 2020 06:37:27 GMT
This is an automated email from the ASF dual-hosted git repository.

hulee pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/helix.git


The following commit(s) were added to refs/heads/master by this push:
     new d7f7cfc  Fix incorrect exception type and confusing assertion messages (#976)
d7f7cfc is described below

commit d7f7cfcc12f0ff6f5eaa2b5c8e36104105134af3
Author: Huizhi Lu <ihuizhi.lu@gmail.com>
AuthorDate: Tue May 26 23:37:16 2020 -0700

    Fix incorrect exception type and confusing assertion messages (#976)
    
    In TestZNRecordSizeLimit, the assertion message should indicate that the data is smaller
    than the 1 MB threshold. The caught HelixException should be replaced with ZkMarshallingError
    because the exception type thrown by the ZNRecord serializer has changed.
---
 .../helix/manager/zk/TestZNRecordSizeLimit.java    | 108 +++++++++++++--------
 1 file changed, 68 insertions(+), 40 deletions(-)
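
The pattern this patch applies throughout the test is a small try/catch wrapper: serialization
calls that may throw are caught, and the test fails with a message stating the real expectation
(the compressed payload is below the 1 MB threshold) plus the caught error. Below is a minimal
sketch of that pattern, not part of the patch; the class name SerializeOrFailSketch is
hypothetical, and the import package paths are assumptions (ZNRecord, ZNRecordSerializer, and
ZkMarshallingError moved during the helix-zookeeper-api refactor), so adjust them to your Helix
version.

// Minimal sketch of the assertion pattern adopted by the patch (hypothetical helper,
// not the actual test class). Import paths are assumptions; adjust to your Helix version.
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.zookeeper.datamodel.serializer.ZNRecordSerializer;
import org.apache.helix.zookeeper.zkclient.exception.ZkMarshallingError;
import org.testng.Assert;

public class SerializeOrFailSketch {
  private static final String ASSERTION_MESSAGE =
      "Should succeed because compressed data is smaller than 1M. Caused by: ";

  static byte[] serializeOrFail(ZNRecordSerializer serializer, ZNRecord record) {
    try {
      // The serializer now surfaces ZkMarshallingError (rather than HelixException)
      // when the serialized/compressed payload exceeds the configured write size limit.
      return serializer.serialize(record);
    } catch (ZkMarshallingError e) {
      // Fail with a message that names the expectation and appends the underlying error.
      Assert.fail(ASSERTION_MESSAGE + e);
      return null; // unreachable: Assert.fail throws AssertionError
    }
  }
}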

diff --git a/helix-core/src/test/java/org/apache/helix/manager/zk/TestZNRecordSizeLimit.java b/helix-core/src/test/java/org/apache/helix/manager/zk/TestZNRecordSizeLimit.java
index 8c8d649..daf99bb 100644
--- a/helix-core/src/test/java/org/apache/helix/manager/zk/TestZNRecordSizeLimit.java
+++ b/helix-core/src/test/java/org/apache/helix/manager/zk/TestZNRecordSizeLimit.java
@@ -22,7 +22,6 @@ package org.apache.helix.manager.zk;
 import java.util.Arrays;
 import java.util.Date;
 
-import org.apache.helix.HelixException;
 import org.apache.helix.HelixProperty;
 import org.apache.helix.PropertyKey.Builder;
 import org.apache.helix.TestHelper;
@@ -41,14 +40,18 @@ import org.slf4j.LoggerFactory;
 import org.testng.Assert;
 import org.testng.annotations.Test;
 
+
 public class TestZNRecordSizeLimit extends ZkUnitTestBase {
   private static Logger LOG = LoggerFactory.getLogger(TestZNRecordSizeLimit.class);
 
+  private static final String ASSERTION_MESSAGE =
+      "Should succeed because compressed data is smaller than 1M. Caused by: ";
+
   @Test
   public void testZNRecordSizeLimitUseZNRecordSerializer() {
     String className = getShortClassName();
-    System.out.println("START testZNRecordSizeLimitUseZNRecordSerializer at "
-        + new Date(System.currentTimeMillis()));
+    System.out.println("START testZNRecordSizeLimitUseZNRecordSerializer at " + new Date(
+        System.currentTimeMillis()));
 
     ZNRecordSerializer serializer = new ZNRecordSerializer();
 
@@ -86,8 +89,8 @@ public class TestZNRecordSizeLimit extends ZkUnitTestBase {
     _gZkClient.createPersistent(path2, true);
     try {
       _gZkClient.writeData(path2, largeRecord);
-    } catch (HelixException e) {
-      Assert.fail("Should not fail because data size is larger than 1M since compression
applied");
+    } catch (ZkMarshallingError e) {
+      Assert.fail(ASSERTION_MESSAGE + e);
     }
     record = _gZkClient.readData(path2);
     Assert.assertNotNull(record);
@@ -96,13 +99,17 @@ public class TestZNRecordSizeLimit extends ZkUnitTestBase {
     record = _gZkClient.readData(path1);
     try {
       _gZkClient.writeData(path1, largeRecord);
-    } catch (HelixException e) {
-      Assert.fail("Should not fail because data size is larger than 1M since compression
applied");
+    } catch (ZkMarshallingError e) {
+      Assert.fail(ASSERTION_MESSAGE + e);
     }
     ZNRecord recordNew = _gZkClient.readData(path1);
-    byte[] arr = serializer.serialize(record);
-    byte[] arrNew = serializer.serialize(recordNew);
-    Assert.assertFalse(Arrays.equals(arr, arrNew));
+    try {
+      byte[] arr = serializer.serialize(record);
+      byte[] arrNew = serializer.serialize(recordNew);
+      Assert.assertFalse(Arrays.equals(arr, arrNew));
+    } catch (ZkMarshallingError e) {
+      Assert.fail(ASSERTION_MESSAGE + e);
+    }
 
     // test ZkDataAccessor
     ZKHelixAdmin admin = new ZKHelixAdmin(_gZkClient);
@@ -125,9 +132,8 @@ public class TestZNRecordSizeLimit extends ZkUnitTestBase {
     }
     boolean succeed = accessor.setProperty(keyBuilder.idealStates("TestDB0"), idealState);
     Assert.assertTrue(succeed);
-    HelixProperty property =
-        accessor.getProperty(keyBuilder.stateTransitionStatus("localhost_12918", "session_1",
-            "partition_1"));
+    HelixProperty property = accessor.getProperty(
+        keyBuilder.stateTransitionStatus("localhost_12918", "session_1", "partition_1"));
     Assert.assertNull(property);
 
     // legal sized data gets written to zk
@@ -142,7 +148,11 @@ public class TestZNRecordSizeLimit extends ZkUnitTestBase {
     succeed = accessor.setProperty(keyBuilder.idealStates("TestDB1"), idealState);
     Assert.assertTrue(succeed);
     record = accessor.getProperty(keyBuilder.idealStates("TestDB1")).getRecord();
-    Assert.assertTrue(serializer.serialize(record).length > 900 * 1024);
+    try {
+      Assert.assertTrue(serializer.serialize(record).length > 900 * 1024);
+    } catch (ZkMarshallingError e) {
+      Assert.fail(ASSERTION_MESSAGE + e);
+    }
 
     // oversized data should not update existing data on zk
     idealState.getRecord().getSimpleFields().clear();
@@ -156,12 +166,16 @@ public class TestZNRecordSizeLimit extends ZkUnitTestBase {
     succeed = accessor.updateProperty(keyBuilder.idealStates("TestDB1"), idealState);
     Assert.assertTrue(succeed);
     recordNew = accessor.getProperty(keyBuilder.idealStates("TestDB1")).getRecord();
-    arr = serializer.serialize(record);
-    arrNew = serializer.serialize(recordNew);
-    Assert.assertFalse(Arrays.equals(arr, arrNew));
+    try {
+      byte[] arr = serializer.serialize(record);
+      byte[] arrNew = serializer.serialize(recordNew);
+      Assert.assertFalse(Arrays.equals(arr, arrNew));
+    } catch (ZkMarshallingError e) {
+      Assert.fail(ASSERTION_MESSAGE + e);
+    }
 
-    System.out.println("END testZNRecordSizeLimitUseZNRecordSerializer at "
-        + new Date(System.currentTimeMillis()));
+    System.out.println("END testZNRecordSizeLimitUseZNRecordSerializer at " + new Date(
+        System.currentTimeMillis()));
   }
 
   @Test(dependsOnMethods = "testZNRecordSizeLimitUseZNRecordSerializer")
@@ -197,7 +211,11 @@ public class TestZNRecordSizeLimit extends ZkUnitTestBase {
       zkClient.writeData(path1, smallRecord);
 
       ZNRecord record = zkClient.readData(path1);
-      Assert.assertTrue(serializer.serialize(record).length > 900 * 1024);
+      try {
+        Assert.assertTrue(serializer.serialize(record).length > 900 * 1024);
+      } catch (ZkMarshallingError e) {
+        Assert.fail(ASSERTION_MESSAGE + e);
+      }
 
       // oversized data doesn't create any data on zk
       // prepare a znode of size larger than 1m
@@ -210,9 +228,8 @@ public class TestZNRecordSizeLimit extends ZkUnitTestBase {
       zkClient.createPersistent(path2, true);
       try {
         zkClient.writeData(path2, largeRecord);
-      } catch (HelixException e) {
-        Assert
-            .fail("Should not fail because data size is larger than 1M since compression
applied");
+      } catch (ZkMarshallingError e) {
+        Assert.fail(ASSERTION_MESSAGE + e);
       }
       record = zkClient.readData(path2);
       Assert.assertNotNull(record);
@@ -221,14 +238,17 @@ public class TestZNRecordSizeLimit extends ZkUnitTestBase {
       record = zkClient.readData(path1);
       try {
         zkClient.writeData(path1, largeRecord);
-      } catch (HelixException e) {
-        Assert
-            .fail("Should not fail because data size is larger than 1M since compression
applied");
+      } catch (ZkMarshallingError e) {
+        Assert.fail(ASSERTION_MESSAGE + e);
       }
       ZNRecord recordNew = zkClient.readData(path1);
-      byte[] arr = serializer.serialize(record);
-      byte[] arrNew = serializer.serialize(recordNew);
-      Assert.assertFalse(Arrays.equals(arr, arrNew));
+      try {
+        byte[] arr = serializer.serialize(record);
+        byte[] arrNew = serializer.serialize(recordNew);
+        Assert.assertFalse(Arrays.equals(arr, arrNew));
+      } catch (ZkMarshallingError e) {
+        Assert.fail(ASSERTION_MESSAGE + e);
+      }
 
       // test ZkDataAccessor
       ZKHelixAdmin admin = new ZKHelixAdmin(zkClient);
@@ -282,11 +302,13 @@ public class TestZNRecordSizeLimit extends ZkUnitTestBase {
       succeed = accessor.updateProperty(keyBuilder.idealStates("TestDB_2"), idealState);
       Assert.assertTrue(succeed);
       recordNew = accessor.getProperty(keyBuilder.idealStates("TestDB_2")).getRecord();
-      arr = serializer.serialize(record);
-      arrNew = serializer.serialize(recordNew);
-      Assert.assertFalse(Arrays.equals(arr, arrNew));
-    } catch (HelixException ex) {
-      Assert.fail("Should not fail because data size is larger than 1M since compression
applied");
+      try {
+        byte[] arr = serializer.serialize(record);
+        byte[] arrNew = serializer.serialize(recordNew);
+        Assert.assertFalse(Arrays.equals(arr, arrNew));
+      } catch (ZkMarshallingError e) {
+        Assert.fail(ASSERTION_MESSAGE + e);
+      }
     } finally {
       zkClient.close();
     }
@@ -323,7 +345,8 @@ public class TestZNRecordSizeLimit extends ZkUnitTestBase {
       int rawZnRecordSize = 700;
       int writeSizeLimitKb = 800;
       int writeSizeLimit = writeSizeLimitKb * 1024;
-      System.setProperty(ZkSystemPropertyKeys.ZK_SERIALIZER_ZNRECORD_WRITE_SIZE_LIMIT_BYTES, String.valueOf(writeSizeLimit));
+      System.setProperty(ZkSystemPropertyKeys.ZK_SERIALIZER_ZNRECORD_WRITE_SIZE_LIMIT_BYTES,
+          String.valueOf(writeSizeLimit));
 
       final ZNRecord normalSizeRecord = new ZNRecord("normal-size");
       for (int i = 0; i < rawZnRecordSize; i++) {
@@ -350,7 +373,8 @@ public class TestZNRecordSizeLimit extends ZkUnitTestBase {
       // Set the writeSizeLimit to very small so serialized data size exceeds the writeSizeLimit.
       writeSizeLimitKb = 1;
       writeSizeLimit = writeSizeLimitKb * 1024;
-      System.setProperty(ZkSystemPropertyKeys.ZK_SERIALIZER_ZNRECORD_WRITE_SIZE_LIMIT_BYTES, String.valueOf(writeSizeLimit));
+      System.setProperty(ZkSystemPropertyKeys.ZK_SERIALIZER_ZNRECORD_WRITE_SIZE_LIMIT_BYTES,
+          String.valueOf(writeSizeLimit));
 
       final ZNRecord largeRecord = new ZNRecord("large-size");
       for (int i = 0; i < rawZnRecordSize; i++) {
@@ -377,10 +401,12 @@ public class TestZNRecordSizeLimit extends ZkUnitTestBase {
       // Set the writeSizeLimit to 10KB so serialized data size does not exceed writeSizeLimit.
       writeSizeLimitKb = 10;
       writeSizeLimit = writeSizeLimitKb * 1024;
-      System.setProperty(ZkSystemPropertyKeys.ZK_SERIALIZER_ZNRECORD_WRITE_SIZE_LIMIT_BYTES, String.valueOf(writeSizeLimit));
+      System.setProperty(ZkSystemPropertyKeys.ZK_SERIALIZER_ZNRECORD_WRITE_SIZE_LIMIT_BYTES,
+          String.valueOf(writeSizeLimit));
 
       // oversized data should not create any new data on zk
-      ZKHelixDataAccessor accessor = new ZKHelixDataAccessor(root, new ZkBaseDataAccessor<>(ZK_ADDR));
+      ZKHelixDataAccessor accessor =
+          new ZKHelixDataAccessor(root, new ZkBaseDataAccessor<>(ZK_ADDR));
       Builder keyBuilder = accessor.keyBuilder();
 
       IdealState idealState = new IdealState("currentState");
@@ -393,7 +419,8 @@ public class TestZNRecordSizeLimit extends ZkUnitTestBase {
       }
       boolean succeed = accessor.setProperty(keyBuilder.idealStates("TestDB0"), idealState);
       Assert.assertTrue(succeed);
-      HelixProperty property = accessor.getProperty(keyBuilder.stateTransitionStatus("localhost_12918", "session_1", "partition_1"));
+      HelixProperty property = accessor.getProperty(
+          keyBuilder.stateTransitionStatus("localhost_12918", "session_1", "partition_1"));
       Assert.assertNull(property);
 
       // legal sized data gets written to zk
@@ -413,7 +440,8 @@ public class TestZNRecordSizeLimit extends ZkUnitTestBase {
       // Set small write size limit so writing does not succeed.
       writeSizeLimitKb = 1;
       writeSizeLimit = writeSizeLimitKb * 1024;
-      System.setProperty(ZkSystemPropertyKeys.ZK_SERIALIZER_ZNRECORD_WRITE_SIZE_LIMIT_BYTES, String.valueOf(writeSizeLimit));
+      System.setProperty(ZkSystemPropertyKeys.ZK_SERIALIZER_ZNRECORD_WRITE_SIZE_LIMIT_BYTES,
+          String.valueOf(writeSizeLimit));
 
       // oversized data should not update existing data on zk
       idealState.setStateModelDefRef("MasterSlave");

