hive-issues mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From "ASF GitHub Bot (Jira)" <j...@apache.org>
Subject [jira] [Work logged] (HIVE-22015) [CachedStore] Cache table constraints in CachedStore
Date Fri, 26 Jun 2020 12:19:00 GMT

     [ https://issues.apache.org/jira/browse/HIVE-22015?focusedWorklogId=451534&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-451534
]

ASF GitHub Bot logged work on HIVE-22015:
-----------------------------------------

                Author: ASF GitHub Bot
            Created on: 26/Jun/20 12:18
            Start Date: 26/Jun/20 12:18
    Worklog Time Spent: 10m 
      Work Description: sankarh commented on a change in pull request #1109:
URL: https://github.com/apache/hive/pull/1109#discussion_r445577541



##########
File path: standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
##########
@@ -402,6 +385,32 @@ private static void updateStatsForAlterTable(RawStore rawStore, Table
tblBefore,
         sharedCache.removePartitionColStatsFromCache(catalogName, dbName, tableName, msgPart.getPartValues(),
             msgPart.getColName());
         break;
+      case MessageBuilder.ADD_PRIMARYKEY_EVENT:
+          AddPrimaryKeyMessage addPrimaryKeyMessage = deserializer.getAddPrimaryKeyMessage(message);

Review comment:
       Should be 2-space indentation. Check other places too.

##########
File path: itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStoreUpdateUsingEvents.java
##########
@@ -295,6 +295,178 @@ public void testTableOpsForUpdateUsingEvents() throws Exception {
     sharedCache.getSdCache().clear();
   }
 
+  @Test
+  public void testConstraintsForUpdateUsingEvents() throws Exception {
+    long lastEventId = -1;
+    RawStore rawStore = hmsHandler.getMS();
+
+    // Prewarm CachedStore
+    CachedStore.setCachePrewarmedState(false);
+    CachedStore.prewarm(rawStore);
+
+    // Add a db via rawStore
+    String dbName = "test_table_ops";
+    String dbOwner = "user1";
+    Database db = createTestDb(dbName, dbOwner);
+    hmsHandler.create_database(db);
+    db = rawStore.getDatabase(DEFAULT_CATALOG_NAME, dbName);
+
+    String foreignDbName = "test_table_ops_foreign";
+    Database foreignDb = createTestDb(foreignDbName, dbOwner);
+    hmsHandler.create_database(foreignDb);
+    foreignDb = rawStore.getDatabase(DEFAULT_CATALOG_NAME, foreignDbName);
+    // Add a table via rawStore
+    String tblName = "tbl";
+    String tblOwner = "user1";
+    FieldSchema col1 = new FieldSchema("col1", "int", "integer column");
+    FieldSchema col2 = new FieldSchema("col2", "string", "string column");
+    List<FieldSchema> cols = new ArrayList<FieldSchema>();
+    cols.add(col1);
+    cols.add(col2);
+    List<FieldSchema> ptnCols = new ArrayList<FieldSchema>();
+    Table tbl = createTestTbl(dbName, tblName, tblOwner, cols, ptnCols);
+    String foreignTblName = "ftbl";
+    Table foreignTbl = createTestTbl(foreignDbName, foreignTblName, tblOwner, cols, ptnCols);
+
+    SQLPrimaryKey key = new SQLPrimaryKey(dbName, tblName, col1.getName(), 1, "pk1",
+            false, false, false);
+    SQLUniqueConstraint uC = new SQLUniqueConstraint(DEFAULT_CATALOG_NAME, dbName, tblName,
+            col1.getName(), 2, "uc1", false, false, false);
+    SQLNotNullConstraint nN = new SQLNotNullConstraint(DEFAULT_CATALOG_NAME, dbName, tblName,
+            col1.getName(), "nn1", false, false, false);
+    SQLForeignKey foreignKey = new SQLForeignKey(key.getTable_db(), key.getTable_name(),
key.getColumn_name(),
+            foreignDbName, foreignTblName, key.getColumn_name(), 2, 1,2,
+            "fk1", key.getPk_name(), false, false, false);
+
+    hmsHandler.create_table_with_constraints(tbl,
+            Arrays.asList(key), null, Arrays.asList(uC), Arrays.asList(nN), null, null);
+    hmsHandler.create_table_with_constraints(foreignTbl, null, Arrays.asList(foreignKey),
+            null, null, null, null);
+
+    tbl = rawStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName);
+    foreignTbl = rawStore.getTable(DEFAULT_CATALOG_NAME, foreignDbName, foreignTblName);
+
+    // Read database, table via CachedStore
+    Database dbRead= sharedCache.getDatabaseFromCache(DEFAULT_CATALOG_NAME, dbName);
+    Assert.assertEquals(db, dbRead);
+    Table tblRead = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbName, tblName);
+    compareTables(tblRead, tbl);
+
+    Table foreignTblRead = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, foreignDbName,
foreignTblName);
+    compareTables(foreignTblRead, foreignTbl);
+
+    List<SQLPrimaryKey> keys = rawStore.getPrimaryKeys(DEFAULT_CATALOG_NAME, dbName,
tblName);
+    List<SQLPrimaryKey> keysRead = sharedCache.listCachedPrimaryKeys(DEFAULT_CATALOG_NAME,
dbName, tblName);
+    assertsForPrimarkaryKey(keysRead, 1, 0, keys.get(0));
+
+    List<SQLNotNullConstraint> nNs = rawStore.getNotNullConstraints(DEFAULT_CATALOG_NAME,
dbName, tblName);
+    List<SQLNotNullConstraint> nNsRead = sharedCache.listCachedNotNullConstraints(DEFAULT_CATALOG_NAME,
dbName, tblName);
+    assertsForNotNullConstraints(nNsRead, 1, 0, nNs.get(0));
+
+    List<SQLUniqueConstraint> uns = rawStore.getUniqueConstraints(DEFAULT_CATALOG_NAME,
dbName, tblName);
+    List<SQLUniqueConstraint> unsRead = sharedCache.listCachedUniqueConstraint(DEFAULT_CATALOG_NAME,
dbName, tblName);
+    assertsForUniqueConstraints(unsRead, 1, 0, uns.get(0));
+
+    List<SQLForeignKey> fks = rawStore.getForeignKeys(DEFAULT_CATALOG_NAME, dbName,
tblName, foreignDbName, foreignTblName);
+    List<SQLForeignKey> fksRead = sharedCache.listCachedForeignKeys(DEFAULT_CATALOG_NAME,
foreignDbName,
+            foreignTblName, dbName, tblName);
+    assertsForForeignKey(fksRead, 1, 0, fks.get(0));
+
+    fksRead = sharedCache.listCachedForeignKeys(DEFAULT_CATALOG_NAME, foreignDbName, foreignTblName,
+            dbName, foreignTblName);
+    Assert.assertEquals(fksRead.size(), 0);
+    fksRead = sharedCache.listCachedForeignKeys(DEFAULT_CATALOG_NAME, foreignDbName, foreignTblName,
+            foreignDbName, tblName);
+    Assert.assertEquals(fksRead.size(), 0);
+    fksRead = sharedCache.listCachedForeignKeys(DEFAULT_CATALOG_NAME, foreignDbName, foreignTblName,
+            foreignDbName, foreignTblName);
+    Assert.assertEquals(fksRead.size(), 0);
+
+    fksRead = sharedCache.listCachedForeignKeys(DEFAULT_CATALOG_NAME, foreignDbName, foreignTblName,
+            null, null);
+    Assert.assertEquals(fksRead.size(), 1);
+
+    // Dropping the constraint
+    DropConstraintRequest dropConstraintRequest = new DropConstraintRequest(foreignDbName,
foreignTblName, foreignKey.getFk_name());
+    hmsHandler.drop_constraint(dropConstraintRequest);
+    dropConstraintRequest = new DropConstraintRequest(dbName, tblName, key.getPk_name());
+    hmsHandler.drop_constraint(dropConstraintRequest);
+    dropConstraintRequest = new DropConstraintRequest(dbName, tblName, nN.getNn_name());
+    hmsHandler.drop_constraint(dropConstraintRequest);
+    dropConstraintRequest = new DropConstraintRequest(dbName, tblName, uC.getUk_name());
+    hmsHandler.drop_constraint(dropConstraintRequest);
+
+    keys = sharedCache.listCachedPrimaryKeys(DEFAULT_CATALOG_NAME, dbName, tblName);
+    nNs = sharedCache.listCachedNotNullConstraints(DEFAULT_CATALOG_NAME, dbName, tblName);
+    uns = sharedCache.listCachedUniqueConstraint(DEFAULT_CATALOG_NAME, dbName, tblName);
+    fksRead = sharedCache.listCachedForeignKeys(DEFAULT_CATALOG_NAME, foreignDbName, foreignTblName,
dbName, tblName);
+    Assert.assertEquals(keys.size(), 0);
+    Assert.assertEquals(nNs.size(), 0);
+    Assert.assertEquals(uns.size(), 0);
+    Assert.assertEquals(fksRead.size(), 0);
+
+    // Adding keys back
+    AddPrimaryKeyRequest req = new AddPrimaryKeyRequest(Arrays.asList(key));
+    hmsHandler.add_primary_key(req);
+    keys = sharedCache.listCachedPrimaryKeys(DEFAULT_CATALOG_NAME, dbName, tblName);
+    assertsForPrimarkaryKey(keys, 1, 0, key);
+
+    AddUniqueConstraintRequest uniqueConstraintRequest = new AddUniqueConstraintRequest(Arrays.asList(uC));
+    hmsHandler.add_unique_constraint(uniqueConstraintRequest);
+    uns = sharedCache.listCachedUniqueConstraint(DEFAULT_CATALOG_NAME, dbName, tblName);
+    assertsForUniqueConstraints(uns, 1, 0, uC);
+
+    AddNotNullConstraintRequest notNullConstraintRequest = new AddNotNullConstraintRequest(Arrays.asList(nN));
+    hmsHandler.add_not_null_constraint(notNullConstraintRequest);
+    nNs = sharedCache.listCachedNotNullConstraints(DEFAULT_CATALOG_NAME, dbName, tblName);
+    assertsForNotNullConstraints(nNs, 1, 0, nN);
+
+    AddForeignKeyRequest foreignKeyRequest = new AddForeignKeyRequest(Arrays.asList(foreignKey));
+    hmsHandler.add_foreign_key(foreignKeyRequest);
+    fksRead = sharedCache.listCachedForeignKeys(DEFAULT_CATALOG_NAME, foreignDbName, foreignTblName,
dbName, tblName);
+    assertsForForeignKey(fksRead, 1, 0, foreignKey);
+
+    sharedCache.getDatabaseCache().clear();
+    sharedCache.clearTableCache();
+    sharedCache.getSdCache().clear();
+  }
+
+  private void assertsForPrimarkaryKey(List<SQLPrimaryKey> keys, int size, int ele,
SQLPrimaryKey key) {
+    Assert.assertEquals(keys.size(), size);
+    Assert.assertEquals(keys.get(ele).getPk_name(), key.getPk_name());
+    Assert.assertEquals(keys.get(ele).getColumn_name(), key.getColumn_name());
+    Assert.assertEquals(keys.get(ele).getTable_name(), key.getTable_name());
+    Assert.assertEquals(keys.get(ele).getTable_db(), key.getTable_db());
+  }
+
+  private void assertsForForeignKey(List<SQLForeignKey> keys, int size, int ele, SQLForeignKey
key) {
+    Assert.assertEquals(keys.size(), size);
+    Assert.assertEquals(keys.get(ele).getPk_name(), key.getPk_name());
+    Assert.assertEquals(keys.get(ele).getFk_name(), key.getFk_name());
+    Assert.assertEquals(keys.get(ele).getFktable_db(), key.getFktable_db());
+    Assert.assertEquals(keys.get(ele).getFktable_name(), key.getFktable_name());
+    Assert.assertEquals(keys.get(ele).getPktable_db(), key.getPktable_db());
+    Assert.assertEquals(keys.get(ele).getPktable_name(), key.getPktable_name());
+    Assert.assertEquals(keys.get(ele).getPkcolumn_name(), key.getPkcolumn_name());
+    Assert.assertEquals(keys.get(ele).getFkcolumn_name(), key.getFkcolumn_name());
+  }
+
+  private void assertsForNotNullConstraints(List<SQLNotNullConstraint> nns, int size,
int ele, SQLNotNullConstraint nN) {
+    Assert.assertEquals(nns.size(), size);
+    Assert.assertEquals(nns.get(ele).getNn_name(), nN.getNn_name());
+    Assert.assertEquals(nns.get(ele).getColumn_name(), nN.getColumn_name());
+    Assert.assertEquals(nns.get(ele).getTable_name(), nN.getTable_name());
+    Assert.assertEquals(nns.get(ele).getTable_db(), nN.getTable_db());
+  }
+
+  private void assertsForUniqueConstraints(List<SQLUniqueConstraint> nns, int size,
int ele, SQLUniqueConstraint nN) {

Review comment:
       Argument names can be uks and uk.

##########
File path: standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
##########
@@ -543,10 +556,24 @@ static void prewarm(RawStore rawStore) {
                 tableColStats = rawStore.getTableColumnStatistics(catName, dbName, tblName,
colNames, CacheUtils.HIVE_ENGINE);
                 Deadline.stopTimer();
               }
+              Deadline.startTimer("getPrimaryKeys");
+              rawStore.getPrimaryKeys(catName, dbName, tblName);

Review comment:
       It seems the return values from these rawStore calls were not assigned to the local
variables.

##########
File path: standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
##########
@@ -867,6 +902,73 @@ private void updateTableColStats(RawStore rawStore, String catName, String
dbNam
       }
     }
 
+    private void updateTableForeignKeys(RawStore rawStore, String catName, String dbName,
String tblName) {
+      LOG.debug("CachedStore: updating cached foreign keys objects for catalog: {}, database:
{}, table: {}", catName,
+              dbName, tblName);
+      try {
+        Deadline.startTimer("getForeignKeys");
+        List<SQLForeignKey> fks = rawStore.getForeignKeys(catName, null, null, dbName,
tblName);
+        Deadline.stopTimer();
+        sharedCache.refreshForeignKeysInCache(StringUtils.normalizeIdentifier(catName),
+                StringUtils.normalizeIdentifier(dbName), StringUtils.normalizeIdentifier(tblName),
fks);
+        LOG.debug("CachedStore: updated cached foreign keys objects for catalog: {}, database:
{}, table: {}", catName,
+                dbName, tblName);
+      } catch (MetaException e) {
+        LOG.info("Updating CachedStore: unable to read foreign keys of table: " + tblName,
e);

Review comment:
       Failure logs can have catName and dbName too.

##########
File path: standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java
##########
@@ -265,6 +270,10 @@ public int getObjectSize(Class<?> clazz, Object obj) {
     private int partitionCacheSize;
     private int partitionColStatsCacheSize;
     private int aggrColStatsCacheSize;
+    private int primaryKeyCacheSize;

Review comment:
       We should consolidate all these size variables into an array (say, memberObjectsSize) of
size equal to the number of items in enum MemberName, and refer to it using the enum value as the index.
It reduces a lot of code, especially the switch cases.

##########
File path: standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
##########
@@ -2497,26 +2599,82 @@ long getPartsFound() {
 
   @Override public List<SQLPrimaryKey> getPrimaryKeys(String catName, String dbName,
String tblName)
       throws MetaException {
-    // TODO constraintCache
-    return rawStore.getPrimaryKeys(catName, dbName, tblName);
+    catName = normalizeIdentifier(catName);
+    dbName = StringUtils.normalizeIdentifier(dbName);
+    tblName = StringUtils.normalizeIdentifier(tblName);
+    if (!shouldCacheTable(catName, dbName, tblName) || (canUseEvents && rawStore.isActiveTransaction()))
{
+      return rawStore.getPrimaryKeys(catName, dbName, tblName);
+    }
+
+    Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName);
+    if (tbl == null) {
+      // The table containing the primary keys is not yet loaded in cache
+      return rawStore.getPrimaryKeys(catName, dbName, tblName);
+    }
+    List<SQLPrimaryKey> keys = sharedCache.listCachedPrimaryKeys(catName, dbName, tblName);

Review comment:
       Is it possible that table is cached but not the constraints?

##########
File path: standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java
##########
@@ -470,6 +524,110 @@ boolean cachePartitions(Iterable<Partition> parts, SharedCache
sharedCache, bool
       }
     }
 
+    boolean cachePrimaryKeys(List<SQLPrimaryKey> primaryKeys, boolean fromPrewarm)
{
+      return cacheConstraints(primaryKeys, SQLPrimaryKey.class, fromPrewarm,
+              MemberName.PRIMARY_KEY_CACHE, this.isPrimaryKeyCacheDirty);
+    }
+
+    boolean cacheForeignKeys(List<SQLForeignKey> foreignKeys, boolean fromPrewarm)
{
+      return cacheConstraints(foreignKeys, SQLForeignKey.class, fromPrewarm,
+              MemberName.FOREIGN_KEY_CACHE, this.isForeignKeyCacheDirty);
+    }
+
+    boolean cacheUniqueConstraints(List<SQLUniqueConstraint> uniqueConstraints, boolean
fromPrewarm) {
+      return cacheConstraints(uniqueConstraints, SQLUniqueConstraint.class, fromPrewarm,
+              MemberName.UNIQUE_CONSTRAINT_CACHE, this.isUniqueConstraintCacheDirty);
+    }
+
+    boolean cacheNotNulConstraints(List<SQLNotNullConstraint> notNullConstraints, boolean
fromPrewarm) {
+      return cacheConstraints(notNullConstraints, SQLNotNullConstraint.class, fromPrewarm,
+              MemberName.NOTNULL_CONSTRAINT_CACHE, this.isNotNullConstraintCacheDirty);
+    }
+
+    // Common method to cache constraints
+    private boolean cacheConstraints(List constraintsList,
+                             Class constraintClass,
+                             boolean fromPrewarm,
+                             MemberName mn,
+                             AtomicBoolean dirtyConstaintVariable) {
+      if (constraintsList == null || constraintsList.isEmpty()) {
+        return true;
+      }
+      try {
+        tableLock.writeLock().lock();
+        int size = 0;
+        for(int i=0; i<constraintsList.size(); i++) {
+          if (constraintClass == SQLPrimaryKey.class) {
+            SQLPrimaryKey key = (SQLPrimaryKey) constraintsList.get(i);
+            this.primaryKeyCache.put(key.getPk_name(), key);
+          } else if (constraintClass == SQLForeignKey.class) {
+            SQLForeignKey key = (SQLForeignKey) constraintsList.get(i);
+            this.foreignKeyCache.put(key.getFk_name(), key);
+          } else if (constraintClass == SQLNotNullConstraint.class) {
+            SQLNotNullConstraint key = (SQLNotNullConstraint) constraintsList.get(i);
+            this.notNullConstraintCache.put(key.getNn_name(), key);
+          } else if (constraintClass == SQLUniqueConstraint.class) {
+            SQLUniqueConstraint key = (SQLUniqueConstraint) constraintsList.get(i);
+            this.uniqueConstraintCache.put(key.getUk_name(), key);
+          }
+          size += getObjectSize(constraintClass, constraintsList.get(i));
+        }
+
+        if (!fromPrewarm) {
+          dirtyConstaintVariable.set(true);
+        }
+
+        updateMemberSize(mn, size, SizeMode.Delta);
+        return true;
+      } finally {
+        tableLock.writeLock().unlock();
+      }
+    }
+
+    public List<SQLPrimaryKey> getPrimaryKeys() {
+      List<SQLPrimaryKey> keys = new ArrayList<>();
+      try {
+        tableLock.readLock().lock();
+        keys = new ArrayList<>(this.primaryKeyCache.values());

Review comment:
       Double assignment. We should return directly from here and avoid the local variable.

##########
File path: standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java
##########
@@ -410,6 +436,34 @@ private void updateMemberSize(MemberName mn, Integer size, SizeMode mode)
{
           aggrColStatsCacheSize = size;
         }
         break;
+      case PRIMARY_KEY_CACHE:
+        if (mode == SizeMode.Delta) {
+          primaryKeyCacheSize += size;
+        } else {
+          primaryKeyCacheSize = size;
+        }
+        break;
+        case FOREIGN_KEY_CACHE:

Review comment:
       Indentation of the "case" statement does not match the others.

##########
File path: standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
##########
@@ -543,10 +556,24 @@ static void prewarm(RawStore rawStore) {
                 tableColStats = rawStore.getTableColumnStatistics(catName, dbName, tblName,
colNames, CacheUtils.HIVE_ENGINE);
                 Deadline.stopTimer();
               }
+              Deadline.startTimer("getPrimaryKeys");
+              rawStore.getPrimaryKeys(catName, dbName, tblName);
+              Deadline.stopTimer();
+              Deadline.startTimer("getForeignKeys");
+              rawStore.getForeignKeys(catName, null, null, dbName, tblName);
+              Deadline.stopTimer();
+              Deadline.startTimer("getUniqueConstraints");
+              rawStore.getUniqueConstraints(catName, dbName, tblName);
+              Deadline.stopTimer();
+              Deadline.startTimer("getNotNullConstraints");
+              rawStore.getNotNullConstraints(catName, dbName, tblName);
+              Deadline.stopTimer();
+
               // If the table could not cached due to memory limit, stop prewarm
               boolean isSuccess = sharedCache
                   .populateTableInCache(table, tableColStats, partitions, partitionColStats,
aggrStatsAllPartitions,

Review comment:
       Too many arguments. Can we have another class (TableCacheObjects) to store all these
arguments using set methods?

##########
File path: standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java
##########
@@ -470,6 +524,110 @@ boolean cachePartitions(Iterable<Partition> parts, SharedCache
sharedCache, bool
       }
     }
 
+    boolean cachePrimaryKeys(List<SQLPrimaryKey> primaryKeys, boolean fromPrewarm)
{
+      return cacheConstraints(primaryKeys, SQLPrimaryKey.class, fromPrewarm,
+              MemberName.PRIMARY_KEY_CACHE, this.isPrimaryKeyCacheDirty);
+    }
+
+    boolean cacheForeignKeys(List<SQLForeignKey> foreignKeys, boolean fromPrewarm)
{
+      return cacheConstraints(foreignKeys, SQLForeignKey.class, fromPrewarm,
+              MemberName.FOREIGN_KEY_CACHE, this.isForeignKeyCacheDirty);
+    }
+
+    boolean cacheUniqueConstraints(List<SQLUniqueConstraint> uniqueConstraints, boolean
fromPrewarm) {
+      return cacheConstraints(uniqueConstraints, SQLUniqueConstraint.class, fromPrewarm,
+              MemberName.UNIQUE_CONSTRAINT_CACHE, this.isUniqueConstraintCacheDirty);
+    }
+
+    boolean cacheNotNulConstraints(List<SQLNotNullConstraint> notNullConstraints, boolean
fromPrewarm) {
+      return cacheConstraints(notNullConstraints, SQLNotNullConstraint.class, fromPrewarm,
+              MemberName.NOTNULL_CONSTRAINT_CACHE, this.isNotNullConstraintCacheDirty);
+    }
+
+    // Common method to cache constraints
+    private boolean cacheConstraints(List constraintsList,
+                             Class constraintClass,
+                             boolean fromPrewarm,
+                             MemberName mn,
+                             AtomicBoolean dirtyConstaintVariable) {
+      if (constraintsList == null || constraintsList.isEmpty()) {
+        return true;
+      }
+      try {
+        tableLock.writeLock().lock();
+        int size = 0;
+        for(int i=0; i<constraintsList.size(); i++) {
+          if (constraintClass == SQLPrimaryKey.class) {

Review comment:
       Can be a switch-case.

##########
File path: standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
##########
@@ -2497,26 +2599,82 @@ long getPartsFound() {
 
   @Override public List<SQLPrimaryKey> getPrimaryKeys(String catName, String dbName,
String tblName)
       throws MetaException {
-    // TODO constraintCache
-    return rawStore.getPrimaryKeys(catName, dbName, tblName);
+    catName = normalizeIdentifier(catName);
+    dbName = StringUtils.normalizeIdentifier(dbName);
+    tblName = StringUtils.normalizeIdentifier(tblName);
+    if (!shouldCacheTable(catName, dbName, tblName) || (canUseEvents && rawStore.isActiveTransaction()))
{
+      return rawStore.getPrimaryKeys(catName, dbName, tblName);
+    }
+
+    Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName);
+    if (tbl == null) {
+      // The table containing the primary keys is not yet loaded in cache
+      return rawStore.getPrimaryKeys(catName, dbName, tblName);
+    }
+    List<SQLPrimaryKey> keys = sharedCache.listCachedPrimaryKeys(catName, dbName, tblName);
+
+    return keys;
   }
 
   @Override public List<SQLForeignKey> getForeignKeys(String catName, String parentDbName,
String parentTblName,
       String foreignDbName, String foreignTblName) throws MetaException {
-    // TODO constraintCache
-    return rawStore.getForeignKeys(catName, parentDbName, parentTblName, foreignDbName, foreignTblName);
+     // Get correct ForeignDBName and TableName
+    catName = normalizeIdentifier(catName);
+    foreignDbName = (foreignDbName == null) ? "" : normalizeIdentifier(foreignDbName);

Review comment:
       If the foreign db or table names are null, then we should just invoke the rawStore APIs instead
of using an empty string. It can cause issues.

##########
File path: standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java
##########
@@ -514,6 +672,130 @@ public boolean containsPartition(List<String> partVals) {
       return containsPart;
     }
 
+    public void removeConstraint(String name) {
+      try {
+        tableLock.writeLock().lock();
+        Object constraint = null;
+        MemberName mn = null;
+        Class constraintClass = null;
+        if (this.primaryKeyCache.containsKey(name)) {
+          constraint = this.primaryKeyCache.remove(name);
+          mn = MemberName.PRIMARY_KEY_CACHE;
+          isPrimaryKeyCacheDirty.set(true);
+          constraintClass = SQLPrimaryKey.class;
+        } else if (this.foreignKeyCache.containsKey(name)) {
+          constraint = this.foreignKeyCache.remove(name);
+          mn = MemberName.FOREIGN_KEY_CACHE;
+          isForeignKeyCacheDirty.set(true);
+          constraintClass = SQLForeignKey.class;
+        } else if (this.notNullConstraintCache.containsKey(name)) {
+          constraint = this.notNullConstraintCache.remove(name);
+          mn = MemberName.NOTNULL_CONSTRAINT_CACHE;
+          isNotNullConstraintCacheDirty.set(true);
+          constraintClass = SQLNotNullConstraint.class;
+        } else if (this.uniqueConstraintCache.containsKey(name)) {
+          constraint = this.uniqueConstraintCache.remove(name);
+          mn = MemberName.UNIQUE_CONSTRAINT_CACHE;
+          isUniqueConstraintCacheDirty.set(true);
+          constraintClass = SQLUniqueConstraint.class;
+        }
+
+        if(constraint == null) {
+          // Should not reach here

Review comment:
       In the multi-HMS-instance case, can it happen that the table is cached but the constraints
are not? If yes, then it can hit here.

##########
File path: standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java
##########
@@ -287,6 +296,18 @@ public int getObjectSize(Class<?> clazz, Object obj) {
         new ConcurrentHashMap<String, List<ColumnStatisticsObj>>();
     private AtomicBoolean isAggrPartitionColStatsCacheDirty = new AtomicBoolean(false);
 
+    private Map<String, SQLPrimaryKey> primaryKeyCache = new ConcurrentHashMap<>();
+    private AtomicBoolean isPrimaryKeyCacheDirty = new AtomicBoolean(false);

Review comment:
       Same as size; the dirty-check booleans can also be an array.

##########
File path: standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java
##########
@@ -1870,6 +2247,125 @@ public void removePartitionsFromCache(String catName, String dbName,
String tblN
     return parts;
   }
 
+  public List<SQLPrimaryKey> listCachedPrimaryKeys(String catName, String dbName, String
tblName) {
+    List<SQLPrimaryKey> keys = new ArrayList<>();
+    try {
+      cacheLock.readLock().lock();
+      TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName,
dbName, tblName));
+      if (tblWrapper != null) {
+        keys = tblWrapper.getPrimaryKeys();
+      }
+    } finally {
+      cacheLock.readLock().unlock();
+    }
+    return keys;
+  }
+
+  public List<SQLForeignKey> listCachedForeignKeys(String catName, String foreignDbName,
String foreignTblName,
+                                                   String parentDbName, String parentTblName)
{
+    List<SQLForeignKey> keys = new ArrayList<>();
+    try {
+      cacheLock.readLock().lock();
+      TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName,
foreignDbName, foreignTblName));
+      if (tblWrapper != null) {
+        keys = tblWrapper.getForeignKeys();
+      }
+    } finally {
+      cacheLock.readLock().unlock();
+    }
+
+    // filter out required foreign keys based on parent db/tbl name
+    if (!StringUtils.isEmpty(parentTblName) && !StringUtils.isEmpty(parentDbName))
{
+      List<SQLForeignKey> filteredKeys = new ArrayList<>();
+      for (SQLForeignKey key : keys) {

Review comment:
       We should use List.stream().filter() instead of a loop and check.

##########
File path: standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java
##########
@@ -470,6 +524,110 @@ boolean cachePartitions(Iterable<Partition> parts, SharedCache
sharedCache, bool
       }
     }
 
+    boolean cachePrimaryKeys(List<SQLPrimaryKey> primaryKeys, boolean fromPrewarm)
{
+      return cacheConstraints(primaryKeys, SQLPrimaryKey.class, fromPrewarm,
+              MemberName.PRIMARY_KEY_CACHE, this.isPrimaryKeyCacheDirty);
+    }
+
+    boolean cacheForeignKeys(List<SQLForeignKey> foreignKeys, boolean fromPrewarm)
{
+      return cacheConstraints(foreignKeys, SQLForeignKey.class, fromPrewarm,
+              MemberName.FOREIGN_KEY_CACHE, this.isForeignKeyCacheDirty);
+    }
+
+    boolean cacheUniqueConstraints(List<SQLUniqueConstraint> uniqueConstraints, boolean
fromPrewarm) {
+      return cacheConstraints(uniqueConstraints, SQLUniqueConstraint.class, fromPrewarm,

Review comment:
       Why do we need to pass both the member name and the constraint class name? We can derive the class
from the member name itself.

##########
File path: standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java
##########
@@ -1507,6 +1490,305 @@ public Object call() {
     cachedStore.shutdown();
   }
 
+  @Test
+  public void testPrimaryKeys() {
+    Configuration conf = MetastoreConf.newMetastoreConf();
+    MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true);
+    MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CACHED_RAW_STORE_MAX_CACHE_MEMORY,
"-1Kb");
+    MetaStoreTestUtils.setConfForStandloneMode(conf);
+    CachedStore cachedStore = new CachedStore();
+    CachedStore.clearSharedCache();
+    cachedStore.setConfForTest(conf);
+    SharedCache sharedCache = CachedStore.getSharedCache();
+
+    Database db = createDatabaseObject("db", "testUser");
+    Table tbl = createUnpartitionedTableObject(db);
+
+    sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, "db", tbl.getTableName(), tbl);
+
+    Assert.assertEquals(sharedCache.getCachedTableCount(), 1);
+
+    List<SQLPrimaryKey> origKeys = createPrimaryKeys(tbl);
+    sharedCache.addPrimaryKeysToCache(DEFAULT_CATALOG_NAME, tbl.getDbName(), tbl.getTableName(),
origKeys);
+
+    // List operation
+    List<SQLPrimaryKey> cachedKeys = sharedCache.listCachedPrimaryKeys(
+            DEFAULT_CATALOG_NAME, tbl.getDbName(), tbl.getTableName());
+
+    Assert.assertEquals(cachedKeys.size(), 1);
+    Assert.assertEquals(cachedKeys.get(0).getPk_name(), "pk1");
+    Assert.assertEquals(cachedKeys.get(0).getTable_db(), "db");
+    Assert.assertEquals(cachedKeys.get(0).getTable_name(), tbl.getTableName());
+    Assert.assertEquals(cachedKeys.get(0).getColumn_name(), "col1");
+    Assert.assertEquals(cachedKeys.get(0).getCatName(), DEFAULT_CATALOG_NAME);
+
+    // Refresh Operation
+    SQLPrimaryKey modifiedKey = origKeys.get(0);
+    modifiedKey.setColumn_name("col2");
+    modifiedKey.setPk_name("pk_modified");
+
+    sharedCache.refreshPrimaryKeysInCache(DEFAULT_CATALOG_NAME, tbl.getDbName(), tbl.getTableName(),
+            Arrays.asList(modifiedKey));
+    cachedKeys = sharedCache.listCachedPrimaryKeys(DEFAULT_CATALOG_NAME, tbl.getDbName(),
tbl.getTableName());
+
+    Assert.assertEquals(cachedKeys.size(), 1);
+    Assert.assertEquals(cachedKeys.get(0).getPk_name(), "pk_modified");
+    Assert.assertEquals(cachedKeys.get(0).getTable_db(), "db");
+    Assert.assertEquals(cachedKeys.get(0).getTable_name(), tbl.getTableName());
+    Assert.assertEquals(cachedKeys.get(0).getColumn_name(), "col2");
+    Assert.assertEquals(cachedKeys.get(0).getCatName(), DEFAULT_CATALOG_NAME);
+
+    // Add more primary keys
+    sharedCache.addPrimaryKeysToCache(DEFAULT_CATALOG_NAME, tbl.getDbName(), tbl.getTableName(),
origKeys);
+    cachedKeys = sharedCache.listCachedPrimaryKeys(DEFAULT_CATALOG_NAME, tbl.getDbName(),
tbl.getTableName());
+    Assert.assertEquals(cachedKeys.size(), 2);
+
+    // remove constraints
+    sharedCache.removeConstraintFromCache(DEFAULT_CATALOG_NAME, tbl.getDbName(), tbl.getTableName(),
"pk1");
+    sharedCache.removeConstraintFromCache(DEFAULT_CATALOG_NAME, tbl.getDbName(), tbl.getTableName(),
"pk_modified");
+
+    cachedKeys = sharedCache.listCachedPrimaryKeys(DEFAULT_CATALOG_NAME, tbl.getDbName(),
tbl.getTableName());
+    Assert.assertEquals(cachedKeys.size(), 0);
+
+    cachedStore.shutdown();
+  }
+
+  @Test
+  public void testNotNullConstraint() {
+    Configuration conf = MetastoreConf.newMetastoreConf();
+    MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true);
+    MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CACHED_RAW_STORE_MAX_CACHE_MEMORY,
"-1Kb");
+    MetaStoreTestUtils.setConfForStandloneMode(conf);
+    CachedStore cachedStore = new CachedStore();
+    CachedStore.clearSharedCache();
+    cachedStore.setConfForTest(conf);
+    SharedCache sharedCache = CachedStore.getSharedCache();
+
+    Database db = createDatabaseObject("db", "testUser");
+    Table tbl = createUnpartitionedTableObject(db);
+
+    sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, "db", tbl.getTableName(), tbl);
+
+    Assert.assertEquals(sharedCache.getCachedTableCount(), 1);
+
+    List<SQLNotNullConstraint> origKeys = createNotNullConstraint(tbl);
+    sharedCache.addNotNullConstraintsToCache(DEFAULT_CATALOG_NAME, tbl.getDbName(), tbl.getTableName(),
origKeys);
+
+    // List operation
+    List<SQLNotNullConstraint> cachedKeys = sharedCache.listCachedNotNullConstraints(
+            DEFAULT_CATALOG_NAME, tbl.getDbName(), tbl.getTableName());
+
+    Assert.assertEquals(cachedKeys.size(), 1);
+    Assert.assertEquals(cachedKeys.get(0).getNn_name(), "nn1");
+    Assert.assertEquals(cachedKeys.get(0).getTable_db(), "db");
+    Assert.assertEquals(cachedKeys.get(0).getTable_name(), tbl.getTableName());
+    Assert.assertEquals(cachedKeys.get(0).getColumn_name(), "col1");
+    Assert.assertEquals(cachedKeys.get(0).getCatName(), DEFAULT_CATALOG_NAME);
+
+    // Refresh Operation
+    SQLNotNullConstraint modifiedKey = origKeys.get(0);
+    modifiedKey.setColumn_name("col2");
+    modifiedKey.setNn_name("nn_modified");
+
+    sharedCache.refreshNotNullConstraintsInCache(DEFAULT_CATALOG_NAME, tbl.getDbName(), tbl.getTableName(),
+            Arrays.asList(modifiedKey));
+    cachedKeys = sharedCache.listCachedNotNullConstraints(DEFAULT_CATALOG_NAME, tbl.getDbName(),
tbl.getTableName());
+
+    Assert.assertEquals(cachedKeys.size(), 1);
+    Assert.assertEquals(cachedKeys.get(0).getNn_name(), "nn_modified");
+    Assert.assertEquals(cachedKeys.get(0).getTable_db(), "db");
+    Assert.assertEquals(cachedKeys.get(0).getTable_name(), tbl.getTableName());
+    Assert.assertEquals(cachedKeys.get(0).getColumn_name(), "col2");
+    Assert.assertEquals(cachedKeys.get(0).getCatName(), DEFAULT_CATALOG_NAME);
+
+    // Add more primary keys
+    sharedCache.addNotNullConstraintsToCache(DEFAULT_CATALOG_NAME, tbl.getDbName(), tbl.getTableName(),
origKeys);
+    cachedKeys = sharedCache.listCachedNotNullConstraints(DEFAULT_CATALOG_NAME, tbl.getDbName(),
tbl.getTableName());
+    Assert.assertEquals(cachedKeys.size(), 2);
+
+    // remove constraints
+    sharedCache.removeConstraintFromCache(DEFAULT_CATALOG_NAME, tbl.getDbName(), tbl.getTableName(),
"nn1");
+    sharedCache.removeConstraintFromCache(DEFAULT_CATALOG_NAME, tbl.getDbName(), tbl.getTableName(),
"nn_modified");
+
+    cachedKeys = sharedCache.listCachedNotNullConstraints(DEFAULT_CATALOG_NAME, tbl.getDbName(),
tbl.getTableName());
+    Assert.assertEquals(cachedKeys.size(), 0);
+
+    cachedStore.shutdown();
+  }
+
+  @Test
+  public void testUniqueConstraint() {
+    Configuration conf = MetastoreConf.newMetastoreConf();
+    MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true);
+    MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CACHED_RAW_STORE_MAX_CACHE_MEMORY,
"-1Kb");
+    MetaStoreTestUtils.setConfForStandloneMode(conf);
+    CachedStore cachedStore = new CachedStore();
+    CachedStore.clearSharedCache();
+    cachedStore.setConfForTest(conf);
+    SharedCache sharedCache = CachedStore.getSharedCache();
+
+    Database db = createDatabaseObject("db", "testUser");
+    Table tbl = createUnpartitionedTableObject(db);
+
+    sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, "db", tbl.getTableName(), tbl);
+
+    Assert.assertEquals(sharedCache.getCachedTableCount(), 1);
+
+    List<SQLUniqueConstraint> origKeys = createUniqueConstraint(tbl);
+    sharedCache.addUniqueConstraintsToCache(DEFAULT_CATALOG_NAME, tbl.getDbName(), tbl.getTableName(),
origKeys);
+
+    // List operation
+    List<SQLUniqueConstraint> cachedKeys = sharedCache.listCachedUniqueConstraint(
+            DEFAULT_CATALOG_NAME, tbl.getDbName(), tbl.getTableName());
+
+    Assert.assertEquals(cachedKeys.size(), 1);
+    Assert.assertEquals(cachedKeys.get(0).getUk_name(), "uk1");
+    Assert.assertEquals(cachedKeys.get(0).getTable_db(), "db");
+    Assert.assertEquals(cachedKeys.get(0).getTable_name(), tbl.getTableName());
+    Assert.assertEquals(cachedKeys.get(0).getColumn_name(), "col1");
+    Assert.assertEquals(cachedKeys.get(0).getCatName(), DEFAULT_CATALOG_NAME);
+
+    // Refresh Operation
+    SQLUniqueConstraint modifiedKey = origKeys.get(0);
+    modifiedKey.setColumn_name("col2");
+    modifiedKey.setUk_name("uk_modified");
+
+    sharedCache.refreshUniqueConstraintsInCache(DEFAULT_CATALOG_NAME, tbl.getDbName(), tbl.getTableName(),
+            Arrays.asList(modifiedKey));
+    cachedKeys = sharedCache.listCachedUniqueConstraint(DEFAULT_CATALOG_NAME, tbl.getDbName(),
tbl.getTableName());
+
+    Assert.assertEquals(cachedKeys.size(), 1);
+    Assert.assertEquals(cachedKeys.get(0).getUk_name(), "uk_modified");
+    Assert.assertEquals(cachedKeys.get(0).getTable_db(), "db");
+    Assert.assertEquals(cachedKeys.get(0).getTable_name(), tbl.getTableName());
+    Assert.assertEquals(cachedKeys.get(0).getColumn_name(), "col2");
+    Assert.assertEquals(cachedKeys.get(0).getCatName(), DEFAULT_CATALOG_NAME);
+
+    // Add more primary keys
+    sharedCache.addUniqueConstraintsToCache(DEFAULT_CATALOG_NAME, tbl.getDbName(), tbl.getTableName(),
origKeys);
+    cachedKeys = sharedCache.listCachedUniqueConstraint(DEFAULT_CATALOG_NAME, tbl.getDbName(),
tbl.getTableName());
+    Assert.assertEquals(cachedKeys.size(), 2);
+
+    // remove constraints
+    sharedCache.removeConstraintFromCache(DEFAULT_CATALOG_NAME, tbl.getDbName(), tbl.getTableName(),
"uk1");
+    sharedCache.removeConstraintFromCache(DEFAULT_CATALOG_NAME, tbl.getDbName(), tbl.getTableName(),
"uk_modified");
+
+    cachedKeys = sharedCache.listCachedUniqueConstraint(DEFAULT_CATALOG_NAME, tbl.getDbName(),
tbl.getTableName());
+    Assert.assertEquals(cachedKeys.size(), 0);
+
+    cachedStore.shutdown();
+  }
+
+  @Test
+  public void testForeignKeys() {
+    Configuration conf = MetastoreConf.newMetastoreConf();
+    MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true);
+    MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CACHED_RAW_STORE_MAX_CACHE_MEMORY,
"-1Kb");
+    MetaStoreTestUtils.setConfForStandloneMode(conf);
+    CachedStore cachedStore = new CachedStore();
+    CachedStore.clearSharedCache();
+    cachedStore.setConfForTest(conf);
+    SharedCache sharedCache = CachedStore.getSharedCache();
+
+    Database db = createDatabaseObject("db", "testUser");
+    Table tbl = createUnpartitionedTableObject(db);
+
+    sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, "db", tbl.getTableName(), tbl);
+
+    Assert.assertEquals(sharedCache.getCachedTableCount(), 1);
+
+    List<SQLForeignKey> origKeys = createForeignKeys(tbl, tbl);
+    sharedCache.addForeignKeysToCache(DEFAULT_CATALOG_NAME, tbl.getDbName(), tbl.getTableName(),
origKeys);
+
+    // List operation
+    List<SQLForeignKey> cachedKeys = sharedCache.listCachedForeignKeys(
+            DEFAULT_CATALOG_NAME, tbl.getDbName(), tbl.getTableName(), tbl.getDbName(), tbl.getTableName());
+
+    Assert.assertEquals(cachedKeys.size(), 1);
+    Assert.assertEquals(cachedKeys.get(0).getFk_name(), "fk1");
+    Assert.assertEquals(cachedKeys.get(0).getFktable_db(), "db");
+    Assert.assertEquals(cachedKeys.get(0).getFktable_name(), tbl.getTableName());
+    Assert.assertEquals(cachedKeys.get(0).getFkcolumn_name(), "col2");
+    Assert.assertEquals(cachedKeys.get(0).getCatName(), DEFAULT_CATALOG_NAME);
+
+    // List operation with different parent table
+    cachedKeys = sharedCache.listCachedForeignKeys(
+            DEFAULT_CATALOG_NAME, tbl.getDbName(), tbl.getTableName(), "dummyDB", "dummyTable");
+    Assert.assertEquals(cachedKeys.size(), 0);
+
+    // Refresh Operation
+    SQLForeignKey modifiedKey = origKeys.get(0);
+    modifiedKey.setFkcolumn_name("col3");
+    modifiedKey.setFk_name("fk_modified");
+
+    sharedCache.refreshForeignKeysInCache(DEFAULT_CATALOG_NAME, tbl.getDbName(), tbl.getTableName(),
+            Arrays.asList(modifiedKey));
+    cachedKeys = sharedCache.listCachedForeignKeys(
+            DEFAULT_CATALOG_NAME, tbl.getDbName(), tbl.getTableName(), tbl.getDbName(), tbl.getTableName());
+
+    Assert.assertEquals(cachedKeys.size(), 1);
+    Assert.assertEquals(cachedKeys.get(0).getFk_name(), "fk_modified");
+    Assert.assertEquals(cachedKeys.get(0).getFktable_db(), "db");
+    Assert.assertEquals(cachedKeys.get(0).getFktable_name(), tbl.getTableName());
+    Assert.assertEquals(cachedKeys.get(0).getFkcolumn_name(), "col3");
+    Assert.assertEquals(cachedKeys.get(0).getCatName(), DEFAULT_CATALOG_NAME);
+
+    // Add more primary keys
+    sharedCache.addForeignKeysToCache(DEFAULT_CATALOG_NAME, tbl.getDbName(), tbl.getTableName(),
origKeys);
+    cachedKeys = sharedCache.listCachedForeignKeys(
+            DEFAULT_CATALOG_NAME, tbl.getDbName(), tbl.getTableName(), tbl.getDbName(), tbl.getTableName());
+    Assert.assertEquals(cachedKeys.size(), 2);
+
+    // remove constraints
+    sharedCache.removeConstraintFromCache(DEFAULT_CATALOG_NAME, tbl.getDbName(), tbl.getTableName(),
"fk1");
+    sharedCache.removeConstraintFromCache(DEFAULT_CATALOG_NAME, tbl.getDbName(), tbl.getTableName(),
"fk_modified");
+
+    cachedKeys = sharedCache.listCachedForeignKeys(
+            DEFAULT_CATALOG_NAME, tbl.getDbName(), tbl.getTableName(), tbl.getDbName(), tbl.getTableName());
+    Assert.assertEquals(cachedKeys.size(), 0);
+
+

Review comment:
       Too many blank lines.

##########
File path: standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java
##########
@@ -514,6 +672,130 @@ public boolean containsPartition(List<String> partVals) {
       return containsPart;
     }
 
+    public void removeConstraint(String name) {
+      try {
+        tableLock.writeLock().lock();
+        Object constraint = null;
+        MemberName mn = null;
+        Class constraintClass = null;
+        if (this.primaryKeyCache.containsKey(name)) {
+          constraint = this.primaryKeyCache.remove(name);
+          mn = MemberName.PRIMARY_KEY_CACHE;
+          isPrimaryKeyCacheDirty.set(true);
+          constraintClass = SQLPrimaryKey.class;
+        } else if (this.foreignKeyCache.containsKey(name)) {
+          constraint = this.foreignKeyCache.remove(name);
+          mn = MemberName.FOREIGN_KEY_CACHE;
+          isForeignKeyCacheDirty.set(true);
+          constraintClass = SQLForeignKey.class;
+        } else if (this.notNullConstraintCache.containsKey(name)) {
+          constraint = this.notNullConstraintCache.remove(name);
+          mn = MemberName.NOTNULL_CONSTRAINT_CACHE;
+          isNotNullConstraintCacheDirty.set(true);
+          constraintClass = SQLNotNullConstraint.class;
+        } else if (this.uniqueConstraintCache.containsKey(name)) {
+          constraint = this.uniqueConstraintCache.remove(name);
+          mn = MemberName.UNIQUE_CONSTRAINT_CACHE;
+          isUniqueConstraintCacheDirty.set(true);
+          constraintClass = SQLUniqueConstraint.class;
+        }
+
+        if(constraint == null) {
+          // Should not reach here
+          return;
+        }
+        int size = getObjectSize(constraintClass, constraint);
+        updateMemberSize(mn, -1 * size, SizeMode.Delta);
+
+      } finally {
+        tableLock.writeLock().unlock();
+      }
+    }
+
+    public void refreshPrimaryKeys(List<SQLPrimaryKey> keys) {
+      Map<String, SQLPrimaryKey> newKeys = new HashMap<>();

Review comment:
       Shouldn't we use ConcurrentHashMap here?




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


Issue Time Tracking
-------------------

    Worklog Id:     (was: 451534)
    Time Spent: 40m  (was: 0.5h)

> [CachedStore] Cache table constraints in CachedStore
> ----------------------------------------------------
>
>                 Key: HIVE-22015
>                 URL: https://issues.apache.org/jira/browse/HIVE-22015
>             Project: Hive
>          Issue Type: Sub-task
>            Reporter: Daniel Dai
>            Assignee: Adesh Kumar Rao
>            Priority: Major
>              Labels: pull-request-available
>          Time Spent: 40m
>  Remaining Estimate: 0h
>
> Currently table constraints are not cached. Hive will pull all constraints from tables
involved in query, which results multiple db reads (including get_primary_keys, get_foreign_keys,
get_unique_constraints, etc). The effort to cache this is small as it's just another table
component.



--
This message was sent by Atlassian Jira
(v8.3.4#803005)

Mime
View raw message