diff --git a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java index fed4a71a01fa..b9e0a41d3e0b 100644 --- a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java +++ b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java @@ -668,7 +668,7 @@ public void onCommitTxn(CommitTxnEvent commitTxnEvent, Connection dbConn, SQLGen return; } CommitTxnMessage msg = - MessageBuilder.getInstance().buildCommitTxnMessage(commitTxnEvent.getTxnId(), commitTxnEvent.getDatabases(), commitTxnEvent.getWriteId()); + MessageBuilder.getInstance().buildCommitTxnMessage(commitTxnEvent.getTxnId(), commitTxnEvent.getCatalogs(), commitTxnEvent.getDatabases(), commitTxnEvent.getWriteId()); NotificationEvent event = new NotificationEvent(0, now(), EventType.COMMIT_TXN.toString(), @@ -840,11 +840,12 @@ public void onAddCheckConstraint(AddCheckConstraintEvent addCheckConstraintEvent */ @Override public void onDropConstraint(DropConstraintEvent dropConstraintEvent) throws MetaException { + String catName = dropConstraintEvent.getCatName(); String dbName = dropConstraintEvent.getDbName(); String tableName = dropConstraintEvent.getTableName(); String constraintName = dropConstraintEvent.getConstraintName(); DropConstraintMessage msg = MessageBuilder.getInstance() - .buildDropConstraintMessage(dbName, tableName, constraintName); + .buildDropConstraintMessage(catName, dbName, tableName, constraintName); NotificationEvent event = new NotificationEvent(0, now(), EventType.DROP_CONSTRAINT.toString(), msgEncoder.getSerializer().serialize(msg)); @@ -863,8 +864,9 @@ public void onAllocWriteId(AllocWriteIdEvent allocWriteIdEvent, Connection dbCon throws MetaException { String tableName = allocWriteIdEvent.getTableName(); String dbName = 
allocWriteIdEvent.getDbName(); + String catName = allocWriteIdEvent.getCatName(); AllocWriteIdMessage msg = MessageBuilder.getInstance() - .buildAllocWriteIdMessage(allocWriteIdEvent.getTxnToWriteIdList(), dbName, tableName); + .buildAllocWriteIdMessage(allocWriteIdEvent.getTxnToWriteIdList(), catName, dbName, tableName); NotificationEvent event = new NotificationEvent(0, now(), EventType.ALLOC_WRITE_ID.toString(), msgEncoder.getSerializer().serialize(msg) @@ -914,6 +916,7 @@ public void onBatchAcidWrite(BatchAcidWriteEvent batchAcidWriteEvent, Connection NotificationEvent event = new NotificationEvent(0, now(), EventType.ACID_WRITE.toString(), msgEncoder.getSerializer().serialize(msg)); event.setMessageFormat(msgEncoder.getMessageFormat()); + event.setCatName(batchAcidWriteEvent.getCatalog(i)); event.setDbName(batchAcidWriteEvent.getDatabase(i)); event.setTableName(batchAcidWriteEvent.getTable(i)); eventBatch.add(event); @@ -946,8 +949,8 @@ public void onUpdateTableColumnStat(UpdateTableColumnStatEvent updateTableColumn @Override public void onDeleteTableColumnStat(DeleteTableColumnStatEvent deleteTableColumnStatEvent) throws MetaException { DeleteTableColumnStatMessage msg = MessageBuilder.getInstance() - .buildDeleteTableColumnStatMessage(deleteTableColumnStatEvent.getDBName(), - deleteTableColumnStatEvent.getColName()); + .buildDeleteTableColumnStatMessage(deleteTableColumnStatEvent.getCatName(), + deleteTableColumnStatEvent.getDBName(), deleteTableColumnStatEvent.getColName()); NotificationEvent event = new NotificationEvent(0, now(), EventType.DELETE_TABLE_COLUMN_STAT.toString(), msgEncoder.getSerializer().serialize(msg)); event.setCatName(deleteTableColumnStatEvent.getCatName()); @@ -1008,9 +1011,9 @@ public void onUpdatePartitionColumnStatInBatch(UpdatePartitionColumnStatEventBat @Override public void onDeletePartitionColumnStat(DeletePartitionColumnStatEvent deletePartColStatEvent) throws MetaException { DeletePartitionColumnStatMessage msg = 
MessageBuilder.getInstance() - .buildDeletePartitionColumnStatMessage(deletePartColStatEvent.getDBName(), - deletePartColStatEvent.getColName(), deletePartColStatEvent.getPartName(), - deletePartColStatEvent.getPartVals()); + .buildDeletePartitionColumnStatMessage(deletePartColStatEvent.getCatName(), + deletePartColStatEvent.getDBName(), deletePartColStatEvent.getColName(), + deletePartColStatEvent.getPartName(), deletePartColStatEvent.getPartVals()); NotificationEvent event = new NotificationEvent(0, now(), EventType.DELETE_PARTITION_COLUMN_STAT.toString(), msgEncoder.getSerializer().serialize(msg)); event.setCatName(deletePartColStatEvent.getCatName()); @@ -1205,25 +1208,28 @@ private void addWriteNotificationLog(List eventBatch, List insertList = new ArrayList<>(); Map> updateMap = new HashMap<>(); try (PreparedStatement pst = dbConn.prepareStatement(select)) { for (int i = 0; i < acidWriteEventList.size(); i++) { + String catName = acidWriteEventList.get(i).getCatalog(); String dbName = acidWriteEventList.get(i).getDatabase(); String tblName = acidWriteEventList.get(i).getTable(); String partition = acidWriteEventList.get(i).getPartition(); Long txnId = acidWriteEventList.get(i).getTxnId(); LOG.debug("Going to execute query <" + select.replaceAll("\\?", "{}") + ">", - quoteString(dbName), quoteString(tblName), quoteString(partition)); - pst.setString(1, dbName); - pst.setString(2, tblName); - pst.setString(3, partition); + quoteString(catName), quoteString(dbName), quoteString(tblName), quoteString(partition)); + pst.setString(1, catName); + pst.setString(2, dbName); + pst.setString(3, tblName); pst.setString(4, partition); - pst.setLong(5, txnId); + pst.setString(5, partition); + pst.setLong(6, txnId); rs = pst.executeQuery(); if (!rs.next()) { insertList.add(i); @@ -1244,15 +1250,16 @@ private void addWriteNotificationLog(List eventBatch, List eventBatch, List", nextNLId , acidWriteEventList.get(idx).getTxnId(), acidWriteEventList.get(idx).getWriteId() - , 
quoteString(dbName), quoteString(tblName), + , quoteString(catName), quoteString(dbName), quoteString(tblName), quoteString(partition), quoteString(tableObj), quoteString(partitionObj), quoteString(files), currentTime); pst.addBatch(); numRows++; diff --git a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/compaction/IcebergTableOptimizer.java b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/compaction/IcebergTableOptimizer.java index 5c612bd0479a..061f8a949dae 100644 --- a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/compaction/IcebergTableOptimizer.java +++ b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/compaction/IcebergTableOptimizer.java @@ -139,7 +139,7 @@ private void addCompactionTargetIfEligible(Table table, org.apache.iceberg.Table Set compactions, ShowCompactResponse currentCompactions, Set skipDBs, Set skipTables) { - CompactionInfo ci = new CompactionInfo(table.getDbName(), table.getTableName(), partitionName, + CompactionInfo ci = new CompactionInfo(table.getCatName(), table.getDbName(), table.getTableName(), partitionName, CompactionType.SMART_OPTIMIZE); // Common Hive compaction eligibility checks diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStoreUpdateUsingEvents.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStoreUpdateUsingEvents.java index 2db61aa6c06a..fb2a3ca0e737 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStoreUpdateUsingEvents.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStoreUpdateUsingEvents.java @@ -814,13 +814,14 @@ private List allocateTxns(int numTxns) throws Throwable { private List allocateWriteIds(List txnIds, String dbName, String tblName) throws Throwable { AllocateTableWriteIdsRequest allocateTableWriteIdsRequest = new AllocateTableWriteIdsRequest(dbName, tblName); + 
allocateTableWriteIdsRequest.setCatName(Warehouse.DEFAULT_CATALOG_NAME); allocateTableWriteIdsRequest.setTxnIds(txnIds); return hmsHandler.allocate_table_write_ids(allocateTableWriteIdsRequest).getTxnToWriteIds(); } private String getValidWriteIds(String dbName, String tblName) throws Throwable { GetValidWriteIdsRequest validWriteIdsRequest = new GetValidWriteIdsRequest( - Collections.singletonList(TableName.getDbTable(dbName, tblName))); + Collections.singletonList(TableName.getQualified(Warehouse.DEFAULT_CATALOG_NAME, dbName, tblName))); GetValidWriteIdsResponse validWriteIdsResponse = hmsHandler.get_valid_write_ids(validWriteIdsRequest); return TxnCommonUtils.createValidReaderWriteIdList(validWriteIdsResponse. getTblValidWriteIds().get(0)).writeToString(); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/BaseReplicationScenariosAcidTables.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/BaseReplicationScenariosAcidTables.java index 4e5ec74442e5..945f763a0afe 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/BaseReplicationScenariosAcidTables.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/BaseReplicationScenariosAcidTables.java @@ -355,6 +355,7 @@ List allocateWriteIdsForTablesAndAcquireLocks(String primaryCatName, Strin List txns, HiveConf primaryConf) throws Throwable { AllocateTableWriteIdsRequest rqst = new AllocateTableWriteIdsRequest(); rqst.setDbName(primaryDbName); + rqst.setCatName(primaryCatName); List lockIds = new ArrayList<>(); for(Map.Entry entry : tables.entrySet()) { rqst.setTableName(entry.getKey()); @@ -373,17 +374,17 @@ List allocateWriteIdsForTablesAndAcquireLocks(String primaryCatName, Strin lockIds.add(txnHandler.lock(lockRequest).getLockid()); } } - verifyWriteIdsForTables(tables, primaryConf, primaryDbName); + verifyWriteIdsForTables(tables, primaryConf, PRIMARY_CAT_NAME, primaryDbName); return lockIds; } - void verifyWriteIdsForTables(Map 
tables, HiveConf conf, String dbName) + void verifyWriteIdsForTables(Map tables, HiveConf conf, String catName, String dbName) throws Throwable { for(Map.Entry entry : tables.entrySet()) { Assert.assertEquals(TestTxnDbUtil.queryToString(conf, "select * from TXN_TO_WRITE_ID"), entry.getValue().longValue(), TestTxnDbUtil.countQueryAgent(conf, - "select count(*) from TXN_TO_WRITE_ID where t2w_database = '" + "select count(*) from TXN_TO_WRITE_ID where t2w_catalog = '" + catName.toLowerCase() + "' and t2w_database = '" + dbName.toLowerCase() + "' and t2w_table = '" + entry.getKey() + "'")); } @@ -411,25 +412,26 @@ void verifyAllOpenTxnsNotAborted(List txns, HiveConf primaryConf) throws T "select count(*) from TXNS where txn_state = 'a' and " + txnIdRange)); } - void verifyNextId(Map tables, String dbName, HiveConf conf) throws Throwable { + void verifyNextId(Map tables, String catName, String dbName, HiveConf conf) throws Throwable { // Verify the next write id for(Map.Entry entry : tables.entrySet()) { String[] nextWriteId = TestTxnDbUtil.queryToString(conf, - "select nwi_next from NEXT_WRITE_ID where nwi_database = '" + "select nwi_next from NEXT_WRITE_ID where nwi_catalog = '" + + catName.toLowerCase() + "' and nwi_database = '" + dbName.toLowerCase() + "' and nwi_table = '" + entry.getKey() + "'").split("\n"); Assert.assertEquals(Long.parseLong(nextWriteId[1].trim()), entry.getValue() + 1); } } - void verifyCompactionQueue(Map tables, String dbName, HiveConf conf) + void verifyCompactionQueue(Map tables, String catName, String dbName, HiveConf conf) throws Throwable { for(Map.Entry entry : tables.entrySet()) { Assert.assertEquals(TestTxnDbUtil.queryToString(conf, "select * from COMPACTION_QUEUE"), entry.getValue().longValue(), TestTxnDbUtil.countQueryAgent(conf, - "select count(*) from COMPACTION_QUEUE where cq_database = '" + dbName + "select count(*) from COMPACTION_QUEUE where cq_catalog = '" + catName + "' and cq_database = '" + dbName + "' and cq_table = '" + 
entry.getKey() + "'")); } } diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java index f14a71ad9150..a2e692a67c06 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java @@ -528,7 +528,7 @@ public boolean validate(Task task) { return validator.hasTask(rootTask); } - private Task getReplLoadRootTask(String sourceDb, String replicadb, boolean isIncrementalDump, + private Task getReplLoadRootTask(String sourceCat, String sourceDb, String replicadb, boolean isIncrementalDump, Tuple tuple) throws Throwable { HiveConf confTemp = driverMirror.getConf(); Path loadPath = new Path(tuple.dumpLocation, ReplUtils.REPL_HIVE_BASE_DIR); @@ -544,7 +544,7 @@ private Task getReplLoadRootTask(String sourceDb, String replicadb, boolean isIn run only database creation task, and only in next iteration of Repl Load Task execution, remaining tasks will be executed. Hence disabling this to perform the test on task optimization. 
*/ confTemp.setBoolVar(HiveConf.ConfVars.REPL_RETAIN_CUSTOM_LOCATIONS_FOR_DB_ON_TARGET, false); - ReplLoadWork replLoadWork = new ReplLoadWork(confTemp, loadPath.toString(), sourceDb, replicadb, + ReplLoadWork replLoadWork = new ReplLoadWork(confTemp, loadPath.toString(), sourceCat, sourceDb, replicadb, null, null, isIncrementalDump, Long.valueOf(tuple.lastReplId), 0L, metricCollector, false); Task replLoadTask = TaskFactory.get(replLoadWork, confTemp); @@ -565,7 +565,7 @@ public void testTaskCreationOptimization() throws Throwable { Tuple dump = replDumpDb(dbName); //bootstrap load should not have move task - Task task = getReplLoadRootTask(dbName, dbNameReplica, false, dump); + Task task = getReplLoadRootTask(Warehouse.DEFAULT_CATALOG_NAME, dbName, dbNameReplica, false, dump); assertEquals(false, hasMoveTask(task)); assertEquals(true, hasPartitionTask(task)); @@ -579,7 +579,7 @@ public void testTaskCreationOptimization() throws Throwable { // Partition level statistics gets updated as part of the INSERT above. So we see a partition // task corresponding to an ALTER_PARTITION event. 
- task = getReplLoadRootTask(dbName, dbNameReplica, true, dump); + task = getReplLoadRootTask(Warehouse.DEFAULT_CATALOG_NAME, dbName, dbNameReplica, true, dump); assertEquals(true, hasMoveTask(task)); assertEquals(true, hasPartitionTask(task)); @@ -592,7 +592,7 @@ public void testTaskCreationOptimization() throws Throwable { dump = replDumpDb(dbName); //no move task should be added as the operation is adding a dynamic partition - task = getReplLoadRootTask(dbName, dbNameReplica, true, dump); + task = getReplLoadRootTask(Warehouse.DEFAULT_CATALOG_NAME, dbName, dbNameReplica, true, dump); assertEquals(false, hasMoveTask(task)); assertEquals(true, hasPartitionTask(task)); } diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTables.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTables.java index 9536eea72780..e3373ee8e12f 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTables.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTables.java @@ -1564,7 +1564,7 @@ public void testAcidTablesBootstrapWithOpenTxnsTimeout() throws Throwable { verifyAllOpenTxnsAborted(txns, primaryConf); //Release the locks releaseLocks(txnHandler, lockIds); - verifyNextId(tables, primaryDbName, primaryConf); + verifyNextId(tables, PRIMARY_CAT_NAME, primaryDbName, primaryConf); // Bootstrap load which should also replicate the aborted write ids on both tables. 
HiveConf replicaConf = replica.getConf(); @@ -1580,20 +1580,20 @@ public void testAcidTablesBootstrapWithOpenTxnsTimeout() throws Throwable { .verifyResults(new String[] {"10", "11"}); // Verify if HWM is properly set after REPL LOAD - verifyNextId(tables, replicatedDbName, replicaConf); + verifyNextId(tables, PRIMARY_CAT_NAME, replicatedDbName, replicaConf); // Verify if all the aborted write ids are replicated to the replicated DB for(Map.Entry entry : tables.entrySet()) { entry.setValue((long) numTxns); } - verifyWriteIdsForTables(tables, replicaConf, replicatedDbName); + verifyWriteIdsForTables(tables, replicaConf, PRIMARY_CAT_NAME, replicatedDbName); // Verify if entries added in COMPACTION_QUEUE for each table/partition // t1-> 1 entry and t2-> 2 entries (1 per partition) tables.clear(); tables.put("t1", 1L); tables.put("t2", 2L); - verifyCompactionQueue(tables, replicatedDbName, replicaConf); + verifyCompactionQueue(tables, PRIMARY_CAT_NAME, replicatedDbName, replicaConf); } @Test @@ -1693,7 +1693,7 @@ public void testAcidTablesBootstrapWithOpenTxnsDiffDb() throws Throwable { Map tablesInPrimary = new HashMap<>(); tablesInPrimary.put("t1", 1L); tablesInPrimary.put("t2", 2L); - verifyNextId(tablesInPrimary, primaryDbName, primaryConf); + verifyNextId(tablesInPrimary, PRIMARY_CAT_NAME, primaryDbName, primaryConf); // Bootstrap load which should not replicate the write ids on both tables as they are on different db. 
HiveConf replicaConf = replica.getConf(); @@ -1709,13 +1709,13 @@ public void testAcidTablesBootstrapWithOpenTxnsDiffDb() throws Throwable { .verifyResults(new String[]{"10", "11"}); // Verify if HWM is properly set after REPL LOAD - verifyNextId(tablesInPrimary, replicatedDbName, replicaConf); + verifyNextId(tablesInPrimary, PRIMARY_CAT_NAME, replicatedDbName, replicaConf); // Verify if none of the write ids are not replicated to the replicated DB as they belong to diff db for (Map.Entry entry : tablesInPrimary.entrySet()) { entry.setValue((long) 0); } - verifyWriteIdsForTables(tablesInPrimary, replicaConf, replicatedDbName); + verifyWriteIdsForTables(tablesInPrimary, replicaConf, PRIMARY_CAT_NAME, replicatedDbName); //Abort the txns txnHandler.abortTxns(new AbortTxnsRequest(txns)); verifyAllOpenTxnsAborted(txns, primaryConf); @@ -1777,7 +1777,7 @@ public void testAcidTablesBootstrapWithOpenTxnsWaitingForLock() throws Throwable Map tablesInPrimary = new HashMap<>(); tablesInPrimary.put("t1", 1L); tablesInPrimary.put("t2", 2L); - verifyNextId(tablesInPrimary, primaryDbName, primaryConf); + verifyNextId(tablesInPrimary, PRIMARY_CAT_NAME, primaryDbName, primaryConf); // Bootstrap load which should not replicate the write ids on both tables as they are on different db. 
HiveConf replicaConf = replica.getConf(); @@ -1793,13 +1793,13 @@ public void testAcidTablesBootstrapWithOpenTxnsWaitingForLock() throws Throwable .verifyResults(new String[] {"10", "11"}); // Verify if HWM is properly set after REPL LOAD - verifyNextId(tablesInPrimary, replicatedDbName, replicaConf); + verifyNextId(tablesInPrimary, PRIMARY_CAT_NAME, replicatedDbName, replicaConf); // Verify if none of the write ids are not replicated to the replicated DB as they belong to diff db for(Map.Entry entry : tablesInPrimary.entrySet()) { entry.setValue((long) 0); } - verifyWriteIdsForTables(tablesInPrimary, replicaConf, replicatedDbName); + verifyWriteIdsForTables(tablesInPrimary, replicaConf, PRIMARY_CAT_NAME, replicatedDbName); //Abort the txns txnHandler.abortTxns(new AbortTxnsRequest(txns)); verifyAllOpenTxnsAborted(txns, primaryConf); @@ -1853,7 +1853,7 @@ public void testAcidTablesBootstrapWithOpenTxnsPrimaryAndSecondaryDb() throws Th verifyAllOpenTxnsNotAborted(txns, primaryConf); // After bootstrap dump, all the opened txns should be aborted as it belongs to db under replication. Verify it. verifyAllOpenTxnsAborted(txnsSameDb, primaryConf); - verifyNextId(tablesInPrimDb, primaryDbName, primaryConf); + verifyNextId(tablesInPrimDb, PRIMARY_CAT_NAME, primaryDbName, primaryConf); // Bootstrap load which should replicate the write ids on both tables as they are on same db and // not on different db. @@ -1870,13 +1870,13 @@ public void testAcidTablesBootstrapWithOpenTxnsPrimaryAndSecondaryDb() throws Th .verifyResults(new String[] {"10", "11"}); // Verify if HWM is properly set after REPL LOAD - verifyNextId(tablesInPrimDb, replicatedDbName, replicaConf); + verifyNextId(tablesInPrimDb, PRIMARY_CAT_NAME, replicatedDbName, replicaConf); // Verify if only the write ids belonging to primary db are replicated to the replicated DB. 
for(Map.Entry entry : tablesInPrimDb.entrySet()) { entry.setValue((long) numTxns); } - verifyWriteIdsForTables(tablesInPrimDb, replicaConf, replicatedDbName); + verifyWriteIdsForTables(tablesInPrimDb, replicaConf, PRIMARY_CAT_NAME, replicatedDbName); //Abort the txns for secondary db txnHandler.abortTxns(new AbortTxnsRequest(txns)); verifyAllOpenTxnsAborted(txns, primaryConf); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTablesBootstrap.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTablesBootstrap.java index 9562e9c71a0c..26dfe7c7ebc0 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTablesBootstrap.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTablesBootstrap.java @@ -213,7 +213,7 @@ public void testAcidTablesBootstrapDuringIncrementalWithOpenTxnsTimeout() throws // After bootstrap dump, all the opened txns should be aborted. Verify it. 
verifyAllOpenTxnsAborted(txns, primaryConf); releaseLocks(txnHandler, lockIds); - verifyNextId(tables, primaryDbName, primaryConf); + verifyNextId(tables, PRIMARY_CAT_NAME, primaryDbName, primaryConf); // Incremental load with ACID bootstrap should also replicate the aborted write ids on // tables t1 and t2 @@ -222,20 +222,20 @@ public void testAcidTablesBootstrapDuringIncrementalWithOpenTxnsTimeout() throws replica.load(replicatedDbName, primaryDbName); verifyIncLoad(replicatedDbName, incDump.lastReplicationId); // Verify if HWM is properly set after REPL LOAD - verifyNextId(tables, replicatedDbName, replicaConf); + verifyNextId(tables, PRIMARY_CAT_NAME, replicatedDbName, replicaConf); // Verify if all the aborted write ids are replicated to the replicated DB for(Map.Entry entry : tables.entrySet()) { entry.setValue((long) numTxns); } - verifyWriteIdsForTables(tables, replicaConf, replicatedDbName); + verifyWriteIdsForTables(tables, replicaConf, PRIMARY_CAT_NAME, replicatedDbName); // Verify if entries added in COMPACTION_QUEUE for each table/partition // t1-> 1 entry and t2-> 2 entries (1 per partition) tables.clear(); tables.put("t1", 1L); tables.put("t2", 4L); - verifyCompactionQueue(tables, replicatedDbName, replicaConf); + verifyCompactionQueue(tables, PRIMARY_CAT_NAME, replicatedDbName, replicaConf); } @Test diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCleanerWithReplication.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCleanerWithReplication.java index 57ded39bd3b0..d74686355a1a 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCleanerWithReplication.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCleanerWithReplication.java @@ -21,6 +21,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hive.conf.HiveConf; +import 
org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.CompactionRequest; import org.apache.hadoop.hive.metastore.api.CompactionType; import org.apache.hadoop.hive.metastore.api.Partition; @@ -117,9 +118,10 @@ public void cleanupAfterMajorTableCompaction() throws Exception { addDeltaFile(t, null, 23L, 24L, 2); addBaseFile(t, null, 25L, 25); - burnThroughTransactions(dbName, "camtc", 25); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, dbName, "camtc", 25); CompactionRequest rqst = new CompactionRequest(dbName, "camtc", CompactionType.MAJOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); compactInTxn(rqst); assertCleanerActions(6); @@ -135,9 +137,10 @@ public void cleanupAfterMajorPartitionCompaction() throws Exception { addDeltaFile(t, p, 23L, 24L, 2); addBaseFile(t, p, 25L, 25); - burnThroughTransactions(dbName, "campc", 25); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, dbName, "campc", 25); CompactionRequest rqst = new CompactionRequest(dbName, "campc", CompactionType.MAJOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); rqst.setPartitionname("ds=today"); compactInTxn(rqst); @@ -153,9 +156,10 @@ public void cleanupAfterMinorTableCompaction() throws Exception { addDeltaFile(t, null, 23L, 24L, 2); addDeltaFile(t, null, 21L, 24L, 4); - burnThroughTransactions(dbName, "camitc", 24); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, dbName, "camitc", 24); CompactionRequest rqst = new CompactionRequest(dbName, "camitc", CompactionType.MINOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); compactInTxn(rqst); assertCleanerActions(4); @@ -171,9 +175,10 @@ public void cleanupAfterMinorPartitionCompaction() throws Exception { addDeltaFile(t, p, 23L, 24L, 2); addDeltaFile(t, p, 21L, 24L, 4); - burnThroughTransactions(dbName, "camipc", 24); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, dbName, "camipc", 24); CompactionRequest rqst = new CompactionRequest(dbName, "camipc", 
CompactionType.MINOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); rqst.setPartitionname("ds=today"); compactInTxn(rqst); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactionMetrics2.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactionMetrics2.java index a107692c3530..a46aab12ced6 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactionMetrics2.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactionMetrics2.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hive.ql.txn.compactor; +import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.HMSMetricsListener; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.Table; @@ -47,8 +48,8 @@ public void testWritesToDisabledCompactionDatabase() throws Exception { Map params = new HashMap<>(); params.put(hive_metastoreConstants.NO_AUTO_COMPACT, "false"); Table t = newTable(dbName, "comp_disabled", false, params); - burnThroughTransactions(dbName, t.getTableName(), 1, null, null); - burnThroughTransactions(dbName, t.getTableName(), 1, null, new HashSet<>( + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, dbName, t.getTableName(), 1, null, null); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, dbName, t.getTableName(), 1, null, new HashSet<>( Collections.singletonList(2L))); Assert.assertEquals(MetricsConstants.WRITES_TO_DISABLED_COMPACTION_TABLE + " value incorrect", @@ -68,8 +69,8 @@ public void testWritesToEnabledCompactionDatabase() throws Exception { Map params = new HashMap<>(); params.put(hive_metastoreConstants.NO_AUTO_COMPACT, "true"); Table t = newTable(dbName, "comp_enabled", false, params); - burnThroughTransactions(dbName, t.getTableName(), 1, null, null); - burnThroughTransactions(dbName, t.getTableName(), 1, null, new HashSet<>( + 
burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, dbName, t.getTableName(), 1, null, null); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, dbName, t.getTableName(), 1, null, new HashSet<>( Collections.singletonList(2L))); Assert.assertEquals(MetricsConstants.WRITES_TO_DISABLED_COMPACTION_TABLE + " value incorrect", diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java index 170c7cecb503..ab9e01348456 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hive.conf.Constants; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; +import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.HiveMetaStoreClient; import org.apache.hadoop.hive.metastore.IMetaStoreClient; import org.apache.hadoop.hive.metastore.ReplChangeManager; @@ -379,10 +380,10 @@ public void testStatsAfterCompactionPartTbl() throws Exception { Table table = msClient.getTable(dbName, tblName); //compute stats before compaction - CompactionInfo ci = new CompactionInfo(dbName, tblName, "bkt=0", CompactionType.MAJOR); + CompactionInfo ci = new CompactionInfo(Warehouse.DEFAULT_CATALOG_NAME, dbName, tblName, "bkt=0", CompactionType.MAJOR); statsUpdater.gatherStats(conf, ci, emptyMap(), System.getProperty("user.name"), CompactorUtil.getCompactorJobQueueName(conf, ci, table), msClient); - ci = new CompactionInfo(dbName, tblName, "bkt=1", CompactionType.MAJOR); + ci = new CompactionInfo(Warehouse.DEFAULT_CATALOG_NAME, dbName, tblName, "bkt=1", CompactionType.MAJOR); statsUpdater.gatherStats(conf, ci, emptyMap(), System.getProperty("user.name"), CompactorUtil.getCompactorJobQueueName(conf, ci, 
table), msClient); @@ -473,7 +474,7 @@ public void testStatsAfterCompactionTbl() throws Exception { Table table = msClient.getTable(dbName, tblName); //compute stats before compaction - CompactionInfo ci = new CompactionInfo(dbName, tblName, null, CompactionType.MAJOR); + CompactionInfo ci = new CompactionInfo(Warehouse.DEFAULT_CATALOG_NAME, dbName, tblName, null, CompactionType.MAJOR); statsUpdater.gatherStats(conf, ci, emptyMap(), System.getProperty("user.name"), CompactorUtil.getCompactorJobQueueName(conf, ci, table), msClient); @@ -2329,8 +2330,8 @@ public void testTableProperties() throws Exception { @Test public void testCompactionInfoEquals() { - CompactionInfo compactionInfo = new CompactionInfo("dbName", "tableName", "partName", CompactionType.MINOR); - CompactionInfo compactionInfo1 = new CompactionInfo("dbName", "tableName", "partName", CompactionType.MINOR); + CompactionInfo compactionInfo = new CompactionInfo(Warehouse.DEFAULT_CATALOG_NAME, "dbName", "tableName", "partName", CompactionType.MINOR); + CompactionInfo compactionInfo1 = new CompactionInfo(Warehouse.DEFAULT_CATALOG_NAME, "dbName", "tableName", "partName", CompactionType.MINOR); assertEquals("The object must be equal", compactionInfo, compactionInfo); Assert.assertNotEquals("The object must be not equal", compactionInfo, new Object()); @@ -2339,8 +2340,8 @@ public void testCompactionInfoEquals() { @Test public void testCompactionInfoHashCode() { - CompactionInfo compactionInfo = new CompactionInfo("dbName", "tableName", "partName", CompactionType.MINOR); - CompactionInfo compactionInfo1 = new CompactionInfo("dbName", "tableName", "partName", CompactionType.MINOR); + CompactionInfo compactionInfo = new CompactionInfo(Warehouse.DEFAULT_CATALOG_NAME, "dbName", "tableName", "partName", CompactionType.MINOR); + CompactionInfo compactionInfo1 = new CompactionInfo(Warehouse.DEFAULT_CATALOG_NAME, "dbName", "tableName", "partName", CompactionType.MINOR); Assert.assertEquals("The hash codes must be 
equal", compactionInfo.hashCode(), compactionInfo1.hashCode()); } @@ -2657,6 +2658,7 @@ private List getCompactionList() throws Exception { private void verifyCompactions(List compacts, SortedSet partNames, String tblName) { for (ShowCompactResponseElement compact : compacts) { + Assert.assertEquals(Warehouse.DEFAULT_CATALOG_NAME, compact.getCatName()); Assert.assertEquals("default", compact.getDbname()); Assert.assertEquals(tblName, compact.getTablename()); Assert.assertEquals("initiated", compact.getState()); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCrudCompactorOnTez.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCrudCompactorOnTez.java index 98121f7df019..443cbac9b9f9 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCrudCompactorOnTez.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCrudCompactorOnTez.java @@ -41,6 +41,7 @@ import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.common.ValidTxnList; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.HiveMetaStoreClient; import org.apache.hadoop.hive.metastore.IMetaStoreClient; import org.apache.hadoop.hive.metastore.api.CompactionRequest; @@ -863,7 +864,7 @@ public void testStatsAfterQueryCompactionOnTez() throws Exception { Table table = msClient.getTable(dbName, tblName); //compute stats before compaction - CompactionInfo ci = new CompactionInfo(dbName, tblName, null, CompactionType.MAJOR); + CompactionInfo ci = new CompactionInfo(Warehouse.DEFAULT_CATALOG_NAME, dbName, tblName, null, CompactionType.MAJOR); new StatsUpdater().gatherStats(conf, ci, emptyMap(), System.getProperty("user.name"), CompactorUtil.getCompactorJobQueueName(conf, ci, table), msClient); @@ -3170,7 +3171,7 @@ public void testStatsAfterCompactionPartTbl(boolean 
isQueryBased, boolean isAuto Table table = msClient.getTable(dbName, tblName); //compute stats before compaction - CompactionInfo ci = new CompactionInfo(dbName, tblName, "bkt=1", compactionType); + CompactionInfo ci = new CompactionInfo(Warehouse.DEFAULT_CATALOG_NAME, dbName, tblName, "bkt=1", compactionType); new StatsUpdater().gatherStats(conf, ci, emptyMap(), System.getProperty("user.name"), CompactorUtil.getCompactorJobQueueName(conf, ci, table), msClient); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestIcebergCompactorOnTez.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestIcebergCompactorOnTez.java index 4ca427333915..e67980993b82 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestIcebergCompactorOnTez.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestIcebergCompactorOnTez.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hive.ql.txn.compactor; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.HiveMetaStoreClient; import org.apache.hadoop.hive.metastore.api.CompactionType; import org.apache.hadoop.hive.metastore.api.ShowCompactResponse; @@ -34,10 +35,11 @@ import static org.apache.hadoop.hive.ql.txn.compactor.TestCompactor.executeStatementOnDriver; public class TestIcebergCompactorOnTez extends CompactorOnTezTest { - + + private static final String CAT_NAME = Warehouse.DEFAULT_CATALOG_NAME; private static final String DB_NAME = "default"; private static final String TABLE_NAME = "ice_orc"; - private static final String QUALIFIED_TABLE_NAME = TxnUtils.getFullTableName(DB_NAME, TABLE_NAME); + private static final String QUALIFIED_TABLE_NAME = TxnUtils.getFullTableName(CAT_NAME, DB_NAME, TABLE_NAME); @Override @Before diff --git a/ql/src/java/org/apache/hadoop/hive/ql/DriverTxnHandler.java 
b/ql/src/java/org/apache/hadoop/hive/ql/DriverTxnHandler.java index 77b2579df7d6..ef212e1d01e5 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/DriverTxnHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/DriverTxnHandler.java @@ -399,8 +399,8 @@ private void setValidWriteIds(ValidTxnWriteIdList txnWriteIds) { // conditions in ACID. This case is supported only for single source query. Operator source = driverContext.getPlan().getFetchTask().getWork().getSource(); if (source instanceof TableScanOperator tsOp) { - String fullTableName = AcidUtils.getFullTableName(tsOp.getConf().getDatabaseName(), - tsOp.getConf().getTableName()); + String fullTableName = AcidUtils.getFullTableName(tsOp.getConf().getCatalogName(), + tsOp.getConf().getDatabaseName(), tsOp.getConf().getTableName()); ValidWriteIdList writeIdList = txnWriteIds.getTableValidWriteIdList(fullTableName); if (tsOp.getConf().isTranscationalTable() && (writeIdList == null)) { throw new IllegalStateException(String.format( @@ -513,7 +513,7 @@ private void addTableFromEntity(Entity entity, Map tables) { default: return; } - String fullTableName = AcidUtils.getFullTableName(table.getDbName(), table.getTableName()); + String fullTableName = AcidUtils.getFullTableName(table.getCatName(), table.getDbName(), table.getTableName()); tables.put(fullTableName, table); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/HiveQueryLifeTimeHook.java b/ql/src/java/org/apache/hadoop/hive/ql/HiveQueryLifeTimeHook.java index 82a3f190000e..f595787f8a33 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/HiveQueryLifeTimeHook.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/HiveQueryLifeTimeHook.java @@ -98,6 +98,7 @@ private void checkAndRollbackCTAS(QueryLifeTimeHookContext ctx) { LOG.info("Performing cleanup as part of rollback: {}", table.getFullTableName().toString()); try { CompactionRequest request = new CompactionRequest(table.getDbName(), table.getTableName(), CompactionType.MAJOR); + 
request.setCatName(table.getCatName()); request.setRunas(TxnUtils.findUserToRunAs(tblPath.toString(), table.getTTable(), conf)); request.putToProperties(META_TABLE_LOCATION, tblPath.toString()); request.putToProperties(IF_PURGE, Boolean.toString(true)); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsAnalyzer.java index 6fb7a499fd24..38ffad2b6832 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsAnalyzer.java @@ -25,6 +25,7 @@ import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; +import org.apache.hadoop.hive.ql.metadata.HiveUtils; import org.apache.hadoop.hive.ql.parse.ASTNode; import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; import org.apache.hadoop.hive.ql.parse.HiveParser; @@ -47,6 +48,7 @@ public ShowCompactionsAnalyzer(QueryState queryState) throws SemanticException { @Override public void analyzeInternal(ASTNode root) throws SemanticException { ctx.setResFile(ctx.getLocalTmpPath()); + String catName = null; String dbName = null; String tblName = null; String poolName = null; @@ -65,8 +67,13 @@ public void analyzeInternal(ASTNode root) throws SemanticException { case HiveParser.TOK_TABTYPE: tblName = child.getChild(0).getText(); if (child.getChild(0).getChildCount() == 2) { + catName = HiveUtils.getCurrentCatalogOrDefault(conf); dbName = child.getChild(0).getChild(0).getText(); tblName = child.getChild(0).getChild(1).getText(); + } else if (child.getChild(0).getChildCount() == 3) { + catName = child.getChild(0).getChild(0).getText(); + dbName = child.getChild(0).getChild(1).getText(); + tblName = child.getChild(0).getChild(2).getText(); } if 
(child.getChildCount() == 2) { ASTNode partitionSpecNode = (ASTNode) child.getChild(1); @@ -95,7 +102,10 @@ public void analyzeInternal(ASTNode root) throws SemanticException { dbName = stripQuotes(child.getText()); } } - ShowCompactionsDesc desc = new ShowCompactionsDesc(ctx.getResFile(), compactionId, dbName, tblName, poolName, compactionType, + if (catName == null) { + catName = HiveUtils.getCurrentCatalogOrDefault(conf); + } + ShowCompactionsDesc desc = new ShowCompactionsDesc(ctx.getResFile(), compactionId, catName, dbName, tblName, poolName, compactionType, compactionStatus, partitionSpec, limit, orderBy); Task task = TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)); rootTasks.add(task); @@ -120,6 +130,7 @@ private String getDbColumnName(ASTNode expression) { private enum CompactionColumn { COMPACTIONID("\"CC_ID\""), + CATNAME("\"CC_CATALOG\""), DBNAME("\"CC_DATABASE\""), TABNAME("\"CC_TABLE\""), PARTNAME("\"CC_PARTITION\""), diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsDesc.java index 4e10800ec1bb..541600e3d91b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsDesc.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hive.ql.ddl.process.show.compactions; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.ql.ddl.DDLDesc; import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; @@ -40,6 +41,7 @@ public class ShowCompactionsDesc implements DDLDesc, Serializable { private String resFile; private long compactionId; private final String poolName; + private final String catName; private final String dbName; private final String tbName; private final String 
compactionType; @@ -49,11 +51,13 @@ public class ShowCompactionsDesc implements DDLDesc, Serializable { private final String orderBy; - public ShowCompactionsDesc(Path resFile, long compactionId, String dbName, String tbName, String poolName, String compactionType, - String compactionStatus, Map partSpec, short limit, String orderBy) { + public ShowCompactionsDesc(Path resFile, long compactionId, String catName, String dbName, String tbName, + String poolName, String compactionType, String compactionStatus, + Map partSpec, short limit, String orderBy) { this.resFile = resFile.toString(); this.compactionId = compactionId; this.poolName = poolName; + this.catName = catName; this.dbName = dbName; this.tbName = tbName; this.compactionType = compactionType; @@ -74,6 +78,10 @@ public long getCompactionId() { public String getPoolName() { return poolName; } + @Explain(displayName = "catName", explainLevels = {Level.USER, Level.DEFAULT, Level.EXTENDED}) + public String getCatName() { + return catName; + } @Explain(displayName = "dbName", explainLevels = {Level.USER, Level.DEFAULT, Level.EXTENDED}) public String getDbName() { return dbName; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsOperation.java index cd8bd20416c8..ba1f59c0657d 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsOperation.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsOperation.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.HiveUtils; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.session.SessionState; @@ -79,6 +80,11 @@ public int execute() throws 
HiveException { private ShowCompactRequest getShowCompactioRequest(ShowCompactionsDesc desc) throws SemanticException { ShowCompactRequest request = new ShowCompactRequest(); + if(isBlank(desc.getCatName()) && isNotBlank(desc.getTbName())) { + request.setCatName(HiveUtils.getCurrentCatalogOrDefault(context.getConf())); + } else { + request.setCatName(desc.getCatName()); + } if (isBlank(desc.getDbName()) && isNotBlank(desc.getTbName())) { request.setDbName(SessionState.get().getCurrentDatabase()); } else { @@ -114,6 +120,8 @@ private ShowCompactRequest getShowCompactioRequest(ShowCompactionsDesc desc) thr private void writeHeader(DataOutputStream os) throws IOException { os.writeBytes("CompactionId"); os.write(Utilities.tabCode); + os.writeBytes("Catalog"); + os.write(Utilities.tabCode); os.writeBytes("Database"); os.write(Utilities.tabCode); os.writeBytes("Table"); @@ -157,6 +165,8 @@ private void writeHeader(DataOutputStream os) throws IOException { private void writeRow(DataOutputStream os, ShowCompactResponseElement e) throws IOException { os.writeBytes(Long.toString(e.getId())); os.write(Utilities.tabCode); + os.writeBytes(e.getCatName()); + os.write(Utilities.tabCode); os.writeBytes(e.getDbname()); os.write(Utilities.tabCode); os.writeBytes(e.getTablename()); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/add/AbstractAddPartitionAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/add/AbstractAddPartitionAnalyzer.java index 4d73782aaee4..caf34a02828a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/add/AbstractAddPartitionAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/add/AbstractAddPartitionAnalyzer.java @@ -69,8 +69,8 @@ protected void analyzeCommand(TableName tableName, Map partition return; } - AlterTableAddPartitionDesc desc = new AlterTableAddPartitionDesc(table.getDbName(), table.getTableName(), - ifNotExists, partitions); + AlterTableAddPartitionDesc desc 
= new AlterTableAddPartitionDesc(table.getCatName(), table.getDbName(), + table.getTableName(), ifNotExists, partitions); Task ddlTask = TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)); rootTasks.add(ddlTask); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/add/AlterTableAddPartitionDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/add/AlterTableAddPartitionDesc.java index 231e3f14fcf5..5966d9072116 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/add/AlterTableAddPartitionDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/add/AlterTableAddPartitionDesc.java @@ -170,6 +170,7 @@ public long getWriteId() { } } + private final String catName; private final String dbName; private final String tableName; private final boolean ifNotExists; @@ -178,14 +179,20 @@ public long getWriteId() { private ReplicationSpec replicationSpec = null; // TODO: make replicationSpec final too - public AlterTableAddPartitionDesc(String dbName, String tableName, boolean ifNotExists, + public AlterTableAddPartitionDesc(String catName, String dbName, String tableName, boolean ifNotExists, List partitions) { + this.catName = catName; this.dbName = dbName; this.tableName = tableName; this.ifNotExists = ifNotExists; this.partitions = partitions; } + @Explain(displayName = "cat name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED}) + public String getCatName() { + return catName; + } + @Explain(displayName = "db name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) public String getDbName() { return dbName; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/compact/AlterTableCompactOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/compact/AlterTableCompactOperation.java index 571c8e196700..1e8373289eac 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/compact/AlterTableCompactOperation.java +++ 
b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/compact/AlterTableCompactOperation.java @@ -94,6 +94,7 @@ else if (desc.getPartitionSpec() != null) { CompactionRequest compactionRequest = new CompactionRequest(table.getDbName(), table.getTableName(), compactionTypeStr2ThriftType(desc.getCompactionType())); + compactionRequest.setCatName(table.getCatName()); String poolName = ObjectUtils.defaultIfNull(desc.getPoolName(), CompactorUtil.getPoolName(context.getConf(), table.getTTable(), metadataCache)); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/create/CreateMaterializedViewOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/create/CreateMaterializedViewOperation.java index d8aa28158c03..8fa67431b5e1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/create/CreateMaterializedViewOperation.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/create/CreateMaterializedViewOperation.java @@ -71,7 +71,7 @@ public int execute() throws HiveException { sourceTables.add(context.getDb().getTable(tableName).createSourceTable()); } MaterializedViewMetadata metadata = new MaterializedViewMetadata( - MetaStoreUtils.getDefaultCatalog(context.getConf()), + tbl.getCatName(), tbl.getDbName(), tbl.getTableName(), sourceTables, diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/materialized/update/MaterializedViewUpdateOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/materialized/update/MaterializedViewUpdateOperation.java index a213e7ea5df9..51af4e7f9124 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/materialized/update/MaterializedViewUpdateOperation.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/materialized/update/MaterializedViewUpdateOperation.java @@ -55,7 +55,7 @@ public int execute() throws HiveException { Table mvTable = context.getDb().getTable(desc.getName()); MaterializedViewMetadata newMetadata = mvTable.getMVMetadata().reset( getSnapshotOf(context, 
mvTable.getMVMetadata().getSourceTableNames())); - context.getDb().updateCreationMetadata(mvTable.getDbName(), mvTable.getTableName(), newMetadata); + context.getDb().updateCreationMetadata(mvTable.getCatName(), mvTable.getDbName(), mvTable.getTableName(), newMetadata); mvTable.setMaterializedViewMetadata(newMetadata); HiveMaterializedViewsRegistry.get().refreshMaterializedView(context.getDb().getConf(), mvTable); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ReplTxnTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ReplTxnTask.java index fd12731cbe76..78301cbd6843 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ReplTxnTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ReplTxnTask.java @@ -120,7 +120,8 @@ public int execute() { " txnToWriteIdList: " +txnToWriteIdList.toString() + " table name: " + tableName); return 0; case REPL_WRITEID_STATE: - txnManager.replTableWriteIdState(work.getValidWriteIdList(), work.getDbName(), tableName, work.getPartNames()); + txnManager.replTableWriteIdState(work.getValidWriteIdList(), work.getCatName(), work.getDbName(), + tableName, work.getPartNames()); LOG.info("Replicated WriteId state for DbName: " + work.getDbName() + " TableName: " + tableName + " ValidWriteIdList: " + work.getValidWriteIdList()); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java index 9287fd75e766..77e7bb5fd4fc 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java @@ -317,11 +317,11 @@ a database ( directory ) listing before providing the lower level listing. This is also required such that the dbTracker / tableTracker are setup correctly always. 
*/ - TableContext tableContext = new TableContext(dbTracker, work.dbNameToLoadIn); + TableContext tableContext = new TableContext(dbTracker, work.catName, work.dbNameToLoadIn); FSTableEvent tableEvent = (FSTableEvent) next; if (TableType.VIRTUAL_VIEW.name().equals(tableEvent.getMetaData().getTable().getTableType())) { tableTracker = new TaskTracker(1); - tableTracker.addTask(createViewTask(tableEvent.getMetaData(), work.dbNameToLoadIn, conf, + tableTracker.addTask(createViewTask(tableEvent.getMetaData(), work.catName, work.dbNameToLoadIn, conf, (new Path(work.dumpDirectory).getParent()).toString(), work.getMetricCollector())); } else { LoadTable loadTable = new LoadTable(tableEvent, loadContext, iterator.replLogger(), tableContext, @@ -351,7 +351,7 @@ a database ( directory ) // for a table we explicitly try to load partitions as there is no separate partitions events. LoadPartitions loadPartitions = new LoadPartitions(loadContext, iterator.replLogger(), loadTaskTracker, tableEvent, - work.dbNameToLoadIn, tableContext, work.getMetricCollector(), work.tablesToBootstrap); + work.catName, work.dbNameToLoadIn, tableContext, work.getMetricCollector(), work.tablesToBootstrap); TaskTracker partitionsTracker = loadPartitions.tasks(); partitionsPostProcessing(iterator, scope, loadTaskTracker, tableTracker, partitionsTracker); @@ -488,11 +488,12 @@ private TaskTracker addLoadPartitionTasks(Context loadContext, BootstrapEvent ne BootstrapEventsIterator iterator, Scope scope, TaskTracker loadTaskTracker, TaskTracker tableTracker) throws Exception { PartitionEvent event = (PartitionEvent) next; - TableContext tableContext = new TableContext(dbTracker, work.dbNameToLoadIn); + TableContext tableContext = new TableContext(dbTracker, work.catName, work.dbNameToLoadIn); LoadPartitions loadPartitions = new LoadPartitions(loadContext, iterator.replLogger(), tableContext, loadTaskTracker, - event.asTableEvent(), work.dbNameToLoadIn, event.lastPartitionReplicated(), 
work.getMetricCollector(), - event.lastPartSpecReplicated(), event.lastStageReplicated(), getWork().tablesToBootstrap); + event.asTableEvent(), work.catName, work.dbNameToLoadIn, event.lastPartitionReplicated(), + work.getMetricCollector(), event.lastPartSpecReplicated(), event.lastStageReplicated(), + getWork().tablesToBootstrap); /* the tableTracker here should be a new instance and not an existing one as this can only happen when we break in between loading partitions. @@ -509,7 +510,7 @@ private TaskTracker addLoadConstraintsTasks(Context loadContext, TaskTracker dbTracker, Scope scope) throws IOException, SemanticException { LoadConstraint loadConstraint = - new LoadConstraint(loadContext, (ConstraintEvent) next, work.dbNameToLoadIn, dbTracker, + new LoadConstraint(loadContext, (ConstraintEvent) next, work.catName, work.dbNameToLoadIn, dbTracker, (new Path(work.dumpDirectory)).getParent().toString(), work.getMetricCollector()); TaskTracker constraintTracker = loadConstraint.tasks(); scope.rootTasks.addAll(constraintTracker.tasks()); @@ -520,7 +521,7 @@ private TaskTracker addLoadConstraintsTasks(Context loadContext, private TaskTracker addLoadFunctionTasks(Context loadContext, BootstrapEventsIterator iterator, BootstrapEvent next, TaskTracker dbTracker, Scope scope) throws IOException, SemanticException { LoadFunction loadFunction = new LoadFunction(loadContext, iterator.replLogger(), - (FunctionEvent) next, work.dbNameToLoadIn, dbTracker, (new Path(work.dumpDirectory)).getParent().toString(), + (FunctionEvent) next, work.catName, work.dbNameToLoadIn, dbTracker, (new Path(work.dumpDirectory)).getParent().toString(), work.getMetricCollector()); TaskTracker functionsTracker = loadFunction.tasks(); if (!scope.database) { @@ -532,12 +533,13 @@ private TaskTracker addLoadFunctionTasks(Context loadContext, BootstrapEventsIte return functionsTracker; } - public static Task createViewTask(MetaData metaData, String dbNameToLoadIn, HiveConf conf, + public static Task 
createViewTask(MetaData metaData, String catalogName, String dbNameToLoadIn, HiveConf conf, String dumpDirectory, ReplicationMetricCollector metricCollector) throws SemanticException { Table table = new Table(metaData.getTable()); + String catName = catalogName == null ? table.getCatName() : catalogName; String dbName = dbNameToLoadIn == null ? table.getDbName() : dbNameToLoadIn; - TableName tableName = HiveTableName.ofNullable(table.getTableName(), dbName); + TableName tableName = HiveTableName.ofNullable(catName, table.getTableName(), dbName, null); String dbDotView = tableName.getNotEmptyDbTable(); String viewOriginalText = table.getViewOriginalText(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadWork.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadWork.java index 2f9214f7072c..7720bd74fa66 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadWork.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadWork.java @@ -75,6 +75,7 @@ public class ReplLoadWork implements Serializable, ReplLoadWorkMBean { private static final Logger LOG = LoggerFactory.getLogger(ReplLoadWork.class); private static boolean enableMBeansRegistrationForTests = false; public static boolean disableMbeanUnregistrationForTests = false; + final String catName; final String dbNameToLoadIn; final ReplScope currentReplScope; final String dumpDirectory; @@ -108,13 +109,14 @@ public class ReplLoadWork implements Serializable, ReplLoadWorkMBean { final LineageState sessionStateLineageState; public ReplLoadWork(HiveConf hiveConf, String dumpDirectory, - String sourceDbName, String dbNameToLoadIn, ReplScope currentReplScope, + String sourceDbName, String catName, String dbNameToLoadIn, ReplScope currentReplScope, LineageState lineageState, boolean isIncrementalDump, Long eventTo, Long dumpExecutionId, ReplicationMetricCollector metricCollector, boolean replScopeModified) throws IOException, SemanticException { sessionStateLineageState = 
lineageState; this.dumpDirectory = dumpDirectory; + this.catName = catName; this.dbNameToLoadIn = dbNameToLoadIn; this.currentReplScope = currentReplScope; this.sourceDbName = sourceDbName; @@ -185,7 +187,7 @@ public ReplLoadWork(HiveConf hiveConf, String dumpDirectory, this.constraintsIterator = null; } try { - incrementalLoadTasksBuilder = new IncrementalLoadTasksBuilder(dbNameToLoadIn, dumpDirectory, + incrementalLoadTasksBuilder = new IncrementalLoadTasksBuilder(catName, dbNameToLoadIn, dumpDirectory, new IncrementalLoadEventsIterator(dumpDirectory, hiveConf), hiveConf, eventTo, metricCollector, replStatsTracker, shouldFailover, tablesToBootstrap.size()); } catch (HiveException e) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/TableEvent.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/TableEvent.java index af5d013c4f4f..796636479706 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/TableEvent.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/TableEvent.java @@ -26,7 +26,7 @@ import java.util.List; public interface TableEvent extends BootstrapEvent { - ImportTableDesc tableDesc(String dbName) throws SemanticException; + ImportTableDesc tableDesc(String catName, String dbName) throws SemanticException; List partitionDescriptions(ImportTableDesc tblDesc) throws SemanticException; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSPartitionEvent.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSPartitionEvent.java index c7705e603813..11777978e172 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSPartitionEvent.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSPartitionEvent.java @@ -69,8 +69,8 @@ public TableEvent asTableEvent() { } @Override - public ImportTableDesc tableDesc(String dbName) throws 
SemanticException { - return tableEvent.tableDesc(dbName); + public ImportTableDesc tableDesc(String catName, String dbName) throws SemanticException { + return tableEvent.tableDesc(catName, dbName); } @Override diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSTableEvent.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSTableEvent.java index a141aa2c5381..4c00f203d5b1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSTableEvent.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSTableEvent.java @@ -98,11 +98,12 @@ public MetaData getMetaData() { * and not {@link ImportTableDesc#tableType()} method. */ @Override - public ImportTableDesc tableDesc(String dbName) throws SemanticException { + public ImportTableDesc tableDesc(String catName, String dbName) throws SemanticException { try { Table table = new Table(metadata.getTable()); ImportTableDesc tableDesc - = new ImportTableDesc(StringUtils.isBlank(dbName) ? table.getDbName() : dbName, table); + = new ImportTableDesc(StringUtils.isBlank(catName) ? table.getCatName() : catName, + StringUtils.isBlank(dbName) ? table.getDbName() : dbName, table); if (TableType.EXTERNAL_TABLE.equals(table.getTableType())) { tableDesc.setLocation( table.getDataLocation() == null ? 
null : table.getDataLocation().toString()); @@ -178,8 +179,8 @@ private AlterTableAddPartitionDesc addPartitionDesc(Path fromPath, ImportTableDe sd.getNumBuckets(), sd.getCols(), sd.getSerdeInfo().getSerializationLib(), sd.getSerdeInfo().getParameters(), sd.getBucketCols(), sd.getSortCols(), columnStatistics, writeId); - AlterTableAddPartitionDesc addPartitionDesc = new AlterTableAddPartitionDesc(tblDesc.getDatabaseName(), - tblDesc.getTableName(), true, ImmutableList.of(partitionDesc)); + AlterTableAddPartitionDesc addPartitionDesc = new AlterTableAddPartitionDesc(tblDesc.getCatName(), + tblDesc.getDatabaseName(), tblDesc.getTableName(), true, ImmutableList.of(partitionDesc)); addPartitionDesc.setReplicationSpec(replicationSpec()); return addPartitionDesc; } catch (Exception e) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadConstraint.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadConstraint.java index b131b8ef0b3b..f1725368ca25 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadConstraint.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadConstraint.java @@ -69,25 +69,28 @@ public class LoadConstraint { private static final Logger LOG = LoggerFactory.getLogger(LoadFunction.class); private Context context; private final ConstraintEvent event; + private final String catName; private final String dbNameToLoadIn; private final TaskTracker tracker; private final MessageDeserializer deserializer = JSONMessageEncoder.getInstance().getDeserializer(); String dumpDirectory; private transient ReplicationMetricCollector metricCollector; - public LoadConstraint(Context context, ConstraintEvent event, String dbNameToLoadIn, + public LoadConstraint(Context context, ConstraintEvent event, String catName, String dbNameToLoadIn, TaskTracker existingTracker) { this.context = context; this.event = event; + this.catName = catName; this.dbNameToLoadIn = dbNameToLoadIn; 
this.tracker = new TaskTracker(existingTracker); } - public LoadConstraint(Context context, ConstraintEvent event, String dbNameToLoadIn, + public LoadConstraint(Context context, ConstraintEvent event, String catName, String dbNameToLoadIn, TaskTracker existingTracker, String dumpDirectory, ReplicationMetricCollector metricCollector) { this.context = context; this.event = event; + this.catName = catName; this.dbNameToLoadIn = dbNameToLoadIn; this.tracker = new TaskTracker(existingTracker); this.dumpDirectory = dumpDirectory; @@ -117,7 +120,7 @@ public TaskTracker tasks() throws IOException, SemanticException { pkDumpMetaData.setPayload(pksString); tasks.addAll(pkHandler.handle( new MessageHandler.Context( - dbNameToLoadIn, fromPath.toString(), null, pkDumpMetaData, context.hiveConf, + catName, dbNameToLoadIn, fromPath.toString(), null, pkDumpMetaData, context.hiveConf, context.hiveDb, context.nestedContext, LOG, dumpDirectory, metricCollector))); } @@ -128,7 +131,7 @@ public TaskTracker tasks() throws IOException, SemanticException { ukDumpMetaData.setPayload(uksString); tasks.addAll(ukHandler.handle( new MessageHandler.Context( - dbNameToLoadIn, fromPath.toString(), null, ukDumpMetaData, context.hiveConf, + catName, dbNameToLoadIn, fromPath.toString(), null, ukDumpMetaData, context.hiveConf, context.hiveDb, context.nestedContext, LOG, dumpDirectory, metricCollector))); } @@ -139,7 +142,7 @@ public TaskTracker tasks() throws IOException, SemanticException { nnDumpMetaData.setPayload(nnsString); tasks.addAll(nnHandler.handle( new MessageHandler.Context( - dbNameToLoadIn, fromPath.toString(), null, nnDumpMetaData, context.hiveConf, + catName, dbNameToLoadIn, fromPath.toString(), null, nnDumpMetaData, context.hiveConf, context.hiveDb, context.nestedContext, LOG, dumpDirectory, metricCollector))); } @@ -150,7 +153,7 @@ public TaskTracker tasks() throws IOException, SemanticException { dkDumpMetaData.setPayload(dksString); tasks.addAll(dkHandler.handle( new 
MessageHandler.Context( - dbNameToLoadIn, fromPath.toString(), null, dkDumpMetaData, context.hiveConf, + catName, dbNameToLoadIn, fromPath.toString(), null, dkDumpMetaData, context.hiveConf, context.hiveDb, context.nestedContext, LOG, dumpDirectory, metricCollector))); } @@ -161,7 +164,7 @@ public TaskTracker tasks() throws IOException, SemanticException { dkDumpMetaData.setPayload(cksString); tasks.addAll(ckHandler.handle( new MessageHandler.Context( - dbNameToLoadIn, fromPath.toString(), null, dkDumpMetaData, context.hiveConf, + catName, dbNameToLoadIn, fromPath.toString(), null, dkDumpMetaData, context.hiveConf, context.hiveDb, context.nestedContext, LOG, dumpDirectory, metricCollector))); } @@ -172,7 +175,7 @@ public TaskTracker tasks() throws IOException, SemanticException { fkDumpMetaData.setPayload(fksString); tasks.addAll(fkHandler.handle( new MessageHandler.Context( - dbNameToLoadIn, fromPath.toString(), null, fkDumpMetaData, context.hiveConf, + catName, dbNameToLoadIn, fromPath.toString(), null, fkDumpMetaData, context.hiveConf, context.hiveDb, context.nestedContext, LOG, dumpDirectory, metricCollector))); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadFunction.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadFunction.java index 7350267c06a5..c7c0a24e73df 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadFunction.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadFunction.java @@ -54,27 +54,31 @@ public class LoadFunction { private Context context; private ReplLogger replLogger; private final FunctionEvent event; + private final String catName; private final String dbNameToLoadIn; private final TaskTracker tracker; String dumpDirectory; private final ReplicationMetricCollector metricCollector; public LoadFunction(Context context, ReplLogger replLogger, FunctionEvent event, - String dbNameToLoadIn, TaskTracker existingTracker, 
ReplicationMetricCollector metricCollector) { + String catName, String dbNameToLoadIn, TaskTracker existingTracker, + ReplicationMetricCollector metricCollector) { this.context = context; this.replLogger = replLogger; this.event = event; + this.catName = catName; this.dbNameToLoadIn = dbNameToLoadIn; this.tracker = new TaskTracker(existingTracker); this.metricCollector = metricCollector; } public LoadFunction(Context context, ReplLogger replLogger, FunctionEvent event, - String dbNameToLoadIn, TaskTracker existingTracker, + String catName, String dbNameToLoadIn, TaskTracker existingTracker, String dumpDirectory, ReplicationMetricCollector metricCollector) { this.context = context; this.replLogger = replLogger; this.event = event; + this.catName = catName; this.dbNameToLoadIn = dbNameToLoadIn; this.tracker = new TaskTracker(existingTracker); this.dumpDirectory = dumpDirectory; @@ -100,7 +104,7 @@ public TaskTracker tasks() throws IOException, SemanticException { CreateFunctionHandler handler = new CreateFunctionHandler(); List> tasks = handler.handle( new MessageHandler.Context( - dbNameToLoadIn, fromPath.toString(), null, null, context.hiveConf, + catName, dbNameToLoadIn, fromPath.toString(), null, null, context.hiveConf, context.hiveDb, context.nestedContext, LOG, dumpDirectory, metricCollector) ); createFunctionReplLogTask(tasks, handler.getFunctionName()); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java index c3760aaeb976..6091ce3e3477 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java @@ -88,14 +88,15 @@ public class LoadPartitions { private final List tablesToBootstrap; private Table table; - public LoadPartitions(Context context, ReplLogger replLogger, TaskTracker 
tableTracker, TableEvent event, String dbNameToLoadIn, - TableContext tableContext, ReplicationMetricCollector metricCollector, List tablesToBootstrap) throws HiveException { - this(context, replLogger, tableContext, tableTracker, event, dbNameToLoadIn, null, metricCollector, null, + public LoadPartitions(Context context, ReplLogger replLogger, TaskTracker tableTracker, TableEvent event, String catName, + String dbNameToLoadIn, TableContext tableContext, ReplicationMetricCollector metricCollector, + List tablesToBootstrap) throws HiveException { + this(context, replLogger, tableContext, tableTracker, event, catName, dbNameToLoadIn, null, metricCollector, null, PartitionState.Stage.PARTITION, tablesToBootstrap); } public LoadPartitions(Context context, ReplLogger replLogger, TableContext tableContext, TaskTracker limiter, - TableEvent event, String dbNameToLoadIn, AlterTableAddPartitionDesc lastReplicatedPartition, + TableEvent event, String catName, String dbNameToLoadIn, AlterTableAddPartitionDesc lastReplicatedPartition, ReplicationMetricCollector metricCollector, AlterTableAddPartitionDesc.PartitionDesc lastReplicatedPartitionDesc, PartitionState.Stage lastReplicatedStage, List tablesToBootstrap) throws HiveException { this.tracker = new TaskTracker(limiter); @@ -104,7 +105,7 @@ public LoadPartitions(Context context, ReplLogger replLogger, TableContext table this.replLogger = replLogger; this.lastReplicatedPartition = lastReplicatedPartition; this.tableContext = tableContext; - this.tableDesc = event.tableDesc(dbNameToLoadIn); + this.tableDesc = event.tableDesc(catName, dbNameToLoadIn); this.table = ImportSemanticAnalyzer.tableIfExists(tableDesc, context.hiveDb); this.metricCollector = metricCollector; this.lastReplicatedPartitionDesc = lastReplicatedPartitionDesc; @@ -242,8 +243,8 @@ private void addConsolidatedPartitionDesc(AlterTableAddPartitionDesc lastAlterTa src.getSerdeParams(), src.getBucketCols(), src.getSortCols(), src.getColStats(), 
src.getWriteId())); } - AlterTableAddPartitionDesc consolidatedPartitionDesc = new AlterTableAddPartitionDesc(tableDesc.getDatabaseName(), - tableDesc.getTableName(), true, partitions); + AlterTableAddPartitionDesc consolidatedPartitionDesc = new AlterTableAddPartitionDesc(tableDesc.getCatName(), + tableDesc.getDatabaseName(), tableDesc.getTableName(), true, partitions); //don't need to add ckpt task separately. Added as part of add partition task addPartition((toPartitionCount < totalPartitionCount), consolidatedPartitionDesc); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java index 54ca6e02fc1a..55789fb5e075 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java @@ -89,10 +89,11 @@ public TaskTracker tasks(boolean isBootstrapDuringInc, boolean isSecondFailover) if (event.shouldNotReplicate()) { return tracker; } + String catName = tableContext.catName; String dbName = tableContext.dbNameToLoadIn; //this can never be null or empty; // Create table associated with the import // Executed if relevant, and used to contain all the other details about the table if not. - ImportTableDesc tableDesc = event.tableDesc(dbName); + ImportTableDesc tableDesc = event.tableDesc(catName, dbName); Table table = ImportSemanticAnalyzer.tableIfExists(tableDesc, context.hiveDb); if (isSecondFailover) { @@ -211,8 +212,8 @@ private void newTableTasks(ImportTableDesc tblDesc, Task tblRootTask, TableLo Task parentTask = createTableTask; if (replicationSpec.isTransactionalTableDump()) { List partNames = isPartitioned(tblDesc) ? 
event.partitions(tblDesc) : null; - ReplTxnWork replTxnWork = new ReplTxnWork(tblDesc.getDatabaseName(), tblDesc.getTableName(), partNames, - replicationSpec.getValidWriteIdList(), ReplTxnWork.OperationType.REPL_WRITEID_STATE, + ReplTxnWork replTxnWork = new ReplTxnWork(tblDesc.getCatName(), tblDesc.getDatabaseName(), tblDesc.getTableName(), + partNames, replicationSpec.getValidWriteIdList(), ReplTxnWork.OperationType.REPL_WRITEID_STATE, (new Path(context.dumpDirectory)).getParent().toString(), metricCollector); Task replTxnTask = TaskFactory.get(replTxnWork, context.hiveConf); parentTask.addDependentTask(replTxnTask); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/TableContext.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/TableContext.java index 16679db0c7fe..63bba141d86c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/TableContext.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/TableContext.java @@ -20,10 +20,12 @@ import org.apache.hadoop.hive.ql.exec.repl.util.TaskTracker; public class TableContext { + final String catName; final String dbNameToLoadIn; private final TaskTracker parentTracker; - public TableContext(TaskTracker parentTracker, String dbNameToLoadIn) { + public TableContext(TaskTracker parentTracker, String catName, String dbNameToLoadIn) { + this.catName = catName; this.dbNameToLoadIn = dbNameToLoadIn; this.parentTracker = parentTracker; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/incremental/IncrementalLoadTasksBuilder.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/incremental/IncrementalLoadTasksBuilder.java index 00a3b600fd66..002213741e33 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/incremental/IncrementalLoadTasksBuilder.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/incremental/IncrementalLoadTasksBuilder.java @@ -64,6 +64,7 @@ * Iterate through the dump 
directory and create tasks to load the events. */ public class IncrementalLoadTasksBuilder { + private final String catName; private final String dbName; private final IncrementalLoadEventsIterator iterator; private final HashSet inputs; @@ -82,9 +83,10 @@ public ReplLogger getReplLogger() { private final ReplicationMetricCollector metricCollector; private boolean shouldFailover; - public IncrementalLoadTasksBuilder(String dbName, String loadPath, IncrementalLoadEventsIterator iterator, + public IncrementalLoadTasksBuilder(String catName, String dbName, String loadPath, IncrementalLoadEventsIterator iterator, HiveConf conf, Long eventTo, ReplicationMetricCollector metricCollector, ReplStatsTracker replStatsTracker, boolean shouldFailover, int bootstrapTableSize) throws SemanticException { + this.catName = catName; this.dbName = dbName; dumpDirectory = (new Path(loadPath).getParent()).toString(); this.iterator = iterator; @@ -171,7 +173,7 @@ public Task build(Context context, Hive hive, Logger log, // Once this entire chain is generated, we add evTaskRoot to rootTasks, so as to execute the // entire chain - MessageHandler.Context mhContext = new MessageHandler.Context(dbName, location, + MessageHandler.Context mhContext = new MessageHandler.Context(catName, dbName, location, taskChainTail, eventDmd, conf, hive, context, this.log, dumpDirectory, metricCollector); List> evTasks = analyzeEventLoad(mhContext); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java index c0e3b25001fc..14c869a7d484 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java @@ -2325,6 +2325,7 @@ public static void setValidWriteIdList(Configuration conf, ValidWriteIdList vali */ public static void setValidWriteIdList(Configuration conf, TableScanDesc tsDesc) { if (tsDesc.isTranscationalTable()) { + String catName = tsDesc.getCatalogName(); String 
dbName = tsDesc.getDatabaseName(); String tableName = tsDesc.getTableName(); ValidWriteIdList validWriteIdList = getTableValidWriteIdList(conf, @@ -2466,7 +2467,11 @@ public static ValidWriteIdList getTableValidWriteIdListWithTxnList( } public static String getFullTableName(String dbName, String tableName) { - return TableName.fromString(tableName, null, dbName).getNotEmptyDbTable().toLowerCase(); + return getFullTableName(Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName); + } + + public static String getFullTableName(String catName, String dbName, String tableName) { + return TableName.fromString(tableName, catName, dbName, null).getQualified().toLowerCase(); } /** diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java index 3993dc5e8dfb..5e71e2948bc5 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java @@ -672,8 +672,14 @@ public Map getReplayedTxnsForPolicy(String replPolicy) throws Lo @Override public void replTableWriteIdState(String validWriteIdList, String dbName, String tableName, List partNames) throws LockException { + replTableWriteIdState(validWriteIdList, HiveUtils.getCurrentCatalogOrDefault(conf), dbName, tableName, partNames); + } + + @Override + public void replTableWriteIdState(String validWriteIdList, String catName, String dbName, String tableName, List partNames) + throws LockException { try { - getMS().replTableWriteIdState(validWriteIdList, dbName, tableName, partNames); + getMS().replTableWriteIdState(validWriteIdList, catName, dbName, tableName, partNames); } catch (TException e) { throw new LockException(ErrorMsg.METASTORE_COMMUNICATION_FAILED.getMsg(), e); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java index 3ca561707e1a..36d99edf6273 100644 --- 
a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java @@ -274,6 +274,12 @@ public void replTableWriteIdState(String validWriteIdList, String dbName, String // No-op } + @Override + public void replTableWriteIdState(String validWriteIdList, String catName, String dbName, String tableName, List partNames) + throws LockException { + // No-op + } + @Override public Map getReplayedTxnsForPolicy(String replPolicy) throws LockException { // No-op diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java index 75a7847b9824..a1ce3a7f169e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java @@ -103,9 +103,22 @@ public interface HiveTxnManager { * @param partNames List of partitions being written. * @throws LockException in case of failure. */ + @Deprecated void replTableWriteIdState(String validWriteIdList, String dbName, String tableName, List partNames) throws LockException; + /** + * Replicate Table Write Ids state to mark aborted write ids and writeid high water mark. + * @param validWriteIdList Snapshot of writeid list when the table/partition is dumped. + * @param catName Catalog name + * @param dbName Database name + * @param tableName Table which is written. + * @param partNames List of partitions being written. + * @throws LockException in case of failure. + */ + void replTableWriteIdState(String validWriteIdList, String catName, String dbName, String tableName, List partNames) + throws LockException; + /** * Returns the transaction coordinator managed by this transaction manager. *

diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index 9beeb20f1c24..e94e077a63da 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -1096,10 +1096,10 @@ public void alterDataConnector(String dcName, DataConnector connector) } } - public void updateCreationMetadata(String dbName, String tableName, MaterializedViewMetadata metadata) + public void updateCreationMetadata(String catName, String dbName, String tableName, MaterializedViewMetadata metadata) throws HiveException { try { - getMSC().updateCreationMetadata(dbName, tableName, metadata.creationMetadata); + getMSC().updateCreationMetadata(catName, dbName, tableName, metadata.creationMetadata); } catch (TException e) { throw new HiveException("Unable to update creation metadata " + e.getMessage(), e); } @@ -3525,7 +3525,7 @@ public Map, Partition> loadDynamicPartitions(final LoadTable List partNames = result.values().stream().map(Partition::getName).collect(Collectors.toList()); getMSC().addDynamicPartitions(parentSession.getTxnMgr().getCurrentTxnId(), writeId, - tbl.getDbName(), tbl.getTableName(), partNames, + tbl.getCatName(), tbl.getDbName(), tbl.getTableName(), partNames, AcidUtils.toDataOperationType(operation)); } LOG.info("Loaded " + result.size() + "partitionsToAdd"); @@ -3986,6 +3986,7 @@ public static void addWriteNotificationLog(HiveConf conf, Table tbl, List getSourceTableFullNames() { private Set toFullTableNames(Collection sourceTables) { return unmodifiableSet(sourceTables.stream() - .map(sourceTable -> TableName.getDbTable( - sourceTable.getTable().getDbName(), sourceTable.getTable().getTableName())) + .map(sourceTable -> TableName.getQualified( + sourceTable.getTable().getCatName(), sourceTable.getTable().getDbName(), + sourceTable.getTable().getTableName())) .collect(Collectors.toSet())); } @@ -96,8 +97,8 @@ public Collection 
getSourceTables() { */ public MaterializationSnapshot getSnapshot() { if (creationMetadata.getValidTxnList() == null || creationMetadata.getValidTxnList().isEmpty()) { - LOG.debug("Could not obtain materialization snapshot of materialized view {}.{}", - creationMetadata.getDbName(), creationMetadata.getTblName()); + LOG.debug("Could not obtain materialization snapshot of materialized view {}.{}.{}", + creationMetadata.getCatName(), creationMetadata.getDbName(), creationMetadata.getTblName()); return null; } return MaterializationSnapshot.fromJson(creationMetadata.getValidTxnList()); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java index 986dcb7fcbbb..21c569f65181 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java @@ -522,14 +522,14 @@ public static TableName getQualifiedTableName(ASTNode tabNameNode, String catalo throw new SemanticException(ASTErrorUtils.getMsg( ErrorMsg.OBJECTNAME_CONTAINS_DOT.getMsg(), tabNameNode)); } - return HiveTableName.ofNullable(tableName, dbName); + return HiveTableName.ofNullable(catalogName, tableName, dbName, null); } final String tableName = unescapeIdentifier(tabNameNode.getChild(0).getText()); if (tableName.contains(".")) { throw new SemanticException(ASTErrorUtils.getMsg( ErrorMsg.OBJECTNAME_CONTAINS_DOT.getMsg(), tabNameNode)); } - return HiveTableName.ofNullable(tableName); + return HiveTableName.ofNullable(catalogName, tableName, SessionState.get().getCurrentDatabase(), null); } /** @@ -2134,7 +2134,7 @@ protected String getFullTableNameForSQL(ASTNode n) throws SemanticException { switch (n.getType()) { case HiveParser.TOK_TABNAME: TableName tableName = getQualifiedTableName(n); - return HiveTableName.ofNullable(HiveUtils.unparseIdentifier(tableName.getTable(), this.conf), + return 
HiveTableName.ofNullable(HiveUtils.unparseIdentifier(tableName.getCat(), this.conf), HiveUtils.unparseIdentifier(tableName.getTable(), this.conf), HiveUtils.unparseIdentifier(tableName.getDb(), this.conf), tableName.getTableMetaRef()).getNotEmptyDbTable(); case HiveParser.TOK_TABREF: return getFullTableNameForSQL((ASTNode) n.getChild(0)); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveTableName.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveTableName.java index 20965b0b2ad5..e64af1a06546 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveTableName.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveTableName.java @@ -38,7 +38,7 @@ public HiveTableName(String catName, String dbName, String tableName) { * @throws SemanticException */ public static TableName of(Table table) throws SemanticException { - return ofNullable(table.getTableName(), table.getDbName(), table.getSnapshotRef()); + return ofNullable(table.getCatName(), table.getTableName(), table.getDbName(), table.getSnapshotRef()); } /** @@ -48,60 +48,19 @@ public static TableName of(Table table) throws SemanticException { * @throws SemanticException */ public static void setFrom(String dbTable, Table table) throws SemanticException{ - TableName name = ofNullable(dbTable); + TableName name = ofNullable(SessionState.get().getCurrentCatalog(), dbTable, + SessionState.get().getCurrentDatabase(), null); table.setTableName(name.getTable()); table.setDbName(name.getDb()); + table.setCatalogName(name.getCat()); } - /** - * Accepts qualified name which is in the form of table, dbname.tablename or catalog.dbname.tablename and returns a - * {@link TableName}. All parts can be null. 
- * - * @param dbTableName - * @return a {@link TableName} - * @throws SemanticException - * @deprecated use {@link #of(String)} or {@link #fromString(String, String, String)} - */ - // to be @Deprecated - public static TableName ofNullable(String dbTableName) throws SemanticException { - return ofNullable(dbTableName, SessionState.get().getCurrentDatabase()); - } - - /** - * Accepts qualified name which is in the form of table, dbname.tablename or catalog.dbname.tablename and returns a - * {@link TableName}. All parts can be null. This method won't try to find the default db based on the session state. - * - * @param dbTableName - * @return a {@link TableName} - * @throws SemanticException - * @deprecated use {@link #of(String)} or {@link #fromString(String, String, String)} - */ - // to be @Deprecated - public static TableName ofNullableWithNoDefault(String dbTableName) throws SemanticException { - return ofNullable(dbTableName, null); - } - - /** - * Accepts qualified name which is in the form of table, dbname.tablename or catalog.dbname.tablename and returns a - * {@link TableName}. All parts can be null. 
- * - * @param dbTableName - * @param defaultDb - * @return a {@link TableName} - * @throws SemanticException - * @deprecated use {@link #of(String)} or {@link #fromString(String, String, String)} - */ - // to be @Deprecated - public static TableName ofNullable(String dbTableName, String defaultDb) throws SemanticException { - return ofNullable(dbTableName, defaultDb, null); - } - - public static TableName ofNullable(String dbTableName, String defaultDb, String tableMetaRef) throws SemanticException { + public static TableName ofNullable(String catName, String dbTableName, String defaultDb, String tableMetaRef) throws SemanticException { if (dbTableName == null) { return new TableName(null, null, null); } else { try { - return fromString(dbTableName, SessionState.get().getCurrentCatalog(), defaultDb, tableMetaRef); + return fromString(dbTableName, catName, defaultDb, tableMetaRef); } catch (IllegalArgumentException e) { throw new SemanticException(e); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java index 4d4956fbec13..b3731d16a592 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java @@ -269,6 +269,7 @@ public static boolean prepareImport(boolean isImportCmd, replicationSpec.setReplSpecType(ReplicationSpec.Type.IMPORT); } + String catname = rv.getTable().getCatName(); String dbname = rv.getTable().getDbName(); if ((overrideDBName != null) && (!overrideDBName.isEmpty())) { // If the parsed statement contained a db.tablename specification, prefer that. 
@@ -280,7 +281,7 @@ public static boolean prepareImport(boolean isImportCmd, ImportTableDesc tblDesc; org.apache.hadoop.hive.metastore.api.Table tblObj = rv.getTable(); try { - tblDesc = getBaseCreateTableDescFromTable(dbname, tblObj); + tblDesc = getBaseCreateTableDescFromTable(catname, dbname, tblObj); } catch (Exception e) { throw new HiveException(e); } @@ -322,7 +323,7 @@ public static boolean prepareImport(boolean isImportCmd, for (Partition partition : partitions) { // TODO: this should ideally not create AddPartitionDesc per partition AlterTableAddPartitionDesc partsDesc = - getBaseAddPartitionDescFromPartition(fromPath, dbname, tblDesc, partition, + getBaseAddPartitionDescFromPartition(fromPath, catname, dbname, tblDesc, partition, replicationSpec, x.getConf()); partitionDescs.add(partsDesc); } @@ -390,7 +391,7 @@ public static boolean prepareImport(boolean isImportCmd, return tableExists; } - private static AlterTableAddPartitionDesc getBaseAddPartitionDescFromPartition(Path fromPath, String dbName, + private static AlterTableAddPartitionDesc getBaseAddPartitionDescFromPartition(Path fromPath, String catName, String dbName, ImportTableDesc tblDesc, Partition partition, ReplicationSpec replicationSpec, HiveConf conf) throws MetaException, SemanticException { Map partitionSpec = EximUtil.makePartSpec(tblDesc.getPartCols(), partition.getValues()); @@ -414,14 +415,14 @@ private static AlterTableAddPartitionDesc getBaseAddPartitionDescFromPartition(P partitionSpec, location, partition.getParameters(), sd.getInputFormat(), sd.getOutputFormat(), sd.getNumBuckets(), sd.getCols(), sd.getSerdeInfo().getSerializationLib(), sd.getSerdeInfo().getParameters(), sd.getBucketCols(), sd.getSortCols(), null, writeId); - return new AlterTableAddPartitionDesc(dbName, tblDesc.getTableName(), true, ImmutableList.of(partitionDesc)); + return new AlterTableAddPartitionDesc(catName, dbName, tblDesc.getTableName(), true, ImmutableList.of(partitionDesc)); } - private static 
ImportTableDesc getBaseCreateTableDescFromTable(String dbName, + private static ImportTableDesc getBaseCreateTableDescFromTable(String catName, String dbName, org.apache.hadoop.hive.metastore.api.Table tblObj) throws Exception { Table table = new Table(tblObj); - return new ImportTableDesc(dbName, table); + return new ImportTableDesc(catName, dbName, table); } private static Task loadTable(URI fromURI, ImportTableDesc tblDesc, boolean replace, Path tgtPath, diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java index a31be8bb2065..86a718cbe885 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java @@ -361,6 +361,7 @@ private void analyzeReplLoad(ASTNode ast) throws SemanticException { ReplicationMetricCollector metricCollector = initReplicationLoadMetricCollector(loadPath.toString(), replScope.getDbName(), dmd); ReplLoadWork replLoadWork = new ReplLoadWork(conf, loadPath.toString(), sourceDbNameOrPattern, + replScope.getCatName(), replScope.getDbName(), dmd.getReplScope(), queryState.getLineageState(), evDump, dmd.getEventTo(), dmd.getDumpExecutionId(), diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index 5c4f049f0350..1de98a243c7a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -7431,8 +7431,8 @@ private Operator genMaterializedViewDataOrgPlan(List sortColInfos, L return result; } - private void setStatsForNonNativeTable(String dbName, String tableName) throws SemanticException { - TableName qTableName = HiveTableName.ofNullable(tableName, dbName); + private void setStatsForNonNativeTable(String catName, String dbName, String 
tableName) throws SemanticException { + TableName qTableName = HiveTableName.ofNullable(catName, tableName, dbName, null); Map mapProp = new HashMap<>(); mapProp.put(StatsSetupConst.COLUMN_STATS_ACCURATE, null); AlterTableUnsetPropertiesDesc alterTblDesc = new AlterTableUnsetPropertiesDesc(qTableName, null, null, false, @@ -7665,7 +7665,7 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) } else { // This is a non-native table. // We need to set stats as inaccurate. - setStatsForNonNativeTable(destinationTable.getDbName(), destinationTable.getTableName()); + setStatsForNonNativeTable(destinationTable.getCatName(), destinationTable.getDbName(), destinationTable.getTableName()); // true if it is insert overwrite. boolean overwrite = !qb.getParseInfo().isInsertIntoTable(destinationTable.getDbName(), destinationTable.getTableName(), destinationTable.getSnapshotRef()); @@ -8099,7 +8099,8 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) } else { // This is a non-native table. // We need to set stats as inaccurate. 
- setStatsForNonNativeTable(tableDescriptor.getDbName(), tableDescriptor.getTableName()); + setStatsForNonNativeTable(tableDescriptor.getCatName(), tableDescriptor.getDbName(), + tableDescriptor.getTableName()); ltd = new LoadTableDesc(queryTmpdir, tableDescriptor, dpCtx.getPartSpec()); ltd.setInsertOverwrite(false); ltd.setLoadFileType(LoadFileType.KEEP_EXISTING); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/CommitTxnHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/CommitTxnHandler.java index 7ec56e818dec..7e8c2a8e56d6 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/CommitTxnHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/CommitTxnHandler.java @@ -109,8 +109,10 @@ private void createDumpFileForTable(Context withinContext, org.apache.hadoop.hiv } private List getAllWriteEventInfo(Context withinContext) throws Exception { + String contextCatName = StringUtils.normalizeIdentifier(withinContext.replScope.getCatName()); String contextDbName = StringUtils.normalizeIdentifier(withinContext.replScope.getDbName()); GetAllWriteEventInfoRequest request = new GetAllWriteEventInfoRequest(eventMessage.getTxnId()); + request.setCatName(contextCatName); request.setDbName(contextDbName); List writeEventInfoList = withinContext.db.getMSC().getAllWriteEventInfo(request); return ((writeEventInfoList == null) diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AbortTxnHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AbortTxnHandler.java index 6cd3965757ab..2a09d414186c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AbortTxnHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AbortTxnHandler.java @@ -58,7 +58,7 @@ public List> handle(Context context) } Task abortTxnTask = TaskFactory.get( - new ReplTxnWork(HiveUtils.getReplPolicy(context.dbName), context.dbName, null, 
+ new ReplTxnWork(HiveUtils.getReplPolicy(context.dbName), context.catName, context.dbName, null, msg.getTxnId(), ReplTxnWork.OperationType.REPL_ABORT_TXN, context.eventOnlyReplicationSpec(), context.getDumpDirectory(), context.getMetricCollector()), context.hiveConf diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AllocWriteIdHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AllocWriteIdHandler.java index d62a6692bfc4..17f6f87b478c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AllocWriteIdHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AllocWriteIdHandler.java @@ -44,13 +44,14 @@ public List> handle(Context context) AllocWriteIdMessage msg = deserializer.getAllocWriteIdMessage(context.dmd.getPayload()); + String catName = (context.catName != null && !context.catName.isEmpty() ? context.catName : msg.getCat()); String dbName = (context.dbName != null && !context.dbName.isEmpty() ? context.dbName : msg.getDB()); // We need table name for alloc write id and that is received from source. String tableName = msg.getTableName(); // Repl policy should be created based on the table name in context. 
- ReplTxnWork work = new ReplTxnWork(HiveUtils.getReplPolicy(context.dbName), dbName, tableName, + ReplTxnWork work = new ReplTxnWork(HiveUtils.getReplPolicy(context.dbName), catName, dbName, tableName, ReplTxnWork.OperationType.REPL_ALLOC_WRITE_ID, msg.getTxnToWriteIdList(), context.eventOnlyReplicationSpec(), context.getDumpDirectory(), context.getMetricCollector()); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/CommitTxnHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/CommitTxnHandler.java index 65329a8f0d31..1b9f1bfb50aa 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/CommitTxnHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/CommitTxnHandler.java @@ -54,6 +54,7 @@ public List> handle(Context context) int numEntry = (msg.getTables() == null ? 0 : msg.getTables().size()); List> tasks = new ArrayList<>(); String dbName = context.dbName; + String catName = context.catName; String tableNamePrev = null; String tblName = null; @@ -68,7 +69,7 @@ public List> handle(Context context) context.getMetricCollector().setSrcTimeInProgress(msg.getTimestamp()); } - ReplTxnWork work = new ReplTxnWork(HiveUtils.getReplPolicy(context.dbName), context.dbName, + ReplTxnWork work = new ReplTxnWork(HiveUtils.getReplPolicy(context.dbName), context.catName, context.dbName, null, msg.getTxnId(), ReplTxnWork.OperationType.REPL_COMMIT_TXN, context.eventOnlyReplicationSpec(), context.getDumpDirectory(), context.getMetricCollector()); @@ -82,6 +83,7 @@ public List> handle(Context context) for (int idx = 0; idx < numEntry; idx++) { String actualTblName = msg.getTables().get(idx); String actualDBName = msg.getDatabases().get(idx); + String actualCatName = msg.getCatalogs().get(idx); String completeName = Table.getCompleteName(actualDBName, actualTblName); // One import task per table. 
Events for same table are kept together in one dump directory during dump and are @@ -90,9 +92,10 @@ public List> handle(Context context) // The data location is created by source, so the location should be formed based on the table name in msg. Path location = HiveUtils.getDumpPath(new Path(context.location), actualDBName, actualTblName); tblName = actualTblName; + catName = (context.isCatNameEmpty() ? actualCatName : context.catName); // for warehouse level dump, use db name from write event dbName = (context.isDbNameEmpty() ? actualDBName : context.dbName); - Context currentContext = new Context(context, dbName, + Context currentContext = new Context(context, catName, dbName, context.getDumpDirectory(), context.getMetricCollector()); currentContext.setLocation(location.toUri().toString()); @@ -108,6 +111,7 @@ public List> handle(Context context) try { WriteEventInfo writeEventInfo = new WriteEventInfo(msg.getWriteIds().get(idx), dbName, tblName, msg.getFiles(idx)); + writeEventInfo.setCatalog(catName); if (msg.getPartitions().get(idx) != null && !msg.getPartitions().get(idx).isEmpty()) { writeEventInfo.setPartition(msg.getPartitions().get(idx)); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropConstraintHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropConstraintHandler.java index 70299f1b5d6a..14249b8b1c28 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropConstraintHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropConstraintHandler.java @@ -35,9 +35,10 @@ public class DropConstraintHandler extends AbstractMessageHandler { public List> handle(Context context) throws SemanticException { DropConstraintMessage msg = deserializer.getDropConstraintMessage(context.dmd.getPayload()); + final String actualCatName = context.isCatNameEmpty() ? msg.getCat() : context.catName; final String actualDbName = context.isDbNameEmpty() ? 
msg.getDB() : context.dbName; final String actualTblName = msg.getTable(); - final TableName tName = HiveTableName.ofNullable(actualTblName, actualDbName); + final TableName tName = HiveTableName.ofNullable(actualCatName, actualTblName, actualDbName, null); String constraintName = msg.getConstraint(); AlterTableDropConstraintDesc dropConstraintsDesc = diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropPartitionHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropPartitionHandler.java index 448cb2f471aa..55967c1fc8f1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropPartitionHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropPartitionHandler.java @@ -39,13 +39,15 @@ public List> handle(Context context) throws SemanticException { try { DropPartitionMessage msg = deserializer.getDropPartitionMessage(context.dmd.getPayload()); + String actualCatName = context.isCatNameEmpty() ? msg.getCat() : context.catName; String actualDbName = context.isDbNameEmpty() ? 
msg.getDB() : context.dbName; String actualTblName = msg.getTable(); Map> partSpecs = ReplUtils.genPartSpecs(new Table(msg.getTableObj()), msg.getPartitions()); if (partSpecs.size() > 0) { AlterTableDropPartitionDesc dropPtnDesc = - new AlterTableDropPartitionDesc(HiveTableName.ofNullable(actualTblName, actualDbName), partSpecs, true, + new AlterTableDropPartitionDesc(HiveTableName.ofNullable(actualCatName, actualTblName, + actualDbName, null), partSpecs, true, context.eventOnlyReplicationSpec()); Task dropPtnTask = TaskFactory.get(new DDLWork(readEntitySet, writeEntitySet, dropPtnDesc, true, context.getDumpDirectory(), context.getMetricCollector()), context.hiveConf diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/InsertHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/InsertHandler.java index 611c4863b88f..d7128f5cff5b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/InsertHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/InsertHandler.java @@ -55,7 +55,8 @@ public List> handle(Context withinContext) InsertMessage insertMessage = deserializer.getInsertMessage(withinContext.dmd.getPayload()); String actualDbName = withinContext.isDbNameEmpty() ? insertMessage.getDB() : withinContext.dbName; - Context currentContext = new Context(withinContext, actualDbName, + String actualCatName = withinContext.isCatNameEmpty() ? 
insertMessage.getCat() : withinContext.catName; + Context currentContext = new Context(withinContext, actualCatName, actualDbName, withinContext.getDumpDirectory(), withinContext.getMetricCollector()); // Piggybacking in Import logic for now diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/MessageHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/MessageHandler.java index 57d62b3a66cc..065916e2cf23 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/MessageHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/MessageHandler.java @@ -48,6 +48,7 @@ public interface MessageHandler { class Context { public String location; + public final String catName; public final String dbName; public final Task precursor; public DumpMetaData dmd; @@ -58,9 +59,10 @@ class Context { String dumpDirectory; private transient ReplicationMetricCollector metricCollector; - public Context(String dbName, String location, + public Context(String catName, String dbName, String location, Task precursor, DumpMetaData dmd, HiveConf hiveConf, Hive db, org.apache.hadoop.hive.ql.Context nestedContext, Logger log) { + this.catName = catName; this.dbName = dbName; this.location = location; this.precursor = precursor; @@ -71,10 +73,11 @@ public Context(String dbName, String location, this.log = log; } - public Context(String dbName, String location, + public Context(String catName, String dbName, String location, Task precursor, DumpMetaData dmd, HiveConf hiveConf, Hive db, org.apache.hadoop.hive.ql.Context nestedContext, Logger log, String dumpDirectory, ReplicationMetricCollector metricCollector) { + this.catName = catName; this.dbName = dbName; this.location = location; this.precursor = precursor; @@ -87,7 +90,8 @@ public Context(String dbName, String location, this.metricCollector = metricCollector; } - public Context(Context other, String dbName) { + public Context(Context other, String catName, 
String dbName) { + this.catName = catName; this.dbName = dbName; this.location = other.location; this.precursor = other.precursor; @@ -98,7 +102,8 @@ public Context(Context other, String dbName) { this.log = other.log; } - public Context(Context other, String dbName, String dumpDirectory, ReplicationMetricCollector metricCollector) { + public Context(Context other, String catName, String dbName, String dumpDirectory, ReplicationMetricCollector metricCollector) { + this.catName = catName; this.dbName = dbName; this.location = other.location; this.precursor = other.precursor; @@ -115,6 +120,10 @@ public boolean isDbNameEmpty() { return StringUtils.isEmpty(dbName); } + public boolean isCatNameEmpty() { + return StringUtils.isEmpty(catName); + } + /** * not sure why we have this, this should always be read from the _metadata file via the * {@link org.apache.hadoop.hive.ql.parse.repl.load.MetadataJson#readReplicationSpec} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/OpenTxnHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/OpenTxnHandler.java index dc61814b5b5e..77749a1f9efd 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/OpenTxnHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/OpenTxnHandler.java @@ -43,7 +43,7 @@ public List> handle(Context context) OpenTxnMessage msg = deserializer.getOpenTxnMessage(context.dmd.getPayload()); Task openTxnTask = TaskFactory.get( - new ReplTxnWork(HiveUtils.getReplPolicy(context.dbName), context.dbName, null, + new ReplTxnWork(HiveUtils.getReplPolicy(context.dbName), context.catName, context.dbName, null, msg.getTxnIds(), ReplTxnWork.OperationType.REPL_OPEN_TXN, context.eventOnlyReplicationSpec(), context.getDumpDirectory(), context.getMetricCollector()), context.hiveConf diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TableHandler.java 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TableHandler.java index 59188cd71477..c8e3992864e5 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TableHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TableHandler.java @@ -68,7 +68,7 @@ public List> handle(Context context) throws SemanticException { // REPL LOAD is not partition level. It is always DB or table level. So, passing null for partition specs. if (TableType.VIRTUAL_VIEW.name().equals(rv.getTable().getTableType())) { - importTasks.add(ReplLoadTask.createViewTask(rv, context.dbName, context.hiveConf, + importTasks.add(ReplLoadTask.createViewTask(rv, context.catName, context.dbName, context.hiveConf, context.getDumpDirectory(), context.getMetricCollector())); } else { ImportSemanticAnalyzer.prepareImport(false, isLocationSet, isExternal, false, diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java index 708e242694bc..a1fae4f3b2f1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java @@ -45,16 +45,18 @@ * */ public class ImportTableDesc { + private String catName = null; private String dbName = null; private CreateTableDesc createTblDesc = null; - public ImportTableDesc(String dbName, Table table) throws Exception { + public ImportTableDesc(String catName, String dbName, Table table) throws Exception { if (table.getTableType() == TableType.VIRTUAL_VIEW || table.getTableType() == TableType.MATERIALIZED_VIEW) { throw new IllegalStateException("Trying to import view or materialized view: " + table.getTableName()); } + this.catName = catName; this.dbName = dbName; - TableName tableName = HiveTableName.ofNullable(table.getTableName(), dbName); + TableName tableName = HiveTableName.ofNullable(catName, table.getTableName(), dbName, null); this.createTblDesc = new 
CreateTableDesc(tableName, false, // isExternal: set to false here, can be overwritten by the IMPORT stmt @@ -114,6 +116,10 @@ public String getTableName() throws SemanticException { return createTblDesc.getFullTableName().getTable(); } + public String getCatName() { + return createTblDesc.getFullTableName().getCat(); + } + public List getPartCols() { return createTblDesc.getPartCols(); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ReplTxnWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ReplTxnWork.java index 2d2dc3f7cf08..35230becd2a9 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ReplTxnWork.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ReplTxnWork.java @@ -38,6 +38,7 @@ public class ReplTxnWork implements Serializable { private static final long serialVersionUID = 1L; private String replPolicy; + private String catName; private String dbName; private String tableName; private List partNames; @@ -61,9 +62,10 @@ public enum OperationType { OperationType operation; - public ReplTxnWork(String replPolicy, String dbName, String tableName, List txnIds, OperationType type, + public ReplTxnWork(String replPolicy, String catName, String dbName, String tableName, List txnIds, OperationType type, List txnToWriteIdList, ReplicationSpec replicationSpec) { this.txnIds = txnIds; + this.catName = catName; this.dbName = dbName; this.tableName = tableName; this.operation = type; @@ -74,45 +76,46 @@ public ReplTxnWork(String replPolicy, String dbName, String tableName, List txnIds, OperationType type, + public ReplTxnWork(String replPolicy, String catName, String dbName, String tableName, List txnIds, OperationType type, ReplicationSpec replicationSpec) { - this(replPolicy, dbName, tableName, txnIds, type, null, replicationSpec); + this(replPolicy, catName, dbName, tableName, txnIds, type, null, replicationSpec); } - public ReplTxnWork(String replPolicy, String dbName, String tableName, List txnIds, OperationType type, + public ReplTxnWork(String 
replPolicy, String catName, String dbName, String tableName, List txnIds, OperationType type, ReplicationSpec replicationSpec, String dumpDirectory, ReplicationMetricCollector metricCollector) { - this(replPolicy, dbName, tableName, txnIds, type, null, replicationSpec); + this(replPolicy, catName, dbName, tableName, txnIds, type, null, replicationSpec); this.dumpDirectory = dumpDirectory; this.metricCollector = metricCollector; } - public ReplTxnWork(String replPolicy, String dbName, String tableName, Long txnId, + public ReplTxnWork(String replPolicy, String catName, String dbName, String tableName, Long txnId, OperationType type, ReplicationSpec replicationSpec) { - this(replPolicy, dbName, tableName, Collections.singletonList(txnId), type, null, replicationSpec); + this(replPolicy, catName, dbName, tableName, Collections.singletonList(txnId), type, null, replicationSpec); } - public ReplTxnWork(String replPolicy, String dbName, String tableName, Long txnId, + public ReplTxnWork(String replPolicy, String catName, String dbName, String tableName, Long txnId, OperationType type, ReplicationSpec replicationSpec, String dumpDirectory, ReplicationMetricCollector metricCollector) { - this(replPolicy, dbName, tableName, Collections.singletonList(txnId), type, null, replicationSpec); + this(replPolicy, catName, dbName, tableName, Collections.singletonList(txnId), type, null, replicationSpec); this.dumpDirectory = dumpDirectory; this.metricCollector = metricCollector; } - public ReplTxnWork(String replPolicy, String dbName, String tableName, OperationType type, + public ReplTxnWork(String replPolicy, String catName, String dbName, String tableName, OperationType type, List txnToWriteIdList, ReplicationSpec replicationSpec) { - this(replPolicy, dbName, tableName, null, type, txnToWriteIdList, replicationSpec); + this(replPolicy, catName, dbName, tableName, null, type, txnToWriteIdList, replicationSpec); } - public ReplTxnWork(String replPolicy, String dbName, String 
tableName, OperationType type, + public ReplTxnWork(String replPolicy, String catName, String dbName, String tableName, OperationType type, List txnToWriteIdList, ReplicationSpec replicationSpec, String dumpDirectory, ReplicationMetricCollector metricCollector) { - this(replPolicy, dbName, tableName, null, type, txnToWriteIdList, replicationSpec); + this(replPolicy, catName, dbName, tableName, null, type, txnToWriteIdList, replicationSpec); this.dumpDirectory = dumpDirectory; this.metricCollector = metricCollector; } - public ReplTxnWork(String dbName, String tableName, List partNames, + public ReplTxnWork(String catName, String dbName, String tableName, List partNames, String validWriteIdList, OperationType type) { + this.catName = catName; this.dbName = dbName; this.tableName = tableName; this.partNames = partNames; @@ -120,9 +123,10 @@ public ReplTxnWork(String dbName, String tableName, List partNames, this.operation = type; } - public ReplTxnWork(String dbName, String tableName, List partNames, + public ReplTxnWork(String catName, String dbName, String tableName, List partNames, String validWriteIdList, OperationType type, String dumpDirectory, ReplicationMetricCollector metricCollector) { + this.catName = catName; this.dbName = dbName; this.tableName = tableName; this.partNames = partNames; @@ -143,6 +147,10 @@ public List getTxnIds() { return txnIds; } + public String getCatName() { + return catName; + } + public String getDbName() { return dbName; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/TableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/TableDesc.java index 58ce207c0c6c..33405932f79c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/TableDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/TableDesc.java @@ -177,6 +177,11 @@ public String getDbName() { return properties.getProperty(hive_metastoreConstants.META_TABLE_DB); } + @Explain(displayName = "catalog name", explainLevels = { Level.USER, Level.DEFAULT, 
Level.EXTENDED }) + public String getCatName() { + return properties.getProperty(hive_metastoreConstants.META_TABLE_CAT); + } + @Explain(displayName = "input format") public String getInputFileFormatClassName() { return getInputFileFormatClass().getName(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java index 60b14fa90f75..05f6bdce8ef5 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java @@ -130,6 +130,7 @@ public class TableScanDesc extends AbstractOperatorDesc implements IStatsGatherD // input file name (big) to bucket number private Map bucketFileNameMapping; + private String catName = null; private String dbName = null; private String tableName = null; @@ -182,6 +183,7 @@ public TableScanDesc(final String alias, List vcs, Table tblMetad this.tableMetadata = tblMetadata; if (tblMetadata != null) { + catName = tblMetadata.getCatName(); dbName = tblMetadata.getDbName(); tableName = tblMetadata.getTableName(); numBuckets = tblMetadata.getNumBuckets(); @@ -226,6 +228,11 @@ public String getDatabaseName() { return this.dbName; } + @Explain(displayName = "catalog", jsonOnly = true) + public String getCatalogName() { + return this.catName; + } + @Explain(displayName = "columns", jsonOnly = true) public List getColumnNamesForExplain() { return this.neededColumns; @@ -465,7 +472,7 @@ public boolean getIsMetadataOnly() { @Signature public String getQualifiedTable() { - return dbName + "." + tableName; + return catName + "." + dbName + "." 
+ tableName; } public Table getTableMetadata() { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUpdaterThread.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUpdaterThread.java index 56f40f860d0a..2c5d4ff332c8 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUpdaterThread.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUpdaterThread.java @@ -519,9 +519,8 @@ private List getTablesToCheck() throws MetaException, NoSuchObjectExc private ValidReaderWriteIdList getWriteIds( TableName fullTableName) throws NoSuchTxnException, MetaException { - // TODO: acid utils don't support catalogs GetValidWriteIdsRequest req = new GetValidWriteIdsRequest( - Lists.newArrayList(fullTableName.getDbTable())); + Lists.newArrayList(fullTableName.getQualified())); return TxnCommonUtils.createValidReaderWriteIdList( txnHandler.getValidWriteIds(req).getTblValidWriteIds().get(0)); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorUtil.java index f0953d385b55..a763bc1748a1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorUtil.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorUtil.java @@ -338,6 +338,7 @@ public static LockRequest createLockRequest(HiveConf conf, CompactionInfo ci, lo private static CompactionResponse requestCompaction(CompactionInfo ci, String runAs, String hostname, TxnStore txnHandler) throws MetaException { CompactionRequest compactionRequest = new CompactionRequest(ci.dbname, ci.tableName, ci.type); + compactionRequest.setCatName(ci.catName); if (ci.partName != null) compactionRequest.setPartitionname(ci.partName); compactionRequest.setRunas(runAs); @@ -463,8 +464,8 @@ private static CompactionType checkForCompaction(final CompactionInfo ci, final deltaSizes.put(delta.getPath(), getDirSize(fs, delta)); } long deltaSize = deltaSizes.values().stream().reduce(0L, Long::sum); - 
AcidMetricService.updateMetricsFromInitiator(ci.dbname, ci.tableName, ci.partName, conf, txnHandler, baseSize, - deltaSizes, acidDirectory.getObsolete()); + AcidMetricService.updateMetricsFromInitiator(ci.catName, ci.dbname, ci.tableName, ci.partName, + conf, txnHandler, baseSize, deltaSizes, acidDirectory.getObsolete()); if (runJobAsSelf(runAs)) { return determineCompactionType(ci, acidDirectory, tblProperties, baseSize, deltaSize, conf); @@ -492,7 +493,7 @@ private static ValidWriteIdList resolveValidWriteIds(Table t, TxnStore txnHandle throws NoSuchTxnException, MetaException { ValidTxnList validTxnList = ValidReadTxnList.fromValue(conf.get(ValidTxnList.VALID_TXNS_KEY)); // The response will have one entry per table and hence we get only one ValidWriteIdList - String fullTableName = TxnUtils.getFullTableName(t.getDbName(), t.getTableName()); + String fullTableName = TxnUtils.getFullTableName(t.getCatName(), t.getDbName(), t.getTableName()); GetValidWriteIdsRequest validWriteIdsRequest = new GetValidWriteIdsRequest(Collections.singletonList(fullTableName)); validWriteIdsRequest.setValidTxnList(validTxnList.writeToString()); @@ -538,7 +539,7 @@ public static CompactionResponse initiateCompactionForPartition(Table table, Par conf.set(ValidTxnList.VALID_TXNS_KEY, validTxnList.writeToString()); CompactionResponse compactionResponse; CompactionInfo compactionInfo = - new CompactionInfo(table.getDbName(), table.getTableName(), compactionRequest.getPartitionname(), + new CompactionInfo(table.getCatName(), table.getDbName(), table.getTableName(), compactionRequest.getPartitionname(), compactionRequest.getType()); compactionInfo.initiatorId = compactionRequest.getInitiatorId(); compactionInfo.orderByClause = compactionRequest.getOrderByClause(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java index 5dc40b2ba9fc..436635bb2c58 100644 --- 
a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java @@ -196,7 +196,7 @@ protected boolean isCacheEnabled() { protected String resolveUserToRunAs(Map cache, Table t, Partition p) throws IOException, InterruptedException { //Figure out who we should run the file operations as - String fullTableName = TxnUtils.getFullTableName(t.getDbName(), t.getTableName()); + String fullTableName = TxnUtils.getFullTableName(t.getCatName(), t.getDbName(), t.getTableName()); StorageDescriptor sd = CompactorUtil.resolveStorageDescriptor(t, p); String user = cache.get(fullTableName); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/handler/TaskHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/handler/TaskHandler.java index 16603f72615f..b6b3b6222ff9 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/handler/TaskHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/handler/TaskHandler.java @@ -111,7 +111,7 @@ protected Partition resolvePartition(String dbName, String tableName, String par protected ValidReaderWriteIdList getValidCleanerWriteIdList(CompactionInfo info, ValidTxnList validTxnList) throws Exception { List tblNames = Collections.singletonList( - TxnUtils.getFullTableName(info.dbname, info.tableName)); + TxnUtils.getFullTableName(info.catName, info.dbname, info.tableName)); GetValidWriteIdsRequest request = new GetValidWriteIdsRequest(tblNames); request.setValidTxnList(validTxnList.writeToString()); @@ -157,7 +157,7 @@ fs, path, getConf(), validWriteIdList, Ref.from(false), false, if (!deleted.isEmpty()) { AcidMetricService.updateMetricsFromCleaner( - info.dbname, info.tableName, info.partName, dir.getObsolete(), getConf(), + info.catName, info.dbname, info.tableName, info.partName, dir.getObsolete(), getConf(), txnHandler); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/service/AcidCompactionService.java 
b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/service/AcidCompactionService.java index ab1581c836e5..802af9f5df55 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/service/AcidCompactionService.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/service/AcidCompactionService.java @@ -141,7 +141,7 @@ public Boolean compact(Table table, CompactionInfo ci) throws Exception { } } - String fullTableName = TxnUtils.getFullTableName(table.getDbName(), table.getTableName()); + String fullTableName = TxnUtils.getFullTableName(table.getCatName(), table.getDbName(), table.getTableName()); // Find the partition we will be working with, if there is one. Partition p; @@ -257,7 +257,7 @@ public Boolean compact(Table table, CompactionInfo ci) throws Exception { msc.markCompacted(CompactionInfo.compactionInfoToStruct(ci)); compactionTxn.wasSuccessful(); - AcidMetricService.updateMetricsFromWorker(ci.dbname, ci.tableName, ci.partName, ci.type, + AcidMetricService.updateMetricsFromWorker(ci.catName, ci.dbname, ci.tableName, ci.partName, ci.type, dir.getCurrentDirectories().size(), dir.getDeleteDeltas().size(), conf, msc); } catch (Throwable e) { computeStats = false; @@ -359,7 +359,7 @@ void open(CompactionInfo ci) throws TException { lockId = res.getLockid(); CompactionHeartbeatService.getInstance(conf).startHeartbeat(txnId, lockId, - TxnUtils.getFullTableName(ci.dbname, ci.tableName)); + TxnUtils.getFullTableName(ci.catName, ci.dbname, ci.tableName)); } private LockRequest createLockRequest(CompactionInfo ci) { diff --git a/ql/src/test/org/apache/hadoop/hive/metastore/txn/TestCompactionTxnHandler.java b/ql/src/test/org/apache/hadoop/hive/metastore/txn/TestCompactionTxnHandler.java index 0dcbced33abe..83200c38ed49 100644 --- a/ql/src/test/org/apache/hadoop/hive/metastore/txn/TestCompactionTxnHandler.java +++ b/ql/src/test/org/apache/hadoop/hive/metastore/txn/TestCompactionTxnHandler.java @@ -96,6 +96,7 @@ public TestCompactionTxnHandler() throws 
Exception { @Test public void testFindNextToCompact() throws Exception { CompactionRequest rqst = new CompactionRequest("foo", "bar", CompactionType.MINOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); rqst.setPartitionname("ds=today"); txnHandler.compact(rqst); long now = System.currentTimeMillis(); @@ -114,6 +115,7 @@ public void testFindNextToCompact() throws Exception { List compacts = rsp.getCompacts(); assertEquals(1, compacts.size()); ShowCompactResponseElement c = compacts.get(0); + assertEquals(Warehouse.DEFAULT_CATALOG_NAME, c.getCatName()); assertEquals("foo", c.getDbname()); assertEquals("bar", c.getTablename()); assertEquals("ds=today", c.getPartitionname()); @@ -127,10 +129,12 @@ public void testFindNextToCompact() throws Exception { @Test public void testFindNextToCompact2() throws Exception { CompactionRequest rqst = new CompactionRequest("foo", "bar", CompactionType.MINOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); rqst.setPartitionname("ds=today"); txnHandler.compact(rqst); rqst = new CompactionRequest("foo", "bar", CompactionType.MINOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); rqst.setPartitionname("ds=yesterday"); txnHandler.compact(rqst); @@ -173,6 +177,7 @@ public void testFindNextToCompactNothingToCompact() throws Exception { @Test public void testMarkCompacted() throws Exception { CompactionRequest rqst = new CompactionRequest("foo", "bar", CompactionType.MINOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); rqst.setPartitionname("ds=today"); txnHandler.compact(rqst); CompactionInfo ci = txnHandler.findNextToCompact(aFindNextCompactRequest("fred", WORKER_VERSION)); @@ -187,6 +192,7 @@ public void testMarkCompacted() throws Exception { List compacts = rsp.getCompacts(); assertEquals(1, compacts.size()); ShowCompactResponseElement c = compacts.get(0); + assertEquals(Warehouse.DEFAULT_CATALOG_NAME, c.getCatName()); assertEquals("foo", c.getDbname()); assertEquals("bar", c.getTablename()); assertEquals("ds=today", 
c.getPartitionname()); @@ -198,6 +204,7 @@ public void testMarkCompacted() throws Exception { @Test public void testFindNextToClean() throws Exception { CompactionRequest rqst = new CompactionRequest("foo", "bar", CompactionType.MINOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); rqst.setPartitionname("ds=today"); txnHandler.compact(rqst); assertEquals(0, txnHandler.findReadyToClean(0, 0).size()); @@ -219,6 +226,7 @@ public void testFindNextToClean() throws Exception { List compacts = rsp.getCompacts(); assertEquals(1, compacts.size()); ShowCompactResponseElement c = compacts.get(0); + assertEquals(Warehouse.DEFAULT_CATALOG_NAME, c.getCatName()); assertEquals("foo", c.getDbname()); assertEquals("bar", c.getTablename()); assertEquals("ds=today", c.getPartitionname()); @@ -230,6 +238,7 @@ public void testFindNextToClean() throws Exception { @Test public void testMarkCleaned() throws Exception { CompactionRequest rqst = new CompactionRequest("foo", "bar", CompactionType.MINOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); rqst.setPartitionname("ds=today"); txnHandler.compact(rqst); assertEquals(0, txnHandler.findReadyToClean(0, 0).size()); @@ -261,6 +270,7 @@ public void testShowCompactions() throws Exception { final String tableName = "bar"; final String partitionName = "ds=today"; CompactionRequest rqst = new CompactionRequest(dbName, tableName, CompactionType.MINOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); rqst.setPartitionname(partitionName); txnHandler.compact(rqst); ShowCompactResponse showCompactResponse = txnHandler.showCompact(new ShowCompactRequest()); @@ -279,9 +289,10 @@ public void testGetLatestCommittedCompaction() throws Exception { final String dbName = "foo"; final String tableName = "bar"; final String errorMessage = "Dummy error"; - addSucceededCompaction(dbName, tableName, null, CompactionType.MINOR); - addFailedCompaction(dbName, tableName, CompactionType.MINOR, null, errorMessage); + 
addSucceededCompaction(Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, null, CompactionType.MINOR); + addFailedCompaction(Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, CompactionType.MINOR, null, errorMessage); GetLatestCommittedCompactionInfoRequest rqst = new GetLatestCommittedCompactionInfoRequest(); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); rqst.setDbname(dbName); rqst.setTablename(tableName); GetLatestCommittedCompactionInfoResponse response = txnHandler.getLatestCommittedCompactionInfo(rqst); @@ -312,9 +323,10 @@ public void testGetLatestCommittedCompactionPartition() throws Exception { final String tableName = "bar"; final String partitionName = "ds=today"; final String errorMessage = "Dummy error"; - addSucceededCompaction(dbName, tableName, partitionName, CompactionType.MINOR); - addFailedCompaction(dbName, tableName, CompactionType.MINOR, partitionName, errorMessage); + addSucceededCompaction(Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, partitionName, CompactionType.MINOR); + addFailedCompaction(Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, CompactionType.MINOR, partitionName, errorMessage); GetLatestCommittedCompactionInfoRequest rqst = new GetLatestCommittedCompactionInfoRequest(); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); rqst.setDbname(dbName); rqst.setTablename(tableName); rqst.addToPartitionnames(partitionName); @@ -328,7 +340,7 @@ public void testGetLatestCommittedCompactionPartition() throws Exception { assertEquals(CompactionType.MINOR, lci.getType()); final String anotherPartitionName = "ds=yesterday"; - addWaitingForCleaningCompaction(dbName, tableName, CompactionType.MINOR, anotherPartitionName, errorMessage); + addWaitingForCleaningCompaction(Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, CompactionType.MINOR, anotherPartitionName, errorMessage); rqst.addToPartitionnames(anotherPartitionName); response = txnHandler.getLatestCommittedCompactionInfo(rqst); @@ -360,9 +372,10 @@ public void 
testGetLatestSucceededCompaction() throws Exception { final String tableName = "bar"; final String partitionName = "ds=today"; final String errorMessage = "Dummy error"; - addSucceededCompaction(dbName, tableName, partitionName, CompactionType.MINOR); - addSucceededCompaction(dbName, tableName, partitionName, CompactionType.MINOR); + addSucceededCompaction(Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, partitionName, CompactionType.MINOR); + addSucceededCompaction(Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, partitionName, CompactionType.MINOR); GetLatestCommittedCompactionInfoRequest rqst = new GetLatestCommittedCompactionInfoRequest(); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); rqst.setDbname(dbName); rqst.setTablename(tableName); rqst.addToPartitionnames(partitionName); @@ -376,7 +389,7 @@ public void testGetLatestSucceededCompaction() throws Exception { assertEquals(partitionName, lci.getPartitionname()); assertEquals(CompactionType.MINOR, lci.getType()); - addWaitingForCleaningCompaction(dbName, tableName, CompactionType.MINOR, partitionName, errorMessage); + addWaitingForCleaningCompaction(Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, CompactionType.MINOR, partitionName, errorMessage); response = txnHandler.getLatestCommittedCompactionInfo(rqst); assertNotNull(response); @@ -392,9 +405,10 @@ public void testGetLatestCompactionWithIdFilter() throws Exception { final String dbName = "foo"; final String tableName = "bar"; final String partitionName = "ds=today"; - addSucceededCompaction(dbName, tableName, partitionName, CompactionType.MINOR); - addSucceededCompaction(dbName, tableName, partitionName, CompactionType.MINOR); + addSucceededCompaction(Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, partitionName, CompactionType.MINOR); + addSucceededCompaction(Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, partitionName, CompactionType.MINOR); GetLatestCommittedCompactionInfoRequest rqst = new GetLatestCommittedCompactionInfoRequest(); 
+ rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); rqst.setDbname(dbName); rqst.setTablename(tableName); rqst.addToPartitionnames(partitionName); @@ -430,13 +444,14 @@ public void testGetNoCompaction() throws Exception { final String tableName = "bar"; final String errorMessage = "Dummy error"; GetLatestCommittedCompactionInfoRequest rqst = new GetLatestCommittedCompactionInfoRequest(); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); rqst.setDbname(dbName); rqst.setTablename(tableName); GetLatestCommittedCompactionInfoResponse response = txnHandler.getLatestCommittedCompactionInfo(rqst); assertEquals(0, response.getCompactionsSize()); - addFailedCompaction(dbName, tableName, CompactionType.MINOR, null, errorMessage); + addFailedCompaction(Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, CompactionType.MINOR, null, errorMessage); response = txnHandler.getLatestCommittedCompactionInfo(rqst); assertEquals(0, response.getCompactionsSize()); @@ -451,6 +466,7 @@ public void testMarkFailed() throws Exception { final String status = "failed"; final String errorMessage = "Dummy error"; CompactionRequest rqst = new CompactionRequest(dbName, tableName, CompactionType.MINOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); rqst.setPartitionname(partitionName); txnHandler.compact(rqst); assertEquals(0, txnHandler.findReadyToClean(0, 0).size()); @@ -480,7 +496,7 @@ public void testMarkFailed() throws Exception { // Add more failed compactions so that the total is exactly COMPACTOR_INITIATOR_FAILED_THRESHOLD for (int i = 1; i < MetastoreConf.getIntVar(conf, COMPACTOR_INITIATOR_FAILED_THRESHOLD); i++) { - addFailedCompaction(dbName, tableName, CompactionType.MINOR, partitionName, errorMessage); + addFailedCompaction(Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, CompactionType.MINOR, partitionName, errorMessage); } // Now checkFailedCompactions() will return true assertTrue(txnHandler.checkFailedCompactions(ci)); @@ -489,22 +505,23 @@ public void testMarkFailed() 
throws Exception { assertFalse(txnHandler.checkFailedCompactions(ci)); MetastoreConf.setTimeVar(conf, COMPACTOR_INITIATOR_FAILED_RETRY_TIME, 7, TimeUnit.DAYS); // Check the output of show compactions - checkShowCompaction(dbName, tableName, partitionName, status, errorMessage); + checkShowCompaction(Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, partitionName, status, errorMessage); // Now add enough failed compactions to ensure purgeCompactionHistory() will attempt delete; // HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_DID_NOT_INITIATE is enough for this. // But we also want enough to tickle the code in TxnUtils.buildQueryWithINClauseStrings() // so that it produces multiple queries. For that we need at least 290. for (int i = 0 ; i < 300; i++) { - addFailedCompaction(dbName, tableName, CompactionType.MINOR, partitionName, errorMessage); + addFailedCompaction(Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, CompactionType.MINOR, partitionName, errorMessage); } - checkShowCompaction(dbName, tableName, partitionName, status, errorMessage); + checkShowCompaction(Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, partitionName, status, errorMessage); txnHandler.purgeCompactionHistory(); } - private void checkShowCompaction(String dbName, String tableName, String partition, + private void checkShowCompaction(String catName, String dbName, String tableName, String partition, String status, String errorMessage) throws MetaException { ShowCompactResponse showCompactResponse = txnHandler.showCompact(new ShowCompactRequest()); showCompactResponse.getCompacts().forEach(e -> { + assertEquals(catName, e.getCatName()); assertEquals(dbName, e.getDbname()); assertEquals(tableName, e.getTablename()); assertEquals(partition, e.getPartitionname()); @@ -513,11 +530,12 @@ private void checkShowCompaction(String dbName, String tableName, String partiti }); } - private void addFailedCompaction(String dbName, String tableName, CompactionType type, + private void 
addFailedCompaction(String catName, String dbName, String tableName, CompactionType type, String partitionName, String errorMessage) throws MetaException { CompactionRequest rqst; CompactionInfo ci; rqst = new CompactionRequest(dbName, tableName, type); + rqst.setCatName(catName); if (partitionName != null) { rqst.setPartitionname(partitionName); } @@ -528,8 +546,9 @@ private void addFailedCompaction(String dbName, String tableName, CompactionType txnHandler.markFailed(ci); } - private void addSucceededCompaction(String dbName, String tableName, String partitionName, CompactionType type) throws MetaException { + private void addSucceededCompaction(String catName, String dbName, String tableName, String partitionName, CompactionType type) throws MetaException { CompactionRequest rqst = new CompactionRequest(dbName, tableName, type); + rqst.setCatName(catName); CompactionInfo ci; if (partitionName != null) { rqst.setPartitionname(partitionName); @@ -540,9 +559,10 @@ private void addSucceededCompaction(String dbName, String tableName, String part txnHandler.markCleaned(ci); } - private void addWaitingForCleaningCompaction(String dbName, String tableName, CompactionType type, + private void addWaitingForCleaningCompaction(String catName, String dbName, String tableName, CompactionType type, String partitionName, String errorMessage) throws MetaException { CompactionRequest rqst = new CompactionRequest(dbName, tableName, type); + rqst.setCatName(catName); CompactionInfo ci; if (partitionName != null) { rqst.setPartitionname(partitionName); @@ -554,17 +574,17 @@ private void addWaitingForCleaningCompaction(String dbName, String tableName, Co txnHandler.markCompacted(ci); } - private void addDidNotInitiateCompaction(String dbName, String tableName, String partitionName, + private void addDidNotInitiateCompaction(String catName, String dbName, String tableName, String partitionName, CompactionType type, String errorMessage) throws MetaException { - CompactionInfo ci = new 
CompactionInfo(dbName, tableName, partitionName, type); + CompactionInfo ci = new CompactionInfo(catName, dbName, tableName, partitionName, type); ci.errorMessage = errorMessage; ci.id = 0; txnHandler.markFailed(ci); } - private void addRefusedCompaction(String dbName, String tableName, String partitionName, + private void addRefusedCompaction(String catName, String dbName, String tableName, String partitionName, CompactionType type, String errorMessage) throws MetaException { - CompactionInfo ci = new CompactionInfo(dbName, tableName, partitionName, type); + CompactionInfo ci = new CompactionInfo(catName, dbName, tableName, partitionName, type); ci.errorMessage = errorMessage; ci.id = 0; txnHandler.markRefused(ci); @@ -579,43 +599,44 @@ public void testPurgeCompactionHistory() throws Exception { MetastoreConf.setLongVar(conf, MetastoreConf.ConfVars.COMPACTOR_HISTORY_RETENTION_REFUSED, 2); txnHandler.setConf(conf); + String catName = Warehouse.DEFAULT_CATALOG_NAME; String dbName = "default"; String tableName = "tpch"; String part1 = "(p=1)"; String part2 = "(p=2)"; // 3 successful compactions on p=1 - addSucceededCompaction(dbName, tableName, part1, CompactionType.MAJOR); - addSucceededCompaction(dbName, tableName, part1, CompactionType.MAJOR); - addSucceededCompaction(dbName, tableName, part1, CompactionType.MAJOR); + addSucceededCompaction(Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, part1, CompactionType.MAJOR); + addSucceededCompaction(Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, part1, CompactionType.MAJOR); + addSucceededCompaction(Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, part1, CompactionType.MAJOR); // 3 failed on p=1 - addFailedCompaction(dbName, tableName, CompactionType.MAJOR, part1, "message"); - addFailedCompaction(dbName, tableName, CompactionType.MAJOR, part1, "message"); - addFailedCompaction(dbName, tableName, CompactionType.MAJOR, part1, "message"); + addFailedCompaction(Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, 
CompactionType.MAJOR, part1, "message"); + addFailedCompaction(Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, CompactionType.MAJOR, part1, "message"); + addFailedCompaction(Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, CompactionType.MAJOR, part1, "message"); //4 failed on p=2 - addFailedCompaction(dbName, tableName, CompactionType.MAJOR, part2, "message"); - addFailedCompaction(dbName, tableName, CompactionType.MAJOR, part2, "message"); - addFailedCompaction(dbName, tableName, CompactionType.MAJOR, part2, "message"); - addFailedCompaction(dbName, tableName, CompactionType.MAJOR, part2, "message"); + addFailedCompaction(Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, CompactionType.MAJOR, part2, "message"); + addFailedCompaction(Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, CompactionType.MAJOR, part2, "message"); + addFailedCompaction(Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, CompactionType.MAJOR, part2, "message"); + addFailedCompaction(Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, CompactionType.MAJOR, part2, "message"); // 3 did not initiate on p=1 - addDidNotInitiateCompaction(dbName, tableName, part1, CompactionType.MAJOR, "message"); - addDidNotInitiateCompaction(dbName, tableName, part1, CompactionType.MAJOR, "message"); - addDidNotInitiateCompaction(dbName, tableName, part1, CompactionType.MAJOR, "message"); + addDidNotInitiateCompaction(catName, dbName, tableName, part1, CompactionType.MAJOR, "message"); + addDidNotInitiateCompaction(catName, dbName, tableName, part1, CompactionType.MAJOR, "message"); + addDidNotInitiateCompaction(catName, dbName, tableName, part1, CompactionType.MAJOR, "message"); // 3 refused on p=1 - addRefusedCompaction(dbName, tableName, part1, CompactionType.MAJOR, "message"); - addRefusedCompaction(dbName, tableName, part1, CompactionType.MAJOR, "message"); - addRefusedCompaction(dbName, tableName, part1, CompactionType.MAJOR, "message"); + addRefusedCompaction(catName, dbName, tableName, part1, 
CompactionType.MAJOR, "message"); + addRefusedCompaction(catName, dbName, tableName, part1, CompactionType.MAJOR, "message"); + addRefusedCompaction(catName, dbName, tableName, part1, CompactionType.MAJOR, "message"); - countCompactionsInHistory(dbName, tableName, part1, 3, 3, 3, 3); - countCompactionsInHistory(dbName, tableName, part2, 0, 4, 0, 0); + countCompactionsInHistory(Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, part1, 3, 3, 3, 3); + countCompactionsInHistory(Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, part2, 0, 4, 0, 0); txnHandler.purgeCompactionHistory(); - countCompactionsInHistory(dbName, tableName, part1, 2, 2, 2, 2); - countCompactionsInHistory(dbName, tableName, part2, 0, 2, 0, 0); + countCompactionsInHistory(Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, part1, 2, 2, 2, 2); + countCompactionsInHistory(Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, part2, 0, 2, 0, 0); } @Test @@ -627,58 +648,60 @@ public void testPurgeCompactionHistoryTimeout() throws Exception { MetastoreConf.setTimeVar(conf, MetastoreConf.ConfVars.COMPACTOR_HISTORY_RETENTION_TIMEOUT, 1, TimeUnit.MILLISECONDS); txnHandler.setConf(conf); + String catName = Warehouse.DEFAULT_CATALOG_NAME; String dbName = "default"; String tableName = "tpch"; String part1 = "(p=1)"; - addFailedCompaction(dbName, tableName, CompactionType.MINOR, part1, "message"); - addDidNotInitiateCompaction(dbName, tableName, part1, CompactionType.MINOR, "message"); - addRefusedCompaction(dbName, tableName, part1, CompactionType.MINOR, "message"); - addSucceededCompaction(dbName, tableName, part1, CompactionType.MINOR); - addFailedCompaction(dbName, tableName, CompactionType.MINOR, part1, "message"); - addDidNotInitiateCompaction(dbName, tableName, part1, CompactionType.MINOR, "message"); - addRefusedCompaction(dbName, tableName, part1, CompactionType.MINOR, "message"); + addFailedCompaction(Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, CompactionType.MINOR, part1, "message"); + 
addDidNotInitiateCompaction(catName, dbName, tableName, part1, CompactionType.MINOR, "message"); + addRefusedCompaction(catName, dbName, tableName, part1, CompactionType.MINOR, "message"); + addSucceededCompaction(Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, part1, CompactionType.MINOR); + addFailedCompaction(Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, CompactionType.MINOR, part1, "message"); + addDidNotInitiateCompaction(catName, dbName, tableName, part1, CompactionType.MINOR, "message"); + addRefusedCompaction(catName, dbName, tableName, part1, CompactionType.MINOR, "message"); - countCompactionsInHistory(dbName, tableName, part1, 1, 2, 2, 2); + countCompactionsInHistory(Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, part1, 1, 2, 2, 2); txnHandler.purgeCompactionHistory(); // the oldest 3 compactions should be cleaned - countCompactionsInHistory(dbName, tableName, part1, 1, 1, 1, 1); + countCompactionsInHistory(Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, part1, 1, 1, 1, 1); - addSucceededCompaction(dbName, tableName, part1, CompactionType.MAJOR); + addSucceededCompaction(Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, part1, CompactionType.MAJOR); txnHandler.purgeCompactionHistory(); // only 2 succeeded compactions should be left - countCompactionsInHistory(dbName, tableName, part1, 2, 0, 0, 0); + countCompactionsInHistory(Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, part1, 2, 0, 0, 0); - addFailedCompaction(dbName, tableName, CompactionType.MAJOR, part1, "message"); - addDidNotInitiateCompaction(dbName, tableName, part1, CompactionType.MAJOR, "message"); - addRefusedCompaction(dbName, tableName, part1, CompactionType.MAJOR, "message"); - addSucceededCompaction(dbName, tableName, part1, CompactionType.MINOR); + addFailedCompaction(Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, CompactionType.MAJOR, part1, "message"); + addDidNotInitiateCompaction(catName, dbName, tableName, part1, CompactionType.MAJOR, "message"); + 
addRefusedCompaction(catName, dbName, tableName, part1, CompactionType.MAJOR, "message"); + addSucceededCompaction(Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, part1, CompactionType.MINOR); // succeeded minor compaction shouldn't cause cleanup, but the oldest succeeded will be cleaned up txnHandler.purgeCompactionHistory(); - countCompactionsInHistory(dbName, tableName, part1, 2, 1, 1, 1); + countCompactionsInHistory(Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, part1, 2, 1, 1, 1); - addFailedCompaction(dbName, tableName, CompactionType.MAJOR, part1, "message"); - addDidNotInitiateCompaction(dbName, tableName, part1, CompactionType.MAJOR, "message"); - addRefusedCompaction(dbName, tableName, part1, CompactionType.MAJOR, "message"); - addSucceededCompaction(dbName, tableName, part1, CompactionType.MAJOR); + addFailedCompaction(Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, CompactionType.MAJOR, part1, "message"); + addDidNotInitiateCompaction(catName, dbName, tableName, part1, CompactionType.MAJOR, "message"); + addRefusedCompaction(catName, dbName, tableName, part1, CompactionType.MAJOR, "message"); + addSucceededCompaction(Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, part1, CompactionType.MAJOR); // only 2 succeeded compactions should be left txnHandler.purgeCompactionHistory(); - countCompactionsInHistory(dbName, tableName, part1, 2, 0, 0, 0); - checkShowCompaction(dbName, tableName, part1, "succeeded", null); + countCompactionsInHistory(Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, part1, 2, 0, 0, 0); + checkShowCompaction(Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, part1, "succeeded", null); } - private void countCompactionsInHistory(String dbName, String tableName, String partition, + private void countCompactionsInHistory(String catName, String dbName, String tableName, String partition, int expectedSucceeded, int expectedFailed, int expectedDidNotInitiate, int expextedRefused) throws MetaException { ShowCompactResponse resp = 
txnHandler.showCompact(new ShowCompactRequest()); List filteredToPartition = resp.getCompacts().stream() - .filter(e -> e.getDbname().equals(dbName) && e.getTablename().equals(tableName) && + .filter(e -> e.getCatName().equals(catName) && e.getDbname().equals(dbName) && + e.getTablename().equals(tableName) && (partition == null || partition.equals(e.getPartitionname()))).collect(Collectors.toList()); assertEquals(expectedSucceeded, filteredToPartition.stream().filter(e -> e.getState().equals(TxnStore.SUCCEEDED_RESPONSE)).count()); @@ -690,6 +713,7 @@ private void countCompactionsInHistory(String dbName, String tableName, String p @Test public void testRevokeTimedOutWorkers() throws Exception { CompactionRequest rqst = new CompactionRequest("foo", "bar", CompactionType.MINOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); txnHandler.compact(rqst); rqst = new CompactionRequest("foo", "baz", CompactionType.MINOR); txnHandler.compact(rqst); @@ -755,6 +779,7 @@ public void testFindPotentialCompactions() throws Exception { //simulate prev failed compaction CompactionRequest rqst = new CompactionRequest("mydb", "mytable", CompactionType.MINOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); txnHandler.compact(rqst); CompactionInfo ci = txnHandler.findNextToCompact(aFindNextCompactRequest("fred", WORKER_VERSION)); txnHandler.markFailed(ci); @@ -770,7 +795,7 @@ public void testFindPotentialCompactions() throws Exception { public void testMarkCleanedCleansTxnsAndTxnComponents() throws Exception { long txnid = openTxn(); - long mytableWriteId = allocateTableWriteIds("mydb", "mytable", txnid); + long mytableWriteId = allocateTableWriteIds(Warehouse.DEFAULT_CATALOG_NAME, "mydb", "mytable", txnid); LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.DB, "mydb"); comp.setTablename("mytable"); @@ -796,7 +821,7 @@ public void testMarkCleanedCleansTxnsAndTxnComponents() txnHandler.abortTxn(new AbortTxnRequest(txnid)); txnid = openTxn(); - long fooWriteId 
= allocateTableWriteIds("mydb", "foo", txnid); + long fooWriteId = allocateTableWriteIds(Warehouse.DEFAULT_CATALOG_NAME, "mydb", "foo", txnid); comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.DB, "mydb"); comp.setTablename("foo"); @@ -825,6 +850,7 @@ public void testMarkCleanedCleansTxnsAndTxnComponents() // Now clean them and check that they are removed from the count. CompactionRequest rqst = new CompactionRequest("mydb", "mytable", CompactionType.MAJOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); txnHandler.compact(rqst); assertEquals(0, txnHandler.findReadyToClean(0, 0).size()); ci = txnHandler.findNextToCompact(aFindNextCompactRequest("fred", WORKER_VERSION)); @@ -852,6 +878,7 @@ public void testMarkCleanedCleansTxnsAndTxnComponents() assertEquals(3, txnList.getOpen_txnsSize()); rqst = new CompactionRequest("mydb", "foo", CompactionType.MAJOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); rqst.setPartitionname("bar"); txnHandler.compact(rqst); assertEquals(0, txnHandler.findReadyToClean(0, 0).size()); @@ -879,6 +906,7 @@ public void testMarkCleanedCleansTxnsAndTxnComponents() @Test public void addDynamicPartitions() throws Exception { + String catName = Warehouse.DEFAULT_CATALOG_NAME; String dbName = "default"; String tableName = "adp_table"; OpenTxnsResponse openTxns = txnHandler.openTxns(new OpenTxnRequest(1, "me", "localhost")); @@ -886,6 +914,7 @@ public void addDynamicPartitions() throws Exception { AllocateTableWriteIdsRequest rqst = new AllocateTableWriteIdsRequest(dbName, tableName); rqst.setTxnIds(openTxns.getTxn_ids()); + rqst.setCatName(catName); AllocateTableWriteIdsResponse writeIds = txnHandler.allocateTableWriteIds(rqst); long writeId = writeIds.getTxnToWriteIds().get(0).getWriteId(); assertEquals(txnId, writeIds.getTxnToWriteIds().get(0).getTxnId()); @@ -904,6 +933,7 @@ public void addDynamicPartitions() throws Exception { AddDynamicPartitions adp = new AddDynamicPartitions(txnId, writeId, dbName, tableName, 
Arrays.asList("ds=yesterday", "ds=today")); + adp.setCatName(Warehouse.DEFAULT_CATALOG_NAME); adp.setOperationType(dop); txnHandler.addDynamicPartitions(adp); txnHandler.commitTxn(new CommitTxnRequest(txnId)); @@ -930,6 +960,7 @@ public void testEnqueueTimeEvenAfterFailed() throws Exception { final String tableName = "bar"; final String partitionName = "ds=today"; CompactionRequest rqst = new CompactionRequest(dbName, tableName, CompactionType.MINOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); rqst.setPartitionname(partitionName); long before = System.currentTimeMillis(); txnHandler.compact(rqst); @@ -953,6 +984,7 @@ public void testEnqueueTimeThroughLifeCycle() throws Exception { final String tableName = "bar"; final String partitionName = "ds=today"; CompactionRequest rqst = new CompactionRequest(dbName, tableName, CompactionType.MINOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); rqst.setPartitionname(partitionName); long before = System.currentTimeMillis(); txnHandler.compact(rqst); @@ -1001,8 +1033,8 @@ public void testFindPotentialCompactions_limitFetchSize() throws Exception { public void testFindNextToClean_limitFetchSize() throws Exception { MetastoreConf.setLongVar(conf, COMPACTOR_FETCH_SIZE, 1); - createAReadyToCleanCompaction("foo", "bar", "ds=today", CompactionType.MINOR); - createAReadyToCleanCompaction("foo2", "bar2", "ds=today", CompactionType.MINOR); + createAReadyToCleanCompaction(Warehouse.DEFAULT_CATALOG_NAME, "foo", "bar", "ds=today", CompactionType.MINOR); + createAReadyToCleanCompaction(Warehouse.DEFAULT_CATALOG_NAME, "foo2", "bar2", "ds=today", CompactionType.MINOR); assertNull(txnHandler.findNextToCompact(aFindNextCompactRequest("fred", WORKER_VERSION))); @@ -1061,15 +1093,17 @@ private long openTxn() throws MetaException { return txns.get(0); } - private long allocateTableWriteIds (String dbName, String tblName, long txnid) throws Exception { + private long allocateTableWriteIds (String catName, String dbName, String 
tblName, long txnid) throws Exception { AllocateTableWriteIdsRequest rqst = new AllocateTableWriteIdsRequest(dbName, tblName); + rqst.setCatName(catName); rqst.setTxnIds(Collections.singletonList(txnid)); AllocateTableWriteIdsResponse writeIds = txnHandler.allocateTableWriteIds(rqst); return writeIds.getTxnToWriteIds().get(0).getWriteId(); } - private void createAReadyToCleanCompaction(String dbName, String tableName, String partitionName, CompactionType compactionType) throws MetaException { + private void createAReadyToCleanCompaction(String catName, String dbName, String tableName, String partitionName, CompactionType compactionType) throws MetaException { CompactionRequest rqst = new CompactionRequest(dbName, tableName, compactionType); + rqst.setCatName(catName); rqst.setPartitionname(partitionName); txnHandler.compact(rqst); diff --git a/ql/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java b/ql/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java index d8b111db1e4c..1015e758d51f 100644 --- a/ql/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java +++ b/ql/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hive.common.ValidReaderWriteIdList; import org.apache.hadoop.hive.common.ValidTxnWriteIdList; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.AbortTxnRequest; import org.apache.hadoop.hive.metastore.api.AbortTxnsRequest; import org.apache.hadoop.hive.metastore.api.AddDynamicPartitions; @@ -176,6 +177,7 @@ public void testAbortTxn() throws Exception { parts.add("p=1"); AllocateTableWriteIdsRequest rqst = new AllocateTableWriteIdsRequest("default", "T"); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); rqst.setTxnIds(Collections.singletonList(3L)); AllocateTableWriteIdsResponse writeIds = txnHandler.allocateTableWriteIds(rqst); long writeId =
writeIds.getTxnToWriteIds().get(0).getWriteId(); @@ -183,6 +185,7 @@ public void testAbortTxn() throws Exception { assertEquals(1, writeId); AddDynamicPartitions adp = new AddDynamicPartitions(3, writeId, "default", "T", parts); + adp.setCatName(Warehouse.DEFAULT_CATALOG_NAME); adp.setOperationType(DataOperationType.INSERT); txnHandler.addDynamicPartitions(adp); GetOpenTxnsInfoResponse txnsInfo = txnHandler.getOpenTxnsInfo(); @@ -1298,6 +1301,7 @@ public void testHeartbeatNoLock() throws Exception { @Test public void testCompactMajorWithPartition() throws Exception { CompactionRequest rqst = new CompactionRequest("foo", "bar", CompactionType.MAJOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); rqst.setPartitionname("ds=today"); txnHandler.compact(rqst); ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest()); @@ -1669,6 +1673,7 @@ public void testReplAllocWriteId() throws Exception { srcTxnToWriteId.add(new TxnToWriteId(startTxnId+idx, idx+1)); } AllocateTableWriteIdsRequest allocMsg = new AllocateTableWriteIdsRequest("destdb", "tbl1"); + allocMsg.setCatName(Warehouse.DEFAULT_CATALOG_NAME); allocMsg.setReplPolicy("destdb.*"); allocMsg.setSrcTxnToWriteIdList(srcTxnToWriteId); targetTxnToWriteId = txnHandler.allocateTableWriteIds(allocMsg).getTxnToWriteIds(); @@ -1689,6 +1694,7 @@ public void testReplAllocWriteId() throws Exception { srcTxnToWriteId = new ArrayList<>(); srcTxnToWriteId.add(new TxnToWriteId(startTxnId, 2*numTxn+1)); allocMsg = new AllocateTableWriteIdsRequest("destdb", "tbl2"); + allocMsg.setCatName(Warehouse.DEFAULT_CATALOG_NAME); allocMsg.setReplPolicy("destdb.*"); allocMsg.setSrcTxnToWriteIdList(srcTxnToWriteId); @@ -1716,6 +1722,7 @@ public void testReplAllocWriteId() throws Exception { @Test public void allocateNextWriteIdRetriesAfterDetectingConflictingConcurrentInsert() throws Exception { + String catName = Warehouse.DEFAULT_CATALOG_NAME; String dbName = "abc"; String tableName = "def"; int numTxns = 2; @@ -1736,6 
+1743,7 @@ public void allocateNextWriteIdRetriesAfterDetectingConflictingConcurrentInsert( OpenTxnsResponse resp = txnHandler.openTxns(new OpenTxnRequest(numTxns, "me", "localhost")); AllocateTableWriteIdsRequest request = new AllocateTableWriteIdsRequest(dbName, tableName); + request.setCatName(catName); resp.getTxn_ids().forEach(request::addToTxnIds); // thread 1: allocating write ids for dbName.tableName @@ -1817,6 +1825,7 @@ private void testGetMaterializationInvalidationInfo( SourceTable sourceTable = new SourceTable(); sourceTable.setTable(table); CreationMetadata creationMetadata = new CreationMetadata(); + creationMetadata.setCatName(Warehouse.DEFAULT_CATALOG_NAME); creationMetadata.setDbName("default"); creationMetadata.setTblName("mat1"); creationMetadata.setTablesUsed(new HashSet() {{ add("default.t1"); }}); @@ -1868,6 +1877,7 @@ private void testGetMaterializationInvalidationInfoWithValidReaderWriteIdList( } CreationMetadata creationMetadata = new CreationMetadata(); + creationMetadata.setCatName(Warehouse.DEFAULT_CATALOG_NAME); creationMetadata.setDbName("default"); creationMetadata.setTblName("mat1"); creationMetadata.setTablesUsed(new HashSet() {{ add("default.t1"); }}); diff --git a/ql/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandlerWithOneConnection.java b/ql/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandlerWithOneConnection.java index c2708b2bb6a1..8bd671494bd5 100644 --- a/ql/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandlerWithOneConnection.java +++ b/ql/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandlerWithOneConnection.java @@ -56,7 +56,7 @@ public TestTxnHandlerWithOneConnection() throws Exception { @Test public void testGetValidWriteIds() throws Exception { GetValidWriteIdsRequest req = new GetValidWriteIdsRequest(); - req.setFullTableNames(Collections.singletonList("foo.bar")); + req.setFullTableNames(Collections.singletonList("hive.foo.bar")); txnHandler.getValidWriteIds(req); } diff --git 
a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java index a74d3170b6a2..36a01beff129 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java @@ -47,6 +47,7 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.HiveMetaStoreUtils; import org.apache.hadoop.hive.metastore.MetastoreTaskThread; +import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.CommitTxnRequest; import org.apache.hadoop.hive.metastore.api.CompactionRequest; import org.apache.hadoop.hive.metastore.api.CompactionResponse; @@ -1114,6 +1115,7 @@ public void updateDeletePartitioned() throws Exception { runStatementOnDriver("insert into " + Table.ACIDTBLPART + " partition(p=1) (a,b) " + makeValuesClause(tableData)); runStatementOnDriver("insert into " + Table.ACIDTBLPART + " partition(p=2) (a,b) " + makeValuesClause(tableData)); CompactionRequest request = new CompactionRequest("default", Table.ACIDTBLPART.name(), CompactionType.MAJOR); + request.setCatName(Warehouse.DEFAULT_CATALOG_NAME); request.setPartitionname("p=1"); txnHandler.compact(request); runWorker(hiveConf); @@ -2325,9 +2327,11 @@ public void testCleanerForTxnToWriteId(boolean useMinHistoryWriteId) throws Exce // All inserts are committed and hence would expect in TXN_TO_WRITE_ID, 3 entries for acidTbl // and 2 entries for acidTblPart as each insert would have allocated a writeid. 
- String acidTblWhereClause = " where t2w_database = " + quoteString("default") + + String acidTblWhereClause = " where t2w_catalog = " + quoteString(Warehouse.DEFAULT_CATALOG_NAME) + + " and t2w_database = " + quoteString("default") + " and t2w_table = " + quoteString(Table.ACIDTBL.name().toLowerCase()); - String acidTblPartWhereClause = " where t2w_database = " + quoteString("default") + + String acidTblPartWhereClause = " where t2w_catalog = " + quoteString(Warehouse.DEFAULT_CATALOG_NAME) + + " and t2w_database = " + quoteString("default") + " and t2w_table = " + quoteString(Table.ACIDTBLPART.name().toLowerCase()); assertTxnToWriteIdRowCount( @@ -2407,7 +2411,7 @@ public void testCleanerForTxnToWriteId(boolean useMinHistoryWriteId) throws Exce // commit a new txn to advance min open writeId TxnStoreHelper.wrap(txnHandler) - .allocateTableWriteId("default", Table.ACIDTBL.name(), txnId); + .allocateTableWriteId(Warehouse.DEFAULT_CATALOG_NAME, "default", Table.ACIDTBL.name(), txnId); txnMgr.commitTxn(); } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/repl/TestReplDumpTask.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/repl/TestReplDumpTask.java index c6abfa8ea153..e809e4dc2e7e 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/repl/TestReplDumpTask.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/repl/TestReplDumpTask.java @@ -20,6 +20,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.repl.ReplScope; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.ql.QueryState; import org.apache.hadoop.hive.ql.exec.repl.util.FileList; import org.apache.hadoop.hive.ql.metadata.Hive; @@ -108,7 +109,7 @@ public void removeDBPropertyToPreventRenameWhenBootstrapDumpOfTableFails() throw try (MockedStatic utilsMockedStatic = mockStatic(Utils.class)) { List tableList = Arrays.asList("a1", "a2"); String dbRandomKey = "akeytoberandom"; - ReplScope replScope = new 
ReplScope("default"); + ReplScope replScope = new ReplScope(Warehouse.DEFAULT_CATALOG_NAME, "default"); utilsMockedStatic.when(() -> Utils.matchesDb(same(hive), eq("default"))).thenReturn(Collections.singletonList("default")); utilsMockedStatic.when(() -> Utils.getAllTables(same(hive), eq("default"), eq(replScope))).thenReturn(tableList); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java index fad6593a6360..36004294a674 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java @@ -25,6 +25,7 @@ import org.apache.hadoop.hive.common.ValidTxnList; import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.metastore.MetastoreTaskThread; +import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.AddDynamicPartitions; import org.apache.hadoop.hive.metastore.api.AllocateTableWriteIdsRequest; import org.apache.hadoop.hive.metastore.api.AllocateTableWriteIdsResponse; @@ -506,10 +507,10 @@ public void testMetastoreTablesCleanup() throws Exception { driver.run("insert into temp.T13p partition (ds='today', hour='1') values (7, 7)"); driver.run("insert into temp.T13p partition (ds='tomorrow', hour='2') values (8, 8)"); int count = TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"COMPLETED_TXN_COMPONENTS\" " + - "where \"CTC_DATABASE\"='temp' and \"CTC_TABLE\" in ('t10', 't11')"); + "where \"CTC_CATALOG\"='hive' and \"CTC_DATABASE\"='temp' and \"CTC_TABLE\" in ('t10', 't11')"); Assert.assertEquals(4, count); count = TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"COMPLETED_TXN_COMPONENTS\" " + - "where \"CTC_DATABASE\"='temp' and \"CTC_TABLE\" in ('t12p', 't13p')"); + "where \"CTC_CATALOG\"='hive' and \"CTC_DATABASE\"='temp' and \"CTC_TABLE\" in ('t12p', 't13p')"); Assert.assertEquals(5, count); 
// Fail some inserts, so that we have records in TXN_COMPONENTS @@ -519,72 +520,72 @@ public void testMetastoreTablesCleanup() throws Exception { driver.run("insert into temp.T12p partition (ds='today', hour='1') values (11, 11)"); driver.run("insert into temp.T13p partition (ds='today', hour='1') values (12, 12)"); count = TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"TXN_COMPONENTS\" " + - "where \"TC_DATABASE\"='temp' and \"TC_TABLE\" in ('t10', 't11', 't12p', 't13p')"); + "where \"TC_CATALOG\"='hive' AND \"TC_DATABASE\"='temp' and \"TC_TABLE\" in ('t10', 't11', 't12p', 't13p')"); Assert.assertEquals(4, count); conf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, false); // Drop a table/partition; corresponding records in TXN_COMPONENTS and COMPLETED_TXN_COMPONENTS should disappear count = TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"TXN_COMPONENTS\" " + - "where \"TC_DATABASE\"='temp' and \"TC_TABLE\"='t10'"); + "where \"TC_CATALOG\"='hive' AND \"TC_DATABASE\"='temp' and \"TC_TABLE\"='t10'"); Assert.assertEquals(1, count); count = TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"COMPLETED_TXN_COMPONENTS\" " + - "where \"CTC_DATABASE\"='temp' and \"CTC_TABLE\"='t10'"); + "where \"CTC_CATALOG\"='hive' and \"CTC_DATABASE\"='temp' and \"CTC_TABLE\"='t10'"); Assert.assertEquals(2, count); driver.run("drop table temp.T10"); count = TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"TXN_COMPONENTS\" " + - "where \"TC_DATABASE\"='temp' and \"TC_TABLE\"='t10'"); + "where \"TC_CATALOG\"='hive' and \"TC_DATABASE\"='temp' and \"TC_TABLE\"='t10'"); Assert.assertEquals(0, count); count = TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"COMPLETED_TXN_COMPONENTS\" " + - "where \"CTC_DATABASE\"='temp' and \"CTC_TABLE\"='t10'"); + "where \"CTC_CATALOG\"='hive' and \"CTC_DATABASE\"='temp' and \"CTC_TABLE\"='t10'"); Assert.assertEquals(0, count); count = TestTxnDbUtil.countQueryAgent(conf, "select count(*) from 
\"TXN_COMPONENTS\" " + - "where \"TC_DATABASE\"='temp' and \"TC_TABLE\"='t12p' and \"TC_PARTITION\"='ds=today/hour=1'"); + "where \"TC_CATALOG\"='hive' and \"TC_DATABASE\"='temp' and \"TC_TABLE\"='t12p' and \"TC_PARTITION\"='ds=today/hour=1'"); Assert.assertEquals(1, count); count = TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"COMPLETED_TXN_COMPONENTS\" " + - "where \"CTC_DATABASE\"='temp' and \"CTC_TABLE\"='t12p' and \"CTC_PARTITION\"='ds=today/hour=1'"); + "where \"CTC_CATALOG\"='hive' and \"CTC_DATABASE\"='temp' and \"CTC_TABLE\"='t12p' and \"CTC_PARTITION\"='ds=today/hour=1'"); Assert.assertEquals(1, count); driver.run("alter table temp.T12p drop partition (ds='today', hour='1')"); count = TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"TXN_COMPONENTS\" " + - "where \"TC_DATABASE\"='temp' and \"TC_TABLE\"='t12p' and \"TC_PARTITION\"='ds=today/hour=1'"); + "where \"TC_CATALOG\"='hive' and \"TC_DATABASE\"='temp' and \"TC_TABLE\"='t12p' and \"TC_PARTITION\"='ds=today/hour=1'"); Assert.assertEquals(0, count); count = TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"COMPLETED_TXN_COMPONENTS\" " + - "where \"CTC_DATABASE\"='temp' and \"CTC_TABLE\"='t12p' and \"CTC_PARTITION\"='ds=today/hour=1'"); + "where \"CTC_CATALOG\"='hive' and \"CTC_DATABASE\"='temp' and \"CTC_TABLE\"='t12p' and \"CTC_PARTITION\"='ds=today/hour=1'"); Assert.assertEquals(0, count); // Successfully perform compaction on a table/partition, so that we have successful records in COMPLETED_COMPACTIONS driver.run("alter table temp.T11 compact 'minor'"); count = TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"COMPACTION_QUEUE\" " + - "where \"CQ_DATABASE\"='temp' and \"CQ_TABLE\"='t11' and \"CQ_STATE\"='i' and \"CQ_TYPE\"='i'"); + "where \"CQ_CATALOG\" = '" + Warehouse.DEFAULT_CATALOG_NAME + "' and \"CQ_DATABASE\"='temp' and \"CQ_TABLE\"='t11' and \"CQ_STATE\"='i' and \"CQ_TYPE\"='i'"); Assert.assertEquals(1, count); runWorker(conf); count = 
TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"COMPACTION_QUEUE\" " + - "where \"CQ_DATABASE\"='temp' and \"CQ_TABLE\"='t11' and \"CQ_STATE\"='r' and \"CQ_TYPE\"='i'"); + "where \"CQ_CATALOG\" = '" + Warehouse.DEFAULT_CATALOG_NAME + "' and \"CQ_DATABASE\"='temp' and \"CQ_TABLE\"='t11' and \"CQ_STATE\"='r' and \"CQ_TYPE\"='i'"); Assert.assertEquals(1, count); runCleaner(conf); count = TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"COMPACTION_QUEUE\" " + - "where \"CQ_DATABASE\"='temp' and \"CQ_TABLE\"='t11'"); + "where \"CQ_CATALOG\" = '" + Warehouse.DEFAULT_CATALOG_NAME + "' and \"CQ_DATABASE\"='temp' and \"CQ_TABLE\"='t11'"); Assert.assertEquals(0, count); count = TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"COMPLETED_COMPACTIONS\" " + - "where \"CC_DATABASE\"='temp' and \"CC_TABLE\"='t11' and \"CC_STATE\"='s' and \"CC_TYPE\"='i'"); + "where \"CC_CATALOG\"='hive' and \"CC_DATABASE\"='temp' and \"CC_TABLE\"='t11' and \"CC_STATE\"='s' and \"CC_TYPE\"='i'"); Assert.assertEquals(1, count); driver.run("alter table temp.T12p partition (ds='tomorrow', hour='2') compact 'minor'"); count = TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"COMPACTION_QUEUE\" " + - "where \"CQ_DATABASE\"='temp' and \"CQ_TABLE\"='t12p' and \"CQ_PARTITION\"='ds=tomorrow/hour=2' " + + "where \"CQ_CATALOG\" = '" + Warehouse.DEFAULT_CATALOG_NAME + "' and \"CQ_DATABASE\"='temp' and \"CQ_TABLE\"='t12p' and \"CQ_PARTITION\"='ds=tomorrow/hour=2' " + "and \"CQ_STATE\"='i' and \"CQ_TYPE\"='i'"); Assert.assertEquals(1, count); runWorker(conf); count = TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"COMPACTION_QUEUE\" " + - "where \"CQ_DATABASE\"='temp' and \"CQ_TABLE\"='t12p' and \"CQ_PARTITION\"='ds=tomorrow/hour=2' " + + "where \"CQ_CATALOG\" = '" + Warehouse.DEFAULT_CATALOG_NAME + "' and \"CQ_DATABASE\"='temp' and \"CQ_TABLE\"='t12p' and \"CQ_PARTITION\"='ds=tomorrow/hour=2' " + "and \"CQ_STATE\"='r' and \"CQ_TYPE\"='i'"); 
Assert.assertEquals(1, count); runCleaner(conf); count = TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"COMPACTION_QUEUE\" " + - "where \"CQ_DATABASE\"='temp' and \"CQ_TABLE\"='t12p'"); + "where \"CQ_CATALOG\" = '" + Warehouse.DEFAULT_CATALOG_NAME + "' and \"CQ_DATABASE\"='temp' and \"CQ_TABLE\"='t12p'"); Assert.assertEquals(0, count); count = TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"COMPLETED_COMPACTIONS\" " + - "where \"CC_DATABASE\"='temp' and \"CC_TABLE\"='t12p' and \"CC_STATE\"='s' and \"CC_TYPE\"='i'"); + "where \"CC_CATALOG\"='hive' and \"CC_DATABASE\"='temp' and \"CC_TABLE\"='t12p' and \"CC_STATE\"='s' and \"CC_TYPE\"='i'"); Assert.assertEquals(1, count); // Fail compaction, so that we have failed records in COMPLETED_COMPACTIONS. @@ -594,90 +595,90 @@ public void testMetastoreTablesCleanup() throws Exception { conf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_COMPACTION, true); driver.run("alter table temp.T11 compact 'major'"); count = TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"COMPACTION_QUEUE\" " + - "where \"CQ_DATABASE\"='temp' and \"CQ_TABLE\"='t11' and \"CQ_STATE\"='i' and \"CQ_TYPE\"='a'"); + "where \"CQ_CATALOG\" = '" + Warehouse.DEFAULT_CATALOG_NAME + "' and \"CQ_DATABASE\"='temp' and \"CQ_TABLE\"='t11' and \"CQ_STATE\"='i' and \"CQ_TYPE\"='a'"); Assert.assertEquals(1, count); runWorker(conf); // will fail count = TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"COMPACTION_QUEUE\" " + - "where \"CQ_DATABASE\"='temp' and \"CQ_TABLE\"='t11' and \"CQ_STATE\"='i' and \"CQ_TYPE\"='a'"); + "where \"CQ_CATALOG\" = '" + Warehouse.DEFAULT_CATALOG_NAME + "' and \"CQ_DATABASE\"='temp' and \"CQ_TABLE\"='t11' and \"CQ_STATE\"='i' and \"CQ_TYPE\"='a'"); Assert.assertEquals(0, count); count = TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"COMPLETED_COMPACTIONS\" " + - "where \"CC_DATABASE\"='temp' and \"CC_TABLE\"='t11' and \"CC_STATE\"='f' and \"CC_TYPE\"='a'"); + "where 
\"CC_CATALOG\"='hive' and \"CC_DATABASE\"='temp' and \"CC_TABLE\"='t11' and \"CC_STATE\"='f' and \"CC_TYPE\"='a'"); Assert.assertEquals(1, count); driver.run("alter table temp.T12p partition (ds='tomorrow', hour='2') compact 'major'"); count = TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"COMPACTION_QUEUE\" " + - "where \"CQ_DATABASE\"='temp' and \"CQ_TABLE\"='t12p' and \"CQ_PARTITION\"='ds=tomorrow/hour=2' " + + "where \"CQ_CATALOG\" = '" + Warehouse.DEFAULT_CATALOG_NAME + "' and \"CQ_DATABASE\"='temp' and \"CQ_TABLE\"='t12p' and \"CQ_PARTITION\"='ds=tomorrow/hour=2' " + "and \"CQ_STATE\"='i' and \"CQ_TYPE\"='a'"); Assert.assertEquals(1, count); runWorker(conf); // will fail count = TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"COMPACTION_QUEUE\" " + - "where \"CQ_DATABASE\"='temp' and \"CQ_TABLE\"='t12p' and \"CQ_PARTITION\"='ds=tomorrow/hour=2' " + + "where \"CQ_CATALOG\" = '" + Warehouse.DEFAULT_CATALOG_NAME + "' and \"CQ_DATABASE\"='temp' and \"CQ_TABLE\"='t12p' and \"CQ_PARTITION\"='ds=tomorrow/hour=2' " + "and \"CQ_STATE\"='i' and \"CQ_TYPE\"='a'"); Assert.assertEquals(0, count); count = TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"COMPLETED_COMPACTIONS\" " + - "where \"CC_DATABASE\"='temp' and \"CC_TABLE\"='t12p' and \"CC_STATE\"='f' and \"CC_TYPE\"='a'"); + "where \"CC_CATALOG\"='hive' and \"CC_DATABASE\"='temp' and \"CC_TABLE\"='t12p' and \"CC_STATE\"='f' and \"CC_TYPE\"='a'"); Assert.assertEquals(1, count); conf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_COMPACTION, false); // Put 2 records into COMPACTION_QUEUE and do nothing driver.run("alter table temp.T11 compact 'major'"); count = TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"COMPACTION_QUEUE\" " + - "where \"CQ_DATABASE\"='temp' and \"CQ_TABLE\"='t11' and \"CQ_STATE\"='i' and \"CQ_TYPE\"='a'"); + "where \"CQ_CATALOG\" = '" + Warehouse.DEFAULT_CATALOG_NAME + "' and \"CQ_DATABASE\"='temp' and \"CQ_TABLE\"='t11' and \"CQ_STATE\"='i' and 
\"CQ_TYPE\"='a'"); Assert.assertEquals(1, count); driver.run("alter table temp.T12p partition (ds='tomorrow', hour='2') compact 'major'"); count = TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"COMPACTION_QUEUE\" " + - "where \"CQ_DATABASE\"='temp' and \"CQ_TABLE\"='t12p' and \"CQ_PARTITION\"='ds=tomorrow/hour=2' " + + "where \"CQ_CATALOG\" = '" + Warehouse.DEFAULT_CATALOG_NAME + "' and \"CQ_DATABASE\"='temp' and \"CQ_TABLE\"='t12p' and \"CQ_PARTITION\"='ds=tomorrow/hour=2' " + "and \"CQ_STATE\"='i' and \"CQ_TYPE\"='a'"); Assert.assertEquals(1, count); // Drop a table/partition, corresponding records in COMPACTION_QUEUE and COMPLETED_COMPACTIONS should disappear driver.run("drop table temp.T11"); count = TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"COMPACTION_QUEUE\" " + - "where \"CQ_DATABASE\"='temp' and \"CQ_TABLE\"='t11'"); + "where \"CQ_CATALOG\" = '" + Warehouse.DEFAULT_CATALOG_NAME + "' and \"CQ_DATABASE\"='temp' and \"CQ_TABLE\"='t11'"); Assert.assertEquals(0, count); count = TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"COMPLETED_COMPACTIONS\" " + - "where \"CC_DATABASE\"='temp' and \"CC_TABLE\"='t11'"); + "where \"CC_CATALOG\"='hive' and \"CC_DATABASE\"='temp' and \"CC_TABLE\"='t11'"); Assert.assertEquals(0, count); driver.run("alter table temp.T12p drop partition (ds='tomorrow', hour='2')"); count = TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"COMPACTION_QUEUE\" " + - "where \"CQ_DATABASE\"='temp' and \"CQ_TABLE\"='t12p'"); + "where \"CQ_CATALOG\" = '" + Warehouse.DEFAULT_CATALOG_NAME + "' and \"CQ_DATABASE\"='temp' and \"CQ_TABLE\"='t12p'"); Assert.assertEquals(0, count); count = TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"COMPLETED_COMPACTIONS\" " + - "where \"CC_DATABASE\"='temp' and \"CC_TABLE\"='t12p'"); + "where \"CC_CATALOG\"='hive' and \"CC_DATABASE\"='temp' and \"CC_TABLE\"='t12p'"); Assert.assertEquals(0, count); // Put 1 record into COMPACTION_QUEUE and do nothing 
driver.run("alter table temp.T13p partition (ds='today', hour='1') compact 'major'"); count = TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"COMPACTION_QUEUE\" " + - "where \"CQ_DATABASE\"='temp' and \"CQ_TABLE\"='t13p' and \"CQ_STATE\"='i' and \"CQ_TYPE\"='a'"); + "where \"CQ_CATALOG\" = '" + Warehouse.DEFAULT_CATALOG_NAME + "' and \"CQ_DATABASE\"='temp' and \"CQ_TABLE\"='t13p' and \"CQ_STATE\"='i' and \"CQ_TYPE\"='a'"); Assert.assertEquals(1, count); // Drop database, everything in all 4 meta tables should disappear count = TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"TXN_COMPONENTS\" " + - "where \"TC_DATABASE\"='temp' and \"TC_TABLE\" in ('t10', 't11', 't12p', 't13p')"); + "where \"TC_CATALOG\"='hive' and \"TC_DATABASE\"='temp' and \"TC_TABLE\" in ('t10', 't11', 't12p', 't13p')"); Assert.assertEquals(1, count); count = TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"COMPLETED_TXN_COMPONENTS\" " + - "where \"CTC_DATABASE\"='temp' and \"CTC_TABLE\" in ('t10', 't11', 't12p', 't13p')"); + "where \"CTC_CATALOG\"='hive' and \"CTC_DATABASE\"='temp' and \"CTC_TABLE\" in ('t10', 't11', 't12p', 't13p')"); Assert.assertEquals(2, count); count = TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"COMPACTION_QUEUE\" " + - "where \"CQ_DATABASE\"='temp' and \"CQ_TABLE\" in ('t10', 't11', 't12p', 't13p')"); + "where \"CQ_CATALOG\" = '" + Warehouse.DEFAULT_CATALOG_NAME + "' and \"CQ_DATABASE\"='temp' and \"CQ_TABLE\" in ('t10', 't11', 't12p', 't13p')"); Assert.assertEquals(1, count); count = TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"COMPLETED_COMPACTIONS\" " + - "where \"CC_DATABASE\"='temp' and \"CC_TABLE\" in ('t10', 't11', 't12p', 't13p')"); + "where \"CC_CATALOG\"='hive' and \"CC_DATABASE\"='temp' and \"CC_TABLE\" in ('t10', 't11', 't12p', 't13p')"); Assert.assertEquals(0, count); driver.run("drop database if exists temp cascade"); count = TestTxnDbUtil.countQueryAgent(conf, "select count(*) from 
\"TXN_COMPONENTS\" " + - "where \"TC_DATABASE\"='temp' and \"TC_TABLE\" in ('t10', 't11', 't12p', 't13p')"); + "where \"TC_CATALOG\"='hive' and \"TC_DATABASE\"='temp' and \"TC_TABLE\" in ('t10', 't11', 't12p', 't13p')"); Assert.assertEquals(0, count); count = TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"COMPLETED_TXN_COMPONENTS\" " + - "where \"CTC_DATABASE\"='temp' and \"CTC_TABLE\" in ('t10', 't11', 't12p', 't13p')"); + "where \"CTC_CATALOG\"='hive' and \"CTC_DATABASE\"='temp' and \"CTC_TABLE\" in ('t10', 't11', 't12p', 't13p')"); Assert.assertEquals(0, count); count = TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"COMPACTION_QUEUE\" " + - "where \"CQ_DATABASE\"='temp' and \"CQ_TABLE\" in ('t10', 't11', 't12p', 't13p')"); + "where \"CQ_CATALOG\" = '" + Warehouse.DEFAULT_CATALOG_NAME + "' and \"CQ_DATABASE\"='temp' and \"CQ_TABLE\" in ('t10', 't11', 't12p', 't13p')"); Assert.assertEquals(0, count); count = TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"COMPLETED_COMPACTIONS\" " + - "where \"CC_DATABASE\"='temp' and \"CC_TABLE\" in ('t10', 't11', 't12p', 't13p')"); + "where \"CC_CATALOG\"='hive' and \"CC_DATABASE\"='temp' and \"CC_TABLE\" in ('t10', 't11', 't12p', 't13p')"); Assert.assertEquals(0, count); } @@ -1105,6 +1106,7 @@ public void testWriteSetTracking3() throws Exception { long writeId = txnMgr.getTableWriteId("default", "TAB_PART"); AddDynamicPartitions adp = new AddDynamicPartitions(txnId, writeId, "default", "TAB_PART", Collections.singletonList("p=blah")); + adp.setCatName(Warehouse.DEFAULT_CATALOG_NAME); adp.setOperationType(DataOperationType.UPDATE); txnHandler.addDynamicPartitions(adp); txnMgr.commitTxn(); @@ -1163,12 +1165,14 @@ public void testWriteSetTracking4() throws Exception { Assert.assertEquals(0, TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"WRITE_SET\"")); AllocateTableWriteIdsRequest rqst = new AllocateTableWriteIdsRequest("default", "tab2"); + 
rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); rqst.setTxnIds(Collections.singletonList(txnMgr2.getCurrentTxnId())); AllocateTableWriteIdsResponse writeIds = txnHandler.allocateTableWriteIds(rqst); Assert.assertEquals(txnMgr2.getCurrentTxnId(), writeIds.getTxnToWriteIds().get(0).getTxnId()); AddDynamicPartitions adp = new AddDynamicPartitions(txnMgr2.getCurrentTxnId(), writeIds.getTxnToWriteIds().get(0).getWriteId(), "default", "tab2", Collections.EMPTY_LIST); + adp.setCatName(Warehouse.DEFAULT_CATALOG_NAME); adp.setOperationType(DataOperationType.UPDATE); txnHandler.addDynamicPartitions(adp); txnMgr2.commitTxn(); @@ -1186,12 +1190,14 @@ public void testWriteSetTracking4() throws Exception { Assert.assertEquals(0, TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"WRITE_SET\"")); rqst = new AllocateTableWriteIdsRequest("default", "tab2"); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); rqst.setTxnIds(Collections.singletonList(txnMgr2.getCurrentTxnId())); writeIds = txnHandler.allocateTableWriteIds(rqst); Assert.assertEquals(txnMgr2.getCurrentTxnId(), writeIds.getTxnToWriteIds().get(0).getTxnId()); adp = new AddDynamicPartitions(txnMgr2.getCurrentTxnId(), writeIds.getTxnToWriteIds().get(0).getWriteId(), "default", "tab2", Collections.singletonList("p=two")); + adp.setCatName(Warehouse.DEFAULT_CATALOG_NAME); adp.setOperationType(DataOperationType.UPDATE); txnHandler.addDynamicPartitions(adp); //simulate partition update txnMgr2.commitTxn(); @@ -1207,6 +1213,7 @@ public void testWriteSetTracking4() throws Exception { txnMgr.acquireLocks(driver.getPlan(), ctx, "Long Running"); rqst = new AllocateTableWriteIdsRequest("default", "tab2"); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); rqst.setTxnIds(Collections.singletonList(txnMgr.getCurrentTxnId())); writeIds = txnHandler.allocateTableWriteIds(rqst); Assert.assertEquals(txnMgr.getCurrentTxnId(), writeIds.getTxnToWriteIds().get(0).getTxnId()); @@ -1214,6 +1221,7 @@ public void testWriteSetTracking4() 
throws Exception { //so generate empty Dyn Part call adp = new AddDynamicPartitions(txnMgr.getCurrentTxnId(), writeIds.getTxnToWriteIds().get(0).getWriteId(), "default", "tab2", Collections.EMPTY_LIST); + adp.setCatName(Warehouse.DEFAULT_CATALOG_NAME); adp.setOperationType(DataOperationType.UPDATE); txnHandler.addDynamicPartitions(adp); txnMgr.commitTxn(); @@ -1262,12 +1270,14 @@ public void testWriteSetTracking5() throws Exception { txnMgr.rollbackTxn(); AllocateTableWriteIdsRequest rqst = new AllocateTableWriteIdsRequest("default", "TAB_PART"); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); rqst.setTxnIds(Collections.singletonList(txnId)); AllocateTableWriteIdsResponse writeIds = txnHandler.allocateTableWriteIds(rqst); Assert.assertEquals(txnId, writeIds.getTxnToWriteIds().get(0).getTxnId()); AddDynamicPartitions adp = new AddDynamicPartitions(txnId, writeIds.getTxnToWriteIds().get(0).getWriteId(), "default", "TAB_PART", Collections.singletonList("p=blah")); + adp.setCatName(Warehouse.DEFAULT_CATALOG_NAME); adp.setOperationType(DataOperationType.UPDATE); txnHandler.addDynamicPartitions(adp); Assert.assertEquals(0, TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"WRITE_SET\"")); @@ -1349,6 +1359,7 @@ public void testWriteSetTracking7() throws Exception { long writeId = txnMgr2.getTableWriteId("default", "tab2"); AddDynamicPartitions adp = new AddDynamicPartitions(txnMgr2.getCurrentTxnId(), writeId, "default", "tab2", Collections.singletonList("p=two")); + adp.setCatName(Warehouse.DEFAULT_CATALOG_NAME); adp.setOperationType(DataOperationType.UPDATE); txnHandler.addDynamicPartitions(adp); txnMgr2.commitTxn(); //txnid:idTxnUpdate1 @@ -1359,6 +1370,7 @@ public void testWriteSetTracking7() throws Exception { writeId = txnMgr.getTableWriteId("default", "tab2"); adp = new AddDynamicPartitions(txnMgr.getCurrentTxnId(), writeId, "default", "tab2", Collections.singletonList("p=one")); + adp.setCatName(Warehouse.DEFAULT_CATALOG_NAME); 
adp.setOperationType(DataOperationType.UPDATE); txnHandler.addDynamicPartitions(adp); txnMgr.commitTxn(); //txnid:idTxnUpdate2 @@ -1407,6 +1419,7 @@ public void testWriteSetTracking7() throws Exception { writeId = txnMgr2.getTableWriteId("default", "tab1"); adp = new AddDynamicPartitions(txnMgr2.getCurrentTxnId(), writeId, "default", "tab1", Collections.singletonList("p=one")); + adp.setCatName(Warehouse.DEFAULT_CATALOG_NAME); adp.setOperationType(DataOperationType.UPDATE); txnHandler.addDynamicPartitions(adp); txnMgr2.commitTxn(); //txnid:idTxnUpdate3 @@ -1419,6 +1432,7 @@ public void testWriteSetTracking7() throws Exception { writeId = txnMgr.getTableWriteId("default", "tab1"); adp = new AddDynamicPartitions(txnMgr.getCurrentTxnId(), writeId, "default", "tab1", Collections.singletonList("p=two")); + adp.setCatName(Warehouse.DEFAULT_CATALOG_NAME); adp.setOperationType(DataOperationType.UPDATE); txnHandler.addDynamicPartitions(adp); txnMgr.commitTxn(); //txnid:idTxnUpdate4 @@ -1470,6 +1484,7 @@ public void testWriteSetTracking8() throws Exception { long writeId = txnMgr2.getTableWriteId("default", "tab1"); AddDynamicPartitions adp = new AddDynamicPartitions(txnMgr2.getCurrentTxnId(), writeId, "default", "tab1", Collections.singletonList("p=one")); + adp.setCatName(Warehouse.DEFAULT_CATALOG_NAME); adp.setOperationType(DataOperationType.UPDATE); txnHandler.addDynamicPartitions(adp); txnMgr2.commitTxn(); //txnid:idTxnUpdate1 @@ -1481,6 +1496,7 @@ public void testWriteSetTracking8() throws Exception { writeId = txnMgr.getTableWriteId("default", "tab1"); adp = new AddDynamicPartitions(txnMgr.getCurrentTxnId(), writeId, "default", "tab1", Collections.singletonList("p=two")); + adp.setCatName(Warehouse.DEFAULT_CATALOG_NAME); adp.setOperationType(DataOperationType.UPDATE); txnHandler.addDynamicPartitions(adp); txnMgr.commitTxn(); //txnid:idTxnUpdate2 @@ -1533,6 +1549,7 @@ public void testWriteSetTracking9() throws Exception { long writeId = 
txnMgr2.getTableWriteId("default", "tab1"); AddDynamicPartitions adp = new AddDynamicPartitions(txnMgr2.getCurrentTxnId(), writeId, "default", "tab1", Collections.singletonList("p=one")); + adp.setCatName(Warehouse.DEFAULT_CATALOG_NAME); adp.setOperationType(DataOperationType.UPDATE); txnHandler.addDynamicPartitions(adp); txnMgr2.commitTxn(); //txnid:idTxnUpdate1 diff --git a/ql/src/test/org/apache/hadoop/hive/ql/parse/TestSemanticAnalyzer.java b/ql/src/test/org/apache/hadoop/hive/ql/parse/TestSemanticAnalyzer.java index dbdc79769dc8..580cd67b8c9f 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/parse/TestSemanticAnalyzer.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/parse/TestSemanticAnalyzer.java @@ -169,7 +169,7 @@ private static void createMaterializedView(String materializedViewName, Table so materializedView.getSd().setCols(sourceTable.getCols()); MaterializedViewMetadata metadata = new MaterializedViewMetadata( - MetaStoreUtils.getDefaultCatalog(conf), + sourceTable.getCatName(), sourceTable.getDbName(), materializedViewName, Sets.newHashSet(sourceTable.createSourceTable()), diff --git a/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/load/message/TestPrimaryToReplicaResourceFunction.java b/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/load/message/TestPrimaryToReplicaResourceFunction.java index 9452148fa3bd..e32bc15cc727 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/load/message/TestPrimaryToReplicaResourceFunction.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/load/message/TestPrimaryToReplicaResourceFunction.java @@ -22,6 +22,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.Function; import org.apache.hadoop.hive.metastore.api.ResourceType; import org.apache.hadoop.hive.metastore.api.ResourceUri; @@ -75,7 +76,7 @@ public class 
TestPrimaryToReplicaResourceFunction { public void setup() { MetaData metadata = new MetaData(null, null, null, null, functionObj); Context context = - new Context("primaryDb", null, null, null, hiveConf, null, null, logger); + new Context(Warehouse.DEFAULT_CATALOG_NAME, "primaryDb", null, null, null, hiveConf, null, null, logger); when(hiveConf.getVar(HiveConf.ConfVars.REPL_FUNCTIONS_ROOT_DIR)) .thenReturn("/someBasePath/withADir/"); timeMockedStatic = mockStatic(Time.class); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/testutil/TxnStoreHelper.java b/ql/src/test/org/apache/hadoop/hive/ql/testutil/TxnStoreHelper.java index 68a18fa9495a..f141c08b7c05 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/testutil/TxnStoreHelper.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/testutil/TxnStoreHelper.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hive.ql.testutil; +import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.AllocateTableWriteIdsRequest; import org.apache.hadoop.hive.metastore.api.AllocateTableWriteIdsResponse; import org.apache.hadoop.hive.metastore.api.MetaException; @@ -46,9 +47,10 @@ public static TxnStoreHelper wrap(TxnStore txnHandler) { /** * Allocates a new write ID for the table in the given transaction. 
*/ - public long allocateTableWriteId(String dbName, String tblName, long txnId) + public long allocateTableWriteId(String catName, String dbName, String tblName, long txnId) throws TxnAbortedException, NoSuchTxnException, MetaException { AllocateTableWriteIdsRequest request = new AllocateTableWriteIdsRequest(dbName, tblName.toLowerCase()); + request.setCatName(catName); request.setTxnIds(Collections.singletonList(txnId)); AllocateTableWriteIdsResponse response = txnHandler.allocateTableWriteIds(request); @@ -62,12 +64,12 @@ public void registerMinOpenWriteId(String dbName, String tblName, long txnId) th if (!ConfVars.useMinHistoryWriteId()) { return; } - long maxWriteId = txnHandler.getMaxAllocatedTableWriteId( - new MaxAllocatedTableWriteIdRequest(dbName, tblName.toLowerCase())) - .getMaxWriteId(); + MaxAllocatedTableWriteIdRequest rqst = new MaxAllocatedTableWriteIdRequest(dbName, tblName.toLowerCase()); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); + long maxWriteId = txnHandler.getMaxAllocatedTableWriteId(rqst).getMaxWriteId(); txnHandler.addWriteIdsToMinHistory(txnId, Collections.singletonMap( - TxnUtils.getFullTableName(dbName, tblName), maxWriteId + 1)); + TxnUtils.getFullTableName(Warehouse.DEFAULT_CATALOG_NAME, dbName, tblName), maxWriteId + 1)); } } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java index 322045b0dadd..1734e1127266 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java @@ -72,6 +72,7 @@ import org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.hadoop.hive.ql.io.RecordIdentifier; import org.apache.hadoop.hive.ql.io.RecordUpdater; +import org.apache.hadoop.hive.ql.metadata.HiveUtils; import org.apache.hadoop.hive.ql.testutil.TxnStoreHelper; import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe; import 
org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; @@ -181,22 +182,34 @@ protected void runAcidMetricService() { } protected Table newTable(String dbName, String tableName, boolean partitioned) throws TException { - return newTable(dbName, tableName, partitioned, new HashMap<>(), null, false); + return newTable(HiveUtils.getCurrentCatalogOrDefault(conf), dbName, tableName, partitioned, new HashMap<>()); } protected Table newTable(String dbName, String tableName, boolean partitioned, Map parameters) throws TException { - return newTable(dbName, tableName, partitioned, parameters, null, false); + return newTable(HiveUtils.getCurrentCatalogOrDefault(conf), dbName, tableName, partitioned, parameters); + } + protected Table newTable(String catName, String dbName, String tableName, boolean partitioned, + Map parameters) throws TException { + return newTable(catName, dbName, tableName, partitioned, parameters, null, false); } protected Table newTable(String dbName, String tableName, boolean partitioned, + Map parameters, List sortCols, + boolean isTemporary) throws TException { + return newTable(HiveUtils.getCurrentCatalogOrDefault(conf), dbName, tableName, partitioned, + parameters, sortCols, isTemporary); + } + + protected Table newTable(String catName, String dbName, String tableName, boolean partitioned, Map parameters, List sortCols, boolean isTemporary) throws TException { Table table = new Table(); table.setTableType(TableType.MANAGED_TABLE.name()); table.setTableName(tableName); + table.setCatName(catName); table.setDbName(dbName); table.setOwner("me"); table.setSd(newStorageDescriptor(getLocation(tableName, null), sortCols)); @@ -222,7 +235,7 @@ protected Table newTable(String dbName, String tableName, boolean partitioned, } // drop the table first, in case some previous test created it - ms.dropTable(dbName, tableName); + ms.dropTable(catName, dbName, tableName); ms.createTable(table); return table; @@ -263,10 +276,10 @@ protected long 
openTxn(TxnType txnType) throws MetaException { return txns.getFirst(); } - protected long allocateWriteId(String dbName, String tblName, long txnid) + protected long allocateWriteId(String catName, String dbName, String tblName, long txnid) throws MetaException, TxnAbortedException, NoSuchTxnException { return TxnStoreHelper.wrap(txnHandler) - .allocateTableWriteId(dbName, tblName, txnid); + .allocateTableWriteId(catName, dbName, tblName, txnid); } protected void addDeltaFileWithTxnComponents(Table t, Partition p, int numRecords, boolean abort) @@ -349,21 +362,22 @@ protected List getDirectories(HiveConf conf, Table t, Partition p) throws return paths; } - protected void burnThroughTransactions(String dbName, String tblName, int num) + protected void burnThroughTransactions(String catName, String dbName, String tblName, int num) throws MetaException, NoSuchTxnException, TxnAbortedException { - burnThroughTransactions(dbName, tblName, num, null, null); + burnThroughTransactions(catName, dbName, tblName, num, null, null); } - protected void burnThroughTransactions(String dbName, String tblName, int num, Set open, Set aborted) + protected void burnThroughTransactions(String catName, String dbName, String tblName, int num, Set open, Set aborted) throws NoSuchTxnException, TxnAbortedException, MetaException { - burnThroughTransactions(dbName, tblName, num, open, aborted, null); + burnThroughTransactions(catName, dbName, tblName, num, open, aborted, null); } - protected void burnThroughTransactions(String dbName, String tblName, int num, Set open, Set aborted, + protected void burnThroughTransactions(String catName, String dbName, String tblName, int num, Set open, Set aborted, LockRequest lockReq) throws MetaException, NoSuchTxnException, TxnAbortedException { OpenTxnsResponse rsp = txnHandler.openTxns(new OpenTxnRequest(num, "me", "localhost")); AllocateTableWriteIdsRequest awiRqst = new AllocateTableWriteIdsRequest(dbName, tblName); + awiRqst.setCatName(catName); 
awiRqst.setTxnIds(rsp.getTxn_ids()); AllocateTableWriteIdsResponse awiResp = txnHandler.allocateTableWriteIds(awiRqst); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner.java b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner.java index 30815442ff25..44eca7b90f36 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner.java @@ -20,6 +20,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.ReplChangeManager; +import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.CommitTxnRequest; import org.apache.hadoop.hive.metastore.api.CompactionRequest; import org.apache.hadoop.hive.metastore.api.CompactionResponse; @@ -111,9 +112,10 @@ public void testRetryAfterFailedCleanup(boolean delayEnabled) throws Exception { addBaseFile(t, null, 20L, 20); addDeltaFile(t, null, 21L, 22L, 2); addDeltaFile(t, null, 23L, 24L, 2); - burnThroughTransactions("default", "retry_test", 25); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "retry_test", 25); CompactionRequest rqst = new CompactionRequest("default", "retry_test", CompactionType.MAJOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); long compactTxn = compactInTxn(rqst); addBaseFile(t, null, 25L, 25, compactTxn); @@ -183,9 +185,10 @@ public void testRetentionAfterFailedCleanup() throws Exception { addBaseFile(t, null, 20L, 20); addDeltaFile(t, null, 21L, 22L, 2); addDeltaFile(t, null, 23L, 24L, 2); - burnThroughTransactions("default", "retry_test", 25); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "retry_test", 25); CompactionRequest rqst = new CompactionRequest("default", "retry_test", CompactionType.MAJOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); long compactTxn = compactInTxn(rqst); addBaseFile(t, null, 25L, 25, compactTxn); @@ 
-233,9 +236,10 @@ public void cleanupAfterMajorTableCompaction() throws Exception { addDeltaFile(t, null, 21L, 22L, 2); addDeltaFile(t, null, 23L, 24L, 2); - burnThroughTransactions("default", "camtc", 25); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "camtc", 25); CompactionRequest rqst = new CompactionRequest("default", "camtc", CompactionType.MAJOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); long compactTxn = compactInTxn(rqst); addBaseFile(t, null, 25L, 25, compactTxn); @@ -261,9 +265,10 @@ public void cleanupAfterIOWAndMajorTableCompaction() throws Exception { addDeltaFile(t, null, 23L, 24L, 2); addBaseFile(t, null, 25L, 25); //IOW - burnThroughTransactions("default", "camtc", 25); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "camtc", 25); CompactionRequest rqst = new CompactionRequest("default", "camtc", CompactionType.MAJOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); long compactTxn = compactInTxn(rqst); addBaseFile(t, null, 25L, 25, compactTxn); @@ -288,9 +293,10 @@ public void cleanupAfterMajorTableCompactionWithLongRunningQuery() throws Except addDeltaFile(t, null, 21L, 22L, 2); addDeltaFile(t, null, 23L, 24L, 2); - burnThroughTransactions("default", "camtc", 25); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "camtc", 25); CompactionRequest rqst = new CompactionRequest("default", "camtc", CompactionType.MAJOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); long compactTxn = compactInTxn(rqst, CommitAction.MARK_COMPACTED); addBaseFile(t, null, 25L, 25, 26); @@ -337,9 +343,10 @@ public void cleanupAfterMajorPartitionCompaction() throws Exception { addDeltaFile(t, p, 23L, 24L, 2); addBaseFile(t, p, 25L, 25); - burnThroughTransactions("default", "campc", 25); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "campc", 25); CompactionRequest rqst = new CompactionRequest("default", "campc", CompactionType.MAJOR); + 
rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); rqst.setPartitionname("ds=today"); compactInTxn(rqst); @@ -365,9 +372,10 @@ public void cleanupAfterMinorTableCompaction() throws Exception { addDeltaFile(t, null, 23L, 24L, 2); addDeltaFile(t, null, 21L, 24L, 4); - burnThroughTransactions("default", "camitc", 25); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "camitc", 25); CompactionRequest rqst = new CompactionRequest("default", "camitc", CompactionType.MINOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); compactInTxn(rqst); startCleaner(); @@ -404,9 +412,10 @@ public void cleanupAfterMinorPartitionCompaction() throws Exception { addDeltaFile(t, p, 23L, 24L, 2); addDeltaFile(t, p, 21L, 24L, 4); - burnThroughTransactions("default", "camipc", 25); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "camipc", 25); CompactionRequest rqst = new CompactionRequest("default", "camipc", CompactionType.MINOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); rqst.setPartitionname("ds=today"); compactInTxn(rqst); @@ -443,9 +452,10 @@ public void cleanupAfterMajorPartitionCompactionNoBase() throws Exception { addDeltaFile(t, p, 23L, 24L, 2); addBaseFile(t, p, 25L, 25); - burnThroughTransactions("default", "campcnb", 25); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "campcnb", 25); CompactionRequest rqst = new CompactionRequest("default", "campcnb", CompactionType.MAJOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); rqst.setPartitionname("ds=today"); compactInTxn(rqst); @@ -470,9 +480,10 @@ public void droppedTable() throws Exception { addDeltaFile(t, null, 23L, 24L, 2); addBaseFile(t, null, 25L, 25); - burnThroughTransactions("default", "dt", 25); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "dt", 25); CompactionRequest rqst = new CompactionRequest("default", "dt", CompactionType.MINOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); compactInTxn(rqst); // Drop table will 
clean the table entry from the compaction queue and hence cleaner have no effect @@ -494,9 +505,10 @@ public void droppedPartition() throws Exception { addDeltaFile(t, p, 23L, 24L, 2); addBaseFile(t, p, 25L, 25); - burnThroughTransactions("default", "dp", 25); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "dp", 25); CompactionRequest rqst = new CompactionRequest("default", "dp", CompactionType.MAJOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); rqst.setPartitionname("ds=today"); compactInTxn(rqst); @@ -525,9 +537,10 @@ public void processCompactionCandidatesInParallel() throws Exception { partitions.add(p); } - burnThroughTransactions("default", "camipc", 25); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "camipc", 25); for (int i = 0; i < 10; i++) { CompactionRequest rqst = new CompactionRequest("default", "camipc", CompactionType.MINOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); rqst.setPartitionname("ds=today" + i); compactInTxn(rqst); } @@ -568,9 +581,10 @@ public void delayedCleanupAfterMajorCompaction() throws Exception { addDeltaFile(t, null, 23L, 24L, 2); addBaseFile(t, null, 25L, 25); - burnThroughTransactions("default", "dcamc", 25); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "dcamc", 25); CompactionRequest rqst = new CompactionRequest("default", "dcamc", CompactionType.MAJOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); compactInTxn(rqst); conf.setBoolVar(HIVE_COMPACTOR_DELAYED_CLEANUP_ENABLED, true); @@ -608,9 +622,10 @@ public void delayedCleanupAfterMinorCompactionOnPartition() throws Exception { addDeltaFile(t, p, 23L, 24L, 2); addDeltaFile(t, p, 21L, 24L, 4); - burnThroughTransactions("default", "dcamicop", 25); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "dcamicop", 25); CompactionRequest rqst = new CompactionRequest("default", "dcamicop", CompactionType.MINOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); 
rqst.setPartitionname("ds=today"); compactInTxn(rqst); @@ -660,16 +675,17 @@ public void delayedCleanupAfterMinorAndMajorCompaction() throws Exception { addDeltaFile(t, p, 21L, 21L, 1); addDeltaFile(t, p, 22L, 22L, 1); - burnThroughTransactions("default", "dcamimcop", 22); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "dcamimcop", 22); CompactionRequest rqst = new CompactionRequest("default", "dcamimcop", CompactionType.MINOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); rqst.setPartitionname("ds=today"); compactInTxn(rqst); addDeltaFile(t, p, 21L, 22L, 2); // one more delta after compact addDeltaFile(t, p, 23L, 23L, 1); - burnThroughTransactions("default", "dcamimcop", 1); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "dcamimcop", 1); conf.setBoolVar(HIVE_COMPACTOR_DELAYED_CLEANUP_ENABLED, true); conf.setTimeVar(HIVE_COMPACTOR_CLEANER_RETENTION_TIME, 5, TimeUnit.SECONDS); @@ -678,6 +694,7 @@ public void delayedCleanupAfterMinorAndMajorCompaction() throws Exception { Thread.sleep(conf.getTimeVar(HIVE_COMPACTOR_CLEANER_RETENTION_TIME, TimeUnit.MILLISECONDS)); rqst = new CompactionRequest("default", "dcamimcop", CompactionType.MAJOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); rqst.setPartitionname("ds=today"); long compactTxn = compactInTxn(rqst); addBaseFile(t, p, 23L, 23, compactTxn); @@ -730,7 +747,7 @@ public void testReadyForCleaningPileup() throws Exception { addBaseFile(t, p, 20L, 20); addDeltaFile(t, p, 21L, 21L, 1); addDeltaFile(t, p, 22L, 22L, 1); - burnThroughTransactions(dbName, tblName, 22); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, dbName, tblName, 22); // block cleaner with an open txn long blockingTxn = openTxn(); @@ -738,6 +755,7 @@ public void testReadyForCleaningPileup() throws Exception { .registerMinOpenWriteId(dbName, tblName, blockingTxn); CompactionRequest rqst = new CompactionRequest(dbName, tblName, CompactionType.MINOR); + 
rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); rqst.setPartitionname(partName); long compactTxn = compactInTxn(rqst); addDeltaFile(t, p, 21, 22, 2); @@ -757,8 +775,9 @@ public void testReadyForCleaningPileup() throws Exception { // major compaction addDeltaFile(t, p, 23L, 23L, 1); addDeltaFile(t, p, 24L, 24L, 1); - burnThroughTransactions(dbName, tblName, 2, null, new HashSet<>(Collections.singletonList(compactTxn + 1))); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, dbName, tblName, 2, null, new HashSet<>(Collections.singletonList(compactTxn + 1))); rqst = new CompactionRequest(dbName, tblName, CompactionType.MAJOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); rqst.setPartitionname(partName); compactTxn = compactInTxn(rqst); addBaseFile(t, p, 24, 24, compactTxn); @@ -812,9 +831,10 @@ public void noCleanupAfterMajorCompaction() throws Exception { addDeltaFile(t, null, 23L, 24L, 2); addBaseFile(t, null, 25L, 25); - burnThroughTransactions("default", "dcamc", 25); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "dcamc", 25); CompactionRequest rqst = new CompactionRequest("default", "dcamc", CompactionType.MAJOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); compactInTxn(rqst); startCleaner(); @@ -832,6 +852,7 @@ public void noCleanupAfterMajorCompaction() throws Exception { t.getParameters().put("no_cleanup", "false"); ms.alter_table("default", "dcamc", t); rqst = new CompactionRequest("default", "dcamc", CompactionType.MAJOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); compactInTxn(rqst); startCleaner(); @@ -858,9 +879,10 @@ public void noCleanupAfterMinorCompactionOnPartition() throws Exception { addDeltaFile(t, p, 23L, 24L, 2); addDeltaFile(t, p, 21L, 24L, 4); - burnThroughTransactions("default", "dcamicop", 25); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "dcamicop", 25); CompactionRequest rqst = new CompactionRequest("default", "dcamicop", CompactionType.MINOR); + 
rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); rqst.setPartitionname("ds=today"); compactInTxn(rqst); @@ -880,6 +902,7 @@ public void noCleanupAfterMinorCompactionOnPartition() throws Exception { p.getParameters().put("NO_CLEANUP", "false"); ms.alter_partition("default", "dcamicop", p); rqst = new CompactionRequest("default", "dcamicop", CompactionType.MINOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); rqst.setPartitionname("ds=today"); compactInTxn(rqst); @@ -915,9 +938,10 @@ public void withSingleBaseCleanerSucceeds() throws Exception { addBaseFile(t, null, 25L, 25); - burnThroughTransactions("default", "dcamc", 25); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "dcamc", 25); CompactionRequest rqst = new CompactionRequest("default", "dcamc", CompactionType.MAJOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); compactInTxn(rqst); startCleaner(); @@ -935,12 +959,13 @@ public void withNewerBaseCleanerSucceeds() throws Exception { addBaseFile(t, null, 25L, 25); - burnThroughTransactions("default", "dcamc", 25); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "dcamc", 25); CompactionRequest rqst = new CompactionRequest("default", "dcamc", CompactionType.MAJOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); compactInTxn(rqst); - burnThroughTransactions("default", "dcamc", 1); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "dcamc", 1); addBaseFile(t, null, 26L, 26); startCleaner(); @@ -962,9 +987,10 @@ public void withNotYetVisibleBase() throws Exception { Table t = newTable(dbName, tableName, false); addBaseFile(t, null, 20L, 20); - burnThroughTransactions(dbName, tableName, 25); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, 25); CompactionRequest rqst = new CompactionRequest(dbName, tableName, CompactionType.MAJOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); long compactTxn = compactInTxn(rqst); addBaseFile(t, null, 25L, 25, compactTxn); @@ 
-983,9 +1009,10 @@ public void cleanMultipleTimesWithSameWatermark() throws Exception { addBaseFile(t, null, 20L, 20); addDeltaFile(t, null, 21L, 22L, 2); - burnThroughTransactions(dbName, tableName, 22); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, 22); CompactionRequest rqst = new CompactionRequest(dbName, tableName, CompactionType.MAJOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); addBaseFile(t, null, 22L, 22); compactInTxn(rqst); @@ -1015,9 +1042,10 @@ public void nothingToCleanAfterAbortsBase() throws Exception { addBaseFile(t, null, 20L, 1); addDeltaFile(t, null, 21L, 21L, 2); addDeltaFile(t, null, 22L, 22L, 2); - burnThroughTransactions(dbName, tableName, 22, null, new HashSet<>(Arrays.asList(21L, 22L))); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, 22, null, new HashSet<>(Arrays.asList(21L, 22L))); CompactionRequest rqst = new CompactionRequest(dbName, tableName, CompactionType.MAJOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); compactInTxn(rqst); CompactionResponse response = txnHandler.compact(rqst); @@ -1046,9 +1074,10 @@ public void nothingToCleanAfterAbortsDelta() throws Exception { addDeltaFile(t, null, 20L, 20L, 1); addDeltaFile(t, null, 21L, 21L, 2); addDeltaFile(t, null, 22L, 22L, 2); - burnThroughTransactions(dbName, tableName, 22, null, new HashSet<>(Arrays.asList(21L, 22L))); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, 22, null, new HashSet<>(Arrays.asList(21L, 22L))); CompactionRequest rqst = new CompactionRequest(dbName, tableName, CompactionType.MAJOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); compactInTxn(rqst); CompactionResponse response = txnHandler.compact(rqst); @@ -1081,7 +1110,7 @@ public void testReady() throws Exception { addDeltaFile(t, p, 20L, 20L, 1); addDeltaFile(t, p, 21L, 21L, 1); addDeltaFile(t, p, 22L, 22L, 1); - burnThroughTransactions(dbName, tblName, 22); + 
burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, dbName, tblName, 22); // block cleaner with an open txn long blockingTxn = openTxn(); @@ -1089,6 +1118,7 @@ public void testReady() throws Exception { .registerMinOpenWriteId(dbName, tblName, blockingTxn); CompactionRequest rqst = new CompactionRequest(dbName, tblName, CompactionType.MINOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); rqst.setPartitionname(partName); long compactTxn = compactInTxn(rqst); addDeltaFile(t, p, 20, 22, 2, compactTxn); @@ -1118,9 +1148,10 @@ public void testCompactionHighWatermarkIsHonored() throws Exception { addDeltaFile(t, p, 20L, 20L, 1); addDeltaFile(t, p, 21L, 21L, 1); addDeltaFile(t, p, 22L, 22L, 1); - burnThroughTransactions(dbName, tblName, 22); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, dbName, tblName, 22); CompactionRequest rqst = new CompactionRequest(dbName, tblName, CompactionType.MINOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); rqst.setPartitionname(partName); long compactTxn = compactInTxn(rqst); addDeltaFile(t, p, 20, 22, 3, compactTxn); @@ -1128,9 +1159,10 @@ public void testCompactionHighWatermarkIsHonored() throws Exception { //2nd minor addDeltaFile(t, p, 23L, 23L, 1); addDeltaFile(t, p, 24L, 24L, 1); - burnThroughTransactions(dbName, tblName, 2); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, dbName, tblName, 2); rqst = new CompactionRequest(dbName, tblName, CompactionType.MINOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); rqst.setPartitionname(partName); compactTxn = compactInTxn(rqst); addDeltaFile(t, p, 20, 24, 5, compactTxn); @@ -1169,9 +1201,10 @@ public void testCleanupOnConcurrentMinorCompactions() throws Exception { // Overlapping compacted deltas with different visibilityTxnIDs simulating concurrent compaction from two workers addDeltaFile(t, null, 22L, 23L, 2, 24); addDeltaFile(t, null, 22L, 23L, 2, 25); - burnThroughTransactions(dbName, tblName, 25); + 
burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, dbName, tblName, 25); CompactionRequest rqst = new CompactionRequest(dbName, tblName, CompactionType.MINOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); compactInTxn(rqst); startCleaner(); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleanerWithMinHistoryWriteId.java b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleanerWithMinHistoryWriteId.java index 8b1216335acd..066bdb469210 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleanerWithMinHistoryWriteId.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleanerWithMinHistoryWriteId.java @@ -20,6 +20,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.CommitTxnRequest; import org.apache.hadoop.hive.metastore.api.CompactionRequest; import org.apache.hadoop.hive.metastore.api.CompactionType; @@ -61,6 +62,7 @@ protected boolean useMinHistoryWriteId() { public void cleanupAfterAbortedAndRetriedMajorCompaction() throws Exception { Table t = prepareTestTable(); CompactionRequest rqst = new CompactionRequest("default", "camtc", CompactionType.MAJOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); long compactTxn = compactInTxn(rqst, CommitAction.ABORT); addBaseFile(t, null, 25L, 25, compactTxn); @@ -85,6 +87,7 @@ public void cleanupAfterAbortedAndRetriedMajorCompaction() throws Exception { public void cleanupAfterKilledAndRetriedMajorCompaction() throws Exception { Table t = prepareTestTable(); CompactionRequest rqst = new CompactionRequest("default", "camtc", CompactionType.MAJOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); long compactTxn = compactInTxn(rqst, CommitAction.NONE); addBaseFile(t, null, 25L, 25, compactTxn); @@ -124,6 +127,7 @@ private static void revokeTimedoutWorkers(Configuration conf) throws Exception { public void 
cleanupAfterMajorCompactionWithQueryWaitingToLockTheSnapshot() throws Exception { Table t = prepareTestTable(); CompactionRequest rqst = new CompactionRequest("default", "camtc", CompactionType.MAJOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); long compactTxn = compactInTxn(rqst, CommitAction.MARK_COMPACTED); addBaseFile(t, null, 25L, 25, compactTxn); @@ -153,7 +157,7 @@ private Table prepareTestTable() throws Exception { addDeltaFile(t, null, 23L, 24L, 2); addDeltaFile(t, null, 25L, 25, 2); - burnThroughTransactions("default", "camtc", 25); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "camtc", 25); return t; } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCompactionMetrics.java b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCompactionMetrics.java index 5632ba6bb63e..a0889a00cdb2 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCompactionMetrics.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCompactionMetrics.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.HMSMetricsListener; import org.apache.hadoop.hive.metastore.HiveMetaStoreClient; +import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.CommitTxnRequest; import org.apache.hadoop.hive.metastore.api.CompactionType; import org.apache.hadoop.hive.metastore.api.CompactionRequest; @@ -113,7 +114,7 @@ public void testInitiatorPerfMetricsEnabled() throws Exception { comp.setOperationType(DataOperationType.UPDATE); components.add(comp); } - burnThroughTransactions("default", "ime", 23); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "ime", 23); long txnid = openTxn(); LockRequest req = new LockRequest(components, "me", "localhost"); @@ -121,7 +122,7 @@ public void testInitiatorPerfMetricsEnabled() throws Exception { LockResponse res = txnHandler.lock(req); Assert.assertEquals(LockState.ACQUIRED, 
res.getState()); - long writeid = allocateWriteId("default", "ime", txnid); + long writeid = allocateWriteId(Warehouse.DEFAULT_CATALOG_NAME, "default", "ime", txnid); Assert.assertEquals(24, writeid); txnHandler.commitTxn(new CommitTxnRequest(txnid)); @@ -160,7 +161,7 @@ public void testInitiatorPerfMetricsDisabled() throws Exception { comp.setOperationType(DataOperationType.UPDATE); components.add(comp); } - burnThroughTransactions("default", "imd", 23); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "imd", 23); long txnid = openTxn(); LockRequest req = new LockRequest(components, "me", "localhost"); @@ -168,7 +169,7 @@ public void testInitiatorPerfMetricsDisabled() throws Exception { LockResponse res = txnHandler.lock(req); Assert.assertEquals(LockState.ACQUIRED, res.getState()); - long writeid = allocateWriteId("default", "imd", txnid); + long writeid = allocateWriteId(Warehouse.DEFAULT_CATALOG_NAME, "default", "imd", txnid); Assert.assertEquals(24, writeid); txnHandler.commitTxn(new CommitTxnRequest(txnid)); @@ -202,7 +203,7 @@ public void testOldestReadyForCleaningAge() throws Exception { addBaseFile(old, oldP, 20L, 20); addDeltaFile(old, oldP, 21L, 22L, 2); addDeltaFile(old, oldP, 23L, 24L, 2); - burnThroughTransactions(DB_NAME, OLD_TABLE_NAME, 25); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, DB_NAME, OLD_TABLE_NAME, 25); doCompaction(DB_NAME, OLD_TABLE_NAME, OLD_PARTITION_NAME, CompactionType.MINOR); long oldTableEnd = System.currentTimeMillis(); @@ -211,7 +212,7 @@ public void testOldestReadyForCleaningAge() throws Exception { addBaseFile(young, youngP, 20L, 20); addDeltaFile(young, youngP, 21L, 22L, 2); addDeltaFile(young, youngP, 23L, 24L, 2); - burnThroughTransactions(DB_NAME, YOUNG_TABLE_NAME, 25); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, DB_NAME, YOUNG_TABLE_NAME, 25); doCompaction(DB_NAME, YOUNG_TABLE_NAME, YOUNG_PARTITION_NAME, CompactionType.MINOR); long acidMetricsStart = 
System.currentTimeMillis(); @@ -302,9 +303,10 @@ public void testCleanerPerfMetricsEnabled() throws Exception { partitions.add(p); } - burnThroughTransactions("default", "camipc", 25); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "camipc", 25); for (int i = 0; i < 10; i++) { CompactionRequest rqst = new CompactionRequest("default", "camipc", CompactionType.MINOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); rqst.setPartitionname("ds=today" + i); compactInTxn(rqst); } @@ -325,6 +327,7 @@ public void testCleanerPerfMetricsEnabled() throws Exception { addBaseFile(t, p, 25L, 25, 26 + i); CompactionRequest rqst = new CompactionRequest("default", "camipc", CompactionType.MAJOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); rqst.setPartitionname("ds=today" + i); compactInTxn(rqst); } @@ -356,9 +359,10 @@ public void testCleanerPerfMetricsDisabled() throws Exception { addDeltaFile(t, p, 21L, 22L, 2); addDeltaFile(t, p, 23L, 24L, 2); - burnThroughTransactions("default", "camipc", 25); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "camipc", 25); CompactionRequest rqst = new CompactionRequest("default", "camipc", CompactionType.MAJOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); rqst.setPartitionname("ds=today"); long compactTxn = compactInTxn(rqst); addBaseFile(t, p, 25L, 25, compactTxn); @@ -388,7 +392,7 @@ public void testWorkerPerfMetrics() throws Exception { addDeltaFile(t, p, 21L, 22L, 2); addDeltaFile(t, p, 23L, 24L, 2); - burnThroughTransactions("default", "mapwb", 25); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "mapwb", 25); CompactionRequest rqst = new CompactionRequest("default", "mapwb", CompactionType.MINOR); rqst.setPartitionname("ds=today"); @@ -435,32 +439,32 @@ public void testWorkerPerfMetrics() throws Exception { public void testUpdateCompactionMetrics() { ShowCompactResponse scr = new ShowCompactResponse(); List elements = new ArrayList<>(); - 
elements.add(generateElement(1,"db", "tb", null, CompactionType.MAJOR, TxnStore.FAILED_RESPONSE)); + elements.add(generateElement(1, Warehouse.DEFAULT_CATALOG_NAME, "db", "tb", null, CompactionType.MAJOR, TxnStore.FAILED_RESPONSE)); // Check for overwrite - elements.add(generateElement(2,"db", "tb", null, CompactionType.MAJOR, TxnStore.INITIATED_RESPONSE)); - elements.add(generateElement(3,"db", "tb2", null, CompactionType.MINOR, TxnStore.INITIATED_RESPONSE)); - elements.add(generateElement(5,"db", "tb3", "p1", CompactionType.MINOR, TxnStore.DID_NOT_INITIATE_RESPONSE)); + elements.add(generateElement(2, Warehouse.DEFAULT_CATALOG_NAME, "db", "tb", null, CompactionType.MAJOR, TxnStore.INITIATED_RESPONSE)); + elements.add(generateElement(3, Warehouse.DEFAULT_CATALOG_NAME, "db", "tb2", null, CompactionType.MINOR, TxnStore.INITIATED_RESPONSE)); + elements.add(generateElement(5, Warehouse.DEFAULT_CATALOG_NAME, "db", "tb3", "p1", CompactionType.MINOR, TxnStore.DID_NOT_INITIATE_RESPONSE)); // Check for overwrite where the order is different - elements.add(generateElement(4,"db", "tb3", "p1", CompactionType.MINOR, TxnStore.FAILED_RESPONSE)); + elements.add(generateElement(4, Warehouse.DEFAULT_CATALOG_NAME, "db", "tb3", "p1", CompactionType.MINOR, TxnStore.FAILED_RESPONSE)); - elements.add(generateElement(6,"db1", "tb", null, CompactionType.MINOR, TxnStore.FAILED_RESPONSE, + elements.add(generateElement(6,Warehouse.DEFAULT_CATALOG_NAME, "db1", "tb", null, CompactionType.MINOR, TxnStore.FAILED_RESPONSE, System.currentTimeMillis(), true, WORKER_VERSION, WORKER_VERSION, 10)); - elements.add(generateElement(7,"db1", "tb2", null, CompactionType.MINOR, TxnStore.FAILED_RESPONSE)); - elements.add(generateElement(8,"db1", "tb3", null, CompactionType.MINOR, TxnStore.FAILED_RESPONSE)); + elements.add(generateElement(7, Warehouse.DEFAULT_CATALOG_NAME, "db1", "tb2", null, CompactionType.MINOR, TxnStore.FAILED_RESPONSE)); + elements.add(generateElement(8, Warehouse.DEFAULT_CATALOG_NAME, 
"db1", "tb3", null, CompactionType.MINOR, TxnStore.FAILED_RESPONSE)); - elements.add(generateElement(9,"db2", "tb", null, CompactionType.MINOR, TxnStore.SUCCEEDED_RESPONSE)); - elements.add(generateElement(10,"db2", "tb2", null, CompactionType.MINOR, TxnStore.SUCCEEDED_RESPONSE)); - elements.add(generateElement(11,"db2", "tb3", null, CompactionType.MINOR, TxnStore.SUCCEEDED_RESPONSE)); - elements.add(generateElement(12,"db2", "tb4", null, CompactionType.MINOR, TxnStore.SUCCEEDED_RESPONSE)); + elements.add(generateElement(9, Warehouse.DEFAULT_CATALOG_NAME, "db2", "tb", null, CompactionType.MINOR, TxnStore.SUCCEEDED_RESPONSE)); + elements.add(generateElement(10, Warehouse.DEFAULT_CATALOG_NAME, "db2", "tb2", null, CompactionType.MINOR, TxnStore.SUCCEEDED_RESPONSE)); + elements.add(generateElement(11, Warehouse.DEFAULT_CATALOG_NAME, "db2", "tb3", null, CompactionType.MINOR, TxnStore.SUCCEEDED_RESPONSE)); + elements.add(generateElement(12, Warehouse.DEFAULT_CATALOG_NAME, "db2", "tb4", null, CompactionType.MINOR, TxnStore.SUCCEEDED_RESPONSE)); - elements.add(generateElement(13,"db3", "tb3", null, CompactionType.MINOR, TxnStore.WORKING_RESPONSE)); + elements.add(generateElement(13, Warehouse.DEFAULT_CATALOG_NAME, "db3", "tb3", null, CompactionType.MINOR, TxnStore.WORKING_RESPONSE)); // test null initiator version and worker version - elements.add(generateElement(14,"db3", "tb4", null, CompactionType.MINOR, TxnStore.WORKING_RESPONSE, + elements.add(generateElement(14,Warehouse.DEFAULT_CATALOG_NAME, "db3", "tb4", null, CompactionType.MINOR, TxnStore.WORKING_RESPONSE, System.currentTimeMillis(), false, null, null,20)); - elements.add(generateElement(15,"db3", "tb5", null, CompactionType.MINOR, TxnStore.WORKING_RESPONSE, + elements.add(generateElement(15,Warehouse.DEFAULT_CATALOG_NAME, "db3", "tb5", null, CompactionType.MINOR, TxnStore.WORKING_RESPONSE, System.currentTimeMillis(),true, WORKER_VERSION, WORKER_VERSION, 30)); - elements.add(generateElement(16,"db3", "tb6", null, 
CompactionType.MINOR, TxnStore.WORKING_RESPONSE)); - elements.add(generateElement(17,"db3", "tb7", null, CompactionType.MINOR, TxnStore.WORKING_RESPONSE, + elements.add(generateElement(16, Warehouse.DEFAULT_CATALOG_NAME, "db3", "tb6", null, CompactionType.MINOR, TxnStore.WORKING_RESPONSE)); + elements.add(generateElement(17,Warehouse.DEFAULT_CATALOG_NAME, "db3", "tb7", null, CompactionType.MINOR, TxnStore.WORKING_RESPONSE, System.currentTimeMillis(),true, WORKER_VERSION, WORKER_VERSION,40)); scr.setCompacts(elements); @@ -494,9 +498,9 @@ public void testUpdateCompactionMetrics() { @Test public void testAgeMetricsNotSet() { List elements = ImmutableList.of( - generateElement(1, "db", "tb", null, CompactionType.MAJOR, TxnStore.FAILED_RESPONSE, 1L), - generateElement(5, "db", "tb3", "p1", CompactionType.MINOR, TxnStore.DID_NOT_INITIATE_RESPONSE, 2L), - generateElement(9, "db2", "tb", null, CompactionType.MINOR, TxnStore.SUCCEEDED_RESPONSE, 3L) + generateElement(1, Warehouse.DEFAULT_CATALOG_NAME, "db", "tb", null, CompactionType.MAJOR, TxnStore.FAILED_RESPONSE, 1L), + generateElement(5, Warehouse.DEFAULT_CATALOG_NAME, "db", "tb3", "p1", CompactionType.MINOR, TxnStore.DID_NOT_INITIATE_RESPONSE, 2L), + generateElement(9, Warehouse.DEFAULT_CATALOG_NAME, "db2", "tb", null, CompactionType.MINOR, TxnStore.SUCCEEDED_RESPONSE, 3L) ); ShowCompactResponse scr = new ShowCompactResponse(); @@ -514,7 +518,7 @@ public void testInitiatedAgeMetrics() { ShowCompactResponse scr = new ShowCompactResponse(); long start = System.currentTimeMillis() - 1000L; List elements = ImmutableList.of( - generateElement(15, "db3", "tb5", null, CompactionType.MINOR, TxnStore.INITIATED_RESPONSE, start) + generateElement(15, Warehouse.DEFAULT_CATALOG_NAME, "db3", "tb5", null, CompactionType.MINOR, TxnStore.INITIATED_RESPONSE, start) ); scr.setCompacts(elements); @@ -533,7 +537,7 @@ public void testWorkingAgeMetrics() { long start = System.currentTimeMillis() - 1000L; List elements = ImmutableList.of( - 
generateElement(17, "db3", "tb7", null, CompactionType.MINOR, TxnStore.WORKING_RESPONSE, + generateElement(17, Warehouse.DEFAULT_CATALOG_NAME, "db3", "tb7", null, CompactionType.MINOR, TxnStore.WORKING_RESPONSE, System.currentTimeMillis(), true, WORKER_VERSION, WORKER_VERSION, start) ); @@ -553,7 +557,7 @@ public void testCleaningAgeMetrics() { long start = System.currentTimeMillis() - 1000L; List elements = ImmutableList.of( - generateElement(19, "db3", "tb7", null, CompactionType.MINOR, TxnStore.CLEANING_RESPONSE, + generateElement(19, Warehouse.DEFAULT_CATALOG_NAME, "db3", "tb7", null, CompactionType.MINOR, TxnStore.CLEANING_RESPONSE, System.currentTimeMillis(), true, WORKER_VERSION, WORKER_VERSION, -1L, start) ); @@ -573,9 +577,9 @@ public void testInitiatedAgeMetricsOrder() { long start = System.currentTimeMillis(); List elements = ImmutableList.of( - generateElement(15, "db3", "tb5", null, CompactionType.MINOR, TxnStore.INITIATED_RESPONSE, + generateElement(15, Warehouse.DEFAULT_CATALOG_NAME, "db3", "tb5", null, CompactionType.MINOR, TxnStore.INITIATED_RESPONSE, start - 1_000L), - generateElement(16, "db3", "tb6", null, CompactionType.MINOR, TxnStore.INITIATED_RESPONSE, + generateElement(16, Warehouse.DEFAULT_CATALOG_NAME, "db3", "tb6", null, CompactionType.MINOR, TxnStore.INITIATED_RESPONSE, start - 15_000L) ); @@ -586,9 +590,9 @@ public void testInitiatedAgeMetricsOrder() { // Check the reverse order elements = ImmutableList.of( - generateElement(16, "db3", "tb6", null, CompactionType.MINOR, TxnStore.INITIATED_RESPONSE, + generateElement(16, Warehouse.DEFAULT_CATALOG_NAME, "db3", "tb6", null, CompactionType.MINOR, TxnStore.INITIATED_RESPONSE, start - 25_000L), - generateElement(15, "db3", "tb5", null, CompactionType.MINOR, TxnStore.INITIATED_RESPONSE, + generateElement(15, Warehouse.DEFAULT_CATALOG_NAME, "db3", "tb5", null, CompactionType.MINOR, TxnStore.INITIATED_RESPONSE, start - 1_000L) ); scr.setCompacts(elements); @@ -604,9 +608,9 @@ public void 
testWorkingAgeMetricsOrder() { long start = System.currentTimeMillis(); List elements = ImmutableList.of( - generateElement(15, "db3", "tb5", null, CompactionType.MINOR, TxnStore.WORKING_RESPONSE, + generateElement(15, Warehouse.DEFAULT_CATALOG_NAME, "db3", "tb5", null, CompactionType.MINOR, TxnStore.WORKING_RESPONSE, start, false, WORKER_VERSION, WORKER_VERSION, start - 1_000L), - generateElement(16, "db3", "tb6", null, CompactionType.MINOR, TxnStore.WORKING_RESPONSE, + generateElement(16, Warehouse.DEFAULT_CATALOG_NAME, "db3", "tb6", null, CompactionType.MINOR, TxnStore.WORKING_RESPONSE, start, false, WORKER_VERSION, WORKER_VERSION, start - 15_000L) ); @@ -617,9 +621,9 @@ public void testWorkingAgeMetricsOrder() { // Check the reverse order elements = ImmutableList.of( - generateElement(16, "db3", "tb6", null, CompactionType.MINOR, TxnStore.WORKING_RESPONSE, + generateElement(16, Warehouse.DEFAULT_CATALOG_NAME, "db3", "tb6", null, CompactionType.MINOR, TxnStore.WORKING_RESPONSE, start, false, WORKER_VERSION, WORKER_VERSION, start - 25_000L), - generateElement(15, "db3", "tb5", null, CompactionType.MINOR, TxnStore.WORKING_RESPONSE, + generateElement(15, Warehouse.DEFAULT_CATALOG_NAME, "db3", "tb5", null, CompactionType.MINOR, TxnStore.WORKING_RESPONSE, start, false, WORKER_VERSION, WORKER_VERSION, start - 1_000L) ); scr.setCompacts(elements); @@ -635,9 +639,9 @@ public void testCleaningAgeMetricsOrder() { long start = System.currentTimeMillis(); List elements = ImmutableList.of( - generateElement(15, "db3", "tb5", null, CompactionType.MINOR, TxnStore.CLEANING_RESPONSE, + generateElement(15, Warehouse.DEFAULT_CATALOG_NAME, "db3", "tb5", null, CompactionType.MINOR, TxnStore.CLEANING_RESPONSE, start, false, WORKER_VERSION, WORKER_VERSION, -1L, start - 1_000L), - generateElement(16, "db3", "tb6", null, CompactionType.MINOR, TxnStore.CLEANING_RESPONSE, + generateElement(16, Warehouse.DEFAULT_CATALOG_NAME, "db3", "tb6", null, CompactionType.MINOR, 
TxnStore.CLEANING_RESPONSE, start, false, WORKER_VERSION, WORKER_VERSION, -1L, start - 15_000L) ); @@ -648,9 +652,9 @@ public void testCleaningAgeMetricsOrder() { // Check the reverse order elements = ImmutableList.of( - generateElement(16, "db3", "tb6", null, CompactionType.MINOR, TxnStore.CLEANING_RESPONSE, + generateElement(16, Warehouse.DEFAULT_CATALOG_NAME, "db3", "tb6", null, CompactionType.MINOR, TxnStore.CLEANING_RESPONSE, start, false, WORKER_VERSION, WORKER_VERSION, -1L, start - 25_000L), - generateElement(15, "db3", "tb5", null, CompactionType.MINOR, TxnStore.CLEANING_RESPONSE, + generateElement(15, Warehouse.DEFAULT_CATALOG_NAME, "db3", "tb5", null, CompactionType.MINOR, TxnStore.CLEANING_RESPONSE, start, false, WORKER_VERSION, WORKER_VERSION, -1L, start - 1_000L) ); scr.setCompacts(elements); @@ -669,7 +673,7 @@ public void testDBMetrics() throws Exception { MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.TXN_USE_MIN_HISTORY_LEVEL, false); TxnHandler.ConfVars.setUseMinHistoryLevel(false); long start = System.currentTimeMillis(); - burnThroughTransactions(t.getDbName(), t.getTableName(), 24, new HashSet<>(Arrays.asList(22L, 23L, 24L)), null); + burnThroughTransactions(t.getCatName(), t.getDbName(), t.getTableName(), 24, new HashSet<>(Arrays.asList(22L, 23L, 24L)), null); openTxn(TxnType.REPL_CREATED); LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, t.getDbName()); @@ -722,7 +726,7 @@ public void testDBMetrics() throws Exception { Metrics.getOrCreateGauge(MetricsConstants.NUM_TXN_TO_WRITEID).intValue()); start = System.currentTimeMillis(); - burnThroughTransactions(dbName, tblName, 3, null, new HashSet<>(Arrays.asList(26L, 28L))); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, dbName, tblName, 3, null, new HashSet<>(Arrays.asList(26L, 28L))); Thread.sleep(1000); runAcidMetricService(); @@ -743,13 +747,13 @@ public void testTxnHandlerCounters() throws Exception { String tblName = "txnhandlercounters"; Table 
t = newTable(dbName, tblName, false); - burnThroughTransactions(t.getDbName(), t.getTableName(), 3, null, new HashSet<>(Arrays.asList(2L, 3L))); + burnThroughTransactions(t.getCatName(), t.getDbName(), t.getTableName(), 3, null, new HashSet<>(Arrays.asList(2L, 3L))); Assert.assertEquals(MetricsConstants.TOTAL_NUM_ABORTED_TXNS + " value incorrect", 2, Metrics.getOrCreateCounter(MetricsConstants.TOTAL_NUM_ABORTED_TXNS).getCount()); Assert.assertEquals(MetricsConstants.TOTAL_NUM_COMMITTED_TXNS + " value incorrect", 1, Metrics.getOrCreateCounter(MetricsConstants.TOTAL_NUM_COMMITTED_TXNS).getCount()); - burnThroughTransactions(t.getDbName(), t.getTableName(), 3, null, new HashSet<>(Collections.singletonList(4L))); + burnThroughTransactions(t.getCatName(), t.getDbName(), t.getTableName(), 3, null, new HashSet<>(Collections.singletonList(4L))); Assert.assertEquals(MetricsConstants.TOTAL_NUM_ABORTED_TXNS + " value incorrect", 3, Metrics.getOrCreateCounter(MetricsConstants.TOTAL_NUM_ABORTED_TXNS).getCount()); Assert.assertEquals(MetricsConstants.TOTAL_NUM_COMMITTED_TXNS + " value incorrect", @@ -776,11 +780,11 @@ public void testTablesWithXAbortedTxns() throws Exception { LockRequest lockReq = new LockRequest(Lists.newArrayList(comp), "me", "localhost"); comp.setTablename(t1.getTableName()); - burnThroughTransactions(t1.getDbName(), t1.getTableName(), 20, null, abort1, lockReq); + burnThroughTransactions(t1.getCatName(), t1.getDbName(), t1.getTableName(), 20, null, abort1, lockReq); comp.setTablename(t2.getTableName()); - burnThroughTransactions(t2.getDbName(), t2.getTableName(), 20, null, abort2, lockReq); + burnThroughTransactions(t2.getCatName(), t2.getDbName(), t2.getTableName(), 20, null, abort2, lockReq); comp.setTablename(t3.getTableName()); - burnThroughTransactions(t3.getDbName(), t3.getTableName(), 30, null, abort3, lockReq); + burnThroughTransactions(t3.getCatName(), t3.getDbName(), t3.getTableName(), 30, null, abort3, lockReq); runAcidMetricService(); @@ -815,13 
+819,13 @@ public void testPartTablesWithXAbortedTxns() throws Exception { comp.setPartitionname(String.format(partPattern, part1)); - burnThroughTransactions(t.getDbName(), t.getTableName(), 10, null, abort1, lockReq); + burnThroughTransactions(t.getCatName(), t.getDbName(), t.getTableName(), 10, null, abort1, lockReq); comp.setPartitionname(String.format(partPattern, part2)); - burnThroughTransactions(t.getDbName(), t.getTableName(), 10, null, abort2, lockReq); + burnThroughTransactions(t.getCatName(), t.getDbName(), t.getTableName(), 10, null, abort2, lockReq); comp.setPartitionname(String.format(partPattern, part3)); - burnThroughTransactions(t.getDbName(), t.getTableName(), 10, null, null, lockReq); + burnThroughTransactions(t.getCatName(), t.getDbName(), t.getTableName(), 10, null, null, lockReq); runAcidMetricService(); @@ -842,12 +846,12 @@ public void testWritesToDisabledCompactionTable() throws Exception { Map params = new HashMap<>(); params.put(hive_metastoreConstants.NO_AUTO_COMPACT, "true"); Table disabledTbl = newTable(dbName, "comp_disabled", false, params); - burnThroughTransactions(disabledTbl.getDbName(), disabledTbl.getTableName(), 1, null, null); - burnThroughTransactions(disabledTbl.getDbName(), disabledTbl.getTableName(), 1, null, new HashSet<>( + burnThroughTransactions(disabledTbl.getCatName(), disabledTbl.getDbName(), disabledTbl.getTableName(), 1, null, null); + burnThroughTransactions(disabledTbl.getCatName(), disabledTbl.getDbName(), disabledTbl.getTableName(), 1, null, new HashSet<>( Collections.singletonList(2L))); Table enabledTbl = newTable(dbName, "comp_enabled", false); - burnThroughTransactions(enabledTbl.getDbName(), enabledTbl.getTableName(), 1, null, null); + burnThroughTransactions(enabledTbl.getCatName(), enabledTbl.getDbName(), enabledTbl.getTableName(), 1, null, null); Assert.assertEquals(MetricsConstants.WRITES_TO_DISABLED_COMPACTION_TABLE + " value incorrect", 2, 
Metrics.getOrCreateGauge(MetricsConstants.WRITES_TO_DISABLED_COMPACTION_TABLE).intValue()); @@ -878,7 +882,7 @@ public void testInitiatorDurationMeasuredCorrectly() throws Exception { comp.setOperationType(DataOperationType.UPDATE); components.add(comp); } - burnThroughTransactions(DEFAULT_DB, TABLE_NAME, 25); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, DEFAULT_DB, TABLE_NAME, 25); long txnId = openTxn(); @@ -887,7 +891,7 @@ public void testInitiatorDurationMeasuredCorrectly() throws Exception { LockResponse res = txnHandler.lock(req); Assert.assertEquals(LockState.ACQUIRED, res.getState()); - allocateWriteId(DEFAULT_DB, TABLE_NAME, txnId); + allocateWriteId(Warehouse.DEFAULT_CATALOG_NAME, DEFAULT_DB, TABLE_NAME, txnId); txnHandler.commitTxn(new CommitTxnRequest(txnId)); long initiatorStart = System.currentTimeMillis(); @@ -912,7 +916,7 @@ public void testCleanerDurationMeasuredCorrectly() throws Exception { addBaseFile(table, partition, 20L, 20); addDeltaFile(table, partition, 21L, 22L, 2); addDeltaFile(table, partition, 23L, 24L, 2); - burnThroughTransactions(DB_NAME, TABLE_NAME, 25); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, DB_NAME, TABLE_NAME, 25); doCompaction(DB_NAME, TABLE_NAME, PARTITION_NAME, CompactionType.MINOR); long cleanerStart = System.currentTimeMillis(); @@ -962,7 +966,7 @@ public void testInitiatorFailuresCountedCorrectly() throws Exception { components.add(comp); } - burnThroughTransactions(DEFAULT_DB, tableName, 25); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, DEFAULT_DB, tableName, 25); long txnid = openTxn(); @@ -971,7 +975,7 @@ public void testInitiatorFailuresCountedCorrectly() throws Exception { LockResponse res = txnHandler.lock(req); Assert.assertEquals(LockState.ACQUIRED, res.getState()); - long writeid = allocateWriteId(DEFAULT_DB, tableName, txnid); + long writeid = allocateWriteId(Warehouse.DEFAULT_CATALOG_NAME, DEFAULT_DB, tableName, txnid); Assert.assertEquals(26, writeid); 
txnHandler.commitTxn(new CommitTxnRequest(txnid)); } @@ -1018,9 +1022,10 @@ public void testCleanerFailuresCountedCorrectly() throws Exception { addDeltaFile(table, p, 21L, 24L, 4); } - burnThroughTransactions(DEFAULT_DB, tableName, 25); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, DEFAULT_DB, tableName, 25); for (int i = 0; i < partitionCount; i++) { CompactionRequest rqst = new CompactionRequest(DEFAULT_DB, tableName, CompactionType.MINOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); rqst.setPartitionname("ds=" + PARTITION_NAME + i); compactInTxn(rqst); } @@ -1038,33 +1043,34 @@ public void testCleanerFailuresCountedCorrectly() throws Exception { .getCount()); } - private ShowCompactResponseElement generateElement(long id, String db, String table, String partition, + private ShowCompactResponseElement generateElement(long id, String cat, String db, String table, String partition, CompactionType type, String state) { - return generateElement(id, db, table, partition, type, state, System.currentTimeMillis()); + return generateElement(id, cat, db, table, partition, type, state, System.currentTimeMillis()); } - private ShowCompactResponseElement generateElement(long id, String db, String table, String partition, + private ShowCompactResponseElement generateElement(long id, String cat, String db, String table, String partition, CompactionType type, String state, long enqueueTime) { - return generateElement(id, db, table, partition, type, state, enqueueTime, false); + return generateElement(id, cat, db, table, partition, type, state, enqueueTime, false); } - private ShowCompactResponseElement generateElement(long id, String db, String table, String partition, + private ShowCompactResponseElement generateElement(long id, String cat, String db, String table, String partition, CompactionType type, String state, long enqueueTime, boolean manuallyInitiatedCompaction) { - return generateElement(id, db, table, partition, type, state, enqueueTime, 
manuallyInitiatedCompaction, + return generateElement(id, cat, db, table, partition, type, state, enqueueTime, manuallyInitiatedCompaction, null, null, -1); } - private ShowCompactResponseElement generateElement(long id, String db, String table, String partition, + private ShowCompactResponseElement generateElement(long id, String cat, String db, String table, String partition, CompactionType type, String state, long enqueueTime, boolean manuallyInitiatedCompaction, String initiatorVersion, String workerVersion, long startTime) { - return generateElement(id, db, table, partition, type, state, enqueueTime, manuallyInitiatedCompaction, + return generateElement(id, cat, db, table, partition, type, state, enqueueTime, manuallyInitiatedCompaction, initiatorVersion, workerVersion, startTime, -1L); } - private ShowCompactResponseElement generateElement(long id, String db, String table, String partition, + private ShowCompactResponseElement generateElement(long id, String cat, String db, String table, String partition, CompactionType type, String state, long enqueueTime, boolean manuallyInitiatedCompaction, String initiatorVersion, String workerVersion, long startTime, long cleanerStartTime) { ShowCompactResponseElement element = new ShowCompactResponseElement(db, table, type, state); + element.setCatName(cat); element.setId(id); element.setPartitionname(partition); element.setEnqueueTime(enqueueTime); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestDeltaFilesMetrics.java b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestDeltaFilesMetrics.java index 28ec7750e9cd..d19f8a053831 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestDeltaFilesMetrics.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestDeltaFilesMetrics.java @@ -121,7 +121,7 @@ public void testDeltaFileMetricPartitionedTable() throws Exception { components.add(createLockComponent(dbName, tblName, partName)); - burnThroughTransactions(dbName, tblName, 
23); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, dbName, tblName, 23); long txnId = openTxn(); LockRequest req = new LockRequest(components, "me", "localhost"); @@ -129,7 +129,7 @@ public void testDeltaFileMetricPartitionedTable() throws Exception { LockResponse res = txnHandler.lock(req); Assert.assertEquals(LockState.ACQUIRED, res.getState()); - long writeId = allocateWriteId(dbName, tblName, txnId); + long writeId = allocateWriteId(Warehouse.DEFAULT_CATALOG_NAME, dbName, tblName, txnId); Assert.assertEquals(24, writeId); txnHandler.commitTxn(new CommitTxnRequest(txnId)); @@ -169,7 +169,7 @@ public void testDeltaFileMetricPartitionedTable() throws Exception { addDeltaFile(t, p, 27L, 28L, 20); addDeltaFile(t, p, 29L, 30L, 2); - burnThroughTransactions(dbName, tblName, 30); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, dbName, tblName, 30); txnId = openTxn(); req = new LockRequest(components, "me", "localhost"); @@ -177,7 +177,7 @@ public void testDeltaFileMetricPartitionedTable() throws Exception { res = txnHandler.lock(req); Assert.assertEquals(LockState.ACQUIRED, res.getState()); - writeId = allocateWriteId(dbName, tblName, txnId); + writeId = allocateWriteId(Warehouse.DEFAULT_CATALOG_NAME, dbName, tblName, txnId); Assert.assertEquals(55, writeId); txnHandler.commitTxn(new CommitTxnRequest(txnId)); // Change these params to initiate MINOR compaction @@ -277,7 +277,7 @@ public void testDeltaFileMetricMultiPartitionedTable() throws Exception { components.add(createLockComponent(dbName, tblName, part2Name)); components.add(createLockComponent(dbName, tblName, part3Name)); - burnThroughTransactions(dbName, tblName, 19); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, dbName, tblName, 19); long txnId = openTxn(); LockRequest req = new LockRequest(components, "me", "localhost"); @@ -285,7 +285,7 @@ public void testDeltaFileMetricMultiPartitionedTable() throws Exception { LockResponse res = txnHandler.lock(req); 
Assert.assertEquals(LockState.ACQUIRED, res.getState()); - allocateWriteId(dbName, tblName, txnId); + allocateWriteId(Warehouse.DEFAULT_CATALOG_NAME, dbName, tblName, txnId); txnHandler.commitTxn(new CommitTxnRequest(txnId)); HiveConf.setIntVar(conf, HiveConf.ConfVars.HIVE_COMPACTOR_DELTA_NUM_THRESHOLD, 2); @@ -360,7 +360,7 @@ public void testDeltaFileMetricUnpartitionedTable() throws Exception { addDeltaFile(t, null, 23L, 24L, 20); components.add(createLockComponent(dbName, tblName, null)); - burnThroughTransactions(dbName, tblName, 24); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, dbName, tblName, 24); long txnId = openTxn(); LockRequest req = new LockRequest(components, "me", "localhost"); @@ -368,7 +368,7 @@ public void testDeltaFileMetricUnpartitionedTable() throws Exception { LockResponse res = txnHandler.lock(req); Assert.assertEquals(LockState.ACQUIRED, res.getState()); - long writeId = allocateWriteId(dbName, tblName, txnId); + long writeId = allocateWriteId(Warehouse.DEFAULT_CATALOG_NAME, dbName, tblName, txnId); Assert.assertEquals(25, writeId); txnHandler.commitTxn(new CommitTxnRequest(txnId)); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestInitiator.java b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestInitiator.java index 1b2d6e1701a3..e386a788fa0d 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestInitiator.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestInitiator.java @@ -20,6 +20,7 @@ import org.apache.hadoop.hive.common.ServerUtils; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.TransactionalValidationListener; +import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.AbortTxnRequest; import org.apache.hadoop.hive.metastore.api.CommitTxnRequest; import org.apache.hadoop.hive.metastore.api.CompactionRequest; @@ -76,10 +77,12 @@ public void nothing() throws Exception { public void 
recoverFailedWorkers() throws Exception { Table t = newTable("default", "rflw1", false); CompactionRequest rqst = new CompactionRequest("default", "rflw1", CompactionType.MINOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); txnHandler.compact(rqst); t = newTable("default", "rflw2", false); rqst = new CompactionRequest("default", "rflw2", CompactionType.MINOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); txnHandler.compact(rqst); txnHandler.findNextToCompact(aFindNextCompactRequest(ServerUtils.hostname() + "-193892", WORKER_VERSION)); @@ -320,6 +323,7 @@ public void noCompactWhenCompactAlreadyScheduled() throws Exception { } CompactionRequest rqst = new CompactionRequest("default", "ncwcas", CompactionType.MAJOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); txnHandler.compact(rqst); ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest()); @@ -346,7 +350,7 @@ public void compactTableHighDeltaPct() throws Exception { addDeltaFile(t, null, 21L, 22L, 2); addDeltaFile(t, null, 23L, 24L, 2); - burnThroughTransactions("default", "cthdp", 23); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "cthdp", 23); long txnid = openTxn(); LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "default"); @@ -357,7 +361,7 @@ public void compactTableHighDeltaPct() throws Exception { LockRequest req = new LockRequest(components, "me", "localhost"); req.setTxnid(txnid); LockResponse res = txnHandler.lock(req); - long writeid = allocateWriteId("default", "cthdp", txnid); + long writeid = allocateWriteId(Warehouse.DEFAULT_CATALOG_NAME, "default", "cthdp", txnid); Assert.assertEquals(24, writeid); txnHandler.commitTxn(new CommitTxnRequest(txnid)); @@ -380,7 +384,7 @@ public void compactPartitionHighDeltaPct() throws Exception { addDeltaFile(t, p, 21L, 22L, 2); addDeltaFile(t, p, 23L, 24L, 2); - burnThroughTransactions("default", "cphdp", 23); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "cphdp", 23); 
long txnid = openTxn(); LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.PARTITION, "default"); @@ -392,7 +396,7 @@ public void compactPartitionHighDeltaPct() throws Exception { LockRequest req = new LockRequest(components, "me", "localhost"); req.setTxnid(txnid); LockResponse res = txnHandler.lock(req); - long writeid = allocateWriteId("default", "cphdp", txnid); + long writeid = allocateWriteId(Warehouse.DEFAULT_CATALOG_NAME, "default", "cphdp", txnid); Assert.assertEquals(24, writeid); txnHandler.commitTxn(new CommitTxnRequest(txnid)); @@ -416,7 +420,7 @@ public void compactCamelCasePartitionValue() throws Exception { addDeltaFile(t, p, 21L, 22L, 2); addDeltaFile(t, p, 23L, 24L, 2); - burnThroughTransactions("default", "test_table", 23); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "test_table", 23); long txnid = openTxn(); LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.PARTITION, "default"); @@ -428,7 +432,7 @@ public void compactCamelCasePartitionValue() throws Exception { LockRequest req = new LockRequest(components, "me", "localhost"); req.setTxnid(txnid); LockResponse res = txnHandler.lock(req); - long writeid = allocateWriteId("default", "test_table", txnid); + long writeid = allocateWriteId(Warehouse.DEFAULT_CATALOG_NAME, "default", "test_table", txnid); Assert.assertEquals(24, writeid); txnHandler.commitTxn(new CommitTxnRequest(txnid)); @@ -451,7 +455,7 @@ public void noCompactTableDeltaPctNotHighEnough() throws Exception { addDeltaFile(t, null, 21L, 22L, 2); addDeltaFile(t, null, 23L, 24L, 2); - burnThroughTransactions("default", "nctdpnhe", 53); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "nctdpnhe", 53); long txnid = openTxn(); LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "default"); @@ -462,7 +466,7 @@ public void noCompactTableDeltaPctNotHighEnough() throws Exception { LockRequest req = new LockRequest(components, "me", "localhost"); 
req.setTxnid(txnid); LockResponse res = txnHandler.lock(req); - long writeid = allocateWriteId("default", "nctdpnhe", txnid); + long writeid = allocateWriteId(Warehouse.DEFAULT_CATALOG_NAME, "default", "nctdpnhe", txnid); Assert.assertEquals(54, writeid); txnHandler.commitTxn(new CommitTxnRequest(txnid)); @@ -489,7 +493,7 @@ public void compactTableTooManyDeltas() throws Exception { addDeltaFile(t, null, 210L, 210L, 1); addDeltaFile(t, null, 211L, 211L, 1); - burnThroughTransactions("default", "cttmd", 210); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "cttmd", 210); long txnid = openTxn(); LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "default"); @@ -500,7 +504,7 @@ public void compactTableTooManyDeltas() throws Exception { LockRequest req = new LockRequest(components, "me", "localhost"); req.setTxnid(txnid); LockResponse res = txnHandler.lock(req); - long writeid = allocateWriteId("default", "cttmd", txnid); + long writeid = allocateWriteId(Warehouse.DEFAULT_CATALOG_NAME, "default", "cttmd", txnid); Assert.assertEquals(211, writeid); txnHandler.commitTxn(new CommitTxnRequest(txnid)); @@ -532,7 +536,7 @@ public void compactPartitionTooManyDeltas() throws Exception { addDeltaFile(t, p, 210L, 210L, 1); addDeltaFile(t, p, 211L, 211L, 1); - burnThroughTransactions("default", "cptmd", 210); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "cptmd", 210); long txnid = openTxn(); LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.PARTITION, "default"); @@ -544,7 +548,7 @@ public void compactPartitionTooManyDeltas() throws Exception { LockRequest req = new LockRequest(components, "me", "localhost"); req.setTxnid(txnid); LockResponse res = txnHandler.lock(req); - long writeid = allocateWriteId("default", "cptmd", txnid); + long writeid = allocateWriteId(Warehouse.DEFAULT_CATALOG_NAME, "default", "cptmd", txnid); Assert.assertEquals(211, writeid); txnHandler.commitTxn(new 
CommitTxnRequest(txnid)); @@ -567,7 +571,7 @@ public void noCompactTableNotEnoughDeltas() throws Exception { addDeltaFile(t, null, 201L, 205L, 5); addDeltaFile(t, null, 206L, 211L, 6); - burnThroughTransactions("default", "nctned", 210); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "nctned", 210); long txnid = openTxn(); LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "default"); @@ -578,7 +582,7 @@ public void noCompactTableNotEnoughDeltas() throws Exception { LockRequest req = new LockRequest(components, "me", "localhost"); req.setTxnid(txnid); LockResponse res = txnHandler.lock(req); - long writeid = allocateWriteId("default", "nctned", txnid); + long writeid = allocateWriteId(Warehouse.DEFAULT_CATALOG_NAME, "default", "nctned", txnid); Assert.assertEquals(211, writeid); txnHandler.commitTxn(new CommitTxnRequest(txnid)); @@ -605,7 +609,7 @@ public void chooseMajorOverMinorWhenBothValid() throws Exception { addDeltaFile(t, null, 300L, 310L, 11); addDeltaFile(t, null, 311L, 321L, 11); - burnThroughTransactions("default", "cmomwbv", 320); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "cmomwbv", 320); long txnid = openTxn(); LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "default"); @@ -616,7 +620,7 @@ public void chooseMajorOverMinorWhenBothValid() throws Exception { LockRequest req = new LockRequest(components, "me", "localhost"); req.setTxnid(txnid); LockResponse res = txnHandler.lock(req); - long writeid = allocateWriteId("default", "cmomwbv", txnid); + long writeid = allocateWriteId(Warehouse.DEFAULT_CATALOG_NAME, "default", "cmomwbv", txnid); Assert.assertEquals(321, writeid); txnHandler.commitTxn(new CommitTxnRequest(txnid)); @@ -647,7 +651,7 @@ public void enoughDeltasNoBase() throws Exception { addDeltaFile(t, p, 210L, 210L, 1); addDeltaFile(t, p, 211L, 211L, 1); - burnThroughTransactions("default", "ednb", 210); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, 
"default", "ednb", 210); long txnid = openTxn(); LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.PARTITION, "default"); @@ -659,7 +663,7 @@ public void enoughDeltasNoBase() throws Exception { LockRequest req = new LockRequest(components, "me", "localhost"); req.setTxnid(txnid); LockResponse res = txnHandler.lock(req); - long writeid = allocateWriteId("default", "ednb", txnid); + long writeid = allocateWriteId(Warehouse.DEFAULT_CATALOG_NAME, "default", "ednb", txnid); Assert.assertEquals(211, writeid); txnHandler.commitTxn(new CommitTxnRequest(txnid)); @@ -683,7 +687,7 @@ public void twoTxnsOnSamePartitionGenerateOneCompactionRequest() throws Exceptio addDeltaFile(t, p, 21L, 22L, 2); addDeltaFile(t, p, 23L, 24L, 2); - burnThroughTransactions("default", "ttospgocr", 23); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "ttospgocr", 23); long txnid = openTxn(); LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.PARTITION, "default"); @@ -695,7 +699,7 @@ public void twoTxnsOnSamePartitionGenerateOneCompactionRequest() throws Exceptio LockRequest req = new LockRequest(components, "me", "localhost"); req.setTxnid(txnid); LockResponse res = txnHandler.lock(req); - long writeid = allocateWriteId("default", "ttospgocr", txnid); + long writeid = allocateWriteId(Warehouse.DEFAULT_CATALOG_NAME, "default", "ttospgocr", txnid); Assert.assertEquals(24, writeid); txnHandler.commitTxn(new CommitTxnRequest(txnid)); @@ -709,7 +713,7 @@ public void twoTxnsOnSamePartitionGenerateOneCompactionRequest() throws Exceptio req = new LockRequest(components, "me", "localhost"); req.setTxnid(txnid); res = txnHandler.lock(req); - writeid = allocateWriteId("default", "ttospgocr", txnid); + writeid = allocateWriteId(Warehouse.DEFAULT_CATALOG_NAME, "default", "ttospgocr", txnid); Assert.assertEquals(25, writeid); txnHandler.commitTxn(new CommitTxnRequest(txnid)); @@ -733,7 +737,7 @@ public void noCompactTableDynamicPartitioning() throws 
Exception { addDeltaFile(t, p, 21L, 22L, 2); addDeltaFile(t, p, 23L, 24L, 2); - burnThroughTransactions("default", "nctdp", 23); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "nctdp", 23); long txnid = openTxn(); LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "default"); @@ -744,7 +748,7 @@ public void noCompactTableDynamicPartitioning() throws Exception { LockRequest req = new LockRequest(components, "me", "localhost"); req.setTxnid(txnid); LockResponse res = txnHandler.lock(req); - long writeid = allocateWriteId("default", "nctdp", txnid); + long writeid = allocateWriteId(Warehouse.DEFAULT_CATALOG_NAME, "default", "nctdp", txnid); Assert.assertEquals(24, writeid); txnHandler.commitTxn(new CommitTxnRequest(txnid)); @@ -763,7 +767,7 @@ public void dropTable() throws Exception { addDeltaFile(t, null, 21L, 22L, 2); addDeltaFile(t, null, 23L, 24L, 2); - burnThroughTransactions("default", "dt", 23); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "dt", 23); long txnid = openTxn(); LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.PARTITION, "default"); @@ -774,7 +778,7 @@ public void dropTable() throws Exception { LockRequest req = new LockRequest(components, "me", "localhost"); req.setTxnid(txnid); LockResponse res = txnHandler.lock(req); - long writeid = allocateWriteId("default", "dt", txnid); + long writeid = allocateWriteId(Warehouse.DEFAULT_CATALOG_NAME, "default", "dt", txnid); Assert.assertEquals(24, writeid); txnHandler.commitTxn(new CommitTxnRequest(txnid)); @@ -796,7 +800,7 @@ public void dropPartition() throws Exception { addDeltaFile(t, p, 21L, 22L, 2); addDeltaFile(t, p, 23L, 24L, 2); - burnThroughTransactions("default", "dp", 23); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "dp", 23); long txnid = openTxn(); LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.PARTITION, "default"); @@ -808,7 +812,7 @@ public void dropPartition() throws 
Exception { LockRequest req = new LockRequest(components, "me", "localhost"); req.setTxnid(txnid); LockResponse res = txnHandler.lock(req); - long writeid = allocateWriteId("default", "dp", txnid); + long writeid = allocateWriteId(Warehouse.DEFAULT_CATALOG_NAME, "default", "dp", txnid); Assert.assertEquals(24, writeid); txnHandler.commitTxn(new CommitTxnRequest(txnid)); @@ -838,7 +842,7 @@ public void processCompactionCandidatesInParallel() throws Exception { comp.setOperationType(DataOperationType.UPDATE); components.add(comp); } - burnThroughTransactions("default", "dp", 23); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "dp", 23); long txnid = openTxn(); LockRequest req = new LockRequest(components, "me", "localhost"); @@ -846,7 +850,7 @@ public void processCompactionCandidatesInParallel() throws Exception { LockResponse res = txnHandler.lock(req); Assert.assertEquals(LockState.ACQUIRED, res.getState()); - long writeid = allocateWriteId("default", "dp", txnid); + long writeid = allocateWriteId(Warehouse.DEFAULT_CATALOG_NAME, "default", "dp", txnid); Assert.assertEquals(24, writeid); txnHandler.commitTxn(new CommitTxnRequest(txnid)); @@ -866,7 +870,7 @@ public void compactTableWithMultipleBase() throws Exception { addBaseFile(t, null, 50L, 50); addBaseFile(t, null, 100L, 50); - burnThroughTransactions("default", "nctdpnhe", 102); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "nctdpnhe", 102); long txnid = openTxn(); LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "default"); @@ -877,7 +881,7 @@ public void compactTableWithMultipleBase() throws Exception { LockRequest req = new LockRequest(components, "me", "localhost"); req.setTxnid(txnid); LockResponse res = txnHandler.lock(req); - long writeid = allocateWriteId("default", "nctdpnhe", txnid); + long writeid = allocateWriteId(Warehouse.DEFAULT_CATALOG_NAME, "default", "nctdpnhe", txnid); txnHandler.commitTxn(new CommitTxnRequest(txnid)); 
startInitiator(); @@ -1089,7 +1093,7 @@ public void testMetaCache() throws Exception { comp.setOperationType(DataOperationType.UPDATE); components.add(comp); } - burnThroughTransactions(dbname, tableName, 23); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, dbname, tableName, 23); long txnid = openTxn(); LockRequest req = new LockRequest(components, "me", "localhost"); @@ -1097,7 +1101,7 @@ public void testMetaCache() throws Exception { LockResponse res = txnHandler.lock(req); Assert.assertEquals(LockState.ACQUIRED, res.getState()); - long writeid = allocateWriteId(dbname, tableName, txnid); + long writeid = allocateWriteId(Warehouse.DEFAULT_CATALOG_NAME, dbname, tableName, txnid); Assert.assertEquals(24, writeid); txnHandler.commitTxn(new CommitTxnRequest(txnid)); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestMRCompactorJobQueueConfiguration.java b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestMRCompactorJobQueueConfiguration.java index e775b4567155..020501064004 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestMRCompactorJobQueueConfiguration.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestMRCompactorJobQueueConfiguration.java @@ -53,7 +53,8 @@ void testCreateBaseJobConfHasCorrectJobQueue(ConfSetup input) { Table tbl = createPersonTable(); tbl.setParameters(input.tableProperties); MRCompactor compactor = new MRCompactor(null); - CompactionInfo ci = new CompactionInfo(tbl.getDbName(), tbl.getTableName(), null, CompactionType.MAJOR); + CompactionInfo ci = new CompactionInfo(tbl.getCatName(), tbl.getDbName(), tbl.getTableName(), null, + CompactionType.MAJOR); ci.properties = new StringableMap(input.compactionProperties).toString(); HiveConf conf = new HiveConf(); input.confProperties.forEach(conf::set); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java index bf01034711e3..e9061b23b6ab 100644 
--- a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hive.metastore.HiveMetaStoreUtils; import org.apache.hadoop.hive.metastore.IMetaStoreClient; import org.apache.hadoop.hive.metastore.TransactionalValidationListener; +import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.CompactionRequest; import org.apache.hadoop.hive.metastore.api.CompactionType; import org.apache.hadoop.hive.metastore.api.FindNextCompactRequest; @@ -254,9 +255,10 @@ public void sortedTable() throws Exception { addDeltaFile(t, null, 23L, 24L, 2); addDeltaFile(t, null, 21L, 24L, 4); - burnThroughTransactions("default", "st", 25); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "st", 25); CompactionRequest rqst = new CompactionRequest("default", "st", CompactionType.MINOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); txnHandler.compact(rqst); startWorker(); @@ -280,9 +282,10 @@ public void sortedPartition() throws Exception { addDeltaFile(t, p, 23L, 24L, 2); addDeltaFile(t, p, 21L, 24L, 4); - burnThroughTransactions("default", "sp", 25); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "sp", 25); CompactionRequest rqst = new CompactionRequest("default", "sp", CompactionType.MINOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); rqst.setPartitionname("ds=today"); txnHandler.compact(rqst); @@ -303,9 +306,10 @@ public void minorTableWithBase() throws Exception { addDeltaFile(t, null, 21L, 22L, 2); addDeltaFile(t, null, 23L, 24L, 2); - burnThroughTransactions("default", "mtwb", 25); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "mtwb", 25); CompactionRequest rqst = new CompactionRequest("default", "mtwb", CompactionType.MINOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); txnHandler.compact(rqst); startWorker();//adds delta and delete_delta @@ 
-362,9 +366,10 @@ public void minorWithOpenInMiddle() throws Exception { addDeltaFile(t, null, 23L, 25L, 3); addLengthFile(t, null, 23L, 25L, 3); addDeltaFile(t, null, 26L, 27L, 2); - burnThroughTransactions("default", "mtwb", 27, new HashSet(Arrays.asList(23L)), null); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "mtwb", 27, new HashSet(Arrays.asList(23L)), null); CompactionRequest rqst = new CompactionRequest("default", "mtwb", CompactionType.MINOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); txnHandler.compact(rqst); startWorker(); @@ -398,9 +403,10 @@ public void minorWithAborted() throws Exception { addDeltaFile(t, null, 23L, 25L, 3); addLengthFile(t, null, 23L, 25L, 3); addDeltaFile(t, null, 26L, 27L, 2); - burnThroughTransactions("default", "mtwb", 27, null, new HashSet(Arrays.asList(24L, 25L))); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "mtwb", 27, null, new HashSet(Arrays.asList(24L, 25L))); CompactionRequest rqst = new CompactionRequest("default", "mtwb", CompactionType.MINOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); txnHandler.compact(rqst); startWorker(); @@ -434,7 +440,7 @@ public void minorPartitionWithBase() throws Exception { addDeltaFile(t, p, 21L, 22L, 2); addDeltaFile(t, p, 23L, 24L, 2); - burnThroughTransactions("default", "mpwb", 25); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "mpwb", 25); CompactionRequest rqst = new CompactionRequest("default", "mpwb", CompactionType.MINOR); rqst.setPartitionname("ds=today"); @@ -487,7 +493,7 @@ public void minorTableNoBase() throws Exception { addDeltaFile(t, null, 1L, 2L, 2); addDeltaFile(t, null, 3L, 4L, 2); - burnThroughTransactions("default", "mtnb", 5); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "mtnb", 5); CompactionRequest rqst = new CompactionRequest("default", "mtnb", CompactionType.MINOR); txnHandler.compact(rqst); @@ -540,7 +546,7 @@ public void majorTableWithBase() throws 
Exception { addDeltaFile(t, null, 21L, 22L, 2); addDeltaFile(t, null, 23L, 24L, 2); - burnThroughTransactions("default", "matwb", 25); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "matwb", 25); CompactionRequest rqst = new CompactionRequest("default", "matwb", CompactionType.MAJOR); txnHandler.compact(rqst); @@ -616,7 +622,7 @@ private void compactNoBaseLotsOfDeltas(CompactionType type) throws Exception { * and then the 'requested' * minor compaction to combine delta_21_23, delta_25_33 and delta_35_35 to make delta_21_35 * or major compaction to create base_35*/ - burnThroughTransactions("default", "mapwb", 35); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "mapwb", 35); CompactionRequest rqst = new CompactionRequest("default", "mapwb", type); rqst.setPartitionname("ds=today"); txnHandler.compact(rqst); @@ -677,7 +683,7 @@ public void majorPartitionWithBase() throws Exception { addDeltaFile(t, p, 21L, 22L, 2); addDeltaFile(t, p, 23L, 24L, 2); - burnThroughTransactions("default", "mapwb", 25); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "mapwb", 25); CompactionRequest rqst = new CompactionRequest("default", "mapwb", CompactionType.MAJOR); rqst.setPartitionname("ds=today"); @@ -721,7 +727,7 @@ public void majorTableNoBase() throws Exception { addDeltaFile(t, null, 1L, 2L, 2); addDeltaFile(t, null, 3L, 4L, 2); - burnThroughTransactions("default", "matnb", 4); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "matnb", 4); CompactionRequest rqst = new CompactionRequest("default", "matnb", CompactionType.MAJOR); txnHandler.compact(rqst); @@ -776,7 +782,7 @@ public void majorTableLegacy() throws Exception { addDeltaFile(t, null, 21L, 22L, 2); addDeltaFile(t, null, 23L, 24L, 2); - burnThroughTransactions("default", "matl", 25); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "matl", 25); CompactionRequest rqst = new CompactionRequest("default", "matl", 
CompactionType.MAJOR); txnHandler.compact(rqst); @@ -820,7 +826,7 @@ public void minorTableLegacy() throws Exception { addDeltaFile(t, null, 21L, 22L, 2); addDeltaFile(t, null, 23L, 24L, 2); - burnThroughTransactions("default", "mtl", 25); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "mtl", 25); CompactionRequest rqst = new CompactionRequest("default", "mtl", CompactionType.MINOR); txnHandler.compact(rqst); @@ -863,7 +869,7 @@ public void majorPartitionWithBaseMissingBuckets() throws Exception { addDeltaFile(t, p, 21L, 22L, 2, 2, false); addDeltaFile(t, p, 23L, 26L, 4); - burnThroughTransactions("default", "mapwbmb", 27); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "mapwbmb", 27); CompactionRequest rqst = new CompactionRequest("default", "mapwbmb", CompactionType.MAJOR); rqst.setPartitionname("ds=today"); @@ -917,7 +923,7 @@ public void majorWithOpenInMiddle() throws Exception { addDeltaFile(t, null, 23L, 25L, 3); addLengthFile(t, null, 23L, 25L, 3); addDeltaFile(t, null, 26L, 27L, 2); - burnThroughTransactions("default", "mtwb", 27, new HashSet(Arrays.asList(23L)), null); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "mtwb", 27, new HashSet(Arrays.asList(23L)), null); CompactionRequest rqst = new CompactionRequest("default", "mtwb", CompactionType.MAJOR); txnHandler.compact(rqst); @@ -953,7 +959,7 @@ public void majorWithAborted() throws Exception { addDeltaFile(t, null, 23L, 25L, 3); addLengthFile(t, null, 23L, 25L, 3); addDeltaFile(t, null, 26L, 27L, 2); - burnThroughTransactions("default", "mtwb", 27, null, new HashSet(Arrays.asList(24L, 25L))); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "mtwb", 27, null, new HashSet(Arrays.asList(24L, 25L))); CompactionRequest rqst = new CompactionRequest("default", "mtwb", CompactionType.MAJOR); txnHandler.compact(rqst); @@ -992,7 +998,7 @@ public void testWorkerAndInitiatorVersion() throws Exception { addDeltaFile(t, null, 21L, 
22L, 2); addDeltaFile(t, null, 23L, 24L, 2); - burnThroughTransactions("default", "mtwb", 25); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "mtwb", 25); CompactionRequest rqst = new CompactionRequest("default", "mtwb", CompactionType.MINOR); String initiatorVersion = "INITIATOR_VERSION"; @@ -1037,7 +1043,7 @@ public void testDoesNotGatherStatsIfCompactionFails() throws Exception { addDeltaFile(t, null, 21L, 22L, 2); addDeltaFile(t, null, 23L, 24L, 2); - burnThroughTransactions("default", "mtwb", 25); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "mtwb", 25); txnHandler.compact(new CompactionRequest("default", "mtwb", CompactionType.MINOR)); @@ -1060,7 +1066,7 @@ public void droppedTable() throws Exception { addDeltaFile(t, null, 1L, 2L, 2); addDeltaFile(t, null, 3L, 4L, 2); - burnThroughTransactions("default", "dt", 4); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "dt", 4); CompactionRequest rqst = new CompactionRequest("default", "dt", CompactionType.MAJOR); txnHandler.compact(rqst); @@ -1084,7 +1090,7 @@ public void droppedPartition() throws Exception { addDeltaFile(t, p, 21L, 22L, 2); addDeltaFile(t, p, 23L, 24L, 2); - burnThroughTransactions("default", "dp", 25); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "dp", 25); CompactionRequest rqst = new CompactionRequest("default", "dp", CompactionType.MINOR); rqst.setPartitionname("ds=today"); @@ -1106,7 +1112,7 @@ public void oneDeltaWithAbortedTxn() throws Exception { addDeltaFile(t, null, 0, 2L, 3); Set aborted = new HashSet<>(); aborted.add(1L); - burnThroughTransactions("default", "delta1", 3, null, aborted); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "delta1", 3, null, aborted); // MR verifyTxn1IsAborted(0, t, CompactionType.MAJOR); @@ -1124,7 +1130,7 @@ public void oneDeltaWithAbortedTxn() throws Exception { TransactionalValidationListener.INSERTONLY_TRANSACTIONAL_PROPERTY); Table mm = 
newTable("default", "delta1", false, parameters); addDeltaFile(mm, null, 0, 2L, 3); - burnThroughTransactions("default", "delta1", 3, null, aborted); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "delta1", 3, null, aborted); verifyTxn1IsAborted(0, t, CompactionType.MAJOR); verifyTxn1IsAborted(1, t, CompactionType.MINOR); } @@ -1138,7 +1144,7 @@ public void insertOnlyDisabled() throws Exception { addDeltaFile(t, null, 1L, 2L, 2); addDeltaFile(t, null, 3L, 4L, 2); - burnThroughTransactions("default", "iod", 5); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "iod", 5); conf.setBoolVar(HiveConf.ConfVars.HIVE_COMPACTOR_COMPACT_MM, false); CompactionRequest rqst = new CompactionRequest("default", "iod", CompactionType.MINOR); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/handler/TestAbortedTxnCleaner.java b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/handler/TestAbortedTxnCleaner.java index d6f79214a450..2755bc713bc4 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/handler/TestAbortedTxnCleaner.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/handler/TestAbortedTxnCleaner.java @@ -20,6 +20,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.ReplChangeManager; +import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.CommitTxnRequest; import org.apache.hadoop.hive.metastore.api.CompactionRequest; import org.apache.hadoop.hive.metastore.api.CompactionType; @@ -214,6 +215,7 @@ public void testCleaningOfAbortedDirectoriesOnTopOfBase() throws Exception { addDeltaFileWithTxnComponents(t, null, 2, false); CompactionRequest cr = new CompactionRequest(dbName, tableName, CompactionType.MAJOR); + cr.setCatName(Warehouse.DEFAULT_CATALOG_NAME); txnHandler.compact(cr); // Run compaction @@ -261,6 +263,7 @@ public void testCleaningOfAbortedDirectoriesBelowBase() throws 
Exception { addDeltaFileWithTxnComponents(t, null, 2, false); CompactionRequest cr = new CompactionRequest(dbName, tableName, CompactionType.MAJOR); + cr.setCatName(Warehouse.DEFAULT_CATALOG_NAME); txnHandler.compact(cr); // Run compaction @@ -357,7 +360,7 @@ public void testAbortCleanupNotUpdatingSpecificCompactionTables(boolean isPartit runInitiator(conf); // Initiator must not add anything to compaction_queue String compactionQueuePresence = "SELECT COUNT(*) FROM \"COMPACTION_QUEUE\" " + - " WHERE \"CQ_DATABASE\" = '" + dbName+ "' AND \"CQ_TABLE\" = '" + tableName + "' AND \"CQ_PARTITION\"" + + " WHERE \"CQ_CATALOG\" = '" + Warehouse.DEFAULT_CATALOG_NAME + "' AND \"CQ_DATABASE\" = '" + dbName+ "' AND \"CQ_TABLE\" = '" + tableName + "' AND \"CQ_PARTITION\"" + (isPartitioned ? " = 'ds=" + partName + "'" : " IS NULL"); assertEquals(0, TestTxnDbUtil.countQueryAgent(conf, compactionQueuePresence)); @@ -372,10 +375,10 @@ public void testAbortCleanupNotUpdatingSpecificCompactionTables(boolean isPartit assertEquals(0, TestTxnDbUtil.countQueryAgent(conf, compactionQueuePresence)); assertEquals(0, TestTxnDbUtil.countQueryAgent(conf, "SELECT COUNT(*) FROM \"COMPLETED_COMPACTIONS\" " + - " WHERE \"CC_DATABASE\" = '" + dbName+ "' AND \"CC_TABLE\" = '" + tableName + "' AND \"CC_PARTITION\"" + + " WHERE \"CC_CATALOG\" = '" + Warehouse.DEFAULT_CATALOG_NAME + "' AND \"CC_DATABASE\" = '" + dbName+ "' AND \"CC_TABLE\" = '" + tableName + "' AND \"CC_PARTITION\"" + (isPartitioned ? " = 'ds=" + partName + "'" : " IS NULL"))); assertEquals(1, TestTxnDbUtil.countQueryAgent(conf, "SELECT COUNT(*) FROM \"COMPLETED_TXN_COMPONENTS\" " + - " WHERE \"CTC_DATABASE\" = '" + dbName+ "' AND \"CTC_TABLE\" = '" + tableName + "' AND \"CTC_PARTITION\"" + + " WHERE \"CTC_CATALOG\" = '" + Warehouse.DEFAULT_CATALOG_NAME + "' AND \"CTC_DATABASE\" = '" + dbName+ "' AND \"CTC_TABLE\" = '" + tableName + "' AND \"CTC_PARTITION\"" + (isPartitioned ? 
" = 'ds=" + partName + "'" : " IS NULL"))); List directories = getDirectories(conf, t, null); @@ -417,11 +420,11 @@ public void testRetryEntryOnFailures(boolean isPartitioned) throws Exception { ShowCompactResponse scr = txnHandler.showCompact(new ShowCompactRequest()); assertEquals(1, scr.getCompactsSize()); ShowCompactResponseElement scre = scr.getCompacts().getFirst(); - assertTrue(scre.getDbname().equals(dbName) && scre.getTablename().equals(tableName) + assertTrue(scre.getCatName().equals(Warehouse.DEFAULT_CATALOG_NAME) && scre.getDbname().equals(dbName) && scre.getTablename().equals(tableName) && (isPartitioned ? scre.getPartitionname().equals("ds=" + partName) : scre.getPartitionname() == null) && "ready for cleaning".equalsIgnoreCase(scre.getState()) && scre.getType() == CompactionType.ABORT_TXN_CLEANUP && scre.getErrorMessage().equalsIgnoreCase("Testing retry")); - String whereClause = " WHERE \"CQ_DATABASE\" = '" + dbName+ "' AND \"CQ_TABLE\" = '" + tableName + "' AND \"CQ_PARTITION\"" + + String whereClause = " WHERE \"CQ_CATALOG\" = '" + Warehouse.DEFAULT_CATALOG_NAME + "' AND \"CQ_DATABASE\" = '" + dbName+ "' AND \"CQ_TABLE\" = '" + tableName + "' AND \"CQ_PARTITION\"" + (isPartitioned ? 
" = 'ds=" + partName + "'" : " IS NULL") + " AND \"CQ_TYPE\" = 'c' AND \"CQ_STATE\" = 'r'"; String retryRetentionQuery = "SELECT \"CQ_RETRY_RETENTION\" FROM \"COMPACTION_QUEUE\" " + whereClause; assertEquals(Long.toString(MetastoreConf.getTimeVar(conf, ConfVars.HIVE_COMPACTOR_CLEANER_RETRY_RETENTION_TIME, TimeUnit.MILLISECONDS)), @@ -465,11 +468,11 @@ public void testRetryInfoBeingUsed(boolean isPartitioned) throws Exception { ShowCompactResponse scr = txnHandler.showCompact(new ShowCompactRequest()); assertEquals(1, scr.getCompactsSize()); ShowCompactResponseElement scre = scr.getCompacts().getFirst(); - assertTrue(scre.getDbname().equals(dbName) && scre.getTablename().equals(tableName) + assertTrue(scre.getCatName().equals(Warehouse.DEFAULT_CATALOG_NAME) && scre.getDbname().equals(dbName) && scre.getTablename().equals(tableName) && (isPartitioned ? scre.getPartitionname().equals("ds=" + partName) : scre.getPartitionname() == null) && "ready for cleaning".equalsIgnoreCase(scre.getState()) && scre.getType() == CompactionType.ABORT_TXN_CLEANUP && scre.getErrorMessage().equalsIgnoreCase("Testing retry")); - String whereClause = " WHERE \"CQ_DATABASE\" = '" + dbName+ "' AND \"CQ_TABLE\" = '" + tableName + "' AND \"CQ_PARTITION\"" + + String whereClause = " WHERE \"CQ_CATALOG\" = '" + Warehouse.DEFAULT_CATALOG_NAME + "' AND \"CQ_DATABASE\" = '" + dbName+ "' AND \"CQ_TABLE\" = '" + tableName + "' AND \"CQ_PARTITION\"" + (isPartitioned ? 
" = 'ds=" + partName + "'" : " IS NULL") + " AND \"CQ_TYPE\" = 'c' AND \"CQ_STATE\" = 'r'"; String retryRetentionQuery = "SELECT \"CQ_RETRY_RETENTION\" FROM \"COMPACTION_QUEUE\" " + whereClause; assertEquals(Long.toString(retryRetentionTime), TestTxnDbUtil.queryToString(conf, retryRetentionQuery, false) @@ -522,11 +525,11 @@ public void testRetryWithinRetentionTime(boolean isPartitioned) throws Exception ShowCompactResponse scr = txnHandler.showCompact(new ShowCompactRequest()); assertEquals(1, scr.getCompactsSize()); ShowCompactResponseElement scre = scr.getCompacts().getFirst(); - assertTrue(scre.getDbname().equals(dbName) && scre.getTablename().equals(tableName) + assertTrue(scre.getCatName().equals(Warehouse.DEFAULT_CATALOG_NAME) && scre.getDbname().equals(dbName) && scre.getTablename().equals(tableName) && (isPartitioned ? scre.getPartitionname().equals("ds=" + partName) : scre.getPartitionname() == null) && "ready for cleaning".equalsIgnoreCase(scre.getState()) && scre.getType() == CompactionType.ABORT_TXN_CLEANUP && scre.getErrorMessage().equalsIgnoreCase("Testing retry")); - String whereClause = " WHERE \"CQ_DATABASE\" = '" + dbName+ "' AND \"CQ_TABLE\" = '" + tableName + "' AND \"CQ_PARTITION\"" + + String whereClause = " WHERE \"CQ_CATALOG\" = '" + Warehouse.DEFAULT_CATALOG_NAME + "' AND \"CQ_DATABASE\" = '" + dbName+ "' AND \"CQ_TABLE\" = '" + tableName + "' AND \"CQ_PARTITION\"" + (isPartitioned ? 
" = 'ds=" + partName + "'" : " IS NULL") + " AND \"CQ_TYPE\" = 'c' AND \"CQ_STATE\" = 'r'"; String retryRetentionQuery = "SELECT \"CQ_RETRY_RETENTION\" FROM \"COMPACTION_QUEUE\" " + whereClause; assertEquals(Long.toString(MetastoreConf.getTimeVar(conf, ConfVars.HIVE_COMPACTOR_CLEANER_RETRY_RETENTION_TIME, TimeUnit.MILLISECONDS)), @@ -590,11 +593,11 @@ public void testRetryUpdateRetentionTimeWhenFailedTwice(boolean isPartitioned) t ShowCompactResponse scr = txnHandler.showCompact(new ShowCompactRequest()); assertEquals(1, scr.getCompactsSize()); ShowCompactResponseElement scre = scr.getCompacts().getFirst(); - assertTrue(scre.getDbname().equals(dbName) && scre.getTablename().equals(tableName) + assertTrue(scre.getCatName().equals(Warehouse.DEFAULT_CATALOG_NAME) && scre.getDbname().equals(dbName) && scre.getTablename().equals(tableName) && (isPartitioned ? scre.getPartitionname().equals("ds=" + partName) : scre.getPartitionname() == null) && "ready for cleaning".equalsIgnoreCase(scre.getState()) && scre.getType() == CompactionType.ABORT_TXN_CLEANUP && scre.getErrorMessage().equalsIgnoreCase("Testing retry")); - String whereClause = " WHERE \"CQ_DATABASE\" = '" + dbName+ "' AND \"CQ_TABLE\" = '" + tableName + "' AND \"CQ_PARTITION\"" + + String whereClause = " WHERE \"CQ_CATALOG\" = '" + Warehouse.DEFAULT_CATALOG_NAME + "' AND \"CQ_DATABASE\" = '" + dbName+ "' AND \"CQ_TABLE\" = '" + tableName + "' AND \"CQ_PARTITION\"" + (isPartitioned ? 
" = 'ds=" + partName + "'" : " IS NULL") + " AND \"CQ_TYPE\" = 'c' AND \"CQ_STATE\" = 'r'"; String retryRetentionQuery = "SELECT \"CQ_RETRY_RETENTION\" FROM \"COMPACTION_QUEUE\" " + whereClause; assertEquals(Long.toString(retryRetentionTime), TestTxnDbUtil.queryToString(conf, retryRetentionQuery, false) @@ -657,11 +660,11 @@ public void testRetryUpdateErrorMessageWhenFailedTwice(boolean isPartitioned) th ShowCompactResponse scr = txnHandler.showCompact(new ShowCompactRequest()); assertEquals(1, scr.getCompactsSize()); ShowCompactResponseElement scre = scr.getCompacts().getFirst(); - assertTrue(scre.getDbname().equals(dbName) && scre.getTablename().equals(tableName) + assertTrue(scre.getCatName().equals(Warehouse.DEFAULT_CATALOG_NAME) && scre.getDbname().equals(dbName) && scre.getTablename().equals(tableName) && (isPartitioned ? scre.getPartitionname().equals("ds=" + partName) : scre.getPartitionname() == null) && "ready for cleaning".equalsIgnoreCase(scre.getState()) && scre.getType() == CompactionType.ABORT_TXN_CLEANUP && scre.getErrorMessage().equalsIgnoreCase("Testing first retry")); - String whereClause = " WHERE \"CQ_DATABASE\" = '" + dbName+ "' AND \"CQ_TABLE\" = '" + tableName + "' AND \"CQ_PARTITION\"" + + String whereClause = " WHERE \"CQ_CATALOG\" = '" + Warehouse.DEFAULT_CATALOG_NAME + "' AND \"CQ_DATABASE\" = '" + dbName+ "' AND \"CQ_TABLE\" = '" + tableName + "' AND \"CQ_PARTITION\"" + (isPartitioned ? 
" = 'ds=" + partName + "'" : " IS NULL") + " AND \"CQ_TYPE\" = 'c' AND \"CQ_STATE\" = 'r'"; String retryRetentionQuery = "SELECT \"CQ_RETRY_RETENTION\" FROM \"COMPACTION_QUEUE\" " + whereClause; assertEquals(Long.toString(retryRetentionTime), TestTxnDbUtil.queryToString(conf, retryRetentionQuery, false) @@ -725,11 +728,11 @@ public void testZeroRetryRetentionTimeForAbortCleanup(boolean isPartitioned) thr ShowCompactResponse scr = txnHandler.showCompact(new ShowCompactRequest()); assertEquals(1, scr.getCompactsSize()); ShowCompactResponseElement scre = scr.getCompacts().getFirst(); - assertTrue(scre.getDbname().equals(dbName) && scre.getTablename().equals(tableName) + assertTrue(scre.getCatName().equals(Warehouse.DEFAULT_CATALOG_NAME) && scre.getDbname().equals(dbName) && scre.getTablename().equals(tableName) && (isPartitioned ? scre.getPartitionname().equals("ds=" + partName) : scre.getPartitionname() == null) && "ready for cleaning".equalsIgnoreCase(scre.getState()) && scre.getType() == CompactionType.ABORT_TXN_CLEANUP && scre.getErrorMessage().equalsIgnoreCase("Testing retry")); - String whereClause = " WHERE \"CQ_DATABASE\" = '" + dbName+ "' AND \"CQ_TABLE\" = '" + tableName + "' AND \"CQ_PARTITION\"" + + String whereClause = " WHERE \"CQ_CATALOG\" = '" + Warehouse.DEFAULT_CATALOG_NAME + "' AND \"CQ_DATABASE\" = '" + dbName+ "' AND \"CQ_TABLE\" = '" + tableName + "' AND \"CQ_PARTITION\"" + (isPartitioned ? 
" = 'ds=" + partName + "'" : " IS NULL") + " AND \"CQ_TYPE\" = 'c' AND \"CQ_STATE\" = 'r'"; String retryRetentionQuery = "SELECT \"CQ_RETRY_RETENTION\" FROM \"COMPACTION_QUEUE\" " + whereClause; assertEquals(Integer.toString(0), TestTxnDbUtil.queryToString(conf, retryRetentionQuery, false) diff --git a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/handler/TestHandler.java b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/handler/TestHandler.java index 4c482914a76f..bca74ca3be03 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/handler/TestHandler.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/handler/TestHandler.java @@ -19,6 +19,7 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.ReplChangeManager; +import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement; import org.apache.hadoop.hive.metastore.api.CompactionRequest; import org.apache.hadoop.hive.metastore.api.CompactionType; @@ -55,9 +56,10 @@ public void testCompactionHandlerAndFsRemover() throws Exception { addDeltaFile(t, p, 23L, 24L, 2); addBaseFile(t, p, 25L, 25); - burnThroughTransactions(t.getDbName(), t.getTableName(), 25); + burnThroughTransactions(t.getCatName(), t.getDbName(), t.getTableName(), 25); CompactionRequest rqst = new CompactionRequest(t.getDbName(), t.getTableName(), CompactionType.MAJOR); + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); rqst.setPartitionname("ds=today"); compactInTxn(rqst); MetadataCache metadataCache = new MetadataCache(true); @@ -84,9 +86,10 @@ public void testMetaCache() throws Exception { addBaseFile(t, null, 20L, 20); addDeltaFile(t, null, 21L, 22L, 2); addDeltaFile(t, null, 23L, 24L, 2); - burnThroughTransactions("default", "retry_test", 25); + burnThroughTransactions(Warehouse.DEFAULT_CATALOG_NAME, "default", "retry_test", 25); CompactionRequest rqst = new CompactionRequest("default", "retry_test", CompactionType.MAJOR); + 
rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); long compactTxn = compactInTxn(rqst); addBaseFile(t, null, 25L, 25, compactTxn); diff --git a/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java b/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java index 7b03f7f096e8..6387b5e9f6da 100644 --- a/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java +++ b/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java @@ -864,6 +864,7 @@ default Materialization getMaterializationInvalidationInfo(CreationMetadata cm, /** * Updates the creation metadata for the materialized view. */ + @Deprecated default void updateCreationMetadata(String dbName, String tableName, CreationMetadata cm) throws TException { throw new UnsupportedOperationException("MetaStore client does not support updating creation metadata"); @@ -3638,6 +3639,7 @@ default void abortTxns(AbortTxnsRequest abortTxnsRequest) throws TException { * @param tableName table to which the write ID to be allocated * @throws TException */ + @Deprecated default long allocateTableWriteId(long txnId, String dbName, String tableName) throws TException { throw new UnsupportedOperationException("MetaStore client does not support allocating table write IDs"); } @@ -3650,11 +3652,38 @@ default long allocateTableWriteId(long txnId, String dbName, String tableName) t * @param reallocate should we reallocate already mapped writeId (if true) or reuse (if false) * @throws TException */ + @Deprecated default long allocateTableWriteId(long txnId, String dbName, String tableName, boolean reallocate) throws TException { throw new UnsupportedOperationException("MetaStore client does not support allocating table write IDs " + "with reallocate option"); } + /** + * Allocate a per table write ID and associate it with the given 
transaction. + * @param txnId id of transaction to which the allocated write ID to be associated. + * @param catName name of catalog in which the table belongs. + * @param dbName name of DB in which the table belongs. + * @param tableName table to which the write ID to be allocated + * @throws TException + */ + default long allocateTableWriteId(long txnId, String catName, String dbName, String tableName) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support allocating table write IDs"); + } + + /** + * Allocate a per table write ID and associate it with the given transaction. + * @param txnId id of transaction to which the allocated write ID to be associated. + * @param catName name of catalog in which the table belongs. + * @param dbName name of DB in which the table belongs. + * @param tableName table to which the write ID to be allocated + * @param reallocate should we reallocate already mapped writeId (if true) or reuse (if false) + * @throws TException + */ + default long allocateTableWriteId(long txnId, String catName, String dbName, String tableName, boolean reallocate) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support allocating table write IDs " + + "with reallocate option"); + } + /** * Replicate Table Write Ids state to mark aborted write ids and writeid high water mark. * @param validWriteIdList Snapshot of writeid list when the table/partition is dumped. @@ -3663,11 +3692,27 @@ default long allocateTableWriteId(long txnId, String dbName, String tableName, b * @param partNames List of partitions being written. 
* @throws TException in case of failure to replicate the writeid state */ + @Deprecated default void replTableWriteIdState(String validWriteIdList, String dbName, String tableName, List partNames) throws TException { throw new UnsupportedOperationException("MetaStore client does not support replicating table write IDs state"); } + /** + * Replicate Table Write Ids state to mark aborted write ids and writeid high water mark. + * @param validWriteIdList Snapshot of writeid list when the table/partition is dumped. + * @param catName Catalog name + * @param dbName Database name + * @param tableName Table which is written. + * @param partNames List of partitions being written. + * @throws TException in case of failure to replicate the writeid state + */ + default void replTableWriteIdState(String validWriteIdList, String catName, String dbName, String tableName, + List partNames) + throws TException { + throw new UnsupportedOperationException("MetaStore client does not support replicating table write IDs state"); + } + /** * Allocate a per table write ID and associate it with the given transaction. * @param txnIds ids of transaction batchto which the allocated write ID to be associated. @@ -3675,11 +3720,26 @@ default void replTableWriteIdState(String validWriteIdList, String dbName, Strin * @param tableName table to which the write ID to be allocated * @throws TException */ + @Deprecated default List allocateTableWriteIdsBatch(List txnIds, String dbName, String tableName) throws TException { throw new UnsupportedOperationException("MetaStore client does not support allocating table write IDs in batch"); } + /** + * Allocate a per table write ID and associate it with the given transaction. + * @param txnIds ids of transaction batch to which the allocated write ID to be associated. + * @param catName name of catalog in which the table belongs. + * @param dbName name of DB in which the table belongs. 
+ * @param tableName table to which the write ID to be allocated + * @throws TException + */ + default List allocateTableWriteIdsBatch(List txnIds, String catName, String dbName, + String tableName) + throws TException { + throw new UnsupportedOperationException("MetaStore client does not support allocating table write IDs in batch"); + } + /** * Allocate a per table write ID and associate it with the given transaction. Used by replication load task. * @param dbName name of DB in which the table belongs. @@ -3688,12 +3748,27 @@ default List allocateTableWriteIdsBatch(List txnIds, String * @param srcTxnToWriteIdList List of txn to write id map sent from the source cluster. * @throws TException */ + @Deprecated default List replAllocateTableWriteIdsBatch(String dbName, String tableName, String replPolicy, List srcTxnToWriteIdList) throws TException { throw new UnsupportedOperationException("MetaStore client does not support replicating allocating table write " + "IDs in batch"); } + /** + * Allocate a per table write ID and associate it with the given transaction. Used by replication load task. + * @param dbName name of DB in which the table belongs. + * @param tableName table to which the write ID to be allocated + * @param replPolicy Used by replication task to identify the source cluster. + * @param srcTxnToWriteIdList List of txn to write id map sent from the source cluster. + * @throws TException + */ + default List replAllocateTableWriteIdsBatch(String catName, String dbName, String tableName, String replPolicy, + List srcTxnToWriteIdList) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support replicating allocating table write " + + "IDs in batch"); + } + /** * Get the maximum allocated writeId for the given table * @param dbName name of DB in which the table belongs. 
@@ -3701,10 +3776,23 @@ default List replAllocateTableWriteIdsBatch(String dbName, String * @return the maximum allocated writeId * @throws TException */ + @Deprecated default long getMaxAllocatedWriteId(String dbName, String tableName) throws TException { throw new UnsupportedOperationException("MetaStore client does not support getting maximum allocated write IDs"); } + /** + * Get the maximum allocated writeId for the given table + * @param catName name of catalog in which the table belongs. + * @param dbName name of DB in which the table belongs. + * @param tableName table from which the writeId is queried + * @return the maximum allocated writeId + * @throws TException + */ + default long getMaxAllocatedWriteId(String catName, String dbName, String tableName) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support getting maximum allocated write IDs"); + } + /** * Seed an ACID table with the given writeId. If the table already contains writes it will fail. * @param dbName name of DB in which the table belongs. @@ -3712,10 +3800,23 @@ default long getMaxAllocatedWriteId(String dbName, String tableName) throws TExc * @param seedWriteId the start value of writeId * @throws TException */ + @Deprecated default void seedWriteId(String dbName, String tableName, long seedWriteId) throws TException { throw new UnsupportedOperationException("MetaStore client does not support seeding write IDs"); } + /** + * Seed an ACID table with the given writeId. If the table already contains writes it will fail. + * @param catName name of catalog in which the table belongs. + * @param dbName name of DB in which the table belongs. 
+ * @param tableName table to which the writeId will be set + * @param seedWriteId the start value of writeId + * @throws TException + */ + default void seedWriteId(String catName, String dbName, String tableName, long seedWriteId) throws TException { + throw new UnsupportedOperationException("MetaStore client does not support seeding write IDs"); + } + /** * Seed or increment the global txnId to the given value. * If the actual txnId is greater or equal than the seed value, it wil fail @@ -3936,12 +4037,30 @@ default GetLatestCommittedCompactionInfoResponse getLatestCommittedCompactionInf * @param partNames partition name, as constructed by Warehouse.makePartName * @throws TException */ + @Deprecated default void addDynamicPartitions(long txnId, long writeId, String dbName, String tableName, List partNames, DataOperationType operationType) throws TException { throw new UnsupportedOperationException("MetaStore client does not support adding dynamic partitions"); } + /** + * Send a list of partitions to the metastore to indicate which partitions were loaded + * dynamically. + * @param txnId id of the transaction + * @param writeId table write id for this txn + * @param catName catalog name + * @param dbName database name + * @param tableName table name + * @param partNames partition name, as constructed by Warehouse.makePartName + * @throws TException + */ + default void addDynamicPartitions(long txnId, long writeId, String catName, String dbName, String tableName, + List partNames, DataOperationType operationType) + throws TException { + throw new UnsupportedOperationException("MetaStore client does not support adding dynamic partitions"); + } + /** * Performs the commit/rollback to the metadata storage for insert operator from external storage handler. 
* @param table table name diff --git a/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/client/BaseMetaStoreClient.java b/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/client/BaseMetaStoreClient.java index 78c20bddace2..4c2d62adcc3a 100644 --- a/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/client/BaseMetaStoreClient.java +++ b/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/client/BaseMetaStoreClient.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hive.metastore.IMetaStoreClient; import org.apache.hadoop.hive.metastore.PartitionDropOptions; import org.apache.hadoop.hive.metastore.TableType; +import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.*; import org.apache.hadoop.hive.metastore.client.utils.HiveMetaStoreClientUtils; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; @@ -242,6 +243,7 @@ public final List getTableObjectsByName(String catName, String dbName, Li } @Override + @Deprecated public final void updateCreationMetadata(String dbName, String tableName, CreationMetadata cm) throws MetaException, TException { updateCreationMetadata(getDefaultCatalog(conf), dbName, tableName, cm); @@ -739,7 +741,12 @@ public final void abortTxns(List txnids) throws TException { @Override public final long allocateTableWriteId(long txnId, String dbName, String tableName) throws TException { - return allocateTableWriteId(txnId, dbName, tableName, false); + return allocateTableWriteId(txnId, Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, false); + } + + @Override + public final long allocateTableWriteId(long txnId, String catName, String dbName, String tableName) throws TException { + return allocateTableWriteId(txnId, catName, dbName, tableName, false); } @Override diff --git 
a/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/client/MetaStoreClientWrapper.java b/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/client/MetaStoreClientWrapper.java index f8f3c9ab8459..c585c78412fc 100644 --- a/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/client/MetaStoreClientWrapper.java +++ b/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/client/MetaStoreClientWrapper.java @@ -25,6 +25,7 @@ import org.apache.hadoop.hive.metastore.IMetaStoreClient; import org.apache.hadoop.hive.metastore.PartitionDropOptions; import org.apache.hadoop.hive.metastore.TableType; +import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.*; import org.apache.hadoop.hive.metastore.api.Package; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; @@ -864,7 +865,13 @@ public void updateTableParams(List updates) throws TException @Override public long allocateTableWriteId(long txnId, String dbName, String tableName, boolean reallocate) throws TException { - return delegate.allocateTableWriteId(txnId, dbName, tableName, reallocate); + return delegate.allocateTableWriteId(txnId, Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, reallocate); + } + + @Override + public long allocateTableWriteId(long txnId, String catName, String dbName, String tableName, boolean reallocate) + throws TException { + return delegate.allocateTableWriteId(txnId, catName, dbName, tableName, reallocate); } @Override @@ -873,26 +880,54 @@ public void replTableWriteIdState(String validWriteIdList, String dbName, String delegate.replTableWriteIdState(validWriteIdList, dbName, tableName, partNames); } + @Override + public void replTableWriteIdState(String validWriteIdList, String catName, String dbName, String tableName, + List partNames) throws TException { + 
delegate.replTableWriteIdState(validWriteIdList, catName, dbName, tableName, partNames); + } + @Override public List allocateTableWriteIdsBatch(List txnIds, String dbName, String tableName) throws TException { - return delegate.allocateTableWriteIdsBatch(txnIds, dbName, tableName); + return allocateTableWriteIdsBatch(txnIds, Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName); + } + + @Override + public List allocateTableWriteIdsBatch(List txnIds, String catName, String dbName, String tableName) + throws TException { + return delegate.allocateTableWriteIdsBatch(txnIds, catName, dbName, tableName); } @Override public List replAllocateTableWriteIdsBatch(String dbName, String tableName, String replPolicy, List srcTxnToWriteIdList) throws TException { - return delegate.replAllocateTableWriteIdsBatch(dbName, tableName, replPolicy, srcTxnToWriteIdList); + return replAllocateTableWriteIdsBatch(Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, replPolicy, srcTxnToWriteIdList); + } + + @Override + public List replAllocateTableWriteIdsBatch(String catName, String dbName, String tableName, String replPolicy, + List srcTxnToWriteIdList) throws TException { + return delegate.replAllocateTableWriteIdsBatch(catName, dbName, tableName, replPolicy, srcTxnToWriteIdList); } @Override public long getMaxAllocatedWriteId(String dbName, String tableName) throws TException { - return delegate.getMaxAllocatedWriteId(dbName, tableName); + return getMaxAllocatedWriteId(Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName); + } + + @Override + public long getMaxAllocatedWriteId(String catName, String dbName, String tableName) throws TException { + return delegate.getMaxAllocatedWriteId(catName, dbName, tableName); } @Override public void seedWriteId(String dbName, String tableName, long seedWriteId) throws TException { - delegate.seedWriteId(dbName, tableName, seedWriteId); + seedWriteId(Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, seedWriteId); + } + + @Override + public void 
seedWriteId(String catName, String dbName, String tableName, long seedWriteId) throws TException { + delegate.seedWriteId(catName, dbName, tableName, seedWriteId); } @Override @@ -959,9 +994,16 @@ public GetLatestCommittedCompactionInfoResponse getLatestCommittedCompactionInfo } @Override + @Deprecated public void addDynamicPartitions(long txnId, long writeId, String dbName, String tableName, List partNames, DataOperationType operationType) throws TException { - delegate.addDynamicPartitions(txnId, writeId, dbName, tableName, partNames, operationType); + addDynamicPartitions(txnId, writeId, Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, partNames, operationType); + } + + @Override + public void addDynamicPartitions(long txnId, long writeId, String catName, String dbName, String tableName, + List partNames, DataOperationType operationType) throws TException { + delegate.addDynamicPartitions(txnId, writeId, catName, dbName, tableName, partNames, operationType); } @Override diff --git a/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/client/ThriftHiveMetaStoreClient.java b/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/client/ThriftHiveMetaStoreClient.java index af1dd777c587..0785bc535055 100644 --- a/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/client/ThriftHiveMetaStoreClient.java +++ b/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/client/ThriftHiveMetaStoreClient.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hive.metastore.MetaStorePlainSaslHelper; import org.apache.hadoop.hive.metastore.PartitionDropOptions; import org.apache.hadoop.hive.metastore.TableType; +import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.*; import org.apache.hadoop.hive.metastore.api.Package; import org.apache.hadoop.hive.metastore.client.utils.HiveMetaStoreClientUtils; @@ -3132,6 +3133,13 @@ 
public void abortTxns(AbortTxnsRequest abortTxnsRequest) throws TException { @Override public void replTableWriteIdState(String validWriteIdList, String dbName, String tableName, List partNames) throws TException { + replTableWriteIdState(validWriteIdList, Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, partNames); + } + + @Override + public void replTableWriteIdState(String validWriteIdList, String catName, String dbName, String tableName, + List partNames) + throws TException { String user; try { user = UserGroupInformation.getCurrentUser().getUserName(); @@ -3150,6 +3158,7 @@ public void replTableWriteIdState(String validWriteIdList, String dbName, String ReplTblWriteIdStateRequest rqst = new ReplTblWriteIdStateRequest(validWriteIdList, user, hostName, dbName, tableName); + rqst.setCatName(catName); if (partNames != null) { rqst.setPartNames(partNames); } @@ -3158,18 +3167,30 @@ public void replTableWriteIdState(String validWriteIdList, String dbName, String @Override public long allocateTableWriteId(long txnId, String dbName, String tableName, boolean shouldRealloc) throws TException { - return allocateTableWriteIdsBatch(Collections.singletonList(txnId), dbName, tableName, shouldRealloc).get(0).getWriteId(); + return allocateTableWriteId(txnId, Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, shouldRealloc); + } + + @Override + public long allocateTableWriteId(long txnId, String catName, String dbName, String tableName, boolean shouldRealloc) throws TException { + return allocateTableWriteIdsBatch(Collections.singletonList(txnId), catName, dbName, tableName, + shouldRealloc).get(0).getWriteId(); } @Override public List allocateTableWriteIdsBatch(List txnIds, String dbName, String tableName) throws TException { - return allocateTableWriteIdsBatch(txnIds, dbName, tableName, false); + return allocateTableWriteIdsBatch(txnIds, Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, false); + } + + @Override + public List allocateTableWriteIdsBatch(List txnIds, 
String catname, String dbName, String tableName) throws TException { + return allocateTableWriteIdsBatch(txnIds, catname, dbName, tableName, false); } - private List allocateTableWriteIdsBatch(List txnIds, String dbName, String tableName, + private List allocateTableWriteIdsBatch(List txnIds, String catName, String dbName, String tableName, boolean shouldRealloc) throws TException { AllocateTableWriteIdsRequest rqst = new AllocateTableWriteIdsRequest(dbName, tableName); + rqst.setCatName(catName); rqst.setTxnIds(txnIds); rqst.setReallocate(shouldRealloc); return allocateTableWriteIdsBatchIntr(rqst); @@ -3178,7 +3199,14 @@ private List allocateTableWriteIdsBatch(List txnIds, String @Override public List replAllocateTableWriteIdsBatch(String dbName, String tableName, String replPolicy, List srcTxnToWriteIdList) throws TException { + return replAllocateTableWriteIdsBatch(Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, replPolicy, srcTxnToWriteIdList); + } + + @Override + public List replAllocateTableWriteIdsBatch(String catName, String dbName, String tableName, + String replPolicy, List srcTxnToWriteIdList) throws TException { AllocateTableWriteIdsRequest rqst = new AllocateTableWriteIdsRequest(dbName, tableName); + rqst.setCatName(catName); rqst.setReplPolicy(replPolicy); rqst.setSrcTxnToWriteIdList(srcTxnToWriteIdList); return allocateTableWriteIdsBatchIntr(rqst); @@ -3190,12 +3218,26 @@ private List allocateTableWriteIdsBatchIntr(AllocateTableWriteIdsR @Override public long getMaxAllocatedWriteId(String dbName, String tableName) throws TException { - return client.get_max_allocated_table_write_id(new MaxAllocatedTableWriteIdRequest(dbName, tableName)).getMaxWriteId(); + return getMaxAllocatedWriteId(Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName); + } + + @Override + public long getMaxAllocatedWriteId(String catName, String dbName, String tableName) throws TException { + MaxAllocatedTableWriteIdRequest rqst = new MaxAllocatedTableWriteIdRequest(dbName, 
tableName); + rqst.setCatName(catName); + return client.get_max_allocated_table_write_id(rqst).getMaxWriteId(); } @Override public void seedWriteId(String dbName, String tableName, long seedWriteId) throws TException { - client.seed_write_id(new SeedTableWriteIdsRequest(dbName, tableName, seedWriteId)); + seedWriteId(Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, seedWriteId); + } + + @Override + public void seedWriteId(String catName, String dbName, String tableName, long seedWriteId) throws TException { + SeedTableWriteIdsRequest rqst = new SeedTableWriteIdsRequest(dbName, tableName, seedWriteId); + rqst.setCatName(catName); + client.seed_write_id(rqst); } @Override @@ -3260,13 +3302,21 @@ public GetLatestCommittedCompactionInfoResponse getLatestCommittedCompactionInfo GetLatestCommittedCompactionInfoRequest request) throws TException { GetLatestCommittedCompactionInfoResponse response = client.get_latest_committed_compaction_info(request); return FilterUtils.filterCommittedCompactionInfoStructIfEnabled(isClientFilterEnabled, filterHook, - getDefaultCatalog(conf), request.getDbname(), request.getTablename(), response); + request.getCatName(), request.getDbname(), request.getTablename(), response); } @Override + @Deprecated public void addDynamicPartitions(long txnId, long writeId, String dbName, String tableName, List partNames, DataOperationType operationType) throws TException { + addDynamicPartitions(txnId, writeId, Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName, partNames, operationType); + } + + @Override + public void addDynamicPartitions(long txnId, long writeId, String catName, String dbName, String tableName, + List partNames, DataOperationType operationType) throws TException { AddDynamicPartitions adp = new AddDynamicPartitions(txnId, writeId, dbName, tableName, partNames); + adp.setCatName(catName); adp.setOperationType(operationType); client.add_dynamic_partitions(adp); } diff --git 
a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_constants.cpp b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_constants.cpp index 9f0b0c8cf8d4..9751fbada1e7 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_constants.cpp +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_constants.cpp @@ -51,6 +51,8 @@ hive_metastoreConstants::hive_metastoreConstants() { META_TABLE_NAME = "name"; + META_TABLE_CAT = "cat"; + META_TABLE_DB = "db"; META_TABLE_LOCATION = "location"; diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_constants.h b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_constants.h index 504b54a01d99..490d0aacec57 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_constants.h +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_constants.h @@ -35,6 +35,7 @@ class hive_metastoreConstants { std::string FIELD_TO_DIMENSION; std::string IF_PURGE; std::string META_TABLE_NAME; + std::string META_TABLE_CAT; std::string META_TABLE_DB; std::string META_TABLE_LOCATION; std::string META_TABLE_SERDE; diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.cpp b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.cpp index 4d827d63ad2f..a60307ea1e72 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.cpp +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.cpp @@ -23315,6 +23315,11 @@ void WriteEventInfo::__set_partitionObj(const std::string& val) { this->partitionObj = val; __isset.partitionObj = true; } + +void WriteEventInfo::__set_catalog(const std::string& val) { + this->catalog = val; +__isset.catalog = true; +} std::ostream& operator<<(std::ostream& out, const 
WriteEventInfo& obj) { obj.printTo(out); @@ -23403,6 +23408,14 @@ uint32_t WriteEventInfo::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 8: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catalog); + this->__isset.catalog = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -23459,6 +23472,11 @@ uint32_t WriteEventInfo::write(::apache::thrift::protocol::TProtocol* oprot) con xfer += oprot->writeString(this->partitionObj); xfer += oprot->writeFieldEnd(); } + if (this->__isset.catalog) { + xfer += oprot->writeFieldBegin("catalog", ::apache::thrift::protocol::T_STRING, 8); + xfer += oprot->writeString(this->catalog); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -23473,6 +23491,7 @@ void swap(WriteEventInfo &a, WriteEventInfo &b) { swap(a.partition, b.partition); swap(a.tableObj, b.tableObj); swap(a.partitionObj, b.partitionObj); + swap(a.catalog, b.catalog); swap(a.__isset, b.__isset); } @@ -23484,6 +23503,7 @@ WriteEventInfo::WriteEventInfo(const WriteEventInfo& other904) { partition = other904.partition; tableObj = other904.tableObj; partitionObj = other904.partitionObj; + catalog = other904.catalog; __isset = other904.__isset; } WriteEventInfo& WriteEventInfo::operator=(const WriteEventInfo& other905) { @@ -23494,6 +23514,7 @@ WriteEventInfo& WriteEventInfo::operator=(const WriteEventInfo& other905) { partition = other905.partition; tableObj = other905.tableObj; partitionObj = other905.partitionObj; + catalog = other905.catalog; __isset = other905.__isset; return *this; } @@ -23507,6 +23528,7 @@ void WriteEventInfo::printTo(std::ostream& out) const { out << ", " << "partition="; (__isset.partition ? (out << to_string(partition)) : (out << "")); out << ", " << "tableObj="; (__isset.tableObj ? 
(out << to_string(tableObj)) : (out << "")); out << ", " << "partitionObj="; (__isset.partitionObj ? (out << to_string(partitionObj)) : (out << "")); + out << ", " << "catalog="; (__isset.catalog ? (out << to_string(catalog)) : (out << "")); out << ")"; } @@ -24153,6 +24175,11 @@ void ReplTblWriteIdStateRequest::__set_partNames(const std::vector this->partNames = val; __isset.partNames = true; } + +void ReplTblWriteIdStateRequest::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} std::ostream& operator<<(std::ostream& out, const ReplTblWriteIdStateRequest& obj) { obj.printTo(out); @@ -24246,6 +24273,14 @@ uint32_t ReplTblWriteIdStateRequest::read(::apache::thrift::protocol::TProtocol* xfer += iprot->skip(ftype); } break; + case 7: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -24306,6 +24341,11 @@ uint32_t ReplTblWriteIdStateRequest::write(::apache::thrift::protocol::TProtocol } xfer += oprot->writeFieldEnd(); } + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 7); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -24319,6 +24359,7 @@ void swap(ReplTblWriteIdStateRequest &a, ReplTblWriteIdStateRequest &b) { swap(a.dbName, b.dbName); swap(a.tableName, b.tableName); swap(a.partNames, b.partNames); + swap(a.catName, b.catName); swap(a.__isset, b.__isset); } @@ -24329,6 +24370,7 @@ ReplTblWriteIdStateRequest::ReplTblWriteIdStateRequest(const ReplTblWriteIdState dbName = other931.dbName; tableName = other931.tableName; partNames = other931.partNames; + catName = other931.catName; __isset = other931.__isset; } ReplTblWriteIdStateRequest& 
ReplTblWriteIdStateRequest::operator=(const ReplTblWriteIdStateRequest& other932) { @@ -24338,6 +24380,7 @@ ReplTblWriteIdStateRequest& ReplTblWriteIdStateRequest::operator=(const ReplTblW dbName = other932.dbName; tableName = other932.tableName; partNames = other932.partNames; + catName = other932.catName; __isset = other932.__isset; return *this; } @@ -24350,6 +24393,7 @@ void ReplTblWriteIdStateRequest::printTo(std::ostream& out) const { out << ", " << "dbName=" << to_string(dbName); out << ", " << "tableName=" << to_string(tableName); out << ", " << "partNames="; (__isset.partNames ? (out << to_string(partNames)) : (out << "")); + out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "")); out << ")"; } @@ -24977,6 +25021,11 @@ void AllocateTableWriteIdsRequest::__set_reallocate(const bool val) { this->reallocate = val; __isset.reallocate = true; } + +void AllocateTableWriteIdsRequest::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} std::ostream& operator<<(std::ostream& out, const AllocateTableWriteIdsRequest& obj) { obj.printTo(out); @@ -25079,6 +25128,14 @@ uint32_t AllocateTableWriteIdsRequest::read(::apache::thrift::protocol::TProtoco xfer += iprot->skip(ftype); } break; + case 7: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -25144,6 +25201,11 @@ uint32_t AllocateTableWriteIdsRequest::write(::apache::thrift::protocol::TProtoc xfer += oprot->writeBool(this->reallocate); xfer += oprot->writeFieldEnd(); } + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 7); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -25157,6 +25219,7 @@ 
void swap(AllocateTableWriteIdsRequest &a, AllocateTableWriteIdsRequest &b) { swap(a.replPolicy, b.replPolicy); swap(a.srcTxnToWriteIdList, b.srcTxnToWriteIdList); swap(a.reallocate, b.reallocate); + swap(a.catName, b.catName); swap(a.__isset, b.__isset); } @@ -25167,6 +25230,7 @@ AllocateTableWriteIdsRequest::AllocateTableWriteIdsRequest(const AllocateTableWr replPolicy = other971.replPolicy; srcTxnToWriteIdList = other971.srcTxnToWriteIdList; reallocate = other971.reallocate; + catName = other971.catName; __isset = other971.__isset; } AllocateTableWriteIdsRequest& AllocateTableWriteIdsRequest::operator=(const AllocateTableWriteIdsRequest& other972) { @@ -25176,6 +25240,7 @@ AllocateTableWriteIdsRequest& AllocateTableWriteIdsRequest::operator=(const Allo replPolicy = other972.replPolicy; srcTxnToWriteIdList = other972.srcTxnToWriteIdList; reallocate = other972.reallocate; + catName = other972.catName; __isset = other972.__isset; return *this; } @@ -25188,6 +25253,7 @@ void AllocateTableWriteIdsRequest::printTo(std::ostream& out) const { out << ", " << "replPolicy="; (__isset.replPolicy ? (out << to_string(replPolicy)) : (out << "")); out << ", " << "srcTxnToWriteIdList="; (__isset.srcTxnToWriteIdList ? (out << to_string(srcTxnToWriteIdList)) : (out << "")); out << ", " << "reallocate="; (__isset.reallocate ? (out << to_string(reallocate)) : (out << "")); + out << ", " << "catName="; (__isset.catName ? 
(out << to_string(catName)) : (out << "")); out << ")"; } @@ -25315,6 +25381,11 @@ void MaxAllocatedTableWriteIdRequest::__set_dbName(const std::string& val) { void MaxAllocatedTableWriteIdRequest::__set_tableName(const std::string& val) { this->tableName = val; } + +void MaxAllocatedTableWriteIdRequest::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} std::ostream& operator<<(std::ostream& out, const MaxAllocatedTableWriteIdRequest& obj) { obj.printTo(out); @@ -25361,6 +25432,14 @@ uint32_t MaxAllocatedTableWriteIdRequest::read(::apache::thrift::protocol::TProt xfer += iprot->skip(ftype); } break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -25390,6 +25469,11 @@ uint32_t MaxAllocatedTableWriteIdRequest::write(::apache::thrift::protocol::TPro xfer += oprot->writeString(this->tableName); xfer += oprot->writeFieldEnd(); + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -25399,15 +25483,21 @@ void swap(MaxAllocatedTableWriteIdRequest &a, MaxAllocatedTableWriteIdRequest &b using ::std::swap; swap(a.dbName, b.dbName); swap(a.tableName, b.tableName); + swap(a.catName, b.catName); + swap(a.__isset, b.__isset); } MaxAllocatedTableWriteIdRequest::MaxAllocatedTableWriteIdRequest(const MaxAllocatedTableWriteIdRequest& other981) { dbName = other981.dbName; tableName = other981.tableName; + catName = other981.catName; + __isset = other981.__isset; } MaxAllocatedTableWriteIdRequest& MaxAllocatedTableWriteIdRequest::operator=(const MaxAllocatedTableWriteIdRequest& other982) { dbName = other982.dbName; tableName = 
other982.tableName; + catName = other982.catName; + __isset = other982.__isset; return *this; } void MaxAllocatedTableWriteIdRequest::printTo(std::ostream& out) const { @@ -25415,6 +25505,7 @@ void MaxAllocatedTableWriteIdRequest::printTo(std::ostream& out) const { out << "MaxAllocatedTableWriteIdRequest("; out << "dbName=" << to_string(dbName); out << ", " << "tableName=" << to_string(tableName); + out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "")); out << ")"; } @@ -25526,6 +25617,11 @@ void SeedTableWriteIdsRequest::__set_tableName(const std::string& val) { void SeedTableWriteIdsRequest::__set_seedWriteId(const int64_t val) { this->seedWriteId = val; } + +void SeedTableWriteIdsRequest::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} std::ostream& operator<<(std::ostream& out, const SeedTableWriteIdsRequest& obj) { obj.printTo(out); @@ -25581,6 +25677,14 @@ uint32_t SeedTableWriteIdsRequest::read(::apache::thrift::protocol::TProtocol* i xfer += iprot->skip(ftype); } break; + case 4: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -25616,6 +25720,11 @@ uint32_t SeedTableWriteIdsRequest::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeI64(this->seedWriteId); xfer += oprot->writeFieldEnd(); + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 4); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -25626,17 +25735,23 @@ void swap(SeedTableWriteIdsRequest &a, SeedTableWriteIdsRequest &b) { swap(a.dbName, b.dbName); swap(a.tableName, b.tableName); swap(a.seedWriteId, b.seedWriteId); + swap(a.catName, b.catName); + 
swap(a.__isset, b.__isset); } SeedTableWriteIdsRequest::SeedTableWriteIdsRequest(const SeedTableWriteIdsRequest& other985) { dbName = other985.dbName; tableName = other985.tableName; seedWriteId = other985.seedWriteId; + catName = other985.catName; + __isset = other985.__isset; } SeedTableWriteIdsRequest& SeedTableWriteIdsRequest::operator=(const SeedTableWriteIdsRequest& other986) { dbName = other986.dbName; tableName = other986.tableName; seedWriteId = other986.seedWriteId; + catName = other986.catName; + __isset = other986.__isset; return *this; } void SeedTableWriteIdsRequest::printTo(std::ostream& out) const { @@ -25645,6 +25760,7 @@ void SeedTableWriteIdsRequest::printTo(std::ostream& out) const { out << "dbName=" << to_string(dbName); out << ", " << "tableName=" << to_string(tableName); out << ", " << "seedWriteId=" << to_string(seedWriteId); + out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "")); out << ")"; } @@ -28042,6 +28158,11 @@ void CompactionRequest::__set_orderByClause(const std::string& val) { this->orderByClause = val; __isset.orderByClause = true; } + +void CompactionRequest::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} std::ostream& operator<<(std::ostream& out, const CompactionRequest& obj) { obj.printTo(out); @@ -28178,6 +28299,14 @@ uint32_t CompactionRequest::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 12: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -28262,6 +28391,11 @@ uint32_t CompactionRequest::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeString(this->orderByClause); xfer += oprot->writeFieldEnd(); } + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", 
::apache::thrift::protocol::T_STRING, 12); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -28280,6 +28414,7 @@ void swap(CompactionRequest &a, CompactionRequest &b) { swap(a.poolName, b.poolName); swap(a.numberOfBuckets, b.numberOfBuckets); swap(a.orderByClause, b.orderByClause); + swap(a.catName, b.catName); swap(a.__isset, b.__isset); } @@ -28295,6 +28430,7 @@ CompactionRequest::CompactionRequest(const CompactionRequest& other1054) { poolName = other1054.poolName; numberOfBuckets = other1054.numberOfBuckets; orderByClause = other1054.orderByClause; + catName = other1054.catName; __isset = other1054.__isset; } CompactionRequest& CompactionRequest::operator=(const CompactionRequest& other1055) { @@ -28309,6 +28445,7 @@ CompactionRequest& CompactionRequest::operator=(const CompactionRequest& other10 poolName = other1055.poolName; numberOfBuckets = other1055.numberOfBuckets; orderByClause = other1055.orderByClause; + catName = other1055.catName; __isset = other1055.__isset; return *this; } @@ -28326,6 +28463,7 @@ void CompactionRequest::printTo(std::ostream& out) const { out << ", " << "poolName="; (__isset.poolName ? (out << to_string(poolName)) : (out << "")); out << ", " << "numberOfBuckets="; (__isset.numberOfBuckets ? (out << to_string(numberOfBuckets)) : (out << "")); out << ", " << "orderByClause="; (__isset.orderByClause ? (out << to_string(orderByClause)) : (out << "")); + out << ", " << "catName="; (__isset.catName ? 
(out << to_string(catName)) : (out << "")); out << ")"; } @@ -28424,6 +28562,11 @@ void CompactionInfoStruct::__set_orderByClause(const std::string& val) { this->orderByClause = val; __isset.orderByClause = true; } + +void CompactionInfoStruct::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} std::ostream& operator<<(std::ostream& out, const CompactionInfoStruct& obj) { obj.printTo(out); @@ -28610,6 +28753,14 @@ uint32_t CompactionInfoStruct::read(::apache::thrift::protocol::TProtocol* iprot xfer += iprot->skip(ftype); } break; + case 20: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -28726,6 +28877,11 @@ uint32_t CompactionInfoStruct::write(::apache::thrift::protocol::TProtocol* opro xfer += oprot->writeString(this->orderByClause); xfer += oprot->writeFieldEnd(); } + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 20); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -28752,6 +28908,7 @@ void swap(CompactionInfoStruct &a, CompactionInfoStruct &b) { swap(a.poolname, b.poolname); swap(a.numberOfBuckets, b.numberOfBuckets); swap(a.orderByClause, b.orderByClause); + swap(a.catName, b.catName); swap(a.__isset, b.__isset); } @@ -28775,6 +28932,7 @@ CompactionInfoStruct::CompactionInfoStruct(const CompactionInfoStruct& other1057 poolname = other1057.poolname; numberOfBuckets = other1057.numberOfBuckets; orderByClause = other1057.orderByClause; + catName = other1057.catName; __isset = other1057.__isset; } CompactionInfoStruct& CompactionInfoStruct::operator=(const CompactionInfoStruct& other1058) { @@ -28797,6 +28955,7 @@ CompactionInfoStruct& 
CompactionInfoStruct::operator=(const CompactionInfoStruct poolname = other1058.poolname; numberOfBuckets = other1058.numberOfBuckets; orderByClause = other1058.orderByClause; + catName = other1058.catName; __isset = other1058.__isset; return *this; } @@ -28822,6 +28981,7 @@ void CompactionInfoStruct::printTo(std::ostream& out) const { out << ", " << "poolname="; (__isset.poolname ? (out << to_string(poolname)) : (out << "")); out << ", " << "numberOfBuckets="; (__isset.numberOfBuckets ? (out << to_string(numberOfBuckets)) : (out << "")); out << ", " << "orderByClause="; (__isset.orderByClause ? (out << to_string(orderByClause)) : (out << "")); + out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "")); out << ")"; } @@ -28952,6 +29112,11 @@ void CompactionMetricsDataStruct::__set_version(const int32_t val) { void CompactionMetricsDataStruct::__set_threshold(const int32_t val) { this->threshold = val; } + +void CompactionMetricsDataStruct::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} std::ostream& operator<<(std::ostream& out, const CompactionMetricsDataStruct& obj) { obj.printTo(out); @@ -29044,6 +29209,14 @@ uint32_t CompactionMetricsDataStruct::read(::apache::thrift::protocol::TProtocol xfer += iprot->skip(ftype); } break; + case 8: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -29102,6 +29275,11 @@ uint32_t CompactionMetricsDataStruct::write(::apache::thrift::protocol::TProtoco xfer += oprot->writeI32(this->threshold); xfer += oprot->writeFieldEnd(); + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 8); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += 
oprot->writeStructEnd(); return xfer; @@ -29116,6 +29294,7 @@ void swap(CompactionMetricsDataStruct &a, CompactionMetricsDataStruct &b) { swap(a.metricvalue, b.metricvalue); swap(a.version, b.version); swap(a.threshold, b.threshold); + swap(a.catName, b.catName); swap(a.__isset, b.__isset); } @@ -29127,6 +29306,7 @@ CompactionMetricsDataStruct::CompactionMetricsDataStruct(const CompactionMetrics metricvalue = other1062.metricvalue; version = other1062.version; threshold = other1062.threshold; + catName = other1062.catName; __isset = other1062.__isset; } CompactionMetricsDataStruct& CompactionMetricsDataStruct::operator=(const CompactionMetricsDataStruct& other1063) { @@ -29137,6 +29317,7 @@ CompactionMetricsDataStruct& CompactionMetricsDataStruct::operator=(const Compac metricvalue = other1063.metricvalue; version = other1063.version; threshold = other1063.threshold; + catName = other1063.catName; __isset = other1063.__isset; return *this; } @@ -29150,6 +29331,7 @@ void CompactionMetricsDataStruct::printTo(std::ostream& out) const { out << ", " << "metricvalue=" << to_string(metricvalue); out << ", " << "version=" << to_string(version); out << ", " << "threshold=" << to_string(threshold); + out << ", " << "catName="; (__isset.catName ? 
(out << to_string(catName)) : (out << "")); out << ")"; } @@ -29268,6 +29450,11 @@ __isset.partitionName = true; void CompactionMetricsDataRequest::__set_type(const CompactionMetricsMetricType::type val) { this->type = val; } + +void CompactionMetricsDataRequest::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} std::ostream& operator<<(std::ostream& out, const CompactionMetricsDataRequest& obj) { obj.printTo(out); @@ -29333,6 +29520,14 @@ uint32_t CompactionMetricsDataRequest::read(::apache::thrift::protocol::TProtoco xfer += iprot->skip(ftype); } break; + case 5: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -29373,6 +29568,11 @@ uint32_t CompactionMetricsDataRequest::write(::apache::thrift::protocol::TProtoc xfer += oprot->writeI32(static_cast(this->type)); xfer += oprot->writeFieldEnd(); + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 5); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -29384,6 +29584,7 @@ void swap(CompactionMetricsDataRequest &a, CompactionMetricsDataRequest &b) { swap(a.tblName, b.tblName); swap(a.partitionName, b.partitionName); swap(a.type, b.type); + swap(a.catName, b.catName); swap(a.__isset, b.__isset); } @@ -29392,6 +29593,7 @@ CompactionMetricsDataRequest::CompactionMetricsDataRequest(const CompactionMetri tblName = other1067.tblName; partitionName = other1067.partitionName; type = other1067.type; + catName = other1067.catName; __isset = other1067.__isset; } CompactionMetricsDataRequest& CompactionMetricsDataRequest::operator=(const CompactionMetricsDataRequest& other1068) { @@ -29399,6 +29601,7 @@ CompactionMetricsDataRequest& 
CompactionMetricsDataRequest::operator=(const Comp tblName = other1068.tblName; partitionName = other1068.partitionName; type = other1068.type; + catName = other1068.catName; __isset = other1068.__isset; return *this; } @@ -29409,6 +29612,7 @@ void CompactionMetricsDataRequest::printTo(std::ostream& out) const { out << ", " << "tblName=" << to_string(tblName); out << ", " << "partitionName="; (__isset.partitionName ? (out << to_string(partitionName)) : (out << "")); out << ", " << "type=" << to_string(type); + out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "")); out << ")"; } @@ -29624,6 +29828,11 @@ void ShowCompactRequest::__set_order(const std::string& val) { this->order = val; __isset.order = true; } + +void ShowCompactRequest::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} std::ostream& operator<<(std::ostream& out, const ShowCompactRequest& obj) { obj.printTo(out); @@ -29726,6 +29935,14 @@ uint32_t ShowCompactRequest::read(::apache::thrift::protocol::TProtocol* iprot) xfer += iprot->skip(ftype); } break; + case 10: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -29788,6 +30005,11 @@ uint32_t ShowCompactRequest::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeString(this->order); xfer += oprot->writeFieldEnd(); } + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 10); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -29804,6 +30026,7 @@ void swap(ShowCompactRequest &a, ShowCompactRequest &b) { swap(a.state, b.state); swap(a.limit, b.limit); swap(a.order, b.order); + swap(a.catName, b.catName); 
swap(a.__isset, b.__isset); } @@ -29817,6 +30040,7 @@ ShowCompactRequest::ShowCompactRequest(const ShowCompactRequest& other1072) { state = other1072.state; limit = other1072.limit; order = other1072.order; + catName = other1072.catName; __isset = other1072.__isset; } ShowCompactRequest& ShowCompactRequest::operator=(const ShowCompactRequest& other1073) { @@ -29829,6 +30053,7 @@ ShowCompactRequest& ShowCompactRequest::operator=(const ShowCompactRequest& othe state = other1073.state; limit = other1073.limit; order = other1073.order; + catName = other1073.catName; __isset = other1073.__isset; return *this; } @@ -29844,6 +30069,7 @@ void ShowCompactRequest::printTo(std::ostream& out) const { out << ", " << "state="; (__isset.state ? (out << to_string(state)) : (out << "")); out << ", " << "limit="; (__isset.limit ? (out << to_string(limit)) : (out << "")); out << ", " << "order="; (__isset.order ? (out << to_string(order)) : (out << "")); + out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "")); out << ")"; } @@ -29967,6 +30193,11 @@ void ShowCompactResponseElement::__set_hightestWriteId(const int64_t val) { this->hightestWriteId = val; __isset.hightestWriteId = true; } + +void ShowCompactResponseElement::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} std::ostream& operator<<(std::ostream& out, const ShowCompactResponseElement& obj) { obj.printTo(out); @@ -30193,6 +30424,14 @@ uint32_t ShowCompactResponseElement::read(::apache::thrift::protocol::TProtocol* xfer += iprot->skip(ftype); } break; + case 25: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -30334,6 +30573,11 @@ uint32_t ShowCompactResponseElement::write(::apache::thrift::protocol::TProtocol xfer += oprot->writeI64(this->hightestWriteId); xfer += 
oprot->writeFieldEnd(); } + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 25); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -30365,6 +30609,7 @@ void swap(ShowCompactResponseElement &a, ShowCompactResponseElement &b) { swap(a.txnId, b.txnId); swap(a.commitTime, b.commitTime); swap(a.hightestWriteId, b.hightestWriteId); + swap(a.catName, b.catName); swap(a.__isset, b.__isset); } @@ -30393,6 +30638,7 @@ ShowCompactResponseElement::ShowCompactResponseElement(const ShowCompactResponse txnId = other1075.txnId; commitTime = other1075.commitTime; hightestWriteId = other1075.hightestWriteId; + catName = other1075.catName; __isset = other1075.__isset; } ShowCompactResponseElement& ShowCompactResponseElement::operator=(const ShowCompactResponseElement& other1076) { @@ -30420,6 +30666,7 @@ ShowCompactResponseElement& ShowCompactResponseElement::operator=(const ShowComp txnId = other1076.txnId; commitTime = other1076.commitTime; hightestWriteId = other1076.hightestWriteId; + catName = other1076.catName; __isset = other1076.__isset; return *this; } @@ -30450,6 +30697,7 @@ void ShowCompactResponseElement::printTo(std::ostream& out) const { out << ", " << "txnId="; (__isset.txnId ? (out << to_string(txnId)) : (out << "")); out << ", " << "commitTime="; (__isset.commitTime ? (out << to_string(commitTime)) : (out << "")); out << ", " << "hightestWriteId="; (__isset.hightestWriteId ? (out << to_string(hightestWriteId)) : (out << "")); + out << ", " << "catName="; (__isset.catName ? 
(out << to_string(catName)) : (out << "")); out << ")"; } @@ -31001,6 +31249,11 @@ void GetLatestCommittedCompactionInfoRequest::__set_lastCompactionId(const int64 this->lastCompactionId = val; __isset.lastCompactionId = true; } + +void GetLatestCommittedCompactionInfoRequest::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} std::ostream& operator<<(std::ostream& out, const GetLatestCommittedCompactionInfoRequest& obj) { obj.printTo(out); @@ -31075,6 +31328,14 @@ uint32_t GetLatestCommittedCompactionInfoRequest::read(::apache::thrift::protoco xfer += iprot->skip(ftype); } break; + case 5: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -31122,6 +31383,11 @@ uint32_t GetLatestCommittedCompactionInfoRequest::write(::apache::thrift::protoc xfer += oprot->writeI64(this->lastCompactionId); xfer += oprot->writeFieldEnd(); } + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 5); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -31133,6 +31399,7 @@ void swap(GetLatestCommittedCompactionInfoRequest &a, GetLatestCommittedCompacti swap(a.tablename, b.tablename); swap(a.partitionnames, b.partitionnames); swap(a.lastCompactionId, b.lastCompactionId); + swap(a.catName, b.catName); swap(a.__isset, b.__isset); } @@ -31141,6 +31408,7 @@ GetLatestCommittedCompactionInfoRequest::GetLatestCommittedCompactionInfoRequest tablename = other1111.tablename; partitionnames = other1111.partitionnames; lastCompactionId = other1111.lastCompactionId; + catName = other1111.catName; __isset = other1111.__isset; } GetLatestCommittedCompactionInfoRequest& 
GetLatestCommittedCompactionInfoRequest::operator=(const GetLatestCommittedCompactionInfoRequest& other1112) { @@ -31148,6 +31416,7 @@ GetLatestCommittedCompactionInfoRequest& GetLatestCommittedCompactionInfoRequest tablename = other1112.tablename; partitionnames = other1112.partitionnames; lastCompactionId = other1112.lastCompactionId; + catName = other1112.catName; __isset = other1112.__isset; return *this; } @@ -31158,6 +31427,7 @@ void GetLatestCommittedCompactionInfoRequest::printTo(std::ostream& out) const { out << ", " << "tablename=" << to_string(tablename); out << ", " << "partitionnames="; (__isset.partitionnames ? (out << to_string(partitionnames)) : (out << "")); out << ", " << "lastCompactionId="; (__isset.lastCompactionId ? (out << to_string(lastCompactionId)) : (out << "")); + out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "")); out << ")"; } @@ -31440,6 +31710,11 @@ void AddDynamicPartitions::__set_operationType(const DataOperationType::type val this->operationType = val; __isset.operationType = true; } + +void AddDynamicPartitions::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} std::ostream& operator<<(std::ostream& out, const AddDynamicPartitions& obj) { obj.printTo(out); @@ -31535,6 +31810,14 @@ uint32_t AddDynamicPartitions::read(::apache::thrift::protocol::TProtocol* iprot xfer += iprot->skip(ftype); } break; + case 7: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -31595,6 +31878,11 @@ uint32_t AddDynamicPartitions::write(::apache::thrift::protocol::TProtocol* opro xfer += oprot->writeI32(static_cast(this->operationType)); xfer += oprot->writeFieldEnd(); } + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 7); + xfer += 
oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -31608,6 +31896,7 @@ void swap(AddDynamicPartitions &a, AddDynamicPartitions &b) { swap(a.tablename, b.tablename); swap(a.partitionnames, b.partitionnames); swap(a.operationType, b.operationType); + swap(a.catName, b.catName); swap(a.__isset, b.__isset); } @@ -31618,6 +31907,7 @@ AddDynamicPartitions::AddDynamicPartitions(const AddDynamicPartitions& other1130 tablename = other1130.tablename; partitionnames = other1130.partitionnames; operationType = other1130.operationType; + catName = other1130.catName; __isset = other1130.__isset; } AddDynamicPartitions& AddDynamicPartitions::operator=(const AddDynamicPartitions& other1131) { @@ -31627,6 +31917,7 @@ AddDynamicPartitions& AddDynamicPartitions::operator=(const AddDynamicPartitions tablename = other1131.tablename; partitionnames = other1131.partitionnames; operationType = other1131.operationType; + catName = other1131.catName; __isset = other1131.__isset; return *this; } @@ -31639,6 +31930,7 @@ void AddDynamicPartitions::printTo(std::ostream& out) const { out << ", " << "tablename=" << to_string(tablename); out << ", " << "partitionnames=" << to_string(partitionnames); out << ", " << "operationType="; (__isset.operationType ? (out << to_string(operationType)) : (out << "")); + out << ", " << "catName="; (__isset.catName ? 
(out << to_string(catName)) : (out << "")); out << ")"; } @@ -33804,6 +34096,11 @@ void WriteNotificationLogRequest::__set_partitionVals(const std::vectorpartitionVals = val; __isset.partitionVals = true; } + +void WriteNotificationLogRequest::__set_cat(const std::string& val) { + this->cat = val; +__isset.cat = true; +} std::ostream& operator<<(std::ostream& out, const WriteNotificationLogRequest& obj) { obj.printTo(out); @@ -33897,6 +34194,14 @@ uint32_t WriteNotificationLogRequest::read(::apache::thrift::protocol::TProtocol xfer += iprot->skip(ftype); } break; + case 7: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->cat); + this->__isset.cat = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -33957,6 +34262,11 @@ uint32_t WriteNotificationLogRequest::write(::apache::thrift::protocol::TProtoco } xfer += oprot->writeFieldEnd(); } + if (this->__isset.cat) { + xfer += oprot->writeFieldBegin("cat", ::apache::thrift::protocol::T_STRING, 7); + xfer += oprot->writeString(this->cat); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -33970,6 +34280,7 @@ void swap(WriteNotificationLogRequest &a, WriteNotificationLogRequest &b) { swap(a.table, b.table); swap(a.fileInfo, b.fileInfo); swap(a.partitionVals, b.partitionVals); + swap(a.cat, b.cat); swap(a.__isset, b.__isset); } @@ -33980,6 +34291,7 @@ WriteNotificationLogRequest::WriteNotificationLogRequest(const WriteNotification table = other1252.table; fileInfo = other1252.fileInfo; partitionVals = other1252.partitionVals; + cat = other1252.cat; __isset = other1252.__isset; } WriteNotificationLogRequest& WriteNotificationLogRequest::operator=(const WriteNotificationLogRequest& other1253) { @@ -33989,6 +34301,7 @@ WriteNotificationLogRequest& WriteNotificationLogRequest::operator=(const WriteN table = other1253.table; fileInfo = other1253.fileInfo; 
partitionVals = other1253.partitionVals; + cat = other1253.cat; __isset = other1253.__isset; return *this; } @@ -34001,6 +34314,7 @@ void WriteNotificationLogRequest::printTo(std::ostream& out) const { out << ", " << "table=" << to_string(table); out << ", " << "fileInfo=" << to_string(fileInfo); out << ", " << "partitionVals="; (__isset.partitionVals ? (out << to_string(partitionVals)) : (out << "")); + out << ", " << "cat="; (__isset.cat ? (out << to_string(cat)) : (out << "")); out << ")"; } @@ -54650,6 +54964,11 @@ void GetAllWriteEventInfoRequest::__set_tableName(const std::string& val) { this->tableName = val; __isset.tableName = true; } + +void GetAllWriteEventInfoRequest::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} std::ostream& operator<<(std::ostream& out, const GetAllWriteEventInfoRequest& obj) { obj.printTo(out); @@ -54703,6 +55022,14 @@ uint32_t GetAllWriteEventInfoRequest::read(::apache::thrift::protocol::TProtocol xfer += iprot->skip(ftype); } break; + case 4: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -54736,6 +55063,11 @@ uint32_t GetAllWriteEventInfoRequest::write(::apache::thrift::protocol::TProtoco xfer += oprot->writeString(this->tableName); xfer += oprot->writeFieldEnd(); } + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 4); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -54746,6 +55078,7 @@ void swap(GetAllWriteEventInfoRequest &a, GetAllWriteEventInfoRequest &b) { swap(a.txnId, b.txnId); swap(a.dbName, b.dbName); swap(a.tableName, b.tableName); + swap(a.catName, b.catName); swap(a.__isset, b.__isset); } @@ -54753,12 +55086,14 @@ 
GetAllWriteEventInfoRequest::GetAllWriteEventInfoRequest(const GetAllWriteEventI txnId = other1917.txnId; dbName = other1917.dbName; tableName = other1917.tableName; + catName = other1917.catName; __isset = other1917.__isset; } GetAllWriteEventInfoRequest& GetAllWriteEventInfoRequest::operator=(const GetAllWriteEventInfoRequest& other1918) { txnId = other1918.txnId; dbName = other1918.dbName; tableName = other1918.tableName; + catName = other1918.catName; __isset = other1918.__isset; return *this; } @@ -54768,6 +55103,7 @@ void GetAllWriteEventInfoRequest::printTo(std::ostream& out) const { out << "txnId=" << to_string(txnId); out << ", " << "dbName="; (__isset.dbName ? (out << to_string(dbName)) : (out << "")); out << ", " << "tableName="; (__isset.tableName ? (out << to_string(tableName)) : (out << "")); + out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "")); out << ")"; } diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.h b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.h index fde2a96483a3..694acb17b345 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.h +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.h @@ -9319,10 +9319,11 @@ void swap(CommitTxnKeyValue &a, CommitTxnKeyValue &b); std::ostream& operator<<(std::ostream& out, const CommitTxnKeyValue& obj); typedef struct _WriteEventInfo__isset { - _WriteEventInfo__isset() : partition(false), tableObj(false), partitionObj(false) {} + _WriteEventInfo__isset() : partition(false), tableObj(false), partitionObj(false), catalog(true) {} bool partition :1; bool tableObj :1; bool partitionObj :1; + bool catalog :1; } _WriteEventInfo__isset; class WriteEventInfo : public virtual ::apache::thrift::TBase { @@ -9330,14 +9331,14 @@ class WriteEventInfo : public virtual ::apache::thrift::TBase { WriteEventInfo(const 
WriteEventInfo&); WriteEventInfo& operator=(const WriteEventInfo&); - WriteEventInfo() noexcept - : writeId(0), - database(), - table(), - files(), - partition(), - tableObj(), - partitionObj() { + WriteEventInfo() : writeId(0), + database(), + table(), + files(), + partition(), + tableObj(), + partitionObj(), + catalog("hive") { } virtual ~WriteEventInfo() noexcept; @@ -9348,6 +9349,7 @@ class WriteEventInfo : public virtual ::apache::thrift::TBase { std::string partition; std::string tableObj; std::string partitionObj; + std::string catalog; _WriteEventInfo__isset __isset; @@ -9365,6 +9367,8 @@ class WriteEventInfo : public virtual ::apache::thrift::TBase { void __set_partitionObj(const std::string& val); + void __set_catalog(const std::string& val); + bool operator == (const WriteEventInfo & rhs) const { if (!(writeId == rhs.writeId)) @@ -9387,6 +9391,10 @@ class WriteEventInfo : public virtual ::apache::thrift::TBase { return false; else if (__isset.partitionObj && !(partitionObj == rhs.partitionObj)) return false; + if (__isset.catalog != rhs.__isset.catalog) + return false; + else if (__isset.catalog && !(catalog == rhs.catalog)) + return false; return true; } bool operator != (const WriteEventInfo &rhs) const { @@ -9632,8 +9640,9 @@ void swap(CommitTxnRequest &a, CommitTxnRequest &b); std::ostream& operator<<(std::ostream& out, const CommitTxnRequest& obj); typedef struct _ReplTblWriteIdStateRequest__isset { - _ReplTblWriteIdStateRequest__isset() : partNames(false) {} + _ReplTblWriteIdStateRequest__isset() : partNames(false), catName(true) {} bool partNames :1; + bool catName :1; } _ReplTblWriteIdStateRequest__isset; class ReplTblWriteIdStateRequest : public virtual ::apache::thrift::TBase { @@ -9641,12 +9650,12 @@ class ReplTblWriteIdStateRequest : public virtual ::apache::thrift::TBase { ReplTblWriteIdStateRequest(const ReplTblWriteIdStateRequest&); ReplTblWriteIdStateRequest& operator=(const ReplTblWriteIdStateRequest&); - ReplTblWriteIdStateRequest() 
noexcept - : validWriteIdlist(), - user(), - hostName(), - dbName(), - tableName() { + ReplTblWriteIdStateRequest() : validWriteIdlist(), + user(), + hostName(), + dbName(), + tableName(), + catName("hive") { } virtual ~ReplTblWriteIdStateRequest() noexcept; @@ -9656,6 +9665,7 @@ class ReplTblWriteIdStateRequest : public virtual ::apache::thrift::TBase { std::string dbName; std::string tableName; std::vector partNames; + std::string catName; _ReplTblWriteIdStateRequest__isset __isset; @@ -9671,6 +9681,8 @@ class ReplTblWriteIdStateRequest : public virtual ::apache::thrift::TBase { void __set_partNames(const std::vector & val); + void __set_catName(const std::string& val); + bool operator == (const ReplTblWriteIdStateRequest & rhs) const { if (!(validWriteIdlist == rhs.validWriteIdlist)) @@ -9687,6 +9699,10 @@ class ReplTblWriteIdStateRequest : public virtual ::apache::thrift::TBase { return false; else if (__isset.partNames && !(partNames == rhs.partNames)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const ReplTblWriteIdStateRequest &rhs) const { @@ -9912,11 +9928,12 @@ void swap(TxnToWriteId &a, TxnToWriteId &b); std::ostream& operator<<(std::ostream& out, const TxnToWriteId& obj); typedef struct _AllocateTableWriteIdsRequest__isset { - _AllocateTableWriteIdsRequest__isset() : txnIds(false), replPolicy(false), srcTxnToWriteIdList(false), reallocate(true) {} + _AllocateTableWriteIdsRequest__isset() : txnIds(false), replPolicy(false), srcTxnToWriteIdList(false), reallocate(true), catName(true) {} bool txnIds :1; bool replPolicy :1; bool srcTxnToWriteIdList :1; bool reallocate :1; + bool catName :1; } _AllocateTableWriteIdsRequest__isset; class AllocateTableWriteIdsRequest : public virtual ::apache::thrift::TBase { @@ -9924,11 +9941,11 @@ class AllocateTableWriteIdsRequest : public virtual ::apache::thrift::TBase { 
AllocateTableWriteIdsRequest(const AllocateTableWriteIdsRequest&); AllocateTableWriteIdsRequest& operator=(const AllocateTableWriteIdsRequest&); - AllocateTableWriteIdsRequest() noexcept - : dbName(), - tableName(), - replPolicy(), - reallocate(false) { + AllocateTableWriteIdsRequest() : dbName(), + tableName(), + replPolicy(), + reallocate(false), + catName("hive") { } virtual ~AllocateTableWriteIdsRequest() noexcept; @@ -9938,6 +9955,7 @@ class AllocateTableWriteIdsRequest : public virtual ::apache::thrift::TBase { std::string replPolicy; std::vector srcTxnToWriteIdList; bool reallocate; + std::string catName; _AllocateTableWriteIdsRequest__isset __isset; @@ -9953,6 +9971,8 @@ class AllocateTableWriteIdsRequest : public virtual ::apache::thrift::TBase { void __set_reallocate(const bool val); + void __set_catName(const std::string& val); + bool operator == (const AllocateTableWriteIdsRequest & rhs) const { if (!(dbName == rhs.dbName)) @@ -9975,6 +9995,10 @@ class AllocateTableWriteIdsRequest : public virtual ::apache::thrift::TBase { return false; else if (__isset.reallocate && !(reallocate == rhs.reallocate)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const AllocateTableWriteIdsRequest &rhs) const { @@ -10029,31 +10053,44 @@ void swap(AllocateTableWriteIdsResponse &a, AllocateTableWriteIdsResponse &b); std::ostream& operator<<(std::ostream& out, const AllocateTableWriteIdsResponse& obj); +typedef struct _MaxAllocatedTableWriteIdRequest__isset { + _MaxAllocatedTableWriteIdRequest__isset() : catName(true) {} + bool catName :1; +} _MaxAllocatedTableWriteIdRequest__isset; class MaxAllocatedTableWriteIdRequest : public virtual ::apache::thrift::TBase { public: MaxAllocatedTableWriteIdRequest(const MaxAllocatedTableWriteIdRequest&); MaxAllocatedTableWriteIdRequest& operator=(const MaxAllocatedTableWriteIdRequest&); - 
MaxAllocatedTableWriteIdRequest() noexcept - : dbName(), - tableName() { + MaxAllocatedTableWriteIdRequest() : dbName(), + tableName(), + catName("hive") { } virtual ~MaxAllocatedTableWriteIdRequest() noexcept; std::string dbName; std::string tableName; + std::string catName; + + _MaxAllocatedTableWriteIdRequest__isset __isset; void __set_dbName(const std::string& val); void __set_tableName(const std::string& val); + void __set_catName(const std::string& val); + bool operator == (const MaxAllocatedTableWriteIdRequest & rhs) const { if (!(dbName == rhs.dbName)) return false; if (!(tableName == rhs.tableName)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const MaxAllocatedTableWriteIdRequest &rhs) const { @@ -10109,22 +10146,29 @@ void swap(MaxAllocatedTableWriteIdResponse &a, MaxAllocatedTableWriteIdResponse std::ostream& operator<<(std::ostream& out, const MaxAllocatedTableWriteIdResponse& obj); +typedef struct _SeedTableWriteIdsRequest__isset { + _SeedTableWriteIdsRequest__isset() : catName(true) {} + bool catName :1; +} _SeedTableWriteIdsRequest__isset; class SeedTableWriteIdsRequest : public virtual ::apache::thrift::TBase { public: SeedTableWriteIdsRequest(const SeedTableWriteIdsRequest&); SeedTableWriteIdsRequest& operator=(const SeedTableWriteIdsRequest&); - SeedTableWriteIdsRequest() noexcept - : dbName(), - tableName(), - seedWriteId(0) { + SeedTableWriteIdsRequest() : dbName(), + tableName(), + seedWriteId(0), + catName("hive") { } virtual ~SeedTableWriteIdsRequest() noexcept; std::string dbName; std::string tableName; int64_t seedWriteId; + std::string catName; + + _SeedTableWriteIdsRequest__isset __isset; void __set_dbName(const std::string& val); @@ -10132,6 +10176,8 @@ class SeedTableWriteIdsRequest : public virtual ::apache::thrift::TBase { void __set_seedWriteId(const int64_t val); + void __set_catName(const 
std::string& val); + bool operator == (const SeedTableWriteIdsRequest & rhs) const { if (!(dbName == rhs.dbName)) @@ -10140,6 +10186,10 @@ class SeedTableWriteIdsRequest : public virtual ::apache::thrift::TBase { return false; if (!(seedWriteId == rhs.seedWriteId)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const SeedTableWriteIdsRequest &rhs) const { @@ -11069,7 +11119,7 @@ void swap(HeartbeatTxnRangeResponse &a, HeartbeatTxnRangeResponse &b); std::ostream& operator<<(std::ostream& out, const HeartbeatTxnRangeResponse& obj); typedef struct _CompactionRequest__isset { - _CompactionRequest__isset() : partitionname(false), runas(false), properties(false), initiatorId(false), initiatorVersion(false), poolName(false), numberOfBuckets(false), orderByClause(false) {} + _CompactionRequest__isset() : partitionname(false), runas(false), properties(false), initiatorId(false), initiatorVersion(false), poolName(false), numberOfBuckets(false), orderByClause(false), catName(true) {} bool partitionname :1; bool runas :1; bool properties :1; @@ -11078,6 +11128,7 @@ typedef struct _CompactionRequest__isset { bool poolName :1; bool numberOfBuckets :1; bool orderByClause :1; + bool catName :1; } _CompactionRequest__isset; class CompactionRequest : public virtual ::apache::thrift::TBase { @@ -11085,17 +11136,17 @@ class CompactionRequest : public virtual ::apache::thrift::TBase { CompactionRequest(const CompactionRequest&); CompactionRequest& operator=(const CompactionRequest&); - CompactionRequest() noexcept - : dbname(), - tablename(), - partitionname(), - type(static_cast(0)), - runas(), - initiatorId(), - initiatorVersion(), - poolName(), - numberOfBuckets(0), - orderByClause() { + CompactionRequest() : dbname(), + tablename(), + partitionname(), + type(static_cast(0)), + runas(), + initiatorId(), + initiatorVersion(), + poolName(), + 
numberOfBuckets(0), + orderByClause(), + catName("hive") { } virtual ~CompactionRequest() noexcept; @@ -11114,6 +11165,7 @@ class CompactionRequest : public virtual ::apache::thrift::TBase { std::string poolName; int32_t numberOfBuckets; std::string orderByClause; + std::string catName; _CompactionRequest__isset __isset; @@ -11139,6 +11191,8 @@ class CompactionRequest : public virtual ::apache::thrift::TBase { void __set_orderByClause(const std::string& val); + void __set_catName(const std::string& val); + bool operator == (const CompactionRequest & rhs) const { if (!(dbname == rhs.dbname)) @@ -11179,6 +11233,10 @@ class CompactionRequest : public virtual ::apache::thrift::TBase { return false; else if (__isset.orderByClause && !(orderByClause == rhs.orderByClause)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const CompactionRequest &rhs) const { @@ -11198,7 +11256,7 @@ void swap(CompactionRequest &a, CompactionRequest &b); std::ostream& operator<<(std::ostream& out, const CompactionRequest& obj); typedef struct _CompactionInfoStruct__isset { - _CompactionInfoStruct__isset() : partitionname(false), runas(false), properties(false), toomanyaborts(false), state(false), workerId(false), start(false), highestWriteId(false), errorMessage(false), hasoldabort(false), enqueueTime(false), retryRetention(false), poolname(false), numberOfBuckets(false), orderByClause(false) {} + _CompactionInfoStruct__isset() : partitionname(false), runas(false), properties(false), toomanyaborts(false), state(false), workerId(false), start(false), highestWriteId(false), errorMessage(false), hasoldabort(false), enqueueTime(false), retryRetention(false), poolname(false), numberOfBuckets(false), orderByClause(false), catName(true) {} bool partitionname :1; bool runas :1; bool properties :1; @@ -11214,6 +11272,7 @@ typedef struct _CompactionInfoStruct__isset { 
bool poolname :1; bool numberOfBuckets :1; bool orderByClause :1; + bool catName :1; } _CompactionInfoStruct__isset; class CompactionInfoStruct : public virtual ::apache::thrift::TBase { @@ -11221,26 +11280,26 @@ class CompactionInfoStruct : public virtual ::apache::thrift::TBase { CompactionInfoStruct(const CompactionInfoStruct&); CompactionInfoStruct& operator=(const CompactionInfoStruct&); - CompactionInfoStruct() noexcept - : id(0), - dbname(), - tablename(), - partitionname(), - type(static_cast(0)), - runas(), - properties(), - toomanyaborts(0), - state(), - workerId(), - start(0), - highestWriteId(0), - errorMessage(), - hasoldabort(0), - enqueueTime(0), - retryRetention(0), - poolname(), - numberOfBuckets(0), - orderByClause() { + CompactionInfoStruct() : id(0), + dbname(), + tablename(), + partitionname(), + type(static_cast(0)), + runas(), + properties(), + toomanyaborts(0), + state(), + workerId(), + start(0), + highestWriteId(0), + errorMessage(), + hasoldabort(0), + enqueueTime(0), + retryRetention(0), + poolname(), + numberOfBuckets(0), + orderByClause(), + catName("hive") { } virtual ~CompactionInfoStruct() noexcept; @@ -11267,6 +11326,7 @@ class CompactionInfoStruct : public virtual ::apache::thrift::TBase { std::string poolname; int32_t numberOfBuckets; std::string orderByClause; + std::string catName; _CompactionInfoStruct__isset __isset; @@ -11308,6 +11368,8 @@ class CompactionInfoStruct : public virtual ::apache::thrift::TBase { void __set_orderByClause(const std::string& val); + void __set_catName(const std::string& val); + bool operator == (const CompactionInfoStruct & rhs) const { if (!(id == rhs.id)) @@ -11378,6 +11440,10 @@ class CompactionInfoStruct : public virtual ::apache::thrift::TBase { return false; else if (__isset.orderByClause && !(orderByClause == rhs.orderByClause)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } 
bool operator != (const CompactionInfoStruct &rhs) const { @@ -11441,8 +11507,9 @@ void swap(OptionalCompactionInfoStruct &a, OptionalCompactionInfoStruct &b); std::ostream& operator<<(std::ostream& out, const OptionalCompactionInfoStruct& obj); typedef struct _CompactionMetricsDataStruct__isset { - _CompactionMetricsDataStruct__isset() : partitionname(false) {} + _CompactionMetricsDataStruct__isset() : partitionname(false), catName(true) {} bool partitionname :1; + bool catName :1; } _CompactionMetricsDataStruct__isset; class CompactionMetricsDataStruct : public virtual ::apache::thrift::TBase { @@ -11450,14 +11517,14 @@ class CompactionMetricsDataStruct : public virtual ::apache::thrift::TBase { CompactionMetricsDataStruct(const CompactionMetricsDataStruct&); CompactionMetricsDataStruct& operator=(const CompactionMetricsDataStruct&); - CompactionMetricsDataStruct() noexcept - : dbname(), - tblname(), - partitionname(), - type(static_cast(0)), - metricvalue(0), - version(0), - threshold(0) { + CompactionMetricsDataStruct() : dbname(), + tblname(), + partitionname(), + type(static_cast(0)), + metricvalue(0), + version(0), + threshold(0), + catName("hive") { } virtual ~CompactionMetricsDataStruct() noexcept; @@ -11472,6 +11539,7 @@ class CompactionMetricsDataStruct : public virtual ::apache::thrift::TBase { int32_t metricvalue; int32_t version; int32_t threshold; + std::string catName; _CompactionMetricsDataStruct__isset __isset; @@ -11489,6 +11557,8 @@ class CompactionMetricsDataStruct : public virtual ::apache::thrift::TBase { void __set_threshold(const int32_t val); + void __set_catName(const std::string& val); + bool operator == (const CompactionMetricsDataStruct & rhs) const { if (!(dbname == rhs.dbname)) @@ -11507,6 +11577,10 @@ class CompactionMetricsDataStruct : public virtual ::apache::thrift::TBase { return false; if (!(threshold == rhs.threshold)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && 
!(catName == rhs.catName)) + return false; return true; } bool operator != (const CompactionMetricsDataStruct &rhs) const { @@ -11570,8 +11644,9 @@ void swap(CompactionMetricsDataResponse &a, CompactionMetricsDataResponse &b); std::ostream& operator<<(std::ostream& out, const CompactionMetricsDataResponse& obj); typedef struct _CompactionMetricsDataRequest__isset { - _CompactionMetricsDataRequest__isset() : partitionName(false) {} + _CompactionMetricsDataRequest__isset() : partitionName(false), catName(true) {} bool partitionName :1; + bool catName :1; } _CompactionMetricsDataRequest__isset; class CompactionMetricsDataRequest : public virtual ::apache::thrift::TBase { @@ -11579,11 +11654,11 @@ class CompactionMetricsDataRequest : public virtual ::apache::thrift::TBase { CompactionMetricsDataRequest(const CompactionMetricsDataRequest&); CompactionMetricsDataRequest& operator=(const CompactionMetricsDataRequest&); - CompactionMetricsDataRequest() noexcept - : dbName(), - tblName(), - partitionName(), - type(static_cast(0)) { + CompactionMetricsDataRequest() : dbName(), + tblName(), + partitionName(), + type(static_cast(0)), + catName("hive") { } virtual ~CompactionMetricsDataRequest() noexcept; @@ -11595,6 +11670,7 @@ class CompactionMetricsDataRequest : public virtual ::apache::thrift::TBase { * @see CompactionMetricsMetricType */ CompactionMetricsMetricType::type type; + std::string catName; _CompactionMetricsDataRequest__isset __isset; @@ -11606,6 +11682,8 @@ class CompactionMetricsDataRequest : public virtual ::apache::thrift::TBase { void __set_type(const CompactionMetricsMetricType::type val); + void __set_catName(const std::string& val); + bool operator == (const CompactionMetricsDataRequest & rhs) const { if (!(dbName == rhs.dbName)) @@ -11618,6 +11696,10 @@ class CompactionMetricsDataRequest : public virtual ::apache::thrift::TBase { return false; if (!(type == rhs.type)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if 
(__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const CompactionMetricsDataRequest &rhs) const { @@ -11700,7 +11782,7 @@ void swap(CompactionResponse &a, CompactionResponse &b); std::ostream& operator<<(std::ostream& out, const CompactionResponse& obj); typedef struct _ShowCompactRequest__isset { - _ShowCompactRequest__isset() : id(false), poolName(false), dbName(false), tbName(false), partName(false), type(false), state(false), limit(false), order(false) {} + _ShowCompactRequest__isset() : id(false), poolName(false), dbName(false), tbName(false), partName(false), type(false), state(false), limit(false), order(false), catName(true) {} bool id :1; bool poolName :1; bool dbName :1; @@ -11710,6 +11792,7 @@ typedef struct _ShowCompactRequest__isset { bool state :1; bool limit :1; bool order :1; + bool catName :1; } _ShowCompactRequest__isset; class ShowCompactRequest : public virtual ::apache::thrift::TBase { @@ -11717,16 +11800,16 @@ class ShowCompactRequest : public virtual ::apache::thrift::TBase { ShowCompactRequest(const ShowCompactRequest&); ShowCompactRequest& operator=(const ShowCompactRequest&); - ShowCompactRequest() noexcept - : id(0), - poolName(), - dbName(), - tbName(), - partName(), - type(static_cast(0)), - state(), - limit(0), - order() { + ShowCompactRequest() : id(0), + poolName(), + dbName(), + tbName(), + partName(), + type(static_cast(0)), + state(), + limit(0), + order(), + catName("hive") { } virtual ~ShowCompactRequest() noexcept; @@ -11743,6 +11826,7 @@ class ShowCompactRequest : public virtual ::apache::thrift::TBase { std::string state; int64_t limit; std::string order; + std::string catName; _ShowCompactRequest__isset __isset; @@ -11764,6 +11848,8 @@ class ShowCompactRequest : public virtual ::apache::thrift::TBase { void __set_order(const std::string& val); + void __set_catName(const std::string& val); + bool operator == (const ShowCompactRequest & rhs) const { if (__isset.id != 
rhs.__isset.id) @@ -11802,6 +11888,10 @@ class ShowCompactRequest : public virtual ::apache::thrift::TBase { return false; else if (__isset.order && !(order == rhs.order)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const ShowCompactRequest &rhs) const { @@ -11821,7 +11911,7 @@ void swap(ShowCompactRequest &a, ShowCompactRequest &b); std::ostream& operator<<(std::ostream& out, const ShowCompactRequest& obj); typedef struct _ShowCompactResponseElement__isset { - _ShowCompactResponseElement__isset() : partitionname(false), workerid(false), start(false), runAs(false), hightestTxnId(false), metaInfo(false), endTime(false), hadoopJobId(true), id(false), errorMessage(false), enqueueTime(false), workerVersion(false), initiatorId(false), initiatorVersion(false), cleanerStart(false), poolName(false), nextTxnId(false), txnId(false), commitTime(false), hightestWriteId(false) {} + _ShowCompactResponseElement__isset() : partitionname(false), workerid(false), start(false), runAs(false), hightestTxnId(false), metaInfo(false), endTime(false), hadoopJobId(true), id(false), errorMessage(false), enqueueTime(false), workerVersion(false), initiatorId(false), initiatorVersion(false), cleanerStart(false), poolName(false), nextTxnId(false), txnId(false), commitTime(false), hightestWriteId(false), catName(true) {} bool partitionname :1; bool workerid :1; bool start :1; @@ -11842,6 +11932,7 @@ typedef struct _ShowCompactResponseElement__isset { bool txnId :1; bool commitTime :1; bool hightestWriteId :1; + bool catName :1; } _ShowCompactResponseElement__isset; class ShowCompactResponseElement : public virtual ::apache::thrift::TBase { @@ -11872,7 +11963,8 @@ class ShowCompactResponseElement : public virtual ::apache::thrift::TBase { nextTxnId(0), txnId(0), commitTime(0), - hightestWriteId(0) { + hightestWriteId(0), + catName("hive") { } virtual 
~ShowCompactResponseElement() noexcept; @@ -11904,6 +11996,7 @@ class ShowCompactResponseElement : public virtual ::apache::thrift::TBase { int64_t txnId; int64_t commitTime; int64_t hightestWriteId; + std::string catName; _ShowCompactResponseElement__isset __isset; @@ -11955,6 +12048,8 @@ class ShowCompactResponseElement : public virtual ::apache::thrift::TBase { void __set_hightestWriteId(const int64_t val); + void __set_catName(const std::string& val); + bool operator == (const ShowCompactResponseElement & rhs) const { if (!(dbname == rhs.dbname)) @@ -12045,6 +12140,10 @@ class ShowCompactResponseElement : public virtual ::apache::thrift::TBase { return false; else if (__isset.hightestWriteId && !(hightestWriteId == rhs.hightestWriteId)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const ShowCompactResponseElement &rhs) const { @@ -12255,9 +12354,10 @@ void swap(AbortCompactResponse &a, AbortCompactResponse &b); std::ostream& operator<<(std::ostream& out, const AbortCompactResponse& obj); typedef struct _GetLatestCommittedCompactionInfoRequest__isset { - _GetLatestCommittedCompactionInfoRequest__isset() : partitionnames(false), lastCompactionId(false) {} + _GetLatestCommittedCompactionInfoRequest__isset() : partitionnames(false), lastCompactionId(false), catName(true) {} bool partitionnames :1; bool lastCompactionId :1; + bool catName :1; } _GetLatestCommittedCompactionInfoRequest__isset; class GetLatestCommittedCompactionInfoRequest : public virtual ::apache::thrift::TBase { @@ -12265,10 +12365,10 @@ class GetLatestCommittedCompactionInfoRequest : public virtual ::apache::thrift: GetLatestCommittedCompactionInfoRequest(const GetLatestCommittedCompactionInfoRequest&); GetLatestCommittedCompactionInfoRequest& operator=(const GetLatestCommittedCompactionInfoRequest&); - GetLatestCommittedCompactionInfoRequest() noexcept - : 
dbname(), - tablename(), - lastCompactionId(0) { + GetLatestCommittedCompactionInfoRequest() : dbname(), + tablename(), + lastCompactionId(0), + catName("hive") { } virtual ~GetLatestCommittedCompactionInfoRequest() noexcept; @@ -12276,6 +12376,7 @@ class GetLatestCommittedCompactionInfoRequest : public virtual ::apache::thrift: std::string tablename; std::vector partitionnames; int64_t lastCompactionId; + std::string catName; _GetLatestCommittedCompactionInfoRequest__isset __isset; @@ -12287,6 +12388,8 @@ class GetLatestCommittedCompactionInfoRequest : public virtual ::apache::thrift: void __set_lastCompactionId(const int64_t val); + void __set_catName(const std::string& val); + bool operator == (const GetLatestCommittedCompactionInfoRequest & rhs) const { if (!(dbname == rhs.dbname)) @@ -12301,6 +12404,10 @@ class GetLatestCommittedCompactionInfoRequest : public virtual ::apache::thrift: return false; else if (__isset.lastCompactionId && !(lastCompactionId == rhs.lastCompactionId)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const GetLatestCommittedCompactionInfoRequest &rhs) const { @@ -12419,8 +12526,9 @@ void swap(FindNextCompactRequest &a, FindNextCompactRequest &b); std::ostream& operator<<(std::ostream& out, const FindNextCompactRequest& obj); typedef struct _AddDynamicPartitions__isset { - _AddDynamicPartitions__isset() : operationType(true) {} + _AddDynamicPartitions__isset() : operationType(true), catName(true) {} bool operationType :1; + bool catName :1; } _AddDynamicPartitions__isset; class AddDynamicPartitions : public virtual ::apache::thrift::TBase { @@ -12428,12 +12536,12 @@ class AddDynamicPartitions : public virtual ::apache::thrift::TBase { AddDynamicPartitions(const AddDynamicPartitions&); AddDynamicPartitions& operator=(const AddDynamicPartitions&); - AddDynamicPartitions() noexcept - : txnid(0), - 
writeid(0), - dbname(), - tablename(), - operationType((DataOperationType::type)5) { + AddDynamicPartitions() : txnid(0), + writeid(0), + dbname(), + tablename(), + operationType((DataOperationType::type)5), + catName("hive") { operationType = (DataOperationType::type)5; } @@ -12449,6 +12557,7 @@ class AddDynamicPartitions : public virtual ::apache::thrift::TBase { * @see DataOperationType */ DataOperationType::type operationType; + std::string catName; _AddDynamicPartitions__isset __isset; @@ -12464,6 +12573,8 @@ class AddDynamicPartitions : public virtual ::apache::thrift::TBase { void __set_operationType(const DataOperationType::type val); + void __set_catName(const std::string& val); + bool operator == (const AddDynamicPartitions & rhs) const { if (!(txnid == rhs.txnid)) @@ -12480,6 +12591,10 @@ class AddDynamicPartitions : public virtual ::apache::thrift::TBase { return false; else if (__isset.operationType && !(operationType == rhs.operationType)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const AddDynamicPartitions &rhs) const { @@ -13243,8 +13358,9 @@ void swap(FireEventResponse &a, FireEventResponse &b); std::ostream& operator<<(std::ostream& out, const FireEventResponse& obj); typedef struct _WriteNotificationLogRequest__isset { - _WriteNotificationLogRequest__isset() : partitionVals(false) {} + _WriteNotificationLogRequest__isset() : partitionVals(false), cat(true) {} bool partitionVals :1; + bool cat :1; } _WriteNotificationLogRequest__isset; class WriteNotificationLogRequest : public virtual ::apache::thrift::TBase { @@ -13252,11 +13368,11 @@ class WriteNotificationLogRequest : public virtual ::apache::thrift::TBase { WriteNotificationLogRequest(const WriteNotificationLogRequest&); WriteNotificationLogRequest& operator=(const WriteNotificationLogRequest&); - WriteNotificationLogRequest() noexcept - : txnId(0), - 
writeId(0), - db(), - table() { + WriteNotificationLogRequest() : txnId(0), + writeId(0), + db(), + table(), + cat("hive") { } virtual ~WriteNotificationLogRequest() noexcept; @@ -13266,6 +13382,7 @@ class WriteNotificationLogRequest : public virtual ::apache::thrift::TBase { std::string table; InsertEventRequestData fileInfo; std::vector partitionVals; + std::string cat; _WriteNotificationLogRequest__isset __isset; @@ -13281,6 +13398,8 @@ class WriteNotificationLogRequest : public virtual ::apache::thrift::TBase { void __set_partitionVals(const std::vector & val); + void __set_cat(const std::string& val); + bool operator == (const WriteNotificationLogRequest & rhs) const { if (!(txnId == rhs.txnId)) @@ -13297,6 +13416,10 @@ class WriteNotificationLogRequest : public virtual ::apache::thrift::TBase { return false; else if (__isset.partitionVals && !(partitionVals == rhs.partitionVals)) return false; + if (__isset.cat != rhs.__isset.cat) + return false; + else if (__isset.cat && !(cat == rhs.cat)) + return false; return true; } bool operator != (const WriteNotificationLogRequest &rhs) const { @@ -21413,9 +21536,10 @@ void swap(Package &a, Package &b); std::ostream& operator<<(std::ostream& out, const Package& obj); typedef struct _GetAllWriteEventInfoRequest__isset { - _GetAllWriteEventInfoRequest__isset() : dbName(false), tableName(false) {} + _GetAllWriteEventInfoRequest__isset() : dbName(false), tableName(false), catName(true) {} bool dbName :1; bool tableName :1; + bool catName :1; } _GetAllWriteEventInfoRequest__isset; class GetAllWriteEventInfoRequest : public virtual ::apache::thrift::TBase { @@ -21423,16 +21547,17 @@ class GetAllWriteEventInfoRequest : public virtual ::apache::thrift::TBase { GetAllWriteEventInfoRequest(const GetAllWriteEventInfoRequest&); GetAllWriteEventInfoRequest& operator=(const GetAllWriteEventInfoRequest&); - GetAllWriteEventInfoRequest() noexcept - : txnId(0), - dbName(), - tableName() { + GetAllWriteEventInfoRequest() : txnId(0), + 
dbName(), + tableName(), + catName("hive") { } virtual ~GetAllWriteEventInfoRequest() noexcept; int64_t txnId; std::string dbName; std::string tableName; + std::string catName; _GetAllWriteEventInfoRequest__isset __isset; @@ -21442,6 +21567,8 @@ class GetAllWriteEventInfoRequest : public virtual ::apache::thrift::TBase { void __set_tableName(const std::string& val); + void __set_catName(const std::string& val); + bool operator == (const GetAllWriteEventInfoRequest & rhs) const { if (!(txnId == rhs.txnId)) @@ -21454,6 +21581,10 @@ class GetAllWriteEventInfoRequest : public virtual ::apache::thrift::TBase { return false; else if (__isset.tableName && !(tableName == rhs.tableName)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const GetAllWriteEventInfoRequest &rhs) const { diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java index 886070d049c8..85b904bb53aa 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java @@ -17,6 +17,7 @@ private static final org.apache.thrift.protocol.TField TABLENAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tablename", org.apache.thrift.protocol.TType.STRING, (short)4); private static final org.apache.thrift.protocol.TField PARTITIONNAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("partitionnames", org.apache.thrift.protocol.TType.LIST, (short)5); private static final org.apache.thrift.protocol.TField OPERATION_TYPE_FIELD_DESC = new 
org.apache.thrift.protocol.TField("operationType", org.apache.thrift.protocol.TType.I32, (short)6); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)7); private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new AddDynamicPartitionsStandardSchemeFactory(); private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new AddDynamicPartitionsTupleSchemeFactory(); @@ -27,6 +28,7 @@ private @org.apache.thrift.annotation.Nullable java.lang.String tablename; // required private @org.apache.thrift.annotation.Nullable java.util.List partitionnames; // required private @org.apache.thrift.annotation.Nullable DataOperationType operationType; // optional + private @org.apache.thrift.annotation.Nullable java.lang.String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -39,7 +41,8 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum { * * @see DataOperationType */ - OPERATION_TYPE((short)6, "operationType"); + OPERATION_TYPE((short)6, "operationType"), + CAT_NAME((short)7, "catName"); private static final java.util.Map byName = new java.util.HashMap(); @@ -67,6 +70,8 @@ public static _Fields findByThriftId(int fieldId) { return PARTITIONNAMES; case 6: // OPERATION_TYPE return OPERATION_TYPE; + case 7: // CAT_NAME + return CAT_NAME; default: return null; } @@ -111,7 +116,7 @@ public java.lang.String getFieldName() { private static final int __TXNID_ISSET_ID = 0; private static final int __WRITEID_ISSET_ID = 1; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.OPERATION_TYPE}; + private static final _Fields optionals[] = {_Fields.OPERATION_TYPE,_Fields.CAT_NAME}; public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -128,6 +133,8 @@ public java.lang.String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); tmpMap.put(_Fields.OPERATION_TYPE, new org.apache.thrift.meta_data.FieldMetaData("operationType", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, DataOperationType.class))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); 
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AddDynamicPartitions.class, metaDataMap); } @@ -135,6 +142,8 @@ public java.lang.String getFieldName() { public AddDynamicPartitions() { this.operationType = org.apache.hadoop.hive.metastore.api.DataOperationType.UNSET; + this.catName = "hive"; + } public AddDynamicPartitions( @@ -174,6 +183,9 @@ public AddDynamicPartitions(AddDynamicPartitions other) { if (other.isSetOperationType()) { this.operationType = other.operationType; } + if (other.isSetCatName()) { + this.catName = other.catName; + } } public AddDynamicPartitions deepCopy() { @@ -191,6 +203,8 @@ public void clear() { this.partitionnames = null; this.operationType = org.apache.hadoop.hive.metastore.api.DataOperationType.UNSET; + this.catName = "hive"; + } public long getTxnid() { @@ -357,6 +371,30 @@ public void setOperationTypeIsSet(boolean value) { } } + @org.apache.thrift.annotation.Nullable + public java.lang.String getCatName() { + return this.catName; + } + + public void setCatName(@org.apache.thrift.annotation.Nullable java.lang.String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) { switch (field) { case TXNID: @@ -407,6 +445,14 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((java.lang.String)value); + } + break; + } } @@ -431,6 +477,9 @@ public java.lang.Object getFieldValue(_Fields field) { case OPERATION_TYPE: return getOperationType(); + case CAT_NAME: + return getCatName(); + } throw new 
java.lang.IllegalStateException(); } @@ -454,6 +503,8 @@ public boolean isSet(_Fields field) { return isSetPartitionnames(); case OPERATION_TYPE: return isSetOperationType(); + case CAT_NAME: + return isSetCatName(); } throw new java.lang.IllegalStateException(); } @@ -525,6 +576,15 @@ public boolean equals(AddDynamicPartitions that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -552,6 +612,10 @@ public int hashCode() { if (isSetOperationType()) hashCode = hashCode * 8191 + operationType.getValue(); + hashCode = hashCode * 8191 + ((isSetCatName()) ? 131071 : 524287); + if (isSetCatName()) + hashCode = hashCode * 8191 + catName.hashCode(); + return hashCode; } @@ -623,6 +687,16 @@ public int compareTo(AddDynamicPartitions other) { return lastComparison; } } + lastComparison = java.lang.Boolean.compare(isSetCatName(), other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -685,6 +759,16 @@ public java.lang.String toString() { } first = false; } + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -808,6 +892,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AddDynamicPartition org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 7: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + 
struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -856,6 +948,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AddDynamicPartitio oprot.writeFieldEnd(); } } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -888,10 +987,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddDynamicPartition if (struct.isSetOperationType()) { optionals.set(0); } - oprot.writeBitSet(optionals, 1); + if (struct.isSetCatName()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); if (struct.isSetOperationType()) { oprot.writeI32(struct.operationType.getValue()); } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override @@ -916,11 +1021,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AddDynamicPartitions } } struct.setPartitionnamesIsSet(true); - java.util.BitSet incoming = iprot.readBitSet(1); + java.util.BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { struct.operationType = org.apache.hadoop.hive.metastore.api.DataOperationType.findByValue(iprot.readI32()); struct.setOperationTypeIsSet(true); } + if (incoming.get(1)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsRequest.java index 30b90b796a15..f6f651d61906 100644 --- 
a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsRequest.java @@ -17,6 +17,7 @@ private static final org.apache.thrift.protocol.TField REPL_POLICY_FIELD_DESC = new org.apache.thrift.protocol.TField("replPolicy", org.apache.thrift.protocol.TType.STRING, (short)4); private static final org.apache.thrift.protocol.TField SRC_TXN_TO_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("srcTxnToWriteIdList", org.apache.thrift.protocol.TType.LIST, (short)5); private static final org.apache.thrift.protocol.TField REALLOCATE_FIELD_DESC = new org.apache.thrift.protocol.TField("reallocate", org.apache.thrift.protocol.TType.BOOL, (short)6); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)7); private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new AllocateTableWriteIdsRequestStandardSchemeFactory(); private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new AllocateTableWriteIdsRequestTupleSchemeFactory(); @@ -27,6 +28,7 @@ private @org.apache.thrift.annotation.Nullable java.lang.String replPolicy; // optional private @org.apache.thrift.annotation.Nullable java.util.List srcTxnToWriteIdList; // optional private boolean reallocate; // optional + private @org.apache.thrift.annotation.Nullable java.lang.String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -35,7 +37,8 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum { TXN_IDS((short)3, "txnIds"), REPL_POLICY((short)4, "replPolicy"), SRC_TXN_TO_WRITE_ID_LIST((short)5, "srcTxnToWriteIdList"), - REALLOCATE((short)6, "reallocate"); + REALLOCATE((short)6, "reallocate"), + CAT_NAME((short)7, "catName"); private static final java.util.Map byName = new java.util.HashMap(); @@ -63,6 +66,8 @@ public static _Fields findByThriftId(int fieldId) { return SRC_TXN_TO_WRITE_ID_LIST; case 6: // REALLOCATE return REALLOCATE; + case 7: // CAT_NAME + return CAT_NAME; default: return null; } @@ -106,7 +111,7 @@ public java.lang.String getFieldName() { // isset id assignments private static final int __REALLOCATE_ISSET_ID = 0; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.TXN_IDS,_Fields.REPL_POLICY,_Fields.SRC_TXN_TO_WRITE_ID_LIST,_Fields.REALLOCATE}; + private static final _Fields optionals[] = {_Fields.TXN_IDS,_Fields.REPL_POLICY,_Fields.SRC_TXN_TO_WRITE_ID_LIST,_Fields.REALLOCATE,_Fields.CAT_NAME}; public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -124,6 +129,8 @@ public java.lang.String getFieldName() { new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TxnToWriteId.class)))); tmpMap.put(_Fields.REALLOCATE, new org.apache.thrift.meta_data.FieldMetaData("reallocate", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new 
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AllocateTableWriteIdsRequest.class, metaDataMap); } @@ -131,6 +138,8 @@ public java.lang.String getFieldName() { public AllocateTableWriteIdsRequest() { this.reallocate = false; + this.catName = "hive"; + } public AllocateTableWriteIdsRequest( @@ -168,6 +177,9 @@ public AllocateTableWriteIdsRequest(AllocateTableWriteIdsRequest other) { this.srcTxnToWriteIdList = __this__srcTxnToWriteIdList; } this.reallocate = other.reallocate; + if (other.isSetCatName()) { + this.catName = other.catName; + } } public AllocateTableWriteIdsRequest deepCopy() { @@ -183,6 +195,8 @@ public void clear() { this.srcTxnToWriteIdList = null; this.reallocate = false; + this.catName = "hive"; + } @org.apache.thrift.annotation.Nullable @@ -359,6 +373,30 @@ public void setReallocateIsSet(boolean value) { __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __REALLOCATE_ISSET_ID, value); } + @org.apache.thrift.annotation.Nullable + public java.lang.String getCatName() { + return this.catName; + } + + public void setCatName(@org.apache.thrift.annotation.Nullable java.lang.String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) { switch (field) { case DB_NAME: @@ -409,6 +447,14 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + 
setCatName((java.lang.String)value); + } + break; + } } @@ -433,6 +479,9 @@ public java.lang.Object getFieldValue(_Fields field) { case REALLOCATE: return isReallocate(); + case CAT_NAME: + return getCatName(); + } throw new java.lang.IllegalStateException(); } @@ -456,6 +505,8 @@ public boolean isSet(_Fields field) { return isSetSrcTxnToWriteIdList(); case REALLOCATE: return isSetReallocate(); + case CAT_NAME: + return isSetCatName(); } throw new java.lang.IllegalStateException(); } @@ -527,6 +578,15 @@ public boolean equals(AllocateTableWriteIdsRequest that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -558,6 +618,10 @@ public int hashCode() { if (isSetReallocate()) hashCode = hashCode * 8191 + ((reallocate) ? 131071 : 524287); + hashCode = hashCode * 8191 + ((isSetCatName()) ? 
131071 : 524287); + if (isSetCatName()) + hashCode = hashCode * 8191 + catName.hashCode(); + return hashCode; } @@ -629,6 +693,16 @@ public int compareTo(AllocateTableWriteIdsRequest other) { return lastComparison; } } + lastComparison = java.lang.Boolean.compare(isSetCatName(), other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -701,6 +775,16 @@ public java.lang.String toString() { sb.append(this.reallocate); first = false; } + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -823,6 +907,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AllocateTableWriteI org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 7: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -886,6 +978,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AllocateTableWrite oprot.writeBool(struct.reallocate); oprot.writeFieldEnd(); } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -918,7 +1017,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteI if (struct.isSetReallocate()) { optionals.set(3); } - oprot.writeBitSet(optionals, 4); + if 
(struct.isSetCatName()) { + optionals.set(4); + } + oprot.writeBitSet(optionals, 5); if (struct.isSetTxnIds()) { { oprot.writeI32(struct.txnIds.size()); @@ -943,6 +1045,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteI if (struct.isSetReallocate()) { oprot.writeBool(struct.reallocate); } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override @@ -952,7 +1057,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteId struct.setDbNameIsSet(true); struct.tableName = iprot.readString(); struct.setTableNameIsSet(true); - java.util.BitSet incoming = iprot.readBitSet(4); + java.util.BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { { org.apache.thrift.protocol.TList _list888 = iprot.readListBegin(org.apache.thrift.protocol.TType.I64); @@ -988,6 +1093,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteId struct.reallocate = iprot.readBool(); struct.setReallocateIsSet(true); } + if (incoming.get(4)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionInfoStruct.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionInfoStruct.java index cd6b544580b0..af91264fe9bd 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionInfoStruct.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionInfoStruct.java @@ -30,6 +30,7 @@ private static final org.apache.thrift.protocol.TField POOLNAME_FIELD_DESC = new org.apache.thrift.protocol.TField("poolname", org.apache.thrift.protocol.TType.STRING, (short)17); private static final org.apache.thrift.protocol.TField NUMBER_OF_BUCKETS_FIELD_DESC = new 
org.apache.thrift.protocol.TField("numberOfBuckets", org.apache.thrift.protocol.TType.I32, (short)18); private static final org.apache.thrift.protocol.TField ORDER_BY_CLAUSE_FIELD_DESC = new org.apache.thrift.protocol.TField("orderByClause", org.apache.thrift.protocol.TType.STRING, (short)19); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)20); private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new CompactionInfoStructStandardSchemeFactory(); private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new CompactionInfoStructTupleSchemeFactory(); @@ -53,6 +54,7 @@ private @org.apache.thrift.annotation.Nullable java.lang.String poolname; // optional private int numberOfBuckets; // optional private @org.apache.thrift.annotation.Nullable java.lang.String orderByClause; // optional + private @org.apache.thrift.annotation.Nullable java.lang.String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -78,7 +80,8 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum { RETRY_RETENTION((short)16, "retryRetention"), POOLNAME((short)17, "poolname"), NUMBER_OF_BUCKETS((short)18, "numberOfBuckets"), - ORDER_BY_CLAUSE((short)19, "orderByClause"); + ORDER_BY_CLAUSE((short)19, "orderByClause"), + CAT_NAME((short)20, "catName"); private static final java.util.Map byName = new java.util.HashMap(); @@ -132,6 +135,8 @@ public static _Fields findByThriftId(int fieldId) { return NUMBER_OF_BUCKETS; case 19: // ORDER_BY_CLAUSE return ORDER_BY_CLAUSE; + case 20: // CAT_NAME + return CAT_NAME; default: return null; } @@ -182,7 +187,7 @@ public java.lang.String getFieldName() { private static final int __RETRYRETENTION_ISSET_ID = 6; private static final int __NUMBEROFBUCKETS_ISSET_ID = 7; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.PARTITIONNAME,_Fields.RUNAS,_Fields.PROPERTIES,_Fields.TOOMANYABORTS,_Fields.STATE,_Fields.WORKER_ID,_Fields.START,_Fields.HIGHEST_WRITE_ID,_Fields.ERROR_MESSAGE,_Fields.HASOLDABORT,_Fields.ENQUEUE_TIME,_Fields.RETRY_RETENTION,_Fields.POOLNAME,_Fields.NUMBER_OF_BUCKETS,_Fields.ORDER_BY_CLAUSE}; + private static final _Fields optionals[] = {_Fields.PARTITIONNAME,_Fields.RUNAS,_Fields.PROPERTIES,_Fields.TOOMANYABORTS,_Fields.STATE,_Fields.WORKER_ID,_Fields.START,_Fields.HIGHEST_WRITE_ID,_Fields.ERROR_MESSAGE,_Fields.HASOLDABORT,_Fields.ENQUEUE_TIME,_Fields.RETRY_RETENTION,_Fields.POOLNAME,_Fields.NUMBER_OF_BUCKETS,_Fields.ORDER_BY_CLAUSE,_Fields.CAT_NAME}; public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -224,11 +229,15 @@ public java.lang.String getFieldName() { new 
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); tmpMap.put(_Fields.ORDER_BY_CLAUSE, new org.apache.thrift.meta_data.FieldMetaData("orderByClause", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(CompactionInfoStruct.class, metaDataMap); } public CompactionInfoStruct() { + this.catName = "hive"; + } public CompactionInfoStruct( @@ -291,6 +300,9 @@ public CompactionInfoStruct(CompactionInfoStruct other) { if (other.isSetOrderByClause()) { this.orderByClause = other.orderByClause; } + if (other.isSetCatName()) { + this.catName = other.catName; + } } public CompactionInfoStruct deepCopy() { @@ -326,6 +338,8 @@ public void clear() { setNumberOfBucketsIsSet(false); this.numberOfBuckets = 0; this.orderByClause = null; + this.catName = "hive"; + } public long getId() { @@ -776,6 +790,30 @@ public void setOrderByClauseIsSet(boolean value) { } } + @org.apache.thrift.annotation.Nullable + public java.lang.String getCatName() { + return this.catName; + } + + public void setCatName(@org.apache.thrift.annotation.Nullable java.lang.String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object 
value) { switch (field) { case ID: @@ -930,6 +968,14 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((java.lang.String)value); + } + break; + } } @@ -993,6 +1039,9 @@ public java.lang.Object getFieldValue(_Fields field) { case ORDER_BY_CLAUSE: return getOrderByClause(); + case CAT_NAME: + return getCatName(); + } throw new java.lang.IllegalStateException(); } @@ -1042,6 +1091,8 @@ public boolean isSet(_Fields field) { return isSetNumberOfBuckets(); case ORDER_BY_CLAUSE: return isSetOrderByClause(); + case CAT_NAME: + return isSetCatName(); } throw new java.lang.IllegalStateException(); } @@ -1230,6 +1281,15 @@ public boolean equals(CompactionInfoStruct that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -1311,6 +1371,10 @@ public int hashCode() { if (isSetOrderByClause()) hashCode = hashCode * 8191 + orderByClause.hashCode(); + hashCode = hashCode * 8191 + ((isSetCatName()) ? 
131071 : 524287); + if (isSetCatName()) + hashCode = hashCode * 8191 + catName.hashCode(); + return hashCode; } @@ -1512,6 +1576,16 @@ public int compareTo(CompactionInfoStruct other) { return lastComparison; } } + lastComparison = java.lang.Boolean.compare(isSetCatName(), other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -1682,6 +1756,16 @@ public java.lang.String toString() { } first = false; } + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -1895,6 +1979,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, CompactionInfoStruc org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 20: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -2017,6 +2109,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, CompactionInfoStru oprot.writeFieldEnd(); } } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -2084,7 +2183,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, CompactionInfoStruc if (struct.isSetOrderByClause()) { optionals.set(14); } - oprot.writeBitSet(optionals, 15); + if (struct.isSetCatName()) { + optionals.set(15); + } + 
oprot.writeBitSet(optionals, 16); if (struct.isSetPartitionname()) { oprot.writeString(struct.partitionname); } @@ -2130,6 +2232,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, CompactionInfoStruc if (struct.isSetOrderByClause()) { oprot.writeString(struct.orderByClause); } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override @@ -2143,7 +2248,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, CompactionInfoStruct struct.setTablenameIsSet(true); struct.type = org.apache.hadoop.hive.metastore.api.CompactionType.findByValue(iprot.readI32()); struct.setTypeIsSet(true); - java.util.BitSet incoming = iprot.readBitSet(15); + java.util.BitSet incoming = iprot.readBitSet(16); if (incoming.get(0)) { struct.partitionname = iprot.readString(); struct.setPartitionnameIsSet(true); @@ -2204,6 +2309,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, CompactionInfoStruct struct.orderByClause = iprot.readString(); struct.setOrderByClauseIsSet(true); } + if (incoming.get(15)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionMetricsDataRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionMetricsDataRequest.java index fd6d2098a4c3..736a2e054c8f 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionMetricsDataRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionMetricsDataRequest.java @@ -15,6 +15,7 @@ private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tblName", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField 
PARTITION_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("partitionName", org.apache.thrift.protocol.TType.STRING, (short)3); private static final org.apache.thrift.protocol.TField TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("type", org.apache.thrift.protocol.TType.I32, (short)4); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)5); private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new CompactionMetricsDataRequestStandardSchemeFactory(); private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new CompactionMetricsDataRequestTupleSchemeFactory(); @@ -23,6 +24,7 @@ private @org.apache.thrift.annotation.Nullable java.lang.String tblName; // required private @org.apache.thrift.annotation.Nullable java.lang.String partitionName; // optional private @org.apache.thrift.annotation.Nullable CompactionMetricsMetricType type; // required + private @org.apache.thrift.annotation.Nullable java.lang.String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -33,7 +35,8 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum { * * @see CompactionMetricsMetricType */ - TYPE((short)4, "type"); + TYPE((short)4, "type"), + CAT_NAME((short)5, "catName"); private static final java.util.Map byName = new java.util.HashMap(); @@ -57,6 +60,8 @@ public static _Fields findByThriftId(int fieldId) { return PARTITION_NAME; case 4: // TYPE return TYPE; + case 5: // CAT_NAME + return CAT_NAME; default: return null; } @@ -98,7 +103,7 @@ public java.lang.String getFieldName() { } // isset id assignments - private static final _Fields optionals[] = {_Fields.PARTITION_NAME}; + private static final _Fields optionals[] = {_Fields.PARTITION_NAME,_Fields.CAT_NAME}; public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -110,11 +115,15 @@ public java.lang.String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.TYPE, new org.apache.thrift.meta_data.FieldMetaData("type", org.apache.thrift.TFieldRequirementType.REQUIRED, new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, CompactionMetricsMetricType.class))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(CompactionMetricsDataRequest.class, metaDataMap); } public CompactionMetricsDataRequest() { + this.catName = "hive"; + } public CompactionMetricsDataRequest( @@ -144,6 +153,9 @@ 
public CompactionMetricsDataRequest(CompactionMetricsDataRequest other) { if (other.isSetType()) { this.type = other.type; } + if (other.isSetCatName()) { + this.catName = other.catName; + } } public CompactionMetricsDataRequest deepCopy() { @@ -156,6 +168,8 @@ public void clear() { this.tblName = null; this.partitionName = null; this.type = null; + this.catName = "hive"; + } @org.apache.thrift.annotation.Nullable @@ -262,6 +276,30 @@ public void setTypeIsSet(boolean value) { } } + @org.apache.thrift.annotation.Nullable + public java.lang.String getCatName() { + return this.catName; + } + + public void setCatName(@org.apache.thrift.annotation.Nullable java.lang.String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) { switch (field) { case DB_NAME: @@ -296,6 +334,14 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((java.lang.String)value); + } + break; + } } @@ -314,6 +360,9 @@ public java.lang.Object getFieldValue(_Fields field) { case TYPE: return getType(); + case CAT_NAME: + return getCatName(); + } throw new java.lang.IllegalStateException(); } @@ -333,6 +382,8 @@ public boolean isSet(_Fields field) { return isSetPartitionName(); case TYPE: return isSetType(); + case CAT_NAME: + return isSetCatName(); } throw new java.lang.IllegalStateException(); } @@ -386,6 +437,15 @@ public boolean equals(CompactionMetricsDataRequest that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean 
that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -409,6 +469,10 @@ public int hashCode() { if (isSetType()) hashCode = hashCode * 8191 + type.getValue(); + hashCode = hashCode * 8191 + ((isSetCatName()) ? 131071 : 524287); + if (isSetCatName()) + hashCode = hashCode * 8191 + catName.hashCode(); + return hashCode; } @@ -460,6 +524,16 @@ public int compareTo(CompactionMetricsDataRequest other) { return lastComparison; } } + lastComparison = java.lang.Boolean.compare(isSetCatName(), other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -514,6 +588,16 @@ public java.lang.String toString() { sb.append(this.type); } first = false; + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -601,6 +685,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, CompactionMetricsDa org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 5: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -636,6 +728,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, CompactionMetricsD oprot.writeI32(struct.type.getValue()); oprot.writeFieldEnd(); } + if 
(struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -660,10 +759,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, CompactionMetricsDa if (struct.isSetPartitionName()) { optionals.set(0); } - oprot.writeBitSet(optionals, 1); + if (struct.isSetCatName()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); if (struct.isSetPartitionName()) { oprot.writeString(struct.partitionName); } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override @@ -675,11 +780,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, CompactionMetricsDat struct.setTblNameIsSet(true); struct.type = org.apache.hadoop.hive.metastore.api.CompactionMetricsMetricType.findByValue(iprot.readI32()); struct.setTypeIsSet(true); - java.util.BitSet incoming = iprot.readBitSet(1); + java.util.BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { struct.partitionName = iprot.readString(); struct.setPartitionNameIsSet(true); } + if (incoming.get(1)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionMetricsDataStruct.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionMetricsDataStruct.java index 3884473760d8..75f52ca203d0 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionMetricsDataStruct.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionMetricsDataStruct.java @@ -18,6 +18,7 @@ private static final org.apache.thrift.protocol.TField METRICVALUE_FIELD_DESC = new 
org.apache.thrift.protocol.TField("metricvalue", org.apache.thrift.protocol.TType.I32, (short)5); private static final org.apache.thrift.protocol.TField VERSION_FIELD_DESC = new org.apache.thrift.protocol.TField("version", org.apache.thrift.protocol.TType.I32, (short)6); private static final org.apache.thrift.protocol.TField THRESHOLD_FIELD_DESC = new org.apache.thrift.protocol.TField("threshold", org.apache.thrift.protocol.TType.I32, (short)7); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)8); private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new CompactionMetricsDataStructStandardSchemeFactory(); private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new CompactionMetricsDataStructTupleSchemeFactory(); @@ -29,6 +30,7 @@ private int metricvalue; // required private int version; // required private int threshold; // required + private @org.apache.thrift.annotation.Nullable java.lang.String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -42,7 +44,8 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum { TYPE((short)4, "type"), METRICVALUE((short)5, "metricvalue"), VERSION((short)6, "version"), - THRESHOLD((short)7, "threshold"); + THRESHOLD((short)7, "threshold"), + CAT_NAME((short)8, "catName"); private static final java.util.Map byName = new java.util.HashMap(); @@ -72,6 +75,8 @@ public static _Fields findByThriftId(int fieldId) { return VERSION; case 7: // THRESHOLD return THRESHOLD; + case 8: // CAT_NAME + return CAT_NAME; default: return null; } @@ -117,7 +122,7 @@ public java.lang.String getFieldName() { private static final int __VERSION_ISSET_ID = 1; private static final int __THRESHOLD_ISSET_ID = 2; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.PARTITIONNAME}; + private static final _Fields optionals[] = {_Fields.PARTITIONNAME,_Fields.CAT_NAME}; public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -135,11 +140,15 @@ public java.lang.String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); tmpMap.put(_Fields.THRESHOLD, new org.apache.thrift.meta_data.FieldMetaData("threshold", org.apache.thrift.TFieldRequirementType.REQUIRED, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); 
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(CompactionMetricsDataStruct.class, metaDataMap); } public CompactionMetricsDataStruct() { + this.catName = "hive"; + } public CompactionMetricsDataStruct( @@ -182,6 +191,9 @@ public CompactionMetricsDataStruct(CompactionMetricsDataStruct other) { this.metricvalue = other.metricvalue; this.version = other.version; this.threshold = other.threshold; + if (other.isSetCatName()) { + this.catName = other.catName; + } } public CompactionMetricsDataStruct deepCopy() { @@ -200,6 +212,8 @@ public void clear() { this.version = 0; setThresholdIsSet(false); this.threshold = 0; + this.catName = "hive"; + } @org.apache.thrift.annotation.Nullable @@ -372,6 +386,30 @@ public void setThresholdIsSet(boolean value) { __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __THRESHOLD_ISSET_ID, value); } + @org.apache.thrift.annotation.Nullable + public java.lang.String getCatName() { + return this.catName; + } + + public void setCatName(@org.apache.thrift.annotation.Nullable java.lang.String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) { switch (field) { case DBNAME: @@ -430,6 +468,14 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((java.lang.String)value); + } + break; + } } @@ -457,6 +503,9 @@ public java.lang.Object getFieldValue(_Fields field) { case THRESHOLD: return getThreshold(); + case CAT_NAME: + return getCatName(); + } throw new 
java.lang.IllegalStateException(); } @@ -482,6 +531,8 @@ public boolean isSet(_Fields field) { return isSetVersion(); case THRESHOLD: return isSetThreshold(); + case CAT_NAME: + return isSetCatName(); } throw new java.lang.IllegalStateException(); } @@ -562,6 +613,15 @@ public boolean equals(CompactionMetricsDataStruct that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -591,6 +651,10 @@ public int hashCode() { hashCode = hashCode * 8191 + threshold; + hashCode = hashCode * 8191 + ((isSetCatName()) ? 131071 : 524287); + if (isSetCatName()) + hashCode = hashCode * 8191 + catName.hashCode(); + return hashCode; } @@ -672,6 +736,16 @@ public int compareTo(CompactionMetricsDataStruct other) { return lastComparison; } } + lastComparison = java.lang.Boolean.compare(isSetCatName(), other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -738,6 +812,16 @@ public java.lang.String toString() { sb.append("threshold:"); sb.append(this.threshold); first = false; + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -863,6 +947,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, CompactionMetricsDa org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 8: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + 
struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -907,6 +999,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, CompactionMetricsD oprot.writeFieldBegin(THRESHOLD_FIELD_DESC); oprot.writeI32(struct.threshold); oprot.writeFieldEnd(); + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -934,10 +1033,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, CompactionMetricsDa if (struct.isSetPartitionname()) { optionals.set(0); } - oprot.writeBitSet(optionals, 1); + if (struct.isSetCatName()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); if (struct.isSetPartitionname()) { oprot.writeString(struct.partitionname); } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override @@ -955,11 +1060,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, CompactionMetricsDat struct.setVersionIsSet(true); struct.threshold = iprot.readI32(); struct.setThresholdIsSet(true); - java.util.BitSet incoming = iprot.readBitSet(1); + java.util.BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { struct.partitionname = iprot.readString(); struct.setPartitionnameIsSet(true); } + if (incoming.get(1)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java index 7d6a08220c21..25fc562c6f46 100644 --- 
a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java @@ -22,6 +22,7 @@ private static final org.apache.thrift.protocol.TField POOL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("poolName", org.apache.thrift.protocol.TType.STRING, (short)9); private static final org.apache.thrift.protocol.TField NUMBER_OF_BUCKETS_FIELD_DESC = new org.apache.thrift.protocol.TField("numberOfBuckets", org.apache.thrift.protocol.TType.I32, (short)10); private static final org.apache.thrift.protocol.TField ORDER_BY_CLAUSE_FIELD_DESC = new org.apache.thrift.protocol.TField("orderByClause", org.apache.thrift.protocol.TType.STRING, (short)11); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)12); private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new CompactionRequestStandardSchemeFactory(); private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new CompactionRequestTupleSchemeFactory(); @@ -37,6 +38,7 @@ private @org.apache.thrift.annotation.Nullable java.lang.String poolName; // optional private int numberOfBuckets; // optional private @org.apache.thrift.annotation.Nullable java.lang.String orderByClause; // optional + private @org.apache.thrift.annotation.Nullable java.lang.String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -54,7 +56,8 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum { INITIATOR_VERSION((short)8, "initiatorVersion"), POOL_NAME((short)9, "poolName"), NUMBER_OF_BUCKETS((short)10, "numberOfBuckets"), - ORDER_BY_CLAUSE((short)11, "orderByClause"); + ORDER_BY_CLAUSE((short)11, "orderByClause"), + CAT_NAME((short)12, "catName"); private static final java.util.Map byName = new java.util.HashMap(); @@ -92,6 +95,8 @@ public static _Fields findByThriftId(int fieldId) { return NUMBER_OF_BUCKETS; case 11: // ORDER_BY_CLAUSE return ORDER_BY_CLAUSE; + case 12: // CAT_NAME + return CAT_NAME; default: return null; } @@ -135,7 +140,7 @@ public java.lang.String getFieldName() { // isset id assignments private static final int __NUMBEROFBUCKETS_ISSET_ID = 0; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.PARTITIONNAME,_Fields.RUNAS,_Fields.PROPERTIES,_Fields.INITIATOR_ID,_Fields.INITIATOR_VERSION,_Fields.POOL_NAME,_Fields.NUMBER_OF_BUCKETS,_Fields.ORDER_BY_CLAUSE}; + private static final _Fields optionals[] = {_Fields.PARTITIONNAME,_Fields.RUNAS,_Fields.PROPERTIES,_Fields.INITIATOR_ID,_Fields.INITIATOR_VERSION,_Fields.POOL_NAME,_Fields.NUMBER_OF_BUCKETS,_Fields.ORDER_BY_CLAUSE,_Fields.CAT_NAME}; public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -163,11 +168,15 @@ public java.lang.String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); tmpMap.put(_Fields.ORDER_BY_CLAUSE, new org.apache.thrift.meta_data.FieldMetaData("orderByClause", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + 
tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(CompactionRequest.class, metaDataMap); } public CompactionRequest() { + this.catName = "hive"; + } public CompactionRequest( @@ -218,6 +227,9 @@ public CompactionRequest(CompactionRequest other) { if (other.isSetOrderByClause()) { this.orderByClause = other.orderByClause; } + if (other.isSetCatName()) { + this.catName = other.catName; + } } public CompactionRequest deepCopy() { @@ -238,6 +250,8 @@ public void clear() { setNumberOfBucketsIsSet(false); this.numberOfBuckets = 0; this.orderByClause = null; + this.catName = "hive"; + } @org.apache.thrift.annotation.Nullable @@ -521,6 +535,30 @@ public void setOrderByClauseIsSet(boolean value) { } } + @org.apache.thrift.annotation.Nullable + public java.lang.String getCatName() { + return this.catName; + } + + public void setCatName(@org.apache.thrift.annotation.Nullable java.lang.String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) { switch (field) { case DBNAME: @@ -611,6 +649,14 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((java.lang.String)value); + } + break; + } } @@ -650,6 +696,9 @@ public java.lang.Object 
getFieldValue(_Fields field) { case ORDER_BY_CLAUSE: return getOrderByClause(); + case CAT_NAME: + return getCatName(); + } throw new java.lang.IllegalStateException(); } @@ -683,6 +732,8 @@ public boolean isSet(_Fields field) { return isSetNumberOfBuckets(); case ORDER_BY_CLAUSE: return isSetOrderByClause(); + case CAT_NAME: + return isSetCatName(); } throw new java.lang.IllegalStateException(); } @@ -799,6 +850,15 @@ public boolean equals(CompactionRequest that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -850,6 +910,10 @@ public int hashCode() { if (isSetOrderByClause()) hashCode = hashCode * 8191 + orderByClause.hashCode(); + hashCode = hashCode * 8191 + ((isSetCatName()) ? 131071 : 524287); + if (isSetCatName()) + hashCode = hashCode * 8191 + catName.hashCode(); + return hashCode; } @@ -971,6 +1035,16 @@ public int compareTo(CompactionRequest other) { return lastComparison; } } + lastComparison = java.lang.Boolean.compare(isSetCatName(), other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -1091,6 +1165,16 @@ public java.lang.String toString() { } first = false; } + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -1248,6 +1332,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, CompactionRequest s org.apache.thrift.protocol.TProtocolUtil.skip(iprot, 
schemeField.type); } break; + case 12: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -1338,6 +1430,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, CompactionRequest oprot.writeFieldEnd(); } } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -1383,7 +1482,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, CompactionRequest s if (struct.isSetOrderByClause()) { optionals.set(7); } - oprot.writeBitSet(optionals, 8); + if (struct.isSetCatName()) { + optionals.set(8); + } + oprot.writeBitSet(optionals, 9); if (struct.isSetPartitionname()) { oprot.writeString(struct.partitionname); } @@ -1415,6 +1517,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, CompactionRequest s if (struct.isSetOrderByClause()) { oprot.writeString(struct.orderByClause); } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override @@ -1426,7 +1531,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, CompactionRequest st struct.setTablenameIsSet(true); struct.type = org.apache.hadoop.hive.metastore.api.CompactionType.findByValue(iprot.readI32()); struct.setTypeIsSet(true); - java.util.BitSet incoming = iprot.readBitSet(8); + java.util.BitSet incoming = iprot.readBitSet(9); if (incoming.get(0)) { struct.partitionname = iprot.readString(); struct.setPartitionnameIsSet(true); @@ -1470,6 +1575,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, CompactionRequest st struct.orderByClause = iprot.readString(); 
struct.setOrderByClauseIsSet(true); } + if (incoming.get(8)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllWriteEventInfoRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllWriteEventInfoRequest.java index f4c7f95d94a7..83ea86cc857c 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllWriteEventInfoRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllWriteEventInfoRequest.java @@ -14,6 +14,7 @@ private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)1); private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)3); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)4); private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new GetAllWriteEventInfoRequestStandardSchemeFactory(); private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new GetAllWriteEventInfoRequestTupleSchemeFactory(); @@ -21,12 +22,14 @@ private long txnId; // required private @org.apache.thrift.annotation.Nullable java.lang.String dbName; // optional private @org.apache.thrift.annotation.Nullable java.lang.String tableName; // optional + private 
@org.apache.thrift.annotation.Nullable java.lang.String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { TXN_ID((short)1, "txnId"), DB_NAME((short)2, "dbName"), - TABLE_NAME((short)3, "tableName"); + TABLE_NAME((short)3, "tableName"), + CAT_NAME((short)4, "catName"); private static final java.util.Map byName = new java.util.HashMap(); @@ -48,6 +51,8 @@ public static _Fields findByThriftId(int fieldId) { return DB_NAME; case 3: // TABLE_NAME return TABLE_NAME; + case 4: // CAT_NAME + return CAT_NAME; default: return null; } @@ -91,7 +96,7 @@ public java.lang.String getFieldName() { // isset id assignments private static final int __TXNID_ISSET_ID = 0; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.DB_NAME,_Fields.TABLE_NAME}; + private static final _Fields optionals[] = {_Fields.DB_NAME,_Fields.TABLE_NAME,_Fields.CAT_NAME}; public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -101,11 +106,15 @@ public java.lang.String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); 
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetAllWriteEventInfoRequest.class, metaDataMap); } public GetAllWriteEventInfoRequest() { + this.catName = "hive"; + } public GetAllWriteEventInfoRequest( @@ -128,6 +137,9 @@ public GetAllWriteEventInfoRequest(GetAllWriteEventInfoRequest other) { if (other.isSetTableName()) { this.tableName = other.tableName; } + if (other.isSetCatName()) { + this.catName = other.catName; + } } public GetAllWriteEventInfoRequest deepCopy() { @@ -140,6 +152,8 @@ public void clear() { this.txnId = 0; this.dbName = null; this.tableName = null; + this.catName = "hive"; + } public long getTxnId() { @@ -212,6 +226,30 @@ public void setTableNameIsSet(boolean value) { } } + @org.apache.thrift.annotation.Nullable + public java.lang.String getCatName() { + return this.catName; + } + + public void setCatName(@org.apache.thrift.annotation.Nullable java.lang.String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) { switch (field) { case TXN_ID: @@ -238,6 +276,14 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((java.lang.String)value); + } + break; + } } @@ -253,6 +299,9 @@ public java.lang.Object getFieldValue(_Fields field) { case TABLE_NAME: return getTableName(); + case CAT_NAME: + return getCatName(); + } throw new java.lang.IllegalStateException(); } @@ -270,6 +319,8 @@ public boolean isSet(_Fields field) { return isSetDbName(); case TABLE_NAME: return isSetTableName(); + case CAT_NAME: + 
return isSetCatName(); } throw new java.lang.IllegalStateException(); } @@ -314,6 +365,15 @@ public boolean equals(GetAllWriteEventInfoRequest that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -331,6 +391,10 @@ public int hashCode() { if (isSetTableName()) hashCode = hashCode * 8191 + tableName.hashCode(); + hashCode = hashCode * 8191 + ((isSetCatName()) ? 131071 : 524287); + if (isSetCatName()) + hashCode = hashCode * 8191 + catName.hashCode(); + return hashCode; } @@ -372,6 +436,16 @@ public int compareTo(GetAllWriteEventInfoRequest other) { return lastComparison; } } + lastComparison = java.lang.Boolean.compare(isSetCatName(), other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -416,6 +490,16 @@ public java.lang.String toString() { } first = false; } + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -489,6 +573,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetAllWriteEventInf org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 4: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: 
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -519,6 +611,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetAllWriteEventIn oprot.writeFieldEnd(); } } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -544,13 +643,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetAllWriteEventInf if (struct.isSetTableName()) { optionals.set(1); } - oprot.writeBitSet(optionals, 2); + if (struct.isSetCatName()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); if (struct.isSetDbName()) { oprot.writeString(struct.dbName); } if (struct.isSetTableName()) { oprot.writeString(struct.tableName); } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override @@ -558,7 +663,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetAllWriteEventInfo org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot; struct.txnId = iprot.readI64(); struct.setTxnIdIsSet(true); - java.util.BitSet incoming = iprot.readBitSet(2); + java.util.BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { struct.dbName = iprot.readString(); struct.setDbNameIsSet(true); @@ -567,6 +672,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetAllWriteEventInfo struct.tableName = iprot.readString(); struct.setTableNameIsSet(true); } + if (incoming.get(2)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetLatestCommittedCompactionInfoRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetLatestCommittedCompactionInfoRequest.java index 
9abd8f520fe8..a7a5d14bac9f 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetLatestCommittedCompactionInfoRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetLatestCommittedCompactionInfoRequest.java @@ -15,6 +15,7 @@ private static final org.apache.thrift.protocol.TField TABLENAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tablename", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField PARTITIONNAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("partitionnames", org.apache.thrift.protocol.TType.LIST, (short)3); private static final org.apache.thrift.protocol.TField LAST_COMPACTION_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("lastCompactionId", org.apache.thrift.protocol.TType.I64, (short)4); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)5); private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new GetLatestCommittedCompactionInfoRequestStandardSchemeFactory(); private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new GetLatestCommittedCompactionInfoRequestTupleSchemeFactory(); @@ -23,13 +24,15 @@ private @org.apache.thrift.annotation.Nullable java.lang.String tablename; // required private @org.apache.thrift.annotation.Nullable java.util.List partitionnames; // optional private long lastCompactionId; // optional + private @org.apache.thrift.annotation.Nullable java.lang.String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DBNAME((short)1, "dbname"), TABLENAME((short)2, "tablename"), PARTITIONNAMES((short)3, "partitionnames"), - LAST_COMPACTION_ID((short)4, "lastCompactionId"); + LAST_COMPACTION_ID((short)4, "lastCompactionId"), + CAT_NAME((short)5, "catName"); private static final java.util.Map byName = new java.util.HashMap(); @@ -53,6 +56,8 @@ public static _Fields findByThriftId(int fieldId) { return PARTITIONNAMES; case 4: // LAST_COMPACTION_ID return LAST_COMPACTION_ID; + case 5: // CAT_NAME + return CAT_NAME; default: return null; } @@ -96,7 +101,7 @@ public java.lang.String getFieldName() { // isset id assignments private static final int __LASTCOMPACTIONID_ISSET_ID = 0; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.PARTITIONNAMES,_Fields.LAST_COMPACTION_ID}; + private static final _Fields optionals[] = {_Fields.PARTITIONNAMES,_Fields.LAST_COMPACTION_ID,_Fields.CAT_NAME}; public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -109,11 +114,15 @@ public java.lang.String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); tmpMap.put(_Fields.LAST_COMPACTION_ID, new org.apache.thrift.meta_data.FieldMetaData("lastCompactionId", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); 
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetLatestCommittedCompactionInfoRequest.class, metaDataMap); } public GetLatestCommittedCompactionInfoRequest() { + this.catName = "hive"; + } public GetLatestCommittedCompactionInfoRequest( @@ -141,6 +150,9 @@ public GetLatestCommittedCompactionInfoRequest(GetLatestCommittedCompactionInfoR this.partitionnames = __this__partitionnames; } this.lastCompactionId = other.lastCompactionId; + if (other.isSetCatName()) { + this.catName = other.catName; + } } public GetLatestCommittedCompactionInfoRequest deepCopy() { @@ -154,6 +166,8 @@ public void clear() { this.partitionnames = null; setLastCompactionIdIsSet(false); this.lastCompactionId = 0; + this.catName = "hive"; + } @org.apache.thrift.annotation.Nullable @@ -266,6 +280,30 @@ public void setLastCompactionIdIsSet(boolean value) { __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __LASTCOMPACTIONID_ISSET_ID, value); } + @org.apache.thrift.annotation.Nullable + public java.lang.String getCatName() { + return this.catName; + } + + public void setCatName(@org.apache.thrift.annotation.Nullable java.lang.String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) { switch (field) { case DBNAME: @@ -300,6 +338,14 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((java.lang.String)value); + } + break; + } } @@ -318,6 +364,9 @@ public java.lang.Object getFieldValue(_Fields field) { case LAST_COMPACTION_ID: 
return getLastCompactionId(); + case CAT_NAME: + return getCatName(); + } throw new java.lang.IllegalStateException(); } @@ -337,6 +386,8 @@ public boolean isSet(_Fields field) { return isSetPartitionnames(); case LAST_COMPACTION_ID: return isSetLastCompactionId(); + case CAT_NAME: + return isSetCatName(); } throw new java.lang.IllegalStateException(); } @@ -390,6 +441,15 @@ public boolean equals(GetLatestCommittedCompactionInfoRequest that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -413,6 +473,10 @@ public int hashCode() { if (isSetLastCompactionId()) hashCode = hashCode * 8191 + org.apache.thrift.TBaseHelper.hashCode(lastCompactionId); + hashCode = hashCode * 8191 + ((isSetCatName()) ? 
131071 : 524287); + if (isSetCatName()) + hashCode = hashCode * 8191 + catName.hashCode(); + return hashCode; } @@ -464,6 +528,16 @@ public int compareTo(GetLatestCommittedCompactionInfoRequest other) { return lastComparison; } } + lastComparison = java.lang.Boolean.compare(isSetCatName(), other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -516,6 +590,16 @@ public java.lang.String toString() { sb.append(this.lastCompactionId); first = false; } + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -611,6 +695,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetLatestCommittedC org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 5: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -653,6 +745,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetLatestCommitted oprot.writeI64(struct.lastCompactionId); oprot.writeFieldEnd(); } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -679,7 +778,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetLatestCommittedC if (struct.isSetLastCompactionId()) { optionals.set(1); } - 
oprot.writeBitSet(optionals, 2); + if (struct.isSetCatName()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); if (struct.isSetPartitionnames()) { { oprot.writeI32(struct.partitionnames.size()); @@ -692,6 +794,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetLatestCommittedC if (struct.isSetLastCompactionId()) { oprot.writeI64(struct.lastCompactionId); } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override @@ -701,7 +806,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetLatestCommittedCo struct.setDbnameIsSet(true); struct.tablename = iprot.readString(); struct.setTablenameIsSet(true); - java.util.BitSet incoming = iprot.readBitSet(2); + java.util.BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { org.apache.thrift.protocol.TList _list975 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); @@ -719,6 +824,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetLatestCommittedCo struct.lastCompactionId = iprot.readI64(); struct.setLastCompactionIdIsSet(true); } + if (incoming.get(2)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/MaxAllocatedTableWriteIdRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/MaxAllocatedTableWriteIdRequest.java index 646dcdc93074..812c9dba6b03 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/MaxAllocatedTableWriteIdRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/MaxAllocatedTableWriteIdRequest.java @@ -13,17 +13,20 @@ private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", 
org.apache.thrift.protocol.TType.STRING, (short)1); private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)3); private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new MaxAllocatedTableWriteIdRequestStandardSchemeFactory(); private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new MaxAllocatedTableWriteIdRequestTupleSchemeFactory(); private @org.apache.thrift.annotation.Nullable java.lang.String dbName; // required private @org.apache.thrift.annotation.Nullable java.lang.String tableName; // required + private @org.apache.thrift.annotation.Nullable java.lang.String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "dbName"), - TABLE_NAME((short)2, "tableName"); + TABLE_NAME((short)2, "tableName"), + CAT_NAME((short)3, "catName"); private static final java.util.Map byName = new java.util.HashMap(); @@ -43,6 +46,8 @@ public static _Fields findByThriftId(int fieldId) { return DB_NAME; case 2: // TABLE_NAME return TABLE_NAME; + case 3: // CAT_NAME + return CAT_NAME; default: return null; } @@ -84,6 +89,7 @@ public java.lang.String getFieldName() { } // isset id assignments + private static final _Fields optionals[] = {_Fields.CAT_NAME}; public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -91,11 +97,15 @@ public java.lang.String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.REQUIRED, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(MaxAllocatedTableWriteIdRequest.class, metaDataMap); } public MaxAllocatedTableWriteIdRequest() { + this.catName = "hive"; + } public MaxAllocatedTableWriteIdRequest( @@ -117,6 +127,9 @@ public MaxAllocatedTableWriteIdRequest(MaxAllocatedTableWriteIdRequest other) { if (other.isSetTableName()) { this.tableName = other.tableName; } + if (other.isSetCatName()) { 
+ this.catName = other.catName; + } } public MaxAllocatedTableWriteIdRequest deepCopy() { @@ -127,6 +140,8 @@ public MaxAllocatedTableWriteIdRequest deepCopy() { public void clear() { this.dbName = null; this.tableName = null; + this.catName = "hive"; + } @org.apache.thrift.annotation.Nullable @@ -177,6 +192,30 @@ public void setTableNameIsSet(boolean value) { } } + @org.apache.thrift.annotation.Nullable + public java.lang.String getCatName() { + return this.catName; + } + + public void setCatName(@org.apache.thrift.annotation.Nullable java.lang.String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) { switch (field) { case DB_NAME: @@ -195,6 +234,14 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((java.lang.String)value); + } + break; + } } @@ -207,6 +254,9 @@ public java.lang.Object getFieldValue(_Fields field) { case TABLE_NAME: return getTableName(); + case CAT_NAME: + return getCatName(); + } throw new java.lang.IllegalStateException(); } @@ -222,6 +272,8 @@ public boolean isSet(_Fields field) { return isSetDbName(); case TABLE_NAME: return isSetTableName(); + case CAT_NAME: + return isSetCatName(); } throw new java.lang.IllegalStateException(); } @@ -257,6 +309,15 @@ public boolean equals(MaxAllocatedTableWriteIdRequest that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if 
(!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -272,6 +333,10 @@ public int hashCode() { if (isSetTableName()) hashCode = hashCode * 8191 + tableName.hashCode(); + hashCode = hashCode * 8191 + ((isSetCatName()) ? 131071 : 524287); + if (isSetCatName()) + hashCode = hashCode * 8191 + catName.hashCode(); + return hashCode; } @@ -303,6 +368,16 @@ public int compareTo(MaxAllocatedTableWriteIdRequest other) { return lastComparison; } } + lastComparison = java.lang.Boolean.compare(isSetCatName(), other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -339,6 +414,16 @@ public java.lang.String toString() { sb.append(this.tableName); } first = false; + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -406,6 +491,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, MaxAllocatedTableWr org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 3: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -429,6 +522,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, MaxAllocatedTableW oprot.writeString(struct.tableName); oprot.writeFieldEnd(); } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); 
+ oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -448,6 +548,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, MaxAllocatedTableWr org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot; oprot.writeString(struct.dbName); oprot.writeString(struct.tableName); + java.util.BitSet optionals = new java.util.BitSet(); + if (struct.isSetCatName()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override @@ -457,6 +565,11 @@ public void read(org.apache.thrift.protocol.TProtocol prot, MaxAllocatedTableWri struct.setDbNameIsSet(true); struct.tableName = iprot.readString(); struct.setTableNameIsSet(true); + java.util.BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ReplTblWriteIdStateRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ReplTblWriteIdStateRequest.java index 714ef2733cfd..3ce194cc1bd1 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ReplTblWriteIdStateRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ReplTblWriteIdStateRequest.java @@ -17,6 +17,7 @@ private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)4); private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)5); private static final 
org.apache.thrift.protocol.TField PART_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("partNames", org.apache.thrift.protocol.TType.LIST, (short)6); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)7); private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new ReplTblWriteIdStateRequestStandardSchemeFactory(); private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new ReplTblWriteIdStateRequestTupleSchemeFactory(); @@ -27,6 +28,7 @@ private @org.apache.thrift.annotation.Nullable java.lang.String dbName; // required private @org.apache.thrift.annotation.Nullable java.lang.String tableName; // required private @org.apache.thrift.annotation.Nullable java.util.List partNames; // optional + private @org.apache.thrift.annotation.Nullable java.lang.String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -35,7 +37,8 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum { HOST_NAME((short)3, "hostName"), DB_NAME((short)4, "dbName"), TABLE_NAME((short)5, "tableName"), - PART_NAMES((short)6, "partNames"); + PART_NAMES((short)6, "partNames"), + CAT_NAME((short)7, "catName"); private static final java.util.Map byName = new java.util.HashMap(); @@ -63,6 +66,8 @@ public static _Fields findByThriftId(int fieldId) { return TABLE_NAME; case 6: // PART_NAMES return PART_NAMES; + case 7: // CAT_NAME + return CAT_NAME; default: return null; } @@ -104,7 +109,7 @@ public java.lang.String getFieldName() { } // isset id assignments - private static final _Fields optionals[] = {_Fields.PART_NAMES}; + private static final _Fields optionals[] = {_Fields.PART_NAMES,_Fields.CAT_NAME}; public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -121,11 +126,15 @@ public java.lang.String getFieldName() { tmpMap.put(_Fields.PART_NAMES, new org.apache.thrift.meta_data.FieldMetaData("partNames", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ReplTblWriteIdStateRequest.class, metaDataMap); } public ReplTblWriteIdStateRequest() { + this.catName = "hive"; + } public 
ReplTblWriteIdStateRequest( @@ -166,6 +175,9 @@ public ReplTblWriteIdStateRequest(ReplTblWriteIdStateRequest other) { java.util.List __this__partNames = new java.util.ArrayList(other.partNames); this.partNames = __this__partNames; } + if (other.isSetCatName()) { + this.catName = other.catName; + } } public ReplTblWriteIdStateRequest deepCopy() { @@ -180,6 +192,8 @@ public void clear() { this.dbName = null; this.tableName = null; this.partNames = null; + this.catName = "hive"; + } @org.apache.thrift.annotation.Nullable @@ -342,6 +356,30 @@ public void setPartNamesIsSet(boolean value) { } } + @org.apache.thrift.annotation.Nullable + public java.lang.String getCatName() { + return this.catName; + } + + public void setCatName(@org.apache.thrift.annotation.Nullable java.lang.String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) { switch (field) { case VALID_WRITE_IDLIST: @@ -392,6 +430,14 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((java.lang.String)value); + } + break; + } } @@ -416,6 +462,9 @@ public java.lang.Object getFieldValue(_Fields field) { case PART_NAMES: return getPartNames(); + case CAT_NAME: + return getCatName(); + } throw new java.lang.IllegalStateException(); } @@ -439,6 +488,8 @@ public boolean isSet(_Fields field) { return isSetTableName(); case PART_NAMES: return isSetPartNames(); + case CAT_NAME: + return isSetCatName(); } throw new java.lang.IllegalStateException(); } @@ -510,6 +561,15 @@ public boolean 
equals(ReplTblWriteIdStateRequest that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -541,6 +601,10 @@ public int hashCode() { if (isSetPartNames()) hashCode = hashCode * 8191 + partNames.hashCode(); + hashCode = hashCode * 8191 + ((isSetCatName()) ? 131071 : 524287); + if (isSetCatName()) + hashCode = hashCode * 8191 + catName.hashCode(); + return hashCode; } @@ -612,6 +676,16 @@ public int compareTo(ReplTblWriteIdStateRequest other) { return lastComparison; } } + lastComparison = java.lang.Boolean.compare(isSetCatName(), other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -682,6 +756,16 @@ public java.lang.String toString() { } first = false; } + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -803,6 +887,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ReplTblWriteIdState org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 7: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -855,6 +947,13 @@ public void 
write(org.apache.thrift.protocol.TProtocol oprot, ReplTblWriteIdStat oprot.writeFieldEnd(); } } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -881,7 +980,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ReplTblWriteIdState if (struct.isSetPartNames()) { optionals.set(0); } - oprot.writeBitSet(optionals, 1); + if (struct.isSetCatName()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); if (struct.isSetPartNames()) { { oprot.writeI32(struct.partNames.size()); @@ -891,6 +993,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ReplTblWriteIdState } } } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override @@ -906,7 +1011,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, ReplTblWriteIdStateR struct.setDbNameIsSet(true); struct.tableName = iprot.readString(); struct.setTableNameIsSet(true); - java.util.BitSet incoming = iprot.readBitSet(1); + java.util.BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { org.apache.thrift.protocol.TList _list851 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); @@ -920,6 +1025,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, ReplTblWriteIdStateR } struct.setPartNamesIsSet(true); } + if (incoming.get(1)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SeedTableWriteIdsRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SeedTableWriteIdsRequest.java index 70e51e2c0758..fea95d7e75ac 100644 --- 
a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SeedTableWriteIdsRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SeedTableWriteIdsRequest.java @@ -14,6 +14,7 @@ private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1); private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField SEED_WRITE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("seedWriteId", org.apache.thrift.protocol.TType.I64, (short)3); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)4); private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new SeedTableWriteIdsRequestStandardSchemeFactory(); private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new SeedTableWriteIdsRequestTupleSchemeFactory(); @@ -21,12 +22,14 @@ private @org.apache.thrift.annotation.Nullable java.lang.String dbName; // required private @org.apache.thrift.annotation.Nullable java.lang.String tableName; // required private long seedWriteId; // required + private @org.apache.thrift.annotation.Nullable java.lang.String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "dbName"), TABLE_NAME((short)2, "tableName"), - SEED_WRITE_ID((short)3, "seedWriteId"); + SEED_WRITE_ID((short)3, "seedWriteId"), + CAT_NAME((short)4, "catName"); private static final java.util.Map byName = new java.util.HashMap(); @@ -48,6 +51,8 @@ public static _Fields findByThriftId(int fieldId) { return TABLE_NAME; case 3: // SEED_WRITE_ID return SEED_WRITE_ID; + case 4: // CAT_NAME + return CAT_NAME; default: return null; } @@ -91,6 +96,7 @@ public java.lang.String getFieldName() { // isset id assignments private static final int __SEEDWRITEID_ISSET_ID = 0; private byte __isset_bitfield = 0; + private static final _Fields optionals[] = {_Fields.CAT_NAME}; public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -100,11 +106,15 @@ public java.lang.String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.SEED_WRITE_ID, new org.apache.thrift.meta_data.FieldMetaData("seedWriteId", org.apache.thrift.TFieldRequirementType.REQUIRED, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(SeedTableWriteIdsRequest.class, metaDataMap); } public SeedTableWriteIdsRequest() { + this.catName = "hive"; + } public SeedTableWriteIdsRequest( @@ -131,6 +141,9 @@ public 
SeedTableWriteIdsRequest(SeedTableWriteIdsRequest other) { this.tableName = other.tableName; } this.seedWriteId = other.seedWriteId; + if (other.isSetCatName()) { + this.catName = other.catName; + } } public SeedTableWriteIdsRequest deepCopy() { @@ -143,6 +156,8 @@ public void clear() { this.tableName = null; setSeedWriteIdIsSet(false); this.seedWriteId = 0; + this.catName = "hive"; + } @org.apache.thrift.annotation.Nullable @@ -215,6 +230,30 @@ public void setSeedWriteIdIsSet(boolean value) { __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SEEDWRITEID_ISSET_ID, value); } + @org.apache.thrift.annotation.Nullable + public java.lang.String getCatName() { + return this.catName; + } + + public void setCatName(@org.apache.thrift.annotation.Nullable java.lang.String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) { switch (field) { case DB_NAME: @@ -241,6 +280,14 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((java.lang.String)value); + } + break; + } } @@ -256,6 +303,9 @@ public java.lang.Object getFieldValue(_Fields field) { case SEED_WRITE_ID: return getSeedWriteId(); + case CAT_NAME: + return getCatName(); + } throw new java.lang.IllegalStateException(); } @@ -273,6 +323,8 @@ public boolean isSet(_Fields field) { return isSetTableName(); case SEED_WRITE_ID: return isSetSeedWriteId(); + case CAT_NAME: + return isSetCatName(); } throw new java.lang.IllegalStateException(); } @@ -317,6 +369,15 @@ public 
boolean equals(SeedTableWriteIdsRequest that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -334,6 +395,10 @@ public int hashCode() { hashCode = hashCode * 8191 + org.apache.thrift.TBaseHelper.hashCode(seedWriteId); + hashCode = hashCode * 8191 + ((isSetCatName()) ? 131071 : 524287); + if (isSetCatName()) + hashCode = hashCode * 8191 + catName.hashCode(); + return hashCode; } @@ -375,6 +440,16 @@ public int compareTo(SeedTableWriteIdsRequest other) { return lastComparison; } } + lastComparison = java.lang.Boolean.compare(isSetCatName(), other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -415,6 +490,16 @@ public java.lang.String toString() { sb.append("seedWriteId:"); sb.append(this.seedWriteId); first = false; + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -496,6 +581,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SeedTableWriteIdsRe org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 4: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, 
schemeField.type); } @@ -522,6 +615,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, SeedTableWriteIdsR oprot.writeFieldBegin(SEED_WRITE_ID_FIELD_DESC); oprot.writeI64(struct.seedWriteId); oprot.writeFieldEnd(); + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -542,6 +642,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, SeedTableWriteIdsRe oprot.writeString(struct.dbName); oprot.writeString(struct.tableName); oprot.writeI64(struct.seedWriteId); + java.util.BitSet optionals = new java.util.BitSet(); + if (struct.isSetCatName()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override @@ -553,6 +661,11 @@ public void read(org.apache.thrift.protocol.TProtocol prot, SeedTableWriteIdsReq struct.setTableNameIsSet(true); struct.seedWriteId = iprot.readI64(); struct.setSeedWriteIdIsSet(true); + java.util.BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactRequest.java index 4f12c519dabb..7cbf22189391 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactRequest.java @@ -20,6 +20,7 @@ private static final org.apache.thrift.protocol.TField STATE_FIELD_DESC = new org.apache.thrift.protocol.TField("state", 
org.apache.thrift.protocol.TType.STRING, (short)7); private static final org.apache.thrift.protocol.TField LIMIT_FIELD_DESC = new org.apache.thrift.protocol.TField("limit", org.apache.thrift.protocol.TType.I64, (short)8); private static final org.apache.thrift.protocol.TField ORDER_FIELD_DESC = new org.apache.thrift.protocol.TField("order", org.apache.thrift.protocol.TType.STRING, (short)9); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)10); private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new ShowCompactRequestStandardSchemeFactory(); private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new ShowCompactRequestTupleSchemeFactory(); @@ -33,6 +34,7 @@ private @org.apache.thrift.annotation.Nullable java.lang.String state; // optional private long limit; // optional private @org.apache.thrift.annotation.Nullable java.lang.String order; // optional + private @org.apache.thrift.annotation.Nullable java.lang.String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -48,7 +50,8 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum { TYPE((short)6, "type"), STATE((short)7, "state"), LIMIT((short)8, "limit"), - ORDER((short)9, "order"); + ORDER((short)9, "order"), + CAT_NAME((short)10, "catName"); private static final java.util.Map byName = new java.util.HashMap(); @@ -82,6 +85,8 @@ public static _Fields findByThriftId(int fieldId) { return LIMIT; case 9: // ORDER return ORDER; + case 10: // CAT_NAME + return CAT_NAME; default: return null; } @@ -126,7 +131,7 @@ public java.lang.String getFieldName() { private static final int __ID_ISSET_ID = 0; private static final int __LIMIT_ISSET_ID = 1; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.ID,_Fields.POOL_NAME,_Fields.DB_NAME,_Fields.TB_NAME,_Fields.PART_NAME,_Fields.TYPE,_Fields.STATE,_Fields.LIMIT,_Fields.ORDER}; + private static final _Fields optionals[] = {_Fields.ID,_Fields.POOL_NAME,_Fields.DB_NAME,_Fields.TB_NAME,_Fields.PART_NAME,_Fields.TYPE,_Fields.STATE,_Fields.LIMIT,_Fields.ORDER,_Fields.CAT_NAME}; public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -148,11 +153,15 @@ public java.lang.String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); tmpMap.put(_Fields.ORDER, new org.apache.thrift.meta_data.FieldMetaData("order", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new 
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ShowCompactRequest.class, metaDataMap); } public ShowCompactRequest() { + this.catName = "hive"; + } /** @@ -183,6 +192,9 @@ public ShowCompactRequest(ShowCompactRequest other) { if (other.isSetOrder()) { this.order = other.order; } + if (other.isSetCatName()) { + this.catName = other.catName; + } } public ShowCompactRequest deepCopy() { @@ -202,6 +214,8 @@ public void clear() { setLimitIsSet(false); this.limit = 0; this.order = null; + this.catName = "hive"; + } public long getId() { @@ -424,6 +438,30 @@ public void setOrderIsSet(boolean value) { } } + @org.apache.thrift.annotation.Nullable + public java.lang.String getCatName() { + return this.catName; + } + + public void setCatName(@org.apache.thrift.annotation.Nullable java.lang.String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) { switch (field) { case ID: @@ -498,6 +536,14 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((java.lang.String)value); + } + break; + } } @@ -531,6 +577,9 @@ public java.lang.Object getFieldValue(_Fields field) { case ORDER: return getOrder(); + case CAT_NAME: + return getCatName(); + } throw new java.lang.IllegalStateException(); } @@ -560,6 +609,8 @@ public boolean isSet(_Fields field) { return isSetLimit(); case ORDER: return 
isSetOrder(); + case CAT_NAME: + return isSetCatName(); } throw new java.lang.IllegalStateException(); } @@ -658,6 +709,15 @@ public boolean equals(ShowCompactRequest that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -701,6 +761,10 @@ public int hashCode() { if (isSetOrder()) hashCode = hashCode * 8191 + order.hashCode(); + hashCode = hashCode * 8191 + ((isSetCatName()) ? 131071 : 524287); + if (isSetCatName()) + hashCode = hashCode * 8191 + catName.hashCode(); + return hashCode; } @@ -802,6 +866,16 @@ public int compareTo(ShowCompactRequest other) { return lastComparison; } } + lastComparison = java.lang.Boolean.compare(isSetCatName(), other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -904,6 +978,16 @@ public java.lang.String toString() { } first = false; } + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -1021,6 +1105,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ShowCompactRequest org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 10: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: 
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -1093,6 +1185,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ShowCompactRequest oprot.writeFieldEnd(); } } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -1138,7 +1237,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ShowCompactRequest if (struct.isSetOrder()) { optionals.set(8); } - oprot.writeBitSet(optionals, 9); + if (struct.isSetCatName()) { + optionals.set(9); + } + oprot.writeBitSet(optionals, 10); if (struct.isSetId()) { oprot.writeI64(struct.id); } @@ -1166,12 +1268,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ShowCompactRequest if (struct.isSetOrder()) { oprot.writeString(struct.order); } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, ShowCompactRequest struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot; - java.util.BitSet incoming = iprot.readBitSet(9); + java.util.BitSet incoming = iprot.readBitSet(10); if (incoming.get(0)) { struct.id = iprot.readI64(); struct.setIdIsSet(true); @@ -1208,6 +1313,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, ShowCompactRequest s struct.order = iprot.readString(); struct.setOrderIsSet(true); } + if (incoming.get(9)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponseElement.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponseElement.java index 
ee0f4bf50187..44ba2ead2d8d 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponseElement.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponseElement.java @@ -35,6 +35,7 @@ private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)22); private static final org.apache.thrift.protocol.TField COMMIT_TIME_FIELD_DESC = new org.apache.thrift.protocol.TField("commitTime", org.apache.thrift.protocol.TType.I64, (short)23); private static final org.apache.thrift.protocol.TField HIGHTEST_WRITE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("hightestWriteId", org.apache.thrift.protocol.TType.I64, (short)24); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)25); private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new ShowCompactResponseElementStandardSchemeFactory(); private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new ShowCompactResponseElementTupleSchemeFactory(); @@ -63,6 +64,7 @@ private long txnId; // optional private long commitTime; // optional private long hightestWriteId; // optional + private @org.apache.thrift.annotation.Nullable java.lang.String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -93,7 +95,8 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum { NEXT_TXN_ID((short)21, "nextTxnId"), TXN_ID((short)22, "txnId"), COMMIT_TIME((short)23, "commitTime"), - HIGHTEST_WRITE_ID((short)24, "hightestWriteId"); + HIGHTEST_WRITE_ID((short)24, "hightestWriteId"), + CAT_NAME((short)25, "catName"); private static final java.util.Map byName = new java.util.HashMap(); @@ -157,6 +160,8 @@ public static _Fields findByThriftId(int fieldId) { return COMMIT_TIME; case 24: // HIGHTEST_WRITE_ID return HIGHTEST_WRITE_ID; + case 25: // CAT_NAME + return CAT_NAME; default: return null; } @@ -209,7 +214,7 @@ public java.lang.String getFieldName() { private static final int __COMMITTIME_ISSET_ID = 8; private static final int __HIGHTESTWRITEID_ISSET_ID = 9; private short __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.PARTITIONNAME,_Fields.WORKERID,_Fields.START,_Fields.RUN_AS,_Fields.HIGHTEST_TXN_ID,_Fields.META_INFO,_Fields.END_TIME,_Fields.HADOOP_JOB_ID,_Fields.ID,_Fields.ERROR_MESSAGE,_Fields.ENQUEUE_TIME,_Fields.WORKER_VERSION,_Fields.INITIATOR_ID,_Fields.INITIATOR_VERSION,_Fields.CLEANER_START,_Fields.POOL_NAME,_Fields.NEXT_TXN_ID,_Fields.TXN_ID,_Fields.COMMIT_TIME,_Fields.HIGHTEST_WRITE_ID}; + private static final _Fields optionals[] = {_Fields.PARTITIONNAME,_Fields.WORKERID,_Fields.START,_Fields.RUN_AS,_Fields.HIGHTEST_TXN_ID,_Fields.META_INFO,_Fields.END_TIME,_Fields.HADOOP_JOB_ID,_Fields.ID,_Fields.ERROR_MESSAGE,_Fields.ENQUEUE_TIME,_Fields.WORKER_VERSION,_Fields.INITIATOR_ID,_Fields.INITIATOR_VERSION,_Fields.CLEANER_START,_Fields.POOL_NAME,_Fields.NEXT_TXN_ID,_Fields.TXN_ID,_Fields.COMMIT_TIME,_Fields.HIGHTEST_WRITE_ID,_Fields.CAT_NAME}; public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, 
org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -261,6 +266,8 @@ public java.lang.String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); tmpMap.put(_Fields.HIGHTEST_WRITE_ID, new org.apache.thrift.meta_data.FieldMetaData("hightestWriteId", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ShowCompactResponseElement.class, metaDataMap); } @@ -268,6 +275,8 @@ public java.lang.String getFieldName() { public ShowCompactResponseElement() { this.hadoopJobId = "None"; + this.catName = "hive"; + } public ShowCompactResponseElement( @@ -340,6 +349,9 @@ public ShowCompactResponseElement(ShowCompactResponseElement other) { this.txnId = other.txnId; this.commitTime = other.commitTime; this.hightestWriteId = other.hightestWriteId; + if (other.isSetCatName()) { + this.catName = other.catName; + } } public ShowCompactResponseElement deepCopy() { @@ -383,6 +395,8 @@ public void clear() { this.commitTime = 0; setHightestWriteIdIsSet(false); this.hightestWriteId = 0; + this.catName = "hive"; + } @org.apache.thrift.annotation.Nullable @@ -949,6 +963,30 @@ public void setHightestWriteIdIsSet(boolean value) { __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __HIGHTESTWRITEID_ISSET_ID, value); } + @org.apache.thrift.annotation.Nullable + public java.lang.String getCatName() { + return this.catName; + } + + public void setCatName(@org.apache.thrift.annotation.Nullable java.lang.String catName) { + this.catName = catName; + } + + public void 
unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) { switch (field) { case DBNAME: @@ -1143,6 +1181,14 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((java.lang.String)value); + } + break; + } } @@ -1221,6 +1267,9 @@ public java.lang.Object getFieldValue(_Fields field) { case HIGHTEST_WRITE_ID: return getHightestWriteId(); + case CAT_NAME: + return getCatName(); + } throw new java.lang.IllegalStateException(); } @@ -1280,6 +1329,8 @@ public boolean isSet(_Fields field) { return isSetCommitTime(); case HIGHTEST_WRITE_ID: return isSetHightestWriteId(); + case CAT_NAME: + return isSetCatName(); } throw new java.lang.IllegalStateException(); } @@ -1513,6 +1564,15 @@ public boolean equals(ShowCompactResponseElement that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -1616,6 +1676,10 @@ public int hashCode() { if (isSetHightestWriteId()) hashCode = hashCode * 8191 + org.apache.thrift.TBaseHelper.hashCode(hightestWriteId); + hashCode = hashCode * 8191 + ((isSetCatName()) ? 
131071 : 524287); + if (isSetCatName()) + hashCode = hashCode * 8191 + catName.hashCode(); + return hashCode; } @@ -1867,6 +1931,16 @@ public int compareTo(ShowCompactResponseElement other) { return lastComparison; } } + lastComparison = java.lang.Boolean.compare(isSetCatName(), other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -2079,6 +2153,16 @@ public java.lang.String toString() { sb.append(this.hightestWriteId); first = false; } + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -2332,6 +2416,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ShowCompactResponse org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 25: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -2485,6 +2577,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ShowCompactRespons oprot.writeI64(struct.hightestWriteId); oprot.writeFieldEnd(); } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -2567,7 +2666,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ShowCompactResponse if (struct.isSetHightestWriteId()) { optionals.set(19); } - 
oprot.writeBitSet(optionals, 20); + if (struct.isSetCatName()) { + optionals.set(20); + } + oprot.writeBitSet(optionals, 21); if (struct.isSetPartitionname()) { oprot.writeString(struct.partitionname); } @@ -2628,6 +2730,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ShowCompactResponse if (struct.isSetHightestWriteId()) { oprot.writeI64(struct.hightestWriteId); } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override @@ -2641,7 +2746,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, ShowCompactResponseE struct.setTypeIsSet(true); struct.state = iprot.readString(); struct.setStateIsSet(true); - java.util.BitSet incoming = iprot.readBitSet(20); + java.util.BitSet incoming = iprot.readBitSet(21); if (incoming.get(0)) { struct.partitionname = iprot.readString(); struct.setPartitionnameIsSet(true); @@ -2722,6 +2827,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, ShowCompactResponseE struct.hightestWriteId = iprot.readI64(); struct.setHightestWriteIdIsSet(true); } + if (incoming.get(20)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WriteEventInfo.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WriteEventInfo.java index 55763ebf3143..05d3f7371678 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WriteEventInfo.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WriteEventInfo.java @@ -18,6 +18,7 @@ private static final org.apache.thrift.protocol.TField PARTITION_FIELD_DESC = new org.apache.thrift.protocol.TField("partition", org.apache.thrift.protocol.TType.STRING, (short)5); private static final org.apache.thrift.protocol.TField TABLE_OBJ_FIELD_DESC = new 
org.apache.thrift.protocol.TField("tableObj", org.apache.thrift.protocol.TType.STRING, (short)6); private static final org.apache.thrift.protocol.TField PARTITION_OBJ_FIELD_DESC = new org.apache.thrift.protocol.TField("partitionObj", org.apache.thrift.protocol.TType.STRING, (short)7); + private static final org.apache.thrift.protocol.TField CATALOG_FIELD_DESC = new org.apache.thrift.protocol.TField("catalog", org.apache.thrift.protocol.TType.STRING, (short)8); private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new WriteEventInfoStandardSchemeFactory(); private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new WriteEventInfoTupleSchemeFactory(); @@ -29,6 +30,7 @@ private @org.apache.thrift.annotation.Nullable java.lang.String partition; // optional private @org.apache.thrift.annotation.Nullable java.lang.String tableObj; // optional private @org.apache.thrift.annotation.Nullable java.lang.String partitionObj; // optional + private @org.apache.thrift.annotation.Nullable java.lang.String catalog; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -38,7 +40,8 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum { FILES((short)4, "files"), PARTITION((short)5, "partition"), TABLE_OBJ((short)6, "tableObj"), - PARTITION_OBJ((short)7, "partitionObj"); + PARTITION_OBJ((short)7, "partitionObj"), + CATALOG((short)8, "catalog"); private static final java.util.Map byName = new java.util.HashMap(); @@ -68,6 +71,8 @@ public static _Fields findByThriftId(int fieldId) { return TABLE_OBJ; case 7: // PARTITION_OBJ return PARTITION_OBJ; + case 8: // CATALOG + return CATALOG; default: return null; } @@ -111,7 +116,7 @@ public java.lang.String getFieldName() { // isset id assignments private static final int __WRITEID_ISSET_ID = 0; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.PARTITION,_Fields.TABLE_OBJ,_Fields.PARTITION_OBJ}; + private static final _Fields optionals[] = {_Fields.PARTITION,_Fields.TABLE_OBJ,_Fields.PARTITION_OBJ,_Fields.CATALOG}; public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -129,11 +134,15 @@ public java.lang.String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.PARTITION_OBJ, new org.apache.thrift.meta_data.FieldMetaData("partitionObj", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.CATALOG, new org.apache.thrift.meta_data.FieldMetaData("catalog", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); 
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(WriteEventInfo.class, metaDataMap); } public WriteEventInfo() { + this.catalog = "hive"; + } public WriteEventInfo( @@ -174,6 +183,9 @@ public WriteEventInfo(WriteEventInfo other) { if (other.isSetPartitionObj()) { this.partitionObj = other.partitionObj; } + if (other.isSetCatalog()) { + this.catalog = other.catalog; + } } public WriteEventInfo deepCopy() { @@ -190,6 +202,8 @@ public void clear() { this.partition = null; this.tableObj = null; this.partitionObj = null; + this.catalog = "hive"; + } public long getWriteId() { @@ -358,6 +372,30 @@ public void setPartitionObjIsSet(boolean value) { } } + @org.apache.thrift.annotation.Nullable + public java.lang.String getCatalog() { + return this.catalog; + } + + public void setCatalog(@org.apache.thrift.annotation.Nullable java.lang.String catalog) { + this.catalog = catalog; + } + + public void unsetCatalog() { + this.catalog = null; + } + + /** Returns true if field catalog is set (has been assigned a value) and false otherwise */ + public boolean isSetCatalog() { + return this.catalog != null; + } + + public void setCatalogIsSet(boolean value) { + if (!value) { + this.catalog = null; + } + } + public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) { switch (field) { case WRITE_ID: @@ -416,6 +454,14 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable } break; + case CATALOG: + if (value == null) { + unsetCatalog(); + } else { + setCatalog((java.lang.String)value); + } + break; + } } @@ -443,6 +489,9 @@ public java.lang.Object getFieldValue(_Fields field) { case PARTITION_OBJ: return getPartitionObj(); + case CATALOG: + return getCatalog(); + } throw new java.lang.IllegalStateException(); } @@ -468,6 +517,8 @@ public boolean isSet(_Fields field) { return isSetTableObj(); case PARTITION_OBJ: return isSetPartitionObj(); + case CATALOG: + return isSetCatalog(); } throw new 
java.lang.IllegalStateException(); } @@ -548,6 +599,15 @@ public boolean equals(WriteEventInfo that) { return false; } + boolean this_present_catalog = true && this.isSetCatalog(); + boolean that_present_catalog = true && that.isSetCatalog(); + if (this_present_catalog || that_present_catalog) { + if (!(this_present_catalog && that_present_catalog)) + return false; + if (!this.catalog.equals(that.catalog)) + return false; + } + return true; } @@ -581,6 +641,10 @@ public int hashCode() { if (isSetPartitionObj()) hashCode = hashCode * 8191 + partitionObj.hashCode(); + hashCode = hashCode * 8191 + ((isSetCatalog()) ? 131071 : 524287); + if (isSetCatalog()) + hashCode = hashCode * 8191 + catalog.hashCode(); + return hashCode; } @@ -662,6 +726,16 @@ public int compareTo(WriteEventInfo other) { return lastComparison; } } + lastComparison = java.lang.Boolean.compare(isSetCatalog(), other.isSetCatalog()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatalog()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catalog, other.catalog); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -740,6 +814,16 @@ public java.lang.String toString() { } first = false; } + if (isSetCatalog()) { + if (!first) sb.append(", "); + sb.append("catalog:"); + if (this.catalog == null) { + sb.append("null"); + } else { + sb.append(this.catalog); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -857,6 +941,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WriteEventInfo stru org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 8: // CATALOG + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catalog = iprot.readString(); + struct.setCatalogIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ 
-909,6 +1001,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WriteEventInfo str oprot.writeFieldEnd(); } } + if (struct.catalog != null) { + if (struct.isSetCatalog()) { + oprot.writeFieldBegin(CATALOG_FIELD_DESC); + oprot.writeString(struct.catalog); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -940,7 +1039,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, WriteEventInfo stru if (struct.isSetPartitionObj()) { optionals.set(2); } - oprot.writeBitSet(optionals, 3); + if (struct.isSetCatalog()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); if (struct.isSetPartition()) { oprot.writeString(struct.partition); } @@ -950,6 +1052,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, WriteEventInfo stru if (struct.isSetPartitionObj()) { oprot.writeString(struct.partitionObj); } + if (struct.isSetCatalog()) { + oprot.writeString(struct.catalog); + } } @Override @@ -963,7 +1068,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, WriteEventInfo struc struct.setTableIsSet(true); struct.files = iprot.readString(); struct.setFilesIsSet(true); - java.util.BitSet incoming = iprot.readBitSet(3); + java.util.BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { struct.partition = iprot.readString(); struct.setPartitionIsSet(true); @@ -976,6 +1081,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, WriteEventInfo struc struct.partitionObj = iprot.readString(); struct.setPartitionObjIsSet(true); } + if (incoming.get(3)) { + struct.catalog = iprot.readString(); + struct.setCatalogIsSet(true); + } } } diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WriteNotificationLogRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WriteNotificationLogRequest.java index fb5e59cb17df..4f382bae03e7 100644 --- 
a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WriteNotificationLogRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WriteNotificationLogRequest.java @@ -17,6 +17,7 @@ private static final org.apache.thrift.protocol.TField TABLE_FIELD_DESC = new org.apache.thrift.protocol.TField("table", org.apache.thrift.protocol.TType.STRING, (short)4); private static final org.apache.thrift.protocol.TField FILE_INFO_FIELD_DESC = new org.apache.thrift.protocol.TField("fileInfo", org.apache.thrift.protocol.TType.STRUCT, (short)5); private static final org.apache.thrift.protocol.TField PARTITION_VALS_FIELD_DESC = new org.apache.thrift.protocol.TField("partitionVals", org.apache.thrift.protocol.TType.LIST, (short)6); + private static final org.apache.thrift.protocol.TField CAT_FIELD_DESC = new org.apache.thrift.protocol.TField("cat", org.apache.thrift.protocol.TType.STRING, (short)7); private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new WriteNotificationLogRequestStandardSchemeFactory(); private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new WriteNotificationLogRequestTupleSchemeFactory(); @@ -27,6 +28,7 @@ private @org.apache.thrift.annotation.Nullable java.lang.String table; // required private @org.apache.thrift.annotation.Nullable InsertEventRequestData fileInfo; // required private @org.apache.thrift.annotation.Nullable java.util.List partitionVals; // optional + private @org.apache.thrift.annotation.Nullable java.lang.String cat; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -35,7 +37,8 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB((short)3, "db"), TABLE((short)4, "table"), FILE_INFO((short)5, "fileInfo"), - PARTITION_VALS((short)6, "partitionVals"); + PARTITION_VALS((short)6, "partitionVals"), + CAT((short)7, "cat"); private static final java.util.Map byName = new java.util.HashMap(); @@ -63,6 +66,8 @@ public static _Fields findByThriftId(int fieldId) { return FILE_INFO; case 6: // PARTITION_VALS return PARTITION_VALS; + case 7: // CAT + return CAT; default: return null; } @@ -107,7 +112,7 @@ public java.lang.String getFieldName() { private static final int __TXNID_ISSET_ID = 0; private static final int __WRITEID_ISSET_ID = 1; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.PARTITION_VALS}; + private static final _Fields optionals[] = {_Fields.PARTITION_VALS,_Fields.CAT}; public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -124,11 +129,15 @@ public java.lang.String getFieldName() { tmpMap.put(_Fields.PARTITION_VALS, new org.apache.thrift.meta_data.FieldMetaData("partitionVals", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); + tmpMap.put(_Fields.CAT, new org.apache.thrift.meta_data.FieldMetaData("cat", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(WriteNotificationLogRequest.class, 
metaDataMap); } public WriteNotificationLogRequest() { + this.cat = "hive"; + } public WriteNotificationLogRequest( @@ -168,6 +177,9 @@ public WriteNotificationLogRequest(WriteNotificationLogRequest other) { java.util.List __this__partitionVals = new java.util.ArrayList(other.partitionVals); this.partitionVals = __this__partitionVals; } + if (other.isSetCat()) { + this.cat = other.cat; + } } public WriteNotificationLogRequest deepCopy() { @@ -184,6 +196,8 @@ public void clear() { this.table = null; this.fileInfo = null; this.partitionVals = null; + this.cat = "hive"; + } public long getTxnId() { @@ -342,6 +356,30 @@ public void setPartitionValsIsSet(boolean value) { } } + @org.apache.thrift.annotation.Nullable + public java.lang.String getCat() { + return this.cat; + } + + public void setCat(@org.apache.thrift.annotation.Nullable java.lang.String cat) { + this.cat = cat; + } + + public void unsetCat() { + this.cat = null; + } + + /** Returns true if field cat is set (has been assigned a value) and false otherwise */ + public boolean isSetCat() { + return this.cat != null; + } + + public void setCatIsSet(boolean value) { + if (!value) { + this.cat = null; + } + } + public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) { switch (field) { case TXN_ID: @@ -392,6 +430,14 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable } break; + case CAT: + if (value == null) { + unsetCat(); + } else { + setCat((java.lang.String)value); + } + break; + } } @@ -416,6 +462,9 @@ public java.lang.Object getFieldValue(_Fields field) { case PARTITION_VALS: return getPartitionVals(); + case CAT: + return getCat(); + } throw new java.lang.IllegalStateException(); } @@ -439,6 +488,8 @@ public boolean isSet(_Fields field) { return isSetFileInfo(); case PARTITION_VALS: return isSetPartitionVals(); + case CAT: + return isSetCat(); } throw new java.lang.IllegalStateException(); } @@ -510,6 +561,15 @@ public boolean 
equals(WriteNotificationLogRequest that) { return false; } + boolean this_present_cat = true && this.isSetCat(); + boolean that_present_cat = true && that.isSetCat(); + if (this_present_cat || that_present_cat) { + if (!(this_present_cat && that_present_cat)) + return false; + if (!this.cat.equals(that.cat)) + return false; + } + return true; } @@ -537,6 +597,10 @@ public int hashCode() { if (isSetPartitionVals()) hashCode = hashCode * 8191 + partitionVals.hashCode(); + hashCode = hashCode * 8191 + ((isSetCat()) ? 131071 : 524287); + if (isSetCat()) + hashCode = hashCode * 8191 + cat.hashCode(); + return hashCode; } @@ -608,6 +672,16 @@ public int compareTo(WriteNotificationLogRequest other) { return lastComparison; } } + lastComparison = java.lang.Boolean.compare(isSetCat(), other.isSetCat()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCat()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.cat, other.cat); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -670,6 +744,16 @@ public java.lang.String toString() { } first = false; } + if (isSetCat()) { + if (!first) sb.append(", "); + sb.append("cat:"); + if (this.cat == null) { + sb.append("null"); + } else { + sb.append(this.cat); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -797,6 +881,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WriteNotificationLo org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 7: // CAT + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.cat = iprot.readString(); + struct.setCatIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -845,6 +937,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WriteNotificationL oprot.writeFieldEnd(); } } + if (struct.cat != 
null) { + if (struct.isSetCat()) { + oprot.writeFieldBegin(CAT_FIELD_DESC); + oprot.writeString(struct.cat); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -871,7 +970,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, WriteNotificationLo if (struct.isSetPartitionVals()) { optionals.set(0); } - oprot.writeBitSet(optionals, 1); + if (struct.isSetCat()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); if (struct.isSetPartitionVals()) { { oprot.writeI32(struct.partitionVals.size()); @@ -881,6 +983,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, WriteNotificationLo } } } + if (struct.isSetCat()) { + oprot.writeString(struct.cat); + } } @Override @@ -897,7 +1002,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, WriteNotificationLog struct.fileInfo = new InsertEventRequestData(); struct.fileInfo.read(iprot); struct.setFileInfoIsSet(true); - java.util.BitSet incoming = iprot.readBitSet(1); + java.util.BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { org.apache.thrift.protocol.TList _list1121 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); @@ -911,6 +1016,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, WriteNotificationLog } struct.setPartitionValsIsSet(true); } + if (incoming.get(1)) { + struct.cat = iprot.readString(); + struct.setCatIsSet(true); + } } } diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/hive_metastoreConstants.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/hive_metastoreConstants.java index f5a102ab9647..62783c5a46a1 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/hive_metastoreConstants.java +++ 
b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/hive_metastoreConstants.java @@ -49,6 +49,8 @@ public static final java.lang.String META_TABLE_NAME = "name"; + public static final java.lang.String META_TABLE_CAT = "cat"; + public static final java.lang.String META_TABLE_DB = "db"; public static final java.lang.String META_TABLE_LOCATION = "location"; diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/AddDynamicPartitions.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/AddDynamicPartitions.php index cae2a6cf901a..353f25cfa13b 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/AddDynamicPartitions.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/AddDynamicPartitions.php @@ -56,6 +56,11 @@ class AddDynamicPartitions 'type' => TType::I32, 'class' => '\metastore\DataOperationType', ), + 7 => array( + 'var' => 'catName', + 'isRequired' => false, + 'type' => TType::STRING, + ), ); /** @@ -82,6 +87,10 @@ class AddDynamicPartitions * @var int */ public $operationType = 5; + /** + * @var string + */ + public $catName = "hive"; public function __construct($vals = null) { @@ -104,6 +113,9 @@ public function __construct($vals = null) if (isset($vals['operationType'])) { $this->operationType = $vals['operationType']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -177,6 +189,13 @@ public function read($input) $xfer += $input->skip($ftype); } break; + case 7: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -228,6 +247,11 @@ public function write($output) $xfer += $output->writeI32($this->operationType); $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', 
TType::STRING, 7); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/AllocateTableWriteIdsRequest.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/AllocateTableWriteIdsRequest.php index 944ce93bfb10..875530a5b376 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/AllocateTableWriteIdsRequest.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/AllocateTableWriteIdsRequest.php @@ -60,6 +60,11 @@ class AllocateTableWriteIdsRequest 'isRequired' => false, 'type' => TType::BOOL, ), + 7 => array( + 'var' => 'catName', + 'isRequired' => false, + 'type' => TType::STRING, + ), ); /** @@ -86,6 +91,10 @@ class AllocateTableWriteIdsRequest * @var bool */ public $reallocate = false; + /** + * @var string + */ + public $catName = "hive"; public function __construct($vals = null) { @@ -108,6 +117,9 @@ public function __construct($vals = null) if (isset($vals['reallocate'])) { $this->reallocate = $vals['reallocate']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -191,6 +203,13 @@ public function read($input) $xfer += $input->skip($ftype); } break; + case 7: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -249,6 +268,11 @@ public function write($output) $xfer += $output->writeBool($this->reallocate); $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 7); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; diff 
--git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/CompactionInfoStruct.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/CompactionInfoStruct.php index 338e1cb962b8..1794dbea43f0 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/CompactionInfoStruct.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/CompactionInfoStruct.php @@ -117,6 +117,11 @@ class CompactionInfoStruct 'isRequired' => false, 'type' => TType::STRING, ), + 20 => array( + 'var' => 'catName', + 'isRequired' => false, + 'type' => TType::STRING, + ), ); /** @@ -195,6 +200,10 @@ class CompactionInfoStruct * @var string */ public $orderByClause = null; + /** + * @var string + */ + public $catName = "hive"; public function __construct($vals = null) { @@ -256,6 +265,9 @@ public function __construct($vals = null) if (isset($vals['orderByClause'])) { $this->orderByClause = $vals['orderByClause']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -411,6 +423,13 @@ public function read($input) $xfer += $input->skip($ftype); } break; + case 20: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -520,6 +539,11 @@ public function write($output) $xfer += $output->writeString($this->orderByClause); $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 20); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/CompactionMetricsDataRequest.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/CompactionMetricsDataRequest.php index 
e7fce8f5ae34..426e0a49c4ab 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/CompactionMetricsDataRequest.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/CompactionMetricsDataRequest.php @@ -42,6 +42,11 @@ class CompactionMetricsDataRequest 'type' => TType::I32, 'class' => '\metastore\CompactionMetricsMetricType', ), + 5 => array( + 'var' => 'catName', + 'isRequired' => false, + 'type' => TType::STRING, + ), ); /** @@ -60,6 +65,10 @@ class CompactionMetricsDataRequest * @var int */ public $type = null; + /** + * @var string + */ + public $catName = "hive"; public function __construct($vals = null) { @@ -76,6 +85,9 @@ public function __construct($vals = null) if (isset($vals['type'])) { $this->type = $vals['type']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -126,6 +138,13 @@ public function read($input) $xfer += $input->skip($ftype); } break; + case 5: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -160,6 +179,11 @@ public function write($output) $xfer += $output->writeI32($this->type); $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 5); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/CompactionMetricsDataStruct.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/CompactionMetricsDataStruct.php index 0bf69abf2a7d..92a28bc5f982 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/CompactionMetricsDataStruct.php +++ 
b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/CompactionMetricsDataStruct.php @@ -57,6 +57,11 @@ class CompactionMetricsDataStruct 'isRequired' => true, 'type' => TType::I32, ), + 8 => array( + 'var' => 'catName', + 'isRequired' => false, + 'type' => TType::STRING, + ), ); /** @@ -87,6 +92,10 @@ class CompactionMetricsDataStruct * @var int */ public $threshold = null; + /** + * @var string + */ + public $catName = "hive"; public function __construct($vals = null) { @@ -112,6 +121,9 @@ public function __construct($vals = null) if (isset($vals['threshold'])) { $this->threshold = $vals['threshold']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -183,6 +195,13 @@ public function read($input) $xfer += $input->skip($ftype); } break; + case 8: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -232,6 +251,11 @@ public function write($output) $xfer += $output->writeI32($this->threshold); $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 8); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/CompactionRequest.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/CompactionRequest.php index 2625552184f4..9e4181a4602e 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/CompactionRequest.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/CompactionRequest.php @@ -85,6 +85,11 @@ class CompactionRequest 'isRequired' => false, 'type' => TType::STRING, ), + 12 => array( + 'var' => 'catName', + 'isRequired' => false, 
+ 'type' => TType::STRING, + ), ); /** @@ -131,6 +136,10 @@ class CompactionRequest * @var string */ public $orderByClause = null; + /** + * @var string + */ + public $catName = "hive"; public function __construct($vals = null) { @@ -168,6 +177,9 @@ public function __construct($vals = null) if (isset($vals['orderByClause'])) { $this->orderByClause = $vals['orderByClause']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -279,6 +291,13 @@ public function read($input) $xfer += $input->skip($ftype); } break; + case 12: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -356,6 +375,11 @@ public function write($output) $xfer += $output->writeString($this->orderByClause); $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 12); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Constant.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Constant.php index 84961065fd53..41e227e26d75 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Constant.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Constant.php @@ -38,6 +38,7 @@ final class Constant extends \Thrift\Type\TConstant static protected $FIELD_TO_DIMENSION; static protected $IF_PURGE; static protected $META_TABLE_NAME; + static protected $META_TABLE_CAT; static protected $META_TABLE_DB; static protected $META_TABLE_LOCATION; static protected $META_TABLE_SERDE; @@ -163,6 +164,11 @@ protected static function init_META_TABLE_NAME() return "name"; } + protected static function 
init_META_TABLE_CAT() + { + return "cat"; + } + protected static function init_META_TABLE_DB() { return "db"; diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/GetAllWriteEventInfoRequest.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/GetAllWriteEventInfoRequest.php index 83e44cfb021d..2cbc0883608a 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/GetAllWriteEventInfoRequest.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/GetAllWriteEventInfoRequest.php @@ -36,6 +36,11 @@ class GetAllWriteEventInfoRequest 'isRequired' => false, 'type' => TType::STRING, ), + 4 => array( + 'var' => 'catName', + 'isRequired' => false, + 'type' => TType::STRING, + ), ); /** @@ -50,6 +55,10 @@ class GetAllWriteEventInfoRequest * @var string */ public $tableName = null; + /** + * @var string + */ + public $catName = "hive"; public function __construct($vals = null) { @@ -63,6 +72,9 @@ public function __construct($vals = null) if (isset($vals['tableName'])) { $this->tableName = $vals['tableName']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -106,6 +118,13 @@ public function read($input) $xfer += $input->skip($ftype); } break; + case 4: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -135,6 +154,11 @@ public function write($output) $xfer += $output->writeString($this->tableName); $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 4); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; diff --git 
a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/GetLatestCommittedCompactionInfoRequest.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/GetLatestCommittedCompactionInfoRequest.php index 35c534cd2be8..243027c427a5 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/GetLatestCommittedCompactionInfoRequest.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/GetLatestCommittedCompactionInfoRequest.php @@ -45,6 +45,11 @@ class GetLatestCommittedCompactionInfoRequest 'isRequired' => false, 'type' => TType::I64, ), + 5 => array( + 'var' => 'catName', + 'isRequired' => false, + 'type' => TType::STRING, + ), ); /** @@ -63,6 +68,10 @@ class GetLatestCommittedCompactionInfoRequest * @var int */ public $lastCompactionId = null; + /** + * @var string + */ + public $catName = "hive"; public function __construct($vals = null) { @@ -79,6 +88,9 @@ public function __construct($vals = null) if (isset($vals['lastCompactionId'])) { $this->lastCompactionId = $vals['lastCompactionId']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -138,6 +150,13 @@ public function read($input) $xfer += $input->skip($ftype); } break; + case 5: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -179,6 +198,11 @@ public function write($output) $xfer += $output->writeI64($this->lastCompactionId); $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 5); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/MaxAllocatedTableWriteIdRequest.php 
b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/MaxAllocatedTableWriteIdRequest.php index 26adcc36bf86..7224233c67f1 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/MaxAllocatedTableWriteIdRequest.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/MaxAllocatedTableWriteIdRequest.php @@ -31,6 +31,11 @@ class MaxAllocatedTableWriteIdRequest 'isRequired' => true, 'type' => TType::STRING, ), + 3 => array( + 'var' => 'catName', + 'isRequired' => false, + 'type' => TType::STRING, + ), ); /** @@ -41,6 +46,10 @@ class MaxAllocatedTableWriteIdRequest * @var string */ public $tableName = null; + /** + * @var string + */ + public $catName = "hive"; public function __construct($vals = null) { @@ -51,6 +60,9 @@ public function __construct($vals = null) if (isset($vals['tableName'])) { $this->tableName = $vals['tableName']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -87,6 +99,13 @@ public function read($input) $xfer += $input->skip($ftype); } break; + case 3: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -111,6 +130,11 @@ public function write($output) $xfer += $output->writeString($this->tableName); $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 3); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ReplTblWriteIdStateRequest.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ReplTblWriteIdStateRequest.php index 4f3445967df1..d676b92d32d8 100644 --- 
a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ReplTblWriteIdStateRequest.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ReplTblWriteIdStateRequest.php @@ -55,6 +55,11 @@ class ReplTblWriteIdStateRequest 'type' => TType::STRING, ), ), + 7 => array( + 'var' => 'catName', + 'isRequired' => false, + 'type' => TType::STRING, + ), ); /** @@ -81,6 +86,10 @@ class ReplTblWriteIdStateRequest * @var string[] */ public $partNames = null; + /** + * @var string + */ + public $catName = "hive"; public function __construct($vals = null) { @@ -103,6 +112,9 @@ public function __construct($vals = null) if (isset($vals['partNames'])) { $this->partNames = $vals['partNames']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -176,6 +188,13 @@ public function read($input) $xfer += $input->skip($ftype); } break; + case 7: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -227,6 +246,11 @@ public function write($output) $output->writeListEnd(); $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 7); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/SeedTableWriteIdsRequest.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/SeedTableWriteIdsRequest.php index 47402ef283a6..119a99f008c1 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/SeedTableWriteIdsRequest.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/SeedTableWriteIdsRequest.php @@ -36,6 +36,11 @@ class SeedTableWriteIdsRequest 
'isRequired' => true, 'type' => TType::I64, ), + 4 => array( + 'var' => 'catName', + 'isRequired' => false, + 'type' => TType::STRING, + ), ); /** @@ -50,6 +55,10 @@ class SeedTableWriteIdsRequest * @var int */ public $seedWriteId = null; + /** + * @var string + */ + public $catName = "hive"; public function __construct($vals = null) { @@ -63,6 +72,9 @@ public function __construct($vals = null) if (isset($vals['seedWriteId'])) { $this->seedWriteId = $vals['seedWriteId']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -106,6 +118,13 @@ public function read($input) $xfer += $input->skip($ftype); } break; + case 4: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -135,6 +154,11 @@ public function write($output) $xfer += $output->writeI64($this->seedWriteId); $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 4); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ShowCompactRequest.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ShowCompactRequest.php index 543e1c9749e0..528f9af73191 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ShowCompactRequest.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ShowCompactRequest.php @@ -67,6 +67,11 @@ class ShowCompactRequest 'isRequired' => false, 'type' => TType::STRING, ), + 10 => array( + 'var' => 'catName', + 'isRequired' => false, + 'type' => TType::STRING, + ), ); /** @@ -105,6 +110,10 @@ class ShowCompactRequest * @var string */ public $order = null; + /** + * @var string + 
*/ + public $catName = "hive"; public function __construct($vals = null) { @@ -136,6 +145,9 @@ public function __construct($vals = null) if (isset($vals['order'])) { $this->order = $vals['order']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -221,6 +233,13 @@ public function read($input) $xfer += $input->skip($ftype); } break; + case 10: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -280,6 +299,11 @@ public function write($output) $xfer += $output->writeString($this->order); $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 10); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ShowCompactResponseElement.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ShowCompactResponseElement.php index a6a8f0967d90..2d5d051b35d4 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ShowCompactResponseElement.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ShowCompactResponseElement.php @@ -142,6 +142,11 @@ class ShowCompactResponseElement 'isRequired' => false, 'type' => TType::I64, ), + 25 => array( + 'var' => 'catName', + 'isRequired' => false, + 'type' => TType::STRING, + ), ); /** @@ -240,6 +245,10 @@ class ShowCompactResponseElement * @var int */ public $hightestWriteId = null; + /** + * @var string + */ + public $catName = "hive"; public function __construct($vals = null) { @@ -316,6 +325,9 @@ public function __construct($vals = null) if (isset($vals['hightestWriteId'])) { $this->hightestWriteId = 
$vals['hightestWriteId']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -506,6 +518,13 @@ public function read($input) $xfer += $input->skip($ftype); } break; + case 25: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -640,6 +659,11 @@ public function write($output) $xfer += $output->writeI64($this->hightestWriteId); $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 25); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/WriteEventInfo.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/WriteEventInfo.php index f66adc5f23d6..c04160b8c821 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/WriteEventInfo.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/WriteEventInfo.php @@ -56,6 +56,11 @@ class WriteEventInfo 'isRequired' => false, 'type' => TType::STRING, ), + 8 => array( + 'var' => 'catalog', + 'isRequired' => false, + 'type' => TType::STRING, + ), ); /** @@ -86,6 +91,10 @@ class WriteEventInfo * @var string */ public $partitionObj = null; + /** + * @var string + */ + public $catalog = "hive"; public function __construct($vals = null) { @@ -111,6 +120,9 @@ public function __construct($vals = null) if (isset($vals['partitionObj'])) { $this->partitionObj = $vals['partitionObj']; } + if (isset($vals['catalog'])) { + $this->catalog = $vals['catalog']; + } } } @@ -182,6 +194,13 @@ public function read($input) $xfer += $input->skip($ftype); } break; + case 8: + if ($ftype == TType::STRING) { + $xfer += 
$input->readString($this->catalog); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -231,6 +250,11 @@ public function write($output) $xfer += $output->writeString($this->partitionObj); $xfer += $output->writeFieldEnd(); } + if ($this->catalog !== null) { + $xfer += $output->writeFieldBegin('catalog', TType::STRING, 8); + $xfer += $output->writeString($this->catalog); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/WriteNotificationLogRequest.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/WriteNotificationLogRequest.php index 58a8f31a9d67..73b15a5dd873 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/WriteNotificationLogRequest.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/WriteNotificationLogRequest.php @@ -56,6 +56,11 @@ class WriteNotificationLogRequest 'type' => TType::STRING, ), ), + 7 => array( + 'var' => 'cat', + 'isRequired' => false, + 'type' => TType::STRING, + ), ); /** @@ -82,6 +87,10 @@ class WriteNotificationLogRequest * @var string[] */ public $partitionVals = null; + /** + * @var string + */ + public $cat = "hive"; public function __construct($vals = null) { @@ -104,6 +113,9 @@ public function __construct($vals = null) if (isset($vals['partitionVals'])) { $this->partitionVals = $vals['partitionVals']; } + if (isset($vals['cat'])) { + $this->cat = $vals['cat']; + } } } @@ -178,6 +190,13 @@ public function read($input) $xfer += $input->skip($ftype); } break; + case 7: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->cat); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -232,6 +251,11 @@ public function write($output) $output->writeListEnd(); $xfer += 
$output->writeFieldEnd(); } + if ($this->cat !== null) { + $xfer += $output->writeFieldBegin('cat', TType::STRING, 7); + $xfer += $output->writeString($this->cat); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/constants.py b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/constants.py index b5891397a6e2..e0a1a71d3830 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/constants.py +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/constants.py @@ -32,6 +32,7 @@ FIELD_TO_DIMENSION = "field_to_dimension" IF_PURGE = "ifPurge" META_TABLE_NAME = "name" +META_TABLE_CAT = "cat" META_TABLE_DB = "db" META_TABLE_LOCATION = "location" META_TABLE_SERDE = "serde" diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py index 14f8b05ccbf0..9228f77e251f 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py @@ -13329,11 +13329,12 @@ class WriteEventInfo(object): - partition - tableObj - partitionObj + - catalog """ - def __init__(self, writeId=None, database=None, table=None, files=None, partition=None, tableObj=None, partitionObj=None,): + def __init__(self, writeId=None, database=None, table=None, files=None, partition=None, tableObj=None, partitionObj=None, catalog="hive",): self.writeId = writeId self.database = database self.table = table @@ -13341,6 +13342,7 @@ def __init__(self, writeId=None, database=None, table=None, files=None, partitio self.partition = partition self.tableObj = tableObj self.partitionObj = partitionObj + self.catalog = catalog def 
read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: @@ -13386,6 +13388,11 @@ def read(self, iprot): self.partitionObj = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) + elif fid == 8: + if ftype == TType.STRING: + self.catalog = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -13424,6 +13431,10 @@ def write(self, oprot): oprot.writeFieldBegin('partitionObj', TType.STRING, 7) oprot.writeString(self.partitionObj.encode('utf-8') if sys.version_info[0] == 2 else self.partitionObj) oprot.writeFieldEnd() + if self.catalog is not None: + oprot.writeFieldBegin('catalog', TType.STRING, 8) + oprot.writeString(self.catalog.encode('utf-8') if sys.version_info[0] == 2 else self.catalog) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -13806,17 +13817,19 @@ class ReplTblWriteIdStateRequest(object): - dbName - tableName - partNames + - catName """ - def __init__(self, validWriteIdlist=None, user=None, hostName=None, dbName=None, tableName=None, partNames=None,): + def __init__(self, validWriteIdlist=None, user=None, hostName=None, dbName=None, tableName=None, partNames=None, catName="hive",): self.validWriteIdlist = validWriteIdlist self.user = user self.hostName = hostName self.dbName = dbName self.tableName = tableName self.partNames = partNames + self.catName = catName def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: @@ -13862,6 +13875,11 @@ def read(self, iprot): iprot.readListEnd() else: iprot.skip(ftype) + elif fid == 7: + if ftype == TType.STRING: + self.catName = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 
else iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -13899,6 +13917,10 @@ def write(self, oprot): oprot.writeString(iter751.encode('utf-8') if sys.version_info[0] == 2 else iter751) oprot.writeListEnd() oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 7) + oprot.writeString(self.catName.encode('utf-8') if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -14282,17 +14304,19 @@ class AllocateTableWriteIdsRequest(object): - replPolicy - srcTxnToWriteIdList - reallocate + - catName """ - def __init__(self, dbName=None, tableName=None, txnIds=None, replPolicy=None, srcTxnToWriteIdList=None, reallocate=False,): + def __init__(self, dbName=None, tableName=None, txnIds=None, replPolicy=None, srcTxnToWriteIdList=None, reallocate=False, catName="hive",): self.dbName = dbName self.tableName = tableName self.txnIds = txnIds self.replPolicy = replPolicy self.srcTxnToWriteIdList = srcTxnToWriteIdList self.reallocate = reallocate + self.catName = catName def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: @@ -14344,6 +14368,11 @@ def read(self, iprot): self.reallocate = iprot.readBool() else: iprot.skip(ftype) + elif fid == 7: + if ftype == TType.STRING: + self.catName = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -14384,6 +14413,10 @@ def write(self, oprot): oprot.writeFieldBegin('reallocate', TType.BOOL, 6) oprot.writeBool(self.reallocate) oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 7) + oprot.writeString(self.catName.encode('utf-8') if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() 
oprot.writeFieldStop() oprot.writeStructEnd() @@ -14479,13 +14512,15 @@ class MaxAllocatedTableWriteIdRequest(object): Attributes: - dbName - tableName + - catName """ - def __init__(self, dbName=None, tableName=None,): + def __init__(self, dbName=None, tableName=None, catName="hive",): self.dbName = dbName self.tableName = tableName + self.catName = catName def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: @@ -14506,6 +14541,11 @@ def read(self, iprot): self.tableName = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.catName = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -14524,6 +14564,10 @@ def write(self, oprot): oprot.writeFieldBegin('tableName', TType.STRING, 2) oprot.writeString(self.tableName.encode('utf-8') if sys.version_info[0] == 2 else self.tableName) oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 3) + oprot.writeString(self.catName.encode('utf-8') if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -14611,14 +14655,16 @@ class SeedTableWriteIdsRequest(object): - dbName - tableName - seedWriteId + - catName """ - def __init__(self, dbName=None, tableName=None, seedWriteId=None,): + def __init__(self, dbName=None, tableName=None, seedWriteId=None, catName="hive",): self.dbName = dbName self.tableName = tableName self.seedWriteId = seedWriteId + self.catName = catName def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: @@ -14644,6 +14690,11 @@ def read(self, iprot): 
self.seedWriteId = iprot.readI64() else: iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.catName = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -14666,6 +14717,10 @@ def write(self, oprot): oprot.writeFieldBegin('seedWriteId', TType.I64, 3) oprot.writeI64(self.seedWriteId) oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 4) + oprot.writeString(self.catName.encode('utf-8') if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -16039,11 +16094,12 @@ class CompactionRequest(object): - poolName - numberOfBuckets - orderByClause + - catName """ - def __init__(self, dbname=None, tablename=None, partitionname=None, type=None, runas=None, properties=None, initiatorId=None, initiatorVersion=None, poolName=None, numberOfBuckets=None, orderByClause=None,): + def __init__(self, dbname=None, tablename=None, partitionname=None, type=None, runas=None, properties=None, initiatorId=None, initiatorVersion=None, poolName=None, numberOfBuckets=None, orderByClause=None, catName="hive",): self.dbname = dbname self.tablename = tablename self.partitionname = partitionname @@ -16055,6 +16111,7 @@ def __init__(self, dbname=None, tablename=None, partitionname=None, type=None, r self.poolName = poolName self.numberOfBuckets = numberOfBuckets self.orderByClause = orderByClause + self.catName = catName def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: @@ -16126,6 +16183,11 @@ def read(self, iprot): self.orderByClause = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) + elif fid == 12: + if ftype == TType.STRING: + self.catName = 
iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -16184,6 +16246,10 @@ def write(self, oprot): oprot.writeFieldBegin('orderByClause', TType.STRING, 11) oprot.writeString(self.orderByClause.encode('utf-8') if sys.version_info[0] == 2 else self.orderByClause) oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 12) + oprot.writeString(self.catName.encode('utf-8') if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -16230,11 +16296,12 @@ class CompactionInfoStruct(object): - poolname - numberOfBuckets - orderByClause + - catName """ - def __init__(self, id=None, dbname=None, tablename=None, partitionname=None, type=None, runas=None, properties=None, toomanyaborts=None, state=None, workerId=None, start=None, highestWriteId=None, errorMessage=None, hasoldabort=None, enqueueTime=None, retryRetention=None, poolname=None, numberOfBuckets=None, orderByClause=None,): + def __init__(self, id=None, dbname=None, tablename=None, partitionname=None, type=None, runas=None, properties=None, toomanyaborts=None, state=None, workerId=None, start=None, highestWriteId=None, errorMessage=None, hasoldabort=None, enqueueTime=None, retryRetention=None, poolname=None, numberOfBuckets=None, orderByClause=None, catName="hive",): self.id = id self.dbname = dbname self.tablename = tablename @@ -16254,6 +16321,7 @@ def __init__(self, id=None, dbname=None, tablename=None, partitionname=None, typ self.poolname = poolname self.numberOfBuckets = numberOfBuckets self.orderByClause = orderByClause + self.catName = catName def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: @@ -16359,6 +16427,11 @@ def read(self, iprot): self.orderByClause = 
iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) + elif fid == 20: + if ftype == TType.STRING: + self.catName = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -16445,6 +16518,10 @@ def write(self, oprot): oprot.writeFieldBegin('orderByClause', TType.STRING, 19) oprot.writeString(self.orderByClause.encode('utf-8') if sys.version_info[0] == 2 else self.orderByClause) oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 20) + oprot.writeString(self.catName.encode('utf-8') if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -16539,11 +16616,12 @@ class CompactionMetricsDataStruct(object): - metricvalue - version - threshold + - catName """ - def __init__(self, dbname=None, tblname=None, partitionname=None, type=None, metricvalue=None, version=None, threshold=None,): + def __init__(self, dbname=None, tblname=None, partitionname=None, type=None, metricvalue=None, version=None, threshold=None, catName="hive",): self.dbname = dbname self.tblname = tblname self.partitionname = partitionname @@ -16551,6 +16629,7 @@ def __init__(self, dbname=None, tblname=None, partitionname=None, type=None, met self.metricvalue = metricvalue self.version = version self.threshold = threshold + self.catName = catName def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: @@ -16596,6 +16675,11 @@ def read(self, iprot): self.threshold = iprot.readI32() else: iprot.skip(ftype) + elif fid == 8: + if ftype == TType.STRING: + self.catName = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + else: + iprot.skip(ftype) else: 
iprot.skip(ftype) iprot.readFieldEnd() @@ -16634,6 +16718,10 @@ def write(self, oprot): oprot.writeFieldBegin('threshold', TType.I32, 7) oprot.writeI32(self.threshold) oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 8) + oprot.writeString(self.catName.encode('utf-8') if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -16729,15 +16817,17 @@ class CompactionMetricsDataRequest(object): - tblName - partitionName - type + - catName """ - def __init__(self, dbName=None, tblName=None, partitionName=None, type=None,): + def __init__(self, dbName=None, tblName=None, partitionName=None, type=None, catName="hive",): self.dbName = dbName self.tblName = tblName self.partitionName = partitionName self.type = type + self.catName = catName def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: @@ -16768,6 +16858,11 @@ def read(self, iprot): self.type = iprot.readI32() else: iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.catName = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -16794,6 +16889,10 @@ def write(self, oprot): oprot.writeFieldBegin('type', TType.I32, 4) oprot.writeI32(self.type) oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 5) + oprot.writeString(self.catName.encode('utf-8') if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -16926,11 +17025,12 @@ class ShowCompactRequest(object): - state - limit - order + - catName """ - def __init__(self, id=None, poolName=None, dbName=None, tbName=None, partName=None, type=None, state=None, limit=None, order=None,): + def __init__(self, 
id=None, poolName=None, dbName=None, tbName=None, partName=None, type=None, state=None, limit=None, order=None, catName="hive",): self.id = id self.poolName = poolName self.dbName = dbName @@ -16940,6 +17040,7 @@ def __init__(self, id=None, poolName=None, dbName=None, tbName=None, partName=No self.state = state self.limit = limit self.order = order + self.catName = catName def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: @@ -16995,6 +17096,11 @@ def read(self, iprot): self.order = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) + elif fid == 10: + if ftype == TType.STRING: + self.catName = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -17041,6 +17147,10 @@ def write(self, oprot): oprot.writeFieldBegin('order', TType.STRING, 9) oprot.writeString(self.order.encode('utf-8') if sys.version_info[0] == 2 else self.order) oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 10) + oprot.writeString(self.catName.encode('utf-8') if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -17086,11 +17196,12 @@ class ShowCompactResponseElement(object): - txnId - commitTime - hightestWriteId + - catName """ - def __init__(self, dbname=None, tablename=None, partitionname=None, type=None, state=None, workerid=None, start=None, runAs=None, hightestTxnId=None, metaInfo=None, endTime=None, hadoopJobId="None", id=None, errorMessage=None, enqueueTime=None, workerVersion=None, initiatorId=None, initiatorVersion=None, cleanerStart=None, poolName=None, nextTxnId=None, txnId=None, commitTime=None, hightestWriteId=None,): + def __init__(self, dbname=None, tablename=None, 
partitionname=None, type=None, state=None, workerid=None, start=None, runAs=None, hightestTxnId=None, metaInfo=None, endTime=None, hadoopJobId="None", id=None, errorMessage=None, enqueueTime=None, workerVersion=None, initiatorId=None, initiatorVersion=None, cleanerStart=None, poolName=None, nextTxnId=None, txnId=None, commitTime=None, hightestWriteId=None, catName="hive",): self.dbname = dbname self.tablename = tablename self.partitionname = partitionname @@ -17115,6 +17226,7 @@ def __init__(self, dbname=None, tablename=None, partitionname=None, type=None, s self.txnId = txnId self.commitTime = commitTime self.hightestWriteId = hightestWriteId + self.catName = catName def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: @@ -17245,6 +17357,11 @@ def read(self, iprot): self.hightestWriteId = iprot.readI64() else: iprot.skip(ftype) + elif fid == 25: + if ftype == TType.STRING: + self.catName = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -17351,6 +17468,10 @@ def write(self, oprot): oprot.writeFieldBegin('hightestWriteId', TType.I64, 24) oprot.writeI64(self.hightestWriteId) oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 25) + oprot.writeString(self.catName.encode('utf-8') if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -17692,15 +17813,17 @@ class GetLatestCommittedCompactionInfoRequest(object): - tablename - partitionnames - lastCompactionId + - catName """ - def __init__(self, dbname=None, tablename=None, partitionnames=None, lastCompactionId=None,): + def __init__(self, dbname=None, tablename=None, partitionnames=None, lastCompactionId=None, catName="hive",): self.dbname = dbname self.tablename = tablename 
self.partitionnames = partitionnames self.lastCompactionId = lastCompactionId + self.catName = catName def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: @@ -17736,6 +17859,11 @@ def read(self, iprot): self.lastCompactionId = iprot.readI64() else: iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.catName = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -17765,6 +17893,10 @@ def write(self, oprot): oprot.writeFieldBegin('lastCompactionId', TType.I64, 4) oprot.writeI64(self.lastCompactionId) oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 5) + oprot.writeString(self.catName.encode('utf-8') if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -17943,17 +18075,19 @@ class AddDynamicPartitions(object): - tablename - partitionnames - operationType + - catName """ - def __init__(self, txnid=None, writeid=None, dbname=None, tablename=None, partitionnames=None, operationType=5,): + def __init__(self, txnid=None, writeid=None, dbname=None, tablename=None, partitionnames=None, operationType=5, catName="hive",): self.txnid = txnid self.writeid = writeid self.dbname = dbname self.tablename = tablename self.partitionnames = partitionnames self.operationType = operationType + self.catName = catName def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: @@ -17999,6 +18133,11 @@ def read(self, iprot): self.operationType = iprot.readI32() else: iprot.skip(ftype) + elif fid == 7: + if ftype == TType.STRING: + self.catName = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else 
iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -18036,6 +18175,10 @@ def write(self, oprot): oprot.writeFieldBegin('operationType', TType.I32, 6) oprot.writeI32(self.operationType) oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 7) + oprot.writeString(self.catName.encode('utf-8') if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -19250,17 +19393,19 @@ class WriteNotificationLogRequest(object): - table - fileInfo - partitionVals + - cat """ - def __init__(self, txnId=None, writeId=None, db=None, table=None, fileInfo=None, partitionVals=None,): + def __init__(self, txnId=None, writeId=None, db=None, table=None, fileInfo=None, partitionVals=None, cat="hive",): self.txnId = txnId self.writeId = writeId self.db = db self.table = table self.fileInfo = fileInfo self.partitionVals = partitionVals + self.cat = cat def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: @@ -19307,6 +19452,11 @@ def read(self, iprot): iprot.readListEnd() else: iprot.skip(ftype) + elif fid == 7: + if ftype == TType.STRING: + self.cat = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -19344,6 +19494,10 @@ def write(self, oprot): oprot.writeString(iter988.encode('utf-8') if sys.version_info[0] == 2 else iter988) oprot.writeListEnd() oprot.writeFieldEnd() + if self.cat is not None: + oprot.writeFieldBegin('cat', TType.STRING, 7) + oprot.writeString(self.cat.encode('utf-8') if sys.version_info[0] == 2 else self.cat) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -31193,14 +31347,16 @@ class GetAllWriteEventInfoRequest(object): - txnId - dbName - tableName + - catName """ - def 
__init__(self, txnId=None, dbName=None, tableName=None,): + def __init__(self, txnId=None, dbName=None, tableName=None, catName="hive",): self.txnId = txnId self.dbName = dbName self.tableName = tableName + self.catName = catName def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: @@ -31226,6 +31382,11 @@ def read(self, iprot): self.tableName = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.catName = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -31248,6 +31409,10 @@ def write(self, oprot): oprot.writeFieldBegin('tableName', TType.STRING, 3) oprot.writeString(self.tableName.encode('utf-8') if sys.version_info[0] == 2 else self.tableName) oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 4) + oprot.writeString(self.catName.encode('utf-8') if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -33754,6 +33919,7 @@ def __ne__(self, other): (5, TType.STRING, 'partition', 'UTF8', None, ), # 5 (6, TType.STRING, 'tableObj', 'UTF8', None, ), # 6 (7, TType.STRING, 'partitionObj', 'UTF8', None, ), # 7 + (8, TType.STRING, 'catalog', 'UTF8', "hive", ), # 8 ) all_structs.append(ReplLastIdInfo) ReplLastIdInfo.thrift_spec = ( @@ -33792,6 +33958,7 @@ def __ne__(self, other): (4, TType.STRING, 'dbName', 'UTF8', None, ), # 4 (5, TType.STRING, 'tableName', 'UTF8', None, ), # 5 (6, TType.LIST, 'partNames', (TType.STRING, 'UTF8', False), None, ), # 6 + (7, TType.STRING, 'catName', 'UTF8', "hive", ), # 7 ) all_structs.append(GetValidWriteIdsRequest) GetValidWriteIdsRequest.thrift_spec = ( @@ -33829,6 +33996,7 @@ 
def __ne__(self, other): (4, TType.STRING, 'replPolicy', 'UTF8', None, ), # 4 (5, TType.LIST, 'srcTxnToWriteIdList', (TType.STRUCT, [TxnToWriteId, None], False), None, ), # 5 (6, TType.BOOL, 'reallocate', None, False, ), # 6 + (7, TType.STRING, 'catName', 'UTF8', "hive", ), # 7 ) all_structs.append(AllocateTableWriteIdsResponse) AllocateTableWriteIdsResponse.thrift_spec = ( @@ -33840,6 +34008,7 @@ def __ne__(self, other): None, # 0 (1, TType.STRING, 'dbName', 'UTF8', None, ), # 1 (2, TType.STRING, 'tableName', 'UTF8', None, ), # 2 + (3, TType.STRING, 'catName', 'UTF8', "hive", ), # 3 ) all_structs.append(MaxAllocatedTableWriteIdResponse) MaxAllocatedTableWriteIdResponse.thrift_spec = ( @@ -33852,6 +34021,7 @@ def __ne__(self, other): (1, TType.STRING, 'dbName', 'UTF8', None, ), # 1 (2, TType.STRING, 'tableName', 'UTF8', None, ), # 2 (3, TType.I64, 'seedWriteId', None, None, ), # 3 + (4, TType.STRING, 'catName', 'UTF8', "hive", ), # 4 ) all_structs.append(SeedTxnIdRequest) SeedTxnIdRequest.thrift_spec = ( @@ -33978,6 +34148,7 @@ def __ne__(self, other): (9, TType.STRING, 'poolName', 'UTF8', None, ), # 9 (10, TType.I32, 'numberOfBuckets', None, None, ), # 10 (11, TType.STRING, 'orderByClause', 'UTF8', None, ), # 11 + (12, TType.STRING, 'catName', 'UTF8', "hive", ), # 12 ) all_structs.append(CompactionInfoStruct) CompactionInfoStruct.thrift_spec = ( @@ -34001,6 +34172,7 @@ def __ne__(self, other): (17, TType.STRING, 'poolname', 'UTF8', None, ), # 17 (18, TType.I32, 'numberOfBuckets', None, None, ), # 18 (19, TType.STRING, 'orderByClause', 'UTF8', None, ), # 19 + (20, TType.STRING, 'catName', 'UTF8', "hive", ), # 20 ) all_structs.append(OptionalCompactionInfoStruct) OptionalCompactionInfoStruct.thrift_spec = ( @@ -34017,6 +34189,7 @@ def __ne__(self, other): (5, TType.I32, 'metricvalue', None, None, ), # 5 (6, TType.I32, 'version', None, None, ), # 6 (7, TType.I32, 'threshold', None, None, ), # 7 + (8, TType.STRING, 'catName', 'UTF8', "hive", ), # 8 ) 
all_structs.append(CompactionMetricsDataResponse) CompactionMetricsDataResponse.thrift_spec = ( @@ -34030,6 +34203,7 @@ def __ne__(self, other): (2, TType.STRING, 'tblName', 'UTF8', None, ), # 2 (3, TType.STRING, 'partitionName', 'UTF8', None, ), # 3 (4, TType.I32, 'type', None, None, ), # 4 + (5, TType.STRING, 'catName', 'UTF8', "hive", ), # 5 ) all_structs.append(CompactionResponse) CompactionResponse.thrift_spec = ( @@ -34051,6 +34225,7 @@ def __ne__(self, other): (7, TType.STRING, 'state', 'UTF8', None, ), # 7 (8, TType.I64, 'limit', None, None, ), # 8 (9, TType.STRING, 'order', 'UTF8', None, ), # 9 + (10, TType.STRING, 'catName', 'UTF8', "hive", ), # 10 ) all_structs.append(ShowCompactResponseElement) ShowCompactResponseElement.thrift_spec = ( @@ -34079,6 +34254,7 @@ def __ne__(self, other): (22, TType.I64, 'txnId', None, None, ), # 22 (23, TType.I64, 'commitTime', None, None, ), # 23 (24, TType.I64, 'hightestWriteId', None, None, ), # 24 + (25, TType.STRING, 'catName', 'UTF8', "hive", ), # 25 ) all_structs.append(ShowCompactResponse) ShowCompactResponse.thrift_spec = ( @@ -34111,6 +34287,7 @@ def __ne__(self, other): (2, TType.STRING, 'tablename', 'UTF8', None, ), # 2 (3, TType.LIST, 'partitionnames', (TType.STRING, 'UTF8', False), None, ), # 3 (4, TType.I64, 'lastCompactionId', None, None, ), # 4 + (5, TType.STRING, 'catName', 'UTF8', "hive", ), # 5 ) all_structs.append(GetLatestCommittedCompactionInfoResponse) GetLatestCommittedCompactionInfoResponse.thrift_spec = ( @@ -34133,6 +34310,7 @@ def __ne__(self, other): (4, TType.STRING, 'tablename', 'UTF8', None, ), # 4 (5, TType.LIST, 'partitionnames', (TType.STRING, 'UTF8', False), None, ), # 5 (6, TType.I32, 'operationType', None, 5, ), # 6 + (7, TType.STRING, 'catName', 'UTF8', "hive", ), # 7 ) all_structs.append(BasicTxnInfo) BasicTxnInfo.thrift_spec = ( @@ -34234,6 +34412,7 @@ def __ne__(self, other): (4, TType.STRING, 'table', 'UTF8', None, ), # 4 (5, TType.STRUCT, 'fileInfo', [InsertEventRequestData, 
None], None, ), # 5 (6, TType.LIST, 'partitionVals', (TType.STRING, 'UTF8', False), None, ), # 6 + (7, TType.STRING, 'cat', 'UTF8', "hive", ), # 7 ) all_structs.append(WriteNotificationLogResponse) WriteNotificationLogResponse.thrift_spec = ( @@ -35217,6 +35396,7 @@ def __ne__(self, other): (1, TType.I64, 'txnId', None, None, ), # 1 (2, TType.STRING, 'dbName', 'UTF8', None, ), # 2 (3, TType.STRING, 'tableName', 'UTF8', None, ), # 3 + (4, TType.STRING, 'catName', 'UTF8', "hive", ), # 4 ) all_structs.append(DeleteColumnStatisticsRequest) DeleteColumnStatisticsRequest.thrift_spec = ( diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_constants.rb b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_constants.rb index e7c30a2c4dc1..b36296ca1e61 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_constants.rb +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_constants.rb @@ -47,6 +47,8 @@ META_TABLE_NAME = %q"name" +META_TABLE_CAT = %q"cat" + META_TABLE_DB = %q"db" META_TABLE_LOCATION = %q"location" diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb index 90480b9c9789..0c64476c20d2 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb @@ -3987,6 +3987,7 @@ class WriteEventInfo PARTITION = 5 TABLEOBJ = 6 PARTITIONOBJ = 7 + CATALOG = 8 FIELDS = { WRITEID => {:type => ::Thrift::Types::I64, :name => 'writeId'}, @@ -3995,7 +3996,8 @@ class WriteEventInfo FILES => {:type => ::Thrift::Types::STRING, :name => 'files'}, PARTITION => {:type => ::Thrift::Types::STRING, :name => 'partition', :optional => true}, TABLEOBJ => {:type => ::Thrift::Types::STRING, :name => 'tableObj', :optional => 
true}, - PARTITIONOBJ => {:type => ::Thrift::Types::STRING, :name => 'partitionObj', :optional => true} + PARTITIONOBJ => {:type => ::Thrift::Types::STRING, :name => 'partitionObj', :optional => true}, + CATALOG => {:type => ::Thrift::Types::STRING, :name => 'catalog', :default => %q"hive", :optional => true} } def struct_fields; FIELDS; end @@ -4102,6 +4104,7 @@ class ReplTblWriteIdStateRequest DBNAME = 4 TABLENAME = 5 PARTNAMES = 6 + CATNAME = 7 FIELDS = { VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdlist'}, @@ -4109,7 +4112,8 @@ class ReplTblWriteIdStateRequest HOSTNAME => {:type => ::Thrift::Types::STRING, :name => 'hostName'}, DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName'}, - PARTNAMES => {:type => ::Thrift::Types::LIST, :name => 'partNames', :element => {:type => ::Thrift::Types::STRING}, :optional => true} + PARTNAMES => {:type => ::Thrift::Types::LIST, :name => 'partNames', :element => {:type => ::Thrift::Types::STRING}, :optional => true}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :default => %q"hive", :optional => true} } def struct_fields; FIELDS; end @@ -4219,6 +4223,7 @@ class AllocateTableWriteIdsRequest REPLPOLICY = 4 SRCTXNTOWRITEIDLIST = 5 REALLOCATE = 6 + CATNAME = 7 FIELDS = { DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, @@ -4226,7 +4231,8 @@ class AllocateTableWriteIdsRequest TXNIDS => {:type => ::Thrift::Types::LIST, :name => 'txnIds', :element => {:type => ::Thrift::Types::I64}, :optional => true}, REPLPOLICY => {:type => ::Thrift::Types::STRING, :name => 'replPolicy', :optional => true}, SRCTXNTOWRITEIDLIST => {:type => ::Thrift::Types::LIST, :name => 'srcTxnToWriteIdList', :element => {:type => ::Thrift::Types::STRUCT, :class => ::TxnToWriteId}, :optional => true}, - REALLOCATE => {:type => ::Thrift::Types::BOOL, :name => 'reallocate', :default => false, :optional => true} + 
REALLOCATE => {:type => ::Thrift::Types::BOOL, :name => 'reallocate', :default => false, :optional => true}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :default => %q"hive", :optional => true} } def struct_fields; FIELDS; end @@ -4260,10 +4266,12 @@ class MaxAllocatedTableWriteIdRequest include ::Thrift::Struct, ::Thrift::Struct_Union DBNAME = 1 TABLENAME = 2 + CATNAME = 3 FIELDS = { DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, - TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName'} + TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName'}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :default => %q"hive", :optional => true} } def struct_fields; FIELDS; end @@ -4298,11 +4306,13 @@ class SeedTableWriteIdsRequest DBNAME = 1 TABLENAME = 2 SEEDWRITEID = 3 + CATNAME = 4 FIELDS = { DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName'}, - SEEDWRITEID => {:type => ::Thrift::Types::I64, :name => 'seedWriteId'} + SEEDWRITEID => {:type => ::Thrift::Types::I64, :name => 'seedWriteId'}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :default => %q"hive", :optional => true} } def struct_fields; FIELDS; end @@ -4674,6 +4684,7 @@ class CompactionRequest POOLNAME = 9 NUMBEROFBUCKETS = 10 ORDERBYCLAUSE = 11 + CATNAME = 12 FIELDS = { DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbname'}, @@ -4686,7 +4697,8 @@ class CompactionRequest INITIATORVERSION => {:type => ::Thrift::Types::STRING, :name => 'initiatorVersion', :optional => true}, POOLNAME => {:type => ::Thrift::Types::STRING, :name => 'poolName', :optional => true}, NUMBEROFBUCKETS => {:type => ::Thrift::Types::I32, :name => 'numberOfBuckets', :optional => true}, - ORDERBYCLAUSE => {:type => ::Thrift::Types::STRING, :name => 'orderByClause', :optional => true} + ORDERBYCLAUSE => {:type => ::Thrift::Types::STRING, :name => 
'orderByClause', :optional => true}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :default => %q"hive", :optional => true} } def struct_fields; FIELDS; end @@ -4724,6 +4736,7 @@ class CompactionInfoStruct POOLNAME = 17 NUMBEROFBUCKETS = 18 ORDERBYCLAUSE = 19 + CATNAME = 20 FIELDS = { ID => {:type => ::Thrift::Types::I64, :name => 'id'}, @@ -4744,7 +4757,8 @@ class CompactionInfoStruct RETRYRETENTION => {:type => ::Thrift::Types::I64, :name => 'retryRetention', :optional => true}, POOLNAME => {:type => ::Thrift::Types::STRING, :name => 'poolname', :optional => true}, NUMBEROFBUCKETS => {:type => ::Thrift::Types::I32, :name => 'numberOfBuckets', :optional => true}, - ORDERBYCLAUSE => {:type => ::Thrift::Types::STRING, :name => 'orderByClause', :optional => true} + ORDERBYCLAUSE => {:type => ::Thrift::Types::STRING, :name => 'orderByClause', :optional => true}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :default => %q"hive", :optional => true} } def struct_fields; FIELDS; end @@ -4787,6 +4801,7 @@ class CompactionMetricsDataStruct METRICVALUE = 5 VERSION = 6 THRESHOLD = 7 + CATNAME = 8 FIELDS = { DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbname'}, @@ -4795,7 +4810,8 @@ class CompactionMetricsDataStruct TYPE => {:type => ::Thrift::Types::I32, :name => 'type', :enum_class => ::CompactionMetricsMetricType}, METRICVALUE => {:type => ::Thrift::Types::I32, :name => 'metricvalue'}, VERSION => {:type => ::Thrift::Types::I32, :name => 'version'}, - THRESHOLD => {:type => ::Thrift::Types::I32, :name => 'threshold'} + THRESHOLD => {:type => ::Thrift::Types::I32, :name => 'threshold'}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :default => %q"hive", :optional => true} } def struct_fields; FIELDS; end @@ -4837,12 +4853,14 @@ class CompactionMetricsDataRequest TBLNAME = 2 PARTITIONNAME = 3 TYPE = 4 + CATNAME = 5 FIELDS = { DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, TBLNAME => 
{:type => ::Thrift::Types::STRING, :name => 'tblName'}, PARTITIONNAME => {:type => ::Thrift::Types::STRING, :name => 'partitionName', :optional => true}, - TYPE => {:type => ::Thrift::Types::I32, :name => 'type', :enum_class => ::CompactionMetricsMetricType} + TYPE => {:type => ::Thrift::Types::I32, :name => 'type', :enum_class => ::CompactionMetricsMetricType}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :default => %q"hive", :optional => true} } def struct_fields; FIELDS; end @@ -4895,6 +4913,7 @@ class ShowCompactRequest STATE = 7 LIMIT = 8 ORDER = 9 + CATNAME = 10 FIELDS = { ID => {:type => ::Thrift::Types::I64, :name => 'id', :optional => true}, @@ -4905,7 +4924,8 @@ class ShowCompactRequest TYPE => {:type => ::Thrift::Types::I32, :name => 'type', :optional => true, :enum_class => ::CompactionType}, STATE => {:type => ::Thrift::Types::STRING, :name => 'state', :optional => true}, LIMIT => {:type => ::Thrift::Types::I64, :name => 'limit', :optional => true}, - ORDER => {:type => ::Thrift::Types::STRING, :name => 'order', :optional => true} + ORDER => {:type => ::Thrift::Types::STRING, :name => 'order', :optional => true}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :default => %q"hive", :optional => true} } def struct_fields; FIELDS; end @@ -4945,6 +4965,7 @@ class ShowCompactResponseElement TXNID = 22 COMMITTIME = 23 HIGHTESTWRITEID = 24 + CATNAME = 25 FIELDS = { DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbname'}, @@ -4970,7 +4991,8 @@ class ShowCompactResponseElement NEXTTXNID => {:type => ::Thrift::Types::I64, :name => 'nextTxnId', :optional => true}, TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :optional => true}, COMMITTIME => {:type => ::Thrift::Types::I64, :name => 'commitTime', :optional => true}, - HIGHTESTWRITEID => {:type => ::Thrift::Types::I64, :name => 'hightestWriteId', :optional => true} + HIGHTESTWRITEID => {:type => ::Thrift::Types::I64, :name => 'hightestWriteId', 
:optional => true}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :default => %q"hive", :optional => true} } def struct_fields; FIELDS; end @@ -5070,12 +5092,14 @@ class GetLatestCommittedCompactionInfoRequest TABLENAME = 2 PARTITIONNAMES = 3 LASTCOMPACTIONID = 4 + CATNAME = 5 FIELDS = { DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbname'}, TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tablename'}, PARTITIONNAMES => {:type => ::Thrift::Types::LIST, :name => 'partitionnames', :element => {:type => ::Thrift::Types::STRING}, :optional => true}, - LASTCOMPACTIONID => {:type => ::Thrift::Types::I64, :name => 'lastCompactionId', :optional => true} + LASTCOMPACTIONID => {:type => ::Thrift::Types::I64, :name => 'lastCompactionId', :optional => true}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :default => %q"hive", :optional => true} } def struct_fields; FIELDS; end @@ -5133,6 +5157,7 @@ class AddDynamicPartitions TABLENAME = 4 PARTITIONNAMES = 5 OPERATIONTYPE = 6 + CATNAME = 7 FIELDS = { TXNID => {:type => ::Thrift::Types::I64, :name => 'txnid'}, @@ -5140,7 +5165,8 @@ class AddDynamicPartitions DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbname'}, TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tablename'}, PARTITIONNAMES => {:type => ::Thrift::Types::LIST, :name => 'partitionnames', :element => {:type => ::Thrift::Types::STRING}}, - OPERATIONTYPE => {:type => ::Thrift::Types::I32, :name => 'operationType', :default => 5, :optional => true, :enum_class => ::DataOperationType} + OPERATIONTYPE => {:type => ::Thrift::Types::I32, :name => 'operationType', :default => 5, :optional => true, :enum_class => ::DataOperationType}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :default => %q"hive", :optional => true} } def struct_fields; FIELDS; end @@ -5444,6 +5470,7 @@ class WriteNotificationLogRequest TABLE = 4 FILEINFO = 5 PARTITIONVALS = 6 + CAT = 7 FIELDS = { TXNID 
=> {:type => ::Thrift::Types::I64, :name => 'txnId'}, @@ -5451,7 +5478,8 @@ class WriteNotificationLogRequest DB => {:type => ::Thrift::Types::STRING, :name => 'db'}, TABLE => {:type => ::Thrift::Types::STRING, :name => 'table'}, FILEINFO => {:type => ::Thrift::Types::STRUCT, :name => 'fileInfo', :class => ::InsertEventRequestData}, - PARTITIONVALS => {:type => ::Thrift::Types::LIST, :name => 'partitionVals', :element => {:type => ::Thrift::Types::STRING}, :optional => true} + PARTITIONVALS => {:type => ::Thrift::Types::LIST, :name => 'partitionVals', :element => {:type => ::Thrift::Types::STRING}, :optional => true}, + CAT => {:type => ::Thrift::Types::STRING, :name => 'cat', :default => %q"hive", :optional => true} } def struct_fields; FIELDS; end @@ -8450,11 +8478,13 @@ class GetAllWriteEventInfoRequest TXNID = 1 DBNAME = 2 TABLENAME = 3 + CATNAME = 4 FIELDS = { TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId'}, DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName', :optional => true}, - TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName', :optional => true} + TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName', :optional => true}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :default => %q"hive", :optional => true} } def struct_fields; FIELDS; end diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/common/repl/ReplScope.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/common/repl/ReplScope.java index ccf22b9c7be8..6c40b61b5859 100644 --- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/common/repl/ReplScope.java +++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/common/repl/ReplScope.java @@ -26,6 +26,7 @@ * tables included under the scope of replication. 
*/ public class ReplScope implements Serializable { + private String catName; private String dbName; private Pattern dbNamePattern; @@ -38,10 +39,19 @@ public class ReplScope implements Serializable { public ReplScope() { } - public ReplScope(String dbName) { + public ReplScope(String catName, String dbName) { + setCatName(catName); setDbName(dbName); } + public void setCatName(String catName) { + this.catName = catName; + } + + public String getCatName() { + return catName; + } + public void setDbName(String dbName) { this.dbName = dbName; this.dbNamePattern = (((dbName == null) || "*".equals(dbName)) diff --git a/standalone-metastore/metastore-common/src/main/protobuf/org/apache/hadoop/hive/metastore/hive_metastore.proto b/standalone-metastore/metastore-common/src/main/protobuf/org/apache/hadoop/hive/metastore/hive_metastore.proto index 85cd0d48a709..f57aa0922705 100644 --- a/standalone-metastore/metastore-common/src/main/protobuf/org/apache/hadoop/hive/metastore/hive_metastore.proto +++ b/standalone-metastore/metastore-common/src/main/protobuf/org/apache/hadoop/hive/metastore/hive_metastore.proto @@ -1988,6 +1988,7 @@ message WriteEventInfo { string partition = 5; string table_obj = 6; string partition_obj = 7; + string catalog = 8; } message ReplLastIdInfo { @@ -2031,6 +2032,7 @@ message ReplTblWriteidStateRequest { string host_name = 3; string db_name = 4; repeated string part_names = 5; + string cat_name = 6; } message ReplTblWriteidStateResponse { @@ -2065,6 +2067,7 @@ message AllocateTableWriteIdsRequest { repeated int64 txn_ids = 3; string repl_policy = 4; repeated TxnToWriteId src_txn_to_write_id_lists = 5; + string cat_name = 6; } message AllocateTableWriteIdsResponse { @@ -2074,6 +2077,7 @@ message AllocateTableWriteIdsResponse { message GetMaxAllocatedTableWriteIdRequest { string db_name = 1; string table_name = 2; + string cat_name = 3; } message GetMaxAllocatedTableWriteIdResponse { int64 max_write_id = 1; @@ -2240,6 +2244,7 @@ message CompactRequest 
{ map properties = 6; string initiator_id = 7; string initiator_version = 8; + string catname = 9; } message CompactResponse { @@ -2254,6 +2259,7 @@ message Compact2Request { map properties = 6; string initiator_id = 7; string initiator_version = 8; + string catname = 9; } message Compact2Response { @@ -2286,6 +2292,7 @@ message ShowCompactResponseElement { string initiator_id = 17; string initiator_version = 18; int64 cleaner_start = 19; + string catname = 20; } message ShowCompactResponse { @@ -2299,6 +2306,7 @@ message AddDynamicPartitionsRequest { string tablename = 4; repeated string partitionnames = 5; DataOperationType operation_type = 6; + string catname = 7; } message AddDynamicPartitionsResponse { @@ -2325,6 +2333,7 @@ message CompactionInfoStruct { bool hasoldabort = 14; int64 enqueue_time = 15; int64 retry_retention = 16; + string catname = 17; } message OptionalCompactionInfoStruct { @@ -2370,6 +2379,7 @@ message CompactionMetricsDataStruct { int32 metricvalue = 5; int32 version = 6; int32 threshold = 7; + string catname = 8; } message UpdateCompactionMetricsDataResponse { @@ -2381,6 +2391,7 @@ message RemoveCompactionMetricsDataRequest { string tbl_name = 2; string partition_name = 3; CompactionMetricsMetricType type = 4; + string cat_name = 5; } message RemoveCompactionMetricsDataResponse { @@ -2399,6 +2410,7 @@ message GetLatestCommittedCompactionInfoRequest { string tablename = 2; repeated string partionnames = 3; int64 last_compaction_id = 4; + string catname = 5; } message GetLatestCommittedCompactionInfoResponse { @@ -3191,6 +3203,7 @@ message GetAllWriteEventInfoRequest { int64 txn_id = 1; string db_name = 2; string table_name = 3; + string cat_name = 4; } message GetAllWriteEventInfoResponse { diff --git a/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift b/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift index ec9246a32d3c..aa2fdeb4c210 100644 --- 
a/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift +++ b/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift @@ -1137,6 +1137,7 @@ struct WriteEventInfo { 5: optional string partition, 6: optional string tableObj, // repl txn task does not need table object for commit 7: optional string partitionObj, + 8: optional string catalog = "hive", } struct ReplLastIdInfo { @@ -1174,11 +1175,12 @@ struct ReplTblWriteIdStateRequest { 4: required string dbName, 5: required string tableName, 6: optional list partNames, + 7: optional string catName = "hive", } // Request msg to get the valid write ids list for the given list of tables wrt to input validTxnList struct GetValidWriteIdsRequest { - 1: required list fullTableNames, // Full table names of format . + 1: required list fullTableNames, // Full table names of format .. 2: optional string validTxnList, // Valid txn list string wrt the current txn of the caller 3: optional i64 writeId, //write id to be used to get the current txn id } @@ -1216,6 +1218,7 @@ struct AllocateTableWriteIdsRequest { // If false, reuse previously allocate writeIds for txnIds. 
If true, remove older txnId to writeIds mappings // and regenerate (this is useful during re-compilation when we need to ensure writeIds are regenerated) 6: optional bool reallocate = false; + 7: optional string catName = "hive", } struct AllocateTableWriteIdsResponse { @@ -1225,6 +1228,7 @@ struct AllocateTableWriteIdsResponse { struct MaxAllocatedTableWriteIdRequest { 1: required string dbName, 2: required string tableName, + 3: optional string catName = "hive", } struct MaxAllocatedTableWriteIdResponse { 1: required i64 maxWriteId, @@ -1233,6 +1237,7 @@ struct SeedTableWriteIdsRequest { 1: required string dbName, 2: required string tableName, 3: required i64 seedWriteId, + 4: optional string catName = "hive", } struct SeedTxnIdRequest { 1: required i64 seedTxnId, @@ -1344,6 +1349,7 @@ struct CompactionRequest { 9: optional string poolName 10: optional i32 numberOfBuckets 11: optional string orderByClause; + 12: optional string catName = "hive", } struct CompactionInfoStruct { @@ -1366,6 +1372,7 @@ struct CompactionInfoStruct { 17: optional string poolname 18: optional i32 numberOfBuckets 19: optional string orderByClause; + 20: optional string catName = "hive", } struct OptionalCompactionInfoStruct { @@ -1386,6 +1393,7 @@ struct CompactionMetricsDataStruct { 5: required i32 metricvalue 6: required i32 version 7: required i32 threshold + 8: optional string catName = "hive", } struct CompactionMetricsDataResponse { @@ -1397,6 +1405,7 @@ struct CompactionMetricsDataRequest { 2: required string tblName, 3: optional string partitionName 4: required CompactionMetricsMetricType type + 5: optional string catName = "hive", } struct CompactionResponse { @@ -1415,7 +1424,8 @@ struct ShowCompactRequest { 6: optional CompactionType type, 7: optional string state, 8: optional i64 limit, - 9: optional string order + 9: optional string order, + 10: optional string catName = "hive", } struct ShowCompactResponseElement { @@ -1442,8 +1452,8 @@ struct ShowCompactResponseElement { 
21: optional i64 nextTxnId, 22: optional i64 txnId, 23: optional i64 commitTime, - 24: optional i64 hightestWriteId - + 24: optional i64 hightestWriteId, + 25: optional string catName = "hive", } struct ShowCompactResponse { @@ -1471,6 +1481,7 @@ struct GetLatestCommittedCompactionInfoRequest { 2: required string tablename, 3: optional list partitionnames, 4: optional i64 lastCompactionId, + 5: optional string catName = "hive", } struct GetLatestCommittedCompactionInfoResponse { @@ -1489,7 +1500,8 @@ struct AddDynamicPartitions { 3: required string dbname, 4: required string tablename, 5: required list partitionnames, - 6: optional DataOperationType operationType = DataOperationType.UNSET + 6: optional DataOperationType operationType = DataOperationType.UNSET, + 7: optional string catName = "hive", } struct BasicTxnInfo { @@ -1589,6 +1601,7 @@ struct WriteNotificationLogRequest { 4: required string table, 5: required InsertEventRequestData fileInfo, 6: optional list partitionVals, + 7: optional string cat = "hive", } struct WriteNotificationLogResponse { @@ -2538,7 +2551,8 @@ struct Package { struct GetAllWriteEventInfoRequest { 1: required i64 txnId, 2: optional string dbName, - 3: optional string tableName + 3: optional string tableName, + 4: optional string catName = "hive" } struct DeleteColumnStatisticsRequest { @@ -3392,6 +3406,7 @@ const string BUCKET_COUNT = "bucket_count", const string FIELD_TO_DIMENSION = "field_to_dimension", const string IF_PURGE = "ifPurge", const string META_TABLE_NAME = "name", +const string META_TABLE_CAT = "cat", const string META_TABLE_DB = "db", const string META_TABLE_LOCATION = "location", const string META_TABLE_SERDE = "serde", diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java index 9838d9d2573c..05961d341fa4 100644 --- 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java @@ -6103,11 +6103,12 @@ public void commit_txn(CommitTxnRequest rqst) throws TException { List partitionValue = null; Partition ptnObj = null; String root; - Table tbl = getTblObject(writeEventInfo.getDatabase(), writeEventInfo.getTable(), null); + Table tbl = getTblObject(writeEventInfo.getDatabase(), writeEventInfo.getTable(), writeEventInfo.getCatalog()); if (writeEventInfo.getPartition() != null && !writeEventInfo.getPartition().isEmpty()) { partitionValue = Warehouse.getPartValuesFromPartName(writeEventInfo.getPartition()); - ptnObj = getPartitionObj(writeEventInfo.getDatabase(), writeEventInfo.getTable(), partitionValue, tbl); + ptnObj = getPartitionObj(writeEventInfo.getCatalog(), writeEventInfo.getDatabase(), writeEventInfo.getTable(), + partitionValue, tbl); root = ptnObj.getSd().getLocation(); } else { root = tbl.getSd().getLocation(); @@ -6134,6 +6135,7 @@ public void commit_txn(CommitTxnRequest rqst) throws TException { WriteNotificationLogRequest wnRqst = new WriteNotificationLogRequest(targetTxnId, writeEventInfo.getWriteId(), writeEventInfo.getDatabase(), writeEventInfo.getTable(), insertData); + wnRqst.setCat(writeEventInfo.getCatalog()); if (partitionValue != null) { wnRqst.setPartitionVals(partitionValue); } @@ -6218,8 +6220,8 @@ public boolean update_compaction_metrics_data(CompactionMetricsDataStruct struct @Override public void remove_compaction_metrics_data(CompactionMetricsDataRequest request) throws MetaException, TException { - getTxnHandler().removeCompactionMetricsData(request.getDbName(), request.getTblName(), request.getPartitionName(), - CompactionMetricsDataConverter.thriftCompactionMetricType2DbType(request.getType())); + getTxnHandler().removeCompactionMetricsData(request.getCatName(), request.getDbName(), 
request.getTblName(), + request.getPartitionName(), CompactionMetricsDataConverter.thriftCompactionMetricType2DbType(request.getType())); } @Override @@ -6236,12 +6238,12 @@ public void update_compactor_state(CompactionInfoStruct cr, long highWaterMark) @Override public GetLatestCommittedCompactionInfoResponse get_latest_committed_compaction_info( GetLatestCommittedCompactionInfoRequest rqst) throws MetaException { - if (rqst.getDbname() == null || rqst.getTablename() == null) { - throw new MetaException("Database name and table name cannot be null."); + if (rqst.getCatName() == null || rqst.getDbname() == null || rqst.getTablename() == null) { + throw new MetaException("Catalog name, Database name and table name cannot be null."); } GetLatestCommittedCompactionInfoResponse response = getTxnHandler().getLatestCommittedCompactionInfo(rqst); return FilterUtils.filterCommittedCompactionInfoStructIfEnabled(isServerFilterEnabled, filterHook, - getDefaultCatalog(conf), rqst.getDbname(), rqst.getTablename(), response); + rqst.getCatName(), rqst.getDbname(), rqst.getTablename(), response); } @Override @@ -6250,7 +6252,7 @@ public AllocateTableWriteIdsResponse allocate_table_write_ids( AllocateTableWriteIdsResponse response = getTxnHandler().allocateTableWriteIds(rqst); if (listeners != null && !listeners.isEmpty()) { MetaStoreListenerNotifier.notifyEvent(listeners, EventType.ALLOC_WRITE_ID, - new AllocWriteIdEvent(response.getTxnToWriteIds(), rqst.getDbName(), + new AllocWriteIdEvent(response.getTxnToWriteIds(), rqst.getCatName(), rqst.getDbName(), rqst.getTableName(), this)); } return response; @@ -6294,10 +6296,12 @@ private Table getTblObject(String db, String table, String catalog) throws MetaE return get_table_req(req).getTable(); } - private Partition getPartitionObj(String db, String table, List partitionVals, Table tableObj) - throws MetaException, NoSuchObjectException { + private Partition getPartitionObj(String catName, String db, String table, List 
partitionVals, Table tableObj) + throws MetaException, NoSuchObjectException, TException { if (tableObj.isSetPartitionKeys() && !tableObj.getPartitionKeys().isEmpty()) { - return get_partition(db, table, partitionVals); + GetPartitionRequest rqst = new GetPartitionRequest(db, table, partitionVals); + rqst.setCatName(catName); + return get_partition_req(rqst).getPartition(); } return null; } @@ -6305,8 +6309,9 @@ private Partition getPartitionObj(String db, String table, List partitio @Override public WriteNotificationLogResponse add_write_notification_log(WriteNotificationLogRequest rqst) throws TException { - Table tableObj = getTblObject(rqst.getDb(), rqst.getTable(), null); - Partition ptnObj = getPartitionObj(rqst.getDb(), rqst.getTable(), rqst.getPartitionVals(), tableObj); + Table tableObj = getTblObject(rqst.getDb(), rqst.getTable(), rqst.getCat()); + Partition ptnObj = getPartitionObj(rqst.getCat(), rqst.getDb(), rqst.getTable(), rqst.getPartitionVals(), + tableObj); addTxnWriteNotificationLog(tableObj, ptnObj, rqst); return new WriteNotificationLogResponse(); } @@ -8054,7 +8059,8 @@ public List get_all_write_event_info(GetAllWriteEventInfoRequest Exception ex = null; try { List writeEventInfoList = - getMS().getAllWriteEventInfo(request.getTxnId(), request.getDbName(), request.getTableName()); + getMS().getAllWriteEventInfo(request.getTxnId(), request.getCatName(), + request.getDbName(), request.getTableName()); return writeEventInfoList == null ? 
Collections.emptyList() : writeEventInfoList; } catch (Exception e) { LOG.error("Caught exception", e); diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java index 447d1a6a2c21..033cf6227bd9 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java @@ -1554,7 +1554,7 @@ public List isPartOfMaterializedView(String catName, String dbName, Stri Set tables = p.getTables(); for (MMVSource sourceTable : tables) { MTable table = sourceTable.getTable(); - if (dbName.equals(table.getDatabase().getName()) && tblName.equals(table.getTableName())) { + if (catName.equals(table.getDatabase().getCatalogName()) && dbName.equals(table.getDatabase().getName()) && tblName.equals(table.getTableName())) { LOG.info("Cannot drop table " + table.getTableName() + " as it is being used by MView " + p.getTblName()); mViewList.add(p.getDbName() + "." 
+ p.getTblName()); @@ -11140,7 +11140,14 @@ public void cleanWriteNotificationEvents(int olderThan) { } @Override + @Deprecated public List getAllWriteEventInfo(long txnId, String dbName, String tableName) throws MetaException { + return getAllWriteEventInfo(txnId, Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName); + } + + @Override + public List getAllWriteEventInfo(long txnId, String catName, String dbName, String tableName) + throws MetaException { List writeEventInfoList = null; boolean commited = false; Query query = null; @@ -11148,6 +11155,9 @@ public List getAllWriteEventInfo(long txnId, String dbName, Stri openTransaction(); List parameterVals = new ArrayList<>(); StringBuilder filterBuilder = new StringBuilder(" txnId == " + Long.toString(txnId)); + if (catName != null && !"*".equals(catName)) { + appendSimpleCondition(filterBuilder, "catalog", new String[]{catName}, parameterVals); + } if (dbName != null && !"*".equals(dbName)) { // * means get all database, so no need to add filter appendSimpleCondition(filterBuilder, "database", new String[]{dbName}, parameterVals); } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java index 9ac7f921e077..b796f530f9fa 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java @@ -2028,8 +2028,18 @@ Map> getPartitionColsWithStats(String catName, String dbNam * @param dbName the name of db for which dump is being taken * @param tableName the name of the table for which the dump is being taken */ + @Deprecated List getAllWriteEventInfo(long txnId, String dbName, String tableName) throws MetaException; + /** + * Get all write events for a specific transaction . 
+ * @param txnId get all the events done by this transaction + * @param catName the name of catalog for which dump is being taken + * @param dbName the name of db for which dump is being taken + * @param tableName the name of the table for which the dump is being taken + */ + List getAllWriteEventInfo(long txnId, String catName, String dbName, String tableName) throws MetaException; + /** * Checking if table is part of a materialized view. * @param catName catalog the table is in diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java index 786cdd615fc0..b2f87866d19e 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java @@ -224,8 +224,10 @@ private void handleAlterTableTransactionalProp(PreAlterTableEvent context) throw files already present in the table. Not needed if oldTable is insertOnly.*/ TxnStore t = TxnUtils.getTxnStore(getConf()); //For now assume no partition may have > 10M files. Perhaps better to count them. 
- t.seedWriteId(new SeedTableWriteIdsRequest(newTable.getDbName(), - newTable.getTableName(), 10000000)); + SeedTableWriteIdsRequest rqst = new SeedTableWriteIdsRequest(newTable.getDbName(), + newTable.getTableName(), 10000000); + rqst.setCatName(newTable.getCatName()); + t.seedWriteId(rqst); } } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java index 7df41ec68a30..197ef8095e4d 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java @@ -3049,9 +3049,15 @@ public long getCacheUpdateCount() { rawStore.cleanWriteNotificationEvents(olderThan); } + @Deprecated @Override public List getAllWriteEventInfo(long txnId, String dbName, String tableName) throws MetaException { - return rawStore.getAllWriteEventInfo(txnId, dbName, tableName); + return getAllWriteEventInfo(txnId, Warehouse.DEFAULT_CATALOG_NAME, dbName, tableName); + } + + @Override public List getAllWriteEventInfo(long txnId, String catName, String dbName, String tableName) + throws MetaException { + return rawStore.getAllWriteEventInfo(txnId, catName, dbName, tableName); } static boolean isNotInBlackList(String catName, String dbName, String tblName) { diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/AcidWriteEvent.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/AcidWriteEvent.java index 001179a3f8d6..701d5f889637 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/AcidWriteEvent.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/AcidWriteEvent.java 
@@ -60,6 +60,10 @@ public List getChecksums() { return writeNotificationLogRequest.getFileInfo().getFilesAddedChecksum(); } + public String getCatalog() { + return StringUtils.normalizeIdentifier(writeNotificationLogRequest.getCat()); + } + public String getDatabase() { return StringUtils.normalizeIdentifier(writeNotificationLogRequest.getDb()); } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/AllocWriteIdEvent.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/AllocWriteIdEvent.java index 2a719f2bcb54..2dec887aaec6 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/AllocWriteIdEvent.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/AllocWriteIdEvent.java @@ -35,15 +35,17 @@ public class AllocWriteIdEvent extends ListenerEvent { private final List txnToWriteIdList; private final String tableName; private final String dbName; + private final String catName; - public AllocWriteIdEvent(List txnToWriteIdList, String dbName, String tableName) { - this(txnToWriteIdList, dbName, tableName, null); + public AllocWriteIdEvent(List txnToWriteIdList, String catName, String dbName, String tableName) { + this(txnToWriteIdList, catName, dbName, tableName, null); } - public AllocWriteIdEvent(List txnToWriteIdList, String dbName, String tableName, IHMSHandler handler) { + public AllocWriteIdEvent(List txnToWriteIdList, String catName, String dbName, String tableName, IHMSHandler handler) { super(true, handler); this.txnToWriteIdList = txnToWriteIdList; this.tableName = tableName; + this.catName = catName; this.dbName = dbName; } @@ -58,4 +60,8 @@ public String getTableName() { public String getDbName() { return dbName; } + + public String getCatName() { + return catName; + } } diff --git 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/BatchAcidWriteEvent.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/BatchAcidWriteEvent.java index b98e33d5efbb..abe4e3c2ff4c 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/BatchAcidWriteEvent.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/BatchAcidWriteEvent.java @@ -70,6 +70,10 @@ public List getChecksums(int idx) { return writeNotificationLogRequestList.get(idx).getFileInfo().getFilesAddedChecksum(); } + public String getCatalog(int idx) { + return StringUtils.normalizeIdentifier(writeNotificationLogRequestList.get(idx).getCat()); + } + public String getDatabase(int idx) { return StringUtils.normalizeIdentifier(writeNotificationLogRequestList.get(idx).getDb()); } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/CommitCompactionEvent.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/CommitCompactionEvent.java index 265a43d6756d..345d6ede2eb9 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/CommitCompactionEvent.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/CommitCompactionEvent.java @@ -35,24 +35,26 @@ public class CommitCompactionEvent extends ListenerEvent { private final Long txnId; private final Long compactionId; private final CompactionType type; + private final String catname; private final String dbname; private final String tableName; private final String partName; public CommitCompactionEvent(Long txnId, CompactionInfo ci) { - this(txnId, ci.id, ci.type, ci.dbname, ci.tableName, ci.partName, null); + this(txnId, ci.id, ci.type, ci.catName, ci.dbname, ci.tableName, ci.partName, null); } 
public CommitCompactionEvent(Long txnId, CompactionInfo ci, IHMSHandler handler) { - this(txnId, ci.id, ci.type, ci.dbname, ci.tableName, ci.partName, handler); + this(txnId, ci.id, ci.type, ci.catName, ci.dbname, ci.tableName, ci.partName, handler); } - public CommitCompactionEvent(Long txnId, Long compactionId, CompactionType type, String dbname, String tableName, - String partName, IHMSHandler handler) { + public CommitCompactionEvent(Long txnId, Long compactionId, CompactionType type, String catname, String dbname, + String tableName, String partName, IHMSHandler handler) { super(true, handler); this.txnId = txnId; this.compactionId = compactionId; this.type = type; + this.catname = catname; this.dbname = dbname; this.tableName = tableName; this.partName = partName; @@ -70,6 +72,10 @@ public CompactionType getType() { return type; } + public String getCatName() { + return catname; + } + public String getDbname() { return dbname; } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/CommitTxnEvent.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/CommitTxnEvent.java index b28d4a16074c..a2dfe349b059 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/CommitTxnEvent.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/CommitTxnEvent.java @@ -36,14 +36,15 @@ public class CommitTxnEvent extends ListenerEvent { private final Long txnId; private final TxnType txnType; private final List writeId; + private final List catalogs; private final List databases; public CommitTxnEvent(Long transactionId, IHMSHandler handler) { - this(transactionId, null, handler, null, null); + this(transactionId, null, handler, null, null, null); } public CommitTxnEvent(Long transactionId, TxnType txnType) { - this(transactionId, txnType, null, null, null); + this(transactionId, txnType, 
null, null, null, null); } /** @@ -53,11 +54,12 @@ public CommitTxnEvent(Long transactionId, TxnType txnType) { * @param databases list of databases for which commit txn event is fired * @param writeId write id for transaction */ - public CommitTxnEvent(Long transactionId, TxnType txnType, IHMSHandler handler, List databases, List writeId) { + public CommitTxnEvent(Long transactionId, TxnType txnType, IHMSHandler handler, List catalogs, List databases, List writeId) { super(true, handler); this.txnId = transactionId; this.txnType = txnType; this.writeId = writeId; + this.catalogs = catalogs; this.databases = databases; } @@ -88,4 +90,8 @@ public List getWriteId() { public List getDatabases() { return databases; } + + public List getCatalogs() { + return catalogs; + } } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/CommitCompactionMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/CommitCompactionMessage.java index fa81ac685a4f..a8650622c6d5 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/CommitCompactionMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/CommitCompactionMessage.java @@ -19,9 +19,6 @@ package org.apache.hadoop.hive.metastore.messaging; import org.apache.hadoop.hive.metastore.api.CompactionType; -import org.apache.hadoop.hive.metastore.api.Partition; -import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.metastore.api.WriteEventInfo; import java.util.List; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/CommitTxnMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/CommitTxnMessage.java index 74753762cd99..e1913014956c 100644 --- 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/CommitTxnMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/CommitTxnMessage.java @@ -42,6 +42,8 @@ protected CommitTxnMessage() { public abstract List getWriteIds(); + public abstract List getCatalogs(); + public abstract List getDatabases(); public abstract List getTables(); diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/EventMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/EventMessage.java index c7ddd58127b9..ed68e933f44d 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/EventMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/EventMessage.java @@ -111,6 +111,8 @@ public EventType getEventType() { */ public abstract String getServicePrincipal(); + public abstract String getCat(); + /** * Getter for the name of the Database on which the Metastore operation is done. * @return Database-name (String). 
@@ -133,6 +135,9 @@ public EventMessage checkValid() { if (getEventType() == null) { throw new IllegalStateException("Event-type unset."); } + if (getCat() == null) { + throw new IllegalArgumentException("Cat-name unset"); + } if (getDB() == null) { throw new IllegalArgumentException("DB-name unset."); } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/MessageBuilder.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/MessageBuilder.java index 6a0d0dd93c88..b8ae6ecd24ce 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/MessageBuilder.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/MessageBuilder.java @@ -278,9 +278,9 @@ public AddCheckConstraintMessage buildAddCheckConstraintMessage( return new JSONAddCheckConstraintMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, ccs, now()); } - public DropConstraintMessage buildDropConstraintMessage(String dbName, String tableName, + public DropConstraintMessage buildDropConstraintMessage(String catName, String dbName, String tableName, String constraintName) { - return new JSONDropConstraintMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, dbName, tableName, + return new JSONDropConstraintMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, catName, dbName, tableName, constraintName, now()); } @@ -303,8 +303,8 @@ public OpenTxnMessage buildOpenTxnMessage(Long fromTxnId, Long toTxnId) { return new JSONOpenTxnMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, fromTxnId, toTxnId, now()); } - public CommitTxnMessage buildCommitTxnMessage(Long txnId, List databases, List writeIds) { - return new JSONCommitTxnMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, txnId, now(), databases, writeIds); + public CommitTxnMessage buildCommitTxnMessage(Long txnId, List catalogs, List databases, List writeIds) { + return new 
JSONCommitTxnMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, txnId, now(), catalogs, databases, writeIds); } public AbortTxnMessage buildAbortTxnMessage(Long txnId, List dbsUpdated, List writeIds) { @@ -312,9 +312,9 @@ public AbortTxnMessage buildAbortTxnMessage(Long txnId, List dbsUpdated, } public AllocWriteIdMessage buildAllocWriteIdMessage(List txnToWriteIdList, - String dbName, String tableName) { + String catName, String dbName, String tableName) { return new JSONAllocWriteIdMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, txnToWriteIdList, - dbName, tableName, now()); + catName, dbName, tableName, now()); } public AcidWriteMessage buildAcidWriteMessage(AcidWriteEvent acidWriteEvent, @@ -331,8 +331,8 @@ public JSONUpdateTableColumnStatMessage buildUpdateTableColumnStatMessage(Column colStats, tableObj, parameters, writeId); } - public JSONDeleteTableColumnStatMessage buildDeleteTableColumnStatMessage(String dbName, String colName) { - return new JSONDeleteTableColumnStatMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, now(), dbName, colName); + public JSONDeleteTableColumnStatMessage buildDeleteTableColumnStatMessage(String catName, String dbName, String colName) { + return new JSONDeleteTableColumnStatMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, now(), catName, dbName, colName); } public JSONUpdatePartitionColumnStatMessage buildUpdatePartitionColumnStatMessage(ColumnStatistics colStats, @@ -342,9 +342,9 @@ public JSONUpdatePartitionColumnStatMessage buildUpdatePartitionColumnStatMessag parameters, tableObj, writeId); } - public JSONDeletePartitionColumnStatMessage buildDeletePartitionColumnStatMessage(String dbName, String colName, + public JSONDeletePartitionColumnStatMessage buildDeletePartitionColumnStatMessage(String catName, String dbName, String colName, String partName, List partValues) { - return new JSONDeletePartitionColumnStatMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, now(), dbName, + return new JSONDeletePartitionColumnStatMessage(MS_SERVER_URL, 
MS_SERVICE_PRINCIPAL, now(), catName, dbName, colName, partName, partValues); } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAbortTxnMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAbortTxnMessage.java index d010121f48a2..597b0b3340cb 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAbortTxnMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAbortTxnMessage.java @@ -71,6 +71,11 @@ public Long getTimestamp() { return timestamp; } + @Override + public String getCat() { + return null; + } + @Override public String getDB() { return null; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAcidWriteMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAcidWriteMessage.java index a5d8f78b7fa3..277d98ca7063 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAcidWriteMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAcidWriteMessage.java @@ -39,7 +39,7 @@ public class JSONAcidWriteMessage extends AcidWriteMessage { private Long txnid, writeId, timestamp; @JsonProperty - private String server, servicePrincipal, database, table, partition, tableObjJson, partitionObjJson; + private String server, servicePrincipal, catalog, database, table, partition, tableObjJson, partitionObjJson; @JsonProperty private List files; @@ -56,6 +56,7 @@ public JSONAcidWriteMessage(String server, String servicePrincipal, Long timesta this.txnid = acidWriteEvent.getTxnId(); this.server = server; this.servicePrincipal = servicePrincipal; + this.catalog = 
acidWriteEvent.getCatalog(); this.database = acidWriteEvent.getDatabase(); this.table = acidWriteEvent.getTable(); this.writeId = acidWriteEvent.getWriteId(); @@ -83,6 +84,11 @@ public Long getTimestamp() { return timestamp; } + @Override + public String getCat() { + return catalog; + } + @Override public String getDB() { return database; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddCheckConstraintMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddCheckConstraintMessage.java index 0012603f2785..6cf3988f387c 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddCheckConstraintMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddCheckConstraintMessage.java @@ -68,6 +68,11 @@ public String getServicePrincipal() { return servicePrincipal; } + @Override + public String getCat() { + return null; + } + @Override public String getDB() { return null; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddDefaultConstraintMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddDefaultConstraintMessage.java index d438570f13f0..9add48bb3e4c 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddDefaultConstraintMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddDefaultConstraintMessage.java @@ -68,6 +68,11 @@ public String getServicePrincipal() { return servicePrincipal; } + @Override + public String getCat() { + return null; + } + @Override public String getDB() { return null; diff --git 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddForeignKeyMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddForeignKeyMessage.java index c3d6fb6de890..a01ebaacb3fb 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddForeignKeyMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddForeignKeyMessage.java @@ -73,6 +73,11 @@ public String getServicePrincipal() { return servicePrincipal; } + @Override + public String getCat() { + return null; + } + @Override public String getDB() { return null; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddNotNullConstraintMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddNotNullConstraintMessage.java index f9f351fa35f8..60fab7d8369e 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddNotNullConstraintMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddNotNullConstraintMessage.java @@ -68,6 +68,11 @@ public String getServicePrincipal() { return servicePrincipal; } + @Override + public String getCat() { + return null; + } + @Override public String getDB() { return null; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddPartitionMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddPartitionMessage.java index 6494cb8dc72b..fb2ce6a2c5b3 100644 --- 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddPartitionMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddPartitionMessage.java @@ -44,7 +44,7 @@ public class JSONAddPartitionMessage extends AddPartitionMessage { @JsonProperty - String server, servicePrincipal, db, table, tableType, tableObjJson; + String server, servicePrincipal, cat, db, table, tableType, tableObjJson; @JsonProperty Long timestamp; @@ -72,6 +72,7 @@ public JSONAddPartitionMessage(String server, String servicePrincipal, Table tab Long timestamp) { this.server = server; this.servicePrincipal = servicePrincipal; + this.cat = tableObj.getCatName(); this.db = tableObj.getDbName(); this.table = tableObj.getTableName(); this.tableType = tableObj.getTableType(); @@ -104,6 +105,11 @@ public String getServicePrincipal() { return servicePrincipal; } + @Override + public String getCat() { + return cat; + } + @Override public String getDB() { return db; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddPrimaryKeyMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddPrimaryKeyMessage.java index 606a051635c1..6bd32e52f157 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddPrimaryKeyMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddPrimaryKeyMessage.java @@ -73,6 +73,11 @@ public String getServicePrincipal() { return servicePrincipal; } + @Override + public String getCat() { + return null; + } + @Override public String getDB() { return null; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddUniqueConstraintMessage.java 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddUniqueConstraintMessage.java index ebdcd94e937e..c258ae0b96e0 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddUniqueConstraintMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddUniqueConstraintMessage.java @@ -70,6 +70,11 @@ public String getServicePrincipal() { return servicePrincipal; } + @Override + public String getCat() { + return null; + } + @Override public String getDB() { return null; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAllocWriteIdMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAllocWriteIdMessage.java index 33b24fedb73b..92a86ccc2158 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAllocWriteIdMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAllocWriteIdMessage.java @@ -33,7 +33,7 @@ public class JSONAllocWriteIdMessage extends AllocWriteIdMessage { @JsonProperty - private String server, servicePrincipal, dbName, tableName; + private String server, servicePrincipal, catName, dbName, tableName; @JsonProperty private List txnIdList, writeIdList; @@ -50,7 +50,7 @@ public JSONAllocWriteIdMessage() { } public JSONAllocWriteIdMessage(String server, String servicePrincipal, - List txnToWriteIdList, String dbName, String tableName, Long timestamp) { + List txnToWriteIdList, String catName, String dbName, String tableName, Long timestamp) { this.server = server; this.servicePrincipal = servicePrincipal; this.timestamp = timestamp; @@ -61,6 +61,7 @@ public JSONAllocWriteIdMessage(String server, String servicePrincipal, 
this.writeIdList.add(txnToWriteId.getWriteId()); } this.tableName = tableName; + this.catName = catName; this.dbName = dbName; this.txnToWriteIdList = txnToWriteIdList; } @@ -75,6 +76,11 @@ public String getServicePrincipal() { return servicePrincipal; } + @Override + public String getCat() { + return catName; + } + @Override public String getDB() { return dbName; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAlterCatalogMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAlterCatalogMessage.java index 7b7c12e490f2..923bcf3c3436 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAlterCatalogMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAlterCatalogMessage.java @@ -50,6 +50,11 @@ public JSONAlterCatalogMessage(String server, String servicePrincipal, checkValid(); } + @Override + public String getCat() { + return null; + } + @Override public String getDB() { return null; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAlterDatabaseMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAlterDatabaseMessage.java index 5f9dae4675a0..585a7e475354 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAlterDatabaseMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAlterDatabaseMessage.java @@ -31,7 +31,7 @@ public class JSONAlterDatabaseMessage extends AlterDatabaseMessage { @JsonProperty - String server, servicePrincipal, db, dbObjBeforeJson, dbObjAfterJson; + String server, servicePrincipal, cat, db, dbObjBeforeJson, dbObjAfterJson; 
@JsonProperty Long timestamp; @@ -46,6 +46,7 @@ public JSONAlterDatabaseMessage(String server, String servicePrincipal, Database dbObjBefore, Database dbObjAfter, Long timestamp) { this.server = server; this.servicePrincipal = servicePrincipal; + this.cat = dbObjBefore.getCatalogName(); this.db = dbObjBefore.getName(); this.timestamp = timestamp; try { @@ -67,6 +68,11 @@ public String getServicePrincipal() { return servicePrincipal; } + @Override + public String getCat() { + return cat; + } + @Override public String getDB() { return db; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAlterPartitionMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAlterPartitionMessage.java index 414402f70591..a8537644286d 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAlterPartitionMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAlterPartitionMessage.java @@ -34,7 +34,7 @@ public class JSONAlterPartitionMessage extends AlterPartitionMessage { @JsonProperty - String server, servicePrincipal, db, table, tableType, tableObjJson; + String server, servicePrincipal, cat, db, table, tableType, tableObjJson; @JsonProperty String isTruncateOp; @@ -58,6 +58,7 @@ public JSONAlterPartitionMessage(String server, String servicePrincipal, Table t Partition partitionObjBefore, Partition partitionObjAfter, boolean isTruncateOp, Long writeId, Long timestamp) { this.server = server; this.servicePrincipal = servicePrincipal; + this.cat = tableObj.getCatName(); this.db = tableObj.getDbName(); this.table = tableObj.getTableName(); this.tableType = tableObj.getTableType(); @@ -85,6 +86,11 @@ public String getServicePrincipal() { return servicePrincipal; } + @Override + public String getCat() { + return cat; + } + @Override 
public String getDB() { return db; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAlterPartitionsMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAlterPartitionsMessage.java index 41622c8c90bb..03c051f7fadc 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAlterPartitionsMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAlterPartitionsMessage.java @@ -36,7 +36,7 @@ public class JSONAlterPartitionsMessage extends AlterPartitionsMessage { @JsonProperty - String server, servicePrincipal, db, table, tableType, tableObjJson; + String server, servicePrincipal, cat, db, table, tableType, tableObjJson; @JsonProperty String isTruncateOp; @@ -60,6 +60,7 @@ public JSONAlterPartitionsMessage(String server, String servicePrincipal, Table List partitionsAfter, boolean isTruncateOp, Long writeId, Long timestamp) { this.server = server; this.servicePrincipal = servicePrincipal; + this.cat = tableObj.getCatName(); this.db = tableObj.getDbName(); this.table = tableObj.getTableName(); this.tableType = tableObj.getTableType(); @@ -92,6 +93,11 @@ public String getServicePrincipal() { return servicePrincipal; } + @Override + public String getCat() { + return cat; + } + @Override public String getDB() { return db; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAlterTableMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAlterTableMessage.java index 8c621b2701a3..9d8ec09b4e90 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAlterTableMessage.java +++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAlterTableMessage.java @@ -31,7 +31,7 @@ public class JSONAlterTableMessage extends AlterTableMessage { @JsonProperty - String server, servicePrincipal, db, table, tableType, tableObjBeforeJson, tableObjAfterJson; + String server, servicePrincipal, cat, db, table, tableType, tableObjBeforeJson, tableObjAfterJson; @JsonProperty String isTruncateOp; @@ -49,6 +49,7 @@ public JSONAlterTableMessage(String server, String servicePrincipal, Table table boolean isTruncateOp, Long writeId, Long timestamp) { this.server = server; this.servicePrincipal = servicePrincipal; + this.cat = tableObjBefore.getCatName(); this.db = tableObjBefore.getDbName(); this.table = tableObjBefore.getTableName(); this.tableType = tableObjBefore.getTableType(); @@ -74,6 +75,11 @@ public String getServicePrincipal() { return servicePrincipal; } + @Override + public String getCat() { + return cat; + } + @Override public String getDB() { return db; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCommitCompactionMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCommitCompactionMessage.java index ed3eb7a9b76c..58b5bd74c942 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCommitCompactionMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCommitCompactionMessage.java @@ -47,6 +47,9 @@ public class JSONCommitCompactionMessage extends CommitCompactionMessage { @JsonProperty private CompactionType type; + @JsonProperty + private String catName; + @JsonProperty private String dbname; @@ -70,6 +73,7 @@ public JSONCommitCompactionMessage(String server, String servicePrincipal, long this.txnid = event.getTxnId(); this.compactionId = 
event.getCompactionId(); this.type = event.getType(); + this.catName = event.getCatName(); this.dbname = event.getDbname(); this.tableName = event.getTableName(); this.partName = event.getPartName(); @@ -90,6 +94,11 @@ public String getServer() { return server; } + @Override + public String getCat() { + return catName; + } + @Override public String getDB() { return dbname; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCommitTxnMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCommitTxnMessage.java index a146bc6fea0b..ba3af6ff0d57 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCommitTxnMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCommitTxnMessage.java @@ -50,7 +50,7 @@ public class JSONCommitTxnMessage extends CommitTxnMessage { private List writeIds; @JsonProperty - private List databases, tables, partitions, tableObjs, partitionObjs, files; + private List catalogs, databases, tables, partitions, tableObjs, partitionObjs, files; /** * Default constructor, needed for Jackson. 
@@ -63,6 +63,7 @@ public JSONCommitTxnMessage(String server, String servicePrincipal, Long txnid, this.txnid = txnid; this.server = server; this.servicePrincipal = servicePrincipal; + this.catalogs = null; this.databases = null; this.tables = null; this.writeIds = null; @@ -72,8 +73,9 @@ public JSONCommitTxnMessage(String server, String servicePrincipal, Long txnid, this.files = null; } - public JSONCommitTxnMessage(String server, String servicePrincipal, Long txnid, Long timestamp, List databases, List writeIds) { + public JSONCommitTxnMessage(String server, String servicePrincipal, Long txnid, Long timestamp, List catalogs, List databases, List writeIds) { this(server, servicePrincipal, txnid, timestamp); + this.catalogs = catalogs; this.databases = databases; this.writeIds = writeIds; } @@ -88,6 +90,11 @@ public Long getTimestamp() { return timestamp; } + @Override + public String getCat() { + return null; + } + @Override public String getDB() { return null; @@ -108,6 +115,11 @@ public List getWriteIds() { return writeIds; } + @Override + public List getCatalogs() { + return catalogs; + } + @Override public List getDatabases() { return databases; @@ -146,6 +158,9 @@ public List getFilesList() { @Override public void addWriteEventInfo(List writeEventInfoList) { + if (this.catalogs == null) { + this.catalogs = Lists.newArrayList(); + } if (this.databases == null) { this.databases = Lists.newArrayList(); } @@ -169,6 +184,7 @@ public void addWriteEventInfo(List writeEventInfoList) { } for (WriteEventInfo writeEventInfo : writeEventInfoList) { + this.catalogs.add(writeEventInfo.getCatalog()); this.databases.add(writeEventInfo.getDatabase()); this.tables.add(writeEventInfo.getTable()); this.writeIds.add(writeEventInfo.getWriteId()); diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCreateCatalogMessage.java 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCreateCatalogMessage.java index e9038d03d928..1ba10516588d 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCreateCatalogMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCreateCatalogMessage.java @@ -45,10 +45,15 @@ public JSONCreateCatalogMessage(String server, String servicePrincipal, String c } @Override - public String getDB() { - return null; + public String getCat() { + return catalog; } + @Override + public String getDB() { + return null; + } + @Override public String getServer() { return server; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCreateDatabaseMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCreateDatabaseMessage.java index 4ee854adb3c6..dd8ef85e1fc6 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCreateDatabaseMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCreateDatabaseMessage.java @@ -32,7 +32,7 @@ public class JSONCreateDatabaseMessage extends CreateDatabaseMessage { @JsonProperty - String server, servicePrincipal, db, dbJson; + String server, servicePrincipal, cat, db, dbJson; @JsonProperty Long timestamp; @@ -46,6 +46,7 @@ public JSONCreateDatabaseMessage(String server, String servicePrincipal, Databas Long timestamp) { this.server = server; this.servicePrincipal = servicePrincipal; + this.cat = db.getCatalogName(); this.db = db.getName(); this.timestamp = timestamp; try { @@ -61,6 +62,11 @@ public Database getDatabaseObject() throws Exception { return (Database) MessageBuilder.getTObj(dbJson, Database.class); } + @Override + public
String getCat() { + return cat; + } + @Override public String getDB() { return db; } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCreateFunctionMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCreateFunctionMessage.java index bb50052a2b28..17b15c17c609 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCreateFunctionMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCreateFunctionMessage.java @@ -32,7 +32,7 @@ public class JSONCreateFunctionMessage extends CreateFunctionMessage { @JsonProperty - String server, servicePrincipal, db, functionObjJson; + String server, servicePrincipal, cat, db, functionObjJson; @JsonProperty Long timestamp; @@ -45,6 +45,7 @@ public JSONCreateFunctionMessage() {} public JSONCreateFunctionMessage(String server, String servicePrincipal, Function fn, Long timestamp) { this.server = server; this.servicePrincipal = servicePrincipal; + this.cat = fn.getCatName(); this.db = fn.getDbName(); this.timestamp = timestamp; try { @@ -55,6 +56,11 @@ public JSONCreateFunctionMessage(String server, String servicePrincipal, Functio checkValid(); } + @Override + public String getCat() { + return cat; + } + @Override public String getDB() { return db; } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCreateTableMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCreateTableMessage.java index 145ee4b19979..fc9d8052476e 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCreateTableMessage.java +++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCreateTableMessage.java @@ -36,7 +36,7 @@ public class JSONCreateTableMessage extends CreateTableMessage { @JsonProperty - String server, servicePrincipal, db, table, tableType, tableObjJson; + String server, servicePrincipal, cat, db, table, tableType, tableObjJson; @JsonProperty Long timestamp; @JsonProperty @@ -48,10 +48,11 @@ public class JSONCreateTableMessage extends CreateTableMessage { public JSONCreateTableMessage() { } - public JSONCreateTableMessage(String server, String servicePrincipal, String db, String table, + public JSONCreateTableMessage(String server, String servicePrincipal, String cat, String db, String table, String tableType, Long timestamp) { this.server = server; this.servicePrincipal = servicePrincipal; + this.cat = cat; this.db = db; this.table = table; this.tableType = tableType; @@ -59,14 +60,14 @@ public JSONCreateTableMessage(String server, String servicePrincipal, String db, checkValid(); } - public JSONCreateTableMessage(String server, String servicePrincipal, String db, String table, + public JSONCreateTableMessage(String server, String servicePrincipal, String cat, String db, String table, Long timestamp) { - this(server, servicePrincipal, db, table, null, timestamp); + this(server, servicePrincipal, cat, db, table, null, timestamp); } public JSONCreateTableMessage(String server, String servicePrincipal, Table tableObj, Iterator fileIter, Long timestamp) { - this(server, servicePrincipal, tableObj.getDbName(), tableObj.getTableName(), + this(server, servicePrincipal, tableObj.getCatName(), tableObj.getDbName(), tableObj.getTableName(), tableObj.getTableType(), timestamp); try { this.tableObjJson = MessageBuilder.createTableObjJson(tableObj); @@ -86,6 +87,11 @@ public String getServicePrincipal() { return servicePrincipal; } + @Override + public String getCat() { + return cat; + } + @Override public String getDB() { return db; 
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONDeletePartitionColumnStatMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONDeletePartitionColumnStatMessage.java index a06348e56b90..cd222ccd5aec 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONDeletePartitionColumnStatMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONDeletePartitionColumnStatMessage.java @@ -32,7 +32,7 @@ public class JSONDeletePartitionColumnStatMessage extends DeletePartitionColumnS private Long timestamp; @JsonProperty - private String server, servicePrincipal, database, colName, partName; + private String server, servicePrincipal, catalog, database, colName, partName; @JsonProperty private List partValues; @@ -44,11 +44,12 @@ public JSONDeletePartitionColumnStatMessage() { } public JSONDeletePartitionColumnStatMessage(String server, String servicePrincipal, Long timestamp, - String dbName, String colName, String partName, + String catName, String dbName, String colName, String partName, List partValues) { this.timestamp = timestamp; this.server = server; this.servicePrincipal = servicePrincipal; + this.catalog = catName; this.database = dbName; this.partValues = partValues; this.partName = partName; @@ -60,6 +61,11 @@ public Long getTimestamp() { return timestamp; } + @Override + public String getCat() { + return catalog; + } + @Override public String getDB() { return database; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONDeleteTableColumnStatMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONDeleteTableColumnStatMessage.java index 4fdba253f38f..614fdea80f58 100644 --- 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONDeleteTableColumnStatMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONDeleteTableColumnStatMessage.java @@ -31,7 +31,7 @@ public class JSONDeleteTableColumnStatMessage extends DeleteTableColumnStatMessa private Long timestamp; @JsonProperty - private String server, servicePrincipal, database, colName; + private String server, servicePrincipal, catalog, database, colName; /** * Default constructor, needed for Jackson. @@ -40,10 +40,11 @@ public JSONDeleteTableColumnStatMessage() { } public JSONDeleteTableColumnStatMessage(String server, String servicePrincipal, Long timestamp, - String dbName, String colName) { + String catName, String dbName, String colName) { this.timestamp = timestamp; this.server = server; this.servicePrincipal = servicePrincipal; + this.catalog = catName; this.database = dbName; this.colName = colName; } @@ -53,6 +54,11 @@ public Long getTimestamp() { return timestamp; } + @Override + public String getCat() { + return catalog; + } + @Override public String getDB() { return database; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONDropCatalogMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONDropCatalogMessage.java index 35763511734c..2135318a2f81 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONDropCatalogMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONDropCatalogMessage.java @@ -41,6 +41,11 @@ public JSONDropCatalogMessage(String server, String servicePrincipal, String cat this.timestamp = timestamp; } + @Override + public String getCat() { + return catalog; + } + @Override public String 
getDB() { return null; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONDropConstraintMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONDropConstraintMessage.java index 8dcfd8b35df3..6c9793296e71 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONDropConstraintMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONDropConstraintMessage.java @@ -29,7 +29,7 @@ public class JSONDropConstraintMessage extends DropConstraintMessage { @JsonProperty - String server, servicePrincipal, dbName, tableName, constraintName; + String server, servicePrincipal, catName, dbName, tableName, constraintName; @JsonProperty Long timestamp; @@ -40,11 +40,12 @@ public class JSONDropConstraintMessage extends DropConstraintMessage { public JSONDropConstraintMessage() { } - public JSONDropConstraintMessage(String server, String servicePrincipal, String dbName, + public JSONDropConstraintMessage(String server, String servicePrincipal, String catName, String dbName, String tableName, String constraintName, Long timestamp) { this.server = server; this.servicePrincipal = servicePrincipal; this.timestamp = timestamp; + this.catName = catName; this.dbName = dbName; this.tableName = tableName; this.constraintName = constraintName; @@ -60,6 +61,11 @@ public String getServicePrincipal() { return servicePrincipal; } + @Override + public String getCat() { + return catName; + } + @Override public String getDB() { return dbName; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONDropDatabaseMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONDropDatabaseMessage.java index ae791610ae56..32db6c37502c 100644 --- 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONDropDatabaseMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONDropDatabaseMessage.java @@ -32,7 +32,7 @@ public class JSONDropDatabaseMessage extends DropDatabaseMessage { @JsonProperty - String server, servicePrincipal, db, dbJson; + String server, servicePrincipal, cat, db, dbJson; @JsonProperty Long timestamp; @@ -45,6 +45,7 @@ public JSONDropDatabaseMessage() {} public JSONDropDatabaseMessage(String server, String servicePrincipal, Database db, Long timestamp) { this.server = server; this.servicePrincipal = servicePrincipal; + this.cat = db.getCatalogName(); this.db = db.getName(); this.timestamp = timestamp; try { @@ -62,6 +63,11 @@ public JSONDropDatabaseMessage(String server, String servicePrincipal, Database @Override public String getServicePrincipal() { return servicePrincipal; } + @Override + public String getCat() { + return cat; + } + @Override public String getDB() { return db; } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONDropFunctionMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONDropFunctionMessage.java index a7a7a1f1dfd2..57ff411f380a 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONDropFunctionMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONDropFunctionMessage.java @@ -30,7 +30,7 @@ public class JSONDropFunctionMessage extends DropFunctionMessage { @JsonProperty - String server, servicePrincipal, db, functionName; + String server, servicePrincipal, cat, db, functionName; @JsonProperty Long timestamp; @@ -43,12 +43,18 @@ public JSONDropFunctionMessage() {} public 
JSONDropFunctionMessage(String server, String servicePrincipal, Function fn, Long timestamp) { this.server = server; this.servicePrincipal = servicePrincipal; + this.cat = fn.getCatName(); this.db = fn.getDbName(); this.functionName = fn.getFunctionName(); this.timestamp = timestamp; checkValid(); } + @Override + public String getCat() { + return cat; + } + @Override public String getDB() { return db; } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONDropPartitionMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONDropPartitionMessage.java index 23e5496a67b9..2f57ec33ac27 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONDropPartitionMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONDropPartitionMessage.java @@ -35,7 +35,7 @@ public class JSONDropPartitionMessage extends DropPartitionMessage { @JsonProperty - String server, servicePrincipal, db, table, tableType, tableObjJson; + String server, servicePrincipal, cat, db, table, tableType, tableObjJson; @JsonProperty Long timestamp; @@ -49,15 +49,16 @@ public class JSONDropPartitionMessage extends DropPartitionMessage { public JSONDropPartitionMessage() { } - public JSONDropPartitionMessage(String server, String servicePrincipal, String db, String table, + public JSONDropPartitionMessage(String server, String servicePrincipal, String cat, String db, String table, List> partitions, Long timestamp) { - this(server, servicePrincipal, db, table, null, partitions, timestamp); + this(server, servicePrincipal, cat, db, table, null, partitions, timestamp); } - public JSONDropPartitionMessage(String server, String servicePrincipal, String db, String table, + public JSONDropPartitionMessage(String server, String servicePrincipal, String cat, String db, 
String table, String tableType, List> partitions, Long timestamp) { this.server = server; this.servicePrincipal = servicePrincipal; + this.cat = cat; this.db = db; this.table = table; this.tableType = tableType; @@ -68,7 +69,7 @@ public JSONDropPartitionMessage(String server, String servicePrincipal, String d public JSONDropPartitionMessage(String server, String servicePrincipal, Table tableObj, List> partitionKeyValues, long timestamp) { - this(server, servicePrincipal, tableObj.getDbName(), tableObj.getTableName(), + this(server, servicePrincipal, tableObj.getCatName(), tableObj.getDbName(), tableObj.getTableName(), tableObj.getTableType(), partitionKeyValues, timestamp); try { this.tableObjJson = MessageBuilder.createTableObjJson(tableObj); @@ -87,6 +88,11 @@ public String getServicePrincipal() { return servicePrincipal; } + @Override + public String getCat() { + return cat; + } + @Override public String getDB() { return db; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONDropTableMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONDropTableMessage.java index 1ef2ad001571..4a74e72d664a 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONDropTableMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONDropTableMessage.java @@ -32,7 +32,7 @@ public class JSONDropTableMessage extends DropTableMessage { @JsonProperty - String server, servicePrincipal, db, table, tableType, tableObjJson; + String server, servicePrincipal, cat, db, table, tableType, tableObjJson; @JsonProperty Long timestamp; @@ -43,15 +43,16 @@ public class JSONDropTableMessage extends DropTableMessage { public JSONDropTableMessage() { } - public JSONDropTableMessage(String server, String servicePrincipal, String db, String table, + 
public JSONDropTableMessage(String server, String servicePrincipal, String cat, String db, String table, Long timestamp) { - this(server, servicePrincipal, db, table, null, timestamp); + this(server, servicePrincipal, cat, db, table, null, timestamp); } - public JSONDropTableMessage(String server, String servicePrincipal, String db, String table, + public JSONDropTableMessage(String server, String servicePrincipal, String cat, String db, String table, String tableType, Long timestamp) { this.server = server; this.servicePrincipal = servicePrincipal; + this.cat = cat; this.db = db; this.table = table; this.tableType = tableType; @@ -61,7 +62,7 @@ public JSONDropTableMessage(String server, String servicePrincipal, String db, S public JSONDropTableMessage(String server, String servicePrincipal, Table tableObj, Long timestamp) { - this(server, servicePrincipal, tableObj.getDbName(), tableObj.getTableName(), + this(server, servicePrincipal, tableObj.getCatName(), tableObj.getDbName(), tableObj.getTableName(), tableObj.getTableType(), timestamp); try { this.tableObjJson = MessageBuilder.createTableObjJson(tableObj); @@ -100,6 +101,11 @@ public String getServicePrincipal() { return servicePrincipal; } + @Override + public String getCat() { + return cat; + } + @Override public String getDB() { return db; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONInsertMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONInsertMessage.java index 40d480b7e38c..5763bbae4e00 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONInsertMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONInsertMessage.java @@ -37,7 +37,7 @@ public class JSONInsertMessage extends InsertMessage { @JsonProperty - String server, servicePrincipal, 
db, table, tableType, tableObjJson, ptnObjJson; + String server, servicePrincipal, cat, db, table, tableType, tableObjJson, ptnObjJson; @JsonProperty Long timestamp; @@ -63,6 +63,7 @@ public JSONInsertMessage(String server, String servicePrincipal, Table tableObj, throw new IllegalArgumentException("Table not valid."); } + this.cat = tableObj.getCatName(); this.db = tableObj.getDbName(); this.table = tableObj.getTableName(); this.tableType = tableObj.getTableType(); @@ -114,6 +115,11 @@ public String getServicePrincipal() { return servicePrincipal; } + @Override + public String getCat() { + return cat; + } + @Override public String getDB() { return db; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONOpenTxnMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONOpenTxnMessage.java index 0c4615bb096f..5c067f9797fd 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONOpenTxnMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONOpenTxnMessage.java @@ -79,6 +79,11 @@ public Long getTimestamp() { return timestamp; } + @Override + public String getCat() { + return null; + } + @Override public String getDB() { return null; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONReloadMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONReloadMessage.java index 9cf5baa2c644..85ed01ab1871 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONReloadMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONReloadMessage.java @@ -41,7 +41,7 @@ public class 
JSONReloadMessage extends ReloadMessage { private Long timestamp; @JsonProperty - private String server, servicePrincipal, db, table, tableObjJson, ptnObjJson, refreshEvent; + private String server, servicePrincipal, cat, db, table, tableObjJson, ptnObjJson, refreshEvent; @JsonProperty List partitionListJson; @@ -61,6 +61,7 @@ public JSONReloadMessage(String server, String servicePrincipal, Table tableObj, throw new IllegalArgumentException("Table not valid."); } + this.cat = tableObj.getCatName(); this.db = tableObj.getDbName(); this.table = tableObj.getTableName(); @@ -106,6 +107,11 @@ public String getServicePrincipal() { return servicePrincipal; } + @Override + public String getCat() { + return cat; + } + @Override public String getDB() { return db; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONUpdatePartitionColumnStatMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONUpdatePartitionColumnStatMessage.java index fd7fe0041903..a5964197d255 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONUpdatePartitionColumnStatMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONUpdatePartitionColumnStatMessage.java @@ -38,7 +38,7 @@ public class JSONUpdatePartitionColumnStatMessage extends UpdatePartitionColumnS private Long writeId, timestamp; @JsonProperty - private String server, servicePrincipal, database; + private String server, servicePrincipal, catalog, database; @JsonProperty private String colStatsJson; @@ -66,6 +66,7 @@ public JSONUpdatePartitionColumnStatMessage(String server, String servicePrincip this.server = server; this.servicePrincipal = servicePrincipal; this.writeId = writeId; + this.catalog = colStats.getStatsDesc().getCatName(); this.database = colStats.getStatsDesc().getDbName(); 
this.partVals = partVals; try { @@ -82,6 +83,11 @@ public Long getTimestamp() { return timestamp; } + @Override + public String getCat() { + return catalog; + } + @Override public String getDB() { return database; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONUpdateTableColumnStatMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONUpdateTableColumnStatMessage.java index 275d204957cb..b1aed6a8f873 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONUpdateTableColumnStatMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONUpdateTableColumnStatMessage.java @@ -36,7 +36,7 @@ public class JSONUpdateTableColumnStatMessage extends UpdateTableColumnStatMessa private Long writeId, timestamp; @JsonProperty - private String server, servicePrincipal, database; + private String server, servicePrincipal, catalog, database; @JsonProperty private String colStatsJson; @@ -60,6 +60,7 @@ public JSONUpdateTableColumnStatMessage(String server, String servicePrincipal, this.server = server; this.servicePrincipal = servicePrincipal; this.writeId = writeId; + this.catalog = colStats.getStatsDesc().getCatName(); this.database = colStats.getStatsDesc().getDbName(); try { this.colStatsJson = MessageBuilder.createTableColumnStatJson(colStats); @@ -75,6 +76,11 @@ public Long getTimestamp() { return timestamp; } + @Override + public String getCat() { + return catalog; + } + @Override public String getDB() { return database; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metrics/AcidMetricService.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metrics/AcidMetricService.java index d80f84219eea..424c8dfa30ab 100644 --- 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metrics/AcidMetricService.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metrics/AcidMetricService.java @@ -129,7 +129,7 @@ public void run() { } } - public static void updateMetricsFromInitiator(String dbName, String tableName, + public static void updateMetricsFromInitiator(String catName, String dbName, String tableName, String partitionName, Configuration conf, TxnStore txnHandler, long baseSize, Map activeDeltaSizes, List obsoleteDeltaPaths) { if (!metricsEnabled) { @@ -159,10 +159,10 @@ public static void updateMetricsFromInitiator(String dbName, String tableName, int numObsoleteDeltas = filterOutBaseAndOriginalFiles(obsoleteDeltaPaths).size(); - updateDeltaMetrics(dbName, tableName, partitionName, NUM_DELTAS, numDeltas, deltasThreshold, txnHandler); - updateDeltaMetrics(dbName, tableName, partitionName, NUM_SMALL_DELTAS, numSmallDeltas, deltasThreshold, + updateDeltaMetrics(catName, dbName, tableName, partitionName, NUM_DELTAS, numDeltas, deltasThreshold, txnHandler); + updateDeltaMetrics(catName, dbName, tableName, partitionName, NUM_SMALL_DELTAS, numSmallDeltas, deltasThreshold, txnHandler); - updateDeltaMetrics(dbName, tableName, partitionName, CompactionMetricsData.MetricType.NUM_OBSOLETE_DELTAS, + updateDeltaMetrics(catName, dbName, tableName, partitionName, CompactionMetricsData.MetricType.NUM_OBSOLETE_DELTAS, numObsoleteDeltas, obsoleteDeltasThreshold, txnHandler); LOG.debug("Finished updating delta file metrics from initiator.\n deltaPctThreshold = {}, deltasThreshold = {}, " @@ -174,7 +174,7 @@ public static void updateMetricsFromInitiator(String dbName, String tableName, } } - public static void updateMetricsFromWorker(String dbName, String tableName, + public static void updateMetricsFromWorker(String catName, String dbName, String tableName, String partitionName, CompactionType type, int 
preWorkerActiveDeltaCount, int preWorkerDeleteDeltaCount, Configuration conf, IMetaStoreClient client) { if (!(MetastoreConf.getBoolVar(conf, MetastoreConf.ConfVars.METRICS_ENABLED) && @@ -192,19 +192,19 @@ public static void updateMetricsFromWorker(String dbName, String tableName, // we have an instance of the AcidDirectory before the compaction worker was started // from this we can get how many delta directories existed // the previously active delta directories are now moved to obsolete - updateDeltaMetrics(dbName, tableName, partitionName, CompactionMetricsMetricType.NUM_OBSOLETE_DELTAS, + updateDeltaMetrics(catName, dbName, tableName, partitionName, CompactionMetricsMetricType.NUM_OBSOLETE_DELTAS, preWorkerActiveDeltaCount, obsoleteDeltasThreshold, client); // We don't know the size of the newly create delta directories, that would require a fresh AcidDirectory // Clear the small delta num counter from the cache for this key - removeDeltaMetrics(dbName, tableName, partitionName, CompactionMetricsMetricType.NUM_SMALL_DELTAS, client); + removeDeltaMetrics(catName, dbName, tableName, partitionName, CompactionMetricsMetricType.NUM_SMALL_DELTAS, client); // The new number of active delta dirs are either 0, 1 or 2. // If we ran MAJOR compaction, no new delta is created, just base dir // If we ran MINOR compaction, we can have 1 or 2 new delta dirs, depending on whether we had deltas or // delete deltas. 
if (type == CompactionType.MAJOR) { - removeDeltaMetrics(dbName, tableName, partitionName, CompactionMetricsMetricType.NUM_DELTAS, client); + removeDeltaMetrics(catName, dbName, tableName, partitionName, CompactionMetricsMetricType.NUM_DELTAS, client); } else { int numNewDeltas = 0; // check whether we had deltas @@ -218,7 +218,7 @@ public static void updateMetricsFromWorker(String dbName, String tableName, } // recalculate the delta count - updateDeltaMetrics(dbName, tableName, partitionName, CompactionMetricsMetricType.NUM_DELTAS, numNewDeltas, + updateDeltaMetrics(catName, dbName, tableName, partitionName, CompactionMetricsMetricType.NUM_DELTAS, numNewDeltas, deltasThreshold, client); } @@ -231,7 +231,7 @@ public static void updateMetricsFromWorker(String dbName, String tableName, } } - public static void updateMetricsFromCleaner(String dbName, String tableName, String partitionName, + public static void updateMetricsFromCleaner(String catName, String dbName, String tableName, String partitionName, List deletedFiles, Configuration conf, TxnStore txnHandler) { if (!metricsEnabled) { LOG.debug("Acid metric collection is not enabled. 
To turn it on, \"metastore.acidmetrics.thread.on\" and " @@ -248,7 +248,7 @@ public static void updateMetricsFromCleaner(String dbName, String tableName, Str int numObsoleteDeltas = 0; if (prevObsoleteDelta != null) { numObsoleteDeltas = prevObsoleteDelta.getMetricValue() - filterOutBaseAndOriginalFiles(deletedFiles).size(); - updateDeltaMetrics(dbName, tableName, partitionName, CompactionMetricsData.MetricType.NUM_OBSOLETE_DELTAS, + updateDeltaMetrics(catName, dbName, tableName, partitionName, CompactionMetricsData.MetricType.NUM_OBSOLETE_DELTAS, numObsoleteDeltas, obsoleteDeltasThreshold, txnHandler); } @@ -435,9 +435,9 @@ private static List filterOutBaseAndOriginalFiles(List paths) { .startsWith(AcidConstants.DELETE_DELTA_PREFIX)).collect(Collectors.toList()); } - private static void updateDeltaMetrics(String dbName, String tblName, String partitionName, + private static void updateDeltaMetrics(String catName, String dbName, String tblName, String partitionName, CompactionMetricsData.MetricType type, int numDeltas, int deltasThreshold, TxnStore txnHandler) throws MetaException { - CompactionMetricsData data = new CompactionMetricsData.Builder() + CompactionMetricsData data = new CompactionMetricsData.Builder().catName(catName) .dbName(dbName).tblName(tblName).partitionName(partitionName).metricType(type).metricValue(numDeltas).version(0) .threshold(deltasThreshold).build(); if (!txnHandler.updateCompactionMetricsData(data)) { @@ -445,9 +445,10 @@ private static void updateDeltaMetrics(String dbName, String tblName, String par } } - private static void updateDeltaMetrics(String dbName, String tblName, String partitionName, + private static void updateDeltaMetrics(String catName, String dbName, String tblName, String partitionName, CompactionMetricsMetricType type, int numDeltas, int deltasThreshold, IMetaStoreClient client) throws TException { CompactionMetricsDataStruct struct = new CompactionMetricsDataStruct(); + struct.setCatName(catName); 
struct.setDbname(dbName); struct.setTblname(tblName); struct.setPartitionname(partitionName); @@ -461,9 +462,10 @@ private static void updateDeltaMetrics(String dbName, String tblName, String par } - private static void removeDeltaMetrics(String dbName, String tblName, String partitionName, + private static void removeDeltaMetrics(String catName, String dbName, String tblName, String partitionName, CompactionMetricsMetricType type, IMetaStoreClient client) throws TException { CompactionMetricsDataRequest request = new CompactionMetricsDataRequest(dbName, tblName, type); + request.setCatName(catName); request.setPartitionName(partitionName); client.removeCompactionMetricsData(request); } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionMetricsDataConverter.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionMetricsDataConverter.java index 531ae67fcfaa..08b2e7bae3d1 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionMetricsDataConverter.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionMetricsDataConverter.java @@ -27,6 +27,7 @@ public class CompactionMetricsDataConverter { public static CompactionMetricsDataStruct dataToStruct(CompactionMetricsData data) throws MetaException { CompactionMetricsDataStruct struct = new CompactionMetricsDataStruct(); + struct.setCatName(data.getCatName()); struct.setDbname(data.getDbName()); struct.setTblname(data.getTblName()); struct.setPartitionname(data.getPartitionName()); @@ -39,6 +40,7 @@ public static CompactionMetricsDataStruct dataToStruct(CompactionMetricsData dat public static CompactionMetricsData structToData(CompactionMetricsDataStruct struct) throws MetaException { return new CompactionMetricsData.Builder() + .catName(struct.getCatName()) .dbName(struct.getDbname()) 
.tblName(struct.getTblname()) .partitionName(struct.getPartitionname()) diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java index d1a0bf94fca6..4f0bbcb749d2 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hive.metastore.txn; import org.apache.hadoop.hive.common.classification.RetrySemantics; +import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.CompactionType; import org.apache.hadoop.hive.metastore.api.FindNextCompactRequest; import org.apache.hadoop.hive.metastore.api.MetaException; @@ -317,6 +318,7 @@ public void updateCompactorState(CompactionInfo ci, long compactionTxnId) throws MapSqlParameterSource parameterSource = new MapSqlParameterSource() .addValue("txnId", compactionTxnId) + .addValue("catName", ci.catName) .addValue("dbName", ci.dbname) .addValue("tableName", ci.tableName) .addValue("partName", ci.partName, Types.VARCHAR) @@ -334,9 +336,9 @@ public void updateCompactorState(CompactionInfo ci, long compactionTxnId) throws * a new write id (so as not to invalidate result set caches/materialized views) but * we need to set it to something to that markCleaned() only cleans TXN_COMPONENTS up to * the level to which aborted files/data has been cleaned.*/ - "INSERT INTO \"TXN_COMPONENTS\"(\"TC_TXNID\", \"TC_DATABASE\", \"TC_TABLE\", \"TC_PARTITION\", " + + "INSERT INTO \"TXN_COMPONENTS\"(\"TC_TXNID\", \"TC_CATALOG\", \"TC_DATABASE\", \"TC_TABLE\", \"TC_PARTITION\", " + "\"TC_WRITEID\", \"TC_OPERATION_TYPE\") " + - "VALUES(:txnId, :dbName, :tableName, :partName, :highestWriteId, 
:operationType)", + "VALUES(:txnId, :catName, :dbName, :tableName, :partName, :highestWriteId, :operationType)", parameterSource, ParameterizedCommand.EXACTLY_ONE_ROW); } @@ -450,11 +452,12 @@ public void setCleanerRetryRetentionTimeOnError(CompactionInfo info) throws Meta try (TxnStore.MutexAPI.LockHandle ignored = getMutexAPI().acquireLock(MUTEX_KEY.CompactionScheduler.name())) { long id = new GenerateCompactionQueueIdFunction().execute(jdbcResource); int updCnt = jdbcResource.execute( - "INSERT INTO \"COMPACTION_QUEUE\" (\"CQ_ID\", \"CQ_DATABASE\", \"CQ_TABLE\", \"CQ_PARTITION\", " + + "INSERT INTO \"COMPACTION_QUEUE\" (\"CQ_ID\", \"CQ_CATALOG\", \"CQ_DATABASE\", \"CQ_TABLE\", \"CQ_PARTITION\", " + " \"CQ_TYPE\", \"CQ_STATE\", \"CQ_RETRY_RETENTION\", \"CQ_ERROR_MESSAGE\", \"CQ_COMMIT_TIME\") " + - " VALUES (:id, :db, :table, :partition, :type, :state, :retention, :msg, " + getEpochFn(dbProduct) + ")", + " VALUES (:id, :cat, :db, :table, :partition, :type, :state, :retention, :msg, " + getEpochFn(dbProduct) + ")", new MapSqlParameterSource() .addValue("id", id) + .addValue("cat", info.catName) .addValue("db", info.dbname) .addValue("table", info.tableName) .addValue("partition", info.partName, Types.VARCHAR) @@ -466,7 +469,7 @@ public void setCleanerRetryRetentionTimeOnError(CompactionInfo info) throws Meta if (updCnt == 0) { LOG.error("Unable to update/insert compaction queue record: {}. 
updCnt={}", info, updCnt); throw new MetaException("Unable to insert abort retry entry into COMPACTION QUEUE: " + - " CQ_DATABASE=" + info.dbname + ", CQ_TABLE=" + info.tableName + ", CQ_PARTITION" + info.partName); + " CQ_CATALOG=" + info.catName + ", CQ_DATABASE=" + info.dbname + ", CQ_TABLE=" + info.tableName + ", CQ_PARTITION=" + info.partName); } } catch (Exception e) { throw new MetaException("Failed to set retry retention time for compaction item: " + info + " Error: " + e); @@ -545,15 +548,29 @@ public List getTopCompactionMetricsDataPerType(int limit) } @Override + @Deprecated public CompactionMetricsData getCompactionMetricsData(String dbName, String tblName, String partitionName, CompactionMetricsData.MetricType type) throws MetaException { - return jdbcResource.execute(new CompactionMetricsDataHandler(dbName, tblName, partitionName, type)); + return getCompactionMetricsData(Warehouse.DEFAULT_CATALOG_NAME, dbName, tblName, partitionName, type); + } + + @Override + public CompactionMetricsData getCompactionMetricsData(String catName, String dbName, String tblName, String partitionName, + CompactionMetricsData.MetricType type) throws MetaException { + return jdbcResource.execute(new CompactionMetricsDataHandler(catName, dbName, tblName, partitionName, type)); } @Override + @Deprecated public void removeCompactionMetricsData(String dbName, String tblName, String partitionName, CompactionMetricsData.MetricType type) throws MetaException { - jdbcResource.execute(new RemoveCompactionMetricsDataCommand(dbName, tblName, partitionName, type)); + removeCompactionMetricsData(Warehouse.DEFAULT_CATALOG_NAME, dbName, tblName, partitionName, type); + } + + @Override + public void removeCompactionMetricsData(String catName, String dbName, String tblName, String partitionName, + CompactionMetricsData.MetricType type) throws MetaException { + jdbcResource.execute(new RemoveCompactionMetricsDataCommand(catName, dbName, tblName, partitionName, type)); } } \ No newline at end
of file diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java index a847e01aca4d..ca536e7a77bd 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java @@ -540,6 +540,9 @@ public static void notifyCommitOrAbortEvent(long txnId, EventMessage.EventType e List writeIds = txnWriteDetails.stream() .map(TxnWriteDetails::getWriteId) .toList(); + List catalogs = txnWriteDetails.stream() + .map(TxnWriteDetails::getCatName) + .toList(); List databases = txnWriteDetails.stream() .map(TxnWriteDetails::getDbName) .toList(); @@ -547,7 +550,7 @@ public static void notifyCommitOrAbortEvent(long txnId, EventMessage.EventType e if (eventType.equals(EventMessage.EventType.ABORT_TXN)) { txnEvent = new AbortTxnEvent(txnId, txnType, null, databases, writeIds); } else { - txnEvent = new CommitTxnEvent(txnId, txnType, null, databases, writeIds); + txnEvent = new CommitTxnEvent(txnId, txnType, null, catalogs, databases, writeIds); } MetaStoreListenerNotifier.notifyEventWithDirectSql(transactionalListeners, eventType, txnEvent, dbConn, sqlGenerator); @@ -705,8 +708,9 @@ public void seedWriteId(SeedTableWriteIdsRequest rqst) throws MetaException { // The initial value for write id should be 1 and hence we add 1 with number of write ids // allocated here jdbcResource.getJdbcTemplate().update( - "INSERT INTO \"NEXT_WRITE_ID\" (\"NWI_DATABASE\", \"NWI_TABLE\", \"NWI_NEXT\") VALUES (:db, :table, :writeId)", + "INSERT INTO \"NEXT_WRITE_ID\" (\"NWI_CATALOG\", \"NWI_DATABASE\", \"NWI_TABLE\", \"NWI_NEXT\") VALUES (:cat, :db, :table, :writeId)", new MapSqlParameterSource() + .addValue("cat", rqst.getCatName()) .addValue("db", rqst.getDbName()) .addValue("table", 
rqst.getTableName()) .addValue("writeId", rqst.getSeedWriteId() + 1)); @@ -911,7 +915,12 @@ public HeartbeatTxnRangeResponse heartbeatTxnRange(HeartbeatTxnRangeRequest rqst @Override public long getTxnIdForWriteId(String dbName, String tblName, long writeId) throws MetaException { - return jdbcResource.execute(new TxnIdForWriteIdHandler(writeId, dbName, tblName)); + return getTxnIdForWriteId(Warehouse.DEFAULT_CATALOG_NAME, dbName, tblName, writeId); + } + + @Override + public long getTxnIdForWriteId(String catName, String dbName, String tblName, long writeId) throws MetaException { + return jdbcResource.execute(new TxnIdForWriteIdHandler(writeId, catName, dbName, tblName)); } @Override @@ -968,9 +977,10 @@ public void addDynamicPartitions(AddDynamicPartitions rqst) throws NoSuchTxnExce } jdbcResource.execute(new InsertTxnComponentsCommand(rqst)); jdbcResource.getJdbcTemplate().update("DELETE FROM \"TXN_COMPONENTS\" " + - "WHERE \"TC_TXNID\" = :txnId AND \"TC_DATABASE\" = :dbName AND \"TC_TABLE\" = :tableName AND \"TC_PARTITION\" IS NULL", + "WHERE \"TC_TXNID\" = :txnId AND \"TC_CATALOG\" = :catName AND \"TC_DATABASE\" = :dbName AND \"TC_TABLE\" = :tableName AND \"TC_PARTITION\" IS NULL", new MapSqlParameterSource() .addValue("txnId", rqst.getTxnid()) + .addValue("catName", org.apache.commons.lang3.StringUtils.lowerCase(rqst.getCatName())) .addValue("dbName", org.apache.commons.lang3.StringUtils.lowerCase(rqst.getDbname())) .addValue("tableName", org.apache.commons.lang3.StringUtils.lowerCase(rqst.getTablename()))); } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java index 138acedc8547..f5117b43833b 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java +++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java @@ -287,9 +287,14 @@ Materialization getMaterializationInvalidationInfo( throws MetaException; @RetrySemantics.ReadOnly + @Deprecated long getTxnIdForWriteId(String dbName, String tblName, long writeId) throws MetaException; + @RetrySemantics.ReadOnly + long getTxnIdForWriteId(String catName, String dbName, String tblName, long writeId) + throws MetaException; + @SqlRetry @Transactional(POOL_TX) @RetrySemantics.ReadOnly @@ -939,9 +944,27 @@ interface LockHandle extends AutoCloseable { @RetrySemantics.ReadOnly @SqlRetry @Transactional(POOL_COMPACTOR) + @Deprecated CompactionMetricsData getCompactionMetricsData(String dbName, String tblName, String partitionName, CompactionMetricsData.MetricType type) throws MetaException; + /** + * Returns ACID metrics related info for a specific resource and metric type. If no record is found matching the + * filter criteria, null will be returned. + * @param catName name of the catalog, non-null + * @param dbName name of database, non-null + * @param tblName name of the table, non-null + * @param partitionName name of the partition, can be null + * @param type type of the delta metric, non-null + * @return instance of delta metrics info, can be null + * @throws MetaException + */ + @RetrySemantics.ReadOnly + @SqlRetry + @Transactional(POOL_COMPACTOR) + CompactionMetricsData getCompactionMetricsData(String catName, String dbName, String tblName, String partitionName, + CompactionMetricsData.MetricType type) throws MetaException; + /** * Remove records from the compaction metrics cache matching the filter criteria passed in as parameters * @param dbName name of the database, non-null @@ -953,9 +976,25 @@ CompactionMetricsData getCompactionMetricsData(String dbName, String tblName, St @SqlRetry @Transactional(POOL_COMPACTOR) @RetrySemantics.SafeToRetry + @Deprecated void removeCompactionMetricsData(String dbName, String tblName, String partitionName, 
CompactionMetricsData.MetricType type) throws MetaException; + /** + * Remove records from the compaction metrics cache matching the filter criteria passed in as parameters + * @param catName name of the catalog, non-null + * @param dbName name of the database, non-null + * @param tblName name of the table, non-null + * @param partitionName name of the partition, non-null + * @param type type of the delta metric, non-null + * @throws MetaException + */ + @SqlRetry + @Transactional(POOL_COMPACTOR) + @RetrySemantics.SafeToRetry + void removeCompactionMetricsData(String catName, String dbName, String tblName, String partitionName, + CompactionMetricsData.MetricType type) throws MetaException; + /** * Returns the top ACID metrics from each type {@link CompactionMetricsData.MetricType} * @param limit number of returned records for each type diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java index cd56311abc05..39f8e42e9113 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java @@ -203,8 +203,8 @@ public static boolean isTableSoftDeleteEnabled(Table table, boolean isSoftDelete - /** * Should produce the result as <dbName>.<tableName>. */ + /** * Should produce the result as <catName>.<dbName>.<tableName>. */ - public static String getFullTableName(String dbName, String tableName) { - return dbName.toLowerCase() + "." + tableName.toLowerCase(); + public static String getFullTableName(String catName, String dbName, String tableName) { + return catName.toLowerCase() + "." + dbName.toLowerCase() + "." 
+ tableName.toLowerCase(); } public static String[] getDbTableName(String fullTableName) { diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/entities/CompactionInfo.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/entities/CompactionInfo.java index e1c1f492bab8..bee9d1268b67 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/entities/CompactionInfo.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/entities/CompactionInfo.java @@ -45,6 +45,7 @@ public class CompactionInfo implements Comparable { * being resetted. This will be fixed at HIVE-21056. */ public long id; + public String catName; public String dbname; public String tableName; public String partName; @@ -88,14 +89,15 @@ public class CompactionInfo implements Comparable { private String fullTableName = null; private StringableMap propertiesMap; - public CompactionInfo(String dbname, String tableName, String partName, CompactionType type) { + public CompactionInfo(String catName, String dbname, String tableName, String partName, CompactionType type) { + this.catName = catName; this.dbname = dbname; this.tableName = tableName; this.partName = partName; this.type = type; } - public CompactionInfo(long id, String dbname, String tableName, String partName, char state) { - this(dbname, tableName, partName, null); + public CompactionInfo(long id, String catName, String dbname, String tableName, String partName, char state) { + this(catName, dbname, tableName, partName, null); this.id = id; this.state = state; } @@ -133,7 +135,9 @@ public String getFullPartitionName() { public String getFullTableName() { if (fullTableName == null) { - StringBuilder buf = new StringBuilder(dbname); + StringBuilder buf = new StringBuilder(catName); + buf.append('.'); + buf.append(dbname); buf.append('.'); buf.append(tableName); 
fullTableName = buf.toString(); @@ -165,6 +169,7 @@ public int compareTo(CompactionInfo o) { public String toString() { return new ToStringBuilder(this) .append("id", id) + .append("catName", catName) .append("dbname", dbname) .append("tableName", tableName) .append("partName", partName) @@ -278,7 +283,8 @@ public static CompactionInfo compactionStructToInfo(CompactionInfoStruct cr) { if (cr == null) { return null; } - CompactionInfo ci = new CompactionInfo(cr.getDbname(), cr.getTablename(), cr.getPartitionname(), cr.getType()); + CompactionInfo ci = new CompactionInfo(cr.getCatName(), cr.getDbname(), cr.getTablename(), cr.getPartitionname(), + cr.getType()); ci.id = cr.getId(); ci.runAs = cr.getRunas(); ci.properties = cr.getProperties(); @@ -326,6 +332,7 @@ public static CompactionInfoStruct compactionInfoToStruct(CompactionInfo ci) { return null; } CompactionInfoStruct cr = new CompactionInfoStruct(ci.id, ci.dbname, ci.tableName, ci.type); + cr.setCatName(ci.catName); cr.setPartitionname(ci.partName); cr.setRunas(ci.runAs); cr.setProperties(ci.properties); diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/entities/CompactionMetricsData.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/entities/CompactionMetricsData.java index 33dd362307d4..de1ea461b53d 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/entities/CompactionMetricsData.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/entities/CompactionMetricsData.java @@ -19,6 +19,7 @@ public class CompactionMetricsData { + private final String catName; private final String dbName; private final String tblName; private final String partitionName; @@ -45,6 +46,7 @@ public String toString() { } private CompactionMetricsData(Builder builder) { + this.catName = builder.catName; this.dbName = builder.dbName; this.tblName = 
builder.tblName; this.partitionName = builder.partitionName; @@ -54,6 +56,10 @@ private CompactionMetricsData(Builder builder) { this.threshold = builder.threshold; } + public String getCatName() { + return catName; + } + public String getDbName() { return dbName; } @@ -89,12 +95,13 @@ public boolean isEmpty() { @Override public String toString() { - return "DeltaMetricsInfo{" + "dbName='" + dbName + '\'' + ", tblName='" + tblName + '\'' + ", partitionName='" + return "DeltaMetricsInfo{" + "catName='" + catName + '\'' + ", dbName='" + dbName + '\'' + ", tblName='" + tblName + '\'' + ", partitionName='" + partitionName + '\'' + ", metricType=" + metricType + ", metricValue=" + metricValue + ", version=" + version + '}'; } public static class Builder { + private String catName; private String dbName; private String tblName; private String partitionName; @@ -107,6 +114,11 @@ public CompactionMetricsData build() { return new CompactionMetricsData(this); } + public Builder catName(String catName) { + this.catName = catName; + return this; + } + public Builder dbName(String dbName) { this.dbName = dbName; return this; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/entities/TxnWriteDetails.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/entities/TxnWriteDetails.java index 45f115da5d32..06708fbbfd05 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/entities/TxnWriteDetails.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/entities/TxnWriteDetails.java @@ -22,11 +22,13 @@ */ public class TxnWriteDetails { private final long txnId; + private final String catName; private final String dbName; private final long writeId; - public TxnWriteDetails(long txnId, String dbName, long writeId) { + public TxnWriteDetails(long txnId, String catName, String dbName, long writeId) { 
this.txnId = txnId; + this.catName = catName; this.dbName = dbName; this.writeId = writeId; } @@ -35,6 +37,7 @@ public TxnWriteDetails(long txnId, String dbName, long writeId) { public String toString() { return "TxnToWriteID{" + "txnId=" + txnId + + ", catName='" + catName + '\'' + ", dbName='" + dbName + '\'' + ", writeId=" + writeId + '}'; @@ -44,6 +47,10 @@ public long getTxnId() { return txnId; } + public String getCatName() { + return catName; + } + public String getDbName() { return dbName; } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/AddWriteIdsToMinHistoryCommand.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/AddWriteIdsToMinHistoryCommand.java index dfe87bd34085..07f4858b68ad 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/AddWriteIdsToMinHistoryCommand.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/AddWriteIdsToMinHistoryCommand.java @@ -33,7 +33,7 @@ public class AddWriteIdsToMinHistoryCommand implements ParameterizedBatchCommand, ConditionalCommand { private static final String MIN_HISTORY_WRITE_ID_INSERT_QUERY = "INSERT INTO \"MIN_HISTORY_WRITE_ID\" (\"MH_TXNID\", " + - "\"MH_DATABASE\", \"MH_TABLE\", \"MH_WRITEID\") VALUES (?, ?, ?, ?)"; + "\"MH_CATALOG\", \"MH_DATABASE\", \"MH_TABLE\", \"MH_WRITEID\") VALUES (?, ?, ?, ?, ?)"; private final List params; @@ -41,7 +41,7 @@ public AddWriteIdsToMinHistoryCommand(long txnId, Map minOpenWrite this.params = new ArrayList<>(); for (Map.Entry validWriteId : minOpenWriteIds.entrySet()) { String[] names = TxnUtils.getDbTableName(validWriteId.getKey()); - params.add(new Object[]{ txnId, names[0], names[1], validWriteId.getValue() }); + params.add(new Object[]{ txnId, names[0], names[1], names[2], validWriteId.getValue() }); } } @@ -61,7 +61,8 @@ 
public ParameterizedPreparedStatementSetter getPreparedStatementSetter ps.setLong(1, (Long)argument[0]); ps.setString(2, argument[1].toString()); ps.setString(3, argument[2].toString()); - ps.setLong(4, (Long)argument[3]); + ps.setString(4, argument[3].toString()); + ps.setLong(5, (Long)argument[4]); }; } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/AddWriteIdsToTxnToWriteIdCommand.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/AddWriteIdsToTxnToWriteIdCommand.java index eaa6d1e5d9be..65c8b36019c4 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/AddWriteIdsToTxnToWriteIdCommand.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/AddWriteIdsToTxnToWriteIdCommand.java @@ -30,10 +30,11 @@ public class AddWriteIdsToTxnToWriteIdCommand implements ParameterizedBatchComma private final List params; - public AddWriteIdsToTxnToWriteIdCommand(String dbName, String tableName, long writeId, List txnIds, List txnToWriteIds) { + public AddWriteIdsToTxnToWriteIdCommand(String catName, String dbName, String tableName, long writeId, + List txnIds, List txnToWriteIds) { this.params = new ArrayList<>(); for (long txnId : txnIds) { - params.add(new Object[]{ txnId, dbName, tableName, writeId }); + params.add(new Object[]{ txnId, catName, dbName, tableName, writeId }); txnToWriteIds.add(new TxnToWriteId(txnId, writeId)); writeId++; } @@ -42,7 +43,7 @@ public AddWriteIdsToTxnToWriteIdCommand(String dbName, String tableName, long wr @Override public String getParameterizedQueryString(DatabaseProduct databaseProduct) { - return "INSERT INTO \"TXN_TO_WRITE_ID\" (\"T2W_TXNID\", \"T2W_DATABASE\", \"T2W_TABLE\", \"T2W_WRITEID\") VALUES (?, ?, ?, ?)"; + return "INSERT INTO \"TXN_TO_WRITE_ID\" (\"T2W_TXNID\", \"T2W_CATALOG\", 
\"T2W_DATABASE\", \"T2W_TABLE\", \"T2W_WRITEID\") VALUES (?, ?, ?, ?, ?)"; } @Override @@ -56,7 +57,8 @@ public ParameterizedPreparedStatementSetter getPreparedStatementSetter ps.setLong(1, (Long)argument[0]); ps.setString(2, argument[1].toString()); ps.setString(3, argument[2].toString()); - ps.setLong(4, (Long)argument[3]); + ps.setString(4, argument[3].toString()); + ps.setLong(5, (Long)argument[4]); }; } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/InsertCompactionInfoCommand.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/InsertCompactionInfoCommand.java index 4af24c5d4a50..bbd88a03aeb9 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/InsertCompactionInfoCommand.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/InsertCompactionInfoCommand.java @@ -37,13 +37,13 @@ public class InsertCompactionInfoCommand implements ParameterizedCommand { //language=SQL private static final String INSERT = "INSERT INTO \"COMPLETED_COMPACTIONS\" " + - " (\"CC_ID\", \"CC_DATABASE\", \"CC_TABLE\", \"CC_PARTITION\", \"CC_STATE\", \"CC_TYPE\", " + + " (\"CC_ID\", \"CC_CATALOG\", \"CC_DATABASE\", \"CC_TABLE\", \"CC_PARTITION\", \"CC_STATE\", \"CC_TYPE\", " + " \"CC_TBLPROPERTIES\", \"CC_WORKER_ID\", \"CC_START\", \"CC_END\", \"CC_RUN_AS\", " + " \"CC_HIGHEST_WRITE_ID\", \"CC_META_INFO\", \"CC_HADOOP_JOB_ID\", \"CC_ERROR_MESSAGE\", " + " \"CC_ENQUEUE_TIME\", \"CC_WORKER_VERSION\", \"CC_INITIATOR_ID\", \"CC_INITIATOR_VERSION\"," + " \"CC_NEXT_TXN_ID\", \"CC_TXN_ID\", \"CC_COMMIT_TIME\", \"CC_POOL_NAME\", \"CC_NUMBER_OF_BUCKETS\", " + " \"CC_ORDER_BY\") " + - " VALUES(:id,:dbname,:tableName,:partName,:state,:type,:properties,:workerId,:start,:endTime," + + " VALUES(:id,:catName, 
:dbname,:tableName,:partName,:state,:type,:properties,:workerId,:start,:endTime," + " :runAs,:highestWriteId,:metaInfo,:hadoopJobId,:errorMessage,:enqueueTime,:workerVersion,:initiatorId," + " :initiatorVersion,:nextTxnId,:txnId,:commitTime,:poolName,:numberOfBuckets,:orderByClause)"; @@ -68,6 +68,7 @@ public SqlParameterSource getQueryParameters() { try { return new MapSqlParameterSource() .addValue("id", ci.id) + .addValue("catName", ci.catName) .addValue("dbname", ci.dbname) .addValue("tableName", ci.tableName) .addValue("partName", ci.partName) diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/InsertCompactionRequestCommand.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/InsertCompactionRequestCommand.java index 7e2d9bf52b5c..335530fa5913 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/InsertCompactionRequestCommand.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/InsertCompactionRequestCommand.java @@ -60,10 +60,10 @@ public Function resultPolicy() { @Override public String getParameterizedQueryString(DatabaseProduct databaseProduct) throws MetaException { - return "INSERT INTO \"COMPACTION_QUEUE\" (\"CQ_ID\", \"CQ_DATABASE\", \"CQ_TABLE\", \"CQ_PARTITION\", \"CQ_STATE\", " + + return "INSERT INTO \"COMPACTION_QUEUE\" (\"CQ_ID\", \"CQ_CATALOG\", \"CQ_DATABASE\", \"CQ_TABLE\", \"CQ_PARTITION\", \"CQ_STATE\", " + "\"CQ_TYPE\", \"CQ_POOL_NAME\", \"CQ_NUMBER_OF_BUCKETS\", \"CQ_ORDER_BY\", \"CQ_TBLPROPERTIES\", \"CQ_RUN_AS\", " + "\"CQ_INITIATOR_ID\", \"CQ_INITIATOR_VERSION\", \"CQ_HIGHEST_WRITE_ID\", \"CQ_TXN_ID\", \"CQ_ENQUEUE_TIME\") " + - "VALUES(:id, :dbName, :tableName, :partition, :state, :type, :poolName, :buckets, :orderBy, :tblProperties, " + + "VALUES(:id, :catName, :dbName, :tableName, :partition, 
:state, :type, :poolName, :buckets, :orderBy, :tblProperties, " + ":runAs, :initiatorId, :initiatorVersion, :highestWriteId, :txnId, " + getEpochFn(databaseProduct) + ")"; } @@ -72,6 +72,7 @@ public SqlParameterSource getQueryParameters() { try { return new MapSqlParameterSource() .addValue("id", id) + .addValue("catName", rqst.getCatName(), Types.VARCHAR) .addValue("dbName", rqst.getDbname(), Types.VARCHAR) .addValue("tableName", rqst.getTablename(), Types.VARCHAR) .addValue("partition", rqst.getPartitionname(), Types.VARCHAR) diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/InsertCompletedTxnComponentsCommand.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/InsertCompletedTxnComponentsCommand.java index 9f9086b755db..d6e7723c6c68 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/InsertCompletedTxnComponentsCommand.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/InsertCompletedTxnComponentsCommand.java @@ -42,8 +42,8 @@ public InsertCompletedTxnComponentsCommand(long txnId, char isUpdateDelete, List public String getParameterizedQueryString(DatabaseProduct databaseProduct) { return "INSERT INTO \"COMPLETED_TXN_COMPONENTS\" " + - "(\"CTC_TXNID\", \"CTC_DATABASE\", \"CTC_TABLE\", \"CTC_PARTITION\", \"CTC_WRITEID\", \"CTC_UPDATE_DELETE\") " + - "VALUES (?, ?, ?, ?, ?, ?)"; + "(\"CTC_TXNID\", \"CTC_CATALOG\", \"CTC_DATABASE\", \"CTC_TABLE\", \"CTC_PARTITION\", \"CTC_WRITEID\", \"CTC_UPDATE_DELETE\") " + + "VALUES (?, ?, ?, ?, ?, ?, ?)"; } @Override @@ -55,11 +55,12 @@ public List getQueryParameters() { public ParameterizedPreparedStatementSetter getPreparedStatementSetter() { return (ps, argument) -> { ps.setLong(1, txnId); - ps.setString(2, argument.getDatabase()); - ps.setString(3, argument.getTable()); - 
ps.setString(4, argument.getPartition()); - ps.setLong(5, argument.getWriteId()); - ps.setString(6, Character.toString(isUpdateDelete)); + ps.setString(2, argument.getCatalog()); + ps.setString(3, argument.getDatabase()); + ps.setString(4, argument.getTable()); + ps.setString(5, argument.getPartition()); + ps.setLong(6, argument.getWriteId()); + ps.setString(7, Character.toString(isUpdateDelete)); }; } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/InsertTxnComponentsCommand.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/InsertTxnComponentsCommand.java index 2f4da649c5ad..c7ad91eba58d 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/InsertTxnComponentsCommand.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/InsertTxnComponentsCommand.java @@ -44,10 +44,10 @@ public class InsertTxnComponentsCommand implements ParameterizedBatchCommand { private final LockRequest lockRequest; - private final Map, Long> writeIds; + private final Map>, Long> writeIds; private final AddDynamicPartitions dynamicPartitions; - public InsertTxnComponentsCommand(LockRequest lockRequest, Map, Long> writeIds) { + public InsertTxnComponentsCommand(LockRequest lockRequest, Map>, Long> writeIds) { this.lockRequest = lockRequest; this.writeIds = writeIds; this.dynamicPartitions = null; @@ -62,8 +62,8 @@ public InsertTxnComponentsCommand(AddDynamicPartitions dynamicPartitions) { @Override public String getParameterizedQueryString(DatabaseProduct databaseProduct) { return "INSERT INTO \"TXN_COMPONENTS\" (" + - "\"TC_TXNID\", \"TC_DATABASE\", \"TC_TABLE\", \"TC_PARTITION\", \"TC_OPERATION_TYPE\", \"TC_WRITEID\")" + - " VALUES (?, ?, ?, ?, ?, ?)"; + "\"TC_TXNID\", \"TC_CATALOG\", \"TC_DATABASE\", \"TC_TABLE\", \"TC_PARTITION\", 
\"TC_OPERATION_TYPE\", \"TC_WRITEID\")" + + " VALUES (?, ?, ?, ?, ?, ?, ?)"; } @Override @@ -79,7 +79,8 @@ public ParameterizedPreparedStatementSetter getPreparedStatementSetter ps.setString(3, (String)argument[2]); ps.setString(4, (String)argument[3]); ps.setString(5, (String)argument[4]); - ps.setObject(6, argument[5], Types.BIGINT); + ps.setString(6, (String)argument[5]); + ps.setObject(7, argument[6], Types.BIGINT); }; } @@ -105,6 +106,7 @@ private List getQueryParametersByLockRequest() { Function> getWriteIdKey = lockComponent -> Pair.of(StringUtils.lowerCase(lockComponent.getDbname()), StringUtils.lowerCase(lockComponent.getTablename())); + String catName = StringUtils.lowerCase(lc.getCatName()); String dbName = StringUtils.lowerCase(lc.getDbname()); String tblName = StringUtils.lowerCase(lc.getTablename()); String partName = TxnUtils.normalizePartitionCase(lc.getPartitionname()); @@ -127,7 +129,7 @@ private List getQueryParametersByLockRequest() { } Long writeId = writeIds.get(writeIdKey); - params.add(new Object[]{lockRequest.getTxnid(), dbName, tblName, partName, opType.getSqlConst(), writeId}); + params.add(new Object[]{lockRequest.getTxnid(), catName, dbName, tblName, partName, opType.getSqlConst(), writeId}); alreadyAddedTables.add(writeIdKey); } return params; @@ -145,6 +147,7 @@ private List getQueryParametersByDynamicPartitions() { for (String partName : dynamicPartitions.getPartitionnames()) { params.add(new Object[]{ dynamicPartitions.getTxnid(), + dynamicPartitions.getCatName().toLowerCase(), dynamicPartitions.getDbname().toLowerCase(), dynamicPartitions.getTablename().toLowerCase(), partName, diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/RemoveCompactionMetricsDataCommand.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/RemoveCompactionMetricsDataCommand.java index a3fd4a41abc2..d759b11a5c71 100644 --- 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/RemoveCompactionMetricsDataCommand.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/RemoveCompactionMetricsDataCommand.java @@ -31,15 +31,18 @@ public class RemoveCompactionMetricsDataCommand implements ParameterizedCommand //language=SQL private static final String DELETE_COMPACTION_METRICS_CACHE = - "DELETE FROM \"COMPACTION_METRICS_CACHE\" WHERE \"CMC_DATABASE\" = :db AND \"CMC_TABLE\" = :table " + + "DELETE FROM \"COMPACTION_METRICS_CACHE\" WHERE \"CMC_CATALOG\" = :cat AND \"CMC_DATABASE\" = :db AND \"CMC_TABLE\" = :table " + "AND \"CMC_METRIC_TYPE\" = :type AND (:partition IS NULL OR \"CMC_PARTITION\" = :partition)"; + private final String catName; private final String dbName; private final String tblName; private final String partitionName; private final CompactionMetricsData.MetricType type; - public RemoveCompactionMetricsDataCommand(String dbName, String tblName, String partitionName, CompactionMetricsData.MetricType type) { + public RemoveCompactionMetricsDataCommand(String catName, String dbName, String tblName, String partitionName, + CompactionMetricsData.MetricType type) { + this.catName = catName; this.dbName = dbName; this.tblName = tblName; this.partitionName = partitionName; @@ -59,6 +62,7 @@ public String getParameterizedQueryString(DatabaseProduct databaseProduct) throw @Override public SqlParameterSource getQueryParameters() { return new MapSqlParameterSource() + .addValue("cat", catName) .addValue("db", dbName) .addValue("table", tblName) .addValue("type", type.toString()) diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/RemoveDuplicateCompleteTxnComponentsCommand.java 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/RemoveDuplicateCompleteTxnComponentsCommand.java index ca481a05c833..d235a07350e5 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/RemoveDuplicateCompleteTxnComponentsCommand.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/RemoveDuplicateCompleteTxnComponentsCommand.java @@ -45,12 +45,13 @@ public String getParameterizedQueryString(DatabaseProduct databaseProduct) throw return "DELETE tc " + "FROM \"COMPLETED_TXN_COMPONENTS\" tc " + "INNER JOIN (" + - " SELECT \"CTC_DATABASE\", \"CTC_TABLE\", \"CTC_PARTITION\"," + + " SELECT \"CTC_CATALOG\", \"CTC_DATABASE\", \"CTC_TABLE\", \"CTC_PARTITION\"," + " MAX(\"CTC_WRITEID\") highestWriteId," + " MAX(CASE WHEN \"CTC_UPDATE_DELETE\" = 'Y' THEN \"CTC_WRITEID\" END) updateWriteId" + " FROM \"COMPLETED_TXN_COMPONENTS\"" + - " GROUP BY \"CTC_DATABASE\", \"CTC_TABLE\", \"CTC_PARTITION\"" + + " GROUP BY \"CTC_CATALOG\", \"CTC_DATABASE\", \"CTC_TABLE\", \"CTC_PARTITION\"" + ") c ON " + + " tc.\"CTC_CATALOG\" = c.\"CTC_CATALOG\" AND " + " tc.\"CTC_DATABASE\" = c.\"CTC_DATABASE\" " + " AND tc.\"CTC_TABLE\" = c.\"CTC_TABLE\"" + " AND (tc.\"CTC_PARTITION\" = c.\"CTC_PARTITION\" OR (tc.\"CTC_PARTITION\" IS NULL AND c.\"CTC_PARTITION\" IS NULL)) " + @@ -67,7 +68,8 @@ public String getParameterizedQueryString(DatabaseProduct databaseProduct) throw "EXISTS (" + " SELECT 1" + " FROM \"COMPLETED_TXN_COMPONENTS\" c" + - " WHERE tc.\"CTC_DATABASE\" = c.\"CTC_DATABASE\"" + + " WHERE tc.\"CTC_CATALOG\" = c.\"CTC_CATALOG\"" + + " AND tc.\"CTC_DATABASE\" = c.\"CTC_DATABASE\"" + " AND tc.\"CTC_TABLE\" = c.\"CTC_TABLE\"" + " AND %s" + " AND (tc.\"CTC_UPDATE_DELETE\" = 'N' OR c.\"CTC_UPDATE_DELETE\" = 'Y')" + diff --git
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/AbortCompactionFunction.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/AbortCompactionFunction.java index bd4fa91961db..1c08601ca53e 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/AbortCompactionFunction.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/AbortCompactionFunction.java @@ -51,7 +51,7 @@ public class AbortCompactionFunction implements TransactionalFunction txnIds; + String catName = rqst.getCatName().toLowerCase(); String dbName = rqst.getDbName().toLowerCase(); String tblName = rqst.getTableName().toLowerCase(); boolean shouldReallocate = rqst.isReallocate(); @@ -124,9 +125,10 @@ public AllocateTableWriteIdsResponse execute(MultiDataSourceJdbcResource jdbcRes // during query recompilation after lock acquistion, it is important to realloc new writeIds // to ensure writeIds are committed in increasing order. jdbcResource.execute(new InClauseBatchCommand<>( - "DELETE FROM \"TXN_TO_WRITE_ID\" WHERE \"T2W_DATABASE\" = :dbName AND \"T2W_TABLE\" = :tableName AND " + + "DELETE FROM \"TXN_TO_WRITE_ID\" WHERE \"T2W_CATALOG\" = :catName AND \"T2W_DATABASE\" = :dbName AND \"T2W_TABLE\" = :tableName AND " + "\"T2W_TXNID\" IN (:txnIds)", new MapSqlParameterSource() + .addValue("catName", catName) .addValue("dbName", dbName) .addValue("tableName", tblName) .addValue("txnIds", txnIds), @@ -137,13 +139,14 @@ public AllocateTableWriteIdsResponse execute(MultiDataSourceJdbcResource jdbcRes // The write id would have been already allocated in case of multi-statement txns where // first write on a table will allocate write id and rest of the writes should re-use it. 
prefix.append("SELECT \"T2W_TXNID\", \"T2W_WRITEID\" FROM \"TXN_TO_WRITE_ID\" WHERE") - .append(" \"T2W_DATABASE\" = ? AND \"T2W_TABLE\" = ? AND "); + .append(" \"T2W_CATALOG\" = ? AND \"T2W_DATABASE\" = ? AND \"T2W_TABLE\" = ? AND "); TxnUtils.buildQueryWithINClause(jdbcResource.getConf(), queries, prefix, suffix, txnIds, "\"T2W_TXNID\"", false, false); for (String query : queries) { try (PreparedStatement pStmt = jdbcResource.getSqlGenerator().prepareStmtWithParameters(dbConn, query, params)) { if (LOG.isDebugEnabled()) { - LOG.debug("Going to execute query <" + query.replace("?", "'{}'") + ">", dbName, tblName); + LOG.debug("Going to execute query <" + query.replace("?", "'{}'") + ">", catName, dbName, + tblName); } try (ResultSet rs = pStmt.executeQuery()) { while (rs.next()) { @@ -182,20 +185,21 @@ public AllocateTableWriteIdsResponse execute(MultiDataSourceJdbcResource jdbcRes // Get the next write id for the given table and update it with new next write id. // This is select for update query which takes a lock if the table entry is already there in NEXT_WRITE_ID String query = jdbcResource.getSqlGenerator().addForUpdateClause( - "SELECT \"NWI_NEXT\" FROM \"NEXT_WRITE_ID\" WHERE \"NWI_DATABASE\" = :dbName AND \"NWI_TABLE\" = :tableName"); + "SELECT \"NWI_NEXT\" FROM \"NEXT_WRITE_ID\" WHERE \"NWI_CATALOG\" = :catName AND \"NWI_DATABASE\" = :dbName AND \"NWI_TABLE\" = :tableName"); if (LOG.isDebugEnabled()) { LOG.debug("Going to execute query {}", query); } Long nextWriteId = jdbcResource.getJdbcTemplate().query(query, new MapSqlParameterSource() + .addValue("catName", catName) .addValue("dbName", dbName) .addValue("tableName", tblName), (ResultSet rs) -> rs.next() ? 
rs.getLong(1) : null); if (nextWriteId == null) { - query = "INSERT INTO \"NEXT_WRITE_ID\" (\"NWI_DATABASE\", \"NWI_TABLE\", \"NWI_NEXT\") " + - "VALUES (:dbName, :tableName, :nextId)"; + query = "INSERT INTO \"NEXT_WRITE_ID\" (\"NWI_CATALOG\", \"NWI_DATABASE\", \"NWI_TABLE\", \"NWI_NEXT\") " + + "VALUES (:catName, :dbName, :tableName, :nextId)"; if (LOG.isDebugEnabled()) { LOG.debug("Going to execute query {}", query); } @@ -206,11 +210,12 @@ public AllocateTableWriteIdsResponse execute(MultiDataSourceJdbcResource jdbcRes writeId = (srcWriteId > 0) ? srcWriteId : 1; jdbcResource.getJdbcTemplate().update(query, new MapSqlParameterSource() + .addValue("catName", catName) .addValue("dbName", dbName) .addValue("tableName", tblName) .addValue("nextId", writeId + numOfWriteIds)); } else { - query = "UPDATE \"NEXT_WRITE_ID\" SET \"NWI_NEXT\" = :nextId WHERE \"NWI_DATABASE\" = :dbName AND \"NWI_TABLE\" = :tableName"; + query = "UPDATE \"NEXT_WRITE_ID\" SET \"NWI_NEXT\" = :nextId WHERE \"NWI_CATALOG\" = :catName AND \"NWI_DATABASE\" = :dbName AND \"NWI_TABLE\" = :tableName"; if (LOG.isDebugEnabled()) { LOG.debug("Going to execute query {}", query); } @@ -219,6 +224,7 @@ public AllocateTableWriteIdsResponse execute(MultiDataSourceJdbcResource jdbcRes // Update the NEXT_WRITE_ID for the given table after incrementing by number of write ids allocated jdbcResource.getJdbcTemplate().update(query, new MapSqlParameterSource() + .addValue("catName", catName) .addValue("dbName", dbName) .addValue("tableName", tblName) .addValue("nextId", writeId + numOfWriteIds)); @@ -228,29 +234,31 @@ public AllocateTableWriteIdsResponse execute(MultiDataSourceJdbcResource jdbcRes // This is possible in case of first incremental repl after bootstrap where concurrent write // and drop table was performed at source during bootstrap dump. 
if ((srcWriteId > 0) && (srcWriteId != nextWriteId)) { - query = "DELETE FROM \"TXN_TO_WRITE_ID\" WHERE \"T2W_DATABASE\" = :dbName AND \"T2W_TABLE\" = :tableName"; + query = "DELETE FROM \"TXN_TO_WRITE_ID\" WHERE \"T2W_CATALOG\" = :catName AND \"T2W_DATABASE\" = :dbName AND \"T2W_TABLE\" = :tableName"; if (LOG.isDebugEnabled()) { LOG.debug("Going to execute query {}", query); } jdbcResource.getJdbcTemplate().update(query, new MapSqlParameterSource() + .addValue("catName", catName) .addValue("dbName", dbName) .addValue("tableName", tblName)); } } // Map the newly allocated write ids against the list of txns which doesn't have pre-allocated write ids - jdbcResource.execute(new AddWriteIdsToTxnToWriteIdCommand(dbName, tblName, writeId, txnIds, txnToWriteIds)); + jdbcResource.execute(new AddWriteIdsToTxnToWriteIdCommand(catName, dbName, tblName, writeId, txnIds, txnToWriteIds)); if (transactionalListeners != null) { MetaStoreListenerNotifier.notifyEventWithDirectSql(transactionalListeners, EventMessage.EventType.ALLOC_WRITE_ID, - new AllocWriteIdEvent(txnToWriteIds, dbName, tblName), + new AllocWriteIdEvent(txnToWriteIds, catName, dbName, tblName), dbConn, jdbcResource.getSqlGenerator()); } - LOG.info("Allocated write ids for dbName={}, tblName={} (txnIds: {})", dbName, tblName, rqst.getTxnIds()); + LOG.info("Allocated write ids for catName={}, dbName={}, tblName={} (txnIds: {})", catName, dbName, tblName, + rqst.getTxnIds()); return new AllocateTableWriteIdsResponse(txnToWriteIds); } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/CheckLockFunction.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/CheckLockFunction.java index acc996130418..ccd88406792e 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/CheckLockFunction.java +++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/CheckLockFunction.java @@ -105,19 +105,20 @@ public LockResponse execute(MultiDataSourceJdbcResource jdbcResource) throws Met } - Object[] args = new Object[writeSet.size() * 4 + 1]; + Object[] args = new Object[writeSet.size() * 5 + 1]; int index = 0; args[index++] = writeSet.get(0).getTxnId(); - StringBuilder sb = new StringBuilder(" \"WS_DATABASE\", \"WS_TABLE\", \"WS_PARTITION\", " + + StringBuilder sb = new StringBuilder(" \"WS_CATALOG\", \"WS_DATABASE\", \"WS_TABLE\", \"WS_PARTITION\", " + "\"WS_TXNID\", \"WS_COMMIT_ID\" " + "FROM \"WRITE_SET\" WHERE WS_COMMIT_ID >= ? AND (");//see commitTxn() for more info on this inequality for (int i = 0; i < writeSet.size(); i++) { - sb.append("(\"WS_DATABASE\" = ? AND \"WS_TABLE\" = ? AND (\"WS_PARTITION\" = ? OR ? IS NULL)"); + sb.append("(\"WS_CATALOG\" = ? AND \"WS_DATABASE\" = ? AND \"WS_TABLE\" = ? AND (\"WS_PARTITION\" = ? OR ? IS NULL)"); if (i < writeSet.size() - 1) { sb.append(" OR "); } sb.append(")"); LockInfo info = writeSet.get(i); + args[index++] = info.getCat(); args[index++] = info.getDb(); args[index++] = info.getTable(); args[index++] = info.getPartition(); @@ -128,6 +129,7 @@ public LockResponse execute(MultiDataSourceJdbcResource jdbcResource) throws Met WriteSetInfo info = null; if (rs.next()) { info = new WriteSetInfo(); + info.catalog = rs.getString("WS_CATALOG"); info.database = rs.getString("WS_DATABASE"); info.table = rs.getString("WS_TABLE"); info.partition = rs.getString("WS_PARTITION"); @@ -333,6 +335,7 @@ private void acquire(MultiDataSourceJdbcResource jdbcResource, List lo } static class WriteSetInfo { + String catalog; String database; String table; String partition; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/CleanupRecordsFunction.java 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/CleanupRecordsFunction.java index 5a6696158a7b..981ca5be351c 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/CleanupRecordsFunction.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/CleanupRecordsFunction.java @@ -53,37 +53,44 @@ public class CleanupRecordsFunction implements TransactionalFunction { new LinkedHashMap, String>() {{ put((hiveObjectType, keepTxnToWriteIdMetaData) -> HIVE_OBJECT_TYPES.contains(hiveObjectType), "DELETE FROM \"TXN_COMPONENTS\" WHERE " + + "\"TC_CATALOG\" = :catName AND " + "\"TC_DATABASE\" = :dbName AND " + "(\"TC_TABLE\" = :tableName OR :tableName IS NULL) AND " + "(\"TC_PARTITION\" = :partName OR :partName IS NULL)"); put((hiveObjectType, keepTxnToWriteIdMetaData) -> HIVE_OBJECT_TYPES.contains(hiveObjectType), "DELETE FROM \"COMPLETED_TXN_COMPONENTS\" WHERE " + + "(\"CTC_CATALOG\" = :catName) AND " + "(\"CTC_DATABASE\" = :dbName) AND " + "(\"CTC_TABLE\" = :tableName OR :tableName IS NULL) AND " + "(\"CTC_PARTITION\" = :partName OR :partName IS NULL)"); put((hiveObjectType, keepTxnToWriteIdMetaData) -> HIVE_OBJECT_TYPES.contains(hiveObjectType), "DELETE FROM \"COMPACTION_QUEUE\" WHERE " + + "\"CQ_CATALOG\" = :catName AND " + "\"CQ_DATABASE\" = :dbName AND " + "(\"CQ_TABLE\" = :tableName OR :tableName IS NULL) AND " + "(\"CQ_PARTITION\" = :partName OR :partName IS NULL) AND " + "(\"CQ_TXN_ID\" != :txnId OR :txnId IS NULL)"); put((hiveObjectType, keepTxnToWriteIdMetaData) -> HIVE_OBJECT_TYPES.contains(hiveObjectType), "DELETE FROM \"COMPLETED_COMPACTIONS\" WHERE " + + "\"CC_CATALOG\" = :catName AND " + "\"CC_DATABASE\" = :dbName AND " + "(\"CC_TABLE\" = :tableName OR :tableName IS NULL) AND " + "(\"CC_PARTITION\" = :partName OR :partName IS NULL)"); put((hiveObjectType, keepTxnToWriteIdMetaData) -> 
HiveObjectType.DATABASE.equals(hiveObjectType) || (HiveObjectType.TABLE.equals(hiveObjectType) && !keepTxnToWriteIdMetaData), "DELETE FROM \"TXN_TO_WRITE_ID\" WHERE " + + "\"T2W_CATALOG\" = :catName AND " + "\"T2W_DATABASE\" = :dbName AND " + "(\"T2W_TABLE\" = :tableName OR :tableName IS NULL)"); put((hiveObjectType, keepTxnToWriteIdMetaData) -> HiveObjectType.DATABASE.equals(hiveObjectType) || HiveObjectType.TABLE.equals(hiveObjectType) && !keepTxnToWriteIdMetaData, "DELETE FROM \"NEXT_WRITE_ID\" WHERE " + + "\"NWI_CATALOG\" = :catName AND " + "\"NWI_DATABASE\" = :dbName AND " + "(\"NWI_TABLE\" = :tableName OR :tableName IS NULL)"); put((hiveObjectType, keepTxnToWriteIdMetaData) -> HIVE_OBJECT_TYPES.contains(hiveObjectType), "DELETE FROM \"COMPACTION_METRICS_CACHE\" WHERE " + + "\"CMC_CATALOG\" = :catName AND " + "\"CMC_DATABASE\" = :dbName AND " + "(\"CMC_TABLE\" = :tableName OR :tableName IS NULL) AND " + "(\"CMC_PARTITION\" = :partName OR :partName IS NULL)"); @@ -121,6 +128,7 @@ public Void execute(MultiDataSourceJdbcResource jdbcResource) throws MetaExcepti return null; } paramSources.add(new MapSqlParameterSource() + .addValue("catName", db.getCatalogName().toLowerCase()) .addValue("dbName", db.getName().toLowerCase()) .addValue("tableName", null, Types.VARCHAR) .addValue("partName", null, Types.VARCHAR) @@ -134,6 +142,7 @@ public Void execute(MultiDataSourceJdbcResource jdbcResource) throws MetaExcepti return null; } paramSources.add(new MapSqlParameterSource() + .addValue("catName", db.getCatalogName().toLowerCase()) .addValue("dbName", table.getDbName().toLowerCase()) .addValue("tableName", table.getTableName().toLowerCase(), Types.VARCHAR) .addValue("partName", null, Types.VARCHAR) @@ -152,6 +161,7 @@ public Void execute(MultiDataSourceJdbcResource jdbcResource) throws MetaExcepti Partition partition = partitionIterator.next(); partVals = partition.getValues(); paramSources.add(new MapSqlParameterSource() + .addValue("catName", 
db.getCatalogName().toLowerCase()) .addValue("dbName", table.getDbName().toLowerCase()) .addValue("tableName", table.getTableName().toLowerCase(), Types.VARCHAR) .addValue("partName", Warehouse.makePartName(partCols, partVals), Types.VARCHAR) diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/CommitTxnFunction.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/CommitTxnFunction.java index 522ce558df5a..025e3d1d7a2d 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/CommitTxnFunction.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/CommitTxnFunction.java @@ -226,9 +226,9 @@ private CommitInfo prepareWriteSetAndCheckConflicts(MultiDataSourceJdbcResource """, OperationType.UPDATE, OperationType.DELETE); String writeSetInsertSql = """ INSERT INTO "WRITE_SET" - ("WS_DATABASE", "WS_TABLE", "WS_PARTITION", "WS_TXNID", "WS_COMMIT_ID", "WS_OPERATION_TYPE") + ("WS_CATALOG", "WS_DATABASE", "WS_TABLE", "WS_PARTITION", "WS_TXNID", "WS_COMMIT_ID", "WS_OPERATION_TYPE") SELECT DISTINCT - "TC_DATABASE", "TC_TABLE", "TC_PARTITION", "TC_TXNID", + "TC_CATALOG", "TC_DATABASE", "TC_TABLE", "TC_PARTITION", "TC_TXNID", :commitId, "TC_OPERATION_TYPE" """; @@ -308,10 +308,10 @@ private WriteSetInfo checkForWriteConflict(MultiDataSourceJdbcResource jdbcResou throws MetaException { String writeConflictQuery = jdbcResource.getSqlGenerator().addLimitClause(1, "\"COMMITTED\".\"WS_TXNID\", \"COMMITTED\".\"WS_COMMIT_ID\", " + - "\"COMMITTED\".\"WS_DATABASE\", \"COMMITTED\".\"WS_TABLE\", \"COMMITTED\".\"WS_PARTITION\", " + + "\"COMMITTED\".\"WS_CATALOG\", \"COMMITTED\".\"WS_DATABASE\", \"COMMITTED\".\"WS_TABLE\", \"COMMITTED\".\"WS_PARTITION\", " + "\"CUR\".\"WS_OPERATION_TYPE\" \"CUR_OP\", \"COMMITTED\".\"WS_OPERATION_TYPE\" \"COMMITTED_OP\" " 
+ "FROM \"WRITE_SET\" \"COMMITTED\" INNER JOIN \"WRITE_SET\" \"CUR\" " + - "ON \"COMMITTED\".\"WS_DATABASE\"=\"CUR\".\"WS_DATABASE\" AND \"COMMITTED\".\"WS_TABLE\"=\"CUR\".\"WS_TABLE\" " + + "ON \"COMMITTED\".\"WS_CATALOG\"=\"CUR\".\"WS_CATALOG\" AND \"COMMITTED\".\"WS_DATABASE\"=\"CUR\".\"WS_DATABASE\" AND \"COMMITTED\".\"WS_TABLE\"=\"CUR\".\"WS_TABLE\" " + //For partitioned table we always track writes at partition level (never at table) //and for non partitioned - always at table level, thus the same table should never //have entries with partition key and w/o @@ -340,7 +340,8 @@ private WriteSetInfo checkForWriteConflict(MultiDataSourceJdbcResource jdbcResou ? new WriteSetInfo( rs.getLong("WS_TXNID"), rs.getLong("WS_COMMIT_ID"), rs.getString("CUR_OP"), rs.getString("COMMITTED_OP"), - rs.getString("WS_DATABASE"), rs.getString("WS_TABLE"), rs.getString("WS_PARTITION") + rs.getString("WS_CATALOG"), rs.getString("WS_DATABASE"), + rs.getString("WS_TABLE"), rs.getString("WS_PARTITION") ) : null); } @@ -386,9 +387,9 @@ private void handleCompletedTxnComponents(MultiDataSourceJdbcResource jdbcResour private void moveTxnComponentsToCompleted(MultiDataSourceJdbcResource jdbcResource, long txnid, char isUpdateDelete) { // Move the record from txn_components into completed_txn_components so that the compactor // knows where to look to compact. 
- String query = "INSERT INTO \"COMPLETED_TXN_COMPONENTS\" (\"CTC_TXNID\", \"CTC_DATABASE\", " + + String query = "INSERT INTO \"COMPLETED_TXN_COMPONENTS\" (\"CTC_TXNID\", \"CTC_CATALOG\", \"CTC_DATABASE\", " + "\"CTC_TABLE\", \"CTC_PARTITION\", \"CTC_WRITEID\", \"CTC_UPDATE_DELETE\") SELECT \"TC_TXNID\", " + - "\"TC_DATABASE\", \"TC_TABLE\", \"TC_PARTITION\", \"TC_WRITEID\", :flag FROM \"TXN_COMPONENTS\" " + + "\"TC_CATALOG\", \"TC_DATABASE\", \"TC_TABLE\", \"TC_PARTITION\", \"TC_WRITEID\", :flag FROM \"TXN_COMPONENTS\" " + "WHERE \"TC_TXNID\" = :txnid AND \"TC_OPERATION_TYPE\" <> :type"; //we only track compactor activity in TXN_COMPONENTS to handle the case where the //compactor txn aborts - so don't bother copying it to COMPLETED_TXN_COMPONENTS @@ -714,7 +715,7 @@ private record DbEntityParam(long id, String key, String value) { private record WriteSetInfo( long txnId, long commitId, String currOpType, String opType, - String database, String table, String partition) { + String catalog, String database, String table, String partition) { } private record CommitInfo( diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/CompactFunction.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/CompactFunction.java index 5331dc9562ed..8063eda7ee12 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/CompactFunction.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/CompactFunction.java @@ -73,7 +73,7 @@ public CompactionResponse execute(MultiDataSourceJdbcResource jdbcResource) thro handle = mutexAPI.acquireLock(TxnStore.MUTEX_KEY.CompactionScheduler.name()); GetValidWriteIdsRequest request = new GetValidWriteIdsRequest( - Collections.singletonList(getFullTableName(rqst.getDbname(), rqst.getTablename()))); + 
Collections.singletonList(getFullTableName(rqst.getCatName(), rqst.getDbname(), rqst.getTablename()))); final ValidCompactorWriteIdList tblValidWriteIds = TxnUtils.createValidCompactWriteIdList( new GetValidWriteIdsFunction(request, openTxnTimeOutMillis).execute(jdbcResource).getTblValidWriteIds().get(0)); @@ -85,11 +85,12 @@ public CompactionResponse execute(MultiDataSourceJdbcResource jdbcResource) thro Pair existing = npJdbcTemplate.query( "SELECT \"CQ_ID\", \"CQ_STATE\" FROM \"COMPACTION_QUEUE\" WHERE (\"CQ_STATE\" IN(:states) OR" + " (\"CQ_STATE\" = :readyForCleaningState AND \"CQ_HIGHEST_WRITE_ID\" = :highestWriteId)) AND" + - " \"CQ_DATABASE\"= :dbName AND \"CQ_TABLE\"= :tableName AND ((:partition is NULL AND \"CQ_PARTITION\" IS NULL) OR \"CQ_PARTITION\" = :partition)", + " \"CQ_CATALOG\"= :catName AND \"CQ_DATABASE\"= :dbName AND \"CQ_TABLE\"= :tableName AND ((:partition is NULL AND \"CQ_PARTITION\" IS NULL) OR \"CQ_PARTITION\" = :partition)", new MapSqlParameterSource() .addValue("states", Arrays.asList(Character.toString(INITIATED_STATE), Character.toString(WORKING_STATE))) .addValue("readyForCleaningState", READY_FOR_CLEANING, Types.VARCHAR) .addValue("highestWriteId", tblValidWriteIds.getHighWatermark()) + .addValue("catName", rqst.getCatName()) .addValue("dbName", rqst.getDbname()) .addValue("tableName", rqst.getTablename()) .addValue("partition", rqst.getPartitionname(), Types.VARCHAR), @@ -101,8 +102,8 @@ public CompactionResponse execute(MultiDataSourceJdbcResource jdbcResource) thro }); if (existing != null) { String state = CompactionState.fromSqlConst(existing.getValue()).toString(); - LOG.info("Ignoring request to compact {}/{}/{} since it is already {} with id={}", rqst.getDbname(), - rqst.getTablename(), rqst.getPartitionname(), state, existing.getKey()); + LOG.info("Ignoring request to compact {}/{}/{}/{} since it is already {} with id={}", rqst.getCatName(), + rqst.getDbname(), rqst.getTablename(), rqst.getPartitionname(), state, 
existing.getKey()); CompactionResponse resp = new CompactionResponse(-1, REFUSED_RESPONSE, false); resp.setErrormessage("Compaction is already scheduled with state='" + state + "' and id=" + existing.getKey()); return resp; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/GetMaterializationInvalidationInfoFunction.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/GetMaterializationInvalidationInfoFunction.java index 24026b232eea..ac068a35caf8 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/GetMaterializationInvalidationInfoFunction.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/GetMaterializationInvalidationInfoFunction.java @@ -22,6 +22,7 @@ import org.apache.hadoop.hive.common.ValidReadTxnList; import org.apache.hadoop.hive.common.ValidTxnWriteIdList; import org.apache.hadoop.hive.common.ValidWriteIdList; +import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.CreationMetadata; import org.apache.hadoop.hive.metastore.api.Materialization; import org.apache.hadoop.hive.metastore.api.MetaException; @@ -122,12 +123,13 @@ public Materialization execute(MultiDataSourceJdbcResource jdbcResource) throws queryCompactionQueue.append("OR"); } String[] names = TxnUtils.getDbTableName(fullyQualifiedName); - assert (names.length == 2); - queryUpdateDelete.append(" (\"CTC_DATABASE\"=? AND \"CTC_TABLE\"=?"); - queryCompletedCompactions.append(" (\"CC_DATABASE\"=? AND \"CC_TABLE\"=?"); - queryCompactionQueue.append(" (\"CQ_DATABASE\"=? AND \"CQ_TABLE\"=?"); + assert (names.length == 3); + queryUpdateDelete.append(" (\"CTC_CATALOG\"=? AND \"CTC_DATABASE\"=? AND \"CTC_TABLE\"=?"); + queryCompletedCompactions.append(" (\"CC_CATALOG\"=? AND \"CC_DATABASE\"=? 
AND \"CC_TABLE\"=?"); + queryCompactionQueue.append(" (\"CQ_CATALOG\"=? AND \"CQ_DATABASE\"=? AND \"CQ_TABLE\"=?"); params.add(names[0]); params.add(names[1]); + params.add(names[2]); queryUpdateDelete.append(" AND (\"CTC_WRITEID\" > " + tblValidWriteIdList.getHighWatermark()); queryCompletedCompactions.append(" AND (\"CC_HIGHEST_WRITE_ID\" > " + tblValidWriteIdList.getHighWatermark()); queryUpdateDelete.append(tblValidWriteIdList.getInvalidWriteIds().length == 0 ? ") " : diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/GetValidWriteIdsForTableFunction.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/GetValidWriteIdsForTableFunction.java index a7f2b64606ba..f3c04bd2e32b 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/GetValidWriteIdsForTableFunction.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/GetValidWriteIdsForTableFunction.java @@ -52,28 +52,30 @@ public GetValidWriteIdsForTableFunction(ValidTxnList validTxnList, String fullTa @Override public TableValidWriteIds execute(MultiDataSourceJdbcResource jdbcResource) throws MetaException { String[] names = TxnUtils.getDbTableName(fullTableName); - assert (names.length == 2); + assert (names.length == 3); // Find the writeId high watermark based upon txnId high watermark. If found, then, need to // traverse through all write Ids less than writeId HWM to make exceptions list. 
// The writeHWM = min(NEXT_WRITE_ID.nwi_next-1, max(TXN_TO_WRITE_ID.t2w_writeid under txnHwm)) long writeIdHwm = Objects.requireNonNull(jdbcResource.getJdbcTemplate().query( "SELECT MAX(\"T2W_WRITEID\") FROM \"TXN_TO_WRITE_ID\" WHERE \"T2W_TXNID\" <= :txnHwm " - + " AND \"T2W_DATABASE\" = :db AND \"T2W_TABLE\" = :table", + + " AND \"T2W_CATALOG\" = :cat AND \"T2W_DATABASE\" = :db AND \"T2W_TABLE\" = :table", new MapSqlParameterSource() .addValue("txnHwm", validTxnList.getHighWatermark()) - .addValue("db", names[0]) - .addValue("table", names[1]), new HwmExtractor())); + .addValue("cat", names[0]) + .addValue("db", names[1]) + .addValue("table", names[2]), new HwmExtractor())); // If no writeIds allocated by txns under txnHwm, then find writeHwm from NEXT_WRITE_ID. if (writeIdHwm <= 0) { // Need to subtract 1 as nwi_next would be the next write id to be allocated but we need highest // allocated write id. writeIdHwm = Objects.requireNonNull(jdbcResource.getJdbcTemplate().query( - "SELECT \"NWI_NEXT\" -1 FROM \"NEXT_WRITE_ID\" WHERE \"NWI_DATABASE\" = :db AND \"NWI_TABLE\" = :table", + "SELECT \"NWI_NEXT\" -1 FROM \"NEXT_WRITE_ID\" WHERE \"NWI_CATALOG\" = :cat AND \"NWI_DATABASE\" = :db AND \"NWI_TABLE\" = :table", new MapSqlParameterSource() - .addValue("db", names[0]) - .addValue("table", names[1]), new HwmExtractor())); + .addValue("cat", names[0]) + .addValue("db", names[1]) + .addValue("table", names[2]), new HwmExtractor())); } final List invalidWriteIdList = new ArrayList<>(); @@ -88,11 +90,12 @@ public TableValidWriteIds execute(MultiDataSourceJdbcResource jdbcResource) thro // using binary search. 
jdbcResource.getJdbcTemplate().query( "SELECT \"T2W_TXNID\", \"T2W_WRITEID\" FROM \"TXN_TO_WRITE_ID\" WHERE \"T2W_WRITEID\" <= :writeIdHwm" + - " AND \"T2W_DATABASE\" = :db AND \"T2W_TABLE\" = :table ORDER BY \"T2W_WRITEID\" ASC", + " AND \"T2W_CATALOG\" = :cat AND \"T2W_DATABASE\" = :db AND \"T2W_TABLE\" = :table ORDER BY \"T2W_WRITEID\" ASC", new MapSqlParameterSource() .addValue("writeIdHwm", writeIdHwm) - .addValue("db", names[0]) - .addValue("table", names[1]), rs -> { + .addValue("cat", names[0]) + .addValue("db", names[1]) + .addValue("table", names[2]), rs -> { while (rs.next()) { long txnId = rs.getLong(1); long writeId = rs.getLong(2); diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/GetValidWriteIdsFunction.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/GetValidWriteIdsFunction.java index f690c47ea073..bfa4a6e0a7e9 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/GetValidWriteIdsFunction.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/GetValidWriteIdsFunction.java @@ -74,13 +74,14 @@ public GetValidWriteIdsResponse execute(MultiDataSourceJdbcResource jdbcResource private long getTxnId(MultiDataSourceJdbcResource jdbcResource, String fullTableName, Long writeId) throws MetaException { String[] names = TxnUtils.getDbTableName(fullTableName); - assert (names.length == 2); + assert (names.length == 3); Long txnId = jdbcResource.getJdbcTemplate().query( - "SELECT \"T2W_TXNID\" FROM \"TXN_TO_WRITE_ID\" WHERE \"T2W_DATABASE\" = :db AND " + "SELECT \"T2W_TXNID\" FROM \"TXN_TO_WRITE_ID\" WHERE \"T2W_CATALOG\" = :cat AND \"T2W_DATABASE\" = :db AND " + "\"T2W_TABLE\" = :table AND \"T2W_WRITEID\" = :writeId", new MapSqlParameterSource() - .addValue("db", names[0]) - .addValue("table", names[1]) + 
.addValue("cat", names[0]) + .addValue("db", names[1]) + .addValue("table", names[2]) .addValue("writeId", writeId), (ResultSet rs) -> { if(rs.next()) { diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/MarkCleanedFunction.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/MarkCleanedFunction.java index 2bd96f53a1da..c46902280a97 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/MarkCleanedFunction.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/MarkCleanedFunction.java @@ -56,7 +56,7 @@ public Void execute(MultiDataSourceJdbcResource jdbcResource) throws MetaExcepti .addValue("succeeded", Character.toString(SUCCEEDED_STATE), Types.CHAR); jdbcTemplate.update(""" INSERT INTO "COMPLETED_COMPACTIONS"( - "CC_ID", "CC_DATABASE", "CC_TABLE", "CC_PARTITION", + "CC_ID", "CC_CATALOG", "CC_DATABASE", "CC_TABLE", "CC_PARTITION", "CC_STATE", "CC_TYPE", "CC_TBLPROPERTIES", "CC_WORKER_ID", "CC_START", "CC_END", "CC_RUN_AS", "CC_HIGHEST_WRITE_ID", "CC_META_INFO", "CC_HADOOP_JOB_ID", "CC_ERROR_MESSAGE", "CC_ENQUEUE_TIME", @@ -64,7 +64,7 @@ public Void execute(MultiDataSourceJdbcResource jdbcResource) throws MetaExcepti "CC_NEXT_TXN_ID", "CC_TXN_ID", "CC_COMMIT_TIME", "CC_POOL_NAME", "CC_NUMBER_OF_BUCKETS", "CC_ORDER_BY") SELECT - "CQ_ID", "CQ_DATABASE", "CQ_TABLE", "CQ_PARTITION", + "CQ_ID", "CQ_CATALOG", "CQ_DATABASE", "CQ_TABLE", "CQ_PARTITION", :succeeded, "CQ_TYPE", "CQ_TBLPROPERTIES", "CQ_WORKER_ID", "CQ_START", %s, "CQ_RUN_AS", "CQ_HIGHEST_WRITE_ID", "CQ_META_INFO", "CQ_HADOOP_JOB_ID", "CQ_ERROR_MESSAGE", "CQ_ENQUEUE_TIME", @@ -90,7 +90,7 @@ public Void execute(MultiDataSourceJdbcResource jdbcResource) throws MetaExcepti // again but only up to the highest write ID include in this compaction job. 
//highestWriteId will be NULL in upgrade scenarios String deleteQuery = """ - DELETE FROM "COMPLETED_TXN_COMPONENTS" WHERE "CTC_DATABASE" = :db AND "CTC_TABLE" = :table + DELETE FROM "COMPLETED_TXN_COMPONENTS" WHERE "CTC_CATALOG" = :cat AND "CTC_DATABASE" = :db AND "CTC_TABLE" = :table """; if (info.partName != null) { deleteQuery += """ @@ -103,6 +103,7 @@ public Void execute(MultiDataSourceJdbcResource jdbcResource) throws MetaExcepti """; } param = new MapSqlParameterSource() + .addValue("cat", info.catName) .addValue("db", info.dbname) .addValue("table", info.tableName) .addValue("writeId", info.highestWriteId); @@ -134,6 +135,7 @@ private void removeTxnComponents(CompactionInfo info, MultiDataSourceJdbcResourc */ MapSqlParameterSource params = new MapSqlParameterSource() .addValue("state", TxnStatus.ABORTED.getSqlConst(), Types.CHAR) + .addValue("cat", info.catName) .addValue("db", info.dbname) .addValue("table", info.tableName) .addValue("partition", info.partName, Types.VARCHAR); @@ -143,7 +145,7 @@ private void removeTxnComponents(CompactionInfo info, MultiDataSourceJdbcResourc WHERE "TC_TXNID" IN ( SELECT "TXN_ID" FROM "TXNS" WHERE "TXN_STATE" = :state ) - AND "TC_DATABASE" = :db AND "TC_TABLE" = :table + AND "TC_CATALOG" = :cat AND "TC_DATABASE" = :db AND "TC_TABLE" = :table AND (:partition is NULL OR "TC_PARTITION" = :partition) AND "TC_WRITEID" %s """; @@ -177,11 +179,12 @@ private void removeCompactionAndAbortRetryEntries(CompactionInfo info, NamedPara """; if (!info.isAbortedTxnCleanup()) { deleteQuery += """ - OR ("CQ_DATABASE" = :db AND "CQ_TABLE" = :table + OR ("CQ_CATALOG" = :cat AND "CQ_DATABASE" = :db AND "CQ_TABLE" = :table AND (:partition is NULL OR "CQ_PARTITION" = :partition) AND "CQ_TYPE" = :type) """; - params.addValue("db", info.dbname) + params.addValue("cat", info.catName) + .addValue("db", info.dbname) .addValue("table", info.tableName) .addValue("partition", info.partName, Types.VARCHAR) .addValue("type", 
Character.toString(TxnStore.ABORT_TXN_CLEANUP_TYPE), Types.CHAR); diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/NextCompactionFunction.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/NextCompactionFunction.java index f39ba8b3d147..b3d5bdf88fa3 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/NextCompactionFunction.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/NextCompactionFunction.java @@ -66,7 +66,7 @@ public NextCompactionFunction(FindNextCompactRequest request, Timestamp currentD @Override public CompactionInfo execute(MultiDataSourceJdbcResource jdbcResource) throws MetaException { StringBuilder sb = new StringBuilder(); - sb.append("SELECT \"CQ_ID\", \"CQ_DATABASE\", \"CQ_TABLE\", \"CQ_PARTITION\", " + + sb.append("SELECT \"CQ_ID\", \"CQ_CATALOG\", \"CQ_DATABASE\", \"CQ_TABLE\", \"CQ_PARTITION\", " + "\"CQ_TYPE\", \"CQ_WORKER_ID\", \"CQ_WORKER_VERSION\", \"CQ_POOL_NAME\", \"CQ_NUMBER_OF_BUCKETS\", \"CQ_ORDER_BY\", " + "\"CQ_TBLPROPERTIES\" FROM \"COMPACTION_QUEUE\" WHERE \"CQ_STATE\" = :state AND "); boolean hasPoolName = StringUtils.isNotBlank(request.getPoolName()); @@ -91,6 +91,7 @@ public CompactionInfo extractData(ResultSet rs) throws SQLException, DataAccessE while (rs.next()) { CompactionInfo info = new CompactionInfo(); info.id = rs.getLong("CQ_ID"); + info.catName = rs.getString("CQ_CATALOG"); info.dbname = rs.getString("CQ_DATABASE"); info.tableName = rs.getString("CQ_TABLE"); info.partName = rs.getString("CQ_PARTITION"); diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/PurgeCompactionHistoryFunction.java 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/PurgeCompactionHistoryFunction.java index 1a5888499643..953c53d033b3 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/PurgeCompactionHistoryFunction.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/PurgeCompactionHistoryFunction.java @@ -57,9 +57,9 @@ public Void execute(MultiDataSourceJdbcResource jdbcResource) throws MetaExcepti int refusedRetention = MetastoreConf.getIntVar(conf, MetastoreConf.ConfVars.COMPACTOR_HISTORY_RETENTION_REFUSED); /* cc_id is monotonically increasing so for any entity sorts in order of compaction history, thus this query groups by entity and withing group sorts most recent first */ - jdbcTemplate.query("SELECT \"CC_ID\", \"CC_DATABASE\", \"CC_TABLE\", \"CC_PARTITION\", " + jdbcTemplate.query("SELECT \"CC_ID\", \"CC_CATALOG\", \"CC_DATABASE\", \"CC_TABLE\", \"CC_PARTITION\", " + "\"CC_STATE\" , \"CC_START\", \"CC_TYPE\" " - + "FROM \"COMPLETED_COMPACTIONS\" ORDER BY \"CC_DATABASE\", \"CC_TABLE\", \"CC_PARTITION\"," + + + "FROM \"COMPLETED_COMPACTIONS\" ORDER BY \"CC_CATALOG\", \"CC_DATABASE\", \"CC_TABLE\", \"CC_PARTITION\"," + "\"CC_ID\" DESC", rs -> { String lastCompactedEntity = null; RetentionCounters counters = null; @@ -69,10 +69,10 @@ public Void execute(MultiDataSourceJdbcResource jdbcResource) throws MetaExcepti */ while (rs.next()) { CompactionInfo ci = new CompactionInfo( - rs.getLong(1), rs.getString(2), rs.getString(3), - rs.getString(4), rs.getString(5).charAt(0)); - ci.start = rs.getLong(6); - ci.type = TxnUtils.dbCompactionType2ThriftType(rs.getString(7).charAt(0)); + rs.getLong(1), rs.getString(2), rs.getString(3), rs.getString(4), + rs.getString(5), rs.getString(6).charAt(0)); + ci.start = rs.getLong(7); + ci.type = TxnUtils.dbCompactionType2ThriftType(rs.getString(8).charAt(0)); if 
(!ci.getFullPartitionName().equals(lastCompactedEntity)) { lastCompactedEntity = ci.getFullPartitionName(); counters = new RetentionCounters(didNotInitiateRetention, failedRetention, succeededRetention, refusedRetention); diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/ReplTableWriteIdStateFunction.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/ReplTableWriteIdStateFunction.java index b47cf7fdcfd9..68c70e824b45 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/ReplTableWriteIdStateFunction.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/ReplTableWriteIdStateFunction.java @@ -61,6 +61,7 @@ public ReplTableWriteIdStateFunction(ReplTblWriteIdStateRequest rqst, TxnStore.M public Void execute(MultiDataSourceJdbcResource jdbcResource) throws MetaException { long openTxnTimeOutMillis = MetastoreConf.getTimeVar(jdbcResource.getConf(), MetastoreConf.ConfVars.TXN_OPENTXN_TIMEOUT, TimeUnit.MILLISECONDS); + String catName = rqst.getCatName().toLowerCase(); String dbName = rqst.getDbName().toLowerCase(); String tblName = rqst.getTableName().toLowerCase(); ValidWriteIdList validWriteIdList = ValidReaderWriteIdList.fromValue(rqst.getValidWriteIdlist()); @@ -69,16 +70,17 @@ public Void execute(MultiDataSourceJdbcResource jdbcResource) throws MetaExcepti // Check if this txn state is already replicated for this given table. If yes, then it is // idempotent case and just return. 
boolean found = Boolean.TRUE.equals(npjdbcTemplate.query( - "SELECT \"NWI_NEXT\" FROM \"NEXT_WRITE_ID\" WHERE \"NWI_DATABASE\" = :dbName AND \"NWI_TABLE\" = :tableName", + "SELECT \"NWI_NEXT\" FROM \"NEXT_WRITE_ID\" WHERE \"NWI_CATALOG\" = :catName AND \"NWI_DATABASE\" = :dbName AND \"NWI_TABLE\" = :tableName", new MapSqlParameterSource() + .addValue("catName", catName) .addValue("dbName", dbName) .addValue("tableName", tblName), ResultSet::next )); if (found) { - LOG.info("Idempotent flow: WriteId state <{}> is already applied for the table: {}.{}", - validWriteIdList, dbName, tblName); + LOG.info("Idempotent flow: WriteId state <{}> is already applied for the table: {}.{}.{}", + validWriteIdList, catName, dbName, tblName); return null; } @@ -98,18 +100,19 @@ public Void execute(MultiDataSourceJdbcResource jdbcResource) throws MetaExcepti // Map each aborted write id with each allocated txn. List params = new ArrayList<>(txnIds.size()); for (int i = 0; i < txnIds.size(); i++) { - params.add(new Object[] {txnIds.get(i), dbName, tblName, abortedWriteIds.get(i)}); + params.add(new Object[] {txnIds.get(i), catName, dbName, tblName, abortedWriteIds.get(i)}); LOG.info("Allocated writeID: {} for txnId: {}", abortedWriteIds.get(i), txnIds.get(i)); } int maxBatchSize = MetastoreConf.getIntVar(jdbcResource.getConf(), MetastoreConf.ConfVars.JDBC_MAX_BATCH_SIZE); jdbcResource.getJdbcTemplate().getJdbcTemplate().batchUpdate( - "INSERT INTO \"TXN_TO_WRITE_ID\" (\"T2W_TXNID\", \"T2W_DATABASE\", \"T2W_TABLE\", \"T2W_WRITEID\") VALUES (?, ?, ?, ?)", + "INSERT INTO \"TXN_TO_WRITE_ID\" (\"T2W_TXNID\", \"T2W_CATALOG\", \"T2W_DATABASE\", \"T2W_TABLE\", \"T2W_WRITEID\") VALUES (?, ?, ?, ?, ?)", params, maxBatchSize, (PreparedStatement ps, Object[] statementParams) -> { ps.setLong(1, (Long)statementParams[0]); ps.setString(2, statementParams[1].toString()); ps.setString(3, statementParams[2].toString()); - ps.setLong(4, (Long)statementParams[3]); + ps.setString(4, 
statementParams[3].toString()); + ps.setLong(5, (Long)statementParams[4]); }); // Abort all the allocated txns so that the mapped write ids are referred as aborted ones. @@ -125,17 +128,19 @@ public Void execute(MultiDataSourceJdbcResource jdbcResource) throws MetaExcepti // First allocation of write id (hwm+1) should add the table to the next_write_id meta table. npjdbcTemplate.update( - "INSERT INTO \"NEXT_WRITE_ID\" (\"NWI_DATABASE\", \"NWI_TABLE\", \"NWI_NEXT\") VALUES (:dbName, :tableName, :nextWriteId)", + "INSERT INTO \"NEXT_WRITE_ID\" (\"NWI_CATALOG\", \"NWI_DATABASE\", \"NWI_TABLE\", \"NWI_NEXT\") VALUES (:catName, :dbName, :tableName, :nextWriteId)", new MapSqlParameterSource() + .addValue("catName", catName) .addValue("dbName", dbName) .addValue("tableName", tblName) .addValue("nextWriteId", nextWriteId)); - LOG.info("WriteId state <{}> is applied for the table: {}.{}", validWriteIdList, dbName, tblName); + LOG.info("WriteId state <{}> is applied for the table: {}.{}.{}", validWriteIdList, catName, dbName, tblName); // Schedule Major compaction on all the partitions/table to clean aborted data if (numAbortedWrites > 0) { CompactionRequest compactRqst = new CompactionRequest(rqst.getDbName(), rqst.getTableName(), CompactionType.MAJOR); + compactRqst.setCatName(rqst.getCatName()); if (rqst.isSetPartNames()) { for (String partName : rqst.getPartNames()) { compactRqst.setPartitionname(partName); diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/TopCompactionMetricsDataPerTypeFunction.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/TopCompactionMetricsDataPerTypeFunction.java index 16cfcfecc679..3b3b039f616b 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/TopCompactionMetricsDataPerTypeFunction.java +++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/TopCompactionMetricsDataPerTypeFunction.java @@ -32,7 +32,7 @@ public class TopCompactionMetricsDataPerTypeFunction implements TransactionalFunction> { private static final String NO_SELECT_COMPACTION_METRICS_CACHE_FOR_TYPE_QUERY = - "\"CMC_DATABASE\", \"CMC_TABLE\", \"CMC_PARTITION\", \"CMC_METRIC_VALUE\", \"CMC_VERSION\" FROM " + + "\"CMC_CATALOG\", \"CMC_DATABASE\", \"CMC_TABLE\", \"CMC_PARTITION\", \"CMC_METRIC_VALUE\", \"CMC_VERSION\" FROM " + "\"COMPACTION_METRICS_CACHE\" WHERE \"CMC_METRIC_TYPE\" = :type ORDER BY \"CMC_METRIC_VALUE\" DESC"; private final int limit; @@ -66,12 +66,13 @@ public CompactionMetricsDataMapper(CompactionMetricsData.MetricType type) { @Override public CompactionMetricsData mapRow(ResultSet rs, int rowNum) throws SQLException { return builder - .dbName(rs.getString(1)) - .tblName(rs.getString(2)) - .partitionName(rs.getString(3)) + .catName(rs.getString(1)) + .dbName(rs.getString(2)) + .tblName(rs.getString(3)) + .partitionName(rs.getString(4)) .metricType(type) - .metricValue(rs.getInt(4)) - .version(rs.getInt(5)) + .metricValue(rs.getInt(5)) + .version(rs.getInt(6)) .build(); } } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/UpdateCompactionMetricsDataFunction.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/UpdateCompactionMetricsDataFunction.java index 1f6ac60a6eb9..91786076166d 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/UpdateCompactionMetricsDataFunction.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/UpdateCompactionMetricsDataFunction.java @@ -39,7 +39,8 @@ public UpdateCompactionMetricsDataFunction(CompactionMetricsData data) { @Override public Boolean 
execute(MultiDataSourceJdbcResource jdbcResource) throws MetaException { CompactionMetricsData prevMetricsData = jdbcResource.execute( - new CompactionMetricsDataHandler(data.getDbName(), data.getTblName(), data.getPartitionName(), data.getMetricType())); + new CompactionMetricsDataHandler(data.getCatName(), data.getDbName(), data.getTblName(), + data.getPartitionName(), data.getMetricType())); boolean updateRes; if (data.getMetricValue() >= data.getThreshold()) { @@ -51,7 +52,7 @@ public Boolean execute(MultiDataSourceJdbcResource jdbcResource) throws MetaExce } else { if (prevMetricsData != null) { int result = jdbcResource.execute(new RemoveCompactionMetricsDataCommand( - data.getDbName(), data.getTblName(), data.getPartitionName(), data.getMetricType())); + data.getCatName(), data.getDbName(), data.getTblName(), data.getPartitionName(), data.getMetricType())); updateRes = result > 0; } else { return true; @@ -63,12 +64,13 @@ public Boolean execute(MultiDataSourceJdbcResource jdbcResource) throws MetaExce private boolean updateCompactionMetricsData(CompactionMetricsData data, CompactionMetricsData prevData, NamedParameterJdbcTemplate jdbcTemplate) { return jdbcTemplate.update( "UPDATE \"COMPACTION_METRICS_CACHE\" SET \"CMC_METRIC_VALUE\" = :value, \"CMC_VERSION\" = :newVersion " + - "WHERE \"CMC_DATABASE\" = :db AND \"CMC_TABLE\" = :table AND \"CMC_METRIC_TYPE\" = :type " + + "WHERE \"CMC_CATALOG\" = :cat AND \"CMC_DATABASE\" = :db AND \"CMC_TABLE\" = :table AND \"CMC_METRIC_TYPE\" = :type " + "AND \"CMC_VERSION\" = :oldVersion AND (:partition IS NULL OR \"CMC_PARTITION\" = :partition)", new MapSqlParameterSource() .addValue("value", data.getMetricValue()) .addValue("oldVersion", prevData.getVersion()) .addValue("newVersion", prevData.getVersion() + 1) + .addValue("cat", data.getCatName()) .addValue("db", data.getDbName()) .addValue("table", data.getTblName()) .addValue("type", data.getMetricType().toString()) @@ -78,9 +80,10 @@ private boolean 
updateCompactionMetricsData(CompactionMetricsData data, Compacti private boolean createCompactionMetricsData(CompactionMetricsData data, NamedParameterJdbcTemplate jdbcTemplate) { return jdbcTemplate.update( "INSERT INTO \"COMPACTION_METRICS_CACHE\" ( " + - "\"CMC_DATABASE\", \"CMC_TABLE\", \"CMC_PARTITION\", \"CMC_METRIC_TYPE\", \"CMC_METRIC_VALUE\", " + - "\"CMC_VERSION\" ) VALUES (:db, :table, :partition, :type, :value, 1)", + "\"CMC_CATALOG\", \"CMC_DATABASE\", \"CMC_TABLE\", \"CMC_PARTITION\", \"CMC_METRIC_TYPE\", \"CMC_METRIC_VALUE\", " + + "\"CMC_VERSION\" ) VALUES (:cat, :db, :table, :partition, :type, :value, 1)", new MapSqlParameterSource() + .addValue("cat", data.getCatName()) .addValue("db", data.getDbName()) .addValue("table", data.getTblName()) .addValue("partition", data.getPartitionName(), Types.VARCHAR) diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/AbortedTxnHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/AbortedTxnHandler.java index f31b2d19d3a6..9b5d1838dfb1 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/AbortedTxnHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/AbortedTxnHandler.java @@ -43,10 +43,10 @@ public class AbortedTxnHandler implements QueryHandler> { //language=SQL @Override public String getParameterizedQueryString(DatabaseProduct databaseProduct) throws MetaException { - return databaseProduct.addLimitClause(fetchSize, " \"TC_DATABASE\", \"TC_TABLE\", \"TC_PARTITION\", " + - "MIN(\"TXN_STARTED\"), COUNT(*) FROM \"TXNS\", \"TXN_COMPONENTS\" " + + return databaseProduct.addLimitClause(fetchSize, " \"TC_CATALOG\", \"TC_DATABASE\", \"TC_TABLE\", " + + "\"TC_PARTITION\", MIN(\"TXN_STARTED\"), COUNT(*) FROM \"TXNS\", \"TXN_COMPONENTS\" " + " WHERE \"TXN_ID\" = 
\"TC_TXNID\" AND \"TXN_STATE\" = :state " + - "GROUP BY \"TC_DATABASE\", \"TC_TABLE\", \"TC_PARTITION\" "); + "GROUP BY \"TC_CATALOG\", \"TC_DATABASE\", \"TC_TABLE\", \"TC_PARTITION\" "); } @Override @@ -59,13 +59,14 @@ public SqlParameterSource getQueryParameters() { public Set extractData(ResultSet rs) throws SQLException, DataAccessException { Set response = new HashSet<>(); while (rs.next()) { - boolean pastTimeThreshold = checkAbortedTimeThreshold && rs.getLong(4) + abortedTimeThreshold < systemTime; - int numAbortedTxns = rs.getInt(5); + boolean pastTimeThreshold = checkAbortedTimeThreshold && rs.getLong(5) + abortedTimeThreshold < systemTime; + int numAbortedTxns = rs.getInt(6); if (numAbortedTxns > abortedThreshold || pastTimeThreshold) { CompactionInfo candidate = new CompactionInfo(); - candidate.dbname = rs.getString(1); - candidate.tableName = rs.getString(2); - candidate.partName = rs.getString(3); + candidate.catName = rs.getString(1); + candidate.dbname = rs.getString(2); + candidate.tableName = rs.getString(3); + candidate.partName = rs.getString(4); candidate.tooManyAborts = numAbortedTxns > abortedThreshold; candidate.hasOldAbort = pastTimeThreshold; response.add(candidate); diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/CheckFailedCompactionsHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/CheckFailedCompactionsHandler.java index b6a16cd09d19..699a98d891ae 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/CheckFailedCompactionsHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/CheckFailedCompactionsHandler.java @@ -49,14 +49,15 @@ public CheckFailedCompactionsHandler(Configuration conf, CompactionInfo ci) { @Override public String getParameterizedQueryString(DatabaseProduct 
databaseProduct) throws MetaException { return "SELECT \"CC_STATE\", \"CC_ENQUEUE_TIME\" FROM \"COMPLETED_COMPACTIONS\" WHERE " + - "\"CC_DATABASE\" = :dbName AND \"CC_TABLE\" = :tableName AND (:partName IS NULL OR \"CC_PARTITION\" = :partName) " + - "AND \"CC_STATE\" != :state ORDER BY \"CC_ID\" DESC"; + "\"CC_CATALOG\" = :catName AND \"CC_DATABASE\" = :dbName AND \"CC_TABLE\" = :tableName " + + "AND (:partName IS NULL OR \"CC_PARTITION\" = :partName) AND \"CC_STATE\" != :state ORDER BY \"CC_ID\" DESC"; } @Override public SqlParameterSource getQueryParameters() { return new MapSqlParameterSource() .addValue("state", Character.toString(DID_NOT_INITIATE), Types.CHAR) + .addValue("catName", ci.catName) .addValue("dbName", ci.dbname) .addValue("tableName", ci.tableName) .addValue("partName", ci.partName, Types.VARCHAR); diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/CompactionCandidateHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/CompactionCandidateHandler.java index 98158783277e..ab8f8b608bb1 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/CompactionCandidateHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/CompactionCandidateHandler.java @@ -43,18 +43,18 @@ public class CompactionCandidateHandler implements QueryHandler 0 ? 
"LEFT JOIN ( " + " SELECT \"C1\".* FROM \"COMPLETED_COMPACTIONS\" \"C1\" " + " INNER JOIN ( " + " SELECT MAX(\"CC_ID\") \"CC_ID\" FROM \"COMPLETED_COMPACTIONS\" " + - " GROUP BY \"CC_DATABASE\", \"CC_TABLE\", \"CC_PARTITION\"" + + " GROUP BY \"CC_CATALOG\", \"CC_DATABASE\", \"CC_TABLE\", \"CC_PARTITION\"" + " ) \"C2\" " + " ON \"C1\".\"CC_ID\" = \"C2\".\"CC_ID\" " + " WHERE \"C1\".\"CC_STATE\" IN (:didNotInit, :failed)" + ") \"C\" " + - "ON \"TC\".\"CTC_DATABASE\" = \"C\".\"CC_DATABASE\" AND \"TC\".\"CTC_TABLE\" = \"C\".\"CC_TABLE\" " + + "ON \"TC\".\"CTC_CATALOG\" = \"C\".\"CC_CATALOG\" AND \"TC\".\"CTC_DATABASE\" = \"C\".\"CC_DATABASE\" AND \"TC\".\"CTC_TABLE\" = \"C\".\"CC_TABLE\" " + " AND (\"TC\".\"CTC_PARTITION\" = \"C\".\"CC_PARTITION\" OR (\"TC\".\"CTC_PARTITION\" IS NULL AND \"C\".\"CC_PARTITION\" IS NULL)) " + "WHERE \"C\".\"CC_ID\" IS NOT NULL OR " + databaseProduct.isWithinCheckInterval("\"TC\".\"CTC_TIMESTAMP\"", checkInterval) : "")); } @@ -71,9 +71,10 @@ public Set extractData(ResultSet rs) throws SQLException, DataAc Set response = new HashSet<>(); while (rs.next()) { CompactionInfo candidate = new CompactionInfo(); - candidate.dbname = rs.getString(1); - candidate.tableName = rs.getString(2); - candidate.partName = rs.getString(3); + candidate.catName = rs.getString(1); + candidate.dbname = rs.getString(2); + candidate.tableName = rs.getString(3); + candidate.partName = rs.getString(4); response.add(candidate); } return response; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/CompactionMetricsDataHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/CompactionMetricsDataHandler.java index 1e112f4db1e6..07afbf271173 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/CompactionMetricsDataHandler.java +++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/CompactionMetricsDataHandler.java @@ -34,15 +34,18 @@ public class CompactionMetricsDataHandler implements QueryHandler> { private static final String TABLE_SELECT = "SELECT \"COLUMN_NAME\" FROM \"TAB_COL_STATS\" " + "INNER JOIN \"TBLS\" ON \"TAB_COL_STATS\".\"TBL_ID\" = \"TBLS\".\"TBL_ID\" " + "INNER JOIN \"DBS\" ON \"TBLS\".\"DB_ID\" = \"DBS\".\"DB_ID\" " + - "WHERE \"DBS\".\"NAME\" = :dbName AND \"TBLS\".\"TBL_NAME\" = :tableName"; + "WHERE \"DBS\".\"CTLG_NAME\" = :catName AND \"DBS\".\"NAME\" = :dbName AND \"TBLS\".\"TBL_NAME\" = :tableName"; //language=SQL private static final String PARTITION_SELECT = "SELECT \"COLUMN_NAME\" FROM \"PART_COL_STATS\" " + "INNER JOIN \"PARTITIONS\" ON \"PART_COL_STATS\".\"PART_ID\" = \"PARTITIONS\".\"PART_ID\" " + "INNER JOIN \"TBLS\" ON \"PARTITIONS\".\"TBL_ID\" = \"TBLS\".\"TBL_ID\" " + "INNER JOIN \"DBS\" ON \"TBLS\".\"DB_ID\" = \"DBS\".\"DB_ID\" " + - "WHERE \"DBS\".\"NAME\" = :dbName AND \"TBLS\".\"TBL_NAME\" = :tableName AND \"PARTITIONS\".\"PART_NAME\" = :partName"; + "WHERE \"DBS\".\"CTLG_NAME\" = :catName AND \"DBS\".\"NAME\" = :dbName AND \"TBLS\".\"TBL_NAME\" = :tableName AND \"PARTITIONS\".\"PART_NAME\" = :partName"; private final CompactionInfo ci; @@ -61,6 +61,7 @@ public String getParameterizedQueryString(DatabaseProduct databaseProduct) { @Override public SqlParameterSource getQueryParameters() { MapSqlParameterSource params = new MapSqlParameterSource() + .addValue("catName", ci.catName) .addValue("dbName", ci.dbname) .addValue("tableName", ci.tableName); if (ci.partName != null) { diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/GetCompactionInfoHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/GetCompactionInfoHandler.java index 7da6d9d377e0..9ad55168dcf8 100644 --- 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/GetCompactionInfoHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/GetCompactionInfoHandler.java @@ -36,7 +36,7 @@ public class GetCompactionInfoHandler implements QueryHandler { // language=SQL public static final String SELECT_BY_ID = - "SELECT \"CQ_ID\", \"CQ_DATABASE\", \"CQ_TABLE\", \"CQ_PARTITION\", " + "SELECT \"CQ_ID\", \"CQ_CATALOG\", \"CQ_DATABASE\", \"CQ_TABLE\", \"CQ_PARTITION\", " + "\"CQ_STATE\", \"CQ_TYPE\", \"CQ_TBLPROPERTIES\", \"CQ_WORKER_ID\", \"CQ_START\", \"CQ_RUN_AS\", " + "\"CQ_HIGHEST_WRITE_ID\", \"CQ_META_INFO\", \"CQ_HADOOP_JOB_ID\", \"CQ_ERROR_MESSAGE\", " + "\"CQ_ENQUEUE_TIME\", \"CQ_WORKER_VERSION\", \"CQ_INITIATOR_ID\", \"CQ_INITIATOR_VERSION\", " @@ -45,7 +45,7 @@ public class GetCompactionInfoHandler implements QueryHandler { // language=SQL public static final String SELECT_BY_TXN_ID = - "SELECT \"CQ_ID\", \"CQ_DATABASE\", \"CQ_TABLE\", \"CQ_PARTITION\", " + "SELECT \"CQ_ID\", \"CQ_CATALOG\", \"CQ_DATABASE\", \"CQ_TABLE\", \"CQ_PARTITION\", " + "\"CQ_STATE\", \"CQ_TYPE\", \"CQ_TBLPROPERTIES\", \"CQ_WORKER_ID\", \"CQ_START\", \"CQ_RUN_AS\", " + "\"CQ_HIGHEST_WRITE_ID\", \"CQ_META_INFO\", \"CQ_HADOOP_JOB_ID\", \"CQ_ERROR_MESSAGE\", " + "\"CQ_ENQUEUE_TIME\", \"CQ_WORKER_VERSION\", \"CQ_INITIATOR_ID\", \"CQ_INITIATOR_VERSION\", " @@ -72,6 +72,7 @@ public CompactionInfo extractData(ResultSet rs) throws SQLException, DataAccessE if (rs.next()) { CompactionInfo fullCi = new CompactionInfo(); fullCi.id = rs.getLong("CQ_ID"); + fullCi.catName = rs.getString("CQ_CATALOG"); fullCi.dbname = rs.getString("CQ_DATABASE"); fullCi.tableName = rs.getString("CQ_TABLE"); fullCi.partName = rs.getString("CQ_PARTITION"); diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/GetLatestCommittedCompactionInfoHandler.java 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/GetLatestCommittedCompactionInfoHandler.java index 31a0d5da522f..7e83b5b6fd17 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/GetLatestCommittedCompactionInfoHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/GetLatestCommittedCompactionInfoHandler.java @@ -49,14 +49,14 @@ public GetLatestCommittedCompactionInfoHandler(GetLatestCommittedCompactionInfoR public String getParameterizedQueryString(DatabaseProduct databaseProduct) throws MetaException { return "SELECT * FROM ( " + - "SELECT \"CC_ID\", \"CC_DATABASE\", \"CC_TABLE\", \"CC_PARTITION\", \"CC_TYPE\" FROM \"COMPLETED_COMPACTIONS\" " + + "SELECT \"CC_ID\", \"CC_CATALOG\", \"CC_DATABASE\", \"CC_TABLE\", \"CC_PARTITION\", \"CC_TYPE\" FROM \"COMPLETED_COMPACTIONS\" " + " WHERE \"CC_STATE\" = :succeeded " + "UNION ALL " + - "SELECT \"CQ_ID\" AS \"CC_ID\", \"CQ_DATABASE\" AS \"CC_DATABASE\", \"CQ_TABLE\" AS \"CC_TABLE\", " + + "SELECT \"CQ_ID\" AS \"CC_ID\", \"CQ_CATALOG\" AS \"CC_CATALOG\", \"CQ_DATABASE\" AS \"CC_DATABASE\", \"CQ_TABLE\" AS \"CC_TABLE\", " + "\"CQ_PARTITION\" AS \"CC_PARTITION\", \"CQ_TYPE\" AS \"CC_TYPE\" FROM \"COMPACTION_QUEUE\" " + " WHERE \"CQ_STATE\" = :readyForCleaning) AS compactions " + "WHERE " + - "\"CC_DATABASE\" = :dbName AND \"CC_TABLE\" = :tableName AND " + + "\"CC_CATALOG\" = :catName AND \"CC_DATABASE\" = :dbName AND \"CC_TABLE\" = :tableName AND " + "(\"CC_PARTITION\" IN (:partitionNames) OR :emptyPartitionNames = TRUE) AND " + "(\"CC_ID\" > :id OR :id IS NULL) " + "ORDER BY \"CC_ID\" DESC"; @@ -67,6 +67,7 @@ public SqlParameterSource getQueryParameters() { return new MapSqlParameterSource() .addValue("succeeded", CompactionState.SUCCEEDED.getSqlConst(), Types.CHAR) .addValue("readyForCleaning", 
CompactionState.READY_FOR_CLEANING.getSqlConst(), Types.CHAR) + .addValue("catName", rqst.getCatName()) .addValue("dbName", rqst.getDbname()) .addValue("tableName", rqst.getTablename()) .addValue("emptyPartitionNames", CollectionUtils.isEmpty(rqst.getPartitionnames()), Types.BOOLEAN) @@ -81,13 +82,14 @@ public GetLatestCommittedCompactionInfoResponse extractData(ResultSet rs) throws while (rs.next()) { CompactionInfoStruct lci = new CompactionInfoStruct(); lci.setId(rs.getLong(1)); - lci.setDbname(rs.getString(2)); - lci.setTablename(rs.getString(3)); - String partition = rs.getString(4); + lci.setCatName(rs.getString(2)); + lci.setDbname(rs.getString(3)); + lci.setTablename(rs.getString(4)); + String partition = rs.getString(5); if (!rs.wasNull()) { lci.setPartitionname(partition); } - lci.setType(TxnUtils.dbCompactionType2ThriftType(rs.getString(5).charAt(0))); + lci.setType(TxnUtils.dbCompactionType2ThriftType(rs.getString(6).charAt(0))); // Only put the latest record of each partition into response if (!partitionSet.contains(partition)) { response.addToCompactions(lci); diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/GetMaxAllocatedTableWriteIdHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/GetMaxAllocatedTableWriteIdHandler.java index 5fcfd8d974a5..a2c3f2ffb93a 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/GetMaxAllocatedTableWriteIdHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/GetMaxAllocatedTableWriteIdHandler.java @@ -39,12 +39,13 @@ public GetMaxAllocatedTableWriteIdHandler(MaxAllocatedTableWriteIdRequest rqst) @Override public String getParameterizedQueryString(DatabaseProduct databaseProduct) throws MetaException { - return "SELECT \"NWI_NEXT\" FROM \"NEXT_WRITE_ID\" WHERE 
\"NWI_DATABASE\" = :dbName AND \"NWI_TABLE\" = :tableName"; + return "SELECT \"NWI_NEXT\" FROM \"NEXT_WRITE_ID\" WHERE \"NWI_CATALOG\" = :catName AND \"NWI_DATABASE\" = :dbName AND \"NWI_TABLE\" = :tableName"; } @Override public SqlParameterSource getQueryParameters() { return new MapSqlParameterSource() + .addValue("catName", rqst.getCatName()) .addValue("dbName", rqst.getDbName()) .addValue("tableName", rqst.getTableName()); } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/GetWriteIdsHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/GetWriteIdsHandler.java index 8ae2023a89a3..35a8ad08a439 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/GetWriteIdsHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/GetWriteIdsHandler.java @@ -31,11 +31,11 @@ import java.util.HashMap; import java.util.Map; -public class GetWriteIdsHandler implements QueryHandler, Long>> { +public class GetWriteIdsHandler implements QueryHandler>, Long>> { //language=SQL - private static final String SELECT_WRITE_ID_QUERY = "SELECT \"T2W_DATABASE\", \"T2W_TABLE\", \"T2W_WRITEID\" " + + private static final String SELECT_WRITE_ID_QUERY = "SELECT \"T2W_CATALOG\", \"T2W_DATABASE\", \"T2W_TABLE\", \"T2W_WRITEID\" " + "FROM \"TXN_TO_WRITE_ID\" WHERE \"T2W_TXNID\" = :txnId "; private final LockRequest lockRequest; @@ -49,7 +49,8 @@ public String getParameterizedQueryString(DatabaseProduct databaseProduct) throw StringBuilder sb = new StringBuilder(SELECT_WRITE_ID_QUERY); sb.append(" AND ("); for(int i = 0; i< lockRequest.getComponentSize(); i++) { - sb.append("(\"T2W_DATABASE\" = ").append(":db").append(i) + sb.append("(\"T2W_CATALOG\" = ").append(":cat").append(i) + .append("AND \"T2W_DATABASE\" = ").append(":db").append(i) .append(" AND 
\"T2W_TABLE\" = :table").append(i).append(")"); if(i < lockRequest.getComponentSize() - 1) { sb.append(" OR "); @@ -64,6 +65,7 @@ public SqlParameterSource getQueryParameters() { MapSqlParameterSource params = new MapSqlParameterSource() .addValue("txnId", lockRequest.getTxnid()); for(int i = 0; i< lockRequest.getComponentSize(); i++) { + params.addValue("cat" + i, lockRequest.getComponent().get(i).getCatName()); params.addValue("db" + i, lockRequest.getComponent().get(i).getDbname()); params.addValue("table" + i, lockRequest.getComponent().get(i).getTablename()); } @@ -71,10 +73,10 @@ public SqlParameterSource getQueryParameters() { } @Override - public Map, Long> extractData(ResultSet rs) throws SQLException, DataAccessException { - Map, Long> writeIds = new HashMap<>(); + public Map>, Long> extractData(ResultSet rs) throws SQLException, DataAccessException { + Map>, Long> writeIds = new HashMap<>(); while (rs.next()) { - writeIds.put(Pair.of(rs.getString("T2W_DATABASE"), rs.getString("T2W_TABLE")), rs.getLong("T2W_WRITEID")); + writeIds.put(Pair.of(rs.getString("T2W_CATALOG"), Pair.of(rs.getString("T2W_DATABASE"), rs.getString("T2W_TABLE"))), rs.getLong("T2W_WRITEID")); } return writeIds; } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/GetWriteIdsMappingForTxnIdsHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/GetWriteIdsMappingForTxnIdsHandler.java index 4d680c981ad7..4089b1222f19 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/GetWriteIdsMappingForTxnIdsHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/GetWriteIdsMappingForTxnIdsHandler.java @@ -42,7 +42,7 @@ public GetWriteIdsMappingForTxnIdsHandler(Set txnIds) { @Override public String getParameterizedQueryString(DatabaseProduct 
databaseProduct) throws MetaException { - return "SELECT DISTINCT \"T2W_TXNID\", \"T2W_DATABASE\", \"T2W_WRITEID\" FROM \"TXN_TO_WRITE_ID\" \"COMMITTED\" WHERE \"T2W_TXNID\" IN (:txnIds)"; + return "SELECT DISTINCT \"T2W_TXNID\", \"T2W_CATALOG\", \"T2W_DATABASE\", \"T2W_WRITEID\" FROM \"TXN_TO_WRITE_ID\" \"COMMITTED\" WHERE \"T2W_TXNID\" IN (:txnIds)"; } @Override @@ -54,7 +54,7 @@ public SqlParameterSource getQueryParameters() { public List extractData(ResultSet rs) throws SQLException, DataAccessException { List dbsUpdated = new ArrayList<>(); while (rs.next()) { - TxnWriteDetails entry = new TxnWriteDetails(rs.getLong(1), rs.getString(2), rs.getLong(3)); + TxnWriteDetails entry = new TxnWriteDetails(rs.getLong(1), rs.getString(2), rs.getString(3), rs.getLong(4)); dbsUpdated.add(entry); } return dbsUpdated; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/LatestTxnIdInConflictHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/LatestTxnIdInConflictHandler.java index ccbe0c512da4..13308eb23988 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/LatestTxnIdInConflictHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/LatestTxnIdInConflictHandler.java @@ -51,12 +51,13 @@ public String getParameterizedQueryString(DatabaseProduct databaseProduct) throw " SELECT MAX(\"COMMITTED\".\"WS_TXNID\")" + " FROM \"WRITE_SET\" \"COMMITTED\"" + " INNER JOIN (" + - " SELECT DISTINCT \"TC_DATABASE\", \"TC_TABLE\", \"TC_PARTITION\", \"TC_TXNID\"" + + " SELECT DISTINCT \"TC_CATALOG\", \"TC_DATABASE\", \"TC_TABLE\", \"TC_PARTITION\", \"TC_TXNID\"" + " FROM \"TXN_COMPONENTS\"" + " WHERE \"TC_TXNID\" = :txnId" + " AND \"TC_OPERATION_TYPE\" IN (:types)" + " ) \"CUR\"" + - " ON \"COMMITTED\".\"WS_DATABASE\" = 
\"CUR\".\"TC_DATABASE\"" + + " ON \"COMMITTED\".\"WS_CATALOG\" = \"CUR\".\"TC_CATALOG\"" + + " AND \"COMMITTED\".\"WS_DATABASE\" = \"CUR\".\"TC_DATABASE\"" + " AND \"COMMITTED\".\"WS_TABLE\" = \"CUR\".\"TC_TABLE\"" + // For partitioned table we always track writes at partition level (never at table) // and for non partitioned - always at table level, thus the same table should never diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/ReadyToCleanAbortHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/ReadyToCleanAbortHandler.java index ae88852ebef9..1857d82bd88b 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/ReadyToCleanAbortHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/ReadyToCleanAbortHandler.java @@ -44,35 +44,37 @@ public class ReadyToCleanAbortHandler implements QueryHandler extractData(ResultSet rs) throws DataAccessException int numAbortedTxns = rs.getInt("ABORTED_TXN_COUNT"); if (numAbortedTxns > abortedThreshold || pastTimeThreshold) { CompactionInfo info = new CompactionInfo(); + info.catName = rs.getString("CAT"); info.dbname = rs.getString("DB"); info.tableName = rs.getString("TBL"); info.partName = rs.getString("PART"); diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/ReadyToCleanHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/ReadyToCleanHandler.java index 1e1ea51420c9..a3376b99ac21 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/ReadyToCleanHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/ReadyToCleanHandler.java 
@@ -60,7 +60,7 @@ public String getParameterizedQueryString(DatabaseProduct databaseProduct) throw " AND (\"CQ_COMMIT_TIME\" < (" + getEpochFn(databaseProduct) + " - \"CQ_RETRY_RETENTION\" - " + retentionTime + ") OR \"CQ_COMMIT_TIME\" IS NULL)"; String queryStr = - " \"CQ_ID\", \"cq1\".\"CQ_DATABASE\", \"cq1\".\"CQ_TABLE\", \"cq1\".\"CQ_PARTITION\"," + + " \"CQ_ID\", \"cq1\".\"CQ_CATALOG\", \"cq1\".\"CQ_DATABASE\", \"cq1\".\"CQ_TABLE\", \"cq1\".\"CQ_PARTITION\"," + " \"CQ_TYPE\", \"CQ_RUN_AS\", \"CQ_HIGHEST_WRITE_ID\", \"CQ_TBLPROPERTIES\", \"CQ_RETRY_RETENTION\", " + " \"CQ_NEXT_TXN_ID\""; if (TxnHandler.ConfVars.useMinHistoryWriteId()) { @@ -69,11 +69,12 @@ public String getParameterizedQueryString(DatabaseProduct databaseProduct) throw queryStr += " FROM \"COMPACTION_QUEUE\" \"cq1\" " + "INNER JOIN (" + - " SELECT MIN(\"CQ_HIGHEST_WRITE_ID\") \"MIN_WRITE_ID_HWM\", \"CQ_DATABASE\", \"CQ_TABLE\", \"CQ_PARTITION\"" + + " SELECT MIN(\"CQ_HIGHEST_WRITE_ID\") \"MIN_WRITE_ID_HWM\", \"CQ_CATALOG\", \"CQ_DATABASE\", \"CQ_TABLE\", \"CQ_PARTITION\"" + " FROM \"COMPACTION_QUEUE\"" + whereClause + - " GROUP BY \"CQ_DATABASE\", \"CQ_TABLE\", \"CQ_PARTITION\") \"cq2\" " + - "ON \"cq1\".\"CQ_DATABASE\" = \"cq2\".\"CQ_DATABASE\""+ + " GROUP BY \"CQ_CATALOG\", \"CQ_DATABASE\", \"CQ_TABLE\", \"CQ_PARTITION\") \"cq2\" " + + "ON \"cq1\".\"CQ_CATALOG\" = \"cq2\".\"CQ_CATALOG\""+ + " AND \"cq1\".\"CQ_DATABASE\" = \"cq2\".\"CQ_DATABASE\""+ " AND \"cq1\".\"CQ_TABLE\" = \"cq2\".\"CQ_TABLE\""+ " AND (\"cq1\".\"CQ_PARTITION\" = \"cq2\".\"CQ_PARTITION\"" + " OR \"cq1\".\"CQ_PARTITION\" IS NULL AND \"cq2\".\"CQ_PARTITION\" IS NULL)" + @@ -82,10 +83,11 @@ public String getParameterizedQueryString(DatabaseProduct databaseProduct) throw if (TxnHandler.ConfVars.useMinHistoryWriteId()) { queryStr += "LEFT JOIN (" + - " SELECT MIN(\"MH_WRITEID\") \"MIN_OPEN_WRITE_ID\", \"MH_DATABASE\", \"MH_TABLE\"" + + " SELECT MIN(\"MH_WRITEID\") \"MIN_OPEN_WRITE_ID\", \"MH_CATALOG\", \"MH_DATABASE\", 
\"MH_TABLE\"" + " FROM \"MIN_HISTORY_WRITE_ID\"" + - " GROUP BY \"MH_DATABASE\", \"MH_TABLE\") \"hwm\" " + - "ON \"cq1\".\"CQ_DATABASE\" = \"hwm\".\"MH_DATABASE\"" + + " GROUP BY \"MH_CATALOG\", \"MH_DATABASE\", \"MH_TABLE\") \"hwm\" " + + "ON \"cq1\".\"CQ_CATALOG\" = \"hwm\".\"MH_CATALOG\"" + + " AND \"cq1\".\"CQ_DATABASE\" = \"hwm\".\"MH_DATABASE\"" + " AND \"cq1\".\"CQ_TABLE\" = \"hwm\".\"MH_TABLE\""; whereClause += " AND (\"CQ_HIGHEST_WRITE_ID\" < \"MIN_OPEN_WRITE_ID\"-1 OR \"MIN_OPEN_WRITE_ID\" IS NULL)"; @@ -110,17 +112,18 @@ public List extractData(ResultSet rs) throws SQLException, DataA while (rs.next()) { CompactionInfo info = new CompactionInfo(); info.id = rs.getLong(1); - info.dbname = rs.getString(2); - info.tableName = rs.getString(3); - info.partName = rs.getString(4); - info.type = TxnUtils.dbCompactionType2ThriftType(rs.getString(5).charAt(0)); - info.runAs = rs.getString(6); - info.highestWriteId = rs.getLong(7); - info.properties = rs.getString(8); - info.retryRetention = rs.getInt(9); - info.nextTxnId = rs.getLong(10); + info.catName = rs.getString(2); + info.dbname = rs.getString(3); + info.tableName = rs.getString(4); + info.partName = rs.getString(5); + info.type = TxnUtils.dbCompactionType2ThriftType(rs.getString(6).charAt(0)); + info.runAs = rs.getString(7); + info.highestWriteId = rs.getLong(8); + info.properties = rs.getString(9); + info.retryRetention = rs.getInt(10); + info.nextTxnId = rs.getLong(11); if (TxnHandler.ConfVars.useMinHistoryWriteId()) { - long value = rs.getLong(11); + long value = rs.getLong(12); info.minOpenWriteId = !rs.wasNull() ? 
value : Long.MAX_VALUE; } infos.add(info); diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/ShowCompactHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/ShowCompactHandler.java index d8d2c8da473b..d98733ca3c28 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/ShowCompactHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/ShowCompactHandler.java @@ -46,7 +46,7 @@ public class ShowCompactHandler implements QueryHandler { //language=SQL private static final String SHOW_COMPACTION_QUERY = " XX.* FROM ( SELECT " + - " \"CQ_DATABASE\" AS \"CC_DATABASE\", \"CQ_TABLE\" AS \"CC_TABLE\", \"CQ_PARTITION\" AS \"CC_PARTITION\", " + + " \"CQ_CATALOG\" AS \"CC_CATALOG\", \"CQ_DATABASE\" AS \"CC_DATABASE\", \"CQ_TABLE\" AS \"CC_TABLE\", \"CQ_PARTITION\" AS \"CC_PARTITION\", " + " \"CQ_STATE\" AS \"CC_STATE\", \"CQ_TYPE\" AS \"CC_TYPE\", \"CQ_WORKER_ID\" AS \"CC_WORKER_ID\", " + " \"CQ_START\" AS \"CC_START\", -1 \"CC_END\", \"CQ_RUN_AS\" AS \"CC_RUN_AS\", " + " \"CQ_HADOOP_JOB_ID\" AS \"CC_HADOOP_JOB_ID\", \"CQ_ID\" AS \"CC_ID\", \"CQ_ERROR_MESSAGE\" AS \"CC_ERROR_MESSAGE\", " + @@ -59,7 +59,7 @@ public class ShowCompactHandler implements QueryHandler { " \"COMPACTION_QUEUE\" " + "UNION ALL " + "SELECT " + - " \"CC_DATABASE\" , \"CC_TABLE\", \"CC_PARTITION\", \"CC_STATE\", \"CC_TYPE\", \"CC_WORKER_ID\", " + + " \"CC_CATALOG\", \"CC_DATABASE\", \"CC_TABLE\", \"CC_PARTITION\", \"CC_STATE\", \"CC_TYPE\", \"CC_WORKER_ID\", " + " \"CC_START\", \"CC_END\", \"CC_RUN_AS\", \"CC_HADOOP_JOB_ID\", \"CC_ID\", \"CC_ERROR_MESSAGE\", " + " \"CC_ENQUEUE_TIME\", \"CC_WORKER_VERSION\", \"CC_INITIATOR_ID\", \"CC_INITIATOR_VERSION\", " + " -1 , \"CC_POOL_NAME\", \"CC_TXN_ID\", \"CC_NEXT_TXN_ID\", \"CC_COMMIT_TIME\", " + @@ -68,6 +68,7 @@ 
public class ShowCompactHandler implements QueryHandler { " \"COMPLETED_COMPACTIONS\" ) XX " + "WHERE " + " (\"CC_ID\" = :id OR :id IS NULL) AND " + + " (\"CC_CATALOG\" = :catName OR :catName IS NULL) AND " + " (\"CC_DATABASE\" = :dbName OR :dbName IS NULL) AND " + " (\"CC_TABLE\" = :tableName OR :tableName IS NULL) AND " + " (\"CC_PARTITION\" = :partition OR :partition IS NULL) AND " + @@ -113,6 +114,7 @@ public SqlParameterSource getQueryParameters() { try { return new MapSqlParameterSource() .addValue("id", id, Types.BIGINT) + .addValue("catName", request.getCatName(), Types.VARCHAR) .addValue("dbName", request.getDbName(), Types.VARCHAR) .addValue("tableName", request.getTbName(), Types.VARCHAR) .addValue("partition", request.getPartName(), Types.VARCHAR) @@ -129,49 +131,50 @@ public ShowCompactResponse extractData(ResultSet rs) throws SQLException, DataAc ShowCompactResponse response = new ShowCompactResponse(new ArrayList<>()); while (rs.next()) { ShowCompactResponseElement e = new ShowCompactResponseElement(); - e.setDbname(rs.getString(1)); - e.setTablename(rs.getString(2)); - e.setPartitionname(rs.getString(3)); - e.setState(CompactionState.fromSqlConst(rs.getString(4)).toString()); + e.setCatName(rs.getString(1)); + e.setDbname(rs.getString(2)); + e.setTablename(rs.getString(3)); + e.setPartitionname(rs.getString(4)); + e.setState(CompactionState.fromSqlConst(rs.getString(5)).toString()); try { - e.setType(TxnUtils.dbCompactionType2ThriftType(rs.getString(5).charAt(0))); + e.setType(TxnUtils.dbCompactionType2ThriftType(rs.getString(6).charAt(0))); } catch (SQLException ex) { //do nothing to handle RU/D if we add another status } - e.setWorkerid(rs.getString(6)); - long start = rs.getLong(7); + e.setWorkerid(rs.getString(7)); + long start = rs.getLong(8); if (!rs.wasNull()) { e.setStart(start); } - long endTime = rs.getLong(8); + long endTime = rs.getLong(9); if (endTime != -1) { e.setEndTime(endTime); } - e.setRunAs(rs.getString(9)); - 
e.setHadoopJobId(rs.getString(10)); - e.setId(rs.getLong(11)); - e.setErrorMessage(rs.getString(12)); - long enqueueTime = rs.getLong(13); + e.setRunAs(rs.getString(10)); + e.setHadoopJobId(rs.getString(11)); + e.setId(rs.getLong(12)); + e.setErrorMessage(rs.getString(13)); + long enqueueTime = rs.getLong(14); if (!rs.wasNull()) { e.setEnqueueTime(enqueueTime); } - e.setWorkerVersion(rs.getString(14)); - e.setInitiatorId(rs.getString(15)); - e.setInitiatorVersion(rs.getString(16)); - long cleanerStart = rs.getLong(17); + e.setWorkerVersion(rs.getString(15)); + e.setInitiatorId(rs.getString(16)); + e.setInitiatorVersion(rs.getString(17)); + long cleanerStart = rs.getLong(18); if (!rs.wasNull() && (cleanerStart != -1)) { e.setCleanerStart(cleanerStart); } - String poolName = rs.getString(18); + String poolName = rs.getString(19); if (isBlank(poolName)) { e.setPoolName(DEFAULT_POOL_NAME); } else { e.setPoolName(poolName); } - e.setTxnId(rs.getLong(19)); - e.setNextTxnId(rs.getLong(20)); - e.setCommitTime(rs.getLong(21)); - e.setHightestTxnId(rs.getLong(22)); + e.setTxnId(rs.getLong(20)); + e.setNextTxnId(rs.getLong(21)); + e.setCommitTime(rs.getLong(22)); + e.setHightestTxnId(rs.getLong(23)); response.addToCompacts(e); } return response; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/TablesWithAbortedTxnsHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/TablesWithAbortedTxnsHandler.java index 16b768dc34bb..dc902f8f586d 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/TablesWithAbortedTxnsHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/TablesWithAbortedTxnsHandler.java @@ -35,9 +35,9 @@ public class TablesWithAbortedTxnsHandler implements QueryHandler> { //language=SQL private static final String 
SELECT_TABLES_WITH_X_ABORTED_TXNS = - "SELECT \"TC_DATABASE\", \"TC_TABLE\", \"TC_PARTITION\" FROM \"TXN_COMPONENTS\" " + + "SELECT \"TC_CATALOG\", \"TC_DATABASE\", \"TC_TABLE\", \"TC_PARTITION\" FROM \"TXN_COMPONENTS\" " + "INNER JOIN \"TXNS\" ON \"TC_TXNID\" = \"TXN_ID\" WHERE \"TXN_STATE\" = :abortedState " + - "GROUP BY \"TC_DATABASE\", \"TC_TABLE\", \"TC_PARTITION\" HAVING COUNT(\"TXN_ID\") > :txnThreshold"; + "GROUP BY \"TC_CATALOG\", \"TC_DATABASE\", \"TC_TABLE\", \"TC_PARTITION\" HAVING COUNT(\"TXN_ID\") > :txnThreshold"; private final int txnThreshold; @@ -61,8 +61,8 @@ public SqlParameterSource getQueryParameters() { public Set extractData(ResultSet rs) throws SQLException, DataAccessException { Set resourceNames = new TreeSet<>(); while (rs.next()) { - String resourceName = rs.getString(1) + "." + rs.getString(2); - String partName = rs.getString(3); + String resourceName = rs.getString(1) + "." + rs.getString(2) + "." + rs.getString(3); + String partName = rs.getString(4); resourceName = partName != null ? 
resourceName + "#" + partName : resourceName; resourceNames.add(resourceName); } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/TxnIdForWriteIdHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/TxnIdForWriteIdHandler.java index 9fdb3465d724..3c48e8432684 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/TxnIdForWriteIdHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/TxnIdForWriteIdHandler.java @@ -30,11 +30,13 @@ public class TxnIdForWriteIdHandler implements QueryHandler { private final long writeId; + private final String catName; private final String dbName; private final String tableName; - public TxnIdForWriteIdHandler(long writeId, String dbName, String tableName) { + public TxnIdForWriteIdHandler(long writeId, String catName, String dbName, String tableName) { this.writeId = writeId; + this.catName = catName; this.dbName = dbName; this.tableName = tableName; } @@ -42,13 +44,14 @@ public TxnIdForWriteIdHandler(long writeId, String dbName, String tableName) { @Override public String getParameterizedQueryString(DatabaseProduct databaseProduct) throws MetaException { return "SELECT \"T2W_TXNID\" FROM \"TXN_TO_WRITE_ID\" WHERE" - + " \"T2W_DATABASE\" = ? AND \"T2W_TABLE\" = ? AND \"T2W_WRITEID\" = " + writeId; + + " \"T2W_CATALOG\" = ? AND \"T2W_DATABASE\" = ? AND \"T2W_TABLE\" = ? 
AND \"T2W_WRITEID\" = " + writeId; } @Override public SqlParameterSource getQueryParameters() { return new MapSqlParameterSource() .addValue("writeId", writeId) + .addValue("catName", catName) .addValue("dbName", dbName) .addValue("tableName", tableName); } diff --git a/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.3.0.derby.sql b/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.3.0.derby.sql index 39d17cc88be7..467b1349867c 100644 --- a/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.3.0.derby.sql +++ b/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.3.0.derby.sql @@ -537,6 +537,7 @@ INSERT INTO TXNS (TXN_ID, TXN_STATE, TXN_STARTED, TXN_LAST_HEARTBEAT, TXN_USER, CREATE TABLE TXN_COMPONENTS ( TC_TXNID bigint NOT NULL REFERENCES TXNS (TXN_ID), + TC_CATALOG varchar(128) NOT NULL, TC_DATABASE varchar(128) NOT NULL, TC_TABLE varchar(256), TC_PARTITION varchar(767), @@ -548,6 +549,7 @@ CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID); CREATE TABLE COMPLETED_TXN_COMPONENTS ( CTC_TXNID bigint NOT NULL, + CTC_CATALOG varchar(128) NOT NULL, CTC_DATABASE varchar(128) NOT NULL, CTC_TABLE varchar(256), CTC_PARTITION varchar(767), @@ -556,7 +558,7 @@ CREATE TABLE COMPLETED_TXN_COMPONENTS ( CTC_UPDATE_DELETE char(1) NOT NULL ); -CREATE INDEX COMPLETED_TXN_COMPONENTS_IDX ON COMPLETED_TXN_COMPONENTS (CTC_DATABASE, CTC_TABLE, CTC_PARTITION); +CREATE INDEX COMPLETED_TXN_COMPONENTS_IDX ON COMPLETED_TXN_COMPONENTS (CTC_CATALOG, CTC_DATABASE, CTC_TABLE, CTC_PARTITION); CREATE TABLE TXN_LOCK_TBL ( TXN_LOCK bigint NOT NULL @@ -593,6 +595,7 @@ INSERT INTO NEXT_LOCK_ID VALUES(1); CREATE TABLE COMPACTION_QUEUE ( CQ_ID bigint PRIMARY KEY, + CQ_CATALOG varchar(128) NOT NULL, CQ_DATABASE varchar(128) NOT NULL, CQ_TABLE varchar(256) NOT NULL, CQ_PARTITION varchar(767), @@ -627,6 +630,7 @@ INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1); CREATE TABLE COMPLETED_COMPACTIONS ( CC_ID bigint PRIMARY 
KEY, + CC_CATALOG varchar(128) NOT NULL, CC_DATABASE varchar(128) NOT NULL, CC_TABLE varchar(256) NOT NULL, CC_PARTITION varchar(767), @@ -653,10 +657,11 @@ CREATE TABLE COMPLETED_COMPACTIONS ( CC_ORDER_BY varchar(4000) ); -CREATE INDEX COMPLETED_COMPACTIONS_RES ON COMPLETED_COMPACTIONS (CC_DATABASE,CC_TABLE,CC_PARTITION); +CREATE INDEX COMPLETED_COMPACTIONS_RES ON COMPLETED_COMPACTIONS (CC_CATALOG, CC_DATABASE, CC_TABLE, CC_PARTITION); -- HIVE-25842 CREATE TABLE COMPACTION_METRICS_CACHE ( + CMC_CATALOG varchar(128) NOT NULL, CMC_DATABASE varchar(128) NOT NULL, CMC_TABLE varchar(256) NOT NULL, CMC_PARTITION varchar(767), @@ -672,9 +677,10 @@ CREATE TABLE AUX_TABLE ( PRIMARY KEY(MT_KEY1, MT_KEY2) ); ---1st 4 cols make up a PK but since WS_PARTITION is nullable we can't declare such PK +--1st 5 cols make up a PK but since WS_PARTITION is nullable we can't declare such PK --This is a good candidate for Index orgainzed table CREATE TABLE WRITE_SET ( + WS_CATALOG varchar(128) NOT NULL, WS_DATABASE varchar(128) NOT NULL, WS_TABLE varchar(256) NOT NULL, WS_PARTITION varchar(767), @@ -685,30 +691,33 @@ CREATE TABLE WRITE_SET ( CREATE TABLE TXN_TO_WRITE_ID ( T2W_TXNID bigint NOT NULL, + T2W_CATALOG varchar(128) NOT NULL, T2W_DATABASE varchar(128) NOT NULL, T2W_TABLE varchar(256) NOT NULL, T2W_WRITEID bigint NOT NULL ); -CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_TXNID); -CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_WRITEID); +CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_CATALOG, T2W_DATABASE, T2W_TABLE, T2W_TXNID); +CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_CATALOG, T2W_DATABASE, T2W_TABLE, T2W_WRITEID); CREATE TABLE NEXT_WRITE_ID ( + NWI_CATALOG varchar(128) NOT NULL, NWI_DATABASE varchar(128) NOT NULL, NWI_TABLE varchar(256) NOT NULL, NWI_NEXT bigint NOT NULL ); -CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_DATABASE, NWI_TABLE); 
+CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_CATALOG, NWI_DATABASE, NWI_TABLE); CREATE TABLE MIN_HISTORY_WRITE_ID ( MH_TXNID bigint NOT NULL REFERENCES TXNS (TXN_ID), + MH_CATALOG varchar(128) NOT NULL, MH_DATABASE varchar(128) NOT NULL, MH_TABLE varchar(256) NOT NULL, MH_WRITEID bigint NOT NULL ); -CREATE INDEX MIN_HISTORY_WRITE_ID_IDX ON MIN_HISTORY_WRITE_ID (MH_DATABASE, MH_TABLE, MH_WRITEID); +CREATE INDEX MIN_HISTORY_WRITE_ID_IDX ON MIN_HISTORY_WRITE_ID (MH_CATALOG, MH_DATABASE, MH_TABLE, MH_WRITEID); CREATE TABLE MIN_HISTORY_LEVEL ( MHL_TXNID bigint NOT NULL, @@ -775,6 +784,7 @@ CREATE TABLE TXN_WRITE_NOTIFICATION_LOG ( WNL_ID bigint NOT NULL, WNL_TXNID bigint NOT NULL, WNL_WRITEID bigint NOT NULL, + WNL_CATALOG varchar(128) NOT NULL, WNL_DATABASE varchar(128) NOT NULL, WNL_TABLE varchar(256) NOT NULL, WNL_PARTITION varchar(767) NOT NULL, @@ -782,7 +792,7 @@ CREATE TABLE TXN_WRITE_NOTIFICATION_LOG ( WNL_PARTITION_OBJ clob, WNL_FILES clob, WNL_EVENT_TIME integer NOT NULL, - PRIMARY KEY (WNL_TXNID, WNL_DATABASE, WNL_TABLE, WNL_PARTITION) + PRIMARY KEY (WNL_TXNID, WNL_CATALOG, WNL_DATABASE, WNL_TABLE, WNL_PARTITION) ); INSERT INTO SEQUENCE_TABLE (SEQUENCE_NAME, NEXT_VAL) VALUES ('org.apache.hadoop.hive.metastore.model.MTxnWriteNotificationLog', 1); diff --git a/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-4.2.0-to-4.3.0.derby.sql b/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-4.2.0-to-4.3.0.derby.sql index 4aa072134523..7549f07a937b 100644 --- a/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-4.2.0-to-4.3.0.derby.sql +++ b/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-4.2.0-to-4.3.0.derby.sql @@ -1,7 +1,30 @@ +DROP INDEX COMPLETED_TXN_COMPONENTS_IDX; +DROP INDEX COMPLETED_COMPACTIONS_RES; +DROP INDEX TBL_TO_TXN_ID_IDX; +DROP INDEX TBL_TO_WRITE_ID_IDX; +DROP INDEX NEXT_WRITE_ID_IDX; + ALTER TABLE HIVE_LOCKS ADD COLUMN HL_CATALOG varchar(128) NOT NULL DEFAULT 'hive'; ALTER 
TABLE MATERIALIZATION_REBUILD_LOCKS ADD COLUMN MRL_CAT_NAME varchar(128) NOT NULL DEFAULT 'hive'; +ALTER TABLE TXN_COMPONENTS ADD COLUMN TC_CATALOG varchar(128) NOT NULL DEFAULT 'hive'; +ALTER TABLE COMPLETED_TXN_COMPONENTS ADD COLUMN CTC_CATALOG varchar(128) NOT NULL DEFAULT 'hive'; +ALTER TABLE COMPACTION_QUEUE ADD COLUMN CQ_CATALOG varchar(128) NOT NULL DEFAULT 'hive'; +ALTER TABLE COMPLETED_COMPACTIONS ADD COLUMN CC_CATALOG varchar(128) NOT NULL DEFAULT 'hive'; +ALTER TABLE COMPACTION_METRICS_CACHE ADD COLUMN CMC_CATALOG varchar(128) NOT NULL DEFAULT 'hive'; +ALTER TABLE WRITE_SET ADD COLUMN WS_CATALOG varchar(128) NOT NULL DEFAULT 'hive'; +ALTER TABLE TXN_TO_WRITE_ID ADD COLUMN T2W_CATALOG varchar(128) NOT NULL DEFAULT 'hive'; +ALTER TABLE NEXT_WRITE_ID ADD COLUMN NWI_CATALOG varchar(128) NOT NULL DEFAULT 'hive'; +ALTER TABLE MIN_HISTORY_WRITE_ID ADD COLUMN MH_CATALOG varchar(128) NOT NULL DEFAULT 'hive'; +ALTER TABLE TXN_WRITE_NOTIFICATION_LOG ADD COLUMN WNL_CATALOG varchar(128) NOT NULL DEFAULT 'hive'; +ALTER TABLE TXN_WRITE_NOTIFICATION_LOG DROP PRIMARY KEY; +ALTER TABLE TXN_WRITE_NOTIFICATION_LOG ADD PRIMARY KEY (WNL_TXNID, WNL_CATALOG, WNL_DATABASE, WNL_TABLE, WNL_PARTITION); -CREATE INDEX MIN_HISTORY_WRITE_ID_IDX ON MIN_HISTORY_WRITE_ID (MH_DATABASE, MH_TABLE, MH_WRITEID); +CREATE INDEX COMPLETED_TXN_COMPONENTS_IDX ON COMPLETED_TXN_COMPONENTS (CTC_CATALOG, CTC_DATABASE, CTC_TABLE, CTC_PARTITION); +CREATE INDEX COMPLETED_COMPACTIONS_RES ON COMPLETED_COMPACTIONS (CC_CATALOG, CC_DATABASE,CC_TABLE,CC_PARTITION); +CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_CATALOG, T2W_DATABASE, T2W_TABLE, T2W_TXNID); +CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_CATALOG, T2W_DATABASE, T2W_TABLE, T2W_WRITEID); +CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_CATALOG, NWI_DATABASE, NWI_TABLE); +CREATE INDEX MIN_HISTORY_WRITE_ID_IDX ON MIN_HISTORY_WRITE_ID (MH_CATALOG, MH_DATABASE, MH_TABLE, MH_WRITEID); -- This needs to be the 
last thing done. Insert any changes above this line. UPDATE "APP".VERSION SET SCHEMA_VERSION='4.3.0', VERSION_COMMENT='Hive release version 4.3.0' where VER_ID=1; diff --git a/standalone-metastore/metastore-server/src/main/sql/hive/hive-schema-4.3.0.hive.sql b/standalone-metastore/metastore-server/src/main/sql/hive/hive-schema-4.3.0.hive.sql index 4a4ed9aeec1a..1f9c7811ac8b 100644 --- a/standalone-metastore/metastore-server/src/main/sql/hive/hive-schema-4.3.0.hive.sql +++ b/standalone-metastore/metastore-server/src/main/sql/hive/hive-schema-4.3.0.hive.sql @@ -81,6 +81,7 @@ CREATE EXTERNAL TABLE IF NOT EXISTS `DBS` ( `DB_ID` bigint, `DB_LOCATION_URI` string, `NAME` string, + `CTLG_NAME` string, `OWNER_NAME` string, `OWNER_TYPE` string, CONSTRAINT `SYS_PK_DBS` PRIMARY KEY (`DB_ID`) DISABLE @@ -93,6 +94,7 @@ TBLPROPERTIES ( \"DB_ID\", \"DB_LOCATION_URI\", \"NAME\", + \"CTLG_NAME\", \"OWNER_NAME\", \"OWNER_TYPE\" FROM @@ -1067,6 +1069,7 @@ LEFT OUTER JOIN \"WM_POOL\" ON \"WM_POOL\".\"POOL_ID\" = \"WM_MAPPING\".\"POOL_I CREATE EXTERNAL TABLE IF NOT EXISTS `COMPACTION_QUEUE` ( `CQ_ID` bigint, + `CQ_CATALOG` string, `CQ_DATABASE` string, `CQ_TABLE` string, `CQ_PARTITION` string, @@ -1096,6 +1099,7 @@ TBLPROPERTIES ( "hive.sql.query" = "SELECT \"COMPACTION_QUEUE\".\"CQ_ID\", + \"COMPACTION_QUEUE\".\"CQ_CATALOG\", \"COMPACTION_QUEUE\".\"CQ_DATABASE\", \"COMPACTION_QUEUE\".\"CQ_TABLE\", \"COMPACTION_QUEUE\".\"CQ_PARTITION\", @@ -1124,6 +1128,7 @@ FROM \"COMPACTION_QUEUE\" CREATE EXTERNAL TABLE IF NOT EXISTS `COMPLETED_COMPACTIONS` ( `CC_ID` bigint, + `CC_CATALOG` string, `CC_DATABASE` string, `CC_TABLE` string, `CC_PARTITION` string, @@ -1153,6 +1158,7 @@ TBLPROPERTIES ( "hive.sql.query" = "SELECT \"COMPLETED_COMPACTIONS\".\"CC_ID\", + \"COMPLETED_COMPACTIONS\".\"CC_CATALOG\", \"COMPLETED_COMPACTIONS\".\"CC_DATABASE\", \"COMPLETED_COMPACTIONS\".\"CC_TABLE\", \"COMPLETED_COMPACTIONS\".\"CC_PARTITION\", @@ -1211,7 +1217,7 @@ CREATE OR REPLACE VIEW `COMPACTIONS` ) AS SELECT 
CC_ID, - 'default', + CC_CATALOG, CC_DATABASE, CC_TABLE, CC_PARTITION, @@ -1242,7 +1248,7 @@ FROM COMPLETED_COMPACTIONS UNION ALL SELECT CQ_ID, - 'default', + CQ_CATALOG, CQ_DATABASE, CQ_TABLE, CQ_PARTITION, @@ -1361,6 +1367,7 @@ FROM \"TXNS\"" CREATE EXTERNAL TABLE IF NOT EXISTS `TXN_COMPONENTS` ( `TC_TXNID` bigint, + `TC_CATALOG` string, `TC_DATABASE` string, `TC_TABLE` string, `TC_PARTITION` string, @@ -1373,6 +1380,7 @@ TBLPROPERTIES ( "hive.sql.query" = "SELECT \"TC_TXNID\", + \"TC_CATALOG\", \"TC_DATABASE\", \"TC_TABLE\", \"TC_PARTITION\", @@ -1393,6 +1401,7 @@ CREATE OR REPLACE VIEW `TRANSACTIONS` ( `META_INFO`, `HEARTBEAT_COUNT`, `TYPE`, + `TC_CATALOG`, `TC_DATABASE`, `TC_TABLE`, `TC_PARTITION`, @@ -1410,6 +1419,7 @@ SELECT DISTINCT T.`TXN_META_INFO`, T.`TXN_HEARTBEAT_COUNT`, CASE WHEN T.`TXN_TYPE` = 0 THEN 'DEFAULT' WHEN T.`TXN_TYPE` = 1 THEN 'REPL_CREATED' WHEN T.`TXN_TYPE` = 2 THEN 'READ_ONLY' WHEN T.`TXN_TYPE` = 3 THEN 'COMPACTION' END AS TXN_TYPE, + TC.`TC_CATALOG`, TC.`TC_DATABASE`, TC.`TC_TABLE`, TC.`TC_PARTITION`, @@ -1664,7 +1674,7 @@ CREATE OR REPLACE VIEW `SCHEMATA` `SQL_PATH` ) AS SELECT DISTINCT - 'default', + D.`CTLG_NAME`, D.`NAME`, D.`OWNER_NAME`, cast(null as string), @@ -1696,7 +1706,7 @@ CREATE OR REPLACE VIEW `TABLES` `COMMIT_ACTION` ) AS SELECT DISTINCT - 'default', + D.CTLG_NAME, D.NAME, T.TBL_NAME, IF(length(T.VIEW_ORIGINAL_TEXT) > 0, 'VIEW', 'BASE_TABLE'), @@ -1731,7 +1741,7 @@ CREATE OR REPLACE VIEW `TABLE_PRIVILEGES` SELECT DISTINCT P.`GRANTOR`, P.`PRINCIPAL_NAME`, - 'default', + D.`CTLG_NAME`, D.`NAME`, T.`TBL_NAME`, P.`TBL_PRIV`, @@ -1799,7 +1809,7 @@ CREATE OR REPLACE VIEW `COLUMNS` `DECLARED_NUMERIC_SCALE` ) AS SELECT DISTINCT - 'default', + D.CTLG_NAME, D.NAME, T.TBL_NAME, C.COLUMN_NAME, @@ -1913,7 +1923,7 @@ CREATE OR REPLACE VIEW `COLUMN_PRIVILEGES` SELECT DISTINCT P.`GRANTOR`, P.`PRINCIPAL_NAME`, - 'default', + D.`CTLG_NAME`, D.`NAME`, T.`TBL_NAME`, P.`COLUMN_NAME`, @@ -1956,7 +1966,7 @@ CREATE OR REPLACE VIEW `VIEWS` 
`IS_TRIGGER_INSERTABLE_INTO` ) AS SELECT DISTINCT - 'default', + D.CTLG_NAME, D.NAME, T.TBL_NAME, T.VIEW_ORIGINAL_TEXT, @@ -2036,7 +2046,7 @@ SELECT DISTINCT C_TBLPROPERTIES FROM `sys`.`COMPACTIONS` C JOIN `sys`.`TBLS` T ON (C.`C_TABLE` = T.`TBL_NAME`) - JOIN `sys`.`DBS` D ON (C.`C_DATABASE` = D.`NAME`) + JOIN `sys`.`DBS` D ON (C.`C_DATABASE` = D.`NAME`) AND (C.`C_CATALOG` = D.`CTLG_NAME`) LEFT JOIN `sys`.`TBL_PRIVS` P ON (T.`TBL_ID` = P.`TBL_ID`) WHERE (NOT restrict_information_schema() OR P.`TBL_ID` IS NOT NULL @@ -2090,6 +2100,7 @@ CREATE OR REPLACE VIEW `TRANSACTIONS` ( `META_INFO`, `HEARTBEAT_COUNT`, `TYPE`, + `TC_CATALOG`, `TC_DATABASE`, `TC_TABLE`, `TC_PARTITION`, @@ -2107,13 +2118,14 @@ SELECT DISTINCT `META_INFO`, `HEARTBEAT_COUNT`, `TYPE`, + `TC_CATALOG`, `TC_DATABASE`, `TC_TABLE`, `TC_PARTITION`, `TC_OPERATION_TYPE`, `TC_WRITEID` FROM `SYS`.`TRANSACTIONS` AS TXN JOIN `sys`.`TBLS` T ON (TXN.`TC_TABLE` = T.`TBL_NAME`) - JOIN `sys`.`DBS` D ON (TXN.`TC_DATABASE` = D.`NAME`) + JOIN `sys`.`DBS` D ON (TXN.`TC_DATABASE` = D.`NAME`) AND (TXN.`TC_CATALOG` = D.`CTLG_NAME`) LEFT JOIN `sys`.`TBL_PRIVS` P ON (T.`TBL_ID` = P.`TBL_ID`) WHERE (NOT restrict_information_schema() OR P.`TBL_ID` IS NOT NULL @@ -2159,7 +2171,7 @@ SELECT DISTINCT `BLOCKEDBY_EXT_ID`, `BLOCKEDBY_INT_ID` FROM SYS.`LOCKS` AS L JOIN `sys`.`TBLS` T ON (L.`TABLE` = T.`TBL_NAME`) - JOIN `sys`.`DBS` D ON (L.`DB` = D.`NAME`) + JOIN `sys`.`DBS` D ON (L.`DB` = D.`NAME`) AND (L.`CATALOG` = D.`CTLG_NAME`) LEFT JOIN `sys`.`TBL_PRIVS` P ON (T.`TBL_ID` = P.`TBL_ID`) WHERE (NOT restrict_information_schema() OR P.`TBL_ID` IS NOT NULL diff --git a/standalone-metastore/metastore-server/src/main/sql/hive/upgrade-4.2.0-to-4.3.0.hive.sql b/standalone-metastore/metastore-server/src/main/sql/hive/upgrade-4.2.0-to-4.3.0.hive.sql index 9769d67b3c79..d74624bf30f7 100644 --- a/standalone-metastore/metastore-server/src/main/sql/hive/upgrade-4.2.0-to-4.3.0.hive.sql +++ 
b/standalone-metastore/metastore-server/src/main/sql/hive/upgrade-4.2.0-to-4.3.0.hive.sql @@ -1,5 +1,829 @@ SELECT 'Upgrading MetaStore schema from 4.2.0 to 4.3.0'; -ALTER TABLE `HIVE_LOCKS` ADD COLUMNS (`HL_CATALOG` string); +USE SYS; + +DROP TABLE IF EXISTS `DBS`; +CREATE EXTERNAL TABLE IF NOT EXISTS `DBS` ( + `DB_ID` bigint, + `DB_LOCATION_URI` string, + `NAME` string, + `CTLG_NAME` string, + `OWNER_NAME` string, + `OWNER_TYPE` string, + CONSTRAINT `SYS_PK_DBS` PRIMARY KEY (`DB_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"DB_ID\", + \"DB_LOCATION_URI\", + \"NAME\", + \"CTLG_NAME\", + \"OWNER_NAME\", + \"OWNER_TYPE\" +FROM + \"DBS\"" +); + +DROP TABLE IF EXISTS `TXN_COMPONENTS`; +CREATE EXTERNAL TABLE IF NOT EXISTS `TXN_COMPONENTS` ( + `TC_TXNID` bigint, + `TC_CATALOG` string, + `TC_DATABASE` string, + `TC_TABLE` string, + `TC_PARTITION` string, + `TC_OPERATION_TYPE` string, + `TC_WRITEID` bigint +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"TC_TXNID\", + \"TC_CATALOG\", + \"TC_DATABASE\", + \"TC_TABLE\", + \"TC_PARTITION\", + \"TC_OPERATION_TYPE\", + \"TC_WRITEID\" +FROM \"TXN_COMPONENTS\"" +); + +DROP TABLE IF EXISTS `COMPLETED_COMPACTIONS`; +CREATE EXTERNAL TABLE IF NOT EXISTS `COMPLETED_COMPACTIONS` ( + `CC_ID` bigint, + `CC_CATALOG` string, + `CC_DATABASE` string, + `CC_TABLE` string, + `CC_PARTITION` string, + `CC_STATE` string, + `CC_TYPE` string, + `CC_TBLPROPERTIES` string, + `CC_WORKER_ID` string, + `CC_ENQUEUE_TIME` bigint, + `CC_START` bigint, + `CC_END` bigint, + `CC_RUN_AS` string, + `CC_HIGHEST_WRITE_ID` bigint, + `CC_HADOOP_JOB_ID` string, + `CC_ERROR_MESSAGE` string, + `CC_NEXT_TXN_ID` bigint, + `CC_TXN_ID` bigint, + `CC_COMMIT_TIME` bigint, + `CC_INITIATOR_ID` string, + `CC_INITIATOR_VERSION` string, + 
`CC_WORKER_VERSION` string, + `CC_POOL_NAME` string, + `CC_NUMBER_OF_BUCKETS` string +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"COMPLETED_COMPACTIONS\".\"CC_ID\", + \"COMPLETED_COMPACTIONS\".\"CC_CATALOG\", + \"COMPLETED_COMPACTIONS\".\"CC_DATABASE\", + \"COMPLETED_COMPACTIONS\".\"CC_TABLE\", + \"COMPLETED_COMPACTIONS\".\"CC_PARTITION\", + \"COMPLETED_COMPACTIONS\".\"CC_STATE\", + \"COMPLETED_COMPACTIONS\".\"CC_TYPE\", + \"COMPLETED_COMPACTIONS\".\"CC_TBLPROPERTIES\", + \"COMPLETED_COMPACTIONS\".\"CC_WORKER_ID\", + \"COMPLETED_COMPACTIONS\".\"CC_ENQUEUE_TIME\", + \"COMPLETED_COMPACTIONS\".\"CC_START\", + \"COMPLETED_COMPACTIONS\".\"CC_END\", + \"COMPLETED_COMPACTIONS\".\"CC_RUN_AS\", + \"COMPLETED_COMPACTIONS\".\"CC_HIGHEST_WRITE_ID\", + \"COMPLETED_COMPACTIONS\".\"CC_HADOOP_JOB_ID\", + \"COMPLETED_COMPACTIONS\".\"CC_ERROR_MESSAGE\", + \"COMPLETED_COMPACTIONS\".\"CC_NEXT_TXN_ID\", + \"COMPLETED_COMPACTIONS\".\"CC_TXN_ID\", + \"COMPLETED_COMPACTIONS\".\"CC_COMMIT_TIME\", + \"COMPLETED_COMPACTIONS\".\"CC_INITIATOR_ID\", + \"COMPLETED_COMPACTIONS\".\"CC_INITIATOR_VERSION\", + \"COMPLETED_COMPACTIONS\".\"CC_WORKER_VERSION\", + \"COMPLETED_COMPACTIONS\".\"CC_POOL_NAME\", + \"COMPLETED_COMPACTIONS\".\"CC_NUMBER_OF_BUCKETS\" +FROM \"COMPLETED_COMPACTIONS\"" +); + +DROP TABLE IF EXISTS `COMPACTION_QUEUE`; +CREATE EXTERNAL TABLE IF NOT EXISTS `COMPACTION_QUEUE` ( + `CQ_ID` bigint, + `CQ_CATALOG` string, + `CQ_DATABASE` string, + `CQ_TABLE` string, + `CQ_PARTITION` string, + `CQ_STATE` string, + `CQ_TYPE` string, + `CQ_TBLPROPERTIES` string, + `CQ_WORKER_ID` string, + `CQ_ENQUEUE_TIME` bigint, + `CQ_START` bigint, + `CQ_RUN_AS` string, + `CQ_HIGHEST_WRITE_ID` bigint, + `CQ_HADOOP_JOB_ID` string, + `CQ_ERROR_MESSAGE` string, + `CQ_NEXT_TXN_ID` bigint, + `CQ_TXN_ID` bigint, + `CQ_COMMIT_TIME` bigint, + `CQ_INITIATOR_ID` string, + `CQ_INITIATOR_VERSION` string, + 
`CQ_WORKER_VERSION` string, + `CQ_CLEANER_START` bigint, + `CQ_POOL_NAME` string, + `CQ_NUMBER_OF_BUCKETS` string +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"COMPACTION_QUEUE\".\"CQ_ID\", + \"COMPACTION_QUEUE\".\"CQ_CATALOG\", + \"COMPACTION_QUEUE\".\"CQ_DATABASE\", + \"COMPACTION_QUEUE\".\"CQ_TABLE\", + \"COMPACTION_QUEUE\".\"CQ_PARTITION\", + \"COMPACTION_QUEUE\".\"CQ_STATE\", + \"COMPACTION_QUEUE\".\"CQ_TYPE\", + \"COMPACTION_QUEUE\".\"CQ_TBLPROPERTIES\", + \"COMPACTION_QUEUE\".\"CQ_WORKER_ID\", + \"COMPACTION_QUEUE\".\"CQ_ENQUEUE_TIME\", + \"COMPACTION_QUEUE\".\"CQ_START\", + \"COMPACTION_QUEUE\".\"CQ_RUN_AS\", + \"COMPACTION_QUEUE\".\"CQ_HIGHEST_WRITE_ID\", + \"COMPACTION_QUEUE\".\"CQ_HADOOP_JOB_ID\", + \"COMPACTION_QUEUE\".\"CQ_ERROR_MESSAGE\", + \"COMPACTION_QUEUE\".\"CQ_NEXT_TXN_ID\", + \"COMPACTION_QUEUE\".\"CQ_TXN_ID\", + \"COMPACTION_QUEUE\".\"CQ_COMMIT_TIME\", + \"COMPACTION_QUEUE\".\"CQ_INITIATOR_ID\", + \"COMPACTION_QUEUE\".\"CQ_INITIATOR_VERSION\", + \"COMPACTION_QUEUE\".\"CQ_WORKER_VERSION\", + \"COMPACTION_QUEUE\".\"CQ_CLEANER_START\", + \"COMPACTION_QUEUE\".\"CQ_POOL_NAME\", + \"COMPACTION_QUEUE\".\"CQ_NUMBER_OF_BUCKETS\" +FROM \"COMPACTION_QUEUE\"" +); + +DROP TABLE IF EXISTS `HIVE_LOCKS`; +CREATE EXTERNAL TABLE `HIVE_LOCKS` ( + `HL_LOCK_EXT_ID` bigint, + `HL_LOCK_INT_ID` bigint, + `HL_TXNID` bigint, + `HL_CATALOG` string, + `HL_DB` string, + `HL_TABLE` string, + `HL_PARTITION` string, + `HL_LOCK_STATE` string, + `HL_LOCK_TYPE` string, + `HL_LAST_HEARTBEAT` bigint, + `HL_ACQUIRED_AT` bigint, + `HL_USER` string, + `HL_HOST` string, + `HL_HEARTBEAT_COUNT` int, + `HL_AGENT_INFO` string, + `HL_BLOCKEDBY_EXT_ID` bigint, + `HL_BLOCKEDBY_INT_ID` bigint +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"HL_LOCK_EXT_ID\", + \"HL_LOCK_INT_ID\", 
+ \"HL_TXNID\", + \"HL_CATALOG\", + \"HL_DB\", + \"HL_TABLE\", + \"HL_PARTITION\", + \"HL_LOCK_STATE\", + \"HL_LOCK_TYPE\", + \"HL_LAST_HEARTBEAT\", + \"HL_ACQUIRED_AT\", + \"HL_USER\", + \"HL_HOST\", + \"HL_HEARTBEAT_COUNT\", + \"HL_AGENT_INFO\", + \"HL_BLOCKEDBY_EXT_ID\", + \"HL_BLOCKEDBY_INT_ID\" +FROM \"HIVE_LOCKS\"" +); + +CREATE OR REPLACE VIEW `TRANSACTIONS` ( + `TXN_ID`, + `STATE`, + `STARTED`, + `LAST_HEARTBEAT`, + `USER`, + `HOST`, + `AGENT_INFO`, + `META_INFO`, + `HEARTBEAT_COUNT`, + `TYPE`, + `TC_CATALOG`, + `TC_DATABASE`, + `TC_TABLE`, + `TC_PARTITION`, + `TC_OPERATION_TYPE`, + `TC_WRITEID` +) AS +SELECT DISTINCT + T.`TXN_ID`, + CASE WHEN T.`TXN_STATE` = 'o' THEN 'open' WHEN T.`TXN_STATE` = 'a' THEN 'aborted' WHEN T.`TXN_STATE` = 'c' THEN 'commited' ELSE 'UNKNOWN' END AS TXN_STATE, + FROM_UNIXTIME(T.`TXN_STARTED` DIV 1000) AS TXN_STARTED, + FROM_UNIXTIME(T.`TXN_LAST_HEARTBEAT` DIV 1000) AS TXN_LAST_HEARTBEAT, + T.`TXN_USER`, + T.`TXN_HOST`, + T.`TXN_AGENT_INFO`, + T.`TXN_META_INFO`, + T.`TXN_HEARTBEAT_COUNT`, + CASE WHEN T.`TXN_TYPE` = 0 THEN 'DEFAULT' WHEN T.`TXN_TYPE` = 1 THEN 'REPL_CREATED' WHEN T.`TXN_TYPE` = 2 THEN 'READ_ONLY' WHEN T.`TXN_TYPE` = 3 THEN 'COMPACTION' END AS TXN_TYPE, + TC.`TC_CATALOG`, + TC.`TC_DATABASE`, + TC.`TC_TABLE`, + TC.`TC_PARTITION`, + CASE WHEN TC.`TC_OPERATION_TYPE` = 's' THEN 'SELECT' WHEN TC.`TC_OPERATION_TYPE` = 'i' THEN 'INSERT' WHEN TC.`TC_OPERATION_TYPE` = 'u' THEN 'UPDATE' WHEN TC.`TC_OPERATION_TYPE` = 'c' THEN 'COMPACT' END AS OPERATION_TYPE, + TC.`TC_WRITEID` +FROM `SYS`.`TXNS` AS T +LEFT JOIN `SYS`.`TXN_COMPONENTS` AS TC ON T.`TXN_ID` = TC.`TC_TXNID`; + +CREATE OR REPLACE VIEW `COMPACTIONS` +( + `C_ID`, + `C_CATALOG`, + `C_DATABASE`, + `C_TABLE`, + `C_PARTITION`, + `C_TYPE`, + `C_STATE`, + `C_WORKER_HOST`, + `C_WORKER_ID`, + `C_WORKER_VERSION`, + `C_ENQUEUE_TIME`, + `C_START`, + `C_DURATION`, + `C_HADOOP_JOB_ID`, + `C_RUN_AS`, + `C_ERROR_MESSAGE`, + `C_NEXT_TXN_ID`, + `C_TXN_ID`, + `C_COMMIT_TIME`, + 
`C_HIGHEST_WRITE_ID`, + `C_INITIATOR_HOST`, + `C_INITIATOR_ID`, + `C_INITIATOR_VERSION`, + `C_CLEANER_START`, + `C_POOL_NAME`, + `C_NUMBER_OF_BUCKETS`, + `C_TBLPROPERTIES` +) AS +SELECT + CC_ID, + CC_CATALOG, + CC_DATABASE, + CC_TABLE, + CC_PARTITION, + CASE WHEN CC_TYPE = 'i' THEN 'minor' WHEN CC_TYPE = 'a' THEN 'major' WHEN CC_TYPE = '*' THEN 'smart-optimize' ELSE 'UNKNOWN' END, + CASE WHEN CC_STATE = 'f' THEN 'failed' WHEN CC_STATE = 's' THEN 'succeeded' + WHEN CC_STATE = 'a' THEN 'did not initiate' WHEN CC_STATE = 'c' THEN 'refused' ELSE 'UNKNOWN' END, + CASE WHEN CC_WORKER_ID IS NULL THEN cast (null as string) ELSE split(CC_WORKER_ID,"-")[0] END, + CASE WHEN CC_WORKER_ID IS NULL THEN cast (null as string) ELSE split(CC_WORKER_ID,"-")[size(split(CC_WORKER_ID,"-"))-1] END, + CC_WORKER_VERSION, + FROM_UNIXTIME(CC_ENQUEUE_TIME DIV 1000), + FROM_UNIXTIME(CC_START DIV 1000), + CASE WHEN CC_END IS NULL THEN cast (null as string) ELSE CC_END-CC_START END, + CC_HADOOP_JOB_ID, + CC_RUN_AS, + CC_ERROR_MESSAGE, + CC_NEXT_TXN_ID, + CC_TXN_ID, + FROM_UNIXTIME(CC_COMMIT_TIME DIV 1000), + CC_HIGHEST_WRITE_ID, + CASE WHEN CC_INITIATOR_ID IS NULL THEN cast (null as string) ELSE split(CC_INITIATOR_ID,"-")[0] END, + CASE WHEN CC_INITIATOR_ID IS NULL THEN cast (null as string) ELSE split(CC_INITIATOR_ID,"-")[size(split(CC_INITIATOR_ID,"-"))-1] END, + CC_INITIATOR_VERSION, + NULL, + NVL(CC_POOL_NAME, 'default'), + CC_NUMBER_OF_BUCKETS, + CC_TBLPROPERTIES +FROM COMPLETED_COMPACTIONS +UNION ALL +SELECT + CQ_ID, + CQ_CATALOG, + CQ_DATABASE, + CQ_TABLE, + CQ_PARTITION, + CASE WHEN CQ_TYPE = 'i' THEN 'minor' WHEN CQ_TYPE = 'a' THEN 'major' ELSE 'UNKNOWN' END, + CASE WHEN CQ_STATE = 'i' THEN 'initiated' WHEN CQ_STATE = 'w' THEN 'working' WHEN CQ_STATE = 'r' THEN 'ready for cleaning' ELSE 'UNKNOWN' END, + CASE WHEN CQ_WORKER_ID IS NULL THEN NULL ELSE split(CQ_WORKER_ID,"-")[0] END, + CASE WHEN CQ_WORKER_ID IS NULL THEN NULL ELSE split(CQ_WORKER_ID,"-")[size(split(CQ_WORKER_ID,"-"))-1] 
END, + CQ_WORKER_VERSION, + FROM_UNIXTIME(CQ_ENQUEUE_TIME DIV 1000), + FROM_UNIXTIME(CQ_START DIV 1000), + cast (null as string), + CQ_HADOOP_JOB_ID, + CQ_RUN_AS, + CQ_ERROR_MESSAGE, + CQ_NEXT_TXN_ID, + CQ_TXN_ID, + FROM_UNIXTIME(CQ_COMMIT_TIME DIV 1000), + CQ_HIGHEST_WRITE_ID, + CASE WHEN CQ_INITIATOR_ID IS NULL THEN NULL ELSE split(CQ_INITIATOR_ID,"-")[0] END, + CASE WHEN CQ_INITIATOR_ID IS NULL THEN NULL ELSE split(CQ_INITIATOR_ID,"-")[size(split(CQ_INITIATOR_ID,"-"))-1] END, + CQ_INITIATOR_VERSION, + FROM_UNIXTIME(CQ_CLEANER_START DIV 1000), + NVL(CQ_POOL_NAME, 'default'), + CQ_NUMBER_OF_BUCKETS, + CQ_TBLPROPERTIES +FROM COMPACTION_QUEUE; + +USE INFORMATION_SCHEMA; + +CREATE OR REPLACE VIEW `SCHEMATA` +( + `CATALOG_NAME`, + `SCHEMA_NAME`, + `SCHEMA_OWNER`, + `DEFAULT_CHARACTER_SET_CATALOG`, + `DEFAULT_CHARACTER_SET_SCHEMA`, + `DEFAULT_CHARACTER_SET_NAME`, + `SQL_PATH` +) AS +SELECT DISTINCT + D.`CTLG_NAME`, + D.`NAME`, + D.`OWNER_NAME`, + cast(null as string), + cast(null as string), + cast(null as string), + `DB_LOCATION_URI` +FROM + `sys`.`DBS` D LEFT JOIN `sys`.`TBLS` T ON (D.`DB_ID` = T.`DB_ID`) + LEFT JOIN `sys`.`TBL_PRIVS` P ON (T.`TBL_ID` = P.`TBL_ID`) +WHERE + NOT restrict_information_schema() OR P.`TBL_ID` IS NOT NULL + AND (P.`PRINCIPAL_NAME`=current_user() AND P.`PRINCIPAL_TYPE`='USER' + OR ((array_contains(current_groups(), P.`PRINCIPAL_NAME`) OR P.`PRINCIPAL_NAME` = 'public') AND P.`PRINCIPAL_TYPE`='GROUP')) + AND current_authorizer() = P.`AUTHORIZER`; + +CREATE OR REPLACE VIEW `TABLES` +( + `TABLE_CATALOG`, + `TABLE_SCHEMA`, + `TABLE_NAME`, + `TABLE_TYPE`, + `SELF_REFERENCING_COLUMN_NAME`, + `REFERENCE_GENERATION`, + `USER_DEFINED_TYPE_CATALOG`, + `USER_DEFINED_TYPE_SCHEMA`, + `USER_DEFINED_TYPE_NAME`, + `IS_INSERTABLE_INTO`, + `IS_TYPED`, + `COMMIT_ACTION` +) AS +SELECT DISTINCT + D.CTLG_NAME, + D.NAME, + T.TBL_NAME, + IF(length(T.VIEW_ORIGINAL_TEXT) > 0, 'VIEW', 'BASE_TABLE'), + cast(null as string), + cast(null as string), + cast(null as 
string), + cast(null as string), + cast(null as string), + IF(length(T.VIEW_ORIGINAL_TEXT) > 0, 'NO', 'YES'), + 'NO', + cast(null as string) +FROM + `sys`.`TBLS` T JOIN `sys`.`DBS` D ON (D.`DB_ID` = T.`DB_ID`) + LEFT JOIN `sys`.`TBL_PRIVS` P ON (T.`TBL_ID` = P.`TBL_ID`) +WHERE + NOT restrict_information_schema() OR P.`TBL_ID` IS NOT NULL + AND (P.`PRINCIPAL_NAME`=current_user() AND P.`PRINCIPAL_TYPE`='USER' + OR ((array_contains(current_groups(), P.`PRINCIPAL_NAME`) OR P.`PRINCIPAL_NAME` = 'public') AND P.`PRINCIPAL_TYPE`='GROUP')) + AND P.`TBL_PRIV`='SELECT' AND P.`AUTHORIZER`=current_authorizer(); + +CREATE OR REPLACE VIEW `TABLE_PRIVILEGES` +( + `GRANTOR`, + `GRANTEE`, + `TABLE_CATALOG`, + `TABLE_SCHEMA`, + `TABLE_NAME`, + `PRIVILEGE_TYPE`, + `IS_GRANTABLE`, + `WITH_HIERARCHY` +) AS +SELECT DISTINCT + P.`GRANTOR`, + P.`PRINCIPAL_NAME`, + D.`CTLG_NAME`, + D.`NAME`, + T.`TBL_NAME`, + P.`TBL_PRIV`, + IF (P.`GRANT_OPTION` == 0, 'NO', 'YES'), + 'NO' +FROM + `sys`.`TBL_PRIVS` P JOIN `sys`.`TBLS` T ON (P.`TBL_ID` = T.`TBL_ID`) + JOIN `sys`.`DBS` D ON (T.`DB_ID` = D.`DB_ID`) + LEFT JOIN `sys`.`TBL_PRIVS` P2 ON (P.`TBL_ID` = P2.`TBL_ID`) +WHERE + NOT restrict_information_schema() OR + (P2.`TBL_ID` IS NOT NULL AND P.`PRINCIPAL_NAME` = P2.`PRINCIPAL_NAME` AND P.`PRINCIPAL_TYPE` = P2.`PRINCIPAL_TYPE` + AND (P2.`PRINCIPAL_NAME`=current_user() AND P2.`PRINCIPAL_TYPE`='USER' + OR ((array_contains(current_groups(), P2.`PRINCIPAL_NAME`) OR P2.`PRINCIPAL_NAME` = 'public') AND P2.`PRINCIPAL_TYPE`='GROUP')) + AND P2.`TBL_PRIV`='SELECT' AND P.`AUTHORIZER` = current_authorizer() AND P2.`AUTHORIZER` = current_authorizer()); + +CREATE OR REPLACE VIEW `COLUMNS` +( + `TABLE_CATALOG`, + `TABLE_SCHEMA`, + `TABLE_NAME`, + `COLUMN_NAME`, + `ORDINAL_POSITION`, + `COLUMN_DEFAULT`, + `IS_NULLABLE`, + `DATA_TYPE`, + `CHARACTER_MAXIMUM_LENGTH`, + `CHARACTER_OCTET_LENGTH`, + `NUMERIC_PRECISION`, + `NUMERIC_PRECISION_RADIX`, + `NUMERIC_SCALE`, + `DATETIME_PRECISION`, + `INTERVAL_TYPE`, + 
`INTERVAL_PRECISION`, + `CHARACTER_SET_CATALOG`, + `CHARACTER_SET_SCHEMA`, + `CHARACTER_SET_NAME`, + `COLLATION_CATALOG`, + `COLLATION_SCHEMA`, + `COLLATION_NAME`, + `UDT_CATALOG`, + `UDT_SCHEMA`, + `UDT_NAME`, + `SCOPE_CATALOG`, + `SCOPE_SCHEMA`, + `SCOPE_NAME`, + `MAXIMUM_CARDINALITY`, + `DTD_IDENTIFIER`, + `IS_SELF_REFERENCING`, + `IS_IDENTITY`, + `IDENTITY_GENERATION`, + `IDENTITY_START`, + `IDENTITY_INCREMENT`, + `IDENTITY_MAXIMUM`, + `IDENTITY_MINIMUM`, + `IDENTITY_CYCLE`, + `IS_GENERATED`, + `GENERATION_EXPRESSION`, + `IS_SYSTEM_TIME_PERIOD_START`, + `IS_SYSTEM_TIME_PERIOD_END`, + `SYSTEM_TIME_PERIOD_TIMESTAMP_GENERATION`, + `IS_UPDATABLE`, + `DECLARED_DATA_TYPE`, + `DECLARED_NUMERIC_PRECISION`, + `DECLARED_NUMERIC_SCALE` +) AS +SELECT DISTINCT + D.CTLG_NAME, + D.NAME, + T.TBL_NAME, + C.COLUMN_NAME, + C.INTEGER_IDX, + cast (null as string), + 'YES', + C.TYPE_NAME as TYPE_NAME, + CASE WHEN lower(C.TYPE_NAME) like 'varchar%' THEN cast(regexp_extract(upper(C.TYPE_NAME), '^VARCHAR\\s*\\((\\d+)\\s*\\)$', 1) as int) + WHEN lower(C.TYPE_NAME) like 'char%' THEN cast(regexp_extract(upper(C.TYPE_NAME), '^CHAR\\s*\\((\\d+)\\s*\\)$', 1) as int) + ELSE null END, + CASE WHEN lower(C.TYPE_NAME) like 'varchar%' THEN cast(regexp_extract(upper(C.TYPE_NAME), '^VARCHAR\\s*\\((\\d+)\\s*\\)$', 1) as int) + WHEN lower(C.TYPE_NAME) like 'char%' THEN cast(regexp_extract(upper(C.TYPE_NAME), '^CHAR\\s*\\((\\d+)\\s*\\)$', 1) as int) + ELSE null END, + CASE WHEN lower(C.TYPE_NAME) = 'bigint' THEN 19 + WHEN lower(C.TYPE_NAME) = 'int' THEN 10 + WHEN lower(C.TYPE_NAME) = 'smallint' THEN 5 + WHEN lower(C.TYPE_NAME) = 'tinyint' THEN 3 + WHEN lower(C.TYPE_NAME) = 'float' THEN 23 + WHEN lower(C.TYPE_NAME) = 'double' THEN 53 + WHEN lower(C.TYPE_NAME) like 'decimal%' THEN regexp_extract(upper(C.TYPE_NAME), '^DECIMAL\\s*\\((\\d+)',1) + WHEN lower(C.TYPE_NAME) like 'numeric%' THEN regexp_extract(upper(C.TYPE_NAME), '^NUMERIC\\s*\\((\\d+)',1) + ELSE null END, + CASE WHEN lower(C.TYPE_NAME) = 
'bigint' THEN 10 + WHEN lower(C.TYPE_NAME) = 'int' THEN 10 + WHEN lower(C.TYPE_NAME) = 'smallint' THEN 10 + WHEN lower(C.TYPE_NAME) = 'tinyint' THEN 10 + WHEN lower(C.TYPE_NAME) = 'float' THEN 2 + WHEN lower(C.TYPE_NAME) = 'double' THEN 2 + WHEN lower(C.TYPE_NAME) like 'decimal%' THEN 10 + WHEN lower(C.TYPE_NAME) like 'numeric%' THEN 10 + ELSE null END, + CASE WHEN lower(C.TYPE_NAME) like 'decimal%' THEN regexp_extract(upper(C.TYPE_NAME), '^DECIMAL\\s*\\((\\d+),(\\d+)',2) + WHEN lower(C.TYPE_NAME) like 'numeric%' THEN regexp_extract(upper(C.TYPE_NAME), '^NUMERIC\\s*\\((\\d+),(\\d+)',2) + ELSE null END, + CASE WHEN lower(C.TYPE_NAME) = 'date' THEN 0 + WHEN lower(C.TYPE_NAME) = 'timestamp' THEN 9 + ELSE null END, + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + C.CD_ID, + 'NO', + 'NO', + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + 'NEVER', + cast (null as string), + 'NO', + 'NO', + cast (null as string), + 'YES', + C.TYPE_NAME as DECLARED_DATA_TYPE, + CASE WHEN lower(C.TYPE_NAME) = 'bigint' THEN 19 + WHEN lower(C.TYPE_NAME) = 'int' THEN 10 + WHEN lower(C.TYPE_NAME) = 'smallint' THEN 5 + WHEN lower(C.TYPE_NAME) = 'tinyint' THEN 3 + WHEN lower(C.TYPE_NAME) = 'float' THEN 23 + WHEN lower(C.TYPE_NAME) = 'double' THEN 53 + WHEN lower(C.TYPE_NAME) like 'decimal%' THEN regexp_extract(upper(C.TYPE_NAME), '^DECIMAL\\s*\\((\\d+)',1) + WHEN lower(C.TYPE_NAME) like 'numeric%' THEN regexp_extract(upper(C.TYPE_NAME), '^NUMERIC\\s*\\((\\d+)',1) + ELSE null END, + CASE WHEN lower(C.TYPE_NAME) = 'bigint' THEN 10 + WHEN lower(C.TYPE_NAME) = 'int' THEN 10 + 
WHEN lower(C.TYPE_NAME) = 'smallint' THEN 10 + WHEN lower(C.TYPE_NAME) = 'tinyint' THEN 10 + WHEN lower(C.TYPE_NAME) = 'float' THEN 2 + WHEN lower(C.TYPE_NAME) = 'double' THEN 2 + WHEN lower(C.TYPE_NAME) like 'decimal%' THEN 10 + WHEN lower(C.TYPE_NAME) like 'numeric%' THEN 10 + ELSE null END +FROM + `sys`.`COLUMNS_V2` C JOIN `sys`.`SDS` S ON (C.`CD_ID` = S.`CD_ID`) + JOIN `sys`.`TBLS` T ON (S.`SD_ID` = T.`SD_ID`) + JOIN `sys`.`DBS` D ON (T.`DB_ID` = D.`DB_ID`) + LEFT JOIN `sys`.`TBL_COL_PRIVS` P ON (T.`TBL_ID` = P.`TBL_ID`) +WHERE + NOT restrict_information_schema() OR P.`TBL_ID` IS NOT NULL + AND C.`COLUMN_NAME` = P.`COLUMN_NAME` + AND (P.`PRINCIPAL_NAME`=current_user() AND P.`PRINCIPAL_TYPE`='USER' + OR ((array_contains(current_groups(), P.`PRINCIPAL_NAME`) OR P.`PRINCIPAL_NAME` = 'public') AND P.`PRINCIPAL_TYPE`='GROUP')) + AND array_contains(split_map_privs(P.`TBL_COL_PRIV`),"SELECT") AND P.`AUTHORIZER`=current_authorizer(); + +CREATE OR REPLACE VIEW `COLUMN_PRIVILEGES` +( + `GRANTOR`, + `GRANTEE`, + `TABLE_CATALOG`, + `TABLE_SCHEMA`, + `TABLE_NAME`, + `COLUMN_NAME`, + `PRIVILEGE_TYPE`, + `IS_GRANTABLE` +) AS +SELECT DISTINCT + P.`GRANTOR`, + P.`PRINCIPAL_NAME`, + D.`CTLG_NAME`, + D.`NAME`, + T.`TBL_NAME`, + P.`COLUMN_NAME`, + P.`TBL_COL_PRIV`, + IF (P.`GRANT_OPTION` == 0, 'NO', 'YES') +FROM + (SELECT + Q.`GRANTOR`, + Q.`GRANT_OPTION`, + Q.`PRINCIPAL_NAME`, + Q.`PRINCIPAL_TYPE`, + Q.`AUTHORIZER`, + Q.`COLUMN_NAME`, + `TBL_COL_PRIV_TMP`.`TBL_COL_PRIV`, + Q.`TBL_ID` + FROM `sys`.`TBL_COL_PRIVS` AS Q + LATERAL VIEW explode(split_map_privs(Q.`TBL_COL_PRIV`)) `TBL_COL_PRIV_TMP` AS `TBL_COL_PRIV`) P + JOIN `sys`.`TBLS` T ON (P.`TBL_ID` = T.`TBL_ID`) + JOIN `sys`.`DBS` D ON (T.`DB_ID` = D.`DB_ID`) + JOIN `sys`.`SDS` S ON (S.`SD_ID` = T.`SD_ID`) + LEFT JOIN `sys`.`TBL_PRIVS` P2 ON (P.`TBL_ID` = P2.`TBL_ID`) +WHERE + NOT restrict_information_schema() OR P2.`TBL_ID` IS NOT NULL + AND P.`PRINCIPAL_NAME` = P2.`PRINCIPAL_NAME` AND P.`PRINCIPAL_TYPE` = P2.`PRINCIPAL_TYPE` + 
AND (P2.`PRINCIPAL_NAME`=current_user() AND P2.`PRINCIPAL_TYPE`='USER' + OR ((array_contains(current_groups(), P2.`PRINCIPAL_NAME`) OR P2.`PRINCIPAL_NAME` = 'public') AND P2.`PRINCIPAL_TYPE`='GROUP')) + AND P2.`TBL_PRIV`='SELECT' AND P.`AUTHORIZER`=current_authorizer() AND P2.`AUTHORIZER`=current_authorizer(); + +CREATE OR REPLACE VIEW `VIEWS` +( + `TABLE_CATALOG`, + `TABLE_SCHEMA`, + `TABLE_NAME`, + `VIEW_DEFINITION`, + `CHECK_OPTION`, + `IS_UPDATABLE`, + `IS_INSERTABLE_INTO`, + `IS_TRIGGER_UPDATABLE`, + `IS_TRIGGER_DELETABLE`, + `IS_TRIGGER_INSERTABLE_INTO` +) AS +SELECT DISTINCT + D.CTLG_NAME, + D.NAME, + T.TBL_NAME, + T.VIEW_ORIGINAL_TEXT, + CAST(NULL as string), + false, + false, + false, + false, + false +FROM + `sys`.`DBS` D JOIN `sys`.`TBLS` T ON (D.`DB_ID` = T.`DB_ID`) + LEFT JOIN `sys`.`TBL_PRIVS` P ON (T.`TBL_ID` = P.`TBL_ID`) +WHERE + length(T.VIEW_ORIGINAL_TEXT) > 0 + AND (NOT restrict_information_schema() OR P.`TBL_ID` IS NOT NULL + AND (P.`PRINCIPAL_NAME`=current_user() AND P.`PRINCIPAL_TYPE`='USER' + OR ((array_contains(current_groups(), P.`PRINCIPAL_NAME`) OR P.`PRINCIPAL_NAME` = 'public') AND P.`PRINCIPAL_TYPE`='GROUP')) + AND P.`TBL_PRIV`='SELECT' AND P.`AUTHORIZER`=current_authorizer()); + +CREATE OR REPLACE VIEW `COMPACTIONS` +( + `C_ID`, + `C_CATALOG`, + `C_DATABASE`, + `C_TABLE`, + `C_PARTITION`, + `C_TYPE`, + `C_STATE`, + `C_WORKER_HOST`, + `C_WORKER_ID`, + `C_WORKER_VERSION`, + `C_ENQUEUE_TIME`, + `C_START`, + `C_DURATION`, + `C_HADOOP_JOB_ID`, + `C_RUN_AS`, + `C_ERROR_MESSAGE`, + `C_NEXT_TXN_ID`, + `C_TXN_ID`, + `C_COMMIT_TIME`, + `C_HIGHEST_WRITE_ID`, + `C_INITIATOR_HOST`, + `C_INITIATOR_ID`, + `C_INITIATOR_VERSION`, + `C_CLEANER_START`, + `C_POOL_NAME`, + `C_NUMBER_OF_BUCKETS`, + `C_TBLPROPERTIES` +) AS +SELECT DISTINCT + C_ID, + C_CATALOG, + C_DATABASE, + C_TABLE, + C_PARTITION, + C_TYPE, + C_STATE, + C_WORKER_HOST, + C_WORKER_ID, + C_WORKER_VERSION, + C_ENQUEUE_TIME, + C_START, + C_DURATION, + C_HADOOP_JOB_ID, + C_RUN_AS, + 
C_ERROR_MESSAGE, + C_NEXT_TXN_ID, + C_TXN_ID, + C_COMMIT_TIME, + C_HIGHEST_WRITE_ID, + C_INITIATOR_HOST, + C_INITIATOR_ID, + C_INITIATOR_VERSION, + C_CLEANER_START, + C_POOL_NAME, + C_NUMBER_OF_BUCKETS, + C_TBLPROPERTIES +FROM + `sys`.`COMPACTIONS` C JOIN `sys`.`TBLS` T ON (C.`C_TABLE` = T.`TBL_NAME`) + JOIN `sys`.`DBS` D ON (C.`C_DATABASE` = D.`NAME`) AND (C.`C_CATALOG` = D.`CTLG_NAME`) + LEFT JOIN `sys`.`TBL_PRIVS` P ON (T.`TBL_ID` = P.`TBL_ID`) +WHERE + (NOT restrict_information_schema() OR P.`TBL_ID` IS NOT NULL + AND (P.`PRINCIPAL_NAME`=current_user() AND P.`PRINCIPAL_TYPE`='USER' + OR ((array_contains(current_groups(), P.`PRINCIPAL_NAME`) OR P.`PRINCIPAL_NAME` = 'public') AND P.`PRINCIPAL_TYPE`='GROUP')) + AND P.`TBL_PRIV`='SELECT' AND P.`AUTHORIZER`=current_authorizer()); + +CREATE OR REPLACE VIEW `TRANSACTIONS` ( + `TXN_ID`, + `STATE`, + `STARTED`, + `LAST_HEARTBEAT`, + `USER`, + `HOST`, + `AGENT_INFO`, + `META_INFO`, + `HEARTBEAT_COUNT`, + `TYPE`, + `TC_CATALOG`, + `TC_DATABASE`, + `TC_TABLE`, + `TC_PARTITION`, + `TC_OPERATION_TYPE`, + `TC_WRITEID` +) AS +SELECT DISTINCT + `TXN_ID`, + `STATE`, + `STARTED`, + `LAST_HEARTBEAT`, + `USER`, + `HOST`, + `AGENT_INFO`, + `META_INFO`, + `HEARTBEAT_COUNT`, + `TYPE`, + `TC_CATALOG`, + `TC_DATABASE`, + `TC_TABLE`, + `TC_PARTITION`, + `TC_OPERATION_TYPE`, + `TC_WRITEID` +FROM `SYS`.`TRANSACTIONS` AS TXN JOIN `sys`.`TBLS` T ON (TXN.`TC_TABLE` = T.`TBL_NAME`) + JOIN `sys`.`DBS` D ON (TXN.`TC_DATABASE` = D.`NAME`) AND (TXN.`TC_CATALOG` = D.`CTLG_NAME`) + LEFT JOIN `sys`.`TBL_PRIVS` P ON (T.`TBL_ID` = P.`TBL_ID`) +WHERE + (NOT restrict_information_schema() OR P.`TBL_ID` IS NOT NULL + AND (P.`PRINCIPAL_NAME`=current_user() AND P.`PRINCIPAL_TYPE`='USER' + OR ((array_contains(current_groups(), P.`PRINCIPAL_NAME`) OR P.`PRINCIPAL_NAME` = 'public') AND P.`PRINCIPAL_TYPE`='GROUP')) + AND P.`TBL_PRIV`='SELECT' AND P.`AUTHORIZER`=current_authorizer()); + +CREATE OR REPLACE VIEW `LOCKS` ( + `LOCK_EXT_ID`, + `LOCK_INT_ID`, + 
`TXNID`, + `CATALOG`, + `DB`, + `TABLE`, + `PARTITION`, + `LOCK_STATE`, + `LOCK_TYPE`, + `LAST_HEARTBEAT`, + `ACQUIRED_AT`, + `USER`, + `HOST`, + `HEARTBEAT_COUNT`, + `AGENT_INFO`, + `BLOCKEDBY_EXT_ID`, + `BLOCKEDBY_INT_ID` +) AS +SELECT DISTINCT + `LOCK_EXT_ID`, + `LOCK_INT_ID`, + `TXNID`, + `CATALOG`, + `DB`, + `TABLE`, + `PARTITION`, + `LOCK_STATE`, + `LOCK_TYPE`, + `LAST_HEARTBEAT`, + `ACQUIRED_AT`, + `USER`, + `HOST`, + `HEARTBEAT_COUNT`, + `AGENT_INFO`, + `BLOCKEDBY_EXT_ID`, + `BLOCKEDBY_INT_ID` +FROM SYS.`LOCKS` AS L JOIN `sys`.`TBLS` T ON (L.`TABLE` = T.`TBL_NAME`) + JOIN `sys`.`DBS` D ON (L.`DB` = D.`NAME`) AND (L.`CATALOG` = D.`CTLG_NAME`) + LEFT JOIN `sys`.`TBL_PRIVS` P ON (T.`TBL_ID` = P.`TBL_ID`) +WHERE + (NOT restrict_information_schema() OR P.`TBL_ID` IS NOT NULL + AND (P.`PRINCIPAL_NAME`=current_user() AND P.`PRINCIPAL_TYPE`='USER' + OR ((array_contains(current_groups(), P.`PRINCIPAL_NAME`) OR P.`PRINCIPAL_NAME` = 'public') AND P.`PRINCIPAL_TYPE`='GROUP')) + AND P.`TBL_PRIV`='SELECT' AND P.`AUTHORIZER`=current_authorizer()); SELECT 'Finished upgrading MetaStore schema from 4.2.0 to 4.3.0'; diff --git a/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.3.0.mssql.sql b/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.3.0.mssql.sql index ad2a45fc02b9..222ec48b0009 100644 --- a/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.3.0.mssql.sql +++ b/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.3.0.mssql.sql @@ -966,6 +966,7 @@ ALTER TABLE DBS ADD CONSTRAINT "DBS_FK1" FOREIGN KEY ("CTLG_NAME") REFERENCES CT -- ----------------------------------------------------------------------------------------------------------------------------------------------- CREATE TABLE COMPACTION_QUEUE( CQ_ID bigint NOT NULL, + CQ_CATALOG nvarchar(128) NOT NULL, CQ_DATABASE nvarchar(128) NOT NULL, CQ_TABLE nvarchar(256) NOT NULL, CQ_PARTITION nvarchar(767) NULL, @@ -999,6 +1000,7 @@ PRIMARY 
KEY CLUSTERED CREATE TABLE COMPLETED_COMPACTIONS ( CC_ID bigint NOT NULL, + CC_CATALOG nvarchar(128) NOT NULL, CC_DATABASE nvarchar(128) NOT NULL, CC_TABLE nvarchar(256) NOT NULL, CC_PARTITION nvarchar(767) NULL, @@ -1029,10 +1031,11 @@ PRIMARY KEY CLUSTERED ) ); -CREATE INDEX COMPLETED_COMPACTIONS_RES ON COMPLETED_COMPACTIONS (CC_DATABASE,CC_TABLE,CC_PARTITION); +CREATE INDEX COMPLETED_COMPACTIONS_RES ON COMPLETED_COMPACTIONS (CC_CATALOG, CC_DATABASE, CC_TABLE, CC_PARTITION); -- HIVE-25842 CREATE TABLE COMPACTION_METRICS_CACHE ( + CMC_CATALOG nvarchar(128) NOT NULL, CMC_DATABASE nvarchar(128) NOT NULL, CMC_TABLE nvarchar(256) NOT NULL, CMC_PARTITION nvarchar(767) NULL, @@ -1043,6 +1046,7 @@ CREATE TABLE COMPACTION_METRICS_CACHE ( CREATE TABLE COMPLETED_TXN_COMPONENTS( CTC_TXNID bigint NOT NULL, + CTC_CATALOG nvarchar(128) NOT NULL, CTC_DATABASE nvarchar(128) NOT NULL, CTC_TABLE nvarchar(256) NULL, CTC_PARTITION nvarchar(767) NULL, @@ -1052,7 +1056,7 @@ CREATE TABLE COMPLETED_TXN_COMPONENTS( ); -CREATE INDEX COMPLETED_TXN_COMPONENTS_IDX ON COMPLETED_TXN_COMPONENTS (CTC_DATABASE, CTC_TABLE, CTC_PARTITION); +CREATE INDEX COMPLETED_TXN_COMPONENTS_IDX ON COMPLETED_TXN_COMPONENTS (CTC_CATALOG, CTC_DATABASE, CTC_TABLE, CTC_PARTITION); CREATE TABLE HIVE_LOCKS( HL_LOCK_EXT_ID bigint NOT NULL, @@ -1119,6 +1123,7 @@ INSERT INTO TXNS (TXN_ID, TXN_STATE, TXN_STARTED, TXN_LAST_HEARTBEAT, TXN_USER, CREATE TABLE TXN_COMPONENTS( TC_TXNID bigint NOT NULL, + TC_CATALOG nvarchar(128) NOT NULL, TC_DATABASE nvarchar(128) NOT NULL, TC_TABLE nvarchar(256) NULL, TC_PARTITION nvarchar(767) NULL, @@ -1165,6 +1170,7 @@ CREATE INDEX CONSTRAINTS_PARENT_TBL_ID__INDEX ON KEY_CONSTRAINTS(PARENT_TBL_ID); CREATE INDEX CONSTRAINTS_CONSTRAINT_TYPE_INDEX ON KEY_CONSTRAINTS(CONSTRAINT_TYPE); CREATE TABLE WRITE_SET ( + WS_CATALOG nvarchar(128) NOT NULL, WS_DATABASE nvarchar(128) NOT NULL, WS_TABLE nvarchar(256) NOT NULL, WS_PARTITION nvarchar(767), @@ -1184,30 +1190,33 @@ ALTER TABLE 
METASTORE_DB_PROPERTIES ADD CONSTRAINT PROPERTY_KEY_PK PRIMARY KEY ( CREATE TABLE TXN_TO_WRITE_ID ( T2W_TXNID bigint NOT NULL, + T2W_CATALOG nvarchar(128) NOT NULL, T2W_DATABASE nvarchar(128) NOT NULL, T2W_TABLE nvarchar(256) NOT NULL, T2W_WRITEID bigint NOT NULL ); -CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_TXNID); -CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_WRITEID); +CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_CATALOG, T2W_DATABASE, T2W_TABLE, T2W_TXNID); +CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_CATALOG, T2W_DATABASE, T2W_TABLE, T2W_WRITEID); CREATE TABLE NEXT_WRITE_ID ( + NWI_CATALOG nvarchar(128) NOT NULL, NWI_DATABASE nvarchar(128) NOT NULL, NWI_TABLE nvarchar(256) NOT NULL, NWI_NEXT bigint NOT NULL ); -CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_DATABASE, NWI_TABLE); +CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_CATALOG, NWI_DATABASE, NWI_TABLE); CREATE TABLE MIN_HISTORY_WRITE_ID ( MH_TXNID bigint NOT NULL, + MH_CATALOG nvarchar(128) NOT NULL, MH_DATABASE nvarchar(128) NOT NULL, MH_TABLE nvarchar(256) NOT NULL, MH_WRITEID bigint NOT NULL ); -CREATE INDEX MIN_HISTORY_WRITE_ID_IDX ON MIN_HISTORY_WRITE_ID (MH_DATABASE, MH_TABLE, MH_WRITEID); +CREATE INDEX MIN_HISTORY_WRITE_ID_IDX ON MIN_HISTORY_WRITE_ID (MH_CATALOG, MH_DATABASE, MH_TABLE, MH_WRITEID); CREATE TABLE MIN_HISTORY_LEVEL ( MHL_TXNID bigint NOT NULL, @@ -1293,6 +1302,7 @@ CREATE TABLE TXN_WRITE_NOTIFICATION_LOG ( WNL_ID bigint NOT NULL, WNL_TXNID bigint NOT NULL, WNL_WRITEID bigint NOT NULL, + WNL_CATALOG nvarchar(128) NOT NULL, WNL_DATABASE nvarchar(128) NOT NULL, WNL_TABLE nvarchar(256) NOT NULL, WNL_PARTITION nvarchar(767) NOT NULL, @@ -1302,7 +1312,7 @@ CREATE TABLE TXN_WRITE_NOTIFICATION_LOG ( WNL_EVENT_TIME int NOT NULL ); -ALTER TABLE TXN_WRITE_NOTIFICATION_LOG ADD CONSTRAINT TXN_WRITE_NOTIFICATION_LOG_PK PRIMARY KEY (WNL_TXNID, 
WNL_DATABASE, WNL_TABLE, WNL_PARTITION); +ALTER TABLE TXN_WRITE_NOTIFICATION_LOG ADD CONSTRAINT TXN_WRITE_NOTIFICATION_LOG_PK PRIMARY KEY (WNL_TXNID, WNL_CATALOG, WNL_DATABASE, WNL_TABLE, WNL_PARTITION); INSERT INTO SEQUENCE_TABLE (SEQUENCE_NAME, NEXT_VAL) VALUES ('org.apache.hadoop.hive.metastore.model.MTxnWriteNotificationLog', 1); diff --git a/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-4.2.0-to-4.3.0.mssql.sql b/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-4.2.0-to-4.3.0.mssql.sql index 58a884a52c78..9a73e7d5f2a5 100644 --- a/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-4.2.0-to-4.3.0.mssql.sql +++ b/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-4.2.0-to-4.3.0.mssql.sql @@ -1,9 +1,32 @@ SELECT 'Upgrading MetaStore schema from 4.2.0 to 4.3.0' AS MESSAGE; +DROP INDEX COMPLETED_TXN_COMPONENTS_IDX ON COMPLETED_TXN_COMPONENTS; +DROP INDEX COMPLETED_COMPACTIONS_RES ON COMPLETED_COMPACTIONS; +DROP INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID; +DROP INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID; +DROP INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID; + ALTER TABLE HIVE_LOCKS ADD HL_CATALOG nvarchar(128) NOT NULL DEFAULT 'hive'; ALTER TABLE MATERIALIZATION_REBUILD_LOCKS ADD MRL_CAT_NAME nvarchar(128) NOT NULL DEFAULT 'hive'; +ALTER TABLE TXN_COMPONENTS ADD TC_CATALOG nvarchar(128) NOT NULL DEFAULT 'hive'; +ALTER TABLE COMPLETED_TXN_COMPONENTS ADD CTC_CATALOG nvarchar(128) NOT NULL DEFAULT 'hive'; +ALTER TABLE COMPACTION_QUEUE ADD CQ_CATALOG nvarchar(128) NOT NULL DEFAULT 'hive'; +ALTER TABLE COMPLETED_COMPACTIONS ADD CC_CATALOG nvarchar(128) NOT NULL DEFAULT 'hive'; +ALTER TABLE COMPACTION_METRICS_CACHE ADD CMC_CATALOG nvarchar(128) NOT NULL DEFAULT 'hive'; +ALTER TABLE WRITE_SET ADD WS_CATALOG nvarchar(128) NOT NULL DEFAULT 'hive'; +ALTER TABLE TXN_TO_WRITE_ID ADD T2W_CATALOG nvarchar(128) NOT NULL DEFAULT 'hive'; +ALTER TABLE NEXT_WRITE_ID ADD NWI_CATALOG nvarchar(128) NOT NULL DEFAULT 'hive'; +ALTER 
TABLE MIN_HISTORY_WRITE_ID ADD MH_CATALOG nvarchar(128) NOT NULL DEFAULT 'hive'; +ALTER TABLE TXN_WRITE_NOTIFICATION_LOG ADD WNL_CATALOG nvarchar(128) NOT NULL DEFAULT 'hive'; +ALTER TABLE TXN_WRITE_NOTIFICATION_LOG DROP CONSTRAINT TXN_WRITE_NOTIFICATION_LOG_PK; -CREATE INDEX MIN_HISTORY_WRITE_ID_IDX ON MIN_HISTORY_WRITE_ID (MH_DATABASE, MH_TABLE, MH_WRITEID); +CREATE INDEX COMPLETED_TXN_COMPONENTS_IDX ON COMPLETED_TXN_COMPONENTS (CTC_CATALOG, CTC_DATABASE, CTC_TABLE, CTC_PARTITION); +CREATE INDEX COMPLETED_COMPACTIONS_RES ON COMPLETED_COMPACTIONS (CC_CATALOG, CC_DATABASE, CC_TABLE, CC_PARTITION); +CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_CATALOG, T2W_DATABASE, T2W_TABLE, T2W_TXNID); +CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_CATALOG, T2W_DATABASE, T2W_TABLE, T2W_WRITEID); +CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_CATALOG, NWI_DATABASE, NWI_TABLE); +CREATE INDEX MIN_HISTORY_WRITE_ID_IDX ON MIN_HISTORY_WRITE_ID (MH_CATALOG, MH_DATABASE, MH_TABLE, MH_WRITEID); +ALTER TABLE TXN_WRITE_NOTIFICATION_LOG ADD CONSTRAINT TXN_WRITE_NOTIFICATION_LOG_PK PRIMARY KEY (WNL_TXNID, WNL_CATALOG, WNL_DATABASE, WNL_TABLE, WNL_PARTITION); -- These lines need to be last. Insert any changes above. 
UPDATE VERSION SET SCHEMA_VERSION='4.3.0', VERSION_COMMENT='Hive release version 4.3.0' where VER_ID=1; diff --git a/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.3.0.mysql.sql b/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.3.0.mysql.sql index 03e37811f6cf..ab32a99f25c9 100644 --- a/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.3.0.mysql.sql +++ b/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.3.0.mysql.sql @@ -966,6 +966,7 @@ INSERT INTO TXNS (TXN_ID, TXN_STATE, TXN_STARTED, TXN_LAST_HEARTBEAT, TXN_USER, CREATE TABLE TXN_COMPONENTS ( TC_TXNID bigint NOT NULL, + TC_CATALOG varchar(128) NOT NULL, TC_DATABASE varchar(128) NOT NULL, TC_TABLE varchar(256), TC_PARTITION varchar(767), @@ -978,6 +979,7 @@ CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID); CREATE TABLE COMPLETED_TXN_COMPONENTS ( CTC_TXNID bigint NOT NULL, + CTC_CATALOG varchar(128) NOT NULL, CTC_DATABASE varchar(128) NOT NULL, CTC_TABLE varchar(256), CTC_PARTITION varchar(767), @@ -986,7 +988,7 @@ CREATE TABLE COMPLETED_TXN_COMPONENTS ( CTC_UPDATE_DELETE char(1) NOT NULL ) ENGINE=InnoDB DEFAULT CHARSET=latin1; -CREATE INDEX COMPLETED_TXN_COMPONENTS_IDX ON COMPLETED_TXN_COMPONENTS (CTC_DATABASE, CTC_TABLE, CTC_PARTITION) USING BTREE; +CREATE INDEX COMPLETED_TXN_COMPONENTS_IDX ON COMPLETED_TXN_COMPONENTS (CTC_CATALOG, CTC_DATABASE, CTC_TABLE, CTC_PARTITION) USING BTREE; CREATE TABLE TXN_LOCK_TBL ( TXN_LOCK bigint NOT NULL @@ -1024,6 +1026,7 @@ INSERT INTO NEXT_LOCK_ID VALUES(1); CREATE TABLE COMPACTION_QUEUE ( CQ_ID bigint PRIMARY KEY, + CQ_CATALOG varchar(128) NOT NULL, CQ_DATABASE varchar(128) NOT NULL, CQ_TABLE varchar(256) NOT NULL, CQ_PARTITION varchar(767), @@ -1053,6 +1056,7 @@ CREATE TABLE COMPACTION_QUEUE ( CREATE TABLE COMPLETED_COMPACTIONS ( CC_ID bigint PRIMARY KEY, + CC_CATALOG varchar(128) NOT NULL, CC_DATABASE varchar(128) NOT NULL, CC_TABLE varchar(256) NOT NULL, CC_PARTITION 
varchar(767), @@ -1079,10 +1083,11 @@ CREATE TABLE COMPLETED_COMPACTIONS ( CC_ORDER_BY varchar(4000) ) ENGINE=InnoDB DEFAULT CHARSET=latin1; -CREATE INDEX COMPLETED_COMPACTIONS_RES ON COMPLETED_COMPACTIONS (CC_DATABASE,CC_TABLE,CC_PARTITION); +CREATE INDEX COMPLETED_COMPACTIONS_RES ON COMPLETED_COMPACTIONS (CC_CATALOG, CC_DATABASE, CC_TABLE, CC_PARTITION); -- HIVE-25842 CREATE TABLE COMPACTION_METRICS_CACHE ( + CMC_CATALOG varchar(128) NOT NULL, CMC_DATABASE varchar(128) NOT NULL, CMC_TABLE varchar(256) NOT NULL, CMC_PARTITION varchar(767), @@ -1104,6 +1109,7 @@ CREATE TABLE AUX_TABLE ( ) ENGINE=InnoDB DEFAULT CHARSET=latin1; CREATE TABLE WRITE_SET ( + WS_CATALOG varchar(128) NOT NULL, WS_DATABASE varchar(128) NOT NULL, WS_TABLE varchar(256) NOT NULL, WS_PARTITION varchar(767), @@ -1114,31 +1120,34 @@ CREATE TABLE WRITE_SET ( CREATE TABLE TXN_TO_WRITE_ID ( T2W_TXNID bigint NOT NULL, + T2W_CATALOG varchar(128) NOT NULL, T2W_DATABASE varchar(128) NOT NULL, T2W_TABLE varchar(256) NOT NULL, T2W_WRITEID bigint NOT NULL ) ENGINE=InnoDB DEFAULT CHARSET=latin1; -CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_TXNID); -CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_WRITEID); +CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_CATALOG, T2W_DATABASE, T2W_TABLE, T2W_TXNID); +CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_CATALOG, T2W_DATABASE, T2W_TABLE, T2W_WRITEID); CREATE TABLE NEXT_WRITE_ID ( + NWI_CATALOG varchar(128) NOT NULL, NWI_DATABASE varchar(128) NOT NULL, NWI_TABLE varchar(256) NOT NULL, NWI_NEXT bigint NOT NULL ) ENGINE=InnoDB DEFAULT CHARSET=latin1; -CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_DATABASE, NWI_TABLE); +CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_CATALOG, NWI_DATABASE, NWI_TABLE); CREATE TABLE MIN_HISTORY_WRITE_ID ( MH_TXNID bigint NOT NULL, + MH_CATALOG varchar(128) NOT NULL, MH_DATABASE varchar(128) NOT NULL, 
MH_TABLE varchar(256) NOT NULL, MH_WRITEID bigint NOT NULL, FOREIGN KEY (MH_TXNID) REFERENCES TXNS (TXN_ID) ) ENGINE=InnoDB DEFAULT CHARSET=latin1; -CREATE INDEX MIN_HISTORY_WRITE_ID_IDX ON MIN_HISTORY_WRITE_ID (MH_DATABASE, MH_TABLE, MH_WRITEID); +CREATE INDEX MIN_HISTORY_WRITE_ID_IDX ON MIN_HISTORY_WRITE_ID (MH_CATALOG, MH_DATABASE, MH_TABLE, MH_WRITEID); CREATE TABLE MIN_HISTORY_LEVEL ( MHL_TXNID bigint NOT NULL, @@ -1210,6 +1219,7 @@ CREATE TABLE TXN_WRITE_NOTIFICATION_LOG ( WNL_ID bigint NOT NULL, WNL_TXNID bigint NOT NULL, WNL_WRITEID bigint NOT NULL, + WNL_CATALOG varchar(128) NOT NULL, WNL_DATABASE varchar(128) NOT NULL, WNL_TABLE varchar(256) NOT NULL, WNL_PARTITION varchar(767) NOT NULL, @@ -1217,7 +1227,7 @@ CREATE TABLE TXN_WRITE_NOTIFICATION_LOG ( WNL_PARTITION_OBJ longtext, WNL_FILES longtext, WNL_EVENT_TIME INT(11) NOT NULL, - PRIMARY KEY (WNL_TXNID, WNL_DATABASE, WNL_TABLE, WNL_PARTITION) + PRIMARY KEY (WNL_TXNID, WNL_CATALOG, WNL_DATABASE, WNL_TABLE, WNL_PARTITION) ) ENGINE=InnoDB DEFAULT CHARSET=latin1; INSERT INTO `SEQUENCE_TABLE` (`SEQUENCE_NAME`, `NEXT_VAL`) VALUES ('org.apache.hadoop.hive.metastore.model.MTxnWriteNotificationLog', 1); diff --git a/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-4.2.0-to-4.3.0.mysql.sql b/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-4.2.0-to-4.3.0.mysql.sql index 5149dce33cff..671f1880ef10 100644 --- a/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-4.2.0-to-4.3.0.mysql.sql +++ b/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-4.2.0-to-4.3.0.mysql.sql @@ -1,9 +1,30 @@ SELECT 'Upgrading MetaStore schema from 4.2.0 to 4.3.0' AS MESSAGE; +DROP INDEX COMPLETED_TXN_COMPONENTS_IDX ON COMPLETED_TXN_COMPONENTS; +DROP INDEX COMPLETED_COMPACTIONS_RES ON COMPLETED_COMPACTIONS; +DROP INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID; +DROP INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID; +DROP INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID; + ALTER TABLE HIVE_LOCKS ADD 
HL_CATALOG varchar(128) NOT NULL DEFAULT 'hive'; -ALTER TABLE MATERIALIZATION_REBUILD_LOCKS ADD MRL_CAT_NAME varchar(128) NOT NULL DEFAULT 'hive'; +ALTER TABLE MATERIALIZATION_REBUILD_LOCKS ADD MRL_CAT_NAME varchar(128) NOT NULL DEFAULT 'hive'; +ALTER TABLE TXN_COMPONENTS ADD TC_CATALOG varchar(128) NOT NULL DEFAULT 'hive'; +ALTER TABLE COMPLETED_TXN_COMPONENTS ADD CTC_CATALOG varchar(128) NOT NULL DEFAULT 'hive'; +ALTER TABLE COMPACTION_QUEUE ADD CQ_CATALOG varchar(128) NOT NULL DEFAULT 'hive'; +ALTER TABLE COMPLETED_COMPACTIONS ADD CC_CATALOG varchar(128) NOT NULL DEFAULT 'hive'; +ALTER TABLE COMPACTION_METRICS_CACHE ADD CMC_CATALOG varchar(128) NOT NULL DEFAULT 'hive'; +ALTER TABLE WRITE_SET ADD WS_CATALOG varchar(128) NOT NULL DEFAULT 'hive'; +ALTER TABLE TXN_TO_WRITE_ID ADD T2W_CATALOG varchar(128) NOT NULL DEFAULT 'hive'; +ALTER TABLE NEXT_WRITE_ID ADD NWI_CATALOG varchar(128) NOT NULL DEFAULT 'hive'; +ALTER TABLE MIN_HISTORY_WRITE_ID ADD MH_CATALOG varchar(128) NOT NULL DEFAULT 'hive'; +ALTER TABLE TXN_WRITE_NOTIFICATION_LOG ADD WNL_CATALOG varchar(128) NOT NULL DEFAULT 'hive'; -CREATE INDEX MIN_HISTORY_WRITE_ID_IDX ON MIN_HISTORY_WRITE_ID (MH_DATABASE, MH_TABLE, MH_WRITEID); +CREATE INDEX COMPLETED_TXN_COMPONENTS_IDX ON COMPLETED_TXN_COMPONENTS (CTC_CATALOG, CTC_DATABASE, CTC_TABLE, CTC_PARTITION) USING BTREE; +CREATE INDEX COMPLETED_COMPACTIONS_RES ON COMPLETED_COMPACTIONS (CC_CATALOG, CC_DATABASE, CC_TABLE, CC_PARTITION); +CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_CATALOG, T2W_DATABASE, T2W_TABLE, T2W_TXNID); +CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_CATALOG, T2W_DATABASE, T2W_TABLE, T2W_WRITEID); +CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_CATALOG, NWI_DATABASE, NWI_TABLE); +CREATE INDEX MIN_HISTORY_WRITE_ID_IDX ON MIN_HISTORY_WRITE_ID (MH_CATALOG, MH_DATABASE, MH_TABLE, MH_WRITEID); -- These lines need to be last. Insert any changes above. 
UPDATE VERSION SET SCHEMA_VERSION='4.3.0', VERSION_COMMENT='Hive release version 4.3.0' where VER_ID=1; diff --git a/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.3.0.oracle.sql b/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.3.0.oracle.sql index 33958a17852f..bb249ad0d3cd 100644 --- a/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.3.0.oracle.sql +++ b/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.3.0.oracle.sql @@ -957,6 +957,7 @@ INSERT INTO TXNS (TXN_ID, TXN_STATE, TXN_STARTED, TXN_LAST_HEARTBEAT, TXN_USER, CREATE TABLE TXN_COMPONENTS ( TC_TXNID NUMBER(19) NOT NULL REFERENCES TXNS (TXN_ID), + TC_CATALOG VARCHAR2(128) NOT NULL, TC_DATABASE VARCHAR2(128) NOT NULL, TC_TABLE VARCHAR2(256), TC_PARTITION VARCHAR2(767) NULL, @@ -968,6 +969,7 @@ CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID); CREATE TABLE COMPLETED_TXN_COMPONENTS ( CTC_TXNID NUMBER(19) NOT NULL, + CTC_CATALOG VARCHAR2(128) NOT NULL, CTC_DATABASE VARCHAR2(128) NOT NULL, CTC_TABLE VARCHAR2(256), CTC_PARTITION VARCHAR2(767), @@ -976,7 +978,7 @@ CREATE TABLE COMPLETED_TXN_COMPONENTS ( CTC_UPDATE_DELETE CHAR(1) NOT NULL ) ROWDEPENDENCIES; -CREATE INDEX COMPLETED_TXN_COMPONENTS_INDEX ON COMPLETED_TXN_COMPONENTS (CTC_DATABASE, CTC_TABLE, CTC_PARTITION); +CREATE INDEX COMPLETED_TXN_COMPONENTS_INDEX ON COMPLETED_TXN_COMPONENTS (CTC_CATALOG, CTC_DATABASE, CTC_TABLE, CTC_PARTITION); CREATE TABLE TXN_LOCK_TBL ( TXN_LOCK NUMBER(19) NOT NULL @@ -1013,6 +1015,7 @@ INSERT INTO NEXT_LOCK_ID VALUES(1); CREATE TABLE COMPACTION_QUEUE ( CQ_ID NUMBER(19) PRIMARY KEY, + CQ_CATALOG VARCHAR2(128) NOT NULL, CQ_DATABASE varchar(128) NOT NULL, CQ_TABLE varchar(256) NOT NULL, CQ_PARTITION varchar(767), @@ -1047,6 +1050,7 @@ INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1); CREATE TABLE COMPLETED_COMPACTIONS ( CC_ID NUMBER(19) PRIMARY KEY, + CC_CATALOG VARCHAR2(128) NOT NULL, CC_DATABASE varchar(128) NOT NULL, CC_TABLE 
varchar(256) NOT NULL, CC_PARTITION varchar(767), @@ -1073,10 +1077,11 @@ CREATE TABLE COMPLETED_COMPACTIONS ( CC_ORDER_BY varchar(4000) ) ROWDEPENDENCIES; -CREATE INDEX COMPLETED_COMPACTIONS_RES ON COMPLETED_COMPACTIONS (CC_DATABASE,CC_TABLE,CC_PARTITION); +CREATE INDEX COMPLETED_COMPACTIONS_RES ON COMPLETED_COMPACTIONS (CC_CATALOG, CC_DATABASE, CC_TABLE, CC_PARTITION); -- HIVE-25842 CREATE TABLE COMPACTION_METRICS_CACHE ( + CMC_CATALOG VARCHAR2(128) NOT NULL, CMC_DATABASE varchar(128) NOT NULL, CMC_TABLE varchar(256) NOT NULL, CMC_PARTITION varchar(767), @@ -1093,6 +1098,7 @@ CREATE TABLE AUX_TABLE ( ); CREATE TABLE WRITE_SET ( + WS_CATALOG VARCHAR2(128) NOT NULL, WS_DATABASE varchar2(128) NOT NULL, WS_TABLE varchar2(256) NOT NULL, WS_PARTITION varchar2(767), @@ -1103,30 +1109,33 @@ CREATE TABLE WRITE_SET ( CREATE TABLE TXN_TO_WRITE_ID ( T2W_TXNID NUMBER(19) NOT NULL, + T2W_CATALOG VARCHAR2(128) NOT NULL, T2W_DATABASE VARCHAR2(128) NOT NULL, T2W_TABLE VARCHAR2(256) NOT NULL, T2W_WRITEID NUMBER(19) NOT NULL ); -CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_TXNID); -CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_WRITEID); +CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_CATALOG, T2W_DATABASE, T2W_TABLE, T2W_TXNID); +CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_CATALOG, T2W_DATABASE, T2W_TABLE, T2W_WRITEID); CREATE TABLE NEXT_WRITE_ID ( + NWI_CATALOG VARCHAR2(128) NOT NULL, NWI_DATABASE VARCHAR2(128) NOT NULL, NWI_TABLE VARCHAR2(256) NOT NULL, NWI_NEXT NUMBER(19) NOT NULL ); -CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_DATABASE, NWI_TABLE); +CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_CATALOG, NWI_DATABASE, NWI_TABLE); CREATE TABLE MIN_HISTORY_WRITE_ID ( MH_TXNID NUMBER(19) NOT NULL REFERENCES TXNS (TXN_ID), + MH_CATALOG VARCHAR2(128) NOT NULL, MH_DATABASE VARCHAR2(128) NOT NULL, MH_TABLE VARCHAR2(256) NOT NULL, 
MH_WRITEID NUMBER(19) NOT NULL ); -CREATE INDEX MIN_HISTORY_WRITE_ID_IDX ON MIN_HISTORY_WRITE_ID (MH_DATABASE, MH_TABLE, MH_WRITEID); +CREATE INDEX MIN_HISTORY_WRITE_ID_IDX ON MIN_HISTORY_WRITE_ID (MH_CATALOG, MH_DATABASE, MH_TABLE, MH_WRITEID); CREATE TABLE MIN_HISTORY_LEVEL ( MHL_TXNID NUMBER(19) NOT NULL, @@ -1192,6 +1201,7 @@ CREATE TABLE TXN_WRITE_NOTIFICATION_LOG ( WNL_ID number(19) NOT NULL, WNL_TXNID number(19) NOT NULL, WNL_WRITEID number(19) NOT NULL, + WNL_CATALOG VARCHAR2(128) NOT NULL, WNL_DATABASE varchar(128) NOT NULL, WNL_TABLE varchar(256) NOT NULL, WNL_PARTITION varchar(767), @@ -1201,7 +1211,7 @@ CREATE TABLE TXN_WRITE_NOTIFICATION_LOG ( WNL_EVENT_TIME number(10) NOT NULL ); -CREATE INDEX TXN_WRITE_NOTIFICATION_LOG_IDX ON TXN_WRITE_NOTIFICATION_LOG (WNL_TXNID, WNL_DATABASE, WNL_TABLE, WNL_PARTITION); +CREATE INDEX TXN_WRITE_NOTIFICATION_LOG_IDX ON TXN_WRITE_NOTIFICATION_LOG (WNL_TXNID, WNL_CATALOG, WNL_DATABASE, WNL_TABLE, WNL_PARTITION); INSERT INTO SEQUENCE_TABLE (SEQUENCE_NAME, NEXT_VAL) VALUES ('org.apache.hadoop.hive.metastore.model.MTxnWriteNotificationLog', 1); diff --git a/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-4.2.0-to-4.3.0.oracle.sql b/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-4.2.0-to-4.3.0.oracle.sql index bb210a6623c5..3189e3309529 100644 --- a/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-4.2.0-to-4.3.0.oracle.sql +++ b/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-4.2.0-to-4.3.0.oracle.sql @@ -1,9 +1,30 @@ SELECT 'Upgrading MetaStore schema from 4.2.0 to 4.3.0' AS Status from dual; +DROP INDEX COMPLETED_TXN_COMPONENTS_INDEX; +DROP INDEX COMPLETED_COMPACTIONS_RES; +DROP INDEX TBL_TO_TXN_ID_IDX; +DROP INDEX TBL_TO_WRITE_ID_IDX; +DROP INDEX NEXT_WRITE_ID_IDX; + ALTER TABLE HIVE_LOCKS ADD (HL_CATALOG VARCHAR2(128) DEFAULT 'hive' NOT NULL); ALTER TABLE MATERIALIZATION_REBUILD_LOCKS ADD (MRL_CAT_NAME VARCHAR2(128) DEFAULT 'hive' NOT NULL); +ALTER 
TABLE TXN_COMPONENTS ADD (TC_CATALOG VARCHAR2(128) DEFAULT 'hive' NOT NULL); +ALTER TABLE COMPLETED_TXN_COMPONENTS ADD (CTC_CATALOG VARCHAR2(128) DEFAULT 'hive' NOT NULL); +ALTER TABLE COMPACTION_QUEUE ADD (CQ_CATALOG VARCHAR2(128) DEFAULT 'hive' NOT NULL); +ALTER TABLE COMPLETED_COMPACTIONS ADD (CC_CATALOG VARCHAR2(128) DEFAULT 'hive' NOT NULL); +ALTER TABLE COMPACTION_METRICS_CACHE ADD (CMC_CATALOG VARCHAR2(128) DEFAULT 'hive' NOT NULL); +ALTER TABLE WRITE_SET ADD (WS_CATALOG VARCHAR2(128) DEFAULT 'hive' NOT NULL); +ALTER TABLE TXN_TO_WRITE_ID ADD (T2W_CATALOG VARCHAR2(128) DEFAULT 'hive' NOT NULL); +ALTER TABLE NEXT_WRITE_ID ADD (NWI_CATALOG VARCHAR2(128) DEFAULT 'hive' NOT NULL); +ALTER TABLE MIN_HISTORY_WRITE_ID ADD (MH_CATALOG VARCHAR2(128) DEFAULT 'hive' NOT NULL); +ALTER TABLE TXN_WRITE_NOTIFICATION_LOG ADD (WNL_CATALOG VARCHAR2(128) DEFAULT 'hive' NOT NULL); -CREATE INDEX MIN_HISTORY_WRITE_ID_IDX ON MIN_HISTORY_WRITE_ID (MH_DATABASE, MH_TABLE, MH_WRITEID); +CREATE INDEX COMPLETED_TXN_COMPONENTS_INDEX ON COMPLETED_TXN_COMPONENTS (CTC_CATALOG, CTC_DATABASE, CTC_TABLE, CTC_PARTITION); +CREATE INDEX COMPLETED_COMPACTIONS_RES ON COMPLETED_COMPACTIONS (CC_CATALOG, CC_DATABASE, CC_TABLE, CC_PARTITION); +CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_CATALOG, T2W_DATABASE, T2W_TABLE, T2W_TXNID); +CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_CATALOG, T2W_DATABASE, T2W_TABLE, T2W_WRITEID); +CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_CATALOG, NWI_DATABASE, NWI_TABLE); +CREATE INDEX MIN_HISTORY_WRITE_ID_IDX ON MIN_HISTORY_WRITE_ID (MH_CATALOG, MH_DATABASE, MH_TABLE, MH_WRITEID); -- These lines need to be last. Insert any changes above. 
UPDATE VERSION SET SCHEMA_VERSION='4.3.0', VERSION_COMMENT='Hive release version 4.3.0' where VER_ID=1; diff --git a/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.3.0.postgres.sql b/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.3.0.postgres.sql index 10eb38923261..34bdbad11c09 100644 --- a/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.3.0.postgres.sql +++ b/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.3.0.postgres.sql @@ -1592,6 +1592,7 @@ INSERT INTO "TXNS" ("TXN_ID", "TXN_STATE", "TXN_STARTED", "TXN_LAST_HEARTBEAT", CREATE TABLE "TXN_COMPONENTS" ( "TC_TXNID" bigint NOT NULL REFERENCES "TXNS" ("TXN_ID"), + "TC_CATALOG" varchar(128) NOT NULL, "TC_DATABASE" varchar(128) NOT NULL, "TC_TABLE" varchar(256), "TC_PARTITION" varchar(767) DEFAULT NULL, @@ -1603,6 +1604,7 @@ CREATE INDEX TC_TXNID_INDEX ON "TXN_COMPONENTS" USING hash ("TC_TXNID"); CREATE TABLE "COMPLETED_TXN_COMPONENTS" ( "CTC_TXNID" bigint NOT NULL, + "CTC_CATALOG" varchar(128) NOT NULL, "CTC_DATABASE" varchar(128) NOT NULL, "CTC_TABLE" varchar(256), "CTC_PARTITION" varchar(767), @@ -1611,7 +1613,7 @@ CREATE TABLE "COMPLETED_TXN_COMPONENTS" ( "CTC_UPDATE_DELETE" char(1) NOT NULL ); -CREATE INDEX COMPLETED_TXN_COMPONENTS_INDEX ON "COMPLETED_TXN_COMPONENTS" USING btree ("CTC_DATABASE", "CTC_TABLE", "CTC_PARTITION"); +CREATE INDEX COMPLETED_TXN_COMPONENTS_INDEX ON "COMPLETED_TXN_COMPONENTS" USING btree ("CTC_CATALOG", "CTC_DATABASE", "CTC_TABLE", "CTC_PARTITION"); CREATE TABLE "TXN_LOCK_TBL" ( "TXN_LOCK" bigint NOT NULL @@ -1648,6 +1650,7 @@ INSERT INTO "NEXT_LOCK_ID" VALUES(1); CREATE TABLE "COMPACTION_QUEUE" ( "CQ_ID" bigint PRIMARY KEY, + "CQ_CATALOG" varchar(128) NOT NULL, "CQ_DATABASE" varchar(128) NOT NULL, "CQ_TABLE" varchar(256) NOT NULL, "CQ_PARTITION" varchar(767), @@ -1682,6 +1685,7 @@ INSERT INTO "NEXT_COMPACTION_QUEUE_ID" VALUES(1); CREATE TABLE "COMPLETED_COMPACTIONS" ( "CC_ID" bigint 
PRIMARY KEY, + "CC_CATALOG" varchar(128) NOT NULL, "CC_DATABASE" varchar(128) NOT NULL, "CC_TABLE" varchar(256) NOT NULL, "CC_PARTITION" varchar(767), @@ -1708,10 +1712,11 @@ CREATE TABLE "COMPLETED_COMPACTIONS" ( "CC_ORDER_BY" varchar(4000) ); -CREATE INDEX "COMPLETED_COMPACTIONS_RES" ON "COMPLETED_COMPACTIONS" ("CC_DATABASE","CC_TABLE","CC_PARTITION"); +CREATE INDEX "COMPLETED_COMPACTIONS_RES" ON "COMPLETED_COMPACTIONS" ("CC_CATALOG", "CC_DATABASE", "CC_TABLE", "CC_PARTITION"); -- HIVE-25842 CREATE TABLE "COMPACTION_METRICS_CACHE" ( + "CMC_CATALOG" varchar(128) NOT NULL, "CMC_DATABASE" varchar(128) NOT NULL, "CMC_TABLE" varchar(256) NOT NULL, "CMC_PARTITION" varchar(767), @@ -1728,6 +1733,7 @@ CREATE TABLE "AUX_TABLE" ( ); CREATE TABLE "WRITE_SET" ( + "WS_CATALOG" varchar(128) NOT NULL, "WS_DATABASE" varchar(128) NOT NULL, "WS_TABLE" varchar(256) NOT NULL, "WS_PARTITION" varchar(767), @@ -1738,30 +1744,33 @@ CREATE TABLE "WRITE_SET" ( CREATE TABLE "TXN_TO_WRITE_ID" ( "T2W_TXNID" bigint NOT NULL, + "T2W_CATALOG" varchar(128) NOT NULL, "T2W_DATABASE" varchar(128) NOT NULL, "T2W_TABLE" varchar(256) NOT NULL, "T2W_WRITEID" bigint NOT NULL ); -CREATE UNIQUE INDEX "TBL_TO_TXN_ID_IDX" ON "TXN_TO_WRITE_ID" ("T2W_DATABASE", "T2W_TABLE", "T2W_TXNID"); -CREATE UNIQUE INDEX "TBL_TO_WRITE_ID_IDX" ON "TXN_TO_WRITE_ID" ("T2W_DATABASE", "T2W_TABLE", "T2W_WRITEID"); +CREATE UNIQUE INDEX "TBL_TO_TXN_ID_IDX" ON "TXN_TO_WRITE_ID" ("T2W_CATALOG", "T2W_DATABASE", "T2W_TABLE", "T2W_TXNID"); +CREATE UNIQUE INDEX "TBL_TO_WRITE_ID_IDX" ON "TXN_TO_WRITE_ID" ("T2W_CATALOG", "T2W_DATABASE", "T2W_TABLE", "T2W_WRITEID"); CREATE TABLE "NEXT_WRITE_ID" ( + "NWI_CATALOG" varchar(128) NOT NULL, "NWI_DATABASE" varchar(128) NOT NULL, "NWI_TABLE" varchar(256) NOT NULL, "NWI_NEXT" bigint NOT NULL ); -CREATE UNIQUE INDEX "NEXT_WRITE_ID_IDX" ON "NEXT_WRITE_ID" ("NWI_DATABASE", "NWI_TABLE"); +CREATE UNIQUE INDEX "NEXT_WRITE_ID_IDX" ON "NEXT_WRITE_ID" ("NWI_CATALOG", "NWI_DATABASE", "NWI_TABLE"); CREATE 
TABLE "MIN_HISTORY_WRITE_ID" ( "MH_TXNID" bigint NOT NULL REFERENCES "TXNS" ("TXN_ID"), + "MH_CATALOG" varchar(128) NOT NULL, "MH_DATABASE" varchar(128) NOT NULL, "MH_TABLE" varchar(256) NOT NULL, "MH_WRITEID" bigint NOT NULL ); -CREATE INDEX "MIN_HISTORY_WRITE_ID_IDX" ON "MIN_HISTORY_WRITE_ID" ("MH_DATABASE", "MH_TABLE", "MH_WRITEID"); +CREATE INDEX "MIN_HISTORY_WRITE_ID_IDX" ON "MIN_HISTORY_WRITE_ID" ("MH_CATALOG", "MH_DATABASE", "MH_TABLE", "MH_WRITEID"); CREATE TABLE "MIN_HISTORY_LEVEL" ( "MHL_TXNID" bigint NOT NULL, @@ -1830,6 +1839,7 @@ CREATE TABLE "TXN_WRITE_NOTIFICATION_LOG" ( "WNL_ID" bigint NOT NULL, "WNL_TXNID" bigint NOT NULL, "WNL_WRITEID" bigint NOT NULL, + "WNL_CATALOG" varchar(128) NOT NULL, "WNL_DATABASE" varchar(128) NOT NULL, "WNL_TABLE" varchar(256) NOT NULL, "WNL_PARTITION" varchar(767) NOT NULL, @@ -1837,7 +1847,7 @@ CREATE TABLE "TXN_WRITE_NOTIFICATION_LOG" ( "WNL_PARTITION_OBJ" text, "WNL_FILES" text, "WNL_EVENT_TIME" integer NOT NULL, - PRIMARY KEY ("WNL_TXNID", "WNL_DATABASE", "WNL_TABLE", "WNL_PARTITION") + PRIMARY KEY ("WNL_TXNID", "WNL_CATALOG", "WNL_DATABASE", "WNL_TABLE", "WNL_PARTITION") ); INSERT INTO "SEQUENCE_TABLE" ("SEQUENCE_NAME", "NEXT_VAL") VALUES ('org.apache.hadoop.hive.metastore.model.MTxnWriteNotificationLog', 1); diff --git a/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-4.2.0-to-4.3.0.postgres.sql b/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-4.2.0-to-4.3.0.postgres.sql index bdd28ce20298..a34f4300727f 100644 --- a/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-4.2.0-to-4.3.0.postgres.sql +++ b/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-4.2.0-to-4.3.0.postgres.sql @@ -1,9 +1,30 @@ SELECT 'Upgrading MetaStore schema from 4.2.0 to 4.3.0'; +DROP INDEX COMPLETED_TXN_COMPONENTS_INDEX; +DROP INDEX "COMPLETED_COMPACTIONS_RES"; +DROP INDEX "TBL_TO_TXN_ID_IDX"; +DROP INDEX "TBL_TO_WRITE_ID_IDX"; +DROP INDEX "NEXT_WRITE_ID_IDX"; + ALTER 
TABLE "HIVE_LOCKS" ADD COLUMN "HL_CATALOG" varchar(128) NOT NULL DEFAULT 'hive'; ALTER TABLE "MATERIALIZATION_REBUILD_LOCKS" ADD COLUMN "MRL_CAT_NAME" varchar(128) NOT NULL DEFAULT 'hive'; +ALTER TABLE "TXN_COMPONENTS" ADD COLUMN "TC_CATALOG" varchar(128) NOT NULL DEFAULT 'hive'; +ALTER TABLE "COMPLETED_TXN_COMPONENTS" ADD COLUMN "CTC_CATALOG" varchar(128) NOT NULL DEFAULT 'hive'; +ALTER TABLE "COMPACTION_QUEUE" ADD COLUMN "CQ_CATALOG" varchar(128) NOT NULL DEFAULT 'hive'; +ALTER TABLE "COMPLETED_COMPACTIONS" ADD COLUMN "CC_CATALOG" varchar(128) NOT NULL DEFAULT 'hive'; +ALTER TABLE "COMPACTION_METRICS_CACHE" ADD COLUMN "CMC_CATALOG" varchar(128) NOT NULL DEFAULT 'hive'; +ALTER TABLE "WRITE_SET" ADD COLUMN "WS_CATALOG" varchar(128) NOT NULL DEFAULT 'hive'; +ALTER TABLE "TXN_TO_WRITE_ID" ADD COLUMN "T2W_CATALOG" varchar(128) NOT NULL DEFAULT 'hive'; +ALTER TABLE "NEXT_WRITE_ID" ADD COLUMN "NWI_CATALOG" varchar(128) NOT NULL DEFAULT 'hive'; +ALTER TABLE "MIN_HISTORY_WRITE_ID" ADD COLUMN "MH_CATALOG" varchar(128) NOT NULL DEFAULT 'hive'; +ALTER TABLE "TXN_WRITE_NOTIFICATION_LOG" ADD COLUMN "WNL_CATALOG" varchar(128) NOT NULL DEFAULT 'hive'; -CREATE INDEX "MIN_HISTORY_WRITE_ID_IDX" ON "MIN_HISTORY_WRITE_ID" ("MH_DATABASE", "MH_TABLE", "MH_WRITEID"); +CREATE INDEX COMPLETED_TXN_COMPONENTS_INDEX ON "COMPLETED_TXN_COMPONENTS" USING btree ("CTC_CATALOG", "CTC_DATABASE", "CTC_TABLE", "CTC_PARTITION"); +CREATE INDEX "COMPLETED_COMPACTIONS_RES" ON "COMPLETED_COMPACTIONS" ("CC_CATALOG", "CC_DATABASE", "CC_TABLE", "CC_PARTITION"); +CREATE UNIQUE INDEX "TBL_TO_TXN_ID_IDX" ON "TXN_TO_WRITE_ID" ("T2W_CATALOG", "T2W_DATABASE", "T2W_TABLE", "T2W_TXNID"); +CREATE UNIQUE INDEX "TBL_TO_WRITE_ID_IDX" ON "TXN_TO_WRITE_ID" ("T2W_CATALOG", "T2W_DATABASE", "T2W_TABLE", "T2W_WRITEID"); +CREATE UNIQUE INDEX "NEXT_WRITE_ID_IDX" ON "NEXT_WRITE_ID" ("NWI_CATALOG", "NWI_DATABASE", "NWI_TABLE"); +CREATE INDEX "MIN_HISTORY_WRITE_ID_IDX" ON "MIN_HISTORY_WRITE_ID" ("MH_CATALOG", "MH_DATABASE", 
"MH_TABLE", "MH_WRITEID"); -- These lines need to be last. Insert any changes above. UPDATE "VERSION" SET "SCHEMA_VERSION"='4.3.0', "VERSION_COMMENT"='Hive release version 4.3.0' where "VER_ID"=1; diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java index c49953e7fec6..ed54e2f8cc7a 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java @@ -374,7 +374,7 @@ public void testGetValidWriteIds() throws TException { client.rollbackTxn(tids.get(0)); ValidTxnList validTxnList = client.getValidTxns(); - String fullTableName = TxnUtils.getFullTableName("db", "tbl"); + String fullTableName = TxnUtils.getFullTableName(Warehouse.DEFAULT_CATALOG_NAME, "db", "tbl"); List tableValidWriteIds = client.getValidWriteIds( Collections.singletonList(fullTableName), validTxnList.writeToString()); @@ -439,6 +439,7 @@ public void testGetLatestCommittedCompactionInfo() throws Exception { // Test invalid inputs final String invalidTblName = "invalid"; + rqst.setCatName(Warehouse.DEFAULT_CATALOG_NAME); rqst.setDbname(dbName); Assert.assertThrows(MetaException.class, () -> client.getLatestCommittedCompactionInfo(rqst)); rqst.setTablename(invalidTblName); diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestGetAllWriteEventInfo.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestGetAllWriteEventInfo.java index 4163f23041ab..fcf9428e7340 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestGetAllWriteEventInfo.java +++ 
b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestGetAllWriteEventInfo.java @@ -21,6 +21,7 @@ import org.apache.hadoop.hive.metastore.IMetaStoreClient; import org.apache.hadoop.hive.metastore.ObjectStore; import org.apache.hadoop.hive.metastore.TableType; +import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.GetAllWriteEventInfoRequest; @@ -136,6 +137,7 @@ public void testGetByTxnId() throws Exception { Assert.assertEquals(1, writeEventInfoList.size()); WriteEventInfo writeEventInfo = writeEventInfoList.get(0); Assert.assertEquals(TXN_ID, writeEventInfo.getWriteId()); + Assert.assertEquals(Warehouse.DEFAULT_CATALOG_NAME, writeEventInfo.getCatalog()); Assert.assertEquals(DB_NAME, writeEventInfo.getDatabase()); Assert.assertEquals(TABLE_NAME, writeEventInfo.getTable()); } @@ -144,12 +146,14 @@ public void testGetByTxnId() throws Exception { public void testGetByTxnIdAndTableName() throws Exception { GetAllWriteEventInfoRequest req = new GetAllWriteEventInfoRequest(); req.setTxnId(TXN_ID); + req.setCatName(Warehouse.DEFAULT_CATALOG_NAME); req.setDbName(DB_NAME); req.setTableName(TABLE_NAME); List writeEventInfoList = client.getAllWriteEventInfo(req); Assert.assertEquals(1, writeEventInfoList.size()); WriteEventInfo writeEventInfo = writeEventInfoList.get(0); Assert.assertEquals(TXN_ID, writeEventInfo.getWriteId()); + Assert.assertEquals(Warehouse.DEFAULT_CATALOG_NAME, writeEventInfo.getCatalog()); Assert.assertEquals(DB_NAME, writeEventInfo.getDatabase()); Assert.assertEquals(TABLE_NAME, writeEventInfo.getTable()); } @@ -166,6 +170,7 @@ public void testGetByWrongTxnId() throws Exception { public void testGetByWrongDB() throws Exception { GetAllWriteEventInfoRequest req = new GetAllWriteEventInfoRequest(); req.setTxnId(TXN_ID); + 
req.setCatName(Warehouse.DEFAULT_CATALOG_NAME); req.setDbName("wrong_db"); List writeEventInfoList = client.getAllWriteEventInfo(req); Assert.assertTrue(writeEventInfoList.isEmpty()); @@ -175,6 +180,7 @@ public void testGetByWrongDB() throws Exception { public void testGetByWrongTable() throws Exception { GetAllWriteEventInfoRequest req = new GetAllWriteEventInfoRequest(); req.setTxnId(TXN_ID); + req.setCatName(Warehouse.DEFAULT_CATALOG_NAME); req.setDbName(DB_NAME); req.setTableName("wrong_table"); List writeEventInfoList = client.getAllWriteEventInfo(req); diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/events/TestCommitTxnEventWithDbAndWriteId.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/events/TestCommitTxnEventWithDbAndWriteId.java index 092968619488..033ed502a197 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/events/TestCommitTxnEventWithDbAndWriteId.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/events/TestCommitTxnEventWithDbAndWriteId.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hive.metastore.events; +import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest; import org.apache.hadoop.hive.metastore.api.TxnType; import org.apache.hadoop.hive.metastore.messaging.CommitTxnMessage; @@ -45,12 +46,12 @@ public void testBackwardsCompatibility() { @Test public void testSerializeDeserialize() { - + List catalogs = Arrays.asList(Warehouse.DEFAULT_CATALOG_NAME, Warehouse.DEFAULT_CATALOG_NAME); List databases = Arrays.asList("db1", "db22"); List writeIds = Arrays.asList(1L, 2L); - CommitTxnEvent event = new CommitTxnEvent(999L, TxnType.DEFAULT, null, databases, writeIds); + CommitTxnEvent event = new CommitTxnEvent(999L, TxnType.DEFAULT, null, catalogs, databases, writeIds); CommitTxnMessage msg = - 
MessageBuilder.getInstance().buildCommitTxnMessage(event.getTxnId(), event.getDatabases(), event.getWriteId()); + MessageBuilder.getInstance().buildCommitTxnMessage(event.getTxnId(), event.getCatalogs(), event.getDatabases(), event.getWriteId()); JSONMessageEncoder msgEncoder = new JSONMessageEncoder(); String json = msgEncoder.getSerializer().serialize(msg); diff --git a/standalone-metastore/metastore-tools/metastore-benchmarks/src/main/java/org/apache/hadoop/hive/metastore/tools/ACIDBenchmarks.java b/standalone-metastore/metastore-tools/metastore-benchmarks/src/main/java/org/apache/hadoop/hive/metastore/tools/ACIDBenchmarks.java index 69244eb0fb71..ee69da5f4cea 100644 --- a/standalone-metastore/metastore-tools/metastore-benchmarks/src/main/java/org/apache/hadoop/hive/metastore/tools/ACIDBenchmarks.java +++ b/standalone-metastore/metastore-tools/metastore-benchmarks/src/main/java/org/apache/hadoop/hive/metastore/tools/ACIDBenchmarks.java @@ -187,6 +187,7 @@ private void executeLock(HMSClient client, long txnId, List lockC @State(Scope.Benchmark) public static class TestAllocateTableWriteIds extends CoreContext { + String catName = Warehouse.DEFAULT_CATALOG_NAME; String dbName = "test_db"; String tblName = "tmp_table"; @@ -214,7 +215,7 @@ public void doTearDown() throws Exception { @Benchmark public void allocateTableWriteIds(TestAllocateTableWriteIds.ThreadState state) throws TException { - state.client.allocateTableWriteIds(dbName, tblName, state.openTxns); + state.client.allocateTableWriteIds(catName, dbName, tblName, state.openTxns); } private static long executeOpenTxnAndGetTxnId(HMSClient client) { diff --git a/standalone-metastore/metastore-tools/metastore-benchmarks/src/main/java/org/apache/hadoop/hive/metastore/tools/HMSBenchmarks.java b/standalone-metastore/metastore-tools/metastore-benchmarks/src/main/java/org/apache/hadoop/hive/metastore/tools/HMSBenchmarks.java index c01200c33be5..eda4e95f8723 100644 --- 
a/standalone-metastore/metastore-tools/metastore-benchmarks/src/main/java/org/apache/hadoop/hive/metastore/tools/HMSBenchmarks.java +++ b/standalone-metastore/metastore-tools/metastore-benchmarks/src/main/java/org/apache/hadoop/hive/metastore/tools/HMSBenchmarks.java @@ -23,6 +23,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.metastore.PartitionManagementTask; import org.apache.hadoop.hive.metastore.TableType; +import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.PartitionsStatsRequest; @@ -581,7 +582,7 @@ static DescriptiveStatistics benchmarkAllocateTableWriteIds(@NotNull MicroBenchm return bench.measure( () -> throwingSupplierWrapper(() -> client.openTxn(howMany)), - () -> throwingSupplierWrapper(() -> client.allocateTableWriteIds("test_db", "test_tbl", client.getOpenTxns())), + () -> throwingSupplierWrapper(() -> client.allocateTableWriteIds(Warehouse.DEFAULT_CATALOG_NAME, "test_db", "test_tbl", client.getOpenTxns())), () -> throwingSupplierWrapper(() -> client.abortTxns(client.getOpenTxns())) ); } diff --git a/standalone-metastore/metastore-tools/tools-common/src/main/java/org/apache/hadoop/hive/metastore/tools/HMSClient.java b/standalone-metastore/metastore-tools/tools-common/src/main/java/org/apache/hadoop/hive/metastore/tools/HMSClient.java index 973b75fbeb58..b0a43aa3fbc5 100644 --- a/standalone-metastore/metastore-tools/tools-common/src/main/java/org/apache/hadoop/hive/metastore/tools/HMSClient.java +++ b/standalone-metastore/metastore-tools/tools-common/src/main/java/org/apache/hadoop/hive/metastore/tools/HMSClient.java @@ -401,8 +401,9 @@ boolean abortTxns(List txnIds) throws TException { return true; } - boolean allocateTableWriteIds(String dbName, String tableName, List openTxns) throws TException { + boolean allocateTableWriteIds(String catName, String dbName, String tableName, 
List openTxns) throws TException { AllocateTableWriteIdsRequest awiRqst = new AllocateTableWriteIdsRequest(dbName, tableName); + awiRqst.setCatName(catName); openTxns.forEach(t -> { awiRqst.addToTxnIds(t); }); diff --git a/storage-api/src/java/org/apache/hadoop/hive/common/TableName.java b/storage-api/src/java/org/apache/hadoop/hive/common/TableName.java index 9e593cd69dce..43fe4632bb16 100644 --- a/storage-api/src/java/org/apache/hadoop/hive/common/TableName.java +++ b/storage-api/src/java/org/apache/hadoop/hive/common/TableName.java @@ -132,6 +132,10 @@ public String getDbTable() { return db + DatabaseName.CAT_DB_TABLE_SEPARATOR + table; } + public String getQualified() { + return cat + DatabaseName.CAT_DB_TABLE_SEPARATOR + db + DatabaseName.CAT_DB_TABLE_SEPARATOR + table; + } + /** * Get the name in `db`.`table` escaped format, if db is not empty, otherwise pass only the table name. */ diff --git a/streaming/src/java/org/apache/hive/streaming/HiveStreamingConnection.java b/streaming/src/java/org/apache/hive/streaming/HiveStreamingConnection.java index a2770e5b008b..2eb561b62a20 100644 --- a/streaming/src/java/org/apache/hive/streaming/HiveStreamingConnection.java +++ b/streaming/src/java/org/apache/hive/streaming/HiveStreamingConnection.java @@ -127,6 +127,7 @@ public String toString() { } // fields populated from builder + private String catalog; private String database; private String table; private List staticPartitionValues; @@ -156,6 +157,7 @@ public String toString() { private Runnable onShutdownRunner; private HiveStreamingConnection(Builder builder) throws StreamingException { + this.catalog = builder.catalog.toLowerCase(); this.database = builder.database.toLowerCase(); this.table = builder.table.toLowerCase(); this.staticPartitionValues = builder.staticPartitionValues; @@ -217,6 +219,7 @@ public static Builder newBuilder() { } public static class Builder { + private String catalog; private String database; private String table; private List 
staticPartitionValues; @@ -231,6 +234,17 @@ public static class Builder { private Table tableObject; private boolean isPartitioned; + /** + * Specify catalog to use for streaming connection. + * + * @param catalog - cat name + * @return - builder + */ + public Builder withCatalog(final String catalog) { + this.catalog = catalog; + return this; + } + /** * Specify database to use for streaming connection. * @@ -371,6 +385,9 @@ public Builder withTableObject(Table table) { * @return - hive streaming connection */ public HiveStreamingConnection connect() throws StreamingException { + if (catalog == null) { + throw new StreamingException("Catalog cannot be null for streaming connection"); + } if (database == null) { throw new StreamingException("Database cannot be null for streaming connection"); } @@ -908,6 +925,10 @@ public String getUsername() { return username; } + public String getCatalog() { + return catalog; + } + public String getDatabase() { return database; } diff --git a/streaming/src/java/org/apache/hive/streaming/TransactionBatch.java b/streaming/src/java/org/apache/hive/streaming/TransactionBatch.java index 78c75db4ad3f..9ee03e7d4096 100644 --- a/streaming/src/java/org/apache/hive/streaming/TransactionBatch.java +++ b/streaming/src/java/org/apache/hive/streaming/TransactionBatch.java @@ -282,7 +282,7 @@ private void commitImpl(Set partitions, String key, String value) } if (!partNames.isEmpty()) { conn.getMSC().addDynamicPartitions(txnToWriteId.getTxnId(), - txnToWriteId.getWriteId(), conn.getDatabase(), + txnToWriteId.getWriteId(), conn.getCatalog(), conn.getDatabase(), conn.getTable().getTableName(), partNames, DataOperationType.INSERT); }