diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/upgrade/SevenToEightUpgradeHandler.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/upgrade/SevenToEightUpgradeHandler.java
index 3affa7b64127..2cf95d033f70 100644
--- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/upgrade/SevenToEightUpgradeHandler.java
+++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/upgrade/SevenToEightUpgradeHandler.java
@@ -109,6 +109,7 @@ public Map upgrade(HoodieWriteConfig config, HoodieEngin
     rollbackFailedWritesAndCompact(table, context, config, upgradeDowngradeHelper, HoodieTableType.MERGE_ON_READ.equals(table.getMetaClient().getTableType()), HoodieTableVersion.SIX);
     try {
       HoodieTableMetaClient.createTableLayoutOnStorage(context.getStorageConf(), new StoragePath(config.getBasePath()), config.getProps(), TimelineLayoutVersion.VERSION_2, false);
+      LOG.info(">>> Created table layout on storage for timeline layout version {}", TimelineLayoutVersion.VERSION_2);
     } catch (IOException e) {
       LOG.error("Failed to create table layout on storage for timeline layout version {}", TimelineLayoutVersion.VERSION_2, e);
       throw new HoodieIOException("Failed to create table layout on storage", e);
@@ -121,6 +122,7 @@ public Map upgrade(HoodieWriteConfig config, HoodieEngin
     setInitialVersion(tableConfig, tablePropsToAdd);
     upgradeKeyGeneratorType(tableConfig, tablePropsToAdd);
     upgradeBootstrapIndexType(tableConfig, tablePropsToAdd);
+    LOG.info(">>> Upgraded table properties: {}", tablePropsToAdd);
 
     // Handle timeline upgrade:
     //  - Rewrite instants in active timeline to new format
@@ -140,13 +142,17 @@ public Map upgrade(HoodieWriteConfig config, HoodieEngin
       CommitMetadataSerDeV2 commitMetadataSerDeV2 = new CommitMetadataSerDeV2();
       CommitMetadataSerDeV1 commitMetadataSerDeV1 = new CommitMetadataSerDeV1();
       ActiveTimelineV2 activeTimelineV2 = new ActiveTimelineV2(metaClient);
+      LOG.info(">>> Upgrading {} instants in active timeline", instants.size());
       context.map(instants, instant -> {
         String originalFileName = instantFileNameGenerator.getFileName(instant);
         return upgradeActiveTimelineInstant(instant, originalFileName, metaClient, commitMetadataSerDeV1, commitMetadataSerDeV2, activeTimelineV2);
       }, instants.size());
+      LOG.info(">>> Upgraded {} instants in active timeline", instants.size());
     }
+    LOG.info(">>> Upgrading to LSM timeline");
     upgradeToLSMTimeline(table, context, config);
+    LOG.info(">>> Upgraded to LSM timeline");
 
     return tablePropsToAdd;
   }
diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/upgrade/UpgradeDowngradeUtils.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/upgrade/UpgradeDowngradeUtils.java
index e5618d69a359..df3d42d30658 100644
--- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/upgrade/UpgradeDowngradeUtils.java
+++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/upgrade/UpgradeDowngradeUtils.java
@@ -197,6 +197,7 @@ static void rollbackFailedWritesAndCompact(HoodieTable table, HoodieEngineContex
           }
         }
       }
+      LOG.info(">>> Rollback and compaction completed successfully");
     } catch (Exception e) {
       throw new HoodieException(e);
     }
diff --git a/hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/hudi/functional/TestSevenToEightUpgrade.scala b/hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/hudi/functional/TestSevenToEightUpgrade.scala
index 4fd75187852e..12a35d0c517a 100644
--- a/hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/hudi/functional/TestSevenToEightUpgrade.scala
+++ b/hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/hudi/functional/TestSevenToEightUpgrade.scala
@@ -59,6 +59,7 @@ class TestSevenToEightUpgrade extends RecordLevelIndexTestBase {
       hudiOptsWithoutLockConfigs
     }
 
+    println(">>> write data")
     doWriteAndValidateDataAndRecordIndex(hudiOpts,
       operation = DataSourceWriteOptions.INSERT_OPERATION_OPT_VAL,
       saveMode = SaveMode.Overwrite,
@@ -72,6 +73,7 @@ class TestSevenToEightUpgrade extends RecordLevelIndexTestBase {
 
     // downgrade table props to version seven
     // assert table version is seven and the partition fields in table config does not have partition type
+    println(">>> downgrade the table")
     new UpgradeDowngrade(metaClient, getWriteConfig(hudiOpts), context, SparkUpgradeDowngradeHelper.getInstance)
       .run(HoodieTableVersion.SEVEN, null)
     metaClient = HoodieTableMetaClient.reload(metaClient)
@@ -82,10 +84,12 @@ class TestSevenToEightUpgrade extends RecordLevelIndexTestBase {
 
     // auto upgrade the table
     // assert table version is eight and the partition fields in table config has partition type
+    println(">>> auto upgrade the table and write more data")
     doWriteAndValidateDataAndRecordIndex(hudiOpts,
       operation = DataSourceWriteOptions.UPSERT_OPERATION_OPT_VAL,
       saveMode = SaveMode.Append,
       validate = false)
+    println(">>> auto upgrade and commit done")
     metaClient = HoodieTableMetaClient.reload(metaClient)
     assertEquals(HoodieTableVersion.EIGHT, metaClient.getTableConfig.getTableVersion)