From 9edf55a8178407367ca79e1a533e7f6b9a56e582 Mon Sep 17 00:00:00 2001 From: Nisarg Thakkar Date: Sat, 27 Jul 2024 11:00:11 -0700 Subject: [PATCH] Harden update-store workflow --- .../venice/hadoop/VenicePushJobTest.java | 1 - .../java/com/linkedin/venice/ConfigKeys.java | 10 +- .../helix/StoragePersonaRepository.java | 6 +- .../linkedin/venice/meta/BackupStrategy.java | 6 +- .../venice/meta/HybridStoreConfig.java | 5 + .../venice/meta/PartitionerConfigImpl.java | 5 +- .../com/linkedin/venice/meta/ZKStore.java | 10 +- .../linkedin/venice/utils/PartitionUtils.java | 4 +- .../venice/meta/BackupStrategyTest.java | 23 + .../venice/meta/TestHybridStoreConfig.java | 22 + ...LevelConfigForActiveActiveReplication.java | 36 + ...lusterLevelConfigForNativeReplication.java | 2 + ...VeniceHelixAdminWithSharedEnvironment.java | 88 +- .../VeniceParentHelixAdminTest.java | 83 +- .../venice/endToEnd/PartialUpdateTest.java | 3 + .../venice/endToEnd/PushJobDetailsTest.java | 17 +- ...TestActiveActiveReplicationForIncPush.java | 29 +- .../linkedin/venice/endToEnd/TestBatch.java | 73 +- ...JobWithEmergencySourceRegionSelection.java | 5 +- .../TestPushJobWithNativeReplication.java | 4 +- .../venice/endToEnd/TestStoreMigration.java | 28 +- .../TestStoreUpdateStoragePersona.java | 15 +- .../endToEnd/TestWritePathComputation.java | 44 +- .../venice/hadoop/TestVenicePushJob.java | 40 +- .../utils/VeniceControllerCreateOptions.java | 16 + .../utils/VeniceControllerWrapper.java | 10 +- .../VeniceMultiClusterCreateOptions.java | 16 + ...VeniceMultiRegionClusterCreateOptions.java | 16 + ...woLayerMultiRegionMultiClusterWrapper.java | 4 +- .../utils/IntegrationTestPushUtils.java | 30 +- .../com/linkedin/venice/controller/Admin.java | 37 +- .../venice/controller/StoreViewUtils.java | 29 +- .../VeniceControllerClusterConfig.java | 13 +- .../controller/VeniceControllerService.java | 1 + .../venice/controller/VeniceHelixAdmin.java | 1050 ++------------ .../controller/VeniceParentHelixAdmin.java | 1289 ++++++----------- .../SystemSchemaInitializationRoutine.java | 42 +- .../init/SystemStoreInitializationHelper.java | 23 +- .../kafka/consumer/AdminConsumptionTask.java | 2 + .../kafka/consumer/AdminExecutionTask.java | 2 - ...SupersetSchemaGeneratorWithCustomProp.java | 29 +- .../venice/controller/util/AdminUtils.java | 73 + .../ParentControllerConfigUpdateUtils.java | 171 --- .../PrimaryControllerConfigUpdateUtils.java | 170 +++ .../controller/util/UpdateStoreUtils.java | 1173 +++++++++++++++ .../controller/util/UpdateStoreWrapper.java | 18 + .../AbstractTestVeniceParentHelixAdmin.java | 11 +- .../TestVeniceHelixAdminWithoutCluster.java | 56 - .../TestVeniceParentHelixAdmin.java | 304 ++-- .../SystemStoreInitializationHelperTest.java | 3 + ...SupersetSchemaGeneratorWithCustomProp.java | 37 + .../controller/util/AdminUtilsTest.java | 134 ++ ...rimaryControllerConfigUpdateUtilsTest.java | 154 ++ .../controller/util/UpdateStoreUtilsTest.java | 1079 ++++++++++++++ ...ParentControllerConfigUpdateUtilsTest.java | 308 ---- .../resources/superset_schema_test/v5.avsc | 12 + .../resources/superset_schema_test/v6.avsc | 9 + 57 files changed, 4100 insertions(+), 2780 deletions(-) create mode 100644 internal/venice-common/src/test/java/com/linkedin/venice/meta/BackupStrategyTest.java create mode 100644 services/venice-controller/src/main/java/com/linkedin/venice/controller/util/AdminUtils.java delete mode 100644 services/venice-controller/src/main/java/com/linkedin/venice/controller/util/ParentControllerConfigUpdateUtils.java create mode 
100644 services/venice-controller/src/main/java/com/linkedin/venice/controller/util/PrimaryControllerConfigUpdateUtils.java create mode 100644 services/venice-controller/src/main/java/com/linkedin/venice/controller/util/UpdateStoreUtils.java create mode 100644 services/venice-controller/src/main/java/com/linkedin/venice/controller/util/UpdateStoreWrapper.java create mode 100644 services/venice-controller/src/test/java/com/linkedin/venice/controller/util/AdminUtilsTest.java create mode 100644 services/venice-controller/src/test/java/com/linkedin/venice/controller/util/PrimaryControllerConfigUpdateUtilsTest.java create mode 100644 services/venice-controller/src/test/java/com/linkedin/venice/controller/util/UpdateStoreUtilsTest.java delete mode 100644 services/venice-controller/src/test/java/com/linkedin/venice/controller/utils/ParentControllerConfigUpdateUtilsTest.java create mode 100644 services/venice-controller/src/test/resources/superset_schema_test/v5.avsc create mode 100644 services/venice-controller/src/test/resources/superset_schema_test/v6.avsc diff --git a/clients/venice-push-job/src/test/java/com/linkedin/venice/hadoop/VenicePushJobTest.java b/clients/venice-push-job/src/test/java/com/linkedin/venice/hadoop/VenicePushJobTest.java index 2a0295fc17b..6ed5d107b37 100644 --- a/clients/venice-push-job/src/test/java/com/linkedin/venice/hadoop/VenicePushJobTest.java +++ b/clients/venice-push-job/src/test/java/com/linkedin/venice/hadoop/VenicePushJobTest.java @@ -439,7 +439,6 @@ private StoreInfo getStoreInfo(Consumer info, boolean applyFirst) { storeInfo.setChunkingEnabled(false); storeInfo.setCompressionStrategy(CompressionStrategy.NO_OP); storeInfo.setWriteComputationEnabled(false); - storeInfo.setIncrementalPushEnabled(false); storeInfo.setNativeReplicationSourceFabric("dc-0"); Map coloMaps = new HashMap() { { diff --git a/internal/venice-common/src/main/java/com/linkedin/venice/ConfigKeys.java b/internal/venice-common/src/main/java/com/linkedin/venice/ConfigKeys.java index caff0d23161..1af59fb8a15 100644 --- a/internal/venice-common/src/main/java/com/linkedin/venice/ConfigKeys.java +++ b/internal/venice-common/src/main/java/com/linkedin/venice/ConfigKeys.java @@ -314,8 +314,15 @@ private ConfigKeys() { "controller.store.graveyard.cleanup.sleep.interval.between.list.fetch.minutes"; /** - * Whether the superset schema generation in Parent Controller should be done via passed callback or not. + * Whether the superset schema generation in Primary Controller should be done via passed callback or not. */ + public static final String CONTROLLER_EXTERNAL_SUPERSET_SCHEMA_GENERATION_ENABLED = + "controller.external.superset.schema.generation.enabled"; + + /** + * Whether the superset schema generation in Primary Controller should be done via passed callback or not. + */ + @Deprecated public static final String CONTROLLER_PARENT_EXTERNAL_SUPERSET_SCHEMA_GENERATION_ENABLED = "controller.parent.external.superset.schema.generation.enabled"; @@ -1139,6 +1146,7 @@ private ConfigKeys() { */ public static final String ENABLE_INCREMENTAL_PUSH_FOR_HYBRID_ACTIVE_ACTIVE_USER_STORES = "enable.incremental.push.for.hybrid.active.active.user.stores"; + /** * We will use this config to determine whether we should enable partial update for hybrid active-active user stores. 
* If this config is set to true, we will enable partial update for hybrid active-active user stores whose latest value diff --git a/internal/venice-common/src/main/java/com/linkedin/venice/helix/StoragePersonaRepository.java b/internal/venice-common/src/main/java/com/linkedin/venice/helix/StoragePersonaRepository.java index cde1c6a5ed2..42f235bd9e0 100644 --- a/internal/venice-common/src/main/java/com/linkedin/venice/helix/StoragePersonaRepository.java +++ b/internal/venice-common/src/main/java/com/linkedin/venice/helix/StoragePersonaRepository.java @@ -179,15 +179,15 @@ private void deleteStores(List storeNames) { public StoragePersona getPersonaContainingStore(String storeName) { String personaName = storeNamePersonaMap.get(storeName); - if (personaName == null) + if (personaName == null) { return null; + } return getPersona(personaName); } private boolean isStoreSetValid(StoragePersona persona, Optional additionalStore) { Set setToValidate = new HashSet<>(); - if (additionalStore.isPresent()) - setToValidate.add(additionalStore.get().getName()); + additionalStore.ifPresent(store -> setToValidate.add(store.getName())); setToValidate.addAll(persona.getStoresToEnforce()); return setToValidate.stream() .allMatch( diff --git a/internal/venice-common/src/main/java/com/linkedin/venice/meta/BackupStrategy.java b/internal/venice-common/src/main/java/com/linkedin/venice/meta/BackupStrategy.java index c1ea17b8be6..d6422747d6c 100644 --- a/internal/venice-common/src/main/java/com/linkedin/venice/meta/BackupStrategy.java +++ b/internal/venice-common/src/main/java/com/linkedin/venice/meta/BackupStrategy.java @@ -24,7 +24,7 @@ public enum BackupStrategy { // KEEP_IN_KAFKA_ONLY, /** Keep in user-specified store eg HDD, other DB */ // KEEP_IN_USER_STORE; - private int value; + private final int value; BackupStrategy(int v) { this.value = v; @@ -35,6 +35,10 @@ public enum BackupStrategy { Arrays.stream(values()).forEach(s -> idMapping.put(s.value, s)); } + public int getValue() { + return value; + } + public static BackupStrategy fromInt(int i) { BackupStrategy strategy = idMapping.get(i); if (strategy == null) { diff --git a/internal/venice-common/src/main/java/com/linkedin/venice/meta/HybridStoreConfig.java b/internal/venice-common/src/main/java/com/linkedin/venice/meta/HybridStoreConfig.java index 061098317f5..3952f8f4a26 100644 --- a/internal/venice-common/src/main/java/com/linkedin/venice/meta/HybridStoreConfig.java +++ b/internal/venice-common/src/main/java/com/linkedin/venice/meta/HybridStoreConfig.java @@ -27,4 +27,9 @@ public interface HybridStoreConfig extends DataModelBackedStructure= 0 && (this.getOffsetLagThresholdToGoOnline() >= 0 + || this.getProducerTimestampLagThresholdToGoOnlineInSeconds() >= 0); + } } diff --git a/internal/venice-common/src/main/java/com/linkedin/venice/meta/PartitionerConfigImpl.java b/internal/venice-common/src/main/java/com/linkedin/venice/meta/PartitionerConfigImpl.java index ccd727d7f25..00692908aaa 100644 --- a/internal/venice-common/src/main/java/com/linkedin/venice/meta/PartitionerConfigImpl.java +++ b/internal/venice-common/src/main/java/com/linkedin/venice/meta/PartitionerConfigImpl.java @@ -92,6 +92,9 @@ public int hashCode() { @JsonIgnore public PartitionerConfig clone() { - return new PartitionerConfigImpl(getPartitionerClass(), getPartitionerParams(), getAmplificationFactor()); + return new PartitionerConfigImpl( + getPartitionerClass(), + new HashMap<>(getPartitionerParams()), + getAmplificationFactor()); } } diff --git 
a/internal/venice-common/src/main/java/com/linkedin/venice/meta/ZKStore.java b/internal/venice-common/src/main/java/com/linkedin/venice/meta/ZKStore.java index aedae77ef64..f120e506ff6 100644 --- a/internal/venice-common/src/main/java/com/linkedin/venice/meta/ZKStore.java +++ b/internal/venice-common/src/main/java/com/linkedin/venice/meta/ZKStore.java @@ -208,7 +208,7 @@ public ZKStore(Store store) { setSchemaAutoRegisterFromPushJobEnabled(store.isSchemaAutoRegisterFromPushJobEnabled()); setLatestSuperSetValueSchemaId(store.getLatestSuperSetValueSchemaId()); setHybridStoreDiskQuotaEnabled(store.isHybridStoreDiskQuotaEnabled()); - setEtlStoreConfig(store.getEtlStoreConfig()); + setEtlStoreConfig(store.getEtlStoreConfig().clone()); setStoreMetadataSystemStoreEnabled(store.isStoreMetadataSystemStoreEnabled()); setLatestVersionPromoteToCurrentTimestamp(store.getLatestVersionPromoteToCurrentTimestamp()); setBackupVersionRetentionMs(store.getBackupVersionRetentionMs()); @@ -219,7 +219,7 @@ public ZKStore(Store store) { setStoreMetaSystemStoreEnabled(store.isStoreMetaSystemStoreEnabled()); setActiveActiveReplicationEnabled(store.isActiveActiveReplicationEnabled()); setRmdVersion(store.getRmdVersion()); - setViewConfigs(store.getViewConfigs()); + setViewConfigs(new HashMap<>(store.getViewConfigs())); setStorageNodeReadQuotaEnabled(store.isStorageNodeReadQuotaEnabled()); setUnusedSchemaDeletionEnabled(store.isUnusedSchemaDeletionEnabled()); setMinCompactionLagSeconds(store.getMinCompactionLagSeconds()); @@ -361,11 +361,7 @@ public void setLargestUsedVersionNumber(int largestUsedVersionNumber) { @SuppressWarnings("unused") // Used by Serializer/De-serializer for storing to Zoo Keeper @Override public long getStorageQuotaInByte() { - // This is a safeguard in case that some old stores do not have storage quota field - return (this.storeProperties.storageQuotaInByte <= 0 - && this.storeProperties.storageQuotaInByte != UNLIMITED_STORAGE_QUOTA) - ? DEFAULT_STORAGE_QUOTA - : this.storeProperties.storageQuotaInByte; + return this.storeProperties.storageQuotaInByte; } @SuppressWarnings("unused") // Used by Serializer/De-serializer for storing to Zoo Keeper diff --git a/internal/venice-common/src/main/java/com/linkedin/venice/utils/PartitionUtils.java b/internal/venice-common/src/main/java/com/linkedin/venice/utils/PartitionUtils.java index 4ecf0f4456c..c3e84fde88e 100644 --- a/internal/venice-common/src/main/java/com/linkedin/venice/utils/PartitionUtils.java +++ b/internal/venice-common/src/main/java/com/linkedin/venice/utils/PartitionUtils.java @@ -68,7 +68,9 @@ public static int calculatePartitionCount( partitionCount, storageQuota, storeName); - return (int) partitionCount; + + // At least 1 partition + return partitionCount <= 0 ? 
1 : (int) partitionCount; } public static VenicePartitioner getVenicePartitioner(PartitionerConfig config) { diff --git a/internal/venice-common/src/test/java/com/linkedin/venice/meta/BackupStrategyTest.java b/internal/venice-common/src/test/java/com/linkedin/venice/meta/BackupStrategyTest.java new file mode 100644 index 00000000000..29fa48cf72c --- /dev/null +++ b/internal/venice-common/src/test/java/com/linkedin/venice/meta/BackupStrategyTest.java @@ -0,0 +1,23 @@ +package com.linkedin.venice.meta; + +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertThrows; + +import com.linkedin.venice.exceptions.VeniceException; +import org.testng.annotations.Test; + + +public class BackupStrategyTest { + @Test + public void testFromInt() { + assertEquals(BackupStrategy.fromInt(0), BackupStrategy.KEEP_MIN_VERSIONS); + assertEquals(BackupStrategy.fromInt(1), BackupStrategy.DELETE_ON_NEW_PUSH_START); + assertThrows(VeniceException.class, () -> BackupStrategy.fromInt(2)); + } + + @Test + public void testGetValue() { + assertEquals(BackupStrategy.KEEP_MIN_VERSIONS.getValue(), 0); + assertEquals(BackupStrategy.DELETE_ON_NEW_PUSH_START.getValue(), 1); + } +} diff --git a/internal/venice-common/src/test/java/com/linkedin/venice/meta/TestHybridStoreConfig.java b/internal/venice-common/src/test/java/com/linkedin/venice/meta/TestHybridStoreConfig.java index c4dd2368d6b..2f751047981 100644 --- a/internal/venice-common/src/test/java/com/linkedin/venice/meta/TestHybridStoreConfig.java +++ b/internal/venice-common/src/test/java/com/linkedin/venice/meta/TestHybridStoreConfig.java @@ -22,4 +22,26 @@ public void deserializes() throws IOException { Assert.assertEquals(fasterXml.getRewindTimeInSeconds(), 123L); Assert.assertEquals(fasterXml.getDataReplicationPolicy(), DataReplicationPolicy.NON_AGGREGATE); } + + @Test + public void testIsHybrid() { + HybridStoreConfig hybridStoreConfig; + hybridStoreConfig = new HybridStoreConfigImpl(-1, -1, -1, null, null); + Assert.assertFalse(hybridStoreConfig.isHybrid()); + + hybridStoreConfig = new HybridStoreConfigImpl(100, -1, -1, null, null); + Assert.assertFalse(hybridStoreConfig.isHybrid()); + + hybridStoreConfig = new HybridStoreConfigImpl(100, 100, -1, null, null); + Assert.assertTrue(hybridStoreConfig.isHybrid()); + + hybridStoreConfig = new HybridStoreConfigImpl(100, 100, 100, null, null); + Assert.assertTrue(hybridStoreConfig.isHybrid()); + + hybridStoreConfig = new HybridStoreConfigImpl(100, -1, 100, null, null); + Assert.assertTrue(hybridStoreConfig.isHybrid()); + + hybridStoreConfig = new HybridStoreConfigImpl(-1, -1, 100, null, null); + Assert.assertFalse(hybridStoreConfig.isHybrid()); + } } diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/TestClusterLevelConfigForActiveActiveReplication.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/TestClusterLevelConfigForActiveActiveReplication.java index ab23710c5a6..df2c1f04d9d 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/TestClusterLevelConfigForActiveActiveReplication.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/TestClusterLevelConfigForActiveActiveReplication.java @@ -85,4 +85,40 @@ public void testClusterLevelActiveActiveReplicationConfigForNewHybridStores() th assertFalse(parentControllerClient.getStore(storeName).getStore().isActiveActiveReplicationEnabled()); }); } + + @Test(timeOut = TEST_TIMEOUT) 
+ public void testClusterLevelActiveActiveReplicationConfigForNewIncrementalPushStores() throws IOException { + String storeName = Utils.getUniqueString("test-store-incremental"); + String pushJobId1 = "test-push-job-id-1"; + parentControllerClient.createNewStore(storeName, "test-owner", "\"string\"", "\"string\""); + parentControllerClient.emptyPush(storeName, pushJobId1, 1); + + // Version 1 should exist. + StoreInfo store = assertCommand(parentControllerClient.getStore(storeName)).getStore(); + assertEquals(store.getVersions().size(), 1); + + // Check that store-level Active/Active replication and incremental push are disabled by default + assertFalse(store.isActiveActiveReplicationEnabled()); + assertFalse(store.isIncrementalPushEnabled()); + + // Convert to incremental push store + assertCommand( + parentControllerClient.updateStore(storeName, new UpdateStoreQueryParams().setIncrementalPushEnabled(true))); + TestUtils.waitForNonDeterministicAssertion(TEST_TIMEOUT, TimeUnit.MILLISECONDS, () -> { + StoreInfo storeToTest = parentControllerClient.getStore(storeName).getStore(); + assertTrue(storeToTest.isIncrementalPushEnabled()); + assertTrue(storeToTest.isActiveActiveReplicationEnabled()); + }); + + // After incremental push is disabled, the store's A/A config should remain enabled even though the + // default A/A config for pure hybrid stores is false. + assertCommand( + parentControllerClient.updateStore(storeName, new UpdateStoreQueryParams().setIncrementalPushEnabled(false))); + TestUtils.waitForNonDeterministicAssertion(TEST_TIMEOUT, TimeUnit.MILLISECONDS, () -> { + StoreInfo storeToTest = parentControllerClient.getStore(storeName).getStore(); + assertFalse(storeToTest.isIncrementalPushEnabled()); + assertTrue(storeToTest.isActiveActiveReplicationEnabled()); + }); + } } diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/TestClusterLevelConfigForNativeReplication.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/TestClusterLevelConfigForNativeReplication.java index 3cc1fc34ecc..fedf2d009dd 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/TestClusterLevelConfigForNativeReplication.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/TestClusterLevelConfigForNativeReplication.java @@ -11,6 +11,7 @@ import com.linkedin.venice.integration.utils.ServiceFactory; import com.linkedin.venice.integration.utils.VeniceMultiRegionClusterCreateOptions; import com.linkedin.venice.integration.utils.VeniceTwoLayerMultiRegionMultiClusterWrapper; +import com.linkedin.venice.meta.DataReplicationPolicy; import com.linkedin.venice.meta.StoreInfo; import com.linkedin.venice.utils.TestUtils; import com.linkedin.venice.utils.Time; @@ -89,6 +90,7 @@ public void testClusterLevelNativeReplicationConfigForNewStores() { parentControllerClient.updateStore( storeName, new UpdateStoreQueryParams().setIncrementalPushEnabled(true) + .setHybridDataReplicationPolicy(DataReplicationPolicy.NONE) .setHybridRewindSeconds(1L) .setHybridOffsetLagThreshold(10))); TestUtils.waitForNonDeterministicAssertion(TEST_TIMEOUT, TimeUnit.MILLISECONDS, () -> { diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/TestVeniceHelixAdminWithSharedEnvironment.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/TestVeniceHelixAdminWithSharedEnvironment.java index b635940e8e5..711f7b06b23 100644 ---
a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/TestVeniceHelixAdminWithSharedEnvironment.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/TestVeniceHelixAdminWithSharedEnvironment.java @@ -479,27 +479,6 @@ public void testUpdateStoreMetadata() throws Exception { PartitionerConfig partitionerConfig = new PartitionerConfigImpl(); veniceAdmin.setStorePartitionerConfig(clusterName, storeName, partitionerConfig); - veniceAdmin.setIncrementalPushEnabled(clusterName, storeName, true); - Assert.assertTrue(veniceAdmin.getStore(clusterName, storeName).isIncrementalPushEnabled()); - - veniceAdmin.setBootstrapToOnlineTimeoutInHours(clusterName, storeName, 48); - Assert.assertEquals(veniceAdmin.getStore(clusterName, storeName).getBootstrapToOnlineTimeoutInHours(), 48); - - veniceAdmin.setHybridStoreDiskQuotaEnabled(clusterName, storeName, true); - Assert.assertTrue(veniceAdmin.getStore(clusterName, storeName).isHybridStoreDiskQuotaEnabled()); - - // test setting per-store RMD (replication metadata) version ID - int rmdVersion = veniceAdmin.getStore(clusterName, storeName).getRmdVersion(); - Assert.assertEquals(rmdVersion, -1); - - veniceAdmin.setReplicationMetadataVersionID(clusterName, storeName, 2); - rmdVersion = veniceAdmin.getStore(clusterName, storeName).getRmdVersion(); - Assert.assertNotEquals(rmdVersion, -1); - Assert.assertEquals(rmdVersion, 2); - - // test hybrid config - // set incrementalPushEnabled to be false as hybrid and incremental are mutex - veniceAdmin.setIncrementalPushEnabled(clusterName, storeName, false); Assert.assertFalse(veniceAdmin.getStore(clusterName, storeName).isHybrid()); veniceAdmin.updateStore( clusterName, @@ -644,26 +623,18 @@ public void testGetRealTimeTopic() { Assert.assertThrows(VeniceNoStoreException.class, () -> veniceAdmin.getRealTimeTopic(clusterName, storeName)); veniceAdmin.createStore(clusterName, storeName, "owner", KEY_SCHEMA, VALUE_SCHEMA); + + // Must not be able to get a real time topic if the store is not hybrid + Assert.assertThrows(VeniceException.class, () -> veniceAdmin.getRealTimeTopic(clusterName, storeName)); + veniceAdmin.updateStore( clusterName, storeName, new UpdateStoreQueryParams().setHybridRewindSeconds(25L).setHybridOffsetLagThreshold(100L)); // make store // hybrid - try { - veniceAdmin.getRealTimeTopic(clusterName, storeName); - Assert.fail("Must not be able to get a real time topic until the store is initialized with a version"); - } catch (VeniceException e) { - Assert.assertTrue( - e.getMessage().contains("is not initialized with a version"), - "Got unexpected error message: " + e.getMessage()); - } - - int partitions = 2; // TODO verify partition count for RT topic. 
- veniceAdmin.incrementVersionIdempotent(clusterName, storeName, Version.guidBasedDummyPushId(), partitions, 1); - - String rtTopic = veniceAdmin.getRealTimeTopic(clusterName, storeName); - Assert.assertEquals(rtTopic, Version.composeRealTimeTopic(storeName)); + String expectedRtTopic = Version.composeRealTimeTopic(storeName); + Assert.assertEquals(veniceAdmin.getRealTimeTopic(clusterName, storeName), expectedRtTopic); } @Test(timeOut = TOTAL_TIMEOUT_FOR_LONG_TEST_MS) @@ -1467,33 +1438,27 @@ public void leakyTopicTruncation() { } } - @Test(timeOut = TOTAL_TIMEOUT_FOR_LONG_TEST_MS) - public void testSetLargestUsedVersion() { - String storeName = "testSetLargestUsedVersion"; - veniceAdmin.createStore(clusterName, storeName, storeOwner, KEY_SCHEMA, VALUE_SCHEMA); - Store store = veniceAdmin.getStore(clusterName, storeName); - Assert.assertEquals(store.getLargestUsedVersionNumber(), 0); - - Version version = - veniceAdmin.incrementVersionIdempotent(clusterName, storeName, Version.guidBasedDummyPushId(), 1, 1); - store = veniceAdmin.getStore(clusterName, storeName); - Assert.assertTrue(version.getNumber() > 0); - Assert.assertEquals(store.getLargestUsedVersionNumber(), version.getNumber()); - - veniceAdmin.setStoreLargestUsedVersion(clusterName, storeName, 0); - store = veniceAdmin.getStore(clusterName, storeName); - Assert.assertEquals(store.getLargestUsedVersionNumber(), 0); - } - @Test(timeOut = TOTAL_TIMEOUT_FOR_LONG_TEST_MS) public void testWriteComputationEnabled() { String storeName = Utils.getUniqueString("test_store"); - veniceAdmin.createStore(clusterName, storeName, storeOwner, "\"string\"", "\"string\""); + String VALUE_FIELD_NAME = "int_field"; + String SECOND_VALUE_FIELD_NAME = "opt_int_field"; + String VALUE_SCHEMA_V2_STR = "{\n" + "\"type\": \"record\",\n" + "\"name\": \"TestValueSchema\",\n" + + "\"namespace\": \"com.linkedin.venice.fastclient.schema\",\n" + "\"fields\": [\n" + " {\"name\": \"" + + VALUE_FIELD_NAME + "\", \"type\": \"int\", \"default\": 10},\n" + "{\"name\": \"" + SECOND_VALUE_FIELD_NAME + + "\", \"type\": [\"null\", \"int\"], \"default\": null}]\n" + "}"; + + veniceAdmin.createStore(clusterName, storeName, storeOwner, "\"string\"", VALUE_SCHEMA_V2_STR); Store store = veniceAdmin.getStore(clusterName, storeName); Assert.assertFalse(store.isWriteComputationEnabled()); - veniceAdmin.updateStore(clusterName, storeName, new UpdateStoreQueryParams().setWriteComputationEnabled(true)); + veniceAdmin.updateStore( + clusterName, + storeName, + new UpdateStoreQueryParams().setHybridRewindSeconds(1000) + .setHybridOffsetLagThreshold(1000) + .setWriteComputationEnabled(true)); store = veniceAdmin.getStore(clusterName, storeName); Assert.assertTrue(store.isWriteComputationEnabled()); } @@ -1744,10 +1709,12 @@ public void testVersionLevelActiveActiveReplicationConfig() { String pushJobId1 = "test-push-job-id-1"; veniceAdmin.createStore(clusterName, storeName, "test-owner", KEY_SCHEMA, VALUE_SCHEMA); /** - * Enable L/F and Active/Active replication + * Enable L/F, NR and Active/Active replication */ - veniceAdmin - .updateStore(clusterName, storeName, new UpdateStoreQueryParams().setActiveActiveReplicationEnabled(true)); + veniceAdmin.updateStore( + clusterName, + storeName, + new UpdateStoreQueryParams().setNativeReplicationEnabled(true).setActiveActiveReplicationEnabled(true)); /** * Add version 1 @@ -1887,7 +1854,10 @@ public void testUpdateStoreWithVersionInheritedConfigs() { veniceAdmin.updateStore( clusterName, storeName, - new 
UpdateStoreQueryParams().setHybridOffsetLagThreshold(1) + new UpdateStoreQueryParams().setNativeReplicationEnabled(true) + .setActiveActiveReplicationEnabled(true) + .setChunkingEnabled(true) + .setHybridOffsetLagThreshold(1) .setHybridRewindSeconds(1) .setStoreViews(viewConfig)); veniceAdmin.incrementVersionIdempotent(clusterName, storeName, Version.guidBasedDummyPushId(), 1, 1); diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/VeniceParentHelixAdminTest.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/VeniceParentHelixAdminTest.java index 352d4e639df..c37e08c3e82 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/VeniceParentHelixAdminTest.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/controller/VeniceParentHelixAdminTest.java @@ -2,7 +2,7 @@ import static com.linkedin.venice.ConfigKeys.CONTROLLER_AUTO_MATERIALIZE_DAVINCI_PUSH_STATUS_SYSTEM_STORE; import static com.linkedin.venice.ConfigKeys.CONTROLLER_AUTO_MATERIALIZE_META_SYSTEM_STORE; -import static com.linkedin.venice.ConfigKeys.CONTROLLER_PARENT_EXTERNAL_SUPERSET_SCHEMA_GENERATION_ENABLED; +import static com.linkedin.venice.ConfigKeys.CONTROLLER_EXTERNAL_SUPERSET_SCHEMA_GENERATION_ENABLED; import static com.linkedin.venice.ConfigKeys.TERMINAL_STATE_TOPIC_CHECK_DELAY_MS; import static com.linkedin.venice.ConfigKeys.TOPIC_CLEANUP_SLEEP_INTERVAL_BETWEEN_TOPIC_LIST_FETCH_MS; import static com.linkedin.venice.controller.SchemaConstants.BAD_VALUE_SCHEMA_FOR_WRITE_COMPUTE_V2; @@ -33,7 +33,6 @@ import com.linkedin.venice.controllerapi.VersionCreationResponse; import com.linkedin.venice.integration.utils.ServiceFactory; import com.linkedin.venice.integration.utils.VeniceClusterWrapper; -import com.linkedin.venice.integration.utils.VeniceControllerWrapper; import com.linkedin.venice.integration.utils.VeniceMultiRegionClusterCreateOptions; import com.linkedin.venice.integration.utils.VeniceTwoLayerMultiRegionMultiClusterWrapper; import com.linkedin.venice.meta.ETLStoreConfig; @@ -346,7 +345,7 @@ public void testHybridAndETLStoreConfig() { Assert.assertFalse(etlStoreConfig.isFutureVersionETLEnabled()); Assert.assertTrue( controllerResponse.getError() - .contains("Cannot enable ETL for this store " + "because etled user proxy account is not set")); + .contains("Cannot enable ETL for this store because etled user proxy account is not set")); // test enabling ETL with empty proxy account, expected failure params = new UpdateStoreQueryParams(); @@ -358,7 +357,7 @@ public void testHybridAndETLStoreConfig() { Assert.assertFalse(etlStoreConfig.isFutureVersionETLEnabled()); Assert.assertTrue( controllerResponse.getError() - .contains("Cannot enable ETL for this store " + "because etled user proxy account is not set")); + .contains("Cannot enable ETL for this store because etled user proxy account is not set")); // test enabling ETL with etl proxy account, expected success params = new UpdateStoreQueryParams(); @@ -405,22 +404,20 @@ public void testSupersetSchemaWithCustomSupersetSchemaGenerator() throws IOExcep // This cluster setup don't have server, we cannot perform push here. 
properties.setProperty(CONTROLLER_AUTO_MATERIALIZE_META_SYSTEM_STORE, String.valueOf(false)); properties.setProperty(CONTROLLER_AUTO_MATERIALIZE_DAVINCI_PUSH_STATUS_SYSTEM_STORE, String.valueOf(false)); - properties.setProperty(CONTROLLER_PARENT_EXTERNAL_SUPERSET_SCHEMA_GENERATION_ENABLED, String.valueOf(true)); - properties - .put(VeniceControllerWrapper.SUPERSET_SCHEMA_GENERATOR, new SupersetSchemaGeneratorWithCustomProp(CUSTOM_PROP)); + properties.setProperty(CONTROLLER_EXTERNAL_SUPERSET_SCHEMA_GENERATION_ENABLED, String.valueOf(true)); try (VeniceTwoLayerMultiRegionMultiClusterWrapper twoLayerMultiRegionMultiClusterWrapper = ServiceFactory.getVeniceTwoLayerMultiRegionMultiClusterWrapper( - 1, - 1, - 1, - 1, - 0, - 0, - 1, - Optional.of(properties), - Optional.empty(), - Optional.empty())) { + new VeniceMultiRegionClusterCreateOptions.Builder().numberOfRegions(1) + .numberOfClusters(1) + .numberOfParentControllers(1) + .numberOfChildControllers(1) + .numberOfServers(0) + .numberOfRouters(0) + .replicationFactor(1) + .parentControllerProperties(properties) + .supersetSchemaGenerator(new SupersetSchemaGeneratorWithCustomProp(CUSTOM_PROP)) + .build())) { String parentControllerUrl = twoLayerMultiRegionMultiClusterWrapper.getControllerConnectString(); try (ControllerClient parentControllerClient = new ControllerClient(twoLayerMultiRegionMultiClusterWrapper.getClusterNames()[0], parentControllerUrl)) { @@ -440,8 +437,11 @@ public void testSupersetSchemaWithCustomSupersetSchemaGenerator() throws IOExcep Assert.assertNotNull(newStoreResponse); Assert.assertFalse(newStoreResponse.isError(), "error in newStoreResponse: " + newStoreResponse.getError()); // Enable write compute - ControllerResponse updateStoreResponse = parentControllerClient - .updateStore(storeName, new UpdateStoreQueryParams().setWriteComputationEnabled(true)); + ControllerResponse updateStoreResponse = parentControllerClient.updateStore( + storeName, + new UpdateStoreQueryParams().setHybridRewindSeconds(86400) + .setHybridOffsetLagThreshold(1000) + .setWriteComputationEnabled(true)); Assert.assertFalse(updateStoreResponse.isError()); MultiSchemaResponse schemaResponse = parentControllerClient.getAllValueSchema(storeName); @@ -562,25 +562,26 @@ public void testStoreMetaDataUpdateFromParentToChildController( parentControllerProps .setProperty(CONTROLLER_AUTO_MATERIALIZE_DAVINCI_PUSH_STATUS_SYSTEM_STORE, String.valueOf(false)); if (isSupersetSchemaGeneratorEnabled) { - parentControllerProps - .setProperty(CONTROLLER_PARENT_EXTERNAL_SUPERSET_SCHEMA_GENERATION_ENABLED, String.valueOf(true)); - parentControllerProps.put( - VeniceControllerWrapper.SUPERSET_SCHEMA_GENERATOR, - new SupersetSchemaGeneratorWithCustomProp("test_prop")); + parentControllerProps.setProperty(CONTROLLER_EXTERNAL_SUPERSET_SCHEMA_GENERATION_ENABLED, String.valueOf(true)); + } + + VeniceMultiRegionClusterCreateOptions.Builder options = + new VeniceMultiRegionClusterCreateOptions.Builder().numberOfRegions(1) + .numberOfClusters(1) + .numberOfParentControllers(1) + .numberOfChildControllers(1) + .numberOfServers(0) + .numberOfRouters(0) + .replicationFactor(1) + .parentControllerProperties(parentControllerProps) + .sslToKafka(isControllerSslEnabled); + + if (isSupersetSchemaGeneratorEnabled) { + options = options.supersetSchemaGenerator(new SupersetSchemaGeneratorWithCustomProp("test_prop")); } try (VeniceTwoLayerMultiRegionMultiClusterWrapper venice = - ServiceFactory.getVeniceTwoLayerMultiRegionMultiClusterWrapper( - new 
VeniceMultiRegionClusterCreateOptions.Builder().numberOfRegions(1) - .numberOfClusters(1) - .numberOfParentControllers(1) - .numberOfChildControllers(1) - .numberOfServers(0) - .numberOfRouters(0) - .replicationFactor(1) - .parentControllerProperties(parentControllerProps) - .sslToKafka(isControllerSslEnabled) - .build())) { + ServiceFactory.getVeniceTwoLayerMultiRegionMultiClusterWrapper(options.build())) { String childControllerUrl = venice.getChildRegions().get(0).getControllerConnectString(); String parentControllerUrl = venice.getControllerConnectString(); Optional sslFactory = @@ -1013,7 +1014,9 @@ private void testWriteComputeSchemaAutoGenerationFailure(ControllerClient parent private void validateEnablingWriteComputeFailed(String storeName, ControllerClient parentControllerClient) { UpdateStoreQueryParams updateStoreQueryParams = new UpdateStoreQueryParams(); - updateStoreQueryParams.setWriteComputationEnabled(true); + updateStoreQueryParams.setHybridRewindSeconds(86400) + .setHybridOffsetLagThreshold(1000) + .setWriteComputationEnabled(true); ControllerResponse response = parentControllerClient.updateStore(storeName, updateStoreQueryParams); Assert.assertTrue( response.isError(), @@ -1040,7 +1043,9 @@ private void testWriteComputeSchemaAutoGeneration(ControllerClient parentControl // Step 2. Update this store to enable write compute. UpdateStoreQueryParams updateStoreQueryParams = new UpdateStoreQueryParams(); - updateStoreQueryParams.setWriteComputationEnabled(true); + updateStoreQueryParams.setHybridOffsetLagThreshold(1000) + .setHybridRewindSeconds(86400) + .setWriteComputationEnabled(true); parentControllerClient.updateStore(storeName, updateStoreQueryParams); // Step 3. Get value schema and write compute schema generated by the controller. @@ -1103,7 +1108,9 @@ private void testWriteComputeSchemaEnable(ControllerClient parentControllerClien // Step 2. Update this store to enable write compute. 
UpdateStoreQueryParams updateStoreQueryParams = new UpdateStoreQueryParams(); - updateStoreQueryParams.setWriteComputationEnabled(true); + updateStoreQueryParams.setHybridOffsetLagThreshold(1000) + .setHybridRewindSeconds(86400) + .setWriteComputationEnabled(true); parentControllerClient.updateStore(storeName, updateStoreQueryParams); // Could not enable write compute bad schema did not have defaults diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/PartialUpdateTest.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/PartialUpdateTest.java index 2d0f9f2b0d6..eba6bec2096 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/PartialUpdateTest.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/PartialUpdateTest.java @@ -69,6 +69,7 @@ import com.linkedin.venice.integration.utils.VeniceMultiClusterWrapper; import com.linkedin.venice.integration.utils.VeniceServerWrapper; import com.linkedin.venice.integration.utils.VeniceTwoLayerMultiRegionMultiClusterWrapper; +import com.linkedin.venice.meta.DataReplicationPolicy; import com.linkedin.venice.meta.ReadOnlySchemaRepository; import com.linkedin.venice.meta.Store; import com.linkedin.venice.meta.Version; @@ -310,6 +311,7 @@ public void testIncrementalPushPartialUpdateClassicFormat() throws IOException { .setWriteComputationEnabled(true) .setChunkingEnabled(true) .setIncrementalPushEnabled(true) + .setHybridDataReplicationPolicy(DataReplicationPolicy.NONE) .setHybridRewindSeconds(10L) .setHybridOffsetLagThreshold(2L); ControllerResponse updateStoreResponse = @@ -377,6 +379,7 @@ public void testIncrementalPushPartialUpdateNewFormat(boolean useSparkCompute) t .setWriteComputationEnabled(true) .setChunkingEnabled(true) .setIncrementalPushEnabled(true) + .setHybridDataReplicationPolicy(DataReplicationPolicy.NONE) .setHybridRewindSeconds(10L) .setHybridOffsetLagThreshold(2L); ControllerResponse updateStoreResponse = diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/PushJobDetailsTest.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/PushJobDetailsTest.java index 631403a9628..0eb7374d6be 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/PushJobDetailsTest.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/PushJobDetailsTest.java @@ -32,6 +32,7 @@ import com.linkedin.venice.integration.utils.VeniceClusterWrapper; import com.linkedin.venice.integration.utils.VeniceMultiClusterWrapper; import com.linkedin.venice.integration.utils.VeniceTwoLayerMultiRegionMultiClusterWrapper; +import com.linkedin.venice.meta.Store; import com.linkedin.venice.meta.Version; import com.linkedin.venice.status.PushJobDetailsStatus; import com.linkedin.venice.status.protocol.PushJobDetails; @@ -126,8 +127,9 @@ public void testPushJobDetails() throws ExecutionException, InterruptedException recordSchema.getField(DEFAULT_VALUE_FIELD_PROP).schema().toString()); // Set store quota to unlimited else local VPJ jobs will fail due to quota enforcement NullPointerException because // hadoop job client cannot fetch counters properly. 
- parentControllerClient - .updateStore(testStoreName, new UpdateStoreQueryParams().setStorageQuotaInByte(-1).setPartitionCount(2)); + parentControllerClient.updateStore( + testStoreName, + new UpdateStoreQueryParams().setStorageQuotaInByte(Store.UNLIMITED_STORAGE_QUOTA).setPartitionCount(2)); Properties pushJobProps = defaultVPJProps(multiRegionMultiClusterWrapper, inputDirPath, testStoreName); pushJobProps.setProperty(PUSH_JOB_STATUS_UPLOAD_ENABLE, String.valueOf(true)); try (VenicePushJob testPushJob = new VenicePushJob("test-push-job-details-job", pushJobProps)) { @@ -221,7 +223,7 @@ public void testPushJobDetailsFailureTags() throws ExecutionException, Interrupt recordSchema.getField(DEFAULT_KEY_FIELD_PROP).schema().toString(), recordSchema.getField(DEFAULT_VALUE_FIELD_PROP).schema().toString()); // hadoop job client cannot fetch counters properly and should fail the job - parentControllerClient.updateStore(testStoreName, new UpdateStoreQueryParams().setStorageQuotaInByte(0)); + parentControllerClient.updateStore(testStoreName, new UpdateStoreQueryParams().setStorageQuotaInByte(1)); Properties pushJobProps = defaultVPJProps(multiRegionMultiClusterWrapper, inputDirPath, testStoreName); pushJobProps.setProperty(PUSH_JOB_STATUS_UPLOAD_ENABLE, String.valueOf(true)); try (VenicePushJob testPushJob = new VenicePushJob("test-push-job-details-job", pushJobProps)) { @@ -265,10 +267,11 @@ public void testPushJobDetailsRecordTooLarge() throws ExecutionException, Interr recordSchema.getField(DEFAULT_KEY_FIELD_PROP).schema().toString(), recordSchema.getField(DEFAULT_VALUE_FIELD_PROP).schema().toString()); // Set store quota to unlimited else local VPJ jobs will fail due to quota enforcement NullPointerException - final UpdateStoreQueryParams queryParams = new UpdateStoreQueryParams().setStorageQuotaInByte(-1) - .setPartitionCount(2) - .setChunkingEnabled(true) - .setMaxRecordSizeBytes(0); + final UpdateStoreQueryParams queryParams = + new UpdateStoreQueryParams().setStorageQuotaInByte(Store.UNLIMITED_STORAGE_QUOTA) + .setPartitionCount(2) + .setChunkingEnabled(true) + .setMaxRecordSizeBytes(0); parentControllerClient.updateStore(testStoreName, queryParams); Properties pushJobProps = defaultVPJProps(multiRegionMultiClusterWrapper, inputDirPath, testStoreName); diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestActiveActiveReplicationForIncPush.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestActiveActiveReplicationForIncPush.java index 75309baedbb..5945a4832d5 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestActiveActiveReplicationForIncPush.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestActiveActiveReplicationForIncPush.java @@ -207,9 +207,12 @@ public void testAAReplicationForIncrementalPushToRT(boolean overrideDataReplicat dc1ControllerClient, dc2ControllerClient); + // Incremental push is not allowed for NON_AGGREGATE DataReplicationPolicy + boolean incrementalPushAllowed = !overrideDataReplicationPolicy; + verifyHybridAndIncPushConfig( storeName, - true, + incrementalPushAllowed, true, parentControllerClient, dc0ControllerClient, @@ -221,10 +224,18 @@ public void testAAReplicationForIncrementalPushToRT(boolean overrideDataReplicat job.run(); Assert.assertEquals(job.getKafkaUrl(), childDatacenters.get(2).getKafkaBrokerWrapper().getAddress()); } + // Run inc push with source fabric preference taking effect. 
try (VenicePushJob job = new VenicePushJob("Test push job incremental with NR + A/A from dc-2", propsInc1)) { job.run(); + if (!incrementalPushAllowed) { + Assert.fail("Incremental push should throw an exception for NON_AGGREGATE data replication policy"); + } Assert.assertEquals(job.getKafkaUrl(), childDatacenters.get(2).getKafkaBrokerWrapper().getAddress()); + } catch (Exception e) { + if (incrementalPushAllowed) { + throw e; + } } // Verify @@ -235,14 +246,26 @@ public void testAAReplicationForIncrementalPushToRT(boolean overrideDataReplicat childDataCenter.getRandomController().getVeniceAdmin().getStore(clusterName, storeName).getVersion(1); Assert.assertNotNull(version, "Version 1 is not present for DC: " + dcNames[i]); } - NativeReplicationTestUtils.verifyIncrementalPushData(childDatacenters, clusterName, storeName, 150, 2); + if (incrementalPushAllowed) { + NativeReplicationTestUtils.verifyIncrementalPushData(childDatacenters, clusterName, storeName, 150, 2); + } // Run another inc push with a different source fabric preference taking effect. try (VenicePushJob job = new VenicePushJob("Test push job incremental with NR + A/A from dc-1", propsInc2)) { job.run(); + if (!incrementalPushAllowed) { + Assert.fail("Incremental push should throw an exception for NON_AGGREGATE data replication policy"); + } Assert.assertEquals(job.getKafkaUrl(), childDatacenters.get(1).getKafkaBrokerWrapper().getAddress()); + } catch (Exception e) { + if (incrementalPushAllowed) { + throw e; + } + } + + if (incrementalPushAllowed) { + NativeReplicationTestUtils.verifyIncrementalPushData(childDatacenters, clusterName, storeName, 200, 3); } - NativeReplicationTestUtils.verifyIncrementalPushData(childDatacenters, clusterName, storeName, 200, 3); } } diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestBatch.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestBatch.java index 61746451d02..598308749c3 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestBatch.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestBatch.java @@ -114,8 +114,6 @@ import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; -//TODO: write a VPJWrapper that can handle the whole flow - @Test(singleThreaded = true) public abstract class TestBatch { @@ -225,18 +223,12 @@ public void testDataPushWithSchemaWithAWrongDefault() { inputDir -> new KeyAndValueSchemas(writeSimpleAvroFileWithASchemaWithAWrongDefaultValue(inputDir, recordCnt)), properties -> {}, (avroClient, vsonClient, metricsRepository) -> { - for (int i = 0; i < recordCnt; i++) { - Object valueObject = avroClient.get(Integer.toString(i)).get(); - Assert.assertTrue( - valueObject instanceof GenericRecord, - "The returned value must be a ''GenericRecord' for key: " + i); - GenericRecord value = (GenericRecord) valueObject; - Assert.assertEquals(value.get(DEFAULT_KEY_FIELD_PROP).toString(), Integer.toString(i)); - Assert.assertEquals(Float.valueOf(value.get("score").toString()), 100.0f); - } + Assert.fail("Store creation should have failed"); }); + } catch (AssertionError e) { + Assert.assertTrue(e.getMessage().contains("Invalid default for field")); } catch (Exception e) { - Assert.assertTrue(e.getMessage().contains("Could not create store")); + Assert.fail("Unexpected exception", e); } } @@ -471,7 +463,7 @@ public void testIncrementalPush() throws Exception { } }, storeName, - new 
UpdateStoreQueryParams().setIncrementalPushEnabled(true)); + null); testBatchStore( inputDir -> new KeyAndValueSchemas(writeSimpleAvroFileWithStringToStringSchema(inputDir)), @@ -483,7 +475,7 @@ public void testIncrementalPush() throws Exception { } }, storeName, - new UpdateStoreQueryParams().setIncrementalPushEnabled(true)); + null); } @Test(timeOut = TEST_TIMEOUT, dataProvider = "Two-True-and-False", dataProviderClass = DataProviderUtils.class) @@ -525,59 +517,6 @@ public void testIncrementalPushWithCompression( null); } - @Test(timeOut = TEST_TIMEOUT) - public void testIncrementalPushWritesToRealTimeTopicWithPolicy() throws Exception { - double randomNumber = Math.random(); - String classAndFunctionName = getClass().getSimpleName() + ".testIncrementalPushWritesToRealTimeTopicWithPolicy()"; - String uniqueTestId = "attempt [" + randomNumber + "] of " + classAndFunctionName; - LOGGER.info("Start of {}", uniqueTestId); - try { - String storeName = testBatchStore( - inputDir -> new KeyAndValueSchemas(writeSimpleAvroFileWithStringToStringSchema(inputDir)), - properties -> {}, - (avroClient, vsonClient, metricsRepository) -> { - for (int i = 1; i <= 100; i++) { - Assert.assertEquals(avroClient.get(Integer.toString(i)).get().toString(), "test_name_" + i); - } - }, - new UpdateStoreQueryParams().setIncrementalPushEnabled(true) - .setChunkingEnabled(true) - .setHybridOffsetLagThreshold(10) - .setHybridRewindSeconds(0)); - - testBatchStore( - inputDir -> new KeyAndValueSchemas(writeSimpleAvroFileWithStringToStringSchema2(inputDir)), - properties -> properties.setProperty(INCREMENTAL_PUSH, "true"), - (avroClient, vsonClient, metricsRepository) -> { - for (int i = 51; i <= 150; i++) { - Assert.assertEquals(avroClient.get(Integer.toString(i)).get().toString(), "test_name_" + (i * 2)); - } - }, - storeName, - null); - - testBatchStore( - inputDir -> new KeyAndValueSchemas(writeSimpleAvroFileWithStringToStringSchema(inputDir)), - properties -> {}, - (avroClient, vsonClient, metricsRepository) -> { - TestUtils.waitForNonDeterministicAssertion(30, TimeUnit.SECONDS, true, () -> { - for (int i = 1; i <= 100; i++) { - Assert.assertEquals(avroClient.get(Integer.toString(i)).get().toString(), "test_name_" + i); - } - for (int i = 101; i <= 150; i++) { - Assert.assertNull(avroClient.get(Integer.toString(i)).get()); - } - }); - }, - storeName, - null); - LOGGER.info("Successful end of {}", uniqueTestId); - } catch (Throwable e) { - LOGGER.error("Caught throwable in {}", uniqueTestId, e); - throw e; - } - } - @Test(timeOut = TEST_TIMEOUT) public void testMetaStoreSchemaValidation() throws Exception { String storeName = testBatchStore( diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestPushJobWithEmergencySourceRegionSelection.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestPushJobWithEmergencySourceRegionSelection.java index c9b30967887..09e073468e2 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestPushJobWithEmergencySourceRegionSelection.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestPushJobWithEmergencySourceRegionSelection.java @@ -129,8 +129,7 @@ public void testNativeReplicationForBatchPushWithEmergencySourceOverride(int rec new UpdateStoreQueryParams().setStorageQuotaInByte(Store.UNLIMITED_STORAGE_QUOTA) .setPartitionCount(partitionCount) .setNativeReplicationEnabled(true) - 
.setNativeReplicationSourceFabric("dc-1") - .setActiveActiveReplicationEnabled(true); + .setNativeReplicationSourceFabric("dc-1"); createStoreForJob(clusterName, keySchemaStr, valueSchemaStr, props, updateStoreParams).close(); @@ -150,7 +149,7 @@ public void testNativeReplicationForBatchPushWithEmergencySourceOverride(int rec * Check the update store command in parent controller has been propagated into child controllers, before * sending any commands directly into child controllers, which can help avoid race conditions. */ - TestUtils.verifyDCConfigNativeAndActiveRepl(storeName, true, true, dc0Client, dc1Client, dc2Client); + TestUtils.verifyDCConfigNativeAndActiveRepl(storeName, true, false, dc0Client, dc1Client, dc2Client); } try (VenicePushJob job = new VenicePushJob("Test push job", props)) { diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestPushJobWithNativeReplication.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestPushJobWithNativeReplication.java index 2d3accea013..e84810cdbeb 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestPushJobWithNativeReplication.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestPushJobWithNativeReplication.java @@ -481,7 +481,8 @@ public void testNativeReplicationForIncrementalPush() throws Exception { updateStoreQueryParams -> updateStoreQueryParams.setPartitionCount(1) .setHybridOffsetLagThreshold(TEST_TIMEOUT) .setHybridRewindSeconds(2L) - .setIncrementalPushEnabled(true), + .setIncrementalPushEnabled(true) + .setHybridDataReplicationPolicy(DataReplicationPolicy.NONE), 100, (parentControllerClient, clusterName, storeName, props, inputDir) -> { try (VenicePushJob job = new VenicePushJob("Batch Push", props)) { @@ -595,6 +596,7 @@ public void testMultiDataCenterRePushWithIncrementalPush() throws Exception { .updateStore( storeName, new UpdateStoreQueryParams().setIncrementalPushEnabled(true) + .setHybridDataReplicationPolicy(DataReplicationPolicy.NONE) .setHybridOffsetLagThreshold(1) .setHybridRewindSeconds(Time.SECONDS_PER_DAY)) .isError()); diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestStoreMigration.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestStoreMigration.java index 66be7ae66d4..d95f19c29d1 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestStoreMigration.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestStoreMigration.java @@ -55,12 +55,12 @@ import com.linkedin.venice.utils.Utils; import com.linkedin.venice.utils.VeniceProperties; import java.io.File; +import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; -import java.util.HashSet; +import java.util.List; import java.util.Optional; import java.util.Properties; -import java.util.Set; import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.TimeUnit; import org.apache.avro.Schema; @@ -330,7 +330,7 @@ public void testStoreMigrationWithDaVinciPushStatusSystemStore() throws Exceptio startMigration(parentControllerUrl, storeName); // Store migration status output via closure PrintFunction - Set statusOutput = new HashSet(); + List statusOutput = new ArrayList<>(); PrintFunction printFunction = (message) -> { statusOutput.add(message.trim()); System.err.println(message); @@ -362,18 
+362,20 @@ public void testStoreMigrationWithDaVinciPushStatusSystemStore() throws Exceptio .assertEquals(pushStatusStoreReader.getPartitionStatus(storeName, 1, 0, Optional.empty()).size(), 1)); // Verify that store and system store only exist in destination cluster after ending migration - statusOutput.clear(); endMigration(parentControllerUrl, storeName); - checkMigrationStatus(parentControllerUrl, storeName, printFunction); - Assert - .assertFalse(statusOutput.contains(String.format("%s exists in this cluster %s", storeName, srcClusterName))); - Assert - .assertTrue(statusOutput.contains(String.format("%s exists in this cluster %s", storeName, destClusterName))); - Assert.assertFalse( - statusOutput.contains(String.format("%s exists in this cluster %s", systemStoreName, srcClusterName))); - Assert.assertTrue( - statusOutput.contains(String.format("%s exists in this cluster %s", systemStoreName, destClusterName))); + TestUtils.waitForNonDeterministicAssertion(30, TimeUnit.SECONDS, () -> { + statusOutput.clear(); + checkMigrationStatus(parentControllerUrl, storeName, printFunction); + Assert.assertFalse( + statusOutput.contains(String.format("%s exists in this cluster %s", storeName, srcClusterName))); + Assert.assertTrue( + statusOutput.contains(String.format("%s exists in this cluster %s", storeName, destClusterName))); + Assert.assertFalse( + statusOutput.contains(String.format("%s exists in this cluster %s", systemStoreName, srcClusterName))); + Assert.assertTrue( + statusOutput.contains(String.format("%s exists in this cluster %s", systemStoreName, destClusterName))); + }); } finally { Utils.closeQuietlyWithErrorLogged(pushStatusStoreReader); D2ClientUtils.shutdownClient(d2Client); diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestStoreUpdateStoragePersona.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestStoreUpdateStoragePersona.java index 99e40b2b5cb..5c752361e2c 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestStoreUpdateStoragePersona.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestStoreUpdateStoragePersona.java @@ -8,8 +8,8 @@ import com.linkedin.venice.controllerapi.UpdateStoreQueryParams; import com.linkedin.venice.exceptions.VeniceException; import com.linkedin.venice.integration.utils.ServiceFactory; -import com.linkedin.venice.integration.utils.VeniceMultiRegionClusterCreateOptions; -import com.linkedin.venice.integration.utils.VeniceTwoLayerMultiRegionMultiClusterWrapper; +import com.linkedin.venice.integration.utils.VeniceClusterCreateOptions; +import com.linkedin.venice.integration.utils.VeniceClusterWrapper; import com.linkedin.venice.meta.Store; import com.linkedin.venice.persona.StoragePersona; import com.linkedin.venice.utils.TestStoragePersonaUtils; @@ -26,23 +26,20 @@ public class TestStoreUpdateStoragePersona { - // Ideally this should work with a single region cluster, but today persona only works with a multi region cluster - private VeniceTwoLayerMultiRegionMultiClusterWrapper venice; + private VeniceClusterWrapper venice; private ControllerClient controllerClient; @BeforeClass(alwaysRun = true) public void setUp() { - venice = ServiceFactory.getVeniceTwoLayerMultiRegionMultiClusterWrapper( - new VeniceMultiRegionClusterCreateOptions.Builder().numberOfRegions(1) - .numberOfParentControllers(1) - .numberOfChildControllers(1) + venice = ServiceFactory.getVeniceCluster( + new 
VeniceClusterCreateOptions.Builder().numberOfControllers(1) .numberOfServers(1) .numberOfRouters(1) .replicationFactor(2) .sslToStorageNodes(false) .sslToKafka(false) .build()); - controllerClient = new ControllerClient(venice.getClusterNames()[0], venice.getControllerConnectString()); + controllerClient = new ControllerClient(venice.getClusterName(), venice.getAllControllersURLs()); } @AfterClass(alwaysRun = true) diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestWritePathComputation.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestWritePathComputation.java index 60da54bae1f..e89572aa7af 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestWritePathComputation.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/endToEnd/TestWritePathComputation.java @@ -29,7 +29,7 @@ public class TestWritePathComputation { + "\"fields\": [\n" + " {\"name\": \"" + VALUE_FIELD_NAME + "\", \"type\": \"int\", \"default\": 10},\n" + "{\"name\": \"" + SECOND_VALUE_FIELD_NAME + "\", \"type\": [\"null\", \"int\"], \"default\": null}]\n" + "}"; - @Test(timeOut = 60 * Time.MS_PER_SECOND) + @Test(timeOut = 90 * Time.MS_PER_SECOND) public void testFeatureFlagSingleDC() { VeniceMultiClusterCreateOptions options = new VeniceMultiClusterCreateOptions.Builder().numberOfClusters(1) .numberOfControllers(1) @@ -41,23 +41,51 @@ public void testFeatureFlagSingleDC() { String clusterName = multiClusterWrapper.getClusterNames()[0]; VeniceControllerWrapper childController = multiClusterWrapper.getLeaderController(clusterName); String storeName = "test-store0"; + String storeName2 = "test-store2"; // Create store Admin childAdmin = multiClusterWrapper.getLeaderController(clusterName, GET_LEADER_CONTROLLER_TIMEOUT).getVeniceAdmin(); childAdmin.createStore(clusterName, storeName, "tester", "\"string\"", "\"string\""); + childAdmin.createStore(clusterName, storeName2, "tester", KEY_SCHEMA_STR, VALUE_SCHEMA_V2_STR); TestUtils.waitForNonDeterministicAssertion(15, TimeUnit.SECONDS, () -> { Assert.assertTrue(childAdmin.hasStore(clusterName, storeName)); Assert.assertFalse(childAdmin.getStore(clusterName, storeName).isWriteComputationEnabled()); + Assert.assertTrue(childAdmin.hasStore(clusterName, storeName2)); + Assert.assertFalse(childAdmin.getStore(clusterName, storeName2).isWriteComputationEnabled()); }); // Set flag String childControllerUrl = childController.getControllerUrl(); try (ControllerClient childControllerClient = new ControllerClient(clusterName, childControllerUrl)) { + ControllerResponse response = + childControllerClient.updateStore(storeName, new UpdateStoreQueryParams().setWriteComputationEnabled(true)); + Assert.assertTrue(response.isError()); + Assert.assertTrue(response.getError().contains("Write computation is only supported for hybrid stores")); + + ControllerResponse response2 = childControllerClient.updateStore( + storeName, + new UpdateStoreQueryParams().setHybridRewindSeconds(1000) + .setHybridOffsetLagThreshold(1000) + .setWriteComputationEnabled(true)); + Assert.assertTrue(response2.isError()); + Assert.assertTrue(response2.getError().contains("top level field probably missing defaults")); + + TestUtils.waitForNonDeterministicAssertion(15, TimeUnit.SECONDS, () -> { + Assert.assertFalse( + childAdmin.getStore(clusterName, storeName).isWriteComputationEnabled(), + "Write Compute should not be enabled if the value schema is not a Record."); 
+ }); + assertCommand( - childControllerClient.updateStore(storeName, new UpdateStoreQueryParams().setWriteComputationEnabled(true)), - "Write Compute should be enabled"); - Assert.assertTrue(childAdmin.getStore(clusterName, storeName).isWriteComputationEnabled()); + childControllerClient.updateStore( + storeName2, + new UpdateStoreQueryParams().setHybridRewindSeconds(1000) + .setHybridOffsetLagThreshold(1000) + .setWriteComputationEnabled(true))); + TestUtils.waitForNonDeterministicAssertion(15, TimeUnit.SECONDS, () -> { + Assert.assertTrue(childAdmin.getStore(clusterName, storeName2).isWriteComputationEnabled()); + }); // Reset flag assertCommand( @@ -70,7 +98,7 @@ public void testFeatureFlagSingleDC() { } } - @Test(timeOut = 90 * Time.MS_PER_SECOND) + @Test(timeOut = 120 * Time.MS_PER_SECOND) public void testFeatureFlagMultipleDC() { try (VeniceTwoLayerMultiRegionMultiClusterWrapper twoLayerMultiRegionMultiClusterWrapper = ServiceFactory.getVeniceTwoLayerMultiRegionMultiClusterWrapper(1, 1, 1, 1, 1, 0)) { @@ -100,7 +128,7 @@ public void testFeatureFlagMultipleDC() { ControllerResponse response = parentControllerClient .updateStore(storeName, new UpdateStoreQueryParams().setWriteComputationEnabled(true)); Assert.assertTrue(response.isError()); - Assert.assertTrue(response.getError().contains("top level field probably missing defaults")); + Assert.assertTrue(response.getError().contains("Write computation is only supported for hybrid stores")); ControllerResponse response2 = parentControllerClient.updateStore( storeName, @@ -113,10 +141,10 @@ public void testFeatureFlagMultipleDC() { TestUtils.waitForNonDeterministicAssertion(15, TimeUnit.SECONDS, () -> { Assert.assertFalse( parentAdmin.getStore(clusterName, storeName).isWriteComputationEnabled(), - "Write Compute should not be enabled before the value schema is not a Record."); + "Write Compute should not be enabled if the value schema is not a Record."); Assert.assertFalse( childAdmin.getStore(clusterName, storeName).isWriteComputationEnabled(), - "Write Compute should not be enabled before the value schema is not a Record."); + "Write Compute should not be enabled if the value schema is not a Record."); }); assertCommand( diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/hadoop/TestVenicePushJob.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/hadoop/TestVenicePushJob.java index 2d1366b5e1a..53ea4f485b9 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/hadoop/TestVenicePushJob.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/hadoop/TestVenicePushJob.java @@ -21,6 +21,7 @@ import static com.linkedin.venice.utils.IntegrationTestPushUtils.createStoreForJob; import static com.linkedin.venice.utils.IntegrationTestPushUtils.defaultVPJProps; import static com.linkedin.venice.utils.TestWriteUtils.getTempDataDirectory; +import static com.linkedin.venice.utils.TestWriteUtils.loadFileAsString; import static com.linkedin.venice.utils.TestWriteUtils.writeSimpleAvroFileWithStringToStringSchema; import static com.linkedin.venice.utils.TestWriteUtils.writeSimpleAvroFileWithStringToStringSchema2; import static com.linkedin.venice.utils.TestWriteUtils.writeSimpleVsonFileWithUserSchema; @@ -468,11 +469,14 @@ public void testWCBatchJob() throws Exception { String routerUrl = veniceCluster.getRandomRouterURL(); ControllerClient controllerClient = new ControllerClient(veniceCluster.getClusterName(), routerUrl); - 
UpdateStoreQueryParams params = new UpdateStoreQueryParams(); - params.setWriteComputationEnabled(true); - params.setIncrementalPushEnabled(false); + UpdateStoreQueryParams params = new UpdateStoreQueryParams().setHybridRewindSeconds(Time.SECONDS_PER_DAY) + .setHybridOffsetLagThreshold(1000) + .setWriteComputationEnabled(true) + .setIncrementalPushEnabled(false); - controllerClient.createNewStoreWithParameters(storeName, "owner", "\"string\"", "\"string\"", params); + String valueSchemaStr = loadFileAsString("UserValue.avsc"); + + controllerClient.createNewStoreWithParameters(storeName, "owner", "\"string\"", valueSchemaStr, params); String inputDirPath = "file://" + inputDir.getAbsolutePath(); Properties props = defaultVPJProps(veniceCluster, inputDirPath, storeName); @@ -630,7 +634,7 @@ public void testKIFRepushFetch(boolean chunkingEnabled) throws Exception { new UpdateStoreQueryParams().setStorageQuotaInByte(Store.UNLIMITED_STORAGE_QUOTA) .setPartitionCount(2) .setIncrementalPushEnabled(true) - .setWriteComputationEnabled(true))); + .setChunkingEnabled(true))); Properties props = defaultVPJProps(veniceCluster, inputDirPath, storeName); props.setProperty(SEND_CONTROL_MESSAGES_DIRECTLY, "true"); // create a batch version. @@ -653,13 +657,27 @@ public void testKIFRepushFetch(boolean chunkingEnabled) throws Exception { new UpdateStoreQueryParams().setHybridOffsetLagThreshold(1) .setHybridRewindSeconds(0) .setChunkingEnabled(chunkingEnabled))); - // Run the repush job, it should still pass - TestWriteUtils.runPushJob("Test push job", props); - try (AvroGenericStoreClient avroClient = ClientFactory.getAndStartGenericAvroClient( - ClientConfig.defaultGenericClientConfig(storeName).setVeniceURL(veniceCluster.getRandomRouterURL()))) { - for (int i = 1; i <= 100; i++) { - Assert.assertEquals(avroClient.get(Integer.toString(i)).get().toString(), "test_name_" + i); + try { + // Run the repush job, it should still pass + TestWriteUtils.runPushJob("Test push job", props); + + if (!chunkingEnabled) { + Assert.fail("Expected an exception since chunking was disabled in store config"); + } + + try (AvroGenericStoreClient avroClient = ClientFactory.getAndStartGenericAvroClient( + ClientConfig.defaultGenericClientConfig(storeName).setVeniceURL(veniceCluster.getRandomRouterURL()))) { + for (int i = 1; i <= 100; i++) { + Assert.assertEquals(avroClient.get(Integer.toString(i)).get().toString(), "test_name_" + i); + } + } + } catch (VeniceException e) { + if (!chunkingEnabled) { + Assert.assertTrue( + e.getMessage().contains("Source version has chunking enabled while chunking is disabled in store config")); + } else { + Assert.fail("Unexpected exception: " + e.getMessage(), e); } } } diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceControllerCreateOptions.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceControllerCreateOptions.java index 305c49b76da..9fe11aa5a8d 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceControllerCreateOptions.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceControllerCreateOptions.java @@ -11,6 +11,7 @@ import static com.linkedin.venice.integration.utils.VeniceClusterWrapperConstants.DEFAULT_REPLICATION_FACTOR; import com.linkedin.venice.authorization.AuthorizerService; +import com.linkedin.venice.controller.supersetschema.SupersetSchemaGenerator; 
import java.util.Arrays; import java.util.Map; import java.util.Objects; @@ -37,6 +38,7 @@ public class VeniceControllerCreateOptions { private final Properties extraProperties; private final AuthorizerService authorizerService; private final String regionName; + private final SupersetSchemaGenerator supersetSchemaGenerator; private VeniceControllerCreateOptions(Builder builder) { multiRegion = builder.multiRegion; @@ -57,6 +59,7 @@ private VeniceControllerCreateOptions(Builder builder) { authorizerService = builder.authorizerService; isParent = builder.childControllers != null && builder.childControllers.length != 0; regionName = builder.regionName; + supersetSchemaGenerator = builder.supersetSchemaGenerator; } @Override @@ -111,6 +114,9 @@ public String toString() { .append(", ") .append("childControllers:") .append(getAddressesOfChildControllers()) + .append(", ") + .append("supersetSchemaGenerator:") + .append(supersetSchemaGenerator) .toString(); } @@ -196,6 +202,10 @@ public String getRegionName() { return regionName; } + public SupersetSchemaGenerator getSupersetSchemaGenerator() { + return supersetSchemaGenerator; + } + public static class Builder { private boolean multiRegion = false; private final String[] clusterNames; @@ -214,6 +224,7 @@ public static class Builder { private Properties extraProperties = new Properties(); private AuthorizerService authorizerService; private String regionName; + private SupersetSchemaGenerator supersetSchemaGenerator; public Builder(String[] clusterNames, ZkServerWrapper zkServer, PubSubBrokerWrapper kafkaBroker) { this.clusterNames = Objects.requireNonNull(clusterNames, "clusterNames cannot be null when creating controller"); @@ -296,6 +307,11 @@ public Builder regionName(String regionName) { return this; } + public Builder supersetSchemaGenerator(SupersetSchemaGenerator supersetSchemaGenerator) { + this.supersetSchemaGenerator = supersetSchemaGenerator; + return this; + } + private void verifyAndAddParentControllerSpecificDefaults() { extraProperties.setProperty(LOCAL_REGION_NAME, DEFAULT_PARENT_DATA_CENTER_REGION_NAME); if (!extraProperties.containsKey(CONTROLLER_AUTO_MATERIALIZE_META_SYSTEM_STORE)) { diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceControllerWrapper.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceControllerWrapper.java index 19171da3c5a..76b7989c438 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceControllerWrapper.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceControllerWrapper.java @@ -60,7 +60,6 @@ import com.linkedin.venice.controller.VeniceControllerContext; import com.linkedin.venice.controller.VeniceHelixAdmin; import com.linkedin.venice.controller.kafka.consumer.AdminConsumerService; -import com.linkedin.venice.controller.supersetschema.SupersetSchemaGenerator; import com.linkedin.venice.d2.D2Server; import com.linkedin.venice.meta.PersistenceType; import com.linkedin.venice.pubsub.PubSubClientsFactory; @@ -100,8 +99,6 @@ public class VeniceControllerWrapper extends ProcessWrapper { public static final String PARENT_D2_CLUSTER_NAME = "ParentControllerD2Cluster"; public static final String PARENT_D2_SERVICE_NAME = "ParentController"; - public static final String SUPERSET_SCHEMA_GENERATOR = "SupersetSchemaGenerator"; - public static final double DEFAULT_STORAGE_ENGINE_OVERHEAD_RATIO = 
0.85d; private VeniceController service; @@ -350,18 +347,13 @@ static StatefulServiceProvider generateService(VeniceCo if (clientConfig instanceof ClientConfig) { consumerClientConfig = Optional.of((ClientConfig) clientConfig); } - Optional supersetSchemaGenerator = Optional.empty(); - Object passedSupersetSchemaGenerator = options.getExtraProperties().get(SUPERSET_SCHEMA_GENERATOR); - if (passedSupersetSchemaGenerator instanceof SupersetSchemaGenerator) { - supersetSchemaGenerator = Optional.of((SupersetSchemaGenerator) passedSupersetSchemaGenerator); - } VeniceControllerContext ctx = new VeniceControllerContext.Builder().setPropertiesList(propertiesList) .setMetricsRepository(metricsRepository) .setServiceDiscoveryAnnouncers(d2ServerList) .setAuthorizerService(options.getAuthorizerService()) .setD2Client(d2Client) .setRouterClientConfig(consumerClientConfig.orElse(null)) - .setExternalSupersetSchemaGenerator(supersetSchemaGenerator.orElse(null)) + .setExternalSupersetSchemaGenerator(options.getSupersetSchemaGenerator()) .build(); VeniceController veniceController = new VeniceController(ctx); return new VeniceControllerWrapper( diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceMultiClusterCreateOptions.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceMultiClusterCreateOptions.java index 807aa17aa51..bec53373252 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceMultiClusterCreateOptions.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceMultiClusterCreateOptions.java @@ -10,6 +10,7 @@ import static com.linkedin.venice.integration.utils.VeniceClusterWrapperConstants.DEFAULT_SSL_TO_STORAGE_NODES; import static com.linkedin.venice.integration.utils.VeniceClusterWrapperConstants.STANDALONE_REGION_NAME; +import com.linkedin.venice.controller.supersetschema.SupersetSchemaGenerator; import java.util.Collections; import java.util.Map; import java.util.Properties; @@ -36,6 +37,7 @@ public class VeniceMultiClusterCreateOptions { private final PubSubBrokerWrapper pubSubBrokerWrapper; private final Properties childControllerProperties; private final Properties extraProperties; + private final SupersetSchemaGenerator supersetSchemaGenerator; public String getRegionName() { return regionName; @@ -117,6 +119,10 @@ public Properties getExtraProperties() { return extraProperties; } + public SupersetSchemaGenerator getSupersetSchemaGenerator() { + return supersetSchemaGenerator; + } + @Override public String toString() { return new StringBuilder().append("VeniceMultiClusterCreateOptions - ") @@ -180,6 +186,9 @@ public String toString() { .append(", ") .append("kafkaClusterMap:") .append(kafkaClusterMap) + .append(", ") + .append("supersetSchemaGenerator:") + .append(supersetSchemaGenerator) .toString(); } @@ -204,6 +213,7 @@ private VeniceMultiClusterCreateOptions(Builder builder) { extraProperties = builder.extraProperties; forkServer = builder.forkServer; kafkaClusterMap = builder.kafkaClusterMap; + supersetSchemaGenerator = builder.supersetSchemaGenerator; } public static class Builder { @@ -227,6 +237,7 @@ public static class Builder { private PubSubBrokerWrapper pubSubBrokerWrapper; private Properties childControllerProperties; private Properties extraProperties; + private SupersetSchemaGenerator supersetSchemaGenerator; public Builder numberOfClusters(int numberOfClusters) 
{ this.numberOfClusters = numberOfClusters; @@ -328,6 +339,11 @@ public Builder extraProperties(Properties extraProperties) { return this; } + public Builder supersetSchemaGenerator(SupersetSchemaGenerator supersetSchemaGenerator) { + this.supersetSchemaGenerator = supersetSchemaGenerator; + return this; + } + private void addDefaults() { if (numberOfClusters == 0) { numberOfClusters = 1; diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceMultiRegionClusterCreateOptions.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceMultiRegionClusterCreateOptions.java index 22cf7e9e344..e1c6d0799c6 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceMultiRegionClusterCreateOptions.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceMultiRegionClusterCreateOptions.java @@ -8,6 +8,7 @@ import static com.linkedin.venice.integration.utils.VeniceClusterWrapperConstants.DEFAULT_SSL_TO_STORAGE_NODES; import com.linkedin.venice.authorization.AuthorizerService; +import com.linkedin.venice.controller.supersetschema.SupersetSchemaGenerator; import java.util.Properties; @@ -26,6 +27,7 @@ public class VeniceMultiRegionClusterCreateOptions { private final Properties childControllerProperties; private final Properties serverProperties; private final AuthorizerService parentAuthorizerService; + private final SupersetSchemaGenerator supersetSchemaGenerator; public int getNumberOfRegions() { return numberOfRegions; @@ -83,6 +85,10 @@ public AuthorizerService getParentAuthorizerService() { return parentAuthorizerService; } + public SupersetSchemaGenerator getSupersetSchemaGenerator() { + return supersetSchemaGenerator; + } + @Override public String toString() { return new StringBuilder().append("VeniceMultiClusterCreateOptions - ") @@ -127,6 +133,9 @@ public String toString() { .append(", ") .append("parentAuthorizerService:") .append(parentAuthorizerService) + .append(", ") + .append("supersetSchemaGenerator:") + .append(supersetSchemaGenerator) .toString(); } @@ -145,6 +154,7 @@ private VeniceMultiRegionClusterCreateOptions(Builder builder) { sslToKafka = builder.sslToKafka; forkServer = builder.forkServer; parentAuthorizerService = builder.parentAuthorizerService; + supersetSchemaGenerator = builder.supersetSchemaGenerator; } public static class Builder { @@ -162,6 +172,7 @@ public static class Builder { private Properties childControllerProperties; private Properties serverProperties; private AuthorizerService parentAuthorizerService; + private SupersetSchemaGenerator supersetSchemaGenerator; public Builder numberOfRegions(int numberOfRegions) { this.numberOfRegions = numberOfRegions; @@ -233,6 +244,11 @@ public Builder parentAuthorizerService(AuthorizerService parentAuthorizerService return this; } + public Builder supersetSchemaGenerator(SupersetSchemaGenerator supersetSchemaGenerator) { + this.supersetSchemaGenerator = supersetSchemaGenerator; + return this; + } + private void addDefaults() { if (numberOfRegions == 0) { numberOfRegions = 1; diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceTwoLayerMultiRegionMultiClusterWrapper.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceTwoLayerMultiRegionMultiClusterWrapper.java index dabdfd2f432..243673af188 100644 --- 
a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceTwoLayerMultiRegionMultiClusterWrapper.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/integration/utils/VeniceTwoLayerMultiRegionMultiClusterWrapper.java @@ -190,7 +190,8 @@ static ServiceProvider generateSer .sslToStorageNodes(options.isSslToStorageNodes()) .sslToKafka(options.isSslToKafka()) .forkServer(options.isForkServer()) - .kafkaClusterMap(kafkaClusterMap); + .kafkaClusterMap(kafkaClusterMap) + .supersetSchemaGenerator(options.getSupersetSchemaGenerator()); // Create multi-clusters for (int i = 0; i < options.getNumberOfRegions(); i++) { String regionName = childRegionName.get(i); @@ -221,6 +222,7 @@ static ServiceProvider generateSer .clusterToServerD2(clusterToServerD2) .regionName(parentRegionName) .authorizerService(options.getParentAuthorizerService()) + .supersetSchemaGenerator(options.getSupersetSchemaGenerator()) .build(); // Create parentControllers for multi-cluster for (int i = 0; i < options.getNumberOfParentControllers(); i++) { diff --git a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/utils/IntegrationTestPushUtils.java b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/utils/IntegrationTestPushUtils.java index 236c6424a40..254629fcaa5 100644 --- a/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/utils/IntegrationTestPushUtils.java +++ b/internal/venice-test-common/src/integrationTest/java/com/linkedin/venice/utils/IntegrationTestPushUtils.java @@ -25,12 +25,12 @@ import static com.linkedin.venice.samza.VeniceSystemFactory.VENICE_PARENT_D2_ZK_HOSTS; import static com.linkedin.venice.samza.VeniceSystemFactory.VENICE_PUSH_TYPE; import static com.linkedin.venice.samza.VeniceSystemFactory.VENICE_STORE; +import static com.linkedin.venice.utils.TestUtils.assertCommand; import com.linkedin.venice.compression.CompressionStrategy; import com.linkedin.venice.controllerapi.ControllerClient; import com.linkedin.venice.controllerapi.ControllerResponse; import com.linkedin.venice.controllerapi.D2ControllerClientFactory; -import com.linkedin.venice.controllerapi.NewStoreResponse; import com.linkedin.venice.controllerapi.UpdateStoreQueryParams; import com.linkedin.venice.endToEnd.DaVinciClientDiskFullTest; import com.linkedin.venice.exceptions.VeniceException; @@ -175,9 +175,7 @@ public static ControllerClient createStoreForJob( keySchemaStr, valueSchema, props, - CompressionStrategy.NO_OP, - false, - false); + CompressionStrategy.NO_OP); } public static void makeStoreHybrid( @@ -258,9 +256,7 @@ public static ControllerClient createStoreForJob(String veniceClusterName, Schem recordSchema.getField(props.getProperty(KEY_FIELD_PROP, DEFAULT_KEY_FIELD_PROP)).schema().toString(), recordSchema.getField(props.getProperty(VALUE_FIELD_PROP, DEFAULT_VALUE_FIELD_PROP)).schema().toString(), props, - CompressionStrategy.NO_OP, - false, - false); + CompressionStrategy.NO_OP); } public static ControllerClient createStoreForJob( @@ -268,17 +264,13 @@ public static ControllerClient createStoreForJob( String keySchemaStr, String valueSchemaStr, Properties props, - CompressionStrategy compressionStrategy, - boolean chunkingEnabled, - boolean incrementalPushEnabled) { + CompressionStrategy compressionStrategy) { UpdateStoreQueryParams storeParams = new UpdateStoreQueryParams().setStorageQuotaInByte(Store.UNLIMITED_STORAGE_QUOTA) .setCompressionStrategy(compressionStrategy) 
.setBatchGetLimit(2000) - .setReadQuotaInCU(DEFAULT_PER_ROUTER_READ_QUOTA) - .setChunkingEnabled(chunkingEnabled) - .setIncrementalPushEnabled(incrementalPushEnabled); + .setReadQuotaInCU(DEFAULT_PER_ROUTER_READ_QUOTA); return createStoreForJob(veniceClusterName, keySchemaStr, valueSchemaStr, props, storeParams); } @@ -290,12 +282,12 @@ public static ControllerClient createStoreForJob( Properties props, UpdateStoreQueryParams storeParams) { ControllerClient controllerClient = getControllerClient(veniceClusterName, props); - NewStoreResponse newStoreResponse = controllerClient - .createNewStore(props.getProperty(VENICE_STORE_NAME_PROP), "test@linkedin.com", keySchemaStr, valueSchemaStr); - - if (newStoreResponse.isError()) { - throw new VeniceException("Could not create store " + props.getProperty(VENICE_STORE_NAME_PROP)); - } + assertCommand( + controllerClient.createNewStore( + props.getProperty(VENICE_STORE_NAME_PROP), + "test@linkedin.com", + keySchemaStr, + valueSchemaStr)); updateStore(veniceClusterName, props, storeParams.setStorageQuotaInByte(Store.UNLIMITED_STORAGE_QUOTA)); return controllerClient; diff --git a/services/venice-controller/src/main/java/com/linkedin/venice/controller/Admin.java b/services/venice-controller/src/main/java/com/linkedin/venice/controller/Admin.java index 6547752ad24..85c1b8fad73 100644 --- a/services/venice-controller/src/main/java/com/linkedin/venice/controller/Admin.java +++ b/services/venice-controller/src/main/java/com/linkedin/venice/controller/Admin.java @@ -3,6 +3,7 @@ import com.linkedin.venice.acl.AclException; import com.linkedin.venice.common.VeniceSystemStoreType; import com.linkedin.venice.controller.kafka.consumer.AdminConsumerService; +import com.linkedin.venice.controller.supersetschema.SupersetSchemaGenerator; import com.linkedin.venice.controllerapi.NodeReplicasReadinessState; import com.linkedin.venice.controllerapi.RepushInfo; import com.linkedin.venice.controllerapi.StoreComparisonInfo; @@ -14,6 +15,7 @@ import com.linkedin.venice.helix.HelixReadOnlyZKSharedSystemStoreRepository; import com.linkedin.venice.helix.Replica; import com.linkedin.venice.meta.Instance; +import com.linkedin.venice.meta.ReadWriteSchemaRepository; import com.linkedin.venice.meta.RegionPushDetails; import com.linkedin.venice.meta.RoutersClusterConfig; import com.linkedin.venice.meta.Store; @@ -387,7 +389,14 @@ default SchemaEntry addValueSchema(String clusterName, String storeName, String SchemaEntry.DEFAULT_SCHEMA_CREATION_COMPATIBILITY_TYPE); } - SchemaEntry addSupersetSchema( + /** + * Add a new superset schema for the given store with all specified properties. + *
+ * Generate the superset schema from the current schema and the latest superset schema existing in the store (if no superset + schema exists yet, pick the latest value schema instead). + * If the newly generated superset schema is unique, add it to the store and update the latestSuperSetValueSchemaId of the store. + */ + void addSupersetSchema( String clusterName, String storeName, String valueSchemaStr, @@ -448,8 +457,6 @@ void validateAndMaybeRetrySystemStoreAutoCreation( void rollbackToBackupVersion(String clusterName, String storeName, String regionFilter); - void setStoreLargestUsedVersion(String clusterName, String storeName, int versionNumber); - void setStoreOwner(String clusterName, String storeName, String owner); void setStorePartitionCount(String clusterName, String storeName, int partitionCount); @@ -755,6 +762,13 @@ void updateRoutersClusterConfig( */ boolean isParent(); + /** + * The "Primary Controller" term is used to refer to whichever controller is the main controller in a Venice setup. + * In a multi-region deployment, the primary controller is the parent controller. + * In a single-region deployment, the primary controller is the only controller. + */ + boolean isPrimary(); + /** + * Return the state of the region of the parent controller. + * @return {@link ParentControllerRegionState#ACTIVE} which means that the parent controller in the region is serving requests. @@ -1010,4 +1024,21 @@ default void clearInstanceMonitor(String clusterName) { HelixVeniceClusterResources getHelixVeniceClusterResources(String cluster); PubSubTopicRepository getPubSubTopicRepository(); + + default Schema getSupersetOrLatestValueSchema(String clusterName, Store store) { + ReadWriteSchemaRepository schemaRepository = getHelixVeniceClusterResources(clusterName).getSchemaRepository(); + // If a superset schema already exists, return it; otherwise, return the latest value schema + SchemaEntry existingSchema = schemaRepository.getSupersetOrLatestValueSchema(store.getName()); + return existingSchema == null ? null : existingSchema.getSchema(); + } + + /** + * Return the current superset schema generator for the given cluster. + */ + SupersetSchemaGenerator getSupersetSchemaGenerator(String clusterName); + + /** + * Return the multi-cluster configs for the controller. 
+ */ + VeniceControllerMultiClusterConfig getMultiClusterConfigs(); } diff --git a/services/venice-controller/src/main/java/com/linkedin/venice/controller/StoreViewUtils.java b/services/venice-controller/src/main/java/com/linkedin/venice/controller/StoreViewUtils.java index 3e363fdd6bb..aaf6c462e52 100644 --- a/services/venice-controller/src/main/java/com/linkedin/venice/controller/StoreViewUtils.java +++ b/services/venice-controller/src/main/java/com/linkedin/venice/controller/StoreViewUtils.java @@ -20,28 +20,7 @@ public class StoreViewUtils { private static final VeniceJsonSerializer viewConfigVeniceJsonSerializer = new VeniceJsonSerializer<>(ViewConfig.class); - static Map convertStringMapViewToStoreViewConfigRecordMap( - Map stringMap) throws VeniceException { - Map mergedViewConfigRecords = new HashMap<>(); - if (!stringMap.isEmpty()) { - for (Map.Entry stringViewConfig: stringMap.entrySet()) { - try { - ViewConfig viewConfig = - viewConfigVeniceJsonSerializer.deserialize(stringViewConfig.getValue().getBytes(), ""); - StoreViewConfigRecord newViewConfigRecord = new StoreViewConfigRecord( - viewConfig.getViewClassName(), - CollectionUtils.getStringKeyCharSequenceValueMapFromStringMap(viewConfig.getViewParameters())); - mergedViewConfigRecords.put(stringViewConfig.getKey(), newViewConfigRecord); - } catch (IOException e) { - LOGGER.error("Failed to serialize provided view config: {}", stringViewConfig.getValue()); - throw new VeniceException("Failed to serialize provided view config:" + stringViewConfig.getValue(), e); - } - } - } - return mergedViewConfigRecords; - } - - static Map convertStringMapViewToStoreViewConfigMap(Map stringMap) { + public static Map convertStringMapViewToStoreViewConfigMap(Map stringMap) { Map mergedViewConfigRecords = new HashMap<>(); if (!stringMap.isEmpty()) { for (Map.Entry stringViewConfig: stringMap.entrySet()) { @@ -62,20 +41,20 @@ static Map convertStringMapViewToStoreViewConfigMap(Map return mergedViewConfigRecords; } - static Map convertStringMapViewToViewConfigMap(Map stringMap) { + public static Map convertStringMapViewToViewConfigMap(Map stringMap) { return convertStringMapViewToStoreViewConfigMap(stringMap).entrySet() .stream() .collect(Collectors.toMap(Map.Entry::getKey, e -> new ViewConfigImpl(e.getValue()))); } - static Map convertViewConfigMapToStoreViewRecordMap( + public static Map convertViewConfigMapToStoreViewRecordMap( Map viewConfigMap) { return viewConfigMap.entrySet() .stream() .collect(Collectors.toMap(Map.Entry::getKey, e -> convertViewConfigToStoreViewConfigRecord(e.getValue()))); } - static StoreViewConfigRecord convertViewConfigToStoreViewConfigRecord(ViewConfig viewConfig) { + public static StoreViewConfigRecord convertViewConfigToStoreViewConfigRecord(ViewConfig viewConfig) { return new StoreViewConfigRecord(viewConfig.getViewClassName(), viewConfig.dataModel().getViewParameters()); } } diff --git a/services/venice-controller/src/main/java/com/linkedin/venice/controller/VeniceControllerClusterConfig.java b/services/venice-controller/src/main/java/com/linkedin/venice/controller/VeniceControllerClusterConfig.java index 1820d8b00a0..4989069d31e 100644 --- a/services/venice-controller/src/main/java/com/linkedin/venice/controller/VeniceControllerClusterConfig.java +++ b/services/venice-controller/src/main/java/com/linkedin/venice/controller/VeniceControllerClusterConfig.java @@ -46,6 +46,7 @@ import static com.linkedin.venice.ConfigKeys.CONTROLLER_EARLY_DELETE_BACKUP_ENABLED; import static 
com.linkedin.venice.ConfigKeys.CONTROLLER_ENABLE_DISABLED_REPLICA_ENABLED; import static com.linkedin.venice.ConfigKeys.CONTROLLER_ENFORCE_SSL; +import static com.linkedin.venice.ConfigKeys.CONTROLLER_EXTERNAL_SUPERSET_SCHEMA_GENERATION_ENABLED; import static com.linkedin.venice.ConfigKeys.CONTROLLER_HAAS_SUPER_CLUSTER_NAME; import static com.linkedin.venice.ConfigKeys.CONTROLLER_IN_AZURE_FABRIC; import static com.linkedin.venice.ConfigKeys.CONTROLLER_JETTY_CONFIG_OVERRIDE_PREFIX; @@ -374,7 +375,7 @@ public class VeniceControllerClusterConfig { private final int parentSystemStoreRepairRetryCount; - private final boolean parentExternalSupersetSchemaGenerationEnabled; + private final boolean externalSupersetSchemaGenerationEnabled; private final boolean systemSchemaInitializationAtStartTimeEnabled; @@ -880,8 +881,10 @@ public VeniceControllerClusterConfig(VeniceProperties props) { this.parentSystemStoreRepairRetryCount = props.getInt(CONTROLLER_PARENT_SYSTEM_STORE_REPAIR_RETRY_COUNT, 1); this.clusterDiscoveryD2ServiceName = props.getString(CLUSTER_DISCOVERY_D2_SERVICE, ClientConfig.DEFAULT_CLUSTER_DISCOVERY_D2_SERVICE_NAME); - this.parentExternalSupersetSchemaGenerationEnabled = - props.getBoolean(CONTROLLER_PARENT_EXTERNAL_SUPERSET_SCHEMA_GENERATION_ENABLED, false); + this.externalSupersetSchemaGenerationEnabled = props.getBooleanWithAlternative( + CONTROLLER_EXTERNAL_SUPERSET_SCHEMA_GENERATION_ENABLED, + CONTROLLER_PARENT_EXTERNAL_SUPERSET_SCHEMA_GENERATION_ENABLED, + false); this.systemSchemaInitializationAtStartTimeEnabled = props.getBoolean(SYSTEM_SCHEMA_INITIALIZATION_AT_START_TIME_ENABLED, false); this.isKMERegistrationFromMessageHeaderEnabled = @@ -1540,8 +1543,8 @@ public int getParentSystemStoreRepairRetryCount() { return parentSystemStoreRepairRetryCount; } - public boolean isParentExternalSupersetSchemaGenerationEnabled() { - return parentExternalSupersetSchemaGenerationEnabled; + public boolean isExternalSupersetSchemaGenerationEnabled() { + return externalSupersetSchemaGenerationEnabled; } public boolean isSystemSchemaInitializationAtStartTimeEnabled() { diff --git a/services/venice-controller/src/main/java/com/linkedin/venice/controller/VeniceControllerService.java b/services/venice-controller/src/main/java/com/linkedin/venice/controller/VeniceControllerService.java index 65799d01871..eca375a082c 100644 --- a/services/venice-controller/src/main/java/com/linkedin/venice/controller/VeniceControllerService.java +++ b/services/venice-controller/src/main/java/com/linkedin/venice/controller/VeniceControllerService.java @@ -90,6 +90,7 @@ public VeniceControllerService( sslConfig, accessController, icProvider, + externalSupersetSchemaGenerator, pubSubTopicRepository, pubSubClientsFactory, Arrays.asList(initRoutineForPushJobDetailsSystemStore, initRoutineForHeartbeatSystemStore)); diff --git a/services/venice-controller/src/main/java/com/linkedin/venice/controller/VeniceHelixAdmin.java b/services/venice-controller/src/main/java/com/linkedin/venice/controller/VeniceHelixAdmin.java index 922a5929b38..1da00ddc378 100644 --- a/services/venice-controller/src/main/java/com/linkedin/venice/controller/VeniceHelixAdmin.java +++ b/services/venice-controller/src/main/java/com/linkedin/venice/controller/VeniceHelixAdmin.java @@ -7,9 +7,6 @@ import static com.linkedin.venice.ConfigKeys.SSL_KAFKA_BOOTSTRAP_SERVERS; import static com.linkedin.venice.ConfigKeys.SSL_TO_KAFKA_LEGACY; import static com.linkedin.venice.controller.UserSystemStoreLifeCycleHelper.AUTO_META_SYSTEM_STORE_PUSH_ID_PREFIX; 
-import static com.linkedin.venice.meta.HybridStoreConfigImpl.DEFAULT_HYBRID_OFFSET_LAG_THRESHOLD; -import static com.linkedin.venice.meta.HybridStoreConfigImpl.DEFAULT_HYBRID_TIME_LAG_THRESHOLD; -import static com.linkedin.venice.meta.HybridStoreConfigImpl.DEFAULT_REWIND_TIME_IN_SECONDS; import static com.linkedin.venice.meta.Store.NON_EXISTING_VERSION; import static com.linkedin.venice.meta.Version.PushType; import static com.linkedin.venice.meta.VersionStatus.ERROR; @@ -53,9 +50,12 @@ import com.linkedin.venice.controller.init.SystemSchemaInitializationRoutine; import com.linkedin.venice.controller.kafka.StoreStatusDecider; import com.linkedin.venice.controller.kafka.consumer.AdminConsumerService; -import com.linkedin.venice.controller.kafka.protocol.admin.HybridStoreConfigRecord; -import com.linkedin.venice.controller.kafka.protocol.admin.StoreViewConfigRecord; import com.linkedin.venice.controller.stats.DisabledPartitionStats; +import com.linkedin.venice.controller.supersetschema.DefaultSupersetSchemaGenerator; +import com.linkedin.venice.controller.supersetschema.SupersetSchemaGenerator; +import com.linkedin.venice.controller.util.PrimaryControllerConfigUpdateUtils; +import com.linkedin.venice.controller.util.UpdateStoreUtils; +import com.linkedin.venice.controller.util.UpdateStoreWrapper; import com.linkedin.venice.controllerapi.ControllerClient; import com.linkedin.venice.controllerapi.ControllerResponse; import com.linkedin.venice.controllerapi.ControllerRoute; @@ -70,7 +70,6 @@ import com.linkedin.venice.controllerapi.UpdateStoragePersonaQueryParams; import com.linkedin.venice.controllerapi.UpdateStoreQueryParams; import com.linkedin.venice.controllerapi.VersionResponse; -import com.linkedin.venice.exceptions.ErrorType; import com.linkedin.venice.exceptions.InvalidVeniceSchemaException; import com.linkedin.venice.exceptions.ResourceStillExistsException; import com.linkedin.venice.exceptions.VeniceException; @@ -104,12 +103,8 @@ import com.linkedin.venice.ingestion.control.RealTimeTopicSwitcher; import com.linkedin.venice.kafka.protocol.enums.ControlMessageType; import com.linkedin.venice.meta.BackupStrategy; -import com.linkedin.venice.meta.BufferReplayPolicy; import com.linkedin.venice.meta.DataReplicationPolicy; -import com.linkedin.venice.meta.ETLStoreConfig; -import com.linkedin.venice.meta.ETLStoreConfigImpl; import com.linkedin.venice.meta.HybridStoreConfig; -import com.linkedin.venice.meta.HybridStoreConfigImpl; import com.linkedin.venice.meta.Instance; import com.linkedin.venice.meta.InstanceStatus; import com.linkedin.venice.meta.LiveClusterConfig; @@ -428,6 +423,10 @@ public class VeniceHelixAdmin implements Admin, StoreCleaner { private final Lazy emptyPushZSTDDictionary; + private final Optional externalSupersetSchemaGenerator; + + private final SupersetSchemaGenerator defaultSupersetSchemaGenerator = new DefaultSupersetSchemaGenerator(); + public VeniceHelixAdmin( VeniceControllerMultiClusterConfig multiClusterConfigs, MetricsRepository metricsRepository, @@ -442,6 +441,7 @@ public VeniceHelixAdmin( Optional.empty(), Optional.empty(), Optional.empty(), + Optional.empty(), pubSubTopicRepository, pubSubClientsFactory, Collections.EMPTY_LIST); @@ -456,6 +456,7 @@ public VeniceHelixAdmin( Optional sslConfig, Optional accessController, Optional icProvider, + Optional externalSupersetSchemaGenerator, PubSubTopicRepository pubSubTopicRepository, PubSubClientsFactory pubSubClientsFactory, List additionalInitRoutines) { @@ -476,6 +477,7 @@ public VeniceHelixAdmin( 
this.minNumberOfStoreVersionsToPreserve = multiClusterConfigs.getMinNumberOfStoreVersionsToPreserve(); this.d2Client = d2Client; + this.externalSupersetSchemaGenerator = externalSupersetSchemaGenerator; this.pubSubTopicRepository = pubSubTopicRepository; if (sslEnabled) { @@ -616,8 +618,7 @@ public VeniceHelixAdmin( multiClusterConfigs, this, Optional.of(AvroProtocolDefinition.METADATA_SYSTEM_SCHEMA_STORE_KEY.getCurrentProtocolVersionSchema()), - Optional.of(VeniceSystemStoreUtils.DEFAULT_USER_SYSTEM_STORE_UPDATE_QUERY_PARAMS), - true)); + Optional.of(VeniceSystemStoreUtils.DEFAULT_USER_SYSTEM_STORE_UPDATE_QUERY_PARAMS))); } if (multiClusterConfigs.isZkSharedDaVinciPushStatusSystemSchemaStoreAutoCreationEnabled()) { // Add routine to create zk shared da vinci push status system store @@ -627,8 +628,7 @@ public VeniceHelixAdmin( multiClusterConfigs, this, Optional.of(AvroProtocolDefinition.PUSH_STATUS_SYSTEM_SCHEMA_STORE_KEY.getCurrentProtocolVersionSchema()), - Optional.of(VeniceSystemStoreUtils.DEFAULT_USER_SYSTEM_STORE_UPDATE_QUERY_PARAMS), - true)); + Optional.of(VeniceSystemStoreUtils.DEFAULT_USER_SYSTEM_STORE_UPDATE_QUERY_PARAMS))); } initRoutines.addAll(additionalInitRoutines); @@ -2963,23 +2963,17 @@ public String getRealTimeTopic(String clusterName, String storeName) { if (store == null) { throwStoreDoesNotExist(clusterName, storeName); } - if (!store.isHybrid() && !store.isWriteComputationEnabled() && !store.isSystemStore()) { + if (!store.isHybrid() && !store.isSystemStore()) { logAndThrow("Store " + storeName + " is not hybrid, refusing to return a realtime topic"); } Version version = store.getVersion(store.getLargestUsedVersionNumber()); - int partitionCount = version != null ? version.getPartitionCount() : 0; + // during transition to version based partition count, some old stores may have partition count on the store // config only. + // Now store-level partition count is set when a store is converted to hybrid + int partitionCount = version != null ? version.getPartitionCount() : store.getPartitionCount(); if (partitionCount == 0) { - // Now store-level partition count is set when a store is converted to hybrid - partitionCount = store.getPartitionCount(); - if (partitionCount == 0) { - if (version == null) { - throw new VeniceException("Store: " + storeName + " is not initialized with a version yet"); - } else { - throw new VeniceException("Store: " + storeName + " has partition count set to 0"); - } - } + throw new VeniceException("Unable to determine partition count for the real-time topic " + realTimeTopic); } VeniceControllerClusterConfig clusterConfig = getHelixVeniceClusterResources(clusterName).getConfig(); @@ -3853,20 +3847,8 @@ Pair waitVersion(String clusterName, String storeName, int versi */ @Override public void setStoreCurrentVersion(String clusterName, String storeName, int versionNumber) { - this.setStoreCurrentVersion(clusterName, storeName, versionNumber, false); - } - - /** - * In most cases, parent region should not update the current version. 
This is only allowed via an update-store call - * where the region filter list only contains one region, which is the region of the parent controller - */ - private void setStoreCurrentVersion( - String clusterName, - String storeName, - int versionNumber, - boolean allowedInParent) { - if (isParent() && !allowedInParent) { - // Parent colo should not update the current version of a store unless explicitly asked to do so + if (isParent()) { + // Parent colo should not update the current version of a store LOGGER.info( "Skipping current version update for store: {} in cluster: {} because it is not allowed in the " + "parent region", @@ -3894,6 +3876,7 @@ private void setStoreCurrentVersion( } return store; }); + LOGGER.info("Set store: {} version to {} in cluster: {}", storeName, versionNumber, clusterName); } @Override @@ -3982,17 +3965,6 @@ public int getBackupVersionNumber(List versions, int currentVersion) { return NON_EXISTING_VERSION; } - /** - * Update the largest used version number of a specified store. - */ - @Override - public void setStoreLargestUsedVersion(String clusterName, String storeName, int versionNumber) { - storeMetadataUpdate(clusterName, storeName, store -> { - store.setLargestUsedVersionNumber(versionNumber); - return store; - }); - } - /** * Update the owner of a specified store. */ @@ -4010,63 +3982,13 @@ public void setStoreOwner(String clusterName, String storeName, String owner) { */ @Override public void setStorePartitionCount(String clusterName, String storeName, int partitionCount) { - VeniceControllerClusterConfig clusterConfig = getHelixVeniceClusterResources(clusterName).getConfig(); storeMetadataUpdate(clusterName, storeName, store -> { - preCheckStorePartitionCountUpdate(clusterName, store, partitionCount); - // Do not update the partitionCount on the store.version as version config is immutable. The - // version.getPartitionCount() - // is read only in getRealTimeTopic and createInternalStore creation, so modifying currentVersion should not have - // any effect. - if (partitionCount != 0) { - store.setPartitionCount(partitionCount); - } else { - store.setPartitionCount(clusterConfig.getMinNumberOfPartitions()); - } - + UpdateStoreUtils.validateStorePartitionCountUpdate(this, multiClusterConfigs, clusterName, store, partitionCount); + store.setPartitionCount(partitionCount); return store; }); } - void preCheckStorePartitionCountUpdate(String clusterName, Store store, int newPartitionCount) { - String errorMessagePrefix = "Store update error for " + store.getName() + " in cluster: " + clusterName + ": "; - VeniceControllerClusterConfig clusterConfig = getHelixVeniceClusterResources(clusterName).getConfig(); - if (store.isHybrid() && store.getPartitionCount() != newPartitionCount) { - // Allow the update if partition count is not configured and the new partition count matches RT partition count - if (store.getPartitionCount() == 0) { - TopicManager topicManager; - if (isParent()) { - // RT might not exist in parent colo. Get RT partition count from a child colo. 
- String childDatacenter = clusterConfig.getChildDatacenters().iterator().next(); - topicManager = getTopicManager(multiClusterConfigs.getChildDataCenterKafkaUrlMap().get(childDatacenter)); - } else { - topicManager = getTopicManager(); - } - PubSubTopic realTimeTopic = pubSubTopicRepository.getTopic(Version.composeRealTimeTopic(store.getName())); - if (topicManager.containsTopic(realTimeTopic) - && topicManager.getPartitionCount(realTimeTopic) == newPartitionCount) { - LOGGER.info("Allow updating store " + store.getName() + " partition count to " + newPartitionCount); - return; - } - } - String errorMessage = errorMessagePrefix + "Cannot change partition count for this hybrid store"; - LOGGER.error(errorMessage); - throw new VeniceHttpException(HttpStatus.SC_BAD_REQUEST, errorMessage, ErrorType.INVALID_CONFIG); - } - - int maxPartitionNum = clusterConfig.getMaxNumberOfPartitions(); - if (newPartitionCount > maxPartitionNum) { - String errorMessage = - errorMessagePrefix + "Partition count: " + newPartitionCount + " should be less than max: " + maxPartitionNum; - LOGGER.error(errorMessage); - throw new VeniceHttpException(HttpStatus.SC_BAD_REQUEST, errorMessage, ErrorType.INVALID_CONFIG); - } - if (newPartitionCount < 0) { - String errorMessage = errorMessagePrefix + "Partition count: " + newPartitionCount + " should NOT be negative"; - LOGGER.error(errorMessage); - throw new VeniceHttpException(HttpStatus.SC_BAD_REQUEST, errorMessage, ErrorType.INVALID_CONFIG); - } - } - void setStorePartitionerConfig(String clusterName, String storeName, PartitionerConfig partitionerConfig) { storeMetadataUpdate(clusterName, storeName, store -> { // Only amplification factor is allowed to be changed if the store is a hybrid store. @@ -4120,32 +4042,6 @@ public void setStoreReadWriteability(String clusterName, String storeName, boole }); } - /** - * We will not expose this interface to Spark server. Updating quota can only be done by #updateStore - * TODO: remove all store attribute setters. 
- */ - private void setStoreStorageQuota(String clusterName, String storeName, long storageQuotaInByte) { - storeMetadataUpdate(clusterName, storeName, store -> { - if (storageQuotaInByte < 0 && storageQuotaInByte != Store.UNLIMITED_STORAGE_QUOTA) { - throw new VeniceException("storage quota can not be less than 0"); - } - store.setStorageQuotaInByte(storageQuotaInByte); - - return store; - }); - } - - private void setStoreReadQuota(String clusterName, String storeName, long readQuotaInCU) { - storeMetadataUpdate(clusterName, storeName, store -> { - if (readQuotaInCU < 0) { - throw new VeniceException("read quota can not be less than 0"); - } - store.setReadQuotaInCU(readQuotaInCU); - - return store; - }); - } - void setAccessControl(String clusterName, String storeName, boolean accessControlled) { storeMetadataUpdate(clusterName, storeName, store -> { store.setAccessControlled(accessControlled); @@ -4213,187 +4109,6 @@ public void deleteValueSchemas(String clusterName, String storeName, Set { - store.setCompressionStrategy(compressionStrategy); - - return store; - }); - } - - private void setClientDecompressionEnabled(String clusterName, String storeName, boolean clientDecompressionEnabled) { - storeMetadataUpdate(clusterName, storeName, store -> { - store.setClientDecompressionEnabled(clientDecompressionEnabled); - return store; - }); - } - - private void setChunkingEnabled(String clusterName, String storeName, boolean chunkingEnabled) { - storeMetadataUpdate(clusterName, storeName, store -> { - store.setChunkingEnabled(chunkingEnabled); - return store; - }); - } - - private void setRmdChunkingEnabled(String clusterName, String storeName, boolean rmdChunkingEnabled) { - storeMetadataUpdate(clusterName, storeName, store -> { - store.setRmdChunkingEnabled(rmdChunkingEnabled); - return store; - }); - } - - void setIncrementalPushEnabled(String clusterName, String storeName, boolean incrementalPushEnabled) { - storeMetadataUpdate(clusterName, storeName, store -> { - VeniceControllerClusterConfig config = getHelixVeniceClusterResources(clusterName).getConfig(); - if (incrementalPushEnabled || store.isHybrid()) { - // Enabling incremental push - store.setNativeReplicationSourceFabric(config.getNativeReplicationSourceFabricAsDefaultForHybrid()); - store.setActiveActiveReplicationEnabled( - store.isActiveActiveReplicationEnabled() - || (config.isActiveActiveReplicationEnabledAsDefaultForHybrid() && !store.isSystemStore())); - } else { - // Disabling incremental push - // This is only possible when hybrid settings are set to null before turning of incremental push for the store. 
- store.setNativeReplicationSourceFabric(config.getNativeReplicationSourceFabricAsDefaultForBatchOnly()); - store.setActiveActiveReplicationEnabled(false); - } - store.setIncrementalPushEnabled(incrementalPushEnabled); - - return store; - }); - } - - private void setReplicationFactor(String clusterName, String storeName, int replicaFactor) { - storeMetadataUpdate(clusterName, storeName, store -> { - store.setReplicationFactor(replicaFactor); - - return store; - }); - } - - private void setBatchGetLimit(String clusterName, String storeName, int batchGetLimit) { - storeMetadataUpdate(clusterName, storeName, store -> { - store.setBatchGetLimit(batchGetLimit); - - return store; - }); - } - - private void setNumVersionsToPreserve(String clusterName, String storeName, int numVersionsToPreserve) { - storeMetadataUpdate(clusterName, storeName, store -> { - store.setNumVersionsToPreserve(numVersionsToPreserve); - - return store; - }); - } - - private void setStoreMigration(String clusterName, String storeName, boolean migrating) { - storeMetadataUpdate(clusterName, storeName, store -> { - store.setMigrating(migrating); - return store; - }); - } - - private void setMigrationDuplicateStore(String clusterName, String storeName, boolean migrationDuplicateStore) { - storeMetadataUpdate(clusterName, storeName, store -> { - store.setMigrationDuplicateStore(migrationDuplicateStore); - return store; - }); - } - - private void setWriteComputationEnabled(String clusterName, String storeName, boolean writeComputationEnabled) { - storeMetadataUpdate(clusterName, storeName, store -> { - store.setWriteComputationEnabled(writeComputationEnabled); - return store; - }); - } - - void setReplicationMetadataVersionID(String clusterName, String storeName, int rmdVersion) { - storeMetadataUpdate(clusterName, storeName, store -> { - store.setRmdVersion(rmdVersion); - return store; - }); - } - - private void setReadComputationEnabled(String clusterName, String storeName, boolean computationEnabled) { - storeMetadataUpdate(clusterName, storeName, store -> { - store.setReadComputationEnabled(computationEnabled); - return store; - }); - } - - void setBootstrapToOnlineTimeoutInHours(String clusterName, String storeName, int bootstrapToOnlineTimeoutInHours) { - storeMetadataUpdate(clusterName, storeName, store -> { - store.setBootstrapToOnlineTimeoutInHours(bootstrapToOnlineTimeoutInHours); - return store; - }); - } - - private void setNativeReplicationEnabled(String clusterName, String storeName, boolean nativeReplicationEnabled) { - storeMetadataUpdate(clusterName, storeName, store -> { - store.setNativeReplicationEnabled(nativeReplicationEnabled); - return store; - }); - } - - private void setPushStreamSourceAddress(String clusterName, String storeName, String pushStreamSourceAddress) { - storeMetadataUpdate(clusterName, storeName, store -> { - store.setPushStreamSourceAddress(pushStreamSourceAddress); - return store; - }); - } - - private void addStoreViews(String clusterName, String storeName, Map viewConfigMap) { - storeMetadataUpdate(clusterName, storeName, store -> { - store.setViewConfigs(StoreViewUtils.convertStringMapViewToViewConfigMap(viewConfigMap)); - return store; - }); - } - - private void setBackupStrategy(String clusterName, String storeName, BackupStrategy backupStrategy) { - storeMetadataUpdate(clusterName, storeName, store -> { - store.setBackupStrategy(backupStrategy); - return store; - }); - } - - private void setAutoSchemaRegisterPushJobEnabled( - String clusterName, - String storeName, - boolean 
autoSchemaRegisterPushJobEnabled) { - storeMetadataUpdate(clusterName, storeName, store -> { - store.setSchemaAutoRegisterFromPushJobEnabled(autoSchemaRegisterPushJobEnabled); - return store; - }); - } - - void setHybridStoreDiskQuotaEnabled(String clusterName, String storeName, boolean hybridStoreDiskQuotaEnabled) { - storeMetadataUpdate(clusterName, storeName, store -> { - store.setHybridStoreDiskQuotaEnabled(hybridStoreDiskQuotaEnabled); - return store; - }); - } - - private void setBackupVersionRetentionMs(String clusterName, String storeName, long backupVersionRetentionMs) { - storeMetadataUpdate(clusterName, storeName, store -> { - store.setBackupVersionRetentionMs(backupVersionRetentionMs); - return store; - }); - } - - private void setNativeReplicationSourceFabric( - String clusterName, - String storeName, - String nativeReplicationSourceFabric) { - storeMetadataUpdate(clusterName, storeName, store -> { - store.setNativeReplicationSourceFabric(nativeReplicationSourceFabric); - return store; - }); - } - void setActiveActiveReplicationEnabled(String clusterName, String storeName, boolean activeActiveReplicationEnabled) { storeMetadataUpdate(clusterName, storeName, store -> { store.setActiveActiveReplicationEnabled(activeActiveReplicationEnabled); @@ -4401,40 +4116,6 @@ void setActiveActiveReplicationEnabled(String clusterName, String storeName, boo }); } - private void disableMetaSystemStore(String clusterName, String storeName) { - LOGGER.info("Disabling meta system store for store: {} of cluster: {}", storeName, clusterName); - storeMetadataUpdate(clusterName, storeName, store -> { - store.setStoreMetaSystemStoreEnabled(false); - store.setStoreMetadataSystemStoreEnabled(false); - return store; - }); - } - - private void disableDavinciPushStatusStore(String clusterName, String storeName) { - LOGGER.info("Disabling davinci push status store for store: {} of cluster: {}", storeName, clusterName); - storeMetadataUpdate(clusterName, storeName, store -> { - store.setDaVinciPushStatusStoreEnabled(false); - return store; - }); - } - - private void setLatestSupersetSchemaId(String clusterName, String storeName, int latestSupersetSchemaId) { - storeMetadataUpdate(clusterName, storeName, store -> { - store.setLatestSuperSetValueSchemaId(latestSupersetSchemaId); - return store; - }); - } - - private void setStorageNodeReadQuotaEnabled( - String clusterName, - String storeName, - boolean storageNodeReadQuotaEnabled) { - storeMetadataUpdate(clusterName, storeName, store -> { - store.setStorageNodeReadQuotaEnabled(storageNodeReadQuotaEnabled); - return store; - }); - } - /** * TODO: some logics are in parent controller {@link VeniceParentHelixAdmin} #updateStore and * some are in the child controller here. Need to unify them in the future. @@ -4448,6 +4129,15 @@ public void updateStore(String clusterName, String storeName, UpdateStoreQueryPa } } + @Override + public SupersetSchemaGenerator getSupersetSchemaGenerator(String clusterName) { + if (externalSupersetSchemaGenerator.isPresent() + && getMultiClusterConfigs().getControllerConfig(clusterName).isExternalSupersetSchemaGenerationEnabled()) { + return externalSupersetSchemaGenerator.get(); + } + return defaultSupersetSchemaGenerator; + } + /** * Update the {@linkplain LiveClusterConfig} at runtime for a specified cluster. * @param clusterName name of the Venice cluster. 
@@ -4483,369 +4173,55 @@ public void updateClusterConfig(String clusterName, UpdateClusterConfigQueryPara } private void internalUpdateStore(String clusterName, String storeName, UpdateStoreQueryParams params) { - // There are certain configs that are only allowed to be updated in child regions. We might still want the ability - // to update such configs in the parent region via the Admin tool for operational reasons. So, we allow such updates - // if the regions filter only specifies one region, which is the parent region. - boolean onlyParentRegionFilter = false; - - // Check whether the command affects this region. - if (params.getRegionsFilter().isPresent()) { - Set regionsFilter = parseRegionsFilterList(params.getRegionsFilter().get()); - if (!regionsFilter.contains(multiClusterConfigs.getRegionName())) { - LOGGER.info( - "UpdateStore command will be skipped for store: {} in cluster: {}, because the region filter is {}" - + " which doesn't include the current region: {}", - storeName, - clusterName, - regionsFilter, - multiClusterConfigs.getRegionName()); - return; - } - - if (isParent() && regionsFilter.size() == 1) { - onlyParentRegionFilter = true; - } + UpdateStoreWrapper updatedStoreWrapper = + UpdateStoreUtils.getStoreUpdate(this, clusterName, storeName, params, true); + if (updatedStoreWrapper == null) { + return; } - Store originalStore = getStore(clusterName, storeName); - if (originalStore == null) { - throw new VeniceNoStoreException(storeName, clusterName); - } - if (originalStore.isHybrid()) { - // If this is a hybrid store, always try to disable compaction if RT topic exists. - try { - PubSubTopic rtTopic = pubSubTopicRepository.getTopic(Version.composeRealTimeTopic(storeName)); - getTopicManager().updateTopicCompactionPolicy(rtTopic, false); - } catch (PubSubTopicDoesNotExistException e) { - LOGGER.error("Could not find realtime topic for hybrid store {}", storeName); - } - } + Store originalStore = updatedStoreWrapper.originalStore; + Store updatedStore = updatedStoreWrapper.updatedStore; - Optional owner = params.getOwner(); - Optional readability = params.getEnableReads(); - Optional writeability = params.getEnableWrites(); - Optional partitionCount = params.getPartitionCount(); - Optional partitionerClass = params.getPartitionerClass(); - Optional> partitionerParams = params.getPartitionerParams(); - Optional amplificationFactor = params.getAmplificationFactor(); - Optional storageQuotaInByte = params.getStorageQuotaInByte(); - Optional readQuotaInCU = params.getReadQuotaInCU(); - Optional currentVersion = params.getCurrentVersion(); - Optional largestUsedVersionNumber = params.getLargestUsedVersionNumber(); - Optional hybridRewindSeconds = params.getHybridRewindSeconds(); - Optional hybridOffsetLagThreshold = params.getHybridOffsetLagThreshold(); - Optional hybridTimeLagThreshold = params.getHybridTimeLagThreshold(); - Optional hybridDataReplicationPolicy = params.getHybridDataReplicationPolicy(); - Optional hybridBufferReplayPolicy = params.getHybridBufferReplayPolicy(); - Optional accessControlled = params.getAccessControlled(); - Optional compressionStrategy = params.getCompressionStrategy(); - Optional clientDecompressionEnabled = params.getClientDecompressionEnabled(); - Optional chunkingEnabled = params.getChunkingEnabled(); - Optional rmdChunkingEnabled = params.getRmdChunkingEnabled(); - Optional batchGetLimit = params.getBatchGetLimit(); - Optional numVersionsToPreserve = params.getNumVersionsToPreserve(); - Optional incrementalPushEnabled = 
params.getIncrementalPushEnabled(); - Optional storeMigration = params.getStoreMigration(); - Optional writeComputationEnabled = params.getWriteComputationEnabled(); - Optional replicationMetadataVersionID = params.getReplicationMetadataVersionID(); - Optional readComputationEnabled = params.getReadComputationEnabled(); - Optional bootstrapToOnlineTimeoutInHours = params.getBootstrapToOnlineTimeoutInHours(); - Optional backupStrategy = params.getBackupStrategy(); - Optional autoSchemaRegisterPushJobEnabled = params.getAutoSchemaRegisterPushJobEnabled(); - Optional hybridStoreDiskQuotaEnabled = params.getHybridStoreDiskQuotaEnabled(); - Optional regularVersionETLEnabled = params.getRegularVersionETLEnabled(); - Optional futureVersionETLEnabled = params.getFutureVersionETLEnabled(); - Optional etledUserProxyAccount = params.getETLedProxyUserAccount(); - Optional nativeReplicationEnabled = params.getNativeReplicationEnabled(); - Optional pushStreamSourceAddress = params.getPushStreamSourceAddress(); - Optional backupVersionRetentionMs = params.getBackupVersionRetentionMs(); - Optional replicationFactor = params.getReplicationFactor(); - Optional migrationDuplicateStore = params.getMigrationDuplicateStore(); - Optional nativeReplicationSourceFabric = params.getNativeReplicationSourceFabric(); - Optional activeActiveReplicationEnabled = params.getActiveActiveReplicationEnabled(); - Optional personaName = params.getStoragePersona(); - Optional> storeViews = params.getStoreViews(); - Optional latestSupersetSchemaId = params.getLatestSupersetSchemaId(); - Optional storageNodeReadQuotaEnabled = params.getStorageNodeReadQuotaEnabled(); - Optional minCompactionLagSeconds = params.getMinCompactionLagSeconds(); - Optional maxCompactionLagSeconds = params.getMaxCompactionLagSeconds(); - Optional maxRecordSizeBytes = params.getMaxRecordSizeBytes(); - Optional unusedSchemaDeletionEnabled = params.getUnusedSchemaDeletionEnabled(); - Optional blobTransferEnabled = params.getBlobTransferEnabled(); - - final Optional newHybridStoreConfig; - if (hybridRewindSeconds.isPresent() || hybridOffsetLagThreshold.isPresent() || hybridTimeLagThreshold.isPresent() - || hybridDataReplicationPolicy.isPresent() || hybridBufferReplayPolicy.isPresent()) { - HybridStoreConfig hybridConfig = mergeNewSettingsIntoOldHybridStoreConfig( - originalStore, - hybridRewindSeconds, - hybridOffsetLagThreshold, - hybridTimeLagThreshold, - hybridDataReplicationPolicy, - hybridBufferReplayPolicy); - newHybridStoreConfig = Optional.ofNullable(hybridConfig); - } else { - newHybridStoreConfig = Optional.empty(); + if (updatedStore == originalStore) { + return; } try { - if (owner.isPresent()) { - setStoreOwner(clusterName, storeName, owner.get()); - } - - if (readability.isPresent()) { - setStoreReadability(clusterName, storeName, readability.get()); - } - - if (writeability.isPresent()) { - setStoreWriteability(clusterName, storeName, writeability.get()); - } - - if (partitionCount.isPresent()) { - setStorePartitionCount(clusterName, storeName, partitionCount.get()); - } - - /** - * If either of these three fields is not present, we should use store's original value to construct correct - * updated partitioner config. 
- */ - if (partitionerClass.isPresent() || partitionerParams.isPresent() || amplificationFactor.isPresent()) { - PartitionerConfig updatedPartitionerConfig = mergeNewSettingsIntoOldPartitionerConfig( - originalStore, - partitionerClass, - partitionerParams, - amplificationFactor); - setStorePartitionerConfig(clusterName, storeName, updatedPartitionerConfig); - } - - if (storageQuotaInByte.isPresent()) { - setStoreStorageQuota(clusterName, storeName, storageQuotaInByte.get()); - } - - if (readQuotaInCU.isPresent()) { - HelixVeniceClusterResources resources = getHelixVeniceClusterResources(clusterName); - ZkRoutersClusterManager routersClusterManager = resources.getRoutersClusterManager(); - int routerCount = routersClusterManager.getLiveRoutersCount(); - VeniceControllerClusterConfig clusterConfig = getHelixVeniceClusterResources(clusterName).getConfig(); - int defaultReadQuotaPerRouter = clusterConfig.getDefaultReadQuotaPerRouter(); - - if (Math.max(defaultReadQuotaPerRouter, routerCount * defaultReadQuotaPerRouter) < readQuotaInCU.get()) { - throw new VeniceException( - "Cannot update read quota for store " + storeName + " in cluster " + clusterName + ". Read quota " - + readQuotaInCU.get() + " requested is more than the cluster quota."); + if (originalStore.isHybrid()) { + // If this is a hybrid store, always try to disable compaction if RT topic exists. + try { + PubSubTopic rtTopic = pubSubTopicRepository.getTopic(Version.composeRealTimeTopic(storeName)); + getTopicManager().updateTopicCompactionPolicy(rtTopic, false); + } catch (PubSubTopicDoesNotExistException e) { + LOGGER.error("Could not find realtime topic for hybrid store {}", storeName); } - setStoreReadQuota(clusterName, storeName, readQuotaInCU.get()); - } - - if (currentVersion.isPresent()) { - setStoreCurrentVersion(clusterName, storeName, currentVersion.get(), onlyParentRegionFilter); - } - - if (largestUsedVersionNumber.isPresent()) { - setStoreLargestUsedVersion(clusterName, storeName, largestUsedVersionNumber.get()); - } - - if (bootstrapToOnlineTimeoutInHours.isPresent()) { - setBootstrapToOnlineTimeoutInHours(clusterName, storeName, bootstrapToOnlineTimeoutInHours.get()); - } - - VeniceControllerClusterConfig clusterConfig = getHelixVeniceClusterResources(clusterName).getConfig(); - if (newHybridStoreConfig.isPresent()) { - // To fix the final variable problem in the lambda expression - final HybridStoreConfig finalHybridConfig = newHybridStoreConfig.get(); - storeMetadataUpdate(clusterName, storeName, store -> { - if (!isHybrid(finalHybridConfig)) { - /** - * If all the hybrid config values are negative, it indicates that the store is being set back to batch-only store. - * We cannot remove the RT topic immediately because with NR and AA, existing current version is - * still consuming the RT topic. - */ - store.setHybridStoreConfig(null); - store.setIncrementalPushEnabled(false); - // Enable/disable native replication for batch-only stores if the cluster level config for new batch - // stores is on - store.setNativeReplicationSourceFabric( - clusterConfig.getNativeReplicationSourceFabricAsDefaultForBatchOnly()); - store.setActiveActiveReplicationEnabled(false); - } else { - // Batch-only store is being converted to hybrid store. 
- if (!store.isHybrid()) { - /* - * Enable/disable native replication for hybrid stores if the cluster level config - * for new hybrid stores is on - */ - store - .setNativeReplicationSourceFabric(clusterConfig.getNativeReplicationSourceFabricAsDefaultForHybrid()); - /* - * Enable/disable active-active replication for user hybrid stores if the cluster level config - * for new hybrid stores is on - */ - store.setActiveActiveReplicationEnabled( - store.isActiveActiveReplicationEnabled() - || (clusterConfig.isActiveActiveReplicationEnabledAsDefaultForHybrid() - && !store.isSystemStore())); - } - store.setHybridStoreConfig(finalHybridConfig); - PubSubTopic rtTopic = pubSubTopicRepository.getTopic(Version.composeRealTimeTopic(storeName)); - if (getTopicManager().containsTopicAndAllPartitionsAreOnline(rtTopic)) { - // RT already exists, ensure the retention is correct - getTopicManager() - .updateTopicRetention(rtTopic, StoreUtils.getExpectedRetentionTimeInMs(store, finalHybridConfig)); - } - } - return store; - }); - } - - if (accessControlled.isPresent()) { - setAccessControl(clusterName, storeName, accessControlled.get()); - } - - if (compressionStrategy.isPresent()) { - setStoreCompressionStrategy(clusterName, storeName, compressionStrategy.get()); - } - - if (clientDecompressionEnabled.isPresent()) { - setClientDecompressionEnabled(clusterName, storeName, clientDecompressionEnabled.get()); - } - - if (chunkingEnabled.isPresent()) { - setChunkingEnabled(clusterName, storeName, chunkingEnabled.get()); - } - - if (rmdChunkingEnabled.isPresent()) { - setRmdChunkingEnabled(clusterName, storeName, rmdChunkingEnabled.get()); - } - - if (batchGetLimit.isPresent()) { - setBatchGetLimit(clusterName, storeName, batchGetLimit.get()); } - if (numVersionsToPreserve.isPresent()) { - setNumVersionsToPreserve(clusterName, storeName, numVersionsToPreserve.get()); - } - - if (incrementalPushEnabled.isPresent()) { - if (incrementalPushEnabled.get()) { - enableHybridModeOrUpdateSettings(clusterName, storeName); + if (updatedStore.isHybrid()) { + PubSubTopic rtTopic = pubSubTopicRepository.getTopic(Version.composeRealTimeTopic(storeName)); + if (getTopicManager().containsTopicAndAllPartitionsAreOnline(rtTopic)) { + // RT already exists, ensure the retention is correct + getTopicManager().updateTopicRetention( + rtTopic, + StoreUtils.getExpectedRetentionTimeInMs(updatedStore, updatedStore.getHybridStoreConfig())); } - setIncrementalPushEnabled(clusterName, storeName, incrementalPushEnabled.get()); - } - - if (replicationFactor.isPresent()) { - setReplicationFactor(clusterName, storeName, replicationFactor.get()); - } - - if (storeMigration.isPresent()) { - setStoreMigration(clusterName, storeName, storeMigration.get()); - } - - if (migrationDuplicateStore.isPresent()) { - setMigrationDuplicateStore(clusterName, storeName, migrationDuplicateStore.get()); - } - - if (writeComputationEnabled.isPresent()) { - setWriteComputationEnabled(clusterName, storeName, writeComputationEnabled.get()); - } - - if (replicationMetadataVersionID.isPresent()) { - setReplicationMetadataVersionID(clusterName, storeName, replicationMetadataVersionID.get()); } - if (readComputationEnabled.isPresent()) { - setReadComputationEnabled(clusterName, storeName, readComputationEnabled.get()); - } - - if (nativeReplicationEnabled.isPresent()) { - setNativeReplicationEnabled(clusterName, storeName, nativeReplicationEnabled.get()); - } - - if (activeActiveReplicationEnabled.isPresent()) { - setActiveActiveReplicationEnabled(clusterName, storeName, 
activeActiveReplicationEnabled.get()); - } - - if (pushStreamSourceAddress.isPresent()) { - setPushStreamSourceAddress(clusterName, storeName, pushStreamSourceAddress.get()); - } - - if (backupStrategy.isPresent()) { - setBackupStrategy(clusterName, storeName, backupStrategy.get()); - } - - autoSchemaRegisterPushJobEnabled - .ifPresent(value -> setAutoSchemaRegisterPushJobEnabled(clusterName, storeName, value)); - hybridStoreDiskQuotaEnabled.ifPresent(value -> setHybridStoreDiskQuotaEnabled(clusterName, storeName, value)); - if (regularVersionETLEnabled.isPresent() || futureVersionETLEnabled.isPresent() - || etledUserProxyAccount.isPresent()) { - ETLStoreConfig etlStoreConfig = new ETLStoreConfigImpl( - etledUserProxyAccount.orElse(originalStore.getEtlStoreConfig().getEtledUserProxyAccount()), - regularVersionETLEnabled.orElse(originalStore.getEtlStoreConfig().isRegularVersionETLEnabled()), - futureVersionETLEnabled.orElse(originalStore.getEtlStoreConfig().isFutureVersionETLEnabled())); - storeMetadataUpdate(clusterName, storeName, store -> { - store.setEtlStoreConfig(etlStoreConfig); - return store; - }); - } - if (backupVersionRetentionMs.isPresent()) { - setBackupVersionRetentionMs(clusterName, storeName, backupVersionRetentionMs.get()); - } - - if (nativeReplicationSourceFabric.isPresent()) { - setNativeReplicationSourceFabric(clusterName, storeName, nativeReplicationSourceFabric.get()); - } - - if (params.disableMetaStore().isPresent() && params.disableMetaStore().get()) { - disableMetaSystemStore(clusterName, storeName); - } - - if (params.disableDavinciPushStatusStore().isPresent() && params.disableDavinciPushStatusStore().get()) { - disableDavinciPushStatusStore(clusterName, storeName); - } + // All validations are done. We are ready to persist the update on Zk + storeMetadataUpdate(clusterName, storeName, store -> updatedStore); + Optional<String> personaName = params.getStoragePersona(); if (personaName.isPresent()) { StoragePersonaRepository repository = getHelixVeniceClusterResources(clusterName).getStoragePersonaRepository(); - repository.addStoresToPersona(personaName.get(), Arrays.asList(storeName)); + repository.addStoresToPersona(personaName.get(), Collections.singletonList(storeName)); } - if (storeViews.isPresent()) { - addStoreViews(clusterName, storeName, storeViews.get()); - } - - if (latestSupersetSchemaId.isPresent()) { - setLatestSupersetSchemaId(clusterName, storeName, latestSupersetSchemaId.get()); - } - - if (minCompactionLagSeconds.isPresent()) { - storeMetadataUpdate(clusterName, storeName, store -> { - store.setMinCompactionLagSeconds(minCompactionLagSeconds.get()); - return store; - }); - } - if (maxCompactionLagSeconds.isPresent()) { - storeMetadataUpdate(clusterName, storeName, store -> { - store.setMaxCompactionLagSeconds(maxCompactionLagSeconds.get()); - return store; - }); + // Since we expect the parent controller to emit the actions to the Admin channel where necessary, we need to run + // it within the context of VeniceParentHelixAdmin. So, here, we only run it for the child controller if it is + // running in a single-region mode.
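+ // Note: isPrimary() returns true when the controller runs in single-region mode or when it is the parent in a multi-region setup (see the isPrimary() implementation added later in this class), so the condition below limits the post-update actions to the sole controller of a single-region deployment.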
+ if (!isParent() && isPrimary()) { + UpdateStoreUtils.handlePostUpdateActions(this, clusterName, storeName); } - - maxRecordSizeBytes.ifPresent(aInt -> storeMetadataUpdate(clusterName, storeName, store -> { - store.setMaxRecordSizeBytes(aInt); - return store; - })); - - unusedSchemaDeletionEnabled.ifPresent(aBoolean -> storeMetadataUpdate(clusterName, storeName, store -> { - store.setUnusedSchemaDeletionEnabled(aBoolean); - return store; - })); - - storageNodeReadQuotaEnabled - .ifPresent(aBoolean -> setStorageNodeReadQuotaEnabled(clusterName, storeName, aBoolean)); - - blobTransferEnabled.ifPresent(aBoolean -> storeMetadataUpdate(clusterName, storeName, store -> { - store.setBlobTransferEnabled(aBoolean); - return store; - })); - LOGGER.info("Finished updating store: {} in cluster: {}", storeName, clusterName); } catch (VeniceException e) { LOGGER.error( @@ -4856,8 +4232,7 @@ private void internalUpdateStore(String clusterName, String storeName, UpdateSto // rollback to original store storeMetadataUpdate(clusterName, storeName, store -> originalStore); PubSubTopic rtTopic = pubSubTopicRepository.getTopic(Version.composeRealTimeTopic(storeName)); - if (originalStore.isHybrid() && newHybridStoreConfig.isPresent() - && getTopicManager().containsTopicAndAllPartitionsAreOnline(rtTopic)) { + if (originalStore.isHybrid() && getTopicManager().containsTopicAndAllPartitionsAreOnline(rtTopic)) { // Ensure the topic retention is rolled back too getTopicManager().updateTopicRetention( rtTopic, @@ -4872,35 +4247,6 @@ && getTopicManager().containsTopicAndAllPartitionsAreOnline(rtTopic)) { } } - /** - * Enabling hybrid mode for incremental push store is moved into - * {@link VeniceParentHelixAdmin#updateStore(String, String, UpdateStoreQueryParams)} - * TODO: Remove the method and its usage after the deployment of parent controller updateStore change. - */ - private void enableHybridModeOrUpdateSettings(String clusterName, String storeName) { - storeMetadataUpdate(clusterName, storeName, store -> { - HybridStoreConfig hybridStoreConfig = store.getHybridStoreConfig(); - if (hybridStoreConfig == null) { - store.setHybridStoreConfig( - new HybridStoreConfigImpl( - DEFAULT_REWIND_TIME_IN_SECONDS, - DEFAULT_HYBRID_OFFSET_LAG_THRESHOLD, - DEFAULT_HYBRID_TIME_LAG_THRESHOLD, - DataReplicationPolicy.NONE, - null)); - } else if (hybridStoreConfig.getDataReplicationPolicy() == null) { - store.setHybridStoreConfig( - new HybridStoreConfigImpl( - hybridStoreConfig.getRewindTimeInSeconds(), - hybridStoreConfig.getOffsetLagThresholdToGoOnline(), - hybridStoreConfig.getProducerTimestampLagThresholdToGoOnlineInSeconds(), - DataReplicationPolicy.NONE, - hybridStoreConfig.getBufferReplayPolicy())); - } - return store; - }); - } - /** * This method is invoked in parent controllers for store migration. */ @@ -4942,132 +4288,6 @@ public void replicateUpdateStore(String clusterName, String storeName, UpdateSto } } - /** - * Used by both the {@link VeniceHelixAdmin} and the {@link VeniceParentHelixAdmin} - * - * @param oldStore Existing Store that is the source for updates. This object will not be modified by this method. 
- * @param hybridRewindSeconds Optional is present if the returned object should include a new rewind time - * @param hybridOffsetLagThreshold Optional is present if the returned object should include a new offset lag threshold - * @return null if oldStore has no hybrid configs and optionals are not present, - * otherwise a fully specified {@link HybridStoreConfig} - */ - protected static HybridStoreConfig mergeNewSettingsIntoOldHybridStoreConfig( - Store oldStore, - Optional hybridRewindSeconds, - Optional hybridOffsetLagThreshold, - Optional hybridTimeLagThreshold, - Optional hybridDataReplicationPolicy, - Optional bufferReplayPolicy) { - if (!hybridRewindSeconds.isPresent() && !hybridOffsetLagThreshold.isPresent() && !oldStore.isHybrid()) { - return null; // For the nullable union in the avro record - } - HybridStoreConfig mergedHybridStoreConfig; - if (oldStore.isHybrid()) { // for an existing hybrid store, just replace any specified values - HybridStoreConfig oldHybridConfig = oldStore.getHybridStoreConfig().clone(); - mergedHybridStoreConfig = new HybridStoreConfigImpl( - hybridRewindSeconds.isPresent() ? hybridRewindSeconds.get() : oldHybridConfig.getRewindTimeInSeconds(), - hybridOffsetLagThreshold.isPresent() - ? hybridOffsetLagThreshold.get() - : oldHybridConfig.getOffsetLagThresholdToGoOnline(), - hybridTimeLagThreshold.isPresent() - ? hybridTimeLagThreshold.get() - : oldHybridConfig.getProducerTimestampLagThresholdToGoOnlineInSeconds(), - hybridDataReplicationPolicy.isPresent() - ? hybridDataReplicationPolicy.get() - : oldHybridConfig.getDataReplicationPolicy(), - bufferReplayPolicy.isPresent() ? bufferReplayPolicy.get() : oldHybridConfig.getBufferReplayPolicy()); - } else { - // switching a non-hybrid store to hybrid; must specify: - // 1. rewind time - // 2. either offset lag threshold or time lag threshold, or both - if (!(hybridRewindSeconds.isPresent() - && (hybridOffsetLagThreshold.isPresent() || hybridTimeLagThreshold.isPresent()))) { - throw new VeniceException( - oldStore.getName() + " was not a hybrid store. 
In order to make it a hybrid store both " - + " rewind time in seconds and offset or time lag threshold must be specified"); - } - mergedHybridStoreConfig = new HybridStoreConfigImpl( - hybridRewindSeconds.get(), - // If not specified, offset/time lag threshold will be -1 and will not be used to determine whether - // a partition is ready to serve - hybridOffsetLagThreshold.orElse(DEFAULT_HYBRID_OFFSET_LAG_THRESHOLD), - hybridTimeLagThreshold.orElse(DEFAULT_HYBRID_TIME_LAG_THRESHOLD), - hybridDataReplicationPolicy.orElse(DataReplicationPolicy.NON_AGGREGATE), - bufferReplayPolicy.orElse(BufferReplayPolicy.REWIND_FROM_EOP)); - } - if (mergedHybridStoreConfig.getRewindTimeInSeconds() > 0 - && mergedHybridStoreConfig.getOffsetLagThresholdToGoOnline() < 0 - && mergedHybridStoreConfig.getProducerTimestampLagThresholdToGoOnlineInSeconds() < 0) { - throw new VeniceException( - "Both offset lag threshold and time lag threshold are negative when setting hybrid" + " configs for store " - + oldStore.getName()); - } - return mergedHybridStoreConfig; - } - - static PartitionerConfig mergeNewSettingsIntoOldPartitionerConfig( - Store oldStore, - Optional partitionerClass, - Optional> partitionerParams, - Optional amplificationFactor) { - PartitionerConfig originalPartitionerConfig; - if (oldStore.getPartitionerConfig() == null) { - originalPartitionerConfig = new PartitionerConfigImpl(); - } else { - originalPartitionerConfig = oldStore.getPartitionerConfig(); - } - return new PartitionerConfigImpl( - partitionerClass.orElse(originalPartitionerConfig.getPartitionerClass()), - partitionerParams.orElse(originalPartitionerConfig.getPartitionerParams()), - amplificationFactor.orElse(originalPartitionerConfig.getAmplificationFactor())); - } - - static Map mergeNewViewConfigsIntoOldConfigs( - Store oldStore, - Map viewParameters) throws VeniceException { - // Merge the existing configs with the incoming configs. The new configs will override existing ones which share the - // same key. - Map oldViewConfigMap = oldStore.getViewConfigs(); - if (oldViewConfigMap == null) { - oldViewConfigMap = new HashMap<>(); - } - Map mergedConfigs = - StoreViewUtils.convertViewConfigMapToStoreViewRecordMap(oldViewConfigMap); - mergedConfigs.putAll(StoreViewUtils.convertStringMapViewToStoreViewConfigRecordMap(viewParameters)); - return mergedConfigs; - } - - static Map addNewViewConfigsIntoOldConfigs( - Store oldStore, - String viewClass, - ViewConfig viewConfig) throws VeniceException { - // Add new view config into the existing config map. The new configs will override existing ones which share the - // same key. - Map oldViewConfigMap = oldStore.getViewConfigs(); - if (oldViewConfigMap == null) { - oldViewConfigMap = new HashMap<>(); - } - Map mergedConfigs = - StoreViewUtils.convertViewConfigMapToStoreViewRecordMap(oldViewConfigMap); - - StoreViewConfigRecord newStoreViewConfigRecord = - StoreViewUtils.convertViewConfigToStoreViewConfigRecord(viewConfig); - mergedConfigs.put(viewClass, newStoreViewConfigRecord); - return mergedConfigs; - } - - static Map removeViewConfigFromStoreViewConfigMap(Store oldStore, String viewClass) - throws VeniceException { - Map oldViewConfigMap = oldStore.getViewConfigs(); - if (oldViewConfigMap == null) { - oldViewConfigMap = new HashMap<>(); - } - Map mergedConfigs = - StoreViewUtils.convertViewConfigMapToStoreViewRecordMap(oldViewConfigMap); - mergedConfigs.remove(viewClass); - return mergedConfigs; - } - /** * Update the store metadata by applying provided operation. 
* @param clusterName name of the cluster. @@ -5361,8 +4581,9 @@ public SchemaEntry addValueSchema( DirectionalSchemaCompatibilityType expectedCompatibilityType) { checkControllerLeadershipFor(clusterName); ReadWriteSchemaRepository schemaRepository = getHelixVeniceClusterResources(clusterName).getSchemaRepository(); - schemaRepository.addValueSchema(storeName, valueSchemaStr, expectedCompatibilityType); - return new SchemaEntry(schemaRepository.getValueSchemaId(storeName, valueSchemaStr), valueSchemaStr); + int newValueSchemaId = + schemaRepository.preCheckValueSchemaAndGetNextAvailableId(storeName, valueSchemaStr, expectedCompatibilityType); + return addValueSchema(clusterName, storeName, valueSchemaStr, newValueSchemaId, expectedCompatibilityType); } /** @@ -5379,8 +4600,14 @@ public SchemaEntry addValueSchema( DirectionalSchemaCompatibilityType compatibilityType) { checkControllerLeadershipFor(clusterName); ReadWriteSchemaRepository schemaRepository = getHelixVeniceClusterResources(clusterName).getSchemaRepository(); + + if (schemaId == SchemaData.DUPLICATE_VALUE_SCHEMA_CODE) { + return new SchemaEntry(schemaRepository.getValueSchemaId(storeName, valueSchemaStr), valueSchemaStr); + } + int newValueSchemaId = schemaRepository.preCheckValueSchemaAndGetNextAvailableId(storeName, valueSchemaStr, compatibilityType); + if (newValueSchemaId != SchemaData.DUPLICATE_VALUE_SCHEMA_CODE && newValueSchemaId != schemaId) { throw new VeniceException( "Inconsistent value schema id between the caller and the local schema repository." @@ -5388,7 +4615,15 @@ public SchemaEntry addValueSchema( + newValueSchemaId + " for store " + storeName + " in cluster " + clusterName + " Schema: " + valueSchemaStr); } - return schemaRepository.addValueSchema(storeName, valueSchemaStr, newValueSchemaId); + SchemaEntry addedSchemaEntry = schemaRepository.addValueSchema(storeName, valueSchemaStr, schemaId); + + if (isPrimary() && !isParent() && newValueSchemaId != SchemaData.DUPLICATE_VALUE_SCHEMA_CODE) { + // Now register all inferred schemas for the store if this is a child controller in single-region mode. + // Parent in multi-region mode will register all inferred schemas via the admin channel. + PrimaryControllerConfigUpdateUtils.registerInferredSchemas(this, clusterName, storeName); + } + + return addedSchemaEntry; } /** @@ -5442,14 +4677,8 @@ public DerivedSchemaEntry removeDerivedSchema( .removeDerivedSchema(storeName, valueSchemaId, derivedSchemaId); } - /** - * Add a new superset schema for the given store with all specified properties. - *

- * Generate the superset schema off the current schema and latest superset schema (if any, if not pick the latest value schema) existing in the store. - * If the newly generated superset schema is unique add it to the store and update latestSuperSetValueSchemaId of the store. - */ @Override - public SchemaEntry addSupersetSchema( + public void addSupersetSchema( String clusterName, String storeName, String valueSchema, @@ -5459,12 +4688,16 @@ public SchemaEntry addSupersetSchema( checkControllerLeadershipFor(clusterName); ReadWriteSchemaRepository schemaRepository = getHelixVeniceClusterResources(clusterName).getSchemaRepository(); + if (valueSchemaId != SchemaData.INVALID_VALUE_SCHEMA_ID) { + // add the value schema + schemaRepository.addValueSchema(storeName, valueSchema, valueSchemaId); + } + final SchemaEntry existingSupersetSchemaEntry = schemaRepository.getValueSchema(storeName, supersetSchemaId); if (existingSupersetSchemaEntry == null) { // If the new superset schema does not exist in the schema repo, add it LOGGER.info("Adding superset schema: {} for store: {}", supersetSchemaStr, storeName); schemaRepository.addValueSchema(storeName, supersetSchemaStr, supersetSchemaId); - } else { final Schema newSupersetSchema = AvroSchemaParseUtils.parseSchemaFromJSONStrictValidation(supersetSchemaStr); if (!AvroSchemaUtils.compareSchemaIgnoreFieldOrder(existingSupersetSchemaEntry.getSchema(), newSupersetSchema)) { @@ -5474,25 +4707,10 @@ public SchemaEntry addSupersetSchema( } } - // add the value schema - return schemaRepository.addValueSchema(storeName, valueSchema, valueSchemaId); - } - - int getValueSchemaIdIgnoreFieldOrder( - String clusterName, - String storeName, - String valueSchemaStr, - Comparator schemaComparator) { - checkControllerLeadershipFor(clusterName); - SchemaEntry valueSchemaEntry = new SchemaEntry(SchemaData.UNKNOWN_SCHEMA_ID, valueSchemaStr); - - for (SchemaEntry schemaEntry: getValueSchemas(clusterName, storeName)) { - if (schemaComparator.compare(schemaEntry.getSchema(), valueSchemaEntry.getSchema()) == 0) { - return schemaEntry.getId(); - } - } - return SchemaData.INVALID_VALUE_SCHEMA_ID; - + storeMetadataUpdate(clusterName, storeName, store -> { + store.setLatestSuperSetValueSchemaId(supersetSchemaId); + return store; + }); } int checkPreConditionForAddValueSchemaAndGetNewSchemaId( @@ -5528,28 +4746,7 @@ public Collection getReplicationMetadataSchemas(String clusterNa return schemaRepo.getReplicationMetadataSchemas(storeName); } - boolean checkIfValueSchemaAlreadyHasRmdSchema( - String clusterName, - String storeName, - final int valueSchemaID, - final int replicationMetadataVersionId) { - checkControllerLeadershipFor(clusterName); - Collection schemaEntries = - getHelixVeniceClusterResources(clusterName).getSchemaRepository().getReplicationMetadataSchemas(storeName); - for (RmdSchemaEntry rmdSchemaEntry: schemaEntries) { - if (rmdSchemaEntry.getValueSchemaID() == valueSchemaID - && rmdSchemaEntry.getId() == replicationMetadataVersionId) { - return true; - } - } - return false; - } - - boolean checkIfMetadataSchemaAlreadyPresent( - String clusterName, - String storeName, - int valueSchemaId, - RmdSchemaEntry rmdSchemaEntry) { + boolean checkIfMetadataSchemaAlreadyPresent(String clusterName, String storeName, RmdSchemaEntry rmdSchemaEntry) { checkControllerLeadershipFor(clusterName); try { Collection schemaEntries = @@ -5581,7 +4778,7 @@ public RmdSchemaEntry addReplicationMetadataSchema( RmdSchemaEntry rmdSchemaEntry = new RmdSchemaEntry(valueSchemaId, 
replicationMetadataVersionId, replicationMetadataSchemaStr); - if (checkIfMetadataSchemaAlreadyPresent(clusterName, storeName, valueSchemaId, rmdSchemaEntry)) { + if (checkIfMetadataSchemaAlreadyPresent(clusterName, storeName, rmdSchemaEntry)) { LOGGER.info( "Timestamp metadata schema Already present: for store: {} in cluster: {} metadataSchema: {} " + "replicationMetadataVersionId: {} valueSchemaId: {}", @@ -5708,13 +4905,6 @@ public Map getStorageNodesStatus(String clusterName, boolean ena return instancesStatusesMap; } - Schema getSupersetOrLatestValueSchema(String clusterName, Store store) { - ReadWriteSchemaRepository schemaRepository = getHelixVeniceClusterResources(clusterName).getSchemaRepository(); - // If already a superset schema exists, try to generate the new superset from that and the input value schema - SchemaEntry existingSchema = schemaRepository.getSupersetOrLatestValueSchema(store.getName()); - return existingSchema == null ? null : existingSchema.getSchema(); - } - /** * Remove one storage node from the given cluster. *

@@ -7519,38 +6709,19 @@ Store checkPreConditionForAclOp(String clusterName, String storeName) { } /** - * A store is not hybrid in the following two scenarios: - * If hybridStoreConfig is null, it means store is not hybrid. - * If all the hybrid config values are negative, it indicates that the store is being set back to batch-only store. - */ - boolean isHybrid(HybridStoreConfig hybridStoreConfig) { - return hybridStoreConfig != null - && (hybridStoreConfig.getRewindTimeInSeconds() >= 0 || hybridStoreConfig.getOffsetLagThresholdToGoOnline() >= 0 - || hybridStoreConfig.getProducerTimestampLagThresholdToGoOnlineInSeconds() >= 0); - } - - /** - * @see VeniceHelixAdmin#isHybrid(HybridStoreConfig) + * @see Admin#isParent() */ - boolean isHybrid(HybridStoreConfigRecord hybridStoreConfigRecord) { - HybridStoreConfig hybridStoreConfig = null; - if (hybridStoreConfigRecord != null) { - hybridStoreConfig = new HybridStoreConfigImpl( - hybridStoreConfigRecord.rewindTimeInSeconds, - hybridStoreConfigRecord.offsetLagThresholdToGoOnline, - hybridStoreConfigRecord.producerTimestampLagThresholdToGoOnlineInSeconds, - DataReplicationPolicy.valueOf(hybridStoreConfigRecord.dataReplicationPolicy), - BufferReplayPolicy.valueOf(hybridStoreConfigRecord.bufferReplayPolicy)); - } - return isHybrid(hybridStoreConfig); + @Override + public boolean isParent() { + return multiClusterConfigs.isParent(); } /** - * @see Admin#isParent() + * @see Admin#isPrimary() */ @Override - public boolean isParent() { - return multiClusterConfigs.isParent(); + public boolean isPrimary() { + return !multiClusterConfigs.isMultiRegion() || isParent(); } /** @@ -7983,9 +7154,13 @@ public void createStoragePersona( Set storesToEnforce, Set owners) { checkControllerLeadershipFor(clusterName); - HelixVeniceClusterResources resources = getHelixVeniceClusterResources(clusterName); + StoragePersonaRepository repository = getHelixVeniceClusterResources(clusterName).getStoragePersonaRepository(); + if (repository.hasPersona(name)) { + throw new VeniceException("Persona with name " + name + " already exists"); + } + repository.validatePersona(name, quotaNumber, storesToEnforce, owners); + try { - StoragePersonaRepository repository = resources.getStoragePersonaRepository(); repository.addPersona(name, quotaNumber, storesToEnforce, owners); } catch (Exception e) { LOGGER.error("Failed to execute CreateStoragePersonaOperation.", e); @@ -8025,9 +7200,10 @@ public void deleteStoragePersona(String clusterName, String name) { @Override public void updateStoragePersona(String clusterName, String name, UpdateStoragePersonaQueryParams queryParams) { checkControllerLeadershipFor(clusterName); - HelixVeniceClusterResources resources = getHelixVeniceClusterResources(clusterName); + StoragePersonaRepository repository = getHelixVeniceClusterResources(clusterName).getStoragePersonaRepository(); + repository.validatePersonaUpdate(name, queryParams); + try { - StoragePersonaRepository repository = resources.getStoragePersonaRepository(); repository.updatePersona(name, queryParams); } catch (Exception e) { LOGGER.error("Failed to execute UpdateStoragePersonaOperation.", e); @@ -8158,8 +7334,8 @@ public boolean isClusterWipeAllowed(String clusterName) { return multiClusterConfigs.getControllerConfig(clusterName).isClusterWipeAllowed(); } - // Visible for testing - VeniceControllerMultiClusterConfig getMultiClusterConfigs() { + @Override + public VeniceControllerMultiClusterConfig getMultiClusterConfigs() { return multiClusterConfigs; } diff --git 
a/services/venice-controller/src/main/java/com/linkedin/venice/controller/VeniceParentHelixAdmin.java b/services/venice-controller/src/main/java/com/linkedin/venice/controller/VeniceParentHelixAdmin.java index afff25d93ac..0a52d399446 100644 --- a/services/venice-controller/src/main/java/com/linkedin/venice/controller/VeniceParentHelixAdmin.java +++ b/services/venice-controller/src/main/java/com/linkedin/venice/controller/VeniceParentHelixAdmin.java @@ -2,7 +2,6 @@ import static com.linkedin.venice.controller.VeniceHelixAdmin.VERSION_ID_UNSET; import static com.linkedin.venice.controller.kafka.consumer.AdminConsumptionTask.IGNORED_CURRENT_VERSION; -import static com.linkedin.venice.controller.util.ParentControllerConfigUpdateUtils.addUpdateSchemaForStore; import static com.linkedin.venice.controllerapi.ControllerApiConstants.ACCESS_CONTROLLED; import static com.linkedin.venice.controllerapi.ControllerApiConstants.ACTIVE_ACTIVE_REPLICATION_ENABLED; import static com.linkedin.venice.controllerapi.ControllerApiConstants.AMPLIFICATION_FACTOR; @@ -56,10 +55,8 @@ import static com.linkedin.venice.controllerapi.ControllerApiConstants.UNUSED_SCHEMA_DELETION_ENABLED; import static com.linkedin.venice.controllerapi.ControllerApiConstants.VERSION; import static com.linkedin.venice.controllerapi.ControllerApiConstants.WRITE_COMPUTATION_ENABLED; -import static com.linkedin.venice.meta.HybridStoreConfigImpl.DEFAULT_HYBRID_OFFSET_LAG_THRESHOLD; -import static com.linkedin.venice.meta.HybridStoreConfigImpl.DEFAULT_HYBRID_TIME_LAG_THRESHOLD; -import static com.linkedin.venice.meta.HybridStoreConfigImpl.DEFAULT_REWIND_TIME_IN_SECONDS; -import static com.linkedin.venice.meta.VersionStatus.*; +import static com.linkedin.venice.meta.VersionStatus.ONLINE; +import static com.linkedin.venice.meta.VersionStatus.PUSHED; import static com.linkedin.venice.serialization.avro.AvroProtocolDefinition.BATCH_JOB_HEARTBEAT; import static com.linkedin.venice.serialization.avro.AvroProtocolDefinition.PUSH_JOB_DETAILS; @@ -68,7 +65,6 @@ import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.JsonNodeFactory; import com.fasterxml.jackson.databind.node.ObjectNode; -import com.linkedin.venice.ConfigConstants; import com.linkedin.venice.SSLConfig; import com.linkedin.venice.acl.AclException; import com.linkedin.venice.acl.DynamicAccessController; @@ -82,7 +78,6 @@ import com.linkedin.venice.authorization.Resource; import com.linkedin.venice.common.VeniceSystemStoreType; import com.linkedin.venice.common.VeniceSystemStoreUtils; -import com.linkedin.venice.compression.CompressionStrategy; import com.linkedin.venice.controller.authorization.SystemStoreAclSynchronizationTask; import com.linkedin.venice.controller.init.DelegatingClusterLeaderInitializationRoutine; import com.linkedin.venice.controller.init.SharedInternalRTStoreInitializationRoutine; @@ -117,7 +112,6 @@ import com.linkedin.venice.controller.kafka.protocol.admin.SetStoreOwner; import com.linkedin.venice.controller.kafka.protocol.admin.SetStorePartitionCount; import com.linkedin.venice.controller.kafka.protocol.admin.StoreCreation; -import com.linkedin.venice.controller.kafka.protocol.admin.StoreViewConfigRecord; import com.linkedin.venice.controller.kafka.protocol.admin.SupersetSchemaCreation; import com.linkedin.venice.controller.kafka.protocol.admin.UpdateStoragePersona; import com.linkedin.venice.controller.kafka.protocol.admin.UpdateStore; @@ -130,7 +124,10 @@ import 
com.linkedin.venice.controller.migration.MigrationPushStrategyZKAccessor; import com.linkedin.venice.controller.supersetschema.DefaultSupersetSchemaGenerator; import com.linkedin.venice.controller.supersetschema.SupersetSchemaGenerator; -import com.linkedin.venice.controller.util.ParentControllerConfigUpdateUtils; +import com.linkedin.venice.controller.util.AdminUtils; +import com.linkedin.venice.controller.util.PrimaryControllerConfigUpdateUtils; +import com.linkedin.venice.controller.util.UpdateStoreUtils; +import com.linkedin.venice.controller.util.UpdateStoreWrapper; import com.linkedin.venice.controllerapi.AdminCommandExecution; import com.linkedin.venice.controllerapi.ControllerClient; import com.linkedin.venice.controllerapi.ControllerResponse; @@ -153,10 +150,8 @@ import com.linkedin.venice.exceptions.ConcurrentBatchPushException; import com.linkedin.venice.exceptions.ConfigurationException; import com.linkedin.venice.exceptions.ErrorType; -import com.linkedin.venice.exceptions.PartitionerSchemaMismatchException; import com.linkedin.venice.exceptions.ResourceStillExistsException; import com.linkedin.venice.exceptions.VeniceException; -import com.linkedin.venice.exceptions.VeniceHttpException; import com.linkedin.venice.exceptions.VeniceNoStoreException; import com.linkedin.venice.exceptions.VeniceUnsupportedOperationException; import com.linkedin.venice.helix.HelixReadOnlyStoreConfigRepository; @@ -166,13 +161,13 @@ import com.linkedin.venice.helix.Replica; import com.linkedin.venice.helix.StoragePersonaRepository; import com.linkedin.venice.helix.ZkStoreConfigAccessor; -import com.linkedin.venice.meta.BackupStrategy; import com.linkedin.venice.meta.BufferReplayPolicy; import com.linkedin.venice.meta.DataReplicationPolicy; import com.linkedin.venice.meta.ETLStoreConfig; import com.linkedin.venice.meta.HybridStoreConfig; import com.linkedin.venice.meta.Instance; import com.linkedin.venice.meta.PartitionerConfig; +import com.linkedin.venice.meta.ReadWriteSchemaRepository; import com.linkedin.venice.meta.ReadWriteStoreRepository; import com.linkedin.venice.meta.RegionPushDetails; import com.linkedin.venice.meta.RoutersClusterConfig; @@ -184,8 +179,6 @@ import com.linkedin.venice.meta.VeniceUserStoreType; import com.linkedin.venice.meta.Version; import com.linkedin.venice.meta.VersionStatus; -import com.linkedin.venice.meta.ViewConfig; -import com.linkedin.venice.meta.ViewConfigImpl; import com.linkedin.venice.persona.StoragePersona; import com.linkedin.venice.pubsub.PubSubConsumerAdapterFactory; import com.linkedin.venice.pubsub.PubSubTopicRepository; @@ -201,7 +194,6 @@ import com.linkedin.venice.schema.SchemaEntry; import com.linkedin.venice.schema.avro.DirectionalSchemaCompatibilityType; import com.linkedin.venice.schema.rmd.RmdSchemaEntry; -import com.linkedin.venice.schema.rmd.RmdSchemaGenerator; import com.linkedin.venice.schema.writecompute.DerivedSchemaEntry; import com.linkedin.venice.schema.writecompute.WriteComputeSchemaConverter; import com.linkedin.venice.security.SSLFactory; @@ -217,7 +209,6 @@ import com.linkedin.venice.utils.CollectionUtils; import com.linkedin.venice.utils.ObjectMapperFactory; import com.linkedin.venice.utils.Pair; -import com.linkedin.venice.utils.PartitionUtils; import com.linkedin.venice.utils.ReflectUtils; import com.linkedin.venice.utils.RegionUtils; import com.linkedin.venice.utils.SslUtils; @@ -228,7 +219,6 @@ import com.linkedin.venice.utils.concurrent.VeniceConcurrentHashMap; import com.linkedin.venice.utils.locks.AutoCloseableLock; import 
com.linkedin.venice.views.VeniceView; -import com.linkedin.venice.views.ViewUtils; import com.linkedin.venice.writer.VeniceWriter; import com.linkedin.venice.writer.VeniceWriterFactory; import com.linkedin.venice.writer.VeniceWriterOptions; @@ -257,13 +247,11 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; -import java.util.function.Function; import java.util.stream.Collectors; import javax.annotation.Nonnull; import org.apache.avro.Schema; import org.apache.commons.lang.StringUtils; import org.apache.commons.lang.Validate; -import org.apache.http.HttpStatus; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -1000,7 +988,7 @@ public void addVersionAndStartIngestion( boolean versionSwapDeferred, int repushSourceVersion) { // Parent controller will always pick the replicationMetadataVersionId from configs. - final int replicationMetadataVersionId = getRmdVersionID(storeName, clusterName); + final int replicationMetadataVersionId = AdminUtils.getRmdVersionID(this, storeName, clusterName); Version version = getVeniceHelixAdmin().addVersionOnly( clusterName, storeName, @@ -1011,9 +999,6 @@ public void addVersionAndStartIngestion( remoteKafkaBootstrapServers, rewindTimeInSecondsOverride, replicationMetadataVersionId); - if (version.isActiveActiveReplicationEnabled()) { - updateReplicationMetadataSchemaForAllValueSchema(clusterName, storeName); - } acquireAdminMessageLock(clusterName, storeName); try { sendAddVersionAdminMessage(clusterName, storeName, pushJobId, version, numberOfPartitions, pushType, null, -1); @@ -1022,34 +1007,6 @@ public void addVersionAndStartIngestion( } } - private int getRmdVersionID(final String storeName, final String clusterName) { - final Store store = getVeniceHelixAdmin().getStore(clusterName, storeName); - if (store == null) { - LOGGER.warn( - "No store found in the store repository. Will get store-level RMD version ID from cluster config. 
" - + "Store name: {}, cluster: {}", - storeName, - clusterName); - } else if (store.getRmdVersion() == ConfigConstants.UNSPECIFIED_REPLICATION_METADATA_VERSION) { - LOGGER.info("No store-level RMD version ID found for store {} in cluster {}", storeName, clusterName); - } else { - LOGGER.info( - "Found store-level RMD version ID {} for store {} in cluster {}", - store.getRmdVersion(), - storeName, - clusterName); - return store.getRmdVersion(); - } - - final VeniceControllerClusterConfig controllerConfig = getMultiClusterConfigs().getControllerConfig(clusterName); - if (controllerConfig == null) { - throw new VeniceException("No controller cluster config found for cluster " + clusterName); - } - final int rmdVersionID = controllerConfig.getReplicationMetadataVersion(); - LOGGER.info("Use RMD version ID {} for cluster {}", rmdVersionID, clusterName); - return rmdVersionID; - } - /** * Since there is no offline push running in Parent Controller, * the old store versions won't be cleaned up by job completion action, so Parent Controller chooses @@ -1592,7 +1549,7 @@ Version addVersionAndTopicOnly( boolean versionSwapDeferred, String targetedRegions, int repushSourceVersion) { - final int replicationMetadataVersionId = getRmdVersionID(storeName, clusterName); + final int replicationMetadataVersionId = AdminUtils.getRmdVersionID(this, storeName, clusterName); Pair result = getVeniceHelixAdmin().addVersionAndTopicOnly( clusterName, storeName, @@ -1614,9 +1571,6 @@ Version addVersionAndTopicOnly( repushSourceVersion); Version newVersion = result.getSecond(); if (result.getFirst()) { - if (newVersion.isActiveActiveReplicationEnabled()) { - updateReplicationMetadataSchemaForAllValueSchema(clusterName, storeName); - } // Send admin message if the version is newly created. acquireAdminMessageLock(clusterName, storeName); try { @@ -2051,16 +2005,6 @@ public void rollbackToBackupVersion(String clusterName, String storeName, String } } - /** - * Unsupported operation in the parent controller. - */ - @Override - public void setStoreLargestUsedVersion(String clusterName, String storeName, int versionNumber) { - throw new VeniceUnsupportedOperationException( - "setStoreLargestUsedVersion", - "This is only supported in the Child Controller."); - } - /** * Update the owner of a specified store by sending {@link AdminMessageType#SET_STORE_OWNER SET_STORE_OWNER} admin message * to the admin topic. 
@@ -2204,297 +2148,176 @@ public void setStoreReadWriteability(String clusterName, String storeName, boole public void updateStore(String clusterName, String storeName, UpdateStoreQueryParams params) { acquireAdminMessageLock(clusterName, storeName); try { - Optional owner = params.getOwner(); - Optional readability = params.getEnableReads(); - Optional writeability = params.getEnableWrites(); - Optional partitionCount = params.getPartitionCount(); - Optional partitionerClass = params.getPartitionerClass(); - Optional> partitionerParams = params.getPartitionerParams(); - Optional amplificationFactor = params.getAmplificationFactor(); - Optional storageQuotaInByte = params.getStorageQuotaInByte(); - Optional readQuotaInCU = params.getReadQuotaInCU(); - Optional currentVersion = params.getCurrentVersion(); - Optional largestUsedVersionNumber = params.getLargestUsedVersionNumber(); - Optional hybridRewindSeconds = params.getHybridRewindSeconds(); - Optional hybridOffsetLagThreshold = params.getHybridOffsetLagThreshold(); - Optional hybridTimeLagThreshold = params.getHybridTimeLagThreshold(); - Optional hybridDataReplicationPolicy = params.getHybridDataReplicationPolicy(); - Optional hybridBufferReplayPolicy = params.getHybridBufferReplayPolicy(); - Optional accessControlled = params.getAccessControlled(); - Optional compressionStrategy = params.getCompressionStrategy(); - Optional clientDecompressionEnabled = params.getClientDecompressionEnabled(); - Optional chunkingEnabled = params.getChunkingEnabled(); - Optional rmdChunkingEnabled = params.getRmdChunkingEnabled(); - Optional batchGetLimit = params.getBatchGetLimit(); - Optional numVersionsToPreserve = params.getNumVersionsToPreserve(); - Optional incrementalPushEnabled = params.getIncrementalPushEnabled(); - Optional storeMigration = params.getStoreMigration(); - Optional writeComputationEnabled = params.getWriteComputationEnabled(); - Optional replicationMetadataVersionID = params.getReplicationMetadataVersionID(); - Optional readComputationEnabled = params.getReadComputationEnabled(); - Optional bootstrapToOnlineTimeoutInHours = params.getBootstrapToOnlineTimeoutInHours(); - Optional backupStrategy = params.getBackupStrategy(); - Optional autoSchemaRegisterPushJobEnabled = params.getAutoSchemaRegisterPushJobEnabled(); - Optional hybridStoreDiskQuotaEnabled = params.getHybridStoreDiskQuotaEnabled(); - Optional regularVersionETLEnabled = params.getRegularVersionETLEnabled(); - Optional futureVersionETLEnabled = params.getFutureVersionETLEnabled(); - Optional etledUserProxyAccount = params.getETLedProxyUserAccount(); - Optional nativeReplicationEnabled = params.getNativeReplicationEnabled(); - Optional pushStreamSourceAddress = params.getPushStreamSourceAddress(); - Optional backupVersionRetentionMs = params.getBackupVersionRetentionMs(); - Optional replicationFactor = params.getReplicationFactor(); - Optional migrationDuplicateStore = params.getMigrationDuplicateStore(); - Optional nativeReplicationSourceFabric = params.getNativeReplicationSourceFabric(); - Optional activeActiveReplicationEnabled = params.getActiveActiveReplicationEnabled(); - Optional regionsFilter = params.getRegionsFilter(); - Optional personaName = params.getStoragePersona(); - Optional> storeViewConfig = params.getStoreViews(); - Optional viewName = params.getViewName(); - Optional viewClassName = params.getViewClassName(); - Optional> viewParams = params.getViewClassParams(); - Optional removeView = params.getDisableStoreView(); - Optional latestSupersetSchemaId = 
params.getLatestSupersetSchemaId(); - Optional unusedSchemaDeletionEnabled = params.getUnusedSchemaDeletionEnabled(); - /** * Check whether parent controllers will only propagate the update configs to child controller, or all unchanged * configs should be replicated to children too. */ Optional replicateAll = params.getReplicateAllConfigs(); - Optional storageNodeReadQuotaEnabled = params.getStorageNodeReadQuotaEnabled(); - Optional minCompactionLagSeconds = params.getMinCompactionLagSeconds(); - Optional maxCompactionLagSeconds = params.getMaxCompactionLagSeconds(); - Optional maxRecordSizeBytes = params.getMaxRecordSizeBytes(); boolean replicateAllConfigs = replicateAll.isPresent() && replicateAll.get(); List updatedConfigsList = new LinkedList<>(); - String errorMessagePrefix = "Store update error for " + storeName + " in cluster: " + clusterName + ": "; - Store currStore = getVeniceHelixAdmin().getStore(clusterName, storeName); - if (currStore == null) { - LOGGER.error(errorMessagePrefix + "store does not exist, and thus cannot be updated."); - throw new VeniceNoStoreException(storeName, clusterName); + UpdateStoreWrapper updateStoreWrapper = + UpdateStoreUtils.getStoreUpdate(this, clusterName, storeName, params, false); + if (updateStoreWrapper == null) { + return; } + + Store originalStore = updateStoreWrapper.originalStore; + Set updatedConfigs = updateStoreWrapper.updatedConfigs; + Store updatedStore = updateStoreWrapper.updatedStore; + + if (!replicateAllConfigs && updatedConfigs.isEmpty()) { + String errMsg = "UpdateStore command failed for store " + storeName + ". The command didn't change any specific" + + " store config and didn't specify \"--replicate-all-configs\" flag."; + LOGGER.error(errMsg); + throw new VeniceException(errMsg); + } + UpdateStore setStore = (UpdateStore) AdminMessageType.UPDATE_STORE.getNewInstance(); setStore.clusterName = clusterName; setStore.storeName = storeName; - setStore.owner = owner.map(addToUpdatedConfigList(updatedConfigsList, OWNER)).orElseGet(currStore::getOwner); - // Invalid config update on hybrid will not be populated to admin channel so subsequent updates on the store won't - // be blocked by retry mechanism. - if (currStore.isHybrid() && (partitionerClass.isPresent() || partitionerParams.isPresent())) { - String errorMessage = errorMessagePrefix + "Cannot change partitioner class and parameters for hybrid stores"; - LOGGER.error(errorMessage); - throw new VeniceHttpException(HttpStatus.SC_BAD_REQUEST, errorMessage, ErrorType.BAD_REQUEST); + if (updatedConfigs.contains(OWNER)) { + setStore.owner = updatedStore.getOwner(); + updatedConfigsList.add(OWNER); + } else { + setStore.owner = originalStore.getOwner(); } - if (partitionCount.isPresent()) { - getVeniceHelixAdmin().preCheckStorePartitionCountUpdate(clusterName, currStore, partitionCount.get()); - setStore.partitionNum = partitionCount.get(); + if (updatedConfigs.contains(PARTITION_COUNT)) { + setStore.partitionNum = updatedStore.getPartitionCount(); updatedConfigsList.add(PARTITION_COUNT); } else { - setStore.partitionNum = currStore.getPartitionCount(); + setStore.partitionNum = originalStore.getPartitionCount(); } - /** - * TODO: We should build an UpdateStoreHelper that takes current store config and update command as input, and - * return whether the update command is valid. 
- */ - validateActiveActiveReplicationEnableConfigs(activeActiveReplicationEnabled, nativeReplicationEnabled, currStore); - - setStore.nativeReplicationEnabled = - nativeReplicationEnabled.map(addToUpdatedConfigList(updatedConfigsList, NATIVE_REPLICATION_ENABLED)) - .orElseGet(currStore::isNativeReplicationEnabled); - setStore.pushStreamSourceAddress = - pushStreamSourceAddress.map(addToUpdatedConfigList(updatedConfigsList, PUSH_STREAM_SOURCE_ADDRESS)) - .orElseGet(currStore::getPushStreamSourceAddress); - - if (storeViewConfig.isPresent() && viewName.isPresent()) { - throw new VeniceException("Cannot update a store view and overwrite store view setup together!"); - } - if (viewName.isPresent()) { - Map updatedViewSettings; - if (!removeView.isPresent()) { - if (!viewClassName.isPresent()) { - throw new VeniceException("View class name is required when configuring a view."); - } - // If View parameter is not provided, use emtpy map instead. It does not inherit from existing config. - ViewConfig viewConfig = new ViewConfigImpl(viewClassName.get(), viewParams.orElse(Collections.emptyMap())); - validateStoreViewConfig(currStore, viewConfig); - updatedViewSettings = VeniceHelixAdmin.addNewViewConfigsIntoOldConfigs(currStore, viewName.get(), viewConfig); - } else { - updatedViewSettings = VeniceHelixAdmin.removeViewConfigFromStoreViewConfigMap(currStore, viewName.get()); - } - setStore.views = updatedViewSettings; - updatedConfigsList.add(STORE_VIEW); + if (updatedConfigs.contains(NATIVE_REPLICATION_ENABLED)) { + setStore.nativeReplicationEnabled = updatedStore.isNativeReplicationEnabled(); + updatedConfigsList.add(NATIVE_REPLICATION_ENABLED); + } else { + setStore.nativeReplicationEnabled = originalStore.isNativeReplicationEnabled(); + } + + if (updatedConfigs.contains(PUSH_STREAM_SOURCE_ADDRESS)) { + setStore.pushStreamSourceAddress = updatedStore.getPushStreamSourceAddress(); + updatedConfigsList.add(PUSH_STREAM_SOURCE_ADDRESS); + } else { + setStore.pushStreamSourceAddress = originalStore.getPushStreamSourceAddress(); } - if (storeViewConfig.isPresent()) { - // Validate and overwrite store views if they're getting set - validateStoreViewConfigs(storeViewConfig.get(), currStore); - setStore.views = StoreViewUtils.convertStringMapViewToStoreViewConfigRecordMap(storeViewConfig.get()); + if (updatedConfigs.contains(STORE_VIEW)) { + setStore.views = StoreViewUtils.convertViewConfigMapToStoreViewRecordMap(updatedStore.getViewConfigs()); updatedConfigsList.add(STORE_VIEW); } - // Only update fields that are set, other fields will be read from the original store's partitioner config. - PartitionerConfig updatedPartitionerConfig = VeniceHelixAdmin.mergeNewSettingsIntoOldPartitionerConfig( - currStore, - partitionerClass, - partitionerParams, - amplificationFactor); - if (partitionerClass.isPresent() || partitionerParams.isPresent() || amplificationFactor.isPresent()) { - // Update updatedConfigsList. 
- partitionerClass.ifPresent(p -> updatedConfigsList.add(PARTITIONER_CLASS)); - partitionerParams.ifPresent(p -> updatedConfigsList.add(PARTITIONER_PARAMS)); - amplificationFactor.ifPresent(p -> updatedConfigsList.add(AMPLIFICATION_FACTOR)); + boolean partitionerChange = false; + + if (updatedConfigs.contains(PARTITIONER_CLASS)) { + updatedConfigsList.add(PARTITIONER_CLASS); + partitionerChange = true; + } + + if (updatedConfigs.contains(PARTITIONER_PARAMS)) { + updatedConfigsList.add(PARTITIONER_PARAMS); + partitionerChange = true; + } + + if (updatedConfigs.contains(AMPLIFICATION_FACTOR)) { + updatedConfigsList.add(AMPLIFICATION_FACTOR); + partitionerChange = true; + } + + if (partitionerChange) { // Create PartitionConfigRecord for admin channel transmission. + PartitionerConfig updatedPartitionerConfig = updatedStore.getPartitionerConfig(); PartitionerConfigRecord partitionerConfigRecord = new PartitionerConfigRecord(); partitionerConfigRecord.partitionerClass = updatedPartitionerConfig.getPartitionerClass(); partitionerConfigRecord.partitionerParams = CollectionUtils.getCharSequenceMapFromStringMap(updatedPartitionerConfig.getPartitionerParams()); partitionerConfigRecord.amplificationFactor = updatedPartitionerConfig.getAmplificationFactor(); - // Before setting partitioner config, verify the updated partitionerConfig can be built - try { - PartitionUtils.getVenicePartitioner( - partitionerConfigRecord.partitionerClass.toString(), - new VeniceProperties(partitionerConfigRecord.partitionerParams), - getKeySchema(clusterName, storeName).getSchema()); - } catch (PartitionerSchemaMismatchException e) { - String errorMessage = errorMessagePrefix + e.getMessage(); - LOGGER.error(errorMessage); - throw new VeniceHttpException(HttpStatus.SC_BAD_REQUEST, errorMessage, ErrorType.INVALID_SCHEMA); - } catch (Exception e) { - String errorMessage = errorMessagePrefix + "Partitioner Configs invalid, please verify that partitioner " - + "configs like classpath and parameters are correct!"; - LOGGER.error(errorMessage); - throw new VeniceHttpException(HttpStatus.SC_BAD_REQUEST, errorMessage, ErrorType.INVALID_CONFIG); - } setStore.partitionerConfig = partitionerConfigRecord; } - setStore.enableReads = - readability.map(addToUpdatedConfigList(updatedConfigsList, ENABLE_READS)).orElseGet(currStore::isEnableReads); - setStore.enableWrites = writeability.map(addToUpdatedConfigList(updatedConfigsList, ENABLE_WRITES)) - .orElseGet(currStore::isEnableWrites); - - setStore.readQuotaInCU = readQuotaInCU.map(addToUpdatedConfigList(updatedConfigsList, READ_QUOTA_IN_CU)) - .orElseGet(currStore::getReadQuotaInCU); - - // We need to be careful when handling currentVersion. - // Since it is not synced between parent and local controller, - // It is very likely to override local values unintentionally. 
- setStore.currentVersion = - currentVersion.map(addToUpdatedConfigList(updatedConfigsList, VERSION)).orElse(IGNORED_CURRENT_VERSION); - - hybridRewindSeconds.map(addToUpdatedConfigList(updatedConfigsList, REWIND_TIME_IN_SECONDS)); - hybridOffsetLagThreshold.map(addToUpdatedConfigList(updatedConfigsList, OFFSET_LAG_TO_GO_ONLINE)); - hybridTimeLagThreshold.map(addToUpdatedConfigList(updatedConfigsList, TIME_LAG_TO_GO_ONLINE)); - hybridDataReplicationPolicy.map(addToUpdatedConfigList(updatedConfigsList, DATA_REPLICATION_POLICY)); - hybridBufferReplayPolicy.map(addToUpdatedConfigList(updatedConfigsList, BUFFER_REPLAY_POLICY)); - HybridStoreConfig updatedHybridStoreConfig = VeniceHelixAdmin.mergeNewSettingsIntoOldHybridStoreConfig( - currStore, - hybridRewindSeconds, - hybridOffsetLagThreshold, - hybridTimeLagThreshold, - hybridDataReplicationPolicy, - hybridBufferReplayPolicy); - - // Get VeniceControllerClusterConfig for the cluster - VeniceControllerClusterConfig controllerConfig = - veniceHelixAdmin.getHelixVeniceClusterResources(clusterName).getConfig(); - // Check if the store is being converted to a hybrid store - boolean storeBeingConvertedToHybrid = !currStore.isHybrid() && updatedHybridStoreConfig != null - && veniceHelixAdmin.isHybrid(updatedHybridStoreConfig); - // Check if the store is being converted to a batch store - boolean storeBeingConvertedToBatch = currStore.isHybrid() && !veniceHelixAdmin.isHybrid(updatedHybridStoreConfig); - if (storeBeingConvertedToBatch && activeActiveReplicationEnabled.orElse(false)) { - throw new VeniceHttpException( - HttpStatus.SC_BAD_REQUEST, - "Cannot convert store to batch-only and enable Active/Active together.", - ErrorType.BAD_REQUEST); - } - if (storeBeingConvertedToBatch && incrementalPushEnabled.orElse(false)) { - throw new VeniceHttpException( - HttpStatus.SC_BAD_REQUEST, - "Cannot convert store to batch-only and enable incremental push together.", - ErrorType.BAD_REQUEST); - } - // Update active-active replication config. - setStore.activeActiveReplicationEnabled = activeActiveReplicationEnabled - .map(addToUpdatedConfigList(updatedConfigsList, ACTIVE_ACTIVE_REPLICATION_ENABLED)) - .orElseGet(currStore::isActiveActiveReplicationEnabled); - // Enable active-active replication automatically when batch user store being converted to hybrid store and - // active-active replication is enabled for all hybrid store via the cluster config - if (storeBeingConvertedToHybrid && !setStore.activeActiveReplicationEnabled && !currStore.isSystemStore() - && controllerConfig.isActiveActiveReplicationEnabledAsDefaultForHybrid()) { - setStore.activeActiveReplicationEnabled = true; - updatedConfigsList.add(ACTIVE_ACTIVE_REPLICATION_ENABLED); - if (!hybridDataReplicationPolicy.isPresent()) { - LOGGER.info( - "Data replication policy was not explicitly set when converting store to hybrid store: {}." - + " Setting it to active-active replication policy.", - storeName); - - updatedHybridStoreConfig.setDataReplicationPolicy(DataReplicationPolicy.ACTIVE_ACTIVE); - updatedConfigsList.add(DATA_REPLICATION_POLICY); - } - } - // When turning off hybrid store, we will also turn off A/A store config. 
- if (storeBeingConvertedToBatch && setStore.activeActiveReplicationEnabled) { - setStore.activeActiveReplicationEnabled = false; - updatedConfigsList.add(ACTIVE_ACTIVE_REPLICATION_ENABLED); + if (updatedConfigs.contains(ENABLE_READS)) { + setStore.enableReads = updatedStore.isEnableReads(); + updatedConfigsList.add(ENABLE_READS); + } else { + setStore.enableReads = originalStore.isEnableReads(); } - // Update incremental push config. - setStore.incrementalPushEnabled = - incrementalPushEnabled.map(addToUpdatedConfigList(updatedConfigsList, INCREMENTAL_PUSH_ENABLED)) - .orElseGet(currStore::isIncrementalPushEnabled); - // Enable incremental push automatically when batch user store being converted to hybrid store and active-active - // replication is enabled or being and the cluster config allows it. - if (!setStore.incrementalPushEnabled && !currStore.isSystemStore() && storeBeingConvertedToHybrid - && setStore.activeActiveReplicationEnabled - && controllerConfig.enabledIncrementalPushForHybridActiveActiveUserStores()) { - setStore.incrementalPushEnabled = true; - updatedConfigsList.add(INCREMENTAL_PUSH_ENABLED); + if (updatedConfigs.contains(ENABLE_WRITES)) { + setStore.enableWrites = updatedStore.isEnableWrites(); + updatedConfigsList.add(ENABLE_WRITES); + } else { + setStore.enableWrites = originalStore.isEnableWrites(); } - // When turning off hybrid store, we will also turn off incremental store config. - if (storeBeingConvertedToBatch && setStore.incrementalPushEnabled) { - setStore.incrementalPushEnabled = false; - updatedConfigsList.add(INCREMENTAL_PUSH_ENABLED); + + if (updatedConfigs.contains(READ_QUOTA_IN_CU)) { + setStore.readQuotaInCU = updatedStore.getReadQuotaInCU(); + updatedConfigsList.add(READ_QUOTA_IN_CU); + } else { + setStore.readQuotaInCU = originalStore.getReadQuotaInCU(); } - if (updatedHybridStoreConfig == null) { - setStore.hybridStoreConfig = null; + if (updatedConfigs.contains(VERSION)) { + setStore.currentVersion = updatedStore.getCurrentVersion(); + updatedConfigsList.add(VERSION); } else { - HybridStoreConfigRecord hybridStoreConfigRecord = new HybridStoreConfigRecord(); - hybridStoreConfigRecord.offsetLagThresholdToGoOnline = - updatedHybridStoreConfig.getOffsetLagThresholdToGoOnline(); - hybridStoreConfigRecord.rewindTimeInSeconds = updatedHybridStoreConfig.getRewindTimeInSeconds(); - hybridStoreConfigRecord.producerTimestampLagThresholdToGoOnlineInSeconds = - updatedHybridStoreConfig.getProducerTimestampLagThresholdToGoOnlineInSeconds(); - hybridStoreConfigRecord.dataReplicationPolicy = updatedHybridStoreConfig.getDataReplicationPolicy().getValue(); - hybridStoreConfigRecord.bufferReplayPolicy = updatedHybridStoreConfig.getBufferReplayPolicy().getValue(); - setStore.hybridStoreConfig = hybridStoreConfigRecord; + setStore.currentVersion = IGNORED_CURRENT_VERSION; } - if (incrementalPushEnabled.orElse(currStore.isIncrementalPushEnabled()) - && !veniceHelixAdmin.isHybrid(currStore.getHybridStoreConfig()) - && !veniceHelixAdmin.isHybrid(updatedHybridStoreConfig)) { - LOGGER.info( - "Enabling incremental push for a batch store:{}.
Converting it to a hybrid store with default configs.", - storeName); - HybridStoreConfigRecord hybridStoreConfigRecord = new HybridStoreConfigRecord(); - hybridStoreConfigRecord.rewindTimeInSeconds = DEFAULT_REWIND_TIME_IN_SECONDS; + if (updatedConfigs.contains(REWIND_TIME_IN_SECONDS)) { updatedConfigsList.add(REWIND_TIME_IN_SECONDS); - hybridStoreConfigRecord.offsetLagThresholdToGoOnline = DEFAULT_HYBRID_OFFSET_LAG_THRESHOLD; + } + + if (updatedConfigs.contains(OFFSET_LAG_TO_GO_ONLINE)) { updatedConfigsList.add(OFFSET_LAG_TO_GO_ONLINE); - hybridStoreConfigRecord.producerTimestampLagThresholdToGoOnlineInSeconds = DEFAULT_HYBRID_TIME_LAG_THRESHOLD; + } + + if (updatedConfigs.contains(TIME_LAG_TO_GO_ONLINE)) { updatedConfigsList.add(TIME_LAG_TO_GO_ONLINE); - hybridStoreConfigRecord.dataReplicationPolicy = DataReplicationPolicy.NONE.getValue(); + } + + if (updatedConfigs.contains(DATA_REPLICATION_POLICY)) { updatedConfigsList.add(DATA_REPLICATION_POLICY); - hybridStoreConfigRecord.bufferReplayPolicy = BufferReplayPolicy.REWIND_FROM_EOP.getValue(); + } + + if (updatedConfigs.contains(BUFFER_REPLAY_POLICY)) { updatedConfigsList.add(BUFFER_REPLAY_POLICY); - setStore.hybridStoreConfig = hybridStoreConfigRecord; + } + + HybridStoreConfig updatedHybridStoreConfig = updatedStore.getHybridStoreConfig(); + setStore.hybridStoreConfig = new HybridStoreConfigRecord(); + if (updatedHybridStoreConfig == null) { + setStore.hybridStoreConfig.offsetLagThresholdToGoOnline = -1; + setStore.hybridStoreConfig.rewindTimeInSeconds = -1; + setStore.hybridStoreConfig.producerTimestampLagThresholdToGoOnlineInSeconds = -1; + setStore.hybridStoreConfig.dataReplicationPolicy = DataReplicationPolicy.NONE.getValue(); + setStore.hybridStoreConfig.bufferReplayPolicy = BufferReplayPolicy.REWIND_FROM_EOP.getValue(); + } else { + setStore.hybridStoreConfig.offsetLagThresholdToGoOnline = + updatedHybridStoreConfig.getOffsetLagThresholdToGoOnline(); + setStore.hybridStoreConfig.rewindTimeInSeconds = updatedHybridStoreConfig.getRewindTimeInSeconds(); + setStore.hybridStoreConfig.producerTimestampLagThresholdToGoOnlineInSeconds = + updatedHybridStoreConfig.getProducerTimestampLagThresholdToGoOnlineInSeconds(); + setStore.hybridStoreConfig.dataReplicationPolicy = + updatedHybridStoreConfig.getDataReplicationPolicy().getValue(); + setStore.hybridStoreConfig.bufferReplayPolicy = updatedHybridStoreConfig.getBufferReplayPolicy().getValue(); + } + + if (updatedConfigs.contains(ACTIVE_ACTIVE_REPLICATION_ENABLED)) { + setStore.activeActiveReplicationEnabled = updatedStore.isActiveActiveReplicationEnabled(); + updatedConfigsList.add(ACTIVE_ACTIVE_REPLICATION_ENABLED); + } else { + setStore.activeActiveReplicationEnabled = originalStore.isActiveActiveReplicationEnabled(); + } + + if (updatedConfigs.contains(INCREMENTAL_PUSH_ENABLED)) { + setStore.incrementalPushEnabled = updatedStore.isIncrementalPushEnabled(); + updatedConfigsList.add(INCREMENTAL_PUSH_ENABLED); + } else { + setStore.incrementalPushEnabled = originalStore.isIncrementalPushEnabled(); } /** @@ -2502,204 +2325,248 @@ public void updateStore(String clusterName, String storeName, UpdateStoreQueryPa * do append-only and compaction will happen later. * We expose actual disk usage to users, instead of multiplying/dividing the overhead ratio by situations. 
*/ - setStore.storageQuotaInByte = - storageQuotaInByte.map(addToUpdatedConfigList(updatedConfigsList, STORAGE_QUOTA_IN_BYTE)) - .orElseGet(currStore::getStorageQuotaInByte); - - setStore.accessControlled = accessControlled.map(addToUpdatedConfigList(updatedConfigsList, ACCESS_CONTROLLED)) - .orElseGet(currStore::isAccessControlled); - setStore.compressionStrategy = - compressionStrategy.map(addToUpdatedConfigList(updatedConfigsList, COMPRESSION_STRATEGY)) - .map(CompressionStrategy::getValue) - .orElse(currStore.getCompressionStrategy().getValue()); - setStore.clientDecompressionEnabled = - clientDecompressionEnabled.map(addToUpdatedConfigList(updatedConfigsList, CLIENT_DECOMPRESSION_ENABLED)) - .orElseGet(currStore::getClientDecompressionEnabled); - setStore.batchGetLimit = batchGetLimit.map(addToUpdatedConfigList(updatedConfigsList, BATCH_GET_LIMIT)) - .orElseGet(currStore::getBatchGetLimit); - setStore.numVersionsToPreserve = - numVersionsToPreserve.map(addToUpdatedConfigList(updatedConfigsList, NUM_VERSIONS_TO_PRESERVE)) - .orElseGet(currStore::getNumVersionsToPreserve); - setStore.isMigrating = storeMigration.map(addToUpdatedConfigList(updatedConfigsList, STORE_MIGRATION)) - .orElseGet(currStore::isMigrating); - setStore.replicationMetadataVersionID = replicationMetadataVersionID - .map(addToUpdatedConfigList(updatedConfigsList, REPLICATION_METADATA_PROTOCOL_VERSION_ID)) - .orElse(currStore.getRmdVersion()); - setStore.readComputationEnabled = - readComputationEnabled.map(addToUpdatedConfigList(updatedConfigsList, READ_COMPUTATION_ENABLED)) - .orElseGet(currStore::isReadComputationEnabled); - setStore.bootstrapToOnlineTimeoutInHours = bootstrapToOnlineTimeoutInHours - .map(addToUpdatedConfigList(updatedConfigsList, BOOTSTRAP_TO_ONLINE_TIMEOUT_IN_HOURS)) - .orElseGet(currStore::getBootstrapToOnlineTimeoutInHours); + if (updatedConfigs.contains(STORAGE_QUOTA_IN_BYTE)) { + setStore.storageQuotaInByte = updatedStore.getStorageQuotaInByte(); + updatedConfigsList.add(STORAGE_QUOTA_IN_BYTE); + } else { + setStore.storageQuotaInByte = originalStore.getStorageQuotaInByte(); + } + + if (updatedConfigs.contains(ACCESS_CONTROLLED)) { + setStore.accessControlled = updatedStore.isAccessControlled(); + updatedConfigsList.add(ACCESS_CONTROLLED); + } else { + setStore.accessControlled = originalStore.isAccessControlled(); + } + + if (updatedConfigs.contains(COMPRESSION_STRATEGY)) { + setStore.compressionStrategy = updatedStore.getCompressionStrategy().getValue(); + updatedConfigsList.add(COMPRESSION_STRATEGY); + } else { + setStore.compressionStrategy = originalStore.getCompressionStrategy().getValue(); + } + + if (updatedConfigs.contains(CLIENT_DECOMPRESSION_ENABLED)) { + setStore.clientDecompressionEnabled = updatedStore.getClientDecompressionEnabled(); + updatedConfigsList.add(CLIENT_DECOMPRESSION_ENABLED); + } else { + setStore.clientDecompressionEnabled = originalStore.getClientDecompressionEnabled(); + } + + if (updatedConfigs.contains(BATCH_GET_LIMIT)) { + setStore.batchGetLimit = updatedStore.getBatchGetLimit(); + updatedConfigsList.add(BATCH_GET_LIMIT); + } else { + setStore.batchGetLimit = originalStore.getBatchGetLimit(); + } + + if (updatedConfigs.contains(NUM_VERSIONS_TO_PRESERVE)) { + setStore.numVersionsToPreserve = updatedStore.getNumVersionsToPreserve(); + updatedConfigsList.add(NUM_VERSIONS_TO_PRESERVE); + } else { + setStore.numVersionsToPreserve = originalStore.getNumVersionsToPreserve(); + } + + if (updatedConfigs.contains(STORE_MIGRATION)) { + setStore.isMigrating = 
updatedStore.isMigrating(); + updatedConfigsList.add(STORE_MIGRATION); + } else { + setStore.isMigrating = originalStore.isMigrating(); + } + + if (updatedConfigs.contains(REPLICATION_METADATA_PROTOCOL_VERSION_ID)) { + setStore.replicationMetadataVersionID = updatedStore.getRmdVersion(); + updatedConfigsList.add(REPLICATION_METADATA_PROTOCOL_VERSION_ID); + } else { + setStore.replicationMetadataVersionID = originalStore.getRmdVersion(); + } + + if (updatedConfigs.contains(READ_COMPUTATION_ENABLED)) { + setStore.readComputationEnabled = updatedStore.isReadComputationEnabled(); + updatedConfigsList.add(READ_COMPUTATION_ENABLED); + } else { + setStore.readComputationEnabled = originalStore.isReadComputationEnabled(); + } + + if (updatedConfigs.contains(BOOTSTRAP_TO_ONLINE_TIMEOUT_IN_HOURS)) { + setStore.bootstrapToOnlineTimeoutInHours = updatedStore.getBootstrapToOnlineTimeoutInHours(); + updatedConfigsList.add(BOOTSTRAP_TO_ONLINE_TIMEOUT_IN_HOURS); + } else { + setStore.bootstrapToOnlineTimeoutInHours = originalStore.getBootstrapToOnlineTimeoutInHours(); + } + setStore.leaderFollowerModelEnabled = true; // do not mess up during upgrades - setStore.backupStrategy = (backupStrategy.map(addToUpdatedConfigList(updatedConfigsList, BACKUP_STRATEGY)) - .orElse(currStore.getBackupStrategy())).ordinal(); - - setStore.schemaAutoRegisterFromPushJobEnabled = autoSchemaRegisterPushJobEnabled - .map(addToUpdatedConfigList(updatedConfigsList, AUTO_SCHEMA_REGISTER_FOR_PUSHJOB_ENABLED)) - .orElse(currStore.isSchemaAutoRegisterFromPushJobEnabled()); - - setStore.hybridStoreDiskQuotaEnabled = - hybridStoreDiskQuotaEnabled.map(addToUpdatedConfigList(updatedConfigsList, HYBRID_STORE_DISK_QUOTA_ENABLED)) - .orElse(currStore.isHybridStoreDiskQuotaEnabled()); - - regularVersionETLEnabled.map(addToUpdatedConfigList(updatedConfigsList, REGULAR_VERSION_ETL_ENABLED)); - futureVersionETLEnabled.map(addToUpdatedConfigList(updatedConfigsList, FUTURE_VERSION_ETL_ENABLED)); - etledUserProxyAccount.map(addToUpdatedConfigList(updatedConfigsList, ETLED_PROXY_USER_ACCOUNT)); - setStore.ETLStoreConfig = mergeNewSettingIntoOldETLStoreConfig( - currStore, - regularVersionETLEnabled, - futureVersionETLEnabled, - etledUserProxyAccount); - - setStore.largestUsedVersionNumber = - largestUsedVersionNumber.map(addToUpdatedConfigList(updatedConfigsList, LARGEST_USED_VERSION_NUMBER)) - .orElseGet(currStore::getLargestUsedVersionNumber); - - setStore.backupVersionRetentionMs = - backupVersionRetentionMs.map(addToUpdatedConfigList(updatedConfigsList, BACKUP_VERSION_RETENTION_MS)) - .orElseGet(currStore::getBackupVersionRetentionMs); - setStore.replicationFactor = replicationFactor.map(addToUpdatedConfigList(updatedConfigsList, REPLICATION_FACTOR)) - .orElseGet(currStore::getReplicationFactor); - setStore.migrationDuplicateStore = - migrationDuplicateStore.map(addToUpdatedConfigList(updatedConfigsList, MIGRATION_DUPLICATE_STORE)) - .orElseGet(currStore::isMigrationDuplicateStore); - setStore.nativeReplicationSourceFabric = nativeReplicationSourceFabric - .map(addToUpdatedConfigList(updatedConfigsList, NATIVE_REPLICATION_SOURCE_FABRIC)) - .orElseGet((currStore::getNativeReplicationSourceFabric)); - - setStore.disableMetaStore = - params.disableMetaStore().map(addToUpdatedConfigList(updatedConfigsList, DISABLE_META_STORE)).orElse(false); - - setStore.disableDavinciPushStatusStore = params.disableDavinciPushStatusStore() - .map(addToUpdatedConfigList(updatedConfigsList, DISABLE_DAVINCI_PUSH_STATUS_STORE)) - .orElse(false); - - 
setStore.storagePersona = personaName.map(addToUpdatedConfigList(updatedConfigsList, PERSONA_NAME)).orElse(null); - - setStore.blobTransferEnabled = params.getBlobTransferEnabled() - .map(addToUpdatedConfigList(updatedConfigsList, BLOB_TRANSFER_ENABLED)) - .orElseGet(currStore::isBlobTransferEnabled); - - // Check whether the passed param is valid or not - if (latestSupersetSchemaId.isPresent()) { - if (latestSupersetSchemaId.get() != SchemaData.INVALID_VALUE_SCHEMA_ID) { - if (veniceHelixAdmin.getValueSchema(clusterName, storeName, latestSupersetSchemaId.get()) == null) { - throw new VeniceException( - "Unknown value schema id: " + latestSupersetSchemaId.get() + " in store: " + storeName); - } - } + + if (updatedConfigs.contains(BACKUP_STRATEGY)) { + setStore.backupStrategy = updatedStore.getBackupStrategy().getValue(); + updatedConfigsList.add(BACKUP_STRATEGY); + } else { + setStore.backupStrategy = originalStore.getBackupStrategy().getValue(); } - setStore.latestSuperSetValueSchemaId = - latestSupersetSchemaId.map(addToUpdatedConfigList(updatedConfigsList, LATEST_SUPERSET_SCHEMA_ID)) - .orElseGet(currStore::getLatestSuperSetValueSchemaId); - setStore.storageNodeReadQuotaEnabled = - storageNodeReadQuotaEnabled.map(addToUpdatedConfigList(updatedConfigsList, STORAGE_NODE_READ_QUOTA_ENABLED)) - .orElseGet(currStore::isStorageNodeReadQuotaEnabled); - setStore.unusedSchemaDeletionEnabled = - unusedSchemaDeletionEnabled.map(addToUpdatedConfigList(updatedConfigsList, UNUSED_SCHEMA_DELETION_ENABLED)) - .orElseGet(currStore::isUnusedSchemaDeletionEnabled); - setStore.minCompactionLagSeconds = - minCompactionLagSeconds.map(addToUpdatedConfigList(updatedConfigsList, MIN_COMPACTION_LAG_SECONDS)) - .orElseGet(currStore::getMinCompactionLagSeconds); - setStore.maxCompactionLagSeconds = - maxCompactionLagSeconds.map(addToUpdatedConfigList(updatedConfigsList, MAX_COMPACTION_LAG_SECONDS)) - .orElseGet(currStore::getMaxCompactionLagSeconds); - if (setStore.maxCompactionLagSeconds < setStore.minCompactionLagSeconds) { - throw new VeniceException( - "Store's max compaction lag seconds: " + setStore.maxCompactionLagSeconds + " shouldn't be smaller than " - + "store's min compaction lag seconds: " + setStore.minCompactionLagSeconds); - } - setStore.maxRecordSizeBytes = - maxRecordSizeBytes.map(addToUpdatedConfigList(updatedConfigsList, MAX_RECORD_SIZE_BYTES)) - .orElseGet(currStore::getMaxRecordSizeBytes); - - StoragePersonaRepository repository = - getVeniceHelixAdmin().getHelixVeniceClusterResources(clusterName).getStoragePersonaRepository(); - StoragePersona personaToValidate = null; - StoragePersona existingPersona = repository.getPersonaContainingStore(currStore.getName()); - - if (params.getStoragePersona().isPresent()) { - personaToValidate = getVeniceHelixAdmin().getStoragePersona(clusterName, params.getStoragePersona().get()); - if (personaToValidate == null) { - String errMsg = "UpdateStore command failed for store " + storeName + ". 
The provided StoragePersona " - + params.getStoragePersona().get() + " does not exist."; - throw new VeniceException(errMsg); - } - } else if (existingPersona != null) { - personaToValidate = existingPersona; + + if (updatedConfigs.contains(AUTO_SCHEMA_REGISTER_FOR_PUSHJOB_ENABLED)) { + setStore.schemaAutoRegisterFromPushJobEnabled = updatedStore.isSchemaAutoRegisterFromPushJobEnabled(); + updatedConfigsList.add(AUTO_SCHEMA_REGISTER_FOR_PUSHJOB_ENABLED); + } else { + setStore.schemaAutoRegisterFromPushJobEnabled = originalStore.isSchemaAutoRegisterFromPushJobEnabled(); } - if (personaToValidate != null) { - /** - * Create a new copy of the store with an updated quota, and validate this. - */ - Store updatedQuotaStore = getVeniceHelixAdmin().getStore(clusterName, storeName); - updatedQuotaStore.setStorageQuotaInByte(setStore.getStorageQuotaInByte()); - repository.validateAddUpdatedStore(personaToValidate, Optional.of(updatedQuotaStore)); + if (updatedConfigs.contains(HYBRID_STORE_DISK_QUOTA_ENABLED)) { + setStore.hybridStoreDiskQuotaEnabled = updatedStore.isHybridStoreDiskQuotaEnabled(); + updatedConfigsList.add(HYBRID_STORE_DISK_QUOTA_ENABLED); + } else { + setStore.hybridStoreDiskQuotaEnabled = originalStore.isHybridStoreDiskQuotaEnabled(); } - /** - * Fabrics filter is not a store config, so we don't need to add it into {@link UpdateStore#updatedConfigsList} - */ - setStore.regionsFilter = regionsFilter.orElse(null); + if (updatedConfigs.contains(REGULAR_VERSION_ETL_ENABLED)) { + updatedConfigsList.add(REGULAR_VERSION_ETL_ENABLED); + } - // Update Partial Update config. - boolean partialUpdateConfigUpdated = ParentControllerConfigUpdateUtils.checkAndMaybeApplyPartialUpdateConfig( - this, - clusterName, - storeName, - writeComputationEnabled, - setStore, - storeBeingConvertedToHybrid); - if (partialUpdateConfigUpdated) { + if (updatedConfigs.contains(FUTURE_VERSION_ETL_ENABLED)) { + updatedConfigsList.add(FUTURE_VERSION_ETL_ENABLED); + } + + if (updatedConfigs.contains(ETLED_PROXY_USER_ACCOUNT)) { + updatedConfigsList.add(ETLED_PROXY_USER_ACCOUNT); + } + + ETLStoreConfig etlStoreConfig = updatedStore.getEtlStoreConfig(); + ETLStoreConfigRecord etlStoreConfigRecord = new ETLStoreConfigRecord(); + etlStoreConfigRecord.regularVersionETLEnabled = etlStoreConfig.isRegularVersionETLEnabled(); + etlStoreConfigRecord.futureVersionETLEnabled = etlStoreConfig.isFutureVersionETLEnabled(); + etlStoreConfigRecord.etledUserProxyAccount = etlStoreConfig.getEtledUserProxyAccount(); + setStore.ETLStoreConfig = etlStoreConfigRecord; + + if (updatedConfigs.contains(LARGEST_USED_VERSION_NUMBER)) { + setStore.largestUsedVersionNumber = updatedStore.getLargestUsedVersionNumber(); + updatedConfigsList.add(LARGEST_USED_VERSION_NUMBER); + } else { + setStore.largestUsedVersionNumber = originalStore.getLargestUsedVersionNumber(); + } + + if (updatedConfigs.contains(BACKUP_VERSION_RETENTION_MS)) { + setStore.backupVersionRetentionMs = updatedStore.getBackupVersionRetentionMs(); + updatedConfigsList.add(BACKUP_VERSION_RETENTION_MS); + } else { + setStore.backupVersionRetentionMs = originalStore.getBackupVersionRetentionMs(); + } + + if (updatedConfigs.contains(REPLICATION_FACTOR)) { + setStore.replicationFactor = updatedStore.getReplicationFactor(); + updatedConfigsList.add(REPLICATION_FACTOR); + } else { + setStore.replicationFactor = originalStore.getReplicationFactor(); + } + + if (updatedConfigs.contains(MIGRATION_DUPLICATE_STORE)) { + setStore.migrationDuplicateStore = updatedStore.isMigrationDuplicateStore(); 
+ updatedConfigsList.add(MIGRATION_DUPLICATE_STORE); + } else { + setStore.migrationDuplicateStore = originalStore.isMigrationDuplicateStore(); + } + + if (updatedConfigs.contains(NATIVE_REPLICATION_SOURCE_FABRIC)) { + setStore.nativeReplicationSourceFabric = updatedStore.getNativeReplicationSourceFabric(); + updatedConfigsList.add(NATIVE_REPLICATION_SOURCE_FABRIC); + } else { + setStore.nativeReplicationSourceFabric = originalStore.getNativeReplicationSourceFabric(); + } + + if (updatedConfigs.contains(DISABLE_META_STORE)) { + setStore.disableMetaStore = !updatedStore.isStoreMetaSystemStoreEnabled(); + updatedConfigsList.add(DISABLE_META_STORE); + } else { + setStore.disableMetaStore = !originalStore.isStoreMetaSystemStoreEnabled(); + } + + if (updatedConfigs.contains(DISABLE_DAVINCI_PUSH_STATUS_STORE)) { + setStore.disableDavinciPushStatusStore = !updatedStore.isDaVinciPushStatusStoreEnabled(); + updatedConfigsList.add(DISABLE_DAVINCI_PUSH_STATUS_STORE); + } else { + setStore.disableDavinciPushStatusStore = !originalStore.isDaVinciPushStatusStoreEnabled(); + } + + if (updatedConfigs.contains(PERSONA_NAME)) { + setStore.storagePersona = params.getStoragePersona().get(); + updatedConfigsList.add(PERSONA_NAME); + } else { + setStore.storagePersona = null; + } + + if (updatedConfigs.contains(BLOB_TRANSFER_ENABLED)) { + setStore.blobTransferEnabled = updatedStore.isBlobTransferEnabled(); + updatedConfigsList.add(BLOB_TRANSFER_ENABLED); + } else { + setStore.blobTransferEnabled = originalStore.isBlobTransferEnabled(); + } + + if (updatedConfigs.contains(MAX_RECORD_SIZE_BYTES)) { + setStore.maxRecordSizeBytes = updatedStore.getMaxRecordSizeBytes(); + updatedConfigsList.add(MAX_RECORD_SIZE_BYTES); + } else { + setStore.maxRecordSizeBytes = originalStore.getMaxRecordSizeBytes(); + } + + if (updatedConfigs.contains(LATEST_SUPERSET_SCHEMA_ID)) { + setStore.latestSuperSetValueSchemaId = updatedStore.getLatestSuperSetValueSchemaId(); + updatedConfigsList.add(LATEST_SUPERSET_SCHEMA_ID); + } else { + setStore.latestSuperSetValueSchemaId = originalStore.getLatestSuperSetValueSchemaId(); + } + + if (updatedConfigs.contains(STORAGE_NODE_READ_QUOTA_ENABLED)) { + setStore.storageNodeReadQuotaEnabled = updatedStore.isStorageNodeReadQuotaEnabled(); + updatedConfigsList.add(STORAGE_NODE_READ_QUOTA_ENABLED); + } else { + setStore.storageNodeReadQuotaEnabled = originalStore.isStorageNodeReadQuotaEnabled(); + } + + if (updatedConfigs.contains(UNUSED_SCHEMA_DELETION_ENABLED)) { + setStore.unusedSchemaDeletionEnabled = updatedStore.isUnusedSchemaDeletionEnabled(); + updatedConfigsList.add(UNUSED_SCHEMA_DELETION_ENABLED); + } else { + setStore.unusedSchemaDeletionEnabled = originalStore.isUnusedSchemaDeletionEnabled(); + } + + if (updatedConfigs.contains(MIN_COMPACTION_LAG_SECONDS)) { + setStore.minCompactionLagSeconds = updatedStore.getMinCompactionLagSeconds(); + updatedConfigsList.add(MIN_COMPACTION_LAG_SECONDS); + } else { + setStore.minCompactionLagSeconds = originalStore.getMinCompactionLagSeconds(); + } + + if (updatedConfigs.contains(MAX_COMPACTION_LAG_SECONDS)) { + setStore.maxCompactionLagSeconds = updatedStore.getMaxCompactionLagSeconds(); + updatedConfigsList.add(MAX_COMPACTION_LAG_SECONDS); + } else { + setStore.maxCompactionLagSeconds = originalStore.getMaxCompactionLagSeconds(); + } + + if (updatedConfigs.contains(WRITE_COMPUTATION_ENABLED)) { + setStore.writeComputationEnabled = updatedStore.isWriteComputationEnabled(); updatedConfigsList.add(WRITE_COMPUTATION_ENABLED); + } else { + 
setStore.writeComputationEnabled = originalStore.isWriteComputationEnabled(); } - boolean partialUpdateJustEnabled = setStore.writeComputationEnabled && !currStore.isWriteComputationEnabled(); - // Update Chunking config. - boolean chunkingConfigUpdated = ParentControllerConfigUpdateUtils - .checkAndMaybeApplyChunkingConfigChange(this, clusterName, storeName, chunkingEnabled, setStore); - if (chunkingConfigUpdated) { + + if (updatedConfigs.contains(CHUNKING_ENABLED)) { + setStore.chunkingEnabled = updatedStore.isChunkingEnabled(); updatedConfigsList.add(CHUNKING_ENABLED); + } else { + setStore.chunkingEnabled = originalStore.isChunkingEnabled(); } - // Update RMD Chunking config. - boolean rmdChunkingConfigUpdated = ParentControllerConfigUpdateUtils - .checkAndMaybeApplyRmdChunkingConfigChange(this, clusterName, storeName, rmdChunkingEnabled, setStore); - if (rmdChunkingConfigUpdated) { + if (updatedConfigs.contains(RMD_CHUNKING_ENABLED)) { + setStore.rmdChunkingEnabled = updatedStore.isRmdChunkingEnabled(); updatedConfigsList.add(RMD_CHUNKING_ENABLED); + } else { + setStore.rmdChunkingEnabled = originalStore.isRmdChunkingEnabled(); } - // Validate Amplification Factor config based on latest A/A and partial update status. - if ((setStore.getActiveActiveReplicationEnabled() || setStore.getWriteComputationEnabled()) - && updatedPartitionerConfig.getAmplificationFactor() > 1) { - throw new VeniceHttpException( - HttpStatus.SC_BAD_REQUEST, - "Non-default amplification factor is not compatible with active-active replication and/or partial update.", - ErrorType.BAD_REQUEST); - } - - if (!getVeniceHelixAdmin().isHybrid(currStore.getHybridStoreConfig()) - && getVeniceHelixAdmin().isHybrid(setStore.getHybridStoreConfig()) && setStore.getPartitionNum() == 0) { - // This is a new hybrid store and partition count is not specified. - VeniceControllerClusterConfig config = - getVeniceHelixAdmin().getHelixVeniceClusterResources(clusterName).getConfig(); - setStore.setPartitionNum( - PartitionUtils.calculatePartitionCount( - storeName, - setStore.getStorageQuotaInByte(), - 0, - config.getPartitionSize(), - config.getMinNumberOfPartitionsForHybrid(), - config.getMaxNumberOfPartitions(), - config.isPartitionCountRoundUpEnabled(), - config.getPartitionCountRoundUpSize())); - LOGGER.info( - "Enforcing default hybrid partition count:{} for a new hybrid store:{}.", - setStore.getPartitionNum(), - storeName); - updatedConfigsList.add(PARTITION_COUNT); - } + /** + * Fabrics filter is not a store config, so we don't need to add it into {@link UpdateStore#updatedConfigsList} + */ + setStore.regionsFilter = params.getRegionsFilter().orElse(null); /** * By default, parent controllers will not try to replicate the unchanged store configs to child controllers; @@ -2707,90 +2574,30 @@ && getVeniceHelixAdmin().isHybrid(setStore.getHybridStoreConfig()) && setStore.g */ setStore.replicateAllConfigs = replicateAllConfigs; if (!replicateAllConfigs) { - if (updatedConfigsList.isEmpty()) { - String errMsg = - "UpdateStore command failed for store " + storeName + ". 
The command didn't change any specific" - + " store config and didn't specify \"--replicate-all-configs\" flag."; - LOGGER.error(errMsg); - throw new VeniceException(errMsg); - } setStore.updatedConfigsList = new ArrayList<>(updatedConfigsList); } else { setStore.updatedConfigsList = Collections.emptyList(); } - final boolean readComputeJustEnabled = - readComputationEnabled.orElse(false) && !currStore.isReadComputationEnabled(); - boolean needToGenerateSupersetSchema = - !currStore.isSystemStore() && (readComputeJustEnabled || partialUpdateJustEnabled); - if (needToGenerateSupersetSchema) { - // dry run to make sure superset schema generation can work - getSupersetSchemaGenerator(clusterName) - .generateSupersetSchemaFromSchemas(getValueSchemas(clusterName, storeName)); - } - AdminOperation message = new AdminOperation(); message.operationType = AdminMessageType.UPDATE_STORE.getValue(); message.payloadUnion = setStore; sendAdminMessageAndWaitForConsumed(clusterName, storeName, message); - - if (needToGenerateSupersetSchema) { - addSupersetSchemaForStore(clusterName, storeName, currStore.isActiveActiveReplicationEnabled()); - } - if (partialUpdateJustEnabled) { - LOGGER.info("Enabling partial update for the first time on store: {} in cluster: {}", storeName, clusterName); - addUpdateSchemaForStore(this, clusterName, storeName, false); - } - - /** - * If active-active replication is getting enabled for the store, generate and register the Replication metadata schema - * for all existing value schemas. - */ - final boolean activeActiveReplicationJustEnabled = - activeActiveReplicationEnabled.orElse(false) && !currStore.isActiveActiveReplicationEnabled(); - if (activeActiveReplicationJustEnabled) { - updateReplicationMetadataSchemaForAllValueSchema(clusterName, storeName); - } + UpdateStoreUtils.handlePostUpdateActions(this, clusterName, storeName); } finally { releaseAdminMessageLock(clusterName, storeName); } } - private void validateStoreViewConfigs(Map stringMap, Store store) { - Map configs = StoreViewUtils.convertStringMapViewToViewConfigMap(stringMap); - for (Map.Entry viewConfigEntry: configs.entrySet()) { - validateStoreViewConfig(store, viewConfigEntry.getValue()); - } - } - - private void validateStoreViewConfig(Store store, ViewConfig viewConfig) { - // TODO: Pass a proper properties object here. Today this isn't used in this context - VeniceView view = - ViewUtils.getVeniceView(viewConfig.getViewClassName(), new Properties(), store, viewConfig.getViewParameters()); - view.validateConfigs(); - } - - private SupersetSchemaGenerator getSupersetSchemaGenerator(String clusterName) { - if (externalSupersetSchemaGenerator.isPresent() && getMultiClusterConfigs().getControllerConfig(clusterName) - .isParentExternalSupersetSchemaGenerationEnabled()) { + @Override + public SupersetSchemaGenerator getSupersetSchemaGenerator(String clusterName) { + if (externalSupersetSchemaGenerator.isPresent() + && getMultiClusterConfigs().getControllerConfig(clusterName).isExternalSupersetSchemaGenerationEnabled()) { return externalSupersetSchemaGenerator.get(); } return defaultSupersetSchemaGenerator; } - private void addSupersetSchemaForStore(String clusterName, String storeName, boolean activeActiveReplicationEnabled) { - // Generate a superset schema and add it. 
- SchemaEntry supersetSchemaEntry = getSupersetSchemaGenerator(clusterName) - .generateSupersetSchemaFromSchemas(getValueSchemas(clusterName, storeName)); - final Schema supersetSchema = supersetSchemaEntry.getSchema(); - final int supersetSchemaID = supersetSchemaEntry.getId(); - addValueSchemaEntry(clusterName, storeName, supersetSchema.toString(), supersetSchemaID, true); - - if (activeActiveReplicationEnabled) { - updateReplicationMetadataSchema(clusterName, storeName, supersetSchema, supersetSchemaID); - } - } - /** * @see VeniceHelixAdmin#updateClusterConfig(String, UpdateClusterConfigQueryParams) */ @@ -2799,28 +2606,6 @@ public void updateClusterConfig(String clusterName, UpdateClusterConfigQueryPara getVeniceHelixAdmin().updateClusterConfig(clusterName, params); } - private void validateActiveActiveReplicationEnableConfigs( - Optional activeActiveReplicationEnabledOptional, - Optional nativeReplicationEnabledOptional, - Store store) { - final boolean activeActiveReplicationEnabled = activeActiveReplicationEnabledOptional.orElse(false); - if (!activeActiveReplicationEnabled) { - return; - } - - final boolean nativeReplicationEnabled = nativeReplicationEnabledOptional.isPresent() - ? nativeReplicationEnabledOptional.get() - : store.isNativeReplicationEnabled(); - - if (!nativeReplicationEnabled) { - throw new VeniceHttpException( - HttpStatus.SC_BAD_REQUEST, - "Active/Active Replication cannot be enabled for store " + store.getName() - + " since Native Replication is not enabled on it.", - ErrorType.INVALID_CONFIG); - } - } - /** * @see VeniceHelixAdmin#getStorageEngineOverheadRatio(String) */ @@ -2912,102 +2697,11 @@ public SchemaEntry addValueSchema( } } - private SchemaEntry addValueAndSupersetSchemaEntries( - String clusterName, - String storeName, - SchemaEntry newValueSchemaEntry, - SchemaEntry newSupersetSchemaEntry, - final boolean isWriteComputationEnabled) { - validateNewSupersetAndValueSchemaEntries(storeName, clusterName, newValueSchemaEntry, newSupersetSchemaEntry); - LOGGER.info( - "Adding value schema {} and superset schema {} to store: {} in cluster: {}", - newValueSchemaEntry, - newSupersetSchemaEntry, - storeName, - clusterName); - - SupersetSchemaCreation supersetSchemaCreation = - (SupersetSchemaCreation) AdminMessageType.SUPERSET_SCHEMA_CREATION.getNewInstance(); - supersetSchemaCreation.clusterName = clusterName; - supersetSchemaCreation.storeName = storeName; - SchemaMeta valueSchemaMeta = new SchemaMeta(); - valueSchemaMeta.definition = newValueSchemaEntry.getSchemaStr(); - valueSchemaMeta.schemaType = SchemaType.AVRO_1_4.getValue(); - supersetSchemaCreation.valueSchema = valueSchemaMeta; - supersetSchemaCreation.valueSchemaId = newValueSchemaEntry.getId(); - - SchemaMeta supersetSchemaMeta = new SchemaMeta(); - supersetSchemaMeta.definition = newSupersetSchemaEntry.getSchemaStr(); - supersetSchemaMeta.schemaType = SchemaType.AVRO_1_4.getValue(); - supersetSchemaCreation.supersetSchema = supersetSchemaMeta; - supersetSchemaCreation.supersetSchemaId = newSupersetSchemaEntry.getId(); - - AdminOperation message = new AdminOperation(); - message.operationType = AdminMessageType.SUPERSET_SCHEMA_CREATION.getValue(); - message.payloadUnion = supersetSchemaCreation; - - sendAdminMessageAndWaitForConsumed(clusterName, storeName, message); - // Need to add RMD schemas for both new value schema and new superset schema. 
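The removed comment above refers to updateReplicationMetadataSchema, which (as visible further down where that helper itself is deleted) derives the RMD schema from a value schema via RmdSchemaGenerator and then registers it. A minimal sketch of that derivation step; the generateMetadataSchema call is copied from the removed helper, while the import path and the sample value schema are assumptions.

import org.apache.avro.Schema;
// Import path is an assumption; the class and call below mirror the removed helper.
import com.linkedin.venice.schema.rmd.RmdSchemaGenerator;

public final class RmdSchemaDerivationSketch {
  public static void main(String[] args) {
    // Hypothetical value schema, used only to illustrate the derivation.
    Schema valueSchema = new Schema.Parser().parse(
        "{\"type\":\"record\",\"name\":\"SampleValue\",\"fields\":[{\"name\":\"field1\",\"type\":\"string\"}]}");
    int rmdVersionId = 1;
    // Same call as in the removed updateReplicationMetadataSchema(...) helper.
    String rmdSchemaStr = RmdSchemaGenerator.generateMetadataSchema(valueSchema, rmdVersionId).toString();
    System.out.println(rmdSchemaStr);
  }
}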
- updateReplicationMetadataSchema( - clusterName, - storeName, - newValueSchemaEntry.getSchema(), - newValueSchemaEntry.getId()); - updateReplicationMetadataSchema( - clusterName, - storeName, - newSupersetSchemaEntry.getSchema(), - newSupersetSchemaEntry.getId()); - if (isWriteComputationEnabled) { - Schema newValueWriteComputeSchema = - writeComputeSchemaConverter.convertFromValueRecordSchema(newValueSchemaEntry.getSchema()); - Schema newSuperSetWriteComputeSchema = - writeComputeSchemaConverter.convertFromValueRecordSchema(newSupersetSchemaEntry.getSchema()); - addDerivedSchema(clusterName, storeName, newValueSchemaEntry.getId(), newValueWriteComputeSchema.toString()); - addDerivedSchema( - clusterName, - storeName, - newSupersetSchemaEntry.getId(), - newSuperSetWriteComputeSchema.toString()); - } - updateStore( - clusterName, - storeName, - new UpdateStoreQueryParams().setLatestSupersetSchemaId(newSupersetSchemaEntry.getId())); - return newValueSchemaEntry; - } - - private void validateNewSupersetAndValueSchemaEntries( - String storeName, - String clusterName, - SchemaEntry newValueSchemaEntry, - SchemaEntry newSupersetSchemaEntry) { - if (newValueSchemaEntry.getId() == newSupersetSchemaEntry.getId()) { - throw new IllegalArgumentException( - String.format( - "Superset schema ID and value schema ID are expected to be different for store %s in cluster %s. " - + "Got ID: %d", - storeName, - clusterName, - newValueSchemaEntry.getId())); - } - if (AvroSchemaUtils - .compareSchemaIgnoreFieldOrder(newValueSchemaEntry.getSchema(), newSupersetSchemaEntry.getSchema())) { - throw new IllegalArgumentException( - String.format( - "Superset and value schemas are expected to be different for store %s in cluster %s. Got schema: %s", - storeName, - clusterName, - newValueSchemaEntry.getSchema())); - } - } - private SchemaEntry addValueSchemaEntry( String clusterName, String storeName, String valueSchemaStr, - final int newValueSchemaId, - final boolean doUpdateSupersetSchemaID) { + final int newValueSchemaId) { LOGGER.info("Adding value schema: {} to store: {} in cluster: {}", valueSchemaStr, storeName, clusterName); ValueSchemaCreation valueSchemaCreation = @@ -3033,25 +2727,71 @@ private SchemaEntry addValueSchemaEntry( + actualValueSchemaId); } - if (doUpdateSupersetSchemaID) { - updateStore(clusterName, storeName, new UpdateStoreQueryParams().setLatestSupersetSchemaId(newValueSchemaId)); - } - return new SchemaEntry(actualValueSchemaId, valueSchemaStr); } - /** - * Unsupported operation in the parent controller. 
- */ @Override - public SchemaEntry addSupersetSchema( + public void addSupersetSchema( String clusterName, String storeName, String valueSchemaStr, int valueSchemaId, String supersetSchemaStr, int supersetSchemaId) { - throw new VeniceUnsupportedOperationException("addSupersetSchema"); + acquireAdminMessageLock(clusterName, storeName); + try { + if (supersetSchemaId == SchemaData.INVALID_VALUE_SCHEMA_ID) { + throw new VeniceException("Invalid superset schema id: " + supersetSchemaId); + } + + ReadWriteSchemaRepository schemaRepository = getHelixVeniceClusterResources(clusterName).getSchemaRepository(); + final SchemaEntry existingSupersetSchemaEntry = schemaRepository.getValueSchema(storeName, supersetSchemaId); + if (existingSupersetSchemaEntry != null) { + final Schema newSupersetSchema = AvroSchemaParseUtils.parseSchemaFromJSONStrictValidation(supersetSchemaStr); + if (!AvroSchemaUtils + .compareSchemaIgnoreFieldOrder(existingSupersetSchemaEntry.getSchema(), newSupersetSchema)) { + throw new VeniceException( + "Existing schema with id " + existingSupersetSchemaEntry.getId() + " does not match with new schema " + + supersetSchemaStr); + } + } + + if (valueSchemaId == SchemaData.INVALID_VALUE_SCHEMA_ID) { + LOGGER.info( + "Adding superset schema {} with id {} to store: {} in cluster: {}", + supersetSchemaStr, + supersetSchemaId, + storeName, + clusterName); + valueSchemaStr = ""; + } else if (StringUtils.isEmpty(valueSchemaStr)) { + throw new VeniceException("Invalid value schema string: " + valueSchemaStr); + } + + SupersetSchemaCreation supersetSchemaCreation = + (SupersetSchemaCreation) AdminMessageType.SUPERSET_SCHEMA_CREATION.getNewInstance(); + supersetSchemaCreation.clusterName = clusterName; + supersetSchemaCreation.storeName = storeName; + SchemaMeta valueSchemaMeta = new SchemaMeta(); + valueSchemaMeta.definition = valueSchemaStr; + valueSchemaMeta.schemaType = SchemaType.AVRO_1_4.getValue(); + supersetSchemaCreation.valueSchema = valueSchemaMeta; + supersetSchemaCreation.valueSchemaId = valueSchemaId; + + SchemaMeta supersetSchemaMeta = new SchemaMeta(); + supersetSchemaMeta.definition = supersetSchemaStr; + supersetSchemaMeta.schemaType = SchemaType.AVRO_1_4.getValue(); + supersetSchemaCreation.supersetSchema = supersetSchemaMeta; + supersetSchemaCreation.supersetSchemaId = supersetSchemaId; + + AdminOperation message = new AdminOperation(); + message.operationType = AdminMessageType.SUPERSET_SCHEMA_CREATION.getValue(); + message.payloadUnion = supersetSchemaCreation; + + sendAdminMessageAndWaitForConsumed(clusterName, storeName, message); + } finally { + releaseAdminMessageLock(clusterName, storeName); + } } @Override @@ -3063,80 +2803,14 @@ public SchemaEntry addValueSchema( DirectionalSchemaCompatibilityType expectedCompatibilityType) { acquireAdminMessageLock(clusterName, storeName); try { - Schema newValueSchema = AvroSchemaParseUtils.parseSchemaFromJSONStrictValidation(newValueSchemaStr); - - final Store store = getVeniceHelixAdmin().getStore(clusterName, storeName); - Schema existingValueSchema = getVeniceHelixAdmin().getSupersetOrLatestValueSchema(clusterName, store); - - final boolean doUpdateSupersetSchemaID; - if (existingValueSchema != null && (store.isReadComputationEnabled() || store.isWriteComputationEnabled())) { - SupersetSchemaGenerator supersetSchemaGenerator = getSupersetSchemaGenerator(clusterName); - Schema newSuperSetSchema = supersetSchemaGenerator.generateSupersetSchema(existingValueSchema, newValueSchema); - String newSuperSetSchemaStr = 
newSuperSetSchema.toString(); - - if (supersetSchemaGenerator.compareSchema(newSuperSetSchema, newValueSchema)) { - doUpdateSupersetSchemaID = true; - - } else if (supersetSchemaGenerator.compareSchema(newSuperSetSchema, existingValueSchema)) { - doUpdateSupersetSchemaID = false; - - } else if (store.isSystemStore()) { - /** - * Do not register superset schema for system store for now. Because some system stores specify the schema ID - * explicitly, which may conflict with the superset schema generated internally, the new value schema registration - * could fail. - * - * TODO: Design a long-term plan. - */ - doUpdateSupersetSchemaID = false; - - } else { - // Register superset schema only if it does not match with existing or new schema. - - // validate compatibility of the new superset schema - getVeniceHelixAdmin().checkPreConditionForAddValueSchemaAndGetNewSchemaId( - clusterName, - storeName, - newSuperSetSchemaStr, - expectedCompatibilityType); - // Check if the superset schema already exists or not. If exists use the same ID, else bump the value ID by - // one. - int supersetSchemaId = getVeniceHelixAdmin().getValueSchemaIdIgnoreFieldOrder( - clusterName, - storeName, - newSuperSetSchemaStr, - (s1, s2) -> supersetSchemaGenerator.compareSchema(s1, s2) ? 0 : 1); - if (supersetSchemaId == SchemaData.INVALID_VALUE_SCHEMA_ID) { - supersetSchemaId = schemaId + 1; - } - return addValueAndSupersetSchemaEntries( - clusterName, - storeName, - new SchemaEntry(schemaId, newValueSchema), - new SchemaEntry(supersetSchemaId, newSuperSetSchema), - store.isWriteComputationEnabled()); - } - } else { - doUpdateSupersetSchemaID = false; + if (schemaId == SchemaData.DUPLICATE_VALUE_SCHEMA_CODE) { + return new SchemaEntry(getValueSchemaId(clusterName, storeName, newValueSchemaStr), newValueSchemaStr); } - SchemaEntry addedSchemaEntry = - addValueSchemaEntry(clusterName, storeName, newValueSchemaStr, schemaId, doUpdateSupersetSchemaID); + SchemaEntry addedSchemaEntry = addValueSchemaEntry(clusterName, storeName, newValueSchemaStr, schemaId); - /** - * if active-active replication is enabled for the store then generate and register the new Replication metadata schema - * for this newly added value schema. - */ - if (store.isActiveActiveReplicationEnabled()) { - Schema latestValueSchema = getVeniceHelixAdmin().getSupersetOrLatestValueSchema(clusterName, store); - final int valueSchemaId = getValueSchemaId(clusterName, storeName, latestValueSchema.toString()); - updateReplicationMetadataSchema(clusterName, storeName, latestValueSchema, valueSchemaId); - } - if (store.isWriteComputationEnabled()) { - Schema newWriteComputeSchema = - writeComputeSchemaConverter.convertFromValueRecordSchema(addedSchemaEntry.getSchema()); - addDerivedSchema(clusterName, storeName, addedSchemaEntry.getId(), newWriteComputeSchema.toString()); - } + // Now register all inferred schemas for the store. 
+ PrimaryControllerConfigUpdateUtils.registerInferredSchemas(this, clusterName, storeName); return addedSchemaEntry; } finally { @@ -3262,8 +2936,8 @@ public RmdSchemaEntry addReplicationMetadataSchema( try { RmdSchemaEntry rmdSchemaEntry = new RmdSchemaEntry(valueSchemaId, replicationMetadataVersionId, replicationMetadataSchemaStr); - final boolean replicationMetadataSchemaAlreadyPresent = getVeniceHelixAdmin() - .checkIfMetadataSchemaAlreadyPresent(clusterName, storeName, valueSchemaId, rmdSchemaEntry); + final boolean replicationMetadataSchemaAlreadyPresent = + getVeniceHelixAdmin().checkIfMetadataSchemaAlreadyPresent(clusterName, storeName, rmdSchemaEntry); if (replicationMetadataSchemaAlreadyPresent) { LOGGER.info( "Replication metadata schema already exists for store: {} in cluster: {} metadataSchema: {} " @@ -3368,36 +3042,6 @@ public void validateAndMaybeRetrySystemStoreAutoCreation( throw new VeniceUnsupportedOperationException("validateAndMaybeRetrySystemStoreAutoCreation"); } - private void updateReplicationMetadataSchemaForAllValueSchema(String clusterName, String storeName) { - final Collection valueSchemas = getValueSchemas(clusterName, storeName); - for (SchemaEntry valueSchemaEntry: valueSchemas) { - updateReplicationMetadataSchema(clusterName, storeName, valueSchemaEntry.getSchema(), valueSchemaEntry.getId()); - } - } - - private void updateReplicationMetadataSchema( - String clusterName, - String storeName, - Schema valueSchema, - int valueSchemaId) { - final int rmdVersionId = getRmdVersionID(storeName, clusterName); - final boolean valueSchemaAlreadyHasRmdSchema = getVeniceHelixAdmin() - .checkIfValueSchemaAlreadyHasRmdSchema(clusterName, storeName, valueSchemaId, rmdVersionId); - if (valueSchemaAlreadyHasRmdSchema) { - LOGGER.info( - "Store {} in cluster {} already has a replication metadata schema for its value schema with ID {} and " - + "replication metadata version ID {}. So skip updating this value schema's RMD schema.", - storeName, - clusterName, - valueSchemaId, - rmdVersionId); - return; - } - String replicationMetadataSchemaStr = - RmdSchemaGenerator.generateMetadataSchema(valueSchema, rmdVersionId).toString(); - addReplicationMetadataSchema(clusterName, storeName, valueSchemaId, rmdVersionId, replicationMetadataSchemaStr); - } - /** * Unsupported operation in the parent controller. */ @@ -3830,7 +3474,7 @@ public NodeRemovableResult isInstanceRemovable( */ @Override public Pair> nodeReplicaReadiness(String cluster, String helixNodeId) { - throw new VeniceUnsupportedOperationException("nodeReplicaReadiness is not supported"); + throw new VeniceUnsupportedOperationException("nodeReplicaReadiness"); } private StoreInfo getStoreInChildRegion(String regionName, String clusterName, String storeName) { @@ -4473,36 +4117,6 @@ public StoreMetaValue getMetaStoreValue(StoreMetaKey metaKey, String storeName) throw new VeniceException("Not implemented in parent"); } - /** - * Check if etled proxy account is set before enabling any ETL and return a {@link ETLStoreConfigRecord} - */ - private ETLStoreConfigRecord mergeNewSettingIntoOldETLStoreConfig( - Store store, - Optional regularVersionETLEnabled, - Optional futureVersionETLEnabled, - Optional etledUserProxyAccount) { - ETLStoreConfig etlStoreConfig = store.getEtlStoreConfig(); - /** - * If etl enabled is true (either current version or future version), then account name must be specified in the command - * and it's not empty, or the store metadata already contains a non-empty account name. 
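The removed javadoc above documents a precondition on ETL configs: enabling regular- or future-version ETL requires a non-empty etled user proxy account, either from the request or already on the store. The replacement hunks earlier simply copy the ETL config from the updated store object, so this check presumably now lives in the shared update-store validation path. A dependency-free sketch of the rule itself; names are illustrative.

public final class EtlProxyAccountGuardSketch {
  // Mirrors the rule in the removed mergeNewSettingIntoOldETLStoreConfig.
  static void validate(boolean anyEtlEnabled, String requestedProxyAccount, String existingProxyAccount) {
    boolean requestedMissing = requestedProxyAccount == null || requestedProxyAccount.isEmpty();
    boolean existingMissing = existingProxyAccount == null || existingProxyAccount.isEmpty();
    if (anyEtlEnabled && requestedMissing && existingMissing) {
      throw new IllegalArgumentException("Cannot enable ETL for this store because etled user proxy account is not set");
    }
  }

  public static void main(String[] args) {
    validate(true, "proxy-account", null); // fine: account provided in the request
    validate(false, null, null);           // fine: ETL is not being enabled
    try {
      validate(true, null, null);
    } catch (IllegalArgumentException e) {
      System.out.println(e.getMessage()); // guard fires: no proxy account anywhere
    }
  }
}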
- */ - if (regularVersionETLEnabled.orElse(false) || futureVersionETLEnabled.orElse(false)) { - if ((!etledUserProxyAccount.isPresent() || etledUserProxyAccount.get().isEmpty()) - && (etlStoreConfig.getEtledUserProxyAccount() == null - || etlStoreConfig.getEtledUserProxyAccount().isEmpty())) { - throw new VeniceException("Cannot enable ETL for this store because etled user proxy account is not set"); - } - } - ETLStoreConfigRecord etlStoreConfigRecord = new ETLStoreConfigRecord(); - etlStoreConfigRecord.etledUserProxyAccount = - etledUserProxyAccount.orElse(etlStoreConfig.getEtledUserProxyAccount()); - etlStoreConfigRecord.regularVersionETLEnabled = - regularVersionETLEnabled.orElse(etlStoreConfig.isRegularVersionETLEnabled()); - etlStoreConfigRecord.futureVersionETLEnabled = - futureVersionETLEnabled.orElse(etlStoreConfig.isFutureVersionETLEnabled()); - return etlStoreConfigRecord; - } - /** * This parses the input accessPermission string to create ACL's and provision them using the authorizerService interface. * @@ -4653,7 +4267,7 @@ public void updateAclForStore(String clusterName, String storeName, String acces try (AutoCloseableLock ignore = resources.getClusterLockManager().createStoreWriteLock(storeName)) { LOGGER.info("ACLProvisioning: UpdateAcl for store: {} in cluster: {}", storeName, clusterName); if (!authorizerService.isPresent()) { - throw new VeniceUnsupportedOperationException("updateAclForStore is not supported yet!"); + throw new VeniceUnsupportedOperationException("updateAclForStore"); } Store store = getVeniceHelixAdmin().checkPreConditionForAclOp(clusterName, storeName); provisionAclsForStore( @@ -4673,7 +4287,7 @@ public void updateSystemStoreAclForStore( HelixVeniceClusterResources resources = getVeniceHelixAdmin().getHelixVeniceClusterResources(clusterName); try (AutoCloseableLock ignore = resources.getClusterLockManager().createStoreWriteLock(regularStoreName)) { if (!authorizerService.isPresent()) { - throw new VeniceUnsupportedOperationException("updateAclForStore is not supported yet!"); + throw new VeniceUnsupportedOperationException("updateAclForStore"); } getVeniceHelixAdmin().checkPreConditionForAclOp(clusterName, regularStoreName); authorizerService.get().setAcls(systemStoreAclBinding); @@ -4689,7 +4303,7 @@ public String getAclForStore(String clusterName, String storeName) { try (AutoCloseableLock ignore = resources.getClusterLockManager().createStoreReadLock(storeName)) { LOGGER.info("ACLProvisioning: GetAcl for store: {} in cluster: {}", storeName, clusterName); if (!authorizerService.isPresent()) { - throw new VeniceUnsupportedOperationException("getAclForStore is not supported yet!"); + throw new VeniceUnsupportedOperationException("getAclForStore"); } getVeniceHelixAdmin().checkPreConditionForAclOp(clusterName, storeName); String accessPerms = fetchAclsForStore(storeName); @@ -4706,7 +4320,7 @@ public void deleteAclForStore(String clusterName, String storeName) { try (AutoCloseableLock ignore = resources.getClusterLockManager().createStoreWriteLock(storeName)) { LOGGER.info("ACLProvisioning: DeleteAcl for store: {} in cluster: {}", storeName, clusterName); if (!authorizerService.isPresent()) { - throw new VeniceUnsupportedOperationException("deleteAclForStore is not supported yet!"); + throw new VeniceUnsupportedOperationException("deleteAclForStore"); } Store store = getVeniceHelixAdmin().checkPreConditionForAclOp(clusterName, storeName); if (!store.isMigrating()) { @@ -4871,6 +4485,14 @@ public boolean isParent() { return 
getVeniceHelixAdmin().isParent(); } + /** + * @see Admin#isPrimary() + */ + @Override + public boolean isPrimary() { + return getVeniceHelixAdmin().isPrimary(); + } + /** * @see Admin#getParentControllerRegionState() */ @@ -4973,13 +4595,6 @@ public VeniceHelixAdmin getVeniceHelixAdmin() { return veniceHelixAdmin; } - private Function addToUpdatedConfigList(List updatedConfigList, String config) { - return (configValue) -> { - updatedConfigList.add(config); - return configValue; - }; - } - /** * @see Admin#getBackupVersionDefaultRetentionMs() */ @@ -5246,7 +4861,8 @@ LingeringStoreVersionChecker getLingeringStoreVersionChecker() { return lingeringStoreVersionChecker; } - VeniceControllerMultiClusterConfig getMultiClusterConfigs() { + @Override + public VeniceControllerMultiClusterConfig getMultiClusterConfigs() { return multiClusterConfigs; } @@ -5298,6 +4914,13 @@ public void createStoragePersona( Set owners) { getVeniceHelixAdmin().checkControllerLeadershipFor(clusterName); + StoragePersonaRepository repository = + getVeniceHelixAdmin().getHelixVeniceClusterResources(clusterName).getStoragePersonaRepository(); + if (repository.hasPersona(name)) { + throw new VeniceException("Persona with name " + name + " already exists"); + } + repository.validatePersona(name, quotaNumber, storesToEnforce, owners); + CreateStoragePersona createStoragePersona = (CreateStoragePersona) AdminMessageType.CREATE_STORAGE_PERSONA.getNewInstance(); createStoragePersona.setClusterName(clusterName); @@ -5310,12 +4933,6 @@ public void createStoragePersona( message.operationType = AdminMessageType.CREATE_STORAGE_PERSONA.getValue(); message.payloadUnion = createStoragePersona; - StoragePersonaRepository repository = - getVeniceHelixAdmin().getHelixVeniceClusterResources(clusterName).getStoragePersonaRepository(); - if (repository.hasPersona(name)) { - throw new VeniceException("Persona with name " + name + " already exists"); - } - repository.validatePersona(name, quotaNumber, storesToEnforce, owners); sendAdminMessageAndWaitForConsumed(clusterName, null, message); } @@ -5352,6 +4969,11 @@ public void deleteStoragePersona(String clusterName, String name) { @Override public void updateStoragePersona(String clusterName, String name, UpdateStoragePersonaQueryParams queryParams) { getVeniceHelixAdmin().checkControllerLeadershipFor(clusterName); + + StoragePersonaRepository repository = + getVeniceHelixAdmin().getHelixVeniceClusterResources(clusterName).getStoragePersonaRepository(); + repository.validatePersonaUpdate(name, queryParams); + UpdateStoragePersona updateStoragePersona = (UpdateStoragePersona) AdminMessageType.UPDATE_STORAGE_PERSONA.getNewInstance(); updateStoragePersona.setClusterName(clusterName); @@ -5363,9 +4985,6 @@ public void updateStoragePersona(String clusterName, String name, UpdateStorageP message.operationType = AdminMessageType.UPDATE_STORAGE_PERSONA.getValue(); message.payloadUnion = updateStoragePersona; - StoragePersonaRepository repository = - getVeniceHelixAdmin().getHelixVeniceClusterResources(clusterName).getStoragePersonaRepository(); - repository.validatePersonaUpdate(name, queryParams); sendAdminMessageAndWaitForConsumed(clusterName, null, message); } diff --git a/services/venice-controller/src/main/java/com/linkedin/venice/controller/init/SystemSchemaInitializationRoutine.java b/services/venice-controller/src/main/java/com/linkedin/venice/controller/init/SystemSchemaInitializationRoutine.java index f3a179c7300..de50543b28b 100644 --- 
a/services/venice-controller/src/main/java/com/linkedin/venice/controller/init/SystemSchemaInitializationRoutine.java +++ b/services/venice-controller/src/main/java/com/linkedin/venice/controller/init/SystemSchemaInitializationRoutine.java @@ -5,14 +5,13 @@ import com.linkedin.venice.VeniceConstants; import com.linkedin.venice.controller.VeniceControllerMultiClusterConfig; import com.linkedin.venice.controller.VeniceHelixAdmin; +import com.linkedin.venice.controller.util.PrimaryControllerConfigUpdateUtils; import com.linkedin.venice.controllerapi.UpdateStoreQueryParams; import com.linkedin.venice.exceptions.VeniceException; import com.linkedin.venice.exceptions.VeniceNoStoreException; import com.linkedin.venice.meta.Store; -import com.linkedin.venice.schema.GeneratedSchemaID; import com.linkedin.venice.schema.SchemaEntry; import com.linkedin.venice.schema.avro.DirectionalSchemaCompatibilityType; -import com.linkedin.venice.schema.writecompute.WriteComputeSchemaConverter; import com.linkedin.venice.serialization.avro.AvroProtocolDefinition; import com.linkedin.venice.utils.Pair; import com.linkedin.venice.utils.Utils; @@ -34,13 +33,12 @@ public class SystemSchemaInitializationRoutine implements ClusterLeaderInitializ private final VeniceHelixAdmin admin; private final Optional keySchema; private final Optional storeMetadataUpdate; - private final boolean autoRegisterDerivedComputeSchema; public SystemSchemaInitializationRoutine( AvroProtocolDefinition protocolDefinition, VeniceControllerMultiClusterConfig multiClusterConfigs, VeniceHelixAdmin admin) { - this(protocolDefinition, multiClusterConfigs, admin, Optional.empty(), Optional.empty(), false); + this(protocolDefinition, multiClusterConfigs, admin, Optional.empty(), Optional.empty()); } public SystemSchemaInitializationRoutine( @@ -48,14 +46,12 @@ public SystemSchemaInitializationRoutine( VeniceControllerMultiClusterConfig multiClusterConfigs, VeniceHelixAdmin admin, Optional keySchema, - Optional storeMetadataUpdate, - boolean autoRegisterDerivedComputeSchema) { + Optional storeMetadataUpdate) { this.protocolDefinition = protocolDefinition; this.multiClusterConfigs = multiClusterConfigs; this.admin = admin; this.keySchema = keySchema; this.storeMetadataUpdate = storeMetadataUpdate; - this.autoRegisterDerivedComputeSchema = autoRegisterDerivedComputeSchema; } /** @@ -194,31 +190,13 @@ public void execute(String clusterToInit) { schemaInLocalResources.toString(true)); } } - if (autoRegisterDerivedComputeSchema) { - // Check and register Write Compute schema - String writeComputeSchema = - WriteComputeSchemaConverter.getInstance().convertFromValueRecordSchema(schemaInLocalResources).toString(); - GeneratedSchemaID derivedSchemaInfo = - admin.getDerivedSchemaId(clusterToInit, systemStoreName, writeComputeSchema); - if (!derivedSchemaInfo.isValid()) { - /** - * The derived schema doesn't exist right now, try to register it. - */ - try { - admin.addDerivedSchema(clusterToInit, systemStoreName, valueSchemaVersion, writeComputeSchema); - } catch (Exception e) { - LOGGER.error( - "Caught Exception when attempting to register the derived compute schema for '{}' schema version '{}'. 
Will bubble up.", - protocolDefinition.name(), - valueSchemaVersion, - e); - throw e; - } - LOGGER.info( - "Added the derived compute schema for the new schema v{} to system store '{}'.", - valueSchemaVersion, - systemStoreName); - } + + boolean writeComputationEnabled = + storeMetadataUpdate.map(params -> params.getWriteComputationEnabled().orElse(false)).orElse(false); + + if (writeComputationEnabled) { + // Register partial update schemas (aka derived schemas) + PrimaryControllerConfigUpdateUtils.addUpdateSchemaForStore(admin, clusterToInit, systemStoreName, false); } } } diff --git a/services/venice-controller/src/main/java/com/linkedin/venice/controller/init/SystemStoreInitializationHelper.java b/services/venice-controller/src/main/java/com/linkedin/venice/controller/init/SystemStoreInitializationHelper.java index 49af4bc9fe6..fc4270c6bc0 100644 --- a/services/venice-controller/src/main/java/com/linkedin/venice/controller/init/SystemStoreInitializationHelper.java +++ b/services/venice-controller/src/main/java/com/linkedin/venice/controller/init/SystemStoreInitializationHelper.java @@ -3,6 +3,7 @@ import com.linkedin.venice.VeniceConstants; import com.linkedin.venice.controller.Admin; import com.linkedin.venice.controller.VeniceControllerMultiClusterConfig; +import com.linkedin.venice.controller.util.PrimaryControllerConfigUpdateUtils; import com.linkedin.venice.controllerapi.UpdateStoreQueryParams; import com.linkedin.venice.exceptions.VeniceException; import com.linkedin.venice.meta.Store; @@ -161,6 +162,11 @@ public static void setupSystemStore( } if (updateStoreQueryParams != null && updateStoreCheckSupplier.apply(store)) { + if (store.getPartitionCount() == 0 && !updateStoreQueryParams.getPartitionCount().isPresent()) { + updateStoreQueryParams + .setPartitionCount(multiClusterConfigs.getControllerConfig(clusterName).getMinNumberOfPartitions()); + } + admin.updateStore(clusterName, systemStoreName, updateStoreQueryParams); store = RetryUtils.executeWithMaxAttempt(() -> { @@ -172,14 +178,29 @@ public static void setupSystemStore( throw new VeniceException("Unable to update store " + systemStoreName); } + if (internalStore.getPartitionCount() == 0) { + throw new VeniceException("Partition count is still 0 after updating store " + systemStoreName); + } + return internalStore; }, 5, delayBetweenStoreUpdateRetries, Collections.singletonList(VeniceException.class)); LOGGER.info("Updated internal store " + systemStoreName + " in cluster " + clusterName); } + boolean activeActiveReplicationEnabled = false; + if (updateStoreQueryParams != null) { + activeActiveReplicationEnabled = updateStoreQueryParams.getActiveActiveReplicationEnabled().orElse(false); + } + + if (activeActiveReplicationEnabled) { + // Now that store has enabled A/A and all value schemas are registered, register RMD schemas + PrimaryControllerConfigUpdateUtils + .updateReplicationMetadataSchemaForAllValueSchema(admin, clusterName, systemStoreName); + } + if (store.getCurrentVersion() <= 0) { - int partitionCount = multiClusterConfigs.getControllerConfig(clusterName).getMinNumberOfPartitions(); + int partitionCount = store.getPartitionCount(); int replicationFactor = admin.getReplicationFactor(clusterName, systemStoreName); Version version = admin.incrementVersionIdempotent( clusterName, diff --git a/services/venice-controller/src/main/java/com/linkedin/venice/controller/kafka/consumer/AdminConsumptionTask.java 
b/services/venice-controller/src/main/java/com/linkedin/venice/controller/kafka/consumer/AdminConsumptionTask.java index 4d4bd4a5b39..aaf74ab3056 100644 --- a/services/venice-controller/src/main/java/com/linkedin/venice/controller/kafka/consumer/AdminConsumptionTask.java +++ b/services/venice-controller/src/main/java/com/linkedin/venice/controller/kafka/consumer/AdminConsumptionTask.java @@ -323,6 +323,8 @@ public void run() { while (isRunning.get()) { try { Utils.sleep(READ_CYCLE_DELAY_MS); + // We don't really need to check if the controller is the leader for the cluster here, because it is checked in + // isAdminTopicConsumptionEnabled. However, we still check it here because it helps in testing. if (!admin.isLeaderControllerFor(clusterName) || !admin.isAdminTopicConsumptionEnabled(clusterName)) { unSubscribe(); continue; diff --git a/services/venice-controller/src/main/java/com/linkedin/venice/controller/kafka/consumer/AdminExecutionTask.java b/services/venice-controller/src/main/java/com/linkedin/venice/controller/kafka/consumer/AdminExecutionTask.java index e3aacf847db..d405e5c0f50 100644 --- a/services/venice-controller/src/main/java/com/linkedin/venice/controller/kafka/consumer/AdminExecutionTask.java +++ b/services/venice-controller/src/main/java/com/linkedin/venice/controller/kafka/consumer/AdminExecutionTask.java @@ -426,8 +426,6 @@ private void handleSetStoreCurrentVersion(SetStoreCurrentVersion message) { String storeName = message.storeName.toString(); int version = message.currentVersion; admin.setStoreCurrentVersion(clusterName, storeName, version); - - LOGGER.info("Set store: {} version to {} in cluster: {}", storeName, version, clusterName); } private void handleSetStoreOwner(SetStoreOwner message) { diff --git a/services/venice-controller/src/main/java/com/linkedin/venice/controller/supersetschema/SupersetSchemaGeneratorWithCustomProp.java b/services/venice-controller/src/main/java/com/linkedin/venice/controller/supersetschema/SupersetSchemaGeneratorWithCustomProp.java index 92519628b53..7a5439a3e61 100644 --- a/services/venice-controller/src/main/java/com/linkedin/venice/controller/supersetschema/SupersetSchemaGeneratorWithCustomProp.java +++ b/services/venice-controller/src/main/java/com/linkedin/venice/controller/supersetschema/SupersetSchemaGeneratorWithCustomProp.java @@ -1,10 +1,13 @@ package com.linkedin.venice.controller.supersetschema; +import com.linkedin.avroutil1.compatibility.AvroCompatibilityHelper; import com.linkedin.venice.schema.AvroSchemaParseUtils; import com.linkedin.venice.schema.SchemaEntry; import com.linkedin.venice.utils.AvroSchemaUtils; import com.linkedin.venice.utils.AvroSupersetSchemaUtils; +import java.util.ArrayList; import java.util.Collection; +import java.util.List; import org.apache.avro.Schema; @@ -41,12 +44,30 @@ public SchemaEntry generateSupersetSchemaFromSchemas(Collection sch * Check whether the latest value schema contains {@link #customProp} or not. 
*/ String customPropInLatestValueSchema = latestValueSchemaEntry.getSchema().getProp(customProp); - if (customPropInLatestValueSchema != null && supersetSchemaEntry.getSchema().getProp(customProp) == null) { + Schema existingSupersetSchema = supersetSchemaEntry.getSchema(); + if (customPropInLatestValueSchema != null + && !customPropInLatestValueSchema.equals(existingSupersetSchema.getProp(customProp))) { + List existingSupersetSchemaFields = existingSupersetSchema.getFields(); + List fieldList = new ArrayList<>(existingSupersetSchemaFields.size()); + for (Schema.Field field: existingSupersetSchemaFields) { + fieldList.add(AvroCompatibilityHelper.newField(field).build()); + } + Schema newSupersetSchema = Schema.createRecord( + existingSupersetSchema.getName(), + existingSupersetSchema.getDoc(), + existingSupersetSchema.getNamespace(), + existingSupersetSchema.isError(), + fieldList); + /** - * The 'supersetSchemaEntry' can contain a different custom prop value than the latest value schema, and - * custom prop value is not mutable. + * Custom props are not mutable, hence we need to copy all the existing props to the new schema */ - Schema newSupersetSchema = supersetSchemaEntry.clone().getSchema(); + AvroCompatibilityHelper.getAllPropNames(existingSupersetSchema).forEach(prop -> { + if (!prop.equals(customProp)) { + newSupersetSchema.addProp(prop, existingSupersetSchema.getProp(prop)); + } + }); + // Not empty, then copy it to the superset schema newSupersetSchema.addProp(customProp, customPropInLatestValueSchema); // Check whether this new schema exists or not diff --git a/services/venice-controller/src/main/java/com/linkedin/venice/controller/util/AdminUtils.java b/services/venice-controller/src/main/java/com/linkedin/venice/controller/util/AdminUtils.java new file mode 100644 index 00000000000..37f661533f7 --- /dev/null +++ b/services/venice-controller/src/main/java/com/linkedin/venice/controller/util/AdminUtils.java @@ -0,0 +1,73 @@ +package com.linkedin.venice.controller.util; + +import com.linkedin.venice.ConfigConstants; +import com.linkedin.venice.controller.Admin; +import com.linkedin.venice.controller.VeniceControllerClusterConfig; +import com.linkedin.venice.controller.kafka.protocol.admin.HybridStoreConfigRecord; +import com.linkedin.venice.exceptions.VeniceException; +import com.linkedin.venice.meta.BufferReplayPolicy; +import com.linkedin.venice.meta.DataReplicationPolicy; +import com.linkedin.venice.meta.HybridStoreConfig; +import com.linkedin.venice.meta.HybridStoreConfigImpl; +import com.linkedin.venice.meta.Store; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + + +public class AdminUtils { + private static final Logger LOGGER = LogManager.getLogger(AdminUtils.class); + + private AdminUtils() { + } + + public static boolean isHybrid(HybridStoreConfigRecord hybridStoreConfigRecord) { + HybridStoreConfig hybridStoreConfig = null; + if (hybridStoreConfigRecord != null) { + hybridStoreConfig = new HybridStoreConfigImpl( + hybridStoreConfigRecord.rewindTimeInSeconds, + hybridStoreConfigRecord.offsetLagThresholdToGoOnline, + hybridStoreConfigRecord.producerTimestampLagThresholdToGoOnlineInSeconds, + DataReplicationPolicy.valueOf(hybridStoreConfigRecord.dataReplicationPolicy), + BufferReplayPolicy.valueOf(hybridStoreConfigRecord.bufferReplayPolicy)); + } + return isHybrid(hybridStoreConfig); + } + + /** + * A store is not hybrid in the following two scenarios: + * If hybridStoreConfig is null, it means store is not hybrid. 
+ * If all the hybrid config values are negative, it indicates that the store is being set back to batch-only store. + */ + public static boolean isHybrid(HybridStoreConfig hybridStoreConfig) { + return hybridStoreConfig != null && hybridStoreConfig.isHybrid(); + } + + public static int getRmdVersionID(Admin admin, String storeName, String clusterName) { + final Store store = admin.getStore(clusterName, storeName); + if (store == null) { + LOGGER.warn( + "No store found in the store repository. Will get store-level RMD version ID from cluster config. " + + "Store name: {}, cluster: {}", + storeName, + clusterName); + } else if (store.getRmdVersion() == ConfigConstants.UNSPECIFIED_REPLICATION_METADATA_VERSION) { + LOGGER.info("No store-level RMD version ID found for store {} in cluster {}", storeName, clusterName); + } else { + LOGGER.info( + "Found store-level RMD version ID {} for store {} in cluster {}", + store.getRmdVersion(), + storeName, + clusterName); + return store.getRmdVersion(); + } + + final VeniceControllerClusterConfig controllerClusterConfig = + admin.getMultiClusterConfigs().getControllerConfig(clusterName); + if (controllerClusterConfig == null) { + throw new VeniceException("No controller cluster config found for cluster " + clusterName); + } + final int rmdVersionID = controllerClusterConfig.getReplicationMetadataVersion(); + LOGGER.info("Use RMD version ID {} for cluster {}", rmdVersionID, clusterName); + return rmdVersionID; + } +} diff --git a/services/venice-controller/src/main/java/com/linkedin/venice/controller/util/ParentControllerConfigUpdateUtils.java b/services/venice-controller/src/main/java/com/linkedin/venice/controller/util/ParentControllerConfigUpdateUtils.java deleted file mode 100644 index cbd1cc92413..00000000000 --- a/services/venice-controller/src/main/java/com/linkedin/venice/controller/util/ParentControllerConfigUpdateUtils.java +++ /dev/null @@ -1,171 +0,0 @@ -package com.linkedin.venice.controller.util; - -import com.linkedin.venice.controller.VeniceControllerClusterConfig; -import com.linkedin.venice.controller.VeniceParentHelixAdmin; -import com.linkedin.venice.controller.kafka.protocol.admin.UpdateStore; -import com.linkedin.venice.exceptions.VeniceException; -import com.linkedin.venice.meta.Store; -import com.linkedin.venice.schema.SchemaEntry; -import com.linkedin.venice.schema.writecompute.WriteComputeSchemaConverter; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Comparator; -import java.util.List; -import java.util.Optional; -import org.apache.avro.Schema; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; - - -/** - * This class is a utility class for Parent Controller store update logics. - * The method here aims to take in current status and request params to determine if certain feature is updated / should - * be updated based on some customized logics. - */ -public class ParentControllerConfigUpdateUtils { - public static final Logger LOGGER = LogManager.getLogger(ParentControllerConfigUpdateUtils.class); - public static final WriteComputeSchemaConverter updateSchemaConverter = WriteComputeSchemaConverter.getInstance(); - - /** - * This method takes in current status and request and try to determine whether to change partial update config. - * The check logic is: - * Step (1): If there is explict request, we will respect the request and maybe update config if new request value is - * different from existing config value. 
In this step, if we are enabling partial update, we will also perform a dry - * run to validate schema. If validation fails, it will throw exception and fail the whole request. - * Step (2): If there is NO explict request and store is being converted into hybrid store, we will check the cluster - * config and store's latest A/A config to see whether we should by default enable partial update. If so, we will also - * perform a dry on to validate schema. If validation fails, it will swallow the exception and log warning message. It - * will not turn on partial update and will not fail the whole request. - */ - public static boolean checkAndMaybeApplyPartialUpdateConfig( - VeniceParentHelixAdmin parentHelixAdmin, - String clusterName, - String storeName, - Optional partialUpdateRequest, - UpdateStore setStore, - boolean storeBeingConvertedToHybrid) { - Store currentStore = parentHelixAdmin.getVeniceHelixAdmin().getStore(clusterName, storeName); - VeniceControllerClusterConfig controllerConfig = - parentHelixAdmin.getVeniceHelixAdmin().getHelixVeniceClusterResources(clusterName).getConfig(); - boolean partialUpdateConfigChanged = false; - setStore.writeComputationEnabled = currentStore.isWriteComputationEnabled(); - if (partialUpdateRequest.isPresent()) { - setStore.writeComputationEnabled = partialUpdateRequest.get(); - if (partialUpdateRequest.get() && !currentStore.isWriteComputationEnabled()) { - // Dry-run generating update schemas before sending admin messages to enable partial update because - // update schema generation may fail due to some reasons. If that happens, abort the store update process. - addUpdateSchemaForStore(parentHelixAdmin, clusterName, storeName, true); - } - // Explicit request to change partial update config has the highest priority. - return true; - } - /** - * If a store: - * (1) Is being converted to hybrid; - * (2) Is not partial update enabled for now; - * (3) Does not request to change partial update config; - * It means partial update is not enabled, and there is no explict intention to change it. In this case, we will - * check cluster default config based on the replication policy to determine whether to try to enable partial update. - */ - final boolean shouldEnablePartialUpdateBasedOnClusterConfig = - storeBeingConvertedToHybrid && (setStore.activeActiveReplicationEnabled - ? controllerConfig.isEnablePartialUpdateForHybridActiveActiveUserStores() - : controllerConfig.isEnablePartialUpdateForHybridNonActiveActiveUserStores()); - if (!currentStore.isWriteComputationEnabled() && shouldEnablePartialUpdateBasedOnClusterConfig) { - LOGGER.info("Controller will try to enable partial update based on cluster config for store: " + storeName); - /** - * When trying to turn on partial update based on cluster config, if schema generation failed, we will not fail the - * whole request, but just do NOT turn on partial update, as other config update should still be respected. 
- */ - try { - addUpdateSchemaForStore(parentHelixAdmin, clusterName, storeName, true); - setStore.writeComputationEnabled = true; - partialUpdateConfigChanged = true; - } catch (Exception e) { - LOGGER.warn( - "Caught exception when trying to enable partial update base on cluster config, will not enable partial update for store: " - + storeName, - e); - } - } - return partialUpdateConfigChanged; - } - - public static boolean checkAndMaybeApplyChunkingConfigChange( - VeniceParentHelixAdmin parentHelixAdmin, - String clusterName, - String storeName, - Optional chunkingRequest, - UpdateStore setStore) { - Store currentStore = parentHelixAdmin.getVeniceHelixAdmin().getStore(clusterName, storeName); - setStore.chunkingEnabled = currentStore.isChunkingEnabled(); - if (chunkingRequest.isPresent()) { - setStore.chunkingEnabled = chunkingRequest.get(); - // Explicit request to change chunking config has the highest priority. - return true; - } - // If partial update is just enabled, we will by default enable chunking, if no explict request to update chunking - // config. - if (!currentStore.isWriteComputationEnabled() && setStore.writeComputationEnabled - && !currentStore.isChunkingEnabled()) { - setStore.chunkingEnabled = true; - return true; - } - return false; - } - - public static boolean checkAndMaybeApplyRmdChunkingConfigChange( - VeniceParentHelixAdmin parentHelixAdmin, - String clusterName, - String storeName, - Optional rmdChunkingRequest, - UpdateStore setStore) { - Store currentStore = parentHelixAdmin.getVeniceHelixAdmin().getStore(clusterName, storeName); - setStore.rmdChunkingEnabled = currentStore.isRmdChunkingEnabled(); - if (rmdChunkingRequest.isPresent()) { - setStore.rmdChunkingEnabled = rmdChunkingRequest.get(); - // Explicit request to change RMD chunking config has the highest priority. - return true; - } - // If partial update is just enabled and A/A is enabled, we will by default enable RMD chunking, if no explict - // request to update RMD chunking config. - if (!currentStore.isWriteComputationEnabled() && setStore.writeComputationEnabled - && setStore.activeActiveReplicationEnabled && !currentStore.isRmdChunkingEnabled()) { - setStore.rmdChunkingEnabled = true; - return true; - } - return false; - } - - public static void addUpdateSchemaForStore( - VeniceParentHelixAdmin parentHelixAdmin, - String clusterName, - String storeName, - boolean dryRun) { - Collection valueSchemaEntries = parentHelixAdmin.getValueSchemas(clusterName, storeName); - List updateSchemaEntries = new ArrayList<>(valueSchemaEntries.size()); - int maxId = valueSchemaEntries.stream().map(SchemaEntry::getId).max(Comparator.naturalOrder()).get(); - for (SchemaEntry valueSchemaEntry: valueSchemaEntries) { - try { - Schema updateSchema = updateSchemaConverter.convertFromValueRecordSchema(valueSchemaEntry.getSchema()); - updateSchemaEntries.add(new SchemaEntry(valueSchemaEntry.getId(), updateSchema)); - } catch (Exception e) { - // Allow failure in update schema generation in all schema except the latest value schema - if (valueSchemaEntry.getId() == maxId) { - throw new VeniceException( - "For store " + storeName + " cannot generate update schema for value schema ID :" - + valueSchemaEntry.getId() + ", top level field probably missing defaults.", - e); - } - } - } - // Add update schemas only after all update schema generation succeeded. 
- if (dryRun) { - return; - } - for (SchemaEntry updateSchemaEntry: updateSchemaEntries) { - parentHelixAdmin - .addDerivedSchema(clusterName, storeName, updateSchemaEntry.getId(), updateSchemaEntry.getSchemaStr()); - } - } -} diff --git a/services/venice-controller/src/main/java/com/linkedin/venice/controller/util/PrimaryControllerConfigUpdateUtils.java b/services/venice-controller/src/main/java/com/linkedin/venice/controller/util/PrimaryControllerConfigUpdateUtils.java new file mode 100644 index 00000000000..cecc7a955e8 --- /dev/null +++ b/services/venice-controller/src/main/java/com/linkedin/venice/controller/util/PrimaryControllerConfigUpdateUtils.java @@ -0,0 +1,170 @@ +package com.linkedin.venice.controller.util; + +import com.linkedin.venice.controller.Admin; +import com.linkedin.venice.controller.supersetschema.SupersetSchemaGenerator; +import com.linkedin.venice.exceptions.VeniceException; +import com.linkedin.venice.meta.Store; +import com.linkedin.venice.schema.SchemaData; +import com.linkedin.venice.schema.SchemaEntry; +import com.linkedin.venice.schema.rmd.RmdSchemaEntry; +import com.linkedin.venice.schema.rmd.RmdSchemaGenerator; +import com.linkedin.venice.schema.writecompute.WriteComputeSchemaConverter; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Comparator; +import java.util.List; +import org.apache.avro.Schema; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + + +/** + * This class is a utility class for Primary Controller store update logics. + * Primary controller is the Parent controller in a multi-region deployment, and it is the Child Controller in a single-region deployment. + * The method here aims to take in current status and request params to determine if certain feature is updated / should + * be updated based on some customized logics. + */ +public class PrimaryControllerConfigUpdateUtils { + public static final Logger LOGGER = LogManager.getLogger(PrimaryControllerConfigUpdateUtils.class); + public static final WriteComputeSchemaConverter UPDATE_SCHEMA_CONVERTER = WriteComputeSchemaConverter.getInstance(); + + /** + * A store can have various schemas that are inferred based on the store's other properties (store configs, existing schemas, etc) + * This function is expected to register all such inferred schemas and it should be invoked on updates to the store's + * configs or schemas. + * + * This should only be executed in the primary controller. In a multi-region mode, the child controller is expected to + * get these updates via the admin channel. + */ + public static void registerInferredSchemas(Admin admin, String clusterName, String storeName) { + if (!UpdateStoreUtils.isInferredStoreUpdateAllowed(admin, storeName)) { + return; + } + + Store store = admin.getStore(clusterName, storeName); + + /** + * Register new superset schemas if either of the following conditions are met: + * 1. There is an existing superset schema + * 2. Read computation is enabled + * 3. 
Write computation is enabled + */ + if (store.isReadComputationEnabled() || store.isWriteComputationEnabled() + || store.getLatestSuperSetValueSchemaId() != SchemaData.INVALID_VALUE_SCHEMA_ID) { + addSupersetSchemaForStore(admin, clusterName, store); + } + + if (store.isWriteComputationEnabled()) { + // Register partial update schemas (aka derived schemas) + addUpdateSchemaForStore(admin, clusterName, storeName, false); + } + + if (store.isActiveActiveReplicationEnabled()) { + // Register RMD schemas + updateReplicationMetadataSchemaForAllValueSchema(admin, clusterName, storeName); + } + } + + private static void addSupersetSchemaForStore(Admin admin, String clusterName, Store store) { + String storeName = store.getName(); + SupersetSchemaGenerator supersetSchemaGenerator = admin.getSupersetSchemaGenerator(clusterName); + SchemaEntry supersetSchemaEntry = + supersetSchemaGenerator.generateSupersetSchemaFromSchemas(admin.getValueSchemas(clusterName, storeName)); + admin.addSupersetSchema( + clusterName, + storeName, + null, + SchemaData.INVALID_VALUE_SCHEMA_ID, + supersetSchemaEntry.getSchemaStr(), + supersetSchemaEntry.getId()); + } + + public static void addUpdateSchemaForStore(Admin admin, String clusterName, String storeName, boolean dryRun) { + Collection valueSchemaEntries = admin.getValueSchemas(clusterName, storeName); + List updateSchemaEntries = new ArrayList<>(valueSchemaEntries.size()); + int maxId = valueSchemaEntries.stream().map(SchemaEntry::getId).max(Comparator.naturalOrder()).get(); + for (SchemaEntry valueSchemaEntry: valueSchemaEntries) { + try { + Schema updateSchema = UPDATE_SCHEMA_CONVERTER.convertFromValueRecordSchema(valueSchemaEntry.getSchema()); + updateSchemaEntries.add(new SchemaEntry(valueSchemaEntry.getId(), updateSchema)); + } catch (Exception e) { + // Allow failure in update schema generation in all schema except the latest value schema + if (valueSchemaEntry.getId() == maxId) { + throw new VeniceException( + "For store " + storeName + " cannot generate update schema for value schema ID :" + + valueSchemaEntry.getId() + ", top level field probably missing defaults.", + e); + } + } + } + // Add update schemas only after all update schema generation succeeded. + if (dryRun) { + return; + } + for (SchemaEntry updateSchemaEntry: updateSchemaEntries) { + admin.addDerivedSchema(clusterName, storeName, updateSchemaEntry.getId(), updateSchemaEntry.getSchemaStr()); + } + } + + public static void updateReplicationMetadataSchemaForAllValueSchema( + Admin admin, + String clusterName, + String storeName) { + final Collection valueSchemas = admin.getValueSchemas(clusterName, storeName); + for (SchemaEntry valueSchemaEntry: valueSchemas) { + updateReplicationMetadataSchema( + admin, + clusterName, + storeName, + valueSchemaEntry.getSchema(), + valueSchemaEntry.getId()); + } + } + + private static void updateReplicationMetadataSchema( + Admin admin, + String clusterName, + String storeName, + Schema valueSchema, + int valueSchemaId) { + final int rmdVersionId = AdminUtils.getRmdVersionID(admin, storeName, clusterName); + final boolean valueSchemaAlreadyHasRmdSchema = + checkIfValueSchemaAlreadyHasRmdSchema(admin, clusterName, storeName, valueSchemaId, rmdVersionId); + if (valueSchemaAlreadyHasRmdSchema) { + LOGGER.info( + "Store {} in cluster {} already has a replication metadata schema for its value schema with ID {} and " + + "replication metadata version ID {}. 
So skip updating this value schema's RMD schema.", + storeName, + clusterName, + valueSchemaId, + rmdVersionId); + return; + } + String replicationMetadataSchemaStr = + RmdSchemaGenerator.generateMetadataSchema(valueSchema, rmdVersionId).toString(); + admin.addReplicationMetadataSchema( + clusterName, + storeName, + valueSchemaId, + rmdVersionId, + replicationMetadataSchemaStr); + } + + private static boolean checkIfValueSchemaAlreadyHasRmdSchema( + Admin admin, + String clusterName, + String storeName, + final int valueSchemaID, + final int replicationMetadataVersionId) { + Collection schemaEntries = admin.getHelixVeniceClusterResources(clusterName) + .getSchemaRepository() + .getReplicationMetadataSchemas(storeName); + for (RmdSchemaEntry rmdSchemaEntry: schemaEntries) { + if (rmdSchemaEntry.getValueSchemaID() == valueSchemaID + && rmdSchemaEntry.getId() == replicationMetadataVersionId) { + return true; + } + } + return false; + } +} diff --git a/services/venice-controller/src/main/java/com/linkedin/venice/controller/util/UpdateStoreUtils.java b/services/venice-controller/src/main/java/com/linkedin/venice/controller/util/UpdateStoreUtils.java new file mode 100644 index 00000000000..51976e1b85f --- /dev/null +++ b/services/venice-controller/src/main/java/com/linkedin/venice/controller/util/UpdateStoreUtils.java @@ -0,0 +1,1173 @@ +package com.linkedin.venice.controller.util; + +import static com.linkedin.venice.controllerapi.ControllerApiConstants.ACCESS_CONTROLLED; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.ACTIVE_ACTIVE_REPLICATION_ENABLED; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.AMPLIFICATION_FACTOR; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.AUTO_SCHEMA_REGISTER_FOR_PUSHJOB_ENABLED; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.BACKUP_STRATEGY; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.BACKUP_VERSION_RETENTION_MS; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.BATCH_GET_LIMIT; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.BLOB_TRANSFER_ENABLED; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.BOOTSTRAP_TO_ONLINE_TIMEOUT_IN_HOURS; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.BUFFER_REPLAY_POLICY; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.CHUNKING_ENABLED; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.CLIENT_DECOMPRESSION_ENABLED; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.COMPRESSION_STRATEGY; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.DATA_REPLICATION_POLICY; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.DISABLE_DAVINCI_PUSH_STATUS_STORE; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.DISABLE_META_STORE; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.ENABLE_READS; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.ENABLE_WRITES; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.ETLED_PROXY_USER_ACCOUNT; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.FUTURE_VERSION_ETL_ENABLED; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.HYBRID_STORE_DISK_QUOTA_ENABLED; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.INCREMENTAL_PUSH_ENABLED; +import 
static com.linkedin.venice.controllerapi.ControllerApiConstants.LARGEST_USED_VERSION_NUMBER; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.LATEST_SUPERSET_SCHEMA_ID; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.MAX_COMPACTION_LAG_SECONDS; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.MAX_RECORD_SIZE_BYTES; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.MIGRATION_DUPLICATE_STORE; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.MIN_COMPACTION_LAG_SECONDS; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.NATIVE_REPLICATION_ENABLED; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.NATIVE_REPLICATION_SOURCE_FABRIC; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.NUM_VERSIONS_TO_PRESERVE; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.OFFSET_LAG_TO_GO_ONLINE; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.OWNER; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.PARTITIONER_CLASS; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.PARTITIONER_PARAMS; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.PARTITION_COUNT; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.PERSONA_NAME; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.PUSH_STREAM_SOURCE_ADDRESS; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.READ_COMPUTATION_ENABLED; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.READ_QUOTA_IN_CU; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.REGULAR_VERSION_ETL_ENABLED; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.REPLICATION_FACTOR; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.REPLICATION_METADATA_PROTOCOL_VERSION_ID; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.REWIND_TIME_IN_SECONDS; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.RMD_CHUNKING_ENABLED; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.STORAGE_NODE_READ_QUOTA_ENABLED; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.STORAGE_QUOTA_IN_BYTE; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.STORE_MIGRATION; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.STORE_VIEW; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.TIME_LAG_TO_GO_ONLINE; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.UNUSED_SCHEMA_DELETION_ENABLED; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.VERSION; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.WRITE_COMPUTATION_ENABLED; +import static com.linkedin.venice.meta.HybridStoreConfigImpl.DEFAULT_HYBRID_OFFSET_LAG_THRESHOLD; +import static com.linkedin.venice.meta.HybridStoreConfigImpl.DEFAULT_HYBRID_TIME_LAG_THRESHOLD; +import static com.linkedin.venice.meta.HybridStoreConfigImpl.DEFAULT_REWIND_TIME_IN_SECONDS; +import static com.linkedin.venice.utils.RegionUtils.parseRegionsFilterList; + +import com.linkedin.venice.common.VeniceSystemStoreUtils; +import com.linkedin.venice.compression.CompressionStrategy; +import com.linkedin.venice.controller.Admin; +import 
com.linkedin.venice.controller.HelixVeniceClusterResources; +import com.linkedin.venice.controller.StoreViewUtils; +import com.linkedin.venice.controller.VeniceControllerClusterConfig; +import com.linkedin.venice.controller.VeniceControllerMultiClusterConfig; +import com.linkedin.venice.controller.VeniceHelixAdmin; +import com.linkedin.venice.controller.VeniceParentHelixAdmin; +import com.linkedin.venice.controllerapi.UpdateStoreQueryParams; +import com.linkedin.venice.exceptions.ErrorType; +import com.linkedin.venice.exceptions.PartitionerSchemaMismatchException; +import com.linkedin.venice.exceptions.VeniceException; +import com.linkedin.venice.exceptions.VeniceHttpException; +import com.linkedin.venice.exceptions.VeniceNoStoreException; +import com.linkedin.venice.helix.StoragePersonaRepository; +import com.linkedin.venice.helix.ZkRoutersClusterManager; +import com.linkedin.venice.meta.BackupStrategy; +import com.linkedin.venice.meta.BufferReplayPolicy; +import com.linkedin.venice.meta.DataReplicationPolicy; +import com.linkedin.venice.meta.ETLStoreConfig; +import com.linkedin.venice.meta.ETLStoreConfigImpl; +import com.linkedin.venice.meta.HybridStoreConfig; +import com.linkedin.venice.meta.HybridStoreConfigImpl; +import com.linkedin.venice.meta.PartitionerConfig; +import com.linkedin.venice.meta.PartitionerConfigImpl; +import com.linkedin.venice.meta.Store; +import com.linkedin.venice.meta.Version; +import com.linkedin.venice.meta.ViewConfig; +import com.linkedin.venice.meta.ViewConfigImpl; +import com.linkedin.venice.persona.StoragePersona; +import com.linkedin.venice.pubsub.api.PubSubTopic; +import com.linkedin.venice.pubsub.manager.TopicManager; +import com.linkedin.venice.schema.SchemaData; +import com.linkedin.venice.utils.PartitionUtils; +import com.linkedin.venice.utils.VeniceProperties; +import com.linkedin.venice.views.VeniceView; +import com.linkedin.venice.views.ViewUtils; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Optional; +import java.util.Properties; +import java.util.Set; +import java.util.function.Function; +import org.apache.commons.lang.StringUtils; +import org.apache.http.HttpStatus; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + + +public class UpdateStoreUtils { + private static final Logger LOGGER = LogManager.getLogger(UpdateStoreUtils.class); + + private UpdateStoreUtils() { + } + + public static UpdateStoreWrapper getStoreUpdate( + Admin admin, + String clusterName, + String storeName, + UpdateStoreQueryParams params, + boolean checkRegionFilter) { + VeniceControllerMultiClusterConfig multiClusterConfigs = admin.getMultiClusterConfigs(); + + // There are certain configs that are only allowed to be updated in child regions. We might still want the ability + // to update such configs in the parent region via the Admin tool for operational reasons. So, we allow such updates + // if the regions filter only specifies one region, which is the parent region. + boolean onlyParentRegionFilter = false; + + // Check whether the command affects this region. 
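+    // For example (with hypothetical region names): a regions filter of "dc-0" evaluated on a controller in
+    // region "dc-1" makes the check below skip the update and return null (when checkRegionFilter is true).
+    // When this is the parent controller and the filter names exactly one region, onlyParentRegionFilter is
+    // set so that configs that are normally only updatable in child regions, such as the current version,
+    // can still be applied here for operational overrides.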
+ if (params.getRegionsFilter().isPresent()) { + Set regionsFilter = parseRegionsFilterList(params.getRegionsFilter().get()); + if (checkRegionFilter && !regionsFilter.contains(multiClusterConfigs.getRegionName())) { + LOGGER.info( + "UpdateStore command will be skipped for store: {} in cluster: {}, because the region filter is {}" + + " which doesn't include the current region: {}", + storeName, + clusterName, + regionsFilter, + multiClusterConfigs.getRegionName()); + return null; + } + + if (admin.isParent() && regionsFilter.size() == 1) { + onlyParentRegionFilter = true; + } + } + + Store originalStore = admin.getStore(clusterName, storeName); + if (originalStore == null) { + throw new VeniceNoStoreException(storeName, clusterName); + } + + UpdateStoreWrapper updateStoreWrapper = new UpdateStoreWrapper(originalStore); + Set updatedConfigs = updateStoreWrapper.updatedConfigs; + Store updatedStore = updateStoreWrapper.updatedStore; + + Optional owner = params.getOwner(); + Optional readability = params.getEnableReads(); + Optional writeability = params.getEnableWrites(); + Optional partitionCount = params.getPartitionCount(); + Optional partitionerClass = params.getPartitionerClass(); + Optional> partitionerParams = params.getPartitionerParams(); + Optional amplificationFactor = params.getAmplificationFactor(); + Optional storageQuotaInByte = params.getStorageQuotaInByte(); + Optional readQuotaInCU = params.getReadQuotaInCU(); + Optional currentVersion = params.getCurrentVersion(); + Optional largestUsedVersionNumber = params.getLargestUsedVersionNumber(); + Optional hybridRewindSeconds = params.getHybridRewindSeconds(); + Optional hybridOffsetLagThreshold = params.getHybridOffsetLagThreshold(); + Optional hybridTimeLagThreshold = params.getHybridTimeLagThreshold(); + Optional hybridDataReplicationPolicy = params.getHybridDataReplicationPolicy(); + Optional hybridBufferReplayPolicy = params.getHybridBufferReplayPolicy(); + Optional accessControlled = params.getAccessControlled(); + Optional compressionStrategy = params.getCompressionStrategy(); + Optional clientDecompressionEnabled = params.getClientDecompressionEnabled(); + Optional chunkingEnabled = params.getChunkingEnabled(); + Optional rmdChunkingEnabled = params.getRmdChunkingEnabled(); + Optional batchGetLimit = params.getBatchGetLimit(); + Optional numVersionsToPreserve = params.getNumVersionsToPreserve(); + Optional incrementalPushEnabled = params.getIncrementalPushEnabled(); + Optional storeMigration = params.getStoreMigration(); + Optional writeComputationEnabled = params.getWriteComputationEnabled(); + Optional replicationMetadataVersionID = params.getReplicationMetadataVersionID(); + Optional readComputationEnabled = params.getReadComputationEnabled(); + Optional bootstrapToOnlineTimeoutInHours = params.getBootstrapToOnlineTimeoutInHours(); + Optional backupStrategy = params.getBackupStrategy(); + Optional autoSchemaRegisterPushJobEnabled = params.getAutoSchemaRegisterPushJobEnabled(); + Optional hybridStoreDiskQuotaEnabled = params.getHybridStoreDiskQuotaEnabled(); + Optional regularVersionETLEnabled = params.getRegularVersionETLEnabled(); + Optional futureVersionETLEnabled = params.getFutureVersionETLEnabled(); + Optional etledUserProxyAccount = params.getETLedProxyUserAccount(); + Optional nativeReplicationEnabled = params.getNativeReplicationEnabled(); + Optional pushStreamSourceAddress = params.getPushStreamSourceAddress(); + Optional backupVersionRetentionMs = params.getBackupVersionRetentionMs(); + Optional 
replicationFactor = params.getReplicationFactor(); + Optional migrationDuplicateStore = params.getMigrationDuplicateStore(); + Optional nativeReplicationSourceFabric = params.getNativeReplicationSourceFabric(); + Optional activeActiveReplicationEnabled = params.getActiveActiveReplicationEnabled(); + Optional personaName = params.getStoragePersona(); + Optional> storeViewConfig = params.getStoreViews(); + Optional viewName = params.getViewName(); + Optional viewClassName = params.getViewClassName(); + Optional> viewParams = params.getViewClassParams(); + Optional removeView = params.getDisableStoreView(); + Optional latestSupersetSchemaId = params.getLatestSupersetSchemaId(); + Optional storageNodeReadQuotaEnabled = params.getStorageNodeReadQuotaEnabled(); + Optional minCompactionLagSeconds = params.getMinCompactionLagSeconds(); + Optional maxCompactionLagSeconds = params.getMaxCompactionLagSeconds(); + Optional maxRecordSizeBytes = params.getMaxRecordSizeBytes(); + Optional unusedSchemaDeletionEnabled = params.getUnusedSchemaDeletionEnabled(); + Optional blobTransferEnabled = params.getBlobTransferEnabled(); + + owner.map(addToUpdatedConfigs(updatedConfigs, OWNER)).ifPresent(updatedStore::setOwner); + readability.map(addToUpdatedConfigs(updatedConfigs, ENABLE_READS)).ifPresent(updatedStore::setEnableReads); + writeability.map(addToUpdatedConfigs(updatedConfigs, ENABLE_WRITES)).ifPresent(updatedStore::setEnableWrites); + partitionCount.map(addToUpdatedConfigs(updatedConfigs, PARTITION_COUNT)).ifPresent(updatedStore::setPartitionCount); + largestUsedVersionNumber.map(addToUpdatedConfigs(updatedConfigs, LARGEST_USED_VERSION_NUMBER)) + .ifPresent(updatedStore::setLargestUsedVersionNumber); + bootstrapToOnlineTimeoutInHours.map(addToUpdatedConfigs(updatedConfigs, BOOTSTRAP_TO_ONLINE_TIMEOUT_IN_HOURS)) + .ifPresent(updatedStore::setBootstrapToOnlineTimeoutInHours); + storageQuotaInByte.map(addToUpdatedConfigs(updatedConfigs, STORAGE_QUOTA_IN_BYTE)) + .ifPresent(updatedStore::setStorageQuotaInByte); + readQuotaInCU.map(addToUpdatedConfigs(updatedConfigs, READ_QUOTA_IN_CU)).ifPresent(updatedStore::setReadQuotaInCU); + accessControlled.map(addToUpdatedConfigs(updatedConfigs, ACCESS_CONTROLLED)) + .ifPresent(updatedStore::setAccessControlled); + compressionStrategy.map(addToUpdatedConfigs(updatedConfigs, COMPRESSION_STRATEGY)) + .ifPresent(updatedStore::setCompressionStrategy); + clientDecompressionEnabled.map(addToUpdatedConfigs(updatedConfigs, CLIENT_DECOMPRESSION_ENABLED)) + .ifPresent(updatedStore::setClientDecompressionEnabled); + chunkingEnabled.map(addToUpdatedConfigs(updatedConfigs, CHUNKING_ENABLED)) + .ifPresent(updatedStore::setChunkingEnabled); + rmdChunkingEnabled.map(addToUpdatedConfigs(updatedConfigs, RMD_CHUNKING_ENABLED)) + .ifPresent(updatedStore::setRmdChunkingEnabled); + batchGetLimit.map(addToUpdatedConfigs(updatedConfigs, BATCH_GET_LIMIT)).ifPresent(updatedStore::setBatchGetLimit); + numVersionsToPreserve.map(addToUpdatedConfigs(updatedConfigs, NUM_VERSIONS_TO_PRESERVE)) + .ifPresent(updatedStore::setNumVersionsToPreserve); + replicationFactor.map(addToUpdatedConfigs(updatedConfigs, REPLICATION_FACTOR)) + .ifPresent(updatedStore::setReplicationFactor); + storeMigration.map(addToUpdatedConfigs(updatedConfigs, STORE_MIGRATION)).ifPresent(updatedStore::setMigrating); + migrationDuplicateStore.map(addToUpdatedConfigs(updatedConfigs, MIGRATION_DUPLICATE_STORE)) + .ifPresent(updatedStore::setMigrationDuplicateStore); + writeComputationEnabled.map(addToUpdatedConfigs(updatedConfigs, 
WRITE_COMPUTATION_ENABLED)) + .ifPresent(updatedStore::setWriteComputationEnabled); + replicationMetadataVersionID.map(addToUpdatedConfigs(updatedConfigs, REPLICATION_METADATA_PROTOCOL_VERSION_ID)) + .ifPresent(updatedStore::setRmdVersion); + readComputationEnabled.map(addToUpdatedConfigs(updatedConfigs, READ_COMPUTATION_ENABLED)) + .ifPresent(updatedStore::setReadComputationEnabled); + nativeReplicationEnabled.map(addToUpdatedConfigs(updatedConfigs, NATIVE_REPLICATION_ENABLED)) + .ifPresent(updatedStore::setNativeReplicationEnabled); + activeActiveReplicationEnabled.map(addToUpdatedConfigs(updatedConfigs, ACTIVE_ACTIVE_REPLICATION_ENABLED)) + .ifPresent(updatedStore::setActiveActiveReplicationEnabled); + pushStreamSourceAddress.map(addToUpdatedConfigs(updatedConfigs, PUSH_STREAM_SOURCE_ADDRESS)) + .ifPresent(updatedStore::setPushStreamSourceAddress); + backupStrategy.map(addToUpdatedConfigs(updatedConfigs, BACKUP_STRATEGY)).ifPresent(updatedStore::setBackupStrategy); + autoSchemaRegisterPushJobEnabled.map(addToUpdatedConfigs(updatedConfigs, AUTO_SCHEMA_REGISTER_FOR_PUSHJOB_ENABLED)) + .ifPresent(updatedStore::setSchemaAutoRegisterFromPushJobEnabled); + hybridStoreDiskQuotaEnabled.map(addToUpdatedConfigs(updatedConfigs, HYBRID_STORE_DISK_QUOTA_ENABLED)) + .ifPresent(updatedStore::setHybridStoreDiskQuotaEnabled); + backupVersionRetentionMs.map(addToUpdatedConfigs(updatedConfigs, BACKUP_VERSION_RETENTION_MS)) + .ifPresent(updatedStore::setBackupVersionRetentionMs); + nativeReplicationSourceFabric.map(addToUpdatedConfigs(updatedConfigs, NATIVE_REPLICATION_SOURCE_FABRIC)) + .ifPresent(updatedStore::setNativeReplicationSourceFabric); + latestSupersetSchemaId.map(addToUpdatedConfigs(updatedConfigs, LATEST_SUPERSET_SCHEMA_ID)) + .ifPresent(updatedStore::setLatestSuperSetValueSchemaId); + minCompactionLagSeconds.map(addToUpdatedConfigs(updatedConfigs, MIN_COMPACTION_LAG_SECONDS)) + .ifPresent(updatedStore::setMinCompactionLagSeconds); + maxCompactionLagSeconds.map(addToUpdatedConfigs(updatedConfigs, MAX_COMPACTION_LAG_SECONDS)) + .ifPresent(updatedStore::setMaxCompactionLagSeconds); + maxRecordSizeBytes.map(addToUpdatedConfigs(updatedConfigs, MAX_RECORD_SIZE_BYTES)) + .ifPresent(updatedStore::setMaxRecordSizeBytes); + unusedSchemaDeletionEnabled.map(addToUpdatedConfigs(updatedConfigs, UNUSED_SCHEMA_DELETION_ENABLED)) + .ifPresent(updatedStore::setUnusedSchemaDeletionEnabled); + blobTransferEnabled.map(addToUpdatedConfigs(updatedConfigs, BLOB_TRANSFER_ENABLED)) + .ifPresent(updatedStore::setBlobTransferEnabled); + storageNodeReadQuotaEnabled.map(addToUpdatedConfigs(updatedConfigs, STORAGE_NODE_READ_QUOTA_ENABLED)) + .ifPresent(updatedStore::setStorageNodeReadQuotaEnabled); + regularVersionETLEnabled.map(addToUpdatedConfigs(updatedConfigs, REGULAR_VERSION_ETL_ENABLED)) + .ifPresent(regularVersionETL -> { + ETLStoreConfig etlStoreConfig = updatedStore.getEtlStoreConfig(); + if (etlStoreConfig == null) { + etlStoreConfig = new ETLStoreConfigImpl(); + } + etlStoreConfig.setRegularVersionETLEnabled(regularVersionETL); + updatedStore.setEtlStoreConfig(etlStoreConfig); + }); + futureVersionETLEnabled.map(addToUpdatedConfigs(updatedConfigs, FUTURE_VERSION_ETL_ENABLED)) + .ifPresent(futureVersionETL -> { + ETLStoreConfig etlStoreConfig = updatedStore.getEtlStoreConfig(); + if (etlStoreConfig == null) { + etlStoreConfig = new ETLStoreConfigImpl(); + } + etlStoreConfig.setFutureVersionETLEnabled(futureVersionETL); + updatedStore.setEtlStoreConfig(etlStoreConfig); + }); + 
etledUserProxyAccount.map(addToUpdatedConfigs(updatedConfigs, ETLED_PROXY_USER_ACCOUNT)) + .ifPresent(etlProxyAccount -> { + ETLStoreConfig etlStoreConfig = updatedStore.getEtlStoreConfig(); + if (etlStoreConfig == null) { + etlStoreConfig = new ETLStoreConfigImpl(); + } + etlStoreConfig.setEtledUserProxyAccount(etlProxyAccount); + updatedStore.setEtlStoreConfig(etlStoreConfig); + }); + incrementalPushEnabled.map(addToUpdatedConfigs(updatedConfigs, INCREMENTAL_PUSH_ENABLED)) + .ifPresent(updatedStore::setIncrementalPushEnabled); + + // No matter what, set native replication to enabled in multi-region mode if the store currently doesn't enable it, + // and it is not explicitly asked to be updated + if (multiClusterConfigs.isMultiRegion() && !originalStore.isNativeReplicationEnabled()) { + updateInferredConfig( + admin, + updatedStore, + NATIVE_REPLICATION_ENABLED, + updatedConfigs, + () -> updatedStore.setNativeReplicationEnabled(true)); + } + + PartitionerConfig newPartitionerConfig = mergeNewSettingsIntoOldPartitionerConfig( + originalStore, + partitionerClass, + partitionerParams, + amplificationFactor); + + if (newPartitionerConfig != originalStore.getPartitionerConfig()) { + partitionerClass.ifPresent(p -> updatedConfigs.add(PARTITIONER_CLASS)); + partitionerParams.ifPresent(p -> updatedConfigs.add(PARTITIONER_PARAMS)); + amplificationFactor.ifPresent(p -> updatedConfigs.add(AMPLIFICATION_FACTOR)); + updatedStore.setPartitionerConfig(newPartitionerConfig); + } + + if (currentVersion.isPresent()) { + if (checkRegionFilter && admin.isParent() && !onlyParentRegionFilter) { + LOGGER.warn( + "Skipping current version update in parent region for store: {} in cluster: {}", + storeName, + clusterName); + } else { + updatedConfigs.add(VERSION); + updatedStore.setCurrentVersion(currentVersion.get()); + } + } + + HelixVeniceClusterResources resources = admin.getHelixVeniceClusterResources(clusterName); + VeniceControllerClusterConfig clusterConfig = resources.getConfig(); + + HybridStoreConfig originalHybridStoreConfig = originalStore.getHybridStoreConfig(); + HybridStoreConfig newHybridStoreConfigTemp = mergeNewSettingsIntoOldHybridStoreConfig( + originalStore, + hybridRewindSeconds, + hybridOffsetLagThreshold, + hybridTimeLagThreshold, + hybridDataReplicationPolicy, + hybridBufferReplayPolicy); + + HybridStoreConfig newHybridStoreConfig; + // Incremental push was enabled, but hybrid config hasn't changed. Set default hybrid configs + if (!AdminUtils.isHybrid(newHybridStoreConfigTemp) && !originalStore.isIncrementalPushEnabled() + && updatedStore.isIncrementalPushEnabled()) { + newHybridStoreConfig = new HybridStoreConfigImpl( + DEFAULT_REWIND_TIME_IN_SECONDS, + DEFAULT_HYBRID_OFFSET_LAG_THRESHOLD, + DEFAULT_HYBRID_TIME_LAG_THRESHOLD, + DataReplicationPolicy.NONE, + BufferReplayPolicy.REWIND_FROM_EOP); + } else { + newHybridStoreConfig = newHybridStoreConfigTemp; + } + + if (!AdminUtils.isHybrid(newHybridStoreConfig) && AdminUtils.isHybrid(originalHybridStoreConfig)) { + /** + * If all the hybrid config values are negative, it indicates that the store is being set back to batch-only store. + * We cannot remove the RT topic immediately because with NR and AA, existing current version is + * still consuming the RT topic. 
+ */ + updatedStore.setHybridStoreConfig(null); + + updatedConfigs.add(REWIND_TIME_IN_SECONDS); + updatedConfigs.add(OFFSET_LAG_TO_GO_ONLINE); + updatedConfigs.add(TIME_LAG_TO_GO_ONLINE); + updatedConfigs.add(DATA_REPLICATION_POLICY); + updatedConfigs.add(BUFFER_REPLAY_POLICY); + + updateInferredConfigsForHybridToBatch(admin, clusterConfig, updatedStore, updatedConfigs); + } else if (AdminUtils.isHybrid(newHybridStoreConfig)) { + if (!originalStore.isHybrid()) { + updateInferredConfigsForBatchToHybrid(admin, clusterConfig, updatedStore, updatedConfigs); + } + + // Store is being made Active-Active + if (updatedStore.isActiveActiveReplicationEnabled() && !originalStore.isActiveActiveReplicationEnabled()) { + // If a data-replication policy has not been defined, set a default one. + if (!hybridDataReplicationPolicy.isPresent()) { + updateInferredConfig(admin, updatedStore, DATA_REPLICATION_POLICY, updatedConfigs, () -> { + LOGGER.info( + "Data replication policy was not explicitly set when converting store to Active-Active store: {}." + + " Setting it to active-active replication policy.", + storeName); + newHybridStoreConfig.setDataReplicationPolicy(DataReplicationPolicy.ACTIVE_ACTIVE); + }); + } + + // If configs are set to enable incremental push for hybrid Active-Active users store, enable it + if (clusterConfig.enabledIncrementalPushForHybridActiveActiveUserStores()) { + updateInferredConfig( + admin, + updatedStore, + INCREMENTAL_PUSH_ENABLED, + updatedConfigs, + () -> updatedStore.setIncrementalPushEnabled( + isIncrementalPushEnabled(clusterConfig.isMultiRegion(), newHybridStoreConfig))); + } + } + + if (AdminUtils.isHybrid(originalHybridStoreConfig)) { + if (originalHybridStoreConfig.getRewindTimeInSeconds() != newHybridStoreConfig.getRewindTimeInSeconds()) { + updatedConfigs.add(REWIND_TIME_IN_SECONDS); + } + + if (originalHybridStoreConfig.getOffsetLagThresholdToGoOnline() != newHybridStoreConfig + .getOffsetLagThresholdToGoOnline()) { + updatedConfigs.add(OFFSET_LAG_TO_GO_ONLINE); + } + + if (originalHybridStoreConfig.getProducerTimestampLagThresholdToGoOnlineInSeconds() != newHybridStoreConfig + .getProducerTimestampLagThresholdToGoOnlineInSeconds()) { + updatedConfigs.add(TIME_LAG_TO_GO_ONLINE); + } + + if (originalHybridStoreConfig.getDataReplicationPolicy() != newHybridStoreConfig.getDataReplicationPolicy()) { + updatedConfigs.add(DATA_REPLICATION_POLICY); + } + + if (originalHybridStoreConfig.getBufferReplayPolicy() != newHybridStoreConfig.getBufferReplayPolicy()) { + updatedConfigs.add(BUFFER_REPLAY_POLICY); + } + } else { + updatedConfigs.add(REWIND_TIME_IN_SECONDS); + updatedConfigs.add(OFFSET_LAG_TO_GO_ONLINE); + updatedConfigs.add(TIME_LAG_TO_GO_ONLINE); + updatedConfigs.add(DATA_REPLICATION_POLICY); + updatedConfigs.add(BUFFER_REPLAY_POLICY); + } + + updatedStore.setHybridStoreConfig(newHybridStoreConfig); + } + + if (!updatedStore.isChunkingEnabled() && updatedStore.isWriteComputationEnabled()) { + updateInferredConfig(admin, updatedStore, CHUNKING_ENABLED, updatedConfigs, () -> { + LOGGER.info("Enabling chunking because write compute is enabled for store: " + storeName); + updatedStore.setChunkingEnabled(true); + }); + } + + if (!updatedStore.isRmdChunkingEnabled() && updatedStore.isWriteComputationEnabled()) { + updateInferredConfig(admin, updatedStore, RMD_CHUNKING_ENABLED, updatedConfigs, () -> { + LOGGER.info("Enabling RMD chunking because write compute is enabled for Active/Active store: " + storeName); + updatedStore.setRmdChunkingEnabled(true); + }); + } + + if 
(!updatedStore.isRmdChunkingEnabled() && updatedStore.isActiveActiveReplicationEnabled()) { + updateInferredConfig(admin, updatedStore, RMD_CHUNKING_ENABLED, updatedConfigs, () -> { + LOGGER.info("Enabling RMD chunking because Active/Active is enabled for store: " + storeName); + updatedStore.setRmdChunkingEnabled(true); + }); + } + + if (params.disableMetaStore().isPresent() && params.disableMetaStore().get()) { + LOGGER.info("Disabling meta system store for store: {} of cluster: {}", storeName, clusterName); + updatedConfigs.add(DISABLE_META_STORE); + updatedStore.setStoreMetaSystemStoreEnabled(false); + updatedStore.setStoreMetadataSystemStoreEnabled(false); + } + + if (params.disableDavinciPushStatusStore().isPresent() && params.disableDavinciPushStatusStore().get()) { + updatedConfigs.add(DISABLE_DAVINCI_PUSH_STATUS_STORE); + LOGGER.info("Disabling davinci push status store for store: {} of cluster: {}", storeName, clusterName); + updatedStore.setDaVinciPushStatusStoreEnabled(false); + } + + if (storeViewConfig.isPresent() && viewName.isPresent()) { + throw new VeniceException("Cannot update a store view and overwrite store view setup together!"); + } + + if (viewName.isPresent()) { + Map updatedViewSettings; + if (!removeView.isPresent()) { + if (!viewClassName.isPresent()) { + throw new VeniceException("View class name is required when configuring a view."); + } + // If View parameter is not provided, use emtpy map instead. It does not inherit from existing config. + ViewConfig viewConfig = new ViewConfigImpl(viewClassName.get(), viewParams.orElse(Collections.emptyMap())); + validateStoreViewConfig(originalStore, viewConfig); + updatedViewSettings = addNewViewConfigsIntoOldConfigs(originalStore, viewName.get(), viewConfig); + } else { + updatedViewSettings = removeViewConfigFromStoreViewConfigMap(originalStore, viewName.get()); + } + updatedStore.setViewConfigs(updatedViewSettings); + updatedConfigs.add(STORE_VIEW); + } + + if (storeViewConfig.isPresent()) { + // Validate and overwrite store views if they're getting set + validateStoreViewConfigs(storeViewConfig.get(), updatedStore); + updatedStore.setViewConfigs(StoreViewUtils.convertStringMapViewToViewConfigMap(storeViewConfig.get())); + updatedConfigs.add(STORE_VIEW); + } + + if (personaName.isPresent()) { + updatedConfigs.add(PERSONA_NAME); + } + + validateStoreConfigs(admin, clusterName, updatedStore); + validateStoreUpdate(admin, multiClusterConfigs, clusterName, originalStore, updatedStore); + validatePersona(admin, clusterName, updatedStore, personaName); + + return updateStoreWrapper; + } + + private static Function addToUpdatedConfigs(Set updatedConfigs, String config) { + return (configValue) -> { + updatedConfigs.add(config); + return configValue; + }; + } + + static void updateInferredConfig( + Admin admin, + Store store, + String configName, + Set updatedConfigs, + Runnable updater) { + if (!isInferredStoreUpdateAllowed(admin, store.getName())) { + return; + } + + if (!updatedConfigs.contains(configName)) { + updater.run(); + updatedConfigs.add(configName); + } + } + + static void updateInferredConfigsForHybridToBatch( + Admin admin, + VeniceControllerClusterConfig clusterConfig, + Store updatedStore, + Set updatedConfigs) { + updateInferredConfig( + admin, + updatedStore, + INCREMENTAL_PUSH_ENABLED, + updatedConfigs, + () -> updatedStore.setIncrementalPushEnabled(false)); + updateInferredConfig( + admin, + updatedStore, + NATIVE_REPLICATION_SOURCE_FABRIC, + updatedConfigs, + () -> updatedStore + 
.setNativeReplicationSourceFabric(clusterConfig.getNativeReplicationSourceFabricAsDefaultForBatchOnly())); + updateInferredConfig( + admin, + updatedStore, + ACTIVE_ACTIVE_REPLICATION_ENABLED, + updatedConfigs, + () -> updatedStore.setActiveActiveReplicationEnabled(false)); + } + + static void updateInferredConfigsForBatchToHybrid( + Admin admin, + VeniceControllerClusterConfig clusterConfig, + Store updatedStore, + Set updatedConfigs) { + String clusterName = clusterConfig.getClusterName(); + String storeName = updatedStore.getName(); + + updateInferredConfig( + admin, + updatedStore, + NATIVE_REPLICATION_SOURCE_FABRIC, + updatedConfigs, + () -> updatedStore + .setNativeReplicationSourceFabric(clusterConfig.getNativeReplicationSourceFabricAsDefaultForHybrid())); + + /* + * Enable/disable active-active replication for user hybrid stores if the cluster level config + * for new hybrid stores is on. + */ + updateInferredConfig( + admin, + updatedStore, + ACTIVE_ACTIVE_REPLICATION_ENABLED, + updatedConfigs, + () -> updatedStore.setActiveActiveReplicationEnabled( + updatedStore.isActiveActiveReplicationEnabled() + || (clusterConfig.isActiveActiveReplicationEnabledAsDefaultForHybrid() + && !updatedStore.isSystemStore()))); + + if (updatedStore.getPartitionCount() == 0) { + updateInferredConfig(admin, updatedStore, PARTITION_COUNT, updatedConfigs, () -> { + int updatedPartitionCount = PartitionUtils.calculatePartitionCount( + storeName, + updatedStore.getStorageQuotaInByte(), + 0, + clusterConfig.getPartitionSize(), + clusterConfig.getMinNumberOfPartitionsForHybrid(), + clusterConfig.getMaxNumberOfPartitions(), + clusterConfig.isPartitionCountRoundUpEnabled(), + clusterConfig.getPartitionCountRoundUpSize()); + updatedStore.setPartitionCount(updatedPartitionCount); + LOGGER.info( + "Enforcing default hybrid partition count: {} for a new hybrid store: {}", + updatedPartitionCount, + storeName); + }); + } + + /** + * If a store: + * (1) Is being converted to hybrid; + * (2) Is not partial update enabled for now; + * (3) Does not request to change partial update config; + * It means partial update is not enabled, and there is no explict intention to change it. In this case, we will + * check cluster default config based on the replication policy to determine whether to try to enable partial update. + */ + final boolean shouldEnablePartialUpdateBasedOnClusterConfig = (updatedStore.isActiveActiveReplicationEnabled() + ? clusterConfig.isEnablePartialUpdateForHybridActiveActiveUserStores() + : clusterConfig.isEnablePartialUpdateForHybridNonActiveActiveUserStores()); + if (shouldEnablePartialUpdateBasedOnClusterConfig) { + LOGGER.info("Controller will enable partial update based on cluster config for store: " + storeName); + /** + * When trying to turn on partial update based on cluster config, if schema generation failed, we will not fail the + * whole request, but just do NOT turn on partial update, as other config update should still be respected. + */ + try { + PrimaryControllerConfigUpdateUtils.addUpdateSchemaForStore(admin, clusterName, updatedStore.getName(), true); + updateInferredConfig(admin, updatedStore, WRITE_COMPUTATION_ENABLED, updatedConfigs, () -> { + updatedStore.setWriteComputationEnabled(true); + }); + } catch (Exception e) { + LOGGER.warn( + "Caught exception when trying to enable partial update base on cluster config, will not enable partial update for store: " + + storeName, + e); + } + } + } + + /** + * Check if a store can support incremental pushes based on other configs. 
The following rules define when incremental + * push is allowed: + * <ol> + *   <li>If the system is running in single-region mode, the store must be hybrid</li> + *   <li>If the system is running in multi-region mode, + *     <ol> + *       <li>Hybrid + Active-Active + {@link DataReplicationPolicy} is {@link DataReplicationPolicy#ACTIVE_ACTIVE}</li> + *       <li>Hybrid + {@link DataReplicationPolicy} is {@link DataReplicationPolicy#AGGREGATE}</li> + *       <li>Hybrid + {@link DataReplicationPolicy} is {@link DataReplicationPolicy#NONE}</li> + *     </ol> + *   </li> + * </ol> + *
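For illustration, a minimal sketch of how these rules are expected to evaluate (not part of the patch; the hybrid config values are assumed, and the helper is the package-private method declared just below):

    // Hybrid config: 60s rewind, 20000 offset lag threshold, NON_AGGREGATE replication policy (assumed values).
    HybridStoreConfig hybridConfig =
        new HybridStoreConfigImpl(60, 20000, -1, DataReplicationPolicy.NON_AGGREGATE, BufferReplayPolicy.REWIND_FROM_EOP);
    // Single-region mode: any hybrid store may enable incremental push.
    boolean allowedSingleRegion = UpdateStoreUtils.isIncrementalPushEnabled(false, hybridConfig); // true
    // Multi-region mode: NON_AGGREGATE is rejected; only ACTIVE_ACTIVE, AGGREGATE or NONE qualify.
    boolean allowedMultiRegion = UpdateStoreUtils.isIncrementalPushEnabled(true, hybridConfig); // false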
      + * @param multiRegion whether the system is running in multi-region mode + * @param hybridStoreConfig The hybrid store config after applying all updates + * @return {@code true} if incremental push is allowed, {@code false} otherwise + */ + static boolean isIncrementalPushEnabled(boolean multiRegion, HybridStoreConfig hybridStoreConfig) { + // Only hybrid stores can support incremental push + if (!AdminUtils.isHybrid(hybridStoreConfig)) { + return false; + } + + // If the system is running in multi-region mode, we need to validate the data replication policies + if (!multiRegion) { + return true; + } + + DataReplicationPolicy dataReplicationPolicy = hybridStoreConfig.getDataReplicationPolicy(); + return dataReplicationPolicy == DataReplicationPolicy.ACTIVE_ACTIVE + || dataReplicationPolicy == DataReplicationPolicy.AGGREGATE + || dataReplicationPolicy == DataReplicationPolicy.NONE; + } + + /** + * Validate if the specified store is in a valid state or not + * Examples of such checks are: + *
+ * <ul> + *   <li>Write compute on batch-only store</li> + *   <li>Incremental push with NON_AGGREGATE DRP in multi-region mode</li> + * </ul>
      + */ + static void validateStoreConfigs(Admin admin, String clusterName, Store store) { + String storeName = store.getName(); + String errorMessagePrefix = "Store update error for " + storeName + " in cluster: " + clusterName + ": "; + + VeniceControllerClusterConfig controllerConfig = admin.getMultiClusterConfigs().getControllerConfig(clusterName); + + if (!store.isHybrid()) { + // Inc push + non hybrid not supported + if (store.isIncrementalPushEnabled()) { + throw new VeniceHttpException( + HttpStatus.SC_BAD_REQUEST, + errorMessagePrefix + "Incremental push is only supported for hybrid stores", + ErrorType.INVALID_CONFIG); + } + + // WC is only supported for hybrid stores + if (store.isWriteComputationEnabled()) { + throw new VeniceHttpException( + HttpStatus.SC_BAD_REQUEST, + errorMessagePrefix + "Write computation is only supported for hybrid stores", + ErrorType.INVALID_CONFIG); + } + + // AA is only supported for hybrid stores + if (store.isActiveActiveReplicationEnabled()) { + throw new VeniceHttpException( + HttpStatus.SC_BAD_REQUEST, + errorMessagePrefix + "Active-Active Replication is only supported for hybrid stores", + ErrorType.INVALID_CONFIG); + } + } else { + HybridStoreConfig hybridStoreConfig = store.getHybridStoreConfig(); + // All fields of hybrid store config must have valid values + if (hybridStoreConfig.getRewindTimeInSeconds() < 0) { + throw new VeniceHttpException( + HttpStatus.SC_BAD_REQUEST, + errorMessagePrefix + "Rewind time cannot be negative for a hybrid store", + ErrorType.INVALID_CONFIG); + } + + if (hybridStoreConfig.getOffsetLagThresholdToGoOnline() < 0 + && hybridStoreConfig.getProducerTimestampLagThresholdToGoOnlineInSeconds() < 0) { + throw new VeniceHttpException( + HttpStatus.SC_BAD_REQUEST, + errorMessagePrefix + + "Both offset lag threshold and producer timestamp lag threshold cannot be negative for a hybrid store", + ErrorType.INVALID_CONFIG); + } + + DataReplicationPolicy dataReplicationPolicy = hybridStoreConfig.getDataReplicationPolicy(); + // Incremental push + NON_AGGREGATE DRP is not supported in multi-region mode + if (controllerConfig.isMultiRegion() && store.isIncrementalPushEnabled() + && dataReplicationPolicy == DataReplicationPolicy.NON_AGGREGATE) { + throw new VeniceHttpException( + HttpStatus.SC_BAD_REQUEST, + errorMessagePrefix + + "Incremental push is not supported for hybrid stores with non-aggregate data replication policy", + ErrorType.INVALID_CONFIG); + } + + // ACTIVE_ACTIVE DRP is only supported when activeActiveReplicationEnabled = true + if (dataReplicationPolicy == DataReplicationPolicy.ACTIVE_ACTIVE && !store.isActiveActiveReplicationEnabled()) { + throw new VeniceHttpException( + HttpStatus.SC_BAD_REQUEST, + errorMessagePrefix + + "Data replication policy ACTIVE_ACTIVE is only supported for hybrid stores with active-active replication enabled", + ErrorType.INVALID_CONFIG); + } + } + + // Storage quota can not be less than 0 + if (store.getStorageQuotaInByte() < 0 && store.getStorageQuotaInByte() != Store.UNLIMITED_STORAGE_QUOTA) { + throw new VeniceHttpException( + HttpStatus.SC_BAD_REQUEST, + "Storage quota can not be less than 0", + ErrorType.INVALID_CONFIG); + } + + // Read quota can not be less than 0 + if (store.getReadQuotaInCU() < 0) { + throw new VeniceHttpException( + HttpStatus.SC_BAD_REQUEST, + "Read quota can not be less than 0", + ErrorType.INVALID_CONFIG); + } + + if (!admin.isParent()) { + HelixVeniceClusterResources resources = admin.getHelixVeniceClusterResources(clusterName); + 
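As a worked illustration of the cluster-level read quota ceiling enforced in the child-controller branch immediately below (hypothetical numbers, not taken from the patch):

    int defaultReadQuotaPerRouter = 1000; // controllerConfig.getDefaultReadQuotaPerRouter()
    int liveRouterCount = 5;              // routersClusterManager.getLiveRoutersCount()
    long clusterReadQuota = Math.max(defaultReadQuotaPerRouter, liveRouterCount * defaultReadQuotaPerRouter); // 5000
    // A store read quota above 5000 CU is rejected with HTTP 400 and ErrorType.INVALID_CONFIG.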
ZkRoutersClusterManager routersClusterManager = resources.getRoutersClusterManager(); + int routerCount = routersClusterManager.getLiveRoutersCount(); + int defaultReadQuotaPerRouter = controllerConfig.getDefaultReadQuotaPerRouter(); + + long clusterReadQuota = Math.max(defaultReadQuotaPerRouter, routerCount * defaultReadQuotaPerRouter); + if (store.getReadQuotaInCU() > clusterReadQuota) { + throw new VeniceHttpException( + HttpStatus.SC_BAD_REQUEST, + "Read quota can not be more than the cluster quota (" + clusterReadQuota + ")", + ErrorType.INVALID_CONFIG); + } + } + + // Active-active replication is only supported for stores that also have native replication + if (store.isActiveActiveReplicationEnabled() && !store.isNativeReplicationEnabled()) { + throw new VeniceHttpException( + HttpStatus.SC_BAD_REQUEST, + "Active/Active Replication cannot be enabled for store " + store.getName() + + " since Native Replication is not enabled on it.", + ErrorType.INVALID_CONFIG); + } + + PartitionerConfig partitionerConfig = store.getPartitionerConfig(); + if (partitionerConfig == null) { + throw new VeniceHttpException( + HttpStatus.SC_BAD_REQUEST, + errorMessagePrefix + "Partitioner Config cannot be null", + ErrorType.INVALID_CONFIG); + } + + // Active-Active and write-compute are not supported when amplification factor is more than 1 + if (partitionerConfig.getAmplificationFactor() > 1) { + if (store.isActiveActiveReplicationEnabled()) { + throw new VeniceHttpException( + HttpStatus.SC_BAD_REQUEST, + errorMessagePrefix + "Active-active replication is not supported for stores with amplification factor > 1", + ErrorType.INVALID_CONFIG); + } + + if (store.isWriteComputationEnabled()) { + throw new VeniceHttpException( + HttpStatus.SC_BAD_REQUEST, + errorMessagePrefix + "Write computation is not supported for stores with amplification factor > 1", + ErrorType.INVALID_CONFIG); + } + } + + // Before setting partitioner config, verify the updated partitionerConfig can be built + try { + Properties partitionerParams = new Properties(); + for (Map.Entry param: partitionerConfig.getPartitionerParams().entrySet()) { + partitionerParams.setProperty(param.getKey(), param.getValue()); + } + + PartitionUtils.getVenicePartitioner( + partitionerConfig.getPartitionerClass(), + new VeniceProperties(partitionerParams), + admin.getKeySchema(clusterName, storeName).getSchema()); + } catch (PartitionerSchemaMismatchException e) { + String errorMessage = errorMessagePrefix + e.getMessage(); + LOGGER.error(errorMessage); + throw new VeniceHttpException(HttpStatus.SC_BAD_REQUEST, errorMessage, ErrorType.INVALID_SCHEMA); + } catch (Exception e) { + String errorMessage = errorMessagePrefix + "Partitioner Configs are invalid, please verify that partitioner " + + "configs like classpath and parameters are correct!"; + LOGGER.error(errorMessage); + throw new VeniceHttpException(HttpStatus.SC_BAD_REQUEST, errorMessage, ErrorType.INVALID_CONFIG); + } + + // Validate if the latest superset schema id is an existing value schema + int latestSupersetSchemaId = store.getLatestSuperSetValueSchemaId(); + if (latestSupersetSchemaId != SchemaData.INVALID_VALUE_SCHEMA_ID) { + if (admin.getValueSchema(clusterName, storeName, latestSupersetSchemaId) == null) { + throw new VeniceHttpException( + HttpStatus.SC_BAD_REQUEST, + "Unknown value schema id: " + latestSupersetSchemaId + " in store: " + storeName, + ErrorType.INVALID_CONFIG); + } + } + + if (store.getMaxCompactionLagSeconds() < store.getMinCompactionLagSeconds()) { + throw new 
VeniceHttpException( + HttpStatus.SC_BAD_REQUEST, + "Store's max compaction lag seconds: " + store.getMaxCompactionLagSeconds() + " shouldn't be smaller than " + + "store's min compaction lag seconds: " + store.getMinCompactionLagSeconds(), + ErrorType.INVALID_CONFIG); + } + + ETLStoreConfig etlStoreConfig = store.getEtlStoreConfig(); + if (etlStoreConfig != null + && (etlStoreConfig.isRegularVersionETLEnabled() || etlStoreConfig.isFutureVersionETLEnabled())) { + if (StringUtils.isEmpty(etlStoreConfig.getEtledUserProxyAccount())) { + throw new VeniceHttpException( + HttpStatus.SC_BAD_REQUEST, + "Cannot enable ETL for this store because etled user proxy account is not set", + ErrorType.INVALID_CONFIG); + } + } + } + + /** + * Validate the config changes by looking at the store configs before and after applying the requested updates. + * Examples of such checks are: + *
+ * <ul> + *   <li>Partition count</li> + *   <li>Store partitioner</li> + *   <li>If partial update gets enabled, then the schema must be one that can support it</li> + * </ul>
      + */ + private static void validateStoreUpdate( + Admin admin, + VeniceControllerMultiClusterConfig multiClusterConfig, + String clusterName, + Store originalStore, + Store updatedStore) { + validateStorePartitionCountUpdate(admin, multiClusterConfig, clusterName, originalStore, updatedStore); + validateStorePartitionerUpdate(clusterName, originalStore, updatedStore); + + if (updatedStore.isWriteComputationEnabled() && !originalStore.isWriteComputationEnabled()) { + // Dry-run generating update schemas before sending admin messages to enable partial update because + // update schema generation may fail due to some reasons. If that happens, abort the store update process. + PrimaryControllerConfigUpdateUtils.addUpdateSchemaForStore(admin, clusterName, originalStore.getName(), true); + } + } + + private static void validateStoreViewConfigs(Map stringMap, Store store) { + Map configs = StoreViewUtils.convertStringMapViewToViewConfigMap(stringMap); + for (Map.Entry viewConfigEntry: configs.entrySet()) { + validateStoreViewConfig(store, viewConfigEntry.getValue()); + } + } + + private static void validateStoreViewConfig(Store store, ViewConfig viewConfig) { + // TODO: Pass a proper properties object here. Today this isn't used in this context + VeniceView view = + ViewUtils.getVeniceView(viewConfig.getViewClassName(), new Properties(), store, viewConfig.getViewParameters()); + view.validateConfigs(); + } + + /** + * Used by both the {@link VeniceHelixAdmin} and the {@link VeniceParentHelixAdmin} + * + * @param oldStore Existing Store that is the source for updates. This object will not be modified by this method. + * @param hybridRewindSeconds Optional is present if the returned object should include a new rewind time + * @param hybridOffsetLagThreshold Optional is present if the returned object should include a new offset lag threshold + * @param hybridTimeLagThreshold + * @param hybridDataReplicationPolicy + * @param bufferReplayPolicy + * @return null if oldStore has no hybrid configs and optionals are not present, + * otherwise a fully specified {@link HybridStoreConfig} + */ + static HybridStoreConfig mergeNewSettingsIntoOldHybridStoreConfig( + Store oldStore, + Optional hybridRewindSeconds, + Optional hybridOffsetLagThreshold, + Optional hybridTimeLagThreshold, + Optional hybridDataReplicationPolicy, + Optional bufferReplayPolicy) { + HybridStoreConfig mergedHybridStoreConfig; + if (oldStore.isHybrid()) { // for an existing hybrid store, just replace any specified values + HybridStoreConfig oldHybridConfig = oldStore.getHybridStoreConfig().clone(); + mergedHybridStoreConfig = new HybridStoreConfigImpl( + hybridRewindSeconds.orElseGet(oldHybridConfig::getRewindTimeInSeconds), + hybridOffsetLagThreshold.orElseGet(oldHybridConfig::getOffsetLagThresholdToGoOnline), + hybridTimeLagThreshold.orElseGet(oldHybridConfig::getProducerTimestampLagThresholdToGoOnlineInSeconds), + hybridDataReplicationPolicy.orElseGet(oldHybridConfig::getDataReplicationPolicy), + bufferReplayPolicy.orElseGet(oldHybridConfig::getBufferReplayPolicy)); + } else { + mergedHybridStoreConfig = new HybridStoreConfigImpl( + hybridRewindSeconds.orElse(-1L), + // If not specified, offset/time lag threshold will be -1 and will not be used to determine whether + // a partition is ready to serve + hybridOffsetLagThreshold.orElse(-1L), + hybridTimeLagThreshold.orElse(-1L), + hybridDataReplicationPolicy.orElse(DataReplicationPolicy.NON_AGGREGATE), + bufferReplayPolicy.orElse(BufferReplayPolicy.REWIND_FROM_EOP)); + } + + if 
(!AdminUtils.isHybrid(mergedHybridStoreConfig)) { + return null; + } + + return mergedHybridStoreConfig; + } + + public static void validateStorePartitionCountUpdate( + Admin admin, + VeniceControllerMultiClusterConfig multiClusterConfigs, + String clusterName, + Store originalStore, + int newPartitionCount) { + Store updatedStore = originalStore.cloneStore(); + updatedStore.setPartitionCount(newPartitionCount); + validateStorePartitionCountUpdate(admin, multiClusterConfigs, clusterName, originalStore, updatedStore); + } + + static void validateStorePartitionCountUpdate( + Admin admin, + VeniceControllerMultiClusterConfig multiClusterConfigs, + String clusterName, + Store originalStore, + Store updatedStore) { + String storeName = originalStore.getName(); + String errorMessagePrefix = "Store update error for " + storeName + " in cluster: " + clusterName + ": "; + VeniceControllerClusterConfig clusterConfig = admin.getHelixVeniceClusterResources(clusterName).getConfig(); + + int newPartitionCount = updatedStore.getPartitionCount(); + if (newPartitionCount < 0) { + String errorMessage = errorMessagePrefix + "Partition count: " + newPartitionCount + " should NOT be negative"; + LOGGER.error(errorMessage); + throw new VeniceHttpException(HttpStatus.SC_BAD_REQUEST, errorMessage, ErrorType.INVALID_CONFIG); + } + + if (updatedStore.isHybrid() && newPartitionCount == 0) { + String errorMessage = errorMessagePrefix + "Partition count cannot be 0 for hybrid store"; + LOGGER.error(errorMessage); + throw new VeniceHttpException(HttpStatus.SC_BAD_REQUEST, errorMessage, ErrorType.INVALID_CONFIG); + } + + if (originalStore.isHybrid() && updatedStore.isHybrid() && originalStore.getPartitionCount() != newPartitionCount) { + String errorMessage = errorMessagePrefix + "Cannot change partition count for this hybrid store"; + LOGGER.error(errorMessage); + throw new VeniceHttpException(HttpStatus.SC_BAD_REQUEST, errorMessage, ErrorType.INVALID_CONFIG); + } + + int minPartitionNum = clusterConfig.getMinNumberOfPartitions(); + if (newPartitionCount < minPartitionNum && newPartitionCount != 0) { + throw new VeniceHttpException( + HttpStatus.SC_BAD_REQUEST, + "Partition count must be at least " + minPartitionNum + " for store: " + storeName + + ". If a specific partition count is not required, set it to 0.", + ErrorType.INVALID_CONFIG); + } + + int maxPartitionNum = clusterConfig.getMaxNumberOfPartitions(); + if (newPartitionCount > maxPartitionNum) { + String errorMessage = + errorMessagePrefix + "Partition count: " + newPartitionCount + " should be less than max: " + maxPartitionNum; + LOGGER.error(errorMessage); + throw new VeniceHttpException(HttpStatus.SC_BAD_REQUEST, errorMessage, ErrorType.INVALID_CONFIG); + } + + if (updatedStore.isHybrid()) { + // Allow the update if the new partition count matches RT partition count + TopicManager topicManager; + if (admin.isParent()) { + // RT might not exist in parent colo. Get RT partition count from a child colo. 
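    // Illustration (not part of this patch): this branch matters mainly when a store is being converted to hybrid.
    // If a real-time topic already exists with, say, 16 partitions, only a partition count of 16 passes the check
    // below; any other value is rejected with a 400 INVALID_CONFIG VeniceHttpException.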
+ String childDatacenter = clusterConfig.getChildDatacenters().iterator().next(); + topicManager = admin.getTopicManager(multiClusterConfigs.getChildDataCenterKafkaUrlMap().get(childDatacenter)); + } else { + topicManager = admin.getTopicManager(); + } + PubSubTopic realTimeTopic = admin.getPubSubTopicRepository().getTopic(Version.composeRealTimeTopic(storeName)); + if (!topicManager.containsTopic(realTimeTopic) + || topicManager.getPartitionCount(realTimeTopic) == newPartitionCount) { + LOGGER.info("Allow updating store " + storeName + " partition count to " + newPartitionCount); + return; + } + String errorMessage = errorMessagePrefix + "Cannot change partition count for this hybrid store"; + LOGGER.error(errorMessage); + throw new VeniceHttpException(HttpStatus.SC_BAD_REQUEST, errorMessage, ErrorType.INVALID_CONFIG); + } + } + + static void validateStorePartitionerUpdate(String clusterName, Store existingStore, Store updatedStore) { + String storeName = existingStore.getName(); + String errorMessagePrefix = "Store update error for " + storeName + " in cluster: " + clusterName + ": "; + + if (!existingStore.isHybrid() || !updatedStore.isHybrid()) { + // Allow partitioner changes for non-hybrid stores + return; + } + + PartitionerConfig existingPartitionerConfig = existingStore.getPartitionerConfig(); + PartitionerConfig updatedPartitionerConfig = updatedStore.getPartitionerConfig(); + + if (!existingPartitionerConfig.getPartitionerClass().equals(updatedPartitionerConfig.getPartitionerClass())) { + String errorMessage = errorMessagePrefix + "Partitioner class cannot be changed for hybrid store"; + LOGGER.error(errorMessage); + throw new VeniceHttpException(HttpStatus.SC_BAD_REQUEST, errorMessage, ErrorType.INVALID_CONFIG); + } + + if (!existingPartitionerConfig.getPartitionerParams().equals(updatedPartitionerConfig.getPartitionerParams())) { + String errorMessage = errorMessagePrefix + "Partitioner params cannot be changed for hybrid store"; + LOGGER.error(errorMessage); + throw new VeniceHttpException(HttpStatus.SC_BAD_REQUEST, errorMessage, ErrorType.INVALID_CONFIG); + } + } + + static void validatePersona(Admin admin, String clusterName, Store updatedStore, Optional personaName) { + String storeName = updatedStore.getName(); + StoragePersonaRepository repository = + admin.getHelixVeniceClusterResources(clusterName).getStoragePersonaRepository(); + StoragePersona personaToValidate = null; + StoragePersona existingPersona = repository.getPersonaContainingStore(storeName); + + if (personaName.isPresent()) { + personaToValidate = admin.getStoragePersona(clusterName, personaName.get()); + if (personaToValidate == null) { + String errMsg = "UpdateStore command failed for store " + storeName + ". 
The provided StoragePersona " + + personaName.get() + " does not exist."; + throw new VeniceException(errMsg); + } + } else if (existingPersona != null) { + personaToValidate = existingPersona; + } + + if (personaToValidate != null) { + repository.validateAddUpdatedStore(personaToValidate, Optional.of(updatedStore)); + } + } + + static PartitionerConfig mergeNewSettingsIntoOldPartitionerConfig( + Store oldStore, + Optional partitionerClass, + Optional> partitionerParams, + Optional amplificationFactor) { + + if (!partitionerClass.isPresent() && !partitionerParams.isPresent() && !amplificationFactor.isPresent()) { + return oldStore.getPartitionerConfig(); + } + + PartitionerConfig originalPartitionerConfig; + if (oldStore.getPartitionerConfig() == null) { + originalPartitionerConfig = new PartitionerConfigImpl(); + } else { + originalPartitionerConfig = oldStore.getPartitionerConfig(); + } + return new PartitionerConfigImpl( + partitionerClass.orElse(originalPartitionerConfig.getPartitionerClass()), + partitionerParams.orElse(originalPartitionerConfig.getPartitionerParams()), + amplificationFactor.orElse(originalPartitionerConfig.getAmplificationFactor())); + } + + static Map addNewViewConfigsIntoOldConfigs( + Store oldStore, + String viewClass, + ViewConfig viewConfig) throws VeniceException { + // Add new view config into the existing config map. The new configs will override existing ones which share the + // same key. + Map oldViewConfigMap = oldStore.getViewConfigs(); + if (oldViewConfigMap == null) { + oldViewConfigMap = new HashMap<>(); + } + Map mergedConfigs = new HashMap<>(oldViewConfigMap); + mergedConfigs.put(viewClass, viewConfig); + return mergedConfigs; + } + + static Map removeViewConfigFromStoreViewConfigMap(Store oldStore, String viewClass) + throws VeniceException { + Map oldViewConfigMap = oldStore.getViewConfigs(); + if (oldViewConfigMap == null) { + // TODO: We might want to return a null instead of empty map + oldViewConfigMap = new HashMap<>(); + } + Map mergedConfigs = new HashMap<>(oldViewConfigMap); + mergedConfigs.remove(viewClass); + return mergedConfigs; + } + + /** + * This function is the entry-point of all operations that are necessary after the successful execution of the store + * update. These should only be executed in the primary controller. + * @param admin The main {@link Admin} object for this component + * @param clusterName The name of the cluster where the store is being updated + * @param storeName The name of the store that was updated + */ + public static void handlePostUpdateActions(Admin admin, String clusterName, String storeName) { + PrimaryControllerConfigUpdateUtils.registerInferredSchemas(admin, clusterName, storeName); + } + + /** + * Check if direct store config updates are allowed in this controller. In multi-region mode, parent controller + * decides what store configs get applied to a store. In a single-region mode, the child controller makes this + * decision. + * In a multi-region mode, the child controller must not do any inferencing and must only apply the configs that were + * applied by the parent controller, except for child-region-only stores - i.e. participant store. 
+ */ + static boolean isInferredStoreUpdateAllowed(Admin admin, String storeName) { + // For system stores, do not allow any inferencing + if (VeniceSystemStoreUtils.isSystemStore(storeName)) { + return false; + } + + if (!admin.isPrimary()) { + return false; + } + + // Parent controller can only apply the updates if it is processing updates in VeniceParentHelixAdmin (i.e. not via + // the Admin channel) + return !admin.isParent() || admin instanceof VeniceParentHelixAdmin; + } +} diff --git a/services/venice-controller/src/main/java/com/linkedin/venice/controller/util/UpdateStoreWrapper.java b/services/venice-controller/src/main/java/com/linkedin/venice/controller/util/UpdateStoreWrapper.java new file mode 100644 index 00000000000..486b17a295d --- /dev/null +++ b/services/venice-controller/src/main/java/com/linkedin/venice/controller/util/UpdateStoreWrapper.java @@ -0,0 +1,18 @@ +package com.linkedin.venice.controller.util; + +import com.linkedin.venice.meta.Store; +import java.util.HashSet; +import java.util.Set; + + +public class UpdateStoreWrapper { + public final Set updatedConfigs; + public final Store originalStore; + public final Store updatedStore; + + public UpdateStoreWrapper(Store originalStore) { + this.originalStore = originalStore; + this.updatedConfigs = new HashSet<>(); + this.updatedStore = originalStore.cloneStore(); + } +} diff --git a/services/venice-controller/src/test/java/com/linkedin/venice/controller/AbstractTestVeniceParentHelixAdmin.java b/services/venice-controller/src/test/java/com/linkedin/venice/controller/AbstractTestVeniceParentHelixAdmin.java index 2982ef97f98..fc00f903ccf 100644 --- a/services/venice-controller/src/test/java/com/linkedin/venice/controller/AbstractTestVeniceParentHelixAdmin.java +++ b/services/venice-controller/src/test/java/com/linkedin/venice/controller/AbstractTestVeniceParentHelixAdmin.java @@ -4,7 +4,6 @@ import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; import com.linkedin.venice.authorization.AuthorizerService; import com.linkedin.venice.authorization.DefaultIdentityParser; @@ -23,7 +22,6 @@ import com.linkedin.venice.helix.StoragePersonaRepository; import com.linkedin.venice.helix.ZkRoutersClusterManager; import com.linkedin.venice.helix.ZkStoreConfigAccessor; -import com.linkedin.venice.meta.HybridStoreConfig; import com.linkedin.venice.meta.OfflinePushStrategy; import com.linkedin.venice.meta.Store; import com.linkedin.venice.meta.StoreInfo; @@ -88,7 +86,6 @@ public void setupInternalMocks() { doReturn(true).when(topicManager).containsTopicAndAllPartitionsAreOnline(pubSubTopicRepository.getTopic(topicName)); internalAdmin = mock(VeniceHelixAdmin.class); - when(internalAdmin.isHybrid((HybridStoreConfig) any())).thenCallRealMethod(); doReturn(topicManager).when(internalAdmin).getTopicManager(); SchemaEntry mockEntry = new SchemaEntry(0, TEST_SCHEMA); doReturn(mockEntry).when(internalAdmin).getKeySchema(anyString(), anyString()); @@ -127,6 +124,8 @@ public void setupInternalMocks() { .put(regionName, ControllerClient.constructClusterControllerClient(clusterName, "localhost", Optional.empty())); doReturn(controllerClients).when(internalAdmin).getControllerClientMap(any()); + doReturn(true).when(internalAdmin).isPrimary(); + resources = mockResources(config, clusterName); doReturn(storeRepository).when(resources).getStoreMetadataRepository(); ZkRoutersClusterManager manager = mock(ZkRoutersClusterManager.class); @@ 
-140,6 +139,8 @@ public void setupInternalMocks() { clusterLockManager = mock(ClusterLockManager.class); doReturn(clusterLockManager).when(resources).getClusterLockManager(); + doReturn(1000).when(config).getDefaultReadQuotaPerRouter(); + adminStats = mock(VeniceAdminStats.class); doReturn(adminStats).when(resources).getVeniceAdminStats(); @@ -204,6 +205,10 @@ VeniceControllerClusterConfig mockConfig(String clusterName) { doReturn(childClusterMap).when(config).getChildDataCenterControllerUrlMap(); doReturn(MAX_PARTITION_NUM).when(config).getMaxNumberOfPartitions(); doReturn(DefaultIdentityParser.class.getName()).when(config).getIdentityParserClassName(); + doReturn(true).when(config).isMultiRegion(); + doReturn(10L).when(config).getPartitionSize(); + doReturn("dc-batch-nr").when(config).getNativeReplicationSourceFabricAsDefaultForBatchOnly(); + doReturn("dc-hybrid-nr").when(config).getNativeReplicationSourceFabricAsDefaultForHybrid(); return config; } diff --git a/services/venice-controller/src/test/java/com/linkedin/venice/controller/TestVeniceHelixAdminWithoutCluster.java b/services/venice-controller/src/test/java/com/linkedin/venice/controller/TestVeniceHelixAdminWithoutCluster.java index 9275758a4cc..8938e2ac57f 100644 --- a/services/venice-controller/src/test/java/com/linkedin/venice/controller/TestVeniceHelixAdminWithoutCluster.java +++ b/services/venice-controller/src/test/java/com/linkedin/venice/controller/TestVeniceHelixAdminWithoutCluster.java @@ -10,10 +10,6 @@ import com.linkedin.venice.common.VeniceSystemStoreType; import com.linkedin.venice.exceptions.VeniceException; import com.linkedin.venice.helix.ZkStoreConfigAccessor; -import com.linkedin.venice.meta.BufferReplayPolicy; -import com.linkedin.venice.meta.DataReplicationPolicy; -import com.linkedin.venice.meta.HybridStoreConfig; -import com.linkedin.venice.meta.HybridStoreConfigImpl; import com.linkedin.venice.meta.ReadWriteStoreRepository; import com.linkedin.venice.meta.Store; import com.linkedin.venice.meta.StoreConfig; @@ -37,58 +33,6 @@ public class TestVeniceHelixAdminWithoutCluster { private final PubSubTopicRepository pubSubTopicRepository = new PubSubTopicRepository(); - @Test - public void canMergeNewHybridConfigValuesToOldStore() { - String storeName = Utils.getUniqueString("storeName"); - Store store = TestUtils.createTestStore(storeName, "owner", System.currentTimeMillis()); - Assert.assertFalse(store.isHybrid()); - - Optional rewind = Optional.of(123L); - Optional lagOffset = Optional.of(1500L); - Optional timeLag = Optional.of(300L); - Optional dataReplicationPolicy = Optional.of(DataReplicationPolicy.AGGREGATE); - Optional bufferReplayPolicy = Optional.of(BufferReplayPolicy.REWIND_FROM_EOP); - HybridStoreConfig hybridStoreConfig = VeniceHelixAdmin.mergeNewSettingsIntoOldHybridStoreConfig( - store, - Optional.empty(), - Optional.empty(), - Optional.empty(), - Optional.empty(), - Optional.empty()); - Assert.assertNull( - hybridStoreConfig, - "passing empty optionals and a non-hybrid store should generate a null hybrid config"); - - hybridStoreConfig = VeniceHelixAdmin.mergeNewSettingsIntoOldHybridStoreConfig( - store, - rewind, - lagOffset, - timeLag, - dataReplicationPolicy, - bufferReplayPolicy); - Assert.assertNotNull(hybridStoreConfig, "specifying rewind and lagOffset should generate a valid hybrid config"); - Assert.assertEquals(hybridStoreConfig.getRewindTimeInSeconds(), 123L); - Assert.assertEquals(hybridStoreConfig.getOffsetLagThresholdToGoOnline(), 1500L); - 
Assert.assertEquals(hybridStoreConfig.getProducerTimestampLagThresholdToGoOnlineInSeconds(), 300L); - Assert.assertEquals(hybridStoreConfig.getDataReplicationPolicy(), DataReplicationPolicy.AGGREGATE); - - // It's okay that time lag threshold or data replication policy is not specified - hybridStoreConfig = VeniceHelixAdmin.mergeNewSettingsIntoOldHybridStoreConfig( - store, - rewind, - lagOffset, - Optional.empty(), - Optional.empty(), - Optional.empty()); - Assert.assertNotNull(hybridStoreConfig, "specifying rewind and lagOffset should generate a valid hybrid config"); - Assert.assertEquals(hybridStoreConfig.getRewindTimeInSeconds(), 123L); - Assert.assertEquals(hybridStoreConfig.getOffsetLagThresholdToGoOnline(), 1500L); - Assert.assertEquals( - hybridStoreConfig.getProducerTimestampLagThresholdToGoOnlineInSeconds(), - HybridStoreConfigImpl.DEFAULT_HYBRID_TIME_LAG_THRESHOLD); - Assert.assertEquals(hybridStoreConfig.getDataReplicationPolicy(), DataReplicationPolicy.NON_AGGREGATE); - } - @Test(expectedExceptions = VeniceException.class, expectedExceptionsMessageRegExp = ".*still exists in cluster.*") public void testCheckResourceCleanupBeforeStoreCreationWhenExistsInOtherCluster() { String clusterName = "cluster1"; diff --git a/services/venice-controller/src/test/java/com/linkedin/venice/controller/TestVeniceParentHelixAdmin.java b/services/venice-controller/src/test/java/com/linkedin/venice/controller/TestVeniceParentHelixAdmin.java index 67a27eb57c3..af7ef789042 100644 --- a/services/venice-controller/src/test/java/com/linkedin/venice/controller/TestVeniceParentHelixAdmin.java +++ b/services/venice-controller/src/test/java/com/linkedin/venice/controller/TestVeniceParentHelixAdmin.java @@ -2,7 +2,7 @@ import static com.linkedin.venice.controller.VeniceHelixAdmin.VERSION_ID_UNSET; import static com.linkedin.venice.meta.BufferReplayPolicy.REWIND_FROM_SOP; -import static com.linkedin.venice.meta.HybridStoreConfigImpl.DEFAULT_HYBRID_TIME_LAG_THRESHOLD; +import static com.linkedin.venice.utils.TestWriteUtils.loadFileAsString; import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyBoolean; import static org.mockito.Mockito.anyInt; @@ -20,8 +20,10 @@ import static org.mockito.Mockito.when; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertThrows; +import static org.testng.Assert.assertTrue; +import static org.testng.Assert.expectThrows; -import com.linkedin.venice.common.VeniceSystemStoreUtils; +import com.linkedin.avroutil1.compatibility.AvroCompatibilityHelper; import com.linkedin.venice.compression.CompressionStrategy; import com.linkedin.venice.controller.kafka.AdminTopicUtils; import com.linkedin.venice.controller.kafka.consumer.AdminConsumptionTask; @@ -40,6 +42,7 @@ import com.linkedin.venice.controller.kafka.protocol.serializer.AdminOperationSerializer; import com.linkedin.venice.controller.lingeringjob.LingeringStoreVersionChecker; import com.linkedin.venice.controller.stats.VeniceAdminStats; +import com.linkedin.venice.controller.util.AdminUtils; import com.linkedin.venice.controllerapi.ControllerClient; import com.linkedin.venice.controllerapi.ControllerResponse; import com.linkedin.venice.controllerapi.JobStatusQueryResponse; @@ -80,6 +83,7 @@ import com.linkedin.venice.pushmonitor.PartitionStatus; import com.linkedin.venice.pushmonitor.StatusSnapshot; import com.linkedin.venice.schema.GeneratedSchemaID; +import com.linkedin.venice.schema.SchemaEntry; import com.linkedin.venice.schema.avro.DirectionalSchemaCompatibilityType; 
import com.linkedin.venice.serialization.avro.AvroProtocolDefinition; import com.linkedin.venice.utils.DataProviderUtils; @@ -89,7 +93,6 @@ import com.linkedin.venice.utils.TestUtils; import com.linkedin.venice.utils.Time; import com.linkedin.venice.utils.Utils; -import com.linkedin.venice.utils.concurrent.VeniceConcurrentHashMap; import com.linkedin.venice.utils.locks.ClusterLockManager; import com.linkedin.venice.views.ChangeCaptureView; import com.linkedin.venice.writer.VeniceWriter; @@ -106,6 +109,7 @@ import java.util.Set; import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; +import org.apache.avro.Schema; import org.apache.http.HttpStatus; import org.mockito.ArgumentCaptor; import org.testng.Assert; @@ -157,104 +161,6 @@ public void testStartWhenTopicNotExists() { Optional.empty()); } - /** - * Partially stubbed class to verify async setup behavior. - */ - private static class AsyncSetupMockVeniceParentHelixAdmin extends VeniceParentHelixAdmin { - private Map systemStores = new VeniceConcurrentHashMap<>(); - - public AsyncSetupMockVeniceParentHelixAdmin( - VeniceHelixAdmin veniceHelixAdmin, - VeniceControllerClusterConfig config) { - super(veniceHelixAdmin, TestUtils.getMultiClusterConfigFromOneCluster(config)); - } - - public boolean isAsyncSetupRunning(String clusterName) { - return asyncSetupEnabledMap.get(clusterName); - } - - @Override - public void createStore( - String clusterName, - String storeName, - String owner, - String keySchema, - String valueSchema, - boolean isSystemStore) { - if (!(VeniceSystemStoreUtils.isSystemStore(storeName) && isSystemStore)) { - throw new VeniceException("Invalid store name and isSystemStore combination. Got store name: " + storeName); - } - if (systemStores.containsKey(storeName)) { - // no op - return; - } - Store newStore = new ZKStore( - storeName, - owner, - System.currentTimeMillis(), - PersistenceType.IN_MEMORY, - RoutingStrategy.HASH, - ReadStrategy.ANY_OF_ONLINE, - OfflinePushStrategy.WAIT_N_MINUS_ONE_REPLCIA_PER_PARTITION, - 1); - systemStores.put(storeName, newStore); - } - - @Override - public Store getStore(String clusterName, String storeName) { - if (!systemStores.containsKey(storeName)) { - return null; - } - return systemStores.get(storeName).cloneStore(); - } - - @Override - public void updateStore(String clusterName, String storeName, UpdateStoreQueryParams params) { - Optional hybridRewindSeconds = params.getHybridRewindSeconds(); - Optional hybridOffsetLagThreshold = params.getHybridOffsetLagThreshold(); - Optional hybridTimeLagThreshold = params.getHybridTimeLagThreshold(); - Optional hybridDataReplicationPolicy = params.getHybridDataReplicationPolicy(); - Optional hybridBufferReplayPolicy = params.getHybridBufferReplayPolicy(); - - if (!systemStores.containsKey(storeName)) { - throw new VeniceNoStoreException("Cannot update store " + storeName + " because it's missing."); - } - if (hybridRewindSeconds.isPresent() && hybridOffsetLagThreshold.isPresent()) { - final long finalHybridTimeLagThreshold = hybridTimeLagThreshold.orElse(DEFAULT_HYBRID_TIME_LAG_THRESHOLD); - final DataReplicationPolicy finalHybridDataReplicationPolicy = - hybridDataReplicationPolicy.orElse(DataReplicationPolicy.NON_AGGREGATE); - final BufferReplayPolicy finalHybridBufferReplayPolicy = - hybridBufferReplayPolicy.orElse(BufferReplayPolicy.REWIND_FROM_EOP); - systemStores.get(storeName) - .setHybridStoreConfig( - new HybridStoreConfigImpl( - hybridRewindSeconds.get(), - hybridOffsetLagThreshold.get(), - 
finalHybridTimeLagThreshold, - finalHybridDataReplicationPolicy, - finalHybridBufferReplayPolicy)); - } - } - - @Override - public Version incrementVersionIdempotent( - String clusterName, - String storeName, - String pushJobId, - int numberOfPartition, - int replicationFactor) { - if (!systemStores.containsKey(storeName)) { - throw new VeniceNoStoreException("Cannot add version to store " + storeName + " because it's missing."); - } - Version version = new VersionImpl(storeName, 1, "test-id"); - version.setReplicationFactor(replicationFactor); - List versions = new ArrayList<>(); - versions.add(version); - systemStores.get(storeName).setVersions(versions); - return version; - } - } - @Test public void testAddStore() { doReturn(CompletableFuture.completedFuture(new SimplePubSubProduceResultImpl(topicName, partitionId, 1, -1))) @@ -1050,7 +956,7 @@ public void testIdempotentIncrementVersionWhenPreviousTopicsExistAndOfflineJobIs try { partialMockParentAdmin.incrementVersionIdempotent(clusterName, storeName, pushJobId, 1, 1); } catch (VeniceException e) { - Assert.assertTrue( + assertTrue( e.getMessage().contains(pushJobId), "Exception for topic exists when increment version should contain requested pushId"); } @@ -1472,11 +1378,11 @@ public void testStoreVersionCleanUpWithMoreVersions() { } // child region current versions 4,5,6 are persisted for (int i = 4; i <= 6; ++i) { - Assert.assertTrue(capturedStore.containsVersion(i)); + assertTrue(capturedStore.containsVersion(i)); } // last two probably failed pushes are persisted. for (int i = 9; i <= 10; ++i) { - Assert.assertTrue(capturedStore.containsVersion(i)); + assertTrue(capturedStore.containsVersion(i)); } } @@ -1568,7 +1474,7 @@ public void testGetExecutionStatus() { for (ExecutionStatus status: ExecutionStatus.values()) { assertEquals(clientMap.get(status).queryJobStatus("topic", Optional.empty()).getStatus(), status.toString()); } - Assert.assertTrue(clientMap.get(null).queryJobStatus("topic", Optional.empty()).isError()); + assertTrue(clientMap.get(null).queryJobStatus("topic", Optional.empty()).isError()); Map completeMap = new HashMap<>(); completeMap.put("cluster", clientMap.get(ExecutionStatus.COMPLETED)); @@ -1689,7 +1595,7 @@ public void testGetExecutionStatus() { assertEquals(extraInfo.get("fabric2"), ExecutionStatus.COMPLETED.toString()); assertEquals(extraInfo.get("failFabric"), ExecutionStatus.UNKNOWN.toString()); assertEquals(extraInfo.get("completelyFailingFabric"), ExecutionStatus.UNKNOWN.toString()); - Assert.assertTrue( + assertTrue( offlineJobStatus.getExtraDetails().get("completelyFailingFabric").contains(completelyFailingExceptionMessage)); Map errorMap = new HashMap<>(); @@ -1767,8 +1673,8 @@ public void testUpdateStore() { assertEquals(adminMessage.operationType, AdminMessageType.UPDATE_STORE.getValue()); UpdateStore updateStore = (UpdateStore) adminMessage.payloadUnion; - assertEquals(updateStore.incrementalPushEnabled, true); - Assert.assertTrue(updateStore.blobTransferEnabled); + assertTrue(updateStore.incrementalPushEnabled); + assertTrue(updateStore.blobTransferEnabled); long readQuota = 100L; boolean readability = true; @@ -1844,7 +1750,7 @@ public void testUpdateStore() { schemaId = schemaCaptor.getValue(); adminMessage = adminOperationSerializer.deserialize(ByteBuffer.wrap(valueBytes), schemaId); updateStore = (UpdateStore) adminMessage.payloadUnion; - Assert.assertTrue( + assertTrue( updateStore.nativeReplicationEnabled, "Native replication was not set to true after updating the store!"); // Test exception 
thrown for unsuccessful partitioner instance creation inside store update. @@ -1855,8 +1761,8 @@ public void testUpdateStore() { new UpdateStoreQueryParams().setPartitionerClass(InvalidKeySchemaPartitioner.class.getName())); Assert.fail("The partitioner creation should not be successful"); } catch (Exception e) { - Assert.assertTrue(e.getClass().isAssignableFrom(VeniceHttpException.class)); - Assert.assertTrue(e instanceof VeniceHttpException); + assertTrue(e.getClass().isAssignableFrom(VeniceHttpException.class)); + assertTrue(e instanceof VeniceHttpException); VeniceHttpException veniceHttpException = (VeniceHttpException) e; assertEquals(veniceHttpException.getHttpStatusCode(), HttpStatus.SC_BAD_REQUEST); assertEquals(veniceHttpException.getErrorType(), ErrorType.INVALID_SCHEMA); @@ -1905,11 +1811,12 @@ public void testDisableHybridConfigWhenActiveActiveOrIncPushConfigIsEnabled() { 1000, 100, -1, - DataReplicationPolicy.NON_AGGREGATE, + DataReplicationPolicy.ACTIVE_ACTIVE, BufferReplayPolicy.REWIND_FROM_EOP)); store.setActiveActiveReplicationEnabled(true); store.setIncrementalPushEnabled(true); store.setNativeReplicationEnabled(true); + store.setNativeReplicationSourceFabric("dc-0"); store.setChunkingEnabled(true); doReturn(store).when(internalAdmin).getStore(clusterName, storeName); @@ -1952,7 +1859,7 @@ public void testDisableHybridConfigWhenActiveActiveOrIncPushConfigIsEnabled() { int schemaId = schemaCaptor.getValue(); AdminOperation adminMessage = adminOperationSerializer.deserialize(ByteBuffer.wrap(valueBytes), schemaId); UpdateStore updateStore = (UpdateStore) adminMessage.payloadUnion; - Assert.assertFalse(internalAdmin.isHybrid(updateStore.getHybridStoreConfig())); + Assert.assertFalse(AdminUtils.isHybrid(updateStore.getHybridStoreConfig())); Assert.assertFalse(updateStore.incrementalPushEnabled); Assert.assertFalse(updateStore.activeActiveReplicationEnabled); } @@ -1961,6 +1868,14 @@ public void testDisableHybridConfigWhenActiveActiveOrIncPushConfigIsEnabled() { public void testSetStoreViewConfig() { String storeName = Utils.getUniqueString("testUpdateStore"); Store store = TestUtils.createTestStore(storeName, "test", System.currentTimeMillis()); + store.setPartitionCount(100); + store.setHybridStoreConfig( + new HybridStoreConfigImpl( + 100, + -1, + 100, + DataReplicationPolicy.ACTIVE_ACTIVE, + BufferReplayPolicy.REWIND_FROM_EOP)); store.setActiveActiveReplicationEnabled(true); store.setChunkingEnabled(true); doReturn(store).when(internalAdmin).getStore(clusterName, storeName); @@ -1988,13 +1903,21 @@ public void testSetStoreViewConfig() { int schemaId = schemaCaptor.getValue(); AdminOperation adminMessage = adminOperationSerializer.deserialize(ByteBuffer.wrap(valueBytes), schemaId); UpdateStore updateStore = (UpdateStore) adminMessage.payloadUnion; - Assert.assertTrue(updateStore.getViews().containsKey("changeCapture")); + assertTrue(updateStore.getViews().containsKey("changeCapture")); } @Test public void testInsertStoreViewConfig() { String storeName = Utils.getUniqueString("testUpdateStore"); Store store = TestUtils.createTestStore(storeName, "test", System.currentTimeMillis()); + store.setPartitionCount(100); + store.setHybridStoreConfig( + new HybridStoreConfigImpl( + 100, + -1, + 100, + DataReplicationPolicy.ACTIVE_ACTIVE, + BufferReplayPolicy.REWIND_FROM_EOP)); store.setActiveActiveReplicationEnabled(true); store.setChunkingEnabled(true); store.setViewConfigs( @@ -2025,17 +1948,25 @@ public void testInsertStoreViewConfig() { AdminOperation adminMessage = 
adminOperationSerializer.deserialize(ByteBuffer.wrap(valueBytes), schemaId); UpdateStore updateStore = (UpdateStore) adminMessage.payloadUnion; assertEquals(updateStore.getViews().size(), 2); - Assert.assertTrue(updateStore.getViews().containsKey("changeCapture")); + assertTrue(updateStore.getViews().containsKey("changeCapture")); assertEquals( updateStore.getViews().get("changeCapture").viewClassName.toString(), ChangeCaptureView.class.getCanonicalName()); - Assert.assertTrue(updateStore.getViews().get("changeCapture").viewParameters.isEmpty()); + assertTrue(updateStore.getViews().get("changeCapture").viewParameters.isEmpty()); } @Test public void testRemoveStoreViewConfig() { String storeName = Utils.getUniqueString("testUpdateStore"); Store store = TestUtils.createTestStore(storeName, "test", System.currentTimeMillis()); + store.setPartitionCount(100); + store.setHybridStoreConfig( + new HybridStoreConfigImpl( + 100, + -1, + 100, + DataReplicationPolicy.ACTIVE_ACTIVE, + BufferReplayPolicy.REWIND_FROM_EOP)); store.setActiveActiveReplicationEnabled(true); store.setChunkingEnabled(true); store.setViewConfigs( @@ -2094,11 +2025,6 @@ public void testUpdateStoreWithBadPartitionerConfigs() { () -> parentAdmin .updateStore(clusterName, storeName, new UpdateStoreQueryParams().setWriteComputationEnabled(true))); verify(veniceWriter, times(0)).put(any(), any(), anyInt()); - - Assert.assertThrows( - () -> parentAdmin - .updateStore(clusterName, storeName, new UpdateStoreQueryParams().setActiveActiveReplicationEnabled(true))); - verify(veniceWriter, times(0)).put(any(), any(), anyInt()); } @Test @@ -2195,7 +2121,7 @@ private Map prepareForCurrentVersionTest(int regionCou public void testGetKafkaTopicsByAge() { String storeName = Utils.getUniqueString("test-store"); List versionTopics = parentAdmin.getKafkaTopicsByAge(storeName); - Assert.assertTrue(versionTopics.isEmpty()); + assertTrue(versionTopics.isEmpty()); Set topicList = new HashSet<>(); topicList.add(pubSubTopicRepository.getTopic(storeName + "_v1")); @@ -2206,8 +2132,8 @@ public void testGetKafkaTopicsByAge() { Assert.assertFalse(versionTopics.isEmpty()); PubSubTopic latestTopic = versionTopics.get(0); assertEquals(latestTopic, pubSubTopicRepository.getTopic(storeName + "_v3")); - Assert.assertTrue(topicList.containsAll(versionTopics)); - Assert.assertTrue(versionTopics.containsAll(topicList)); + assertTrue(topicList.containsAll(versionTopics)); + assertTrue(versionTopics.containsAll(topicList)); } @Test @@ -2265,7 +2191,7 @@ public void testGetTopicForCurrentPushJob() { doReturn(new Admin.OfflinePushStatusInfo(ExecutionStatus.PROGRESS)).when(mockParentAdmin) .getOffLinePushStatus(clusterName, latestTopic); Optional currentPush = mockParentAdmin.getTopicForCurrentPushJob(clusterName, storeName, false, false); - Assert.assertTrue(currentPush.isPresent()); + assertTrue(currentPush.isPresent()); assertEquals(currentPush.get(), latestTopic); verify(mockParentAdmin, times(2)).getOffLinePushStatus(clusterName, latestTopic); @@ -2286,7 +2212,7 @@ public void testGetTopicForCurrentPushJob() { doReturn(new Admin.OfflinePushStatusInfo(ExecutionStatus.PROGRESS, extraInfo)).when(mockParentAdmin) .getOffLinePushStatus(clusterName, latestTopic); currentPush = mockParentAdmin.getTopicForCurrentPushJob(clusterName, storeName, false, false); - Assert.assertTrue(currentPush.isPresent()); + assertTrue(currentPush.isPresent()); assertEquals(currentPush.get(), latestTopic); verify(mockParentAdmin, times(12)).getOffLinePushStatus(clusterName, latestTopic); @@ 
-2298,7 +2224,7 @@ public void testGetTopicForCurrentPushJob() { .thenReturn(new Admin.OfflinePushStatusInfo(ExecutionStatus.PROGRESS, extraInfo)) .thenReturn(new Admin.OfflinePushStatusInfo(ExecutionStatus.PROGRESS)); currentPush = mockParentAdmin.getTopicForCurrentPushJob(clusterName, storeName, false, false); - Assert.assertTrue(currentPush.isPresent()); + assertTrue(currentPush.isPresent()); assertEquals(currentPush.get(), latestTopic); verify(mockParentAdmin, times(14)).getOffLinePushStatus(clusterName, latestTopic); @@ -2508,7 +2434,7 @@ public void testAdminCanKillLingeringVersion(boolean isIncrementalPush) { newVersion, "Unexpected new version returned by incrementVersionIdempotent"); // Parent should kill the lingering job. - Assert.assertTrue(partialMockParentAdmin.isJobKilled(version.kafkaTopicName())); + assertTrue(partialMockParentAdmin.isJobKilled(version.kafkaTopicName())); } } } @@ -2569,7 +2495,7 @@ public void testAdminMessageIsolation() { parentAdmin.incrementVersionIdempotent(clusterName, storeA, "", 3, 3); Assert.fail("Admin operations to a store with existing exception should be blocked"); } catch (VeniceException e) { - Assert.assertTrue(e.getMessage().contains("due to existing exception")); + assertTrue(e.getMessage().contains("due to existing exception")); } // store B should still be able to process admin operations. assertEquals( @@ -2637,14 +2563,12 @@ public void testHybridAndIncrementalUpdateStoreCommands() { UpdateStore updateStore = (UpdateStore) adminMessage.payloadUnion; assertEquals(updateStore.hybridStoreConfig.offsetLagThresholdToGoOnline, 20000); assertEquals(updateStore.hybridStoreConfig.rewindTimeInSeconds, 60); + assertEquals(updateStore.nativeReplicationSourceFabric.toString(), "dc-hybrid-nr"); + assertEquals(updateStore.partitionNum, 1024); + store.setPartitionCount(1024); store.setHybridStoreConfig( - new HybridStoreConfigImpl( - 60, - 20000, - 0, - DataReplicationPolicy.NON_AGGREGATE, - BufferReplayPolicy.REWIND_FROM_EOP)); + new HybridStoreConfigImpl(60, 20000, 0, DataReplicationPolicy.NONE, BufferReplayPolicy.REWIND_FROM_EOP)); // Incremental push can be enabled on a hybrid store, default inc push policy is inc push to RT now parentAdmin.updateStore(clusterName, storeName, new UpdateStoreQueryParams().setIncrementalPushEnabled(true)); @@ -2652,6 +2576,116 @@ public void testHybridAndIncrementalUpdateStoreCommands() { verify(veniceWriter, times(2)).put(keyCaptor.capture(), valueCaptor.capture(), schemaCaptor.capture()); } + @Test + public void testEnableActiveActiveSetsReplicationPolicy() { + String storeName = Utils.getUniqueString("testUpdateStore"); + Store store = TestUtils.createTestStore(storeName, "test", System.currentTimeMillis()); + + store.setPartitionCount(100); + store.setHybridStoreConfig( + new HybridStoreConfigImpl( + 1000, + 100, + -1, + DataReplicationPolicy.NON_AGGREGATE, + BufferReplayPolicy.REWIND_FROM_EOP)); + store.setIncrementalPushEnabled(true); + store.setNativeReplicationEnabled(true); + store.setNativeReplicationSourceFabric("dc-0"); + store.setChunkingEnabled(true); + doReturn(store).when(internalAdmin).getStore(clusterName, storeName); + + doReturn(CompletableFuture.completedFuture(new SimplePubSubProduceResultImpl(topicName, partitionId, 1, -1))) + .when(veniceWriter) + .put(any(), any(), anyInt()); + + when(zkClient.readData(zkMetadataNodePath, null)).thenReturn(null) + .thenReturn(AdminTopicMetadataAccessor.generateMetadataMap(1, -1, 1)); + + parentAdmin.initStorageCluster(clusterName); + parentAdmin + 
.updateStore(clusterName, storeName, new UpdateStoreQueryParams().setActiveActiveReplicationEnabled(true)); + + verify(zkClient, times(1)).readData(zkMetadataNodePath, null); + ArgumentCaptor keyCaptor = ArgumentCaptor.forClass(byte[].class); + ArgumentCaptor valueCaptor = ArgumentCaptor.forClass(byte[].class); + ArgumentCaptor schemaCaptor = ArgumentCaptor.forClass(Integer.class); + verify(veniceWriter).put(keyCaptor.capture(), valueCaptor.capture(), schemaCaptor.capture()); + + byte[] keyBytes = keyCaptor.getValue(); + byte[] valueBytes = valueCaptor.getValue(); + int schemaId = schemaCaptor.getValue(); + assertEquals(schemaId, AdminOperationSerializer.LATEST_SCHEMA_ID_FOR_ADMIN_OPERATION); + assertEquals(keyBytes.length, 0); + + AdminOperation adminMessage = adminOperationSerializer.deserialize(ByteBuffer.wrap(valueBytes), schemaId); + assertEquals(adminMessage.operationType, AdminMessageType.UPDATE_STORE.getValue()); + + UpdateStore updateStore = (UpdateStore) adminMessage.payloadUnion; + assertEquals(updateStore.activeActiveReplicationEnabled, true); + assertEquals(updateStore.hybridStoreConfig.dataReplicationPolicy, DataReplicationPolicy.ACTIVE_ACTIVE.getValue()); + } + + @Test + public void testEnableWcValidatesSchema() { + String storeName = Utils.getUniqueString("testUpdateStore"); + Store store = TestUtils.createTestStore(storeName, "test", System.currentTimeMillis()); + + store.setPartitionCount(100); + store.setHybridStoreConfig( + new HybridStoreConfigImpl( + 1000, + 100, + -1, + DataReplicationPolicy.NON_AGGREGATE, + BufferReplayPolicy.REWIND_FROM_EOP)); + store.setNativeReplicationEnabled(true); + store.setNativeReplicationSourceFabric("dc-0"); + doReturn(store).when(internalAdmin).getStore(clusterName, storeName); + + doReturn(CompletableFuture.completedFuture(new SimplePubSubProduceResultImpl(topicName, partitionId, 1, -1))) + .when(veniceWriter) + .put(any(), any(), anyInt()); + + when(zkClient.readData(zkMetadataNodePath, null)).thenReturn(null) + .thenReturn(AdminTopicMetadataAccessor.generateMetadataMap(1, -1, 1)); + + parentAdmin.initStorageCluster(clusterName); + + String stringSchemaStr = "\"string\""; + doReturn(Collections.singletonList(new SchemaEntry(1, stringSchemaStr))).when(internalAdmin) + .getValueSchemas(clusterName, storeName); + VeniceException e = expectThrows( + VeniceException.class, + () -> parentAdmin + .updateStore(clusterName, storeName, new UpdateStoreQueryParams().setWriteComputationEnabled(true))); + assertTrue(e.getMessage().contains("top level field probably missing defaults")); + + Schema recordSchema = AvroCompatibilityHelper.parse(loadFileAsString("superset_schema_test/v1.avsc")); + doReturn(Collections.singletonList(new SchemaEntry(1, recordSchema))).when(internalAdmin) + .getValueSchemas(clusterName, storeName); + parentAdmin.updateStore(clusterName, storeName, new UpdateStoreQueryParams().setWriteComputationEnabled(true)); + + verify(zkClient, times(1)).readData(zkMetadataNodePath, null); + ArgumentCaptor keyCaptor = ArgumentCaptor.forClass(byte[].class); + ArgumentCaptor valueCaptor = ArgumentCaptor.forClass(byte[].class); + ArgumentCaptor schemaCaptor = ArgumentCaptor.forClass(Integer.class); + verify(veniceWriter).put(keyCaptor.capture(), valueCaptor.capture(), schemaCaptor.capture()); + + byte[] keyBytes = keyCaptor.getValue(); + byte[] valueBytes = valueCaptor.getValue(); + int schemaId = schemaCaptor.getValue(); + assertEquals(schemaId, AdminOperationSerializer.LATEST_SCHEMA_ID_FOR_ADMIN_OPERATION); + assertEquals(keyBytes.length, 
0); + + AdminOperation adminMessage = adminOperationSerializer.deserialize(ByteBuffer.wrap(valueBytes), schemaId); + assertEquals(adminMessage.operationType, AdminMessageType.UPDATE_STORE.getValue()); + + UpdateStore updateStore = (UpdateStore) adminMessage.payloadUnion; + assertTrue(updateStore.writeComputationEnabled); + assertTrue(updateStore.chunkingEnabled); + } + @Test public void testSetVersionShouldFailOnParentController() { try { diff --git a/services/venice-controller/src/test/java/com/linkedin/venice/controller/init/SystemStoreInitializationHelperTest.java b/services/venice-controller/src/test/java/com/linkedin/venice/controller/init/SystemStoreInitializationHelperTest.java index 38f33432b9e..6c4a0595868 100644 --- a/services/venice-controller/src/test/java/com/linkedin/venice/controller/init/SystemStoreInitializationHelperTest.java +++ b/services/venice-controller/src/test/java/com/linkedin/venice/controller/init/SystemStoreInitializationHelperTest.java @@ -64,14 +64,17 @@ public void testInitialSystemStoreSetup(boolean explicitlyProvidedKeySchema) { int partitionCount = 10; int replicationFactor = 3; doReturn(1).when(firstVersion).getNumber(); + doReturn(partitionCount).when(firstVersion).getPartitionCount(); Store storeForTest = mock(Store.class); Store storeForTestAfterUpdateStore = mock(Store.class); doReturn(true).when(storeForTestAfterUpdateStore).isHybrid(); + doReturn(partitionCount).when(storeForTestAfterUpdateStore).getPartitionCount(); Store storeForTestAfterCreatingVersion = mock(Store.class); doReturn(true).when(storeForTestAfterCreatingVersion).isHybrid(); + doReturn(partitionCount).when(storeForTestAfterCreatingVersion).getPartitionCount(); doReturn(versionNumber).when(storeForTestAfterCreatingVersion).getCurrentVersion(); doReturn(firstVersion).when(storeForTestAfterCreatingVersion).getVersion(versionNumber); doReturn(Collections.singletonList(firstVersion)).when(storeForTestAfterCreatingVersion).getVersions(); diff --git a/services/venice-controller/src/test/java/com/linkedin/venice/controller/supersetschema/TestSupersetSchemaGeneratorWithCustomProp.java b/services/venice-controller/src/test/java/com/linkedin/venice/controller/supersetschema/TestSupersetSchemaGeneratorWithCustomProp.java index 7fd42962734..c734d5af601 100644 --- a/services/venice-controller/src/test/java/com/linkedin/venice/controller/supersetschema/TestSupersetSchemaGeneratorWithCustomProp.java +++ b/services/venice-controller/src/test/java/com/linkedin/venice/controller/supersetschema/TestSupersetSchemaGeneratorWithCustomProp.java @@ -31,6 +31,10 @@ public class TestSupersetSchemaGeneratorWithCustomProp { AvroCompatibilityHelper.parse(TestWriteUtils.loadFileAsString("superset_schema_test/v3.avsc")); private Schema schemaV4 = AvroCompatibilityHelper .parse(TestWriteUtils.loadFileAsString("superset_schema_test/v4_without_custom_prop.avsc")); + private Schema schemaV5 = + AvroCompatibilityHelper.parse(TestWriteUtils.loadFileAsString("superset_schema_test/v5.avsc")); + private Schema schemaV6 = + AvroCompatibilityHelper.parse(TestWriteUtils.loadFileAsString("superset_schema_test/v6.avsc")); private SupersetSchemaGenerator generator; @@ -75,6 +79,39 @@ public void testGenerateSupersetSchemaFromSchemas() throws IOException { assertNotNull(supersetSchema3.getField("f1")); assertNotNull(supersetSchema3.getField("f2")); assertNotNull(supersetSchema3.getField("f3")); + + // v5 contains all fields in superset schema and a custom prop + Collection schemaEntryCollection4 = Arrays.asList( + new 
SchemaEntry(1, schemaV1), + new SchemaEntry(2, schemaV2), + new SchemaEntry(3, schemaV3), + new SchemaEntry(4, schemaV4), + new SchemaEntry(5, schemaV5)); + SchemaEntry supersetSchemaEntry4 = generator.generateSupersetSchemaFromSchemas(schemaEntryCollection4); + assertEquals(supersetSchemaEntry4.getId(), 5); + Schema supersetSchema4 = supersetSchemaEntry4.getSchema(); + assertEquals(supersetSchema4.getProp(CUSTOM_PROP), "custom_prop_value_for_v5"); + assertNotNull(supersetSchema4.getField("f0")); + assertNotNull(supersetSchema4.getField("f1")); + assertNotNull(supersetSchema4.getField("f2")); + assertNotNull(supersetSchema4.getField("f3")); + + // v6 contains a subset of fields, but with a different custom prop + Collection schemaEntryCollection5 = Arrays.asList( + new SchemaEntry(1, schemaV1), + new SchemaEntry(2, schemaV2), + new SchemaEntry(3, schemaV3), + new SchemaEntry(4, schemaV4), + new SchemaEntry(5, schemaV5), + new SchemaEntry(6, schemaV6)); + SchemaEntry supersetSchemaEntry5 = generator.generateSupersetSchemaFromSchemas(schemaEntryCollection5); + assertEquals(supersetSchemaEntry5.getId(), 7); + Schema supersetSchema5 = supersetSchemaEntry5.getSchema(); + assertEquals(supersetSchema5.getProp(CUSTOM_PROP), "custom_prop_value_for_v6"); + assertNotNull(supersetSchema5.getField("f0")); + assertNotNull(supersetSchema5.getField("f1")); + assertNotNull(supersetSchema5.getField("f2")); + assertNotNull(supersetSchema5.getField("f3")); } @Test diff --git a/services/venice-controller/src/test/java/com/linkedin/venice/controller/util/AdminUtilsTest.java b/services/venice-controller/src/test/java/com/linkedin/venice/controller/util/AdminUtilsTest.java new file mode 100644 index 00000000000..b636814704c --- /dev/null +++ b/services/venice-controller/src/test/java/com/linkedin/venice/controller/util/AdminUtilsTest.java @@ -0,0 +1,134 @@ +package com.linkedin.venice.controller.util; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.reset; +import static org.mockito.Mockito.verify; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertTrue; + +import com.linkedin.venice.ConfigConstants; +import com.linkedin.venice.controller.Admin; +import com.linkedin.venice.controller.VeniceControllerClusterConfig; +import com.linkedin.venice.controller.VeniceControllerMultiClusterConfig; +import com.linkedin.venice.controller.kafka.protocol.admin.HybridStoreConfigRecord; +import com.linkedin.venice.exceptions.VeniceException; +import com.linkedin.venice.meta.BufferReplayPolicy; +import com.linkedin.venice.meta.DataReplicationPolicy; +import com.linkedin.venice.meta.HybridStoreConfig; +import com.linkedin.venice.meta.HybridStoreConfigImpl; +import com.linkedin.venice.meta.Store; +import org.testng.Assert; +import org.testng.annotations.Test; + + +public class AdminUtilsTest { + @Test + public void testIsHybrid() { + Assert.assertFalse(AdminUtils.isHybrid((HybridStoreConfig) null)); + Assert.assertFalse(AdminUtils.isHybrid((HybridStoreConfigRecord) null)); + + HybridStoreConfig hybridStoreConfig; + hybridStoreConfig = new HybridStoreConfigImpl(-1, -1, -1, null, null); + Assert.assertFalse(AdminUtils.isHybrid(hybridStoreConfig)); + + hybridStoreConfig = new HybridStoreConfigImpl(100, -1, -1, null, null); + Assert.assertFalse(AdminUtils.isHybrid(hybridStoreConfig)); + + hybridStoreConfig = new HybridStoreConfigImpl(100, 
100, -1, null, null); + assertTrue(AdminUtils.isHybrid(hybridStoreConfig)); + + hybridStoreConfig = new HybridStoreConfigImpl(100, 100, 100, null, null); + assertTrue(AdminUtils.isHybrid(hybridStoreConfig)); + + hybridStoreConfig = new HybridStoreConfigImpl(100, -1, 100, null, null); + assertTrue(AdminUtils.isHybrid(hybridStoreConfig)); + + hybridStoreConfig = new HybridStoreConfigImpl(-1, -1, 100, null, null); + Assert.assertFalse(AdminUtils.isHybrid(hybridStoreConfig)); + + HybridStoreConfigRecord hybridStoreConfigRecord = new HybridStoreConfigRecord(); + hybridStoreConfigRecord.rewindTimeInSeconds = 100; + hybridStoreConfigRecord.offsetLagThresholdToGoOnline = 100; + hybridStoreConfigRecord.producerTimestampLagThresholdToGoOnlineInSeconds = -1; + hybridStoreConfigRecord.dataReplicationPolicy = DataReplicationPolicy.ACTIVE_ACTIVE.getValue(); + hybridStoreConfigRecord.bufferReplayPolicy = BufferReplayPolicy.REWIND_FROM_SOP.getValue(); + assertTrue(AdminUtils.isHybrid(hybridStoreConfigRecord)); + } + + @Test + public void testGetRmdVersionID() { + String storeName = "storeName"; + String clusterName = "clusterName"; + + Admin mockAdmin = mock(Admin.class); + VeniceControllerMultiClusterConfig multiClusterConfig = mock(VeniceControllerMultiClusterConfig.class); + VeniceControllerClusterConfig controllerConfig = mock(VeniceControllerClusterConfig.class); + Store mockStore = mock(Store.class); + + // Store null + cluster config not set + doReturn(null).when(mockAdmin).getStore(clusterName, storeName); + doReturn(multiClusterConfig).when(mockAdmin).getMultiClusterConfigs(); + doReturn(null).when(multiClusterConfig).getControllerConfig(clusterName); + VeniceException e1 = + Assert.expectThrows(VeniceException.class, () -> AdminUtils.getRmdVersionID(mockAdmin, storeName, clusterName)); + assertTrue(e1.getMessage().contains("No controller cluster config found for cluster clusterName")); + + reset(mockAdmin); + reset(multiClusterConfig); + reset(controllerConfig); + reset(mockStore); + + // Store null + cluster config set + doReturn(null).when(mockAdmin).getStore(clusterName, storeName); + doReturn(multiClusterConfig).when(mockAdmin).getMultiClusterConfigs(); + doReturn(controllerConfig).when(multiClusterConfig).getControllerConfig(clusterName); + doReturn(10).when(controllerConfig).getReplicationMetadataVersion(); + assertEquals(AdminUtils.getRmdVersionID(mockAdmin, storeName, clusterName), 10); + + reset(mockAdmin); + reset(multiClusterConfig); + reset(controllerConfig); + reset(mockStore); + + // Store-level RMD version ID not found + cluster config not set + doReturn(mockStore).when(mockAdmin).getStore(clusterName, storeName); + doReturn(ConfigConstants.UNSPECIFIED_REPLICATION_METADATA_VERSION).when(mockStore).getRmdVersion(); + doReturn(multiClusterConfig).when(mockAdmin).getMultiClusterConfigs(); + doReturn(null).when(multiClusterConfig).getControllerConfig(clusterName); + VeniceException e2 = + Assert.expectThrows(VeniceException.class, () -> AdminUtils.getRmdVersionID(mockAdmin, storeName, clusterName)); + assertTrue(e2.getMessage().contains("No controller cluster config found for cluster clusterName")); + + reset(mockAdmin); + reset(multiClusterConfig); + reset(controllerConfig); + reset(mockStore); + + // Store-level RMD version ID not found + cluster config set + doReturn(mockStore).when(mockAdmin).getStore(clusterName, storeName); + doReturn(ConfigConstants.UNSPECIFIED_REPLICATION_METADATA_VERSION).when(mockStore).getRmdVersion(); + 
doReturn(multiClusterConfig).when(mockAdmin).getMultiClusterConfigs(); + doReturn(controllerConfig).when(multiClusterConfig).getControllerConfig(clusterName); + doReturn(10).when(controllerConfig).getReplicationMetadataVersion(); + assertEquals(AdminUtils.getRmdVersionID(mockAdmin, storeName, clusterName), 10); + + reset(mockAdmin); + reset(multiClusterConfig); + reset(controllerConfig); + reset(mockStore); + + // Store-level RMD version ID found + doReturn(mockStore).when(mockAdmin).getStore(clusterName, storeName); + doReturn(5).when(mockStore).getRmdVersion(); + doReturn(multiClusterConfig).when(mockAdmin).getMultiClusterConfigs(); + doReturn(controllerConfig).when(multiClusterConfig).getControllerConfig(clusterName); + doReturn(10).when(controllerConfig).getReplicationMetadataVersion(); + assertEquals(AdminUtils.getRmdVersionID(mockAdmin, storeName, clusterName), 5); + verify(mockAdmin, never()).getMultiClusterConfigs(); + verify(multiClusterConfig, never()).getControllerConfig(any()); + verify(controllerConfig, never()).getReplicationMetadataVersion(); + } +} diff --git a/services/venice-controller/src/test/java/com/linkedin/venice/controller/util/PrimaryControllerConfigUpdateUtilsTest.java b/services/venice-controller/src/test/java/com/linkedin/venice/controller/util/PrimaryControllerConfigUpdateUtilsTest.java new file mode 100644 index 00000000000..6985ba12987 --- /dev/null +++ b/services/venice-controller/src/test/java/com/linkedin/venice/controller/util/PrimaryControllerConfigUpdateUtilsTest.java @@ -0,0 +1,154 @@ +package com.linkedin.venice.controller.util; + +import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.reset; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; + +import com.linkedin.venice.controller.Admin; +import com.linkedin.venice.controller.HelixVeniceClusterResources; +import com.linkedin.venice.controller.supersetschema.SupersetSchemaGenerator; +import com.linkedin.venice.meta.ReadWriteSchemaRepository; +import com.linkedin.venice.meta.Store; +import com.linkedin.venice.schema.SchemaData; +import com.linkedin.venice.schema.SchemaEntry; +import com.linkedin.venice.schema.rmd.RmdSchemaGenerator; +import com.linkedin.venice.schema.writecompute.WriteComputeSchemaConverter; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import org.apache.avro.Schema; +import org.testng.annotations.Test; + + +public class PrimaryControllerConfigUpdateUtilsTest { + private static final String VALUE_FIELD_NAME = "int_field"; + private static final String SECOND_VALUE_FIELD_NAME = "opt_int_field"; + private static final String VALUE_SCHEMA_V1_STR = "{\n" + "\"type\": \"record\",\n" + + "\"name\": \"TestValueSchema\",\n" + "\"namespace\": \"com.linkedin.venice.fastclient.schema\",\n" + + "\"fields\": [\n" + " {\"name\": \"" + VALUE_FIELD_NAME + "\", \"type\": \"int\", \"default\": 10}]\n" + "}"; + private static final String VALUE_SCHEMA_V2_STR = + "{\n" + "\"type\": \"record\",\n" + "\"name\": \"TestValueSchema\",\n" + + "\"namespace\": \"com.linkedin.venice.fastclient.schema\",\n" + "\"fields\": [\n" + "{\"name\": \"" + + SECOND_VALUE_FIELD_NAME + "\", \"type\": [\"null\", \"int\"], \"default\": null}]\n" + "}"; + private static final String SUPERSET_VALUE_SCHEMA_STR = "{\n" + "\"type\": 
\"record\",\n" + + "\"name\": \"TestValueSchema\",\n" + "\"namespace\": \"com.linkedin.venice.fastclient.schema\",\n" + + "\"fields\": [\n" + " {\"name\": \"" + VALUE_FIELD_NAME + "\", \"type\": \"int\", \"default\": 10},\n" + + "{\"name\": \"" + SECOND_VALUE_FIELD_NAME + "\", \"type\": [\"null\", \"int\"], \"default\": null}]\n" + "}"; + + @Test + public void testRegisterInferredSchemas() { + String clusterName = "clusterName"; + String storeName = "storeName"; + Collection storeValueSchemas = + Arrays.asList(new SchemaEntry(1, VALUE_SCHEMA_V1_STR), new SchemaEntry(2, VALUE_SCHEMA_V2_STR)); + SchemaEntry supersetSchemaEntry = new SchemaEntry(3, SUPERSET_VALUE_SCHEMA_STR); + + Admin mockAdmin = mock(Admin.class); + Store store = mock(Store.class); + + reset(mockAdmin); + reset(store); + setupMocks(mockAdmin, store, clusterName, storeName, storeValueSchemas, supersetSchemaEntry); + + doReturn(SchemaData.INVALID_VALUE_SCHEMA_ID).when(store).getLatestSuperSetValueSchemaId(); + doReturn(true).when(store).isReadComputationEnabled(); + PrimaryControllerConfigUpdateUtils.registerInferredSchemas(mockAdmin, clusterName, storeName); + validateSuperSetSchemaGenerated(mockAdmin, clusterName, storeName); + + reset(mockAdmin); + reset(store); + setupMocks(mockAdmin, store, clusterName, storeName, storeValueSchemas, supersetSchemaEntry); + + doReturn(SchemaData.INVALID_VALUE_SCHEMA_ID).when(store).getLatestSuperSetValueSchemaId(); + doReturn(true).when(store).isWriteComputationEnabled(); + PrimaryControllerConfigUpdateUtils.registerInferredSchemas(mockAdmin, clusterName, storeName); + validateSuperSetSchemaGenerated(mockAdmin, clusterName, storeName); + validateUpdateSchemaGenerated(mockAdmin, clusterName, storeName); + + reset(mockAdmin); + reset(store); + setupMocks(mockAdmin, store, clusterName, storeName, storeValueSchemas, supersetSchemaEntry); + + doReturn(1).when(store).getLatestSuperSetValueSchemaId(); + PrimaryControllerConfigUpdateUtils.registerInferredSchemas(mockAdmin, clusterName, storeName); + validateSuperSetSchemaGenerated(mockAdmin, clusterName, storeName); + + reset(mockAdmin); + reset(store); + setupMocks(mockAdmin, store, clusterName, storeName, storeValueSchemas, supersetSchemaEntry); + + doReturn(true).when(store).isActiveActiveReplicationEnabled(); + doReturn(1).when(store).getRmdVersion(); + PrimaryControllerConfigUpdateUtils.registerInferredSchemas(mockAdmin, clusterName, storeName); + validateRmdSchemaGenerated(mockAdmin, clusterName, storeName); + } + + private void setupMocks( + Admin mockAdmin, + Store store, + String clusterName, + String storeName, + Collection storeValueSchemas, + SchemaEntry supersetSchemaEntry) { + doReturn(storeName).when(store).getName(); + + SupersetSchemaGenerator supersetSchemaGenerator = mock(SupersetSchemaGenerator.class); + + doReturn(true).when(mockAdmin).isPrimary(); + doReturn(false).when(mockAdmin).isParent(); + doReturn(store).when(mockAdmin).getStore(clusterName, storeName); + doReturn(supersetSchemaGenerator).when(mockAdmin).getSupersetSchemaGenerator(clusterName); + doReturn(storeValueSchemas).when(mockAdmin).getValueSchemas(clusterName, storeName); + + doReturn(supersetSchemaEntry).when(supersetSchemaGenerator).generateSupersetSchemaFromSchemas(storeValueSchemas); + + HelixVeniceClusterResources clusterResources = mock(HelixVeniceClusterResources.class); + doReturn(clusterResources).when(mockAdmin).getHelixVeniceClusterResources(clusterName); + + ReadWriteSchemaRepository schemaRepository = mock(ReadWriteSchemaRepository.class); + 
doReturn(schemaRepository).when(clusterResources).getSchemaRepository(); + + doReturn(Collections.emptyList()).when(schemaRepository).getReplicationMetadataSchemas(storeName); + } + + private void validateSuperSetSchemaGenerated(Admin mockAdmin, String clusterName, String storeName) { + verify(mockAdmin).addSupersetSchema( + clusterName, + storeName, + null, + SchemaData.INVALID_VALUE_SCHEMA_ID, + SUPERSET_VALUE_SCHEMA_STR, + 3); + } + + private void validateUpdateSchemaGenerated(Admin mockAdmin, String clusterName, String storeName) { + SchemaEntry updateSchemaEntry1 = new SchemaEntry(1, VALUE_SCHEMA_V1_STR); + Schema updateSchema1 = + WriteComputeSchemaConverter.getInstance().convertFromValueRecordSchema(updateSchemaEntry1.getSchema()); + + SchemaEntry updateSchemaEntry2 = new SchemaEntry(1, VALUE_SCHEMA_V2_STR); + Schema updateSchema2 = + WriteComputeSchemaConverter.getInstance().convertFromValueRecordSchema(updateSchemaEntry2.getSchema()); + + // Ideally, we should have seen the superset schema also, but due to the static-ness of mocks, we don't see it now + verify(mockAdmin).addDerivedSchema(clusterName, storeName, 1, updateSchema1.toString()); + verify(mockAdmin).addDerivedSchema(clusterName, storeName, 2, updateSchema2.toString()); + verify(mockAdmin, times(2)).addDerivedSchema(eq(clusterName), eq(storeName), anyInt(), anyString()); + } + + private void validateRmdSchemaGenerated(Admin mockAdmin, String clusterName, String storeName) { + Schema rmdSchema1 = RmdSchemaGenerator.generateMetadataSchema(VALUE_SCHEMA_V1_STR, 1); + Schema rmdSchema2 = RmdSchemaGenerator.generateMetadataSchema(VALUE_SCHEMA_V2_STR, 1); + + // Ideally, we should have seen the superset schema also, but due to the static-ness of mocks, we don't see it now + verify(mockAdmin).addReplicationMetadataSchema(clusterName, storeName, 1, 1, rmdSchema1.toString()); + verify(mockAdmin).addReplicationMetadataSchema(clusterName, storeName, 2, 1, rmdSchema2.toString()); + verify(mockAdmin, times(2)) + .addReplicationMetadataSchema(eq(clusterName), eq(storeName), anyInt(), eq(1), anyString()); + } +} diff --git a/services/venice-controller/src/test/java/com/linkedin/venice/controller/util/UpdateStoreUtilsTest.java b/services/venice-controller/src/test/java/com/linkedin/venice/controller/util/UpdateStoreUtilsTest.java new file mode 100644 index 00000000000..3cfd8b20e8d --- /dev/null +++ b/services/venice-controller/src/test/java/com/linkedin/venice/controller/util/UpdateStoreUtilsTest.java @@ -0,0 +1,1079 @@ +package com.linkedin.venice.controller.util; + +import static com.linkedin.venice.controllerapi.ControllerApiConstants.ACTIVE_ACTIVE_REPLICATION_ENABLED; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.INCREMENTAL_PUSH_ENABLED; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.NATIVE_REPLICATION_SOURCE_FABRIC; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.PARTITION_COUNT; +import static com.linkedin.venice.controllerapi.ControllerApiConstants.WRITE_COMPUTATION_ENABLED; +import static com.linkedin.venice.utils.ByteUtils.BYTES_PER_GB; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; 
+import static org.mockito.Mockito.reset; +import static org.mockito.Mockito.verify; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertFalse; +import static org.testng.Assert.assertNotSame; +import static org.testng.Assert.assertSame; +import static org.testng.Assert.assertThrows; +import static org.testng.Assert.assertTrue; +import static org.testng.Assert.expectThrows; + +import com.linkedin.venice.common.VeniceSystemStoreType; +import com.linkedin.venice.common.VeniceSystemStoreUtils; +import com.linkedin.venice.controller.Admin; +import com.linkedin.venice.controller.HelixVeniceClusterResources; +import com.linkedin.venice.controller.VeniceControllerClusterConfig; +import com.linkedin.venice.controller.VeniceControllerMultiClusterConfig; +import com.linkedin.venice.controller.VeniceHelixAdmin; +import com.linkedin.venice.controller.VeniceParentHelixAdmin; +import com.linkedin.venice.exceptions.ErrorType; +import com.linkedin.venice.exceptions.PartitionerSchemaMismatchException; +import com.linkedin.venice.exceptions.VeniceException; +import com.linkedin.venice.exceptions.VeniceHttpException; +import com.linkedin.venice.helix.StoragePersonaRepository; +import com.linkedin.venice.helix.ZkRoutersClusterManager; +import com.linkedin.venice.meta.BufferReplayPolicy; +import com.linkedin.venice.meta.DataReplicationPolicy; +import com.linkedin.venice.meta.ETLStoreConfigImpl; +import com.linkedin.venice.meta.HybridStoreConfig; +import com.linkedin.venice.meta.HybridStoreConfigImpl; +import com.linkedin.venice.meta.PartitionerConfig; +import com.linkedin.venice.meta.PartitionerConfigImpl; +import com.linkedin.venice.meta.Store; +import com.linkedin.venice.meta.Version; +import com.linkedin.venice.meta.ViewConfig; +import com.linkedin.venice.partitioner.DefaultVenicePartitioner; +import com.linkedin.venice.partitioner.VenicePartitioner; +import com.linkedin.venice.persona.StoragePersona; +import com.linkedin.venice.pubsub.PubSubTopicRepository; +import com.linkedin.venice.pubsub.api.PubSubTopic; +import com.linkedin.venice.pubsub.manager.TopicManager; +import com.linkedin.venice.schema.SchemaData; +import com.linkedin.venice.schema.SchemaEntry; +import com.linkedin.venice.utils.TestUtils; +import com.linkedin.venice.utils.Utils; +import com.linkedin.venice.utils.VeniceProperties; +import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; +import javax.annotation.Nonnull; +import org.apache.avro.Schema; +import org.apache.http.HttpStatus; +import org.testng.Assert; +import org.testng.annotations.Test; + + +public class UpdateStoreUtilsTest { + private static final String VALUE_FIELD_NAME = "int_field"; + private static final String SECOND_VALUE_FIELD_NAME = "opt_int_field"; + private static final String VALUE_SCHEMA_V1_STR = "{\n" + "\"type\": \"record\",\n" + + "\"name\": \"TestValueSchema\",\n" + "\"namespace\": \"com.linkedin.venice.fastclient.schema\",\n" + + "\"fields\": [\n" + " {\"name\": \"" + VALUE_FIELD_NAME + "\", \"type\": \"int\", \"default\": 10}]\n" + "}"; + private static final String VALUE_SCHEMA_V2_STR = + "{\n" + "\"type\": \"record\",\n" + "\"name\": \"TestValueSchema\",\n" + + "\"namespace\": \"com.linkedin.venice.fastclient.schema\",\n" + "\"fields\": [\n" + "{\"name\": \"" + + SECOND_VALUE_FIELD_NAME + "\", \"type\": [\"null\", \"int\"], 
\"default\": null}]\n" + "}"; + + @Test + public void testMergeNewHybridConfigValuesToOldStore() { + String storeName = Utils.getUniqueString("storeName"); + Store store = TestUtils.createTestStore(storeName, "owner", System.currentTimeMillis()); + assertFalse(store.isHybrid()); + + Optional<Long> rewind = Optional.of(123L); + Optional<Long> lagOffset = Optional.of(1500L); + Optional<Long> timeLag = Optional.of(300L); + Optional<DataReplicationPolicy> dataReplicationPolicy = Optional.of(DataReplicationPolicy.AGGREGATE); + Optional<BufferReplayPolicy> bufferReplayPolicy = Optional.of(BufferReplayPolicy.REWIND_FROM_EOP); + HybridStoreConfig hybridStoreConfig = UpdateStoreUtils.mergeNewSettingsIntoOldHybridStoreConfig( + store, + Optional.empty(), + Optional.empty(), + Optional.empty(), + Optional.empty(), + Optional.empty()); + Assert.assertNull( + hybridStoreConfig, + "passing empty optionals and a non-hybrid store should generate a null hybrid config"); + + hybridStoreConfig = UpdateStoreUtils.mergeNewSettingsIntoOldHybridStoreConfig( + store, + rewind, + lagOffset, + timeLag, + dataReplicationPolicy, + bufferReplayPolicy); + Assert.assertNotNull(hybridStoreConfig, "specifying rewind and lagOffset should generate a valid hybrid config"); + Assert.assertEquals(hybridStoreConfig.getRewindTimeInSeconds(), 123L); + Assert.assertEquals(hybridStoreConfig.getOffsetLagThresholdToGoOnline(), 1500L); + Assert.assertEquals(hybridStoreConfig.getProducerTimestampLagThresholdToGoOnlineInSeconds(), 300L); + Assert.assertEquals(hybridStoreConfig.getDataReplicationPolicy(), DataReplicationPolicy.AGGREGATE); + + // It's okay that time lag threshold or data replication policy is not specified + hybridStoreConfig = UpdateStoreUtils.mergeNewSettingsIntoOldHybridStoreConfig( + store, + rewind, + lagOffset, + Optional.empty(), + Optional.empty(), + Optional.empty()); + Assert.assertNotNull(hybridStoreConfig, "specifying rewind and lagOffset should generate a valid hybrid config"); + Assert.assertEquals(hybridStoreConfig.getRewindTimeInSeconds(), 123L); + Assert.assertEquals(hybridStoreConfig.getOffsetLagThresholdToGoOnline(), 1500L); + Assert.assertEquals( + hybridStoreConfig.getProducerTimestampLagThresholdToGoOnlineInSeconds(), + HybridStoreConfigImpl.DEFAULT_HYBRID_TIME_LAG_THRESHOLD); + Assert.assertEquals(hybridStoreConfig.getDataReplicationPolicy(), DataReplicationPolicy.NON_AGGREGATE); + } + + @Test + public void testIsInferredStoreUpdateAllowed() { + String clusterName = "clusterName"; + String storeName = "storeName"; + Admin mockAdmin = mock(Admin.class); + + assertFalse( + UpdateStoreUtils.isInferredStoreUpdateAllowed(mockAdmin, VeniceSystemStoreUtils.getMetaStoreName(storeName))); + assertFalse( + UpdateStoreUtils + .isInferredStoreUpdateAllowed(mockAdmin, VeniceSystemStoreUtils.getDaVinciPushStatusStoreName(storeName))); + assertFalse( + UpdateStoreUtils.isInferredStoreUpdateAllowed( + mockAdmin, + VeniceSystemStoreUtils.getParticipantStoreNameForCluster(clusterName))); + assertFalse( + UpdateStoreUtils.isInferredStoreUpdateAllowed( + mockAdmin, + VeniceSystemStoreType.BATCH_JOB_HEARTBEAT_STORE.getZkSharedStoreName())); + + doReturn(false).when(mockAdmin).isPrimary(); + assertFalse(UpdateStoreUtils.isInferredStoreUpdateAllowed(mockAdmin, storeName)); + + Admin mockChildAdmin = mock(VeniceHelixAdmin.class); + doReturn(true).when(mockChildAdmin).isPrimary(); + doReturn(false).when(mockChildAdmin).isParent(); + assertTrue(UpdateStoreUtils.isInferredStoreUpdateAllowed(mockChildAdmin, storeName)); + + Admin mockParentAdmin = mock(VeniceParentHelixAdmin.class); +
doReturn(true).when(mockParentAdmin).isPrimary(); + doReturn(true).when(mockParentAdmin).isParent(); + assertTrue(UpdateStoreUtils.isInferredStoreUpdateAllowed(mockParentAdmin, storeName)); + } + + @Test + public void testUpdateInferredConfig() { + String storeName = "storeName"; + Admin admin = mock(Admin.class); + Store store = mock(Store.class); + Set<String> updatedConfigSet = new HashSet<>(); + final AtomicBoolean updaterInvoked = new AtomicBoolean(false); + + doReturn(storeName).when(store).getName(); + doReturn(true).when(admin).isPrimary(); + doReturn(false).when(admin).isParent(); + + // Config previously updated. Will not update again. + updatedConfigSet.add("key1"); + updaterInvoked.set(false); + UpdateStoreUtils.updateInferredConfig(admin, store, "key1", updatedConfigSet, () -> updaterInvoked.set(true)); + assertFalse(updaterInvoked.get()); + assertTrue(updatedConfigSet.contains("key1")); + assertEquals(updatedConfigSet.size(), 1); + + // Config not updated previously. Will update it. + updatedConfigSet.clear(); + updaterInvoked.set(false); + UpdateStoreUtils.updateInferredConfig(admin, store, "key1", updatedConfigSet, () -> updaterInvoked.set(true)); + assertTrue(updaterInvoked.get()); + assertTrue(updatedConfigSet.contains("key1")); + assertEquals(updatedConfigSet.size(), 1); + + // Config not updated previously. Will not update it for system stores. + updatedConfigSet.clear(); + updaterInvoked.set(false); + doReturn(VeniceSystemStoreUtils.getParticipantStoreNameForCluster(storeName)).when(store).getName(); + UpdateStoreUtils.updateInferredConfig(admin, store, "key1", updatedConfigSet, () -> updaterInvoked.set(true)); + assertFalse(updaterInvoked.get()); + assertTrue(updatedConfigSet.isEmpty()); + } + + @Test + public void testUpdateInferredConfigsForHybridToBatch() { + String storeName = "storeName"; + Admin admin = mock(Admin.class); + Store store = mock(Store.class); + VeniceControllerClusterConfig clusterConfig = mock(VeniceControllerClusterConfig.class); + + doReturn(true).when(admin).isPrimary(); + doReturn(false).when(admin).isParent(); + + Set<String> updatedConfigSet = new HashSet<>(); + doReturn(storeName).when(store).getName(); + doReturn("dc-batch").when(clusterConfig).getNativeReplicationSourceFabricAsDefaultForBatchOnly(); + doReturn("dc-hybrid").when(clusterConfig).getNativeReplicationSourceFabricAsDefaultForHybrid(); + + UpdateStoreUtils.updateInferredConfigsForHybridToBatch(admin, clusterConfig, store, updatedConfigSet); + + verify(store).setIncrementalPushEnabled(false); + verify(store).setNativeReplicationSourceFabric("dc-batch"); + verify(store).setActiveActiveReplicationEnabled(false); + + assertEquals(updatedConfigSet.size(), 3); + assertTrue(updatedConfigSet.contains(INCREMENTAL_PUSH_ENABLED)); + assertTrue(updatedConfigSet.contains(NATIVE_REPLICATION_SOURCE_FABRIC)); + assertTrue(updatedConfigSet.contains(ACTIVE_ACTIVE_REPLICATION_ENABLED)); + } + + @Test + public void testUpdateInferredConfigsForBatchToHybrid() { + String clusterName = "clusterName"; + String storeName = "storeName"; + Admin admin = mock(Admin.class); + Store store = mock(Store.class); + VeniceControllerClusterConfig clusterConfig = mock(VeniceControllerClusterConfig.class); + + doReturn(true).when(admin).isPrimary(); + doReturn(false).when(admin).isParent(); + + doReturn(storeName).when(store).getName(); + doReturn(false).when(store).isSystemStore(); + doReturn(0).when(store).getPartitionCount(); + doReturn(10 * BYTES_PER_GB).when(store).getStorageQuotaInByte(); + + Set<String> updatedConfigSet = new
HashSet<>(); + doReturn(clusterName).when(clusterConfig).getClusterName(); + doReturn("dc-batch").when(clusterConfig).getNativeReplicationSourceFabricAsDefaultForBatchOnly(); + doReturn("dc-hybrid").when(clusterConfig).getNativeReplicationSourceFabricAsDefaultForHybrid(); + doReturn(false).when(clusterConfig).isActiveActiveReplicationEnabledAsDefaultForHybrid(); + doReturn(1 * BYTES_PER_GB).when(clusterConfig).getPartitionSize(); + doReturn(3).when(clusterConfig).getMinNumberOfPartitionsForHybrid(); + doReturn(100).when(clusterConfig).getMaxNumberOfPartitions(); + doReturn(1).when(clusterConfig).getPartitionCountRoundUpSize(); + doReturn(true).when(clusterConfig).isEnablePartialUpdateForHybridNonActiveActiveUserStores(); + + doReturn(Arrays.asList(new SchemaEntry(1, VALUE_SCHEMA_V1_STR), new SchemaEntry(2, VALUE_SCHEMA_V2_STR))) + .when(admin) + .getValueSchemas(clusterName, storeName); + + UpdateStoreUtils.updateInferredConfigsForBatchToHybrid(admin, clusterConfig, store, updatedConfigSet); + + verify(store).setNativeReplicationSourceFabric("dc-hybrid"); + verify(store).setActiveActiveReplicationEnabled(false); + verify(store).setPartitionCount(10); + verify(store).setWriteComputationEnabled(true); + + assertEquals(updatedConfigSet.size(), 4); + assertTrue(updatedConfigSet.contains(NATIVE_REPLICATION_SOURCE_FABRIC)); + assertTrue(updatedConfigSet.contains(ACTIVE_ACTIVE_REPLICATION_ENABLED)); + assertTrue(updatedConfigSet.contains(PARTITION_COUNT)); + assertTrue(updatedConfigSet.contains(WRITE_COMPUTATION_ENABLED)); + + // Update schemas should only be generated in dry-run mode and not registered yet. + verify(admin, never()).addDerivedSchema(eq(clusterName), eq(storeName), anyInt(), anyString()); + } + + @Test + public void testIsIncrementalPushEnabled() { + HybridStoreConfig nonHybridConfig = + new HybridStoreConfigImpl(-1, -1, -1, DataReplicationPolicy.AGGREGATE, BufferReplayPolicy.REWIND_FROM_EOP); + HybridStoreConfig hybridConfigWithNonAggregateDRP = new HybridStoreConfigImpl( + 100, + 1000, + -1, + DataReplicationPolicy.NON_AGGREGATE, + BufferReplayPolicy.REWIND_FROM_EOP); + HybridStoreConfig hybridConfigWithAggregateDRP = + new HybridStoreConfigImpl(100, 1000, -1, DataReplicationPolicy.AGGREGATE, BufferReplayPolicy.REWIND_FROM_EOP); + HybridStoreConfig hybridConfigWithActiveActiveDRP = new HybridStoreConfigImpl( + 100, + 1000, + -1, + DataReplicationPolicy.ACTIVE_ACTIVE, + BufferReplayPolicy.REWIND_FROM_EOP); + HybridStoreConfig hybridConfigWithNoneDRP = + new HybridStoreConfigImpl(100, 1000, -1, DataReplicationPolicy.NONE, BufferReplayPolicy.REWIND_FROM_EOP); + + // In single-region mode, any hybrid store should have incremental push enabled. + assertFalse(UpdateStoreUtils.isIncrementalPushEnabled(false, null)); + assertFalse(UpdateStoreUtils.isIncrementalPushEnabled(false, nonHybridConfig)); + assertTrue(UpdateStoreUtils.isIncrementalPushEnabled(false, hybridConfigWithNonAggregateDRP)); + assertTrue(UpdateStoreUtils.isIncrementalPushEnabled(false, hybridConfigWithAggregateDRP)); + assertTrue(UpdateStoreUtils.isIncrementalPushEnabled(false, hybridConfigWithActiveActiveDRP)); + assertTrue(UpdateStoreUtils.isIncrementalPushEnabled(false, hybridConfigWithNoneDRP)); + + // In multi-region mode, hybrid stores with NON_AGGREGATE DataReplicationPolicy should not have incremental push + // enabled. 
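+    // Summary of the expectations asserted below: with multi-region enabled, only the AGGREGATE, ACTIVE_ACTIVE and NONE policies keep incremental push allowed; NON_AGGREGATE hybrid configs and non-hybrid configs do not.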
+ assertFalse(UpdateStoreUtils.isIncrementalPushEnabled(true, null)); + assertFalse(UpdateStoreUtils.isIncrementalPushEnabled(true, nonHybridConfig)); + assertFalse(UpdateStoreUtils.isIncrementalPushEnabled(true, hybridConfigWithNonAggregateDRP)); + assertTrue(UpdateStoreUtils.isIncrementalPushEnabled(true, hybridConfigWithAggregateDRP)); + assertTrue(UpdateStoreUtils.isIncrementalPushEnabled(true, hybridConfigWithActiveActiveDRP)); + assertTrue(UpdateStoreUtils.isIncrementalPushEnabled(true, hybridConfigWithNoneDRP)); + } + + @Test + public void testValidateStoreConfigs() { + String clusterName = "clusterName"; + String storeName = "storeName"; + Admin admin = mock(Admin.class); + Store store = mock(Store.class); + VeniceControllerMultiClusterConfig multiClusterConfigs = mock(VeniceControllerMultiClusterConfig.class); + VeniceControllerClusterConfig controllerConfig = mock(VeniceControllerClusterConfig.class); + + doReturn(multiClusterConfigs).when(admin).getMultiClusterConfigs(); + doReturn(controllerConfig).when(multiClusterConfigs).getControllerConfig(clusterName); + doReturn(1000).when(controllerConfig).getDefaultReadQuotaPerRouter(); + + HelixVeniceClusterResources resources = mock(HelixVeniceClusterResources.class); + ZkRoutersClusterManager routersClusterManager = mock(ZkRoutersClusterManager.class); + + doReturn(resources).when(admin).getHelixVeniceClusterResources(clusterName); + doReturn(routersClusterManager).when(resources).getRoutersClusterManager(); + doReturn(1).when(routersClusterManager).getLiveRoutersCount(); + + // Batch-only + incremental push is not allowed + doReturn(storeName).when(store).getName(); + doReturn(false).when(store).isHybrid(); + doReturn(true).when(store).isIncrementalPushEnabled(); + VeniceHttpException e1 = + expectThrows(VeniceHttpException.class, () -> UpdateStoreUtils.validateStoreConfigs(admin, clusterName, store)); + assertEquals(e1.getHttpStatusCode(), HttpStatus.SC_BAD_REQUEST); + assertEquals(e1.getErrorType(), ErrorType.INVALID_CONFIG); + assertTrue(e1.getMessage().contains("Incremental push is only supported for hybrid stores")); + + reset(store); + + // Batch-only + write compute is not allowed + doReturn(storeName).when(store).getName(); + doReturn(false).when(store).isHybrid(); + doReturn(true).when(store).isWriteComputationEnabled(); + VeniceHttpException e2 = + expectThrows(VeniceHttpException.class, () -> UpdateStoreUtils.validateStoreConfigs(admin, clusterName, store)); + assertEquals(e2.getHttpStatusCode(), HttpStatus.SC_BAD_REQUEST); + assertEquals(e2.getErrorType(), ErrorType.INVALID_CONFIG); + assertTrue(e2.getMessage().contains("Write computation is only supported for hybrid stores")); + + reset(store); + + // Hybrid store cannot have negative rewind time config + doReturn(storeName).when(store).getName(); + doReturn(true).when(store).isHybrid(); + doReturn( + new HybridStoreConfigImpl(-1, 100, -1, DataReplicationPolicy.NON_AGGREGATE, BufferReplayPolicy.REWIND_FROM_EOP)) + .when(store) + .getHybridStoreConfig(); + VeniceHttpException e3 = + expectThrows(VeniceHttpException.class, () -> UpdateStoreUtils.validateStoreConfigs(admin, clusterName, store)); + assertEquals(e3.getHttpStatusCode(), HttpStatus.SC_BAD_REQUEST); + assertEquals(e3.getErrorType(), ErrorType.INVALID_CONFIG); + assertTrue(e3.getMessage().contains("Rewind time cannot be negative for a hybrid store")); + + reset(store); + + // Hybrid store cannot have negative offset lag and negative producer time lag thresholds + doReturn(storeName).when(store).getName(); + 
doReturn(true).when(store).isHybrid(); + doReturn( + new HybridStoreConfigImpl(100, -1, -1, DataReplicationPolicy.NON_AGGREGATE, BufferReplayPolicy.REWIND_FROM_EOP)) + .when(store) + .getHybridStoreConfig(); + VeniceHttpException e4 = + expectThrows(VeniceHttpException.class, () -> UpdateStoreUtils.validateStoreConfigs(admin, clusterName, store)); + assertEquals(e4.getHttpStatusCode(), HttpStatus.SC_BAD_REQUEST); + assertEquals(e4.getErrorType(), ErrorType.INVALID_CONFIG); + assertTrue( + e4.getMessage() + .contains( + "Both offset lag threshold and producer timestamp lag threshold cannot be negative for a hybrid store")); + + reset(store); + + // Incremental push + NON_AGGREGATE DRP is not supported in multi-region mode + doReturn(true).when(controllerConfig).isMultiRegion(); + doReturn(storeName).when(store).getName(); + doReturn(true).when(store).isHybrid(); + doReturn(true).when(store).isIncrementalPushEnabled(); + doReturn( + new HybridStoreConfigImpl( + 100, + 100, + -1, + DataReplicationPolicy.NON_AGGREGATE, + BufferReplayPolicy.REWIND_FROM_EOP)).when(store).getHybridStoreConfig(); + VeniceHttpException e5 = + expectThrows(VeniceHttpException.class, () -> UpdateStoreUtils.validateStoreConfigs(admin, clusterName, store)); + assertEquals(e5.getHttpStatusCode(), HttpStatus.SC_BAD_REQUEST); + assertEquals(e5.getErrorType(), ErrorType.INVALID_CONFIG); + assertTrue( + e5.getMessage() + .contains( + "Incremental push is not supported for hybrid stores with non-aggregate data replication policy")); + + reset(controllerConfig); + reset(store); + + // Incremental push + NON_AGGREGATE DRP is supported in single-region mode + doReturn(false).when(controllerConfig).isMultiRegion(); + doReturn(storeName).when(store).getName(); + doReturn(true).when(store).isHybrid(); + doReturn(true).when(store).isIncrementalPushEnabled(); + doReturn( + new HybridStoreConfigImpl( + 100, + 100, + -1, + DataReplicationPolicy.NON_AGGREGATE, + BufferReplayPolicy.REWIND_FROM_EOP)).when(store).getHybridStoreConfig(); + doReturn(new PartitionerConfigImpl()).when(store).getPartitionerConfig(); + doReturn(new SchemaEntry(1, VALUE_SCHEMA_V1_STR)).when(admin).getKeySchema(clusterName, storeName); + doReturn(SchemaData.INVALID_VALUE_SCHEMA_ID).when(store).getLatestSuperSetValueSchemaId(); + UpdateStoreUtils.validateStoreConfigs(admin, clusterName, store); + + reset(controllerConfig); + reset(store); + + // ACTIVE_ACTIVE DRP is only supported when activeActiveReplicationEnabled = true + doReturn(storeName).when(store).getName(); + doReturn(true).when(store).isHybrid(); + doReturn(false).when(store).isActiveActiveReplicationEnabled(); + doReturn( + new HybridStoreConfigImpl( + 100, + -1, + 100, + DataReplicationPolicy.ACTIVE_ACTIVE, + BufferReplayPolicy.REWIND_FROM_EOP)).when(store).getHybridStoreConfig(); + VeniceHttpException e6 = + expectThrows(VeniceHttpException.class, () -> UpdateStoreUtils.validateStoreConfigs(admin, clusterName, store)); + assertEquals(e6.getHttpStatusCode(), HttpStatus.SC_BAD_REQUEST); + assertEquals(e6.getErrorType(), ErrorType.INVALID_CONFIG); + assertTrue( + e6.getMessage() + .contains( + "Data replication policy ACTIVE_ACTIVE is only supported for hybrid stores with active-active replication enabled")); + + reset(controllerConfig); + reset(store); + + // Storage quota can not be less than 0 + doReturn(storeName).when(store).getName(); + doReturn(-5L).when(store).getStorageQuotaInByte(); + VeniceHttpException e7 = + expectThrows(VeniceHttpException.class, () -> 
UpdateStoreUtils.validateStoreConfigs(admin, clusterName, store)); + assertEquals(e7.getHttpStatusCode(), HttpStatus.SC_BAD_REQUEST); + assertEquals(e7.getErrorType(), ErrorType.INVALID_CONFIG); + assertTrue(e7.getMessage().contains("Storage quota can not be less than 0")); + + reset(controllerConfig); + reset(store); + + // Storage quota can be -1. Special value for unlimited quota + doReturn(storeName).when(store).getName(); + doReturn(-1L).when(store).getStorageQuotaInByte(); + doReturn(new PartitionerConfigImpl()).when(store).getPartitionerConfig(); + doReturn(new SchemaEntry(1, VALUE_SCHEMA_V1_STR)).when(admin).getKeySchema(clusterName, storeName); + doReturn(SchemaData.INVALID_VALUE_SCHEMA_ID).when(store).getLatestSuperSetValueSchemaId(); + UpdateStoreUtils.validateStoreConfigs(admin, clusterName, store); + + reset(controllerConfig); + reset(store); + + // Read quota can not be less than 0 + doReturn(storeName).when(store).getName(); + doReturn(-5L).when(store).getReadQuotaInCU(); + VeniceHttpException e8 = + expectThrows(VeniceHttpException.class, () -> UpdateStoreUtils.validateStoreConfigs(admin, clusterName, store)); + assertEquals(e8.getHttpStatusCode(), HttpStatus.SC_BAD_REQUEST); + assertEquals(e8.getErrorType(), ErrorType.INVALID_CONFIG); + assertTrue(e8.getMessage().contains("Read quota can not be less than 0")); + + reset(controllerConfig); + reset(store); + + // Read quota can not be larger than cluster quota + doReturn(storeName).when(store).getName(); + doReturn(2000L).when(store).getReadQuotaInCU(); + VeniceHttpException e9 = + expectThrows(VeniceHttpException.class, () -> UpdateStoreUtils.validateStoreConfigs(admin, clusterName, store)); + assertEquals(e9.getHttpStatusCode(), HttpStatus.SC_BAD_REQUEST); + assertEquals(e9.getErrorType(), ErrorType.INVALID_CONFIG); + assertTrue(e9.getMessage().contains("Read quota can not be more than the cluster quota")); + + reset(controllerConfig); + reset(store); + + // Active-active replication is not supported for batch-only stores + doReturn(storeName).when(store).getName(); + doReturn(false).when(store).isNativeReplicationEnabled(); + doReturn(true).when(store).isActiveActiveReplicationEnabled(); + VeniceHttpException e10 = + expectThrows(VeniceHttpException.class, () -> UpdateStoreUtils.validateStoreConfigs(admin, clusterName, store)); + assertEquals(e10.getHttpStatusCode(), HttpStatus.SC_BAD_REQUEST); + assertEquals(e10.getErrorType(), ErrorType.INVALID_CONFIG); + assertTrue(e10.getMessage().contains("Active-Active Replication is only supported for hybrid stores")); + + reset(controllerConfig); + reset(store); + + // Active-active replication is only supported for stores that also have native replication + doReturn(storeName).when(store).getName(); + doReturn(true).when(store).isHybrid(); + doReturn( + new HybridStoreConfigImpl( + 100, + -1, + 100, + DataReplicationPolicy.ACTIVE_ACTIVE, + BufferReplayPolicy.REWIND_FROM_EOP)).when(store).getHybridStoreConfig(); + doReturn(false).when(store).isNativeReplicationEnabled(); + doReturn(true).when(store).isActiveActiveReplicationEnabled(); + VeniceHttpException e11 = + expectThrows(VeniceHttpException.class, () -> UpdateStoreUtils.validateStoreConfigs(admin, clusterName, store)); + assertEquals(e11.getHttpStatusCode(), HttpStatus.SC_BAD_REQUEST); + assertEquals(e11.getErrorType(), ErrorType.INVALID_CONFIG); + assertTrue( + e11.getMessage() + .contains( + "Active/Active Replication cannot be enabled for store " + store.getName() + + " since Native Replication is not enabled on it.")); + 
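+    // The remaining cases in this test follow the same pattern: reset the mocks, stub a single invalid combination, and assert the expected VeniceHttpException.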
+ reset(controllerConfig); + reset(store); + + // Partitioner Config cannot be null + doReturn(storeName).when(store).getName(); + doReturn(null).when(store).getPartitionerConfig(); + VeniceHttpException e12 = + expectThrows(VeniceHttpException.class, () -> UpdateStoreUtils.validateStoreConfigs(admin, clusterName, store)); + assertEquals(e12.getHttpStatusCode(), HttpStatus.SC_BAD_REQUEST); + assertEquals(e12.getErrorType(), ErrorType.INVALID_CONFIG); + assertTrue(e12.getMessage().contains("Partitioner Config cannot be null")); + + reset(controllerConfig); + reset(store); + + // Active-Active is not supported when amplification factor is more than 1 + doReturn(storeName).when(store).getName(); + doReturn(true).when(store).isHybrid(); + doReturn( + new HybridStoreConfigImpl( + 100, + -1, + 100, + DataReplicationPolicy.ACTIVE_ACTIVE, + BufferReplayPolicy.REWIND_FROM_EOP)).when(store).getHybridStoreConfig(); + doReturn(new PartitionerConfigImpl(DefaultVenicePartitioner.class.getName(), new HashMap<>(), 10)).when(store) + .getPartitionerConfig(); + doReturn(true).when(store).isNativeReplicationEnabled(); + doReturn(true).when(store).isActiveActiveReplicationEnabled(); + VeniceHttpException e13 = + expectThrows(VeniceHttpException.class, () -> UpdateStoreUtils.validateStoreConfigs(admin, clusterName, store)); + assertEquals(e13.getHttpStatusCode(), HttpStatus.SC_BAD_REQUEST); + assertEquals(e13.getErrorType(), ErrorType.INVALID_CONFIG); + assertTrue( + e13.getMessage() + .contains("Active-active replication is not supported for stores with amplification factor > 1")); + + reset(controllerConfig); + reset(store); + + // Write-compute is not supported when amplification factor is more than 1 + doReturn(storeName).when(store).getName(); + doReturn(true).when(store).isHybrid(); + doReturn( + new HybridStoreConfigImpl( + 100, + 100, + -1, + DataReplicationPolicy.NON_AGGREGATE, + BufferReplayPolicy.REWIND_FROM_EOP)).when(store).getHybridStoreConfig(); + doReturn(new PartitionerConfigImpl(DefaultVenicePartitioner.class.getName(), new HashMap<>(), 10)).when(store) + .getPartitionerConfig(); + doReturn(true).when(store).isWriteComputationEnabled(); + VeniceHttpException e14 = + expectThrows(VeniceHttpException.class, () -> UpdateStoreUtils.validateStoreConfigs(admin, clusterName, store)); + assertEquals(e14.getHttpStatusCode(), HttpStatus.SC_BAD_REQUEST); + assertEquals(e14.getErrorType(), ErrorType.INVALID_CONFIG); + assertTrue( + e14.getMessage().contains("Write computation is not supported for stores with amplification factor > 1")); + + reset(controllerConfig); + reset(store); + + // Verify the updated partitionerConfig can be built - partitioner doesn't exist + doReturn(storeName).when(store).getName(); + doReturn(new PartitionerConfigImpl("com.linkedin.venice.InvalidPartitioner", new HashMap<>(), 10)).when(store) + .getPartitionerConfig(); + VeniceHttpException e15 = + expectThrows(VeniceHttpException.class, () -> UpdateStoreUtils.validateStoreConfigs(admin, clusterName, store)); + assertEquals(e15.getHttpStatusCode(), HttpStatus.SC_BAD_REQUEST); + assertEquals(e15.getErrorType(), ErrorType.INVALID_CONFIG); + assertTrue( + e15.getMessage() + .contains( + "Partitioner Configs are invalid, please verify that partitioner configs like classpath and parameters are correct!")); + + reset(controllerConfig); + reset(store); + + // Verify the updated partitionerConfig can be built - schema is not supported by partitioner + doReturn(storeName).when(store).getName(); + doReturn( + new PartitionerConfigImpl( + 
PickyVenicePartitioner.class.getName(), + Collections.singletonMap(PickyVenicePartitioner.SCHEMA_VALID, "false"), + 10)).when(store).getPartitionerConfig(); + VeniceHttpException e16 = + expectThrows(VeniceHttpException.class, () -> UpdateStoreUtils.validateStoreConfigs(admin, clusterName, store)); + assertEquals(e16.getHttpStatusCode(), HttpStatus.SC_BAD_REQUEST); + assertEquals(e16.getErrorType(), ErrorType.INVALID_SCHEMA); + assertTrue(e16.getMessage().contains("Schema is not valid")); + + reset(controllerConfig); + reset(store); + + // Validate if the latest superset schema id is an existing value schema + doReturn(storeName).when(store).getName(); + doReturn(new PartitionerConfigImpl()).when(store).getPartitionerConfig(); + doReturn(new SchemaEntry(1, VALUE_SCHEMA_V1_STR)).when(admin).getKeySchema(clusterName, storeName); + doReturn(null).when(admin).getValueSchema(clusterName, storeName, 10); + doReturn(10).when(store).getLatestSuperSetValueSchemaId(); + VeniceHttpException e17 = + expectThrows(VeniceHttpException.class, () -> UpdateStoreUtils.validateStoreConfigs(admin, clusterName, store)); + assertEquals(e17.getHttpStatusCode(), HttpStatus.SC_BAD_REQUEST); + assertEquals(e17.getErrorType(), ErrorType.INVALID_CONFIG); + assertTrue(e17.getMessage().contains("Unknown value schema id: 10 in store: storeName")); + + reset(controllerConfig); + reset(store); + + // Max compaction lag >= Min compaction lag + doReturn(storeName).when(store).getName(); + doReturn(new PartitionerConfigImpl()).when(store).getPartitionerConfig(); + doReturn(SchemaData.INVALID_VALUE_SCHEMA_ID).when(store).getLatestSuperSetValueSchemaId(); + doReturn(10L).when(store).getMaxCompactionLagSeconds(); + doReturn(100L).when(store).getMinCompactionLagSeconds(); + VeniceHttpException e18 = + expectThrows(VeniceHttpException.class, () -> UpdateStoreUtils.validateStoreConfigs(admin, clusterName, store)); + assertEquals(e18.getHttpStatusCode(), HttpStatus.SC_BAD_REQUEST); + assertEquals(e18.getErrorType(), ErrorType.INVALID_CONFIG); + assertTrue( + e18.getMessage() + .contains( + "Store's max compaction lag seconds: 10 shouldn't be smaller than store's min compaction lag seconds: 100")); + + reset(controllerConfig); + reset(store); + + // ETL Proxy user must be set if ETL is enabled for current or future version + doReturn(storeName).when(store).getName(); + doReturn(new PartitionerConfigImpl()).when(store).getPartitionerConfig(); + doReturn(SchemaData.INVALID_VALUE_SCHEMA_ID).when(store).getLatestSuperSetValueSchemaId(); + doReturn(new ETLStoreConfigImpl("", true, false)).when(store).getEtlStoreConfig(); + VeniceHttpException e19 = + expectThrows(VeniceHttpException.class, () -> UpdateStoreUtils.validateStoreConfigs(admin, clusterName, store)); + assertEquals(e19.getHttpStatusCode(), HttpStatus.SC_BAD_REQUEST); + assertEquals(e19.getErrorType(), ErrorType.INVALID_CONFIG); + assertTrue( + e19.getMessage().contains("Cannot enable ETL for this store because etled user proxy account is not set")); + + reset(controllerConfig); + reset(store); + } + + @Test + public void testValidateStorePartitionCountUpdate() { + String clusterName = "clusterName"; + String storeName = "storeName"; + + Admin admin = mock(Admin.class); + VeniceControllerMultiClusterConfig multiClusterConfigs = mock(VeniceControllerMultiClusterConfig.class); + VeniceControllerClusterConfig clusterConfig = mock(VeniceControllerClusterConfig.class); + HelixVeniceClusterResources clusterResources = mock(HelixVeniceClusterResources.class); + TopicManager 
topicManager = mock(TopicManager.class); + PubSubTopicRepository topicRepository = mock(PubSubTopicRepository.class); + PubSubTopic rtTopic = mock(PubSubTopic.class); + + doReturn(false).when(admin).isParent(); + doReturn(topicManager).when(admin).getTopicManager(); + doReturn(topicRepository).when(admin).getPubSubTopicRepository(); + doReturn(rtTopic).when(topicRepository).getTopic(Version.composeRealTimeTopic(storeName)); + + Store originalStore = mock(Store.class); + Store updatedStore = mock(Store.class); + + doReturn(3).when(clusterConfig).getMinNumberOfPartitions(); + doReturn(100).when(clusterConfig).getMaxNumberOfPartitions(); + + doReturn(clusterResources).when(admin).getHelixVeniceClusterResources(clusterName); + doReturn(clusterConfig).when(clusterResources).getConfig(); + + doReturn(storeName).when(originalStore).getName(); + doReturn(storeName).when(updatedStore).getName(); + + // Negative partition count is not allowed + doReturn(-1).when(updatedStore).getPartitionCount(); + VeniceHttpException e1 = expectThrows( + VeniceHttpException.class, + () -> UpdateStoreUtils + .validateStorePartitionCountUpdate(admin, multiClusterConfigs, clusterName, originalStore, updatedStore)); + assertEquals(e1.getHttpStatusCode(), HttpStatus.SC_BAD_REQUEST); + assertEquals(e1.getErrorType(), ErrorType.INVALID_CONFIG); + + // Hybrid store with partition count = 0 + doReturn(true).when(updatedStore).isHybrid(); + doReturn(0).when(updatedStore).getPartitionCount(); + VeniceHttpException e2 = expectThrows( + VeniceHttpException.class, + () -> UpdateStoreUtils + .validateStorePartitionCountUpdate(admin, multiClusterConfigs, clusterName, originalStore, updatedStore)); + assertEquals(e2.getHttpStatusCode(), HttpStatus.SC_BAD_REQUEST); + assertEquals(e2.getErrorType(), ErrorType.INVALID_CONFIG); + + // Partition count cannot be less than min partition count + doReturn(true).when(updatedStore).isHybrid(); + doReturn(1).when(updatedStore).getPartitionCount(); + VeniceHttpException e3 = expectThrows( + VeniceHttpException.class, + () -> UpdateStoreUtils + .validateStorePartitionCountUpdate(admin, multiClusterConfigs, clusterName, originalStore, updatedStore)); + assertEquals(e3.getHttpStatusCode(), HttpStatus.SC_BAD_REQUEST); + assertEquals(e3.getErrorType(), ErrorType.INVALID_CONFIG); + + // Partition count cannot be greater than max partition count + doReturn(false).when(updatedStore).isHybrid(); + doReturn(1000).when(updatedStore).getPartitionCount(); + VeniceHttpException e4 = expectThrows( + VeniceHttpException.class, + () -> UpdateStoreUtils + .validateStorePartitionCountUpdate(admin, multiClusterConfigs, clusterName, originalStore, updatedStore)); + assertEquals(e4.getHttpStatusCode(), HttpStatus.SC_BAD_REQUEST); + assertEquals(e4.getErrorType(), ErrorType.INVALID_CONFIG); + + // Partition count change for hybrid stores is not allowed + doReturn(true).when(originalStore).isHybrid(); + doReturn(true).when(updatedStore).isHybrid(); + doReturn(10).when(originalStore).getPartitionCount(); + doReturn(20).when(updatedStore).getPartitionCount(); + VeniceHttpException e5 = expectThrows( + VeniceHttpException.class, + () -> UpdateStoreUtils + .validateStorePartitionCountUpdate(admin, multiClusterConfigs, clusterName, originalStore, updatedStore)); + assertEquals(e5.getHttpStatusCode(), HttpStatus.SC_BAD_REQUEST); + assertEquals(e5.getErrorType(), ErrorType.INVALID_CONFIG); + + // Partition count update is allowed if RT topic doesn't exist + doReturn(true).when(originalStore).isHybrid(); + 
doReturn(true).when(updatedStore).isHybrid(); + doReturn(10).when(originalStore).getPartitionCount(); + doReturn(10).when(updatedStore).getPartitionCount(); + doReturn(false).when(topicManager).containsTopic(rtTopic); + UpdateStoreUtils + .validateStorePartitionCountUpdate(admin, multiClusterConfigs, clusterName, originalStore, updatedStore); + + // Partition count update is allowed if RT topic exists and partition count matches the store's partition count + doReturn(true).when(originalStore).isHybrid(); + doReturn(true).when(updatedStore).isHybrid(); + doReturn(10).when(originalStore).getPartitionCount(); + doReturn(10).when(updatedStore).getPartitionCount(); + doReturn(true).when(topicManager).containsTopic(rtTopic); + doReturn(10).when(topicManager).getPartitionCount(rtTopic); + UpdateStoreUtils + .validateStorePartitionCountUpdate(admin, multiClusterConfigs, clusterName, originalStore, updatedStore); + + // Partition count update is not allowed if RT topic exists and partition count is different from the store's + // partition count + doReturn(true).when(originalStore).isHybrid(); + doReturn(true).when(updatedStore).isHybrid(); + doReturn(10).when(originalStore).getPartitionCount(); + doReturn(10).when(updatedStore).getPartitionCount(); + doReturn(true).when(topicManager).containsTopic(rtTopic); + doReturn(20).when(topicManager).getPartitionCount(rtTopic); + VeniceHttpException e6 = expectThrows( + VeniceHttpException.class, + () -> UpdateStoreUtils + .validateStorePartitionCountUpdate(admin, multiClusterConfigs, clusterName, originalStore, updatedStore)); + assertEquals(e6.getHttpStatusCode(), HttpStatus.SC_BAD_REQUEST); + assertEquals(e6.getErrorType(), ErrorType.INVALID_CONFIG); + + // Partition count change for batch stores is allowed + doReturn(true).when(originalStore).isHybrid(); + doReturn(false).when(updatedStore).isHybrid(); + doReturn(10).when(originalStore).getPartitionCount(); + doReturn(20).when(updatedStore).getPartitionCount(); + // No exception is thrown + UpdateStoreUtils + .validateStorePartitionCountUpdate(admin, multiClusterConfigs, clusterName, originalStore, updatedStore); + } + + @Test + public void testValidateStorePartitionerUpdate() { + String clusterName = "clusterName"; + String storeName = "storeName"; + + Store originalStore = mock(Store.class); + Store updatedStore = mock(Store.class); + + doReturn(storeName).when(originalStore).getName(); + doReturn(storeName).when(updatedStore).getName(); + + // Partitioner param update is allowed for batch-only stores + doReturn(false).when(originalStore).isHybrid(); + doReturn(false).when(updatedStore).isHybrid(); + UpdateStoreUtils.validateStorePartitionerUpdate(clusterName, originalStore, updatedStore); + + // Partitioner param update is allowed during hybrid to batch conversion + doReturn(true).when(originalStore).isHybrid(); + doReturn(false).when(updatedStore).isHybrid(); + UpdateStoreUtils.validateStorePartitionerUpdate(clusterName, originalStore, updatedStore); + + // Partitioner param update is allowed during batch to hybrid conversion + doReturn(false).when(originalStore).isHybrid(); + doReturn(true).when(updatedStore).isHybrid(); + UpdateStoreUtils.validateStorePartitionerUpdate(clusterName, originalStore, updatedStore); + + PartitionerConfig originalPartitionerConfig; + PartitionerConfig updatedPartitionerConfig; + + // Partitioner class update is not allowed for hybrid stores + doReturn(true).when(originalStore).isHybrid(); + doReturn(true).when(updatedStore).isHybrid(); + originalPartitionerConfig = new 
PartitionerConfigImpl("ClassA", Collections.singletonMap("key1", "value1"), 1); + updatedPartitionerConfig = new PartitionerConfigImpl("ClassB", Collections.singletonMap("key1", "value1"), 1); + doReturn(originalPartitionerConfig).when(originalStore).getPartitionerConfig(); + doReturn(updatedPartitionerConfig).when(updatedStore).getPartitionerConfig(); + VeniceHttpException e1 = expectThrows( + VeniceHttpException.class, + () -> UpdateStoreUtils.validateStorePartitionerUpdate(clusterName, originalStore, updatedStore)); + assertEquals(e1.getHttpStatusCode(), HttpStatus.SC_BAD_REQUEST); + assertEquals(e1.getErrorType(), ErrorType.INVALID_CONFIG); + assertTrue(e1.getMessage().contains("Partitioner class cannot be changed for hybrid store")); + + // Partitioner param update is not allowed for hybrid stores + doReturn(true).when(originalStore).isHybrid(); + doReturn(true).when(updatedStore).isHybrid(); + originalPartitionerConfig = new PartitionerConfigImpl("ClassA", Collections.singletonMap("key1", "value1"), 1); + updatedPartitionerConfig = new PartitionerConfigImpl("ClassA", Collections.singletonMap("key2", "value2"), 1); + doReturn(originalPartitionerConfig).when(originalStore).getPartitionerConfig(); + doReturn(updatedPartitionerConfig).when(updatedStore).getPartitionerConfig(); + VeniceHttpException e2 = expectThrows( + VeniceHttpException.class, + () -> UpdateStoreUtils.validateStorePartitionerUpdate(clusterName, originalStore, updatedStore)); + assertEquals(e2.getHttpStatusCode(), HttpStatus.SC_BAD_REQUEST); + assertEquals(e2.getErrorType(), ErrorType.INVALID_CONFIG); + assertTrue(e2.getMessage().contains("Partitioner params cannot be changed for hybrid store")); + + // Amplification factor changes are allowed for hybrid stores + doReturn(true).when(originalStore).isHybrid(); + doReturn(true).when(updatedStore).isHybrid(); + originalPartitionerConfig = new PartitionerConfigImpl("ClassA", Collections.singletonMap("key1", "value1"), 1); + updatedPartitionerConfig = new PartitionerConfigImpl("ClassA", Collections.singletonMap("key1", "value1"), 10); + doReturn(originalPartitionerConfig).when(originalStore).getPartitionerConfig(); + doReturn(updatedPartitionerConfig).when(updatedStore).getPartitionerConfig(); + UpdateStoreUtils.validateStorePartitionerUpdate(clusterName, originalStore, updatedStore); + } + + @Test + public void testValidatePersona() { + String clusterName = "clusterName"; + String storeName = "storeName"; + + Store store = mock(Store.class); + Admin admin = mock(Admin.class); + HelixVeniceClusterResources clusterResources = mock(HelixVeniceClusterResources.class); + StoragePersonaRepository personaRepository = mock(StoragePersonaRepository.class); + StoragePersona persona = mock(StoragePersona.class); + + doReturn(storeName).when(store).getName(); + + doReturn(clusterResources).when(admin).getHelixVeniceClusterResources(clusterName); + doReturn(personaRepository).when(clusterResources).getStoragePersonaRepository(); + + // Persona not updated. Store doesn't have an existing persona. Update is allowed. + doReturn(null).when(personaRepository).getPersonaContainingStore(storeName); + UpdateStoreUtils.validatePersona(admin, clusterName, store, Optional.empty()); + + // Persona not updated. Store has an existing persona. Update is allowed if persona repo allows. 
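+    // The repository's validateAddUpdatedStore(...) acts as the gate here: doNothing() models an allowed update, doThrow(...) models a rejected one.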
+    doReturn(persona).when(personaRepository).getPersonaContainingStore(storeName);
+    // Validation doesn't throw exception -> update is allowed
+    doNothing().when(personaRepository).validateAddUpdatedStore(any(), any());
+    UpdateStoreUtils.validatePersona(admin, clusterName, store, Optional.empty());
+    // Validation throws exception -> update is not allowed
+    doThrow(new VeniceException()).when(personaRepository).validateAddUpdatedStore(any(), any());
+    assertThrows(
+        VeniceException.class,
+        () -> UpdateStoreUtils.validatePersona(admin, clusterName, store, Optional.empty()));
+
+    String updatedPersona = "persona2";
+    // Persona updated. New persona doesn't exist. Update is not allowed.
+    doReturn(null).when(personaRepository).getPersonaContainingStore(storeName);
+    doReturn(null).when(admin).getStoragePersona(clusterName, updatedPersona);
+    assertThrows(
+        VeniceException.class,
+        () -> UpdateStoreUtils.validatePersona(admin, clusterName, store, Optional.of(updatedPersona)));
+
+    // Persona updated. New persona exists. Update is allowed if persona repo allows.
+    doReturn(null).when(personaRepository).getPersonaContainingStore(storeName);
+    doReturn(persona).when(admin).getStoragePersona(clusterName, updatedPersona);
+    // Validation doesn't throw exception -> update is allowed
+    doNothing().when(personaRepository).validateAddUpdatedStore(any(), any());
+    UpdateStoreUtils.validatePersona(admin, clusterName, store, Optional.of(updatedPersona));
+    // Validation throws exception -> update is not allowed
+    doThrow(new VeniceException()).when(personaRepository).validateAddUpdatedStore(any(), any());
+    assertThrows(
+        VeniceException.class,
+        () -> UpdateStoreUtils.validatePersona(admin, clusterName, store, Optional.of(updatedPersona)));
+  }
+
+  @Test
+  public void testMergeNewSettingsIntoOldPartitionerConfig() {
+    String storeName = "storeName";
+    Store store = mock(Store.class);
+
+    PartitionerConfig oldPartitionerConfig = new PartitionerConfigImpl();
+
+    doReturn(storeName).when(store).getName();
+    doReturn(oldPartitionerConfig).when(store).getPartitionerConfig();
+
+    // No updates to the store partitioner configs should return the same partitioner configs
+    assertSame(
+        UpdateStoreUtils
+            .mergeNewSettingsIntoOldPartitionerConfig(store, Optional.empty(), Optional.empty(), Optional.empty()),
+        oldPartitionerConfig);
+
+    String updatedPartitionerClass = "Class B";
+    Map<String, String> updatedPartitionerParams = Collections.singletonMap("key1", "value1");
+    int updatedAmpFactor = 10;
+
+    PartitionerConfig newPartitionerConfig = UpdateStoreUtils.mergeNewSettingsIntoOldPartitionerConfig(
+        store,
+        Optional.of(updatedPartitionerClass),
+        Optional.of(updatedPartitionerParams),
+        Optional.of(updatedAmpFactor));
+    assertNotSame(newPartitionerConfig, oldPartitionerConfig); // Should be a new object
+    assertEquals(newPartitionerConfig.getPartitionerClass(), updatedPartitionerClass);
+    assertEquals(newPartitionerConfig.getPartitionerParams(), updatedPartitionerParams);
+    assertEquals(newPartitionerConfig.getAmplificationFactor(), updatedAmpFactor);
+
+    // Even if the store doesn't have a partitioner config, the new partitioner config should be returned
+    doReturn(null).when(store).getPartitionerConfig();
+    PartitionerConfig newPartitionerConfig2 = UpdateStoreUtils.mergeNewSettingsIntoOldPartitionerConfig(
+        store,
+        Optional.of(updatedPartitionerClass),
+        Optional.of(updatedPartitionerParams),
+        Optional.of(updatedAmpFactor));
+    assertNotSame(newPartitionerConfig2, oldPartitionerConfig); // Should be a new object
+    assertEquals(newPartitionerConfig2.getPartitionerClass(), updatedPartitionerClass);
+    assertEquals(newPartitionerConfig2.getPartitionerParams(), updatedPartitionerParams);
+    assertEquals(newPartitionerConfig2.getAmplificationFactor(), updatedAmpFactor);
+  }
+
+  @Test
+  public void testAddNewViewConfigsIntoOldConfigs() {
+    String storeName = "storeName";
+    Store store = mock(Store.class);
+    String classA = "ClassA";
+    String classB = "ClassB";
+    String classC = "ClassC";
+
+    ViewConfig viewConfigA = mock(ViewConfig.class);
+    ViewConfig viewConfigB = mock(ViewConfig.class);
+    ViewConfig viewConfigC = mock(ViewConfig.class);
+
+    Map<String, ViewConfig> viewConfigMap = new HashMap<String, ViewConfig>() {
+      {
+        put(classA, viewConfigA);
+        put(classB, viewConfigB);
+      }
+    };
+
+    doReturn(storeName).when(store).getName();
+    doReturn(viewConfigMap).when(store).getViewConfigs();
+
+    Map<String, ViewConfig> mergedViewConfig1 =
+        UpdateStoreUtils.addNewViewConfigsIntoOldConfigs(store, classC, viewConfigC);
+    assertEquals(mergedViewConfig1.size(), 3);
+    assertEquals(mergedViewConfig1.get(classA), viewConfigA);
+    assertEquals(mergedViewConfig1.get(classB), viewConfigB);
+    assertEquals(mergedViewConfig1.get(classC), viewConfigC);
+
+    Map<String, ViewConfig> mergedViewConfig2 =
+        UpdateStoreUtils.addNewViewConfigsIntoOldConfigs(store, classB, viewConfigC);
+    assertEquals(mergedViewConfig2.size(), 2);
+    assertEquals(mergedViewConfig2.get(classA), viewConfigA);
+    assertEquals(mergedViewConfig2.get(classB), viewConfigC);
+
+    doReturn(null).when(store).getViewConfigs();
+    Map<String, ViewConfig> mergedViewConfig3 =
+        UpdateStoreUtils.addNewViewConfigsIntoOldConfigs(store, classA, viewConfigA);
+    assertEquals(mergedViewConfig3.size(), 1);
+    assertEquals(mergedViewConfig3.get(classA), viewConfigA);
+  }
+
+  @Test
+  public void testRemoveViewConfigFromStoreViewConfigMap() {
+    String storeName = "storeName";
+    Store store = mock(Store.class);
+    String classA = "ClassA";
+    String classB = "ClassB";
+
+    ViewConfig viewConfigA = mock(ViewConfig.class);
+    ViewConfig viewConfigB = mock(ViewConfig.class);
+
+    Map<String, ViewConfig> viewConfigMap = new HashMap<String, ViewConfig>() {
+      {
+        put(classA, viewConfigA);
+        put(classB, viewConfigB);
+      }
+    };
+
+    doReturn(storeName).when(store).getName();
+    doReturn(viewConfigMap).when(store).getViewConfigs();
+
+    Map<String, ViewConfig> newViewConfig1 = UpdateStoreUtils.removeViewConfigFromStoreViewConfigMap(store, classB);
+    assertEquals(newViewConfig1.size(), 1);
+    assertEquals(newViewConfig1.get(classA), viewConfigA);
+
+    doReturn(null).when(store).getViewConfigs();
+    Map<String, ViewConfig> newViewConfig2 = UpdateStoreUtils.removeViewConfigFromStoreViewConfigMap(store, classA);
+    assertTrue(newViewConfig2.isEmpty());
+  }
+
+  public static class PickyVenicePartitioner extends VenicePartitioner {
+    private static final String SCHEMA_VALID = "SCHEMA_VALID";
+
+    public PickyVenicePartitioner(VeniceProperties props, Schema schema) {
+      super(props, schema);
+    }
+
+    @Override
+    public int getPartitionId(byte[] keyBytes, int numPartitions) {
+      return 0;
+    }
+
+    @Override
+    public int getPartitionId(ByteBuffer keyByteBuffer, int numPartitions) {
+      return 0;
+    }
+
+    @Override
+    protected void checkSchema(@Nonnull Schema keySchema) throws PartitionerSchemaMismatchException {
+      if (!props.getBoolean(SCHEMA_VALID)) {
+        throw new PartitionerSchemaMismatchException("Schema is not valid");
+      }
+    }
+  }
+}
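Taken together, the partitioner cases above pin down a simple rule: once a store is hybrid on both sides of the update, only the amplification factor may change. A minimal sketch of that rule follows, using plain hypothetical types instead of Venice's Store/PartitionerConfig/VeniceHttpException; nothing below is the real UpdateStoreUtils signature, it only restates what the test expects.

import java.util.Map;
import java.util.Objects;

final class PartitionerUpdateRule {
  /**
   * Hedged illustration of the behavior the tests above expect; not the production implementation.
   * Batch-only stores, and stores converting between batch and hybrid, may change anything;
   * hybrid-to-hybrid updates may only change the amplification factor.
   */
  static void validate(
      boolean oldHybrid,
      boolean newHybrid,
      String oldClass,
      String newClass,
      Map<String, String> oldParams,
      Map<String, String> newParams) {
    if (!(oldHybrid && newHybrid)) {
      return; // unrestricted: batch-only, hybrid-to-batch and batch-to-hybrid updates
    }
    if (!Objects.equals(oldClass, newClass)) {
      throw new IllegalArgumentException("Partitioner class cannot be changed for hybrid store");
    }
    if (!Objects.equals(oldParams, newParams)) {
      throw new IllegalArgumentException("Partitioner params cannot be changed for hybrid store");
    }
    // Amplification factor changes are deliberately allowed, matching the last hybrid case above.
  }
}
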
diff --git a/services/venice-controller/src/test/java/com/linkedin/venice/controller/utils/ParentControllerConfigUpdateUtilsTest.java b/services/venice-controller/src/test/java/com/linkedin/venice/controller/utils/ParentControllerConfigUpdateUtilsTest.java
deleted file mode 100644
index c3963d52113..00000000000
--- a/services/venice-controller/src/test/java/com/linkedin/venice/controller/utils/ParentControllerConfigUpdateUtilsTest.java
+++ /dev/null
@@ -1,308 +0,0 @@
-package com.linkedin.venice.controller.utils;
-
-import static org.mockito.ArgumentMatchers.anyString;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-
-import com.linkedin.venice.controller.HelixVeniceClusterResources;
-import com.linkedin.venice.controller.VeniceControllerClusterConfig;
-import com.linkedin.venice.controller.VeniceHelixAdmin;
-import com.linkedin.venice.controller.VeniceParentHelixAdmin;
-import com.linkedin.venice.controller.kafka.protocol.admin.UpdateStore;
-import com.linkedin.venice.controller.util.ParentControllerConfigUpdateUtils;
-import com.linkedin.venice.meta.Store;
-import com.linkedin.venice.schema.SchemaEntry;
-import com.linkedin.venice.utils.TestWriteUtils;
-import java.util.Collections;
-import java.util.Optional;
-import org.testng.Assert;
-import org.testng.annotations.Test;
-
-
-public class ParentControllerConfigUpdateUtilsTest {
-  @Test
-  public void testPartialUpdateConfigUpdate() {
-    VeniceParentHelixAdmin parentHelixAdmin = mock(VeniceParentHelixAdmin.class);
-    VeniceHelixAdmin veniceHelixAdmin = mock(VeniceHelixAdmin.class);
-    String cluster = "foo";
-    String storeName = "bar";
-    Store store = mock(Store.class);
-    when(parentHelixAdmin.getVeniceHelixAdmin()).thenReturn(veniceHelixAdmin);
-    when(veniceHelixAdmin.getStore(anyString(), anyString())).thenReturn(store);
-    HelixVeniceClusterResources helixVeniceClusterResources = mock(HelixVeniceClusterResources.class);
-    VeniceControllerClusterConfig controllerConfig = mock(VeniceControllerClusterConfig.class);
-    when(helixVeniceClusterResources.getConfig()).thenReturn(controllerConfig);
-    when(veniceHelixAdmin.getHelixVeniceClusterResources(anyString())).thenReturn(helixVeniceClusterResources);
-    SchemaEntry schemaEntry = new SchemaEntry(1, TestWriteUtils.USER_WITH_DEFAULT_SCHEMA);
-    when(veniceHelixAdmin.getValueSchemas(anyString(), anyString())).thenReturn(Collections.singletonList(schemaEntry));
-    when(parentHelixAdmin.getValueSchemas(anyString(), anyString())).thenReturn(Collections.singletonList(schemaEntry));
-
-    /**
-     * Explicit request.
-     */
-    Optional<Boolean> partialUpdateRequest = Optional.of(true);
-    // Case 1: partial update config updated.
-    UpdateStore setStore = new UpdateStore();
-    Assert.assertTrue(
-        ParentControllerConfigUpdateUtils.checkAndMaybeApplyPartialUpdateConfig(
-            parentHelixAdmin,
-            cluster,
-            storeName,
-            partialUpdateRequest,
-            setStore,
-            true));
-    // Case 2: partial update config updated.
-    setStore = new UpdateStore();
-    when(store.isWriteComputationEnabled()).thenReturn(true);
-    Assert.assertTrue(
-        ParentControllerConfigUpdateUtils.checkAndMaybeApplyPartialUpdateConfig(
-            parentHelixAdmin,
-            cluster,
-            storeName,
-            partialUpdateRequest,
-            setStore,
-            true));
-    // Case 3: partial update config updated.
-    partialUpdateRequest = Optional.of(false);
-    when(store.isWriteComputationEnabled()).thenReturn(true);
-    Assert.assertTrue(
-        ParentControllerConfigUpdateUtils.checkAndMaybeApplyPartialUpdateConfig(
-            parentHelixAdmin,
-            cluster,
-            storeName,
-            partialUpdateRequest,
-            setStore,
-            true));
-    // Case 4: partial update config updated.
-    setStore = new UpdateStore();
-    when(store.isWriteComputationEnabled()).thenReturn(false);
-    Assert.assertTrue(
-        ParentControllerConfigUpdateUtils.checkAndMaybeApplyPartialUpdateConfig(
-            parentHelixAdmin,
-            cluster,
-            storeName,
-            partialUpdateRequest,
-            setStore,
-            true));
-
-    /**
-     * No request.
-     */
-    partialUpdateRequest = Optional.empty();
-    when(controllerConfig.isEnablePartialUpdateForHybridActiveActiveUserStores()).thenReturn(false);
-    when(controllerConfig.isEnablePartialUpdateForHybridNonActiveActiveUserStores()).thenReturn(false);
-    // Case 1: partial update config not updated.
-    setStore = new UpdateStore();
-    Assert.assertFalse(
-        ParentControllerConfigUpdateUtils.checkAndMaybeApplyPartialUpdateConfig(
-            parentHelixAdmin,
-            cluster,
-            storeName,
-            partialUpdateRequest,
-            setStore,
-            true));
-    setStore.activeActiveReplicationEnabled = true;
-    Assert.assertFalse(
-        ParentControllerConfigUpdateUtils.checkAndMaybeApplyPartialUpdateConfig(
-            parentHelixAdmin,
-            cluster,
-            storeName,
-            partialUpdateRequest,
-            setStore,
-            true));
-    // Case 2: partial update config updated.
-    when(controllerConfig.isEnablePartialUpdateForHybridActiveActiveUserStores()).thenReturn(true);
-    when(controllerConfig.isEnablePartialUpdateForHybridNonActiveActiveUserStores()).thenReturn(true);
-    setStore = new UpdateStore();
-    Assert.assertTrue(
-        ParentControllerConfigUpdateUtils.checkAndMaybeApplyPartialUpdateConfig(
-            parentHelixAdmin,
-            cluster,
-            storeName,
-            partialUpdateRequest,
-            setStore,
-            true));
-    setStore.activeActiveReplicationEnabled = true;
-    Assert.assertTrue(
-        ParentControllerConfigUpdateUtils.checkAndMaybeApplyPartialUpdateConfig(
-            parentHelixAdmin,
-            cluster,
-            storeName,
-            partialUpdateRequest,
-            setStore,
-            true));
-  }
-
-  @Test
-  public void testChunkingConfigUpdate() {
-    VeniceParentHelixAdmin parentHelixAdmin = mock(VeniceParentHelixAdmin.class);
-    VeniceHelixAdmin veniceHelixAdmin = mock(VeniceHelixAdmin.class);
-    String cluster = "foo";
-    String storeName = "bar";
-    Store store = mock(Store.class);
-    when(parentHelixAdmin.getVeniceHelixAdmin()).thenReturn(veniceHelixAdmin);
-    when(veniceHelixAdmin.getStore(anyString(), anyString())).thenReturn(store);
-
-    /**
-     * Explicit request.
-     */
-    Optional<Boolean> chunkingRequest = Optional.of(true);
-    when(store.isChunkingEnabled()).thenReturn(false);
-    // Case 1: chunking config updated.
-    UpdateStore setStore = new UpdateStore();
-    setStore.chunkingEnabled = false;
-    Assert.assertTrue(
-        ParentControllerConfigUpdateUtils
-            .checkAndMaybeApplyChunkingConfigChange(parentHelixAdmin, cluster, storeName, chunkingRequest, setStore));
-    // Case 2: chunking config updated.
-    setStore = new UpdateStore();
-    setStore.chunkingEnabled = false;
-    when(store.isChunkingEnabled()).thenReturn(true);
-    Assert.assertTrue(
-        ParentControllerConfigUpdateUtils
-            .checkAndMaybeApplyChunkingConfigChange(parentHelixAdmin, cluster, storeName, chunkingRequest, setStore));
-    // Case 3: chunking config updated.
-    chunkingRequest = Optional.of(false);
-    when(store.isChunkingEnabled()).thenReturn(true);
-    Assert.assertTrue(
-        ParentControllerConfigUpdateUtils
-            .checkAndMaybeApplyChunkingConfigChange(parentHelixAdmin, cluster, storeName, chunkingRequest, setStore));
-    // Case 4: chunking config updated.
-    setStore = new UpdateStore();
-    when(store.isChunkingEnabled()).thenReturn(false);
-    Assert.assertTrue(
-        ParentControllerConfigUpdateUtils
-            .checkAndMaybeApplyChunkingConfigChange(parentHelixAdmin, cluster, storeName, chunkingRequest, setStore));
-    /**
-     * No request.
-     */
-    chunkingRequest = Optional.empty();
-    when(store.isWriteComputationEnabled()).thenReturn(false);
-    // Case 1: already enabled, chunking config not updated.
-    when(store.isChunkingEnabled()).thenReturn(true);
-    setStore = new UpdateStore();
-    setStore.writeComputationEnabled = true;
-    Assert.assertFalse(
-        ParentControllerConfigUpdateUtils
-            .checkAndMaybeApplyChunkingConfigChange(parentHelixAdmin, cluster, storeName, chunkingRequest, setStore));
-    // Case 2: chunking config updated.
-    when(store.isChunkingEnabled()).thenReturn(false);
-    setStore = new UpdateStore();
-    setStore.writeComputationEnabled = true;
-    Assert.assertTrue(
-        ParentControllerConfigUpdateUtils
-            .checkAndMaybeApplyChunkingConfigChange(parentHelixAdmin, cluster, storeName, chunkingRequest, setStore));
-  }
-
-  @Test
-  public void testRmdChunkingConfigUpdate() {
-    VeniceParentHelixAdmin parentHelixAdmin = mock(VeniceParentHelixAdmin.class);
-    VeniceHelixAdmin veniceHelixAdmin = mock(VeniceHelixAdmin.class);
-    String cluster = "foo";
-    String storeName = "bar";
-    Store store = mock(Store.class);
-    when(parentHelixAdmin.getVeniceHelixAdmin()).thenReturn(veniceHelixAdmin);
-    when(veniceHelixAdmin.getStore(anyString(), anyString())).thenReturn(store);
-
-    /**
-     * Explicit request.
-     */
-    Optional<Boolean> chunkingRequest = Optional.of(true);
-    when(store.isChunkingEnabled()).thenReturn(false);
-    // Case 1: chunking config updated.
-    UpdateStore setStore = new UpdateStore();
-    setStore.chunkingEnabled = false;
-    Assert.assertTrue(
-        ParentControllerConfigUpdateUtils.checkAndMaybeApplyRmdChunkingConfigChange(
-            parentHelixAdmin,
-            cluster,
-            storeName,
-            chunkingRequest,
-            setStore));
-    // Case 2: chunking config updated.
-    setStore = new UpdateStore();
-    setStore.chunkingEnabled = false;
-    when(store.isChunkingEnabled()).thenReturn(true);
-    Assert.assertTrue(
-        ParentControllerConfigUpdateUtils.checkAndMaybeApplyRmdChunkingConfigChange(
-            parentHelixAdmin,
-            cluster,
-            storeName,
-            chunkingRequest,
-            setStore));
-    // Case 3: chunking config updated.
-    chunkingRequest = Optional.of(false);
-    when(store.isChunkingEnabled()).thenReturn(true);
-    Assert.assertTrue(
-        ParentControllerConfigUpdateUtils.checkAndMaybeApplyRmdChunkingConfigChange(
-            parentHelixAdmin,
-            cluster,
-            storeName,
-            chunkingRequest,
-            setStore));
-    // Case 4: chunking config updated.
-    setStore = new UpdateStore();
-    when(store.isChunkingEnabled()).thenReturn(false);
-    Assert.assertTrue(
-        ParentControllerConfigUpdateUtils.checkAndMaybeApplyRmdChunkingConfigChange(
-            parentHelixAdmin,
-            cluster,
-            storeName,
-            chunkingRequest,
-            setStore));
-
-    /**
-     * No request.
-     */
-    chunkingRequest = Optional.empty();
-    when(store.isWriteComputationEnabled()).thenReturn(false);
-    // Case 1: already enabled, chunking config not updated.
-    when(store.isChunkingEnabled()).thenReturn(true);
-    setStore = new UpdateStore();
-    setStore.writeComputationEnabled = true;
-    Assert.assertFalse(
-        ParentControllerConfigUpdateUtils.checkAndMaybeApplyRmdChunkingConfigChange(
-            parentHelixAdmin,
-            cluster,
-            storeName,
-            chunkingRequest,
-            setStore));
-    // Case 2: chunking config not updated.
-    when(store.isChunkingEnabled()).thenReturn(false);
-    setStore = new UpdateStore();
-    setStore.writeComputationEnabled = true;
-    setStore.activeActiveReplicationEnabled = false;
-    Assert.assertFalse(
-        ParentControllerConfigUpdateUtils.checkAndMaybeApplyRmdChunkingConfigChange(
-            parentHelixAdmin,
-            cluster,
-            storeName,
-            chunkingRequest,
-            setStore));
-    // Case 3: chunking config not updated.
-    when(store.isChunkingEnabled()).thenReturn(false);
-    setStore = new UpdateStore();
-    setStore.writeComputationEnabled = false;
-    setStore.activeActiveReplicationEnabled = true;
-    Assert.assertFalse(
-        ParentControllerConfigUpdateUtils.checkAndMaybeApplyRmdChunkingConfigChange(
-            parentHelixAdmin,
-            cluster,
-            storeName,
-            chunkingRequest,
-            setStore));
-    // Case 4: chunking config updated.
-    when(store.isChunkingEnabled()).thenReturn(false);
-    setStore = new UpdateStore();
-    setStore.writeComputationEnabled = true;
-    setStore.activeActiveReplicationEnabled = true;
-    Assert.assertTrue(
-        ParentControllerConfigUpdateUtils.checkAndMaybeApplyRmdChunkingConfigChange(
-            parentHelixAdmin,
-            cluster,
-            storeName,
-            chunkingRequest,
-            setStore));
-
-  }
-}
diff --git a/services/venice-controller/src/test/resources/superset_schema_test/v5.avsc b/services/venice-controller/src/test/resources/superset_schema_test/v5.avsc
new file mode 100644
index 00000000000..7081139240e
--- /dev/null
+++ b/services/venice-controller/src/test/resources/superset_schema_test/v5.avsc
@@ -0,0 +1,12 @@
+{
+  "type" : "record",
+  "namespace" : "example.avro",
+  "name" : "ValueRecordName",
+  "fields" : [
+    { "name" : "f0", "type" : "int", "default" : -1 },
+    { "name" : "f1", "type" : "int", "default" : -1 },
+    { "name" : "f2", "type" : "int", "default" : -1 },
+    { "name" : "f3", "type" : "int", "default" : -1 }
+  ],
+  "custom_prop" : "custom_prop_value_for_v5"
+}
\ No newline at end of file
diff --git a/services/venice-controller/src/test/resources/superset_schema_test/v6.avsc b/services/venice-controller/src/test/resources/superset_schema_test/v6.avsc
new file mode 100644
index 00000000000..57328b34ae1
--- /dev/null
+++ b/services/venice-controller/src/test/resources/superset_schema_test/v6.avsc
@@ -0,0 +1,9 @@
+{
+  "type" : "record",
+  "namespace" : "example.avro",
+  "name" : "ValueRecordName",
+  "fields" : [
+    { "name" : "f0", "type" : "int", "default" : -1 }
+  ],
+  "custom_prop" : "custom_prop_value_for_v6"
+}
\ No newline at end of file
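
The two resources above feed the custom-prop-aware superset schema tests: v5 carries fields f0..f3 with custom_prop_value_for_v5, while v6 shrinks back to f0 with custom_prop_value_for_v6. As a rough sketch of the idea only (naiveSuperset below is a hypothetical helper, not the controller's generator, and the assumption that the newer schema's custom_prop wins is an assumption), a superset of the two would keep the union of fields f0..f3:

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import org.apache.avro.Schema;

public final class SupersetSchemaSketch {
  // Illustration only: union the fields of two record schemas, copying each field because
  // Avro Field objects cannot be reused across schemas, and carry over the newer schema's custom_prop.
  public static Schema naiveSuperset(Schema older, Schema newer) {
    Map<String, Schema.Field> fieldsByName = new LinkedHashMap<>();
    for (Schema.Field field: newer.getFields()) {
      fieldsByName.put(field.name(), field);
    }
    for (Schema.Field field: older.getFields()) {
      fieldsByName.putIfAbsent(field.name(), field);
    }
    List<Schema.Field> mergedFields = new ArrayList<>();
    for (Schema.Field field: fieldsByName.values()) {
      mergedFields.add(new Schema.Field(field.name(), field.schema(), field.doc(), field.defaultVal()));
    }
    Schema superset = Schema.createRecord(newer.getName(), newer.getDoc(), newer.getNamespace(), false);
    superset.setFields(mergedFields);
    Object customProp = newer.getObjectProp("custom_prop");
    if (customProp != null) {
      superset.addProp("custom_prop", customProp);
    }
    return superset;
  }
}

Parsing v5.avsc and v6.avsc with new Schema.Parser().parse(...) and passing them to naiveSuperset (older = v5, newer = v6) would yield a record containing f0 through f3 with custom_prop_value_for_v6, which is presumably the shape the new superset-generator tests check.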