Class PushJobSetting
- All Implemented Interfaces:
Serializable
- See Also:
-
Field Summary
FieldsModifier and TypeFieldDescriptionbooleanintbooleanbooleanintintbooleanClass<? extends DataWriterComputeJob>booleanintbooleanbooleanbooleanbooleanbooleanorg.apache.avro.Schemalongbooleanbooleanbooleanbooleanbooleanbooleanbooleanbooleanbooleanbooleanbooleanbooleanbooleanbooleanbooleanbooleanbooleanbooleanbooleanlonglongbooleanbooleanorg.apache.avro.SchemabooleanintbooleanintlongBroker URL for producing/writing new version topic data.longbooleanBroker URL for consuming/reading existing version data during a KIF (Kafka Input Format) repush.intbooleanlongbooleanlongbooleanintbooleanbyte[]booleanbooleanorg.apache.avro.Schemalongbooleanintbyte[]org.apache.avro.SchemaintintVersion part of the store-version / topic namebooleanClass<? extends VenicePushJob> -
Constructor Summary
Constructors -
Method Summary
-
Field Details
-
jobStartTimeMs
public long jobStartTimeMs -
jobId
-
jobExecutionId
-
jobServerName
-
jobTmpDir
-
enableSSL
public boolean enableSSL -
vpjEntryClass
-
veniceControllerUrl
-
storeName
-
inputURI
-
sourceGridFabric
-
batchNumBytes
public int batchNumBytes -
isIncrementalPush
public boolean isIncrementalPush -
incrementalPushVersion
-
isDuplicateKeyAllowed
public boolean isDuplicateKeyAllowed -
controllerRetries
public int controllerRetries -
controllerStatusPollRetries
public int controllerStatusPollRetries -
pollJobStatusIntervalMs
public long pollJobStatusIntervalMs -
jobStatusInUnknownStateTimeoutMs
public long jobStatusInUnknownStateTimeoutMs -
pushJobTimeoutOverrideMs
public long pushJobTimeoutOverrideMs -
sendControlMessagesDirectly
public boolean sendControlMessagesDirectly -
isSourceETL
public boolean isSourceETL -
enableWriteCompute
public boolean enableWriteCompute -
isSourceKafka
public boolean isSourceKafka -
repushSourcePubsubBroker
Broker URL for consuming/reading existing version data during a KIF (Kafka Input Format) repush. This is the "input/source" side of a repush: the Kafka broker from which the previous version's data is read. It is set from one of two sources:
- A `RepushInfoResponse` returned by the controller (which resolves the fabric name from `KAFKA_INPUT_FABRIC` to a broker URL), or
- An explicit `VENICE_REPUSH_SOURCE_PUBSUB_BROKER` property provided by the caller.
This may point to a different fabric than `pushDestinationPubsubBroker` when the repush input fabric differs from the NR source fabric. For example, a repush may read v1 data from dc-1 but write v2 data to dc-0 (the NR source). -
kafkaInputTopic
-
repushSourceVersion
public int repushSourceVersion -
rewindTimeInSecondsOverride
public long rewindTimeInSecondsOverride -
pushToSeparateRealtimeTopicEnabled
public boolean pushToSeparateRealtimeTopicEnabled -
versionSeparateRealTimeTopicEnabled
public boolean versionSeparateRealTimeTopicEnabled -
kafkaInputCombinerEnabled
public boolean kafkaInputCombinerEnabled -
kafkaInputBuildNewDictEnabled
public boolean kafkaInputBuildNewDictEnabled -
validateRemoteReplayPolicy
-
suppressEndOfPushMessage
public boolean suppressEndOfPushMessage -
deferVersionSwap
public boolean deferVersionSwap -
extendedSchemaValidityCheckEnabled
public boolean extendedSchemaValidityCheckEnabled -
compressionMetricCollectionEnabled
public boolean compressionMetricCollectionEnabled -
repushTTLEnabled
public boolean repushTTLEnabled -
isCompliancePush
public boolean isCompliancePush -
repushTTLStartTimeMs
public long repushTTLStartTimeMs -
rmdSchemaDir
-
valueSchemaDir
-
controllerD2ServiceName
-
parentControllerRegionD2ZkHosts
-
childControllerRegionD2ZkHosts
-
livenessHeartbeatEnabled
public boolean livenessHeartbeatEnabled -
livenessHeartbeatStoreName
-
multiRegion
public boolean multiRegion -
d2Routing
public boolean d2Routing -
targetedRegions
-
isTargetedRegionPushEnabled
public boolean isTargetedRegionPushEnabled -
isTargetRegionPushWithDeferredSwapEnabled
public boolean isTargetRegionPushWithDeferredSwapEnabled -
targetRegionPushWithDeferredSwapWaitTime
public int targetRegionPushWithDeferredSwapWaitTime -
isSystemSchemaReaderEnabled
public boolean isSystemSchemaReaderEnabled -
isZstdDictCreationRequired
public boolean isZstdDictCreationRequired -
isZstdDictCreationSuccess
public boolean isZstdDictCreationSuccess -
dataWriterComputeJobClass
-
clusterName
-
storeKeySchema
public org.apache.avro.Schema storeKeySchema -
isChunkingEnabled
public boolean isChunkingEnabled -
isRmdChunkingEnabled
public boolean isRmdChunkingEnabled -
storeStorageQuota
public long storeStorageQuota -
isSchemaAutoRegisterFromPushJobEnabled
public boolean isSchemaAutoRegisterFromPushJobEnabled -
storeCompressionStrategy
-
isStoreWriteComputeEnabled
public boolean isStoreWriteComputeEnabled -
isStoreIncrementalPushEnabled
public boolean isStoreIncrementalPushEnabled -
hybridStoreConfig
-
storeResponse
-
topic
-
version
public int version — Version part of the store-version / topic name -
partitionCount
public int partitionCount -
pushDestinationPubsubBroker
Broker URL for producing/writing new version topic data. This is the "output/destination" side of a push: the Kafka broker to which new version data records are written. It is set from
`VersionCreationResponse.getKafkaBootstrapServers()`, which returns the broker for the NR (Native Replication) source region. In NR mode, data is first written to this broker, then replicated to other regions by the storage nodes. For a cross-fabric repush (where the input fabric differs from the NR source), this URL should point to the NR source fabric's broker — not the input fabric. For example, if NR source = dc-0 and repush reads from dc-1, this URL should be dc-0's broker.
-
sslToKafka
public boolean sslToKafka -
topicCompressionStrategy
-
partitionerClass
-
partitionerParams
-
chunkingEnabled
public boolean chunkingEnabled -
rmdChunkingEnabled
public boolean rmdChunkingEnabled -
maxRecordSizeBytes
public int maxRecordSizeBytes -
enableUncompressedRecordSizeLimit
public boolean enableUncompressedRecordSizeLimit -
kafkaSourceRegion
-
repushInfoResponse
-
repushUseFallbackValueSchemaId
public boolean repushUseFallbackValueSchemaId -
isAvro
public boolean isAvro -
valueSchemaId
public int valueSchemaId -
rmdSchemaId
public int rmdSchemaId -
derivedSchemaId
public int derivedSchemaId -
keyField
-
valueField
-
rmdField
-
inputDataSchema
public org.apache.avro.Schema inputDataSchema -
inputDataSchemaString
-
keySchema
public org.apache.avro.Schema keySchema -
keySchemaString
-
valueSchema
public org.apache.avro.Schema valueSchema -
valueSchemaString
-
replicationMetadataSchemaString
-
vsonInputKeySchema
-
vsonInputKeySchemaString
-
vsonInputValueSchema
-
vsonInputValueSchemaString
-
generatePartialUpdateRecordFromInput
public boolean generatePartialUpdateRecordFromInput -
etlValueSchemaTransformation
-
newKmeSchemasFromController
-
inputHasRecords
public boolean inputHasRecords -
inputFileDataSizeInBytes
public long inputFileDataSizeInBytes -
sourceKafkaInputVersionInfo
-
sourceVersionCompressionStrategy
-
sourceVersionChunkingEnabled
public boolean sourceVersionChunkingEnabled -
sourceDictionary
public byte[] sourceDictionary -
topicDictionary
public byte[] topicDictionary -
materializedViewConfigFlatMap
-
isBatchWriteOptimizationForHybridStoreEnabled
public boolean isBatchWriteOptimizationForHybridStoreEnabled -
isSortedIngestionEnabled
public boolean isSortedIngestionEnabled -
allowRegularPushWithTTLRepush
public boolean allowRegularPushWithTTLRepush
-
-
Constructor Details
-
PushJobSetting
public PushJobSetting()
-