Documentation ¶
Index ¶
- func CleanupPolicyTypeChoices() []string
- func CompressionTypeChoices() []string
- func CompressionTypeValueChoices() []string
- func ConfigMessageFormatVersionTypeChoices() []string
- func FormatTypeChoices() []string
- func MessageFormatVersionTypeChoices() []string
- func MessageTimestampTypeChoices() []string
- func MessageTimestampTypeValueChoices() []string
- func SourceTypeChoices() []string
- func TopicStateTypeChoices() []string
- type CleanupPolicyOut
- type CleanupPolicyType
- type CompressionType
- type CompressionTypeOut
- type CompressionTypeValue
- type ConfigIn
- type ConfigMessageFormatVersionType
- type ConfigOut
- type ConsumerGroupOut
- type DeleteRetentionMsOut
- type FileDeleteDelayMsOut
- type FlushMessagesOut
- type FlushMsOut
- type FormatType
- type Handler
- type IndexIntervalBytesOut
- type InklessEnableOut
- type KafkaTopicHandler
- func (h *KafkaTopicHandler) ServiceKafkaTopicCreate(ctx context.Context, project string, serviceName string, ...) error
- func (h *KafkaTopicHandler) ServiceKafkaTopicDelete(ctx context.Context, project string, serviceName string, topicName string) error
- func (h *KafkaTopicHandler) ServiceKafkaTopicGet(ctx context.Context, project string, serviceName string, topicName string) (*ServiceKafkaTopicGetOut, error)
- func (h *KafkaTopicHandler) ServiceKafkaTopicList(ctx context.Context, project string, serviceName string) ([]TopicOut, error)
- func (h *KafkaTopicHandler) ServiceKafkaTopicMessageList(ctx context.Context, project string, serviceName string, topicName string, ...) ([]MessageOut, error)
- func (h *KafkaTopicHandler) ServiceKafkaTopicMessageProduce(ctx context.Context, project string, serviceName string, topicName string, ...) (*ServiceKafkaTopicMessageProduceOut, error)
- func (h *KafkaTopicHandler) ServiceKafkaTopicUpdate(ctx context.Context, project string, serviceName string, topicName string, ...) error
- type LocalRetentionBytesOut
- type LocalRetentionMsOut
- type MaxCompactionLagMsOut
- type MaxMessageBytesOut
- type MessageDownconversionEnableOut
- type MessageFormatVersionOut
- type MessageFormatVersionType
- type MessageOut
- type MessageTimestampDifferenceMaxMsOut
- type MessageTimestampType
- type MessageTimestampTypeOut
- type MessageTimestampTypeValue
- type MinCleanableDirtyRatioOut
- type MinCompactionLagMsOut
- type MinInsyncReplicasOut
- type OffsetOut
- type PartitionOut
- type PreallocateOut
- type RecordIn
- type RemoteStorageEnableOut
- type RetentionBytesOut
- type RetentionMsOut
- type SegmentBytesOut
- type SegmentIndexBytesOut
- type SegmentJitterMsOut
- type SegmentMsOut
- type ServiceKafkaTopicCreateIn
- type ServiceKafkaTopicGetOut
- type ServiceKafkaTopicMessageListIn
- type ServiceKafkaTopicMessageProduceIn
- type ServiceKafkaTopicMessageProduceOut
- type ServiceKafkaTopicUpdateConfigIn
- type ServiceKafkaTopicUpdateIn
- type SourceType
- type TagIn
- type TagOut
- type TopicOut
- type TopicStateType
- type UncleanLeaderElectionEnableOut
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
func CleanupPolicyTypeChoices ¶
func CleanupPolicyTypeChoices() []string
func CompressionTypeChoices ¶
func CompressionTypeChoices() []string
func CompressionTypeValueChoices ¶ added in v0.3.0
func CompressionTypeValueChoices() []string
func ConfigMessageFormatVersionTypeChoices ¶ added in v0.118.0
func ConfigMessageFormatVersionTypeChoices() []string
func FormatTypeChoices ¶
func FormatTypeChoices() []string
func MessageFormatVersionTypeChoices ¶
func MessageFormatVersionTypeChoices() []string
func MessageTimestampTypeChoices ¶
func MessageTimestampTypeChoices() []string
func MessageTimestampTypeValueChoices ¶ added in v0.3.0
func MessageTimestampTypeValueChoices() []string
func SourceTypeChoices ¶ added in v0.3.0
func SourceTypeChoices() []string
func TopicStateTypeChoices ¶ added in v0.3.0
func TopicStateTypeChoices() []string
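The Choices functions above return the permitted string values for each enumerated type, which makes them convenient for validating input before a request is sent. A minimal sketch, not part of the package itself; the import path and the sample value are assumptions for illustration:

package main

import (
    "fmt"
    "slices"

    "github.com/aiven/go-client-codegen/handler/kafkatopic" // assumed import path
)

func main() {
    // Validate a user-supplied cleanup policy against the generated choice list
    // instead of waiting for the API to reject it.
    policy := "compact,delete"
    if !slices.Contains(kafkatopic.CleanupPolicyTypeChoices(), policy) {
        fmt.Printf("unsupported cleanup_policy %q; valid values: %v\n", policy, kafkatopic.CleanupPolicyTypeChoices())
        return
    }
    fmt.Println("cleanup_policy accepted:", policy)
}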
Types ¶
type CleanupPolicyOut ¶
type CleanupPolicyOut struct {
Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
Synonyms []struct {
Name string `json:"name"` // Synonym name
Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
Value string `json:"value"` // Synonym value
} `json:"synonyms,omitempty"` // Configuration synonyms
Value string `json:"value"` // The retention policy to use on old segments. Possible values include 'delete', 'compact', or a comma-separated list of them. The default policy ('delete') will discard old segments when their retention time or size limit has been reached. The 'compact' setting will enable log compaction on the topic.
}
CleanupPolicyOut cleanup.policy value, source and synonyms
type CleanupPolicyType ¶
type CleanupPolicyType string
const ( CleanupPolicyTypeCompact CleanupPolicyType = "compact" CleanupPolicyTypeCompactDelete CleanupPolicyType = "compact,delete" CleanupPolicyTypeDelete CleanupPolicyType = "delete" )
type CompressionType ¶
type CompressionType string
const ( CompressionTypeGzip CompressionType = "gzip" CompressionTypeLz4 CompressionType = "lz4" CompressionTypeProducer CompressionType = "producer" CompressionTypeSnappy CompressionType = "snappy" CompressionTypeUncompressed CompressionType = "uncompressed" CompressionTypeZstd CompressionType = "zstd" )
type CompressionTypeOut ¶
type CompressionTypeOut struct {
Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
Synonyms []struct {
Name string `json:"name"` // Synonym name
Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
Value string `json:"value"` // Synonym value
} `json:"synonyms,omitempty"` // Configuration synonyms
Value CompressionTypeValue `json:"value"` // Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression codec set by the producer.
}
CompressionTypeOut compression.type value, source and synonyms
type CompressionTypeValue ¶ added in v0.3.0
type CompressionTypeValue string
const ( CompressionTypeValueGzip CompressionTypeValue = "gzip" CompressionTypeValueLz4 CompressionTypeValue = "lz4" CompressionTypeValueProducer CompressionTypeValue = "producer" CompressionTypeValueSnappy CompressionTypeValue = "snappy" CompressionTypeValueUncompressed CompressionTypeValue = "uncompressed" CompressionTypeValueZstd CompressionTypeValue = "zstd" )
type ConfigIn ¶
type ConfigIn struct {
CleanupPolicy CleanupPolicyType `json:"cleanup_policy,omitempty"` // The retention policy to use on old segments. Possible values include 'delete', 'compact', or a comma-separated list of them. The default policy ('delete') will discard old segments when their retention time or size limit has been reached. The 'compact' setting will enable log compaction on the topic.
CompressionType CompressionType `json:"compression_type,omitempty"` // Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression codec set by the producer.
DeleteRetentionMs *int `json:"delete_retention_ms,omitempty"` // The amount of time to retain delete tombstone markers for log compacted topics. This setting also gives a bound on the time in which a consumer must complete a read if they begin from offset 0 to ensure that they get a valid snapshot of the final stage (otherwise delete tombstones may be collected before they complete their scan).
FileDeleteDelayMs *int `json:"file_delete_delay_ms,omitempty"` // The time to wait before deleting a file from the filesystem.
FlushMessages *int `json:"flush_messages,omitempty"` // This setting allows specifying an interval at which we will force an fsync of data written to the log. For example if this was set to 1 we would fsync after every message; if it were 5 we would fsync after every five messages. In general we recommend you not set this and use replication for durability and allow the operating system's background flush capabilities as it is more efficient.
FlushMs *int `json:"flush_ms,omitempty"` // This setting allows specifying a time interval at which we will force an fsync of data written to the log. For example if this was set to 1000 we would fsync after 1000 ms had passed. In general we recommend you not set this and use replication for durability and allow the operating system's background flush capabilities as it is more efficient.
IndexIntervalBytes *int `json:"index_interval_bytes,omitempty"` // This setting controls how frequently Kafka adds an index entry to its offset index. The default setting ensures that we index a message roughly every 4096 bytes. More indexing allows reads to jump closer to the exact position in the log but makes the index larger. You probably don't need to change this.
InklessEnable *bool `json:"inkless_enable,omitempty"` // Indicates whether inkless should be enabled. This is only available for BYOC services with Inkless feature enabled.
LocalRetentionBytes *int `json:"local_retention_bytes,omitempty"` // This configuration controls the maximum bytes tiered storage will retain segment files locally before it will discard old log segments to free up space. If set to -2, the limit is equal to overall retention time. If set to -1, no limit is applied but it's possible only if overall retention is also -1.
LocalRetentionMs *int `json:"local_retention_ms,omitempty"` // This configuration controls the maximum time tiered storage will retain segment files locally before it will discard old log segments to free up space. If set to -2, the time limit is equal to overall retention time. If set to -1, no time limit is applied but it's possible only if overall retention is also -1.
MaxCompactionLagMs *int `json:"max_compaction_lag_ms,omitempty"` // The maximum time a message will remain ineligible for compaction in the log. Only applicable for logs that are being compacted.
MaxMessageBytes *int `json:"max_message_bytes,omitempty"` // The largest record batch size allowed by Kafka (after compression if compression is enabled). If this is increased and there are consumers older than 0.10.2, the consumers' fetch size must also be increased so that they can fetch record batches this large. In the latest message format version, records are always grouped into batches for efficiency. In previous message format versions, uncompressed records are not grouped into batches and this limit only applies to a single record in that case.
MessageDownconversionEnable *bool `json:"message_downconversion_enable,omitempty"` // This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. When set to false, broker will not perform down-conversion for consumers expecting an older message format. The broker responds with UNSUPPORTED_VERSION error for consume requests from such older clients. This configuration does not apply to any message format conversion that might be required for replication to followers.
MessageFormatVersion MessageFormatVersionType `json:"message_format_version,omitempty"` // Specify the message format version the broker will use to append messages to the logs. The value should be a valid ApiVersion. Some examples are: 0.8.2, 0.9.0.0, 0.10.0, check ApiVersion for more details. By setting a particular message format version, the user is certifying that all the existing messages on disk are smaller than or equal to the specified version. Setting this value incorrectly will cause consumers with older versions to break as they will receive messages with a format that they don't understand. Deprecated in Kafka 4.0+: this configuration is removed and any supplied value will be ignored; for services upgraded to 4.0+, the returned value may be 'None'.
MessageTimestampDifferenceMaxMs *int `json:"message_timestamp_difference_max_ms,omitempty"` // The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message. If message.timestamp.type=CreateTime, a message will be rejected if the difference in timestamp exceeds this threshold. This configuration is ignored if message.timestamp.type=LogAppendTime.
MessageTimestampType MessageTimestampType `json:"message_timestamp_type,omitempty"` // Define whether the timestamp in the message is message create time or log append time.
MinCleanableDirtyRatio *float64 `json:"min_cleanable_dirty_ratio,omitempty"` // This configuration controls how frequently the log compactor will attempt to clean the log (assuming log compaction is enabled). By default we will avoid cleaning a log where more than 50% of the log has been compacted. This ratio bounds the maximum space wasted in the log by duplicates (at 50% at most 50% of the log could be duplicates). A higher ratio will mean fewer, more efficient cleanings but will mean more wasted space in the log. If the max.compaction.lag.ms or the min.compaction.lag.ms configurations are also specified, then the log compactor considers the log to be eligible for compaction as soon as either: (i) the dirty ratio threshold has been met and the log has had dirty (uncompacted) records for at least the min.compaction.lag.ms duration, or (ii) if the log has had dirty (uncompacted) records for at most the max.compaction.lag.ms period.
MinCompactionLagMs *int `json:"min_compaction_lag_ms,omitempty"` // The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
MinInsyncReplicas *int `json:"min_insync_replicas,omitempty"` // When a producer sets acks to 'all' (or '-1'), this configuration specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. If this minimum cannot be met, then the producer will raise an exception (either NotEnoughReplicas or NotEnoughReplicasAfterAppend). When used together, min.insync.replicas and acks allow you to enforce greater durability guarantees. A typical scenario would be to create a topic with a replication factor of 3, set min.insync.replicas to 2, and produce with acks of 'all'. This will ensure that the producer raises an exception if a majority of replicas do not receive a write.
Preallocate *bool `json:"preallocate,omitempty"` // True if we should preallocate the file on disk when creating a new log segment.
RemoteStorageEnable *bool `json:"remote_storage_enable,omitempty"` // Indicates whether tiered storage should be enabled. This is only available for services with Tiered Storage feature enabled.
RetentionBytes *int `json:"retention_bytes,omitempty"` // This configuration controls the maximum size a partition (which consists of log segments) can grow to before we will discard old log segments to free up space if we are using the 'delete' retention policy. By default there is no size limit only a time limit. Since this limit is enforced at the partition level, multiply it by the number of partitions to compute the topic retention in bytes.
RetentionMs *int `json:"retention_ms,omitempty"` // This configuration controls the maximum time we will retain a log before we will discard old log segments to free up space if we are using the 'delete' retention policy. This represents an SLA on how soon consumers must read their data. If set to -1, no time limit is applied.
SegmentBytes *int `json:"segment_bytes,omitempty"` // This configuration controls the segment file size for the log. Retention and cleaning is always done a file at a time so a larger segment size means fewer files but less granular control over retention. Setting this to a very low value has consequences, and the Aiven management plane ignores values less than 10 megabytes.
SegmentIndexBytes *int `json:"segment_index_bytes,omitempty"` // This configuration controls the size of the index that maps offsets to file positions. We preallocate this index file and shrink it only after log rolls. You generally should not need to change this setting.
SegmentJitterMs *int `json:"segment_jitter_ms,omitempty"` // The maximum random jitter subtracted from the scheduled segment roll time to avoid thundering herds of segment rolling
SegmentMs *int `json:"segment_ms,omitempty"` // This configuration controls the period of time after which Kafka will force the log to roll even if the segment file isn't full to ensure that retention can delete or compact old data. Setting this to a very low value has consequences, and the Aiven management plane ignores values less than 10 seconds.
UncleanLeaderElectionEnable *bool `json:"unclean_leader_election_enable,omitempty"` // Indicates whether to enable replicas not in the ISR set to be elected as leader as a last resort, even though doing so may result in data loss.
}
ConfigIn Kafka topic configuration
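A hedged sketch of populating ConfigIn for a create or update request. The ptr helper and the chosen values are illustrative only, the import path is assumed, and just a few of the available fields are set:

package kafkatopicexample

import "github.com/aiven/go-client-codegen/handler/kafkatopic" // assumed import path

// ptr is a local helper for the pointer-typed optional fields.
func ptr[T any](v T) *T { return &v }

// exampleTopicConfig builds a compacted, zstd-compressed topic configuration.
func exampleTopicConfig() *kafkatopic.ConfigIn {
    return &kafkatopic.ConfigIn{
        CleanupPolicy:     kafkatopic.CleanupPolicyTypeCompact,
        CompressionType:   kafkatopic.CompressionTypeZstd,
        MinInsyncReplicas: ptr(2),
        RetentionMs:       ptr(7 * 24 * 60 * 60 * 1000), // one week
        SegmentBytes:      ptr(256 * 1024 * 1024),       // values below 10 megabytes are ignored by the management plane
    }
}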
type ConfigMessageFormatVersionType ¶ added in v0.118.0
type ConfigMessageFormatVersionType string
const (
ConfigMessageFormatVersionType0100 ConfigMessageFormatVersionType = "0.10.0"
ConfigMessageFormatVersionType0100Iv0 ConfigMessageFormatVersionType = "0.10.0-IV0"
ConfigMessageFormatVersionType0100Iv1 ConfigMessageFormatVersionType = "0.10.0-IV1"
ConfigMessageFormatVersionType0101 ConfigMessageFormatVersionType = "0.10.1"
ConfigMessageFormatVersionType0101Iv0 ConfigMessageFormatVersionType = "0.10.1-IV0"
ConfigMessageFormatVersionType0101Iv1 ConfigMessageFormatVersionType = "0.10.1-IV1"
ConfigMessageFormatVersionType0101Iv2 ConfigMessageFormatVersionType = "0.10.1-IV2"
ConfigMessageFormatVersionType0102 ConfigMessageFormatVersionType = "0.10.2"
ConfigMessageFormatVersionType0102Iv0 ConfigMessageFormatVersionType = "0.10.2-IV0"
ConfigMessageFormatVersionType0110 ConfigMessageFormatVersionType = "0.11.0"
ConfigMessageFormatVersionType0110Iv0 ConfigMessageFormatVersionType = "0.11.0-IV0"
ConfigMessageFormatVersionType0110Iv1 ConfigMessageFormatVersionType = "0.11.0-IV1"
ConfigMessageFormatVersionType0110Iv2 ConfigMessageFormatVersionType = "0.11.0-IV2"
ConfigMessageFormatVersionType080 ConfigMessageFormatVersionType = "0.8.0"
ConfigMessageFormatVersionType081 ConfigMessageFormatVersionType = "0.8.1"
ConfigMessageFormatVersionType082 ConfigMessageFormatVersionType = "0.8.2"
ConfigMessageFormatVersionType090 ConfigMessageFormatVersionType = "0.9.0"
ConfigMessageFormatVersionType10 ConfigMessageFormatVersionType = "1.0"
ConfigMessageFormatVersionType10Iv0 ConfigMessageFormatVersionType = "1.0-IV0"
ConfigMessageFormatVersionType11 ConfigMessageFormatVersionType = "1.1"
ConfigMessageFormatVersionType11Iv0 ConfigMessageFormatVersionType = "1.1-IV0"
ConfigMessageFormatVersionType20 ConfigMessageFormatVersionType = "2.0"
ConfigMessageFormatVersionType20Iv0 ConfigMessageFormatVersionType = "2.0-IV0"
ConfigMessageFormatVersionType20Iv1 ConfigMessageFormatVersionType = "2.0-IV1"
ConfigMessageFormatVersionType21 ConfigMessageFormatVersionType = "2.1"
ConfigMessageFormatVersionType21Iv0 ConfigMessageFormatVersionType = "2.1-IV0"
ConfigMessageFormatVersionType21Iv1 ConfigMessageFormatVersionType = "2.1-IV1"
ConfigMessageFormatVersionType21Iv2 ConfigMessageFormatVersionType = "2.1-IV2"
ConfigMessageFormatVersionType22 ConfigMessageFormatVersionType = "2.2"
ConfigMessageFormatVersionType22Iv0 ConfigMessageFormatVersionType = "2.2-IV0"
ConfigMessageFormatVersionType22Iv1 ConfigMessageFormatVersionType = "2.2-IV1"
ConfigMessageFormatVersionType23 ConfigMessageFormatVersionType = "2.3"
ConfigMessageFormatVersionType23Iv0 ConfigMessageFormatVersionType = "2.3-IV0"
ConfigMessageFormatVersionType23Iv1 ConfigMessageFormatVersionType = "2.3-IV1"
ConfigMessageFormatVersionType24 ConfigMessageFormatVersionType = "2.4"
ConfigMessageFormatVersionType24Iv0 ConfigMessageFormatVersionType = "2.4-IV0"
ConfigMessageFormatVersionType24Iv1 ConfigMessageFormatVersionType = "2.4-IV1"
ConfigMessageFormatVersionType25 ConfigMessageFormatVersionType = "2.5"
ConfigMessageFormatVersionType25Iv0 ConfigMessageFormatVersionType = "2.5-IV0"
ConfigMessageFormatVersionType26 ConfigMessageFormatVersionType = "2.6"
ConfigMessageFormatVersionType26Iv0 ConfigMessageFormatVersionType = "2.6-IV0"
ConfigMessageFormatVersionType27 ConfigMessageFormatVersionType = "2.7"
ConfigMessageFormatVersionType27Iv0 ConfigMessageFormatVersionType = "2.7-IV0"
ConfigMessageFormatVersionType27Iv1 ConfigMessageFormatVersionType = "2.7-IV1"
ConfigMessageFormatVersionType27Iv2 ConfigMessageFormatVersionType = "2.7-IV2"
ConfigMessageFormatVersionType28 ConfigMessageFormatVersionType = "2.8"
ConfigMessageFormatVersionType28Iv0 ConfigMessageFormatVersionType = "2.8-IV0"
ConfigMessageFormatVersionType28Iv1 ConfigMessageFormatVersionType = "2.8-IV1"
ConfigMessageFormatVersionType30 ConfigMessageFormatVersionType = "3.0"
ConfigMessageFormatVersionType30Iv0 ConfigMessageFormatVersionType = "3.0-IV0"
ConfigMessageFormatVersionType30Iv1 ConfigMessageFormatVersionType = "3.0-IV1"
ConfigMessageFormatVersionType31 ConfigMessageFormatVersionType = "3.1"
ConfigMessageFormatVersionType31Iv0 ConfigMessageFormatVersionType = "3.1-IV0"
ConfigMessageFormatVersionType32 ConfigMessageFormatVersionType = "3.2"
ConfigMessageFormatVersionType32Iv0 ConfigMessageFormatVersionType = "3.2-IV0"
ConfigMessageFormatVersionType33 ConfigMessageFormatVersionType = "3.3"
ConfigMessageFormatVersionType33Iv0 ConfigMessageFormatVersionType = "3.3-IV0"
ConfigMessageFormatVersionType33Iv1 ConfigMessageFormatVersionType = "3.3-IV1"
ConfigMessageFormatVersionType33Iv2 ConfigMessageFormatVersionType = "3.3-IV2"
ConfigMessageFormatVersionType33Iv3 ConfigMessageFormatVersionType = "3.3-IV3"
ConfigMessageFormatVersionType34 ConfigMessageFormatVersionType = "3.4"
ConfigMessageFormatVersionType34Iv0 ConfigMessageFormatVersionType = "3.4-IV0"
ConfigMessageFormatVersionType35 ConfigMessageFormatVersionType = "3.5"
ConfigMessageFormatVersionType35Iv0 ConfigMessageFormatVersionType = "3.5-IV0"
ConfigMessageFormatVersionType35Iv1 ConfigMessageFormatVersionType = "3.5-IV1"
ConfigMessageFormatVersionType35Iv2 ConfigMessageFormatVersionType = "3.5-IV2"
ConfigMessageFormatVersionType36 ConfigMessageFormatVersionType = "3.6"
ConfigMessageFormatVersionType36Iv0 ConfigMessageFormatVersionType = "3.6-IV0"
ConfigMessageFormatVersionType36Iv1 ConfigMessageFormatVersionType = "3.6-IV1"
ConfigMessageFormatVersionType36Iv2 ConfigMessageFormatVersionType = "3.6-IV2"
ConfigMessageFormatVersionType37 ConfigMessageFormatVersionType = "3.7"
ConfigMessageFormatVersionType37Iv0 ConfigMessageFormatVersionType = "3.7-IV0"
ConfigMessageFormatVersionType37Iv1 ConfigMessageFormatVersionType = "3.7-IV1"
ConfigMessageFormatVersionType37Iv2 ConfigMessageFormatVersionType = "3.7-IV2"
ConfigMessageFormatVersionType37Iv3 ConfigMessageFormatVersionType = "3.7-IV3"
ConfigMessageFormatVersionType37Iv4 ConfigMessageFormatVersionType = "3.7-IV4"
ConfigMessageFormatVersionType38 ConfigMessageFormatVersionType = "3.8"
ConfigMessageFormatVersionType38Iv0 ConfigMessageFormatVersionType = "3.8-IV0"
ConfigMessageFormatVersionType39 ConfigMessageFormatVersionType = "3.9"
ConfigMessageFormatVersionType39Iv0 ConfigMessageFormatVersionType = "3.9-IV0"
ConfigMessageFormatVersionType39Iv1 ConfigMessageFormatVersionType = "3.9-IV1"
ConfigMessageFormatVersionType40 ConfigMessageFormatVersionType = "4.0"
ConfigMessageFormatVersionType40Iv0 ConfigMessageFormatVersionType = "4.0-IV0"
ConfigMessageFormatVersionType41 ConfigMessageFormatVersionType = "4.1"
ConfigMessageFormatVersionType41Iv0 ConfigMessageFormatVersionType = "4.1-IV0"
)
type ConfigOut ¶
type ConfigOut struct {
CleanupPolicy *CleanupPolicyOut `json:"cleanup_policy,omitempty"` // cleanup.policy value, source and synonyms
CompressionType *CompressionTypeOut `json:"compression_type,omitempty"` // compression.type value, source and synonyms
DeleteRetentionMs *DeleteRetentionMsOut `json:"delete_retention_ms,omitempty"` // delete.retention.ms value, source and synonyms
FileDeleteDelayMs *FileDeleteDelayMsOut `json:"file_delete_delay_ms,omitempty"` // file.delete.delay.ms value, source and synonyms
FlushMessages *FlushMessagesOut `json:"flush_messages,omitempty"` // flush.messages value, source and synonyms
FlushMs *FlushMsOut `json:"flush_ms,omitempty"` // flush.ms value, source and synonyms
IndexIntervalBytes *IndexIntervalBytesOut `json:"index_interval_bytes,omitempty"` // index.interval.bytes value, source and synonyms
InklessEnable *InklessEnableOut `json:"inkless_enable,omitempty"` // inkless.enable value, source and synonyms
LocalRetentionBytes *LocalRetentionBytesOut `json:"local_retention_bytes,omitempty"` // local.retention.bytes value, source and synonyms
LocalRetentionMs *LocalRetentionMsOut `json:"local_retention_ms,omitempty"` // local.retention.ms value, source and synonyms
MaxCompactionLagMs *MaxCompactionLagMsOut `json:"max_compaction_lag_ms,omitempty"` // max.compaction.lag.ms value, source and synonyms
MaxMessageBytes *MaxMessageBytesOut `json:"max_message_bytes,omitempty"` // max.message.bytes value, source and synonyms
MessageDownconversionEnable *MessageDownconversionEnableOut `json:"message_downconversion_enable,omitempty"` // message.downconversion.enable value, source and synonyms
MessageFormatVersion *MessageFormatVersionOut `json:"message_format_version,omitempty"` // message.format.version value, source and synonyms
MessageTimestampDifferenceMaxMs *MessageTimestampDifferenceMaxMsOut `json:"message_timestamp_difference_max_ms,omitempty"` // message.timestamp.difference.max.ms value, source and synonyms
MessageTimestampType *MessageTimestampTypeOut `json:"message_timestamp_type,omitempty"` // message.timestamp.type value, source and synonyms
MinCleanableDirtyRatio *MinCleanableDirtyRatioOut `json:"min_cleanable_dirty_ratio,omitempty"` // min.cleanable.dirty.ratio value, source and synonyms
MinCompactionLagMs *MinCompactionLagMsOut `json:"min_compaction_lag_ms,omitempty"` // min.compaction.lag.ms value, source and synonyms
MinInsyncReplicas *MinInsyncReplicasOut `json:"min_insync_replicas,omitempty"` // min.insync.replicas value, source and synonyms
Preallocate *PreallocateOut `json:"preallocate,omitempty"` // preallocate value, source and synonyms
RemoteStorageEnable *RemoteStorageEnableOut `json:"remote_storage_enable,omitempty"` // remote.storage.enable value, source and synonyms
RetentionBytes *RetentionBytesOut `json:"retention_bytes,omitempty"` // retention.bytes value, source and synonyms
RetentionMs *RetentionMsOut `json:"retention_ms,omitempty"` // retention.ms value, source and synonyms
SegmentBytes *SegmentBytesOut `json:"segment_bytes,omitempty"` // segment.bytes value, source and synonyms
SegmentIndexBytes *SegmentIndexBytesOut `json:"segment_index_bytes,omitempty"` // segment.index.bytes value, source and synonyms
SegmentJitterMs *SegmentJitterMsOut `json:"segment_jitter_ms,omitempty"` // segment.jitter.ms value, source and synonyms
SegmentMs *SegmentMsOut `json:"segment_ms,omitempty"` // segment.ms value, source and synonyms
UncleanLeaderElectionEnable *UncleanLeaderElectionEnableOut `json:"unclean_leader_election_enable,omitempty"` // unclean.leader.election.enable value, source and synonyms
}
ConfigOut Kafka topic configuration
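Every entry in ConfigOut is a pointer and may be nil when the broker does not report that setting. A sketch of reading one entry, assuming the ConfigOut value was obtained from a topic-details response; the same value/source/synonyms pattern applies to all entries:

package kafkatopicexample

import (
    "fmt"

    "github.com/aiven/go-client-codegen/handler/kafkatopic" // assumed import path
)

// printRetention prints the effective retention.ms value, its source,
// and any configuration synonyms that contributed to it.
func printRetention(cfg *kafkatopic.ConfigOut) {
    if cfg == nil || cfg.RetentionMs == nil {
        fmt.Println("retention.ms not reported")
        return
    }
    fmt.Printf("retention.ms=%d (source: %s)\n", cfg.RetentionMs.Value, cfg.RetentionMs.Source)
    for _, syn := range cfg.RetentionMs.Synonyms {
        fmt.Printf("  synonym %s=%d (source: %s)\n", syn.Name, syn.Value, syn.Source)
    }
}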
type ConsumerGroupOut ¶
type DeleteRetentionMsOut ¶
type DeleteRetentionMsOut struct {
Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
Synonyms []struct {
Name string `json:"name"` // Synonym name
Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
Value int `json:"value"` // Synonym value
} `json:"synonyms,omitempty"` // Configuration synonyms
Value int `json:"value"` // The amount of time to retain delete tombstone markers for log compacted topics. This setting also gives a bound on the time in which a consumer must complete a read if they begin from offset 0 to ensure that they get a valid snapshot of the final stage (otherwise delete tombstones may be collected before they complete their scan).
}
DeleteRetentionMsOut delete.retention.ms value, source and synonyms
type FileDeleteDelayMsOut ¶
type FileDeleteDelayMsOut struct {
Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
Synonyms []struct {
Name string `json:"name"` // Synonym name
Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
Value int `json:"value"` // Synonym value
} `json:"synonyms,omitempty"` // Configuration synonyms
Value int `json:"value"` // The time to wait before deleting a file from the filesystem.
}
FileDeleteDelayMsOut file.delete.delay.ms value, source and synonyms
type FlushMessagesOut ¶
type FlushMessagesOut struct {
Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
Synonyms []struct {
Name string `json:"name"` // Synonym name
Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
Value int `json:"value"` // Synonym value
} `json:"synonyms,omitempty"` // Configuration synonyms
Value int `json:"value"` // This setting allows specifying an interval at which we will force an fsync of data written to the log. For example if this was set to 1 we would fsync after every message; if it were 5 we would fsync after every five messages. In general we recommend you not set this and use replication for durability and allow the operating system's background flush capabilities as it is more efficient.
}
FlushMessagesOut flush.messages value, source and synonyms
type FlushMsOut ¶
type FlushMsOut struct {
Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
Synonyms []struct {
Name string `json:"name"` // Synonym name
Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
Value int `json:"value"` // Synonym value
} `json:"synonyms,omitempty"` // Configuration synonyms
Value int `json:"value"` // This setting allows specifying a time interval at which we will force an fsync of data written to the log. For example if this was set to 1000 we would fsync after 1000 ms had passed. In general we recommend you not set this and use replication for durability and allow the operating system's background flush capabilities as it is more efficient.
}
FlushMsOut flush.ms value, source and synonyms
type FormatType ¶
type FormatType string
const ( FormatTypeAvro FormatType = "avro" FormatTypeBinary FormatType = "binary" FormatTypeJson FormatType = "json" FormatTypeJsonschema FormatType = "jsonschema" FormatTypeProtobuf FormatType = "protobuf" )
type Handler ¶
type Handler interface {
// ServiceKafkaTopicCreate create a Kafka topic
// POST /v1/project/{project}/service/{service_name}/topic
// https://api.aiven.io/doc/#tag/Service:_Kafka/operation/ServiceKafkaTopicCreate
ServiceKafkaTopicCreate(ctx context.Context, project string, serviceName string, in *ServiceKafkaTopicCreateIn) error
// ServiceKafkaTopicDelete delete a Kafka topic
// DELETE /v1/project/{project}/service/{service_name}/topic/{topic_name}
// https://api.aiven.io/doc/#tag/Service:_Kafka/operation/ServiceKafkaTopicDelete
ServiceKafkaTopicDelete(ctx context.Context, project string, serviceName string, topicName string) error
// ServiceKafkaTopicGet get Kafka topic info
// GET /v1/project/{project}/service/{service_name}/topic/{topic_name}
// https://api.aiven.io/doc/#tag/Service:_Kafka/operation/ServiceKafkaTopicGet
ServiceKafkaTopicGet(ctx context.Context, project string, serviceName string, topicName string) (*ServiceKafkaTopicGetOut, error)
// ServiceKafkaTopicList get Kafka topic list
// GET /v1/project/{project}/service/{service_name}/topic
// https://api.aiven.io/doc/#tag/Service:_Kafka/operation/ServiceKafkaTopicList
ServiceKafkaTopicList(ctx context.Context, project string, serviceName string) ([]TopicOut, error)
// ServiceKafkaTopicMessageList list kafka topic messages
// POST /v1/project/{project}/service/{service_name}/kafka/rest/topics/{topic_name}/messages
// https://api.aiven.io/doc/#tag/Service:_Kafka/operation/ServiceKafkaTopicMessageList
ServiceKafkaTopicMessageList(ctx context.Context, project string, serviceName string, topicName string, in *ServiceKafkaTopicMessageListIn) ([]MessageOut, error)
// ServiceKafkaTopicMessageProduce produce message into a kafka topic
// POST /v1/project/{project}/service/{service_name}/kafka/rest/topics/{topic_name}/produce
// https://api.aiven.io/doc/#tag/Service:_Kafka/operation/ServiceKafkaTopicMessageProduce
ServiceKafkaTopicMessageProduce(ctx context.Context, project string, serviceName string, topicName string, in *ServiceKafkaTopicMessageProduceIn) (*ServiceKafkaTopicMessageProduceOut, error)
// ServiceKafkaTopicUpdate update a Kafka topic
// PUT /v1/project/{project}/service/{service_name}/topic/{topic_name}
// https://api.aiven.io/doc/#tag/Service:_Kafka/operation/ServiceKafkaTopicUpdate
ServiceKafkaTopicUpdate(ctx context.Context, project string, serviceName string, topicName string, in *ServiceKafkaTopicUpdateIn) error
}
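Because the topic endpoints sit behind this interface, calling code can be written against Handler and exercised with either the concrete KafkaTopicHandler or a test double. A minimal sketch using only the signatures above; the import path and the topic name are assumptions:

package kafkatopicexample

import (
    "context"
    "fmt"

    "github.com/aiven/go-client-codegen/handler/kafkatopic" // assumed import path
)

// listThenInspect lists all topics of a service and fetches details for one of them.
func listThenInspect(ctx context.Context, h kafkatopic.Handler, project, service string) error {
    topics, err := h.ServiceKafkaTopicList(ctx, project, service)
    if err != nil {
        return fmt.Errorf("listing topics: %w", err)
    }
    for _, t := range topics {
        fmt.Printf("topic: %+v\n", t) // TopicOut fields are not reproduced in this excerpt
    }
    detail, err := h.ServiceKafkaTopicGet(ctx, project, service, "my-topic") // hypothetical topic name
    if err != nil {
        return fmt.Errorf("getting topic details: %w", err)
    }
    fmt.Printf("details: %+v\n", detail)
    return nil
}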
type IndexIntervalBytesOut ¶
type IndexIntervalBytesOut struct {
Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
Synonyms []struct {
Name string `json:"name"` // Synonym name
Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
Value int `json:"value"` // Synonym value
} `json:"synonyms,omitempty"` // Configuration synonyms
Value int `json:"value"` // This setting controls how frequently Kafka adds an index entry to its offset index. The default setting ensures that we index a message roughly every 4096 bytes. More indexing allows reads to jump closer to the exact position in the log but makes the index larger. You probably don't need to change this.
}
IndexIntervalBytesOut index.interval.bytes value, source and synonyms
type InklessEnableOut ¶ added in v0.108.0
type InklessEnableOut struct {
Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
Synonyms []struct {
Name string `json:"name"` // Synonym name
Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
Value bool `json:"value"` // Synonym value
} `json:"synonyms,omitempty"` // Configuration synonyms
Value bool `json:"value"` // Indicates whether inkless should be enabled. This is only available for BYOC services with Inkless feature enabled.
}
InklessEnableOut inkless.enable value, source and synonyms
type KafkaTopicHandler ¶
type KafkaTopicHandler struct {
// contains filtered or unexported fields
}
func NewHandler ¶
func NewHandler(doer doer) KafkaTopicHandler
func (*KafkaTopicHandler) ServiceKafkaTopicCreate ¶
func (h *KafkaTopicHandler) ServiceKafkaTopicCreate(ctx context.Context, project string, serviceName string, in *ServiceKafkaTopicCreateIn) error
func (*KafkaTopicHandler) ServiceKafkaTopicDelete ¶
func (*KafkaTopicHandler) ServiceKafkaTopicGet ¶
func (h *KafkaTopicHandler) ServiceKafkaTopicGet(ctx context.Context, project string, serviceName string, topicName string) (*ServiceKafkaTopicGetOut, error)
func (*KafkaTopicHandler) ServiceKafkaTopicList ¶
func (*KafkaTopicHandler) ServiceKafkaTopicMessageList ¶
func (h *KafkaTopicHandler) ServiceKafkaTopicMessageList(ctx context.Context, project string, serviceName string, topicName string, in *ServiceKafkaTopicMessageListIn) ([]MessageOut, error)
func (*KafkaTopicHandler) ServiceKafkaTopicMessageProduce ¶
func (h *KafkaTopicHandler) ServiceKafkaTopicMessageProduce(ctx context.Context, project string, serviceName string, topicName string, in *ServiceKafkaTopicMessageProduceIn) (*ServiceKafkaTopicMessageProduceOut, error)
func (*KafkaTopicHandler) ServiceKafkaTopicUpdate ¶
func (h *KafkaTopicHandler) ServiceKafkaTopicUpdate(ctx context.Context, project string, serviceName string, topicName string, in *ServiceKafkaTopicUpdateIn) error
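In practice the concrete handler is obtained from the generated Aiven client rather than by calling NewHandler directly, since doer is unexported. A hedged sketch of a create call that sets only the ServiceKafkaTopicCreateIn fields reproduced in this excerpt:

package kafkatopicexample

import (
    "context"

    "github.com/aiven/go-client-codegen/handler/kafkatopic" // assumed import path
)

// createCompactedTopic sends a create request with a compacted cleanup policy.
func createCompactedTopic(ctx context.Context, h *kafkatopic.KafkaTopicHandler, project, service string) error {
    minISR := 2
    in := &kafkatopic.ServiceKafkaTopicCreateIn{
        CleanupPolicy:     kafkatopic.CleanupPolicyTypeCompact,
        MinInsyncReplicas: &minISR,
        Config: &kafkatopic.ConfigIn{
            CompressionType: kafkatopic.CompressionTypeZstd,
        },
        // The remaining fields of ServiceKafkaTopicCreateIn (including the
        // topic name) continue past this excerpt and are omitted here.
    }
    return h.ServiceKafkaTopicCreate(ctx, project, service, in)
}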
type LocalRetentionBytesOut ¶
type LocalRetentionBytesOut struct {
Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
Synonyms []struct {
Name string `json:"name"` // Synonym name
Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
Value int `json:"value"` // Synonym value
} `json:"synonyms,omitempty"` // Configuration synonyms
Value int `json:"value"` // This configuration controls the maximum bytes tiered storage will retain segment files locally before it will discard old log segments to free up space. If set to -2, the limit is equal to overall retention time. If set to -1, no limit is applied but it's possible only if overall retention is also -1.
}
LocalRetentionBytesOut local.retention.bytes value, source and synonyms
type LocalRetentionMsOut ¶
type LocalRetentionMsOut struct {
Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
Synonyms []struct {
Name string `json:"name"` // Synonym name
Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
Value int `json:"value"` // Synonym value
} `json:"synonyms,omitempty"` // Configuration synonyms
Value int `json:"value"` // This configuration controls the maximum time tiered storage will retain segment files locally before it will discard old log segments to free up space. If set to -2, the time limit is equal to overall retention time. If set to -1, no time limit is applied but it's possible only if overall retention is also -1.
}
LocalRetentionMsOut local.retention.ms value, source and synonyms
type MaxCompactionLagMsOut ¶
type MaxCompactionLagMsOut struct {
Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
Synonyms []struct {
Name string `json:"name"` // Synonym name
Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
Value int `json:"value"` // Synonym value
} `json:"synonyms,omitempty"` // Configuration synonyms
Value int `json:"value"` // The maximum time a message will remain ineligible for compaction in the log. Only applicable for logs that are being compacted.
}
MaxCompactionLagMsOut max.compaction.lag.ms value, source and synonyms
type MaxMessageBytesOut ¶
type MaxMessageBytesOut struct {
Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
Synonyms []struct {
Name string `json:"name"` // Synonym name
Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
Value int `json:"value"` // Synonym value
} `json:"synonyms,omitempty"` // Configuration synonyms
Value int `json:"value"` // The largest record batch size allowed by Kafka (after compression if compression is enabled). If this is increased and there are consumers older than 0.10.2, the consumers' fetch size must also be increased so that they can fetch record batches this large. In the latest message format version, records are always grouped into batches for efficiency. In previous message format versions, uncompressed records are not grouped into batches and this limit only applies to a single record in that case.
}
MaxMessageBytesOut max.message.bytes value, source and synonyms
type MessageDownconversionEnableOut ¶
type MessageDownconversionEnableOut struct {
Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
Synonyms []struct {
Name string `json:"name"` // Synonym name
Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
Value bool `json:"value"` // Synonym value
} `json:"synonyms,omitempty"` // Configuration synonyms
Value bool `json:"value"` // This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. When set to false, broker will not perform down-conversion for consumers expecting an older message format. The broker responds with UNSUPPORTED_VERSION error for consume requests from such older clients. This configuration does not apply to any message format conversion that might be required for replication to followers.
}
MessageDownconversionEnableOut message.downconversion.enable value, source and synonyms
type MessageFormatVersionOut ¶
type MessageFormatVersionOut struct {
Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
Synonyms []struct {
Name string `json:"name"` // Synonym name
Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
Value string `json:"value"` // Synonym value
} `json:"synonyms,omitempty"` // Configuration synonyms
Value MessageFormatVersionType `json:"value"` // Specify the message format version the broker will use to append messages to the logs. The value should be a valid ApiVersion. Some examples are: 0.8.2, 0.9.0.0, 0.10.0, check ApiVersion for more details. By setting a particular message format version, the user is certifying that all the existing messages on disk are smaller than or equal to the specified version. Setting this value incorrectly will cause consumers with older versions to break as they will receive messages with a format that they don't understand. Deprecated in Kafka 4.0+: this configuration is removed and any supplied value will be ignored; for services upgraded to 4.0+, the returned value may be 'None'.
}
MessageFormatVersionOut message.format.version value, source and synonyms
type MessageFormatVersionType ¶
type MessageFormatVersionType string
const (
MessageFormatVersionType0100 MessageFormatVersionType = "0.10.0"
MessageFormatVersionType0100Iv0 MessageFormatVersionType = "0.10.0-IV0"
MessageFormatVersionType0100Iv1 MessageFormatVersionType = "0.10.0-IV1"
MessageFormatVersionType0101 MessageFormatVersionType = "0.10.1"
MessageFormatVersionType0101Iv0 MessageFormatVersionType = "0.10.1-IV0"
MessageFormatVersionType0101Iv1 MessageFormatVersionType = "0.10.1-IV1"
MessageFormatVersionType0101Iv2 MessageFormatVersionType = "0.10.1-IV2"
MessageFormatVersionType0102 MessageFormatVersionType = "0.10.2"
MessageFormatVersionType0102Iv0 MessageFormatVersionType = "0.10.2-IV0"
MessageFormatVersionType0110 MessageFormatVersionType = "0.11.0"
MessageFormatVersionType0110Iv0 MessageFormatVersionType = "0.11.0-IV0"
MessageFormatVersionType0110Iv1 MessageFormatVersionType = "0.11.0-IV1"
MessageFormatVersionType0110Iv2 MessageFormatVersionType = "0.11.0-IV2"
MessageFormatVersionType080 MessageFormatVersionType = "0.8.0"
MessageFormatVersionType081 MessageFormatVersionType = "0.8.1"
MessageFormatVersionType082 MessageFormatVersionType = "0.8.2"
MessageFormatVersionType090 MessageFormatVersionType = "0.9.0"
MessageFormatVersionType10 MessageFormatVersionType = "1.0"
MessageFormatVersionType10Iv0 MessageFormatVersionType = "1.0-IV0"
MessageFormatVersionType11 MessageFormatVersionType = "1.1"
MessageFormatVersionType11Iv0 MessageFormatVersionType = "1.1-IV0"
MessageFormatVersionType20 MessageFormatVersionType = "2.0"
MessageFormatVersionType20Iv0 MessageFormatVersionType = "2.0-IV0"
MessageFormatVersionType20Iv1 MessageFormatVersionType = "2.0-IV1"
MessageFormatVersionType21 MessageFormatVersionType = "2.1"
MessageFormatVersionType21Iv0 MessageFormatVersionType = "2.1-IV0"
MessageFormatVersionType21Iv1 MessageFormatVersionType = "2.1-IV1"
MessageFormatVersionType21Iv2 MessageFormatVersionType = "2.1-IV2"
MessageFormatVersionType22 MessageFormatVersionType = "2.2"
MessageFormatVersionType22Iv0 MessageFormatVersionType = "2.2-IV0"
MessageFormatVersionType22Iv1 MessageFormatVersionType = "2.2-IV1"
MessageFormatVersionType23 MessageFormatVersionType = "2.3"
MessageFormatVersionType23Iv0 MessageFormatVersionType = "2.3-IV0"
MessageFormatVersionType23Iv1 MessageFormatVersionType = "2.3-IV1"
MessageFormatVersionType24 MessageFormatVersionType = "2.4"
MessageFormatVersionType24Iv0 MessageFormatVersionType = "2.4-IV0"
MessageFormatVersionType24Iv1 MessageFormatVersionType = "2.4-IV1"
MessageFormatVersionType25 MessageFormatVersionType = "2.5"
MessageFormatVersionType25Iv0 MessageFormatVersionType = "2.5-IV0"
MessageFormatVersionType26 MessageFormatVersionType = "2.6"
MessageFormatVersionType26Iv0 MessageFormatVersionType = "2.6-IV0"
MessageFormatVersionType27 MessageFormatVersionType = "2.7"
MessageFormatVersionType27Iv0 MessageFormatVersionType = "2.7-IV0"
MessageFormatVersionType27Iv1 MessageFormatVersionType = "2.7-IV1"
MessageFormatVersionType27Iv2 MessageFormatVersionType = "2.7-IV2"
MessageFormatVersionType28 MessageFormatVersionType = "2.8"
MessageFormatVersionType28Iv0 MessageFormatVersionType = "2.8-IV0"
MessageFormatVersionType28Iv1 MessageFormatVersionType = "2.8-IV1"
MessageFormatVersionType30 MessageFormatVersionType = "3.0"
MessageFormatVersionType30Iv0 MessageFormatVersionType = "3.0-IV0"
MessageFormatVersionType30Iv1 MessageFormatVersionType = "3.0-IV1"
MessageFormatVersionType31 MessageFormatVersionType = "3.1"
MessageFormatVersionType31Iv0 MessageFormatVersionType = "3.1-IV0"
MessageFormatVersionType32 MessageFormatVersionType = "3.2"
MessageFormatVersionType32Iv0 MessageFormatVersionType = "3.2-IV0"
MessageFormatVersionType33 MessageFormatVersionType = "3.3"
MessageFormatVersionType33Iv0 MessageFormatVersionType = "3.3-IV0"
MessageFormatVersionType33Iv1 MessageFormatVersionType = "3.3-IV1"
MessageFormatVersionType33Iv2 MessageFormatVersionType = "3.3-IV2"
MessageFormatVersionType33Iv3 MessageFormatVersionType = "3.3-IV3"
MessageFormatVersionType34 MessageFormatVersionType = "3.4"
MessageFormatVersionType34Iv0 MessageFormatVersionType = "3.4-IV0"
MessageFormatVersionType35 MessageFormatVersionType = "3.5"
MessageFormatVersionType35Iv0 MessageFormatVersionType = "3.5-IV0"
MessageFormatVersionType35Iv1 MessageFormatVersionType = "3.5-IV1"
MessageFormatVersionType35Iv2 MessageFormatVersionType = "3.5-IV2"
MessageFormatVersionType36 MessageFormatVersionType = "3.6"
MessageFormatVersionType36Iv0 MessageFormatVersionType = "3.6-IV0"
MessageFormatVersionType36Iv1 MessageFormatVersionType = "3.6-IV1"
MessageFormatVersionType36Iv2 MessageFormatVersionType = "3.6-IV2"
MessageFormatVersionType37 MessageFormatVersionType = "3.7"
MessageFormatVersionType37Iv0 MessageFormatVersionType = "3.7-IV0"
MessageFormatVersionType37Iv1 MessageFormatVersionType = "3.7-IV1"
MessageFormatVersionType37Iv2 MessageFormatVersionType = "3.7-IV2"
MessageFormatVersionType37Iv3 MessageFormatVersionType = "3.7-IV3"
MessageFormatVersionType37Iv4 MessageFormatVersionType = "3.7-IV4"
MessageFormatVersionType38 MessageFormatVersionType = "3.8"
MessageFormatVersionType38Iv0 MessageFormatVersionType = "3.8-IV0"
MessageFormatVersionType39 MessageFormatVersionType = "3.9"
MessageFormatVersionType39Iv0 MessageFormatVersionType = "3.9-IV0"
MessageFormatVersionType39Iv1 MessageFormatVersionType = "3.9-IV1"
MessageFormatVersionType40 MessageFormatVersionType = "4.0"
MessageFormatVersionType40Iv0 MessageFormatVersionType = "4.0-IV0"
MessageFormatVersionType41 MessageFormatVersionType = "4.1"
MessageFormatVersionType41Iv0 MessageFormatVersionType = "4.1-IV0"
MessageFormatVersionTypeNone MessageFormatVersionType = "None"
)
type MessageOut ¶
type MessageOut struct {
Key map[string]any `json:"key,omitempty"` // The message key, formatted according to the embedded format
Offset *int `json:"offset,omitempty"` // Offset of the message
Partition *int `json:"partition,omitempty"` // Partition of the message
Topic *string `json:"topic,omitempty"` // The name of the topic
Value map[string]any `json:"value,omitempty"` // The message value, formatted according to the embedded format
}
type MessageTimestampDifferenceMaxMsOut ¶
type MessageTimestampDifferenceMaxMsOut struct {
Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
Synonyms []struct {
Name string `json:"name"` // Synonym name
Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
Value int `json:"value"` // Synonym value
} `json:"synonyms,omitempty"` // Configuration synonyms
Value int `json:"value"` // The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message. If message.timestamp.type=CreateTime, a message will be rejected if the difference in timestamp exceeds this threshold. This configuration is ignored if message.timestamp.type=LogAppendTime.
}
MessageTimestampDifferenceMaxMsOut message.timestamp.difference.max.ms value, source and synonyms
type MessageTimestampType ¶
type MessageTimestampType string
const ( MessageTimestampTypeCreateTime MessageTimestampType = "CreateTime" MessageTimestampTypeLogAppendTime MessageTimestampType = "LogAppendTime" )
type MessageTimestampTypeOut ¶
type MessageTimestampTypeOut struct {
Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
Synonyms []struct {
Name string `json:"name"` // Synonym name
Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
Value string `json:"value"` // Synonym value
} `json:"synonyms,omitempty"` // Configuration synonyms
Value MessageTimestampTypeValue `json:"value"` // Define whether the timestamp in the message is message create time or log append time.
}
MessageTimestampTypeOut message.timestamp.type value, source and synonyms
type MessageTimestampTypeValue ¶ added in v0.3.0
type MessageTimestampTypeValue string
const ( MessageTimestampTypeValueCreateTime MessageTimestampTypeValue = "CreateTime" MessageTimestampTypeValueLogAppendTime MessageTimestampTypeValue = "LogAppendTime" )
type MinCleanableDirtyRatioOut ¶
type MinCleanableDirtyRatioOut struct {
Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
Synonyms []struct {
Name string `json:"name"` // Synonym name
Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
Value float64 `json:"value"` // Synonym value
} `json:"synonyms,omitempty"` // Configuration synonyms
Value float64 `json:"value"` // This configuration controls how frequently the log compactor will attempt to clean the log (assuming log compaction is enabled). By default we will avoid cleaning a log where more than 50% of the log has been compacted. This ratio bounds the maximum space wasted in the log by duplicates (at 50% at most 50% of the log could be duplicates). A higher ratio will mean fewer, more efficient cleanings but will mean more wasted space in the log. If the max.compaction.lag.ms or the min.compaction.lag.ms configurations are also specified, then the log compactor considers the log to be eligible for compaction as soon as either: (i) the dirty ratio threshold has been met and the log has had dirty (uncompacted) records for at least the min.compaction.lag.ms duration, or (ii) if the log has had dirty (uncompacted) records for at most the max.compaction.lag.ms period.
}
MinCleanableDirtyRatioOut min.cleanable.dirty.ratio value, source and synonyms
type MinCompactionLagMsOut ¶
type MinCompactionLagMsOut struct {
Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
Synonyms []struct {
Name string `json:"name"` // Synonym name
Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
Value int `json:"value"` // Synonym value
} `json:"synonyms,omitempty"` // Configuration synonyms
Value int `json:"value"` // The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
}
MinCompactionLagMsOut min.compaction.lag.ms value, source and synonyms
type MinInsyncReplicasOut ¶
type MinInsyncReplicasOut struct {
Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
Synonyms []struct {
Name string `json:"name"` // Synonym name
Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
Value int `json:"value"` // Synonym value
} `json:"synonyms,omitempty"` // Configuration synonyms
Value int `json:"value"` // When a producer sets acks to 'all' (or '-1'), this configuration specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. If this minimum cannot be met, then the producer will raise an exception (either NotEnoughReplicas or NotEnoughReplicasAfterAppend). When used together, min.insync.replicas and acks allow you to enforce greater durability guarantees. A typical scenario would be to create a topic with a replication factor of 3, set min.insync.replicas to 2, and produce with acks of 'all'. This will ensure that the producer raises an exception if a majority of replicas do not receive a write.
}
MinInsyncReplicasOut min.insync.replicas value, source and synonyms
type OffsetOut ¶
type OffsetOut struct {
Error *string `json:"error,omitempty"` // An error message describing why the operation failed, or null if it succeeded
ErrorCode *int `json:"error_code,omitempty"` // An error code classifying the reason this operation failed, or null if it succeeded. 1 = Non-retriable Kafka exception, 2 = Retriable Kafka exception; the message might be sent successfully if retried
Offset *int `json:"offset,omitempty"` // Offset of the message, or null if publishing the message failed
Partition *int `json:"partition,omitempty"` // Partition the message was published to, or null if publishing the message failed
}
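A sketch of checking per-partition results after producing messages. Where the OffsetOut slice comes from (the produce response) is assumed, since that structure is not reproduced here; per the field documentation above, error code 2 marks retriable failures:

package kafkatopicexample

import (
    "fmt"

    "github.com/aiven/go-client-codegen/handler/kafkatopic" // assumed import path
)

// reportProduceResults prints one line per partition result and flags retriable failures.
func reportProduceResults(offsets []kafkatopic.OffsetOut) {
    for _, o := range offsets {
        if o.Error != nil {
            retriable := o.ErrorCode != nil && *o.ErrorCode == 2
            fmt.Printf("publish failed (retriable=%t): %s\n", retriable, *o.Error)
            continue
        }
        if o.Partition != nil && o.Offset != nil {
            fmt.Printf("published to partition %d at offset %d\n", *o.Partition, *o.Offset)
        }
    }
}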
type PartitionOut ¶
type PartitionOut struct {
ConsumerGroups []ConsumerGroupOut `json:"consumer_groups"` // List of Kafka consumer groups
EarliestOffset int `json:"earliest_offset"` // Earliest partition message offset
Isr int `json:"isr"` // Number of In Sync Replicas (ISR)
LatestOffset int `json:"latest_offset"` // Latest partition message offset
Partition int `json:"partition"` // Partition number
RemoteSize *int `json:"remote_size,omitempty"` // Size of tiered data from partition in bytes
Size int `json:"size"` // Size of partition in bytes
}
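A small sketch of summarising the EarliestOffset and LatestOffset fields, assuming the PartitionOut slice comes from a topic-details response:

package kafkatopicexample

import "github.com/aiven/go-client-codegen/handler/kafkatopic" // assumed import path

// retainedMessages approximates the number of messages currently retained
// across all partitions by summing each partition's offset span.
func retainedMessages(parts []kafkatopic.PartitionOut) int {
    total := 0
    for _, p := range parts {
        total += p.LatestOffset - p.EarliestOffset
    }
    return total
}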
type PreallocateOut ¶
type PreallocateOut struct {
Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
Synonyms []struct {
Name string `json:"name"` // Synonym name
Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
Value bool `json:"value"` // Synonym value
} `json:"synonyms,omitempty"` // Configuration synonyms
Value bool `json:"value"` // True if we should preallocate the file on disk when creating a new log segment.
}
PreallocateOut preallocate value, source and synonyms
type RemoteStorageEnableOut ¶
type RemoteStorageEnableOut struct {
Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
Synonyms []struct {
Name string `json:"name"` // Synonym name
Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
Value bool `json:"value"` // Synonym value
} `json:"synonyms,omitempty"` // Configuration synonyms
Value bool `json:"value"` // Indicates whether tiered storage should be enabled. This is only available for services with Tiered Storage feature enabled.
}
RemoteStorageEnableOut remote.storage.enable value, source and synonyms
type RetentionBytesOut ¶
type RetentionBytesOut struct {
Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
Synonyms []struct {
Name string `json:"name"` // Synonym name
Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
Value int `json:"value"` // Synonym value
} `json:"synonyms,omitempty"` // Configuration synonyms
Value int `json:"value"` // This configuration controls the maximum size a partition (which consists of log segments) can grow to before we will discard old log segments to free up space if we are using the 'delete' retention policy. By default there is no size limit only a time limit. Since this limit is enforced at the partition level, multiply it by the number of partitions to compute the topic retention in bytes.
}
RetentionBytesOut retention.bytes value, source and synonyms
type RetentionMsOut ¶
type RetentionMsOut struct {
Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
Synonyms []struct {
Name string `json:"name"` // Synonym name
Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
Value int `json:"value"` // Synonym value
} `json:"synonyms,omitempty"` // Configuration synonyms
Value int `json:"value"` // This configuration controls the maximum time we will retain a log before we will discard old log segments to free up space if we are using the 'delete' retention policy. This represents an SLA on how soon consumers must read their data. If set to -1, no time limit is applied.
}
RetentionMsOut retention.ms value, source and synonyms
type SegmentBytesOut ¶
type SegmentBytesOut struct {
Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
Synonyms []struct {
Name string `json:"name"` // Synonym name
Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
Value int `json:"value"` // Synonym value
} `json:"synonyms,omitempty"` // Configuration synonyms
Value int `json:"value"` // This configuration controls the segment file size for the log. Retention and cleaning are always done a file at a time so a larger segment size means fewer files but less granular control over retention. Setting this to a very low value has consequences, and the Aiven management plane ignores values less than 10 megabytes.
}
SegmentBytesOut segment.bytes value, source and synonyms
type SegmentIndexBytesOut ¶
type SegmentIndexBytesOut struct {
Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
Synonyms []struct {
Name string `json:"name"` // Synonym name
Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
Value int `json:"value"` // Synonym value
} `json:"synonyms,omitempty"` // Configuration synonyms
Value int `json:"value"` // This configuration controls the size of the index that maps offsets to file positions. We preallocate this index file and shrink it only after log rolls. You generally should not need to change this setting.
}
SegmentIndexBytesOut segment.index.bytes value, source and synonyms
type SegmentJitterMsOut ¶
type SegmentJitterMsOut struct {
Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
Synonyms []struct {
Name string `json:"name"` // Synonym name
Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
Value int `json:"value"` // Synonym value
} `json:"synonyms,omitempty"` // Configuration synonyms
Value int `json:"value"` // The maximum random jitter subtracted from the scheduled segment roll time to avoid thundering herds of segment rolling
}
SegmentJitterMsOut segment.jitter.ms value, source and synonyms
type SegmentMsOut ¶
type SegmentMsOut struct {
Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
Synonyms []struct {
Name string `json:"name"` // Synonym name
Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
Value int `json:"value"` // Synonym value
} `json:"synonyms,omitempty"` // Configuration synonyms
Value int `json:"value"` // This configuration controls the period of time after which Kafka will force the log to roll even if the segment file isn't full to ensure that retention can delete or compact old data. Setting this to a very low value has consequences, and the Aiven management plane ignores values less than 10 seconds.
}
SegmentMsOut segment.ms value, source and synonyms
type ServiceKafkaTopicCreateIn ¶
type ServiceKafkaTopicCreateIn struct {
CleanupPolicy CleanupPolicyType `json:"cleanup_policy,omitempty"` // The retention policy to use on old segments. Possible values include 'delete', 'compact', or a comma-separated list of them. The default policy ('delete') will discard old segments when their retention time or size limit has been reached. The 'compact' setting will enable log compaction on the topic.
Config *ConfigIn `json:"config,omitempty"` // Kafka topic configuration
MinInsyncReplicas *int `json:"min_insync_replicas,omitempty"` // When a producer sets acks to 'all' (or '-1'), this configuration specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. If this minimum cannot be met, then the producer will raise an exception (either NotEnoughReplicas or NotEnoughReplicasAfterAppend). When used together, min.insync.replicas and acks allow you to enforce greater durability guarantees. A typical scenario would be to create a topic with a replication factor of 3, set min.insync.replicas to 2, and produce with acks of 'all'. This will ensure that the producer raises an exception if a majority of replicas do not receive a write.
OwnerUserGroupId *string `json:"owner_user_group_id,omitempty"` // The user group that owns this topic
Partitions *int `json:"partitions,omitempty"` // Number of partitions
Replication *int `json:"replication,omitempty"` // Number of replicas
RetentionBytes *int `json:"retention_bytes,omitempty"` // This configuration controls the maximum size a partition (which consists of log segments) can grow to before we will discard old log segments to free up space if we are using the 'delete' retention policy. By default there is no size limit, only a time limit. Since this limit is enforced at the partition level, multiply it by the number of partitions to compute the topic retention in bytes.
RetentionHours *int `json:"retention_hours,omitempty"` // DEPRECATED: use config.retention_ms
Tags *[]TagIn `json:"tags,omitempty"` // Topic tags
TopicDescription *string `json:"topic_description,omitempty"` // Topic description
TopicName string `json:"topic_name"` // Topic name
}
ServiceKafkaTopicCreateIn ServiceKafkaTopicCreateRequestBody
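A minimal sketch of building this request body and creating a topic. It assumes h is an initialized *KafkaTopicHandler and that the elided final parameter of ServiceKafkaTopicCreate is *ServiceKafkaTopicCreateIn; the project, service, and topic names are placeholders.

// Sketch only. Assumes the "context" import; names are placeholders.
func createExampleTopic(ctx context.Context, h *KafkaTopicHandler) error {
    partitions, replication, minISR := 3, 3, 2
    req := &ServiceKafkaTopicCreateIn{
        TopicName:         "orders",
        Partitions:        &partitions,
        Replication:       &replication,
        MinInsyncReplicas: &minISR,
    }
    // Assumption: the trailing "..." parameter in the method signature is this request body.
    return h.ServiceKafkaTopicCreate(ctx, "my-project", "my-kafka", req)
}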
type ServiceKafkaTopicGetOut ¶
type ServiceKafkaTopicGetOut struct {
CleanupPolicy string `json:"cleanup_policy"` // The retention policy to use on old segments. Possible values include 'delete', 'compact', or a comma-separated list of them. The default policy ('delete') will discard old segments when their retention time or size limit has been reached. The 'compact' setting will enable log compaction on the topic.
Config ConfigOut `json:"config"` // Kafka topic configuration
MinInsyncReplicas int `json:"min_insync_replicas"` // When a producer sets acks to 'all' (or '-1'), this configuration specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. If this minimum cannot be met, then the producer will raise an exception (either NotEnoughReplicas or NotEnoughReplicasAfterAppend). When used together, min.insync.replicas and acks allow you to enforce greater durability guarantees. A typical scenario would be to create a topic with a replication factor of 3, set min.insync.replicas to 2, and produce with acks of 'all'. This will ensure that the producer raises an exception if a majority of replicas do not receive a write.
OwnerUserGroupId string `json:"owner_user_group_id"` // The user group that owns this topic
Partitions []PartitionOut `json:"partitions"` // Topic partitions
Replication int `json:"replication"` // Number of replicas
RetentionBytes int `json:"retention_bytes"` // This configuration controls the maximum size a partition (which consists of log segments) can grow to before we will discard old log segments to free up space if we are using the 'delete' retention policy. By default there is no size limit, only a time limit. Since this limit is enforced at the partition level, multiply it by the number of partitions to compute the topic retention in bytes.
RetentionHours int `json:"retention_hours"` // DEPRECATED: use config.retention_ms
State TopicStateType `json:"state"` // Topic state
Tags []TagOut `json:"tags"` // Topic tags
TopicDescription string `json:"topic_description"` // Topic description
TopicName string `json:"topic_name"` // Topic name
}
ServiceKafkaTopicGetOut Kafka topic information
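A short sketch of fetching a topic and reading its state and per-partition offsets; h is assumed to be an initialized *KafkaTopicHandler and the project, service, and topic names are placeholders.

// Sketch only. Assumes the "context" and "fmt" imports.
func printTopic(ctx context.Context, h *KafkaTopicHandler) error {
    topic, err := h.ServiceKafkaTopicGet(ctx, "my-project", "my-kafka", "orders")
    if err != nil {
        return err
    }
    fmt.Printf("topic %s state=%s replication=%d\n", topic.TopicName, topic.State, topic.Replication)
    for _, p := range topic.Partitions {
        fmt.Printf("  partition %d: offsets %d..%d, isr=%d, size=%d bytes\n",
            p.Partition, p.EarliestOffset, p.LatestOffset, p.Isr, p.Size)
    }
    return nil
}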
type ServiceKafkaTopicMessageListIn ¶
type ServiceKafkaTopicMessageListIn struct {
Format FormatType `json:"format,omitempty"` // The format of consumed messages, which is used to convert messages into a JSON-compatible form. If unspecified, defaults to binary
MaxBytes *int `json:"max_bytes,omitempty"` // The maximum number of bytes of unencoded keys and values that should be included in the response. This provides approximate control over the size of responses and the amount of memory required to store the decoded response. The actual limit will be the minimum of this setting and the server-side configuration consumer.request.max.bytes. Default is unlimited
Partitions map[string]any `json:"partitions"` // Object of desired partition / offset mappings
Timeout *int `json:"timeout,omitempty"` // The maximum total time to wait for messages for a request if the maximum request size has not yet been reached
}
ServiceKafkaTopicMessageListIn ServiceKafkaTopicMessageListRequestBody
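A hedged sketch of consuming messages through this endpoint. The shape of the values inside the Partitions map is not documented in this section, so the per-partition object below (an "offset" key) is an assumption, as is treating Timeout as milliseconds.

// Sketch only. Assumes the "context" and "fmt" imports; partition map layout and timeout unit are assumptions.
func listMessages(ctx context.Context, h *KafkaTopicHandler) error {
    timeout := 5000 // assumed to be milliseconds to wait if max_bytes is not reached
    req := &ServiceKafkaTopicMessageListIn{
        Partitions: map[string]any{
            "0": map[string]any{"offset": 0}, // read partition 0 from offset 0 (assumed value shape)
        },
        Timeout: &timeout,
    }
    // Assumption: the trailing "..." parameter of ServiceKafkaTopicMessageList is this request body.
    msgs, err := h.ServiceKafkaTopicMessageList(ctx, "my-project", "my-kafka", "orders", req)
    if err != nil {
        return err
    }
    fmt.Printf("received %d messages\n", len(msgs))
    return nil
}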
type ServiceKafkaTopicMessageProduceIn ¶
type ServiceKafkaTopicMessageProduceIn struct {
Format FormatType `json:"format"` // The format of the content.
KeySchema *string `json:"key_schema,omitempty"` // Full schema encoded as a string (e.g. JSON serialized for Avro data)
KeySchemaId *int `json:"key_schema_id,omitempty"` // ID returned by a previous request using the same schema. This ID corresponds to the ID of the schema in the registry.
Records []RecordIn `json:"records"` // List of records to produce to the topic
ValueSchema *string `json:"value_schema,omitempty"` // Full schema encoded as a string (e.g. JSON serialized for Avro data)
ValueSchemaId *int `json:"value_schema_id,omitempty"` // ID returned by a previous request using the same schema. This ID corresponds to the ID of the schema in the registry.
}
ServiceKafkaTopicMessageProduceIn ServiceKafkaTopicMessageProduceRequestBody
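A sketch of producing records and reading back the reported offsets. The fields of RecordIn are not shown in this section, so the record below is left as a zero value with a comment, and Format is built from a plain string on the assumption that FormatType is a string-backed type whose valid values come from FormatTypeChoices().

// Sketch only. Assumes the "context" and "fmt" imports; RecordIn contents and the FormatType value are assumptions.
func produceRecords(ctx context.Context, h *KafkaTopicHandler) error {
    rec := RecordIn{} // populate key/value as defined by RecordIn (not shown in this section)
    req := &ServiceKafkaTopicMessageProduceIn{
        Format:  FormatType("json"), // assumed to be one of FormatTypeChoices()
        Records: []RecordIn{rec},
    }
    out, err := h.ServiceKafkaTopicMessageProduce(ctx, "my-project", "my-kafka", "orders", req)
    if err != nil {
        return err
    }
    fmt.Printf("produced %d records\n", len(out.Offsets))
    return nil
}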
type ServiceKafkaTopicMessageProduceOut ¶
type ServiceKafkaTopicMessageProduceOut struct {
KeySchemaId *int `json:"key_schema_id,omitempty"` // The ID for the schema used to produce keys, or null if keys were not used
Offsets []OffsetOut `json:"offsets,omitempty"` // List of offsets for the produced records
ValueSchemaId *int `json:"value_schema_id,omitempty"` // The ID for the schema used to produce values
}
ServiceKafkaTopicMessageProduceOut ServiceKafkaTopicMessageProduceResponse
type ServiceKafkaTopicUpdateConfigIn ¶ added in v0.118.0
type ServiceKafkaTopicUpdateConfigIn struct {
CleanupPolicy CleanupPolicyType `json:"cleanup_policy,omitempty"` // The retention policy to use on old segments. Possible values include 'delete', 'compact', or a comma-separated list of them. The default policy ('delete') will discard old segments when their retention time or size limit has been reached. The 'compact' setting will enable log compaction on the topic.
CompressionType CompressionType `json:"compression_type,omitempty"` // Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression codec set by the producer.
DeleteRetentionMs *int `json:"delete_retention_ms,omitempty"` // The amount of time to retain delete tombstone markers for log compacted topics. This setting also gives a bound on the time in which a consumer must complete a read if they begin from offset 0 to ensure that they get a valid snapshot of the final stage (otherwise delete tombstones may be collected before they complete their scan).
FileDeleteDelayMs *int `json:"file_delete_delay_ms,omitempty"` // The time to wait before deleting a file from the filesystem.
FlushMessages *int `json:"flush_messages,omitempty"` // This setting allows specifying an interval at which we will force an fsync of data written to the log. For example if this was set to 1 we would fsync after every message; if it were 5 we would fsync after every five messages. In general we recommend you not set this and use replication for durability and allow the operating system's background flush capabilities as it is more efficient.
FlushMs *int `json:"flush_ms,omitempty"` // This setting allows specifying a time interval at which we will force an fsync of data written to the log. For example if this was set to 1000 we would fsync after 1000 ms had passed. In general we recommend you not set this and use replication for durability and allow the operating system's background flush capabilities as it is more efficient.
IndexIntervalBytes *int `json:"index_interval_bytes,omitempty"` // This setting controls how frequently Kafka adds an index entry to its offset index. The default setting ensures that we index a message roughly every 4096 bytes. More indexing allows reads to jump closer to the exact position in the log but makes the index larger. You probably don't need to change this.
InklessEnable *bool `json:"inkless_enable,omitempty"` // Indicates whether inkless should be enabled. This is only available for BYOC services with Inkless feature enabled.
LocalRetentionBytes *int `json:"local_retention_bytes,omitempty"` // This configuration controls the maximum bytes tiered storage will retain segment files locally before it will discard old log segments to free up space. If set to -2, the limit is equal to overall retention time. If set to -1, no limit is applied but it's possible only if overall retention is also -1.
LocalRetentionMs *int `json:"local_retention_ms,omitempty"` // This configuration controls the maximum time tiered storage will retain segment files locally before it will discard old log segments to free up space. If set to -2, the time limit is equal to overall retention time. If set to -1, no time limit is applied but it's possible only if overall retention is also -1.
MaxCompactionLagMs *int `json:"max_compaction_lag_ms,omitempty"` // The maximum time a message will remain ineligible for compaction in the log. Only applicable for logs that are being compacted.
MaxMessageBytes *int `json:"max_message_bytes,omitempty"` // The largest record batch size allowed by Kafka (after compression if compression is enabled). If this is increased and there are consumers older than 0.10.2, the consumers' fetch size must also be increased so that they can fetch record batches this large. In the latest message format version, records are always grouped into batches for efficiency. In previous message format versions, uncompressed records are not grouped into batches and this limit only applies to a single record in that case.
MessageDownconversionEnable *bool `json:"message_downconversion_enable,omitempty"` // This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. When set to false, broker will not perform down-conversion for consumers expecting an older message format. The broker responds with UNSUPPORTED_VERSION error for consume requests from such older clients. This configuration does not apply to any message format conversion that might be required for replication to followers.
MessageFormatVersion ConfigMessageFormatVersionType `json:"message_format_version,omitempty"` // Specify the message format version the broker will use to append messages to the logs. The value should be a valid ApiVersion. Some examples are: 0.8.2, 0.9.0.0, 0.10.0, check ApiVersion for more details. By setting a particular message format version, the user is certifying that all the existing messages on disk are smaller or equal than the specified version. Setting this value incorrectly will cause consumers with older versions to break as they will receive messages with a format that they don't understand. Deprecated in Kafka 4.0+: this configuration is removed and any supplied value will be ignored; for services upgraded to 4.0+, the returned value may be 'None'.
MessageTimestampDifferenceMaxMs *int `json:"message_timestamp_difference_max_ms,omitempty"` // The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message. If message.timestamp.type=CreateTime, a message will be rejected if the difference in timestamp exceeds this threshold. This configuration is ignored if message.timestamp.type=LogAppendTime.
MessageTimestampType MessageTimestampType `json:"message_timestamp_type,omitempty"` // Define whether the timestamp in the message is message create time or log append time.
MinCleanableDirtyRatio *float64 `json:"min_cleanable_dirty_ratio,omitempty"` // This configuration controls how frequently the log compactor will attempt to clean the log (assuming log compaction is enabled). By default we will avoid cleaning a log where more than 50% of the log has been compacted. This ratio bounds the maximum space wasted in the log by duplicates (at 50% at most 50% of the log could be duplicates). A higher ratio will mean fewer, more efficient cleanings but will mean more wasted space in the log. If the max.compaction.lag.ms or the min.compaction.lag.ms configurations are also specified, then the log compactor considers the log to be eligible for compaction as soon as either: (i) the dirty ratio threshold has been met and the log has had dirty (uncompacted) records for at least the min.compaction.lag.ms duration, or (ii) if the log has had dirty (uncompacted) records for at most the max.compaction.lag.ms period.
MinCompactionLagMs *int `json:"min_compaction_lag_ms,omitempty"` // The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
MinInsyncReplicas *int `json:"min_insync_replicas,omitempty"` // When a producer sets acks to 'all' (or '-1'), this configuration specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. If this minimum cannot be met, then the producer will raise an exception (either NotEnoughReplicas or NotEnoughReplicasAfterAppend). When used together, min.insync.replicas and acks allow you to enforce greater durability guarantees. A typical scenario would be to create a topic with a replication factor of 3, set min.insync.replicas to 2, and produce with acks of 'all'. This will ensure that the producer raises an exception if a majority of replicas do not receive a write.
Preallocate *bool `json:"preallocate,omitempty"` // True if we should preallocate the file on disk when creating a new log segment.
RemoteStorageEnable *bool `json:"remote_storage_enable,omitempty"` // Indicates whether tiered storage should be enabled. This is only available for services with Tiered Storage feature enabled.
RetentionBytes *int `json:"retention_bytes,omitempty"` // This configuration controls the maximum size a partition (which consists of log segments) can grow to before we will discard old log segments to free up space if we are using the 'delete' retention policy. By default there is no size limit, only a time limit. Since this limit is enforced at the partition level, multiply it by the number of partitions to compute the topic retention in bytes.
RetentionMs *int `json:"retention_ms,omitempty"` // This configuration controls the maximum time we will retain a log before we will discard old log segments to free up space if we are using the 'delete' retention policy. This represents an SLA on how soon consumers must read their data. If set to -1, no time limit is applied.
SegmentBytes *int `json:"segment_bytes,omitempty"` // This configuration controls the segment file size for the log. Retention and cleaning are always done a file at a time so a larger segment size means fewer files but less granular control over retention. Setting this to a very low value has consequences, and the Aiven management plane ignores values less than 10 megabytes.
SegmentIndexBytes *int `json:"segment_index_bytes,omitempty"` // This configuration controls the size of the index that maps offsets to file positions. We preallocate this index file and shrink it only after log rolls. You generally should not need to change this setting.
SegmentJitterMs *int `json:"segment_jitter_ms,omitempty"` // The maximum random jitter subtracted from the scheduled segment roll time to avoid thundering herds of segment rolling
SegmentMs *int `json:"segment_ms,omitempty"` // This configuration controls the period of time after which Kafka will force the log to roll even if the segment file isn't full to ensure that retention can delete or compact old data. Setting this to a very low value has consequences, and the Aiven management plane ignores values less than 10 seconds.
UncleanLeaderElectionEnable *bool `json:"unclean_leader_election_enable,omitempty"` // Indicates whether to enable replicas not in the ISR set to be elected as leader as a last resort, even though doing so may result in data loss.
}
ServiceKafkaTopicUpdateConfigIn Kafka topic configuration
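A small sketch of a configuration patch built from this struct; only the fields that should change are set, and everything else is left nil or zero so it is omitted from the JSON body.

// Sketch only. No extra imports needed; plain struct construction.
func weekRetentionConfig() *ServiceKafkaTopicUpdateConfigIn {
    retentionMs := 7 * 24 * 60 * 60 * 1000 // 7 days in milliseconds
    minInsync := 2
    return &ServiceKafkaTopicUpdateConfigIn{
        RetentionMs:       &retentionMs,
        MinInsyncReplicas: &minInsync,
    }
}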
type ServiceKafkaTopicUpdateIn ¶
type ServiceKafkaTopicUpdateIn struct {
Config *ServiceKafkaTopicUpdateConfigIn `json:"config,omitempty"` // Kafka topic configuration
MinInsyncReplicas *int `json:"min_insync_replicas,omitempty"` // When a producer sets acks to 'all' (or '-1'), this configuration specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. If this minimum cannot be met, then the producer will raise an exception (either NotEnoughReplicas or NotEnoughReplicasAfterAppend). When used together, min.insync.replicas and acks allow you to enforce greater durability guarantees. A typical scenario would be to create a topic with a replication factor of 3, set min.insync.replicas to 2, and produce with acks of 'all'. This will ensure that the producer raises an exception if a majority of replicas do not receive a write.
OwnerUserGroupId *string `json:"owner_user_group_id,omitempty"` // The user group that owns this topic
Partitions *int `json:"partitions,omitempty"` // Number of partitions
Replication *int `json:"replication,omitempty"` // Number of replicas
RetentionBytes *int `json:"retention_bytes,omitempty"` // This configuration controls the maximum size a partition (which consists of log segments) can grow to before we will discard old log segments to free up space if we are using the 'delete' retention policy. By default there is no size limit, only a time limit. Since this limit is enforced at the partition level, multiply it by the number of partitions to compute the topic retention in bytes.
RetentionHours *int `json:"retention_hours,omitempty"` // DEPRECATED: use config.retention_ms
Tags *[]TagIn `json:"tags,omitempty"` // Topic tags
TopicDescription *string `json:"topic_description,omitempty"` // Topic description
}
ServiceKafkaTopicUpdateIn ServiceKafkaTopicUpdateRequestBody
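Putting the pieces together, a hedged sketch of an update call; it assumes the elided final parameter of ServiceKafkaTopicUpdate is *ServiceKafkaTopicUpdateIn and reuses the config helper sketched above.

// Sketch only. Assumes the "context" import; weekRetentionConfig is the helper sketched above.
func bumpPartitions(ctx context.Context, h *KafkaTopicHandler) error {
    partitions := 6
    req := &ServiceKafkaTopicUpdateIn{
        Partitions: &partitions,
        Config:     weekRetentionConfig(),
    }
    // Assumption: the trailing "..." parameter in the method signature is this request body.
    return h.ServiceKafkaTopicUpdate(ctx, "my-project", "my-kafka", "orders", req)
}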
type SourceType ¶ added in v0.3.0
type SourceType string
const (
    SourceTypeDefaultConfig              SourceType = "default_config"
    SourceTypeDynamicBrokerConfig        SourceType = "dynamic_broker_config"
    SourceTypeDynamicBrokerLoggerConfig  SourceType = "dynamic_broker_logger_config"
    SourceTypeDynamicDefaultBrokerConfig SourceType = "dynamic_default_broker_config"
    SourceTypeStaticBrokerConfig         SourceType = "static_broker_config"
    SourceTypeTopicConfig                SourceType = "topic_config"
    SourceTypeUnknownConfig              SourceType = "unknown_config"
)
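A tiny sketch of using these constants, for example to tell whether an effective configuration value was set on the topic itself rather than inherited from a broker-level default.

// Sketch only. Reports whether a config entry was set explicitly at the topic level.
func isTopicLevel(source SourceType) bool {
    return source == SourceTypeTopicConfig
}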
type TopicOut ¶
type TopicOut struct {
CleanupPolicy string `json:"cleanup_policy"` // The retention policy to use on old segments. Possible values include 'delete', 'compact', or a comma-separated list of them. The default policy ('delete') will discard old segments when their retention time or size limit has been reached. The 'compact' setting will enable log compaction on the topic.
MinInsyncReplicas int `json:"min_insync_replicas"` // When a producer sets acks to 'all' (or '-1'), this configuration specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. If this minimum cannot be met, then the producer will raise an exception (either NotEnoughReplicas or NotEnoughReplicasAfterAppend). When used together, min.insync.replicas and acks allow you to enforce greater durability guarantees. A typical scenario would be to create a topic with a replication factor of 3, set min.insync.replicas to 2, and produce with acks of 'all'. This will ensure that the producer raises an exception if a majority of replicas do not receive a write.
OwnerUserGroupId string `json:"owner_user_group_id"` // The user group that owns this topic
Partitions int `json:"partitions"` // Number of partitions
RemoteStorageEnable *bool `json:"remote_storage_enable,omitempty"` // Indicates whether tiered storage should be enabled. This is only available for services with Tiered Storage feature enabled.
Replication int `json:"replication"` // Number of replicas
RetentionBytes int `json:"retention_bytes"` // This configuration controls the maximum size a partition (which consists of log segments) can grow to before we will discard old log segments to free up space if we are using the 'delete' retention policy. By default there is no size limit, only a time limit. Since this limit is enforced at the partition level, multiply it by the number of partitions to compute the topic retention in bytes.
RetentionHours int `json:"retention_hours"` // Retention period (hours)
State TopicStateType `json:"state"` // Topic state
Tags []TagOut `json:"tags"` // Topic tags
TopicDescription string `json:"topic_description"` // Topic description
TopicName string `json:"topic_name"` // Topic name
}
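A short sketch of listing topics and printing one summary line per TopicOut; h is assumed to be an initialized *KafkaTopicHandler and the project and service names are placeholders.

// Sketch only. Assumes the "context" and "fmt" imports.
func listTopics(ctx context.Context, h *KafkaTopicHandler) error {
    topics, err := h.ServiceKafkaTopicList(ctx, "my-project", "my-kafka")
    if err != nil {
        return err
    }
    for _, t := range topics {
        fmt.Printf("%s: partitions=%d replication=%d state=%s\n",
            t.TopicName, t.Partitions, t.Replication, t.State)
    }
    return nil
}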
type TopicStateType ¶ added in v0.3.0
type TopicStateType string
const (
    TopicStateTypeActive      TopicStateType = "ACTIVE"
    TopicStateTypeConfiguring TopicStateType = "CONFIGURING"
    TopicStateTypeDeleting    TopicStateType = "DELETING"
)
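Since a topic can report CONFIGURING before it becomes ACTIVE, callers may want to poll until the state settles; a hedged sketch, with an arbitrary polling interval and placeholder project and service names.

// Sketch only. Assumes the "context", "fmt", and "time" imports.
func waitForActive(ctx context.Context, h *KafkaTopicHandler, topicName string) error {
    for {
        topic, err := h.ServiceKafkaTopicGet(ctx, "my-project", "my-kafka", topicName)
        if err != nil {
            return err
        }
        if topic.State == TopicStateTypeActive {
            return nil
        }
        select {
        case <-ctx.Done():
            return fmt.Errorf("topic %s not active: %w", topicName, ctx.Err())
        case <-time.After(5 * time.Second):
        }
    }
}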
type UncleanLeaderElectionEnableOut ¶
type UncleanLeaderElectionEnableOut struct {
Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
Synonyms []struct {
Name string `json:"name"` // Synonym name
Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
Value bool `json:"value"` // Synonym value
} `json:"synonyms,omitempty"` // Configuration synonyms
Value bool `json:"value"` // Indicates whether to enable replicas not in the ISR set to be elected as leader as a last resort, even though doing so may result in data loss.
}
UncleanLeaderElectionEnableOut unclean.leader.election.enable value, source and synonyms