kafkatopic

package
v0.80.0
Published: Jan 28, 2025 License: Apache-2.0 Imports: 4 Imported by: 2

Documentation

Index

Constants

This section is empty.

Variables

This section is empty.

Functions

func CleanupPolicyTypeChoices

func CleanupPolicyTypeChoices() []string

func CompressionTypeChoices

func CompressionTypeChoices() []string

func CompressionTypeValueChoices added in v0.3.0

func CompressionTypeValueChoices() []string

func FormatTypeChoices

func FormatTypeChoices() []string

func MessageFormatVersionTypeChoices

func MessageFormatVersionTypeChoices() []string

func MessageTimestampTypeChoices

func MessageTimestampTypeChoices() []string

func MessageTimestampTypeValueChoices added in v0.3.0

func MessageTimestampTypeValueChoices() []string

func SourceTypeChoices added in v0.3.0

func SourceTypeChoices() []string

func TopicStateTypeChoices added in v0.3.0

func TopicStateTypeChoices() []string
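
The Choices functions return the full set of accepted string values for each enumerated type, which is useful for validating user-supplied settings before building a request. A minimal sketch, assuming the module's handler/kafkatopic import path; the input value is a placeholder:

package main

import (
	"fmt"
	"slices"

	"github.com/aiven/go-client-codegen/handler/kafkatopic" // import path assumed
)

func main() {
	policy := "compact" // placeholder user input

	// CleanupPolicyTypeChoices lists every accepted cleanup.policy value.
	if !slices.Contains(kafkatopic.CleanupPolicyTypeChoices(), policy) {
		fmt.Println("unsupported cleanup policy:", policy)
		return
	}
	fmt.Println("accepted cleanup policy:", policy)
}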

Types

type CleanupPolicyOut

type CleanupPolicyOut struct {
	Source   SourceType `json:"source"` // Source of the Kafka topic configuration entry
	Synonyms []struct {
		Name   string     `json:"name"`   // Synonym name
		Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
		Value  string     `json:"value"`  // Synonym value
	} `json:"synonyms,omitempty"` // Configuration synonyms
	Value string `json:"value"` // The retention policy to use on old segments. Possible values include 'delete', 'compact', or a comma-separated list of them. The default policy ('delete') will discard old segments when their retention time or size limit has been reached. The 'compact' setting will enable log compaction on the topic.
}

CleanupPolicyOut cleanup.policy value, source and synonyms

type CleanupPolicyType

type CleanupPolicyType string
const (
	CleanupPolicyTypeDelete        CleanupPolicyType = "delete"
	CleanupPolicyTypeCompact       CleanupPolicyType = "compact"
	CleanupPolicyTypeCompactDelete CleanupPolicyType = "compact,delete"
)

type CompressionType

type CompressionType string
const (
	CompressionTypeSnappy       CompressionType = "snappy"
	CompressionTypeGzip         CompressionType = "gzip"
	CompressionTypeLz4          CompressionType = "lz4"
	CompressionTypeProducer     CompressionType = "producer"
	CompressionTypeUncompressed CompressionType = "uncompressed"
	CompressionTypeZstd         CompressionType = "zstd"
)

type CompressionTypeOut

type CompressionTypeOut struct {
	Source   SourceType `json:"source"` // Source of the Kafka topic configuration entry
	Synonyms []struct {
		Name   string     `json:"name"`   // Synonym name
		Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
		Value  string     `json:"value"`  // Synonym value
	} `json:"synonyms,omitempty"` // Configuration synonyms
	Value CompressionTypeValue `json:"value"` // Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression codec set by the producer.
}

CompressionTypeOut compression.type value, source and synonyms

type CompressionTypeValue added in v0.3.0

type CompressionTypeValue string
const (
	CompressionTypeValueSnappy       CompressionTypeValue = "snappy"
	CompressionTypeValueGzip         CompressionTypeValue = "gzip"
	CompressionTypeValueLz4          CompressionTypeValue = "lz4"
	CompressionTypeValueProducer     CompressionTypeValue = "producer"
	CompressionTypeValueUncompressed CompressionTypeValue = "uncompressed"
	CompressionTypeValueZstd         CompressionTypeValue = "zstd"
)

type ConfigIn

type ConfigIn struct {
	CleanupPolicy                   CleanupPolicyType        `json:"cleanup_policy,omitempty"`                      // The retention policy to use on old segments. Possible values include 'delete', 'compact', or a comma-separated list of them. The default policy ('delete') will discard old segments when their retention time or size limit has been reached. The 'compact' setting will enable log compaction on the topic.
	CompressionType                 CompressionType          `json:"compression_type,omitempty"`                    // Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression codec set by the producer.
	DeleteRetentionMs               *int                     `json:"delete_retention_ms,omitempty"`                 // The amount of time to retain delete tombstone markers for log compacted topics. This setting also gives a bound on the time in which a consumer must complete a read if they begin from offset 0 to ensure that they get a valid snapshot of the final stage (otherwise delete tombstones may be collected before they complete their scan).
	FileDeleteDelayMs               *int                     `json:"file_delete_delay_ms,omitempty"`                // The time to wait before deleting a file from the filesystem.
	FlushMessages                   *int                     `json:"flush_messages,omitempty"`                      // This setting allows specifying an interval at which we will force an fsync of data written to the log. For example if this was set to 1 we would fsync after every message; if it were 5 we would fsync after every five messages. In general we recommend you not set this and use replication for durability and allow the operating system's background flush capabilities as it is more efficient.
	FlushMs                         *int                     `json:"flush_ms,omitempty"`                            // This setting allows specifying a time interval at which we will force an fsync of data written to the log. For example if this was set to 1000 we would fsync after 1000 ms had passed. In general we recommend you not set this and use replication for durability and allow the operating system's background flush capabilities as it is more efficient.
	IndexIntervalBytes              *int                     `json:"index_interval_bytes,omitempty"`                // This setting controls how frequently Kafka adds an index entry to its offset index. The default setting ensures that we index a message roughly every 4096 bytes. More indexing allows reads to jump closer to the exact position in the log but makes the index larger. You probably don't need to change this.
	LocalRetentionBytes             *int                     `json:"local_retention_bytes,omitempty"`               // This configuration controls the maximum bytes tiered storage will retain segment files locally before it will discard old log segments to free up space. If set to -2, the limit is equal to overall retention time. If set to -1, no limit is applied but it's possible only if overall retention is also -1.
	LocalRetentionMs                *int                     `json:"local_retention_ms,omitempty"`                  // This configuration controls the maximum time tiered storage will retain segment files locally before it will discard old log segments to free up space. If set to -2, the time limit is equal to overall retention time. If set to -1, no time limit is applied but it's possible only if overall retention is also -1.
	MaxCompactionLagMs              *int                     `json:"max_compaction_lag_ms,omitempty"`               // The maximum time a message will remain ineligible for compaction in the log. Only applicable for logs that are being compacted.
	MaxMessageBytes                 *int                     `json:"max_message_bytes,omitempty"`                   // The largest record batch size allowed by Kafka (after compression if compression is enabled). If this is increased and there are consumers older than 0.10.2, the consumers' fetch size must also be increased so that they can fetch record batches this large. In the latest message format version, records are always grouped into batches for efficiency. In previous message format versions, uncompressed records are not grouped into batches and this limit only applies to a single record in that case.
	MessageDownconversionEnable     *bool                    `json:"message_downconversion_enable,omitempty"`       // This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. When set to false, broker will not perform down-conversion for consumers expecting an older message format. The broker responds with UNSUPPORTED_VERSION error for consume requests from such older clients. This configuration does not apply to any message format conversion that might be required for replication to followers.
	MessageFormatVersion            MessageFormatVersionType `json:"message_format_version,omitempty"`              // Specify the message format version the broker will use to append messages to the logs. The value should be a valid ApiVersion. Some examples are: 0.8.2, 0.9.0.0, 0.10.0, check ApiVersion for more details. By setting a particular message format version, the user is certifying that all the existing messages on disk are smaller or equal than the specified version. Setting this value incorrectly will cause consumers with older versions to break as they will receive messages with a format that they don't understand.
	MessageTimestampDifferenceMaxMs *int                     `json:"message_timestamp_difference_max_ms,omitempty"` // The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message. If message.timestamp.type=CreateTime, a message will be rejected if the difference in timestamp exceeds this threshold. This configuration is ignored if message.timestamp.type=LogAppendTime.
	MessageTimestampType            MessageTimestampType     `json:"message_timestamp_type,omitempty"`              // Define whether the timestamp in the message is message create time or log append time.
	MinCleanableDirtyRatio          *float64                 `json:"min_cleanable_dirty_ratio,omitempty"`           // This configuration controls how frequently the log compactor will attempt to clean the log (assuming log compaction is enabled). By default we will avoid cleaning a log where more than 50% of the log has been compacted. This ratio bounds the maximum space wasted in the log by duplicates (at 50% at most 50% of the log could be duplicates). A higher ratio will mean fewer, more efficient cleanings but will mean more wasted space in the log. If the max.compaction.lag.ms or the min.compaction.lag.ms configurations are also specified, then the log compactor considers the log to be eligible for compaction as soon as either: (i) the dirty ratio threshold has been met and the log has had dirty (uncompacted) records for at least the min.compaction.lag.ms duration, or (ii) if the log has had dirty (uncompacted) records for at most the max.compaction.lag.ms period.
	MinCompactionLagMs              *int                     `json:"min_compaction_lag_ms,omitempty"`               // The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
	MinInsyncReplicas               *int                     `json:"min_insync_replicas,omitempty"`                 // When a producer sets acks to 'all' (or '-1'), this configuration specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. If this minimum cannot be met, then the producer will raise an exception (either NotEnoughReplicas or NotEnoughReplicasAfterAppend). When used together, min.insync.replicas and acks allow you to enforce greater durability guarantees. A typical scenario would be to create a topic with a replication factor of 3, set min.insync.replicas to 2, and produce with acks of 'all'. This will ensure that the producer raises an exception if a majority of replicas do not receive a write.
	Preallocate                     *bool                    `json:"preallocate,omitempty"`                         // True if we should preallocate the file on disk when creating a new log segment.
	RemoteStorageEnable             *bool                    `json:"remote_storage_enable,omitempty"`               // Indicates whether tiered storage should be enabled.
	RetentionBytes                  *int                     `json:"retention_bytes,omitempty"`                     // This configuration controls the maximum size a partition (which consists of log segments) can grow to before we will discard old log segments to free up space if we are using the 'delete' retention policy. By default there is no size limit only a time limit. Since this limit is enforced at the partition level, multiply it by the number of partitions to compute the topic retention in bytes.
	RetentionMs                     *int                     `json:"retention_ms,omitempty"`                        // This configuration controls the maximum time we will retain a log before we will discard old log segments to free up space if we are using the 'delete' retention policy. This represents an SLA on how soon consumers must read their data. If set to -1, no time limit is applied.
	SegmentBytes                    *int                     `json:"segment_bytes,omitempty"`                       // This configuration controls the segment file size for the log. Retention and cleaning is always done a file at a time so a larger segment size means fewer files but less granular control over retention. Setting this to a very low value has consequences, and the Aiven management plane ignores values less than 10 megabytes.
	SegmentIndexBytes               *int                     `json:"segment_index_bytes,omitempty"`                 // This configuration controls the size of the index that maps offsets to file positions. We preallocate this index file and shrink it only after log rolls. You generally should not need to change this setting.
	SegmentJitterMs                 *int                     `json:"segment_jitter_ms,omitempty"`                   // The maximum random jitter subtracted from the scheduled segment roll time to avoid thundering herds of segment rolling
	SegmentMs                       *int                     `json:"segment_ms,omitempty"`                          // This configuration controls the period of time after which Kafka will force the log to roll even if the segment file isn't full to ensure that retention can delete or compact old data. Setting this to a very low value has consequences, and the Aiven management plane ignores values less than 10 seconds.
	UncleanLeaderElectionEnable     *bool                    `json:"unclean_leader_election_enable,omitempty"`      // Indicates whether to enable replicas not in the ISR set to be elected as leader as a last resort, even though doing so may result in data loss.
}

ConfigIn Kafka topic configuration
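
Most ConfigIn fields are pointers so that unset options are omitted from the request body; the enum-typed fields take the constants defined on this page. A construction sketch, using a small local ptr helper that is not part of this package and an assumed import path:

package main

import (
	"fmt"

	"github.com/aiven/go-client-codegen/handler/kafkatopic" // import path assumed
)

// ptr is a local helper for optional pointer fields; it is not part of this package.
func ptr[T any](v T) *T { return &v }

func main() {
	cfg := kafkatopic.ConfigIn{
		CleanupPolicy:     kafkatopic.CleanupPolicyTypeCompactDelete,
		CompressionType:   kafkatopic.CompressionTypeZstd,
		RetentionMs:       ptr(604800000), // one week in milliseconds
		MinInsyncReplicas: ptr(2),
	}
	fmt.Printf("%+v\n", cfg)
}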

type ConfigOut

type ConfigOut struct {
	CleanupPolicy                   *CleanupPolicyOut                   `json:"cleanup_policy,omitempty"`                      // cleanup.policy value, source and synonyms
	CompressionType                 *CompressionTypeOut                 `json:"compression_type,omitempty"`                    // compression.type value, source and synonyms
	DeleteRetentionMs               *DeleteRetentionMsOut               `json:"delete_retention_ms,omitempty"`                 // delete.retention.ms value, source and synonyms
	FileDeleteDelayMs               *FileDeleteDelayMsOut               `json:"file_delete_delay_ms,omitempty"`                // file.delete.delay.ms value, source and synonyms
	FlushMessages                   *FlushMessagesOut                   `json:"flush_messages,omitempty"`                      // flush.messages value, source and synonyms
	FlushMs                         *FlushMsOut                         `json:"flush_ms,omitempty"`                            // flush.ms value, source and synonyms
	IndexIntervalBytes              *IndexIntervalBytesOut              `json:"index_interval_bytes,omitempty"`                // index.interval.bytes value, source and synonyms
	LocalRetentionBytes             *LocalRetentionBytesOut             `json:"local_retention_bytes,omitempty"`               // local.retention.bytes value, source and synonyms
	LocalRetentionMs                *LocalRetentionMsOut                `json:"local_retention_ms,omitempty"`                  // local.retention.ms value, source and synonyms
	MaxCompactionLagMs              *MaxCompactionLagMsOut              `json:"max_compaction_lag_ms,omitempty"`               // max.compaction.lag.ms value, source and synonyms
	MaxMessageBytes                 *MaxMessageBytesOut                 `json:"max_message_bytes,omitempty"`                   // max.message.bytes value, source and synonyms
	MessageDownconversionEnable     *MessageDownconversionEnableOut     `json:"message_downconversion_enable,omitempty"`       // message.downconversion.enable value, source and synonyms
	MessageFormatVersion            *MessageFormatVersionOut            `json:"message_format_version,omitempty"`              // message.format.version value, source and synonyms
	MessageTimestampDifferenceMaxMs *MessageTimestampDifferenceMaxMsOut `json:"message_timestamp_difference_max_ms,omitempty"` // message.timestamp.difference.max.ms value, source and synonyms
	MessageTimestampType            *MessageTimestampTypeOut            `json:"message_timestamp_type,omitempty"`              // message.timestamp.type value, source and synonyms
	MinCleanableDirtyRatio          *MinCleanableDirtyRatioOut          `json:"min_cleanable_dirty_ratio,omitempty"`           // min.cleanable.dirty.ratio value, source and synonyms
	MinCompactionLagMs              *MinCompactionLagMsOut              `json:"min_compaction_lag_ms,omitempty"`               // min.compaction.lag.ms value, source and synonyms
	MinInsyncReplicas               *MinInsyncReplicasOut               `json:"min_insync_replicas,omitempty"`                 // min.insync.replicas value, source and synonyms
	Preallocate                     *PreallocateOut                     `json:"preallocate,omitempty"`                         // preallocate value, source and synonyms
	RemoteStorageEnable             *RemoteStorageEnableOut             `json:"remote_storage_enable,omitempty"`               // remote.storage.enable value, source and synonyms
	RetentionBytes                  *RetentionBytesOut                  `json:"retention_bytes,omitempty"`                     // retention.bytes value, source and synonyms
	RetentionMs                     *RetentionMsOut                     `json:"retention_ms,omitempty"`                        // retention.ms value, source and synonyms
	SegmentBytes                    *SegmentBytesOut                    `json:"segment_bytes,omitempty"`                       // segment.bytes value, source and synonyms
	SegmentIndexBytes               *SegmentIndexBytesOut               `json:"segment_index_bytes,omitempty"`                 // segment.index.bytes value, source and synonyms
	SegmentJitterMs                 *SegmentJitterMsOut                 `json:"segment_jitter_ms,omitempty"`                   // segment.jitter.ms value, source and synonyms
	SegmentMs                       *SegmentMsOut                       `json:"segment_ms,omitempty"`                          // segment.ms value, source and synonyms
	UncleanLeaderElectionEnable     *UncleanLeaderElectionEnableOut     `json:"unclean_leader_election_enable,omitempty"`      // unclean.leader.election.enable value, source and synonyms
}

ConfigOut Kafka topic configuration
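
Each ConfigOut entry is a pointer to a small struct carrying the effective value, the source it came from, and any configuration synonyms, so entries should be nil-checked before use. A reading sketch; the ConfigOut value is assumed to come from ServiceKafkaTopicGetOut.Config:

package main

import (
	"fmt"

	"github.com/aiven/go-client-codegen/handler/kafkatopic" // import path assumed
)

// describeRetention prints the effective retention.ms value, where it was set,
// and any synonyms reported for it.
func describeRetention(cfg kafkatopic.ConfigOut) {
	if cfg.RetentionMs == nil {
		fmt.Println("retention.ms not reported")
		return
	}
	fmt.Printf("retention.ms=%d (source: %v)\n", cfg.RetentionMs.Value, cfg.RetentionMs.Source)
	for _, syn := range cfg.RetentionMs.Synonyms {
		fmt.Printf("  synonym %s=%d (source: %v)\n", syn.Name, syn.Value, syn.Source)
	}
}

func main() {
	describeRetention(kafkatopic.ConfigOut{}) // zero value: nothing reported
}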

type ConsumerGroupOut

type ConsumerGroupOut struct {
	GroupName string `json:"group_name"` // Consumer group name
	Offset    int    `json:"offset"`     // Latest partition message offset
}

type DeleteRetentionMsOut

type DeleteRetentionMsOut struct {
	Source   SourceType `json:"source"` // Source of the Kafka topic configuration entry
	Synonyms []struct {
		Name   string     `json:"name"`   // Synonym name
		Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
		Value  int        `json:"value"`  // Synonym value
	} `json:"synonyms,omitempty"` // Configuration synonyms
	Value int `json:"value"` // The amount of time to retain delete tombstone markers for log compacted topics. This setting also gives a bound on the time in which a consumer must complete a read if they begin from offset 0 to ensure that they get a valid snapshot of the final stage (otherwise delete tombstones may be collected before they complete their scan).
}

DeleteRetentionMsOut delete.retention.ms value, source and synonyms

type FileDeleteDelayMsOut

type FileDeleteDelayMsOut struct {
	Source   SourceType `json:"source"` // Source of the Kafka topic configuration entry
	Synonyms []struct {
		Name   string     `json:"name"`   // Synonym name
		Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
		Value  int        `json:"value"`  // Synonym value
	} `json:"synonyms,omitempty"` // Configuration synonyms
	Value int `json:"value"` // The time to wait before deleting a file from the filesystem.
}

FileDeleteDelayMsOut file.delete.delay.ms value, source and synonyms

type FlushMessagesOut

type FlushMessagesOut struct {
	Source   SourceType `json:"source"` // Source of the Kafka topic configuration entry
	Synonyms []struct {
		Name   string     `json:"name"`   // Synonym name
		Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
		Value  int        `json:"value"`  // Synonym value
	} `json:"synonyms,omitempty"` // Configuration synonyms
	Value int `json:"value"` // This setting allows specifying an interval at which we will force an fsync of data written to the log. For example if this was set to 1 we would fsync after every message; if it were 5 we would fsync after every five messages. In general we recommend you not set this and use replication for durability and allow the operating system's background flush capabilities as it is more efficient.
}

FlushMessagesOut flush.messages value, source and synonyms

type FlushMsOut

type FlushMsOut struct {
	Source   SourceType `json:"source"` // Source of the Kafka topic configuration entry
	Synonyms []struct {
		Name   string     `json:"name"`   // Synonym name
		Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
		Value  int        `json:"value"`  // Synonym value
	} `json:"synonyms,omitempty"` // Configuration synonyms
	Value int `json:"value"` // This setting allows specifying a time interval at which we will force an fsync of data written to the log. For example if this was set to 1000 we would fsync after 1000 ms had passed. In general we recommend you not set this and use replication for durability and allow the operating system's background flush capabilities as it is more efficient.
}

FlushMsOut flush.ms value, source and synonyms

type FormatType

type FormatType string
const (
	FormatTypeBinary     FormatType = "binary"
	FormatTypeJson       FormatType = "json"
	FormatTypeAvro       FormatType = "avro"
	FormatTypeProtobuf   FormatType = "protobuf"
	FormatTypeJsonschema FormatType = "jsonschema"
)

type Handler

type Handler interface {
	// ServiceKafkaTopicCreate create a Kafka topic
	// POST /v1/project/{project}/service/{service_name}/topic
	// https://api.aiven.io/doc/#tag/Service:_Kafka/operation/ServiceKafkaTopicCreate
	ServiceKafkaTopicCreate(ctx context.Context, project string, serviceName string, in *ServiceKafkaTopicCreateIn) error

	// ServiceKafkaTopicDelete delete a Kafka topic
	// DELETE /v1/project/{project}/service/{service_name}/topic/{topic_name}
	// https://api.aiven.io/doc/#tag/Service:_Kafka/operation/ServiceKafkaTopicDelete
	ServiceKafkaTopicDelete(ctx context.Context, project string, serviceName string, topicName string) error

	// ServiceKafkaTopicGet get Kafka topic info
	// GET /v1/project/{project}/service/{service_name}/topic/{topic_name}
	// https://api.aiven.io/doc/#tag/Service:_Kafka/operation/ServiceKafkaTopicGet
	ServiceKafkaTopicGet(ctx context.Context, project string, serviceName string, topicName string) (*ServiceKafkaTopicGetOut, error)

	// ServiceKafkaTopicList get Kafka topic list
	// GET /v1/project/{project}/service/{service_name}/topic
	// https://api.aiven.io/doc/#tag/Service:_Kafka/operation/ServiceKafkaTopicList
	ServiceKafkaTopicList(ctx context.Context, project string, serviceName string) ([]TopicOut, error)

	// ServiceKafkaTopicMessageList list Kafka topic messages
	// POST /v1/project/{project}/service/{service_name}/kafka/rest/topics/{topic_name}/messages
	// https://api.aiven.io/doc/#tag/Service:_Kafka/operation/ServiceKafkaTopicMessageList
	ServiceKafkaTopicMessageList(ctx context.Context, project string, serviceName string, topicName string, in *ServiceKafkaTopicMessageListIn) ([]MessageOut, error)

	// ServiceKafkaTopicMessageProduce produce a message into a Kafka topic
	// POST /v1/project/{project}/service/{service_name}/kafka/rest/topics/{topic_name}/produce
	// https://api.aiven.io/doc/#tag/Service:_Kafka/operation/ServiceKafkaTopicMessageProduce
	ServiceKafkaTopicMessageProduce(ctx context.Context, project string, serviceName string, topicName string, in *ServiceKafkaTopicMessageProduceIn) (*ServiceKafkaTopicMessageProduceOut, error)

	// ServiceKafkaTopicUpdate update a Kafka topic
	// PUT /v1/project/{project}/service/{service_name}/topic/{topic_name}
	// https://api.aiven.io/doc/#tag/Service:_Kafka/operation/ServiceKafkaTopicUpdate
	ServiceKafkaTopicUpdate(ctx context.Context, project string, serviceName string, topicName string, in *ServiceKafkaTopicUpdateIn) error
}
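
The methods mirror the Aiven API operations one-to-one. A usage sketch written against the interface; the Handler value is assumed to come from the surrounding client, and the project and service names are placeholders:

package kafkatopicexample

import (
	"context"
	"fmt"

	"github.com/aiven/go-client-codegen/handler/kafkatopic" // import path assumed
)

// listTopics prints how many topics the service has and dumps each entry.
// TopicOut is documented later on this page.
func listTopics(ctx context.Context, h kafkatopic.Handler) error {
	topics, err := h.ServiceKafkaTopicList(ctx, "my-project", "my-kafka") // placeholder names
	if err != nil {
		return err
	}
	fmt.Printf("found %d topics\n", len(topics))
	for _, t := range topics {
		fmt.Printf("%+v\n", t)
	}
	return nil
}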

type IndexIntervalBytesOut

type IndexIntervalBytesOut struct {
	Source   SourceType `json:"source"` // Source of the Kafka topic configuration entry
	Synonyms []struct {
		Name   string     `json:"name"`   // Synonym name
		Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
		Value  int        `json:"value"`  // Synonym value
	} `json:"synonyms,omitempty"` // Configuration synonyms
	Value int `json:"value"` // This setting controls how frequently Kafka adds an index entry to its offset index. The default setting ensures that we index a message roughly every 4096 bytes. More indexing allows reads to jump closer to the exact position in the log but makes the index larger. You probably don't need to change this.
}

IndexIntervalBytesOut index.interval.bytes value, source and synonyms

type KafkaTopicHandler

type KafkaTopicHandler struct {
	// contains filtered or unexported fields
}

func NewHandler

func NewHandler(doer doer) KafkaTopicHandler

func (*KafkaTopicHandler) ServiceKafkaTopicCreate

func (h *KafkaTopicHandler) ServiceKafkaTopicCreate(ctx context.Context, project string, serviceName string, in *ServiceKafkaTopicCreateIn) error

func (*KafkaTopicHandler) ServiceKafkaTopicDelete

func (h *KafkaTopicHandler) ServiceKafkaTopicDelete(ctx context.Context, project string, serviceName string, topicName string) error

func (*KafkaTopicHandler) ServiceKafkaTopicGet

func (h *KafkaTopicHandler) ServiceKafkaTopicGet(ctx context.Context, project string, serviceName string, topicName string) (*ServiceKafkaTopicGetOut, error)

func (*KafkaTopicHandler) ServiceKafkaTopicList

func (h *KafkaTopicHandler) ServiceKafkaTopicList(ctx context.Context, project string, serviceName string) ([]TopicOut, error)

func (*KafkaTopicHandler) ServiceKafkaTopicMessageList

func (h *KafkaTopicHandler) ServiceKafkaTopicMessageList(ctx context.Context, project string, serviceName string, topicName string, in *ServiceKafkaTopicMessageListIn) ([]MessageOut, error)

func (*KafkaTopicHandler) ServiceKafkaTopicMessageProduce

func (h *KafkaTopicHandler) ServiceKafkaTopicMessageProduce(ctx context.Context, project string, serviceName string, topicName string, in *ServiceKafkaTopicMessageProduceIn) (*ServiceKafkaTopicMessageProduceOut, error)

func (*KafkaTopicHandler) ServiceKafkaTopicUpdate

func (h *KafkaTopicHandler) ServiceKafkaTopicUpdate(ctx context.Context, project string, serviceName string, topicName string, in *ServiceKafkaTopicUpdateIn) error
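
A compile-time assertion can document that the pointer type satisfies the Handler interface above (a sketch, with the import path assumed from the module):

package kafkatopicexample

import "github.com/aiven/go-client-codegen/handler/kafkatopic" // import path assumed

// Compile-time check that *KafkaTopicHandler implements Handler.
var _ kafkatopic.Handler = (*kafkatopic.KafkaTopicHandler)(nil)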

type LocalRetentionBytesOut

type LocalRetentionBytesOut struct {
	Source   SourceType `json:"source"` // Source of the Kafka topic configuration entry
	Synonyms []struct {
		Name   string     `json:"name"`   // Synonym name
		Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
		Value  int        `json:"value"`  // Synonym value
	} `json:"synonyms,omitempty"` // Configuration synonyms
	Value int `json:"value"` // This configuration controls the maximum bytes tiered storage will retain segment files locally before it will discard old log segments to free up space. If set to -2, the limit is equal to overall retention time. If set to -1, no limit is applied but it's possible only if overall retention is also -1.
}

LocalRetentionBytesOut local.retention.bytes value, source and synonyms

type LocalRetentionMsOut

type LocalRetentionMsOut struct {
	Source   SourceType `json:"source"` // Source of the Kafka topic configuration entry
	Synonyms []struct {
		Name   string     `json:"name"`   // Synonym name
		Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
		Value  int        `json:"value"`  // Synonym value
	} `json:"synonyms,omitempty"` // Configuration synonyms
	Value int `json:"value"` // This configuration controls the maximum time tiered storage will retain segment files locally before it will discard old log segments to free up space. If set to -2, the time limit is equal to overall retention time. If set to -1, no time limit is applied but it's possible only if overall retention is also -1.
}

LocalRetentionMsOut local.retention.ms value, source and synonyms

type MaxCompactionLagMsOut

type MaxCompactionLagMsOut struct {
	Source   SourceType `json:"source"` // Source of the Kafka topic configuration entry
	Synonyms []struct {
		Name   string     `json:"name"`   // Synonym name
		Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
		Value  int        `json:"value"`  // Synonym value
	} `json:"synonyms,omitempty"` // Configuration synonyms
	Value int `json:"value"` // The maximum time a message will remain ineligible for compaction in the log. Only applicable for logs that are being compacted.
}

MaxCompactionLagMsOut max.compaction.lag.ms value, source and synonyms

type MaxMessageBytesOut

type MaxMessageBytesOut struct {
	Source   SourceType `json:"source"` // Source of the Kafka topic configuration entry
	Synonyms []struct {
		Name   string     `json:"name"`   // Synonym name
		Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
		Value  int        `json:"value"`  // Synonym value
	} `json:"synonyms,omitempty"` // Configuration synonyms
	Value int `json:"value"` // The largest record batch size allowed by Kafka (after compression if compression is enabled). If this is increased and there are consumers older than 0.10.2, the consumers' fetch size must also be increased so that they can fetch record batches this large. In the latest message format version, records are always grouped into batches for efficiency. In previous message format versions, uncompressed records are not grouped into batches and this limit only applies to a single record in that case.
}

MaxMessageBytesOut max.message.bytes value, source and synonyms

type MessageDownconversionEnableOut

type MessageDownconversionEnableOut struct {
	Source   SourceType `json:"source"` // Source of the Kafka topic configuration entry
	Synonyms []struct {
		Name   string     `json:"name"`   // Synonym name
		Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
		Value  bool       `json:"value"`  // Synonym value
	} `json:"synonyms,omitempty"` // Configuration synonyms
	Value bool `json:"value"` // This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. When set to false, broker will not perform down-conversion for consumers expecting an older message format. The broker responds with UNSUPPORTED_VERSION error for consume requests from such older clients. This configuration does not apply to any message format conversion that might be required for replication to followers.
}

MessageDownconversionEnableOut message.downconversion.enable value, source and synonyms

type MessageFormatVersionOut

type MessageFormatVersionOut struct {
	Source   SourceType `json:"source"` // Source of the Kafka topic configuration entry
	Synonyms []struct {
		Name   string     `json:"name"`   // Synonym name
		Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
		Value  string     `json:"value"`  // Synonym value
	} `json:"synonyms,omitempty"` // Configuration synonyms
	Value MessageFormatVersionType `json:"value"` // Specify the message format version the broker will use to append messages to the logs. The value should be a valid ApiVersion. Some examples are: 0.8.2, 0.9.0.0, 0.10.0, check ApiVersion for more details. By setting a particular message format version, the user is certifying that all the existing messages on disk are smaller or equal than the specified version. Setting this value incorrectly will cause consumers with older versions to break as they will receive messages with a format that they don't understand.
}

MessageFormatVersionOut message.format.version value, source and synonyms

type MessageFormatVersionType

type MessageFormatVersionType string
const (
	MessageFormatVersionType080     MessageFormatVersionType = "0.8.0"
	MessageFormatVersionType081     MessageFormatVersionType = "0.8.1"
	MessageFormatVersionType082     MessageFormatVersionType = "0.8.2"
	MessageFormatVersionType090     MessageFormatVersionType = "0.9.0"
	MessageFormatVersionType0100    MessageFormatVersionType = "0.10.0"
	MessageFormatVersionType0100Iv0 MessageFormatVersionType = "0.10.0-IV0"
	MessageFormatVersionType0100Iv1 MessageFormatVersionType = "0.10.0-IV1"
	MessageFormatVersionType0101    MessageFormatVersionType = "0.10.1"
	MessageFormatVersionType0101Iv0 MessageFormatVersionType = "0.10.1-IV0"
	MessageFormatVersionType0101Iv1 MessageFormatVersionType = "0.10.1-IV1"
	MessageFormatVersionType0101Iv2 MessageFormatVersionType = "0.10.1-IV2"
	MessageFormatVersionType0102    MessageFormatVersionType = "0.10.2"
	MessageFormatVersionType0102Iv0 MessageFormatVersionType = "0.10.2-IV0"
	MessageFormatVersionType0110    MessageFormatVersionType = "0.11.0"
	MessageFormatVersionType0110Iv0 MessageFormatVersionType = "0.11.0-IV0"
	MessageFormatVersionType0110Iv1 MessageFormatVersionType = "0.11.0-IV1"
	MessageFormatVersionType0110Iv2 MessageFormatVersionType = "0.11.0-IV2"
	MessageFormatVersionType10      MessageFormatVersionType = "1.0"
	MessageFormatVersionType10Iv0   MessageFormatVersionType = "1.0-IV0"
	MessageFormatVersionType11      MessageFormatVersionType = "1.1"
	MessageFormatVersionType11Iv0   MessageFormatVersionType = "1.1-IV0"
	MessageFormatVersionType20      MessageFormatVersionType = "2.0"
	MessageFormatVersionType20Iv0   MessageFormatVersionType = "2.0-IV0"
	MessageFormatVersionType20Iv1   MessageFormatVersionType = "2.0-IV1"
	MessageFormatVersionType21      MessageFormatVersionType = "2.1"
	MessageFormatVersionType21Iv0   MessageFormatVersionType = "2.1-IV0"
	MessageFormatVersionType21Iv1   MessageFormatVersionType = "2.1-IV1"
	MessageFormatVersionType21Iv2   MessageFormatVersionType = "2.1-IV2"
	MessageFormatVersionType22      MessageFormatVersionType = "2.2"
	MessageFormatVersionType22Iv0   MessageFormatVersionType = "2.2-IV0"
	MessageFormatVersionType22Iv1   MessageFormatVersionType = "2.2-IV1"
	MessageFormatVersionType23      MessageFormatVersionType = "2.3"
	MessageFormatVersionType23Iv0   MessageFormatVersionType = "2.3-IV0"
	MessageFormatVersionType23Iv1   MessageFormatVersionType = "2.3-IV1"
	MessageFormatVersionType24      MessageFormatVersionType = "2.4"
	MessageFormatVersionType24Iv0   MessageFormatVersionType = "2.4-IV0"
	MessageFormatVersionType24Iv1   MessageFormatVersionType = "2.4-IV1"
	MessageFormatVersionType25      MessageFormatVersionType = "2.5"
	MessageFormatVersionType25Iv0   MessageFormatVersionType = "2.5-IV0"
	MessageFormatVersionType26      MessageFormatVersionType = "2.6"
	MessageFormatVersionType26Iv0   MessageFormatVersionType = "2.6-IV0"
	MessageFormatVersionType27      MessageFormatVersionType = "2.7"
	MessageFormatVersionType27Iv0   MessageFormatVersionType = "2.7-IV0"
	MessageFormatVersionType27Iv1   MessageFormatVersionType = "2.7-IV1"
	MessageFormatVersionType27Iv2   MessageFormatVersionType = "2.7-IV2"
	MessageFormatVersionType28      MessageFormatVersionType = "2.8"
	MessageFormatVersionType28Iv0   MessageFormatVersionType = "2.8-IV0"
	MessageFormatVersionType28Iv1   MessageFormatVersionType = "2.8-IV1"
	MessageFormatVersionType30      MessageFormatVersionType = "3.0"
	MessageFormatVersionType30Iv0   MessageFormatVersionType = "3.0-IV0"
	MessageFormatVersionType30Iv1   MessageFormatVersionType = "3.0-IV1"
	MessageFormatVersionType31      MessageFormatVersionType = "3.1"
	MessageFormatVersionType31Iv0   MessageFormatVersionType = "3.1-IV0"
	MessageFormatVersionType32      MessageFormatVersionType = "3.2"
	MessageFormatVersionType32Iv0   MessageFormatVersionType = "3.2-IV0"
	MessageFormatVersionType33      MessageFormatVersionType = "3.3"
	MessageFormatVersionType33Iv0   MessageFormatVersionType = "3.3-IV0"
	MessageFormatVersionType33Iv1   MessageFormatVersionType = "3.3-IV1"
	MessageFormatVersionType33Iv2   MessageFormatVersionType = "3.3-IV2"
	MessageFormatVersionType33Iv3   MessageFormatVersionType = "3.3-IV3"
	MessageFormatVersionType34      MessageFormatVersionType = "3.4"
	MessageFormatVersionType34Iv0   MessageFormatVersionType = "3.4-IV0"
	MessageFormatVersionType35      MessageFormatVersionType = "3.5"
	MessageFormatVersionType35Iv0   MessageFormatVersionType = "3.5-IV0"
	MessageFormatVersionType35Iv1   MessageFormatVersionType = "3.5-IV1"
	MessageFormatVersionType35Iv2   MessageFormatVersionType = "3.5-IV2"
	MessageFormatVersionType36      MessageFormatVersionType = "3.6"
	MessageFormatVersionType36Iv0   MessageFormatVersionType = "3.6-IV0"
	MessageFormatVersionType36Iv1   MessageFormatVersionType = "3.6-IV1"
	MessageFormatVersionType36Iv2   MessageFormatVersionType = "3.6-IV2"
	MessageFormatVersionType37      MessageFormatVersionType = "3.7"
	MessageFormatVersionType37Iv0   MessageFormatVersionType = "3.7-IV0"
	MessageFormatVersionType37Iv1   MessageFormatVersionType = "3.7-IV1"
	MessageFormatVersionType37Iv2   MessageFormatVersionType = "3.7-IV2"
	MessageFormatVersionType37Iv3   MessageFormatVersionType = "3.7-IV3"
	MessageFormatVersionType37Iv4   MessageFormatVersionType = "3.7-IV4"
	MessageFormatVersionType38      MessageFormatVersionType = "3.8"
	MessageFormatVersionType38Iv0   MessageFormatVersionType = "3.8-IV0"
	MessageFormatVersionType39      MessageFormatVersionType = "3.9"
	MessageFormatVersionType39Iv0   MessageFormatVersionType = "3.9-IV0"
	MessageFormatVersionType39Iv1   MessageFormatVersionType = "3.9-IV1"
)

type MessageOut

type MessageOut struct {
	Key       map[string]any `json:"key,omitempty"`       // The message key, formatted according to the embedded format
	Offset    *int           `json:"offset,omitempty"`    // Offset of the message
	Partition *int           `json:"partition,omitempty"` // Partition of the message
	Topic     *string        `json:"topic,omitempty"`     // The name of the topic
	Value     map[string]any `json:"value,omitempty"`     // The message value, formatted according to the embedded format
}

type MessageTimestampDifferenceMaxMsOut

type MessageTimestampDifferenceMaxMsOut struct {
	Source   SourceType `json:"source"` // Source of the Kafka topic configuration entry
	Synonyms []struct {
		Name   string     `json:"name"`   // Synonym name
		Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
		Value  int        `json:"value"`  // Synonym value
	} `json:"synonyms,omitempty"` // Configuration synonyms
	Value int `json:"value"` // The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message. If message.timestamp.type=CreateTime, a message will be rejected if the difference in timestamp exceeds this threshold. This configuration is ignored if message.timestamp.type=LogAppendTime.
}

MessageTimestampDifferenceMaxMsOut message.timestamp.difference.max.ms value, source and synonyms

type MessageTimestampType

type MessageTimestampType string
const (
	MessageTimestampTypeCreateTime    MessageTimestampType = "CreateTime"
	MessageTimestampTypeLogAppendTime MessageTimestampType = "LogAppendTime"
)

type MessageTimestampTypeOut

type MessageTimestampTypeOut struct {
	Source   SourceType `json:"source"` // Source of the Kafka topic configuration entry
	Synonyms []struct {
		Name   string     `json:"name"`   // Synonym name
		Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
		Value  string     `json:"value"`  // Synonym value
	} `json:"synonyms,omitempty"` // Configuration synonyms
	Value MessageTimestampTypeValue `json:"value"` // Define whether the timestamp in the message is message create time or log append time.
}

MessageTimestampTypeOut message.timestamp.type value, source and synonyms

type MessageTimestampTypeValue added in v0.3.0

type MessageTimestampTypeValue string
const (
	MessageTimestampTypeValueCreateTime    MessageTimestampTypeValue = "CreateTime"
	MessageTimestampTypeValueLogAppendTime MessageTimestampTypeValue = "LogAppendTime"
)

type MinCleanableDirtyRatioOut

type MinCleanableDirtyRatioOut struct {
	Source   SourceType `json:"source"` // Source of the Kafka topic configuration entry
	Synonyms []struct {
		Name   string     `json:"name"`   // Synonym name
		Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
		Value  float64    `json:"value"`  // Synonym value
	} `json:"synonyms,omitempty"` // Configuration synonyms
	Value float64 `json:"value"` // This configuration controls how frequently the log compactor will attempt to clean the log (assuming log compaction is enabled). By default we will avoid cleaning a log where more than 50% of the log has been compacted. This ratio bounds the maximum space wasted in the log by duplicates (at 50% at most 50% of the log could be duplicates). A higher ratio will mean fewer, more efficient cleanings but will mean more wasted space in the log. If the max.compaction.lag.ms or the min.compaction.lag.ms configurations are also specified, then the log compactor considers the log to be eligible for compaction as soon as either: (i) the dirty ratio threshold has been met and the log has had dirty (uncompacted) records for at least the min.compaction.lag.ms duration, or (ii) if the log has had dirty (uncompacted) records for at most the max.compaction.lag.ms period.
}

MinCleanableDirtyRatioOut min.cleanable.dirty.ratio value, source and synonyms

type MinCompactionLagMsOut

type MinCompactionLagMsOut struct {
	Source   SourceType `json:"source"` // Source of the Kafka topic configuration entry
	Synonyms []struct {
		Name   string     `json:"name"`   // Synonym name
		Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
		Value  int        `json:"value"`  // Synonym value
	} `json:"synonyms,omitempty"` // Configuration synonyms
	Value int `json:"value"` // The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
}

MinCompactionLagMsOut min.compaction.lag.ms value, source and synonyms

type MinInsyncReplicasOut

type MinInsyncReplicasOut struct {
	Source   SourceType `json:"source"` // Source of the Kafka topic configuration entry
	Synonyms []struct {
		Name   string     `json:"name"`   // Synonym name
		Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
		Value  int        `json:"value"`  // Synonym value
	} `json:"synonyms,omitempty"` // Configuration synonyms
	Value int `json:"value"` // When a producer sets acks to 'all' (or '-1'), this configuration specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. If this minimum cannot be met, then the producer will raise an exception (either NotEnoughReplicas or NotEnoughReplicasAfterAppend). When used together, min.insync.replicas and acks allow you to enforce greater durability guarantees. A typical scenario would be to create a topic with a replication factor of 3, set min.insync.replicas to 2, and produce with acks of 'all'. This will ensure that the producer raises an exception if a majority of replicas do not receive a write.
}

MinInsyncReplicasOut min.insync.replicas value, source and synonyms

type OffsetOut

type OffsetOut struct {
	Error     *string `json:"error,omitempty"`      // An error message describing why the operation failed, or null if it succeeded
	ErrorCode *int    `json:"error_code,omitempty"` // An error code classifying the reason this operation failed, or null if it succeeded. 1 = Non-retriable Kafka exception, 2 = Retriable Kafka exception; the message might be sent successfully if retried
	Offset    *int    `json:"offset,omitempty"`     // Offset of the message, or null if publishing the message failed
	Partition *int    `json:"partition,omitempty"`  // Partition the message was published to, or null if publishing the message failed
}

type PartitionOut

type PartitionOut struct {
	ConsumerGroups []ConsumerGroupOut `json:"consumer_groups"`       // List of Kafka consumer groups
	EarliestOffset int                `json:"earliest_offset"`       // Earliest partition message offset
	Isr            int                `json:"isr"`                   // Number of In Sync Replicas (ISR)
	LatestOffset   int                `json:"latest_offset"`         // Latest partition message offset
	Partition      int                `json:"partition"`             // Partition number
	RemoteSize     *int               `json:"remote_size,omitempty"` // Size of tiered data from partition in bytes
	Size           int                `json:"size"`                  // Size of partition in bytes
}

type PreallocateOut

type PreallocateOut struct {
	Source   SourceType `json:"source"` // Source of the Kafka topic configuration entry
	Synonyms []struct {
		Name   string     `json:"name"`   // Synonym name
		Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
		Value  bool       `json:"value"`  // Synonym value
	} `json:"synonyms,omitempty"` // Configuration synonyms
	Value bool `json:"value"` // True if we should preallocate the file on disk when creating a new log segment.
}

PreallocateOut preallocate value, source and synonyms

type RecordIn

type RecordIn struct {
	Key       *map[string]any `json:"key,omitempty"`       // Key for the produced record
	Partition *int            `json:"partition,omitempty"` // Optionally specify the partition where to produce the record
	Value     *map[string]any `json:"value,omitempty"`     // Value for the produced record
}

type RemoteStorageEnableOut

type RemoteStorageEnableOut struct {
	Source   SourceType `json:"source"` // Source of the Kafka topic configuration entry
	Synonyms []struct {
		Name   string     `json:"name"`   // Synonym name
		Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
		Value  bool       `json:"value"`  // Synonym value
	} `json:"synonyms,omitempty"` // Configuration synonyms
	Value bool `json:"value"` // Indicates whether tiered storage should be enabled.
}

RemoteStorageEnableOut remote.storage.enable value, source and synonyms

type RetentionBytesOut

type RetentionBytesOut struct {
	Source   SourceType `json:"source"` // Source of the Kafka topic configuration entry
	Synonyms []struct {
		Name   string     `json:"name"`   // Synonym name
		Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
		Value  int        `json:"value"`  // Synonym value
	} `json:"synonyms,omitempty"` // Configuration synonyms
	Value int `json:"value"` // This configuration controls the maximum size a partition (which consists of log segments) can grow to before we will discard old log segments to free up space if we are using the 'delete' retention policy. By default there is no size limit only a time limit. Since this limit is enforced at the partition level, multiply it by the number of partitions to compute the topic retention in bytes.
}

RetentionBytesOut retention.bytes value, source and synonyms

type RetentionMsOut

type RetentionMsOut struct {
	Source   SourceType `json:"source"` // Source of the Kafka topic configuration entry
	Synonyms []struct {
		Name   string     `json:"name"`   // Synonym name
		Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
		Value  int        `json:"value"`  // Synonym value
	} `json:"synonyms,omitempty"` // Configuration synonyms
	Value int `json:"value"` // This configuration controls the maximum time we will retain a log before we will discard old log segments to free up space if we are using the 'delete' retention policy. This represents an SLA on how soon consumers must read their data. If set to -1, no time limit is applied.
}

RetentionMsOut retention.ms value, source and synonyms

type SegmentBytesOut

type SegmentBytesOut struct {
	Source   SourceType `json:"source"` // Source of the Kafka topic configuration entry
	Synonyms []struct {
		Name   string     `json:"name"`   // Synonym name
		Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
		Value  int        `json:"value"`  // Synonym value
	} `json:"synonyms,omitempty"` // Configuration synonyms
	Value int `json:"value"` // This configuration controls the segment file size for the log. Retention and cleaning is always done a file at a time so a larger segment size means fewer files but less granular control over retention. Setting this to a very low value has consequences, and the Aiven management plane ignores values less than 10 megabytes.
}

SegmentBytesOut segment.bytes value, source and synonyms

type SegmentIndexBytesOut

type SegmentIndexBytesOut struct {
	Source   SourceType `json:"source"` // Source of the Kafka topic configuration entry
	Synonyms []struct {
		Name   string     `json:"name"`   // Synonym name
		Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
		Value  int        `json:"value"`  // Synonym value
	} `json:"synonyms,omitempty"` // Configuration synonyms
	Value int `json:"value"` // This configuration controls the size of the index that maps offsets to file positions. We preallocate this index file and shrink it only after log rolls. You generally should not need to change this setting.
}

SegmentIndexBytesOut segment.index.bytes value, source and synonyms

type SegmentJitterMsOut

type SegmentJitterMsOut struct {
	Source   SourceType `json:"source"` // Source of the Kafka topic configuration entry
	Synonyms []struct {
		Name   string     `json:"name"`   // Synonym name
		Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
		Value  int        `json:"value"`  // Synonym value
	} `json:"synonyms,omitempty"` // Configuration synonyms
	Value int `json:"value"` // The maximum random jitter subtracted from the scheduled segment roll time to avoid thundering herds of segment rolling
}

SegmentJitterMsOut segment.jitter.ms value, source and synonyms

type SegmentMsOut

type SegmentMsOut struct {
	Source   SourceType `json:"source"` // Source of the Kafka topic configuration entry
	Synonyms []struct {
		Name   string     `json:"name"`   // Synonym name
		Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
		Value  int        `json:"value"`  // Synonym value
	} `json:"synonyms,omitempty"` // Configuration synonyms
	Value int `json:"value"` // This configuration controls the period of time after which Kafka will force the log to roll even if the segment file isn't full to ensure that retention can delete or compact old data. Setting this to a very low value has consequences, and the Aiven management plane ignores values less than 10 seconds.
}

SegmentMsOut segment.ms value, source and synonyms

type ServiceKafkaTopicCreateIn

type ServiceKafkaTopicCreateIn struct {
	CleanupPolicy     CleanupPolicyType `json:"cleanup_policy,omitempty"`      // The retention policy to use on old segments. Possible values include 'delete', 'compact', or a comma-separated list of them. The default policy ('delete') will discard old segments when their retention time or size limit has been reached. The 'compact' setting will enable log compaction on the topic.
	Config            *ConfigIn         `json:"config,omitempty"`              // Kafka topic configuration
	MinInsyncReplicas *int              `json:"min_insync_replicas,omitempty"` // When a producer sets acks to 'all' (or '-1'), this configuration specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. If this minimum cannot be met, then the producer will raise an exception (either NotEnoughReplicas or NotEnoughReplicasAfterAppend). When used together, min.insync.replicas and acks allow you to enforce greater durability guarantees. A typical scenario would be to create a topic with a replication factor of 3, set min.insync.replicas to 2, and produce with acks of 'all'. This will ensure that the producer raises an exception if a majority of replicas do not receive a write.
	OwnerUserGroupId  *string           `json:"owner_user_group_id,omitempty"` // The user group that owns this topic
	Partitions        *int              `json:"partitions,omitempty"`          // Number of partitions
	Replication       *int              `json:"replication,omitempty"`         // Number of replicas
	RetentionBytes    *int              `json:"retention_bytes,omitempty"`     // This configuration controls the maximum size a partition (which consists of log segments) can grow to before we will discard old log segments to free up space if we are using the 'delete' retention policy. By default there is no size limit only a time limit. Since this limit is enforced at the partition level, multiply it by the number of partitions to compute the topic retention in bytes.
	RetentionHours    *int              `json:"retention_hours,omitempty"`     // DEPRECATED: use config.retention_ms
	Tags              *[]TagIn          `json:"tags,omitempty"`                // Topic tags
	TopicDescription  *string           `json:"topic_description,omitempty"`   // Topic description
	TopicName         string            `json:"topic_name"`                    // Topic name
}

ServiceKafkaTopicCreateIn ServiceKafkaTopicCreateRequestBody
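
A creation sketch that combines ServiceKafkaTopicCreateIn with the ConfigIn type documented above; the Handler value, project, and service names are supplied by the caller and the topic settings shown are placeholders:

package kafkatopicexample

import (
	"context"

	"github.com/aiven/go-client-codegen/handler/kafkatopic" // import path assumed
)

// intPtr is a local helper for the optional pointer fields; it is not part of this package.
func intPtr(v int) *int { return &v }

// createCompactedTopic creates a six-partition, triple-replicated compacted topic.
func createCompactedTopic(ctx context.Context, h kafkatopic.Handler, project, service string) error {
	in := &kafkatopic.ServiceKafkaTopicCreateIn{
		TopicName:   "orders", // placeholder topic name
		Partitions:  intPtr(6),
		Replication: intPtr(3),
		Config: &kafkatopic.ConfigIn{
			CleanupPolicy:     kafkatopic.CleanupPolicyTypeCompact,
			MinInsyncReplicas: intPtr(2),
		},
	}
	return h.ServiceKafkaTopicCreate(ctx, project, service, in)
}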

type ServiceKafkaTopicGetOut

type ServiceKafkaTopicGetOut struct {
	CleanupPolicy     string         `json:"cleanup_policy"`      // The retention policy to use on old segments. Possible values include 'delete', 'compact', or a comma-separated list of them. The default policy ('delete') will discard old segments when their retention time or size limit has been reached. The 'compact' setting will enable log compaction on the topic.
	Config            ConfigOut      `json:"config"`              // Kafka topic configuration
	MinInsyncReplicas int            `json:"min_insync_replicas"` // When a producer sets acks to 'all' (or '-1'), this configuration specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. If this minimum cannot be met, then the producer will raise an exception (either NotEnoughReplicas or NotEnoughReplicasAfterAppend). When used together, min.insync.replicas and acks allow you to enforce greater durability guarantees. A typical scenario would be to create a topic with a replication factor of 3, set min.insync.replicas to 2, and produce with acks of 'all'. This will ensure that the producer raises an exception if a majority of replicas do not receive a write.
	OwnerUserGroupId  string         `json:"owner_user_group_id"` // The user group that owns this topic
	Partitions        []PartitionOut `json:"partitions"`          // Topic partitions
	Replication       int            `json:"replication"`         // Number of replicas
	RetentionBytes    int            `json:"retention_bytes"`     // This configuration controls the maximum size a partition (which consists of log segments) can grow to before we will discard old log segments to free up space if we are using the 'delete' retention policy. By default there is no size limit, only a time limit. Since this limit is enforced at the partition level, multiply it by the number of partitions to compute the topic retention in bytes.
	RetentionHours    int            `json:"retention_hours"`     // DEPRECATED: use config.retention_ms
	State             TopicStateType `json:"state"`               // Topic state
	Tags              []TagOut       `json:"tags"`                // Topic tags
	TopicDescription  string         `json:"topic_description"`   // Topic description
	TopicName         string         `json:"topic_name"`          // Topic name
}

ServiceKafkaTopicGetOut Kafka topic information
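
A sketch of reading a fetched topic, assuming topic holds a ServiceKafkaTopicGetOut returned by the corresponding get call and using the standard fmt package:

func describeTopic(topic kafkatopic.ServiceKafkaTopicGetOut) string {
	// Partitions carries per-partition details, so its length is the partition count.
	return fmt.Sprintf("%s: %d partitions, replication %d, state %s",
		topic.TopicName, len(topic.Partitions), topic.Replication, topic.State)
}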

type ServiceKafkaTopicMessageListIn

type ServiceKafkaTopicMessageListIn struct {
	Format     FormatType     `json:"format,omitempty"`    // The format of consumed messages, which is used to convert messages into a JSON-compatible form. If unspecified, defaults to binary
	MaxBytes   *int           `json:"max_bytes,omitempty"` // The maximum number of bytes of unencoded keys and values that should be included in the response. This provides approximate control over the size of responses and the amount of memory required to store the decoded response. The actual limit will be the minimum of this setting and the server-side configuration consumer.request.max.bytes. Default is unlimited
	Partitions map[string]any `json:"partitions"`          // Object of desired partition / offset mappings
	Timeout    *int           `json:"timeout,omitempty"`   // The maximum total time to wait for messages for a request if the maximum request size has not yet been reached
}

ServiceKafkaTopicMessageListIn ServiceKafkaTopicMessageListRequestBody
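
A hedged sketch of a message-list request. The shape of the Partitions map (partition number as a string key mapping to an object with a starting offset) is an assumption based on the field comment above, and the timeout unit is not stated in this package, so treat both as illustrative only.

maxBytes := 1 << 20 // cap the unencoded keys and values at roughly 1 MiB
timeout := 5000     // assumed to be milliseconds; the unit is not documented here
listReq := kafkatopic.ServiceKafkaTopicMessageListIn{
	MaxBytes: &maxBytes,
	Timeout:  &timeout,
	// Format is left unset, so consumed messages default to binary.
	Partitions: map[string]any{
		"0": map[string]any{"offset": 0}, // assumed mapping: partition -> starting offset
	},
}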

type ServiceKafkaTopicMessageProduceIn

type ServiceKafkaTopicMessageProduceIn struct {
	Format        FormatType `json:"format"`                    // The format of the content.
	KeySchema     *string    `json:"key_schema,omitempty"`      // Full schema encoded as a string (e.g. JSON serialized for Avro data)
	KeySchemaId   *int       `json:"key_schema_id,omitempty"`   // ID returned by a previous request using the same schema. This ID corresponds to the ID of the schema in the registry.
	Records       []RecordIn `json:"records"`                   // List of records to produce to the topic
	ValueSchema   *string    `json:"value_schema,omitempty"`    // Full schema encoded as a string (e.g. JSON serialized for Avro data)
	ValueSchemaId *int       `json:"value_schema_id,omitempty"` // ID returned by a previous request using the same schema. This ID corresponds to the ID of the schema in the registry.
}

ServiceKafkaTopicMessageProduceIn ServiceKafkaTopicMessageProduceRequestBody
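
A sketch of a produce request. The records slice is assumed to be prepared elsewhere from the package's RecordIn type (its fields are not shown here), and the "json" format value is an assumption — FormatTypeChoices() lists the accepted values.

var records []kafkatopic.RecordIn // built elsewhere; one entry per message to produce
produceReq := kafkatopic.ServiceKafkaTopicMessageProduceIn{
	Format:  kafkatopic.FormatType("json"), // assumed value; see FormatTypeChoices()
	Records: records,
}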

type ServiceKafkaTopicMessageProduceOut

type ServiceKafkaTopicMessageProduceOut struct {
	KeySchemaId   *int        `json:"key_schema_id,omitempty"`   // The ID for the schema used to produce keys, or null if keys were not used
	Offsets       []OffsetOut `json:"offsets,omitempty"`         // List of offsets for the produced records
	ValueSchemaId *int        `json:"value_schema_id,omitempty"` // The ID for the schema used to produce values
}

ServiceKafkaTopicMessageProduceOut ServiceKafkaTopicMessageProduceResponse

type ServiceKafkaTopicUpdateIn

type ServiceKafkaTopicUpdateIn struct {
	Config            *ConfigIn `json:"config,omitempty"`              // Kafka topic configuration
	MinInsyncReplicas *int      `json:"min_insync_replicas,omitempty"` // When a producer sets acks to 'all' (or '-1'), this configuration specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. If this minimum cannot be met, then the producer will raise an exception (either NotEnoughReplicas or NotEnoughReplicasAfterAppend). When used together, min.insync.replicas and acks allow you to enforce greater durability guarantees. A typical scenario would be to create a topic with a replication factor of 3, set min.insync.replicas to 2, and produce with acks of 'all'. This will ensure that the producer raises an exception if a majority of replicas do not receive a write.
	OwnerUserGroupId  *string   `json:"owner_user_group_id,omitempty"` // The user group that owns this topic
	Partitions        *int      `json:"partitions,omitempty"`          // Number of partitions
	Replication       *int      `json:"replication,omitempty"`         // Number of replicas
	RetentionBytes    *int      `json:"retention_bytes,omitempty"`     // This configuration controls the maximum size a partition (which consists of log segments) can grow to before we will discard old log segments to free up space if we are using the 'delete' retention policy. By default there is no size limit, only a time limit. Since this limit is enforced at the partition level, multiply it by the number of partitions to compute the topic retention in bytes.
	RetentionHours    *int      `json:"retention_hours,omitempty"`     // DEPRECATED: use config.retention_ms
	Tags              *[]TagIn  `json:"tags,omitempty"`                // Topic tags
	TopicDescription  *string   `json:"topic_description,omitempty"`   // Topic description
}

ServiceKafkaTopicUpdateIn ServiceKafkaTopicUpdateRequestBody
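
A sketch of a partial update. Every field in this request body is optional, so only the fields set here end up in the serialized request; the rest of the topic configuration is left untouched.

minInsync := 2
retentionBytes := 1073741824 // 1 GiB per partition
updateReq := kafkatopic.ServiceKafkaTopicUpdateIn{
	MinInsyncReplicas: &minInsync,
	RetentionBytes:    &retentionBytes,
}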

type SourceType added in v0.3.0

type SourceType string
const (
	SourceTypeUnknownConfig              SourceType = "unknown_config"
	SourceTypeTopicConfig                SourceType = "topic_config"
	SourceTypeDynamicBrokerConfig        SourceType = "dynamic_broker_config"
	SourceTypeDynamicDefaultBrokerConfig SourceType = "dynamic_default_broker_config"
	SourceTypeStaticBrokerConfig         SourceType = "static_broker_config"
	SourceTypeDefaultConfig              SourceType = "default_config"
	SourceTypeDynamicBrokerLoggerConfig  SourceType = "dynamic_broker_logger_config"
)
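
A sketch of using SourceType to tell explicit topic-level overrides apart from values inherited from broker or default configuration:

func isTopicOverride(source kafkatopic.SourceType) bool {
	// Only entries whose source is topic_config were set on the topic itself.
	return source == kafkatopic.SourceTypeTopicConfig
}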

type TagIn

type TagIn struct {
	Key   string `json:"key"`   // Tag key
	Value string `json:"value"` // Tag value
}

type TagOut

type TagOut struct {
	Key   string `json:"key"`   // Tag key
	Value string `json:"value"` // Tag value
}

type TopicOut

type TopicOut struct {
	CleanupPolicy       string         `json:"cleanup_policy"`                  // The retention policy to use on old segments. Possible values include 'delete', 'compact', or a comma-separated list of them. The default policy ('delete') will discard old segments when their retention time or size limit has been reached. The 'compact' setting will enable log compaction on the topic.
	MinInsyncReplicas   int            `json:"min_insync_replicas"`             // When a producer sets acks to 'all' (or '-1'), this configuration specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. If this minimum cannot be met, then the producer will raise an exception (either NotEnoughReplicas or NotEnoughReplicasAfterAppend). When used together, min.insync.replicas and acks allow you to enforce greater durability guarantees. A typical scenario would be to create a topic with a replication factor of 3, set min.insync.replicas to 2, and produce with acks of 'all'. This will ensure that the producer raises an exception if a majority of replicas do not receive a write.
	OwnerUserGroupId    string         `json:"owner_user_group_id"`             // The user group that owns this topic
	Partitions          int            `json:"partitions"`                      // Number of partitions
	RemoteStorageEnable *bool          `json:"remote_storage_enable,omitempty"` // Indicates whether tiered storage should be enabled.
	Replication         int            `json:"replication"`                     // Number of replicas
	RetentionBytes      int            `json:"retention_bytes"`                 // This configuration controls the maximum size a partition (which consists of log segments) can grow to before we will discard old log segments to free up space if we are using the 'delete' retention policy. By default there is no size limit, only a time limit. Since this limit is enforced at the partition level, multiply it by the number of partitions to compute the topic retention in bytes.
	RetentionHours      int            `json:"retention_hours"`                 // Retention period (hours)
	State               TopicStateType `json:"state"`                           // Topic state
	Tags                []TagOut       `json:"tags"`                            // Topic tags
	TopicDescription    string         `json:"topic_description"`               // Topic description
	TopicName           string         `json:"topic_name"`                      // Topic name
}

type TopicStateType added in v0.3.0

type TopicStateType string
const (
	TopicStateTypeActive      TopicStateType = "ACTIVE"
	TopicStateTypeConfiguring TopicStateType = "CONFIGURING"
	TopicStateTypeDeleting    TopicStateType = "DELETING"
)
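
A sketch of filtering a topic listing down to fully provisioned topics, assuming topics is a []TopicOut returned by a list call:

func activeTopics(topics []kafkatopic.TopicOut) []kafkatopic.TopicOut {
	var active []kafkatopic.TopicOut
	for _, t := range topics {
		if t.State == kafkatopic.TopicStateTypeActive {
			active = append(active, t)
		}
	}
	return active
}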

type UncleanLeaderElectionEnableOut

type UncleanLeaderElectionEnableOut struct {
	Source   SourceType `json:"source"` // Source of the Kafka topic configuration entry
	Synonyms []struct {
		Name   string     `json:"name"`   // Synonym name
		Source SourceType `json:"source"` // Source of the Kafka topic configuration entry
		Value  bool       `json:"value"`  // Synonym value
	} `json:"synonyms,omitempty"` // Configuration synonyms
	Value bool `json:"value"` // Indicates whether to enable replicas not in the ISR set to be elected as leader as a last resort, even though doing so may result in data loss.
}

UncleanLeaderElectionEnableOut unclean.leader.election.enable value, source and synonyms
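
A sketch of reporting where the effective unclean.leader.election.enable value comes from, assuming cfg is the UncleanLeaderElectionEnableOut entry taken from a topic's configuration and using the standard fmt package:

func reportUncleanElection(cfg kafkatopic.UncleanLeaderElectionEnableOut) {
	fmt.Printf("unclean.leader.election.enable=%v (source: %s)\n", cfg.Value, cfg.Source)
	for _, s := range cfg.Synonyms {
		// Synonyms show the same setting as seen at other configuration levels.
		fmt.Printf("  %s=%v from %s\n", s.Name, s.Value, s.Source)
	}
}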
