Documentation ¶
Index ¶
- Constants
- type AwsElasticsearchOutputConfig
- type AzureStorage
- type Buffer
- type CloudWatchOutput
- type Compress
- type DatadogOutput
- type ElasticsearchOutput
- type Endpoint
- type EndpointCredentials
- type Fields
- type FileOutputConfig
- type FluentdServer
- type Format
- type FormatRfc5424
- type ForwardOutput
- type GCSOutput
- type GelfOutputConfig
- type HTTPAuth
- type HTTPOutputConfig
- type KafkaOutputConfig
- type KinesisFirehoseAssumeRoleCredentials
- func (in *KinesisFirehoseAssumeRoleCredentials) DeepCopy() *KinesisFirehoseAssumeRoleCredentials
- func (in *KinesisFirehoseAssumeRoleCredentials) DeepCopyInto(out *KinesisFirehoseAssumeRoleCredentials)
- func (o *KinesisFirehoseAssumeRoleCredentials) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
- type KinesisFirehoseOutputConfig
- type KinesisFirehoseProcessCredentials
- func (in *KinesisFirehoseProcessCredentials) DeepCopy() *KinesisFirehoseProcessCredentials
- func (in *KinesisFirehoseProcessCredentials) DeepCopyInto(out *KinesisFirehoseProcessCredentials)
- func (o *KinesisFirehoseProcessCredentials) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
- type KinesisStreamAssumeRoleCredentials
- func (in *KinesisStreamAssumeRoleCredentials) DeepCopy() *KinesisStreamAssumeRoleCredentials
- func (in *KinesisStreamAssumeRoleCredentials) DeepCopyInto(out *KinesisStreamAssumeRoleCredentials)
- func (o *KinesisStreamAssumeRoleCredentials) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
- type KinesisStreamOutputConfig
- type KinesisStreamProcessCredentials
- func (in *KinesisStreamProcessCredentials) DeepCopy() *KinesisStreamProcessCredentials
- func (in *KinesisStreamProcessCredentials) DeepCopyInto(out *KinesisStreamProcessCredentials)
- func (o *KinesisStreamProcessCredentials) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
- type Label
- type LogDNAOutput
- type LogIntelligenceHeaders
- type LogZOutput
- type LokiOutput
- type MattermostOutputConfig
- type NewRelicOutputConfig
- type NullOutputConfig
- type OSSOutput
- type ObjectMetadata
- type OpenSearchEndpointCredentials
- type OpenSearchOutput
- type RdkafkaOptions
- type RedisOutputConfig
- type RelabelOutputConfig
- type S3AssumeRoleCredentials
- type S3InstanceProfileCredentials
- type S3OutputConfig
- type S3SharedCredentials
- type SQSOutputConfig
- type SplunkHecOutput
- type SyslogOutputConfig
- type VMwareLogInsightOutput
- type VMwareLogIntelligenceOutputConfig
- func (in *VMwareLogIntelligenceOutputConfig) DeepCopy() *VMwareLogIntelligenceOutputConfig
- func (in *VMwareLogIntelligenceOutputConfig) DeepCopyInto(out *VMwareLogIntelligenceOutputConfig)
- func (v *VMwareLogIntelligenceOutputConfig) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
Constants ¶
const (
	OneEyePathTemplate    string = "%v/%%Y/%%m/%%d/${$.kubernetes.namespace_name}/${$.kubernetes.pod_name}/${$.kubernetes.container_name}/"
	OneEyeObjectKeyFormat string = "%{path}%H:%M_%{index}.%{file_extension}"
	OneEyeTags            string = "tag,time,$.kubernetes.namespace_name,$.kubernetes.pod_name,$.kubernetes.container_name"
)
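The path template leaves a single %v placeholder for a caller-supplied prefix and escapes the strftime specifiers as %%Y/%%m/%%d so they survive Go formatting; fluentd later expands %Y/%m/%d and the ${$.kubernetes...} record accessors at runtime. A minimal sketch (the "logs/cluster-a" prefix is hypothetical, and the constant is redefined locally to keep the snippet self-contained):

package main

import "fmt"

// Redefined locally for a self-contained example; the real constant is OneEyePathTemplate.
const oneEyePathTemplate = "%v/%%Y/%%m/%%d/${$.kubernetes.namespace_name}/${$.kubernetes.pod_name}/${$.kubernetes.container_name}/"

func main() {
	// "logs/cluster-a" is a hypothetical prefix.
	path := fmt.Sprintf(oneEyePathTemplate, "logs/cluster-a")
	fmt.Println(path)
	// Output: logs/cluster-a/%Y/%m/%d/${$.kubernetes.namespace_name}/${$.kubernetes.pod_name}/${$.kubernetes.container_name}/
}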
Variables ¶
This section is empty.
Functions ¶
This section is empty.
Types ¶
type AwsElasticsearchOutputConfig ¶
type AwsElasticsearchOutputConfig struct {
	// flush_interval
	FlushInterval string `json:"flush_interval,omitempty"`

	// AWS Endpoint Credentials
	Endpoint *EndpointCredentials `json:"endpoint,omitempty"`

	// +docLink:"Format,../format/"
	Format *Format `json:"format,omitempty"`

	// +docLink:"Buffer,../buffer/"
	Buffer *Buffer `json:"buffer,omitempty"`

	// ElasticSearch
	*ElasticsearchOutput `json:",omitempty"`
}
+kubebuilder:object:generate=true +docName:"Amazon Elasticsearch" Send your logs to an Amazon Elasticsearch Service.
func (*AwsElasticsearchOutputConfig) DeepCopy ¶
func (in *AwsElasticsearchOutputConfig) DeepCopy() *AwsElasticsearchOutputConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AwsElasticsearchOutputConfig.
func (*AwsElasticsearchOutputConfig) DeepCopyInto ¶
func (in *AwsElasticsearchOutputConfig) DeepCopyInto(out *AwsElasticsearchOutputConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*AwsElasticsearchOutputConfig) ToDirective ¶
func (e *AwsElasticsearchOutputConfig) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
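A minimal sketch of building this config in Go and rendering it to a fluentd directive. The import path, the endpoint URL, and the id string are illustrative assumptions, and passing a nil secret loader is only assumed to be safe because no secret-backed fields are set here.

package main

import (
	"fmt"

	// Import path assumed from the Logging operator SDK layout; adjust to the module you vendor.
	"github.com/banzaicloud/logging-operator/pkg/sdk/logging/model/output"
)

func main() {
	conf := &output.AwsElasticsearchOutputConfig{
		FlushInterval: "10s",
		Endpoint: &output.EndpointCredentials{
			Region: "us-east-1",
			Url:    "https://search-example.us-east-1.es.amazonaws.com", // hypothetical endpoint
		},
		// The embedded ElasticsearchOutput carries the generic Elasticsearch settings.
		ElasticsearchOutput: &output.ElasticsearchOutput{
			LogstashFormat: true,
		},
	}

	// nil secret loader: assumed acceptable only because no secret fields are used above.
	directive, err := conf.ToDirective(nil, "aws-es-example")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", directive)
}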
type AzureStorage ¶
type AzureStorage struct { // Path prefix of the files on Azure Path string `json:"path,omitempty"` // Available in Logging operator version 4.5 and later. // Azure Cloud to use, for example, AzurePublicCloud, AzureChinaCloud, AzureGermanCloud, AzureUSGovernmentCloud, AZURESTACKCLOUD (in uppercase). // This field is supported only if the fluentd plugin honors it, for example, https://github.com/elsesiy/fluent-plugin-azure-storage-append-blob-lts AzureCloud string `json:"azure_cloud,omitempty"` // Your azure storage account // +docLink:"Secret,../secret/" AzureStorageAccount *secret.Secret `json:"azure_storage_account"` // Your azure storage access key // +docLink:"Secret,../secret/" AzureStorageAccessKey *secret.Secret `json:"azure_storage_access_key,omitempty"` // Your azure storage sas token // +docLink:"Secret,../secret/" AzureStorageSasToken *secret.Secret `json:"azure_storage_sas_token,omitempty"` // Your azure storage container AzureContainer string `json:"azure_container"` // Azure Instance Metadata Service API Version AzureImdsApiVersion string `json:"azure_imds_api_version,omitempty"` // Object key format (default: %{path}%{time_slice}_%{index}.%{file_extension}) AzureObjectKeyFormat string `json:"azure_object_key_format,omitempty"` // Automatically create container if not exists(default: true) AutoCreateContainer bool `json:"auto_create_container,omitempty"` // Compat format type: out_file, json, ltsv (default: out_file) Format string `json:"format,omitempty" plugin:"default:json"` // +docLink:"Buffer,../buffer/" Buffer *Buffer `json:"buffer,omitempty"` // The threshold for chunk flush performance check. // Parameter type is float, not time, default: 20.0 (seconds) // If chunk flush takes longer time than this threshold, fluentd logs warning message and increases metric fluentd_output_status_slow_flush_count. SlowFlushLogThreshold string `json:"slow_flush_log_threshold,omitempty"` }
+kubebuilder:object:generate=true +docName:"Output Config"
func (*AzureStorage) DeepCopy ¶
func (in *AzureStorage) DeepCopy() *AzureStorage
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureStorage.
func (*AzureStorage) DeepCopyInto ¶
func (in *AzureStorage) DeepCopyInto(out *AzureStorage)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*AzureStorage) ToDirective ¶
func (a *AzureStorage) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
type Buffer ¶
type Buffer struct { // Disable buffer section (default: false) Disabled bool `json:"disabled,omitempty" plugin:"default:false,hidden"` // Fluentd core bundles memory and file plugins. 3rd party plugins are also available when installed. Type string `json:"type,omitempty"` // When tag is specified as buffer chunk key, output plugin writes events into chunks separately per tags. (default: tag,time) Tags *string `json:"tags,omitempty"` // The path where buffer chunks are stored. The '*' is replaced with random characters. It's highly recommended to leave this default. (default: operator generated) Path string `json:"path,omitempty"` // The max size of each chunks: events will be written into chunks until the size of chunks become this size ChunkLimitSize string `json:"chunk_limit_size,omitempty"` // The max number of events that each chunks can store in it ChunkLimitRecords int `json:"chunk_limit_records,omitempty"` // The size limitation of this buffer plugin instance. Once the total size of stored buffer reached this threshold, all append operations will fail with error (and data will be lost) TotalLimitSize string `json:"total_limit_size,omitempty"` // The queue length limitation of this buffer plugin instance QueueLimitLength int `json:"queue_limit_length,omitempty"` // The percentage of chunk size threshold for flushing. output plugin will flush the chunk when actual size reaches chunk_limit_size * chunk_full_threshold (== 8MB * 0.95 in default) ChunkFullThreshold string `json:"chunk_full_threshold,omitempty"` // Limit the number of queued chunks. If you set smaller flush_interval, e.g. 1s, there are lots of small queued chunks in buffer. This is not good with file buffer because it consumes lots of fd resources when output destination has a problem. This parameter mitigates such situations. QueuedChunksLimitSize int `json:"queued_chunks_limit_size,omitempty"` // If you set this option to gzip, you can get Fluentd to compress data records before writing to buffer chunks. 
Compress string `json:"compress,omitempty"` // The value to specify to flush/write all buffer chunks at shutdown, or not FlushAtShutdown bool `json:"flush_at_shutdown,omitempty"` // Default: default (equals to lazy if time is specified as chunk key, interval otherwise) // lazy: flush/write chunks once per timekey // interval: flush/write chunks per specified time via flush_interval // immediate: flush/write chunks immediately after events are appended into chunks FlushMode string `json:"flush_mode,omitempty"` // Default: 60s FlushInterval string `json:"flush_interval,omitempty"` // The number of threads of output plugins, which is used to write chunks in parallel FlushThreadCount int `json:"flush_thread_count,omitempty"` // The sleep interval seconds of threads to wait next flush trial (when no chunks are waiting) FlushThreadInterval string `json:"flush_thread_interval,omitempty"` // The sleep interval seconds of threads between flushes when output plugin flushes waiting chunks next to next FlushThreadBurstInterval string `json:"flush_thread_burst_interval,omitempty"` // The timeout seconds until output plugin decides that async write operation fails DelayedCommitTimeout string `json:"delayed_commit_timeout,omitempty"` // How output plugin behaves when its buffer queue is full // throw_exception: raise exception to show this error in log // block: block processing of input plugin to emit events into that buffer // drop_oldest_chunk: drop/purge oldest chunk to accept newly incoming chunk OverflowAction string `json:"overflow_action,omitempty"` // The maximum seconds to retry to flush while failing, until plugin discards buffer chunks RetryTimeout string `json:"retry_timeout,omitempty"` // If true, plugin will ignore retry_timeout and retry_max_times options and retry flushing forever RetryForever *bool `json:"retry_forever,omitempty" plugin:"default:true"` // The maximum number of times to retry to flush while failing RetryMaxTimes int `json:"retry_max_times,omitempty"` // The ratio of retry_timeout to switch to use secondary while failing (Maximum valid value is 1.0) RetrySecondaryThreshold string `json:"retry_secondary_threshold,omitempty"` // exponential_backoff: wait seconds will become large exponentially per failures // periodic: output plugin will retry periodically with fixed intervals (configured via retry_wait) RetryType string `json:"retry_type,omitempty"` // Seconds to wait before next retry to flush, or constant factor of exponential backoff RetryWait string `json:"retry_wait,omitempty"` // The base number of exponential backoff for retries RetryExponentialBackoffBase string `json:"retry_exponential_backoff_base,omitempty"` // The maximum interval seconds for exponential backoff between retries while failing RetryMaxInterval string `json:"retry_max_interval,omitempty"` // If true, output plugin will retry after randomized interval not to do burst retries RetryRandomize bool `json:"retry_randomize,omitempty"` // Instead of storing unrecoverable chunks in the backup directory, just discard them. This option is new in Fluentd v1.2.6. 
DisableChunkBackup bool `json:"disable_chunk_backup,omitempty"` // Output plugin will flush chunks per specified time (enabled when time is specified in chunk keys) // +kubebuilder:validation:Optional Timekey string `json:"timekey" plugin:"default:10m"` // Output plugin writes chunks after timekey_wait seconds later after timekey expiration TimekeyWait string `json:"timekey_wait,omitempty" plugin:"default:1m"` // Output plugin decides to use UTC or not to format placeholders using timekey TimekeyUseUtc bool `json:"timekey_use_utc,omitempty"` // The timezone (-0700 or Asia/Tokyo) string for formatting timekey placeholders TimekeyZone string `json:"timekey_zone,omitempty"` }
func (*Buffer) DeepCopy ¶
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Buffer.
func (*Buffer) DeepCopyInto ¶
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*Buffer) ToDirective ¶
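For context, a sketch of a commonly tuned file buffer: time-keyed chunks, interval flushing, and a bounded retry policy. The import path and the specific sizes and intervals are illustrative assumptions, not recommended defaults.

package main

import (
	"fmt"

	// Import path assumed; adjust to the SDK module you vendor.
	"github.com/banzaicloud/logging-operator/pkg/sdk/logging/model/output"
)

func main() {
	retryForever := false
	buffer := &output.Buffer{
		Type:           "file",
		Timekey:        "1h", // chunk events per hour
		TimekeyWait:    "5m", // wait before flushing a closed time bucket
		TimekeyUseUtc:  true,
		FlushMode:      "interval",
		FlushInterval:  "30s",
		ChunkLimitSize: "8MB",
		TotalLimitSize: "512MB",
		OverflowAction: "block",
		RetryForever:   &retryForever, // opt out of the default infinite retry
		RetryMaxTimes:  10,
	}
	fmt.Printf("%+v\n", buffer)
}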
type CloudWatchOutput ¶
type CloudWatchOutput struct { // Create log group and stream automatically. (default: false) AutoCreateStream bool `json:"auto_create_stream,omitempty"` // AWS access key id // +docLink:"Secret,../secret/" AwsAccessKey *secret.Secret `json:"aws_key_id,omitempty"` // AWS secret key. // +docLink:"Secret,../secret/" AwsSecretKey *secret.Secret `json:"aws_sec_key,omitempty"` // Instance Profile Credentials call retries (default: nil) AwsInstanceProfileCredentialsRetries int `json:"aws_instance_profile_credentials_retries,omitempty"` // Enable AssumeRoleCredentials to authenticate, rather than the default credential hierarchy. See 'Cross-Account Operation' below for more detail. AwsUseSts bool `json:"aws_use_sts,omitempty"` // The role ARN to assume when using cross-account sts authentication AwsStsRoleArn string `json:"aws_sts_role_arn,omitempty"` // The session name to use with sts authentication (default: 'fluentd') AwsStsSessionName string `json:"aws_sts_session_name,omitempty"` // Use to set the number of threads pushing data to CloudWatch. (default: 1) Concurrency int `json:"concurrency,omitempty"` // Use this parameter to connect to the local API endpoint (for testing) Endpoint string `json:"endpoint,omitempty"` // Use to set an optional HTTP proxy HttpProxy string `json:"http_proxy,omitempty"` // Include time key as part of the log entry (default: UTC) IncludeTimeKey bool `json:"include_time_key,omitempty"` // Name of the library to be used to handle JSON data. For now, supported libraries are json (default) and yaml JsonHandler string `json:"json_handler,omitempty"` // Use localtime timezone for include_time_key output (overrides UTC default) Localtime bool `json:"localtime,omitempty"` // Set a hash with keys and values to tag the log group resource LogGroupAwsTags string `json:"log_group_aws_tags,omitempty"` // Specified field of records as AWS tags for the log group LogGroupAwsTagsKey string `json:"log_group_aws_tags_key,omitempty"` // Name of log group to store logs LogGroupName string `json:"log_group_name,omitempty"` // Specified field of records as log group name LogGroupNameKey string `json:"log_group_name_key,omitempty"` // Output rejected_log_events_info request log. 
(default: false) LogRejectedRequest string `json:"log_rejected_request,omitempty"` // Name of log stream to store logs LogStreamName string `json:"log_stream_name,omitempty"` // Specified field of records as log stream name LogStreamNameKey string `json:"log_stream_name_key,omitempty"` // Maximum number of events to send at once (default: 10000) MaxEventsPerBatch int `json:"max_events_per_batch,omitempty"` // Maximum length of the message MaxMessageLength int `json:"max_message_length,omitempty"` // Keys to send messages as events MessageKeys string `json:"message_keys,omitempty"` // If true, put_log_events_retry_limit will be ignored PutLogEventsDisableRetryLimit bool `json:"put_log_events_disable_retry_limit,omitempty"` // Maximum count of retry (if exceeding this, the events will be discarded) PutLogEventsRetryLimit int `json:"put_log_events_retry_limit,omitempty"` // Time before retrying PutLogEvents (retry interval increases exponentially like put_log_events_retry_wait * (2 ^ retry_count)) PutLogEventsRetryWait string `json:"put_log_events_retry_wait,omitempty"` // AWS Region Region string `json:"region"` // Remove field specified by log_group_aws_tags_key RemoveLogGroupAwsTagsKey string `json:"remove_log_group_aws_tags_key,omitempty"` // Remove field specified by log_group_name_key RemoveLogGroupNameKey string `json:"remove_log_group_name_key,omitempty"` // Remove field specified by log_stream_name_key RemoveLogStreamNameKey string `json:"remove_log_stream_name_key,omitempty"` // Remove field specified by retention_in_days RemoveRetentionInDays string `json:"remove_retention_in_days,omitempty"` // Use to set the expiry time for log group when created with auto_create_stream. (default to no expiry) RetentionInDays string `json:"retention_in_days,omitempty"` // Use specified field of records as retention period RetentionInDaysKey string `json:"retention_in_days_key,omitempty"` // Use tag as a group name UseTagAsGroup bool `json:"use_tag_as_group,omitempty"` // Use tag as a stream name UseTagAsStream bool `json:"use_tag_as_stream,omitempty"` // +docLink:"Buffer,../buffer/" Buffer *Buffer `json:"buffer,omitempty"` // The threshold for chunk flush performance check. // Parameter type is float, not time, default: 20.0 (seconds) // If chunk flush takes longer time than this threshold, fluentd logs warning message and increases metric fluentd_output_status_slow_flush_count. SlowFlushLogThreshold string `json:"slow_flush_log_threshold,omitempty"` // +docLink:"Format,../format/" Format *Format `json:"format,omitempty"` }
+kubebuilder:object:generate=true +docName:"Output Config"
func (*CloudWatchOutput) DeepCopy ¶
func (in *CloudWatchOutput) DeepCopy() *CloudWatchOutput
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudWatchOutput.
func (*CloudWatchOutput) DeepCopyInto ¶
func (in *CloudWatchOutput) DeepCopyInto(out *CloudWatchOutput)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*CloudWatchOutput) ToDirective ¶
func (c *CloudWatchOutput) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
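A sketch of a CloudWatch output that relies on the default credential chain (for example an instance profile, so no access keys are set) and writes one log stream per tag; the region, group name, and import path are assumptions.

package main

import (
	"fmt"

	// Import path assumed; adjust to the SDK module you vendor.
	"github.com/banzaicloud/logging-operator/pkg/sdk/logging/model/output"
)

func main() {
	cw := &output.CloudWatchOutput{
		Region:           "eu-west-1",
		LogGroupName:     "example-cluster-logs", // hypothetical log group
		UseTagAsStream:   true,                   // one stream per fluentd tag
		AutoCreateStream: true,
		// No AwsAccessKey/AwsSecretKey set: credentials come from the default chain.
		Buffer: &output.Buffer{Timekey: "30s", TimekeyWait: "10s"},
	}
	fmt.Printf("%+v\n", cw)
}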
type Compress ¶
type Compress struct {
	// Parquet compression codec. (uncompressed, snappy, gzip, lzo, brotli, lz4, zstd) (default: snappy)
	ParquetCompressionCodec string `json:"parquet_compression_codec,omitempty"`
	// Parquet file page size. (default: 8192 bytes)
	ParquetPageSize string `json:"parquet_page_size,omitempty"`
	// Parquet file row group size. (default: 128 MB)
	ParquetRowGroupSize string `json:"parquet_row_group_size,omitempty"`
	// Record data format type. (avro, csv, jsonl, msgpack, tsv, json) (default: msgpack)
	RecordType string `json:"record_type,omitempty"`
	// Schema type. (avro, bigquery) (default: avro)
	SchemaType string `json:"schema_type,omitempty"`
	// Path to schema file.
	SchemaFile string `json:"schema_file,omitempty"`
}
+kubebuilder:object:generate=true +docName:"Parquet compressor" parquet compressor
func (*Compress) DeepCopy ¶
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Compress.
func (*Compress) DeepCopyInto ¶
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type DatadogOutput ¶
type DatadogOutput struct { // This parameter is required in order to authenticate your fluent agent. (default: nil) // +docLink:"Secret,../secret/" ApiKey *secret.Secret `json:"api_key"` // Event format, if true, the event is sent in json format. Othwerwise, in plain text. (default: true) UseJson bool `json:"use_json,omitempty"` // Automatically include the Fluentd tag in the record. (default: false) IncludeTagKey bool `json:"include_tag_key,omitempty"` // Where to store the Fluentd tag. (default: "tag") TagKey string `json:"tag_key,omitempty"` //Name of the attribute which will contain timestamp of the log event. If nil, timestamp attribute is not added. (default: "@timestamp") TimestampKey string `json:"timestamp_key,omitempty"` // If true, the agent initializes a secure connection to Datadog. In clear TCP otherwise. (default: true) UseSsl bool `json:"use_ssl,omitempty"` // Disable SSL validation (useful for proxy forwarding) (default: false) NoSslValidation bool `json:"no_ssl_validation,omitempty"` // Port used to send logs over a SSL encrypted connection to Datadog. If use_http is disabled, use 10516 for the US region and 443 for the EU region. (default: "443") SslPort string `json:"ssl_port,omitempty"` // The number of retries before the output plugin stops. Set to -1 for unlimited retries (default: "-1") MaxRetries string `json:"max_retries,omitempty"` // The maximum time waited between each retry in seconds (default: "30") MaxBackoff string `json:"max_backoff,omitempty"` // Enable HTTP forwarding. If you disable it, make sure to change the port to 10514 or ssl_port to 10516 (default: true) UseHttp bool `json:"use_http,omitempty"` // Enable log compression for HTTP (default: true) UseCompression bool `json:"use_compression,omitempty"` // Set the log compression level for HTTP (1 to 9, 9 being the best ratio) (default: "6") CompressionLevel string `json:"compression_level,omitempty"` // This tells Datadog what integration it is (default: nil) DdSource string `json:"dd_source,omitempty"` // Multiple value attribute. Can be used to refine the source attribute (default: nil) DdSourcecategory string `json:"dd_sourcecategory,omitempty"` // Custom tags with the following format "key1:value1, key2:value2" (default: nil) DdTags string `json:"dd_tags,omitempty"` // Used by Datadog to identify the host submitting the logs. (default: "hostname -f") DdHostname string `json:"dd_hostname,omitempty"` // Used by Datadog to correlate between logs, traces and metrics. (default: nil) Service string `json:"service,omitempty"` // Proxy port when logs are not directly forwarded to Datadog and ssl is not used (default: "80") Port string `json:"port,omitempty"` // Proxy endpoint when logs are not directly forwarded to Datadog (default: "http-intake.logs.datadoghq.com") Host string `json:"host,omitempty"` // +docLink:"Buffer,../buffer/" Buffer *Buffer `json:"buffer,omitempty"` // The threshold for chunk flush performance check. // Parameter type is float, not time, default: 20.0 (seconds) // If chunk flush takes longer time than this threshold, fluentd logs warning message and increases metric fluentd_output_status_slow_flush_count. SlowFlushLogThreshold string `json:"slow_flush_log_threshold,omitempty"` }
+kubebuilder:object:generate=true +docName:"Output Config"
func (*DatadogOutput) DeepCopy ¶
func (in *DatadogOutput) DeepCopy() *DatadogOutput
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatadogOutput.
func (*DatadogOutput) DeepCopyInto ¶
func (in *DatadogOutput) DeepCopyInto(out *DatadogOutput)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*DatadogOutput) ToDirective ¶
func (a *DatadogOutput) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
type ElasticsearchOutput ¶
type ElasticsearchOutput struct { // You can specify the Elasticsearch host using this parameter. (default:localhost) Host string `json:"host,omitempty"` // You can specify the Elasticsearch port using this parameter.(default: 9200) Port int `json:"port,omitempty"` // You can specify multiple Elasticsearch hosts with separator ",". If you specify the `hosts` option, the `host` and `port` options are ignored. Hosts string `json:"hosts,omitempty"` // User for HTTP Basic authentication. This plugin will escape required URL encoded characters within `%{}` placeholders, for example, `%{demo+}` User string `json:"user,omitempty"` // Password for HTTP Basic authentication. // +docLink:"Secret,../secret/" Password *secret.Secret `json:"password,omitempty"` // Path for HTTP Basic authentication. Path string `json:"path,omitempty"` // Connection scheme (default: http) Scheme string `json:"scheme,omitempty"` // +kubebuilder:validation:Optional // Skip ssl verification (default: true) SslVerify *bool `json:"ssl_verify,omitempty" plugin:"default:true"` // If you want to configure SSL/TLS version, you can specify ssl_version parameter. [SSLv23, TLSv1, TLSv1_1, TLSv1_2] SslVersion string `json:"ssl_version,omitempty"` // Specify min/max SSL/TLS version SslMaxVersion string `json:"ssl_max_version,omitempty"` SslMinVersion string `json:"ssl_min_version,omitempty"` // CA certificate SSLCACert *secret.Secret `json:"ca_file,omitempty"` // Client certificate SSLClientCert *secret.Secret `json:"client_cert,omitempty"` // Client certificate key SSLClientCertKey *secret.Secret `json:"client_key,omitempty"` // Client key password SSLClientCertKeyPass *secret.Secret `json:"client_key_pass,omitempty"` // Enable Logstash log format.(default: false) LogstashFormat bool `json:"logstash_format,omitempty"` // Adds a @timestamp field to the log, following all settings logstash_format does, except without the restrictions on index_name. This allows one to log to an alias in Elasticsearch and utilize the rollover API.(default: false) IncludeTimestamp bool `json:"include_timestamp,omitempty"` // Set the Logstash prefix.(default: logstash) LogstashPrefix string `json:"logstash_prefix,omitempty"` // Set the Logstash prefix separator.(default: -) LogstashPrefixSeparator string `json:"logstash_prefix_separator,omitempty"` // Set the Logstash date format.(default: %Y.%m.%d) LogstashDateformat string `json:"logstash_dateformat,omitempty"` // The index name to write events to (default: fluentd) IndexName string `json:"index_name,omitempty"` // Set the index type for elasticsearch. This is the fallback if `target_type_key` is missing. (default: fluentd) TypeName string `json:"type_name,omitempty"` // This param is to set a pipeline id of your elasticsearch to be added into the request, you can configure ingest node. Pipeline string `json:"pipeline,omitempty"` // The format of the time stamp field (@timestamp or what you specify with time_key). This parameter only has an effect when logstash_format is true as it only affects the name of the index we write to. TimeKeyFormat string `json:"time_key_format,omitempty"` // Should the record not include a time_key, define the degree of sub-second time precision to preserve from the time portion of the routed event. TimePrecision string `json:"time_precision,omitempty"` // By default, when inserting records in Logstash format, @timestamp is dynamically created with the time at log ingestion. If you'd like to use a custom time, include an @timestamp with your record. 
TimeKey string `json:"time_key,omitempty"` // By default, the records inserted into index logstash-YYMMDD with UTC (Coordinated Universal Time). This option allows to use local time if you describe utc_index to false.(default: true) // +kubebuilder:validation:Optional UtcIndex *bool `json:"utc_index,omitempty" plugin:"default:true"` // Suppress type name to avoid warnings in Elasticsearch 7.x SuppressTypeName *bool `json:"suppress_type_name,omitempty"` // Tell this plugin to find the index name to write to in the record under this key in preference to other mechanisms. Key can be specified as path to nested record using dot ('.') as a separator. https://github.com/uken/fluent-plugin-elasticsearch#target_index_key TargetIndexKey string `json:"target_index_key,omitempty"` // Similar to target_index_key config, find the type name to write to in the record under this key (or nested record). If key not found in record - fallback to type_name.(default: fluentd) TargetTypeKey string `json:"target_type_key,omitempty"` // The name of the template to define. If a template by the name given is already present, it will be left unchanged, unless template_overwrite is set, in which case the template will be updated. TemplateName string `json:"template_name,omitempty"` // The path to the file containing the template to install. // +docLink:"Secret,../secret/" TemplateFile *secret.Secret `json:"template_file,omitempty"` // Specify index templates in form of hash. Can contain multiple templates. Templates string `json:"templates,omitempty"` // Specify the string and its value to be replaced in form of hash. Can contain multiple key value pair that would be replaced in the specified template_file. This setting only creates template and to add rollover index please check the rollover_index configuration. CustomizeTemplate string `json:"customize_template,omitempty"` // Specify this as true when an index with rollover capability needs to be created.(default: false) https://github.com/uken/fluent-plugin-elasticsearch#rollover_index RolloverIndex bool `json:"rollover_index,omitempty"` // Specify this to override the index date pattern for creating a rollover index.(default: now/d) IndexDatePattern *string `json:"index_date_pattern,omitempty"` // Specify the deflector alias which would be assigned to the rollover index created. This is useful in case of using the Elasticsearch rollover API DeflectorAlias string `json:"deflector_alias,omitempty"` // Specify the index prefix for the rollover index to be created.(default: logstash) IndexPrefix string `json:"index_prefix,omitempty"` // Specify the application name for the rollover index to be created.(default: default) ApplicationName *string `json:"application_name,omitempty"` // Always update the template, even if it already exists.(default: false) TemplateOverwrite bool `json:"template_overwrite,omitempty"` // You can specify times of retry putting template.(default: 10) MaxRetryPuttingTemplate string `json:"max_retry_putting_template,omitempty"` // Indicates whether to fail when max_retry_putting_template is exceeded. 
If you have multiple output plugin, you could use this property to do not fail on fluentd statup.(default: true) // +kubebuilder:validation:Optional FailOnPuttingTemplateRetryExceed *bool `json:"fail_on_putting_template_retry_exceed,omitempty" plugin:"default:true"` // fail_on_detecting_es_version_retry_exceed (default: true) // +kubebuilder:validation:Optional FailOnDetectingEsVersionRetryExceed *bool `json:"fail_on_detecting_es_version_retry_exceed,omitempty" plugin:"default:true"` // You can specify the number of times to retry fetching the Elasticsearch version.(default: 15) MaxRetryGetEsVersion string `json:"max_retry_get_es_version,omitempty"` // You can specify HTTP request timeout.(default: 5s) RequestTimeout string `json:"request_timeout,omitempty"` // You can tune how the elasticsearch-transport host reloading feature works.(default: true) // +kubebuilder:validation:Optional ReloadConnections *bool `json:"reload_connections,omitempty" plugin:"default:true"` //Indicates that the elasticsearch-transport will try to reload the nodes addresses if there is a failure while making the request. This can be useful to quickly remove a dead node from the list of addresses.(default: false) ReloadOnFailure bool `json:"reload_on_failure,omitempty"` // When `reload_connections` is true, this is the integer number of operations after which the plugin will reload the connections. The default value is 10000. ReloadAfter string `json:"reload_after,omitempty"` // You can set in the elasticsearch-transport how often dead connections from the elasticsearch-transport's pool will be resurrected.(default: 60s) ResurrectAfter string `json:"resurrect_after,omitempty"` // This will add the Fluentd tag in the JSON record.(default: false) IncludeTagKey bool `json:"include_tag_key,omitempty"` // This will add the Fluentd tag in the JSON record.(default: tag) TagKey string `json:"tag_key,omitempty"` // https://github.com/uken/fluent-plugin-elasticsearch#id_key IdKey string `json:"id_key,omitempty"` // Similar to parent_key config, will add _routing into elasticsearch command if routing_key is set and the field does exist in input event. RoutingKey string `json:"routing_key,omitempty"` // https://github.com/uken/fluent-plugin-elasticsearch#remove_keys RemoveKeys string `json:"remove_keys,omitempty"` // Remove keys on update will not update the configured keys in elasticsearch when a record is being updated. This setting only has any effect if the write operation is update or upsert. RemoveKeysOnUpdate string `json:"remove_keys_on_update,omitempty"` // This setting allows remove_keys_on_update to be configured with a key in each record, in much the same way as target_index_key works. RemoveKeysOnUpdateKey string `json:"remove_keys_on_update_key,omitempty"` // This setting allows custom routing of messages in response to bulk request failures. The default behavior is to emit failed records using the same tag that was provided. RetryTag string `json:"retry_tag,omitempty"` // The write_operation can be any of: (index,create,update,upsert)(default: index) WriteOperation string `json:"write_operation,omitempty"` // Indicates that the plugin should reset connection on any error (reconnect on next send). By default it will reconnect only on "host unreachable exceptions". We recommended to set this true in the presence of elasticsearch shield.(default: false) ReconnectOnError bool `json:"reconnect_on_error,omitempty"` // This is debugging purpose option to enable to obtain transporter layer log. 
(default: false) WithTransporterLog bool `json:"with_transporter_log,omitempty"` // With content_type application/x-ndjson, elasticsearch plugin adds application/x-ndjson as Content-Profile in payload. (default: application/json) ContentType string `json:"content_type,omitempty"` //With this option set to true, Fluentd manifests the index name in the request URL (rather than in the request body). You can use this option to enforce an URL-based access control. IncludeIndexInUrl bool `json:"include_index_in_url,omitempty"` // With logstash_format true, elasticsearch plugin parses timestamp field for generating index name. If the record has invalid timestamp value, this plugin emits an error event to @ERROR label with time_parse_error_tag configured tag. TimeParseErrorTag string `json:"time_parse_error_tag,omitempty"` // With http_backend typhoeus, elasticsearch plugin uses typhoeus faraday http backend. Typhoeus can handle HTTP keepalive. (default: excon) HttpBackend string `json:"http_backend,omitempty"` // With default behavior, Elasticsearch client uses Yajl as JSON encoder/decoder. Oj is the alternative high performance JSON encoder/decoder. When this parameter sets as true, Elasticsearch client uses Oj as JSON encoder/decoder. (default: false) PreferOjSerializer bool `json:"prefer_oj_serializer,omitempty"` // Elasticsearch will complain if you send object and concrete values to the same field. For example, you might have logs that look this, from different places: //{"people" => 100} {"people" => {"some" => "thing"}} //The second log line will be rejected by the Elasticsearch parser because objects and concrete values can't live in the same field. To combat this, you can enable hash flattening. FlattenHashes bool `json:"flatten_hashes,omitempty"` // Flatten separator FlattenHashesSeparator string `json:"flatten_hashes_separator,omitempty"` // When you use mismatched Elasticsearch server and client libraries, fluent-plugin-elasticsearch cannot send data into Elasticsearch. (default: false) ValidateClientVersion bool `json:"validate_client_version,omitempty"` // Default unrecoverable_error_types parameter is set up strictly. Because es_rejected_execution_exception is caused by exceeding Elasticsearch's thread pool capacity. Advanced users can increase its capacity, but normal users should follow default behavior. // If you want to increase it and forcibly retrying bulk request, please consider to change unrecoverable_error_types parameter from default value. // Change default value of thread_pool.bulk.queue_size in elasticsearch.yml) UnrecoverableErrorTypes string `json:"unrecoverable_error_types,omitempty"` // Because Elasticsearch plugin should change behavior each of Elasticsearch major versions. // For example, Elasticsearch 6 starts to prohibit multiple type_names in one index, and Elasticsearch 7 will handle only _doc type_name in index. // If you want to disable to verify Elasticsearch version at start up, set it as false. // When using the following configuration, ES plugin intends to communicate into Elasticsearch 6. (default: true) // +kubebuilder:validation:Optional VerifyEsVersionAtStartup *bool `json:"verify_es_version_at_startup,omitempty" plugin:"default:true"` // This parameter changes that ES plugin assumes default Elasticsearch version.(default: 5) DefaultElasticsearchVersion string `json:"default_elasticsearch_version,omitempty"` // This parameter adds additional headers to request. 
Example: {"token":"secret"} (default: {}) CustomHeaders string `json:"custom_headers,omitempty"` // api_key parameter adds authentication header. ApiKey *secret.Secret `json:"api_key,omitempty"` // By default, the error logger won't record the reason for a 400 error from the Elasticsearch API unless you set log_level to debug. However, this results in a lot of log spam, which isn't desirable if all you want is the 400 error reasons. You can set this true to capture the 400 error reasons without all the other debug logs. (default: false) LogEs400Reason bool `json:"log_es_400_reason,omitempty"` // By default, record body is wrapped by 'doc'. This behavior can not handle update script requests. You can set this to suppress doc wrapping and allow record body to be untouched. (default: false) SuppressDocWrap bool `json:"suppress_doc_wrap,omitempty"` // A list of exception that will be ignored - when the exception occurs the chunk will be discarded and the buffer retry mechanism won't be called. It is possible also to specify classes at higher level in the hierarchy. For example // `ignore_exceptions ["Elasticsearch::Transport::Transport::ServerError"]` // will match all subclasses of ServerError - Elasticsearch::Transport::Transport::Errors::BadRequest, Elasticsearch::Transport::Transport::Errors::ServiceUnavailable, etc. IgnoreExceptions string `json:"ignore_exceptions,omitempty"` // Indicates whether to backup chunk when ignore exception occurs. (default: true) // +kubebuilder:validation:Optional ExceptionBackup *bool `json:"exception_backup,omitempty" plugin:"default:true"` // Configure bulk_message request splitting threshold size. // Default value is 20MB. (20 * 1024 * 1024) // If you specify this size as negative number, bulk_message request splitting feature will be disabled. (default: 20MB) BulkMessageRequestThreshold string `json:"bulk_message_request_threshold,omitempty"` // The default Sniffer used by the Elasticsearch::Transport class works well when Fluentd has a direct connection to all of the Elasticsearch servers and can make effective use of the _nodes API. This doesn't work well when Fluentd must connect through a load balancer or proxy. The parameter sniffer_class_name gives you the ability to provide your own Sniffer class to implement whatever connection reload logic you require. In addition, there is a new Fluent::Plugin::ElasticsearchSimpleSniffer class which reuses the hosts given in the configuration, which is typically the hostname of the load balancer or proxy. https://github.com/uken/fluent-plugin-elasticsearch#sniffer-class-name SnifferClassName string `json:"sniffer_class_name,omitempty"` // +docLink:"Buffer,../buffer/" Buffer *Buffer `json:"buffer,omitempty"` // The threshold for chunk flush performance check. // Parameter type is float, not time, default: 20.0 (seconds) // If chunk flush takes longer time than this threshold, fluentd logs warning message and increases metric fluentd_output_status_slow_flush_count. SlowFlushLogThreshold string `json:"slow_flush_log_threshold,omitempty"` // Enable Index Lifecycle Management (ILM). EnableIlm bool `json:"enable_ilm,omitempty"` // Specify ILM policy id. IlmPolicyID string `json:"ilm_policy_id,omitempty"` // Specify ILM policy contents as Hash. IlmPolicy string `json:"ilm_policy,omitempty"` // Specify whether overwriting ilm policy or not. 
IlmPolicyOverwrite bool `json:"ilm_policy_overwrite,omitempty"` // Use @type elasticsearch_data_stream DataStreamEnable *bool `json:"data_stream_enable,omitempty" plugin:"hidden"` // You can specify Elasticsearch data stream name by this parameter. This parameter is mandatory for elasticsearch_data_stream. There are some limitations about naming rule. For more details https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-create-data-stream.html#indices-create-data-stream-api-path-params DataStreamName string `json:"data_stream_name,omitempty"` // Specify an existing index template for the data stream. If not present, a new template is created and named after the data stream. (default: data_stream_name) Further details here https://github.com/uken/fluent-plugin-elasticsearch#configuration---elasticsearch-output-data-stream DataStreamTemplateName string `json:"data_stream_template_name,omitempty"` // Specify an existing ILM policy to be applied to the data stream. If not present, either the specified template's or a new ILM default policy is applied. (default: data_stream_name) Further details here https://github.com/uken/fluent-plugin-elasticsearch#configuration---elasticsearch-output-data-stream DataStreamILMName string `json:"data_stream_ilm_name,omitempty"` // Specify data stream ILM policy contents as Hash. DataStreamIlmPolicy string `json:"data_stream_ilm_policy,omitempty"` // Specify whether overwriting data stream ilm policy or not. DataStreamIlmPolicyOverwrite bool `json:"data_stream_ilm_policy_overwrite,omitempty"` // If set to true, the output uses the [legacy index template format](https://www.elastic.co/guide/en/elasticsearch/reference/7.13/indices-templates-v1.html). Otherwise, it uses the [composable index template](https://www.elastic.co/guide/en/elasticsearch/reference/7.13/index-templates.html) format. (default: true) // +kubebuilder:validation:Optional UseLegacyTemplate *bool `json:"use_legacy_template,omitempty"` // Option for adding gzip compression of output data. Valid options: default_compression, best_compression, best_speed, no_compression. (default: no_compression) CompressionLevel string `json:"compression_level,omitempty"` }
+kubebuilder:object:generate=true +docName:"Elasticsearch" Send your logs to Elasticsearch
func (*ElasticsearchOutput) DeepCopy ¶
func (in *ElasticsearchOutput) DeepCopy() *ElasticsearchOutput
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchOutput.
func (*ElasticsearchOutput) DeepCopyInto ¶
func (in *ElasticsearchOutput) DeepCopyInto(out *ElasticsearchOutput)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*ElasticsearchOutput) ToDirective ¶
func (e *ElasticsearchOutput) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
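A sketch of a Logstash-style Elasticsearch output pointed at an in-cluster service; the host, prefix, and import path are hypothetical, and TLS and credential fields are omitted for brevity.

package main

import (
	"fmt"

	// Import path assumed; adjust to the SDK module you vendor.
	"github.com/banzaicloud/logging-operator/pkg/sdk/logging/model/output"
)

func main() {
	es := &output.ElasticsearchOutput{
		Host:           "elasticsearch.logging.svc", // hypothetical in-cluster service
		Port:           9200,
		Scheme:         "https",
		LogstashFormat: true,
		LogstashPrefix: "cluster-logs",
		Buffer: &output.Buffer{
			Timekey:     "1m",
			TimekeyWait: "30s",
		},
	}
	fmt.Printf("%+v\n", es)
}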
type Endpoint ¶
type Endpoint struct {
	// LogZ URL.
	URL string `json:"url,omitempty" plugin:"default:https://listener.logz.io"`
	// Port over which to connect to LogZ URL.
	Port int `json:"port,omitempty" plugin:"default:8071"`
	// LogZ API Token.
	// +docLink:"Secret,../secret/"
	Token *secret.Secret `json:"token,omitempty"`
}
Endpoint defines connection details for LogZ.io. +kubebuilder:object:generate=true +docName:"Endpoint"
func (*Endpoint) DeepCopy ¶
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoint.
func (*Endpoint) DeepCopyInto ¶
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*Endpoint) ToDirective ¶
ToDirective converts the Endpoint struct into a fluentd configuration directive.
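A sketch of an Endpoint for LogZ.io. The inline token value is a placeholder, and the secret.Secret Value field plus both import paths are assumptions about the shared SDK types; a real Output would normally reference a Kubernetes Secret instead of an inline value.

package main

import (
	"fmt"

	// Import paths assumed; adjust to the SDK and operator-tools modules you vendor.
	"github.com/banzaicloud/logging-operator/pkg/sdk/logging/model/output"
	"github.com/banzaicloud/operator-tools/pkg/secret"
)

func main() {
	ep := &output.Endpoint{
		URL:  "https://listener.logz.io",
		Port: 8071,
		// Inline value for brevity only; the Value field is an assumption about secret.Secret.
		Token: &secret.Secret{Value: "logz-token-placeholder"},
	}
	fmt.Printf("%+v\n", ep)
}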
type EndpointCredentials ¶
type EndpointCredentials struct { // AWS region. It should be in form like us-east-1, us-west-2. Default nil, which means try to find from environment variable AWS_REGION. Region string `json:"region,omitempty"` // AWS connection url. Url string `json:"url,omitempty"` // AWS access key id. This parameter is required when your agent is not running on EC2 instance with an IAM Role. AccessKeyId *secret.Secret `json:"access_key_id,omitempty"` // AWS secret key. This parameter is required when your agent is not running on EC2 instance with an IAM Role. SecretAccessKey *secret.Secret `json:"secret_access_key,omitempty"` // Typically, you can use AssumeRole for cross-account access or federation. AssumeRoleArn *secret.Secret `json:"assume_role_arn,omitempty"` // Set with AWS_CONTAINER_CREDENTIALS_RELATIVE_URI environment variable value EcsContainerCredentialsRelativeUri *secret.Secret `json:"ecs_container_credentials_relative_uri,omitempty"` // AssumeRoleWithWebIdentity https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html AssumeRoleSessionName *secret.Secret `json:"assume_role_session_name,omitempty"` // AssumeRoleWithWebIdentity https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html AssumeRoleWebIdentityTokenFile *secret.Secret `json:"assume_role_web_identity_token_file,omitempty"` // By default, the AWS Security Token Service (AWS STS) is available as a global service, and all AWS STS requests go to a single endpoint at https://sts.amazonaws.com. AWS recommends using Regional AWS STS endpoints instead of the global endpoint to reduce latency, build in redundancy, and increase session token validity. https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html StsCredentialsRegion *secret.Secret `json:"sts_credentials_region,omitempty"` }
+kubebuilder:object:generate=true +docName:"Endpoint Credentials" endpoint
func (*EndpointCredentials) DeepCopy ¶
func (in *EndpointCredentials) DeepCopy() *EndpointCredentials
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointCredentials.
func (*EndpointCredentials) DeepCopyInto ¶
func (in *EndpointCredentials) DeepCopyInto(out *EndpointCredentials)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*EndpointCredentials) ToDirective ¶
func (o *EndpointCredentials) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
type Fields ¶
func (Fields) ToDirective ¶
type FileOutputConfig ¶
type FileOutputConfig struct {
	// The Path of the file. The actual path is path + time + ".log" by default.
	Path string `json:"path"`
	// Whether the flushed chunk is appended to an existing file. By default it is not appended.
	Append bool `json:"append,omitempty"`
	// +kubebuilder:validation:Optional
	// Add path suffix (default: true)
	AddPathSuffix *bool `json:"add_path_suffix,omitempty" plugin:"default:true"`
	// The suffix of output result. (default: ".log")
	PathSuffix string `json:"path_suffix,omitempty"`
	// Create symlink to temporary buffered file when buffer_type is file. This is useful for tailing file content to check logs. (default: false)
	SymlinkPath bool `json:"symlink_path,omitempty"`
	// Compresses flushed files using gzip. No compression is performed by default.
	Compress string `json:"compress,omitempty"`
	// Performs compression again even if the buffer chunk is already compressed. (default: false)
	Recompress bool `json:"recompress,omitempty"`
	// +docLink:"Format,../format/"
	Format *Format `json:"format,omitempty"`
	// +docLink:"Buffer,../buffer/"
	Buffer *Buffer `json:"buffer,omitempty"`
	// The threshold for chunk flush performance check.
	// Parameter type is float, not time, default: 20.0 (seconds)
	// If chunk flush takes longer time than this threshold, fluentd logs warning message and increases metric fluentd_output_status_slow_flush_count.
	SlowFlushLogThreshold string `json:"slow_flush_log_threshold,omitempty"`
}
+kubebuilder:object:generate=true
func (*FileOutputConfig) DeepCopy ¶
func (in *FileOutputConfig) DeepCopy() *FileOutputConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileOutputConfig.
func (*FileOutputConfig) DeepCopyInto ¶
func (in *FileOutputConfig) DeepCopyInto(out *FileOutputConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*FileOutputConfig) ToDirective ¶
func (c *FileOutputConfig) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
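A sketch of a file output writing gzip-compressed, day-partitioned files; the path pattern (with ${tag} and strftime placeholders) and the import path are illustrative assumptions.

package main

import (
	"fmt"

	// Import path assumed; adjust to the SDK module you vendor.
	"github.com/banzaicloud/logging-operator/pkg/sdk/logging/model/output"
)

func main() {
	file := &output.FileOutputConfig{
		Path:     "/var/log/fluentd/${tag}/%Y/%m/%d", // hypothetical path pattern
		Append:   true,
		Compress: "gzip",
		Buffer: &output.Buffer{
			Timekey:       "1d",
			TimekeyWait:   "10m",
			TimekeyUseUtc: true,
		},
	}
	fmt.Printf("%+v\n", file)
}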
type FluentdServer ¶
type FluentdServer struct {
	// The IP address or host name of the server.
	Host string `json:"host"`
	// The name of the server. Used for logging and certificate verification in TLS transport (when host is an address).
	Name string `json:"name,omitempty"`
	// The port number of the host. Note that both TCP packets (event stream) and UDP packets (heartbeat message) are sent to this port. (default: 24224)
	Port int `json:"port,omitempty"`
	// The shared key for authentication.
	SharedKey *secret.Secret `json:"shared_key,omitempty"`
	// The username for authentication.
	Username *secret.Secret `json:"username,omitempty"`
	// The password for authentication.
	Password *secret.Secret `json:"password,omitempty"`
	// Marks a node as the standby node for an Active-Standby model between Fluentd nodes. When an active node goes down, the standby node is promoted to an active node. The standby node is not used by the out_forward plugin until then.
	Standby bool `json:"standby,omitempty"`
	// The load balancing weight. If the weight of one server is 20 and the weight of the other server is 30, events are sent in a 2:3 ratio. (default: 60).
	Weight int `json:"weight,omitempty"`
}
+kubebuilder:object:generate=true +docName:"Fluentd Server" server
func (*FluentdServer) DeepCopy ¶
func (in *FluentdServer) DeepCopy() *FluentdServer
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentdServer.
func (*FluentdServer) DeepCopyInto ¶
func (in *FluentdServer) DeepCopyInto(out *FluentdServer)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*FluentdServer) ToDirective ¶
func (f *FluentdServer) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
type Format ¶
type Format struct {
	// Output line formatting: out_file,json,ltsv,csv,msgpack,hash,single_value (default: json)
	// +kubebuilder:validation:Enum=out_file;json;ltsv;csv;msgpack;hash;single_value
	Type string `json:"type,omitempty"`
	// When type is single_value add '\n' to the end of the message (default: true)
	AddNewline *bool `json:"add_newline,omitempty"`
	// When type is single_value specify the key holding information
	MessageKey string `json:"message_key,omitempty"`
}
+kubebuilder:object:generate=true
func (*Format) DeepCopy ¶
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Format.
func (*Format) DeepCopyInto ¶
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*Format) ToDirective ¶
type FormatRfc5424 ¶
type FormatRfc5424 struct {
	// Output line formatting: out_file,json,ltsv,csv,msgpack,hash,single_value (default: json)
	// +kubebuilder:validation:Enum=out_file;json;ltsv;csv;msgpack;hash;single_value
	Type string `json:"type,omitempty"`
	// Prepends message length for syslog transmission (default: true)
	Rfc6587MessageSize *bool `json:"rfc6587_message_size,omitempty"`
	// Sets host name in syslog from field in fluentd, delimited by '.' (default: hostname)
	HostnameField string `json:"hostname_field,omitempty"`
	// Sets app name in syslog from field in fluentd, delimited by '.' (default: app_name)
	AppNameField string `json:"app_name_field,omitempty"`
	// Sets proc id in syslog from field in fluentd, delimited by '.' (default: proc_id)
	ProcIdField string `json:"proc_id_field,omitempty"`
	// Sets msg id in syslog from field in fluentd, delimited by '.' (default: message_id)
	MessageIdField string `json:"message_id_field,omitempty"`
	// Sets structured data in syslog from field in fluentd, delimited by '.' (default: structured_data)
	StructuredDataField string `json:"structured_data_field,omitempty"`
	// Sets log in syslog from field in fluentd, delimited by '.' (default: log)
	LogField string `json:"log_field,omitempty"`
}
+kubebuilder:object:generate=true
func (*FormatRfc5424) DeepCopy ¶
func (in *FormatRfc5424) DeepCopy() *FormatRfc5424
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FormatRfc5424.
func (*FormatRfc5424) DeepCopyInto ¶
func (in *FormatRfc5424) DeepCopyInto(out *FormatRfc5424)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*FormatRfc5424) ToDirective ¶
func (f *FormatRfc5424) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
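A sketch mapping record fields into RFC 5424 syslog fields via the dot-delimited selectors described above; the record field names and the import path are hypothetical.

package main

import (
	"fmt"

	// Import path assumed; adjust to the SDK module you vendor.
	"github.com/banzaicloud/logging-operator/pkg/sdk/logging/model/output"
)

func main() {
	format := &output.FormatRfc5424{
		HostnameField: "kubernetes.host",       // hypothetical record field
		AppNameField:  "kubernetes.labels.app", // hypothetical record field
		LogField:      "message",
	}
	fmt.Printf("%+v\n", format)
}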
type ForwardOutput ¶
type ForwardOutput struct { // Server definitions at least one is required // +docLink:"Server,#fluentd-server" FluentdServers []FluentdServer `json:"servers"` // The transport protocol to use [ tcp, tls ] Transport string `json:"transport,omitempty"` // Change the protocol to at-least-once. The plugin waits the ack from destination's in_forward plugin. RequireAckResponse bool `json:"require_ack_response,omitempty"` // This option is used when require_ack_response is true. This default value is based on popular tcp_syn_retries. (default: 190) AckResponseTimeout int `json:"ack_response_timeout,omitempty"` // The timeout time when sending event logs. (default: 60) SendTimeout int `json:"send_timeout,omitempty"` // The timeout time for socket connect. When the connection timed out during establishment, Errno::ETIMEDOUT is raised. ConnectTimeout int `json:"connect_timeout,omitempty"` // The wait time before accepting a server fault recovery. (default: 10) RecoverWait int `json:"recover_wait,omitempty"` // The transport protocol to use for heartbeats. Set "none" to disable heartbeat. [transport, tcp, udp, none] HeartbeatType string `json:"heartbeat_type,omitempty"` // The interval of the heartbeat packer. (default: 1) HeartbeatInterval int `json:"heartbeat_interval,omitempty"` // Use the "Phi accrual failure detector" to detect server failure. (default: true) PhiFailureDetector bool `json:"phi_failure_detector,omitempty"` // The threshold parameter used to detect server faults. (default: 16) //`phi_threshold` is deeply related to `heartbeat_interval`. If you are using longer `heartbeat_interval`, please use the larger `phi_threshold`. Otherwise you will see frequent detachments of destination servers. The default value 16 is tuned for `heartbeat_interval` 1s. PhiThreshold int `json:"phi_threshold,omitempty"` // The hard timeout used to detect server failure. The default value is equal to the send_timeout parameter. (default: 60) HardTimeout int `json:"hard_timeout,omitempty"` // Set TTL to expire DNS cache in seconds. Set 0 not to use DNS Cache. (default: 0) ExpireDnsCache int `json:"expire_dns_cache,omitempty"` // Enable client-side DNS round robin. Uniform randomly pick an IP address to send data when a hostname has several IP addresses. // `heartbeat_type udp` is not available with `dns_round_robin true`. Use `heartbeat_type tcp` or `heartbeat_type none`. DnsRoundRobin bool `json:"dns_round_robin,omitempty"` // Ignore DNS resolution and errors at startup time. IgnoreNetworkErrorsAtStartup bool `json:"ignore_network_errors_at_startup,omitempty"` // The default version of TLS transport. [TLSv1_1, TLSv1_2] (default: TLSv1_2) TlsVersion string `json:"tls_version,omitempty"` // The cipher configuration of TLS transport. (default: ALL:!aNULL:!eNULL:!SSLv2) TlsCiphers string `json:"tls_ciphers,omitempty"` // Skip all verification of certificates or not. (default: false) TlsInsecureMode bool `json:"tls_insecure_mode,omitempty"` // Allow self signed certificates or not. (default: false) TlsAllowSelfSignedCert bool `json:"tls_allow_self_signed_cert,omitempty"` // Verify hostname of servers and certificates or not in TLS transport. (default: true) TlsVerifyHostname bool `json:"tls_verify_hostname,omitempty"` // The additional CA certificate path for TLS. TlsCertPath *secret.Secret `json:"tls_cert_path,omitempty"` // The client certificate path for TLS TlsClientCertPath *secret.Secret `json:"tls_client_cert_path,omitempty"` // The client private key path for TLS. 
TlsClientPrivateKeyPath *secret.Secret `json:"tls_client_private_key_path,omitempty"` // The client private key passphrase for TLS. TlsClientPrivateKeyPassphrase *secret.Secret `json:"tls_client_private_key_passphrase,omitempty"` // The certificate thumbprint for searching from Windows system certstore This parameter is for Windows only. TlsCertThumbprint string `json:"tls_cert_thumbprint,omitempty"` // The certificate logical store name on Windows system certstore. This parameter is for Windows only. TlsCertLogicalStoreName string `json:"tls_cert_logical_store_name,omitempty"` // Enable to use certificate enterprise store on Windows system certstore. This parameter is for Windows only. TlsCertUseEnterpriseStore bool `json:"tls_cert_use_enterprise_store,omitempty"` // Enable keepalive connection. (default: false) Keepalive bool `json:"keepalive,omitempty"` // Expired time of keepalive. Default value is nil, which means to keep connection as long as possible. (default: 0) KeepaliveTimeout int `json:"keepalive_timeout,omitempty"` // +docLink:"Security,../../common/security/" Security *common.Security `json:"security,omitempty"` // Verify that a connection can be made with one of out_forward nodes at the time of startup. (default: false) VerifyConnectionAtStartup bool `json:"verify_connection_at_startup,omitempty"` // +docLink:"Buffer,../buffer/" Buffer *Buffer `json:"buffer,omitempty"` // The threshold for chunk flush performance check. // Parameter type is float, not time, default: 20.0 (seconds) // If chunk flush takes longer time than this threshold, fluentd logs warning message and increases metric fluentd_output_status_slow_flush_count. SlowFlushLogThreshold string `json:"slow_flush_log_threshold,omitempty"` // Format forwarded events time as an epoch Integer with second resolution. Useful when forwarding to old ( <= 0.12 ) Fluentd servers. TimeAsInteger bool `json:"time_as_integer,omitempty"` }
+kubebuilder:object:generate=true
func (*ForwardOutput) DeepCopy ¶
func (in *ForwardOutput) DeepCopy() *ForwardOutput
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ForwardOutput.
func (*ForwardOutput) DeepCopyInto ¶
func (in *ForwardOutput) DeepCopyInto(out *ForwardOutput)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*ForwardOutput) ToDirective ¶
func (f *ForwardOutput) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
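Illustrative only (not part of the generated API): a minimal sketch of building a ForwardOutput in Go and rendering it into a fluentd directive. The FluentdServer fields (Host, Port), the secretLoader variable, and the "forward-example" id are assumptions for illustration.

fwd := &ForwardOutput{
    // At least one server is required.
    FluentdServers: []FluentdServer{
        {Host: "fluentd-aggregator.logging.svc", Port: 24224}, // assumed FluentdServer fields
    },
    Transport:          "tls",
    TlsVerifyHostname:  true,
    RequireAckResponse: true,
}
// secretLoader is an existing secret.SecretLoader; the id becomes part of the generated plugin config.
directive, err := fwd.ToDirective(secretLoader, "forward-example")
if err != nil {
    // handle the error
}
_ = directive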
type GCSOutput ¶
type GCSOutput struct { // Project identifier for GCS Project string `json:"project"` // Path of GCS service account credentials JSON file Keyfile string `json:"keyfile,omitempty"` // GCS service account credentials in JSON format // +docLink:"Secret,../secret/" CredentialsJson *secret.Secret `json:"credentials_json,omitempty"` // Number of times to retry requests on server error ClientRetries int `json:"client_retries,omitempty"` // Default timeout to use in requests ClientTimeout int `json:"client_timeout,omitempty"` // Name of a GCS bucket Bucket string `json:"bucket"` // Format of GCS object keys (default: `%{path}%{time_slice}_%{index}.%{file_extension}`) ObjectKeyFormat string `json:"object_key_format,omitempty"` // Path prefix of the files on GCS Path string `json:"path,omitempty"` // Archive format on GCS: gzip json text (default: gzip) StoreAs string `json:"store_as,omitempty"` // Enable the decompressive form of transcoding Transcoding bool `json:"transcoding,omitempty"` // Create GCS bucket if it does not exist (default: true) AutoCreateBucket bool `json:"auto_create_bucket,omitempty"` // Max length of `%{hex_random}` placeholder (4-16) (default: 4) HexRandomLength int `json:"hex_random_length,omitempty"` // Overwrite already existing path (default: false) Overwrite bool `json:"overwrite,omitempty"` // Permission for the object in GCS: `auth_read` `owner_full` `owner_read` `private` `project_private` `public_read` // +kubebuilder:validation:enum=auth_read,owner_full,owner_read,private,project_private,public_read Acl string `json:"acl,omitempty"` // Storage class of the file: `dra` `nearline` `coldline` `multi_regional` `regional` `standard` // +kubebuilder:validation:enum=dra,nearline,coldline,multi_regional,regional,standard StorageClass string `json:"storage_class,omitempty"` // Customer-supplied, AES-256 encryption key EncryptionKey string `json:"encryption_key,omitempty"` // User-provided web-safe keys and arbitrary string values that will be returned with requests for the file as "x-goog-meta-" response headers. // +docLink:"Object Metadata,#objectmetadata" ObjectMetadata []ObjectMetadata `json:"object_metadata,omitempty"` // +docLink:"Format,../format/" Format *Format `json:"format,omitempty"` // +docLink:"Buffer,../buffer/" Buffer *Buffer `json:"buffer,omitempty"` // The threshold for chunk flush performance check. // Parameter type is float, not time, default: 20.0 (seconds) // If chunk flush takes longer time than this threshold, Fluentd logs a warning message and increases the `fluentd_output_status_slow_flush_count` metric. SlowFlushLogThreshold string `json:"slow_flush_log_threshold,omitempty"` }
+kubebuilder:object:generate=true
func (*GCSOutput) DeepCopy ¶
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCSOutput.
func (*GCSOutput) DeepCopyInto ¶
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*GCSOutput) ToDirective ¶
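A hedged sketch of constructing a GCSOutput; the project, bucket, and path values are placeholders, and the ToDirective call assumes the same (secretLoader, id) signature used by the other outputs.

gcs := &GCSOutput{
    Project: "my-gcp-project", // required
    Bucket:  "my-log-bucket",  // required
    Path:    "logs/${tag}/%Y/%m/%d/",
    StoreAs: "gzip",
    // CredentialsJson can reference a Kubernetes secret via *secret.Secret.
}
directive, err := gcs.ToDirective(secretLoader, "gcs-example") // handle directive and err as in the ForwardOutput sketch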
type GelfOutputConfig ¶ added in v0.12.0
type GelfOutputConfig struct { // Destination host Host string `json:"host"` // Destination host port Port int `json:"port"` // Transport Protocol (default: "udp") Protocol string `json:"protocol,omitempty"` // Enable TLS (default: false) TLS *bool `json:"tls,omitempty"` // TLS Options. // For details, see [https://github.com/graylog-labs/gelf-rb/blob/72916932b789f7a6768c3cdd6ab69a3c942dbcef/lib/gelf/transport/tcp_tls.rb#L7-L12](https://github.com/graylog-labs/gelf-rb/blob/72916932b789f7a6768c3cdd6ab69a3c942dbcef/lib/gelf/transport/tcp_tls.rb#L7-L12). (default: {}) TLSOptions map[string]string `json:"tls_options,omitempty"` // MaxBytes specifies the maximum size, in bytes, of each individual log message. // For details, see [https://github.com/Graylog2/graylog2-server/issues/873](https://github.com/Graylog2/graylog2-server/issues/873) // Available since ghcr.io/kube-logging/fluentd:v1.16-4.10-full (default: 3200) MaxBytes int `json:"max_bytes,omitempty"` // UdpTransportType specifies the UDP chunk size by choosing either WAN or LAN mode. // The choice between WAN and LAN affects the UDP chunk size depending on whether you are sending logs within your local network (LAN) or over a longer route (e.g., through the internet). Set this option accordingly. // For more details, see: // [https://github.com/manet-marketing/gelf_redux/blob/9db64353b6672805152c17642ea8ad39eafb5875/lib/gelf/notifier.rb#L22](https://github.com/manet-marketing/gelf_redux/blob/9db64353b6672805152c17642ea8ad39eafb5875/lib/gelf/notifier.rb#L22) // Available since ghcr.io/kube-logging/logging-operator/fluentd:5.3.0-full (default: WAN) UdpTransportType string `json:"udp_transport_type,omitempty"` // Available since ghcr.io/kube-logging/fluentd:v1.16-4.8-full // +docLink:"Buffer,../buffer/" Buffer *Buffer `json:"buffer,omitempty"` }
+kubebuilder:object:generate=true +docName:"Output Config"
func (*GelfOutputConfig) DeepCopy ¶ added in v0.12.0
func (in *GelfOutputConfig) DeepCopy() *GelfOutputConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GelfOutputConfig.
func (*GelfOutputConfig) DeepCopyInto ¶ added in v0.12.0
func (in *GelfOutputConfig) DeepCopyInto(out *GelfOutputConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*GelfOutputConfig) ToDirective ¶ added in v0.12.0
func (s *GelfOutputConfig) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
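For illustration, a minimal GelfOutputConfig pointing at a hypothetical Graylog input; the host, port, and TLS flag are placeholder values.

tlsEnabled := true
gelf := &GelfOutputConfig{
    Host:     "graylog.example.com", // required
    Port:     12201,                 // required
    Protocol: "tcp",
    TLS:      &tlsEnabled,
}
directive, err := gelf.ToDirective(secretLoader, "gelf-example") // secretLoader as above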
type HTTPAuth ¶
type HTTPAuth struct { // Username for basic authentication. // +docLink:"Secret,../secret/" Username *secret.Secret `json:"username"` // Password for basic authentication. // +docLink:"Secret,../secret/" Password *secret.Secret `json:"password"` }
+kubebuilder:object:generate=true +docName:"HTTP auth config" http_auth
func (*HTTPAuth) DeepCopy ¶
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPAuth.
func (*HTTPAuth) DeepCopyInto ¶
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*HTTPAuth) ToDirective ¶
type HTTPOutputConfig ¶
type HTTPOutputConfig struct { // Endpoint for HTTP request. Endpoint string `json:"endpoint"` // Method for HTTP request. [post, put] (default: post) HTTPMethod string `json:"http_method,omitempty"` // Proxy for HTTP request. Proxy string `json:"proxy,omitempty"` // Content-Type for HTTP request. ContentType string `json:"content_type,omitempty"` // Using array format of JSON. This parameter is used and valid only for json format. When json_array is true, Content-Type should be application/json so that JSON data can be used for the HTTP request body. (default: false) JsonArray bool `json:"json_array,omitempty"` // The option to compress HTTP request body. [text,gzip] (default: text) Compress string `json:"compress,omitempty"` // +docLink:"Format,../format/" Format *Format `json:"format,omitempty"` // Additional headers for HTTP request. Headers map[string]string `json:"headers,omitempty"` // Additional headers from placeholders for HTTP request. HeadersFromPlaceholders map[string]string `json:"headers_from_placeholders,omitempty"` // Connection open timeout in seconds. OpenTimeout int `json:"open_timeout,omitempty"` // Read timeout in seconds. ReadTimeout int `json:"read_timeout,omitempty"` // TLS timeout in seconds. SSLTimeout int `json:"ssl_timeout,omitempty"` // Try to reuse connections. This will improve performance. (default: false) ReuseConnections bool `json:"reuse_connections,omitempty"` // The default version of TLS transport. [TLSv1_1, TLSv1_2] (default: TLSv1_2) TlsVersion string `json:"tls_version,omitempty"` // The cipher configuration of TLS transport. (default: ALL:!aNULL:!eNULL:!SSLv2) TlsCiphers string `json:"tls_ciphers,omitempty"` // The CA certificate path for TLS. TlsCACertPath *secret.Secret `json:"tls_ca_cert_path,omitempty"` // The client certificate path for TLS. TlsClientCertPath *secret.Secret `json:"tls_client_cert_path,omitempty"` // The client private key path for TLS. TlsPrivateKeyPath *secret.Secret `json:"tls_private_key_path,omitempty"` // The client private key passphrase for TLS. TlsPrivateKeyPassphrase *secret.Secret `json:"tls_private_key_passphrase,omitempty"` // The verify mode of TLS. [peer, none] (default: peer) TlsVerifyMode string `json:"tls_verify_mode,omitempty"` // Raise UnrecoverableError when the response code is non-success, 1xx/3xx/4xx/5xx. If false, the plugin logs an error message instead of raising UnrecoverableError. (default: true) ErrorResponseAsUnrecoverable *bool `json:"error_response_as_unrecoverable,omitempty"` // List of retryable response codes. If the response code is included in this list, the plugin retries the buffer flush. Since Fluentd v2, status code 503 is going to be removed from the default. (default: [503]) RetryableResponseCodes []int `json:"retryable_response_codes,omitempty"` // +docLink:"HTTP auth,#http-auth-config" Auth *HTTPAuth `json:"auth,omitempty"` // +docLink:"Buffer,../buffer/" Buffer *Buffer `json:"buffer,omitempty"` // The threshold for chunk flush performance check. // Parameter type is float, not time, default: 20.0 (seconds) // If chunk flush takes longer time than this threshold, Fluentd logs a warning message and increases the `fluentd_output_status_slow_flush_count` metric. SlowFlushLogThreshold string `json:"slow_flush_log_threshold,omitempty"` }
+kubebuilder:object:generate=true +docName:"Output Config"
func (*HTTPOutputConfig) DeepCopy ¶
func (in *HTTPOutputConfig) DeepCopy() *HTTPOutputConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPOutputConfig.
func (*HTTPOutputConfig) DeepCopyInto ¶
func (in *HTTPOutputConfig) DeepCopyInto(out *HTTPOutputConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*HTTPOutputConfig) ToDirective ¶
func (c *HTTPOutputConfig) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
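A minimal sketch of an HTTPOutputConfig with basic authentication; the endpoint, header values, and the usernameSecret/passwordSecret *secret.Secret references are assumed to be prepared elsewhere.

httpOut := &HTTPOutputConfig{
    Endpoint:   "https://collector.example.com/ingest", // required
    HTTPMethod: "post",
    JsonArray:  true,
    Headers:    map[string]string{"X-Scope": "cluster-a"},
    Auth: &HTTPAuth{
        Username: usernameSecret, // *secret.Secret references defined elsewhere
        Password: passwordSecret,
    },
}
directive, err := httpOut.ToDirective(secretLoader, "http-example")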
type KafkaOutputConfig ¶
type KafkaOutputConfig struct { // Use rdkafka2 instead of the legacy kafka2 output plugin. This plugin requires fluentd image version v1.16-4.9-full or higher. UseRdkafka bool `json:"use_rdkafka,omitempty"` // RdkafkaOptions represents the global configuration properties for librdkafka. RdkafkaOptions *RdkafkaOptions `json:"rdkafka_options,omitempty"` // The list of all seed brokers, with their host and port information. Brokers string `json:"brokers"` // Topic Key (default: "topic") TopicKey string `json:"topic_key,omitempty"` // Partition (default: "partition") PartitionKey string `json:"partition_key,omitempty"` // Partition Key (default: "partition_key") PartitionKeyKey string `json:"partition_key_key,omitempty"` // Message Key (default: "message_key") MessageKeyKey string `json:"message_key_key,omitempty"` // Client ID (default: "kafka") ClientId string `json:"client_id,omitempty"` // The name of default topic (default: nil). DefaultTopic string `json:"default_topic,omitempty"` // The name of default partition key (default: nil). DefaultPartitionKey string `json:"default_partition_key,omitempty"` // The name of default message key (default: nil). DefaultMessageKey string `json:"default_message_key,omitempty"` // Exclude Topic key (default: false) ExcludeTopicKey bool `json:"exclude_topic_key,omitempty"` // Exclude Partition key (default: false) ExcludePartitionKey bool `json:"exclude_partion_key,omitempty"` // Get Kafka Client log (default: false) GetKafkaClientLog bool `json:"get_kafka_client_log,omitempty"` // Headers (default: {}) Headers map[string]string `json:"headers,omitempty"` // Headers from Record (default: {}) HeadersFromRecord map[string]string `json:"headers_from_record,omitempty"` // Use default for unknown topics (default: false) UseDefaultForUnknownTopic bool `json:"use_default_for_unknown_topic,omitempty"` // Idempotent (default: false) Idempotent bool `json:"idempotent,omitempty"` // SASL over SSL (default: true) // +kubebuilder:validation:Optional SaslOverSSL *bool `json:"sasl_over_ssl,omitempty"` Principal string `json:"principal,omitempty"` Keytab *secret.Secret `json:"keytab,omitempty"` // Username when using PLAIN/SCRAM SASL authentication Username *secret.Secret `json:"username,omitempty"` // Password when using PLAIN/SCRAM SASL authentication Password *secret.Secret `json:"password,omitempty"` // If set, use SCRAM authentication with specified mechanism. When unset, default to PLAIN authentication ScramMechanism string `json:"scram_mechanism,omitempty"` // Number of times to retry sending of messages to a leader (default: 1) MaxSendRetries int `json:"max_send_retries,omitempty"` // Max byte size to send message to avoid MessageSizeTooLarge. Messages over the limit will be dropped (default: no limit) MaxSendLimitBytes int `json:"max_send_limit_bytes,omitempty"` // The number of acks required per request (default: -1). RequiredAcks int `json:"required_acks,omitempty"` // How long the producer waits for acks. The unit is seconds (default: nil => Uses default of ruby-kafka library) AckTimeout int `json:"ack_timeout,omitempty"` // The codec the producer uses to compress messages (default: nil). The available options are gzip and snappy. CompressionCodec string `json:"compression_codec,omitempty"` // Maximum value of total message size to be included in one batch transmission. (default: 4096). KafkaAggMaxBytes int `json:"kafka_agg_max_bytes,omitempty"` // Maximum number of messages to include in one batch transmission. (default: nil). 
KafkaAggMaxMessages int `json:"kafka_agg_max_messages,omitempty"` // Discard the record where Kafka DeliveryFailed occurred (default: false) DiscardKafkaDeliveryFailed bool `json:"discard_kafka_delivery_failed,omitempty"` // System's CA cert store (default: false) SSLCACertsFromSystem *bool `json:"ssl_ca_certs_from_system,omitempty"` // CA certificate SSLCACert *secret.Secret `json:"ssl_ca_cert,omitempty"` // Client certificate SSLClientCert *secret.Secret `json:"ssl_client_cert,omitempty"` // Client certificate chain SSLClientCertChain *secret.Secret `json:"ssl_client_cert_chain,omitempty"` // Client certificate key SSLClientCertKey *secret.Secret `json:"ssl_client_cert_key,omitempty"` // Verify certificate hostname SSLVerifyHostname *bool `json:"ssl_verify_hostname,omitempty"` // +docLink:"Format,../format/" Format *Format `json:"format"` // +docLink:"Buffer,../buffer/" Buffer *Buffer `json:"buffer,omitempty"` // The threshold for chunk flush performance check. // Parameter type is float, not time, default: 20.0 (seconds) // If chunk flush takes longer time than this threshold, Fluentd logs a warning message and increases the `fluentd_output_status_slow_flush_count` metric. SlowFlushLogThreshold string `json:"slow_flush_log_threshold,omitempty"` }
+kubebuilder:object:generate=true +docName:"Kafka" Send your logs to Kafka. Setting use_rdkafka to true opts for rdkafka2, which offers higher performance compared to ruby-kafka. (Note: requires fluentd image version v1.16-4.9-full or higher) -[more info](https://github.com/fluent/fluent-plugin-kafka#output-plugin)
func (*KafkaOutputConfig) DeepCopy ¶
func (in *KafkaOutputConfig) DeepCopy() *KafkaOutputConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaOutputConfig.
func (*KafkaOutputConfig) DeepCopyInto ¶
func (in *KafkaOutputConfig) DeepCopyInto(out *KafkaOutputConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*KafkaOutputConfig) ToDirective ¶
func (e *KafkaOutputConfig) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
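A hedged sketch of a KafkaOutputConfig; the broker addresses and topic are placeholders, and the empty Format is only there because the format field is required by its json tag (see the Format docs for its actual options).

kafka := &KafkaOutputConfig{
    Brokers:      "kafka-0.kafka:9092,kafka-1.kafka:9092", // required
    DefaultTopic: "cluster-logs",
    Format:       &Format{}, // required field; configure per the Format documentation
}
directive, err := kafka.ToDirective(secretLoader, "kafka-example")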
type KinesisFirehoseAssumeRoleCredentials ¶
type KinesisFirehoseAssumeRoleCredentials struct { // The Amazon Resource Name (ARN) of the role to assume RoleArn string `json:"role_arn"` // An identifier for the assumed role session RoleSessionName string `json:"role_session_name"` // An IAM policy in JSON format Policy string `json:"policy,omitempty"` // The duration, in seconds, of the role session (900-3600) DurationSeconds string `json:"duration_seconds,omitempty"` // A unique identifier that is used by third parties when assuming roles in their customers' accounts. ExternalId string `json:"external_id,omitempty"` }
+kubebuilder:object:generate=true +docName:"Assume Role Credentials" assume_role_credentials
func (*KinesisFirehoseAssumeRoleCredentials) DeepCopy ¶
func (in *KinesisFirehoseAssumeRoleCredentials) DeepCopy() *KinesisFirehoseAssumeRoleCredentials
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisFirehoseAssumeRoleCredentials.
func (*KinesisFirehoseAssumeRoleCredentials) DeepCopyInto ¶
func (in *KinesisFirehoseAssumeRoleCredentials) DeepCopyInto(out *KinesisFirehoseAssumeRoleCredentials)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*KinesisFirehoseAssumeRoleCredentials) ToDirective ¶
func (o *KinesisFirehoseAssumeRoleCredentials) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
type KinesisFirehoseOutputConfig ¶
type KinesisFirehoseOutputConfig struct { // Name of the delivery stream to put data. DeliveryStreamName string `json:"delivery_stream_name"` // If it is enabled, the plugin adds new line character (\n) to each serialized record. //Before appending \n, plugin calls chomp and removes separator from the end of each record as chomp_record is true. Therefore, you don't need to enable chomp_record option when you use kinesis_firehose output with default configuration (append_new_line is true). If you want to set append_new_line false, you can choose chomp_record false (default) or true (compatible format with plugin v2). (Default:true) AppendNewLine *bool `json:"append_new_line,omitempty"` // AWS access key id. This parameter is required when your agent is not running on EC2 instance with an IAM Role. AWSKeyId *secret.Secret `json:"aws_key_id,omitempty"` // AWS secret key. This parameter is required when your agent is not running on EC2 instance with an IAM Role. AWSSECKey *secret.Secret `json:"aws_sec_key,omitempty"` // AWS session token. This parameter is optional, but can be provided if using MFA or temporary credentials when your agent is not running on EC2 instance with an IAM Role. AWSSESToken *secret.Secret `json:"aws_ses_token,omitempty"` // The number of attempts to make (with exponential backoff) when loading instance profile credentials from the EC2 metadata service using an IAM role. Defaults to 5 retries. AWSIAMRetries int `json:"aws_iam_retries,omitempty"` // Typically, you can use AssumeRole for cross-account access or federation. AssumeRoleCredentials *KinesisFirehoseAssumeRoleCredentials `json:"assume_role_credentials,omitempty"` // This loads AWS access credentials from an external process. ProcessCredentials *KinesisFirehoseProcessCredentials `json:"process_credentials,omitempty"` // AWS region of your stream. It should be in form like us-east-1, us-west-2. Default nil, which means try to find from environment variable AWS_REGION. Region string `json:"region,omitempty"` // The plugin will put multiple records to Amazon Kinesis Data Streams in batches using PutRecords. A set of records in a batch may fail for reasons documented in the Kinesis Service API Reference for PutRecords. Failed records will be retried retries_on_batch_request times RetriesOnBatchRequest int `json:"retries_on_batch_request,omitempty"` // Boolean, default true. If enabled, when after retrying, the next retrying checks the number of succeeded records on the former batch request and reset exponential backoff if there is any success. Because batch request could be composed by requests across shards, simple exponential backoff for the batch request wouldn't work some cases. ResetBackoffIfSuccess bool `json:"reset_backoff_if_success,omitempty"` // Integer, default 500. The number of max count of making batch request from record chunk. It can't exceed the default value because it's API limit. BatchRequestMaxCount int `json:"batch_request_max_count,omitempty"` // Integer. The number of max size of making batch request from record chunk. It can't exceed the default value because it's API limit. BatchRequestMaxSize int `json:"batch_request_max_size,omitempty"` // +docLink:"Format,../format/" Format *Format `json:"format,omitempty"` // +docLink:"Buffer,../buffer/" Buffer *Buffer `json:"buffer,omitempty"` // The threshold for chunk flush performance check. 
// Parameter type is float, not time, default: 20.0 (seconds) // If chunk flush takes longer time than this threshold, fluentd logs warning message and increases metric fluentd_output_status_slow_flush_count. SlowFlushLogThreshold string `json:"slow_flush_log_threshold,omitempty"` }
+kubebuilder:object:generate=true +docName:"KinesisFirehose" Send your logs to a Kinesis Firehose
func (*KinesisFirehoseOutputConfig) DeepCopy ¶
func (in *KinesisFirehoseOutputConfig) DeepCopy() *KinesisFirehoseOutputConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisFirehoseOutputConfig.
func (*KinesisFirehoseOutputConfig) DeepCopyInto ¶
func (in *KinesisFirehoseOutputConfig) DeepCopyInto(out *KinesisFirehoseOutputConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*KinesisFirehoseOutputConfig) ToDirective ¶
func (e *KinesisFirehoseOutputConfig) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
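For illustration only: a KinesisFirehoseOutputConfig that assumes an IAM role; the delivery stream name, region, and role ARN are placeholders.

firehose := &KinesisFirehoseOutputConfig{
    DeliveryStreamName: "cluster-logs", // required
    Region:             "us-east-1",
    AssumeRoleCredentials: &KinesisFirehoseAssumeRoleCredentials{
        RoleArn:         "arn:aws:iam::123456789012:role/log-writer", // required
        RoleSessionName: "fluentd-firehose",                          // required
    },
}
directive, err := firehose.ToDirective(secretLoader, "firehose-example")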
type KinesisFirehoseProcessCredentials ¶
type KinesisFirehoseProcessCredentials struct { // Command more info: https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/ProcessCredentials.html Process string `json:"process"` }
+kubebuilder:object:generate=true +docName:"Process Credentials" process_credentials
func (*KinesisFirehoseProcessCredentials) DeepCopy ¶
func (in *KinesisFirehoseProcessCredentials) DeepCopy() *KinesisFirehoseProcessCredentials
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisFirehoseProcessCredentials.
func (*KinesisFirehoseProcessCredentials) DeepCopyInto ¶
func (in *KinesisFirehoseProcessCredentials) DeepCopyInto(out *KinesisFirehoseProcessCredentials)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*KinesisFirehoseProcessCredentials) ToDirective ¶
func (o *KinesisFirehoseProcessCredentials) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
type KinesisStreamAssumeRoleCredentials ¶
type KinesisStreamAssumeRoleCredentials struct { // The Amazon Resource Name (ARN) of the role to assume RoleArn string `json:"role_arn"` // An identifier for the assumed role session RoleSessionName string `json:"role_session_name"` // An IAM policy in JSON format Policy string `json:"policy,omitempty"` // The duration, in seconds, of the role session (900-3600) DurationSeconds string `json:"duration_seconds,omitempty"` // A unique identifier that is used by third parties when assuming roles in their customers' accounts. ExternalId string `json:"external_id,omitempty"` }
+kubebuilder:object:generate=true +docName:"Assume Role Credentials" assume_role_credentials
func (*KinesisStreamAssumeRoleCredentials) DeepCopy ¶
func (in *KinesisStreamAssumeRoleCredentials) DeepCopy() *KinesisStreamAssumeRoleCredentials
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisStreamAssumeRoleCredentials.
func (*KinesisStreamAssumeRoleCredentials) DeepCopyInto ¶
func (in *KinesisStreamAssumeRoleCredentials) DeepCopyInto(out *KinesisStreamAssumeRoleCredentials)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*KinesisStreamAssumeRoleCredentials) ToDirective ¶
func (o *KinesisStreamAssumeRoleCredentials) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
type KinesisStreamOutputConfig ¶
type KinesisStreamOutputConfig struct { // Name of the stream to put data. StreamName string `json:"stream_name"` // A key to extract partition key from JSON object. Default nil, which means partition key will be generated randomly. PartitionKey string `json:"partition_key,omitempty"` // AWS access key id. This parameter is required when your agent is not running on EC2 instance with an IAM Role. AWSKeyId *secret.Secret `json:"aws_key_id,omitempty"` // AWS secret key. This parameter is required when your agent is not running on EC2 instance with an IAM Role. AWSSECKey *secret.Secret `json:"aws_sec_key,omitempty"` // AWS session token. This parameter is optional, but can be provided if using MFA or temporary credentials when your agent is not running on EC2 instance with an IAM Role. AWSSESToken *secret.Secret `json:"aws_ses_token,omitempty"` // The number of attempts to make (with exponential backoff) when loading instance profile credentials from the EC2 metadata service using an IAM role. Defaults to 5 retries. AWSIAMRetries int `json:"aws_iam_retries,omitempty"` // Typically, you can use AssumeRole for cross-account access or federation. AssumeRoleCredentials *KinesisStreamAssumeRoleCredentials `json:"assume_role_credentials,omitempty"` // This loads AWS access credentials from an external process. ProcessCredentials *KinesisStreamProcessCredentials `json:"process_credentials,omitempty"` // AWS region of your stream. It should be in form like us-east-1, us-west-2. Default nil, which means try to find from environment variable AWS_REGION. Region string `json:"region,omitempty"` // The plugin will put multiple records to Amazon Kinesis Data Streams in batches using PutRecords. A set of records in a batch may fail for reasons documented in the Kinesis Service API Reference for PutRecords. Failed records will be retried retries_on_batch_request times RetriesOnBatchRequest int `json:"retries_on_batch_request,omitempty"` // Boolean, default true. If enabled, when after retrying, the next retrying checks the number of succeeded records on the former batch request and reset exponential backoff if there is any success. Because batch request could be composed by requests across shards, simple exponential backoff for the batch request wouldn't work some cases. ResetBackoffIfSuccess bool `json:"reset_backoff_if_success,omitempty"` // Integer, default 500. The number of max count of making batch request from record chunk. It can't exceed the default value because it's API limit. BatchRequestMaxCount int `json:"batch_request_max_count,omitempty"` // Integer. The number of max size of making batch request from record chunk. It can't exceed the default value because it's API limit. BatchRequestMaxSize int `json:"batch_request_max_size,omitempty"` // +docLink:"Format,../format/" Format *Format `json:"format,omitempty"` // +docLink:"Buffer,../buffer/" Buffer *Buffer `json:"buffer,omitempty"` // The threshold for chunk flush performance check. // Parameter type is float, not time, default: 20.0 (seconds) // If chunk flush takes longer time than this threshold, fluentd logs warning message and increases metric fluentd_output_status_slow_flush_count. SlowFlushLogThreshold string `json:"slow_flush_log_threshold,omitempty"` }
+kubebuilder:object:generate=true +docName:"KinesisStream" Send your logs to a Kinesis Stream
func (*KinesisStreamOutputConfig) DeepCopy ¶
func (in *KinesisStreamOutputConfig) DeepCopy() *KinesisStreamOutputConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisStreamOutputConfig.
func (*KinesisStreamOutputConfig) DeepCopyInto ¶
func (in *KinesisStreamOutputConfig) DeepCopyInto(out *KinesisStreamOutputConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*KinesisStreamOutputConfig) ToDirective ¶
func (e *KinesisStreamOutputConfig) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
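The Kinesis Data Streams output follows the same pattern; a minimal sketch with placeholder stream name, region, and partition key field.

stream := &KinesisStreamOutputConfig{
    StreamName:   "cluster-logs", // required
    Region:       "us-west-2",
    PartitionKey: "kubernetes.pod_name", // record key used to derive the partition key
}
directive, err := stream.ToDirective(secretLoader, "kinesis-example")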
type KinesisStreamProcessCredentials ¶
type KinesisStreamProcessCredentials struct { // Command more info: https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/ProcessCredentials.html Process string `json:"process"` }
+kubebuilder:object:generate=true +docName:"Process Credentials" process_credentials
func (*KinesisStreamProcessCredentials) DeepCopy ¶
func (in *KinesisStreamProcessCredentials) DeepCopy() *KinesisStreamProcessCredentials
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisStreamProcessCredentials.
func (*KinesisStreamProcessCredentials) DeepCopyInto ¶
func (in *KinesisStreamProcessCredentials) DeepCopyInto(out *KinesisStreamProcessCredentials)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*KinesisStreamProcessCredentials) ToDirective ¶
func (o *KinesisStreamProcessCredentials) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
type Label ¶
func (Label) ToDirective ¶
type LogDNAOutput ¶
type LogDNAOutput struct { // LogDNA Api key ApiKey string `json:"api_key"` // Hostname HostName string `json:"hostname"` // Application name App string `json:"app,omitempty"` // Comma-Separated List of Tags, Optional Tags string `json:"tags,omitempty"` // HTTPS POST Request Timeout, Optional. Supports s and ms suffixes (default: 30 s) RequestTimeout string `json:"request_timeout,omitempty"` // Custom Ingester URL, Optional (default: `https://logs.logdna.com`) IngesterDomain string `json:"ingester_domain,omitempty"` // Custom Ingester Endpoint, Optional (default: /logs/ingest) IngesterEndpoint string `json:"ingester_endpoint,omitempty"` // +docLink:"Buffer,../buffer/" Buffer *Buffer `json:"buffer,omitempty"` // The threshold for chunk flush performance check. // Parameter type is float, not time, default: 20.0 (seconds) // If chunk flush takes longer time than this threshold, Fluentd logs a warning message and increases the `fluentd_output_status_slow_flush_count` metric. SlowFlushLogThreshold string `json:"slow_flush_log_threshold,omitempty"` }
+kubebuilder:object:generate=true +docName:"LogDNA" Send your logs to LogDNA
func (*LogDNAOutput) DeepCopy ¶
func (in *LogDNAOutput) DeepCopy() *LogDNAOutput
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogDNAOutput.
func (*LogDNAOutput) DeepCopyInto ¶
func (in *LogDNAOutput) DeepCopyInto(out *LogDNAOutput)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*LogDNAOutput) ToDirective ¶
func (l *LogDNAOutput) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
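A minimal, illustrative LogDNAOutput; the ingestion key, hostname, and tags are placeholder values.

logdna := &LogDNAOutput{
    ApiKey:   "<logdna-ingestion-key>", // required
    HostName: "my-cluster",             // required
    App:      "kubernetes",
    Tags:     "prod,eu-west-1",
}
directive, err := logdna.ToDirective(secretLoader, "logdna-example")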
type LogIntelligenceHeaders ¶ added in v0.12.0
type LogIntelligenceHeaders struct { // Authorization Bearer token for http request to VMware Log Intelligence // +docLink:"Secret,../secret/" Authorization *secret.Secret `json:"authorization"` // Content Type for http request to VMware Log Intelligence ContentType string `json:"content_type" plugin:"default:application/json"` // Structure for http request to VMware Log Intelligence Structure string `json:"structure" plugin:"default:simple"` }
+kubebuilder:object:generate=true +docName:"VMwareLogIntelligenceHeaders" headers https://github.com/vmware/fluent-plugin-vmware-log-intelligence?tab=readme-ov-file#label-3Cheaders-3E
func (*LogIntelligenceHeaders) DeepCopy ¶ added in v0.12.0
func (in *LogIntelligenceHeaders) DeepCopy() *LogIntelligenceHeaders
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogIntelligenceHeaders.
func (*LogIntelligenceHeaders) DeepCopyInto ¶ added in v0.12.0
func (in *LogIntelligenceHeaders) DeepCopyInto(out *LogIntelligenceHeaders)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type LogZOutput ¶
type LogZOutput struct { // Define LogZ endpoint URL Endpoint *Endpoint `json:"endpoint"` // Should the appender add a timestamp to your logs on their process time (recommended). OutputIncludeTime *bool `json:"output_include_time,omitempty"` // Should the appender add the fluentd tag to the document, called "fluentd_tag" OutputIncludeTags *bool `json:"output_include_tags,omitempty"` // Timeout in seconds that the http persistent connection will stay open without traffic. HTTPIdleTimeout int `json:"http_idle_timeout,omitempty"` // How many times to resend failed bulks. RetryCount int `json:"retry_count,omitempty"` // How long to sleep initially between retries, exponential step-off. RetrySleep int `json:"retry_sleep,omitempty"` // Limit to the size of the Logz.io upload bulk. Defaults to 1000000 bytes leaving about 24kB for overhead. BulkLimit int `json:"bulk_limit,omitempty"` // Limit to the size of the Logz.io warning message when a record exceeds bulk_limit to prevent a recursion when Fluent warnings are sent to the Logz.io output. BulkLimitWarningLimit int `json:"bulk_limit_warning_limit,omitempty"` // Should the plugin ship the logs in gzip compression. Default is false. Gzip bool `json:"gzip,omitempty"` // +docLink:"Buffer,../buffer/" Buffer *Buffer `json:"buffer,omitempty"` // The threshold for chunk flush performance check. // Parameter type is float, not time, default: 20.0 (seconds) // If chunk flush takes longer time than this threshold, Fluentd logs a warning message and increases the `fluentd_output_status_slow_flush_count` metric. SlowFlushLogThreshold string `json:"slow_flush_log_threshold,omitempty"` }
+kubebuilder:object:generate=true +docName:"Logzio" LogZ Send your logs to LogZ.io
func (*LogZOutput) DeepCopy ¶
func (in *LogZOutput) DeepCopy() *LogZOutput
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogZOutput.
func (*LogZOutput) DeepCopyInto ¶
func (in *LogZOutput) DeepCopyInto(out *LogZOutput)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*LogZOutput) ToDirective ¶
func (e *LogZOutput) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
ToDirective converts LogZOutput to fluentd configuration.
type LokiOutput ¶
type LokiOutput struct { // The url of the Loki server to send logs to. (default: `https://logs-us-west1.grafana.net`) Url string `json:"url,omitempty"` // Specify a username if the Loki server requires authentication. // +docLink:"Secret,../secret/" Username *secret.Secret `json:"username,omitempty"` // Specify password if the Loki server requires authentication. // +docLink:"Secret,../secret/" Password *secret.Secret `json:"password,omitempty"` // TLS: parameters for presenting a client certificate // +docLink:"Secret,../secret/" Cert *secret.Secret `json:"cert,omitempty"` // TLS: parameters for presenting a client certificate // +docLink:"Secret,../secret/" Key *secret.Secret `json:"key,omitempty"` // TLS: CA certificate file for server certificate verification // +docLink:"Secret,../secret/" CaCert *secret.Secret `json:"ca_cert,omitempty"` // TLS: disable server certificate verification (default: false) InsecureTLS *bool `json:"insecure_tls,omitempty"` // Loki is a multi-tenant log storage platform and all requests sent must include a tenant. Tenant string `json:"tenant,omitempty"` // Set of labels to include with every Loki stream. Labels Label `json:"labels,omitempty"` // Set of extra labels to include with every Loki stream. ExtraLabels map[string]string `json:"extra_labels,omitempty"` // Format to use when flattening the record to a log line: json, key_value (default: key_value) LineFormat string `json:"line_format,omitempty" plugin:"default:json"` // Extract kubernetes labels as loki labels (default: false) ExtractKubernetesLabels *bool `json:"extract_kubernetes_labels,omitempty"` // Comma separated list of needless record keys to remove (default: []) RemoveKeys []string `json:"remove_keys,omitempty"` // If a record only has 1 key, then just set the log line to the value and discard the key. (default: false) DropSingleKey *bool `json:"drop_single_key,omitempty"` // Configure Kubernetes metadata in a Prometheus like format (default: false) ConfigureKubernetesLabels *bool `json:"configure_kubernetes_labels,omitempty"` // whether to include the fluentd_thread label when multiple threads are used for flushing. (default: true) IncludeThreadLabel *bool `json:"include_thread_label,omitempty"` // +docLink:"Buffer,../buffer/" Buffer *Buffer `json:"buffer,omitempty"` // The threshold for chunk flush performance check. // Parameter type is float, not time, default: 20.0 (seconds) // If chunk flush takes longer time than this threshold, Fluentd logs a warning message and increases the `fluentd_output_status_slow_flush_count` metric. SlowFlushLogThreshold string `json:"slow_flush_log_threshold,omitempty"` }
+kubebuilder:object:generate=true +docName:"Output Config"
func (*LokiOutput) DeepCopy ¶
func (in *LokiOutput) DeepCopy() *LokiOutput
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LokiOutput.
func (*LokiOutput) DeepCopyInto ¶
func (in *LokiOutput) DeepCopyInto(out *LokiOutput)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*LokiOutput) ToDirective ¶
func (l *LokiOutput) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
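A hedged sketch of a LokiOutput that adds static extra labels and extracts Kubernetes labels; the URL and label values are placeholders.

extract := true
loki := &LokiOutput{
    Url:                     "http://loki.logging.svc:3100",
    ExtraLabels:             map[string]string{"cluster": "prod"},
    ExtractKubernetesLabels: &extract,
}
directive, err := loki.ToDirective(secretLoader, "loki-example")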
type MattermostOutputConfig ¶ added in v0.10.0
type MattermostOutputConfig struct { // Incoming Webhook URI (Required for Incoming Webhook mode). WebhookURL *secret.Secret `json:"webhook_url"` // The ID of the channel where you want to receive the information. ChannelID string `json:"channel_id,omitempty"` // Color of the message you are sending, in hexadecimal format. (default: #A9A9A9) MessageColor string `json:"message_color,omitempty"` // The title you want to add to the message. (default: fluent_title_default) MessageTitle string `json:"message_title,omitempty"` // The message you want to send. It can be a static message, which you add at this point, or it can include the Fluentd record information via the %s placeholder. Message string `json:"message,omitempty"` // You can set the communication channel if it uses TLS. (default: true) EnableTLS *bool `json:"enable_tls,omitempty"` // The path of the CA certificates. CAPath *secret.Secret `json:"ca_path,omitempty"` }
+kubebuilder:object:generate=true +docName:"Output Config"
func (*MattermostOutputConfig) DeepCopy ¶ added in v0.10.0
func (in *MattermostOutputConfig) DeepCopy() *MattermostOutputConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MattermostOutputConfig.
func (*MattermostOutputConfig) DeepCopyInto ¶ added in v0.10.0
func (in *MattermostOutputConfig) DeepCopyInto(out *MattermostOutputConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*MattermostOutputConfig) ToDirective ¶ added in v0.10.0
func (c *MattermostOutputConfig) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
type NewRelicOutputConfig ¶
type NewRelicOutputConfig struct { // New Relic API Insert key // +docLink:"Secret,../secret/" APIKey *secret.Secret `json:"api_key,omitempty"` // New Relic License Key (recommended) // +docLink:"Secret,../secret/" // LicenseKey *secret.Secret `json:"license_key,omitempty"` LicenseKey *secret.Secret `json:"license_key,omitempty"` // New Relic ingestion endpoint // +docLink:"Secret,../secret/" BaseURI string `json:"base_uri,omitempty" plugin:"default:https://log-api.newrelic.com/log/v1"` // +docLink:"Format,../format/" Format *Format `json:"format,omitempty"` // +docLink:"Buffer,../buffer/" Buffer *Buffer `json:"buffer,omitempty"` }
+kubebuilder:object:generate=true +docName:"Output Config"
func (*NewRelicOutputConfig) DeepCopy ¶
func (in *NewRelicOutputConfig) DeepCopy() *NewRelicOutputConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NewRelicOutputConfig.
func (*NewRelicOutputConfig) DeepCopyInto ¶
func (in *NewRelicOutputConfig) DeepCopyInto(out *NewRelicOutputConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*NewRelicOutputConfig) ToDirective ¶
func (c *NewRelicOutputConfig) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
type NullOutputConfig ¶
type NullOutputConfig struct { // The parameter for testing to simulate the output plugin that never succeeds to flush. NeverFlush *bool `json:"never_flush,omitempty"` }
func NewNullOutputConfig ¶
func NewNullOutputConfig() *NullOutputConfig
func (*NullOutputConfig) DeepCopy ¶
func (in *NullOutputConfig) DeepCopy() *NullOutputConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NullOutputConfig.
func (*NullOutputConfig) DeepCopyInto ¶
func (in *NullOutputConfig) DeepCopyInto(out *NullOutputConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*NullOutputConfig) ToDirective ¶
func (c *NullOutputConfig) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
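For completeness, the null output can be built with its constructor; it simply discards every record it receives, which makes it useful for testing flow routing.

nullOut := NewNullOutputConfig()
directive, err := nullOut.ToDirective(secretLoader, "null-example") // secretLoader as in the earlier sketches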
type OSSOutput ¶
type OSSOutput struct { // OSS endpoint to connect to Endpoint string `json:"endpoint"` // Your bucket name Bucket string `json:"bucket"` // Your access key id // +docLink:"Secret,../secret/" AccessKeyId *secret.Secret `json:"access_key_id"` // Your access secret key // +docLink:"Secret,../secret/" AccessKeySecret *secret.Secret `json:"access_key_secret"` // Path prefix of the files on OSS (default: fluent/logs) Path string `json:"path,omitempty"` // Upload crc enabled (default: true) UploadCrcEnable bool `json:"upload_crc_enable,omitempty"` // Download crc enabled (default: true) DownloadCrcEnable bool `json:"download_crc_enable,omitempty"` // Timeout for open connections (default: 10) OpenTimeout int `json:"open_timeout,omitempty"` // Timeout for read response (default: 120) ReadTimeout int `json:"read_timeout,omitempty"` // OSS SDK log directory (default: /var/log/td-agent) OssSdkLogDir string `json:"oss_sdk_log_dir,omitempty"` // The format of OSS object keys (default: `%{path}/%{time_slice}_%{index}_%{thread_id}.%{file_extension}`) KeyFormat string `json:"key_format,omitempty"` // Archive format on OSS: gzip, json, text, lzo, lzma2 (default: gzip) StoreAs string `json:"store_as,omitempty"` // Create OSS bucket if it does not exist (default: false) AutoCreateBucket bool `json:"auto_create_bucket,omitempty"` // Overwrite already existing path (default: false) Overwrite bool `json:"overwrite,omitempty"` // Check bucket if exists or not (default: true) CheckBucket bool `json:"check_bucket,omitempty"` // Check object before creation (default: true) CheckObject bool `json:"check_object,omitempty"` // The length of `%{hex_random}` placeholder (4-16) (default: 4) HexRandomLength int `json:"hex_random_length,omitempty"` // `sprintf` format for `%{index}` (default: %d) IndexFormat string `json:"index_format,omitempty"` // Threshold for treating events as delayed; warning logs are output if delayed events were put into OSS WarnForDelay string `json:"warn_for_delay,omitempty"` // +docLink:"Format,../format/" Format *Format `json:"format,omitempty"` // +docLink:"Buffer,../buffer/" Buffer *Buffer `json:"buffer,omitempty"` // The threshold for chunk flush performance check. // Parameter type is float, not time, default: 20.0 (seconds) // If chunk flush takes longer time than this threshold, Fluentd logs a warning message and increases the `fluentd_output_status_slow_flush_count` metric. SlowFlushLogThreshold string `json:"slow_flush_log_threshold,omitempty"` }
+kubebuilder:object:generate=true +docName:"Output Config"
func (*OSSOutput) DeepCopy ¶
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSSOutput.
func (*OSSOutput) DeepCopyInto ¶
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*OSSOutput) ToDirective ¶
type ObjectMetadata ¶
func (*ObjectMetadata) ToDirective ¶
func (o *ObjectMetadata) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
type OpenSearchEndpointCredentials ¶ added in v0.12.0
type OpenSearchEndpointCredentials struct { // AWS region. It should be in form like us-east-1, us-west-2. Default nil, which means try to find from environment variable AWS_REGION. Region string `json:"region,omitempty"` // AWS connection url. Url string `json:"url"` // AWS access key id. This parameter is required when your agent is not running on EC2 instance with an IAM Role. AccessKeyId *secret.Secret `json:"access_key_id,omitempty"` // AWS secret key. This parameter is required when your agent is not running on EC2 instance with an IAM Role. SecretAccessKey *secret.Secret `json:"secret_access_key,omitempty"` // Typically, you can use AssumeRole for cross-account access or federation. AssumeRoleArn *secret.Secret `json:"assume_role_arn,omitempty"` // Set with AWS_CONTAINER_CREDENTIALS_RELATIVE_URI environment variable value EcsContainerCredentialsRelativeUri *secret.Secret `json:"ecs_container_credentials_relative_uri,omitempty"` // [AssumeRoleWithWebIdentity](https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html) AssumeRoleSessionName *secret.Secret `json:"assume_role_session_name,omitempty"` // [AssumeRoleWithWebIdentity](https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html) AssumeRoleWebIdentityTokenFile *secret.Secret `json:"assume_role_web_identity_token_file,omitempty"` // By default, the AWS Security Token Service (AWS STS) is available as a global service, and all AWS STS requests go to a single endpoint at https://sts.amazonaws.com. AWS recommends using Regional AWS STS endpoints instead of the global endpoint to reduce latency, build in redundancy, and increase session token validity. https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html StsCredentialsRegion *secret.Secret `json:"sts_credentials_region,omitempty"` }
+kubebuilder:object:generate=true
func (*OpenSearchEndpointCredentials) DeepCopy ¶ added in v0.12.0
func (in *OpenSearchEndpointCredentials) DeepCopy() *OpenSearchEndpointCredentials
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenSearchEndpointCredentials.
func (*OpenSearchEndpointCredentials) DeepCopyInto ¶ added in v0.12.0
func (in *OpenSearchEndpointCredentials) DeepCopyInto(out *OpenSearchEndpointCredentials)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*OpenSearchEndpointCredentials) ToDirective ¶ added in v0.12.0
func (o *OpenSearchEndpointCredentials) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
type OpenSearchOutput ¶
type OpenSearchOutput struct { // You can specify OpenSearch host by this parameter. (default:localhost) Host string `json:"host,omitempty"` // You can specify OpenSearch port by this parameter.(default: 9200) Port int `json:"port,omitempty"` // User for HTTP Basic authentication. This plugin will escape required URL encoded characters within %{} placeholders. e.g. `%{demo+}` User string `json:"user,omitempty"` // Password for HTTP Basic authentication. // +docLink:"Secret,../secret/" Password *secret.Secret `json:"password,omitempty"` // Path for HTTP Basic authentication. Path string `json:"path,omitempty"` // Connection scheme (default: http) Scheme string `json:"scheme,omitempty"` // You can specify multiple OpenSearch hosts with separator ",". If you specify hosts option, host and port options are ignored. Hosts string `json:"hosts,omitempty"` // Tell this plugin to find the index name to write to in the record under this key in preference to other mechanisms. Key can be specified as path to nested record using dot ('.') as a separator. TargetIndexKey string `json:"target_index_key,omitempty"` // The format of the time stamp field (@timestamp or what you specify with `time_key`). This parameter only has an effect when logstash_format is true as it only affects the name of the index we write to. TimeKeyFormat string `json:"time_key_format,omitempty"` // Should the record not include a time_key, define the degree of sub-second time precision to preserve from the time portion of the routed event. TimePrecision string `json:"time_precision,omitempty"` // Adds a @timestamp field to the log, following all settings logstash_format does, except without the restrictions on index_name. This allows one to log to an alias in OpenSearch and utilize the rollover API.(default: false) IncludeTimestamp bool `json:"include_timestamp,omitempty"` // Enable Logstash log format.(default: false) LogstashFormat bool `json:"logstash_format,omitempty"` // Set the Logstash prefix.(default: logstash) LogstashPrefix string `json:"logstash_prefix,omitempty"` // Set the Logstash prefix separator.(default: -) LogstashPrefixSeparator string `json:"logstash_prefix_separator,omitempty"` // Set the Logstash date format.(default: %Y.%m.%d) LogstashDateformat string `json:"logstash_dateformat,omitempty"` // By default, the records inserted into index logstash-YYMMDD with UTC (Coordinated Universal Time). This option allows to use local time if you describe `utc_index` to false. 
// +kubebuilder:validation:Optional UtcIndex *bool `json:"utc_index,omitempty" plugin:"default:true"` // Suppress type name to avoid warnings in OpenSearch SuppressTypeName *bool `json:"suppress_type_name,omitempty"` // The index name to write events to (default: fluentd) IndexName string `json:"index_name,omitempty"` // Field on your data to identify the data uniquely IdKey string `json:"id_key,omitempty"` // The write_operation can be any of: (index,create,update,upsert)(default: index) WriteOperation string `json:"write_operation,omitempty"` // parent_key ParentKey string `json:"parent_key,omitempty"` // routing_key RoutingKey string `json:"routing_key,omitempty"` // You can specify HTTP request timeout.(default: 5s) RequestTimeout string `json:"request_timeout,omitempty"` // You can tune how the OpenSearch-transport host reloading feature works.(default: true) // +kubebuilder:validation:Optional ReloadConnections *bool `json:"reload_connections,omitempty" plugin:"default:true"` // Indicates that the OpenSearch-transport will try to reload the nodes addresses if there is a failure while making the request, this can be useful to quickly remove a dead node from the list of addresses.(default: false) ReloadOnFailure bool `json:"reload_on_failure,omitempty"` // This setting allows custom routing of messages in response to bulk request failures. The default behavior is to emit failed records using the same tag that was provided. RetryTag string `json:"retry_tag,omitempty"` // You can set in the OpenSearch-transport how often dead connections from the OpenSearch-transport's pool will be resurrected.(default: 60s) ResurrectAfter string `json:"resurrect_after,omitempty"` // By default, when inserting records in Logstash format, @timestamp is dynamically created with the time at log ingestion. If you'd like to use a custom time, include an @timestamp with your record. TimeKey string `json:"time_key,omitempty"` // time_key_exclude_timestamp (default: false) TimeKeyExcludeTimestamp bool `json:"time_key_exclude_timestamp,omitempty"` // +kubebuilder:validation:Optional // Skip ssl verification (default: true) SslVerify *bool `json:"ssl_verify,omitempty" plugin:"default:true"` // Client certificate key SSLClientCertKey *secret.Secret `json:"client_key,omitempty"` // Client certificate SSLClientCert *secret.Secret `json:"client_cert,omitempty"` // Client key password SSLClientCertKeyPass *secret.Secret `json:"client_key_pass,omitempty"` // CA certificate SSLCACert *secret.Secret `json:"ca_file,omitempty"` // If you want to configure SSL/TLS version, you can specify ssl_version parameter. [SSLv23, TLSv1, TLSv1_1, TLSv1_2] SslVersion string `json:"ssl_version,omitempty"` // https://github.com/fluent/fluent-plugin-opensearch#remove_keys RemoveKeys string `json:"remove_keys,omitempty"` // Remove keys on update will not update the configured keys in OpenSearch when a record is being updated. This setting only has any effect if the write operation is update or upsert. RemoveKeysOnUpdate string `json:"remove_keys_on_update,omitempty"` // This setting allows remove_keys_on_update to be configured with a key in each record, in much the same way as target_index_key works. 
RemoveKeysOnUpdateKey string `json:"remove_keys_on_update_key,omitempty"` // [https://github.com/fluent/fluent-plugin-opensearch#hash-flattening](https://github.com/fluent/fluent-plugin-opensearch#hash-flattening) FlattenHashes bool `json:"flatten_hashes,omitempty"` // Flatten separator FlattenHashesSeparator string `json:"flatten_hashes_separator,omitempty"` // The name of the template to define. If a template by the name given is already present, it will be left unchanged, unless template_overwrite is set, in which case the template will be updated. TemplateName string `json:"template_name,omitempty"` // The path to the file containing the template to install. // +docLink:"Secret,../secret/" TemplateFile *secret.Secret `json:"template_file,omitempty"` // Always update the template, even if it already exists. (default: false) TemplateOverwrite bool `json:"template_overwrite,omitempty"` // Specify the string and its value to be replaced in form of hash. Can contain multiple key-value pairs that will be replaced in the specified template_file. This setting only creates the template; to add a rollover index, please check the rollover_index configuration. CustomizeTemplate string `json:"customize_template,omitempty"` // Specify this to override the index date pattern for creating a rollover index. (default: now/d) IndexDatePattern *string `json:"index_date_pattern,omitempty"` // index_separator (default: -) IndexSeparator string `json:"index_separator,omitempty"` // Specify the application name for the rollover index to be created. (default: default) ApplicationName *string `json:"application_name,omitempty"` // Specify index templates in form of hash. Can contain multiple templates. Templates string `json:"templates,omitempty"` // You can specify the number of times to retry putting the template. (default: 10) MaxRetryPuttingTemplate string `json:"max_retry_putting_template,omitempty"` // Indicates whether to fail when max_retry_putting_template is exceeded. If you have multiple output plugins, you can use this property so that Fluentd does not fail on startup. (default: true) // +kubebuilder:validation:Optional FailOnPuttingTemplateRetryExceed *bool `json:"fail_on_putting_template_retry_exceed,omitempty" plugin:"default:true"` // fail_on_detecting_os_version_retry_exceed (default: true) // +kubebuilder:validation:Optional FailOnDetectingOsVersionRetryExceed *bool `json:"fail_on_detecting_os_version_retry_exceed,omitempty" plugin:"default:true"` // max_retry_get_os_version (default: 15) MaxRetryGetOsVersion int `json:"max_retry_get_os_version,omitempty"` // This will add the Fluentd tag in the JSON record. (default: false) IncludeTagKey bool `json:"include_tag_key,omitempty"` // This will add the Fluentd tag in the JSON record. (default: tag) TagKey string `json:"tag_key,omitempty"` // With logstash_format true, the OpenSearch plugin parses the timestamp field to generate the index name. If the record has an invalid timestamp value, this plugin emits an error event to the @ERROR label with the `time_parse_error_tag` configured tag. TimeParseErrorTag string `json:"time_parse_error_tag,omitempty"` // Indicates that the plugin should reset the connection on any error (reconnect on next send). By default it will reconnect only on "host unreachable exceptions". We recommend setting this to true in the presence of OpenSearch shield. (default: false) ReconnectOnError bool `json:"reconnect_on_error,omitempty"` // This parameter sets a pipeline ID of your OpenSearch to be added to the request; with it you can configure an ingest node.
Pipeline string `json:"pipeline,omitempty"` // This is debugging purpose option to enable to obtain transporter layer log. (default: false) WithTransporterLog bool `json:"with_transporter_log,omitempty"` // emit_error_for_missing_id (default: false) EmitErrorForMissingID bool `json:"emit_error_for_missing_id,omitempty"` // The default Sniffer used by the OpenSearch::Transport class works well when Fluentd has a direct connection to all of the OpenSearch servers and can make effective use of the _nodes API. This doesn't work well when Fluentd must connect through a load balancer or proxy. The `sniffer_class_name` parameter gives you the ability to provide your own Sniffer class to implement whatever connection reload logic you require. In addition, there is a new Fluent::Plugin::OpenSearchSimpleSniffer class which reuses the hosts given in the configuration, which is typically the hostname of the load balancer or proxy. For example, a configuration like this would cause connections to logging-os to reload every 100 operations: [https://github.com/fluent/fluent-plugin-opensearch#sniffer-class-name](https://github.com/fluent/fluent-plugin-opensearch#sniffer-class-name). SnifferClassName string `json:"sniffer_class_name,omitempty"` // selector_class_name SelectorClassName string `json:"selector_class_name,omitempty"` // When reload_connections true, this is the integer number of operations after which the plugin will reload the connections. The default value is 10000. ReloadAfter string `json:"reload_after,omitempty"` // With this option set to true, Fluentd manifests the index name in the request URL (rather than in the request body). You can use this option to enforce an URL-based access control. IncludeIndexInUrl bool `json:"include_index_in_url,omitempty"` // With http_backend typhoeus, the opensearch plugin uses typhoeus faraday http backend. Typhoeus can handle HTTP keepalive. (default: excon) HttpBackend string `json:"http_backend,omitempty"` // http_backend_excon_nonblock // +kubebuilder:validation:Optional HttpBackendExconNonblock *bool `json:"http_backend_excon_nonblock,omitempty" plugin:"default:true"` // When you use mismatched OpenSearch server and client libraries, fluent-plugin-opensearch cannot send data into OpenSearch. (default: false) ValidateClientVersion bool `json:"validate_client_version,omitempty"` // With default behavior, OpenSearch client uses Yajl as JSON encoder/decoder. Oj is the alternative high performance JSON encoder/decoder. When this parameter sets as true, OpenSearch client uses Oj as JSON encoder/decoder. (default: false) PreferOjSerializer bool `json:"prefer_oj_serializer,omitempty"` // Default unrecoverable_error_types parameter is set up strictly. Because rejected_execution_exception is caused by exceeding OpenSearch's thread pool capacity. Advanced users can increase its capacity, but normal users should follow default behavior. 
UnrecoverableErrorTypes string `json:"unrecoverable_error_types,omitempty"` // unrecoverable_record_types UnrecoverableRecordTypes string `json:"unrecoverable_record_types,omitempty"` // emit_error_label_event (default: true) // +kubebuilder:validation:Optional EmitErrorLabelEvent *bool `json:"emit_error_label_event,omitempty" plugin:"default:true"` // verify_os_version_at_startup (default: true) // +kubebuilder:validation:Optional VerifyOsVersionAtStartup *bool `json:"verify_os_version_at_startup,omitempty" plugin:"default:true"` // default_opensearch_version (default: 1) DefaultOpensearchVersion int `json:"default_opensearch_version,omitempty"` // log_os_400_reason (default: false) LogOs400Reason bool `json:"log_os_400_reason,omitempty"` // This parameter adds additional headers to the request. Example: `{"token":"secret"}` (default: {}) CustomHeaders string `json:"custom_headers,omitempty"` // By default, the record body is wrapped by 'doc'. This behavior cannot handle update script requests. You can set this to suppress doc wrapping and leave the record body untouched. (default: false) SuppressDocWrap bool `json:"suppress_doc_wrap,omitempty"` // A list of exceptions that will be ignored - when such an exception occurs, the chunk is discarded and the buffer retry mechanism is not invoked. It is also possible to specify classes at a higher level in the hierarchy. IgnoreExceptions string `json:"ignore_exceptions,omitempty"` // Indicates whether to back up the chunk when an ignored exception occurs. // +kubebuilder:validation:Optional ExceptionBackup *bool `json:"exception_backup,omitempty" plugin:"default:true"` // Configure bulk_message request splitting threshold size. // Default value is 20MB. (20 * 1024 * 1024) // If you specify a negative number, the bulk_message request splitting feature is disabled. (default: 20MB) BulkMessageRequestThreshold string `json:"bulk_message_request_threshold,omitempty"` // compression_level CompressionLevel string `json:"compression_level,omitempty"` // truncate_caches_interval TruncateCachesInterval string `json:"truncate_caches_interval,omitempty"` // Specify whether to use the legacy template or not. (default: true) // +kubebuilder:validation:Optional UseLegacyTemplate *bool `json:"use_legacy_template,omitempty"` // catch_transport_exception_on_retry (default: true) // +kubebuilder:validation:Optional CatchTransportExceptionOnRetry *bool `json:"catch_transport_exception_on_retry,omitempty" plugin:"default:true"` // target_index_affinity (default: false) TargetIndexAffinity bool `json:"target_index_affinity,omitempty"` Buffer *Buffer `json:"buffer,omitempty"` // The threshold for chunk flush performance check. // Parameter type is float, not time, default: 20.0 (seconds) // If chunk flush takes longer time than this threshold, Fluentd logs a warning message and increases the `fluentd_output_status_slow_flush_count` metric. SlowFlushLogThreshold string `json:"slow_flush_log_threshold,omitempty"` // Use @type opensearch_data_stream DataStreamEnable *bool `json:"data_stream_enable,omitempty" plugin:"hidden"` // You can specify the OpenSearch data stream name with this parameter. This parameter is mandatory for opensearch_data_stream. DataStreamName string `json:"data_stream_name,omitempty"` // Specify an existing index template for the data stream. If not present, a new template is created and named after the data stream. 
(default: data_stream_name) DataStreamTemplateName string `json:"data_stream_template_name,omitempty"` // AWS Endpoint Credentials Endpoint *OpenSearchEndpointCredentials `json:"endpoint,omitempty"` }
+kubebuilder:object:generate=true +docName:"OpenSearch" Send your logs to OpenSearch
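As a rough illustration of how the fields above map to Go values, the sketch below fills in a few of the documented options. The output package qualifier, the helper variable, and the concrete values are assumptions for illustration only; the exact import path depends on the release you use.

verify := true
osOut := &output.OpenSearchOutput{
	IndexName:      "fluentd",    // index to write events to (default: fluentd)
	IdKey:          "request_id", // hypothetical field used to identify records uniquely
	WriteOperation: "create",     // one of index, create, update, upsert
	SslVerify:      &verify,      // keep TLS verification enabled
	Buffer:         &output.Buffer{},
}
_ = osOut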
func (*OpenSearchOutput) DeepCopy ¶
func (in *OpenSearchOutput) DeepCopy() *OpenSearchOutput
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenSearchOutput.
func (*OpenSearchOutput) DeepCopyInto ¶
func (in *OpenSearchOutput) DeepCopyInto(out *OpenSearchOutput)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*OpenSearchOutput) ToDirective ¶
func (e *OpenSearchOutput) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
type RdkafkaOptions ¶ added in v0.12.0
type RdkafkaOptions struct { // Indicates the builtin features for this build of librdkafka. An application can either query this value or attempt to set it with its list of required features to check for library support. BuiltinFeatures string `json:"builtin.features,omitempty"` // Client identifier. ClientID string `json:"client.id,omitempty"` // Initial list of brokers as a CSV list of broker host or host:port. The application may also use `rd_kafka_brokers_add()` to add brokers during runtime. MetadataBrokerList string `json:"metadata.broker.list,omitempty"` // Alias for `metadata.broker.list`: Initial list of brokers as a CSV list of broker host or host:port. The application may also use `rd_kafka_brokers_add()` to add brokers during runtime. BootstrapServers string `json:"bootstrap.servers,omitempty"` // Maximum Kafka protocol request message size. Due to differing framing overhead between protocol versions the producer is unable to reliably enforce a strict max message limit at produce time and may exceed the maximum size by one message in protocol ProduceRequests, the broker will enforce the the topic's `max.message.bytes` limit (see Apache Kafka documentation). MessageMaxBytes int `json:"message.max.bytes,omitempty"` // Maximum size for message to be copied to buffer. Messages larger than this will be passed by reference (zero-copy) at the expense of larger iovecs. MessageCopyMaxBytes int `json:"message.copy.max.bytes,omitempty"` // Maximum Kafka protocol response message size. This serves as a safety precaution to avoid memory exhaustion in case of protocol hickups. This value must be at least `fetch.max.bytes` + 512 to allow for protocol overhead; the value is adjusted automatically unless the configuration property is explicitly set. ReceiveMessageMaxBytes int `json:"receive.message.max.bytes,omitempty"` // Maximum number of in-flight requests per broker connection. This is a generic property applied to all broker communication, however it is primarily relevant to produce requests. In particular, note that other mechanisms limit the number of outstanding consumer fetch request per broker to one. MaxInFlightRequestsPerConnection int `json:"max.in.flight.requests.per.connection,omitempty"` // Alias for `max.in.flight.requests.per.connection`: Maximum number of in-flight requests per broker connection. This is a generic property applied to all broker communication, however it is primarily relevant to produce requests. In particular, note that other mechanisms limit the number of outstanding consumer fetch request per broker to one. MaxInFlight int `json:"max.in.flight,omitempty"` // Period of time in milliseconds at which topic and broker metadata is refreshed in order to proactively discover any new brokers, topics, partitions or partition leader changes. Use -1 to disable the intervalled refresh (not recommended). If there are no locally referenced topics (no topic objects created, no messages produced, no subscription or no assignment) then only the broker list will be refreshed every interval but no more often than every 10s. TopicMetadataRefreshIntervalMs int `json:"topic.metadata.refresh.interval.ms,omitempty"` // Metadata cache max age. Defaults to topic.metadata.refresh.interval.ms * 3 MetadataMaxAgeMs int `json:"metadata.max.age.ms,omitempty"` // When a topic loses its leader a new metadata request will be enqueued immediately and then with this initial interval, exponentially increasing upto `retry.backoff.max.ms`, until the topic metadata has been refreshed. 
If not set explicitly, it will be defaulted to `retry.backoff.ms`. This is used to recover quickly from transitioning leader brokers. TopicMetadataRefreshFastIntervalMs int `json:"topic.metadata.refresh.fast.interval.ms,omitempty"` // Sparse metadata requests (consumes less network bandwidth) TopicMetadataRefreshSparse bool `json:"topic.metadata.refresh.sparse,omitempty"` // Apache Kafka topic creation is asynchronous and it takes some time for a new topic to propagate throughout the cluster to all brokers. If a client requests topic metadata after manual topic creation but before the topic has been fully propagated to the broker the client is requesting metadata from, the topic will seem to be non-existent and the client will mark the topic as such, failing queued produced messages with `ERR__UNKNOWN_TOPIC`. This setting delays marking a topic as non-existent until the configured propagation max time has passed. The maximum propagation time is calculated from the time the topic is first referenced in the client, e.g., on produce(). TopicMetadataPropagationMaxMs int `json:"topic.metadata.propagation.max.ms,omitempty"` // Topic blacklist, a comma-separated list of regular expressions for matching topic names that should be ignored in broker metadata information as if the topics did not exist. TopicBlacklist string `json:"topic.blacklist,omitempty"` // A comma-separated list of debug contexts to enable. Detailed Producer debugging: broker,topic,msg. Consumer: consumer,cgrp,topic,fetch Debug string `json:"debug,omitempty"` // Default timeout for network requests. Producer: ProduceRequests will use the lesser value of `socket.timeout.ms` and remaining `message.timeout.ms` for the first message in the batch. Consumer: FetchRequests will use `fetch.wait.max.ms` + `socket.timeout.ms`. Admin: Admin requests will use `socket.timeout.ms` or explicitly set `rd_kafka_AdminOptions_set_operation_timeout()` value. SocketTimeoutMs int `json:"socket.timeout.ms,omitempty"` // DEPRECATED No longer used. SocketBlockingMaxMs int `json:"socket.blocking.max.ms,omitempty"` // Broker socket send buffer size. System default is used if 0. SocketSendBufferBytes int `json:"socket.send.buffer.bytes,omitempty"` // Broker socket receive buffer size. System default is used if 0. SocketReceiveBufferBytes int `json:"socket.receive.buffer.bytes,omitempty"` // Enable TCP keep-alives (SO_KEEPALIVE) on broker sockets SocketKeepaliveEnable bool `json:"socket.keepalive.enable,omitempty"` // Disable the Nagle algorithm (TCP_NODELAY) on broker sockets. SocketNagleDisable bool `json:"socket.nagle.disable,omitempty"` // Disconnect from broker when this number of send failures (e.g., timed out requests) is reached. Disable with 0. WARNING: It is highly recommended to leave this setting at its default value of 1 to avoid the client and broker to become desynchronized in case of request timeouts. NOTE: The connection is automatically re-established. SocketMaxFails int `json:"socket.max.fails,omitempty"` // How long to cache the broker address resolving results (milliseconds). BrokerAddressTTl int `json:"broker.address.ttl,omitempty"` // Allowed broker IP address families: any, v4, v6 BrokerAddressFamily string `json:"broker.address.family,omitempty"` // Maximum time allowed for broker connection setup (TCP connection setup as well SSL and SASL handshake). If the connection to the broker is not fully functional after this the connection will be closed and retried. 
SocketConnectionSetupTimeoutMs int `json:"socket.connection.setup.timeout.ms,omitempty"` // Close broker connections after the specified time of inactivity. Disable with 0. If this property is left at its default value some heuristics are performed to determine a suitable default value, this is currently limited to identifying brokers on Azure (see librdkafka issue #3109 for more info). ConnectionsMaxIdleMs int `json:"connections.max.idle.ms,omitempty"` // The initial time to wait before reconnecting to a broker after the connection has been closed. The time is increased exponentially until `reconnect.backoff.max.ms` is reached. -25% to +50% jitter is applied to each reconnect backoff. A value of 0 disables the backoff and reconnects immediately. ReconnectBackoffMs int `json:"reconnect.backoff.ms,omitempty"` // The maximum time to wait before reconnecting to a broker after the connection has been closed. ReconnectBackoffMaxMs int `json:"reconnect.backoff.max.ms,omitempty"` // librdkafka statistics emit interval. The application also needs to register a stats callback using `rd_kafka_conf_set_stats_cb()`. The granularity is 1000ms. A value of 0 disables statistics. StatisticsIntervalMs int `json:"statistics.interval.ms,omitempty"` // See `rd_kafka_conf_set_events()` EnabledEvents int `json:"enabled_events,omitempty"` // Error callback (set with rd_kafka_conf_set_error_cb()) ErrorCb string `json:"error_cb,omitempty"` // Throttle callback (set with rd_kafka_conf_set_throttle_cb()) ThrottleCb string `json:"throttle_cb,omitempty"` // Statistics callback (set with rd_kafka_conf_set_stats_cb()) StatsCb string `json:"stats_cb,omitempty"` // Log callback (set with rd_kafka_conf_set_log_cb()) LogCb string `json:"log_cb,omitempty"` // Logging level (syslog(3) levels) LogLevel int `json:"log_level,omitempty"` // Disable spontaneous log_cb from internal librdkafka threads, instead enqueue log messages on queue set with `rd_kafka_set_log_queue()` and serve log callbacks or events through the standard poll APIs. **NOTE**: Log messages will linger in a temporary queue until the log queue has been set. LogQueue bool `json:"log.queue,omitempty"` // Print internal thread name in log messages (useful for debugging librdkafka internals) LogThreadName bool `json:"log.thread.name,omitempty"` // If enabled librdkafka will initialize the PRNG with srand(current_time.milliseconds) on the first invocation of rd_kafka_new() (required only if rand_r() is not available on your platform). If disabled the application must call srand() prior to calling rd_kafka_new(). EnableRandomSeed bool `json:"enable.random.seed,omitempty"` // Log broker disconnects. It might be useful to turn this off when interacting with 0.9 brokers with an aggressive `connections.max.idle.ms` value. 
LogConnectionClose bool `json:"log.connection.close,omitempty"` // Background queue event callback (set with rd_kafka_conf_set_background_event_cb()) BackgroundEventCb string `json:"background_event_cb,omitempty"` // Socket creation callback to provide race-free CLOEXEC SocketCb string `json:"socket_cb,omitempty"` // Socket connect callback ConnectCb string `json:"connect_cb,omitempty"` // Socket close callback CloseSocketCb string `json:"closesocket_cb,omitempty"` // File open callback to provide race-free CLOEXEC OpenCb string `json:"open_cb,omitempty"` // Address resolution callback (set with rd_kafka_conf_set_resolve_cb()) ResolveCb string `json:"resolve_cb,omitempty"` // Application opaque (set with rd_kafka_conf_set_opaque()) Opaque string `json:"opaque,omitempty"` // Default topic configuration for automatically subscribed topics DefaultTopicConf string `json:"default_topic_conf,omitempty"` // Signal that librdkafka will use to quickly terminate on rd_kafka_destroy(). If this signal is not set then there will be a delay before rd_kafka_wait_destroyed() returns true as internal threads are timing out their system calls. If this signal is set however the delay will be minimal. The application should mask this signal as an internal signal handler is installed. InternalTerminationSignal int `json:"internal.termination.signal,omitempty"` // Request broker's supported API versions to adjust functionality to available protocol features. If set to false, or the ApiVersionRequest fails, the fallback version `broker.version.fallback` will be used. **NOTE**: Depends on broker version >=0.10.0. If the request is not supported by (an older) broker the `broker.version.fallback` fallback is used. ApiVersionRequest bool `json:"api.version.request,omitempty"` // Timeout for broker API version requests. ApiVersionRequestTimeoutMs int `json:"api.version.request.timeout.ms,omitempty"` // Dictates how long the `broker.version.fallback` fallback is used in the case the ApiVersionRequest fails. ApiVersionFallbackMs int `json:"api.version.fallback.ms,omitempty"` // Older broker versions (before 0.10.0) provide no way for a client to query for supported protocol features (ApiVersionRequest, see `api.version.request`) making it impossible for the client to know what features it may use. As a workaround a user may set this property to the expected broker version and the client will automatically adjust its feature set accordingly if the ApiVersionRequest fails (or is disabled). The fallback broker version will be used for `api.version.fallback.ms`. Valid values are: 0.9.0, 0.8.2, 0.8.1, 0.8.0. Any other value >= 0.10, such as 0.10.2.1, enables ApiVersionRequests. BrokerVersionFallback string `json:"broker.version.fallback,omitempty"` // Allow automatic topic creation on the broker when subscribing to or assigning non-existent topics. The broker must also be configured with `auto.create.topics.enable=true` for this configuration to take effect. Note: the default value (true) for the producer is different from the default value (false) for the consumer. Further, the consumer default value is different from the Java consumer (true), and this property is not supported by the Java producer. Requires broker version >= 0.11.0.0, for older broker versions only the broker configuration applies. AllowAutoCreateTopics bool `json:"allow.auto.create.topics,omitempty"` // Protocol used to communicate with brokers. 
SecurityProtocol string `json:"security.protocol,omitempty"` // A cipher suite is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using TLS or SSL network protocol. See manual page for `ciphers(1)` and `SSL_CTX_set_cipher_list(3). SSLCipherSuites string `json:"ssl.cipher.suites,omitempty"` // The supported-curves extension in the TLS ClientHello message specifies the curves (standard/named, or 'explicit' GF(2^k) or GF(p)) the client is willing to have the server use. See manual page for `SSL_CTX_set1_curves_list(3)`. OpenSSL >= 1.0.2 required. SSLCurvesList string `json:"ssl.curves.list,omitempty"` // The client uses the TLS ClientHello signature_algorithms extension to indicate to the server which signature/hash algorithm pairs may be used in digital signatures. See manual page for `SSL_CTX_set1_sigalgs_list(3)`. OpenSSL >= 1.0.2 required. SSLSigalgsList string `json:"ssl.sigalgs.list,omitempty"` // Path to client's private key (PEM) used for authentication. SSLKeyLocation string `json:"ssl.key.location,omitempty"` // Private key passphrase (for use with `ssl.key.location` and `set_ssl_cert()`). SSLKeyPassword string `json:"ssl.key.password,omitempty"` // Client's private key string (PEM format) used for authentication. SSLKeyPem string `json:"ssl.key.pem,omitempty"` // Path to client's public key (PEM) used for authentication. SSLCertificateLocation string `json:"ssl.certificate.location,omitempty"` // Client's public key string (PEM format) used for authentication. SSLCertificatePem string `json:"ssl.certificate.pem,omitempty"` // File or directory path to CA certificate(s) for verifying the broker's key. Defaults: On Windows the system's CA certificates are automatically looked up in the Windows Root certificate store. On Mac OSX this configuration defaults to `probe`. It is recommended to install openssl using Homebrew, to provide CA certificates. On Linux install the distribution's ca-certificates package. If OpenSSL is statically linked or `ssl.ca.location` is set to `probe` a list of standard paths will be probed and the first one found will be used as the default CA certificate location path. If OpenSSL is dynamically linked the OpenSSL library's default path will be used (see `OPENSSLDIR` in `openssl version -a`). SSLCaLocation string `json:"ssl.ca.location,omitempty"` // CA certificate string (PEM format) for verifying the broker's key. SSLCaPem string `json:"ssl.ca.pem,omitempty"` // Path to CRL for verifying broker's certificate validity. SSLCrlLocation string `json:"ssl.crl.location,omitempty"` // Path to client's keystore (PKCS#12) used for authentication. SSLKeystoreLocation string `json:"ssl.keystore.location,omitempty"` // Client's keystore (PKCS#12) password. SSLKeystorePassword string `json:"ssl.keystore.password,omitempty"` // Comma-separated list of OpenSSL 3.0.x implementation providers. E.g., "default,legacy". SSLProviders string `json:"ssl.providers,omitempty"` // **DEPRECATED** Path to OpenSSL engine library. OpenSSL >= 1.1.x required. DEPRECATED: OpenSSL engine support is deprecated and should be replaced by OpenSSL 3 providers. SSLEngineLocation string `json:"ssl.engine.location,omitempty"` // OpenSSL engine id is the name used for loading engine. SSLEngineId string `json:"ssl.engine.id,omitempty"` // Enable OpenSSL's builtin broker (server) certificate verification. This verification can be extended by the application by implementing a certificate_verify_cb. 
EnableSSLCertificateVerification bool `json:"enable.ssl.certificate.verification,omitempty"` // Endpoint identification algorithm to validate broker hostname using broker certificate. https - Server (broker) hostname verification as specified in RFC2818. none - No endpoint verification. OpenSSL >= 1.0.2 required. SSLEndpointIdentificationAlgorithm string `json:"ssl.endpoint.identification.algorithm,omitempty"` // SASL mechanism to use for authentication. Supported: GSSAPI, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, OAUTHBEARER. NOTE: Despite the name only one mechanism must be configured. SaslMechanisms string `json:"sasl.mechanisms,omitempty" ` // Kerberos principal name that Kafka runs as, not including /hostname@REALM. SaslKerberosServiceName string `json:"sasl.kerberos.service.name,omitempty" ` // This client's Kerberos principal name. (Not supported on Windows, will use the logon user's principal). SaslKerberosPrincipal string `json:"sasl.kerberos.principal,omitempty" ` // Shell command to refresh or acquire the client's Kerberos ticket. This command is executed on client creation and every sasl.kerberos.min.time.before.relogin (0=disable). SaslKerberosKinitCmd string `json:"sasl.kerberos.kinit.cmd,omitempty" ` // Path to Kerberos keytab file. This configuration property is only used as a variable in sasl.kerberos.kinit.cmd as ... -t "%{sasl.kerberos.keytab}". SaslKerberosKeytab string `json:"sasl.kerberos.keytab,omitempty" ` // Minimum time in milliseconds between key refresh attempts. Disable automatic key refresh by setting this property to 0. SaslKerberosMinTimeBeforeRelogin int `json:"sasl.kerberos.min.time.before.relogin,omitempty" ` // SASL username for use with the PLAIN and SASL-SCRAM-.. mechanisms. SaslUsername string `json:"sasl.username,omitempty" ` // SASL password for use with the PLAIN and SASL-SCRAM-.. mechanism. SaslPassword string `json:"sasl.password,omitempty" ` // SASL/OAUTHBEARER configuration. The format is implementation-dependent and must be parsed accordingly. The default unsecured token implementation (see https://tools.ietf.org/html/rfc7515#appendix-A.5) recognizes space-separated name=value pairs with valid names including principalClaimName, principal, scopeClaimName, scope, and lifeSeconds. The default value for principalClaimName is "sub", the default value for scopeClaimName is "scope", and the default value for lifeSeconds is 3600. The scope value is CSV format with the default value being no/empty scope. For example: principalClaimName=azp principal=admin scopeClaimName=roles scope=role1,role2 lifeSeconds=600. In addition, SASL extensions can be communicated to the broker via extension_NAME=value. For example: principal=admin extension_traceId=123. SaslOauthbearerConfig string `json:"sasl.oauthbearer.config,omitempty" ` // Enable the builtin unsecure JWT OAUTHBEARER token handler if no oauthbearer_refresh_cb has been set. This builtin handler should only be used for development or testing, and not in production. EnableSaslOauthbearerUnsecureJwt bool `json:"enable.sasl.oauthbearer.unsecure.jwt,omitempty" ` // SASL/OAUTHBEARER token refresh callback (set with rd_kafka_conf_set_oauthbearer_token_refresh_cb(), triggered by rd_kafka_poll(), et.al. This callback will be triggered when it is time to refresh the client's OAUTHBEARER token. Also see rd_kafka_conf_enable_sasl_queue(). OauthbearerTokenRefreshCb string `json:"oauthbearer_token_refresh_cb,omitempty" ` // Set to "default" or "oidc" to control which login method to be used. 
If set to "oidc", the following properties must also be specified: sasl.oauthbearer.client.id, sasl.oauthbearer.client.secret, and sasl.oauthbearer.token.endpoint.url. SaslOauthbearerMethod string `json:"sasl.oauthbearer.method,omitempty" ` // Public identifier for the application. Must be unique across all clients that the authorization server handles. Only used when sasl.oauthbearer.method is set to "oidc". SaslOauthbearerClientId string `json:"sasl.oauthbearer.client.id,omitempty" ` // Client secret only known to the application and the authorization server. This should be a sufficiently random string that is not guessable. Only used when sasl.oauthbearer.method is set to "oidc". SaslOauthbearerClientSecret string `json:"sasl.oauthbearer.client.secret,omitempty" ` // Client use this to specify the scope of the access request to the broker. Only used when sasl.oauthbearer.method is set to "oidc". SaslOauthbearerScope string `json:"sasl.oauthbearer.scope,omitempty" ` // Allow additional information to be provided to the broker. Comma-separated list of key=value pairs. E.g., "supportFeatureX=true,organizationId=sales-emea".Only used when sasl.oauthbearer.method is set to "oidc". SaslOauthbearerExtensions string `json:"sasl.oauthbearer.extensions,omitempty" ` // OAuth/OIDC issuer token endpoint HTTP(S) URI used to retrieve token. Only used when sasl.oauthbearer.method is set to "oidc". SaslOauthbearerTokenEndpointUrl string `json:"sasl.oauthbearer.token.endpoint.url,omitempty" ` // List of plugin libraries to load (; separated). The library search path is platform dependent (see dlopen(3) for Unix and LoadLibrary() for Windows). If no filename extension is specified the platform-specific extension (such as .dll or .so) will be appended automatically. PluginLibraryPaths string `json:"plugin.library.paths,omitempty" ` // Interceptors added through rd_kafka_conf_interceptor_add_..() and any configuration handled by interceptors. Interceptors string `json:"interceptors,omitempty"` }
RdkafkaOptions represents the global configuration properties for librdkafka.
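A minimal sketch of populating RdkafkaOptions from Go follows. The broker list, credentials, and the output package qualifier are placeholders for illustration, not values taken from this documentation.

rdkafka := &output.RdkafkaOptions{
	BootstrapServers: "kafka-0.kafka:9092,kafka-1.kafka:9092", // hypothetical broker list
	ClientID:         "fluentd-logging",
	SecurityProtocol: "sasl_ssl",
	SaslMechanisms:   "SCRAM-SHA-512",
	SaslUsername:     "logging",  // placeholder; in practice sourced from a Secret
	SaslPassword:     "changeme", // placeholder
}
_ = rdkafka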
type RedisOutputConfig ¶
type RedisOutputConfig struct { // Host Redis endpoint (default: localhost) Host string `json:"host,omitempty"` // Port of the Redis server (default: 6379) Port int `json:"port,omitempty"` // Database number, optional. (default: 0) DbNumber int `json:"db_number,omitempty"` // Redis Server password Password *secret.Secret `json:"password,omitempty"` // insert_key_prefix (default: "${tag}") InsertKeyPrefix string `json:"insert_key_prefix,omitempty"` // Users can set strftime format. (default: "%s") StrftimeFormat string `json:"strftime_format,omitempty"` // Allow inserting duplicate keys; existing values are updated. (default: false) AllowDuplicateKey bool `json:"allow_duplicate_key,omitempty"` // If 0 or a negative value is set, no TTL is set on the keys. TTL int `json:"ttl,omitempty"` // +docLink:"Format,../format/" Format *Format `json:"format,omitempty"` // +docLink:"Buffer,../buffer/" Buffer *Buffer `json:"buffer,omitempty"` // The threshold for chunk flush performance check. // Parameter type is float, not time, default: 20.0 (seconds) // If chunk flush takes longer time than this threshold, Fluentd logs a warning message and increases the `fluentd_output_status_slow_flush_count` metric. SlowFlushLogThreshold string `json:"slow_flush_log_threshold,omitempty"` }
+kubebuilder:object:generate=true +docName:"Output Config"
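For reference, a small sketch of a RedisOutputConfig value, assuming the same output package qualifier as in the earlier sketches; the host name is a placeholder.

redis := &output.RedisOutputConfig{
	Host:     "redis.logging.svc", // hypothetical Redis endpoint
	Port:     6379,
	DbNumber: 0,
	TTL:      3600, // expire keys after one hour; 0 or negative leaves keys without TTL
}
_ = redis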
func (*RedisOutputConfig) DeepCopy ¶
func (in *RedisOutputConfig) DeepCopy() *RedisOutputConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisOutputConfig.
func (*RedisOutputConfig) DeepCopyInto ¶
func (in *RedisOutputConfig) DeepCopyInto(out *RedisOutputConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*RedisOutputConfig) ToDirective ¶
func (c *RedisOutputConfig) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
type RelabelOutputConfig ¶ added in v0.10.0
type RelabelOutputConfig struct { // Specifies new label for events Label string `json:"label"` }
+kubebuilder:object:generate=true +docName:"Output Config"
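Since the relabel output only carries a label, it is also a convenient place to sketch how ToDirective might be invoked programmatically. The secretLoader variable is assumed to be an existing secret.SecretLoader; the label and id values are placeholders.

// secretLoader is an existing secret.SecretLoader provided by the caller (assumed).
relabel := &output.RelabelOutputConfig{Label: "@postprocess"}
directive, err := relabel.ToDirective(secretLoader, "relabel-example")
if err != nil {
	// handle the error
}
_ = directive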
func (*RelabelOutputConfig) DeepCopy ¶ added in v0.10.0
func (in *RelabelOutputConfig) DeepCopy() *RelabelOutputConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RelabelOutputConfig.
func (*RelabelOutputConfig) DeepCopyInto ¶ added in v0.10.0
func (in *RelabelOutputConfig) DeepCopyInto(out *RelabelOutputConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*RelabelOutputConfig) ToDirective ¶ added in v0.10.0
func (c *RelabelOutputConfig) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
type S3AssumeRoleCredentials ¶
type S3AssumeRoleCredentials struct { // The Amazon Resource Name (ARN) of the role to assume RoleArn string `json:"role_arn"` // An identifier for the assumed role session RoleSessionName string `json:"role_session_name"` // An IAM policy in JSON format Policy string `json:"policy,omitempty"` // The duration, in seconds, of the role session (900-3600) DurationSeconds string `json:"duration_seconds,omitempty"` // A unique identifier that is used by third parties when assuming roles in their customers' accounts. ExternalId string `json:"external_id,omitempty"` }
+kubebuilder:object:generate=true +docName:"Assume Role Credentials" assume_role_credentials
func (*S3AssumeRoleCredentials) DeepCopy ¶
func (in *S3AssumeRoleCredentials) DeepCopy() *S3AssumeRoleCredentials
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3AssumeRoleCredentials.
func (*S3AssumeRoleCredentials) DeepCopyInto ¶
func (in *S3AssumeRoleCredentials) DeepCopyInto(out *S3AssumeRoleCredentials)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type S3InstanceProfileCredentials ¶
type S3InstanceProfileCredentials struct { // IP address (default:169.254.169.254) IpAddress string `json:"ip_address,omitempty"` // Port number (default:80) Port string `json:"port,omitempty"` // Number of seconds to wait for the connection to open HttpOpenTimeout string `json:"http_open_timeout,omitempty"` // Number of seconds to wait for one block to be read HttpReadTimeout string `json:"http_read_timeout,omitempty"` // Number of times to retry when retrieving credentials Retries string `json:"retries,omitempty"` }
+kubebuilder:object:generate=true +docName:"Instance Profile Credentials" instance_profile_credentials
func (*S3InstanceProfileCredentials) DeepCopy ¶
func (in *S3InstanceProfileCredentials) DeepCopy() *S3InstanceProfileCredentials
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3InstanceProfileCredentials.
func (*S3InstanceProfileCredentials) DeepCopyInto ¶
func (in *S3InstanceProfileCredentials) DeepCopyInto(out *S3InstanceProfileCredentials)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type S3OutputConfig ¶
type S3OutputConfig struct { // AWS access key id // +docLink:"Secret,../secret/" AwsAccessKey *secret.Secret `json:"aws_key_id,omitempty"` // AWS secret key. // +docLink:"Secret,../secret/" AwsSecretKey *secret.Secret `json:"aws_sec_key,omitempty"` // Check AWS key on start CheckApikeyOnStart string `json:"check_apikey_on_start,omitempty"` // Allows grantee to read the object data and its metadata GrantRead string `json:"grant_read,omitempty"` // Overwrite already existing path Overwrite string `json:"overwrite,omitempty"` // Path prefix of the files on S3 Path string `json:"path,omitempty"` // Allows grantee to write the ACL for the applicable object GrantWriteAcp string `json:"grant_write_acp,omitempty"` // Check whether the bucket exists or not CheckBucket string `json:"check_bucket,omitempty"` // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data SseCustomerKey string `json:"sse_customer_key,omitempty" default:"10m"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321 SseCustomerKeyMd5 string `json:"sse_customer_key_md5,omitempty"` // AWS SDK uses MD5 for API request/response by default ComputeChecksums string `json:"compute_checksums,omitempty"` // Given a threshold to treat events as delayed, output warning logs if delayed events were put into S3 WarnForDelay string `json:"warn_for_delay,omitempty"` // Use aws-sdk-ruby bundled cert UseBundledCert string `json:"use_bundled_cert,omitempty"` // Custom S3 endpoint (like minio) S3Endpoint string `json:"s3_endpoint,omitempty"` // Specifies the AWS KMS key ID to use for object encryption SsekmsKeyId string `json:"ssekms_key_id,omitempty"` // Arbitrary S3 metadata headers to set for the object S3Metadata string `json:"s3_metadata,omitempty"` // If true, the bucket name is always left in the request URI and never moved to the host as a sub-domain ForcePathStyle string `json:"force_path_style,omitempty"` // Create the S3 bucket if it does not exist AutoCreateBucket string `json:"auto_create_bucket,omitempty"` // `sprintf` format for `%{index}` IndexFormat string `json:"index_format,omitempty"` // Signature version for API Request (s3,v4) SignatureVersion string `json:"signature_version,omitempty"` // If true, S3 Transfer Acceleration will be enabled for uploads. 
IMPORTANT: You must first enable this feature on your destination S3 bucket EnableTransferAcceleration string `json:"enable_transfer_acceleration,omitempty"` // If false, the certificate of endpoint will not be verified SslVerifyPeer string `json:"ssl_verify_peer,omitempty"` // URI of proxy environment ProxyUri string `json:"proxy_uri,omitempty"` // Allows grantee to read the object ACL GrantReadAcp string `json:"grant_read_acp,omitempty"` // Check object before creation CheckObject string `json:"check_object,omitempty"` // Specifies the algorithm to use to when encrypting the object SseCustomerAlgorithm string `json:"sse_customer_algorithm,omitempty"` // The Server-side encryption algorithm used when storing this object in S3 (AES256, aws:kms) UseServerSideEncryption string `json:"use_server_side_encryption,omitempty"` // S3 region name S3Region string `json:"s3_region,omitempty"` // Permission for the object in S3 Acl string `json:"acl,omitempty"` // Allows grantee READ, READ_ACP, and WRITE_ACP permissions on the object GrantFullControl string `json:"grant_full_control,omitempty"` // The length of `%{hex_random}` placeholder(4-16) HexRandomLength string `json:"hex_random_length,omitempty"` // The format of S3 object keys (default: `%{path}%{time_slice}_%{uuid_hash}_%{index}.%{file_extension}`) S3ObjectKeyFormat string `json:"s3_object_key_format,omitempty" plugin:"default:%{path}%{time_slice}_%{uuid_hash}_%{index}.%{file_extension}"` // S3 bucket name S3Bucket string `json:"s3_bucket"` // Archive format on S3 StoreAs string `json:"store_as,omitempty"` // The type of storage to use for the object, for example STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR // For a complete list of possible values, see the [Amazon S3 API reference](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html#AmazonS3-PutObject-request-header-StorageClass). StorageClass string `json:"storage_class,omitempty"` // The number of attempts to load instance profile credentials from the EC2 metadata service using IAM role AwsIamRetries string `json:"aws_iam_retries,omitempty"` // +docLink:"Buffer,../buffer/" Buffer *Buffer `json:"buffer,omitempty"` // The threshold for chunk flush performance check. // Parameter type is float, not time, default: 20.0 (seconds) // If chunk flush takes longer time than this threshold, fluentd logs warning message and increases metric fluentd_output_status_slow_flush_count. SlowFlushLogThreshold string `json:"slow_flush_log_threshold,omitempty"` // +docLink:"Format,../format/" Format *Format `json:"format,omitempty"` // +docLink:"Assume Role Credentials,#assume-role-credentials" AssumeRoleCredentials *S3AssumeRoleCredentials `json:"assume_role_credentials,omitempty"` // +docLink:"Instance Profile Credentials,#instance-profile-credentials" InstanceProfileCredentials *S3InstanceProfileCredentials `json:"instance_profile_credentials,omitempty"` SharedCredentials *S3SharedCredentials `json:"shared_credentials,omitempty"` // Parquet compressor Compress *Compress `json:"compress,omitempty"` // One-eye format trigger (default:false) OneEyeFormat bool `json:"oneeye_format,omitempty"` // Custom cluster name (default:one-eye) ClusterName string `json:"clustername,omitempty"` }
+kubebuilder:object:generate=true +docName:"Output Config"
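The sketch below combines S3OutputConfig with the assume-role credentials documented above. Bucket, region, path, and role ARN are placeholders; the output package qualifier is an assumption.

s3 := &output.S3OutputConfig{
	S3Bucket: "my-log-archive", // required
	S3Region: "eu-west-1",
	Path:     "logs/", // path prefix of the files on S3
	StoreAs:  "gzip",  // archive format on S3
	AssumeRoleCredentials: &output.S3AssumeRoleCredentials{
		RoleArn:         "arn:aws:iam::123456789012:role/log-writer", // hypothetical role
		RoleSessionName: "fluentd",
	},
}
_ = s3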
func (*S3OutputConfig) DeepCopy ¶
func (in *S3OutputConfig) DeepCopy() *S3OutputConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3OutputConfig.
func (*S3OutputConfig) DeepCopyInto ¶
func (in *S3OutputConfig) DeepCopyInto(out *S3OutputConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*S3OutputConfig) ToDirective ¶
func (c *S3OutputConfig) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
type S3SharedCredentials ¶
type S3SharedCredentials struct { ProfileName string `json:"profile_name,omitempty"` Path string `json:"path,omitempty"` }
+kubebuilder:object:generate=true +docName:"Shared Credentials" shared_credentials
func (*S3SharedCredentials) DeepCopy ¶
func (in *S3SharedCredentials) DeepCopy() *S3SharedCredentials
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3SharedCredentials.
func (*S3SharedCredentials) DeepCopyInto ¶
func (in *S3SharedCredentials) DeepCopyInto(out *S3SharedCredentials)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type SQSOutputConfig ¶
type SQSOutputConfig struct { // SQS queue url e.g. `https://sqs.us-west-2.amazonaws.com/123456789012/myqueue` SQSUrl string `json:"sqs_url,omitempty"` // SQS queue name - required if sqs_url is not set QueueName string `json:"queue_name,omitempty"` // AWS access key id AWSKeyId *secret.Secret `json:"aws_key_id,omitempty"` // AWS secret key AWSSecKey *secret.Secret `json:"aws_sec_key,omitempty"` // Create SQS queue (default: true) CreateQueue *bool `json:"create_queue,omitempty"` // AWS region (default: ap-northeast-1) Region string `json:"region,omitempty"` // Message group id for FIFO queue MessageGroupId string `json:"message_group_id,omitempty"` // Delivery delay seconds (default: 0) DelaySeconds int `json:"delay_seconds,omitempty"` // Include tag (default: true) IncludeTag *bool `json:"include_tag,omitempty"` // Tags property name in json (default: '__tag') TagPropertyName string `json:"tag_property_name,omitempty"` // +docLink:"Buffer,../buffer/" Buffer *Buffer `json:"buffer,omitempty"` // The threshold for chunk flush performance check. // Parameter type is float, not time, default: 20.0 (seconds) // If chunk flush takes longer time than this threshold, fluentd logs warning message and increases metric fluentd_output_status_slow_flush_count. SlowFlushLogThreshold string `json:"slow_flush_log_threshold,omitempty"` }
+kubebuilder:object:generate=true +docName:"Output Config"
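A short sketch of an SQSOutputConfig value; the queue URL reuses the example from the field documentation above, while the helper function and region are illustrative assumptions.

boolPtr := func(b bool) *bool { return &b }
sqs := &output.SQSOutputConfig{
	SQSUrl:       "https://sqs.us-west-2.amazonaws.com/123456789012/myqueue",
	Region:       "us-west-2",
	CreateQueue:  boolPtr(false), // the queue already exists
	DelaySeconds: 0,
}
_ = sqs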
func (*SQSOutputConfig) DeepCopy ¶
func (in *SQSOutputConfig) DeepCopy() *SQSOutputConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQSOutputConfig.
func (*SQSOutputConfig) DeepCopyInto ¶
func (in *SQSOutputConfig) DeepCopyInto(out *SQSOutputConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*SQSOutputConfig) ToDirective ¶
func (s *SQSOutputConfig) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
type SplunkHecOutput ¶
type SplunkHecOutput struct { // The type of data that will be sent to Splunk, either event or metric (default: event) DataType string `json:"data_type,omitempty"` // You can specify the Splunk HEC host with this parameter. HecHost string `json:"hec_host"` // The port number for the Hec token or the Hec load balancer. (default: 8088) HecPort int `json:"hec_port,omitempty"` // This is the protocol to use for calling the Hec API. Available values are: http, https. (default: https) Protocol string `json:"protocol,omitempty"` // Identifier for the Hec token. // +docLink:"Secret,../secret/" HecToken *secret.Secret `json:"hec_token"` // When data_type is set to "metric", the ingest API will treat every key-value pair in the input event as a metric name-value pair. Set metrics_from_event to false to disable this behavior and use `metric_name_key` and `metric_value_key` to define metrics. (Default:true) MetricsFromEvent *bool `json:"metrics_from_event,omitempty"` // Field name that contains the metric name. This parameter only works in conjunction with the metrics_from_event parameter. When this parameter is set, the `metrics_from_event` parameter is automatically set to false. (default: true) MetricNameKey string `json:"metric_name_key,omitempty"` // Field name that contains the metric value, this parameter is required when `metric_name_key` is configured. MetricValueKey string `json:"metric_value_key,omitempty"` // Indicates whether to allow non-UTF-8 characters in user logs. If set to true, any non-UTF-8 character is replaced by the string specified in non_utf8_replacement_string. If set to false, the Ingest API errors out any non-UTF-8 characters. (default: true). CoerceToUtf8 *bool `json:"coerce_to_utf8,omitempty"` // If coerce_to_utf8 is set to true, any non-UTF-8 character is replaced by the string you specify in this parameter. (default: ' '). NonUtf8ReplacementString string `json:"non_utf8_replacement_string,omitempty"` // Identifier for the Splunk index to be used for indexing events. If this parameter is not set, the indexer is chosen by HEC. Cannot set both index and index_key parameters at the same time. Index string `json:"index,omitempty"` // The field name that contains the Splunk index name. Cannot set both index and index_key parameters at the same time. IndexKey string `json:"index_key,omitempty"` // The host location for events. Cannot set both host and host_key parameters at the same time. (Default:hostname) Host string `json:"host,omitempty"` // Key for the host location. Cannot set both host and host_key parameters at the same time. HostKey string `json:"host_key,omitempty"` // The source field for events. If this parameter is not set, the source will be decided by HEC. Cannot set both source and source_key parameters at the same time. Source string `json:"source,omitempty"` // Field name to contain source. Cannot set both source and source_key parameters at the same time. SourceKey string `json:"source_key,omitempty"` // The sourcetype field for events. When not set, the sourcetype is decided by HEC. Cannot set both sourcetype and `sourcetype_key` parameters at the same time. SourceType string `json:"sourcetype,omitempty"` // Field name that contains the sourcetype. Cannot set both sourcetype and sourcetype_key parameters at the same time. SourceTypeKey string `json:"sourcetype_key,omitempty"` // By default, all the fields used by the *_key parameters are removed from the original input events. To change this behavior, set this parameter to true. This parameter is set to false by default. 
When set to true, all fields defined in `index_key`, `host_key`, `source_key`, `sourcetype_key`, `metric_name_key`, and `metric_value_key` are saved in the original event. KeepKeys bool `json:"keep_keys,omitempty"` //If a connection has not been used for this number of seconds it will automatically be reset upon the next use to avoid attempting to send to a closed connection. nil means no timeout. IdleTimeout int `json:"idle_timeout,omitempty"` // The amount of time allowed between reading two chunks from the socket. ReadTimeout int `json:"read_timeout,omitempty"` // The amount of time to wait for a connection to be opened. OpenTimeout int `json:"open_timeout,omitempty"` // The path to a file containing a PEM-format CA certificate for this client. // +docLink:"Secret,../secret/" ClientCert *secret.Secret `json:"client_cert,omitempty"` // The private key for this client.' // +docLink:"Secret,../secret/" ClientKey *secret.Secret `json:"client_key,omitempty"` // The path to a file containing a PEM-format CA certificate. // +docLink:"Secret,../secret/" CAFile *secret.Secret `json:"ca_file,omitempty"` // The path to a directory containing CA certificates in PEM format. // +docLink:"Secret,../secret/" CAPath *secret.Secret `json:"ca_path,omitempty"` // List of SSL ciphers allowed. SSLCiphers string `json:"ssl_ciphers,omitempty"` // Indicates if insecure SSL connection is allowed (default:false) InsecureSSL *bool `json:"insecure_ssl,omitempty"` // In this case, parameters inside `<fields>` are used as indexed fields and removed from the original input events Fields Fields `json:"fields,omitempty"` // +docLink:"Format,../format/" Format *Format `json:"format,omitempty"` // +docLink:"Buffer,../buffer/" Buffer *Buffer `json:"buffer,omitempty"` // The threshold for chunk flush performance check. // Parameter type is float, not time, default: 20.0 (seconds) // If chunk flush takes longer time than this threshold, fluentd logs warning message and increases metric fluentd_output_status_slow_flush_count. SlowFlushLogThreshold string `json:"slow_flush_log_threshold,omitempty"` }
+kubebuilder:object:generate=true +docName:"SplunkHecOutput" SplunkHecOutput sends your logs to Splunk via Hec
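A minimal sketch of a SplunkHecOutput value. hecToken is assumed to be an existing *secret.Secret resolved elsewhere; host, index, and the output package qualifier are placeholders.

splunk := &output.SplunkHecOutput{
	HecHost:  "splunk-hec.example.com", // hypothetical HEC endpoint
	HecPort:  8088,
	Protocol: "https",
	HecToken: hecToken, // *secret.Secret provided by the caller (assumed)
	Index:    "kubernetes",
}
_ = splunk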
func (*SplunkHecOutput) DeepCopy ¶
func (in *SplunkHecOutput) DeepCopy() *SplunkHecOutput
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SplunkHecOutput.
func (*SplunkHecOutput) DeepCopyInto ¶
func (in *SplunkHecOutput) DeepCopyInto(out *SplunkHecOutput)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*SplunkHecOutput) ToDirective ¶
func (c *SplunkHecOutput) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
type SyslogOutputConfig ¶
type SyslogOutputConfig struct { // Destination host address Host string `json:"host"` // Destination host port (default: "514") Port int `json:"port,omitempty"` // Transport Protocol (default: "tls") Transport string `json:"transport,omitempty"` // skip ssl validation (default: false) Insecure *bool `json:"insecure,omitempty"` // verify_fqdn (default: nil) VerifyFqdn *bool `json:"verify_fqdn,omitempty"` // cert_store to set ca_certificate for ssl context EnableSystemCertStore *bool `json:"enable_system_cert_store,omitempty"` // file path to ca to trust TrustedCaPath *secret.Secret `json:"trusted_ca_path,omitempty"` // file path for client_cert_path ClientCertPath *secret.Secret `json:"client_cert_path,omitempty"` // file path for private_key_path PrivateKeyPath *secret.Secret `json:"private_key_path,omitempty"` // PrivateKeyPassphrase for private key (default: "nil") PrivateKeyPassphrase *secret.Secret `json:"private_key_passphrase,omitempty"` // allow_self_signed_cert for mutual tls (default: false) AllowSelfSignedCert *bool `json:"allow_self_signed_cert,omitempty"` // Fqdn (default: "nil") Fqdn string `json:"fqdn,omitempty"` // TLS Version (default: "TLSv1_2") Version string `json:"version,omitempty"` // +docLink:"Format,../format_rfc5424/" Format *FormatRfc5424 `json:"format,omitempty"` // +docLink:"Buffer,../buffer/" Buffer *Buffer `json:"buffer,omitempty"` // The threshold for chunk flush performance check. // Parameter type is float, not time, default: 20.0 (seconds) // If chunk flush takes longer time than this threshold, fluentd logs warning message and increases metric fluentd_output_status_slow_flush_count. SlowFlushLogThreshold string `json:"slow_flush_log_threshold,omitempty"` }
+kubebuilder:object:generate=true
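A small sketch of a SyslogOutputConfig value for TLS transport; the collector host is a placeholder and the output package qualifier is assumed.

insecure := false
syslogOut := &output.SyslogOutputConfig{
	Host:      "syslog.example.com", // hypothetical collector
	Port:      6514,
	Transport: "tls",
	Insecure:  &insecure, // keep certificate validation enabled
}
_ = syslogOut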
func (*SyslogOutputConfig) DeepCopy ¶
func (in *SyslogOutputConfig) DeepCopy() *SyslogOutputConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyslogOutputConfig.
func (*SyslogOutputConfig) DeepCopyInto ¶
func (in *SyslogOutputConfig) DeepCopyInto(out *SyslogOutputConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*SyslogOutputConfig) ToDirective ¶
func (s *SyslogOutputConfig) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
type VMwareLogInsightOutput ¶ added in v0.12.0
type VMwareLogInsightOutput struct { // agent_id generated by your LI (default: 0) AgentID string `json:"agent_id,omitempty"` // Type of authentication to use (nil,basic) (default: nil) Authentication *string `json:"authentication,omitempty"` // +docLink:"Buffer,../buffer/" Buffer *Buffer `json:"buffer,omitempty"` // +docLink:"Secret,../secret/" CAFile *secret.Secret `json:"ca_file,omitempty"` // Rename field names (default: {"source" => "log_source"}) ConfigParam map[string]string `json:"config_param,omitempty"` // Flatten hashes to create one key/val pair w/o losing log data (default: true) FlattenHashes *bool `json:"flatten_hashes,omitempty"` // Separator to use for joining flattened keys (default: _) FlattenHashesSeparator string `json:"flatten_hashes_separator,omitempty"` // If set, enables debug logs for http connection (default: false) HTTPConnectionDebug bool `json:"http_conn_debug,omitempty"` // HTTP method (post) (default: post) HTTPMethod string `json:"http_method,omitempty"` // VMware Aria Operations For Logs Host ex. localhost Host string `json:"host,omitempty"` // Keys from log event whose values should be added as log message/text to // VMware Aria Operations For Logs. These key/value pairs won't be // expanded/flattened and won't be added as metadata/fields. // // (default: ["log", "message", "msg"]) LogTextKeys []string `json:"log_text_keys,omitempty"` // Number of bytes per post request (default: 4000000) MaxBatchSize int `json:"max_batch_size,omitempty"` // +docLink:"Secret,../secret/" Password *secret.Secret `json:"password,omitempty"` // VMware Aria Operations For Logs ingestion api path ex. 'api/v1/events/ingest' // (default: api/v1/events/ingest) Path string `json:"path,omitempty"` // VMware Aria Operations For Logs port ex. 9000 (default: 80) Port int `json:"port,omitempty"` // Raise errors that were rescued during HTTP requests? (default: false) RaiseOnError bool `json:"raise_on_error,omitempty"` // Simple rate limiting: ignore any records within `rate_limit_msec` since the // last one (default: 0) RateLimitMilliseconds int `json:"rate_limit_msec,omitempty"` // Number of retries (default: 3) RequestRetries int `json:"request_retries,omitempty"` // http connection ttl for each request (default: 5) RequestTimeout int `json:"request_timeout,omitempty"` // SSL verification flag (default: true) SSLVerify *bool `json:"ssl_verify,omitempty"` // HTTP scheme (http,https) (default: http) Scheme string `json:"scheme,omitempty"` // Serialization (json) (default: json) Serializer string `json:"serializer,omitempty"` // Keys from log event to rewrite for instance from 'kubernetes_namespace' to // 'k8s_namespace' tags will be rewritten with substring substitution and // applied in the order present in the hash (Hashes enumerate their values in // the order that the corresponding keys were inserted, see: // https://ruby-doc.org/core-2.2.2/Hash.html) // // (default { // 'kubernetes_':'k8s_', // 'namespace':'ns', // 'labels_':'', // '_name':'', // '_hash':'', // 'container_':'' // }) ShortenKeys map[string]string `json:"shorten_keys,omitempty"` // +docLink:"Secret,../secret/" Username *secret.Secret `json:"username,omitempty"` }
+kubebuilder:object:generate=true +docName:"VMwareLogInsight" Send your logs to VMware LogInsight
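For orientation, a sketch of a VMwareLogInsightOutput value; host, port, and agent ID are placeholders and the output package qualifier is an assumption.

li := &output.VMwareLogInsightOutput{
	Host:    "loginsight.example.com", // hypothetical VMware Aria Operations for Logs host
	Port:    9543,
	Scheme:  "https",
	AgentID: "fluentd-agent",
}
_ = li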
func (*VMwareLogInsightOutput) DeepCopy ¶ added in v0.12.0
func (in *VMwareLogInsightOutput) DeepCopy() *VMwareLogInsightOutput
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VMwareLogInsightOutput.
func (*VMwareLogInsightOutput) DeepCopyInto ¶ added in v0.12.0
func (in *VMwareLogInsightOutput) DeepCopyInto(out *VMwareLogInsightOutput)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*VMwareLogInsightOutput) ToDirective ¶ added in v0.12.0
func (in *VMwareLogInsightOutput) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
type VMwareLogIntelligenceOutputConfig ¶ added in v0.12.0
type VMwareLogIntelligenceOutputConfig struct { // Log Intelligence endpoint to send logs to https://github.com/vmware/fluent-plugin-vmware-log-intelligence?tab=readme-ov-file#label-endpoint_url EndpointURL string `json:"endpoint_url"` // Verify SSL (default: true) https://github.com/vmware/fluent-plugin-vmware-log-intelligence?tab=readme-ov-file#label-verify_ssl VerifySSL *bool `json:"verify_ssl" plugin:"default:true"` // Compress http request https://github.com/vmware/fluent-plugin-vmware-log-intelligence?tab=readme-ov-file#label-http_compress HTTPCompress *bool `json:"http_compress,omitempty"` // Required headers for sending logs to VMware Log Intelligence https://github.com/vmware/fluent-plugin-vmware-log-intelligence?tab=readme-ov-file#label-3Cheaders-3E Headers LogIntelligenceHeaders `json:"headers"` // +docLink:"Buffer,../buffer/" Buffer *Buffer `json:"buffer,omitempty"` // +docLink:"Format,../format/" Format *Format `json:"format,omitempty"` }
+kubebuilder:object:generate=true +docName:"VMwareLogIntelligence"
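A minimal sketch of a VMwareLogIntelligenceOutputConfig value. The endpoint URL is a placeholder (see the plugin README linked above for the real value), the zero-valued Headers must still be filled with the headers required by Log Intelligence, and the output package qualifier is an assumption.

verifySSL := true
vli := &output.VMwareLogIntelligenceOutputConfig{
	EndpointURL: "https://log-intelligence.example.com/ingest", // placeholder endpoint
	VerifySSL:   &verifySSL,
	Headers:     output.LogIntelligenceHeaders{}, // populate with the required headers (see the LogIntelligenceHeaders type)
}
_ = vli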
func (*VMwareLogIntelligenceOutputConfig) DeepCopy ¶ added in v0.12.0
func (in *VMwareLogIntelligenceOutputConfig) DeepCopy() *VMwareLogIntelligenceOutputConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VMwareLogIntelligenceOutputConfig.
func (*VMwareLogIntelligenceOutputConfig) DeepCopyInto ¶ added in v0.12.0
func (in *VMwareLogIntelligenceOutputConfig) DeepCopyInto(out *VMwareLogIntelligenceOutputConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*VMwareLogIntelligenceOutputConfig) ToDirective ¶ added in v0.12.0
func (v *VMwareLogIntelligenceOutputConfig) ToDirective(secretLoader secret.SecretLoader, id string) (types.Directive, error)
Source Files ¶
- aws_elasticsearch.go
- azurestore.go
- buffer.go
- cloudwatch.go
- datadog.go
- elasticsearch.go
- file.go
- format.go
- format_rfc5424.go
- forward.go
- gcs.go
- gelf.go
- http.go
- kafka.go
- kinesis_firehose.go
- kinesis_stream.go
- logdna.go
- logz.go
- loki.go
- mattermost.go
- newrelic.go
- null.go
- opensearch.go
- oss.go
- redis.go
- relabel.go
- s3.go
- splunk_hec.go
- sqs.go
- syslog.go
- vmware_log_intelligence.go
- vmware_loginsight.go
- zz_generated.deepcopy.go