Documentation
¶
Index ¶
- Variables
- func Factory() (pipeline.AnyPlugin, pipeline.AnyConfig)
- type Clickhouse
- type ColBool
- type ColDateTime
- type ColDateTime64
- type ColEnum16
- type ColEnum8
- type ColFloat32
- type ColFloat64
- type ColIPv4
- type ColIPv6
- type ColInt128
- type ColInt16
- type ColInt256
- type ColInt32
- type ColInt64
- type ColInt8
- type ColString
- type ColUInt128
- type ColUInt16
- type ColUInt256
- type ColUInt32
- type ColUInt64
- type ColUInt8
- type Column
- type Config
- type InsaneColInput
- type InsaneColumn
- type InsertStrategy
- type Plugin
- type Setting
- type Settings
Constants ¶
This section is empty.
Variables ¶
var ( ErrNodeIsNil = errors.New("node is nil, but column is not") ErrInvalidIPVersion = errors.New("IP is valid, but the version does not match the column") )
Functions ¶
Types ¶
type Clickhouse ¶
type ColBool ¶
type ColBool struct {
// contains filtered or unexported fields
}
ColBool represents Clickhouse Bool type.
func NewColBool ¶
func (*ColBool) Append ¶
func (t *ColBool) Append(node *insaneJSON.StrictNode) error
Append the insaneJSON.Node to the batch.
func (*ColBool) EncodeColumn ¶
func (*ColBool) Type ¶
func (t *ColBool) Type() proto.ColumnType
type ColDateTime ¶
type ColDateTime struct {
// contains filtered or unexported fields
}
ColDateTime represents Clickhouse DateTime type.
func NewColDateTime ¶
func NewColDateTime(col *proto.ColDateTime) *ColDateTime
func (*ColDateTime) Append ¶
func (t *ColDateTime) Append(node *insaneJSON.StrictNode) error
func (*ColDateTime) EncodeColumn ¶
func (t *ColDateTime) EncodeColumn(buffer *proto.Buffer)
func (*ColDateTime) Reset ¶
func (t *ColDateTime) Reset()
func (*ColDateTime) Rows ¶
func (t *ColDateTime) Rows() int
func (*ColDateTime) Type ¶
func (t *ColDateTime) Type() proto.ColumnType
type ColDateTime64 ¶
type ColDateTime64 struct {
// contains filtered or unexported fields
}
ColDateTime64 represents Clickhouse DateTime64 type.
func NewColDateTime64 ¶
func NewColDateTime64(col *proto.ColDateTime64, scale int64) *ColDateTime64
func (*ColDateTime64) Append ¶
func (t *ColDateTime64) Append(node *insaneJSON.StrictNode) error
func (*ColDateTime64) EncodeColumn ¶
func (t *ColDateTime64) EncodeColumn(buffer *proto.Buffer)
func (*ColDateTime64) Reset ¶
func (t *ColDateTime64) Reset()
func (*ColDateTime64) Rows ¶
func (t *ColDateTime64) Rows() int
func (*ColDateTime64) Type ¶
func (t *ColDateTime64) Type() proto.ColumnType
type ColEnum16 ¶
type ColEnum16 struct {
// contains filtered or unexported fields
}
ColEnum16 represents Clickhouse Enum16 type.
func (*ColEnum16) Append ¶
func (t *ColEnum16) Append(node *insaneJSON.StrictNode) error
func (*ColEnum16) EncodeColumn ¶
func (*ColEnum16) Type ¶
func (t *ColEnum16) Type() proto.ColumnType
type ColEnum8 ¶
type ColEnum8 struct {
// contains filtered or unexported fields
}
ColEnum8 represents Clickhouse Enum8 type.
func NewColEnum16 ¶
func NewColEnum8 ¶
func (*ColEnum8) Append ¶
func (t *ColEnum8) Append(node *insaneJSON.StrictNode) error
func (*ColEnum8) EncodeColumn ¶
func (*ColEnum8) Type ¶
func (t *ColEnum8) Type() proto.ColumnType
type ColFloat32 ¶
type ColFloat32 struct {
// contains filtered or unexported fields
}
ColFloat32 represents Clickhouse Float32 type.
func NewColFloat32 ¶
func NewColFloat32(nullable bool) *ColFloat32
func (*ColFloat32) Append ¶
func (t *ColFloat32) Append(node *insaneJSON.StrictNode) error
Append the insaneJSON.Node to the batch.
func (*ColFloat32) EncodeColumn ¶
func (t *ColFloat32) EncodeColumn(buffer *proto.Buffer)
func (*ColFloat32) Reset ¶
func (t *ColFloat32) Reset()
func (*ColFloat32) Rows ¶
func (t *ColFloat32) Rows() int
func (*ColFloat32) Type ¶
func (t *ColFloat32) Type() proto.ColumnType
type ColFloat64 ¶
type ColFloat64 struct {
// contains filtered or unexported fields
}
ColFloat64 represents Clickhouse Float64 type.
func NewColFloat64 ¶
func NewColFloat64(nullable bool) *ColFloat64
func (*ColFloat64) Append ¶
func (t *ColFloat64) Append(node *insaneJSON.StrictNode) error
Append the insaneJSON.Node to the batch.
func (*ColFloat64) EncodeColumn ¶
func (t *ColFloat64) EncodeColumn(buffer *proto.Buffer)
func (*ColFloat64) Reset ¶
func (t *ColFloat64) Reset()
func (*ColFloat64) Rows ¶
func (t *ColFloat64) Rows() int
func (*ColFloat64) Type ¶
func (t *ColFloat64) Type() proto.ColumnType
type ColIPv4 ¶
type ColIPv4 struct {
// contains filtered or unexported fields
}
ColIPv4 represents Clickhouse IPv4 type.
func NewColIPv4 ¶
func (*ColIPv4) Append ¶
func (t *ColIPv4) Append(node *insaneJSON.StrictNode) error
func (*ColIPv4) EncodeColumn ¶
func (*ColIPv4) Type ¶
func (t *ColIPv4) Type() proto.ColumnType
type ColIPv6 ¶
type ColIPv6 struct {
// contains filtered or unexported fields
}
ColIPv6 represents Clickhouse IPv6 type.
func NewColIPv6 ¶
func (*ColIPv6) Append ¶
func (t *ColIPv6) Append(node *insaneJSON.StrictNode) error
func (*ColIPv6) EncodeColumn ¶
func (*ColIPv6) Type ¶
func (t *ColIPv6) Type() proto.ColumnType
type ColInt128 ¶
type ColInt128 struct {
// contains filtered or unexported fields
}
ColInt128 represents Clickhouse Int128 type.
func NewColInt128 ¶
func (*ColInt128) Append ¶
func (t *ColInt128) Append(node *insaneJSON.StrictNode) error
Append the insaneJSON.Node to the batch.
func (*ColInt128) EncodeColumn ¶
func (*ColInt128) Type ¶
func (t *ColInt128) Type() proto.ColumnType
type ColInt16 ¶
type ColInt16 struct {
// contains filtered or unexported fields
}
ColInt16 represents Clickhouse Int16 type.
func NewColInt16 ¶
func (*ColInt16) Append ¶
func (t *ColInt16) Append(node *insaneJSON.StrictNode) error
Append the insaneJSON.Node to the batch.
func (*ColInt16) EncodeColumn ¶
func (*ColInt16) Type ¶
func (t *ColInt16) Type() proto.ColumnType
type ColInt256 ¶
type ColInt256 struct {
// contains filtered or unexported fields
}
ColInt256 represents Clickhouse Int256 type.
func NewColInt256 ¶
func (*ColInt256) Append ¶
func (t *ColInt256) Append(node *insaneJSON.StrictNode) error
Append the insaneJSON.Node to the batch.
func (*ColInt256) EncodeColumn ¶
func (*ColInt256) Type ¶
func (t *ColInt256) Type() proto.ColumnType
type ColInt32 ¶
type ColInt32 struct {
// contains filtered or unexported fields
}
ColInt32 represents Clickhouse Int32 type.
func NewColInt32 ¶
func (*ColInt32) Append ¶
func (t *ColInt32) Append(node *insaneJSON.StrictNode) error
Append the insaneJSON.Node to the batch.
func (*ColInt32) EncodeColumn ¶
func (*ColInt32) Type ¶
func (t *ColInt32) Type() proto.ColumnType
type ColInt64 ¶
type ColInt64 struct {
// contains filtered or unexported fields
}
ColInt64 represents Clickhouse Int64 type.
func NewColInt64 ¶
func (*ColInt64) Append ¶
func (t *ColInt64) Append(node *insaneJSON.StrictNode) error
Append the insaneJSON.Node to the batch.
func (*ColInt64) EncodeColumn ¶
func (*ColInt64) Type ¶
func (t *ColInt64) Type() proto.ColumnType
type ColInt8 ¶
type ColInt8 struct {
// contains filtered or unexported fields
}
ColInt8 represents Clickhouse Int8 type.
func NewColInt8 ¶
func (*ColInt8) Append ¶
func (t *ColInt8) Append(node *insaneJSON.StrictNode) error
Append the insaneJSON.Node to the batch.
func (*ColInt8) EncodeColumn ¶
func (*ColInt8) Type ¶
func (t *ColInt8) Type() proto.ColumnType
type ColString ¶
type ColString struct {
// contains filtered or unexported fields
}
ColString represents Clickhouse String type.
func NewColString ¶
func (*ColString) Append ¶
func (t *ColString) Append(node *insaneJSON.StrictNode) error
Append the insaneJSON.Node to the batch.
func (*ColString) EncodeColumn ¶
func (*ColString) Type ¶
func (t *ColString) Type() proto.ColumnType
type ColUInt128 ¶
type ColUInt128 struct {
// contains filtered or unexported fields
}
ColUInt128 represents Clickhouse UInt128 type.
func NewColUInt128 ¶
func NewColUInt128(nullable bool) *ColUInt128
func (*ColUInt128) Append ¶
func (t *ColUInt128) Append(node *insaneJSON.StrictNode) error
Append the insaneJSON.Node to the batch.
func (*ColUInt128) EncodeColumn ¶
func (t *ColUInt128) EncodeColumn(buffer *proto.Buffer)
func (*ColUInt128) Reset ¶
func (t *ColUInt128) Reset()
func (*ColUInt128) Rows ¶
func (t *ColUInt128) Rows() int
func (*ColUInt128) Type ¶
func (t *ColUInt128) Type() proto.ColumnType
type ColUInt16 ¶
type ColUInt16 struct {
// contains filtered or unexported fields
}
ColUInt16 represents Clickhouse UInt16 type.
func NewColUInt16 ¶
func (*ColUInt16) Append ¶
func (t *ColUInt16) Append(node *insaneJSON.StrictNode) error
Append the insaneJSON.Node to the batch.
func (*ColUInt16) EncodeColumn ¶
func (*ColUInt16) Type ¶
func (t *ColUInt16) Type() proto.ColumnType
type ColUInt256 ¶
type ColUInt256 struct {
// contains filtered or unexported fields
}
ColUInt256 represents Clickhouse UInt256 type.
func NewColUInt256 ¶
func NewColUInt256(nullable bool) *ColUInt256
func (*ColUInt256) Append ¶
func (t *ColUInt256) Append(node *insaneJSON.StrictNode) error
Append the insaneJSON.Node to the batch.
func (*ColUInt256) EncodeColumn ¶
func (t *ColUInt256) EncodeColumn(buffer *proto.Buffer)
func (*ColUInt256) Reset ¶
func (t *ColUInt256) Reset()
func (*ColUInt256) Rows ¶
func (t *ColUInt256) Rows() int
func (*ColUInt256) Type ¶
func (t *ColUInt256) Type() proto.ColumnType
type ColUInt32 ¶
type ColUInt32 struct {
// contains filtered or unexported fields
}
ColUInt32 represents Clickhouse UInt32 type.
func NewColUInt32 ¶
func (*ColUInt32) Append ¶
func (t *ColUInt32) Append(node *insaneJSON.StrictNode) error
Append the insaneJSON.Node to the batch.
func (*ColUInt32) EncodeColumn ¶
func (*ColUInt32) Type ¶
func (t *ColUInt32) Type() proto.ColumnType
type ColUInt64 ¶
type ColUInt64 struct {
// contains filtered or unexported fields
}
ColUInt64 represents Clickhouse UInt64 type.
func NewColUInt64 ¶
func (*ColUInt64) Append ¶
func (t *ColUInt64) Append(node *insaneJSON.StrictNode) error
Append the insaneJSON.Node to the batch.
func (*ColUInt64) EncodeColumn ¶
func (*ColUInt64) Type ¶
func (t *ColUInt64) Type() proto.ColumnType
type ColUInt8 ¶
type ColUInt8 struct {
// contains filtered or unexported fields
}
ColUInt8 represents Clickhouse UInt8 type.
func NewColUInt8 ¶
func (*ColUInt8) Append ¶
func (t *ColUInt8) Append(node *insaneJSON.StrictNode) error
Append the insaneJSON.Node to the batch.
func (*ColUInt8) EncodeColumn ¶
func (*ColUInt8) Type ¶
func (t *ColUInt8) Type() proto.ColumnType
type Config ¶
type Config struct {
// > @3@4@5@6
// >
// > TCP Clickhouse addresses, e.g.: 127.0.0.1:9000.
// > Check the insert_strategy to find out how File.d will behave with a list of addresses.
Addresses []string `json:"addresses" required:"true"` // *
// > @3@4@5@6
// >
// > If more than one address is set, File.d will insert batches depending on the strategy:
// > round_robin - File.d will send requests in the round-robin order.
// > in_order - File.d will send requests starting from the first address, ending with the number of retries.
InsertStrategy string `json:"insert_strategy" default:"round_robin" options:"round_robin|in_order"` // *
InsertStrategy_ InsertStrategy
// > @3@4@5@6
// >
// > CA certificate in PEM encoding. This can be a path or the content of the certificate.
CACert string `json:"ca_cert" default:""` // *
// > @3@4@5@6
// >
// > Clickhouse database name to search the table.
Database string `json:"database" default:"default"` // *
// > @3@4@5@6
// >
// > Clickhouse database user.
User string `json:"user" default:"default"` // *
// > @3@4@5@6
// >
// > Clickhouse database password.
Password string `json:"password" default:""` // *
// > @3@4@5@6
// >
// > Clickhouse quota key.
// > https://clickhouse.com/docs/en/operations/quotas
QuotaKey string `json:"quota_key" default:""` // *
// > @3@4@5@6
// >
// > Clickhouse target table.
Table string `json:"table" required:"true"` // *
// > @3@4@5@6
// >
// > Clickhouse table columns. Each column must contain `name` and `type`.
// > File.d supports next data types:
// > * Signed and unsigned integers from 8 to 64 bits.
// > If you set 128-256 bits - File.d will cast the number to int64.
// > * DateTime, DateTime64
// > * String
// > * Enum8, Enum16
// > * Bool
// > * Nullable
// > * IPv4, IPv6
// > If you need more types, please, create an issue.
Columns []Column `json:"columns" required:"true"` // *
// > @3@4@5@6
// >
// > The level of compression.
// > Disabled - lowest CPU overhead.
// > LZ4 - medium CPU overhead.
// > ZSTD - high CPU overhead.
// > None - uses no compression but data has checksums.
Compression string `default:"disabled" options:"disabled|lz4|zstd|none"` // *
// > @3@4@5@6
// >
// > Retries of insertion. If File.d cannot insert for this number of attempts,
// > File.d will fall with non-zero exit code.
Retry int `json:"retry" default:"10"` // *
// > @3@4@5@6
// >
// > Additional settings to the Clickhouse.
// > Settings list: https://clickhouse.com/docs/en/operations/settings/settings
ClickhouseSettings Settings `json:"clickhouse_settings"` // *
// > @3@4@5@6
// >
// > Retention milliseconds for retry to DB.
Retention cfg.Duration `json:"retention" default:"50ms" parse:"duration"` // *
Retention_ time.Duration
// > @3@4@5@6
// >
// > Timeout for each insert request.
InsertTimeout cfg.Duration `json:"insert_timeout" default:"10s" parse:"duration"` // *
InsertTimeout_ time.Duration
// > @3@4@5@6
// >
// > Max connections in the connection pool.
MaxConns cfg.Expression `json:"max_conns" default:"gomaxprocs*4" parse:"expression"` // *
MaxConns_ int32
// > @3@4@5@6
// >
// > Min connections in the connection pool.
MinConns cfg.Expression `json:"min_conns" default:"gomaxprocs*1" parse:"expression"` // *
MinConns_ int32
// > @3@4@5@6
// >
// > How long a connection lives before it is killed and recreated.
MaxConnLifetime cfg.Duration `json:"max_conn_lifetime" default:"30m" parse:"duration"` // *
MaxConnLifetime_ time.Duration
// > @3@4@5@6
// >
// > How long an unused connection lives before it is killed.
MaxConnIdleTime cfg.Duration `json:"max_conn_idle_time" default:"5m" parse:"duration"` // *
MaxConnIdleTime_ time.Duration
// > @3@4@5@6
// >
// > How often to check whether idle connections should be killed.
HealthCheckPeriod cfg.Duration `json:"health_check_period" default:"1m" parse:"duration"` // *
HealthCheckPeriod_ time.Duration
// > @3@4@5@6
// >
// > How many workers will be instantiated to send batches.
// > It also configures the amount of minimum and maximum number of database connections.
WorkersCount cfg.Expression `json:"workers_count" default:"gomaxprocs*4" parse:"expression"` // *
WorkersCount_ int
// > @3@4@5@6
// >
// > Maximum quantity of events to pack into one batch.
BatchSize cfg.Expression `json:"batch_size" default:"capacity/4" parse:"expression"` // *
BatchSize_ int
// > @3@4@5@6
// >
// > A minimum size of events in a batch to send.
// > If both batch_size and batch_size_bytes are set, they will work together.
BatchSizeBytes cfg.Expression `json:"batch_size_bytes" default:"0" parse:"expression"` // *
BatchSizeBytes_ int
// > @3@4@5@6
// >
// > After this timeout batch will be sent even if batch isn't completed.
BatchFlushTimeout cfg.Duration `json:"batch_flush_timeout" default:"200ms" parse:"duration"` // *
BatchFlushTimeout_ time.Duration
}
! config-params ^ config-params
type InsaneColInput ¶
type InsaneColInput interface {
proto.ColInput
Append(node *insaneJSON.StrictNode) error
Reset()
}
type InsaneColumn ¶
type InsaneColumn struct {
Name string
ColInput InsaneColInput
}
type InsertStrategy ¶
type InsertStrategy byte
const ( StrategyRoundRobin InsertStrategy = iota StrategyInOrder )
type Plugin ¶
type Plugin struct {
// contains filtered or unexported fields
}
func (*Plugin) RegisterMetrics ¶
Directories
¶
| Path | Synopsis |
|---|---|
|
Package mock_clickhouse is a generated GoMock package.
|
Package mock_clickhouse is a generated GoMock package. |