Documentation ¶
Overview ¶
Copyright 2022 Huawei Cloud Computing Technologies Co., Ltd.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
Copyright 2023 Huawei Cloud Computing Technologies Co., Ltd.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
Index ¶
- Constants
- Variables
- func BuildTLSConfig(pk, pem, clientPem string, tlsConf *tls.Config) error
- func CombineDomain(domain, addr string) string
- func CompactionType2Str(compact CompactionType) string
- func FormatSpdy(cfg *Spdy)
- func GetCertLeaf(cert *tls.Certificate) time.Time
- func GetDataDir() string
- func GetNodeId() uint64
- func GetShardMemTableMinSize() int64
- func GetShardMemTableSizeLimit() int64
- func GetSubscriptionEnable() bool
- func GetTimeZoneLoc() *time.Location
- func GetTimeZoneName() string
- func HotModeEnabled() bool
- func IsHardWrite() bool
- func IsLogKeeper() bool
- func IsReplication() bool
- func IsSharedStorage() bool
- func LimitRange[T Comparable](v *T, minV, maxV, def T)
- func NewTLSConfig(skipVerify bool) *tls.Config
- func Parse(conf Config, path string) error
- func PreFullCompactLevel() uint16
- func ResetZero2Default[T comparable](v *T, zero T, def T)
- func SetCommon(conf Common)
- func SetDefaultLimitsForUnmarshalling(defaults Limits)
- func SetElectionTick(tick int)
- func SetEntryFileRWType(typ int)
- func SetFileWrapSize(size int)
- func SetHaPolicy(haPolicy string) error
- func SetHardWrite(hardWriteConf bool)
- func SetHeartbeatTick(tick int)
- func SetIndexConfig(conf *Index)
- func SetLastRowCacheConfig(cfg *LastRowCacheConfig)
- func SetLogStoreConfig(c *LogStoreConfig)
- func SetNodeId(id uint64)
- func SetProductType(productType string)
- func SetRaftMsgCacheSize(size int)
- func SetRaftMsgTimeout(timeout time.Duration)
- func SetReadDataCachePct(pct int)
- func SetReadMetaCachePct(pct int)
- func SetSFSConfig(dataDir string)
- func SetShardMemTableSizeLimit(limit int64)
- func SetShelfMode(conf ShelfMode)
- func SetStoreConfig(conf Store)
- func SetSubscriptionEnable(en bool)
- func SetWaitCommitTimeout(timeout time.Duration)
- func ShelfHotModeEnabled() bool
- func ShelfModeEnabled() bool
- func TSSPToParquetLevel() uint16
- func UpdateTimeZoneLoc(zoneName string) error
- type App
- type Castor
- type CertValidator
- type ClvConfig
- type ColumnStore
- type Common
- type Compact
- type CompactionType
- type Comparable
- type Config
- type Consume
- type ContinuousQueryConfig
- type Coordinator
- type EngineType
- type Fence
- type Gossip
- type HAPolicy
- type HierarchicalConfig
- type HotMode
- type Index
- type LastRowCacheConfig
- type Limits
- type LogStoreConfig
- func (l *LogStoreConfig) EnableCache(e bool)
- func (l *LogStoreConfig) GetCacheMemory() int64
- func (l *LogStoreConfig) GetCacheRate() float32
- func (l *LogStoreConfig) GetCacheTTL() time.Duration
- func (l *LogStoreConfig) GetContainerBasePath() string
- func (l *LogStoreConfig) GetVlmCacheGroupSize() uint32
- func (l *LogStoreConfig) GetVlmCachePieceSize() uint32
- func (l *LogStoreConfig) GetVlmCachePrefetchNums() uint32
- func (l *LogStoreConfig) GetVlmCacheTtl() time.Duration
- func (l *LogStoreConfig) IsCacheEnabled() bool
- func (l *LogStoreConfig) IsVlmCacheEnable() bool
- func (l *LogStoreConfig) IsVlmCacheGroupPrefetch() bool
- func (l *LogStoreConfig) IsVlmCacheHotData() bool
- func (l *LogStoreConfig) IsVlmCachePiecePrefetch() bool
- func (l *LogStoreConfig) IsVlmPrefetchEnable() bool
- func (l *LogStoreConfig) SetMemorySize(m toml.Size)
- type Logger
- type MemTable
- type Merge
- type Meta
- type Monitor
- type MonitorMain
- type MonitorQuery
- type MonitorReport
- type Obs
- type OpsMonitor
- type ParquetTaskConfig
- type ProductType
- type RaftStorage
- type ReadCache
- type RecordWriteConfig
- type RuntimeConfig
- type SFS
- type SelectSpecConfig
- type ShardMergeConfig
- type ShelfMode
- type SherlockConfig
- type Spdy
- func (c *Spdy) ApplyEnvOverrides(_ func(string) string) error
- func (c *Spdy) GetOpenSessionTimeout() time.Duration
- func (c *Spdy) GetSessionSelectTimeout() time.Duration
- func (c *Spdy) GetTCPDialTimeout() time.Duration
- func (c *Spdy) NewClientTLSConfig() (*tls.Config, error)
- func (c *Spdy) NewTLSConfig() (*tls.Config, error)
- func (c *Spdy) ShowConfigs() map[string]interface{}
- func (c Spdy) Validate() error
- type Store
- type Subscriber
- type TSMeta
- func (c *TSMeta) ApplyEnvOverrides(func(string) string) error
- func (c *TSMeta) GetCommon() *Common
- func (c *TSMeta) GetLogStoreConfig() *LogStoreConfig
- func (c *TSMeta) GetLogging() *Logger
- func (c *TSMeta) GetSpdy() *Spdy
- func (c *TSMeta) ShowConfigs() map[string]interface{}
- func (c *TSMeta) Validate() error
- type TSMonitor
- func (c *TSMonitor) ApplyEnvOverrides(getenv func(string) string) error
- func (c *TSMonitor) GetCommon() *Common
- func (c *TSMonitor) GetLogStoreConfig() *LogStoreConfig
- func (c *TSMonitor) GetLogging() *Logger
- func (c *TSMonitor) GetSpdy() *Spdy
- func (c *TSMonitor) ShowConfigs() map[string]interface{}
- func (c *TSMonitor) Validate() error
- type TSSql
- func (c *TSSql) ApplyEnvOverrides(fn func(string) string) error
- func (c *TSSql) Corrector(cpuNum, cpuAllocRatio int)
- func (c *TSSql) GetCommon() *Common
- func (c *TSSql) GetLogStoreConfig() *LogStoreConfig
- func (c *TSSql) GetLogging() *Logger
- func (c *TSSql) GetSpdy() *Spdy
- func (c *TSSql) ShowConfigs() map[string]interface{}
- func (c *TSSql) Validate() error
- type TSStore
- func (c *TSStore) ApplyEnvOverrides(fn func(string) string) error
- func (c *TSStore) GetCommon() *Common
- func (c *TSStore) GetLogStoreConfig() *LogStoreConfig
- func (c *TSStore) GetLogging() *Logger
- func (c *TSStore) GetSpdy() *Spdy
- func (c *TSStore) ShowConfigs() map[string]interface{}
- func (c *TSStore) Validate() error
- type Topo
- type TsRecover
- func (c *TsRecover) ApplyEnvOverrides(getenv func(string) string) error
- func (c *TsRecover) GetCommon() *Common
- func (c *TsRecover) GetLogStoreConfig() *LogStoreConfig
- func (c *TsRecover) GetLogging() *Logger
- func (c *TsRecover) GetSpdy() *Spdy
- func (c *TsRecover) ShowConfigs() map[string]interface{}
- func (c *TsRecover) Validate() error
- type Validator
- type Wal
Constants ¶
const (
    DefaultPoolSize int = 30
    DefaultWaitTimeout int = 30
    DefaultTracingEndpoint = "127.0.0.1:8086"
    DefaultTracingRatio = 0.1
    DefaultTracingStorePath = "_internal"
)

const (
    Fit algorithmType = "fit"
    Predict algorithmType = "predict"
    Detect algorithmType = "detect"
    FitDetect algorithmType = "fit_detect"
)

const (
    DefaultCompactFullWriteColdDuration = 1 * time.Hour

    // DefaultMaxConcurrentCompactions is the maximum number of concurrent full and level compactions
    // that can run at one time. A value of 0 results in 50% of runtime.GOMAXPROCS(0) used at runtime.
    DefaultMaxConcurrentCompactions = 0

    DefaultMaxConcurrentFullCompactions = 1
    DefaultSnapshotThroughput = 48 * MB
    DefaultSnapshotThroughputBurst = 48 * MB
    DefaultBackGroundReadThroughput = 64 * MB
)

const (
    DefaultConsumeHost = "127.0.0.1"
    DefaultConsumePort = 9092
    MaxRequestSize = 1 * 1024 * 1024 // 1M
)

const (
    WAFPolicy = "write-available-first"
    SSPolicy = "shared-storage"
    RepPolicy = "replication"
)

const (
    DefaultHSRunInterval = 1 * time.Minute
    DefaultIHSRunInterval = 1 * time.Hour
    DefaultMaxProcessHSNumber = 1
)

const (
    // DefaultSubPath is the default subpath for storing logs
    DefaultSubPath = "logs"
    // DefaultLevel is the level at which logs are emitted
    DefaultLevel = zap.InfoLevel
    // DefaultMaxSize is the max size of a log file
    DefaultMaxSize = 64 * 1024 * 1024 // 64MB
    // DefaultMaxNum is the max number of log files
    DefaultMaxNum = 16
    // DefaultMaxAge is the max duration a log file is kept
    DefaultMaxAge = 7 // 7days
    // DefaultCompressEnabled is whether the log files are compressed
    DefaultCompressEnabled = true

    DefaultStoreRaftLoggerName = "store_raft"
)

const (
    DefaultWriteColdDuration = 5 * time.Second
    DefaultMaxWriteHangTime = 15 * time.Second
    DefaultForceSnapShotTime = 20 * time.Second
)

const (
    ReliabilityLevelLow = 1
    ReliabilityLevelMedium = 2
    ReliabilityLevelHigh = 3
)

const (
    DefaultDir = "meta"
    DefaultLoggingEnabled = true
    DefaultRaftFileName = "raft"
    DefaultGossipFileName = "gossip"
    DefaultHTTPBindAddress = "127.0.0.1:8091"
    DefaultRPCBindAddress = "127.0.0.1:8092"
    DefaultRaftBindAddress = "127.0.0.1:8088"
    DefaultCommitTimeout = 50 * time.Millisecond
    DefaultLeaderLeaseTimeout = 500 * time.Millisecond
    DefaultElectionTimeout = 1000 * time.Millisecond
    DefaultHeartbeatTimeout = 1000 * time.Millisecond
    DefaultLeaseDuration = 60 * time.Second
    DefaultReplicaColdSelectInterval = 5 * time.Minute
    DefaultConcurrentWriteLimit = 10
    DefaultVersion = 0
    DefaultSplitRowThreshold = 10000
    DefaultImbalanceFactor = 0.3
    DefaultHostname = "localhost"
    DefaultSuspicionMult = 4
    DefaultProbInterval = toml.Duration(400 * time.Millisecond)
    DefaultPtNumPerNode = 1
    DefaultHashAlgo = "ver03"
    DefaultHaPolicy = "write-available-first"
    DefaultBalanceAlgoVer = "v1.1"

    // Default number of shards for each measurement in a shard group.
    DefaultNumOfShards = 3
    MaxNumOfShards = 1024

    // Enable SQLite for meta
    DefalutSQLiteEnabled = false

    DefaultSqlBindPort = 8012
    DefaultShardGroupTimeZone = "UTC"
)

const (
    DefaultHistoryFile = "history.json"
    DefaultMonitorAddress = "127.0.0.1:8086"
    DefaultMonitorDatabase = "monitor"
    DefaultMonitorRP = "autogen"
    DefaultMonitorRPDuration = 7 * 24 * time.Hour
    DefaultReplicaN = 1

    // DefaultStoreEnabled is whether the system writes gathered information in
    // an InfluxDB system for historical analysis.
    DefaultStoreEnabled = false

    // DefaultStoreDatabase is the name of the database where gathered information is written.
    DefaultStoreDatabase = "_internal"

    // DefaultStoreInterval is the period between storing gathered information.
    DefaultStoreInterval = 10 * time.Second

    // DefaultHttpEndpoint is the address the monitor writes to.
    DefaultHttpEndpoint = "127.0.0.1:8086"

    // MonitorRetentionPolicy is the name of the retention policy used by the monitor service.
    MonitorRetentionPolicy = "autogen"

    // MonitorRetentionPolicyDuration is the duration of the monitor retention policy.
    MonitorRetentionPolicyDuration = 7 * 24 * time.Hour

    // MonitorRetentionPolicyReplicaN is the default replication factor to set on the monitor retention policy.
    MonitorRetentionPolicyReplicaN = 1

    // HttpPusher pushes monitoring metric data through HTTP.
    HttpPusher = "http"
    // FilePusher saves the monitoring metric data to a file.
    FilePusher = "file"

    DefaultPushers = ""
    PusherSep = "|"
)

const (
    DefaultQueenSize = 32
    DefaultWalBufferSize = 1
)

const (
    DefaultTSSPToParquetLevel = 0
    DefaultMaxRowGroupLen = 64 * 1024
    DefaultPageSize = 64 * 1024
    DefaultWriteBatchSize = 512
    DefaultItrWriteBatchSize = 10
    DefaultDictCompressEnable = 0 // 0 means disable, 1 otherwise
    DefaultCompressAlg = 1 // 0 represents snappy, 1 represents zstd; default is zstd
    DefaultMaxStatsSize = 64 * 1024
    DefaultReliabilityLogDir = "/data/openGemini/parquet_reliability_log"
    DefaultOutputDir = "/data/openGemini/parquet_output"
)

const (
    DefaultReadMetaCachePercent = 3
    DefaultReadDataCachePercent = 10
)

const (
    DefaultSeriesCount = 0
    DefaultFieldsCount = 0
)

const (
    DefaultSMRunInterval = 40 * time.Minute
    MergeDriSuffix = "_merge"
)

const (
    DefaultCollectInterval = 10 * time.Second
    DefaultSherlockMaxNum = 32
    DefaultSherlockMaxAge = 7 // 7 days
)

const (
    Second = toml.Duration(time.Second)

    MinRecvWindowSize = 2
    MinConcurrentAcceptSession = 1024
    MinOpenSessionTimeout = Second
    MinSessionSelectTimeout = 60 * Second
    MinTCPDialTimeout = Second
    MinConnPoolSize = 2

    DefaultRecvWindowSize = 8
    DefaultConcurrentAcceptSession = 4096
    DefaultOpenSessionTimeout = 2 * Second
    DefaultSessionSelectTimeout = 300 * Second
    DefaultTCPDialTimeout = Second
    DefaultConnPoolSize = 4

    TCPWriteTimeout = 120 * time.Second
    TCPReadTimeout = 300 * time.Second
)

const (
    // DefaultWriteTimeout is the default timeout for a complete write to succeed.
    DefaultWriteTimeout = 10 * time.Second

    DefaultQueryTimeout = 0

    // DefaultShardWriterTimeout is the default timeout set on shard writers.
    DefaultShardWriterTimeout = 10 * time.Second

    // DefaultShardMapperTimeout is the default timeout set on shard mappers.
    DefaultShardMapperTimeout = 10 * time.Second

    // DefaultMaxConcurrentQueries is the maximum number of running queries.
    // A value of zero will make the maximum query limit unlimited.
    DefaultMaxConcurrentQueries = 0

    // DefaultMaxQueryMem is the maximum size a query cache can reach before it starts stopping a query.
    DefaultMaxQueryMem = 0

    DefaultMetaExecutorWriteTimeout = 5 * time.Second
    DefaultQueryLimitIntervalTime = 10
    DefaultQueryLimitLevel = 0
    DefaultQueryLimitFlag = false
    DefaultShardTier = "warm"
    DefaultForceBroadcastQuery = false
    DefaultRetentionPolicyLimit = 100
)

const (
    EngineType1 = "tssp1"
    EngineType2 = "tssp2"
    DefaultEngine = "tssp1"

    KB = 1024
    MB = 1024 * 1024
    GB = 1024 * 1024 * 1024

    DefaultIngesterAddress = "127.0.0.1:8400"
    DefaultSelectAddress = "127.0.0.1:8401"
    DefaultInterruptSqlMemPct = 85

    CompressAlgoLZ4 = "lz4"
    CompressAlgoSnappy = "snappy"
    CompressAlgoZSTD = "zstd"

    IndexFileDirectory = "index"
    DataDirectory = "data"
    WalDirectory = "wal"
    MetaDirectory = "meta"
    FenceDirectory = "fence"

    DefaultDropInterval = 3
)

const (
    DefaultHotModeMemoryAllowedPercent = 5
    DefaultHotModeTimeWindow = time.Minute
    DefaultHotDuration = time.Hour
    MaxHotDuration = time.Hour * 24 * 30
    DefaultMaxHotFileSize = 2 * GB
    DefaultHotModePoolObjectCnt = 2
    DefaultHotModeMaxCacheSize = 1 * GB
)

const (
    DefaultRaftMsgTimeout = 15 * time.Second
    DefaultElectionTick = 10
    DefaultHeartbeatTick = 1
    DefaultRaftMsgCacheSize = 1000
    DefaultFileWrapSize = 128
    DefaultWaitCommitTimeout = 20 * time.Second
    DefaultEntryFileRWType = 2
)

const (
    DefaultHTTPTimeout = 30 * time.Second // 30 seconds
    DefaultBufferSize = 100 // channel size 100
)

const (
    DefaultWALSyncInterval = 100 * time.Millisecond
    DefaultWalReplayBatchSize = 1 * MB // 1MB
)
const (
DefaultFenceEnable = false
)
const (
DefaultRaftEntrySyncInterval = 100 * time.Millisecond
)
const ( // DefaultRunInterval is the default interval at which the CQ service will run. DefaultRunInterval = time.Second )
const (
DefaultTopoManagerUrl = ""
)
const (
LogKeeperService = "logkeeper"
)
const StreamGroupValueSeparator byte = 0
Byte 0 is used in place of a space as the separator between stream group values.
const StreamGroupValueStrSeparator string = "\x00"
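A minimal usage sketch. The import path (github.com/openGemini/openGemini/lib/config) and the joining scheme shown here are assumptions for illustration, not documented behavior of this package:

package main

import (
    "fmt"
    "strings"

    "github.com/openGemini/openGemini/lib/config"
)

func main() {
    // Join two group values with the zero-byte separator; a space may
    // legitimately appear inside a value, whereas byte 0 cannot.
    values := []string{"region=us-east", "host=node 1"}
    key := strings.Join(values, config.StreamGroupValueStrSeparator)

    // The byte form and the string form denote the same separator.
    fmt.Println(strings.Count(key, string(config.StreamGroupValueSeparator))) // prints 1
}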
Variables ¶
var DefaultCipherSuites = []uint16{
    tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
    tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
    tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
    tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
}
var DefaultMetaJoin = []string{"127.0.0.1:8092"}
var ElectionTick = DefaultElectionTick
var EngineType2String map[EngineType]string = map[EngineType]string{
    TSSTORE: "tsstore",
    COLUMNSTORE: "columnstore",
    ENGINETYPEEND: "undefined",
}
var EntryFileRWType = DefaultEntryFileRWType
var FileWrapSize = DefaultFileWrapSize
var HeartbeatTick = DefaultHeartbeatTick
var LogKeeperConfig = &LogStoreConfig{}
var MetaEventHandleEn bool
var RaftMsgCacheSize = DefaultRaftMsgCacheSize
var RaftMsgTimeout = DefaultRaftMsgTimeout
var ReadDataCachePct = DefaultReadDataCachePercent
var ReadMetaCachePct = DefaultReadMetaCachePercent
var String2EngineType map[string]EngineType = map[string]EngineType{
    "tsstore": TSSTORE,
    "columnstore": COLUMNSTORE,
    "undefined": ENGINETYPEEND,
}
var WaitCommitTimeout = DefaultWaitCommitTimeout
Functions ¶
func BuildTLSConfig ¶ added in v1.5.0
func BuildTLSConfig(pk, pem, clientPem string, tlsConf *tls.Config) error
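A hedged sketch of how the two TLS helpers might be combined. The import path is assumed, and whether pk, pem, and clientPem carry PEM contents or file paths is not specified here; they are passed through untouched:

package tlsutil // illustrative package name

import (
    "crypto/tls"

    "github.com/openGemini/openGemini/lib/config"
)

// newServerTLS builds a *tls.Config seeded by NewTLSConfig and then filled
// in by BuildTLSConfig. The three string arguments come from the caller and
// are treated as opaque here.
func newServerTLS(pk, pem, clientPem string) (*tls.Config, error) {
    tlsConf := config.NewTLSConfig(false) // false: do not skip peer verification
    if err := config.BuildTLSConfig(pk, pem, clientPem, tlsConf); err != nil {
        return nil, err
    }
    return tlsConf, nil
}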
func CombineDomain ¶ added in v0.2.0
func CombineDomain(domain, addr string) string
func CompactionType2Str ¶ added in v1.2.0
func CompactionType2Str(compact CompactionType) string
func FormatSpdy ¶
func FormatSpdy(cfg *Spdy)
func GetCertLeaf ¶ added in v1.5.0
func GetCertLeaf(cert *tls.Certificate) time.Time
func GetDataDir ¶ added in v1.2.0
func GetDataDir() string
func GetShardMemTableMinSize ¶ added in v1.3.0
func GetShardMemTableMinSize() int64
func GetShardMemTableSizeLimit ¶ added in v1.3.0
func GetShardMemTableSizeLimit() int64
func GetSubscriptionEnable ¶ added in v1.1.0
func GetSubscriptionEnable() bool
func GetTimeZoneLoc ¶ added in v1.5.0
func GetTimeZoneLoc() *time.Location
func GetTimeZoneName ¶ added in v1.5.0
func GetTimeZoneName() string
func HotModeEnabled ¶ added in v1.4.0
func HotModeEnabled() bool
func IsHardWrite ¶ added in v1.4.0
func IsHardWrite() bool
func IsLogKeeper ¶ added in v1.2.0
func IsLogKeeper() bool
func IsReplication ¶ added in v1.1.0
func IsReplication() bool
func IsSharedStorage ¶ added in v1.1.1
func IsSharedStorage() bool
func LimitRange ¶ added in v1.4.1
func LimitRange[T Comparable](v *T, minV, maxV, def T)
func NewTLSConfig ¶ added in v1.3.0
func NewTLSConfig(skipVerify bool) *tls.Config
func PreFullCompactLevel ¶ added in v1.3.0
func PreFullCompactLevel() uint16
func ResetZero2Default ¶ added in v1.4.0
func ResetZero2Default[T comparable](v *T, zero T, def T)
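A sketch covering ResetZero2Default together with the LimitRange helper listed above. The exact semantics are assumptions read off the signatures (ResetZero2Default swaps a zero value for a default; LimitRange falls back to def when the value leaves [minV, maxV]), and it is assumed that int satisfies the package's Comparable constraint:

package main

import (
    "fmt"

    "github.com/openGemini/openGemini/lib/config"
)

func main() {
    timeout := 0
    config.ResetZero2Default(&timeout, 0, 30) // zero value, presumably reset to 30

    workers := 1000
    config.LimitRange(&workers, 1, 256, 64) // outside [1, 256], presumably reset to 64

    fmt.Println(timeout, workers)
}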
func SetDefaultLimitsForUnmarshalling ¶ added in v1.4.0
func SetDefaultLimitsForUnmarshalling(defaults Limits)
SetDefaultLimitsForUnmarshalling sets global default limits, used when loading Limits from YAML files. This is used to ensure per-tenant limits are defaulted to those values.
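A minimal sketch of that flow. The openGemini config import path and the use of gopkg.in/yaml.v2 as the decoder are assumptions; fields omitted in the per-tenant YAML keep the registered defaults:

package main

import (
    "fmt"
    "log"

    "github.com/openGemini/openGemini/lib/config"
    "gopkg.in/yaml.v2"
)

func main() {
    // Register the global defaults first.
    config.SetDefaultLimitsForUnmarshalling(config.Limits{
        PromLimitEnabled:    true,
        MaxLabelNameLength:  1024,
        MaxLabelValueLength: 2048,
    })

    // The per-tenant YAML overrides only one field; the rest fall back to
    // the defaults registered above.
    var tenant config.Limits
    if err := yaml.Unmarshal([]byte("max_label_name_length: 512"), &tenant); err != nil {
        log.Fatal(err)
    }
    fmt.Println(tenant.MaxLabelNameLength, tenant.MaxLabelValueLength)
}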
func SetElectionTick ¶ added in v1.3.0
func SetElectionTick(tick int)
func SetEntryFileRWType ¶ added in v1.5.0
func SetEntryFileRWType(typ int)
func SetFileWrapSize ¶ added in v1.4.0
func SetFileWrapSize(size int)
func SetHaPolicy ¶ added in v1.1.0
func SetHaPolicy(haPolicy string) error
func SetHardWrite ¶ added in v1.4.0
func SetHardWrite(hardWriteConf bool)
func SetHeartbeatTick ¶ added in v1.3.0
func SetHeartbeatTick(tick int)
func SetIndexConfig ¶ added in v1.3.0
func SetIndexConfig(conf *Index)
func SetLastRowCacheConfig ¶ added in v1.5.0
func SetLastRowCacheConfig(cfg *LastRowCacheConfig)
func SetLogStoreConfig ¶ added in v1.3.0
func SetLogStoreConfig(c *LogStoreConfig)
func SetProductType ¶ added in v1.2.0
func SetProductType(productType string)
func SetRaftMsgCacheSize ¶ added in v1.4.0
func SetRaftMsgCacheSize(size int)
func SetRaftMsgTimeout ¶ added in v1.3.0
func SetRaftMsgTimeout(timeout time.Duration)
func SetReadDataCachePct ¶ added in v1.2.0
func SetReadDataCachePct(pct int)
func SetReadMetaCachePct ¶ added in v1.2.0
func SetReadMetaCachePct(pct int)
func SetSFSConfig ¶ added in v1.2.0
func SetSFSConfig(dataDir string)
func SetShardMemTableSizeLimit ¶ added in v1.3.0
func SetShardMemTableSizeLimit(limit int64)
func SetShelfMode ¶ added in v1.5.0
func SetShelfMode(conf ShelfMode)
func SetStoreConfig ¶ added in v1.2.0
func SetStoreConfig(conf Store)
func SetSubscriptionEnable ¶ added in v1.1.0
func SetSubscriptionEnable(en bool)
func SetWaitCommitTimeout ¶ added in v1.4.0
func SetWaitCommitTimeout(timeout time.Duration)
func ShelfHotModeEnabled ¶ added in v1.5.0
func ShelfHotModeEnabled() bool
func ShelfModeEnabled ¶ added in v1.4.0
func ShelfModeEnabled() bool
func TSSPToParquetLevel ¶ added in v1.4.0
func TSSPToParquetLevel() uint16
func UpdateTimeZoneLoc ¶ added in v1.5.0
func UpdateTimeZoneLoc(zoneName string) error
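A short sketch of the time-zone helpers, assuming UpdateTimeZoneLoc accepts an IANA zone name and that the import path is github.com/openGemini/openGemini/lib/config:

package main

import (
    "fmt"
    "log"
    "time"

    "github.com/openGemini/openGemini/lib/config"
)

func main() {
    if err := config.UpdateTimeZoneLoc("Asia/Shanghai"); err != nil {
        log.Fatal(err)
    }
    // Read back the zone name and render a timestamp in that location.
    fmt.Println(config.GetTimeZoneName(), time.Now().In(config.GetTimeZoneLoc()))
}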
Types ¶
type Castor ¶ added in v0.2.0
type Castor struct {
Enabled bool `toml:"enabled"`
PyWorkerAddr []string `toml:"pyworker-addr"`
ConnPoolSize int `toml:"connect-pool-size"`
ResultWaitTimeout int `toml:"result-wait-timeout"`
FitDetect algoConfig `toml:"fit_detect"`
Detect algoConfig `toml:"detect"`
Predict algoConfig `toml:"predict"`
Fit algoConfig `toml:"fit"`
TracingEnabled bool `toml:"tracing-enabled"`
TracingEndpoint string `toml:"tracing-endpoint"`
TracingHttpsEnabled bool `toml:"tracing-https-enabled"`
TracingRatio float64 `toml:"tracing-ratio"`
TracingStorePath string `toml:"tracing-store-path"`
}
func (*Castor) ApplyEnvOverrides ¶ added in v0.2.0
func (*Castor) CheckAlgoAndConfExistence ¶ added in v0.2.0
func (*Castor) GetWaitTimeout ¶ added in v0.2.0
type CertValidator ¶
type CertValidator struct {
// contains filtered or unexported fields
}
func NewCertValidator ¶
func NewCertValidator(certFile, keyFile string) *CertValidator
func (*CertValidator) Validate ¶
func (c *CertValidator) Validate() error
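A minimal sketch; the certificate and key paths are placeholders, and exactly which checks Validate performs beyond loading the pair is not documented here:

package main

import (
    "log"

    "github.com/openGemini/openGemini/lib/config"
)

func main() {
    v := config.NewCertValidator("/etc/openGemini/tls/server.crt", "/etc/openGemini/tls/server.key")
    if err := v.Validate(); err != nil {
        log.Fatalf("certificate validation failed: %v", err)
    }
}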
type ClvConfig ¶ added in v1.1.0
type ClvConfig struct {
QMax int `toml:"q-max"`
Threshold int `toml:"token-threshold"`
DocCount uint32 `toml:"document-count"`
Enabled bool `toml:"enabled"`
}
func NewClvConfig ¶ added in v1.1.0
func NewClvConfig() *ClvConfig
type ColumnStore ¶ added in v1.5.0
type Common ¶
type Common struct {
MetaJoin []string `toml:"meta-join"`
CryptoConfig string `toml:"crypto-config"`
ClusterID string `toml:"cluster-id"`
CPUNum int `toml:"cpu-num"`
ReaderStop bool `toml:"read-stop"`
WriterStop bool `toml:"write-stop"`
IgnoreEmptyTag bool `toml:"ignore-empty-tag"`
ReportEnable bool `toml:"report-enable"`
PreAggEnabled bool `toml:"pre-agg-enabled"`
PprofEnabled bool `toml:"pprof-enabled"`
MemorySize itoml.Size `toml:"memory-size"`
MemoryLimitSize itoml.Size `toml:"executor-memory-size-limit"`
MemoryWaitTime itoml.Duration `toml:"executor-memory-wait-time"`
OptHashAlgo string `toml:"select-hash-algorithm"`
CpuAllocationRatio int `toml:"cpu-allocation-ratio"`
HaPolicy string `toml:"ha-policy"`
NodeRole string `toml:"node-role"`
ProductType string `toml:"product-type"`
PprofBindAddress string `toml:"pprof-bind-address"`
StorePprofPort string `toml:"store-pprof-port"`
SqlPprofPort string `toml:"sql-pprof-port"`
MetaPprofPort string `toml:"meta-pprof-port"`
Bindjoin []string `toml:"bind-join"`
GlobalDictFiles []string `toml:"global-dict-files"`
MetaConnRetryTime itoml.Duration `toml:"meta-conn-retry-time"`
MetaConnRetryNumber int `toml:"meta-conn-retry-number"`
}
Common represents the common configuration format for the ts-store binary.
func NewCommon ¶
func NewCommon() *Common
NewCommon builds a new common configuration with default values.
func (*Common) ApplyEnvOverrides ¶
ApplyEnvOverrides applies the environment configuration on top of the common configuration.
func (*Common) GetLogging ¶
func (*Common) ShowConfigs ¶ added in v1.1.1
func (Common) ValidateRole ¶ added in v1.2.0
type Compact ¶ added in v1.3.0
type Compact struct {
CompactFullWriteColdDuration toml.Duration `toml:"compact-full-write-cold-duration"`
MaxConcurrentCompactions int `toml:"max-concurrent-compactions"`
MaxFullCompactions int `toml:"max-full-compactions"`
CompactThroughput toml.Size `toml:"compact-throughput"`
CompactThroughputBurst toml.Size `toml:"compact-throughput-burst"`
SnapshotThroughput toml.Size `toml:"snapshot-throughput"`
SnapshotThroughputBurst toml.Size `toml:"snapshot-throughput-burst"`
BackGroundReadThroughput toml.Size `toml:"back-ground-read-throughput"`
CompactionMethod int `toml:"compaction-method"` // 0:auto, 1: streaming, 2: non-streaming
MaxCompactionLevel int `toml:"max-compaction-level"`
CompactRecovery bool `toml:"compact-recovery"`
CsCompactionEnabled bool `toml:"column-store-compact-enabled"`
CorrectTimeDisorder bool `toml:"correct-time-disorder"`
}
func NewCompactConfig ¶ added in v1.3.0
func NewCompactConfig() Compact
type CompactionType ¶ added in v1.2.0
type CompactionType int32
const (
    ROW CompactionType = iota
    BLOCK
    COMPACTIONTYPEEND
)
func Str2CompactionType ¶ added in v1.2.0
func Str2CompactionType(compactStr string) CompactionType
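A small sketch of converting between the enum and its string form, assuming the two helpers are inverses of each other for the defined values; the import path is an assumption:

package main

import (
    "fmt"

    "github.com/openGemini/openGemini/lib/config"
)

func main() {
    s := config.CompactionType2Str(config.BLOCK)
    // Expected to print the string form and true if the helpers round-trip.
    fmt.Println(s, config.Str2CompactionType(s) == config.BLOCK)
}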
type Comparable ¶ added in v1.4.1
type Consume ¶ added in v1.5.0
type Consume struct {
ConsumeEnable bool `toml:"consume-enabled"`
ConsumeHost string `toml:"consume-host"`
ConsumePort uint32 `toml:"consume-port"`
ConsumeMaxReadSize toml.Size `toml:"consume-max-read-size"`
}
func NewConsumeConfig ¶ added in v1.5.0
func NewConsumeConfig() Consume
type ContinuousQueryConfig ¶ added in v1.1.0
type ContinuousQueryConfig struct {
// If this flag is set to false, both the brokers and data nodes should ignore any CQ processing.
Enabled bool `toml:"enabled"`
// The interval at which the CQ service will run.
RunInterval toml.Duration `toml:"run-interval"`
// MaxProcessCQNumber is the max number of CQs to process in one run.
MaxProcessCQNumber int `toml:"max-process-CQ-number"`
}
ContinuousQueryConfig is the configuration for the continuous query service.
func NewContinuousQueryConfig ¶ added in v1.1.0
func NewContinuousQueryConfig() ContinuousQueryConfig
NewContinuousQueryConfig returns a new instance of ContinuousQueryConfig with defaults.
func (ContinuousQueryConfig) ApplyEnvOverrides ¶ added in v1.1.0
func (c ContinuousQueryConfig) ApplyEnvOverrides(_ func(string) string) error
func (*ContinuousQueryConfig) ShowConfigs ¶ added in v1.1.1
func (c *ContinuousQueryConfig) ShowConfigs() map[string]interface{}
func (ContinuousQueryConfig) Validate ¶ added in v1.1.0
func (c ContinuousQueryConfig) Validate() error
Validate returns an error if the config is invalid.
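A minimal sketch of configuring the continuous query service programmatically; the field values here are illustrative, and the import path is an assumption:

package main

import (
    "log"

    "github.com/openGemini/openGemini/lib/config"
)

func main() {
    cq := config.NewContinuousQueryConfig() // defaults, including RunInterval
    cq.Enabled = true
    cq.MaxProcessCQNumber = 4
    if err := cq.Validate(); err != nil {
        log.Fatal(err)
    }
}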
type Coordinator ¶
type Coordinator struct {
WriteTimeout toml.Duration `toml:"write-timeout"`
MaxConcurrentQueries int `toml:"max-concurrent-queries"`
QueryTimeout toml.Duration `toml:"query-timeout"`
LogQueriesAfter toml.Duration `toml:"log-queries-after"`
ShardWriterTimeout toml.Duration `toml:"shard-writer-timeout"`
ShardMapperTimeout toml.Duration `toml:"shard-mapper-timeout"`
// Maximum number of memory bytes to use from the query
MaxQueryMem toml.Size `toml:"max-query-mem"`
MetaExecutorWriteTimeout toml.Duration `toml:"meta-executor-write-timeout"`
QueryLimitIntervalTime int `toml:"query-limit-interval-time"`
QueryLimitLevel int `toml:"query-limit-level"`
RetentionPolicyLimit int `toml:"rp-limit"`
DatabaseNumLimit int32 `toml:"db-limit"`
ShardTier string `toml:"shard-tier"`
TimeRangeLimit []toml.Duration `toml:"time-range-limit"`
MeasurementBlacklist []string `toml:"measurement-blacklist"`
// Maximum number of tag keys in a measurement
TagLimit int `toml:"tag-limit"`
FieldLimit int `toml:"field-limit"`
QueryLimitFlag bool `toml:"query-limit-flag"`
QueryTimeCompareEnabled bool `toml:"query-time-compare-enabled"`
ForceBroadcastQuery bool `toml:"force-broadcast-query"`
HardWrite bool `toml:"hard-write"`
}
Coordinator represents the configuration for the coordinator service.
func NewCoordinator ¶
func NewCoordinator() Coordinator
NewCoordinator returns an instance of Config with defaults.
func (*Coordinator) ShowConfigs ¶ added in v1.1.1
func (c *Coordinator) ShowConfigs() map[string]interface{}
func (Coordinator) Validate ¶
func (c Coordinator) Validate() error
Validate validates that the configuration is acceptable.
type EngineType ¶ added in v1.1.0
type EngineType uint8
const (
    TSSTORE EngineType = iota // tsstore, data aware(time series) column store, default value(0 for int) if engineType not set
    COLUMNSTORE // columnstore, traditional column store
    ENGINETYPEEND // undefined
)
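A small sketch of mapping between the enum and its string form via the package-level tables EngineType2String and String2EngineType (import path assumed):

package main

import (
    "fmt"

    "github.com/openGemini/openGemini/lib/config"
)

func main() {
    name := config.EngineType2String[config.COLUMNSTORE] // "columnstore"
    fmt.Println(name, config.String2EngineType[name] == config.COLUMNSTORE)
}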
type Fence ¶ added in v1.5.0
type Fence struct {
FenceFilePath string `toml:"fence-file-path"`
FenceEnable bool `toml:"fence-enable"`
}
func NewFenceConfig ¶ added in v1.5.0
func NewFenceConfig() Fence
type Gossip ¶
type Gossip struct {
Enabled bool `toml:"enabled"`
LogEnabled bool `toml:"log-enabled"`
BindAddr string `toml:"bind-address"`
MetaBindPort int `toml:"meta-bind-port"`
StoreBindPort int `toml:"store-bind-port"`
SqlBindPort int `toml:"sql-bind-port"`
ProbInterval toml.Duration `toml:"prob-interval"`
SuspicionMult int `toml:"suspicion-mult"`
Members []string `toml:"members"`
}
type HAPolicy ¶ added in v1.1.0
type HAPolicy uint8
func GetHaPolicy ¶ added in v1.1.0
func GetHaPolicy() HAPolicy
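A minimal sketch that selects the replication policy using the RepPolicy constant and then reads back the derived flags; the import path is an assumption:

package main

import (
    "fmt"
    "log"

    "github.com/openGemini/openGemini/lib/config"
)

func main() {
    if err := config.SetHaPolicy(config.RepPolicy); err != nil {
        log.Fatal(err)
    }
    // GetHaPolicy returns the parsed HAPolicy value; the Is* helpers report
    // which policy is in effect.
    fmt.Println(config.GetHaPolicy(), config.IsReplication(), config.IsSharedStorage())
}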
type HierarchicalConfig ¶ added in v1.2.0
type HierarchicalConfig struct {
// If false, close hierarchical storage service
Enabled bool `toml:"enabled"`
// If false, close index hierarchical storage service
IndexEnabled bool `toml:"index-enabled"`
// Interval time for checking hierarchical storage.
RunInterval toml.Duration `toml:"run-interval"`
// Interval time for checking index hierarchical storage.
IndexRunInterval toml.Duration `toml:"index-run-interval"`
// Max process number for shard moving
MaxProcessN int `toml:"max-process-hs-number"`
EnableWriteColdShard bool `toml:"enable-write-cold-shard"`
}
Config represents a configuration for the hierarchical storage service.
func NewHierarchicalConfig ¶ added in v1.2.0
func NewHierarchicalConfig() HierarchicalConfig
func (HierarchicalConfig) Validate ¶ added in v1.2.0
func (c HierarchicalConfig) Validate() error
type HotMode ¶ added in v1.4.0
type HotMode struct {
Enabled bool `toml:"enabled"`
ShelfOnly bool `toml:"shelf-only"`
MemoryAllowedPercent uint8 `toml:"memory-allowed-percent"`
Duration toml.Duration `toml:"duration"`
TimeWindow toml.Duration `toml:"time-window"`
MaxFileSize toml.Size `toml:"max-file-size"`
PoolObjectCnt int `toml:"pool-object-cnt"`
MaxCacheSize toml.Size `toml:"max-cache-size"`
}
func (*HotMode) DurationSeconds ¶ added in v1.4.0
func (*HotMode) GetMemoryAllowedPercent ¶ added in v1.4.0
func (*HotMode) TimeWindowSeconds ¶ added in v1.4.0
type Index ¶ added in v1.3.0
type Index struct {
TSIDCacheSize int `toml:"tsid-cache-size"`
SKeyCacheSize int `toml:"skey-cache-size"`
TagCacheSize int `toml:"tag-cache-size"`
TagFilterCostCacheSize int `toml:"tag-filter-cost-cache-size"`
Concurrency int `toml:"concurrency"`
TagScanPruneThreshold int `toml:"tag-scan-prune-threshold"`
MemoryAllowedPercent int `toml:"memory-allowed-percent"`
CacheExpireDuration toml.Duration `toml:"cache-expire-duration"`
CacheCompressEnable bool `toml:"cache-compress-enable"`
BloomFilterEnabled bool `toml:"bloom-filter-enable"`
}
func GetIndexConfig ¶ added in v1.3.0
func GetIndexConfig() *Index
type LastRowCacheConfig ¶ added in v1.5.0
type LastRowCacheConfig struct {
CacheEnabled bool `toml:"enabled"`
NumCounters int64 `toml:"number-counters"`
MaxCost int64 `toml:"max-cost"`
BufferItems int64 `toml:"buffer-items"`
Metrics bool `toml:"metrics"`
}
func GetLastRowCacheConfig ¶ added in v1.5.0
func GetLastRowCacheConfig() *LastRowCacheConfig
func NewLastRowCacheConfig ¶ added in v1.5.0
func NewLastRowCacheConfig() *LastRowCacheConfig
type Limits ¶ added in v1.4.0
type Limits struct {
PromLimitEnabled bool `toml:"prom-limit-enabled" yaml:"prom_limit_enabled"`
// prom metrics write limits
MaxLabelNameLength int `toml:"max-label-name-length" yaml:"max_label_name_length"`
MaxLabelValueLength int `toml:"max-label-value-length" yaml:"max_label_value_length"`
MaxLabelNamesPerSeries int `toml:"max-label-names-per-series" yaml:"max_label_names_per_series"`
MaxMetadataLength int `toml:"max-metadata-length" yaml:"max_metadata_length"`
RejectOldSamples bool `toml:"reject-old-samples" yaml:"reject_old_samples"`
RejectOldSamplesMaxAge model.Duration `toml:"reject-old-samples-max-age" yaml:"reject_old_samples_max_age"`
CreationGracePeriod model.Duration `toml:"creation-grace-period" yaml:"creation_grace_period"`
EnforceMetadataMetricName bool `toml:"enforce-metadata-metric-name" yaml:"enforce_metadata_metric_name"`
EnforceMetricName bool `toml:"enforce-metric-name" yaml:"enforce_metric_name"`
// query limits
MaxQueryLength model.Duration `toml:"max-query-length" yaml:"max_query_length"`
}
Limits describes all the limits for users; it can be used to describe global default limits via config, or per-user limits via yaml config.
func (*Limits) UnmarshalYAML ¶ added in v1.4.0
UnmarshalYAML implements the yaml.Unmarshaler interface.
type LogStoreConfig ¶ added in v1.3.0
type LogStoreConfig struct {
MemorySize uint64
CacheEnabled bool `toml:"cache-segment-metadata"`
CacheRate float32 `toml:"cache-segment-metadata-memory-rate"`
CacheTTL toml.Duration `toml:"cache-segment-metadata-ttl"`
VlmCacheHotData bool `toml:"vlm-cache-hotdata"`
VlmCachePiecePrefetch bool `toml:"vlm-cache-piece-prefetch"`
VlmCachePieceSize uint32 `toml:"vlm-cache-piece-size"`
VlmCacheGroupPrefetch bool `toml:"vlm-cache-group-prefetch"`
VlmCacheGroupSize uint32 `toml:"vlm-cache-group-size"`
VlmCachePrefetchNum uint32 `toml:"vlm-cache-prefetch-shard-num"`
VlmCacheTtl toml.Duration `toml:"vlm-cache-ttl"`
ContainerBasePath string `toml:"container-base-path"`
}
func GetLogStoreConfig ¶ added in v1.3.0
func GetLogStoreConfig() *LogStoreConfig
func NewLogStoreConfig ¶ added in v1.3.0
func NewLogStoreConfig() *LogStoreConfig
func (*LogStoreConfig) EnableCache ¶ added in v1.3.0
func (l *LogStoreConfig) EnableCache(e bool)
func (*LogStoreConfig) GetCacheMemory ¶ added in v1.3.0
func (l *LogStoreConfig) GetCacheMemory() int64
func (*LogStoreConfig) GetCacheRate ¶ added in v1.3.0
func (l *LogStoreConfig) GetCacheRate() float32
func (*LogStoreConfig) GetCacheTTL ¶ added in v1.3.0
func (l *LogStoreConfig) GetCacheTTL() time.Duration
func (*LogStoreConfig) GetContainerBasePath ¶ added in v1.3.0
func (l *LogStoreConfig) GetContainerBasePath() string
func (*LogStoreConfig) GetVlmCacheGroupSize ¶ added in v1.3.0
func (l *LogStoreConfig) GetVlmCacheGroupSize() uint32
func (*LogStoreConfig) GetVlmCachePieceSize ¶ added in v1.3.0
func (l *LogStoreConfig) GetVlmCachePieceSize() uint32
func (*LogStoreConfig) GetVlmCachePrefetchNums ¶ added in v1.3.0
func (l *LogStoreConfig) GetVlmCachePrefetchNums() uint32
func (*LogStoreConfig) GetVlmCacheTtl ¶ added in v1.3.0
func (l *LogStoreConfig) GetVlmCacheTtl() time.Duration
func (*LogStoreConfig) IsCacheEnabled ¶ added in v1.3.0
func (l *LogStoreConfig) IsCacheEnabled() bool
func (*LogStoreConfig) IsVlmCacheEnable ¶ added in v1.3.0
func (l *LogStoreConfig) IsVlmCacheEnable() bool
func (*LogStoreConfig) IsVlmCacheGroupPrefetch ¶ added in v1.3.0
func (l *LogStoreConfig) IsVlmCacheGroupPrefetch() bool
func (*LogStoreConfig) IsVlmCacheHotData ¶ added in v1.3.0
func (l *LogStoreConfig) IsVlmCacheHotData() bool
func (*LogStoreConfig) IsVlmCachePiecePrefetch ¶ added in v1.3.0
func (l *LogStoreConfig) IsVlmCachePiecePrefetch() bool
func (*LogStoreConfig) IsVlmPrefetchEnable ¶ added in v1.3.0
func (l *LogStoreConfig) IsVlmPrefetchEnable() bool
func (*LogStoreConfig) SetMemorySize ¶ added in v1.3.0
func (l *LogStoreConfig) SetMemorySize(m toml.Size)
type Logger ¶
type Logger struct {
Format string `toml:"format"`
Level zapcore.Level `toml:"level"`
MaxSize toml.Size `toml:"max-size"`
MaxNum int `toml:"max-num"`
MaxAge int `toml:"max-age"`
CompressEnabled bool `toml:"compress-enabled"`
Path string `toml:"path"`
// contains filtered or unexported fields
}
func GetStoreLogger ¶ added in v1.0.0
func GetStoreLogger() *Logger
func (*Logger) NewLumberjackLogger ¶ added in v1.1.0
func (c *Logger) NewLumberjackLogger(fileName string) *lumberjack.Logger
func (*Logger) ShowConfigs ¶ added in v1.1.1
type MemTable ¶ added in v1.3.0
type MemTable struct {
WriteColdDuration toml.Duration `toml:"write-cold-duration"`
ForceSnapShotDuration toml.Duration `toml:"force-snapShot-duration"`
ShardMutableSizeLimit toml.Size `toml:"shard-mutable-size-limit"`
NodeMutableSizeLimit toml.Size `toml:"node-mutable-size-limit"`
MaxWriteHangTime toml.Duration `toml:"max-write-hang-time"`
MemDataReadEnabled bool `toml:"mem-data-read-enabled"`
CsDetachedFlushEnabled bool `toml:"column-store-detached-flush-enabled"`
SnapshotTblNum int `toml:"snapshot-table-number"`
FragmentsNumPerFlush int `toml:"fragments-num-per-flush"`
}
func GetMemTableConfig ¶ added in v1.3.0
func GetMemTableConfig() *MemTable
func NewMemTableConfig ¶ added in v1.3.0
func NewMemTableConfig() MemTable
type Merge ¶ added in v1.3.0
type Merge struct {
// merge only unordered data
MergeSelfOnly bool `toml:"merge-self-only"`
// The total size of unordered files to be merged each time cannot exceed MaxUnorderedFileSize
MaxUnorderedFileSize toml.Size `toml:"max-unordered-file-size"`
// The number of unordered files to be merged each time cannot exceed MaxUnorderedFileNumber
MaxUnorderedFileNumber int `toml:"max-unordered-file-number"`
MaxMergeSelfLevel uint16 `toml:"max-merge-self-level"`
MinInterval toml.Duration `toml:"min-interval"`
StreamMergeModeLevel int `toml:"stream-merge-mode-level"`
}
type Meta ¶
type Meta struct {
HTTPSEnabled bool `toml:"https-enabled"`
RetentionAutoCreate bool `toml:"retention-autocreate"`
ClusterTracing bool `toml:"cluster-tracing"`
LoggingEnabled bool `toml:"logging-enabled"`
BatchApplyCh bool `toml:"batch-enabled"`
TakeOverEnable bool `toml:"takeover-enable"`
ExpandShardsEnable bool `toml:"expand-shards-enable"`
PingFailedNode bool `toml:"ping-failed-node"`
RepairPT bool `toml:"repair-pt"`
DataDir string
WalDir string
Domain string `toml:"domain"`
Dir string `toml:"dir"`
HTTPBindAddress string `toml:"http-bind-address"`
RPCBindAddress string `toml:"rpc-bind-address"`
BindAddress string `toml:"bind-address"`
AuthEnabled bool `toml:"auth-enabled"`
HTTPSCertificate string `toml:"https-certificate"`
HTTPSPrivateKey string `toml:"https-private-key"`
HTTPSClientCertificate string `toml:"https-client-certificate"`
MaxConcurrentWriteLimit int `toml:"-"`
Version int `toml:"meta-version"`
Hostname string `toml:"hostname"`
SplitRowThreshold uint64 `toml:"split-row-threshold"`
ImbalanceFactor float64 `toml:"imbalance-factor"`
RemoteHostname string
JoinPeers []string
ElectionTimeout toml.Duration `toml:"election-timeout"`
HeartbeatTimeout toml.Duration `toml:"heartbeat-timeout"`
LeaderLeaseTimeout toml.Duration `toml:"leader-lease-timeout"`
CommitTimeout toml.Duration `toml:"commit-timeout"`
LeaseDuration toml.Duration `toml:"lease-duration"`
Logging Logger `toml:"logging"`
PtNumPerNode uint32 `toml:"ptnum-pernode"`
BalanceAlgo string `toml:"balance-algorithm-version"`
// Number of shards for each measurement in a shard group
NumOfShards int32 `toml:"num-of-shards"`
UseIncSyncData bool `toml:"inc-sync-data"`
SQLiteEnabled bool `toml:"sqlite-enabled"`
RepDisPolicy uint8 `toml:"rep-dis-policy"`
SchemaCleanEn bool `toml:"schema-clean-enable"`
AsyncSchemaEndtimeUpdateEn bool `toml:"async-schema-endtime-update-enable"`
AsyncSchemaEndtimeUpdateCache int `toml:"async-schema-endtime-update-cache"`
AsyncSchemaEndtimeUpdateConcurrency int `toml:"async-schema-endtime-update-concurrency"`
MetaEventHandleEn bool `toml:"meta-event-handle-enable"`
BindPeers []string
ReplicaColdSelectEnable bool `toml:"replica_cold_select_enable"`
ReplicaColdSelectInterval toml.Duration `toml:"replica_cold_select_interval"`
MetaRecover bool `toml:"meta_recover_enable"`
ShardGroupTimeZone string `toml:"shard-group-time-zone"`
}
Meta represents the meta configuration.
func (*Meta) CombineDomain ¶ added in v0.2.0
type Monitor ¶
type Monitor struct {
Pushers string `toml:"pushers"`
StoreEnabled bool `toml:"store-enabled"`
StoreDatabase string `toml:"store-database"`
StoreInterval toml.Duration `toml:"store-interval"`
StorePath string `toml:"store-path"`
Compress bool `toml:"compress"`
HttpsEnabled bool `toml:"https-enabled"`
HttpEndPoint string `toml:"http-endpoint"`
Username string `toml:"username"`
Password string `toml:"password"`
// contains filtered or unexported fields
}
Monitor represents the configuration for the monitor service.
func NewMonitor ¶
type MonitorMain ¶
type MonitorMain struct {
Host string `toml:"host"`
MetricPath string `toml:"metric-path"`
ErrLogPath string `toml:"error-log-path"`
Process string `toml:"process"`
DiskPath string `toml:"disk-path"`
AuxDiskPath string `toml:"aux-disk-path"`
History string `toml:"history-file"`
Compress bool `toml:"compress"`
HttpEndpoint string `toml:"http-endpoint"`
LockFilePath string `toml:"lock-file-path"`
}
type MonitorQuery ¶
type MonitorReport ¶
type MonitorReport struct {
Address string `toml:"address"`
Database string `toml:"database"`
Rp string `toml:"rp"`
RpDuration toml.Duration `toml:"rp-duration"`
Username string `toml:"username"`
Password string `toml:"password"`
HTTPSEnabled bool `toml:"https-enable"`
ReplicaN int `toml:"replicaN"`
}
type OpsMonitor ¶ added in v1.0.0
type OpsMonitor struct {
HttpAddress string `toml:"store-http-addr"`
AuthEnabled bool `toml:"auth-enabled"`
HttpsEnabled bool `toml:"store-https-enabled"`
HttpsCertificate string `toml:"store-https-certificate"`
}
func NewOpsMonitorConfig ¶ added in v1.0.0
func NewOpsMonitorConfig() *OpsMonitor
type ParquetTaskConfig ¶ added in v1.4.0
type ParquetTaskConfig struct {
Enabled bool `toml:"enabled"`
// the level of the TSSP file to be converted to a Parquet. 0: not convert
TSSPToParquetLevel uint16 `toml:"tssp-to-parquet-level"`
// group length of parquet file
MaxRowGroupLen int `toml:"max-group-len"`
// Page size of parquet file
PageSize int `toml:"page-size"`
// parquet writer batch size
WriteBatchSize int `toml:"write-batch-size"`
ItrBatchSize uint64 `toml:"itr-batch-size"`
DictCompressEnable uint64 `toml:"dict-compress-enable"`
CompressAlg uint64 `toml:"compress-alg"`
EnableMst []string `toml:"enable-mst"`
MaxStatsSize int64 `toml:"max-stats-size"`
OutputDir string `toml:"output-dir"`
ReliabilityLogDir string `toml:"reliability-log-dir"`
// contains filtered or unexported fields
}
Config represents a configuration for the parquet task.
func NewParquetTaskConfig ¶ added in v1.4.0
func NewParquetTaskConfig() *ParquetTaskConfig
func (*ParquetTaskConfig) GetEnableMst ¶ added in v1.5.0
func (c *ParquetTaskConfig) GetEnableMst() []string
func (*ParquetTaskConfig) GetOutputDir ¶ added in v1.4.0
func (c *ParquetTaskConfig) GetOutputDir() string
func (*ParquetTaskConfig) GetReliabilityLogDir ¶ added in v1.4.0
func (c *ParquetTaskConfig) GetReliabilityLogDir() string
func (*ParquetTaskConfig) SetEnableMst ¶ added in v1.5.0
func (c *ParquetTaskConfig) SetEnableMst(mst []string)
type ProductType ¶ added in v1.2.0
type ProductType uint8
const (
    Basic ProductType = iota
    LogKeeper // the log service of CSS
)
func GetProductType ¶ added in v1.2.0
func GetProductType() ProductType
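A minimal sketch, assuming SetProductType accepts the LogKeeperService constant to switch the process into the log-service product mode; the import path is an assumption:

package main

import (
    "fmt"

    "github.com/openGemini/openGemini/lib/config"
)

func main() {
    config.SetProductType(config.LogKeeperService)
    fmt.Println(config.GetProductType() == config.LogKeeper, config.IsLogKeeper())
}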
type RaftStorage ¶ added in v1.4.0
func NewRaftStorageConfig ¶ added in v1.4.0
func NewRaftStorageConfig() RaftStorage
type ReadCache ¶ added in v1.3.0
type ReadCache struct {
ReadPageSize string `toml:"read-page-size"`
ReadMetaPageSize []string `toml:"read-meta-page-size"`
ReadMetaCacheEn toml.Size `toml:"enable-meta-cache"`
ReadMetaCacheEnPct toml.Size `toml:"read-meta-cache-limit-pct"`
ReadDataCacheEn toml.Size `toml:"enable-data-cache"`
ReadDataCacheEnPct toml.Size `toml:"read-data-cache-limit-pct"`
}
func NewReadCacheConfig ¶ added in v1.3.0
func NewReadCacheConfig() ReadCache
type RecordWriteConfig ¶ added in v1.4.0
type RecordWriteConfig struct {
Enabled bool `toml:"enabled"`
AuthEnabled bool `toml:"auth-enabled"`
ShelfMode bool `toml:"shelf-mode"`
TLS tlsConfig `toml:"TLS"`
RPCAddress string `toml:"rpc-address"`
MaxRecvMsgSize int `toml:"max-message-size"`
}
func NewRecordWriteConfig ¶ added in v1.4.0
func NewRecordWriteConfig() RecordWriteConfig
func (RecordWriteConfig) ShowConfigs ¶ added in v1.4.0
func (c RecordWriteConfig) ShowConfigs() map[string]interface{}
func (RecordWriteConfig) Validate ¶ added in v1.4.0
func (c RecordWriteConfig) Validate() error
type RuntimeConfig ¶ added in v1.4.0
type RuntimeConfig struct {
Enabled bool `toml:"enabled"`
// How often to check runtime config file.
ReloadPeriod toml.Duration `toml:"reload-period"`
// LoadPath contains the path to the runtime config file; set empty if runtimecfg is not required.
LoadPath string `toml:"load-path"`
}
func NewRuntimeConfig ¶ added in v1.4.0
func NewRuntimeConfig() RuntimeConfig
func (RuntimeConfig) Validate ¶ added in v1.4.0
func (c RuntimeConfig) Validate() error
Validate validates the runtime config and returns an error if the validation doesn't pass.
type SFS ¶ added in v1.2.0
type SFS struct {
DataDir string `toml:"store-data-dir"`
}
func GetSFSSConfig ¶ added in v1.2.0
func GetSFSSConfig() *SFS
func (*SFS) GetDataDir ¶ added in v1.2.0
type SelectSpecConfig ¶ added in v1.1.0
type SelectSpecConfig struct {
EnableWhenExceed bool `toml:"enable-query-when-exceed"`
QuerySeriesLimit int `toml:"query-series-limit"`
QuerySchemaLimit int `toml:"query-schema-limit"`
}
func NewSelectSpecConfig ¶ added in v1.1.0
func NewSelectSpecConfig() SelectSpecConfig
type ShardMergeConfig ¶ added in v1.5.0
type ShardMergeConfig struct {
// If false, close shard merge service
Enabled bool `toml:"enabled"`
// Interval time for checking merge shard
RunInterval toml.Duration `toml:"run-interval"`
}
ShardMergeConfig represents a configuration for the shard merge service.
func NewShardMergeConfig ¶ added in v1.5.0
func NewShardMergeConfig() ShardMergeConfig
func (ShardMergeConfig) Validate ¶ added in v1.5.0
func (c ShardMergeConfig) Validate() error
type ShelfMode ¶ added in v1.4.0
type ShelfMode struct {
Enabled bool `toml:"enabled"`
// Reliability requirements for data writing
// 1: Low reliability. Data may be lost due to process faults.
// 2: Medium reliability. Data may be lost due to container or VM faults. (default)
// 3: High reliability. Data may be lost when a storage medium is faulty.
ReliabilityLevel int `toml:"reliability-level"`
MaxWalFileSize toml.Size `toml:"max-wal-file-size"`
MaxWalDuration toml.Duration `toml:"max-wal-duration"`
// WAL data compression mode. 0: not compressed; 1: LZ4 (default); 2: Snappy
WalCompressMode int `toml:"wal-compress-mode"`
// number of background write threads. default value is CPUNum
Concurrent int `toml:"concurrent"`
// Limit the number of WAL files (waiting to be converted + being converted + being written)
// to prevent disk space from being fully occupied, which could cause node failure.
// It is not a strict constraint; the actual number may exceed this limit.
// Default value is concurrent*8
MaxNumOfWal int `toml:"max-num-of-wal"`
// by default, the table is grouped based on the hash value of the measurement name
// If this parameter is set to a value greater than 1,
// secondary grouping is performed based on the hash value of the series key
SeriesHashFactor int `toml:"series-hash-factor"`
// max number of concurrent WAL files to be converted to TSSP files.
// default value is the same as Concurrent
TSSPConvertConcurrent int `toml:"tssp-convert-concurrent"`
// max number of concurrent conversions from a single WAL file to TSSP file
// default value is max(1, TSSPConvertConcurrent/4)
OneTSSPConvertConcurrent int `toml:"one-tssp-convert-concurrent"`
// Compress the series key in memory to save memory and reduce the risk of OOM
// When the memory used by the series key exceeds this value, compression is enabled.
// 0 means never compress
SeriesKeyCompressThreshold toml.Size `toml:"series-key-compress-threshold"`
}
func GetShelfMode ¶ added in v1.5.0
func GetShelfMode() *ShelfMode
type SherlockConfig ¶ added in v1.0.0
type SherlockConfig struct {
SherlockEnable bool `toml:"sherlock-enable"`
CollectInterval toml.Duration `toml:"collect-interval"`
CPUMaxPercent toml.Size `toml:"cpu-max-percent"`
DumpPath string `toml:"dump-path"`
MaxNum int `toml:"max-num"`
MaxAge int `toml:"max-age"`
CPUConfig typeConfig `toml:"cpu"`
MemoryConfig typeConfig `toml:"memory"`
GoroutineConfig typeConfig `toml:"goroutine"`
}
func NewSherlockConfig ¶ added in v1.0.0
func NewSherlockConfig() *SherlockConfig
func (*SherlockConfig) ApplyEnvOverrides ¶ added in v1.0.0
func (c *SherlockConfig) ApplyEnvOverrides(_ func(string) string) error
func (*SherlockConfig) Validate ¶ added in v1.0.0
func (c *SherlockConfig) Validate() error
type Spdy ¶
type Spdy struct {
ByteBufferPoolDefaultSize uint64
RecvWindowSize int `toml:"recv-window-size"`
ConcurrentAcceptSession int `toml:"concurrent-accept-session"`
ConnPoolSize int `toml:"conn-pool-size"`
OpenSessionTimeout toml.Duration `toml:"open-session-timeout"`
SessionSelectTimeout toml.Duration `toml:"session-select-timeout"`
TCPDialTimeout toml.Duration `toml:"tcp-dial-timeout"`
DataAckTimeout toml.Duration `toml:"data-ack-timeout"`
CompressEnable bool `toml:"compress-enable"`
TLSEnable bool `toml:"tls-enable"`
TLSClientAuth bool `toml:"tls-client-auth"`
TLSInsecureSkipVerify bool `toml:"tls-insecure-skip-verify"`
TLSCertificate string `toml:"tls-certificate"`
TLSPrivateKey string `toml:"tls-private-key"`
TLSClientCertificate string `toml:"tls-client-certificate"`
TLSClientPrivateKey string `toml:"tls-client-private-key"`
TLSCARoot string `toml:"tls-ca-root"`
TLSServerName string `toml:"tls-server-name"`
}
func (*Spdy) GetOpenSessionTimeout ¶
func (*Spdy) GetSessionSelectTimeout ¶
func (*Spdy) GetTCPDialTimeout ¶
func (*Spdy) ShowConfigs ¶ added in v1.1.1
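A short sketch; FormatSpdy is assumed to fill the unset fields of a zero-valued Spdy with the Default* constants listed above before Validate is called, and the import path is an assumption:

package main

import (
    "fmt"
    "log"

    "github.com/openGemini/openGemini/lib/config"
)

func main() {
    var spdy config.Spdy
    config.FormatSpdy(&spdy)
    if err := spdy.Validate(); err != nil {
        log.Fatal(err)
    }
    fmt.Println(spdy.GetTCPDialTimeout(), spdy.GetOpenSessionTimeout())
}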
type Store ¶
type Store struct {
IngesterAddress string `toml:"store-ingest-addr"`
SelectAddress string `toml:"store-select-addr"`
Domain string `toml:"domain"`
TLS *tls.Config `toml:"-"`
DataDir string `toml:"store-data-dir"`
WALDir string `toml:"store-wal-dir"`
MetaDir string `toml:"store-meta-dir"`
Engine string `toml:"engine-type"`
Index string `toml:"index-version"`
OpsMonitor *OpsMonitor `toml:"ops-monitor"`
// configs for compact
Compact Compact `toml:"compact"`
// configs for memTable
MemTable MemTable `toml:"memtable"`
// configs for wal
Wal Wal `toml:"wal"`
// configs for raftStorage
RaftStorage RaftStorage `toml:"raft-storage"`
// configs for readCache
ReadCache ReadCache `toml:"readcache"`
EnableMmapRead bool `toml:"enable-mmap-read"`
Readonly bool `toml:"readonly"`
WriteConcurrentLimit int `toml:"write-concurrent-limit"`
OpenShardLimit int `toml:"open-shard-limit"`
MaxSeriesPerDatabase int `toml:"max-series-per-database"`
DownSampleWriteDrop bool `toml:"downsample-write-drop"`
ShardMoveLayoutSwitchEnabled bool `toml:"shard-move-layout-switch"`
// parallelism allocator
MaxWaitResourceTime toml.Duration `toml:"max-wait-resource-time"`
MaxSeriesParallelismNum int `toml:"max-series-parallelism-num"`
MaxShardsParallelismNum int `toml:"max-shards-parallelism-num"`
ChunkReaderThreshold int `toml:"chunk-reader-threshold"`
MinChunkReaderConcurrency int `toml:"min-chunk-reader-concurrency"`
MinShardsConcurrency int `toml:"min-shards-concurrency"`
MaxDownSampleTaskConcurrency int `toml:"max-downsample-task-concurrency"`
// for query
EnableQueryFileHandleCache bool `toml:"enable_query_file_handle_cache"`
MaxQueryCachedFileHandles uint32 `toml:"max_query_cached_file_handles"`
// config for lazy load shard
LazyLoadShardEnable bool `toml:"lazy-load-shard-enable"`
ThermalShardStartDuration toml.Duration `toml:"thermal-shard-start-duration"`
ThermalShardEndDuration toml.Duration `toml:"thermal-shard-end-duration"`
// for auto interrupt query
InterruptQuery bool `toml:"interrupt-query"`
InterruptSqlMemPct int `toml:"interrupt-sql-mem-pct"`
ProactiveMgrInterval toml.Duration `toml:"proactive-manager-interval"`
TemporaryIndexCompressMode int `toml:"temporary-index-compress-mode"`
ChunkMetaCompressMode int `toml:"chunk-meta-compress-mode"`
IndexReadCachePersistent bool `toml:"index-read-cache-persistent"`
FloatCompressAlgorithm string `toml:"float-compress-algorithm"`
StringCompressAlgo string `toml:"string-compress-algo"`
// Ordered data and unordered data are not distinguished. All data is processed as unordered data.
UnorderedOnly bool `toml:"unordered-only"`
Merge Merge `toml:"merge"`
MaxRowsPerSegment int `toml:"max-rows-per-segment"`
// in some scenarios, it is allowed to write past-time but ordered data (for example, some scenarios allow writing the past 14 days of data in order)
EnableWriteHistoryOrderedData bool `toml:"enable-write-history-ordered-data"`
// for hierarchical storage
SkipRegisterColdShard bool `toml:"skip-register-cold-shard"`
AvailabilityZone string `toml:"availability-zone"`
ClearEntryLogTolerateTime toml.Duration `toml:"clear-entryLog-tolerate-time"`
ClearEntryLogTolerateSize toml.Size `toml:"clear-entryLog-tolerate-size"`
ParquetTask *ParquetTaskConfig `toml:"parquet-task"`
// configs for consume
Consume Consume `toml:"consume"`
Fence Fence `toml:"fence"`
HotMode HotMode `toml:"hot-mode"`
ColumnStore ColumnStore `toml:"column-store"`
RaftMsgTimeout toml.Duration `toml:"raft-msg-time-out"`
ElectionTick int `toml:"election-tick"`
HeartbeatTick int `toml:"heartbeat-tick"`
RaftMsgCacheSize int `toml:"raft-msg-cache-size"`
FileWrapSize int `toml:"file-wrap-size"`
WaitCommitTimeout toml.Duration `toml:"wait-commit-time-out"`
EnablePerlRegrep bool `toml:"enable-perl-regrep"`
RetryPtCheckTime int `toml:"retry-pt-check-time"`
EntryFileRWType int `toml:"entry-file-rw-type"`
NagtPoolCap int `toml:"nagt-pool-cap"`
DropSeriesPeriod toml.Duration `toml:"drop-series-period"`
}
Store is the configuration for the engine.
func GetStoreConfig ¶ added in v1.2.0
func GetStoreConfig() *Store
func (*Store) CorrectorThroughput ¶ added in v1.1.0
func (*Store) InsertAddr ¶ added in v0.2.0
func (*Store) SelectAddr ¶ added in v0.2.0
func (Store) ValidateEngine ¶
type Subscriber ¶ added in v1.1.0
type Subscriber struct {
Enabled bool `toml:"enabled"`
HTTPTimeout toml.Duration `toml:"http-timeout"`
InsecureSkipVerify bool `toml:"insecure-skip-verify"`
HttpsCertificate string `toml:"https-certificate"`
WriteBufferSize int `toml:"write-buffer-size"`
WriteConcurrency int `toml:"write-concurrency"`
}
func NewSubscriber ¶ added in v1.1.0
func NewSubscriber() Subscriber
func (*Subscriber) ShowConfigs ¶ added in v1.1.1
func (c *Subscriber) ShowConfigs() map[string]interface{}
func (Subscriber) Validate ¶ added in v1.1.0
func (s Subscriber) Validate() error
type TSMeta ¶
type TSMeta struct {
Common *Common `toml:"common"`
Meta *Meta `toml:"meta"`
Data Store `toml:"data"`
Logging Logger `toml:"logging"`
Monitor Monitor `toml:"monitor"`
Gossip *Gossip `toml:"gossip"`
Spdy Spdy `toml:"spdy"`
// TLS provides configuration options for all https endpoints.
TLS tlsconfig.Config `toml:"tls"`
Sherlock *SherlockConfig `toml:"sherlock"`
IODetector *iodetector.Config `toml:"io-detector"`
}
TSMeta represents the configuration format for the ts-meta binary.
func (*TSMeta) ApplyEnvOverrides ¶
ApplyEnvOverrides applies the environment configuration on top of the config.
func (*TSMeta) GetLogStoreConfig ¶ added in v1.3.0
func (c *TSMeta) GetLogStoreConfig() *LogStoreConfig
func (*TSMeta) GetLogging ¶
func (*TSMeta) ShowConfigs ¶ added in v1.1.1
type TSMonitor ¶
type TSMonitor struct {
MonitorConfig MonitorMain `toml:"monitor"`
QueryConfig MonitorQuery `toml:"query"`
ReportConfig MonitorReport `toml:"report"`
Logging Logger `toml:"logging"`
}
TSMonitor represents the configuration format for the ts-monitor binary.
func NewTSMonitor ¶
func NewTSMonitor() *TSMonitor
NewTSMonitor returns an instance of Config with reasonable defaults.
func (*TSMonitor) ApplyEnvOverrides ¶
ApplyEnvOverrides applies the environment configuration on top of the config.
func (*TSMonitor) GetLogStoreConfig ¶ added in v1.3.0
func (c *TSMonitor) GetLogStoreConfig() *LogStoreConfig
func (*TSMonitor) GetLogging ¶
func (*TSMonitor) ShowConfigs ¶ added in v1.1.1
type TSSql ¶
type TSSql struct {
Common *Common `toml:"common"`
Meta *Meta `toml:"meta"`
Coordinator Coordinator `toml:"coordinator"`
Monitor Monitor `toml:"monitor"`
Logging Logger `toml:"logging"`
Gossip *Gossip `toml:"gossip"`
Spdy Spdy `toml:"spdy"`
HTTP httpdConfig.Config `toml:"http"`
// TLS provides configuration options for all https endpoints.
TLS tlsconfig.Config `toml:"tls"`
Analysis Castor `toml:"castor"`
Sherlock *SherlockConfig `toml:"sherlock"`
SelectSpec SelectSpecConfig `toml:"spec-limit"`
Subscriber Subscriber `toml:"subscriber"`
ContinuousQuery ContinuousQueryConfig `toml:"continuous_queries"`
Data Store `toml:"data"`
ShelfMode ShelfMode `toml:"shelf-mode"`
Limits Limits `toml:"limits"`
RuntimeConfig RuntimeConfig `toml:"runtime-config"`
RecordWrite RecordWriteConfig `toml:"record-write"`
Topo Topo `toml:"topo"`
}
TSSql represents the configuration format for the ts-sql binary.
func (*TSSql) ApplyEnvOverrides ¶
ApplyEnvOverrides applies the environment configuration on top of the config.
func (*TSSql) GetLogStoreConfig ¶ added in v1.3.0
func (c *TSSql) GetLogStoreConfig() *LogStoreConfig
func (*TSSql) GetLogging ¶
func (*TSSql) ShowConfigs ¶ added in v1.1.1
type TSStore ¶
type TSStore struct {
Common *Common `toml:"common"`
Data Store `toml:"data"`
Coordinator Coordinator `toml:"coordinator"`
Monitor Monitor `toml:"monitor"`
Logging Logger `toml:"logging"`
Gossip *Gossip `toml:"gossip"`
Spdy Spdy `toml:"spdy"`
ShelfMode ShelfMode `toml:"shelf-mode"`
HTTPD httpdConf.Config `toml:"http"`
Retention retention.Config `toml:"retention"`
DownSample retention.Config `toml:"downsample"`
HierarchicalStore HierarchicalConfig `toml:"hierarchical_storage"`
Stream stream.Config `toml:"stream"`
// TLS provides configuration options for all https endpoints.
TLS tlsconfig.Config `toml:"tls"`
Analysis Castor `toml:"castor"`
Sherlock *SherlockConfig `toml:"sherlock"`
IODetector *iodetector.Config `toml:"io-detector"`
Meta *Meta `toml:"meta"`
ClvConfig *ClvConfig `toml:"clv_config"`
SelectSpec SelectSpecConfig `toml:"spec-limit"`
// index
Index *Index `toml:"index"`
// last row cache
LastRowCache *LastRowCacheConfig `toml:"last_row_cache"`
// logkeeper config
LogStore *LogStoreConfig `toml:"logstore"`
ShardMerge ShardMergeConfig `toml:"shardMerge_service"`
ObsMode Obs `toml:"Obs"`
}
TSStore represents the configuration format for the ts-store binary.
func NewTSStore ¶
NewTSStore returns an instance of Config with reasonable defaults.
func (*TSStore) ApplyEnvOverrides ¶
ApplyEnvOverrides applies the environment configuration on top of the config.
func (*TSStore) GetLogStoreConfig ¶ added in v1.3.0
func (c *TSStore) GetLogStoreConfig() *LogStoreConfig
func (*TSStore) GetLogging ¶
func (*TSStore) ShowConfigs ¶ added in v1.1.1
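A hedged end-to-end sketch of loading a ts-store configuration: the file path is a placeholder, and it is assumed that *TSStore satisfies the Config interface accepted by Parse and that Parse decodes the TOML file into it:

package main

import (
    "log"
    "os"

    "github.com/openGemini/openGemini/lib/config"
)

func main() {
    var cfg config.TSStore
    if err := config.Parse(&cfg, "/etc/openGemini/ts-store.conf"); err != nil {
        log.Fatal(err)
    }
    // Apply OPENGEMINI-style environment overrides, then validate the result.
    if err := cfg.ApplyEnvOverrides(os.Getenv); err != nil {
        log.Fatal(err)
    }
    if err := cfg.Validate(); err != nil {
        log.Fatal(err)
    }
}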
type TsRecover ¶ added in v1.3.0
type TsRecover struct {
Data Store `toml:"data"`
}
func NewTsRecover ¶ added in v1.3.0
func NewTsRecover() *TsRecover
func (*TsRecover) ApplyEnvOverrides ¶ added in v1.3.0
ApplyEnvOverrides applies the environment configuration on top of the config.
func (*TsRecover) GetLogStoreConfig ¶ added in v1.3.0
func (c *TsRecover) GetLogStoreConfig() *LogStoreConfig
func (*TsRecover) GetLogging ¶ added in v1.3.0
func (*TsRecover) ShowConfigs ¶ added in v1.3.0
type Wal ¶ added in v1.3.0
type Wal struct {
WalSyncInterval toml.Duration `toml:"wal-sync-interval"`
WalEnabled bool `toml:"wal-enabled"`
WalReplayParallel bool `toml:"wal-replay-parallel"`
WalReplayAsync bool `toml:"wal-replay-async"`
WalUsedForStream bool `toml:"wal-used-for-stream"`
WalReplayBatchSize toml.Size `toml:"wal-replay-batch-size"`
}
func NewWalConfig ¶ added in v1.3.0
func NewWalConfig() Wal
Source Files ¶
- castor.go
- cert_validate.go
- compact.go
- config.go
- consume.go
- continuousquery.go
- fence.go
- forward.go
- ha_policy.go
- hierarchical.go
- index.go
- last_row_cache.go
- limits.go
- logger.go
- logstore.go
- memtable.go
- meta.go
- monitor.go
- obs.go
- openGemini_dir.go
- parquet_task.go
- raft_storage.go
- readcache.go
- recordwrite.go
- recover.go
- runtimeconfig.go
- select_spec.go
- sfs.go
- shard_merge.go
- sherlock.go
- spdy.go
- sql.go
- store.go
- store_raft.go
- subscriber.go
- topo.go
- validator.go
- wal.go