Documentation
¶
Index ¶
- Constants
- func DetectLogFormat(line string) string
- type Chunk
- type Chunks
- type Config
- type DedupingTokenizer
- type Drain
- func (d *Drain) Clusters() []*LogCluster
- func (d *Drain) Delete(cluster *LogCluster)
- func (d *Drain) Prune()
- func (d *Drain) Train(content string, ts int64) *LogCluster
- func (d *Drain) TrainPattern(content string, samples []*logproto.PatternSample) *LogCluster
- func (d *Drain) TrainTokens(tokens []string, stringer func([]string) string, ts int64) *LogCluster
- type Limits
- type LineTokenizer
- type LogCluster
- type LogClusterCache
- type Metrics
- type Node
Constants ¶
View Source
const (
	FormatLogfmt  = "logfmt"
	FormatJSON    = "json"
	FormatUnknown = "unknown"

	TooFewTokens  = "too_few_tokens"
	TooManyTokens = "too_many_tokens"
	LineTooLong   = "line_too_long"
)
Variables ¶
This section is empty.
Functions ¶
func DetectLogFormat ¶ added in v3.2.0
DetectLogFormat guesses at how the logs are encoded based on some simple heuristics. It only runs on the first log line when a new stream is created, so it could do some more complex parsing or regex.
Types ¶
type Chunk ¶
type Chunk struct {
Samples []logproto.PatternSample
}
type Config ¶
type Config struct {
LogClusterDepth int
SimTh float64
MaxChildren int
ExtraDelimiters []string
MaxClusters int
ParamString string
MaxEvictionRatio float64
MaxAllowedLineLength int
// contains filtered or unexported fields
}
func DefaultConfig ¶
func DefaultConfig() *Config
type DedupingTokenizer ¶ added in v3.2.0
type DedupingTokenizer struct {
LineTokenizer
// contains filtered or unexported fields
}
func (DedupingTokenizer) Join ¶ added in v3.2.0
func (d DedupingTokenizer) Join(tokens []string, state interface{}) string
type Drain ¶
type Drain struct {
// contains filtered or unexported fields
}
func (*Drain) Clusters ¶
func (d *Drain) Clusters() []*LogCluster
func (*Drain) Delete ¶
func (d *Drain) Delete(cluster *LogCluster)
func (*Drain) TrainPattern ¶
func (d *Drain) TrainPattern(content string, samples []*logproto.PatternSample) *LogCluster
func (*Drain) TrainTokens ¶
func (d *Drain) TrainTokens(tokens []string, stringer func([]string) string, ts int64) *LogCluster
type LineTokenizer ¶ added in v3.1.0
type LineTokenizer interface {
Tokenize(line string, tokens []string, state interface{}, linesDropped *prometheus.CounterVec) ([]string, interface{})
Join(tokens []string, state interface{}) string
Clone(tokens []string, state interface{}) ([]string, interface{})
}
type LogCluster ¶
type LogCluster struct {
Size int
Tokens []string
TokenState interface{}
Stringer func([]string, interface{}) string
Chunks Chunks
// contains filtered or unexported fields
}
func (*LogCluster) Iterator ¶
func (c *LogCluster) Iterator(from, through, step model.Time) iter.Iterator
func (*LogCluster) Prune ¶
func (c *LogCluster) Prune(olderThan time.Duration)
func (*LogCluster) Samples ¶
func (c *LogCluster) Samples() []*logproto.PatternSample
func (*LogCluster) String ¶
func (c *LogCluster) String() string
type LogClusterCache ¶
type LogClusterCache struct {
// contains filtered or unexported fields
}
func (*LogClusterCache) Get ¶
func (c *LogClusterCache) Get(key int) *LogCluster
func (*LogClusterCache) Set ¶
func (c *LogClusterCache) Set(key int, cluster *LogCluster)
func (*LogClusterCache) Values ¶
func (c *LogClusterCache) Values() []*LogCluster
type Metrics ¶ added in v3.1.0
type Metrics struct {
PatternsEvictedTotal prometheus.Counter
PatternsPrunedTotal prometheus.Counter
PatternsDetectedTotal prometheus.Counter
LinesSkipped *prometheus.CounterVec
TokensPerLine prometheus.Observer
StatePerLine prometheus.Observer
}
Click to show internal directories.
Click to hide internal directories.