Documentation
¶
Index ¶
Constants ¶
const ( FormatLogfmt = "logfmt" FormatJSON = "json" FormatUnknown = "unknown" TooFewTokens = "too_few_tokens" TooManyTokens = "too_many_tokens" LineTooLong = "line_too_long" )
Variables ¶
This section is empty.
Functions ¶
func DetectLogFormat ¶ added in v3.2.0
DetectLogFormat guesses at how the logs are encoded based on some simple heuristics. It only runs on the first log line when a new stream is created, so it can afford to do more complex parsing or regex matching.
Types ¶
type Chunk ¶
type Chunk struct {
Samples []logproto.PatternSample
}
func (Chunk) ForRange ¶
func (c Chunk) ForRange(start, end, step, sampleInterval model.Time) []logproto.PatternSample
ForRange returns samples with only the values in the given range [start:end) and aggregates them by step duration. start and end are in milliseconds since epoch. step is a duration in milliseconds. sampleInterval is the configured sample interval for this chunk.
type Chunks ¶
type Chunks []Chunk
func (*Chunks) Add ¶
func (c *Chunks) Add(ts model.Time, maxChunkAge time.Duration, sampleInterval time.Duration) *logproto.PatternSample
Add records the sample by incrementing the value of the current sample or creating a new sample if past the time resolution of the current one. Returns the previous sample if a new sample was created, nil otherwise.
type Config ¶
type Config struct {
LogClusterDepth int
SimTh float64
MaxChildren int
ExtraDelimiters []string
MaxClusters int
ParamString string
MaxEvictionRatio float64
MaxAllowedLineLength int
MaxChunkAge time.Duration
SampleInterval time.Duration
// contains filtered or unexported fields
}
func DefaultConfig ¶
func DefaultConfig() *Config
type DedupingTokenizer ¶ added in v3.2.0
type DedupingTokenizer struct {
LineTokenizer
// contains filtered or unexported fields
}
func (DedupingTokenizer) Join ¶ added in v3.2.0
func (d DedupingTokenizer) Join(tokens []string, state interface{}) string
type Drain ¶
type Drain struct {
// contains filtered or unexported fields
}
func (*Drain) Clusters ¶
func (d *Drain) Clusters() []*LogCluster
func (*Drain) Delete ¶
func (d *Drain) Delete(cluster *LogCluster)
type LineTokenizer ¶ added in v3.1.0
type LineTokenizer interface {
Tokenize(line string, tokens []string, state interface{}, linesDropped *prometheus.CounterVec) ([]string, interface{})
Join(tokens []string, state interface{}) string
Clone(tokens []string, state interface{}) ([]string, interface{})
}
type LogCluster ¶
type LogCluster struct {
Size int
Tokens []string
TokenState interface{}
Stringer func([]string, interface{}) string
Volume int64
SampleCount int64
Chunks Chunks
// contains filtered or unexported fields
}
func (*LogCluster) Prune ¶
func (c *LogCluster) Prune(olderThan time.Duration) []*logproto.PatternSample
func (*LogCluster) Samples ¶
func (c *LogCluster) Samples() []*logproto.PatternSample
func (*LogCluster) String ¶
func (c *LogCluster) String() string
type LogClusterCache ¶
type LogClusterCache struct {
// contains filtered or unexported fields
}
func (*LogClusterCache) Get ¶
func (c *LogClusterCache) Get(key int) *LogCluster
func (*LogClusterCache) Set ¶
func (c *LogClusterCache) Set(key int, cluster *LogCluster)
func (*LogClusterCache) Values ¶
func (c *LogClusterCache) Values() []*LogCluster
type Metrics ¶ added in v3.1.0
type Metrics struct {
PatternsEvictedTotal prometheus.Counter
PatternsPrunedTotal prometheus.Counter
PatternsDetectedTotal prometheus.Counter
LinesSkipped *prometheus.CounterVec
TokensPerLine prometheus.Observer
StatePerLine prometheus.Observer
}