package metric

v0.65.0
Published: Jan 22, 2026 License: Apache-2.0 Imports: 3 Imported by: 0

Documentation

Index

Constants

This section is empty.

Variables

var (
	SearchOverall = promauto.NewCounter(prometheus.CounterOpts{
		Namespace: "seq_db_ingestor",
		Subsystem: "search",
		Name:      "total",
		Help:      "Number of search requests ingestor started to process",
	})
	SearchColdTotal = promauto.NewCounter(prometheus.CounterOpts{
		Namespace: "seq_db_ingestor",
		Subsystem: "search",
		Name:      "cold_total",
		Help:      "Number of search requests sent to cold stores",
	})
	SearchColdErrors = promauto.NewCounter(prometheus.CounterOpts{
		Namespace: "seq_db_ingestor",
		Subsystem: "search",
		Name:      "cold_errors_total",
		Help:      "Number of errors in cold search requests",
	})
	SearchErrors = promauto.NewCounter(prometheus.CounterOpts{
		Namespace: "seq_db_ingestor",
		Subsystem: "search",
		Name:      "errors_total",
		Help:      "Number of search requests completed with error",
	})
	IngestorPanics = promauto.NewCounter(prometheus.CounterOpts{
		Namespace: "seq_db_ingestor",
		Subsystem: "common",
		Name:      "panics_total",
		Help:      "Number of panics in ingestor",
	})

	SearchPartial = promauto.NewCounter(prometheus.CounterOpts{
		Namespace: "seq_db_ingestor",
		Subsystem: "search",
		Name:      "partial_total",
		Help:      "Number of searches ending with partial response",
	})

	IngestorBulkDocProvideDurationSeconds = promauto.NewHistogram(prometheus.HistogramOpts{
		Namespace: "seq_db_ingestor",
		Subsystem: "bulk",
		Name:      "doc_provide_duration_seconds",
		Help:      "Bulk processing time (parsing, tokenization, extracting meta, compressing docs and meta) by ingestor",
		Buckets:   SecondsBuckets,
	})
	IngestorBulkSendAttemptDurationSeconds = promauto.NewHistogram(prometheus.HistogramOpts{
		Namespace: "seq_db_ingestor",
		Subsystem: "bulk",
		Name:      "send_attempt_duration_seconds",
		Help:      "Time spent sending a bulk to stores successfully in seconds",
		Buckets:   SecondsBuckets,
	})
	IngestorBulkAttemptErrorDurationSeconds = promauto.NewHistogram(prometheus.HistogramOpts{
		Namespace: "seq_db_ingestor",
		Subsystem: "bulk",
		Name:      "attempt_error_duration_seconds",
		Help:      "Time spent before error occurred in bulk send attempts in seconds",
		Buckets:   SecondsBuckets,
	})
	IngestorBulkSkipCold = promauto.NewCounter(prometheus.CounterOpts{
		Namespace: "seq_db_ingestor",
		Subsystem: "bulk",
		Name:      "skip_cold_total",
		Help:      "Number of bulk requests where sending to cold storage was skipped since it was already stored in cold at the previous attempt",
	})
	IngestorBulkSkipShard = promauto.NewCounter(prometheus.CounterOpts{
		Namespace: "seq_db_ingestor",
		Subsystem: "bulk",
		Name:      "skip_shard_total",
		Help:      "Number of replicas skipped when choosing replicas to send a bulk since the replica was chosen for storage already",
	})
	BulkErrors = promauto.NewCounter(prometheus.CounterOpts{
		Namespace: "seq_db_ingestor",
		Subsystem: "bulk",
		Name:      "errors_total",
		Help:      "Number of errors in bulk processing",
	})

	DocumentsFetched = promauto.NewHistogram(prometheus.HistogramOpts{
		Namespace: "seq_db_ingestor",
		Subsystem: "fetch",
		Name:      "fetched_docs",
		Help:      "Number of documents returned by the Fetch method",
		Buckets:   prometheus.ExponentialBuckets(1, 2, 16),
	})
	DocumentsRequested = promauto.NewHistogram(prometheus.HistogramOpts{
		Namespace: "seq_db_ingestor",
		Subsystem: "fetch",
		Name:      "requested_docs",
		Help:      "Number of documents requested using the Fetch method",
		Buckets:   prometheus.ExponentialBuckets(1, 3, 16),
	})
	FetchErrors = promauto.NewCounter(prometheus.CounterOpts{
		Namespace: "seq_db_ingestor",
		Subsystem: "fetch",
		Name:      "errors_total",
		Help:      "Number of errors in fetch requests",
	})
	FetchNotFoundError = promauto.NewHistogram(prometheus.HistogramOpts{
		Namespace: "seq_db_ingestor",
		Subsystem: "fetch",
		Name:      "not_found_errors",
		Help:      "Number of documents not found per fetch request",
		Buckets:   prometheus.ExponentialBuckets(1, 3, 16),
	})
	FetchDuplicateErrors = promauto.NewHistogram(prometheus.HistogramOpts{
		Namespace: "seq_db_ingestor",
		Subsystem: "fetch",
		Name:      "duplicate_errors",
		Help:      "Number of duplicate document errors per fetch request",
		Buckets:   prometheus.ExponentialBuckets(1, 3, 16),
	})
	RateLimiterSize = promauto.NewGauge(prometheus.GaugeOpts{
		Namespace: "seq_db_ingestor",
		Subsystem: "ratelimiter",
		Name:      "map_size",
		Help:      "Size of internal map of rate limiter",
	})

	CircuitBreakerSuccess = promauto.NewCounterVec(prometheus.CounterOpts{
		Namespace: "seq_db_ingestor",
		Subsystem: "circuit_breaker",
		Name:      "success_total",
		Help:      "Count of each time `Execute` does not return an error",
	}, []string{"name"})
	CircuitBreakerErr = promauto.NewCounterVec(prometheus.CounterOpts{
		Namespace: "seq_db_ingestor",
		Subsystem: "circuit_breaker",
		Name:      "errors_total",
		Help:      "The number of errors that have occurred in the circuit breaker",
	}, []string{"name", "kind"})
	CircuitBreakerState = promauto.NewGaugeVec(prometheus.GaugeOpts{
		Namespace: "seq_db_ingestor",
		Subsystem: "circuit_breaker",
		Name:      "state",
		Help:      "The state of the circuit breaker",
	}, []string{"name"})

	ExportDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{
		Namespace: "seq_db_ingestor",
		Subsystem: "export",
		Name:      "duration_seconds",
		Help:      "Time taken to export data by protocol in seconds",
		Buckets:   SecondsBucketsDoublePrecision,
	}, []string{"protocol"})
	ExportSize = promauto.NewHistogramVec(prometheus.HistogramOpts{
		Namespace: "seq_db_ingestor",
		Subsystem: "export",
		Name:      "size_bytes",
		Help:      "Size of exported data by protocol in bytes",
		Buckets:   prometheus.ExponentialBuckets(10, 3, 20),
	}, []string{"protocol"})
	CurrentExportersCount = promauto.NewGaugeVec(prometheus.GaugeOpts{
		Namespace: "seq_db_ingestor",
		Subsystem: "export",
		Name:      "current_exporters_in_progress",
		Help:      "Current number of active exporters in progress by protocol",
	}, []string{"protocol"})

	TokenizerTokensPerMessage = promauto.NewHistogramVec(prometheus.HistogramOpts{
		Namespace: "seq_db_ingestor",
		Subsystem: "tokenizer",
		Name:      "tokens_per_message",
		Help:      "Number of tokens extracted per message by tokenizer type",
		Buckets:   prometheus.ExponentialBuckets(1, 2, 16),
	}, []string{"tokenizer"})
	TokenizerParseDurationSeconds = promauto.NewHistogramVec(prometheus.HistogramOpts{
		Namespace: "seq_db_ingestor",
		Subsystem: "tokenizer",
		Name:      "parse_duration_seconds",
		Help:      "Time taken to parse and tokenize messages in seconds by tokenizer type",
		Buckets:   SecondsBuckets,
	}, []string{"tokenizer"})
	TokenizerIncomingTextLen = promauto.NewHistogram(prometheus.HistogramOpts{
		Namespace: "seq_db_ingestor",
		Subsystem: "tokenizer",
		Name:      "incoming_text_len_bytes",
		Help:      "Length of incoming text for tokenization in bytes",
		Buckets:   prometheus.ExponentialBuckets(1, 2, 16),
	})
)
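
All of these collectors are created with promauto, so they are registered with the default Prometheus registry when the package is initialized; instrumenting code only imports the package and updates them. Below is a minimal usage sketch, assuming the package lives at github.com/ozontech/seq-db/metric; the import path and the handleExport function are illustrative, not part of this package.

package main

import (
	"time"

	"github.com/ozontech/seq-db/metric" // assumed import path
)

// handleExport illustrates driving the export metrics declared above:
// the gauge tracks in-flight exporters, the histograms observe the
// duration in seconds and the exported size in bytes.
func handleExport(protocol string, export func() (int, error)) error {
	metric.CurrentExportersCount.WithLabelValues(protocol).Inc()
	defer metric.CurrentExportersCount.WithLabelValues(protocol).Dec()

	start := time.Now()
	size, err := export()
	if err != nil {
		return err
	}

	metric.ExportDuration.WithLabelValues(protocol).Observe(time.Since(start).Seconds())
	metric.ExportSize.WithLabelValues(protocol).Observe(float64(size))
	return nil
}

func main() {
	_ = handleExport("grpc", func() (int, error) { return 1024, nil })
}
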
var (
	Version = promauto.NewCounterVec(prometheus.CounterOpts{
		Namespace: "seq_db",
		Name:      "version_starts_total",
		Help:      "Number of seq-db instances started with a particular version",
	},
		[]string{"version"})

	RepetitionsDocsTotal = promauto.NewCounter(prometheus.CounterOpts{
		Namespace: "seq_db",
		Subsystem: "merge",
		Name:      "repetitions_docs_total",
		Help:      "Number of doc repetitions observed while merging QPRs",
	})

	CountersTotal = promauto.NewCounterVec(prometheus.CounterOpts{
		Namespace: "seq_db",
		Subsystem: "common",
		Name:      "counters_total",
		Help:      "Counter for various events",
	}, []string{"name"})

	// SecondsBuckets covers the range from 1ms to 177s.
	SecondsBuckets = prometheus.ExponentialBuckets(0.001, 3, 12)
	// SecondsBucketsDoublePrecision covers the range from 1ms to 4hrs.
	SecondsBucketsDoublePrecision = prometheus.ExponentialBuckets(0.001, 3, 16)
	// SecondsBucketsDelay covers the range from 1min to 2048min.
	SecondsBucketsDelay = prometheus.ExponentialBuckets(60, 2, 12)
	// SecondsRanges covers the range from 1min to 47 days.
	SecondsRanges = prometheus.ExponentialBuckets(60, 2.1, 16)
)
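
The bucket helpers are plain prometheus.ExponentialBuckets slices, so the ranges quoted in the comments follow directly from start * factor^(count-1). A standalone snippet illustrating this (not part of the package itself):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// SecondsBuckets: 12 buckets starting at 1ms, each 3x the previous,
	// so the largest bound is 0.001 * 3^11 ≈ 177s.
	fmt.Println(prometheus.ExponentialBuckets(0.001, 3, 12))

	// SecondsBucketsDelay: 12 buckets starting at 60s, doubling each time,
	// so the largest bound is 60 * 2^11 s = 2048min.
	fmt.Println(prometheus.ExponentialBuckets(60, 2, 12))
}
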
var (
	BulkDurationSeconds = promauto.NewHistogram(prometheus.HistogramOpts{
		Namespace: "seq_db_store",
		Subsystem: "bulk",
		Name:      "duration_seconds",
		Help:      "Bulk processing time",
		Buckets:   SecondsBuckets,
	})
	BulkDuplicateDocsTotal = promauto.NewHistogram(prometheus.HistogramOpts{
		Namespace: "seq_db_store",
		Subsystem: "bulk",
		Name:      "duplicate_docs",
		Help:      "Number of duplicate documents found in active fraction in bulk requests",
		Buckets:   prometheus.ExponentialBuckets(1, 4, 16),
	})
	BulkDocsTotal = promauto.NewHistogram(prometheus.HistogramOpts{
		Namespace: "seq_db_store",
		Subsystem: "bulk",
		Name:      "docs",
		Help:      "Number of documents in bulk request",
		Buckets:   prometheus.ExponentialBuckets(1, 4, 16),
	})
	BulkDocBytesTotal = promauto.NewHistogram(prometheus.HistogramOpts{
		Namespace: "seq_db_store",
		Subsystem: "bulk",
		Name:      "doc_bytes",
		Help:      "Byte size of document in bulk requests",
		Buckets:   prometheus.ExponentialBuckets(1, 4, 16),
	})
	BulkMetaBytesTotal = promauto.NewHistogram(prometheus.HistogramOpts{
		Namespace: "seq_db_store",
		Subsystem: "bulk",
		Name:      "meta_bytes",
		Help:      "Size of metadata in bulk requests in bytes",
		Buckets:   prometheus.ExponentialBuckets(1, 4, 16),
	})

	SearchInFlightQueriesTotal = promauto.NewGauge(prometheus.GaugeOpts{
		Namespace: "seq_db_store",
		Subsystem: "search",
		Name:      "in_flight",
		Help:      "Current number of search requests being processed",
	})
	RejectedRequests = promauto.NewCounterVec(prometheus.CounterOpts{
		Namespace: "seq_db_store",
		Name:      "rejected_requests_total",
		Help:      "Number of rejected requests by method and reason",
	}, []string{"method", "type"})
	SearchDurationSeconds = promauto.NewHistogram(prometheus.HistogramOpts{
		Namespace: "seq_db_store",
		Subsystem: "search",
		Name:      "duration_seconds",
		Help:      "Search request duration time (only successful searches)",
		Buckets:   SecondsBuckets,
	})

	SearchRangesSeconds = promauto.NewHistogram(prometheus.HistogramOpts{
		Namespace: "seq_db_store",
		Subsystem: "search",
		Name:      "ranges_seconds",
		Help:      "Time range covered by search requests in seconds",
		Buckets:   SecondsRanges,
	})
	FetchInFlightQueriesTotal = promauto.NewGauge(prometheus.GaugeOpts{
		Namespace: "seq_db_store",
		Subsystem: "fetch",
		Name:      "in_flight",
		Help:      "Current number of fetch requests being processed",
	})
	FetchDurationSeconds = promauto.NewHistogram(prometheus.HistogramOpts{
		Namespace: "seq_db_store",
		Subsystem: "fetch",
		Name:      "duration_seconds",
		Help:      "Fetch requests duration time",
		Buckets:   SecondsBuckets,
	})
	FetchDocsTotal = promauto.NewHistogram(prometheus.HistogramOpts{
		Namespace: "seq_db_store",
		Subsystem: "fetch",
		Name:      "docs",
		Help:      "Number of documents returned per fetch request",
		Buckets:   prometheus.ExponentialBuckets(1, 4, 18),
	})
	FetchDocsNotFound = promauto.NewHistogram(prometheus.HistogramOpts{
		Namespace: "seq_db_store",
		Subsystem: "fetch",
		Name:      "docs_not_found",
		Help:      "Number of documents not found per fetch request",
		Buckets:   prometheus.ExponentialBuckets(1, 4, 18),
	})
	FetchBytesTotal = promauto.NewHistogram(prometheus.HistogramOpts{
		Namespace: "seq_db_store",
		Subsystem: "fetch",
		Name:      "bytes",
		Help:      "Total size of data in bytes returned per fetch request",
		Buckets:   prometheus.ExponentialBuckets(256, 4, 20),
	})

	StoreReady = promauto.NewGauge(prometheus.GaugeOpts{
		Namespace: "seq_db_store",
		Subsystem: "main",
		Name:      "ready",
		Help:      "Indicates if a store is ready to accept requests",
	})

	StorePanics = promauto.NewCounter(prometheus.CounterOpts{
		Namespace: "seq_db_store",
		Subsystem: "common",
		Name:      "panics_total",
		Help:      "Number of panics in store",
	})

	SkippedIndexesText    = skippedIndexes.WithLabelValues("text")
	SkippedIndexesKeyword = skippedIndexes.WithLabelValues("keyword")
	SkippedIndexesPath    = skippedIndexes.WithLabelValues("path")

	SkippedIndexesBytesText    = skippedIndexesBytes.WithLabelValues("text")
	SkippedIndexesBytesKeyword = skippedIndexesBytes.WithLabelValues("keyword")
	SkippedIndexesBytesPath    = skippedIndexesBytes.WithLabelValues("path")
)
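
As with the ingestor metrics, these are promauto collectors on the default registry. The sketch below shows how the readiness gauge, in-flight gauge, and rejection counter might be driven; the import path, handleStoreSearch, and the label values are assumptions for illustration.

package main

import (
	"github.com/ozontech/seq-db/metric" // assumed import path
)

// handleStoreSearch is a hypothetical admission hook showing typical use of
// the store-side gauges and counters declared above.
func handleStoreSearch(tooBusy bool) {
	if tooBusy {
		// Label values here are illustrative, not taken from seq-db itself.
		metric.RejectedRequests.WithLabelValues("search", "in_flight_limit").Inc()
		return
	}

	metric.SearchInFlightQueriesTotal.Inc()
	defer metric.SearchInFlightQueriesTotal.Dec()

	// ... run the search and record SearchDurationSeconds on success ...
}

func main() {
	metric.StoreReady.Set(1) // the store is ready to accept requests
	handleStoreSearch(false)
}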

Functions

This section is empty.

Types

type RollingAverage

type RollingAverage struct {
	// contains filtered or unexported fields
}

RollingAverage computes an average over a rolling window of samples.

func NewRollingAverage

func NewRollingAverage(sampleSize int) *RollingAverage

func (*RollingAverage) Append

func (r *RollingAverage) Append(value int)

Append adds the value to the sample.

func (*RollingAverage) Filled

func (r *RollingAverage) Filled() bool

Filled returns true if the sample is filled.

func (*RollingAverage) Get

func (r *RollingAverage) Get() float32

Get returns the current rolling average.
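
A short usage sketch, assuming the import path github.com/ozontech/seq-db/metric and that the window keeps the last sampleSize appended values:

package main

import (
	"fmt"

	"github.com/ozontech/seq-db/metric" // assumed import path
)

func main() {
	avg := metric.NewRollingAverage(3) // average over a window of 3 samples

	for _, v := range []int{10, 20, 30, 40} {
		avg.Append(v)
	}

	if avg.Filled() {
		// Assuming the oldest value (10) has rolled out of the window,
		// this prints roughly (20+30+40)/3 = 30.
		fmt.Println(avg.Get())
	}
}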

