reporter

package
v0.47.1 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Apr 20, 2026 License: Apache-2.0 Imports: 54 Imported by: 0

Documentation

Index

Constants

View Source
const (
	MetadataSchemaVersion   = "parca_write_schema_version"
	MetadataSchemaVersionV1 = "v1"
	ColumnLabelsPrefix      = "labels."
)
View Source
const (
	// ChunkSize 8MB is the size of the chunks in which debuginfo files are
	// uploaded and downloaded. AWS S3 requires a minimum part size of 5MB for
	// multi-part uploads and allows a maximum of 5GB; 8MB is a common default.
	ChunkSize = 1024 * 1024 * 8
	// MaxMsgSize is the maximum message size the server can receive or send. By default, it is 64MB.
	MaxMsgSize = 1024 * 1024 * 64
)
View Source
const (
	DATA_FILE_EXTENSION            string = ".padata"
	DATA_FILE_COMPRESSED_EXTENSION string = ".padata.zst"
)
View Source
const (
	MetadataSchemaVersionV2 = "v2"
)

V2 Schema Constants

View Source
const (
	ReasonUploadInProgress = "A previous upload is still in-progress and not stale yet (only stale uploads can be retried)."
)

Variables

View Source
var (
	LocationsField = arrow.Field{
		Name: "locations",
		Type: arrow.ListOf(arrow.StructOf([]arrow.Field{{
			Name: "address",
			Type: arrow.PrimitiveTypes.Uint64,
		}, {
			Name: "frame_type",
			Type: arrow.RunEndEncodedOf(
				arrow.PrimitiveTypes.Int32,
				&arrow.DictionaryType{IndexType: arrow.PrimitiveTypes.Uint32, ValueType: arrow.BinaryTypes.Binary},
			),
		}, {
			Name: "mapping_start",
			Type: arrow.RunEndEncodedOf(arrow.PrimitiveTypes.Int32, arrow.PrimitiveTypes.Uint64),
		}, {
			Name: "mapping_limit",
			Type: arrow.RunEndEncodedOf(arrow.PrimitiveTypes.Int32, arrow.PrimitiveTypes.Uint64),
		}, {
			Name: "mapping_offset",
			Type: arrow.RunEndEncodedOf(arrow.PrimitiveTypes.Int32, arrow.PrimitiveTypes.Uint64),
		}, {
			Name: "mapping_file",
			Type: arrow.RunEndEncodedOf(
				arrow.PrimitiveTypes.Int32,
				&arrow.DictionaryType{IndexType: arrow.PrimitiveTypes.Uint32, ValueType: arrow.BinaryTypes.Binary},
			),
		}, {
			Name: "mapping_build_id",
			Type: arrow.RunEndEncodedOf(
				arrow.PrimitiveTypes.Int32,
				&arrow.DictionaryType{IndexType: arrow.PrimitiveTypes.Uint32, ValueType: arrow.BinaryTypes.Binary},
			),
		}, {
			Name: "lines",
			Type: arrow.ListOf(arrow.StructOf([]arrow.Field{{
				Name: "line",
				Type: arrow.PrimitiveTypes.Int64,
			}, {
				Name: "column",
				Type: arrow.PrimitiveTypes.Uint64,
			}, {
				Name: "function_name",
				Type: &arrow.DictionaryType{IndexType: arrow.PrimitiveTypes.Uint32, ValueType: arrow.BinaryTypes.Binary},
			}, {
				Name: "function_system_name",
				Type: &arrow.DictionaryType{IndexType: arrow.PrimitiveTypes.Uint32, ValueType: arrow.BinaryTypes.Binary},
			}, {
				Name: "function_filename",
				Type: arrow.RunEndEncodedOf(
					arrow.PrimitiveTypes.Int32,
					&arrow.DictionaryType{IndexType: arrow.PrimitiveTypes.Uint32, ValueType: arrow.BinaryTypes.Binary},
				),
			}, {
				Name: "function_start_line",
				Type: arrow.PrimitiveTypes.Int64,
			}}...)),
		}}...)),
	}

	StacktraceIDField = arrow.Field{
		Name: "stacktrace_id",
		Type: arrow.RunEndEncodedOf(
			arrow.PrimitiveTypes.Int32,
			&arrow.DictionaryType{IndexType: arrow.PrimitiveTypes.Uint32, ValueType: arrow.BinaryTypes.Binary},
		),
	}

	ValueField = arrow.Field{
		Name: "value",
		Type: arrow.PrimitiveTypes.Int64,
	}

	ProducerField = arrow.Field{
		Name: "producer",
		Type: arrow.RunEndEncodedOf(
			arrow.PrimitiveTypes.Int32,
			&arrow.DictionaryType{IndexType: arrow.PrimitiveTypes.Uint32, ValueType: arrow.BinaryTypes.Binary},
		),
	}

	SampleTypeField = arrow.Field{
		Name: "sample_type",
		Type: arrow.RunEndEncodedOf(
			arrow.PrimitiveTypes.Int32,
			&arrow.DictionaryType{IndexType: arrow.PrimitiveTypes.Uint32, ValueType: arrow.BinaryTypes.Binary},
		),
	}

	SampleUnitField = arrow.Field{
		Name: "sample_unit",
		Type: arrow.RunEndEncodedOf(
			arrow.PrimitiveTypes.Int32,
			&arrow.DictionaryType{IndexType: arrow.PrimitiveTypes.Uint32, ValueType: arrow.BinaryTypes.Binary},
		),
	}

	PeriodTypeField = arrow.Field{
		Name: "period_type",
		Type: arrow.RunEndEncodedOf(
			arrow.PrimitiveTypes.Int32,
			&arrow.DictionaryType{IndexType: arrow.PrimitiveTypes.Uint32, ValueType: arrow.BinaryTypes.Binary},
		),
	}

	PeriodUnitField = arrow.Field{
		Name: "period_unit",
		Type: arrow.RunEndEncodedOf(
			arrow.PrimitiveTypes.Int32,
			&arrow.DictionaryType{IndexType: arrow.PrimitiveTypes.Uint32, ValueType: arrow.BinaryTypes.Binary},
		),
	}

	TemporalityField = arrow.Field{
		Name: "temporality",
		Type: arrow.RunEndEncodedOf(
			arrow.PrimitiveTypes.Int32,
			&arrow.DictionaryType{IndexType: arrow.PrimitiveTypes.Uint32, ValueType: arrow.BinaryTypes.Binary},
		),
	}

	PeriodField = arrow.Field{
		Name: "period",
		Type: arrow.RunEndEncodedOf(arrow.PrimitiveTypes.Int32, arrow.PrimitiveTypes.Int64),
	}

	DurationField = arrow.Field{
		Name: "duration",
		Type: arrow.RunEndEncodedOf(arrow.PrimitiveTypes.Int32, arrow.PrimitiveTypes.Int64),
	}

	TimestampField = arrow.Field{
		Name: "timestamp",
		Type: arrow.RunEndEncodedOf(arrow.PrimitiveTypes.Int32, arrow.PrimitiveTypes.Int64),
	}
)
View Source
var (
	// FilenameDictTypeV2 is a dictionary of filenames for efficient storage.
	FilenameDictTypeV2 = &arrow.DictionaryType{
		IndexType: arrow.PrimitiveTypes.Uint32,
		ValueType: arrow.BinaryTypes.String,
	}

	// FunctionFieldTypeV2 defines the function struct type for v2.
	FunctionFieldTypeV2 = arrow.StructOf(
		arrow.Field{Name: "system_name", Type: arrow.BinaryTypes.StringView, Nullable: true},
		arrow.Field{Name: "filename", Type: FilenameDictTypeV2, Nullable: true},
		arrow.Field{Name: "start_line", Type: arrow.PrimitiveTypes.Uint64, Nullable: false},
	)

	// FunctionDictTypeV2 is a dictionary of functions for efficient storage.
	FunctionDictTypeV2 = &arrow.DictionaryType{
		IndexType: arrow.PrimitiveTypes.Uint32,
		ValueType: FunctionFieldTypeV2,
	}

	// LineFieldTypeV2 defines the line struct type for v2.
	LineFieldTypeV2 = arrow.StructOf(
		arrow.Field{Name: "line", Type: arrow.PrimitiveTypes.Uint64, Nullable: false},
		arrow.Field{Name: "column", Type: arrow.PrimitiveTypes.Uint64, Nullable: false},
		arrow.Field{Name: "function", Type: FunctionDictTypeV2, Nullable: false},
	)

	// FrameTypeDictTypeV2 is a dictionary of frame types with Uint32 index.
	FrameTypeDictTypeV2 = &arrow.DictionaryType{
		IndexType: arrow.PrimitiveTypes.Uint32,
		ValueType: arrow.BinaryTypes.String,
	}

	// MappingFileDictTypeV2 is a dictionary of mapping files for efficient storage.
	MappingFileDictTypeV2 = &arrow.DictionaryType{
		IndexType: arrow.PrimitiveTypes.Uint32,
		ValueType: arrow.BinaryTypes.String,
	}

	// MappingBuildIDDictTypeV2 is a dictionary of mapping build IDs for efficient storage.
	MappingBuildIDDictTypeV2 = &arrow.DictionaryType{
		IndexType: arrow.PrimitiveTypes.Uint32,
		ValueType: arrow.BinaryTypes.String,
	}

	// LocationTypeV2 defines the location struct type for v2.
	LocationTypeV2 = arrow.StructOf(
		arrow.Field{Name: "address", Type: arrow.PrimitiveTypes.Uint64, Nullable: false},
		arrow.Field{Name: "frame_type", Type: FrameTypeDictTypeV2, Nullable: true},
		arrow.Field{Name: "mapping_file", Type: MappingFileDictTypeV2, Nullable: true},
		arrow.Field{Name: "mapping_build_id", Type: MappingBuildIDDictTypeV2, Nullable: true},
		arrow.Field{Name: "lines", Type: arrow.ListViewOf(LineFieldTypeV2), Nullable: true},
	)

	// LocationDictTypeV2 is a dictionary of locations for efficient storage.
	LocationDictTypeV2 = &arrow.DictionaryType{
		IndexType: arrow.PrimitiveTypes.Uint32,
		ValueType: LocationTypeV2,
	}

	// StacktraceTypeV2 is a ListView of dictionary-encoded locations.
	// - Dictionary deduplicates individual locations
	// - ListView allows reusing offset/size for identical stacktraces
	StacktraceTypeV2 = arrow.ListViewOf(LocationDictTypeV2)

	// StacktraceFieldV2 is the field definition for stacktraces in the v2 sample schema.
	StacktraceFieldV2 = arrow.Field{
		Name:     "stacktrace",
		Type:     StacktraceTypeV2,
		Nullable: true,
	}

	TimestampFieldV2 = arrow.Field{
		Name: "timestamp",
		Type: &arrow.TimestampType{Unit: arrow.Nanosecond, TimeZone: "UTC"},
	}

	ProducerFieldV2 = arrow.Field{
		Name: "producer",
		Type: arrow.RunEndEncodedOf(arrow.PrimitiveTypes.Int32, arrow.BinaryTypes.String),
	}

	SampleTypeFieldV2 = arrow.Field{
		Name: "sample_type",
		Type: arrow.RunEndEncodedOf(arrow.PrimitiveTypes.Int32, arrow.BinaryTypes.String),
	}

	SampleUnitFieldV2 = arrow.Field{
		Name: "sample_unit",
		Type: arrow.RunEndEncodedOf(arrow.PrimitiveTypes.Int32, arrow.BinaryTypes.String),
	}

	PeriodTypeFieldV2 = arrow.Field{
		Name: "period_type",
		Type: arrow.RunEndEncodedOf(arrow.PrimitiveTypes.Int32, arrow.BinaryTypes.String),
	}

	PeriodUnitFieldV2 = arrow.Field{
		Name: "period_unit",
		Type: arrow.RunEndEncodedOf(arrow.PrimitiveTypes.Int32, arrow.BinaryTypes.String),
	}

	TemporalityFieldV2 = arrow.Field{
		Name:     "temporality",
		Nullable: true,
		Type:     arrow.RunEndEncodedOf(arrow.PrimitiveTypes.Int32, arrow.BinaryTypes.String),
	}

	DurationFieldV2 = arrow.Field{
		Name: "duration",
		Type: arrow.RunEndEncodedOf(arrow.PrimitiveTypes.Int32, arrow.PrimitiveTypes.Uint64),
	}

	StacktraceIDFieldV2 = arrow.Field{
		Name: "stacktrace_id",
		Type: extensions.NewUUIDType(),
	}
)
View Source
var ErrDebuginfoAlreadyExists = errors.New("debug info already exists")

Functions

func ArrowSamplesField

func ArrowSamplesField(profileLabelFields []arrow.Field) []arrow.Field

func ArrowSamplesFieldV2 added in v0.47.1

func ArrowSamplesFieldV2(profileLabelFields []arrow.Field) []arrow.Field

ArrowSamplesFieldV2 returns the fields for the v2 sample schema.

func SampleSchema

func SampleSchema(profileLabelFields []arrow.Field) *arrow.Schema

func SampleSchemaV2 added in v0.47.1

func SampleSchemaV2(profileLabelFields []arrow.Field) *arrow.Schema

SampleSchemaV2 creates the v2 sample schema with the given label fields.

Types

type BinaryDictionaryRunEndBuilder

type BinaryDictionaryRunEndBuilder struct {
	// contains filtered or unexported fields
}

func (*BinaryDictionaryRunEndBuilder) Append

func (b *BinaryDictionaryRunEndBuilder) Append(v []byte)

func (*BinaryDictionaryRunEndBuilder) AppendN

func (b *BinaryDictionaryRunEndBuilder) AppendN(v []byte, n uint64)

func (*BinaryDictionaryRunEndBuilder) AppendNull

func (b *BinaryDictionaryRunEndBuilder) AppendNull()

func (*BinaryDictionaryRunEndBuilder) AppendString

func (b *BinaryDictionaryRunEndBuilder) AppendString(v string)

func (*BinaryDictionaryRunEndBuilder) AppendStringN

func (b *BinaryDictionaryRunEndBuilder) AppendStringN(v string, n uint64)

func (*BinaryDictionaryRunEndBuilder) EnsureLength

func (b *BinaryDictionaryRunEndBuilder) EnsureLength(l int)

func (*BinaryDictionaryRunEndBuilder) Len

func (*BinaryDictionaryRunEndBuilder) NewArray

func (*BinaryDictionaryRunEndBuilder) Release

func (b *BinaryDictionaryRunEndBuilder) Release()

type FunctionDictBuilderV2 added in v0.47.1

type FunctionDictBuilderV2 struct {
	// contains filtered or unexported fields
}

FunctionDictBuilderV2 deduplicates functions using a map.

func NewFunctionDictBuilderV2 added in v0.47.1

func NewFunctionDictBuilderV2(mem memory.Allocator) *FunctionDictBuilderV2

NewFunctionDictBuilderV2 creates a new FunctionDictBuilderV2.

func (*FunctionDictBuilderV2) AppendFunction added in v0.47.1

func (b *FunctionDictBuilderV2) AppendFunction(f FunctionV2) uint32

AppendFunction adds a function and returns its dictionary index.

func (*FunctionDictBuilderV2) Len added in v0.47.1

func (b *FunctionDictBuilderV2) Len() int

Len returns the number of unique functions.

func (*FunctionDictBuilderV2) Release added in v0.47.1

func (b *FunctionDictBuilderV2) Release()

Release releases the builder resources.

type FunctionV2 added in v0.47.1

type FunctionV2 struct {
	SystemName string
	Filename   string
	StartLine  uint64
}

FunctionV2 represents a function for deduplication purposes.

type GrpcDebuginfoUploadServiceClient

type GrpcDebuginfoUploadServiceClient interface {
	Upload(ctx context.Context, opts ...grpc.CallOption) (debuginfogrpc.DebuginfoService_UploadClient, error)
}

type GrpcUploadClient

type GrpcUploadClient struct {
	GrpcDebuginfoUploadServiceClient
}

func (*GrpcUploadClient) Upload

func (c *GrpcUploadClient) Upload(ctx context.Context, uploadInstructions *debuginfopb.UploadInstructions, r io.Reader) (uint64, error)

type Int64RunEndBuilder

type Int64RunEndBuilder struct {
	// contains filtered or unexported fields
}

func (*Int64RunEndBuilder) Append

func (b *Int64RunEndBuilder) Append(v int64)

func (*Int64RunEndBuilder) NewArray

func (b *Int64RunEndBuilder) NewArray() arrow.Array

func (*Int64RunEndBuilder) Release

func (b *Int64RunEndBuilder) Release()

type Label

type Label struct {
	Name  string
	Value string
}

type Labels

type Labels []Label

func (Labels) String

func (l Labels) String() string

type LocationsWriter

type LocationsWriter struct {
	IsComplete         *array.BooleanBuilder
	LocationsList      *array.ListBuilder
	Locations          *array.StructBuilder
	Address            *array.Uint64Builder
	FrameType          *BinaryDictionaryRunEndBuilder
	MappingStart       *Uint64RunEndBuilder
	MappingLimit       *Uint64RunEndBuilder
	MappingOffset      *Uint64RunEndBuilder
	MappingFile        *BinaryDictionaryRunEndBuilder
	MappingBuildID     *BinaryDictionaryRunEndBuilder
	Lines              *array.ListBuilder
	Line               *array.StructBuilder
	LineNumber         *array.Int64Builder
	ColumnNumber       *array.Uint64Builder
	FunctionName       *array.BinaryDictionaryBuilder
	FunctionSystemName *array.BinaryDictionaryBuilder
	FunctionFilename   *BinaryDictionaryRunEndBuilder
	FunctionStartLine  *array.Int64Builder
}

func NewLocationsWriter

func NewLocationsWriter(mem memory.Allocator) *LocationsWriter

func (*LocationsWriter) NewRecord

func (w *LocationsWriter) NewRecord(stacktraceIDs *array.Binary) arrow.Record

func (*LocationsWriter) Release

func (w *LocationsWriter) Release()

type OfflineModeConfig added in v0.36.0

type OfflineModeConfig struct {
	StoragePath      string
	RotationInterval time.Duration
}

type ParcaReporter

type ParcaReporter struct {
	// contains filtered or unexported fields
}

ParcaReporter receives and transforms information to be OTLP/profiles compliant.

func New

func New(
	mem memory.Allocator,
	client profilestoregrpc.ProfileStoreServiceClient,
	debuginfoClient debuginfogrpc.DebuginfoServiceClient,
	externalLabels []Label,
	reportInterval time.Duration,
	labelTTL time.Duration,
	stripTextSection bool,
	symbolUploadConcurrency int,
	disableSymbolUpload bool,
	samplesPerSecond int64,
	cacheSize uint32,
	uploaderQueueSize uint32,
	cacheDir string,
	nodeName string,
	relabelConfigs []*relabel.Config,
	agentRevision string,
	reg prometheus.Registerer,
	offlineModeConfig *OfflineModeConfig,
	enableOOMProf bool,
	enableAllocs bool,
	disableCPULabel bool,
	disableThreadIDLabel bool,
	disableThreadCommLabel bool,
	useV2Schema bool,
) (*ParcaReporter, error)

New creates a ParcaReporter.

func (*ParcaReporter) ExecutableKnown added in v0.35.0

func (r *ParcaReporter) ExecutableKnown(fileID libpf.FileID) bool

ExecutableKnown returns true if the metadata of the Executable specified by fileID is cached in the reporter.

func (*ParcaReporter) ReportCountForTrace

func (r *ParcaReporter) ReportCountForTrace(_ libpf.TraceHash, _ uint16, _ *samples.TraceEventMeta)

ReportCountForTrace is a NOP for ParcaReporter.

func (*ParcaReporter) ReportExecutable added in v0.47.0

func (r *ParcaReporter) ReportExecutable(args *reporter.ExecutableMetadata)

ReportExecutable accepts a fileID with the corresponding filename and caches this information.

func (*ParcaReporter) ReportFramesForTrace

func (r *ParcaReporter) ReportFramesForTrace(_ *libpf.Trace)

ReportFramesForTrace is a NOP for ParcaReporter.

func (*ParcaReporter) ReportHostMetadata

func (r *ParcaReporter) ReportHostMetadata(metadataMap map[string]string)

ReportHostMetadata enqueues host metadata.

func (*ParcaReporter) ReportHostMetadataBlocking

func (r *ParcaReporter) ReportHostMetadataBlocking(_ context.Context,
	metadataMap map[string]string, _ int, _ time.Duration,
) error

ReportHostMetadataBlocking enqueues host metadata.

func (*ParcaReporter) ReportMetrics

func (r *ParcaReporter) ReportMetrics(_ uint32, ids []uint32, values []int64)

ReportMetrics records metrics.

func (*ParcaReporter) ReportTraceEvent

func (r *ParcaReporter) ReportTraceEvent(trace *libpf.Trace,
	meta *samples.TraceEventMeta,
) error

ReportTraceEvent enqueues reported trace events for the OTLP reporter.

func (*ParcaReporter) SampleEvents added in v0.42.0

func (r *ParcaReporter) SampleEvents(oomprofSamples []oomprof.Sample, meta oomprof.SampleMeta) error

SampleEvents implements the oomprof.Reporter interface. It converts oomprof samples to trace events and reports them.

func (*ParcaReporter) Start added in v0.35.0

func (r *ParcaReporter) Start(mainCtx context.Context) error

func (*ParcaReporter) Stop

func (r *ParcaReporter) Stop()

Stop triggers a graceful shutdown of ParcaReporter.

func (*ParcaReporter) SupportsReportTraceEvent

func (r *ParcaReporter) SupportsReportTraceEvent() bool

type ParcaSymbolUploader

type ParcaSymbolUploader struct {
	// contains filtered or unexported fields
}

func NewParcaSymbolUploader

func NewParcaSymbolUploader(
	client debuginfogrpc.DebuginfoServiceClient,
	cacheSize uint32,
	stripTextSection bool,
	queueSize uint32,
	workerNum int,
	cacheDir string,
	uploadRequestBytes prometheus.Counter,
) (*ParcaSymbolUploader, error)

func (*ParcaSymbolUploader) Run

Run starts the upload workers.

func (*ParcaSymbolUploader) Upload

func (u *ParcaSymbolUploader) Upload(ctx context.Context, fileID libpf.FileID, fileName string, buildID string,
	open func() (process.ReadAtCloser, error))

Upload enqueues a file for upload if it's not already in progress, or if it is marked not to be retried.

type SampleWriter

type SampleWriter struct {
	StacktraceID *BinaryDictionaryRunEndBuilder
	Value        *array.Int64Builder
	Producer     *BinaryDictionaryRunEndBuilder
	SampleType   *BinaryDictionaryRunEndBuilder
	SampleUnit   *BinaryDictionaryRunEndBuilder
	PeriodType   *BinaryDictionaryRunEndBuilder
	PeriodUnit   *BinaryDictionaryRunEndBuilder
	Temporality  *BinaryDictionaryRunEndBuilder
	Period       *Int64RunEndBuilder
	Duration     *Int64RunEndBuilder
	Timestamp    *Int64RunEndBuilder
	// contains filtered or unexported fields
}

func NewSampleWriter

func NewSampleWriter(mem memory.Allocator) *SampleWriter

func (*SampleWriter) Label

func (w *SampleWriter) Label(labelName string) *BinaryDictionaryRunEndBuilder

func (*SampleWriter) LabelAll

func (w *SampleWriter) LabelAll(labelName, labelValue string)

func (*SampleWriter) NewRecord

func (w *SampleWriter) NewRecord() arrow.Record

func (*SampleWriter) Release

func (w *SampleWriter) Release()

type SampleWriterV2 added in v0.47.1

type SampleWriterV2 struct {

	// Stacktrace with deduplication
	Stacktrace   *StacktraceDictBuilderV2
	StacktraceID *extensions.UUIDBuilder

	// Sample data fields (same as v1)
	Value       *array.Int64Builder
	Producer    *StringRunEndBuilder
	SampleType  *StringRunEndBuilder
	SampleUnit  *StringRunEndBuilder
	PeriodType  *StringRunEndBuilder
	PeriodUnit  *StringRunEndBuilder
	Temporality *StringRunEndBuilder
	Period      *Int64RunEndBuilder
	Duration    *Uint64RunEndBuilder
	Timestamp   *array.TimestampBuilder
	// contains filtered or unexported fields
}

SampleWriterV2 writes samples with inline stacktraces using the v2 schema.

func NewSampleWriterV2 added in v0.47.1

func NewSampleWriterV2(mem memory.Allocator) *SampleWriterV2

NewSampleWriterV2 creates a new SampleWriterV2.

func (*SampleWriterV2) Label added in v0.47.1

Label returns the label builder for the given label name, creating it if necessary.

func (*SampleWriterV2) LabelAll added in v0.47.1

func (w *SampleWriterV2) LabelAll(labelName, labelValue string)

LabelAll sets a label value for all samples in the current batch.

func (*SampleWriterV2) NewRecord added in v0.47.1

func (w *SampleWriterV2) NewRecord() arrow.Record

NewRecord builds and returns an Arrow record with all samples.

func (*SampleWriterV2) Release added in v0.47.1

func (w *SampleWriterV2) Release()

Release releases all builder resources.

type StacktraceDictBuilderV2 added in v0.47.1

type StacktraceDictBuilderV2 struct {

	// Track locations for deduplication: frame -> dictionary index
	LocationIndex map[libpf.Frame]uint32
	// contains filtered or unexported fields
}

StacktraceDictBuilderV2 deduplicates stacktraces using TraceHash and ListView. Structure: ListView[Dictionary[Uint32, LocationTypeV2]] - Dictionary handles location-level deduplication (manual construction) - ListView handles stacktrace-level deduplication via offset/size reuse - Functions within lines are dictionary-encoded for deduplication

Since Arrow v16 doesn't have StructDictionaryBuilder, we build the dictionaries manually: separate arrays for values, uint32 arrays for indices.

func NewStacktraceDictBuilderV2 added in v0.47.1

func NewStacktraceDictBuilderV2(mem memory.Allocator) *StacktraceDictBuilderV2

NewStacktraceDictBuilderV2 creates a new StacktraceDictBuilderV2.

func (*StacktraceDictBuilderV2) AppendNull added in v0.47.1

func (b *StacktraceDictBuilderV2) AppendNull()

AppendNull appends a null stacktrace. For ListView, null is represented by size=0 with any offset.

func (*StacktraceDictBuilderV2) AppendStacktrace added in v0.47.1

func (b *StacktraceDictBuilderV2) AppendStacktrace(
	traceHash libpf.TraceHash,
	frames libpf.Frames,
	appendLocation func(frame libpf.Frame) uint32,
)

AppendStacktrace appends a stacktrace, reusing ListView dimensions for duplicates. The appendLocation callback is called for each frame; it handles dedup, frame resolution, and writing to the arrow builders, returning the dictionary index.

func (*StacktraceDictBuilderV2) Len added in v0.47.1

func (b *StacktraceDictBuilderV2) Len() int

Len returns the number of stacktraces appended.

func (*StacktraceDictBuilderV2) NewArray added in v0.47.1

func (b *StacktraceDictBuilderV2) NewArray() arrow.Array

NewArray builds and returns the ListView[Dictionary[Uint32, LocationTypeV2]] array. This manually constructs the full array hierarchy since Arrow v16 lacks StructDictionaryBuilder. The hierarchy is: ListView → Dict[Uint32, LocationStruct] → ... → lines list → Dict[Uint32, FunctionStruct].

func (*StacktraceDictBuilderV2) Release added in v0.47.1

func (b *StacktraceDictBuilderV2) Release()

Release releases all builder resources.

func (*StacktraceDictBuilderV2) UniqueStacktraces added in v0.47.1

func (b *StacktraceDictBuilderV2) UniqueStacktraces() int

UniqueStacktraces returns the number of unique stacktraces.

type Stater

type Stater interface {
	Stat() (os.FileInfo, error)
}

type StringRunEndBuilder added in v0.47.1

type StringRunEndBuilder struct {
	// contains filtered or unexported fields
}

func (*StringRunEndBuilder) AppendNull added in v0.47.1

func (b *StringRunEndBuilder) AppendNull()

func (*StringRunEndBuilder) AppendString added in v0.47.1

func (b *StringRunEndBuilder) AppendString(v string)

func (*StringRunEndBuilder) AppendStringN added in v0.47.1

func (b *StringRunEndBuilder) AppendStringN(v string, n uint64)

func (*StringRunEndBuilder) EnsureLength added in v0.47.1

func (b *StringRunEndBuilder) EnsureLength(l int)

func (*StringRunEndBuilder) Len added in v0.47.1

func (b *StringRunEndBuilder) Len() int

func (*StringRunEndBuilder) NewArray added in v0.47.1

func (b *StringRunEndBuilder) NewArray() arrow.Array

func (*StringRunEndBuilder) Release added in v0.47.1

func (b *StringRunEndBuilder) Release()

type Uint64RunEndBuilder

type Uint64RunEndBuilder struct {
	// contains filtered or unexported fields
}

func (*Uint64RunEndBuilder) Append added in v0.47.1

func (b *Uint64RunEndBuilder) Append(v uint64)

func (*Uint64RunEndBuilder) AppendN

func (b *Uint64RunEndBuilder) AppendN(v uint64, n uint64)

func (*Uint64RunEndBuilder) NewArray

func (b *Uint64RunEndBuilder) NewArray() arrow.Array

func (*Uint64RunEndBuilder) Release

func (b *Uint64RunEndBuilder) Release()

Directories

Path Synopsis
containermetadata provides functionality for retrieving the kubernetes pod and container metadata or the docker container metadata for a particular PID.
containermetadata provides functionality for retrieving the kubernetes pod and container metadata or the docker container metadata for a particular PID.

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL