Documentation
Index
- Constants
- Variables
- func ArrowSamplesField(profileLabelFields []arrow.Field) []arrow.Field
- func SampleSchema(profileLabelFields []arrow.Field) *arrow.Schema
- type BinaryDictionaryRunEndBuilder
- func (b *BinaryDictionaryRunEndBuilder) Append(v []byte)
- func (b *BinaryDictionaryRunEndBuilder) AppendN(v []byte, n uint64)
- func (b *BinaryDictionaryRunEndBuilder) AppendNull()
- func (b *BinaryDictionaryRunEndBuilder) AppendString(v string)
- func (b *BinaryDictionaryRunEndBuilder) AppendStringN(v string, n uint64)
- func (b *BinaryDictionaryRunEndBuilder) EnsureLength(l int)
- func (b *BinaryDictionaryRunEndBuilder) Len() int
- func (b *BinaryDictionaryRunEndBuilder) NewArray() arrow.Array
- func (b *BinaryDictionaryRunEndBuilder) Release()
- type GrpcDebuginfoUploadServiceClient
- type GrpcUploadClient
- type Int64RunEndBuilder
- type Label
- type Labels
- type LocationsWriter
- type OfflineModeConfig
- type ParcaReporter
- func (r *ParcaReporter) ExecutableKnown(fileID libpf.FileID) bool
- func (r *ParcaReporter) ExecutableMetadata(args *reporter.ExecutableMetadataArgs)
- func (r *ParcaReporter) ReportCountForTrace(_ libpf.TraceHash, _ uint16, _ *samples.TraceEventMeta)
- func (r *ParcaReporter) ReportFramesForTrace(_ *libpf.Trace)
- func (r *ParcaReporter) ReportHostMetadata(metadataMap map[string]string)
- func (r *ParcaReporter) ReportHostMetadataBlocking(_ context.Context, metadataMap map[string]string, _ int, _ time.Duration) error
- func (r *ParcaReporter) ReportMetrics(_ uint32, ids []uint32, values []int64)
- func (r *ParcaReporter) ReportTraceEvent(trace *libpf.Trace, meta *samples.TraceEventMeta) error
- func (r *ParcaReporter) SampleEvents(oomprofSamples []oomprof.Sample, meta oomprof.SampleMeta) error
- func (r *ParcaReporter) Start(mainCtx context.Context) error
- func (r *ParcaReporter) Stop()
- func (r *ParcaReporter) SupportsReportTraceEvent() bool
- type ParcaSymbolUploader
- type SampleWriter
- type Stater
- type Uint64RunEndBuilder
Constants ¶
const ( MetadataSchemaVersion = "parca_write_schema_version" MetadataSchemaVersionV1 = "v1" ColumnLabelsPrefix = "labels." )
const ( // ChunkSize 8MB is the size of the chunks in which debuginfo files are // uploaded and downloaded. AWS S3 has a minimum of 5MB for multi-part uploads // and a maximum of 5GB per part, and a default of 8MB. ChunkSize = 1024 * 1024 * 8 // MaxMsgSize is the maximum message size the server can receive or send. By default, it is 64MB. MaxMsgSize = 1024 * 1024 * 64 )
const DATA_FILE_COMPRESSED_EXTENSION string = ".padata.zst"
const DATA_FILE_EXTENSION string = ".padata"
const (
ReasonUploadInProgress = "A previous upload is still in-progress and not stale yet (only stale uploads can be retried)."
)
Variables ¶
var ( LocationsField = arrow.Field{ Name: "locations", Type: arrow.ListOf(arrow.StructOf([]arrow.Field{{ Name: "address", Type: arrow.PrimitiveTypes.Uint64, }, { Name: "frame_type", Type: arrow.RunEndEncodedOf( arrow.PrimitiveTypes.Int32, &arrow.DictionaryType{IndexType: arrow.PrimitiveTypes.Uint32, ValueType: arrow.BinaryTypes.Binary}, ), }, { Name: "mapping_start", Type: arrow.RunEndEncodedOf(arrow.PrimitiveTypes.Int32, arrow.PrimitiveTypes.Uint64), }, { Name: "mapping_limit", Type: arrow.RunEndEncodedOf(arrow.PrimitiveTypes.Int32, arrow.PrimitiveTypes.Uint64), }, { Name: "mapping_offset", Type: arrow.RunEndEncodedOf(arrow.PrimitiveTypes.Int32, arrow.PrimitiveTypes.Uint64), }, { Name: "mapping_file", Type: arrow.RunEndEncodedOf( arrow.PrimitiveTypes.Int32, &arrow.DictionaryType{IndexType: arrow.PrimitiveTypes.Uint32, ValueType: arrow.BinaryTypes.Binary}, ), }, { Name: "mapping_build_id", Type: arrow.RunEndEncodedOf( arrow.PrimitiveTypes.Int32, &arrow.DictionaryType{IndexType: arrow.PrimitiveTypes.Uint32, ValueType: arrow.BinaryTypes.Binary}, ), }, { Name: "lines", Type: arrow.ListOf(arrow.StructOf([]arrow.Field{{ Name: "line", Type: arrow.PrimitiveTypes.Int64, }, { Name: "function_name", Type: &arrow.DictionaryType{IndexType: arrow.PrimitiveTypes.Uint32, ValueType: arrow.BinaryTypes.Binary}, }, { Name: "function_system_name", Type: &arrow.DictionaryType{IndexType: arrow.PrimitiveTypes.Uint32, ValueType: arrow.BinaryTypes.Binary}, }, { Name: "function_filename", Type: arrow.RunEndEncodedOf( arrow.PrimitiveTypes.Int32, &arrow.DictionaryType{IndexType: arrow.PrimitiveTypes.Uint32, ValueType: arrow.BinaryTypes.Binary}, ), }, { Name: "function_start_line", Type: arrow.PrimitiveTypes.Int64, }}...)), }}...)), } StacktraceIDField = arrow.Field{ Name: "stacktrace_id", Type: arrow.RunEndEncodedOf( arrow.PrimitiveTypes.Int32, &arrow.DictionaryType{IndexType: arrow.PrimitiveTypes.Uint32, ValueType: arrow.BinaryTypes.Binary}, ), } ValueField = arrow.Field{ Name: "value", 
Type: arrow.PrimitiveTypes.Int64, } ProducerField = arrow.Field{ Name: "producer", Type: arrow.RunEndEncodedOf( arrow.PrimitiveTypes.Int32, &arrow.DictionaryType{IndexType: arrow.PrimitiveTypes.Uint32, ValueType: arrow.BinaryTypes.Binary}, ), } SampleTypeField = arrow.Field{ Name: "sample_type", Type: arrow.RunEndEncodedOf( arrow.PrimitiveTypes.Int32, &arrow.DictionaryType{IndexType: arrow.PrimitiveTypes.Uint32, ValueType: arrow.BinaryTypes.Binary}, ), } SampleUnitField = arrow.Field{ Name: "sample_unit", Type: arrow.RunEndEncodedOf( arrow.PrimitiveTypes.Int32, &arrow.DictionaryType{IndexType: arrow.PrimitiveTypes.Uint32, ValueType: arrow.BinaryTypes.Binary}, ), } PeriodTypeField = arrow.Field{ Name: "period_type", Type: arrow.RunEndEncodedOf( arrow.PrimitiveTypes.Int32, &arrow.DictionaryType{IndexType: arrow.PrimitiveTypes.Uint32, ValueType: arrow.BinaryTypes.Binary}, ), } PeriodUnitField = arrow.Field{ Name: "period_unit", Type: arrow.RunEndEncodedOf( arrow.PrimitiveTypes.Int32, &arrow.DictionaryType{IndexType: arrow.PrimitiveTypes.Uint32, ValueType: arrow.BinaryTypes.Binary}, ), } TemporalityField = arrow.Field{ Name: "temporality", Type: arrow.RunEndEncodedOf( arrow.PrimitiveTypes.Int32, &arrow.DictionaryType{IndexType: arrow.PrimitiveTypes.Uint32, ValueType: arrow.BinaryTypes.Binary}, ), } PeriodField = arrow.Field{ Name: "period", Type: arrow.RunEndEncodedOf(arrow.PrimitiveTypes.Int32, arrow.PrimitiveTypes.Int64), } DurationField = arrow.Field{ Name: "duration", Type: arrow.RunEndEncodedOf(arrow.PrimitiveTypes.Int32, arrow.PrimitiveTypes.Int64), } TimestampField = arrow.Field{ Name: "timestamp", Type: arrow.RunEndEncodedOf(arrow.PrimitiveTypes.Int32, arrow.PrimitiveTypes.Int64), } )
var ErrDebuginfoAlreadyExists = errors.New("debug info already exists")
Functions ¶
Types ¶
type BinaryDictionaryRunEndBuilder ¶
type BinaryDictionaryRunEndBuilder struct {
// contains filtered or unexported fields
}
func (*BinaryDictionaryRunEndBuilder) Append ¶
func (b *BinaryDictionaryRunEndBuilder) Append(v []byte)
func (*BinaryDictionaryRunEndBuilder) AppendN ¶
func (b *BinaryDictionaryRunEndBuilder) AppendN(v []byte, n uint64)
func (*BinaryDictionaryRunEndBuilder) AppendNull ¶
func (b *BinaryDictionaryRunEndBuilder) AppendNull()
func (*BinaryDictionaryRunEndBuilder) AppendString ¶
func (b *BinaryDictionaryRunEndBuilder) AppendString(v string)
func (*BinaryDictionaryRunEndBuilder) AppendStringN ¶
func (b *BinaryDictionaryRunEndBuilder) AppendStringN(v string, n uint64)
func (*BinaryDictionaryRunEndBuilder) EnsureLength ¶
func (b *BinaryDictionaryRunEndBuilder) EnsureLength(l int)
func (*BinaryDictionaryRunEndBuilder) Len ¶
func (b *BinaryDictionaryRunEndBuilder) Len() int
func (*BinaryDictionaryRunEndBuilder) NewArray ¶
func (b *BinaryDictionaryRunEndBuilder) NewArray() arrow.Array
func (*BinaryDictionaryRunEndBuilder) Release ¶
func (b *BinaryDictionaryRunEndBuilder) Release()
type GrpcDebuginfoUploadServiceClient ¶
type GrpcDebuginfoUploadServiceClient interface {
Upload(ctx context.Context, opts ...grpc.CallOption) (debuginfogrpc.DebuginfoService_UploadClient, error)
}
type GrpcUploadClient ¶
type GrpcUploadClient struct {
GrpcDebuginfoUploadServiceClient
}
func NewGrpcUploadClient ¶
func NewGrpcUploadClient(client GrpcDebuginfoUploadServiceClient) *GrpcUploadClient
func (*GrpcUploadClient) Upload ¶
func (c *GrpcUploadClient) Upload(ctx context.Context, uploadInstructions *debuginfopb.UploadInstructions, r io.Reader) (uint64, error)
type Int64RunEndBuilder ¶
type Int64RunEndBuilder struct {
// contains filtered or unexported fields
}
func (*Int64RunEndBuilder) Append ¶
func (b *Int64RunEndBuilder) Append(v int64)
func (*Int64RunEndBuilder) NewArray ¶
func (b *Int64RunEndBuilder) NewArray() arrow.Array
func (*Int64RunEndBuilder) Release ¶
func (b *Int64RunEndBuilder) Release()
type LocationsWriter ¶
type LocationsWriter struct {
IsComplete *array.BooleanBuilder
LocationsList *array.ListBuilder
Locations *array.StructBuilder
Address *array.Uint64Builder
FrameType *BinaryDictionaryRunEndBuilder
MappingStart *Uint64RunEndBuilder
MappingLimit *Uint64RunEndBuilder
MappingOffset *Uint64RunEndBuilder
MappingFile *BinaryDictionaryRunEndBuilder
MappingBuildID *BinaryDictionaryRunEndBuilder
Lines *array.ListBuilder
Line *array.StructBuilder
LineNumber *array.Int64Builder
FunctionName *array.BinaryDictionaryBuilder
FunctionSystemName *array.BinaryDictionaryBuilder
FunctionFilename *BinaryDictionaryRunEndBuilder
FunctionStartLine *array.Int64Builder
}
func NewLocationsWriter ¶
func NewLocationsWriter(mem memory.Allocator) *LocationsWriter
func (*LocationsWriter) NewRecord ¶
func (w *LocationsWriter) NewRecord(stacktraceIDs *array.Binary) arrow.Record
func (*LocationsWriter) Release ¶
func (w *LocationsWriter) Release()
type OfflineModeConfig ¶ added in v0.36.0
type ParcaReporter ¶
type ParcaReporter struct {
// contains filtered or unexported fields
}
ParcaReporter receives and transforms information to be OTLP/profiles compliant.
func New ¶
func New( mem memory.Allocator, client profilestoregrpc.ProfileStoreServiceClient, debuginfoClient debuginfogrpc.DebuginfoServiceClient, externalLabels []Label, reportInterval time.Duration, labelTTL time.Duration, stripTextSection bool, symbolUploadConcurrency int, disableSymbolUpload bool, samplesPerSecond int64, cacheSize uint32, uploaderQueueSize uint32, cacheDir string, nodeName string, relabelConfigs []*relabel.Config, agentRevision string, reg prometheus.Registerer, offlineModeConfig *OfflineModeConfig, enableOOMProf bool, enableAllocs bool, ) (*ParcaReporter, error)
New creates a ParcaReporter.
func (*ParcaReporter) ExecutableKnown ¶ added in v0.35.0
func (r *ParcaReporter) ExecutableKnown(fileID libpf.FileID) bool
ExecutableKnown returns true if the metadata of the Executable specified by fileID is cached in the reporter.
func (*ParcaReporter) ExecutableMetadata ¶
func (r *ParcaReporter) ExecutableMetadata(args *reporter.ExecutableMetadataArgs)
ExecutableMetadata accepts a fileID with the corresponding filename and caches this information.
func (*ParcaReporter) ReportCountForTrace ¶
func (r *ParcaReporter) ReportCountForTrace(_ libpf.TraceHash, _ uint16, _ *samples.TraceEventMeta)
ReportCountForTrace is a NOP for ParcaReporter.
func (*ParcaReporter) ReportFramesForTrace ¶
func (r *ParcaReporter) ReportFramesForTrace(_ *libpf.Trace)
ReportFramesForTrace is a NOP for ParcaReporter.
func (*ParcaReporter) ReportHostMetadata ¶
func (r *ParcaReporter) ReportHostMetadata(metadataMap map[string]string)
ReportHostMetadata enqueues host metadata.
func (*ParcaReporter) ReportHostMetadataBlocking ¶
func (r *ParcaReporter) ReportHostMetadataBlocking(_ context.Context, metadataMap map[string]string, _ int, _ time.Duration) error
ReportHostMetadataBlocking enqueues host metadata.
func (*ParcaReporter) ReportMetrics ¶
func (r *ParcaReporter) ReportMetrics(_ uint32, ids []uint32, values []int64)
ReportMetrics records metrics.
func (*ParcaReporter) ReportTraceEvent ¶
func (r *ParcaReporter) ReportTraceEvent(trace *libpf.Trace, meta *samples.TraceEventMeta) error
ReportTraceEvent enqueues reported trace events for the OTLP reporter.
func (*ParcaReporter) SampleEvents ¶ added in v0.42.0
func (r *ParcaReporter) SampleEvents(oomprofSamples []oomprof.Sample, meta oomprof.SampleMeta) error
SampleEvents implements the oomprof.Reporter interface. It converts oomprof samples to trace events and reports them.
func (*ParcaReporter) Start ¶ added in v0.35.0
func (r *ParcaReporter) Start(mainCtx context.Context) error
func (*ParcaReporter) Stop ¶
func (r *ParcaReporter) Stop()
Stop triggers a graceful shutdown of ParcaReporter.
func (*ParcaReporter) SupportsReportTraceEvent ¶
func (r *ParcaReporter) SupportsReportTraceEvent() bool
type ParcaSymbolUploader ¶
type ParcaSymbolUploader struct {
// contains filtered or unexported fields
}
func NewParcaSymbolUploader ¶
func NewParcaSymbolUploader( client debuginfogrpc.DebuginfoServiceClient, cacheSize uint32, stripTextSection bool, queueSize uint32, workerNum int, cacheDir string, uploadRequestBytes prometheus.Counter, ) (*ParcaSymbolUploader, error)
type SampleWriter ¶
type SampleWriter struct {
StacktraceID *BinaryDictionaryRunEndBuilder
Value *array.Int64Builder
Producer *BinaryDictionaryRunEndBuilder
SampleType *BinaryDictionaryRunEndBuilder
SampleUnit *BinaryDictionaryRunEndBuilder
PeriodType *BinaryDictionaryRunEndBuilder
PeriodUnit *BinaryDictionaryRunEndBuilder
Temporality *BinaryDictionaryRunEndBuilder
Period *Int64RunEndBuilder
Duration *Int64RunEndBuilder
Timestamp *Int64RunEndBuilder
// contains filtered or unexported fields
}
func NewSampleWriter ¶
func NewSampleWriter(mem memory.Allocator) *SampleWriter
func (*SampleWriter) Label ¶
func (w *SampleWriter) Label(labelName string) *BinaryDictionaryRunEndBuilder
func (*SampleWriter) LabelAll ¶
func (w *SampleWriter) LabelAll(labelName, labelValue string)
func (*SampleWriter) NewRecord ¶
func (w *SampleWriter) NewRecord() arrow.Record
func (*SampleWriter) Release ¶
func (w *SampleWriter) Release()
type Uint64RunEndBuilder ¶
type Uint64RunEndBuilder struct {
// contains filtered or unexported fields
}
func (*Uint64RunEndBuilder) AppendN ¶
func (b *Uint64RunEndBuilder) AppendN(v uint64, n uint64)
func (*Uint64RunEndBuilder) NewArray ¶
func (b *Uint64RunEndBuilder) NewArray() arrow.Array
func (*Uint64RunEndBuilder) Release ¶
func (b *Uint64RunEndBuilder) Release()
Source Files ¶
Directories ¶
| Path | Synopsis |
|---|---|
| containermetadata | containermetadata provides functionality for retrieving the kubernetes pod and container metadata or the docker container metadata for a particular PID. |