Documentation
¶
Index ¶
- Constants
- Variables
- func AggregateData(newRec, baseRec *record.Record, ops []*comm.CallOption) bool
- func CacheDataInMemory() bool
- func CacheMetaInMemory() bool
- func CompactRecovery(path string, group *CompactGroup)
- func DecodeBooleanBlock(in []byte, out *[]byte, ctx *CoderContext) ([]bool, error)
- func DecodeFloatBlock(in []byte, out *[]byte, ctx *CoderContext) ([]float64, error)
- func DecodeIntegerBlock(in []byte, out *[]byte, ctx *CoderContext) ([]int64, error)
- func DecodeStringBlock(in []byte, out *[]byte, dstOffset *[]uint32, ctx *CoderContext) ([]byte, []uint32, error)
- func DecodeTimestampBlock(in []byte, out *[]byte, ctx *CoderContext) ([]int64, error)
- func DecodeUnsignedBlock(in []byte, out *[]byte, ctx *CoderContext) ([]uint64, error)
- func EnableMmapRead(en bool)
- func EnableReadCache(readCacheLimit uint64)
- func EncodeBooleanBlock(in, out []byte, ctx *CoderContext) ([]byte, error)
- func EncodeFloatBlock(in []byte, out []byte, ctx *CoderContext) ([]byte, error)
- func EncodeIntegerBlock(in, out []byte, ctx *CoderContext) ([]byte, error)
- func EncodeStringBlock(in []byte, offset []uint32, out []byte, ctx *CoderContext) ([]byte, error)
- func EncodeTimestampBlock(in, out []byte, ctx *CoderContext) ([]byte, error)
- func EncodeUnsignedBlock(in, out []byte, ctx *CoderContext) ([]byte, error)
- func EstimateBufferSize(recSize int, rows int) int
- func FileOperation(f TSSPFile, op func())
- func FilterByField(rec *record.Record, filterMap map[string]interface{}, con influxql.Expr, ...) *record.Record
- func FilterByOpts(rec *record.Record, opt *FilterOptions) *record.Record
- func FilterByTime(rec *record.Record, tr record.TimeRange) *record.Record
- func FilterByTimeDescend(rec *record.Record, tr record.TimeRange) *record.Record
- func GenLogFileName(logSeq *uint64) string
- func GetTmpTsspFileSuffix() string
- func Init()
- func InitDecFunctions()
- func IsInterfaceNil(value interface{}) bool
- func IsTempleFile(name string) bool
- func MaxRowsPerSegment() int
- func MergeFlag() int32
- func MergeRecovery(path string, name string, ctx *mergeContext)
- func NewDiskFileReader(f fileops.File, lock *string) *diskFileReader
- func NewDiskWriter(lw NameReadWriterCloser, bufferSize int, lockPath *string) *diskWriter
- func NewIndexWriter(indexName string, cacheMeta bool, limitCompact bool, lockPath *string) *indexWriter
- func NewLastMergeTime() *lastMergeTime
- func NewMemReaderEvictCtx() *memReaderEvictCtx
- func NewMergeContext(mst string) *mergeContext
- func NewMergePerformer(ur *UnorderedReader, stat *statistics.MergeStatItem) *mergePerformer
- func NonStreamingCompaction(fi FilesInfo) bool
- func PutDataCoder(coder DataCoder)
- func PutIDTimePairs(pair *IdTimePairs)
- func ReleaseColumnBuilder(b PreAggBuilder)
- func ReleaseMsBuilder(msb *MsBuilder)
- func RenameTmpFiles(newFiles []TSSPFile) error
- func ResetAggregateData(newRec *record.Record, ops []*comm.CallOption)
- func SegMergeFlag(v int32)
- func SetCacheDataBlock(en bool)
- func SetCacheMetaData(en bool)
- func SetCompactLimit(bytesPerSec int64, burstLimit int64)
- func SetImmTableMaxMemoryPercentage(sysTotalMem, percentage int)
- func SetMaxCompactor(n int)
- func SetMaxFullCompactor(n int)
- func SetMaxRowsPerSegment(maxRowsPerSegmentLimit int)
- func SetMaxSegmentLimit(limit int)
- func SetSnapshotLimit(bytesPerSec int64, burstLimit int64)
- func SnapshotLimit() bool
- func SumFilesSize(files []TSSPFile) int64
- func UnrefAll(files ...*TSSPFiles)
- func UnrefFiles(files ...TSSPFile)
- func UnrefFilesReader(files ...TSSPFile)
- func UnrefTSSPFiles(files *TSSPFiles)
- func ZSTDCompressBound(srcSize int) int
- func ZigZagDecode(v uint64) int64
- func ZigZagEncode(v int64) uint64
- type Boolean
- type BooleanPreAgg
- type BufferReader
- type BytesBuffer
- func (w *BytesBuffer) Bytes() []byte
- func (w *BytesBuffer) Len() int
- func (w *BytesBuffer) Read(p []byte) (n int, err error)
- func (w *BytesBuffer) ReadByte() (b byte, err error)
- func (w *BytesBuffer) Reset(b []byte)
- func (w *BytesBuffer) Write(p []byte) (n int, err error)
- func (w *BytesBuffer) WriteByte(b byte) error
- type ChunkDataBuilder
- type ChunkIterator
- type ChunkIterators
- func (c *ChunkIterators) Close()
- func (c *ChunkIterators) Len() int
- func (c *ChunkIterators) Less(i, j int) bool
- func (c *ChunkIterators) Next() (uint64, *record.Record, error)
- func (c *ChunkIterators) Pop() interface{}
- func (c *ChunkIterators) Push(v interface{})
- func (c *ChunkIterators) Swap(i, j int)
- func (c *ChunkIterators) WithLog(log *Log.Logger)
- type ChunkMeta
- func (m *ChunkMeta) Clone() *ChunkMeta
- func (m *ChunkMeta) GetColMeta() []ColumnMeta
- func (m *ChunkMeta) GetSid() (sid uint64)
- func (m *ChunkMeta) Len() int
- func (m *ChunkMeta) Less(i, j int) bool
- func (m *ChunkMeta) MinMaxTime() (min int64, max int64)
- func (m *ChunkMeta) Rows(ab PreAggBuilder) int
- func (m *ChunkMeta) SegmentCount() int
- func (m *ChunkMeta) Size() int
- func (m *ChunkMeta) Swap(i, j int)
- func (m *ChunkMeta) TimeMeta() *ColumnMeta
- type CoderContext
- type ColumnBuilder
- type ColumnIterator
- func (itr *ColumnIterator) Close()
- func (itr *ColumnIterator) Error() error
- func (itr *ColumnIterator) IncrChunkUsed()
- func (itr *ColumnIterator) NextChunkMeta() bool
- func (itr *ColumnIterator) NextColumn(colIdx int) (*record.Field, bool)
- func (itr *ColumnIterator) PutCol(col *record.ColVal)
- func (itr *ColumnIterator) Run(p ColumnIteratorPerformer) error
- type ColumnIteratorPerformer
- type ColumnMeta
- type ColumnReader
- type CompactGroup
- type CompactedFileInfo
- type Config
- type DataCoder
- type DiskFileReader
- type FileIterator
- type FileIterators
- type FileWriter
- type FilesInfo
- type FilterOptions
- type Float
- type FloatPreAgg
- type IdTimePairs
- func (p *IdTimePairs) Add(id uint64, tm int64)
- func (p *IdTimePairs) AddRowCounts(rowCounts int64)
- func (p *IdTimePairs) Len() int
- func (p *IdTimePairs) Marshal(isOrder bool, dst []byte, ctx *CoderContext) []byte
- func (p *IdTimePairs) Reset(name string)
- func (p *IdTimePairs) Unmarshal(isOrder bool, src []byte) ([]byte, error)
- type InMerge
- type Integer
- type IntegerPreAgg
- type LimitWriter
- type Limiter
- type Location
- type LocationCursor
- func (l *LocationCursor) AddLocation(loc *Location)
- func (l *LocationCursor) AddRef()
- func (l *LocationCursor) Len() int
- func (l *LocationCursor) Less(i, j int) bool
- func (l *LocationCursor) ReadData(filterOpts *FilterOptions, dst *record.Record) (*record.Record, error)
- func (l *LocationCursor) ReadMeta(filterOpts *FilterOptions, dst *record.Record) (*record.Record, error)
- func (l *LocationCursor) ReadOutOfOrderMeta(filterOpts *FilterOptions, dst *record.Record) (*record.Record, error)
- func (l *LocationCursor) Reverse()
- func (l *LocationCursor) Swap(i, j int)
- func (l *LocationCursor) Unref()
- type MemBlock
- func (mb *MemBlock) AppendDataBlock(srcData []byte)
- func (mb *MemBlock) CopyBlocks(src MemoryReader)
- func (mb *MemBlock) DataBlocks() [][]byte
- func (mb *MemBlock) DataInMemory() bool
- func (mb *MemBlock) FreeMemory() int64
- func (mb *MemBlock) LoadIntoMemory(dr DiskFileReader, tr *Trailer, metaIndexItems []MetaIndex) error
- func (mb *MemBlock) MetaBlocks() [][]byte
- func (mb *MemBlock) MetaInMemory() bool
- func (mb *MemBlock) ReadChunkMetaBlock(metaIdx int, sid uint64, count uint32) []byte
- func (mb *MemBlock) ReadDataBlock(offset int64, size uint32, dstPtr *[]byte) ([]byte, error)
- func (mb *MemBlock) ReserveDataBlock(n int)
- func (mb *MemBlock) ReserveMetaBlock(n int)
- func (mb *MemBlock) Reset()
- func (mb *MemBlock) SetMetaBlocks(blocks [][]byte)
- func (mb *MemBlock) Size() int64
- type MemoryReader
- type MetaIndex
- type MmsIdTime
- type MmsReaders
- type MmsTables
- func (m *MmsTables) AddRowCountsBySid(measurement string, sid uint64, rowCounts int64) error
- func (m *MmsTables) AddTSSPFiles(name string, isOrder bool, files ...TSSPFile)
- func (m *MmsTables) AddTable(mb *MsBuilder, isOrder bool, tmp bool)
- func (m *MmsTables) Close() error
- func (m *MmsTables) CompactDone(files []string)
- func (m *MmsTables) CompactionDisable()
- func (m *MmsTables) CompactionEnable()
- func (m *MmsTables) CompactionEnabled() bool
- func (m *MmsTables) DisableCompAndMerge()
- func (m *MmsTables) DropMeasurement(_ context.Context, name string) error
- func (m *MmsTables) EnableCompAndMerge()
- func (m *MmsTables) File(mstName string, fileName string, isOrder bool) TSSPFile
- func (m *MmsTables) FreeAllMemReader()
- func (m *MmsTables) FreeSequencer() bool
- func (m *MmsTables) FullCompact(shid uint64) error
- func (m *MmsTables) GetBothFilesRef(measurement string, hasTimeFilter bool, tr record.TimeRange) ([]TSSPFile, []TSSPFile)
- func (m *MmsTables) GetFileSeq() uint64
- func (m *MmsTables) GetLastFlushTimeBySid(measurement string, sid uint64) (int64, error)
- func (m *MmsTables) GetMstFileStat() *statistics.FileStat
- func (m *MmsTables) GetOutOfOrderFileNum() int
- func (m *MmsTables) GetRowCountsBySid(measurement string, sid uint64) (int64, error)
- func (m *MmsTables) GetTSSPFiles(name string, isOrder bool) (files *TSSPFiles, ok bool)
- func (m *MmsTables) IsOutOfOrderFilesExist() bool
- func (m *MmsTables) LevelCompact(level uint16, shid uint64) error
- func (m *MmsTables) LevelPlan(level uint16) []*CompactGroup
- func (m *MmsTables) Listen(signal chan struct{}, onClose func())
- func (m *MmsTables) MergeDisable()
- func (m *MmsTables) MergeEnable()
- func (m *MmsTables) MergeEnabled() bool
- func (m *MmsTables) MergeOutOfOrder(shId uint64, force bool) error
- func (m *MmsTables) NewChunkIterators(group FilesInfo) (*ChunkIterators, error)
- func (m *MmsTables) NewFileIterators(group *CompactGroup) (FilesInfo, error)
- func (m *MmsTables) NewStreamIterators(group FilesInfo) (*StreamIterators, error)
- func (m *MmsTables) NewStreamWriteFile(mst string) *StreamWriteFile
- func (m *MmsTables) NextSequence() uint64
- func (m *MmsTables) Open() (int64, error)
- func (m *MmsTables) ReplaceDownSampleFiles(mstNames []string, originFiles [][]TSSPFile, newFiles [][]TSSPFile, ...) (err error)
- func (m *MmsTables) ReplaceFiles(name string, oldFiles, newFiles []TSSPFile, isOrder bool) (err error)
- func (m *MmsTables) Sequencer() *Sequencer
- func (m *MmsTables) SetAddFunc(addFunc func(int64))
- func (m *MmsTables) SetOpId(shardId uint64, opId uint64)
- func (m *MmsTables) Tier() uint64
- func (m *MmsTables) UnRefSequencer()
- func (m *MmsTables) Wait()
- type MsBuilder
- func (b *MsBuilder) FileVersion() uint64
- func (b *MsBuilder) Flush() error
- func (b *MsBuilder) MaxRowsPerSegment() int
- func (b *MsBuilder) Name() string
- func (b *MsBuilder) NewTSSPFile(tmp bool) (TSSPFile, error)
- func (b *MsBuilder) Reset()
- func (b *MsBuilder) Size() int64
- func (b *MsBuilder) WithLog(log *Log.Logger)
- func (b *MsBuilder) WriteData(id uint64, data *record.Record) error
- func (b *MsBuilder) WriteRecord(id uint64, data *record.Record, ...) (*MsBuilder, error)
- type NameReadWriterCloser
- type Offset
- type PreAggBuilder
- type PreAggBuilders
- type ReadContext
- func (d *ReadContext) GetOps() []*comm.CallOption
- func (d *ReadContext) InitPreAggBuilder()
- func (d *ReadContext) MatchPreAgg() bool
- func (d *ReadContext) Release()
- func (d *ReadContext) Reset()
- func (d *ReadContext) Set(ascending bool, tr record.TimeRange, onlyFirstOrLast bool, ...)
- func (d *ReadContext) SetOps(c []*comm.CallOption)
- func (d *ReadContext) SetTr(tr record.TimeRange)
- type Segment
- type SegmentRange
- type SegmentReader
- type Sequencer
- type StreamIterator
- type StreamIterators
- func (c *StreamIterators) Close()
- func (c *StreamIterators) FileVersion() uint64
- func (c *StreamIterators) Flush() error
- func (c *StreamIterators) Init(id uint64, chunkDataOffset int64, schema record.Schemas)
- func (c *StreamIterators) Len() int
- func (c *StreamIterators) Less(i, j int) bool
- func (c *StreamIterators) NewFile(addFileExt bool) error
- func (c *StreamIterators) NewTSSPFile(tmp bool) (TSSPFile, error)
- func (c *StreamIterators) Pop() interface{}
- func (c *StreamIterators) Push(v interface{})
- func (c *StreamIterators) Size() int64
- func (c *StreamIterators) Swap(i, j int)
- func (c *StreamIterators) WithLog(log *Log.Logger)
- type StreamIteratorsPool
- type StreamWriteFile
- func (c *StreamWriteFile) AppendColumn(ref record.Field) error
- func (c *StreamWriteFile) ChangeColumn(ref record.Field) error
- func (c *StreamWriteFile) ChangeSid(sid uint64)
- func (c *StreamWriteFile) Close(isError bool)
- func (c *StreamWriteFile) Flush() error
- func (c *StreamWriteFile) GetTSSPFile() TSSPFile
- func (c *StreamWriteFile) Init(id uint64, chunkDataOffset int64, schema record.Schemas)
- func (c *StreamWriteFile) InitFile(seq uint64) error
- func (c *StreamWriteFile) InitMergedFile(f TSSPFile) error
- func (c *StreamWriteFile) NewFile(addFileExt bool) error
- func (c *StreamWriteFile) NewTSSPFile(tmp bool) (TSSPFile, error)
- func (c *StreamWriteFile) Size() int64
- func (c *StreamWriteFile) WriteCurrentMeta() error
- func (c *StreamWriteFile) WriteData(id uint64, ref record.Field, col record.ColVal, timeCol *record.ColVal) error
- func (c *StreamWriteFile) WriteFile() error
- func (c *StreamWriteFile) WriteMeta(cm *ChunkMeta) error
- type String
- type StringPreAgg
- type TSSPFile
- type TSSPFileName
- func (n *TSSPFileName) Equal(other *TSSPFileName) bool
- func (n *TSSPFileName) ParseFileName(name string) error
- func (n *TSSPFileName) Path(dir string, tmp bool) string
- func (n *TSSPFileName) SetExtend(extend uint16)
- func (n *TSSPFileName) SetLevel(l uint16)
- func (n *TSSPFileName) SetMerge(merge uint16)
- func (n *TSSPFileName) SetOrder(v bool)
- func (n *TSSPFileName) SetSeq(seq uint64)
- func (n *TSSPFileName) String() string
- func (n *TSSPFileName) TmpPath(dir string) string
- type TSSPFileReader
- func (r *TSSPFileReader) AverageChunkRows() int
- func (r *TSSPFileReader) BlockHeader(meta *ChunkMeta, dst []record.Field) ([]record.Field, error)
- func (r *TSSPFileReader) ChunkMeta(id uint64, offset int64, size, itemCount uint32, metaIdx int, dst *ChunkMeta, ...) (*ChunkMeta, error)
- func (r *TSSPFileReader) ChunkMetaAt(index int) (*ChunkMeta, error)
- func (r *TSSPFileReader) Close() error
- func (r *TSSPFileReader) Contains(id uint64, tm record.TimeRange) bool
- func (r *TSSPFileReader) ContainsId(id uint64) bool
- func (r *TSSPFileReader) ContainsTime(tm record.TimeRange) bool
- func (r *TSSPFileReader) CreateTime() int64
- func (r *TSSPFileReader) FileName() string
- func (r *TSSPFileReader) FileSize() int64
- func (r *TSSPFileReader) FreeFileHandle() error
- func (r *TSSPFileReader) FreeMemory() int64
- func (r *TSSPFileReader) GetTSSPFileBytes(offset int64, size uint32, buf *[]byte) ([]byte, error)
- func (r *TSSPFileReader) InMemSize() int64
- func (r *TSSPFileReader) LoadComponents() error
- func (r *TSSPFileReader) LoadIntoMemory() error
- func (r *TSSPFileReader) MaxChunkRows() int
- func (r *TSSPFileReader) MetaIndex(id uint64, tr record.TimeRange) (int, *MetaIndex, error)
- func (r *TSSPFileReader) MetaIndexAt(idx int) (*MetaIndex, error)
- func (r *TSSPFileReader) MinMaxSeriesID() (min, max uint64, err error)
- func (r *TSSPFileReader) MinMaxTime() (min, max int64, err error)
- func (r *TSSPFileReader) Name() string
- func (r *TSSPFileReader) Open() error
- func (r *TSSPFileReader) Read(offset int64, size uint32, dst *[]byte) ([]byte, error)
- func (r *TSSPFileReader) ReadChunkMetaData(metaIdx int, m *MetaIndex, dst []ChunkMeta) ([]ChunkMeta, error)
- func (r *TSSPFileReader) ReadData(cm *ChunkMeta, segment int, dst *record.Record, decs *ReadContext) (*record.Record, error)
- func (r *TSSPFileReader) ReadDataBlock(offset int64, size uint32, dst *[]byte) (rb []byte, err error)
- func (r *TSSPFileReader) ReadMetaBlock(metaIdx int, id uint64, offset int64, size uint32, count uint32, dst *[]byte) (rb []byte, err error)
- func (r *TSSPFileReader) Ref()
- func (r *TSSPFileReader) Rename(newName string) error
- func (r *TSSPFileReader) Stat() *Trailer
- func (r *TSSPFileReader) Unref()
- func (r *TSSPFileReader) Version() uint64
- type TSSPFiles
- type TableData
- type TableReader
- type TableReaders
- type TableStat
- type TableStoreGC
- type TablesGC
- type TablesStore
- type Time
- type TimePreAgg
- type Tombstone
- type TombstoneFile
- type Trailer
- type UnorderedColumnReader
- type UnorderedReader
- func (r *UnorderedReader) AddFiles(files []TSSPFile)
- func (r *UnorderedReader) Close()
- func (r *UnorderedReader) InitTimes(sid uint64, maxTime int64) error
- func (r *UnorderedReader) Read(sid uint64, ref *record.Field, maxTime int64) (*record.ColVal, []int64, error)
- func (r *UnorderedReader) ReadAllTimes() []int64
- func (r *UnorderedReader) ReadRemain(sid uint64, cb remainCallback) error
- func (r *UnorderedReader) ReadSeriesSchemas(sid uint64, maxTime int64) record.Schemas
- func (r *UnorderedReader) ReadTimes(ref *record.Field, maxTime int64) []int64
Constants ¶
const ( DefaultMaxRowsPerSegment = 1000 DefaultMaxChunkMetaItemSize = 256 * 1024 DefaultMaxChunkMetaItemCount = 512 NonStreamingCompact = 2 StreamingCompact = 1 AutoCompact = 0 )
const ( // BlockFloat64 designates a block encodes float64 values. BlockFloat64 = byte(influx.Field_Type_Float) // BlockInteger designates a block encodes int64 values. BlockInteger = byte(influx.Field_Type_Int) // BlockBoolean designates a block encodes boolean values. BlockBoolean = byte(influx.Field_Type_Boolean) // BlockString designates a block encodes string values. BlockString = byte(influx.Field_Type_String) )
const ( PRELOAD = iota LOAD )
const ( MergeFirstAvgSize = 10 * 1024 * 1024 MergeFirstDstSize = 10 * 1024 * 1024 MergeFirstRatio = 0.5 )
const ( BLOOMFILTER_SIZE = 8 SERIESKEY_STATISTIC_SIZE = 24 COMPRESSION_RATIO = 2 )
const ( DownSampleLogDir = "downsample_log" TsspDirName = "tssp" )
const ( MB = 1024 * 1024 GB = 1024 * MB )
const (
CompactLevels = 7
)
const (
StringCompressedZstd = 2
)
Variables ¶
var ( ErrCompStopped = errors.New("compact stopped") ErrDownSampleStopped = errors.New("downSample stopped") ErrDroppingMst = errors.New("measurement is dropped") LevelCompactRule = []uint16{0, 1, 0, 2, 0, 3, 0, 1, 2, 3, 0, 4, 0, 5, 0, 1, 2, 6} LeveLMinGroupFiles = [CompactLevels]int{8, 4, 4, 4, 4, 4, 2} EnableMergeOutOfOrder = true MaxNumOfFileToMerge = 256 MaxSizeOfFileToMerge int64 = 512 * 1024 * 1024 // 512MB )
var ( MinMaxTimeLen = int(unsafe.Sizeof(SegmentRange{})) SegmentLen = (Segment{}).bytes() ColumnMetaLenMin = (ColumnMeta{}).bytes(1) ChunkMetaLen = int(unsafe.Sizeof(ChunkMeta{})-24*2) + MinMaxTimeLen ChunkMetaMinLen = ChunkMetaLen + ColumnMetaLenMin*2 MetaIndexLen = int(unsafe.Sizeof(MetaIndex{})) )
var (
CLog = Log.NewLogger(errno.ModuleCompact)
)
var ErrDirtyLog = errors.New("incomplete log file")
Functions ¶
func AggregateData ¶
func AggregateData(newRec, baseRec *record.Record, ops []*comm.CallOption) bool
func CacheDataInMemory ¶
func CacheDataInMemory() bool
func CacheMetaInMemory ¶
func CacheMetaInMemory() bool
func CompactRecovery ¶
func CompactRecovery(path string, group *CompactGroup)
func DecodeBooleanBlock ¶
func DecodeBooleanBlock(in []byte, out *[]byte, ctx *CoderContext) ([]bool, error)
func DecodeFloatBlock ¶
func DecodeFloatBlock(in []byte, out *[]byte, ctx *CoderContext) ([]float64, error)
func DecodeIntegerBlock ¶
func DecodeIntegerBlock(in []byte, out *[]byte, ctx *CoderContext) ([]int64, error)
func DecodeStringBlock ¶
func DecodeTimestampBlock ¶
func DecodeTimestampBlock(in []byte, out *[]byte, ctx *CoderContext) ([]int64, error)
func DecodeUnsignedBlock ¶
func DecodeUnsignedBlock(in []byte, out *[]byte, ctx *CoderContext) ([]uint64, error)
func EnableMmapRead ¶
func EnableMmapRead(en bool)
func EnableReadCache ¶
func EnableReadCache(readCacheLimit uint64)
func EncodeBooleanBlock ¶
func EncodeBooleanBlock(in, out []byte, ctx *CoderContext) ([]byte, error)
func EncodeFloatBlock ¶
func EncodeFloatBlock(in []byte, out []byte, ctx *CoderContext) ([]byte, error)
func EncodeIntegerBlock ¶
func EncodeIntegerBlock(in, out []byte, ctx *CoderContext) ([]byte, error)
func EncodeStringBlock ¶
func EncodeTimestampBlock ¶
func EncodeTimestampBlock(in, out []byte, ctx *CoderContext) ([]byte, error)
func EncodeUnsignedBlock ¶
func EncodeUnsignedBlock(in, out []byte, ctx *CoderContext) ([]byte, error)
func EstimateBufferSize ¶
func FileOperation ¶ added in v1.0.0
func FileOperation(f TSSPFile, op func())
func FilterByField ¶
func FilterByOpts ¶ added in v1.0.1
func FilterByOpts(rec *record.Record, opt *FilterOptions) *record.Record
func FilterByTimeDescend ¶
func GenLogFileName ¶ added in v1.0.0
func GetTmpTsspFileSuffix ¶ added in v1.0.0
func GetTmpTsspFileSuffix() string
func InitDecFunctions ¶
func InitDecFunctions()
func IsInterfaceNil ¶
func IsInterfaceNil(value interface{}) bool
func IsTempleFile ¶
func MaxRowsPerSegment ¶
func MaxRowsPerSegment() int
func MergeRecovery ¶
func NewDiskFileReader ¶
func NewDiskWriter ¶
func NewDiskWriter(lw NameReadWriterCloser, bufferSize int, lockPath *string) *diskWriter
func NewIndexWriter ¶
func NewLastMergeTime ¶ added in v1.0.0
func NewLastMergeTime() *lastMergeTime
func NewMemReaderEvictCtx ¶
func NewMemReaderEvictCtx() *memReaderEvictCtx
func NewMergeContext ¶ added in v1.0.0
func NewMergeContext(mst string) *mergeContext
func NewMergePerformer ¶ added in v1.0.0
func NewMergePerformer(ur *UnorderedReader, stat *statistics.MergeStatItem) *mergePerformer
func NonStreamingCompaction ¶
func PutDataCoder ¶
func PutDataCoder(coder DataCoder)
func PutIDTimePairs ¶
func PutIDTimePairs(pair *IdTimePairs)
func ReleaseColumnBuilder ¶
func ReleaseColumnBuilder(b PreAggBuilder)
func ReleaseMsBuilder ¶ added in v1.0.0
func ReleaseMsBuilder(msb *MsBuilder)
func RenameTmpFiles ¶
func ResetAggregateData ¶
func ResetAggregateData(newRec *record.Record, ops []*comm.CallOption)
func SegMergeFlag ¶
func SegMergeFlag(v int32)
func SetCacheDataBlock ¶
func SetCacheDataBlock(en bool)
func SetCacheMetaData ¶
func SetCacheMetaData(en bool)
func SetCompactLimit ¶
func SetImmTableMaxMemoryPercentage ¶
func SetImmTableMaxMemoryPercentage(sysTotalMem, percentage int)
func SetMaxCompactor ¶
func SetMaxCompactor(n int)
func SetMaxFullCompactor ¶
func SetMaxFullCompactor(n int)
func SetMaxRowsPerSegment ¶
func SetMaxRowsPerSegment(maxRowsPerSegmentLimit int)
func SetMaxSegmentLimit ¶
func SetMaxSegmentLimit(limit int)
func SetSnapshotLimit ¶
func SnapshotLimit ¶
func SnapshotLimit() bool
func SumFilesSize ¶ added in v1.0.0
func UnrefFiles ¶
func UnrefFiles(files ...TSSPFile)
func UnrefFilesReader ¶ added in v1.0.0
func UnrefFilesReader(files ...TSSPFile)
func UnrefTSSPFiles ¶ added in v1.0.0
func UnrefTSSPFiles(files *TSSPFiles)
func ZSTDCompressBound ¶
func ZigZagDecode ¶
func ZigZagEncode ¶
ZigZagEncode maps signed integers to unsigned integers using ZigZag encoding, as described at: https://developers.google.com/protocol-buffers/docs/encoding
Types ¶
type Boolean ¶
type Boolean struct {
// contains filtered or unexported fields
}
func GetBoolCoder ¶
func GetBoolCoder() *Boolean
func (*Boolean) SetEncodingType ¶
type BooleanPreAgg ¶
type BooleanPreAgg struct {
// contains filtered or unexported fields
}
func NewBooleanPreAgg ¶
func NewBooleanPreAgg() *BooleanPreAgg
type BufferReader ¶ added in v1.0.0
type BufferReader struct {
// contains filtered or unexported fields
}
func NewBufferReader ¶ added in v1.0.0
func NewBufferReader(maxSize uint32) *BufferReader
func (*BufferReader) Read ¶ added in v1.0.0
func (br *BufferReader) Read(offset int64, size uint32) ([]byte, error)
func (*BufferReader) Reset ¶ added in v1.0.0
func (br *BufferReader) Reset(r TSSPFile)
type BytesBuffer ¶
type BytesBuffer struct {
// contains filtered or unexported fields
}
func NewBytesBuffer ¶
func NewBytesBuffer(buf []byte) *BytesBuffer
func (*BytesBuffer) Bytes ¶
func (w *BytesBuffer) Bytes() []byte
func (*BytesBuffer) Len ¶
func (w *BytesBuffer) Len() int
func (*BytesBuffer) ReadByte ¶
func (w *BytesBuffer) ReadByte() (b byte, err error)
func (*BytesBuffer) Reset ¶
func (w *BytesBuffer) Reset(b []byte)
func (*BytesBuffer) WriteByte ¶
func (w *BytesBuffer) WriteByte(b byte) error
type ChunkDataBuilder ¶
type ChunkDataBuilder struct {
// contains filtered or unexported fields
}
func NewChunkDataBuilder ¶
func NewChunkDataBuilder(maxRowsPerSegment, maxSegmentLimit int) *ChunkDataBuilder
func (*ChunkDataBuilder) EncodeChunk ¶
func (*ChunkDataBuilder) EncodeTime ¶
func (b *ChunkDataBuilder) EncodeTime(offset int64) error
type ChunkIterator ¶
type ChunkIterator struct {
*FileIterator
// contains filtered or unexported fields
}
func NewChunkIterator ¶
func NewChunkIterator(r *FileIterator) *ChunkIterator
func (*ChunkIterator) Close ¶
func (c *ChunkIterator) Close()
func (*ChunkIterator) GetRecord ¶ added in v1.0.0
func (c *ChunkIterator) GetRecord() *record.Record
func (*ChunkIterator) GetSeriesID ¶ added in v1.0.0
func (c *ChunkIterator) GetSeriesID() uint64
func (*ChunkIterator) Next ¶
func (c *ChunkIterator) Next() bool
func (*ChunkIterator) WithLog ¶
func (c *ChunkIterator) WithLog(log *Log.Logger)
type ChunkIterators ¶
type ChunkIterators struct {
// contains filtered or unexported fields
}
func (*ChunkIterators) Close ¶
func (c *ChunkIterators) Close()
func (*ChunkIterators) Len ¶
func (c *ChunkIterators) Len() int
func (*ChunkIterators) Less ¶
func (c *ChunkIterators) Less(i, j int) bool
func (*ChunkIterators) Pop ¶
func (c *ChunkIterators) Pop() interface{}
func (*ChunkIterators) Push ¶
func (c *ChunkIterators) Push(v interface{})
func (*ChunkIterators) Swap ¶
func (c *ChunkIterators) Swap(i, j int)
func (*ChunkIterators) WithLog ¶
func (c *ChunkIterators) WithLog(log *Log.Logger)
type ChunkMeta ¶
type ChunkMeta struct {
// contains filtered or unexported fields
}
func (*ChunkMeta) GetColMeta ¶ added in v1.0.0
func (m *ChunkMeta) GetColMeta() []ColumnMeta
func (*ChunkMeta) MinMaxTime ¶
func (*ChunkMeta) Rows ¶
func (m *ChunkMeta) Rows(ab PreAggBuilder) int
func (*ChunkMeta) SegmentCount ¶ added in v1.0.0
func (*ChunkMeta) TimeMeta ¶
func (m *ChunkMeta) TimeMeta() *ColumnMeta
type CoderContext ¶
type CoderContext struct {
// contains filtered or unexported fields
}
func NewCoderContext ¶
func NewCoderContext() *CoderContext
func (*CoderContext) Release ¶
func (ctx *CoderContext) Release()
type ColumnBuilder ¶
type ColumnBuilder struct {
// contains filtered or unexported fields
}
func NewColumnBuilder ¶
func NewColumnBuilder() *ColumnBuilder
func (*ColumnBuilder) BuildPreAgg ¶ added in v1.0.0
func (b *ColumnBuilder) BuildPreAgg()
type ColumnIterator ¶ added in v1.0.0
type ColumnIterator struct {
// contains filtered or unexported fields
}
func NewColumnIterator ¶ added in v1.0.0
func NewColumnIterator(fi *FileIterator) *ColumnIterator
func (*ColumnIterator) Close ¶ added in v1.0.0
func (itr *ColumnIterator) Close()
func (*ColumnIterator) Error ¶ added in v1.0.0
func (itr *ColumnIterator) Error() error
func (*ColumnIterator) IncrChunkUsed ¶ added in v1.0.0
func (itr *ColumnIterator) IncrChunkUsed()
func (*ColumnIterator) NextChunkMeta ¶ added in v1.0.0
func (itr *ColumnIterator) NextChunkMeta() bool
func (*ColumnIterator) NextColumn ¶ added in v1.0.0
func (itr *ColumnIterator) NextColumn(colIdx int) (*record.Field, bool)
func (*ColumnIterator) PutCol ¶ added in v1.0.0
func (itr *ColumnIterator) PutCol(col *record.ColVal)
func (*ColumnIterator) Run ¶ added in v1.0.0
func (itr *ColumnIterator) Run(p ColumnIteratorPerformer) error
type ColumnIteratorPerformer ¶ added in v1.0.0
type ColumnMeta ¶
type ColumnMeta struct {
// contains filtered or unexported fields
}
func (*ColumnMeta) Clone ¶ added in v1.0.0
func (m *ColumnMeta) Clone() ColumnMeta
func (*ColumnMeta) RowCount ¶
func (m *ColumnMeta) RowCount(ref *record.Field, decs *ReadContext) (int64, error)
type ColumnReader ¶
type CompactGroup ¶
type CompactGroup struct {
// contains filtered or unexported fields
}
func NewCompactGroup ¶
func NewCompactGroup(name string, toLevle uint16, count int) *CompactGroup
type CompactedFileInfo ¶
type Config ¶
type Config struct {
// contains filtered or unexported fields
}
func (*Config) MaxRowsPerSegment ¶
func (*Config) SetFilesLimit ¶
func (*Config) SetMaxRowsPerSegment ¶
func (*Config) SetMaxSegmentLimit ¶
type DiskFileReader ¶
type FileIterator ¶
type FileIterator struct {
// contains filtered or unexported fields
}
func NewFileIterator ¶
func NewFileIterator(r TSSPFile, log *Log.Logger) *FileIterator
func (*FileIterator) Close ¶
func (itr *FileIterator) Close()
func (*FileIterator) GetCurtChunkMeta ¶ added in v1.0.0
func (itr *FileIterator) GetCurtChunkMeta() *ChunkMeta
func (*FileIterator) NextChunkMeta ¶
func (itr *FileIterator) NextChunkMeta() bool
func (*FileIterator) WithLog ¶
func (itr *FileIterator) WithLog(log *Log.Logger)
type FileIterators ¶
type FileIterators []*FileIterator
func (FileIterators) AverageRows ¶
func (i FileIterators) AverageRows() int
func (FileIterators) Close ¶
func (i FileIterators) Close()
func (FileIterators) MaxChunkRows ¶
func (i FileIterators) MaxChunkRows() int
func (FileIterators) MaxColumns ¶
func (i FileIterators) MaxColumns() int
type FileWriter ¶
type FilterOptions ¶
type FilterOptions struct {
// contains filtered or unexported fields
}
func NewFilterOpts ¶
type Float ¶
type Float struct {
// contains filtered or unexported fields
}
func GetFloatCoder ¶
func GetFloatCoder() *Float
func (*Float) SetEncodingType ¶
type FloatPreAgg ¶
type FloatPreAgg struct {
// contains filtered or unexported fields
}
FloatPreAgg: if you change the order of the elements in the structure, remember to modify marshal() and unmarshal() as well.
func NewFloatPreAgg ¶
func NewFloatPreAgg() *FloatPreAgg
type IdTimePairs ¶
func GetIDTimePairs ¶
func GetIDTimePairs(name string) *IdTimePairs
func (*IdTimePairs) Add ¶
func (p *IdTimePairs) Add(id uint64, tm int64)
func (*IdTimePairs) AddRowCounts ¶
func (p *IdTimePairs) AddRowCounts(rowCounts int64)
func (*IdTimePairs) Len ¶
func (p *IdTimePairs) Len() int
func (*IdTimePairs) Marshal ¶
func (p *IdTimePairs) Marshal(isOrder bool, dst []byte, ctx *CoderContext) []byte
func (*IdTimePairs) Reset ¶
func (p *IdTimePairs) Reset(name string)
type InMerge ¶
type InMerge struct {
// contains filtered or unexported fields
}
func NewInMerge ¶
func NewInMerge() *InMerge
type Integer ¶
type Integer struct {
// contains filtered or unexported fields
}
func GetInterCoder ¶
func GetInterCoder() *Integer
func (*Integer) SetEncodingType ¶
type IntegerPreAgg ¶
type IntegerPreAgg struct {
// contains filtered or unexported fields
}
IntegerPreAgg: if you change the order of the elements in the structure, remember to modify marshal() and unmarshal() as well.
func NewIntegerPreAgg ¶
func NewIntegerPreAgg() *IntegerPreAgg
type LimitWriter ¶
type LimitWriter struct {
// contains filtered or unexported fields
}
func (*LimitWriter) Close ¶
func (w *LimitWriter) Close() error
func (*LimitWriter) Name ¶
func (w *LimitWriter) Name() string
type Limiter ¶
type Limiter interface {
SetBurst(newBurst int)
SetLimit(newLimit rate.Limit)
WaitN(ctx context.Context, n int) (err error)
Limit() rate.Limit
Burst() int
}
func NewLimiter ¶
type Location ¶
type Location struct {
// contains filtered or unexported fields
}
func NewLocation ¶
func NewLocation(r TSSPFile, ctx *ReadContext) *Location
func (*Location) GetChunkMeta ¶
type LocationCursor ¶
type LocationCursor struct {
// contains filtered or unexported fields
}
func NewLocationCursor ¶
func NewLocationCursor(n int) *LocationCursor
func (*LocationCursor) AddLocation ¶
func (l *LocationCursor) AddLocation(loc *Location)
func (*LocationCursor) AddRef ¶
func (l *LocationCursor) AddRef()
func (*LocationCursor) Len ¶
func (l *LocationCursor) Len() int
func (*LocationCursor) Less ¶
func (l *LocationCursor) Less(i, j int) bool
func (*LocationCursor) ReadData ¶
func (l *LocationCursor) ReadData(filterOpts *FilterOptions, dst *record.Record) (*record.Record, error)
func (*LocationCursor) ReadMeta ¶
func (l *LocationCursor) ReadMeta(filterOpts *FilterOptions, dst *record.Record) (*record.Record, error)
func (*LocationCursor) ReadOutOfOrderMeta ¶
func (l *LocationCursor) ReadOutOfOrderMeta(filterOpts *FilterOptions, dst *record.Record) (*record.Record, error)
func (*LocationCursor) Reverse ¶
func (l *LocationCursor) Reverse()
func (*LocationCursor) Swap ¶
func (l *LocationCursor) Swap(i, j int)
func (*LocationCursor) Unref ¶
func (l *LocationCursor) Unref()
type MemBlock ¶
type MemBlock struct {
// contains filtered or unexported fields
}
func (*MemBlock) AppendDataBlock ¶
func (*MemBlock) CopyBlocks ¶
func (mb *MemBlock) CopyBlocks(src MemoryReader)
func (*MemBlock) DataBlocks ¶
func (*MemBlock) DataInMemory ¶
func (*MemBlock) FreeMemory ¶
func (*MemBlock) LoadIntoMemory ¶
func (mb *MemBlock) LoadIntoMemory(dr DiskFileReader, tr *Trailer, metaIndexItems []MetaIndex) error
func (*MemBlock) MetaBlocks ¶
func (*MemBlock) MetaInMemory ¶
func (*MemBlock) ReadChunkMetaBlock ¶
func (*MemBlock) ReadDataBlock ¶
func (*MemBlock) ReserveDataBlock ¶
func (*MemBlock) ReserveMetaBlock ¶
func (*MemBlock) SetMetaBlocks ¶
type MemoryReader ¶
type MemoryReader interface {
AppendDataBlock(srcData []byte)
ReadChunkMetaBlock(metaIdx int, sid uint64, count uint32) []byte
ReadDataBlock(offset int64, size uint32, dstPtr *[]byte) ([]byte, error)
CopyBlocks(src MemoryReader)
LoadIntoMemory(dr DiskFileReader, tr *Trailer, metaIndexItems []MetaIndex) error
FreeMemory() int64
DataInMemory() bool
MetaInMemory() bool
ReserveMetaBlock(n int)
ReserveDataBlock(n int)
DataBlocks() [][]byte
MetaBlocks() [][]byte
SetMetaBlocks(blocks [][]byte)
Size() int64
Reset()
}
func NewMemReader ¶
func NewMemReader() MemoryReader
func NewMemoryReader ¶
func NewMemoryReader(blkSize int) MemoryReader
type MetaIndex ¶
type MetaIndex struct {
// contains filtered or unexported fields
}
MetaIndex: if you change the order of the elements in the structure, remember to modify marshal() and unmarshal() as well.
type MmsIdTime ¶
type MmsIdTime struct {
// contains filtered or unexported fields
}
func NewMmsIdTime ¶
func NewMmsIdTime() *MmsIdTime
type MmsReaders ¶
type MmsReaders struct {
Orders TableReaders
OutOfOrders TableReaders
}
type MmsTables ¶
type MmsTables struct {
Order map[string]*TSSPFiles // {"cpu_0001": *TSSPFiles}
OutOfOrder map[string]*TSSPFiles // {"cpu_0001": *TSSPFiles}
Conf *Config
// contains filtered or unexported fields
}
func NewTableStore ¶
func (*MmsTables) AddRowCountsBySid ¶
func (*MmsTables) AddTSSPFiles ¶
func (*MmsTables) CompactDone ¶
func (*MmsTables) CompactionDisable ¶
func (m *MmsTables) CompactionDisable()
func (*MmsTables) CompactionEnable ¶
func (m *MmsTables) CompactionEnable()
func (*MmsTables) CompactionEnabled ¶
func (*MmsTables) DisableCompAndMerge ¶ added in v1.0.0
func (m *MmsTables) DisableCompAndMerge()
func (*MmsTables) DropMeasurement ¶
func (*MmsTables) EnableCompAndMerge ¶ added in v1.0.0
func (m *MmsTables) EnableCompAndMerge()
func (*MmsTables) FreeAllMemReader ¶
func (m *MmsTables) FreeAllMemReader()
func (*MmsTables) FreeSequencer ¶ added in v1.0.0
func (*MmsTables) FullCompact ¶
func (*MmsTables) GetBothFilesRef ¶ added in v1.0.0
func (*MmsTables) GetFileSeq ¶ added in v1.0.0
func (*MmsTables) GetLastFlushTimeBySid ¶
func (*MmsTables) GetMstFileStat ¶
func (m *MmsTables) GetMstFileStat() *statistics.FileStat
func (*MmsTables) GetOutOfOrderFileNum ¶
func (*MmsTables) GetRowCountsBySid ¶
func (*MmsTables) GetTSSPFiles ¶ added in v1.0.0
func (*MmsTables) IsOutOfOrderFilesExist ¶ added in v1.0.0
func (*MmsTables) LevelPlan ¶
func (m *MmsTables) LevelPlan(level uint16) []*CompactGroup
func (*MmsTables) Listen ¶ added in v1.0.0
func (m *MmsTables) Listen(signal chan struct{}, onClose func())
func (*MmsTables) MergeDisable ¶
func (m *MmsTables) MergeDisable()
func (*MmsTables) MergeEnable ¶
func (m *MmsTables) MergeEnable()
func (*MmsTables) MergeEnabled ¶
func (*MmsTables) MergeOutOfOrder ¶
func (*MmsTables) NewChunkIterators ¶
func (m *MmsTables) NewChunkIterators(group FilesInfo) (*ChunkIterators, error)
func (*MmsTables) NewFileIterators ¶
func (m *MmsTables) NewFileIterators(group *CompactGroup) (FilesInfo, error)
func (*MmsTables) NewStreamIterators ¶
func (m *MmsTables) NewStreamIterators(group FilesInfo) (*StreamIterators, error)
func (*MmsTables) NewStreamWriteFile ¶ added in v1.0.0
func (m *MmsTables) NewStreamWriteFile(mst string) *StreamWriteFile
func (*MmsTables) NextSequence ¶
func (*MmsTables) ReplaceDownSampleFiles ¶ added in v1.0.0
func (*MmsTables) ReplaceFiles ¶
func (*MmsTables) Sequencer ¶
Used to update the sequencer during flush. No reload is required: the sequencer is updated directly, and idTimes are merged during the reload process. This covers the sequence write → free → flush → update idTimes, with idTimes merged on the next write.
func (*MmsTables) SetAddFunc ¶ added in v1.0.0
func (*MmsTables) UnRefSequencer ¶ added in v1.0.0
func (m *MmsTables) UnRefSequencer()
type MsBuilder ¶
type MsBuilder struct {
Path string
TableData
Conf *Config
MaxIds int
Files []TSSPFile
FileName TSSPFileName
// contains filtered or unexported fields
}
func AllocMsBuilder ¶
func GetMsBuilder ¶
func (*MsBuilder) FileVersion ¶
func (*MsBuilder) MaxRowsPerSegment ¶
type NameReadWriterCloser ¶
type NameReadWriterCloser interface {
Name() string
io.ReadWriteCloser
}
func NewLimitWriter ¶
func NewLimitWriter(w NameReadWriterCloser, l Limiter) NameReadWriterCloser
type PreAggBuilder ¶
type PreAggBuilder interface {
// contains filtered or unexported methods
}
type PreAggBuilders ¶
type PreAggBuilders struct {
// contains filtered or unexported fields
}
func (*PreAggBuilders) Release ¶
func (b *PreAggBuilders) Release()
type ReadContext ¶
type ReadContext struct {
Ascending bool
// contains filtered or unexported fields
}
func NewReadContext ¶
func NewReadContext(ascending bool) *ReadContext
func (*ReadContext) GetOps ¶
func (d *ReadContext) GetOps() []*comm.CallOption
func (*ReadContext) InitPreAggBuilder ¶
func (d *ReadContext) InitPreAggBuilder()
func (*ReadContext) MatchPreAgg ¶
func (d *ReadContext) MatchPreAgg() bool
func (*ReadContext) Release ¶
func (d *ReadContext) Release()
func (*ReadContext) Reset ¶
func (d *ReadContext) Reset()
func (*ReadContext) Set ¶
func (d *ReadContext) Set(ascending bool, tr record.TimeRange, onlyFirstOrLast bool, ops []*comm.CallOption)
func (*ReadContext) SetOps ¶
func (d *ReadContext) SetOps(c []*comm.CallOption)
func (*ReadContext) SetTr ¶
func (d *ReadContext) SetTr(tr record.TimeRange)
type Segment ¶
type Segment struct {
// contains filtered or unexported fields
}
Segment describes a data segment: its offset, size, minT, and maxT.
type SegmentRange ¶
type SegmentRange [2]int64 // min/max
type SegmentReader ¶ added in v1.0.0
type SegmentReader struct {
// contains filtered or unexported fields
}
func NewSegmentReader ¶ added in v1.0.0
func NewSegmentReader(fi *FileIterator) *SegmentReader
func (*SegmentReader) ResetContext ¶ added in v1.0.0
func (sr *SegmentReader) ResetContext()
type Sequencer ¶
type Sequencer struct {
// contains filtered or unexported fields
}
func NewSequencer ¶
func NewSequencer() *Sequencer
func (*Sequencer) AddRowCounts ¶
func (*Sequencer) BatchUpdate ¶
func (s *Sequencer) BatchUpdate(p *IdTimePairs)
func (*Sequencer) BatchUpdateCheckTime ¶
func (s *Sequencer) BatchUpdateCheckTime(p *IdTimePairs)
type StreamIterator ¶
type StreamIterator struct {
*FileIterator
// contains filtered or unexported fields
}
func NewStreamStreamIterator ¶
func NewStreamStreamIterator(fi *FileIterator) *StreamIterator
type StreamIterators ¶
func (*StreamIterators) Close ¶
func (c *StreamIterators) Close()
func (*StreamIterators) FileVersion ¶
func (c *StreamIterators) FileVersion() uint64
func (*StreamIterators) Flush ¶
func (c *StreamIterators) Flush() error
func (*StreamIterators) Init ¶
func (c *StreamIterators) Init(id uint64, chunkDataOffset int64, schema record.Schemas)
func (*StreamIterators) Len ¶
func (c *StreamIterators) Len() int
func (*StreamIterators) Less ¶
func (c *StreamIterators) Less(i, j int) bool
func (*StreamIterators) NewFile ¶
func (c *StreamIterators) NewFile(addFileExt bool) error
func (*StreamIterators) NewTSSPFile ¶
func (c *StreamIterators) NewTSSPFile(tmp bool) (TSSPFile, error)
func (*StreamIterators) Pop ¶
func (c *StreamIterators) Pop() interface{}
func (*StreamIterators) Push ¶
func (c *StreamIterators) Push(v interface{})
func (*StreamIterators) Size ¶
func (c *StreamIterators) Size() int64
func (*StreamIterators) Swap ¶
func (c *StreamIterators) Swap(i, j int)
func (*StreamIterators) WithLog ¶
func (c *StreamIterators) WithLog(log *Log.Logger)
type StreamIteratorsPool ¶
type StreamIteratorsPool struct {
// contains filtered or unexported fields
}
func NewStreamIteratorsPool ¶
func NewStreamIteratorsPool(n int) *StreamIteratorsPool
type StreamWriteFile ¶ added in v1.0.0
func NewWriteScanFile ¶ added in v1.0.0
func (*StreamWriteFile) AppendColumn ¶ added in v1.0.0
func (c *StreamWriteFile) AppendColumn(ref record.Field) error
func (*StreamWriteFile) ChangeColumn ¶ added in v1.0.0
func (c *StreamWriteFile) ChangeColumn(ref record.Field) error
func (*StreamWriteFile) ChangeSid ¶ added in v1.0.0
func (c *StreamWriteFile) ChangeSid(sid uint64)
func (*StreamWriteFile) Close ¶ added in v1.0.0
func (c *StreamWriteFile) Close(isError bool)
func (*StreamWriteFile) Flush ¶ added in v1.0.0
func (c *StreamWriteFile) Flush() error
func (*StreamWriteFile) GetTSSPFile ¶ added in v1.0.0
func (c *StreamWriteFile) GetTSSPFile() TSSPFile
func (*StreamWriteFile) Init ¶ added in v1.0.0
func (c *StreamWriteFile) Init(id uint64, chunkDataOffset int64, schema record.Schemas)
func (*StreamWriteFile) InitFile ¶ added in v1.0.0
func (c *StreamWriteFile) InitFile(seq uint64) error
func (*StreamWriteFile) InitMergedFile ¶ added in v1.0.0
func (c *StreamWriteFile) InitMergedFile(f TSSPFile) error
func (*StreamWriteFile) NewFile ¶ added in v1.0.0
func (c *StreamWriteFile) NewFile(addFileExt bool) error
func (*StreamWriteFile) NewTSSPFile ¶ added in v1.0.0
func (c *StreamWriteFile) NewTSSPFile(tmp bool) (TSSPFile, error)
func (*StreamWriteFile) Size ¶ added in v1.0.0
func (c *StreamWriteFile) Size() int64
func (*StreamWriteFile) WriteCurrentMeta ¶ added in v1.0.0
func (c *StreamWriteFile) WriteCurrentMeta() error
func (*StreamWriteFile) WriteFile ¶ added in v1.0.0
func (c *StreamWriteFile) WriteFile() error
func (*StreamWriteFile) WriteMeta ¶ added in v1.0.0
func (c *StreamWriteFile) WriteMeta(cm *ChunkMeta) error
type String ¶
type String struct {
// contains filtered or unexported fields
}
func GetStringCoder ¶
func GetStringCoder() *String
func (*String) MaxEncodedLen ¶
func (*String) SetEncodingType ¶
type StringPreAgg ¶
type StringPreAgg struct {
// contains filtered or unexported fields
}
StringPreAgg: if you change the order of the elements in the structure, remember to modify marshal() and unmarshal() as well.
func NewStringPreAgg ¶
func NewStringPreAgg() *StringPreAgg
type TSSPFile ¶
type TSSPFile interface {
Path() string
Name() string
FileName() TSSPFileName
LevelAndSequence() (uint16, uint64)
FileNameMerge() uint16
FileNameExtend() uint16
IsOrder() bool
Ref()
Unref()
RefFileReader()
UnrefFileReader()
Stop()
Inuse() bool
MetaIndexAt(idx int) (*MetaIndex, error)
MetaIndex(id uint64, tr record.TimeRange) (int, *MetaIndex, error)
ChunkMeta(id uint64, offset int64, size, itemCount uint32, metaIdx int, dst *ChunkMeta, buffer *[]byte) (*ChunkMeta, error)
Read(id uint64, tr record.TimeRange, dst *record.Record) (*record.Record, error)
ReadAt(cm *ChunkMeta, segment int, dst *record.Record, decs *ReadContext) (*record.Record, error)
ChunkAt(index int) (*ChunkMeta, error)
ReadData(offset int64, size uint32, dst *[]byte) ([]byte, error)
ReadChunkMetaData(metaIdx int, m *MetaIndex, dst []ChunkMeta) ([]ChunkMeta, error)
CreateTime() int64
FileStat() *Trailer
// FileSize get the size of the disk occupied by file
FileSize() int64
// InMemSize get the size of the memory occupied by file
InMemSize() int64
Contains(id uint64) (bool, error)
ContainsByTime(tr record.TimeRange) (bool, error)
ContainsValue(id uint64, tr record.TimeRange) (bool, error)
MinMaxTime() (int64, int64, error)
Delete(ids []int64) error
DeleteRange(ids []int64, min, max int64) error
HasTombstones() bool
TombstoneFiles() []TombstoneFile
Open() error
Close() error
LoadIntoMemory() error
LoadComponents() error
LoadIdTimes(p *IdTimePairs) error
Rename(newName string) error
Remove() error
FreeMemory(evictLock bool) int64
FreeFileHandle() error
Version() uint64
AverageChunkRows() int
MaxChunkRows() int
MetaIndexItemNum() int64
AddToEvictList(level uint16)
RemoveFromEvictList(level uint16)
}
type TSSPFileName ¶
type TSSPFileName struct {
// contains filtered or unexported fields
}
func NewTSSPFileName ¶
func NewTSSPFileName(seq uint64, level, merge, extent uint16, order bool, lockPath *string) TSSPFileName
func (*TSSPFileName) Equal ¶
func (n *TSSPFileName) Equal(other *TSSPFileName) bool
func (*TSSPFileName) ParseFileName ¶
func (n *TSSPFileName) ParseFileName(name string) error
func (*TSSPFileName) SetExtend ¶
func (n *TSSPFileName) SetExtend(extend uint16)
func (*TSSPFileName) SetLevel ¶
func (n *TSSPFileName) SetLevel(l uint16)
func (*TSSPFileName) SetMerge ¶
func (n *TSSPFileName) SetMerge(merge uint16)
func (*TSSPFileName) SetOrder ¶
func (n *TSSPFileName) SetOrder(v bool)
func (*TSSPFileName) SetSeq ¶
func (n *TSSPFileName) SetSeq(seq uint64)
func (*TSSPFileName) String ¶
func (n *TSSPFileName) String() string
func (*TSSPFileName) TmpPath ¶
func (n *TSSPFileName) TmpPath(dir string) string
type TSSPFileReader ¶
type TSSPFileReader struct {
// contains filtered or unexported fields
}
func CreateTSSPFileReader ¶
func NewTSSPFileReader ¶
func NewTSSPFileReader(name string, lockPath *string) (*TSSPFileReader, error)
func (*TSSPFileReader) AverageChunkRows ¶
func (r *TSSPFileReader) AverageChunkRows() int
func (*TSSPFileReader) BlockHeader ¶
func (*TSSPFileReader) ChunkMetaAt ¶
func (r *TSSPFileReader) ChunkMetaAt(index int) (*ChunkMeta, error)
func (*TSSPFileReader) Close ¶
func (r *TSSPFileReader) Close() error
func (*TSSPFileReader) Contains ¶
func (r *TSSPFileReader) Contains(id uint64, tm record.TimeRange) bool
func (*TSSPFileReader) ContainsId ¶
func (r *TSSPFileReader) ContainsId(id uint64) bool
func (*TSSPFileReader) ContainsTime ¶
func (r *TSSPFileReader) ContainsTime(tm record.TimeRange) bool
func (*TSSPFileReader) CreateTime ¶
func (r *TSSPFileReader) CreateTime() int64
func (*TSSPFileReader) FileName ¶
func (r *TSSPFileReader) FileName() string
func (*TSSPFileReader) FileSize ¶
func (r *TSSPFileReader) FileSize() int64
func (*TSSPFileReader) FreeFileHandle ¶ added in v1.0.0
func (r *TSSPFileReader) FreeFileHandle() error
func (*TSSPFileReader) FreeMemory ¶
func (r *TSSPFileReader) FreeMemory() int64
func (*TSSPFileReader) GetTSSPFileBytes ¶
func (*TSSPFileReader) InMemSize ¶
func (r *TSSPFileReader) InMemSize() int64
func (*TSSPFileReader) LoadComponents ¶ added in v1.0.0
func (r *TSSPFileReader) LoadComponents() error
func (*TSSPFileReader) LoadIntoMemory ¶
func (r *TSSPFileReader) LoadIntoMemory() error
func (*TSSPFileReader) MaxChunkRows ¶
func (r *TSSPFileReader) MaxChunkRows() int
func (*TSSPFileReader) MetaIndexAt ¶
func (r *TSSPFileReader) MetaIndexAt(idx int) (*MetaIndex, error)
func (*TSSPFileReader) MinMaxSeriesID ¶
func (r *TSSPFileReader) MinMaxSeriesID() (min, max uint64, err error)
func (*TSSPFileReader) MinMaxTime ¶
func (r *TSSPFileReader) MinMaxTime() (min, max int64, err error)
func (*TSSPFileReader) Name ¶
func (r *TSSPFileReader) Name() string
func (*TSSPFileReader) Open ¶
func (r *TSSPFileReader) Open() error
func (*TSSPFileReader) ReadChunkMetaData ¶
func (*TSSPFileReader) ReadData ¶
func (r *TSSPFileReader) ReadData(cm *ChunkMeta, segment int, dst *record.Record, decs *ReadContext) (*record.Record, error)
func (*TSSPFileReader) ReadDataBlock ¶
func (*TSSPFileReader) ReadMetaBlock ¶
func (*TSSPFileReader) Ref ¶ added in v1.0.0
func (r *TSSPFileReader) Ref()
func (*TSSPFileReader) Rename ¶
func (r *TSSPFileReader) Rename(newName string) error
func (*TSSPFileReader) Stat ¶
func (r *TSSPFileReader) Stat() *Trailer
func (*TSSPFileReader) Unref ¶ added in v1.0.0
func (r *TSSPFileReader) Unref()
func (*TSSPFileReader) Version ¶
func (r *TSSPFileReader) Version() uint64
type TSSPFiles ¶
type TSSPFiles struct {
// contains filtered or unexported fields
}
func NewTSSPFiles ¶
func NewTSSPFiles() *TSSPFiles
type TableReader ¶
type TableReader interface {
Open() error
Close() error
ReadData(cm *ChunkMeta, segment int, dst *record.Record, ctx *ReadContext) (*record.Record, error)
Ref()
Unref()
MetaIndexAt(idx int) (*MetaIndex, error)
MetaIndex(id uint64, tr record.TimeRange) (int, *MetaIndex, error)
ChunkMeta(id uint64, offset int64, size, itemCount uint32, metaIdx int, dst *ChunkMeta, buffer *[]byte) (*ChunkMeta, error)
ChunkMetaAt(index int) (*ChunkMeta, error)
ReadMetaBlock(metaIdx int, id uint64, offset int64, size uint32, count uint32, dst *[]byte) ([]byte, error)
ReadDataBlock(offset int64, size uint32, dst *[]byte) ([]byte, error)
Read(offset int64, size uint32, dst *[]byte) ([]byte, error)
ReadChunkMetaData(metaIdx int, m *MetaIndex, dst []ChunkMeta) ([]ChunkMeta, error)
BlockHeader(meta *ChunkMeta, dst []record.Field) ([]record.Field, error)
Stat() *Trailer
MinMaxSeriesID() (min, max uint64, err error)
MinMaxTime() (min, max int64, err error)
Contains(id uint64, tm record.TimeRange) bool
ContainsTime(tm record.TimeRange) bool
ContainsId(id uint64) bool
CreateTime() int64
Name() string
FileName() string
Rename(newName string) error
FileSize() int64
InMemSize() int64
Version() uint64
FreeMemory() int64
FreeFileHandle() error
LoadIntoMemory() error
LoadComponents() error
AverageChunkRows() int
MaxChunkRows() int
}
type TableReaders ¶
type TableReaders []TSSPFile
func (TableReaders) Len ¶
func (tables TableReaders) Len() int
func (TableReaders) Less ¶
func (tables TableReaders) Less(i, j int) bool
func (TableReaders) Swap ¶
func (tables TableReaders) Swap(i, j int)
type TableStoreGC ¶
type TableStoreGC struct {
// contains filtered or unexported fields
}
func (*TableStoreGC) Add ¶
func (sgc *TableStoreGC) Add(free bool, files ...TSSPFile)
func (*TableStoreGC) GC ¶
func (sgc *TableStoreGC) GC()
type TablesGC ¶
func NewTableStoreGC ¶
func NewTableStoreGC() TablesGC
type TablesStore ¶
type TablesStore interface {
SetOpId(shardId uint64, opId uint64)
Open() (int64, error)
Close() error
AddTable(ms *MsBuilder, isOrder bool, tmp bool)
AddTSSPFiles(name string, isOrder bool, f ...TSSPFile)
FreeAllMemReader()
ReplaceFiles(name string, oldFiles, newFiles []TSSPFile, isOrder bool) error
GetBothFilesRef(measurement string, hasTimeFilter bool, tr record.TimeRange) ([]TSSPFile, []TSSPFile)
ReplaceDownSampleFiles(mstNames []string, originFiles [][]TSSPFile, newFiles [][]TSSPFile, isOrder bool, callBack func()) error
NextSequence() uint64
Sequencer() *Sequencer
GetTSSPFiles(mm string, isOrder bool) (*TSSPFiles, bool)
Tier() uint64
File(name string, namePath string, isOrder bool) TSSPFile
CompactDone(seq []string)
CompactionEnable()
CompactionDisable()
MergeEnable()
MergeDisable()
CompactionEnabled() bool
MergeEnabled() bool
IsOutOfOrderFilesExist() bool
MergeOutOfOrder(shId uint64, force bool) error
LevelCompact(level uint16, shid uint64) error
FullCompact(shid uint64) error
SetAddFunc(addFunc func(int64))
GetLastFlushTimeBySid(measurement string, sid uint64) (int64, error)
GetRowCountsBySid(measurement string, sid uint64) (int64, error)
AddRowCountsBySid(measurement string, sid uint64, rowCounts int64) error
GetOutOfOrderFileNum() int
GetMstFileStat() *stats.FileStat
DropMeasurement(ctx context.Context, name string) error
GetFileSeq() uint64
DisableCompAndMerge()
EnableCompAndMerge()
FreeSequencer() bool
UnRefSequencer()
}
type Time ¶
type Time struct {
// contains filtered or unexported fields
}
func GetTimeCoder ¶
func GetTimeCoder() *Time
func (*Time) SetEncodingType ¶
type TimePreAgg ¶
type TimePreAgg struct {
// contains filtered or unexported fields
}
func NewTimePreAgg ¶
func NewTimePreAgg() *TimePreAgg
type TombstoneFile ¶
type TombstoneFile struct {
// contains filtered or unexported fields
}
func (*TombstoneFile) Path ¶
func (t *TombstoneFile) Path() string
func (*TombstoneFile) TombstonesCount ¶
func (t *TombstoneFile) TombstonesCount() int
type Trailer ¶
type Trailer struct {
TableStat
// contains filtered or unexported fields
}
func (*Trailer) ContainsId ¶
func (*Trailer) MetaIndexItemNum ¶ added in v1.0.0
type UnorderedColumnReader ¶ added in v1.0.0
type UnorderedColumnReader struct {
// contains filtered or unexported fields
}
func (*UnorderedColumnReader) Read ¶ added in v1.0.0
func (r *UnorderedColumnReader) Read(ref *record.Field, maxTime int64) (*record.ColVal, []int64, error)
reads all unordered data whose time is earlier than maxTime
func (*UnorderedColumnReader) ReadSchema ¶ added in v1.0.0
func (r *UnorderedColumnReader) ReadSchema(res map[string]record.Field, maxTime int64)
type UnorderedReader ¶ added in v1.0.0
type UnorderedReader struct {
// contains filtered or unexported fields
}
func NewUnorderedReader ¶ added in v1.0.0
func NewUnorderedReader(log *logger.Logger) *UnorderedReader
func (*UnorderedReader) AddFiles ¶ added in v1.0.0
func (r *UnorderedReader) AddFiles(files []TSSPFile)
func (*UnorderedReader) Close ¶ added in v1.0.0
func (r *UnorderedReader) Close()
func (*UnorderedReader) InitTimes ¶ added in v1.0.1
func (r *UnorderedReader) InitTimes(sid uint64, maxTime int64) error
InitTimes initializes the time column of unordered data.
func (*UnorderedReader) Read ¶ added in v1.0.0
func (r *UnorderedReader) Read(sid uint64, ref *record.Field, maxTime int64) (*record.ColVal, []int64, error)
Read reads data based on the series ID, column, and time range
func (*UnorderedReader) ReadAllTimes ¶ added in v1.0.1
func (r *UnorderedReader) ReadAllTimes() []int64
func (*UnorderedReader) ReadRemain ¶ added in v1.0.0
func (r *UnorderedReader) ReadRemain(sid uint64, cb remainCallback) error
ReadRemain reads all remaining data that is smaller than the current series ID in the unordered data
func (*UnorderedReader) ReadSeriesSchemas ¶ added in v1.0.0
func (r *UnorderedReader) ReadSeriesSchemas(sid uint64, maxTime int64) record.Schemas
Source Files
¶
- bool.go
- chunk_iterators.go
- chunkdata_builder.go
- column_builder.go
- column_iterator.go
- compact.go
- compaction_file_info.go
- config.go
- encoding.go
- evict.go
- file_iterator.go
- float.go
- fs_reader.go
- int.go
- limiter.go
- location.go
- location_cursor.go
- mem_reader.go
- merge_out_of_order.go
- merge_performer.go
- merge_tool.go
- merge_util.go
- mms_loader.go
- mms_tables.go
- msbuilder.go
- read_context.go
- reader.go
- sequencer.go
- stream_compact.go
- stream_downsample.go
- string.go
- table.go
- table_stat.go
- timestamp.go
- tombstone.go
- trailer.go
- tssp_file.go
- tssp_file_inmem.go
- tssp_file_name.go
- tssp_reader.go
- unordered_reader.go
- util.go
- writer.go