Documentation ¶
Overview ¶
Copyright 2023 Huawei Cloud Computing Technologies Co., Ltd.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
Copyright 2024 Huawei Cloud Computing Technologies Co., Ltd.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
Index ¶
- Constants
- Variables
- func AggregateData(newRec, baseRec *record.Record, ops []*comm.CallOption) bool
- func CacheDataInMemory() bool
- func CacheMetaInMemory() bool
- func CanEncodeOneRowMode(col *record.ColVal) bool
- func CleanTempFile(f fileops.File)
- func CompactRecovery(path string, group *CompactGroup)
- func CompareT[T int | int64 | float64 | string](s1, s2 T, isAscending bool) (bool, bool)
- func CreateTSSPFileReader(size int64, fd fileops.File, trailer *Trailer, tb *TableData, ver uint64, ...) (*tsspFileReader, error)
- func DecodeColumnHeader(col *record.ColVal, data []byte, colType uint8) ([]byte, []byte, error)
- func DecodeColumnOfOneValue(data []byte, col *record.ColVal, typ uint8)
- func EncodeColumnHeader(col *record.ColVal, dst []byte, typ uint8) []byte
- func EstimateBufferSize(recSize int, rows int) int
- func FileOperation(f TSSPFile, op func())
- func FillNilCol(col *record.ColVal, size int, ref *record.Field)
- func FilterByField(rec *record.Record, filterRec *record.Record, filterOption *BaseFilterOptions, ...) *record.Record
- func FilterByFieldFuncs(rec, filterRec *record.Record, filterOption *BaseFilterOptions, ...) *record.Record
- func FilterByOpts(rec *record.Record, opt *FilterOptions) *record.Record
- func FilterByTime(rec *record.Record, tr util.TimeRange) *record.Record
- func FilterByTimeDescend(rec *record.Record, tr util.TimeRange) *record.Record
- func FlushRemoteEnabled(tier uint64) bool
- func GenFixRowsPerSegment(data *record.Record, rowNumPerSegment int) []int
- func GenLogFileName(logSeq *uint64) string
- func GenParquetLogName() string
- func GenRecByReserveIds(rec, filterRec *record.Record, rowNumber []int, redIdxMap map[int]struct{}) *record.Record
- func GetBloomFilterBuf() *bloomFilter
- func GetChunkMetaCompressMode() uint8
- func GetCursorsBy(path *sparseindex.OBSFilterPath, tr util.TimeRange, isAscending bool) (int, uint64, error)
- func GetDetachedFlushEnabled() bool
- func GetDir(engineType config.EngineType, path string) string
- func GetMaxRowsPerSegment4TsStore() int
- func GetMergeFlag4TsStore() int32
- func GetMetaIndexChunkCount(obsOptions *obs.ObsOptions, dataPath string) (int64, error)
- func GetMetaIndexOffsetAndLengthByChunkId(chunkId int64) (offset, length int64)
- func GetPKItems(path string, obsOpts *obs.ObsOptions, miChunkIds []int64) (*colstore.DetachedPKMetaInfo, []*colstore.DetachedPKInfo, error)
- func GetPKMetaOffsetLengthByChunkId(pkMetaInfo *colstore.DetachedPKMetaInfo, chunkId int) (offset, length int64)
- func GetSortKeyColVal(fi *FileIterator, sortKey []record.Field, ctx *ReadContext, ...) ([]*record.ColVal, []record.SortItem, error)
- func GetTmpFileSuffix() string
- func InParquetProcess(files ...string) bool
- func Init()
- func InitDecFunctions()
- func InitQueryFileCache(cap uint32, enable bool)
- func InitWriterPool(size int)
- func IsFlushToFinalFile(totalSegmentCnt, flushToFinalFileLimit uint64) bool
- func IsInterfaceNil(value interface{}) bool
- func IsTempleFile(name string) bool
- func LeftBound(nums []uint32, target uint32, left int) int
- func MergeRecovery(path string, name string, ctx *MergeContext)
- func MergeTimes(a []int64, b []int64, dst []int64) []int64
- func NewCsImmTableImpl() *csImmTableImpl
- func NewLastMergeTime() *lastMergeTime
- func NewMemReaderEvictCtx() *memReaderEvictCtx
- func NewMergePerformer(ur *UnorderedReader, stat *statistics.MergeStatItem) *mergePerformer
- func NewObsWriter(path, fileName string, obsOpts *obs.ObsOptions) (*obsWriter, error)
- func NewObsWriterByFd(fd fileops.File, obsOpts *obs.ObsOptions) (*obsWriter, error)
- func NewTSSPFileReader(name string, lockPath *string) (*tsspFileReader, error)
- func NewTsImmTable() *tsImmTableImpl
- func NonStreamingCompaction(fi FilesInfo) bool
- func PreAggOnlyOneRow(buf []byte) bool
- func ProcParquetLog(logDir string, lockPath *string, ctx EventContext) error
- func PutBloomFilterBuf(key *bloomFilter)
- func PutChunkMeta(filePath string, chunkMeta *ChunkMeta)
- func PutDetachedSegmentTask(queryID string, meta IndexFrags)
- func PutIDTimePairs(pair *IdTimePairs)
- func ReadPKDataAll(path string, opts *obs.ObsOptions, offset, length []int64, ...) ([]*colstore.DetachedPKData, error)
- func ReadPKMetaAll(path string, opts *obs.ObsOptions, offset, length []int64) ([]*colstore.DetachedPKMeta, error)
- func ReadPKMetaInfoAll(path string, opts *obs.ObsOptions) (*colstore.DetachedPKMetaInfo, error)
- func ReadReliabilityLog(file string, dst interface{}) error
- func ReleaseColumnBuilder(b PreAggBuilder)
- func RemoveTsspSuffix(dataPath string) string
- func RenameIndexFiles(fname string, indexList []string) error
- func RenameTmpFiles(newFiles []TSSPFile) error
- func RenameTmpFilesWithPKIndex(newFiles []TSSPFile, ir *influxql.IndexRelation) error
- func RenameTmpFullTextIdxFile(msb *MsBuilder) error
- func ResetAggregateData(newRec *record.Record, ops []*comm.CallOption)
- func SaveReliabilityLog(data interface{}, dir string, lockFile string, nameGenerator func() string) (string, error)
- func SetCacheDataBlock(en bool)
- func SetCacheMetaData(en bool)
- func SetChunkMetaCompressMode(mode int)
- func SetCompactLimit(bytesPerSec int64, burstLimit int64)
- func SetCompactionEnabled(compactionEnabled bool)
- func SetDetachedFlushEnabled(detachFlushEnabled bool)
- func SetFragmentsNumPerFlush(fragmentsNumPerFlush int)
- func SetImmTableMaxMemoryPercentage(sysTotalMem, percentage int)
- func SetIndexCompressMode(mode int)
- func SetMaxCompactor(n int)
- func SetMaxFullCompactor(n int)
- func SetMaxRowsPerSegment4TsStore(maxRowsPerSegmentLimit int)
- func SetMaxSegmentLimit4TsStore(limit int)
- func SetMergeFlag4TsStore(v int32)
- func SetSnapshotLimit(bytesPerSec int64, burstLimit int64)
- func SetSnapshotTblNum(snapshotTblNum int)
- func SnapshotLimit() bool
- func SumFilesSize(files []TSSPFile) int64
- func TimeSorted(sortKeys []string) bool
- func UnrefFiles(files ...TSSPFile)
- func UnrefFilesReader(files ...TSSPFile)
- func UpdateChunkMetaFunc(_, _ cache.Entry) bool
- func UpdateDetachedMetaDataCache(old, new cache.Entry) bool
- func WriteIntoFile(msb *MsBuilder, tmp bool, withPKIndex bool, ir *influxql.IndexRelation) error
- type AccumulateMetaIndex
- type BaseFilterOptions
- type BloomFilterIterator
- type BooleanPreAgg
- type BufferReader
- type ChunkDataBuilder
- type ChunkIterator
- type ChunkIterators
- func (c *ChunkIterators) Close()
- func (c *ChunkIterators) Len() int
- func (c *ChunkIterators) Less(i, j int) bool
- func (c *ChunkIterators) Next() (uint64, *record.Record, error)
- func (c *ChunkIterators) Pop() interface{}
- func (c *ChunkIterators) Push(v interface{})
- func (c *ChunkIterators) Swap(i, j int)
- func (c *ChunkIterators) WithLog(log *Log.Logger)
- type ChunkMeta
- func (m *ChunkMeta) AllocColMeta(ref *record.Field) *ColumnMeta
- func (m *ChunkMeta) Clone() *ChunkMeta
- func (m *ChunkMeta) DelEmptyColMeta()
- func (m *ChunkMeta) GetColMeta() []ColumnMeta
- func (m *ChunkMeta) GetSid() (sid uint64)
- func (m *ChunkMeta) GetTimeRangeBy(index int) SegmentRange
- func (m *ChunkMeta) Len() int
- func (m *ChunkMeta) Less(i, j int) bool
- func (m *ChunkMeta) MinMaxTime() (min int64, max int64)
- func (m *ChunkMeta) Rows(ab PreAggBuilder) int
- func (m *ChunkMeta) SegmentCount() int
- func (m *ChunkMeta) Size() int
- func (m *ChunkMeta) Swap(i, j int)
- func (m *ChunkMeta) TimeMeta() *ColumnMeta
- func (m *ChunkMeta) UnmarshalWithColumns(src []byte, columns []string) ([]byte, error)
- type ChunkMetaContext
- type ChunkMetaEntry
- type ColAux
- type ColumnBuilder
- func (b *ColumnBuilder) BuildPreAgg()
- func (b *ColumnBuilder) EncodeColumn(ref record.Field, col *record.ColVal, timeCols []record.ColVal, ...) ([]byte, error)
- func (b *ColumnBuilder) EncodeColumnBySize(ref record.Field, col *record.ColVal, timeCols []record.ColVal, ...) ([]byte, error)
- func (b *ColumnBuilder) SetEncodeMode(detached bool)
- type ColumnIterator
- func (itr *ColumnIterator) Close()
- func (itr *ColumnIterator) Error() error
- func (itr *ColumnIterator) IncrChunkUsed()
- func (itr *ColumnIterator) IterCurrentChunk(p ColumnIteratorPerformer) error
- func (itr *ColumnIterator) NextChunkMeta() bool
- func (itr *ColumnIterator) NextColumn(colIdx int) (*record.Field, bool)
- func (itr *ColumnIterator) PutCol(col *record.ColVal)
- func (itr *ColumnIterator) Run(p ColumnIteratorPerformer) error
- type ColumnIteratorPerformer
- type ColumnMeta
- func (m *ColumnMeta) Clone() ColumnMeta
- func (m *ColumnMeta) Equal(name string, ty int) bool
- func (m *ColumnMeta) GetPreAgg() []byte
- func (m *ColumnMeta) GetSegment(i int) (int64, uint32)
- func (m *ColumnMeta) IsTime() bool
- func (m *ColumnMeta) Name() string
- func (m *ColumnMeta) RowCount(ref *record.Field, decs *ReadContext) (int64, error)
- func (m *ColumnMeta) Type() uint8
- type ColumnReader
- type CompactGroup
- type CompactGroupBuilder
- type CompactTask
- type CompactedFileInfo
- type Config
- func (c *Config) GetCompactionEnabled() bool
- func (c *Config) GetMaxRowsPerSegment() int
- func (c *Config) GetMaxSegmentLimit() int
- func (c *Config) SetExpectedSegmentSize(n uint32)
- func (c *Config) SetFilesLimit(n int64)
- func (c *Config) SetMaxRowsPerSegment(maxRowsPerSegmentLimit int)
- func (c *Config) SetMaxSegmentLimit(n int)
- type CsChunkDataImp
- func (c *CsChunkDataImp) EncodeChunk(b *ChunkDataBuilder, id uint64, offset int64, rec *record.Record, dst []byte, ...) ([]byte, error)
- func (c *CsChunkDataImp) EncodeChunkForCompaction(b *ChunkDataBuilder, offset int64, rec *record.Record, dst []byte, ...) ([]byte, error)
- func (c *CsChunkDataImp) SetAccumulateRowsIndex(accumulateRowsIndex []int)
- func (c *CsChunkDataImp) SetDetachedInfo(writeDetached bool)
- type DetachedChunkMetaReader
- type DetachedMetaDataReader
- type DetachedMetaIndexReader
- type DetachedPKDataReader
- type DetachedPKMetaInfoReader
- type DetachedPKMetaReader
- type DetachedSegmentEntry
- func (e *DetachedSegmentEntry) GetKey() string
- func (e *DetachedSegmentEntry) GetTime() time.Time
- func (e *DetachedSegmentEntry) GetValue() interface{}
- func (e *DetachedSegmentEntry) SetTime(time time.Time)
- func (e *DetachedSegmentEntry) SetValue(value interface{})
- func (e *DetachedSegmentEntry) Size() int64
- type EncodeChunkData
- type EncodeColumnMode
- type EngineShard
- type Event
- type EventBus
- type EventContext
- type EventType
- type Events
- func (es *Events) Finish(success bool, ctx EventContext)
- func (es *Events) Instance() *Events
- func (es *Events) Register(e Event)
- func (es *Events) TriggerNewFile(f TSSPFile)
- func (es *Events) TriggerReplaceFile(shardDir, lock string) error
- func (es *Events) TriggerWriteChunkMeta(cm *ChunkMeta)
- func (es *Events) TriggerWriteRecord(rec *record.Record)
- type FileInfoExtend
- type FileIterator
- type FileIterators
- type FileReader
- type FileReaderContext
- type FileSwapper
- type FilesInfo
- type FilterOptions
- type FirstLastReader
- type FloatPreAgg
- type FragmentIterator
- type FragmentIterators
- func (f *FragmentIterators) Close()
- func (f *FragmentIterators) CompareWithBreakPoint(curPos, breakPos int) bool
- func (f *FragmentIterators) IsEmpty() bool
- func (f *FragmentIterators) Len() int
- func (f *FragmentIterators) Less(i, j int) bool
- func (f *FragmentIterators) Pop() interface{}
- func (f *FragmentIterators) Push(v interface{})
- func (f *FragmentIterators) Swap(i, j int)
- func (f *FragmentIterators) WithLog(log *Log.Logger)
- type FragmentIteratorsPool
- type IdTimePairs
- func (p *IdTimePairs) Add(id uint64, tm int64)
- func (p *IdTimePairs) AddRowCounts(rowCounts int64)
- func (p *IdTimePairs) Len() int
- func (p *IdTimePairs) Marshal(encTimes bool, dst []byte, ctx *encoding.CoderContext) []byte
- func (p *IdTimePairs) Reset(name string)
- func (p *IdTimePairs) Unmarshal(decTimes bool, src []byte) ([]byte, error)
- type ImmTable
- type IndexCompressWriter
- func (w *IndexCompressWriter) BlockSize() int
- func (w *IndexCompressWriter) Close() error
- func (w *IndexCompressWriter) CopyTo(to io.Writer) (int, error)
- func (w *IndexCompressWriter) GetWriter() *bufio.Writer
- func (w *IndexCompressWriter) Init(name string, lock *string, cacheMeta bool, limitCompact bool)
- func (w *IndexCompressWriter) MetaDataBlocks(dst [][]byte) [][]byte
- func (w *IndexCompressWriter) Size() int
- func (w *IndexCompressWriter) SwitchMetaBuffer() (int, error)
- func (w *IndexCompressWriter) Write(p []byte) (int, error)
- type IndexFrags
- type IndexMergeSet
- type IndexWriter
- type IntegerPreAgg
- type IteratorByBlock
- type IteratorByRow
- type Location
- func (l *Location) AscendingDone()
- func (l *Location) Contains(sid uint64, tr util.TimeRange, ctx *ChunkMetaContext) (bool, error)
- func (l *Location) DescendingDone()
- func (l *Location) GetChunkMeta() *ChunkMeta
- func (l *Location) ReadData(filterOpts *FilterOptions, dst *record.Record, filterDst *record.Record) (*record.Record, error)
- func (l *Location) ResetMeta()
- func (l *Location) SetChunkMeta(chunkMeta *ChunkMeta)
- func (d *Location) SetClosedSignal(s *bool)
- func (l *Location) SetFragmentRanges(frs []*fragment.FragmentRange)
- type LocationCursor
- func (l *LocationCursor) AddFilterRecPool(pool *record.CircularRecordPool)
- func (l *LocationCursor) AddLocation(loc *Location)
- func (l *LocationCursor) AddRef()
- func (l *LocationCursor) Close()
- func (l *LocationCursor) FragmentCount() int
- func (l *LocationCursor) Len() int
- func (l *LocationCursor) Less(i, j int) bool
- func (l *LocationCursor) ReadData(filterOpts *FilterOptions, dst *record.Record, ...) (*record.Record, error)
- func (l *LocationCursor) ReadMeta(filterOpts *FilterOptions, dst *record.Record, ...) (*record.Record, error)
- func (l *LocationCursor) ReadOutOfOrderMeta(filterOpts *FilterOptions, dst *record.Record) (*record.Record, error)
- func (l *LocationCursor) Reset()
- func (l *LocationCursor) Reverse()
- func (l *LocationCursor) RowCount() int
- func (l *LocationCursor) Swap(i, j int)
- func (l *LocationCursor) Unref()
- type MatchAllOperator
- type MeasurementInProcess
- type MemBlock
- func (mb *MemBlock) AppendDataBlock(srcData []byte)
- func (mb *MemBlock) CopyBlocks(src MemoryReader)
- func (mb *MemBlock) DataBlocks() [][]byte
- func (mb *MemBlock) DataInMemory() bool
- func (mb *MemBlock) FreeMemory() int64
- func (mb *MemBlock) LoadIntoMemory(dr fileops.BasicFileReader, tr *Trailer, metaIndexItems []MetaIndex) error
- func (mb *MemBlock) MetaBlocks() [][]byte
- func (mb *MemBlock) MetaInMemory() bool
- func (mb *MemBlock) ReadChunkMetaBlock(metaIdx int, sid uint64, count uint32) []byte
- func (mb *MemBlock) ReadDataBlock(offset int64, size uint32, dstPtr *[]byte) ([]byte, error)
- func (mb *MemBlock) ReserveDataBlock(n int)
- func (mb *MemBlock) ReserveMetaBlock(n int)
- func (mb *MemBlock) Reset()
- func (mb *MemBlock) SetMetaBlocks(blocks [][]byte)
- func (mb *MemBlock) Size() int64
- type MemoryReader
- type MergeColPool
- type MergeContext
- func (ctx *MergeContext) AddUnordered(f TSSPFile)
- func (ctx *MergeContext) Limited() bool
- func (ctx *MergeContext) MergeSelf() bool
- func (ctx *MergeContext) MergeSelfFast() bool
- func (ctx *MergeContext) Release()
- func (ctx *MergeContext) Sort()
- func (ctx *MergeContext) ToLevel() uint16
- func (ctx *MergeContext) UnorderedLen() int
- func (ctx *MergeContext) UpdateLevel(l uint16)
- type MergePerformers
- func (c *MergePerformers) Close()
- func (c *MergePerformers) Closed() bool
- func (c *MergePerformers) Len() int
- func (c *MergePerformers) Less(i, j int) bool
- func (c *MergePerformers) Next() error
- func (c *MergePerformers) Pop() interface{}
- func (c *MergePerformers) Push(v interface{})
- func (c *MergePerformers) Release()
- func (c *MergePerformers) Swap(i, j int)
- type MergeSelf
- type MergeSelfParquetEvent
- type MetaControl
- type MetaData
- type MetaDataInfo
- type MetaDatas
- type MetaIndex
- type MetaQueue
- type MetaStack
- type MmsIdTime
- type MmsReaders
- type MmsTables
- func (m *MmsTables) AddBothTSSPFiles(flushed *bool, name string, orderFiles []TSSPFile, unorderFiles []TSSPFile)
- func (m *MmsTables) AddPKFile(mstName string, file string, rec *record.Record, mark fragment.IndexFragment, ...)
- func (m *MmsTables) AddRowCountsBySid(measurement string, sid uint64, rowCounts int64)
- func (m *MmsTables) AddTSSPFiles(name string, isOrder bool, files ...TSSPFile)
- func (m *MmsTables) AddTable(mb *MsBuilder, isOrder bool, tmp bool)
- func (m *MmsTables) Close() error
- func (m *MmsTables) CompactDone(files []string)
- func (m *MmsTables) CompactionDisable()
- func (m *MmsTables) CompactionEnable()
- func (m *MmsTables) CompactionEnabled() bool
- func (m *MmsTables) CopyCSFiles(name string) []TSSPFile
- func (m *MmsTables) DisableCompAndMerge()
- func (m *MmsTables) DropMeasurement(_ context.Context, name string) error
- func (m *MmsTables) EnableCompAndMerge()
- func (m *MmsTables) File(mstName string, fileName string, isOrder bool) TSSPFile
- func (m *MmsTables) FreeAllMemReader()
- func (m *MmsTables) FreeSequencer() bool
- func (m *MmsTables) FullCompact(shid uint64) error
- func (m *MmsTables) FullyCompacted() bool
- func (m *MmsTables) GetBothFilesRef(measurement string, hasTimeFilter bool, tr util.TimeRange, flushed *bool) ([]TSSPFile, []TSSPFile, bool)
- func (m *MmsTables) GetCSFiles(name string) (files *TSSPFiles, ok bool)
- func (m *MmsTables) GetFileSeq() uint64
- func (m *MmsTables) GetMstFileStat() *statistics.FileStat
- func (m *MmsTables) GetMstInfo(name string) (*meta.MeasurementInfo, bool)
- func (m *MmsTables) GetMstList(isOrder bool) []string
- func (m *MmsTables) GetObsOption() *obs.ObsOptions
- func (m *MmsTables) GetOutOfOrderFileNum() int
- func (m *MmsTables) GetPKFile(mstName string, file string) (pkInfo *colstore.PKInfo, ok bool)
- func (m *MmsTables) GetRowCountsBySid(measurement string, sid uint64) (int64, error)
- func (m *MmsTables) GetShardID() uint64
- func (m *MmsTables) GetTSSPFiles(name string, isOrder bool) (files *TSSPFiles, ok bool)
- func (m *MmsTables) GetTableFileNum(name string, order bool) int
- func (m *MmsTables) IsOutOfOrderFilesExist() bool
- func (m *MmsTables) LevelCompact(level uint16, shid uint64) error
- func (m *MmsTables) Listen(signal chan struct{}, onClose func())
- func (m *MmsTables) LoadSequencer()
- func (m *MmsTables) MergeDisable()
- func (m *MmsTables) MergeEnable()
- func (m *MmsTables) MergeEnabled() bool
- func (m *MmsTables) MergeOutOfOrder(shId uint64, full bool, force bool) error
- func (m *MmsTables) NewChunkIterators(group FilesInfo) *ChunkIterators
- func (m *MmsTables) NewStreamIterators(group FilesInfo) *StreamIterators
- func (m *MmsTables) NewStreamWriteFile(mst string) *StreamWriteFile
- func (m *MmsTables) NextSequence() uint64
- func (m *MmsTables) Open() (int64, error)
- func (m *MmsTables) ReloadSequencer(seq *Sequencer, async bool)
- func (m *MmsTables) RenameFileToLevel(plan *CompactGroup) error
- func (m *MmsTables) ReplaceDownSampleFiles(mstNames []string, originFiles [][]TSSPFile, newFiles [][]TSSPFile, ...) (err error)
- func (m *MmsTables) ReplaceFiles(name string, oldFiles, newFiles []TSSPFile, isOrder bool) (err error)
- func (m *MmsTables) ReplacePKFile(mstName string, file string, rec *record.Record, mark fragment.IndexFragment, ...) error
- func (m *MmsTables) Sequencer() *Sequencer
- func (m *MmsTables) SeriesTotal() uint64
- func (m *MmsTables) SetAccumulateMetaIndex(name string, aMetaIndex *AccumulateMetaIndex)
- func (m *MmsTables) SetAddFunc(addFunc func(int64))
- func (m *MmsTables) SetImmTableType(engineType config.EngineType)
- func (m *MmsTables) SetIndexMergeSet(idx IndexMergeSet)
- func (m *MmsTables) SetLockPath(lock *string)
- func (m *MmsTables) SetMstInfo(name string, mstInfo *meta.MeasurementInfo)
- func (m *MmsTables) SetObsOption(option *obs.ObsOptions)
- func (m *MmsTables) SetOpId(shardId uint64, opId uint64)
- func (m *MmsTables) SetTier(tier uint64)
- func (m *MmsTables) Tier() uint64
- func (m *MmsTables) Wait()
- type MsBuilder
- func (b *MsBuilder) BloomFilterNeedDetached(filterDetachedWriteTimes int) bool
- func (b *MsBuilder) CloseIndexWriters() error
- func (b *MsBuilder) FileVersion() uint64
- func (b *MsBuilder) Flush() error
- func (b *MsBuilder) GetChunkBuilder() *ChunkDataBuilder
- func (b *MsBuilder) GetFullTextIdx() bool
- func (b *MsBuilder) GetIndexBuilder() *index.IndexWriterBuilder
- func (b *MsBuilder) GetLocalBfCount() int64
- func (b *MsBuilder) GetPKInfoNum() int
- func (b *MsBuilder) GetPKMark(i int) fragment.IndexFragment
- func (b *MsBuilder) GetPKRecord(i int) *record.Record
- func (b *MsBuilder) MaxRowsPerSegment() int
- func (b *MsBuilder) Name() string
- func (b *MsBuilder) NewIndexWriterBuilder(schema record.Schemas, indexRelation influxql.IndexRelation)
- func (b *MsBuilder) NewPKIndexWriter()
- func (b *MsBuilder) NewTSSPFile(tmp bool) (TSSPFile, error)
- func (b *MsBuilder) Reset()
- func (b *MsBuilder) SetEncodeChunkDataImp(engineType config.EngineType)
- func (b *MsBuilder) SetFullTextIdx(fullTextIdx bool)
- func (b *MsBuilder) SetLocalBfCount(count int64)
- func (b *MsBuilder) SetTCLocation(tcLocation int8)
- func (b *MsBuilder) SetTimeSorted(timeSorted bool)
- func (b *MsBuilder) Size() int64
- func (b *MsBuilder) StoreTimes()
- func (b *MsBuilder) SwitchChunkMeta() error
- func (b *MsBuilder) WithLog(log *logger.Logger)
- func (b *MsBuilder) WriteChunkMeta(cm *ChunkMeta) (int, error)
- func (b *MsBuilder) WriteData(id uint64, data *record.Record) error
- func (b *MsBuilder) WriteDetached(id uint64, data *record.Record, pkSchema record.Schemas, firstFlush bool, ...) error
- func (b *MsBuilder) WriteDetachedIndex(writeRec *record.Record, rowsPerSegment []int) error
- func (b *MsBuilder) WriteDetachedMetaAndIndex(writeRec *record.Record, pkSchema record.Schemas, firstFlush bool, ...) error
- func (b *MsBuilder) WriteRecord(id uint64, data *record.Record, ...) (*MsBuilder, error)
- func (b *MsBuilder) WriteRecordByCol(id uint64, data *record.Record, schema record.Schemas, ...) (*MsBuilder, error)
- type PageCacheReader
- func (pcr *PageCacheReader) GetCachePageIdsAndOffsets(start int64, size uint32) ([]int64, []int64, error)
- func (pcr *PageCacheReader) GetMaxPageIdAndOffset() (int64, int64)
- func (pcr *PageCacheReader) Init()
- func (pcr *PageCacheReader) Read(offset int64, size uint32, buf *[]byte, ioPriority int) ([]byte, *readcache.CachePage, error)
- func (pcr *PageCacheReader) ReadFixPageSize(offset int64, size uint32, buf *[]byte, ioPriority int) ([]byte, *readcache.CachePage, error)
- func (pcr *PageCacheReader) ReadSinglePage(cacheKey string, pageOffset int64, pageSize int64, buf *[]byte, ioPriority int) (*readcache.CachePage, []byte, error)
- func (pcr *PageCacheReader) ReadVariablePageSize(offset int64, size uint32, buf *[]byte, ioPriority int) ([]byte, *readcache.CachePage, error)
- type ParquetTask
- type PreAggBuilder
- type PreAggBuilders
- type QueryfileCache
- type ReadContext
- func (d *ReadContext) GetCoder() *encoding.CoderContext
- func (d *ReadContext) GetOps() []*comm.CallOption
- func (d *ReadContext) GetReadBuff() []byte
- func (d *ReadContext) InitPreAggBuilder()
- func (d *ReadContext) IsAborted() bool
- func (d *ReadContext) MatchPreAgg() bool
- func (d *ReadContext) Release()
- func (d *ReadContext) Reset()
- func (d *ReadContext) Set(ascending bool, tr util.TimeRange, onlyFirstOrLast bool, ...)
- func (d *ReadContext) SetClosedSignal(s *bool)
- func (d *ReadContext) SetOps(c []*comm.CallOption)
- func (d *ReadContext) SetSpan(readSpan, filterSpan *tracing.Span)
- func (d *ReadContext) SetTr(tr util.TimeRange)
- type Segment
- type SegmentMeta
- type SegmentRange
- type SegmentReader
- type SegmentSequenceReader
- type SegmentTask
- type SequenceIterator
- type SequenceIteratorChunkMetaReader
- type SequenceIteratorHandler
- type Sequencer
- func (s *Sequencer) AddRowCounts(mn string, id uint64, rowCounts int64)
- func (s *Sequencer) BatchUpdateCheckTime(p *IdTimePairs, incrRows bool)
- func (s *Sequencer) DelMmsIdTime(name string)
- func (s *Sequencer) Get(mn string, id uint64) (lastFlushTime, rowCnt int64)
- func (s *Sequencer) GetMmsIdTime(name string) *MmsIdTime
- func (s *Sequencer) IsLoading() bool
- func (s *Sequencer) ResetMmsIdTime()
- func (s *Sequencer) SeriesTotal() uint64
- func (s *Sequencer) SetStat(free, loading bool)
- func (s *Sequencer) SetToInLoading() bool
- func (s *Sequencer) UnRef()
- type SeriesCounter
- type ShowTagValuesPlan
- type SortKeyIterator
- type SortLimitCursor
- func (t *SortLimitCursor) Close() error
- func (t *SortLimitCursor) EndSpan()
- func (t *SortLimitCursor) GetInput() comm.TimeCutKeyCursor
- func (t *SortLimitCursor) GetSchema() record.Schemas
- func (t *SortLimitCursor) Name() string
- func (t *SortLimitCursor) Next() (*record.Record, comm.SeriesInfoIntf, error)
- func (t *SortLimitCursor) NextAggData() (*record.Record, *comm.FileInfo, error)
- func (t *SortLimitCursor) SetOps(ops []*comm.CallOption)
- func (t *SortLimitCursor) SinkPlan(plan hybridqp.QueryNode)
- func (t *SortLimitCursor) StartSpan(span *tracing.Span)
- type SortLimitRows
- type StreamCompactParquetEvent
- type StreamIterator
- type StreamIterators
- func (c *StreamIterators) Close()
- func (c *StreamIterators) FileVersion() uint64
- func (c *StreamIterators) Flush() error
- func (c *StreamIterators) Init(id uint64, chunkDataOffset int64, schema record.Schemas)
- func (c *StreamIterators) InitEvents(level uint16) *Events
- func (c *StreamIterators) Len() int
- func (c *StreamIterators) Less(i, j int) bool
- func (c *StreamIterators) ListenCloseSignal(finish chan struct{})
- func (c *StreamIterators) NewFile(addFileExt bool) error
- func (c *StreamIterators) NewTSSPFile(tmp bool) (TSSPFile, error)
- func (c *StreamIterators) Pop() interface{}
- func (c *StreamIterators) Push(v interface{})
- func (c *StreamIterators) RemoveTmpFiles()
- func (c *StreamIterators) SetWriter(w fileops.FileWriter)
- func (c *StreamIterators) Size() int64
- func (c *StreamIterators) Swap(i, j int)
- func (c *StreamIterators) SwitchChunkMeta() error
- func (c *StreamIterators) WithLog(log *Log.Logger)
- func (c *StreamIterators) WriteChunkMeta(cm *ChunkMeta) (int, error)
- type StreamIteratorsPool
- type StreamWriteFile
- func (c *StreamWriteFile) AppendColumn(ref *record.Field) error
- func (c *StreamWriteFile) ChangeColumn(ref record.Field) error
- func (c *StreamWriteFile) ChangeSid(sid uint64)
- func (c *StreamWriteFile) Close(isError bool)
- func (c *StreamWriteFile) Flush() error
- func (c *StreamWriteFile) GetTSSPFile() TSSPFile
- func (c *StreamWriteFile) Init(id uint64, chunkDataOffset int64, schema record.Schemas)
- func (c *StreamWriteFile) InitFile(seq uint64) error
- func (c *StreamWriteFile) InitMergedFile(f TSSPFile) error
- func (c *StreamWriteFile) NewFile(addFileExt bool) error
- func (c *StreamWriteFile) NewTSSPFile(tmp bool) (TSSPFile, error)
- func (c *StreamWriteFile) SetValidate(en bool)
- func (c *StreamWriteFile) Size() int64
- func (c *StreamWriteFile) SortColumns()
- func (c *StreamWriteFile) SwitchChunkMeta() error
- func (c *StreamWriteFile) WriteChunkMeta(cm *ChunkMeta) (int, error)
- func (c *StreamWriteFile) WriteCurrentMeta() error
- func (c *StreamWriteFile) WriteData(id uint64, ref record.Field, col record.ColVal, timeCol *record.ColVal) error
- func (c *StreamWriteFile) WriteFile() error
- func (c *StreamWriteFile) WriteMeta(cm *ChunkMeta) error
- type StringPreAgg
- type TSSP2ParquetEvent
- func (e *TSSP2ParquetEvent) Enable() bool
- func (e *TSSP2ParquetEvent) Init(mst string, level uint16)
- func (e *TSSP2ParquetEvent) OnFinish(ctx EventContext)
- func (e *TSSP2ParquetEvent) OnInterrupt()
- func (e *TSSP2ParquetEvent) OnNewFile(f TSSPFile)
- func (e *TSSP2ParquetEvent) OnReplaceFile(shardDir string, lockFile string) error
- type TSSP2ParquetPlan
- type TSSPFile
- type TSSPFileAttachedReader
- func (t *TSSPFileAttachedReader) Close() error
- func (t *TSSPFileAttachedReader) EndSpan()
- func (t *TSSPFileAttachedReader) GetSchema() record.Schemas
- func (t *TSSPFileAttachedReader) Name() string
- func (t *TSSPFileAttachedReader) Next() (*record.Record, comm.SeriesInfoIntf, error)
- func (t *TSSPFileAttachedReader) NextAggData() (*record.Record, *comm.FileInfo, error)
- func (t *TSSPFileAttachedReader) ResetBy(files []TSSPFile, fragRanges []fragment.FragmentRanges) error
- func (t *TSSPFileAttachedReader) SetOps(ops []*comm.CallOption)
- func (t *TSSPFileAttachedReader) SinkPlan(plan hybridqp.QueryNode)
- func (t *TSSPFileAttachedReader) StartSpan(span *tracing.Span)
- type TSSPFileDetachedReader
- func (t *TSSPFileDetachedReader) Close() error
- func (t *TSSPFileDetachedReader) EndSpan()
- func (t *TSSPFileDetachedReader) GetSchema() record.Schemas
- func (t *TSSPFileDetachedReader) Name() string
- func (t *TSSPFileDetachedReader) Next() (*record.Record, comm.SeriesInfoIntf, error)
- func (t *TSSPFileDetachedReader) NextAggData() (*record.Record, *comm.FileInfo, error)
- func (t *TSSPFileDetachedReader) ResetBy(metaIndex []*MetaIndex, blocks [][]int, ctx *FileReaderContext)
- func (t *TSSPFileDetachedReader) SetOps(ops []*comm.CallOption)
- func (t *TSSPFileDetachedReader) SinkPlan(plan hybridqp.QueryNode)
- func (t *TSSPFileDetachedReader) StartSpan(span *tracing.Span)
- func (t *TSSPFileDetachedReader) UpdateTime(time int64)
- type TSSPFileName
- func (n *TSSPFileName) Equal(other *TSSPFileName) bool
- func (n *TSSPFileName) ParseFileName(name string) error
- func (n *TSSPFileName) Path(dir string, tmp bool) string
- func (n *TSSPFileName) SetExtend(extend uint16)
- func (n *TSSPFileName) SetLevel(l uint16)
- func (n *TSSPFileName) SetMerge(merge uint16)
- func (n *TSSPFileName) SetOrder(v bool)
- func (n *TSSPFileName) SetSeq(seq uint64)
- func (n *TSSPFileName) String() string
- func (n *TSSPFileName) TmpPath(dir string) string
- type TSSPFiles
- func (f *TSSPFiles) Append(file ...TSSPFile)
- func (f *TSSPFiles) Files() []TSSPFile
- func (f *TSSPFiles) Len() int
- func (f *TSSPFiles) Less(i, j int) bool
- func (f *TSSPFiles) MaxMerged() uint16
- func (f *TSSPFiles) RLock()
- func (f *TSSPFiles) RUnlock()
- func (f *TSSPFiles) StopFiles()
- func (f *TSSPFiles) Swap(i, j int)
- type TableData
- type TableReaders
- type TableStat
- type TableStoreGC
- type TablesGC
- type TablesStore
- type TagSets
- type TagValuesIteratorHandler
- func (h *TagValuesIteratorHandler) Begin()
- func (h *TagValuesIteratorHandler) Finish()
- func (h *TagValuesIteratorHandler) Init(param map[string]interface{}) error
- func (h *TagValuesIteratorHandler) Limited() bool
- func (h *TagValuesIteratorHandler) NextChunkMeta(buf []byte) error
- func (h *TagValuesIteratorHandler) NextFile(TSSPFile)
- type TimePreAgg
- type Trailer
- func (t *Trailer) ContainsId(id uint64) bool
- func (t *Trailer) ContainsTime(tm util.TimeRange) bool
- func (t *Trailer) DataSize() int64
- func (t *Trailer) EqualData(idx int, v byte) bool
- func (t *Trailer) GetData(idx int, def uint8) uint8
- func (t *Trailer) IndexSize() int64
- func (t *Trailer) MetaIndexItemNum() int64
- func (t *Trailer) MetaIndexSize() int64
- func (t *Trailer) SetChunkMetaCompressFlag()
- func (t *Trailer) SetData(idx int, v byte)
- type TsChunkDataImp
- func (t *TsChunkDataImp) EncodeChunk(b *ChunkDataBuilder, id uint64, offset int64, rec *record.Record, dst []byte, ...) ([]byte, error)
- func (t *TsChunkDataImp) EncodeChunkForCompaction(b *ChunkDataBuilder, offset int64, rec *record.Record, dst []byte, ...) ([]byte, error)
- func (t *TsChunkDataImp) SetAccumulateRowsIndex(accumulateRowsIndex []int)
- func (t *TsChunkDataImp) SetDetachedInfo(writeDetached bool)
- type UnnestOperator
- type UnorderedColumn
- type UnorderedColumnReader
- func (r *UnorderedColumnReader) ChangeColumn(sid uint64, ref *record.Field)
- func (r *UnorderedColumnReader) ChangeSeries(sid uint64) error
- func (r *UnorderedColumnReader) Close()
- func (r *UnorderedColumnReader) HasColumn() bool
- func (r *UnorderedColumnReader) MatchSeries(sid uint64) bool
- func (r *UnorderedColumnReader) Read(sid uint64, maxTime int64) (*record.ColVal, []int64, error)
- func (r *UnorderedColumnReader) ReadSchemas(sid uint64, maxTime int64, dst map[string]record.Field)
- func (r *UnorderedColumnReader) ReadTime(sid uint64, maxTime int64) []int64
- type UnorderedColumns
- func (c *UnorderedColumns) ChangeColumn(name string) *UnorderedColumn
- func (c *UnorderedColumns) GetLineOffset(name string) int
- func (c *UnorderedColumns) GetSegOffset(name string) int
- func (c *UnorderedColumns) IncrLineOffset(name string, n int)
- func (c *UnorderedColumns) IncrSegOffset(name string, n int)
- func (c *UnorderedColumns) Init(cm *ChunkMeta)
- func (c *UnorderedColumns) ReadCompleted() bool
- func (c *UnorderedColumns) SetRemainLine(n int)
- func (c *UnorderedColumns) TimeMeta() *ColumnMeta
- func (c *UnorderedColumns) Walk(callback func(meta *ColumnMeta))
- type UnorderedReader
- func (r *UnorderedReader) AddFiles(files []TSSPFile)
- func (r *UnorderedReader) AllocNilCol(size int, ref *record.Field) *record.ColVal
- func (r *UnorderedReader) ChangeColumn(sid uint64, ref *record.Field)
- func (r *UnorderedReader) ChangeSeries(sid uint64) error
- func (r *UnorderedReader) Close()
- func (r *UnorderedReader) CloseFile()
- func (r *UnorderedReader) HasSeries(sid uint64) bool
- func (r *UnorderedReader) InitTimes(sid uint64, maxTime int64)
- func (r *UnorderedReader) Read(sid uint64, maxTime int64) (*record.ColVal, []int64, error)
- func (r *UnorderedReader) ReadAllTimes() []int64
- func (r *UnorderedReader) ReadRemain(sid uint64, cb remainCallback) error
- func (r *UnorderedReader) ReadSeriesSchemas(sid uint64, maxTime int64) record.Schemas
- func (r *UnorderedReader) ReadTimes(maxTime int64) []int64
- type UnorderedReaderContext
Constants ¶
const (
	ChunkMetaSize int64 = 128 * 1024 * 1024
	ChunkMetaTTL        = 60 * time.Minute
)
const (
	QueryMetaCacheTTL            = 10 * time.Minute
	QueryMetaDataCacheSize int64 = 50 * 1024 * 1024 * int64(MetaIndexLen+128*int(unsafe.Sizeof(fragment.FragmentRange{})))
)
const (
	MetaIndexLimitNum         = 16
	MetaIndexHeaderSize int64 = 16
	MetaIndexItemSize         = int64(util.Int64SizeBytes*3 + util.Uint64SizeBytes + util.Uint32SizeBytes)
)
const (
	PKMetaLimitNum   = 16
	PKMetaPrefixSize = util.Uint64SizeBytes*2 + util.Uint32SizeBytes*2
)
const (
	PKMetaInfoLength int64 = 12
	PkMetaHeaderSize       = int64(util.Uint32SizeBytes * 2)
)
const (
	PRELOAD = iota
	LOAD
)
const (
	SwapperCompressNone   = 0
	SwapperCompressSnappy = 1
	SwapperCompressZSTD   = 2
)
const (
	DefaultLevelMergeFileNum  = 4
	MergeSelfFastModeMaxLevel = 2
)
const (
	META_DATA_N_BYTES  int32 = 8 + 8 + 8 + 8 + 8 + 4 + 4
	META_STORE_N_BYTES int32 = META_DATA_N_BYTES + 4
	META_DATA_SIZE     int32 = META_STORE_N_BYTES + 4
)
const (
	MinMaxTimeLen        = int(unsafe.Sizeof(SegmentRange{}))
	MetaIndexLen         = int(unsafe.Sizeof(MetaIndex{}))
	DetachedMetaIndexLen = int(unsafe.Sizeof(MetaIndex{}) - 4) //count not use
)
const (
	InitParamKeyDst         string = "dst"
	InitParamKeyKeys        string = "keys"
	InitParamKeyMeasurement string = "measurement"
)
const (
	BLOOMFILTER_SIZE         = 8
	SERIESKEY_STATISTIC_SIZE = 24
	COMPRESSION_RATIO        = 2
)
const (
	IndexOfTimeStoreFlag         = 0
	IndexOfChunkMetaCompressFlag = 1
	TimeStoreFlag                = 1
)
const (
	ChunkMetaReadNum     = 16
	BatchReaderRecordNum = 8

	ReaderContentNumSpan  = "reader_content_num_span"
	ReaderContentSizeSpan = "reader_content_size_span"
	ReaderContentDuration = "reader_content_duration"
	ReaderFilterDuration  = "reader_filter_duration"
)
const (
	ChunkMetaCompressNone   = 0
	ChunkMetaCompressSnappy = 1
	ChunkMetaCompressLZ4    = 2
	ChunkMetaCompressEnd    = 3
)
const (
	DownSampleLogDir   = "downsample_log"
	ShardMoveLogDir    = "shard_move_log"
	TsspDirName        = "tssp"
	ColumnStoreDirName = obs.ColumnStoreDirName
	CountBinFile       = "count.txt"
	CapacityBinFile    = "capacity.txt"
)
const (
	DataFile        = "segment.bin"
	ChunkMetaFile   = "segment.meta"
	MetaIndexFile   = "segment.idx"
	PrimaryKeyFile  = "primary.idx"
	PrimaryMetaFile = "primary.meta"
)
const (
CompactLevels = 7
)
const (
FD_OUTSIDE uint32 = 0x00001
)
const MetaIndexConsumeNum = 16
const (
MetaIndexSegmentNum = 16
)
const PKDataLimitNum = 16
const (
SortLimitCursorDuration = "sort_limit_cursor_duration"
)
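The directory and file-name constants above describe the on-disk layout of the store. A minimal sketch of how they might be combined; the shard path and measurement directory are hypothetical, and filepath/fmt are assumed imports:

func ExampleLayout() {
	shardDir := "/data/db0/1/rp0/1_0_0" // hypothetical shard directory
	tsspDir := filepath.Join(shardDir, TsspDirName)
	// column-store detached files use the segment.* names
	segment := filepath.Join(tsspDir, "mst_0000", DataFile)
	fmt.Println(tsspDir, segment)
}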
Variables ¶
var (
	ErrCompStopped       = errors.New("compact stopped")
	ErrDownSampleStopped = errors.New("downSample stopped")
	ErrDroppingMst       = errors.New("measurement is dropped")

	LevelCompactRule      = []uint16{0, 1, 0, 2, 0, 3, 0, 1, 2, 3, 0, 4, 0, 5, 0, 1, 2, 6}
	LevelCompactRuleForCs = []uint16{0, 1, 0, 1, 0, 1} // columnStore currently only doing level 0 and level 1 compaction, but the full functionality is available
	LeveLMinGroupFiles    = [CompactLevels]int{8, 4, 4, 4, 4, 4, 2}

	EnableMergeOutOfOrder = true
)
var (
	SegmentLen       = (Segment{}).bytes()
	ColumnMetaLenMin = (ColumnMeta{}).bytes(1)
	ChunkMetaMinLen  = (&ChunkMeta{}).minBytes()
)
var (
CLog = Log.NewLogger(errno.ModuleCompact)
)
var ChunkMetaCache = cache.NewCache(ChunkMetaSize, ChunkMetaTTL)
var DetachedMetaDataCache = cache.NewCache(QueryMetaDataCacheSize, QueryMetaCacheTTL)
var ErrDirtyLog = errors.New("incomplete log file")
var (
LevelMergeFileNum = []int{8, 8}
)
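A sketch of how callers might test for the sentinel errors above; err stands for any error returned by a compaction or merge entry point:

if errors.Is(err, ErrCompStopped) || errors.Is(err, ErrDownSampleStopped) {
	return nil // the task was interrupted by shutdown, not a real failure
}
if errors.Is(err, ErrDirtyLog) {
	// an incomplete reliability log: skip this entry during recovery
}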
Functions ¶
func AggregateData ¶
func AggregateData(newRec, baseRec *record.Record, ops []*comm.CallOption) bool
func CacheDataInMemory ¶
func CacheDataInMemory() bool
func CacheMetaInMemory ¶
func CacheMetaInMemory() bool
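These two getters pair with SetCacheMetaData and SetCacheDataBlock (listed further below). A hedged sketch of meta-only caching; the chosen values are illustrative, not recommendations:

SetCacheMetaData(true)   // keep chunk-meta blocks of TSSP files in memory
SetCacheDataBlock(false) // leave data blocks on disk
if CacheMetaInMemory() && !CacheDataInMemory() {
	// meta-only mode: lower RAM use at the cost of data-block reads
}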
func CanEncodeOneRowMode ¶ added in v1.2.0
func CanEncodeOneRowMode(col *record.ColVal) bool
func CleanTempFile ¶ added in v1.3.0
func CleanTempFile(f fileops.File)
func CompactRecovery ¶
func CompactRecovery(path string, group *CompactGroup)
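Judging by its signature, CompactRecovery looks like a deferred panic handler for compaction workers. A sketch under that assumption; the surrounding worker function is hypothetical:

func compactWorker(path string, group *CompactGroup) {
	// Assumption: a deferred CompactRecovery logs a panic raised while
	// compacting this group instead of crashing the whole store.
	defer CompactRecovery(path, group)
	// ... run the compaction for this group ...
}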
func CreateTSSPFileReader ¶
func DecodeColumnHeader ¶ added in v1.2.0
func DecodeColumnHeader(col *record.ColVal, data []byte, colType uint8) ([]byte, []byte, error)
func DecodeColumnOfOneValue ¶ added in v1.2.0
func DecodeColumnOfOneValue(data []byte, col *record.ColVal, typ uint8)
func EncodeColumnHeader ¶ added in v1.2.0
func EncodeColumnHeader(col *record.ColVal, dst []byte, typ uint8) []byte
func EstimateBufferSize ¶
func EstimateBufferSize(recSize int, rows int) int
func FileOperation ¶ added in v1.0.0
func FileOperation(f TSSPFile, op func())
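FileOperation appears to run a callback against a TSSP file; presumably it holds a reference for the duration of op so a concurrent compaction cannot drop the file mid-call. A sketch under that assumption:

FileOperation(f, func() {
	// safe to touch f here; the reference is assumed held until the
	// callback returns
	_ = f.Path() // hypothetical accessor, for illustration only
})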
func FilterByField ¶
func FilterByFieldFuncs ¶ added in v1.1.0
func FilterByFieldFuncs(rec, filterRec *record.Record, filterOption *BaseFilterOptions, filterBitmap *bitmap.FilterBitmap) *record.Record
func FilterByOpts ¶ added in v1.0.1
func FilterByOpts(rec *record.Record, opt *FilterOptions) *record.Record
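FilterByOpts combines the field and time filters; the time-only variants FilterByTime and FilterByTimeDescend (see the index) can be called directly. An illustrative fragment, with start and end as example nanosecond bounds:

tr := util.TimeRange{Min: start, Max: end}
rec = FilterByTime(rec, tr)           // ascending data
// rec = FilterByTimeDescend(rec, tr) // descending data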
func FilterByTimeDescend ¶
func FilterByTimeDescend(rec *record.Record, tr util.TimeRange) *record.Record
func FlushRemoteEnabled ¶ added in v1.3.0
func FlushRemoteEnabled(tier uint64) bool
func GenFixRowsPerSegment ¶ added in v1.2.0
func GenFixRowsPerSegment(data *record.Record, rowNumPerSegment int) []int
func GenLogFileName ¶ added in v1.0.0
func GenLogFileName(logSeq *uint64) string
func GenParquetLogName ¶ added in v1.3.0
func GenParquetLogName() string
func GenRecByReserveIds ¶ added in v1.2.0
func GenRecByReserveIds(rec, filterRec *record.Record, rowNumber []int, redIdxMap map[int]struct{}) *record.Record
func GetBloomFilterBuf ¶ added in v1.2.0
func GetBloomFilterBuf() *bloomFilter
func GetChunkMetaCompressMode ¶ added in v1.2.0
func GetChunkMetaCompressMode() uint8
func GetCursorsBy ¶ added in v1.3.0
func GetCursorsBy(path *sparseindex.OBSFilterPath, tr util.TimeRange, isAscending bool) (int, uint64, error)
func GetDetachedFlushEnabled ¶ added in v1.3.0
func GetDetachedFlushEnabled() bool
func GetMaxRowsPerSegment4TsStore ¶ added in v1.1.0
func GetMaxRowsPerSegment4TsStore() int
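The ts-store segment size is process-global: SetMaxRowsPerSegment4TsStore (below) writes it and GetMaxRowsPerSegment4TsStore reads it back, for example when sizing encode buffers. Example values only, and recSize is a hypothetical record size:

SetMaxRowsPerSegment4TsStore(8 * 1024)
rows := GetMaxRowsPerSegment4TsStore()
buf := make([]byte, EstimateBufferSize(recSize, rows))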
func GetMergeFlag4TsStore ¶ added in v1.1.0
func GetMergeFlag4TsStore() int32
func GetMetaIndexChunkCount ¶ added in v1.2.0
func GetMetaIndexChunkCount(obsOptions *obs.ObsOptions, dataPath string) (int64, error)
func GetMetaIndexOffsetAndLengthByChunkId ¶ added in v1.2.0
func GetMetaIndexOffsetAndLengthByChunkId(chunkId int64) (offset, length int64)
func GetPKItems ¶ added in v1.3.0
func GetPKItems(path string, obsOpts *obs.ObsOptions, miChunkIds []int64) (*colstore.DetachedPKMetaInfo, []*colstore.DetachedPKInfo, error)
func GetPKMetaOffsetLengthByChunkId ¶ added in v1.2.0
func GetPKMetaOffsetLengthByChunkId(pkMetaInfo *colstore.DetachedPKMetaInfo, chunkId int) (offset, length int64)
func GetSortKeyColVal ¶ added in v1.1.1
func GetTmpFileSuffix ¶ added in v1.1.0
func GetTmpFileSuffix() string
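GetTmpFileSuffix returns the extension used for in-progress files, and IsTempleFile (note: the identifier really is spelled "Temple") reports whether a name carries it. A sketch of a cleanup check, with f as the opened fileops.File for name:

if IsTempleFile(name) {
	// a leftover temporary file from an interrupted write
	CleanTempFile(f)
}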
func InParquetProcess ¶ added in v1.3.0
func InParquetProcess(files ...string) bool
func InitDecFunctions ¶
func InitDecFunctions()
func InitQueryFileCache ¶ added in v1.1.0
func InitQueryFileCache(cap uint32, enable bool)
func InitWriterPool ¶ added in v1.1.0
func InitWriterPool(size int)
func IsFlushToFinalFile ¶ added in v1.2.0
func IsFlushToFinalFile(totalSegmentCnt, flushToFinalFileLimit uint64) bool
func IsInterfaceNil ¶
func IsInterfaceNil(value interface{}) bool
func IsTempleFile ¶
func IsTempleFile(name string) bool
func MergeRecovery ¶
func MergeRecovery(path string, name string, ctx *MergeContext)
func NewCsImmTableImpl ¶ added in v1.2.0
func NewCsImmTableImpl() *csImmTableImpl
func NewLastMergeTime ¶ added in v1.0.0
func NewLastMergeTime() *lastMergeTime
func NewMemReaderEvictCtx ¶
func NewMemReaderEvictCtx() *memReaderEvictCtx
func NewMergePerformer ¶ added in v1.0.0
func NewMergePerformer(ur *UnorderedReader, stat *statistics.MergeStatItem) *mergePerformer
func NewObsWriter ¶ added in v1.2.0
func NewObsWriter(path, fileName string, obsOpts *obs.ObsOptions) (*obsWriter, error)
func NewObsWriterByFd ¶ added in v1.2.0
func NewObsWriterByFd(fd fileops.File, obsOpts *obs.ObsOptions) (*obsWriter, error)
func NewTSSPFileReader ¶
func NewTSSPFileReader(name string, lockPath *string) (*tsspFileReader, error)
func NewTsImmTable ¶ added in v1.2.0
func NewTsImmTable() *tsImmTableImpl
func NonStreamingCompaction ¶
func NonStreamingCompaction(fi FilesInfo) bool
func PreAggOnlyOneRow ¶ added in v1.2.0
func PreAggOnlyOneRow(buf []byte) bool
func ProcParquetLog ¶ added in v1.3.0
func ProcParquetLog(logDir string, lockPath *string, ctx EventContext) error
func PutBloomFilterBuf ¶ added in v1.2.0
func PutBloomFilterBuf(key *bloomFilter)
func PutChunkMeta ¶ added in v1.2.0
func PutChunkMeta(filePath string, chunkMeta *ChunkMeta)
func PutDetachedSegmentTask ¶ added in v1.2.0
func PutDetachedSegmentTask(queryID string, meta IndexFrags)
func PutIDTimePairs ¶
func PutIDTimePairs(pair *IdTimePairs)
func ReadPKDataAll ¶ added in v1.3.0
func ReadPKDataAll(path string, opts *obs.ObsOptions, offset, length []int64, meta []*colstore.DetachedPKMeta, info *colstore.DetachedPKMetaInfo) ([]*colstore.DetachedPKData, error)
func ReadPKMetaAll ¶ added in v1.3.0
func ReadPKMetaAll(path string, opts *obs.ObsOptions, offset, length []int64) ([]*colstore.DetachedPKMeta, error)
func ReadPKMetaInfoAll ¶ added in v1.3.0
func ReadPKMetaInfoAll(path string, opts *obs.ObsOptions) (*colstore.DetachedPKMetaInfo, error)
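The ReadPK* helpers above read the detached primary-key index in stages. A hedged sketch of the assumed flow; chunkIds is a hypothetical slice of meta-index chunk ids:

info, err := ReadPKMetaInfoAll(path, opts)
if err != nil {
	return err
}
var offs, lens []int64
for _, id := range chunkIds {
	off, n := GetPKMetaOffsetLengthByChunkId(info, id)
	offs, lens = append(offs, off), append(lens, n)
}
metas, err := ReadPKMetaAll(path, opts, offs, lens)
// the PK data offsets passed to ReadPKDataAll are presumably derived from metas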
func ReadReliabilityLog ¶ added in v1.3.0
func ReadReliabilityLog(file string, dst interface{}) error
func ReleaseColumnBuilder ¶
func ReleaseColumnBuilder(b PreAggBuilder)
func RemoveTsspSuffix ¶ added in v1.1.0
func RemoveTsspSuffix(dataPath string) string
func RenameIndexFiles ¶ added in v1.2.0
func RenameIndexFiles(fname string, indexList []string) error
func RenameTmpFiles ¶
func RenameTmpFiles(newFiles []TSSPFile) error
func RenameTmpFilesWithPKIndex ¶ added in v1.1.0
func RenameTmpFilesWithPKIndex(newFiles []TSSPFile, ir *influxql.IndexRelation) error
func RenameTmpFullTextIdxFile ¶ added in v1.2.0
func RenameTmpFullTextIdxFile(msb *MsBuilder) error
func ResetAggregateData ¶
func ResetAggregateData(newRec *record.Record, ops []*comm.CallOption)
func SaveReliabilityLog ¶ added in v1.3.0
func SaveReliabilityLog(data interface{}, dir string, lockFile string, nameGenerator func() string) (string, error)
func SetCacheDataBlock ¶
func SetCacheDataBlock(en bool)
func SetCacheMetaData ¶
func SetCacheMetaData(en bool)
func SetChunkMetaCompressMode ¶ added in v1.2.0
func SetChunkMetaCompressMode(mode int)
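The mode values are the ChunkMetaCompress* constants listed under Constants. A minimal round trip:

SetChunkMetaCompressMode(ChunkMetaCompressSnappy)
mode := GetChunkMetaCompressMode() // 1, i.e. ChunkMetaCompressSnappy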
func SetCompactLimit ¶
func SetCompactLimit(bytesPerSec int64, burstLimit int64)
func SetCompactionEnabled ¶ added in v1.2.0
func SetCompactionEnabled(compactionEnabled bool)
func SetDetachedFlushEnabled ¶ added in v1.2.0
func SetDetachedFlushEnabled(detachFlushEnabled bool)
func SetFragmentsNumPerFlush ¶ added in v1.1.1
func SetFragmentsNumPerFlush(fragmentsNumPerFlush int)
func SetImmTableMaxMemoryPercentage ¶
func SetImmTableMaxMemoryPercentage(sysTotalMem, percentage int)
func SetIndexCompressMode ¶ added in v1.2.0
func SetIndexCompressMode(mode int)
func SetMaxCompactor ¶
func SetMaxCompactor(n int)
func SetMaxFullCompactor ¶
func SetMaxFullCompactor(n int)
func SetMaxRowsPerSegment4TsStore ¶ added in v1.1.0
func SetMaxRowsPerSegment4TsStore(maxRowsPerSegmentLimit int)
func SetMaxSegmentLimit4TsStore ¶ added in v1.1.0
func SetMaxSegmentLimit4TsStore(limit int)
func SetMergeFlag4TsStore ¶ added in v1.1.0
func SetMergeFlag4TsStore(v int32)
func SetSnapshotLimit ¶
func SetSnapshotLimit(bytesPerSec int64, burstLimit int64)
func SetSnapshotTblNum ¶ added in v1.1.0
func SetSnapshotTblNum(snapshotTblNum int)
func SnapshotLimit ¶
func SnapshotLimit() bool
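SnapshotLimit reports whether a snapshot rate limit has been configured via SetSnapshotLimit (above). Example values only:

SetSnapshotLimit(64<<20, 128<<20) // 64 MB/s with a 128 MB burst
if SnapshotLimit() {
	// snapshot writes will be throttled
}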
func SumFilesSize ¶ added in v1.0.0
func SumFilesSize(files []TSSPFile) int64
func TimeSorted ¶ added in v1.2.0
func TimeSorted(sortKeys []string) bool
func UnrefFiles ¶
func UnrefFiles(files ...TSSPFile)
func UnrefFilesReader ¶ added in v1.0.0
func UnrefFilesReader(files ...TSSPFile)
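UnrefFiles and UnrefFilesReader release references taken when files are handed out, for example by MmsTables.GetBothFilesRef. A sketch of the assumed pairing, with m as an *MmsTables and mst as a measurement name:

order, unordered, ok := m.GetBothFilesRef(mst, false, util.TimeRange{}, nil)
if ok {
	defer UnrefFiles(append(order, unordered...)...)
	// ... read from the referenced files; they cannot be removed meanwhile ...
}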
func UpdateChunkMetaFunc ¶ added in v1.2.0
func UpdateChunkMetaFunc(_, _ cache.Entry) bool
func UpdateDetachedMetaDataCache ¶ added in v1.2.0
func UpdateDetachedMetaDataCache(old, new cache.Entry) bool
func WriteIntoFile ¶ added in v1.1.1
func WriteIntoFile(msb *MsBuilder, tmp bool, withPKIndex bool, ir *influxql.IndexRelation) error
Types ¶
type AccumulateMetaIndex ¶ added in v1.2.0
type AccumulateMetaIndex struct {
// contains filtered or unexported fields
}
func (*AccumulateMetaIndex) GetBlockId ¶ added in v1.2.0
func (a *AccumulateMetaIndex) GetBlockId() uint64
func (*AccumulateMetaIndex) SetAccumulateMetaIndex ¶ added in v1.2.0
func (a *AccumulateMetaIndex) SetAccumulateMetaIndex(pkDataOffset uint32, blockId uint64, dataOffset, offset int64)
type BaseFilterOptions ¶ added in v1.1.0
type BaseFilterOptions struct {
FiltersMap influxql.FilterMapValuer
RedIdxMap map[int]struct{} // redundant columns, which are not required after filtering.
FieldsIdx []int // field index in schema
FilterTags []string // filter tag name
CondFunctions *binaryfilterfunc.ConditionImpl
}
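An illustrative literal; the indexes and tag names are placeholders for whatever the query schema dictates:

opts := &BaseFilterOptions{
	FieldsIdx:  []int{1, 2},             // schema positions of the filtered fields
	FilterTags: []string{"host"},        // tag keys referenced by the condition
	RedIdxMap:  map[int]struct{}{2: {}}, // drop column 2 once filtering is done
}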
type BloomFilterIterator ¶ added in v1.2.0
type BloomFilterIterator struct {
// contains filtered or unexported fields
}
func NewBloomFilterIterator ¶ added in v1.2.0
func NewBloomFilterIterator(f *FragmentIterators, oldFiles []TSSPFile, bfCols []string) (*BloomFilterIterator, error)
func (*BloomFilterIterator) AppendFileIdx ¶ added in v1.2.0
func (bfi *BloomFilterIterator) AppendFileIdx(fileIdx int)
func (*BloomFilterIterator) Write ¶ added in v1.2.0
func (bfi *BloomFilterIterator) Write(toLocal bool) error
type BooleanPreAgg ¶
type BooleanPreAgg struct {
// contains filtered or unexported fields
}
func NewBooleanPreAgg ¶
func NewBooleanPreAgg() *BooleanPreAgg
type BufferReader ¶ added in v1.0.0
type BufferReader struct {
// contains filtered or unexported fields
}
func NewBufferReader ¶ added in v1.0.0
func NewBufferReader(maxSize uint32) *BufferReader
func (*BufferReader) Read ¶ added in v1.0.0
func (br *BufferReader) Read(offset int64, size uint32) ([]byte, error)
func (*BufferReader) Reset ¶ added in v1.0.0
func (br *BufferReader) Reset(r TSSPFile)
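A minimal read sequence, assuming file is an open TSSPFile; the sizes and offsets are illustrative:

br := NewBufferReader(64 * 1024) // 64 KB buffer
br.Reset(file)
data, err := br.Read(0, 512) // 512 bytes at offset 0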
type ChunkDataBuilder ¶
type ChunkDataBuilder struct {
// contains filtered or unexported fields
}
func NewChunkDataBuilder ¶
func NewChunkDataBuilder(maxRowsPerSegment, maxSegmentLimit int) *ChunkDataBuilder
func (*ChunkDataBuilder) EncodeTime ¶
func (b *ChunkDataBuilder) EncodeTime(offset int64, timeSorted bool) error
type ChunkIterator ¶
type ChunkIterator struct {
*FileIterator
// contains filtered or unexported fields
}
func NewChunkIterator ¶
func NewChunkIterator(r *FileIterator) *ChunkIterator
func (*ChunkIterator) Close ¶
func (c *ChunkIterator) Close()
func (*ChunkIterator) GetRecord ¶ added in v1.0.0
func (c *ChunkIterator) GetRecord() *record.Record
func (*ChunkIterator) GetSeriesID ¶ added in v1.0.0
func (c *ChunkIterator) GetSeriesID() uint64
func (*ChunkIterator) Next ¶
func (c *ChunkIterator) Next() bool
func (*ChunkIterator) WithLog ¶
func (c *ChunkIterator) WithLog(log *Log.Logger)
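The iterator follows the usual Next/Get pattern. A sketch, assuming fi is a *FileIterator positioned on one TSSP file and process is a hypothetical consumer:

itr := NewChunkIterator(fi)
for itr.Next() {
	sid := itr.GetSeriesID() // series id of the current chunk
	rec := itr.GetRecord()   // its decoded record
	process(sid, rec)
}
itr.Close()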
type ChunkIterators ¶
type ChunkIterators struct {
// contains filtered or unexported fields
}
func (*ChunkIterators) Close ¶
func (c *ChunkIterators) Close()
func (*ChunkIterators) Len ¶
func (c *ChunkIterators) Len() int
func (*ChunkIterators) Less ¶
func (c *ChunkIterators) Less(i, j int) bool
func (*ChunkIterators) Pop ¶
func (c *ChunkIterators) Pop() interface{}
func (*ChunkIterators) Push ¶
func (c *ChunkIterators) Push(v interface{})
func (*ChunkIterators) Swap ¶
func (c *ChunkIterators) Swap(i, j int)
func (*ChunkIterators) WithLog ¶
func (c *ChunkIterators) WithLog(log *Log.Logger)
type ChunkMeta ¶
type ChunkMeta struct {
// contains filtered or unexported fields
}
func GetChunkMeta ¶ added in v1.2.0
func (*ChunkMeta) AllocColMeta ¶ added in v1.1.0
func (m *ChunkMeta) AllocColMeta(ref *record.Field) *ColumnMeta
func (*ChunkMeta) DelEmptyColMeta ¶ added in v1.1.0
func (m *ChunkMeta) DelEmptyColMeta()
func (*ChunkMeta) GetColMeta ¶ added in v1.0.0
func (m *ChunkMeta) GetColMeta() []ColumnMeta
func (*ChunkMeta) GetTimeRangeBy ¶ added in v1.3.0
func (m *ChunkMeta) GetTimeRangeBy(index int) SegmentRange
func (*ChunkMeta) MinMaxTime ¶
func (m *ChunkMeta) MinMaxTime() (min int64, max int64)
func (*ChunkMeta) Rows ¶
func (m *ChunkMeta) Rows(ab PreAggBuilder) int
func (*ChunkMeta) SegmentCount ¶ added in v1.0.0
func (m *ChunkMeta) SegmentCount() int
func (*ChunkMeta) TimeMeta ¶
func (m *ChunkMeta) TimeMeta() *ColumnMeta
type ChunkMetaContext ¶ added in v1.2.0
type ChunkMetaContext struct {
// contains filtered or unexported fields
}
func NewChunkMetaContext ¶ added in v1.2.0
func NewChunkMetaContext(schema record.Schemas) *ChunkMetaContext
func (*ChunkMetaContext) Instance ¶ added in v1.2.0
func (ctx *ChunkMetaContext) Instance() pool.Object
func (*ChunkMetaContext) MemSize ¶ added in v1.2.0
func (ctx *ChunkMetaContext) MemSize() int
func (*ChunkMetaContext) Release ¶ added in v1.2.0
func (ctx *ChunkMetaContext) Release()
type ChunkMetaEntry ¶ added in v1.2.0
type ChunkMetaEntry struct {
// contains filtered or unexported fields
}
func NewChunkMetaEntry ¶ added in v1.2.0
func NewChunkMetaEntry(filePath string) *ChunkMetaEntry
func (*ChunkMetaEntry) GetKey ¶ added in v1.2.0
func (e *ChunkMetaEntry) GetKey() string
func (*ChunkMetaEntry) GetTime ¶ added in v1.2.0
func (e *ChunkMetaEntry) GetTime() time.Time
func (*ChunkMetaEntry) GetValue ¶ added in v1.2.0
func (e *ChunkMetaEntry) GetValue() interface{}
func (*ChunkMetaEntry) SetTime ¶ added in v1.2.0
func (e *ChunkMetaEntry) SetTime(time time.Time)
func (*ChunkMetaEntry) SetValue ¶ added in v1.2.0
func (e *ChunkMetaEntry) SetValue(value interface{})
func (*ChunkMetaEntry) Size ¶ added in v1.2.0
func (e *ChunkMetaEntry) Size() int64
type ColumnBuilder ¶
type ColumnBuilder struct {
// contains filtered or unexported fields
}
func NewColumnBuilder ¶
func NewColumnBuilder() *ColumnBuilder
func (*ColumnBuilder) BuildPreAgg ¶ added in v1.0.0
func (b *ColumnBuilder) BuildPreAgg()
func (*ColumnBuilder) EncodeColumn ¶
func (*ColumnBuilder) EncodeColumnBySize ¶ added in v1.2.0
func (*ColumnBuilder) SetEncodeMode ¶ added in v1.2.0
func (b *ColumnBuilder) SetEncodeMode(detached bool)
type ColumnIterator ¶ added in v1.0.0
type ColumnIterator struct {
// contains filtered or unexported fields
}
func NewColumnIterator ¶ added in v1.0.0
func NewColumnIterator(fi *FileIterator) *ColumnIterator
func (*ColumnIterator) Close ¶ added in v1.0.0
func (itr *ColumnIterator) Close()
func (*ColumnIterator) Error ¶ added in v1.0.0
func (itr *ColumnIterator) Error() error
func (*ColumnIterator) IncrChunkUsed ¶ added in v1.0.0
func (itr *ColumnIterator) IncrChunkUsed()
func (*ColumnIterator) IterCurrentChunk ¶ added in v1.3.0
func (itr *ColumnIterator) IterCurrentChunk(p ColumnIteratorPerformer) error
func (*ColumnIterator) NextChunkMeta ¶ added in v1.0.0
func (itr *ColumnIterator) NextChunkMeta() bool
func (*ColumnIterator) NextColumn ¶ added in v1.0.0
func (itr *ColumnIterator) NextColumn(colIdx int) (*record.Field, bool)
func (*ColumnIterator) PutCol ¶ added in v1.0.0
func (itr *ColumnIterator) PutCol(col *record.ColVal)
func (*ColumnIterator) Run ¶ added in v1.0.0
func (itr *ColumnIterator) Run(p ColumnIteratorPerformer) error
type ColumnIteratorPerformer ¶ added in v1.0.0
type ColumnMeta ¶
type ColumnMeta struct {
// contains filtered or unexported fields
}
func (*ColumnMeta) Clone ¶ added in v1.0.0
func (m *ColumnMeta) Clone() ColumnMeta
func (*ColumnMeta) GetPreAgg ¶ added in v1.2.0
func (m *ColumnMeta) GetPreAgg() []byte
func (*ColumnMeta) GetSegment ¶ added in v1.2.0
func (m *ColumnMeta) GetSegment(i int) (int64, uint32)
func (*ColumnMeta) IsTime ¶ added in v1.1.0
func (m *ColumnMeta) IsTime() bool
func (*ColumnMeta) Name ¶ added in v1.1.0
func (m *ColumnMeta) Name() string
func (*ColumnMeta) RowCount ¶
func (m *ColumnMeta) RowCount(ref *record.Field, decs *ReadContext) (int64, error)
func (*ColumnMeta) Type ¶ added in v1.2.0
func (m *ColumnMeta) Type() uint8
type ColumnReader ¶
type ColumnReader interface {
ReadDataBlock(offset int64, size uint32, dst *[]byte, ioPriority int) ([]byte, *readcache.CachePage, error)
ReadMetaBlock(metaIdx int, id uint64, offset int64, size uint32, count uint32, dst *pool.Buffer, ioPriority int) ([]byte, error)
UnrefCachePage(cachePage *readcache.CachePage)
}
type CompactGroup ¶
type CompactGroup struct {
// contains filtered or unexported fields
}
func NewCompactGroup ¶
func NewCompactGroup(name string, toLevle uint16, count int) *CompactGroup
func (*CompactGroup) Add ¶ added in v1.3.0
func (g *CompactGroup) Add(item string)
func (*CompactGroup) Len ¶ added in v1.3.0
func (g *CompactGroup) Len() int
func (*CompactGroup) UpdateLevel ¶ added in v1.3.0
func (g *CompactGroup) UpdateLevel(lv uint16)
type CompactGroupBuilder ¶ added in v1.3.0
type CompactGroupBuilder struct {
// contains filtered or unexported fields
}
func (*CompactGroupBuilder) AddFile ¶ added in v1.3.0
func (b *CompactGroupBuilder) AddFile(f TSSPFile) bool
func (*CompactGroupBuilder) Init ¶ added in v1.3.0
func (b *CompactGroupBuilder) Init(name string, closing *int64, size int)
func (*CompactGroupBuilder) Limited ¶ added in v1.3.0
func (b *CompactGroupBuilder) Limited() bool
func (*CompactGroupBuilder) Release ¶ added in v1.3.0
func (b *CompactGroupBuilder) Release()
func (*CompactGroupBuilder) SwitchGroup ¶ added in v1.3.0
func (b *CompactGroupBuilder) SwitchGroup()
type CompactTask ¶ added in v1.3.0
func NewCompactTask ¶ added in v1.3.0
func NewCompactTask(table *MmsTables, plan *CompactGroup, full bool) *CompactTask
func (*CompactTask) BeforeExecute ¶ added in v1.3.0
func (t *CompactTask) BeforeExecute() bool
func (*CompactTask) Execute ¶ added in v1.3.0
func (t *CompactTask) Execute()
func (*CompactTask) Finish ¶ added in v1.3.0
func (t *CompactTask) Finish()
func (*CompactTask) IncrFull ¶ added in v1.3.0
func (t *CompactTask) IncrFull(n int64)
type CompactedFileInfo ¶
type Config ¶
type Config struct {
SnapshotTblNum int
FragmentsNumPerFlush int
// contains filtered or unexported fields
}
func GetColStoreConfig ¶ added in v1.1.0
func GetColStoreConfig() *Config
func GetTsStoreConfig ¶ added in v1.1.0
func GetTsStoreConfig() *Config
func NewColumnStoreConfig ¶ added in v1.1.1
func NewColumnStoreConfig() *Config
func NewTsStoreConfig ¶ added in v1.1.0
func NewTsStoreConfig() *Config
func (*Config) GetCompactionEnabled ¶ added in v1.2.0
func (*Config) GetMaxRowsPerSegment ¶ added in v1.1.0
func (*Config) GetMaxSegmentLimit ¶ added in v1.1.0
func (*Config) SetExpectedSegmentSize ¶ added in v1.2.0
func (*Config) SetFilesLimit ¶
func (*Config) SetMaxRowsPerSegment ¶
func (*Config) SetMaxSegmentLimit ¶
type CsChunkDataImp ¶ added in v1.2.0
type CsChunkDataImp struct {
// contains filtered or unexported fields
}
func (*CsChunkDataImp) EncodeChunk ¶ added in v1.2.0
func (c *CsChunkDataImp) EncodeChunk(b *ChunkDataBuilder, id uint64, offset int64, rec *record.Record, dst []byte, timeSorted bool) ([]byte, error)
func (*CsChunkDataImp) EncodeChunkForCompaction ¶ added in v1.2.0
func (c *CsChunkDataImp) EncodeChunkForCompaction(b *ChunkDataBuilder, offset int64, rec *record.Record, dst []byte, accumulateRowsIndex []int, timeSorted bool) ([]byte, error)
func (*CsChunkDataImp) SetAccumulateRowsIndex ¶ added in v1.2.0
func (c *CsChunkDataImp) SetAccumulateRowsIndex(accumulateRowsIndex []int)
func (*CsChunkDataImp) SetDetachedInfo ¶ added in v1.2.0
func (c *CsChunkDataImp) SetDetachedInfo(writeDetached bool)
type DetachedChunkMetaReader ¶ added in v1.2.0
type DetachedChunkMetaReader struct {
// contains filtered or unexported fields
}
func NewDetachedChunkMetaReader ¶ added in v1.2.0
func NewDetachedChunkMetaReader(path string, obsOpts *obs.ObsOptions) (*DetachedChunkMetaReader, error)
func (*DetachedChunkMetaReader) ReadChunkMeta ¶ added in v1.2.0
func (reader *DetachedChunkMetaReader) ReadChunkMeta(offset, length []int64) ([]*ChunkMeta, error)
type DetachedMetaDataReader ¶ added in v1.2.0
type DetachedMetaDataReader struct {
// contains filtered or unexported fields
}
func NewDetachedMetaDataReader ¶ added in v1.2.0
func NewDetachedMetaDataReader(path string, obsOpts *obs.ObsOptions, isSort bool) (*DetachedMetaDataReader, error)
func (*DetachedMetaDataReader) Close ¶ added in v1.3.0
func (reader *DetachedMetaDataReader) Close()
func (*DetachedMetaDataReader) InitReadBatch ¶ added in v1.2.0
func (reader *DetachedMetaDataReader) InitReadBatch(s []*SegmentMeta, schema record.Schemas)
func (*DetachedMetaDataReader) ReadBatch ¶ added in v1.2.0
func (reader *DetachedMetaDataReader) ReadBatch(dst *record.Record, decs *ReadContext) (*record.Record, error)
type DetachedMetaIndexReader ¶ added in v1.2.0
type DetachedMetaIndexReader struct {
// contains filtered or unexported fields
}
func NewDetachedMetaIndexReader ¶ added in v1.2.0
func NewDetachedMetaIndexReader(path string, obsOpts *obs.ObsOptions) (*DetachedMetaIndexReader, error)
func (*DetachedMetaIndexReader) Close ¶ added in v1.3.0
func (reader *DetachedMetaIndexReader) Close()
func (*DetachedMetaIndexReader) ReadMetaIndex ¶ added in v1.2.0
func (reader *DetachedMetaIndexReader) ReadMetaIndex(offset, length []int64) ([]*MetaIndex, error)
type DetachedPKDataReader ¶ added in v1.2.0
type DetachedPKDataReader struct {
// contains filtered or unexported fields
}
func NewDetachedPKDataReader ¶ added in v1.2.0
func NewDetachedPKDataReader(path string, opts *obs.ObsOptions) (*DetachedPKDataReader, error)
func (*DetachedPKDataReader) Close ¶ added in v1.3.0
func (reader *DetachedPKDataReader) Close()
func (*DetachedPKDataReader) Read ¶ added in v1.2.0
func (reader *DetachedPKDataReader) Read(offset, length []int64, metas []*colstore.DetachedPKMeta, info *colstore.DetachedPKMetaInfo) ([]*colstore.DetachedPKData, error)
type DetachedPKMetaInfoReader ¶ added in v1.2.0
type DetachedPKMetaInfoReader struct {
// contains filtered or unexported fields
}
func NewDetachedPKMetaInfoReader ¶ added in v1.2.0
func NewDetachedPKMetaInfoReader(path string, opts *obs.ObsOptions) (*DetachedPKMetaInfoReader, error)
func (*DetachedPKMetaInfoReader) Close ¶ added in v1.3.0
func (reader *DetachedPKMetaInfoReader) Close()
func (*DetachedPKMetaInfoReader) Read ¶ added in v1.2.0
func (reader *DetachedPKMetaInfoReader) Read() (*colstore.DetachedPKMetaInfo, error)
type DetachedPKMetaReader ¶ added in v1.2.0
type DetachedPKMetaReader struct {
// contains filtered or unexported fields
}
func NewDetachedPKMetaReader ¶ added in v1.2.0
func NewDetachedPKMetaReader(path string, opts *obs.ObsOptions) (*DetachedPKMetaReader, error)
func (*DetachedPKMetaReader) Close ¶ added in v1.3.0
func (reader *DetachedPKMetaReader) Close()
func (*DetachedPKMetaReader) Read ¶ added in v1.2.0
func (reader *DetachedPKMetaReader) Read(offset, length []int64) ([]*colstore.DetachedPKMeta, error)
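A sketch of the detached read flow; the path layout, a nil *obs.ObsOptions meaning local access, and the offset/length values are all assumptions:

    // Hedged sketch: fetch detached primary-key metadata entries.
    func readPKMetas() error {
        r, err := immutable.NewDetachedPKMetaReader("/data/db0/mst_0000", nil) // hypothetical path
        if err != nil {
            return err
        }
        defer r.Close()
        metas, err := r.Read([]int64{0}, []int64{128}) // one entry: offset 0, length 128 (placeholders)
        if err != nil {
            return err
        }
        _ = metas // []*colstore.DetachedPKMeta
        return nil
    }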
type DetachedSegmentEntry ¶ added in v1.2.0
type DetachedSegmentEntry struct {
// contains filtered or unexported fields
}
func NewSegmentMetaDataEntry ¶ added in v1.2.0
func NewSegmentMetaDataEntry(segmentID string) *DetachedSegmentEntry
func (*DetachedSegmentEntry) GetKey ¶ added in v1.2.0
func (e *DetachedSegmentEntry) GetKey() string
func (*DetachedSegmentEntry) GetTime ¶ added in v1.2.0
func (e *DetachedSegmentEntry) GetTime() time.Time
func (*DetachedSegmentEntry) GetValue ¶ added in v1.2.0
func (e *DetachedSegmentEntry) GetValue() interface{}
func (*DetachedSegmentEntry) SetTime ¶ added in v1.2.0
func (e *DetachedSegmentEntry) SetTime(time time.Time)
func (*DetachedSegmentEntry) SetValue ¶ added in v1.2.0
func (e *DetachedSegmentEntry) SetValue(value interface{})
func (*DetachedSegmentEntry) Size ¶ added in v1.2.0
func (e *DetachedSegmentEntry) Size() int64
type EncodeChunkData ¶ added in v1.2.0
type EncodeChunkData interface {
EncodeChunk(b *ChunkDataBuilder, id uint64, offset int64, rec *record.Record, dst []byte, timeSorted bool) ([]byte, error)
EncodeChunkForCompaction(b *ChunkDataBuilder, offset int64, rec *record.Record, dst []byte, accumulateRowsIndex []int, timeSorted bool) ([]byte, error)
SetAccumulateRowsIndex(rowsPerSegment []int)
SetDetachedInfo(writeDetached bool)
}
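Both chunk-data implementations in this package (TsChunkDataImp and CsChunkDataImp) carry exactly these four methods, so a compile-time assertion can document the relationship; a sketch, assuming the usual import path:

    // Hedged compile-time checks: the ts- and column-store chunk encoders
    // are expected to satisfy EncodeChunkData (their method sets match).
    var (
        _ immutable.EncodeChunkData = (*immutable.TsChunkDataImp)(nil)
        _ immutable.EncodeChunkData = (*immutable.CsChunkDataImp)(nil)
    )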
type EncodeColumnMode ¶ added in v1.2.0
type EncodeColumnMode interface {
// contains filtered or unexported methods
}
type EngineShard ¶ added in v1.3.0
type EngineShard interface {
IsOpened() bool
OpenAndEnable(client metaclient.MetaClient) error
GetDataPath() string
GetIdent() *meta.ShardIdentifier
}
type EventBus ¶ added in v1.3.0
type EventBus struct {
// contains filtered or unexported fields
}
func DefaultEventBus ¶ added in v1.3.0
func DefaultEventBus() *EventBus
type EventContext ¶ added in v1.3.0
type EventContext struct {
// contains filtered or unexported fields
}
func NewEventContext ¶ added in v1.3.0
func NewEventContext(idx IndexMergeSet, scheduler *scheduler.TaskScheduler) *EventContext
type Events ¶ added in v1.3.0
type Events struct {
// contains filtered or unexported fields
}
func (*Events) Finish ¶ added in v1.3.0
func (es *Events) Finish(success bool, ctx EventContext)
func (*Events) TriggerNewFile ¶ added in v1.3.0
func (*Events) TriggerReplaceFile ¶ added in v1.3.0
func (*Events) TriggerWriteChunkMeta ¶ added in v1.3.0
func (*Events) TriggerWriteRecord ¶ added in v1.3.0
type FileInfoExtend ¶ added in v1.3.0
type FileIterator ¶
type FileIterator struct {
// contains filtered or unexported fields
}
func NewFileIterator ¶
func NewFileIterator(r TSSPFile, log *Log.Logger) *FileIterator
func (*FileIterator) Close ¶
func (itr *FileIterator) Close()
func (*FileIterator) GetCurtChunkMeta ¶ added in v1.0.0
func (itr *FileIterator) GetCurtChunkMeta() *ChunkMeta
func (*FileIterator) NextChunkMeta ¶
func (itr *FileIterator) NextChunkMeta() bool
func (*FileIterator) ReadData ¶ added in v1.2.0
func (itr *FileIterator) ReadData(offset int64, size uint32) ([]byte, error)
func (*FileIterator) WithLog ¶
func (itr *FileIterator) WithLog(log *Log.Logger)
type FileIterators ¶
type FileIterators []*FileIterator
func (FileIterators) AverageRows ¶
func (i FileIterators) AverageRows() int
func (FileIterators) Close ¶
func (i FileIterators) Close()
func (FileIterators) MaxChunkRows ¶
func (i FileIterators) MaxChunkRows() int
func (FileIterators) MaxColumns ¶
func (i FileIterators) MaxColumns() int
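A sketch of the typical iteration pattern, assuming f1 and f2 are open TSSPFiles and log is a configured *Log.Logger:

    // Hedged sketch: drain chunk metas from one file, then aggregate
    // statistics across several iterators via FileIterators.
    fi := immutable.NewFileIterator(f1, log)
    for fi.NextChunkMeta() {
        cm := fi.GetCurtChunkMeta() // current *ChunkMeta
        _ = cm
    }
    fi.Close()

    its := immutable.FileIterators{
        immutable.NewFileIterator(f1, log),
        immutable.NewFileIterator(f2, log),
    }
    defer its.Close()
    avgRows, maxRows := its.AverageRows(), its.MaxChunkRows()
    _, _ = avgRows, maxRows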
type FileReader ¶ added in v1.1.0
type FileReader interface {
Open() error
Close() error
ReadData(cm *ChunkMeta, segment int, dst *record.Record, ctx *ReadContext, ioPriority int) (*record.Record, error)
Ref()
Unref() int64
MetaIndexAt(idx int) (*MetaIndex, error)
MetaIndex(id uint64, tr util.TimeRange) (int, *MetaIndex, error)
ChunkMeta(id uint64, offset int64, size, itemCount uint32, metaIdx int, ctx *ChunkMetaContext, ioPriority int) (*ChunkMeta, error)
ReadMetaBlock(metaIdx int, id uint64, offset int64, size uint32, count uint32, buf *pool.Buffer, ioPriority int) ([]byte, error)
ReadDataBlock(offset int64, size uint32, dst *[]byte, ioPriority int) ([]byte, *readcache.CachePage, error)
Read(offset int64, size uint32, dst *[]byte, ioPriority int) ([]byte, error)
ReadChunkMetaData(metaIdx int, m *MetaIndex, dst []ChunkMeta, ioPriority int) ([]ChunkMeta, error)
LoadIdTimes(isOrder bool, p *IdTimePairs) error
Stat() *Trailer
MinMaxSeriesID() (min, max uint64, err error)
MinMaxTime() (min, max int64, err error)
Contains(id uint64, tm util.TimeRange) bool
ContainsTime(tm util.TimeRange) bool
ContainsId(id uint64) bool
Name() string
FileName() string
Rename(newName string) error
RenameOnObs(obsName string, tmp bool, obsOpt *obs.ObsOptions) error
FileSize() int64
InMemSize() int64
Version() uint64
FreeMemory() int64
FreeFileHandle() error
LoadIntoMemory() error
LoadComponents() error
AverageChunkRows() int
MaxChunkRows() int
GetFileReaderRef() int64
ChunkMetaCompressMode() uint8
}
type FileReaderContext ¶ added in v1.2.0
type FileReaderContext struct {
// contains filtered or unexported fields
}
func NewFileReaderContext ¶ added in v1.2.0
func NewFileReaderContext(tr util.TimeRange, schemas record.Schemas, decs *ReadContext, filterOpts *FilterOptions, filterBitmap *bitmap.FilterBitmap, isOrder bool) *FileReaderContext
func (*FileReaderContext) GetSchemas ¶ added in v1.3.0
func (f *FileReaderContext) GetSchemas() record.Schemas
type FileSwapper ¶ added in v1.2.0
type FileSwapper struct {
// contains filtered or unexported fields
}
func NewFileSwapper ¶ added in v1.2.0
func (*FileSwapper) MustClose ¶ added in v1.2.0
func (s *FileSwapper) MustClose()
func (*FileSwapper) SetWriter ¶ added in v1.2.0
func (s *FileSwapper) SetWriter(w io.WriteCloser)
type FilterOptions ¶
type FilterOptions struct {
// contains filtered or unexported fields
}
func NewFilterOpts ¶
func NewFilterOpts(cond influxql.Expr, filterOption *BaseFilterOptions, tags *influx.PointTags, rowFilters *[]clv.RowFilter) *FilterOptions
func (*FilterOptions) GetCond ¶ added in v1.1.0
func (fo *FilterOptions) GetCond() influxql.Expr
func (*FilterOptions) SetCondFuncs ¶ added in v1.1.0
func (fo *FilterOptions) SetCondFuncs(filterOption *BaseFilterOptions)
type FirstLastReader ¶ added in v1.1.0
type FirstLastReader struct {
// contains filtered or unexported fields
}
func (*FirstLastReader) Init ¶ added in v1.1.0
func (r *FirstLastReader) Init(cm *ChunkMeta, cr ColumnReader, ref *record.Field, dst *record.Record, first bool) *FirstLastReader
func (*FirstLastReader) Read ¶ added in v1.1.0
func (r *FirstLastReader) Read(ctx *ReadContext, copied bool, ioPriority int) error
func (*FirstLastReader) Release ¶ added in v1.1.0
func (r *FirstLastReader) Release()
type FloatPreAgg ¶
type FloatPreAgg struct {
// contains filtered or unexported fields
}
FloatPreAgg: if you change the order of the fields in this structure, remember to update marshal() and unmarshal() as well.

func NewFloatPreAgg ¶
func NewFloatPreAgg() *FloatPreAgg
type FragmentIterator ¶ added in v1.2.0
type FragmentIterator interface {
// contains filtered or unexported methods
}
type FragmentIterators ¶ added in v1.1.1
type FragmentIterators struct {
SortKeyFileds []record.Field
TableData
Conf *Config
PkRec []*record.Record
RecordResult *record.Record
TimeClusterResult *record.Record
// contains filtered or unexported fields
}
func (*FragmentIterators) Close ¶ added in v1.1.1
func (f *FragmentIterators) Close()
func (*FragmentIterators) CompareWithBreakPoint ¶ added in v1.1.1
func (f *FragmentIterators) CompareWithBreakPoint(curPos, breakPos int) bool
func (*FragmentIterators) IsEmpty ¶ added in v1.2.0
func (f *FragmentIterators) IsEmpty() bool
func (*FragmentIterators) Len ¶ added in v1.1.1
func (f *FragmentIterators) Len() int
func (*FragmentIterators) Less ¶ added in v1.1.1
func (f *FragmentIterators) Less(i, j int) bool
func (*FragmentIterators) Pop ¶ added in v1.1.1
func (f *FragmentIterators) Pop() interface{}
func (*FragmentIterators) Push ¶ added in v1.1.1
func (f *FragmentIterators) Push(v interface{})
func (*FragmentIterators) Swap ¶ added in v1.1.1
func (f *FragmentIterators) Swap(i, j int)
func (*FragmentIterators) WithLog ¶ added in v1.1.1
func (f *FragmentIterators) WithLog(log *Log.Logger)
type FragmentIteratorsPool ¶ added in v1.1.1
type FragmentIteratorsPool struct {
// contains filtered or unexported fields
}
func NewFragmentIteratorsPool ¶ added in v1.1.1
func NewFragmentIteratorsPool(n int) *FragmentIteratorsPool
type IdTimePairs ¶
func GetIDTimePairs ¶
func GetIDTimePairs(name string) *IdTimePairs
func (*IdTimePairs) Add ¶
func (p *IdTimePairs) Add(id uint64, tm int64)
func (*IdTimePairs) AddRowCounts ¶
func (p *IdTimePairs) AddRowCounts(rowCounts int64)
func (*IdTimePairs) Len ¶
func (p *IdTimePairs) Len() int
func (*IdTimePairs) Marshal ¶
func (p *IdTimePairs) Marshal(encTimes bool, dst []byte, ctx *encoding.CoderContext) []byte
func (*IdTimePairs) Reset ¶
func (p *IdTimePairs) Reset(name string)
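A small sketch of the pooled id/time bookkeeping (fmt and the immutable package imported), assuming GetIDTimePairs hands out a pooled object keyed by measurement name and that timestamps are nanoseconds:

    p := immutable.GetIDTimePairs("cpu")
    p.Add(1001, 1700000000000000000) // series id, timestamp (ns assumed)
    p.Add(1002, 1700000001000000000)
    p.AddRowCounts(2)
    fmt.Println(p.Len()) // 2
    p.Reset("mem")       // recycle the pooled object for another measurement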
type ImmTable ¶ added in v1.1.0
type ImmTable interface {
GetEngineType() config.EngineType
GetCompactionType(name string) config.CompactionType
NewFileIterators(m *MmsTables, group *CompactGroup) (FilesInfo, error)
AddTSSPFiles(m *MmsTables, name string, isOrder bool, files ...TSSPFile)
AddBothTSSPFiles(flushed *bool, m *MmsTables, name string, orderFiles []TSSPFile, unorderFiles []TSSPFile)
LevelPlan(m *MmsTables, level uint16) []*CompactGroup
SetMstInfo(name string, mstInfo *meta.MeasurementInfo)
GetMstInfo(name string) (*meta.MeasurementInfo, bool)
UpdateAccumulateMetaIndexInfo(name string, index *AccumulateMetaIndex)
FullyCompacted(m *MmsTables) bool
// contains filtered or unexported methods
}
type IndexCompressWriter ¶ added in v1.2.0
type IndexCompressWriter struct {
// contains filtered or unexported fields
}
func (*IndexCompressWriter) BlockSize ¶ added in v1.2.0
func (w *IndexCompressWriter) BlockSize() int
func (*IndexCompressWriter) Close ¶ added in v1.2.0
func (w *IndexCompressWriter) Close() error
func (*IndexCompressWriter) CopyTo ¶ added in v1.2.0
func (w *IndexCompressWriter) CopyTo(to io.Writer) (int, error)
func (*IndexCompressWriter) GetWriter ¶ added in v1.2.0
func (w *IndexCompressWriter) GetWriter() *bufio.Writer
func (*IndexCompressWriter) Init ¶ added in v1.2.0
func (w *IndexCompressWriter) Init(name string, lock *string, cacheMeta bool, limitCompact bool)
func (*IndexCompressWriter) MetaDataBlocks ¶ added in v1.2.0
func (w *IndexCompressWriter) MetaDataBlocks(dst [][]byte) [][]byte
func (*IndexCompressWriter) Size ¶ added in v1.2.0
func (w *IndexCompressWriter) Size() int
func (*IndexCompressWriter) SwitchMetaBuffer ¶ added in v1.2.0
func (w *IndexCompressWriter) SwitchMetaBuffer() (int, error)
type IndexFrags ¶ added in v1.2.0
type IndexFrags interface {
BasePath() string
FragCount() int64
IndexCount() int
Indexes() interface{}
AppendIndexes(...interface{})
FragRanges() []fragment.FragmentRanges
AppendFragRanges(...fragment.FragmentRanges)
AddFragCount(int64)
SetErr(error)
GetErr() error
Size() int
}
func GetDetachedSegmentTask ¶ added in v1.2.0
func GetDetachedSegmentTask(queryID string) (IndexFrags, bool)
type IndexMergeSet ¶ added in v1.3.0
type IndexWriter ¶ added in v1.2.0
type IndexWriter interface {
Init(name string, lock *string, cacheMeta bool, limitCompact bool)
Write(p []byte) (int, error)
Size() int
BlockSize() int
CopyTo(to io.Writer) (int, error)
SwitchMetaBuffer() (int, error)
MetaDataBlocks(dst [][]byte) [][]byte
Close() error
// contains filtered or unexported methods
}
func NewPKIndexWriter ¶ added in v1.2.0
func NewPKIndexWriter(indexName string, cacheMeta bool, limitCompact bool, lockPath *string) IndexWriter
type IntegerPreAgg ¶
type IntegerPreAgg struct {
// contains filtered or unexported fields
}
IntegerPreAgg: if you change the order of the fields in this structure, remember to update marshal() and unmarshal() as well.
func NewIntegerPreAgg ¶
func NewIntegerPreAgg() *IntegerPreAgg
type IteratorByBlock ¶ added in v1.2.0
type IteratorByBlock struct {
// contains filtered or unexported fields
}
IteratorByBlock is the block-based iterator for a single measurement (mst).
func NewIteratorByBlock ¶ added in v1.2.0
func NewIteratorByBlock(f *FragmentIterators, conf *Config, group FilesInfo, accumulateMetaIndex *AccumulateMetaIndex) *IteratorByBlock
func (*IteratorByBlock) WriteDetachedMeta ¶ added in v1.2.0
func (ib *IteratorByBlock) WriteDetachedMeta(pkSchema record.Schemas) error
type IteratorByRow ¶ added in v1.2.0
type IteratorByRow struct {
// contains filtered or unexported fields
}
func NewIteratorByRow ¶ added in v1.2.0
func NewIteratorByRow(f *FragmentIterators, conf *Config) *IteratorByRow
func (*IteratorByRow) GetBreakPoint ¶ added in v1.2.0
func (ir *IteratorByRow) GetBreakPoint()
func (*IteratorByRow) NextWithBreakPoint ¶ added in v1.2.0
func (ir *IteratorByRow) NextWithBreakPoint()
type Location ¶
type Location struct {
// contains filtered or unexported fields
}
func NewLocation ¶
func NewLocation(r TSSPFile, ctx *ReadContext) *Location
func (*Location) AscendingDone ¶ added in v1.1.0
func (l *Location) AscendingDone()
func (*Location) DescendingDone ¶ added in v1.1.0
func (l *Location) DescendingDone()
func (*Location) GetChunkMeta ¶
func (*Location) SetChunkMeta ¶ added in v1.2.0
func (*Location) SetClosedSignal ¶ added in v1.3.0
func (*Location) SetFragmentRanges ¶ added in v1.1.0
func (l *Location) SetFragmentRanges(frs []*fragment.FragmentRange)
type LocationCursor ¶
type LocationCursor struct {
// contains filtered or unexported fields
}
func NewLocationCursor ¶
func NewLocationCursor(n int) *LocationCursor
func (*LocationCursor) AddFilterRecPool ¶ added in v1.1.0
func (l *LocationCursor) AddFilterRecPool(pool *record.CircularRecordPool)
func (*LocationCursor) AddLocation ¶
func (l *LocationCursor) AddLocation(loc *Location)
func (*LocationCursor) AddRef ¶
func (l *LocationCursor) AddRef()
func (*LocationCursor) Close ¶ added in v1.1.0
func (l *LocationCursor) Close()
func (*LocationCursor) FragmentCount ¶ added in v1.1.0
func (l *LocationCursor) FragmentCount() int
func (*LocationCursor) Len ¶
func (l *LocationCursor) Len() int
func (*LocationCursor) Less ¶
func (l *LocationCursor) Less(i, j int) bool
func (*LocationCursor) ReadData ¶
func (l *LocationCursor) ReadData(filterOpts *FilterOptions, dst *record.Record, filterBitmap *bitmap.FilterBitmap, unnestOperator UnnestOperator) (*record.Record, error)
func (*LocationCursor) ReadMeta ¶
func (l *LocationCursor) ReadMeta(filterOpts *FilterOptions, dst *record.Record, filterBitmap *bitmap.FilterBitmap) (*record.Record, error)
func (*LocationCursor) ReadOutOfOrderMeta ¶
func (l *LocationCursor) ReadOutOfOrderMeta(filterOpts *FilterOptions, dst *record.Record) (*record.Record, error)
func (*LocationCursor) Reset ¶ added in v1.3.0
func (l *LocationCursor) Reset()
func (*LocationCursor) Reverse ¶
func (l *LocationCursor) Reverse()
func (*LocationCursor) RowCount ¶ added in v1.1.0
func (l *LocationCursor) RowCount() int
func (*LocationCursor) Swap ¶
func (l *LocationCursor) Swap(i, j int)
func (*LocationCursor) Unref ¶
func (l *LocationCursor) Unref()
type MatchAllOperator ¶ added in v1.3.0
type MatchAllOperator struct {
// contains filtered or unexported fields
}
func (*MatchAllOperator) Compute ¶ added in v1.3.0
func (r *MatchAllOperator) Compute(rec *record.Record)
type MeasurementInProcess ¶ added in v1.2.0
type MeasurementInProcess struct {
// contains filtered or unexported fields
}
func NewMeasurementInProcess ¶ added in v1.2.0
func NewMeasurementInProcess() *MeasurementInProcess
func (*MeasurementInProcess) Add ¶ added in v1.2.0
func (m *MeasurementInProcess) Add(name string) bool
func (*MeasurementInProcess) Del ¶ added in v1.2.0
func (m *MeasurementInProcess) Del(name string)
func (*MeasurementInProcess) Has ¶ added in v1.3.0
func (m *MeasurementInProcess) Has(name string) bool
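A sketch of the in-process guard, assuming Add returns false when the measurement is already registered:

    mp := immutable.NewMeasurementInProcess()
    if mp.Add("cpu") { // assumed: false means "cpu" is already being processed
        defer mp.Del("cpu")
        // ... run the compaction / flush for "cpu" ...
    }
    fmt.Println(mp.Has("cpu"))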
type MemBlock ¶
type MemBlock struct {
// contains filtered or unexported fields
}
func (*MemBlock) AppendDataBlock ¶
func (*MemBlock) CopyBlocks ¶
func (mb *MemBlock) CopyBlocks(src MemoryReader)
func (*MemBlock) DataBlocks ¶
func (*MemBlock) DataInMemory ¶
func (*MemBlock) FreeMemory ¶
func (*MemBlock) LoadIntoMemory ¶
func (*MemBlock) MetaBlocks ¶
func (*MemBlock) MetaInMemory ¶
func (*MemBlock) ReadChunkMetaBlock ¶
func (*MemBlock) ReadDataBlock ¶
func (*MemBlock) ReserveDataBlock ¶
func (*MemBlock) ReserveMetaBlock ¶
func (*MemBlock) SetMetaBlocks ¶
type MemoryReader ¶
type MemoryReader interface {
AppendDataBlock(srcData []byte)
ReadChunkMetaBlock(metaIdx int, sid uint64, count uint32) []byte
ReadDataBlock(offset int64, size uint32, dstPtr *[]byte) ([]byte, error)
CopyBlocks(src MemoryReader)
LoadIntoMemory(dr fileops.BasicFileReader, tr *Trailer, metaIndexItems []MetaIndex) error
FreeMemory() int64
DataInMemory() bool
MetaInMemory() bool
ReserveMetaBlock(n int)
ReserveDataBlock(n int)
DataBlocks() [][]byte
MetaBlocks() [][]byte
SetMetaBlocks(blocks [][]byte)
Size() int64
Reset()
}
func NewMemReader ¶
func NewMemReader() MemoryReader
func NewMemoryReader ¶
func NewMemoryReader(blkSize int) MemoryReader
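A sketch of the in-memory block usage, assuming the blkSize argument of NewMemoryReader is a block size in bytes:

    mr := immutable.NewMemoryReader(32 * 1024) // assumed: 32 KiB block size
    mr.ReserveDataBlock(4)                     // pre-allocate room for 4 blocks
    mr.AppendDataBlock([]byte("encoded segment bytes"))
    fmt.Println(mr.DataInMemory(), mr.Size())
    mr.Reset()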
type MergeColPool ¶ added in v1.1.0
type MergeColPool struct {
// contains filtered or unexported fields
}
func (*MergeColPool) Get ¶ added in v1.1.0
func (p *MergeColPool) Get() *record.ColVal
func (*MergeColPool) Put ¶ added in v1.1.0
func (p *MergeColPool) Put(col *record.ColVal)
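A sketch of the recycle pattern, assuming the zero-value pool is ready to use:

    var pool immutable.MergeColPool // assumed usable as its zero value
    col := pool.Get()               // *record.ColVal, reused when available
    // ... fill col while merging ...
    pool.Put(col) // hand the column back for reuse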
type MergeContext ¶ added in v1.3.0
type MergeContext struct {
// contains filtered or unexported fields
}
func BuildMergeContext ¶ added in v1.3.0
func BuildMergeContext(mst string, files *TSSPFiles, full bool, lmt *lastMergeTime) []*MergeContext
func NewMergeContext ¶ added in v1.0.0
func NewMergeContext(mst string, level uint16) *MergeContext
func (*MergeContext) AddUnordered ¶ added in v1.3.0
func (ctx *MergeContext) AddUnordered(f TSSPFile)
func (*MergeContext) Limited ¶ added in v1.3.0
func (ctx *MergeContext) Limited() bool
func (*MergeContext) MergeSelf ¶ added in v1.3.0
func (ctx *MergeContext) MergeSelf() bool
func (*MergeContext) MergeSelfFast ¶ added in v1.3.0
func (ctx *MergeContext) MergeSelfFast() bool
func (*MergeContext) Release ¶ added in v1.3.0
func (ctx *MergeContext) Release()
func (*MergeContext) Sort ¶ added in v1.3.0
func (ctx *MergeContext) Sort()
func (*MergeContext) ToLevel ¶ added in v1.3.0
func (ctx *MergeContext) ToLevel() uint16
func (*MergeContext) UnorderedLen ¶ added in v1.3.0
func (ctx *MergeContext) UnorderedLen() int
func (*MergeContext) UpdateLevel ¶ added in v1.3.0
func (ctx *MergeContext) UpdateLevel(l uint16)
type MergePerformers ¶ added in v1.3.0
type MergePerformers struct {
// contains filtered or unexported fields
}
func NewMergePerformers ¶ added in v1.3.0
func NewMergePerformers(ur *UnorderedReader) *MergePerformers
func (*MergePerformers) Close ¶ added in v1.3.0
func (c *MergePerformers) Close()
func (*MergePerformers) Closed ¶ added in v1.3.0
func (c *MergePerformers) Closed() bool
func (*MergePerformers) Len ¶ added in v1.3.0
func (c *MergePerformers) Len() int
func (*MergePerformers) Less ¶ added in v1.3.0
func (c *MergePerformers) Less(i, j int) bool
func (*MergePerformers) Next ¶ added in v1.3.0
func (c *MergePerformers) Next() error
func (*MergePerformers) Pop ¶ added in v1.3.0
func (c *MergePerformers) Pop() interface{}
func (*MergePerformers) Push ¶ added in v1.3.0
func (c *MergePerformers) Push(v interface{})
func (*MergePerformers) Release ¶ added in v1.3.0
func (c *MergePerformers) Release()
func (*MergePerformers) Swap ¶ added in v1.3.0
func (c *MergePerformers) Swap(i, j int)
type MergeSelf ¶ added in v1.3.0
type MergeSelf struct {
// contains filtered or unexported fields
}
func (*MergeSelf) InitEvents ¶ added in v1.3.0
func (m *MergeSelf) InitEvents(ctx *MergeContext) *Events
type MergeSelfParquetEvent ¶ added in v1.3.0
type MergeSelfParquetEvent struct {
TSSP2ParquetEvent
}
func (*MergeSelfParquetEvent) Instance ¶ added in v1.3.0
func (e *MergeSelfParquetEvent) Instance() Event
func (*MergeSelfParquetEvent) OnWriteRecord ¶ added in v1.3.0
func (e *MergeSelfParquetEvent) OnWriteRecord(rec *record.Record)
type MetaControl ¶ added in v1.3.0
type MetaControl interface {
Push(MetaDataInfo)
Pop() (MetaDataInfo, bool)
IsEmpty() bool
}
func NewMetaControl ¶ added in v1.3.0
func NewMetaControl(isQueue bool, count int) MetaControl
type MetaData ¶ added in v1.3.0
type MetaData struct {
// contains filtered or unexported fields
}
func NewMetaData ¶ added in v1.3.0
func (*MetaData) GetBlockIndex ¶ added in v1.3.0
func (*MetaData) GetContentBlockLength ¶ added in v1.3.0
func (*MetaData) GetContentBlockOffset ¶ added in v1.3.0
func (*MetaData) GetMaxTime ¶ added in v1.3.0
func (*MetaData) GetMinTime ¶ added in v1.3.0
type MetaDataInfo ¶ added in v1.3.0
type MetaIndex ¶
type MetaIndex struct {
// contains filtered or unexported fields
}
MetaIndex: if you change the order of the fields in this structure, remember to update marshal() and unmarshal() as well.
func GetMetaIndexAndBlockId ¶ added in v1.3.0
type MetaQueue ¶ added in v1.3.0
type MetaQueue []MetaDataInfo
func (*MetaQueue) Pop ¶ added in v1.3.0
func (q *MetaQueue) Pop() (MetaDataInfo, bool)
func (*MetaQueue) Push ¶ added in v1.3.0
func (q *MetaQueue) Push(v MetaDataInfo)
type MetaStack ¶ added in v1.3.0
type MetaStack []MetaDataInfo
func (*MetaStack) Pop ¶ added in v1.3.0
func (s *MetaStack) Pop() (MetaDataInfo, bool)
func (*MetaStack) Push ¶ added in v1.3.0
func (s *MetaStack) Push(value MetaDataInfo)
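A sketch of selecting between the two behaviors, assuming isQueue chooses FIFO (MetaQueue) versus LIFO (MetaStack) semantics:

    ctrl := immutable.NewMetaControl(true, 8) // assumed: true => FIFO queue
    fmt.Println(ctrl.IsEmpty())               // true
    if _, ok := ctrl.Pop(); !ok {
        // nothing buffered yet; Push(MetaDataInfo) would add entries
    }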
type MmsIdTime ¶
type MmsIdTime struct {
// contains filtered or unexported fields
}
func NewMmsIdTime ¶
func NewMmsIdTime(sc *SeriesCounter) *MmsIdTime
type MmsReaders ¶
type MmsReaders struct {
Orders TableReaders
OutOfOrders TableReaders
}
type MmsTables ¶
type MmsTables struct {
Order map[string]*TSSPFiles // {"cpu_0001": *TSSPFiles}
OutOfOrder map[string]*TSSPFiles // {"cpu_0001": *TSSPFiles}
CSFiles map[string]*TSSPFiles // {"cpu_0001": *TSSPFiles} tsspFiles for columnStore
PKFiles map[string]*colstore.PKFiles // {"cpu_0001": *PKFiles} PKFiles for columnStore
ImmTable ImmTable
Conf *Config
// contains filtered or unexported fields
}
func NewTableStore ¶
func (*MmsTables) AddBothTSSPFiles ¶ added in v1.2.0
func (*MmsTables) AddRowCountsBySid ¶
func (*MmsTables) AddTSSPFiles ¶
Currently not used by the ts engine.
func (*MmsTables) CompactDone ¶
func (*MmsTables) CompactionDisable ¶
func (m *MmsTables) CompactionDisable()
func (*MmsTables) CompactionEnable ¶
func (m *MmsTables) CompactionEnable()
func (*MmsTables) CompactionEnabled ¶
func (*MmsTables) CopyCSFiles ¶ added in v1.3.0
func (*MmsTables) DisableCompAndMerge ¶ added in v1.0.0
func (m *MmsTables) DisableCompAndMerge()
func (*MmsTables) DropMeasurement ¶
func (*MmsTables) EnableCompAndMerge ¶ added in v1.0.0
func (m *MmsTables) EnableCompAndMerge()
func (*MmsTables) FreeAllMemReader ¶
func (m *MmsTables) FreeAllMemReader()
func (*MmsTables) FreeSequencer ¶ added in v1.0.0
func (*MmsTables) FullCompact ¶
func (*MmsTables) FullyCompacted ¶ added in v1.2.0
func (*MmsTables) GetBothFilesRef ¶ added in v1.0.0
func (*MmsTables) GetCSFiles ¶ added in v1.1.0
func (*MmsTables) GetFileSeq ¶ added in v1.0.0
func (*MmsTables) GetMstFileStat ¶
func (m *MmsTables) GetMstFileStat() *statistics.FileStat
func (*MmsTables) GetMstInfo ¶ added in v1.2.0
func (m *MmsTables) GetMstInfo(name string) (*meta.MeasurementInfo, bool)
func (*MmsTables) GetMstList ¶ added in v1.3.0
func (*MmsTables) GetObsOption ¶ added in v1.3.0
func (m *MmsTables) GetObsOption() *obs.ObsOptions
func (*MmsTables) GetOutOfOrderFileNum ¶
func (*MmsTables) GetRowCountsBySid ¶
func (*MmsTables) GetShardID ¶ added in v1.3.0
func (*MmsTables) GetTSSPFiles ¶ added in v1.0.0
func (*MmsTables) GetTableFileNum ¶ added in v1.3.0
func (*MmsTables) IsOutOfOrderFilesExist ¶ added in v1.0.0
func (*MmsTables) Listen ¶ added in v1.0.0
func (m *MmsTables) Listen(signal chan struct{}, onClose func())
func (*MmsTables) LoadSequencer ¶ added in v1.3.0
func (m *MmsTables) LoadSequencer()
func (*MmsTables) MergeDisable ¶
func (m *MmsTables) MergeDisable()
func (*MmsTables) MergeEnable ¶
func (m *MmsTables) MergeEnable()
func (*MmsTables) MergeEnabled ¶
func (*MmsTables) MergeOutOfOrder ¶
func (*MmsTables) NewChunkIterators ¶
func (m *MmsTables) NewChunkIterators(group FilesInfo) *ChunkIterators
func (*MmsTables) NewStreamIterators ¶
func (m *MmsTables) NewStreamIterators(group FilesInfo) *StreamIterators
func (*MmsTables) NewStreamWriteFile ¶ added in v1.0.0
func (m *MmsTables) NewStreamWriteFile(mst string) *StreamWriteFile
func (*MmsTables) NextSequence ¶
func (*MmsTables) ReloadSequencer ¶ added in v1.1.0
func (*MmsTables) RenameFileToLevel ¶ added in v1.3.0
func (m *MmsTables) RenameFileToLevel(plan *CompactGroup) error
func (*MmsTables) ReplaceDownSampleFiles ¶ added in v1.0.0
func (*MmsTables) ReplaceFiles ¶
func (*MmsTables) ReplacePKFile ¶ added in v1.1.1
func (*MmsTables) SeriesTotal ¶ added in v1.1.0
func (*MmsTables) SetAccumulateMetaIndex ¶ added in v1.2.0
func (m *MmsTables) SetAccumulateMetaIndex(name string, aMetaIndex *AccumulateMetaIndex)
func (*MmsTables) SetAddFunc ¶ added in v1.0.0
func (*MmsTables) SetImmTableType ¶ added in v1.1.0
func (m *MmsTables) SetImmTableType(engineType config.EngineType)
func (*MmsTables) SetIndexMergeSet ¶ added in v1.3.0
func (m *MmsTables) SetIndexMergeSet(idx IndexMergeSet)
func (*MmsTables) SetLockPath ¶ added in v1.2.0
func (*MmsTables) SetMstInfo ¶ added in v1.1.1
func (m *MmsTables) SetMstInfo(name string, mstInfo *meta.MeasurementInfo)
func (*MmsTables) SetObsOption ¶ added in v1.3.0
func (m *MmsTables) SetObsOption(option *obs.ObsOptions)
type MsBuilder ¶
type MsBuilder struct {
Path string
TableData
Conf *Config
MaxIds int
RowCount int64
ShardID uint64
Files []TSSPFile
FilesInfo []FileInfoExtend
FileName TSSPFileName
EncodeChunkDataImp EncodeChunkData
// contains filtered or unexported fields
}
func NewDetachedMsBuilder ¶ added in v1.2.0
func NewMsBuilder ¶ added in v1.1.0
func NewMsBuilder(dir, name string, lockPath *string, conf *Config, idCount int, fileName TSSPFileName, tier uint64, sequencer *Sequencer, estimateSize int, engineType config.EngineType, obsOpt *obs.ObsOptions, shardID uint64) *MsBuilder
func (*MsBuilder) BloomFilterNeedDetached ¶ added in v1.2.0
func (*MsBuilder) CloseIndexWriters ¶ added in v1.3.0
func (*MsBuilder) FileVersion ¶
func (*MsBuilder) GetChunkBuilder ¶ added in v1.2.0
func (b *MsBuilder) GetChunkBuilder() *ChunkDataBuilder
func (*MsBuilder) GetFullTextIdx ¶ added in v1.2.0
func (*MsBuilder) GetIndexBuilder ¶ added in v1.3.0
func (b *MsBuilder) GetIndexBuilder() *index.IndexWriterBuilder
func (*MsBuilder) GetLocalBfCount ¶ added in v1.2.0
func (*MsBuilder) GetPKInfoNum ¶ added in v1.1.0
func (*MsBuilder) GetPKMark ¶ added in v1.1.0
func (b *MsBuilder) GetPKMark(i int) fragment.IndexFragment
func (*MsBuilder) GetPKRecord ¶ added in v1.1.0
func (*MsBuilder) MaxRowsPerSegment ¶
func (*MsBuilder) NewIndexWriterBuilder ¶ added in v1.3.0
func (b *MsBuilder) NewIndexWriterBuilder(schema record.Schemas, indexRelation influxql.IndexRelation)
func (*MsBuilder) NewPKIndexWriter ¶ added in v1.1.0
func (b *MsBuilder) NewPKIndexWriter()
func (*MsBuilder) SetEncodeChunkDataImp ¶ added in v1.2.0
func (b *MsBuilder) SetEncodeChunkDataImp(engineType config.EngineType)
func (*MsBuilder) SetFullTextIdx ¶ added in v1.2.0
func (*MsBuilder) SetLocalBfCount ¶ added in v1.2.0
func (*MsBuilder) SetTCLocation ¶ added in v1.1.1
func (*MsBuilder) SetTimeSorted ¶ added in v1.2.0
func (*MsBuilder) StoreTimes ¶ added in v1.1.0
func (b *MsBuilder) StoreTimes()
func (*MsBuilder) SwitchChunkMeta ¶ added in v1.2.0
func (*MsBuilder) WriteChunkMeta ¶ added in v1.2.0
func (*MsBuilder) WriteDetached ¶ added in v1.2.0
func (*MsBuilder) WriteDetachedIndex ¶ added in v1.3.0
func (*MsBuilder) WriteDetachedMetaAndIndex ¶ added in v1.2.0
func (*MsBuilder) WriteRecord ¶
type PageCacheReader ¶ added in v1.2.0
type PageCacheReader struct {
// contains filtered or unexported fields
}
func NewPageCacheReader ¶ added in v1.2.0
func NewPageCacheReader(t *Trailer, r *tsspFileReader) *PageCacheReader
func (*PageCacheReader) GetCachePageIdsAndOffsets ¶ added in v1.2.0
func (pcr *PageCacheReader) GetCachePageIdsAndOffsets(start int64, size uint32) ([]int64, []int64, error)
GetCachePageIdsAndOffsets returns the IDs and offsets of all cache pages containing bytes in the range [start, start+size).
func (*PageCacheReader) GetMaxPageIdAndOffset ¶ added in v1.2.0
func (pcr *PageCacheReader) GetMaxPageIdAndOffset() (int64, int64)
func (*PageCacheReader) Init ¶ added in v1.2.0
func (pcr *PageCacheReader) Init()
func (*PageCacheReader) ReadFixPageSize ¶ added in v1.2.0
func (pcr *PageCacheReader) ReadFixPageSize(offset int64, size uint32, buf *[]byte, ioPriority int) ([]byte, *readcache.CachePage, error)
ReadFixPageSize reads the file bytes of the pages covering the requested range.
func (*PageCacheReader) ReadSinglePage ¶ added in v1.2.0
type ParquetTask ¶ added in v1.3.0
func (*ParquetTask) Execute ¶ added in v1.3.0
func (t *ParquetTask) Execute(ctx EventContext)
func (*ParquetTask) GetSeries ¶ added in v1.3.0
func (t *ParquetTask) GetSeries(sId uint64, ctx EventContext) (string, error)
type PreAggBuilder ¶
type PreAggBuilder interface {
// contains filtered or unexported methods
}
type PreAggBuilders ¶
type PreAggBuilders struct {
// contains filtered or unexported fields
}
func (*PreAggBuilders) FloatBuilder ¶ added in v1.2.0
func (b *PreAggBuilders) FloatBuilder() *FloatPreAgg
func (*PreAggBuilders) IntegerBuilder ¶ added in v1.2.0
func (b *PreAggBuilders) IntegerBuilder() *IntegerPreAgg
func (*PreAggBuilders) Release ¶
func (b *PreAggBuilders) Release()
type QueryfileCache ¶ added in v1.1.0
type QueryfileCache struct {
// contains filtered or unexported fields
}
func GetQueryfileCache ¶ added in v1.1.0
func GetQueryfileCache() *QueryfileCache
func NewQueryfileCache ¶ added in v1.1.0
func NewQueryfileCache(cap uint32) *QueryfileCache
func (*QueryfileCache) Get ¶ added in v1.1.0
func (qfc *QueryfileCache) Get()
func (*QueryfileCache) GetCap ¶ added in v1.1.0
func (qfc *QueryfileCache) GetCap() uint32
func (*QueryfileCache) Put ¶ added in v1.1.0
func (qfc *QueryfileCache) Put(f TSSPFile)
type ReadContext ¶
type ReadContext struct {
Ascending bool
// contains filtered or unexported fields
}
func NewReadContext ¶
func NewReadContext(ascending bool) *ReadContext
func (*ReadContext) GetCoder ¶ added in v1.1.0
func (d *ReadContext) GetCoder() *encoding.CoderContext
func (*ReadContext) GetOps ¶
func (d *ReadContext) GetOps() []*comm.CallOption
func (*ReadContext) GetReadBuff ¶ added in v1.1.0
func (d *ReadContext) GetReadBuff() []byte
func (*ReadContext) InitPreAggBuilder ¶
func (d *ReadContext) InitPreAggBuilder()
func (*ReadContext) IsAborted ¶ added in v1.3.0
func (d *ReadContext) IsAborted() bool
func (*ReadContext) MatchPreAgg ¶
func (d *ReadContext) MatchPreAgg() bool
func (*ReadContext) Release ¶
func (d *ReadContext) Release()
func (*ReadContext) Reset ¶
func (d *ReadContext) Reset()
func (*ReadContext) Set ¶
func (d *ReadContext) Set(ascending bool, tr util.TimeRange, onlyFirstOrLast bool, ops []*comm.CallOption)
func (*ReadContext) SetClosedSignal ¶ added in v1.2.0
func (d *ReadContext) SetClosedSignal(s *bool)
func (*ReadContext) SetOps ¶
func (d *ReadContext) SetOps(c []*comm.CallOption)
func (*ReadContext) SetSpan ¶ added in v1.1.0
func (d *ReadContext) SetSpan(readSpan, filterSpan *tracing.Span)
func (*ReadContext) SetTr ¶
func (d *ReadContext) SetTr(tr util.TimeRange)
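A sketch of preparing a read context, assuming util.TimeRange exposes Min/Max nanosecond fields:

    ctx := immutable.NewReadContext(true) // ascending scan
    ctx.SetTr(util.TimeRange{Min: 0, Max: 1700000000000000000}) // field names assumed
    ctx.SetOps(nil) // no pre-agg call options
    defer ctx.Release()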
type Segment ¶
type Segment struct {
// contains filtered or unexported fields
}
Segment describes a data segment: offset, size, minT, maxT.
type SegmentMeta ¶ added in v1.2.0
type SegmentMeta struct {
// contains filtered or unexported fields
}
func NewSegmentMeta ¶ added in v1.2.0
func NewSegmentMeta(id int, c *ChunkMeta) *SegmentMeta
func (*SegmentMeta) GetMaxTime ¶ added in v1.2.0
func (s *SegmentMeta) GetMaxTime() int64
func (*SegmentMeta) GetMinTime ¶ added in v1.2.0
func (s *SegmentMeta) GetMinTime() int64
type SegmentRange ¶
type SegmentRange [2]int64 // min/max
type SegmentReader ¶ added in v1.0.0
type SegmentReader struct {
// contains filtered or unexported fields
}
func NewSegmentReader ¶ added in v1.0.0
func NewSegmentReader(fi *FileIterator) *SegmentReader
type SegmentSequenceReader ¶ added in v1.3.0
type SegmentSequenceReader struct {
// contains filtered or unexported fields
}
func NewSegmentSequenceReader ¶ added in v1.3.0
func NewSegmentSequenceReader(path *sparseindex.OBSFilterPath, taskID int, count uint64, consumeInfo *consume.ConsumeInfo, schema record.Schemas, filterOpt *FilterOptions) (*SegmentSequenceReader, error)
func (*SegmentSequenceReader) Close ¶ added in v1.3.0
func (reader *SegmentSequenceReader) Close()
func (*SegmentSequenceReader) ConsumeDateByShard ¶ added in v1.3.0
type SegmentTask ¶ added in v1.2.0
type SegmentTask struct {
// contains filtered or unexported fields
}
type SequenceIterator ¶ added in v1.3.0
type SequenceIterator interface {
SetChunkMetasReader(reader SequenceIteratorChunkMetaReader)
Release()
AddFiles(files []TSSPFile)
Stop()
Run() error
Buffer() *pool.Buffer
}
func NewSequenceIterator ¶ added in v1.3.0
func NewSequenceIterator(handler SequenceIteratorHandler, logger *Log.Logger) SequenceIterator
type SequenceIteratorChunkMetaReader ¶ added in v1.3.0
type SequenceIteratorHandler ¶ added in v1.3.0
type Sequencer ¶
type Sequencer struct {
// contains filtered or unexported fields
}
func NewSequencer ¶
func NewSequencer() *Sequencer
func (*Sequencer) AddRowCounts ¶
func (*Sequencer) BatchUpdateCheckTime ¶
func (s *Sequencer) BatchUpdateCheckTime(p *IdTimePairs, incrRows bool)
func (*Sequencer) DelMmsIdTime ¶ added in v1.1.0
func (*Sequencer) GetMmsIdTime ¶ added in v1.3.0
func (*Sequencer) ResetMmsIdTime ¶ added in v1.1.0
func (s *Sequencer) ResetMmsIdTime()
func (*Sequencer) SeriesTotal ¶ added in v1.1.0
func (*Sequencer) SetToInLoading ¶ added in v1.1.0
type SeriesCounter ¶ added in v1.1.0
type SeriesCounter struct {
// contains filtered or unexported fields
}
func (*SeriesCounter) DecrN ¶ added in v1.1.0
func (sc *SeriesCounter) DecrN(n uint64)
func (*SeriesCounter) Get ¶ added in v1.1.0
func (sc *SeriesCounter) Get() uint64
func (*SeriesCounter) Incr ¶ added in v1.1.0
func (sc *SeriesCounter) Incr()
func (*SeriesCounter) Reset ¶ added in v1.1.0
func (sc *SeriesCounter) Reset()
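A sketch, assuming the zero value is ready to use (NewMmsIdTime above takes a *SeriesCounter, which suggests plain pointer usage):

    var sc immutable.SeriesCounter // assumed usable as its zero value
    sc.Incr()
    sc.Incr()
    sc.DecrN(1)
    fmt.Println(sc.Get()) // 1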
type ShowTagValuesPlan ¶ added in v1.3.0
type ShowTagValuesPlan interface {
Execute(dst map[string]*TagSets, tagKeys map[string][][]byte, condition influxql.Expr, tr util.TimeRange, limit int) error
Stop()
}
func NewShowTagValuesPlan ¶ added in v1.3.0
func NewShowTagValuesPlan(table TablesStore, idx IndexMergeSet, logger *Log.Logger, sh EngineShard, client metaclient.MetaClient) ShowTagValuesPlan
type SortKeyIterator ¶ added in v1.1.1
type SortKeyIterator struct {
*FileIterator
// contains filtered or unexported fields
}
func NewSortKeyIterator ¶ added in v1.1.1
func NewSortKeyIterator(fi *FileIterator, sortKeyFields []record.Field, ctx *ReadContext, schema record.Schemas, tcDuration time.Duration, compactWithBlock bool, fileIdx int) (*SortKeyIterator, error)
func (*SortKeyIterator) GetNewRecord ¶ added in v1.1.1
func (s *SortKeyIterator) GetNewRecord(tcDuration time.Duration, compactWithBlock bool) error
func (*SortKeyIterator) NextSingleFragment ¶ added in v1.1.1
func (s *SortKeyIterator) NextSingleFragment(tbStore *MmsTables, impl *IteratorByRow, pkSchema record.Schemas) (*record.Record, error)
type SortLimitCursor ¶ added in v1.3.0
type SortLimitCursor struct {
// contains filtered or unexported fields
}
func NewSortLimitCursor ¶ added in v1.3.0
func NewSortLimitCursor(options hybridqp.Options, schemas record.Schemas, input comm.TimeCutKeyCursor, shardId int64) *SortLimitCursor
func (*SortLimitCursor) Close ¶ added in v1.3.0
func (t *SortLimitCursor) Close() error
func (*SortLimitCursor) EndSpan ¶ added in v1.3.0
func (t *SortLimitCursor) EndSpan()
func (*SortLimitCursor) GetInput ¶ added in v1.3.0
func (t *SortLimitCursor) GetInput() comm.TimeCutKeyCursor
func (*SortLimitCursor) GetSchema ¶ added in v1.3.0
func (t *SortLimitCursor) GetSchema() record.Schemas
func (*SortLimitCursor) Name ¶ added in v1.3.0
func (t *SortLimitCursor) Name() string
func (*SortLimitCursor) Next ¶ added in v1.3.0
func (t *SortLimitCursor) Next() (*record.Record, comm.SeriesInfoIntf, error)
func (*SortLimitCursor) NextAggData ¶ added in v1.3.0
func (*SortLimitCursor) SetOps ¶ added in v1.3.0
func (t *SortLimitCursor) SetOps(ops []*comm.CallOption)
func (*SortLimitCursor) SinkPlan ¶ added in v1.3.0
func (t *SortLimitCursor) SinkPlan(plan hybridqp.QueryNode)
func (*SortLimitCursor) StartSpan ¶ added in v1.3.0
func (t *SortLimitCursor) StartSpan(span *tracing.Span)
type SortLimitRows ¶ added in v1.3.0
type SortLimitRows struct {
// contains filtered or unexported fields
}
func NewSortLimitRows ¶ added in v1.3.0
func NewSortLimitRows(sortIndex []int, schema record.Schemas, shardId int64) *SortLimitRows
func (*SortLimitRows) Len ¶ added in v1.3.0
func (rs *SortLimitRows) Len() int
func (*SortLimitRows) Less ¶ added in v1.3.0
func (rs *SortLimitRows) Less(i, j int) bool
func (*SortLimitRows) Pop ¶ added in v1.3.0
func (rs *SortLimitRows) Pop() interface{}
func (*SortLimitRows) PopToRec ¶ added in v1.3.0
func (rs *SortLimitRows) PopToRec() *record.Record
func (*SortLimitRows) Push ¶ added in v1.3.0
func (rs *SortLimitRows) Push(x interface{})
func (*SortLimitRows) Swap ¶ added in v1.3.0
func (rs *SortLimitRows) Swap(i, j int)
type StreamCompactParquetEvent ¶ added in v1.3.0
type StreamCompactParquetEvent struct {
TSSP2ParquetEvent
}
func (*StreamCompactParquetEvent) Instance ¶ added in v1.3.0
func (e *StreamCompactParquetEvent) Instance() Event
func (*StreamCompactParquetEvent) OnWriteChunkMeta ¶ added in v1.3.0
func (e *StreamCompactParquetEvent) OnWriteChunkMeta(cm *ChunkMeta)
type StreamIterator ¶
type StreamIterator struct {
*FileIterator
// contains filtered or unexported fields
}
func NewStreamStreamIterator ¶
func NewStreamStreamIterator(fi *FileIterator) *StreamIterator
type StreamIterators ¶
func (*StreamIterators) Close ¶
func (c *StreamIterators) Close()
func (*StreamIterators) FileVersion ¶
func (c *StreamIterators) FileVersion() uint64
func (*StreamIterators) Flush ¶
func (c *StreamIterators) Flush() error
func (*StreamIterators) Init ¶
func (c *StreamIterators) Init(id uint64, chunkDataOffset int64, schema record.Schemas)
func (*StreamIterators) InitEvents ¶ added in v1.3.0
func (c *StreamIterators) InitEvents(level uint16) *Events
func (*StreamIterators) Len ¶
func (c *StreamIterators) Len() int
func (*StreamIterators) Less ¶
func (c *StreamIterators) Less(i, j int) bool
func (*StreamIterators) ListenCloseSignal ¶ added in v1.2.0
func (c *StreamIterators) ListenCloseSignal(finish chan struct{})
func (*StreamIterators) NewFile ¶
func (c *StreamIterators) NewFile(addFileExt bool) error
func (*StreamIterators) NewTSSPFile ¶
func (c *StreamIterators) NewTSSPFile(tmp bool) (TSSPFile, error)
func (*StreamIterators) Pop ¶
func (c *StreamIterators) Pop() interface{}
func (*StreamIterators) Push ¶
func (c *StreamIterators) Push(v interface{})
func (*StreamIterators) RemoveTmpFiles ¶ added in v1.3.0
func (c *StreamIterators) RemoveTmpFiles()
func (*StreamIterators) SetWriter ¶ added in v1.2.0
func (c *StreamIterators) SetWriter(w fileops.FileWriter)
func (*StreamIterators) Size ¶
func (c *StreamIterators) Size() int64
func (*StreamIterators) Swap ¶
func (c *StreamIterators) Swap(i, j int)
func (*StreamIterators) SwitchChunkMeta ¶ added in v1.2.0
func (c *StreamIterators) SwitchChunkMeta() error
func (*StreamIterators) WithLog ¶
func (c *StreamIterators) WithLog(log *Log.Logger)
func (*StreamIterators) WriteChunkMeta ¶ added in v1.2.0
func (c *StreamIterators) WriteChunkMeta(cm *ChunkMeta) (int, error)
type StreamIteratorsPool ¶
type StreamIteratorsPool struct {
// contains filtered or unexported fields
}
func NewStreamIteratorsPool ¶
func NewStreamIteratorsPool(n int) *StreamIteratorsPool
type StreamWriteFile ¶ added in v1.0.0
func NewWriteScanFile ¶ added in v1.0.0
func (*StreamWriteFile) AppendColumn ¶ added in v1.0.0
func (c *StreamWriteFile) AppendColumn(ref *record.Field) error
func (*StreamWriteFile) ChangeColumn ¶ added in v1.0.0
func (c *StreamWriteFile) ChangeColumn(ref record.Field) error
func (*StreamWriteFile) ChangeSid ¶ added in v1.0.0
func (c *StreamWriteFile) ChangeSid(sid uint64)
func (*StreamWriteFile) Close ¶ added in v1.0.0
func (c *StreamWriteFile) Close(isError bool)
func (*StreamWriteFile) Flush ¶ added in v1.0.0
func (c *StreamWriteFile) Flush() error
func (*StreamWriteFile) GetTSSPFile ¶ added in v1.0.0
func (c *StreamWriteFile) GetTSSPFile() TSSPFile
func (*StreamWriteFile) Init ¶ added in v1.0.0
func (c *StreamWriteFile) Init(id uint64, chunkDataOffset int64, schema record.Schemas)
func (*StreamWriteFile) InitFile ¶ added in v1.0.0
func (c *StreamWriteFile) InitFile(seq uint64) error
func (*StreamWriteFile) InitMergedFile ¶ added in v1.0.0
func (c *StreamWriteFile) InitMergedFile(f TSSPFile) error
func (*StreamWriteFile) NewFile ¶ added in v1.0.0
func (c *StreamWriteFile) NewFile(addFileExt bool) error
func (*StreamWriteFile) NewTSSPFile ¶ added in v1.0.0
func (c *StreamWriteFile) NewTSSPFile(tmp bool) (TSSPFile, error)
func (*StreamWriteFile) SetValidate ¶ added in v1.1.0
func (c *StreamWriteFile) SetValidate(en bool)
func (*StreamWriteFile) Size ¶ added in v1.0.0
func (c *StreamWriteFile) Size() int64
func (*StreamWriteFile) SortColumns ¶ added in v1.2.0
func (c *StreamWriteFile) SortColumns()
func (*StreamWriteFile) SwitchChunkMeta ¶ added in v1.2.0
func (c *StreamWriteFile) SwitchChunkMeta() error
func (*StreamWriteFile) WriteChunkMeta ¶ added in v1.2.0
func (c *StreamWriteFile) WriteChunkMeta(cm *ChunkMeta) (int, error)
func (*StreamWriteFile) WriteCurrentMeta ¶ added in v1.0.0
func (c *StreamWriteFile) WriteCurrentMeta() error
func (*StreamWriteFile) WriteFile ¶ added in v1.0.0
func (c *StreamWriteFile) WriteFile() error
func (*StreamWriteFile) WriteMeta ¶ added in v1.0.0
func (c *StreamWriteFile) WriteMeta(cm *ChunkMeta) error
type StringPreAgg ¶
type StringPreAgg struct {
// contains filtered or unexported fields
}
StringPreAgg: if you change the order of the fields in this structure, remember to update marshal() and unmarshal() as well.
func NewStringPreAgg ¶
func NewStringPreAgg() *StringPreAgg
type TSSP2ParquetEvent ¶ added in v1.3.0
type TSSP2ParquetEvent struct {
Event
// contains filtered or unexported fields
}
func (*TSSP2ParquetEvent) Enable ¶ added in v1.3.0
func (e *TSSP2ParquetEvent) Enable() bool
func (*TSSP2ParquetEvent) Init ¶ added in v1.3.0
func (e *TSSP2ParquetEvent) Init(mst string, level uint16)
func (*TSSP2ParquetEvent) OnFinish ¶ added in v1.3.0
func (e *TSSP2ParquetEvent) OnFinish(ctx EventContext)
func (*TSSP2ParquetEvent) OnInterrupt ¶ added in v1.3.0
func (e *TSSP2ParquetEvent) OnInterrupt()
func (*TSSP2ParquetEvent) OnNewFile ¶ added in v1.3.0
func (e *TSSP2ParquetEvent) OnNewFile(f TSSPFile)
func (*TSSP2ParquetEvent) OnReplaceFile ¶ added in v1.3.0
func (e *TSSP2ParquetEvent) OnReplaceFile(shardDir string, lockFile string) error
type TSSP2ParquetPlan ¶ added in v1.3.0
type TSSP2ParquetPlan struct {
Mst string
Schema map[string]uint8
Files []string
// contains filtered or unexported fields
}
func (*TSSP2ParquetPlan) Init ¶ added in v1.3.0
func (p *TSSP2ParquetPlan) Init(mst string, level uint16)
type TSSPFile ¶
type TSSPFile interface {
Path() string
Name() string
FileName() TSSPFileName
LevelAndSequence() (uint16, uint64)
FileNameMerge() uint16
FileNameExtend() uint16
IsOrder() bool
Ref()
Unref()
RefFileReader()
UnrefFileReader()
Stop()
Inuse() bool
MetaIndexAt(idx int) (*MetaIndex, error)
MetaIndex(id uint64, tr util.TimeRange) (int, *MetaIndex, error)
ChunkMeta(id uint64, offset int64, size, itemCount uint32, metaIdx int, ctx *ChunkMetaContext, ioPriority int) (*ChunkMeta, error)
ReadAt(cm *ChunkMeta, segment int, dst *record.Record, decs *ReadContext, ioPriority int) (*record.Record, error)
ReadData(offset int64, size uint32, dst *[]byte, ioPriority int) ([]byte, error)
ReadChunkMetaData(metaIdx int, m *MetaIndex, dst []ChunkMeta, ioPriority int) ([]ChunkMeta, error)
FileStat() *Trailer
// FileSize returns the size of the disk space occupied by the file
FileSize() int64
// InMemSize returns the size of the memory occupied by the file
InMemSize() int64
Contains(id uint64) (bool, error)
ContainsByTime(tr util.TimeRange) (bool, error)
ContainsValue(id uint64, tr util.TimeRange) (bool, error)
MinMaxTime() (int64, int64, error)
Open() error
Close() error
LoadIntoMemory() error
LoadComponents() error
LoadIdTimes(p *IdTimePairs) error
Rename(newName string) error
UpdateLevel(level uint16)
Remove() error
FreeMemory(evictLock bool) int64
FreeFileHandle() error
Version() uint64
AverageChunkRows() int
MaxChunkRows() int
MetaIndexItemNum() int64
AddToEvictList(level uint16)
RemoveFromEvictList(level uint16)
GetFileReaderRef() int64
RenameOnObs(obsName string, tmp bool, opt *obs.ObsOptions) error
ChunkMetaCompressMode() uint8
}
type TSSPFileAttachedReader ¶ added in v1.2.0
type TSSPFileAttachedReader struct {
// contains filtered or unexported fields
}
func NewTSSPFileAttachedReader ¶ added in v1.2.0
func NewTSSPFileAttachedReader(files []TSSPFile, fragRanges []fragment.FragmentRanges, ctx *FileReaderContext, schema hybridqp.Options, unnest *influxql.Unnest) (*TSSPFileAttachedReader, error)
func (*TSSPFileAttachedReader) Close ¶ added in v1.2.0
func (t *TSSPFileAttachedReader) Close() error
func (*TSSPFileAttachedReader) EndSpan ¶ added in v1.2.0
func (t *TSSPFileAttachedReader) EndSpan()
func (*TSSPFileAttachedReader) GetSchema ¶ added in v1.2.0
func (t *TSSPFileAttachedReader) GetSchema() record.Schemas
func (*TSSPFileAttachedReader) Name ¶ added in v1.2.0
func (t *TSSPFileAttachedReader) Name() string
func (*TSSPFileAttachedReader) Next ¶ added in v1.2.0
func (t *TSSPFileAttachedReader) Next() (*record.Record, comm.SeriesInfoIntf, error)
func (*TSSPFileAttachedReader) NextAggData ¶ added in v1.2.0
func (*TSSPFileAttachedReader) ResetBy ¶ added in v1.2.0
func (t *TSSPFileAttachedReader) ResetBy(files []TSSPFile, fragRanges []fragment.FragmentRanges) error
func (*TSSPFileAttachedReader) SetOps ¶ added in v1.2.0
func (t *TSSPFileAttachedReader) SetOps(ops []*comm.CallOption)
func (*TSSPFileAttachedReader) SinkPlan ¶ added in v1.2.0
func (t *TSSPFileAttachedReader) SinkPlan(plan hybridqp.QueryNode)
func (*TSSPFileAttachedReader) StartSpan ¶ added in v1.2.0
func (t *TSSPFileAttachedReader) StartSpan(span *tracing.Span)
type TSSPFileDetachedReader ¶ added in v1.2.0
type TSSPFileDetachedReader struct {
// contains filtered or unexported fields
}
func NewTSSPFileDetachedReader ¶ added in v1.2.0
func NewTSSPFileDetachedReader(metaIndex []*MetaIndex, blocks [][]int, ctx *FileReaderContext, path *sparseindex.OBSFilterPath, unnest *influxql.Unnest, isSort bool, options hybridqp.Options) (*TSSPFileDetachedReader, error)
func (*TSSPFileDetachedReader) Close ¶ added in v1.2.0
func (t *TSSPFileDetachedReader) Close() error
func (*TSSPFileDetachedReader) EndSpan ¶ added in v1.2.0
func (t *TSSPFileDetachedReader) EndSpan()
func (*TSSPFileDetachedReader) GetSchema ¶ added in v1.2.0
func (t *TSSPFileDetachedReader) GetSchema() record.Schemas
func (*TSSPFileDetachedReader) Name ¶ added in v1.2.0
func (t *TSSPFileDetachedReader) Name() string
func (*TSSPFileDetachedReader) Next ¶ added in v1.2.0
func (t *TSSPFileDetachedReader) Next() (*record.Record, comm.SeriesInfoIntf, error)
func (*TSSPFileDetachedReader) NextAggData ¶ added in v1.2.0
func (*TSSPFileDetachedReader) ResetBy ¶ added in v1.2.0
func (t *TSSPFileDetachedReader) ResetBy(metaIndex []*MetaIndex, blocks [][]int, ctx *FileReaderContext)
func (*TSSPFileDetachedReader) SetOps ¶ added in v1.2.0
func (t *TSSPFileDetachedReader) SetOps(ops []*comm.CallOption)
func (*TSSPFileDetachedReader) SinkPlan ¶ added in v1.2.0
func (t *TSSPFileDetachedReader) SinkPlan(plan hybridqp.QueryNode)
func (*TSSPFileDetachedReader) StartSpan ¶ added in v1.2.0
func (t *TSSPFileDetachedReader) StartSpan(span *tracing.Span)
func (*TSSPFileDetachedReader) UpdateTime ¶ added in v1.3.0
func (t *TSSPFileDetachedReader) UpdateTime(time int64)
type TSSPFileName ¶
type TSSPFileName struct {
// contains filtered or unexported fields
}
func NewTSSPFileName ¶
func NewTSSPFileName(seq uint64, level, merge, extent uint16, order bool, lockPath *string) TSSPFileName
func (*TSSPFileName) Equal ¶
func (n *TSSPFileName) Equal(other *TSSPFileName) bool
func (*TSSPFileName) ParseFileName ¶
func (n *TSSPFileName) ParseFileName(name string) error
func (*TSSPFileName) SetExtend ¶
func (n *TSSPFileName) SetExtend(extend uint16)
func (*TSSPFileName) SetLevel ¶
func (n *TSSPFileName) SetLevel(l uint16)
func (*TSSPFileName) SetMerge ¶
func (n *TSSPFileName) SetMerge(merge uint16)
func (*TSSPFileName) SetOrder ¶
func (n *TSSPFileName) SetOrder(v bool)
func (*TSSPFileName) SetSeq ¶
func (n *TSSPFileName) SetSeq(seq uint64)
func (*TSSPFileName) String ¶
func (n *TSSPFileName) String() string
func (*TSSPFileName) TmpPath ¶
func (n *TSSPFileName) TmpPath(dir string) string
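A sketch of constructing and round-tripping a file name; that ParseFileName accepts the output of String is an assumption:

    lock := ""
    n := immutable.NewTSSPFileName(7, 2, 0, 0, true, &lock) // seq, level, merge, extent, order
    s := n.String()

    var parsed immutable.TSSPFileName
    if err := parsed.ParseFileName(s); err != nil {
        // handle a malformed name
    }
    fmt.Println(parsed.Equal(&n)) // expected true if String/Parse round-trip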
type TSSPFiles ¶
type TSSPFiles struct {
// contains filtered or unexported fields
}
func NewTSSPFiles ¶
func NewTSSPFiles() *TSSPFiles
type TableReaders ¶
type TableReaders []TSSPFile
func (TableReaders) Len ¶
func (tables TableReaders) Len() int
func (TableReaders) Less ¶
func (tables TableReaders) Less(i, j int) bool
func (TableReaders) Swap ¶
func (tables TableReaders) Swap(i, j int)
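Since TableReaders carries Len/Less/Swap, it satisfies sort.Interface; a one-line sketch (sort imported):

    var tables immutable.TableReaders // []TSSPFile gathered elsewhere
    sort.Sort(tables)                 // orders the files using the Less above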
type TableStoreGC ¶
type TableStoreGC struct {
// contains filtered or unexported fields
}
func (*TableStoreGC) Add ¶
func (sgc *TableStoreGC) Add(free bool, files ...TSSPFile)
func (*TableStoreGC) GC ¶
func (sgc *TableStoreGC) GC()
type TablesGC ¶
func NewTableStoreGC ¶
func NewTableStoreGC() TablesGC
type TablesStore ¶
type TablesStore interface {
SetOpId(shardId uint64, opId uint64)
Open() (int64, error)
Close() error
AddTable(ms *MsBuilder, isOrder bool, tmp bool)
AddTSSPFiles(name string, isOrder bool, f ...TSSPFile)
AddBothTSSPFiles(flushed *bool, name string, orderFiles []TSSPFile, unorderFiles []TSSPFile)
AddPKFile(name, file string, rec *record.Record, mark fragment.IndexFragment, tcLocation int8)
GetPKFile(mstName string, file string) (pkInfo *colstore.PKInfo, ok bool)
FreeAllMemReader()
ReplaceFiles(name string, oldFiles, newFiles []TSSPFile, isOrder bool) error
GetBothFilesRef(measurement string, hasTimeFilter bool, tr util.TimeRange, flushed *bool) ([]TSSPFile, []TSSPFile, bool)
ReplaceDownSampleFiles(mstNames []string, originFiles [][]TSSPFile, newFiles [][]TSSPFile, isOrder bool, callBack func()) error
NextSequence() uint64
Sequencer() *Sequencer
GetTSSPFiles(mm string, isOrder bool) (*TSSPFiles, bool)
GetCSFiles(mm string) (*TSSPFiles, bool)
CopyCSFiles(name string) []TSSPFile
Tier() uint64
SetTier(tier uint64)
File(name string, namePath string, isOrder bool) TSSPFile
CompactDone(seq []string)
CompactionEnable()
CompactionDisable()
MergeEnable()
MergeDisable()
CompactionEnabled() bool
MergeEnabled() bool
IsOutOfOrderFilesExist() bool
MergeOutOfOrder(shId uint64, full bool, force bool) error
LevelCompact(level uint16, shid uint64) error
FullCompact(shid uint64) error
SetAddFunc(addFunc func(int64))
LoadSequencer()
GetRowCountsBySid(measurement string, sid uint64) (int64, error)
AddRowCountsBySid(measurement string, sid uint64, rowCounts int64)
GetOutOfOrderFileNum() int
GetTableFileNum(string, bool) int
GetMstFileStat() *stats.FileStat
DropMeasurement(ctx context.Context, name string) error
GetFileSeq() uint64
DisableCompAndMerge()
EnableCompAndMerge()
FreeSequencer() bool
SetImmTableType(engineType config.EngineType)
SetMstInfo(name string, mstInfo *meta.MeasurementInfo)
SetAccumulateMetaIndex(name string, aMetaIndex *AccumulateMetaIndex)
GetMstInfo(name string) (*meta.MeasurementInfo, bool)
SeriesTotal() uint64
SetLockPath(lock *string)
FullyCompacted() bool
SetObsOption(option *obs.ObsOptions)
GetObsOption() *obs.ObsOptions
GetShardID() uint64
SetIndexMergeSet(idx IndexMergeSet)
GetMstList(isOrder bool) []string
}
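The interface pairs enable/disable switches for background compaction and merge. A minimal sketch that suspends both around a critical section, using only methods declared on TablesStore above (import path assumed):

package example

import "github.com/openGemini/openGemini/engine/immutable" // import path assumed

// withCompAndMergePaused runs fn with background compaction and merge
// suspended, restoring them on return.
func withCompAndMergePaused(store immutable.TablesStore, fn func() error) error {
	store.DisableCompAndMerge()
	defer store.EnableCompAndMerge()
	return fn()
}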
type TagSets ¶ added in v1.3.0
type TagSets struct {
// contains filtered or unexported fields
}
func NewTagSets ¶ added in v1.3.0
func NewTagSets() *TagSets
func (*TagSets) TagKVCount ¶ added in v1.3.0
type TagValuesIteratorHandler ¶ added in v1.3.0
type TagValuesIteratorHandler struct {
// contains filtered or unexported fields
}
func NewTagValuesIteratorHandler ¶ added in v1.3.0
func NewTagValuesIteratorHandler(idx IndexMergeSet, condition influxql.Expr, tr *util.TimeRange, limit int) *TagValuesIteratorHandler
func (*TagValuesIteratorHandler) Begin ¶ added in v1.3.0
func (h *TagValuesIteratorHandler) Begin()
func (*TagValuesIteratorHandler) Finish ¶ added in v1.3.0
func (h *TagValuesIteratorHandler) Finish()
func (*TagValuesIteratorHandler) Init ¶ added in v1.3.0
func (h *TagValuesIteratorHandler) Init(param map[string]interface{}) error
func (*TagValuesIteratorHandler) Limited ¶ added in v1.3.0
func (h *TagValuesIteratorHandler) Limited() bool
func (*TagValuesIteratorHandler) NextChunkMeta ¶ added in v1.3.0
func (h *TagValuesIteratorHandler) NextChunkMeta(buf []byte) error
func (*TagValuesIteratorHandler) NextFile ¶ added in v1.3.0
func (h *TagValuesIteratorHandler) NextFile(TSSPFile)
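From the method set above, the handler looks like a visitor driven over files and their chunk metadata. The call order in this sketch (Begin, NextFile, per-chunk NextChunkMeta, Limited to stop early, Finish) is an inference, not documented behavior:

package example

import "github.com/openGemini/openGemini/engine/immutable" // import path assumed

func driveTagValues(h *immutable.TagValuesIteratorHandler, f immutable.TSSPFile, metas [][]byte) error {
	h.Begin()
	defer h.Finish()
	h.NextFile(f)
	for _, buf := range metas {
		if err := h.NextChunkMeta(buf); err != nil {
			return err
		}
		if h.Limited() { // limit reached; stop iterating early
			break
		}
	}
	return nil
}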
type TimePreAgg ¶
type TimePreAgg struct {
// contains filtered or unexported fields
}
func NewTimePreAgg ¶
func NewTimePreAgg() *TimePreAgg
type Trailer ¶
type Trailer struct {
TableStat
// contains filtered or unexported fields
}
func (*Trailer) ContainsId ¶
func (*Trailer) MetaIndexItemNum ¶ added in v1.0.0
func (*Trailer) MetaIndexSize ¶ added in v1.2.0
func (*Trailer) SetChunkMetaCompressFlag ¶ added in v1.2.0
func (t *Trailer) SetChunkMetaCompressFlag()
type TsChunkDataImp ¶ added in v1.2.0
type TsChunkDataImp struct {
}
func (*TsChunkDataImp) EncodeChunk ¶ added in v1.2.0
func (t *TsChunkDataImp) EncodeChunk(b *ChunkDataBuilder, id uint64, offset int64, rec *record.Record, dst []byte, timeSorted bool) ([]byte, error)
func (*TsChunkDataImp) EncodeChunkForCompaction ¶ added in v1.2.0
func (t *TsChunkDataImp) EncodeChunkForCompaction(b *ChunkDataBuilder, offset int64, rec *record.Record, dst []byte, accumulateRowsIndex []int, timeSorted bool) ([]byte, error)
func (*TsChunkDataImp) SetAccumulateRowsIndex ¶ added in v1.2.0
func (t *TsChunkDataImp) SetAccumulateRowsIndex(accumulateRowsIndex []int)
func (*TsChunkDataImp) SetDetachedInfo ¶ added in v1.2.0
func (t *TsChunkDataImp) SetDetachedInfo(writeDetached bool)
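A sketch of encoding one series chunk, assuming the builder and record are prepared elsewhere, that record.Record comes from openGemini's lib/record package, and that the detached flag distinguishes remote from local layouts:

package example

import (
	"github.com/openGemini/openGemini/engine/immutable" // import path assumed
	"github.com/openGemini/openGemini/lib/record"       // import path assumed
)

func encodeChunk(b *immutable.ChunkDataBuilder, sid uint64, offset int64, rec *record.Record) ([]byte, error) {
	var cd immutable.TsChunkDataImp
	cd.SetDetachedInfo(false) // attached (local) layout; semantics assumed
	// timeSorted=true asserts the record's rows are already ordered by time.
	return cd.EncodeChunk(b, sid, offset, rec, nil, true)
}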
type UnnestOperator ¶ added in v1.3.0
func GetUnnestFuncOperator ¶ added in v1.3.0
func NewMatchAllOperator ¶ added in v1.3.0
func NewMatchAllOperator(unnest *influxql.Unnest, schemas record.Schemas) UnnestOperator
type UnorderedColumn ¶ added in v1.3.0
type UnorderedColumn struct {
// contains filtered or unexported fields
}
func (*UnorderedColumn) Init ¶ added in v1.3.0
func (c *UnorderedColumn) Init(meta *ColumnMeta, idx int)
type UnorderedColumnReader ¶ added in v1.0.0
type UnorderedColumnReader struct {
// contains filtered or unexported fields
}
func (*UnorderedColumnReader) ChangeColumn ¶ added in v1.3.0
func (r *UnorderedColumnReader) ChangeColumn(sid uint64, ref *record.Field)
func (*UnorderedColumnReader) ChangeSeries ¶ added in v1.3.0
func (r *UnorderedColumnReader) ChangeSeries(sid uint64) error
func (*UnorderedColumnReader) Close ¶ added in v1.3.0
func (r *UnorderedColumnReader) Close()
func (*UnorderedColumnReader) HasColumn ¶ added in v1.3.0
func (r *UnorderedColumnReader) HasColumn() bool
func (*UnorderedColumnReader) MatchSeries ¶ added in v1.3.0
func (r *UnorderedColumnReader) MatchSeries(sid uint64) bool
func (*UnorderedColumnReader) ReadSchemas ¶ added in v1.3.0
type UnorderedColumns ¶ added in v1.3.0
type UnorderedColumns struct {
// contains filtered or unexported fields
}
func NewUnorderedColumns ¶ added in v1.3.0
func NewUnorderedColumns() *UnorderedColumns
func (*UnorderedColumns) ChangeColumn ¶ added in v1.3.0
func (c *UnorderedColumns) ChangeColumn(name string) *UnorderedColumn
func (*UnorderedColumns) GetLineOffset ¶ added in v1.3.0
func (c *UnorderedColumns) GetLineOffset(name string) int
func (*UnorderedColumns) GetSegOffset ¶ added in v1.3.0
func (c *UnorderedColumns) GetSegOffset(name string) int
func (*UnorderedColumns) IncrLineOffset ¶ added in v1.3.0
func (c *UnorderedColumns) IncrLineOffset(name string, n int)
func (*UnorderedColumns) IncrSegOffset ¶ added in v1.3.0
func (c *UnorderedColumns) IncrSegOffset(name string, n int)
func (*UnorderedColumns) Init ¶ added in v1.3.0
func (c *UnorderedColumns) Init(cm *ChunkMeta)
func (*UnorderedColumns) ReadCompleted ¶ added in v1.3.0
func (c *UnorderedColumns) ReadCompleted() bool
func (*UnorderedColumns) SetRemainLine ¶ added in v1.3.0
func (c *UnorderedColumns) SetRemainLine(n int)
func (*UnorderedColumns) TimeMeta ¶ added in v1.3.0
func (c *UnorderedColumns) TimeMeta() *ColumnMeta
func (*UnorderedColumns) Walk ¶ added in v1.3.0
func (c *UnorderedColumns) Walk(callback func(meta *ColumnMeta))
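Taken together, the methods above suggest a per-chunk progress tracker keyed by column name. A sketch under that assumption (the column name "value" and the counts are illustrative):

package example

import "github.com/openGemini/openGemini/engine/immutable" // import path assumed

func trackColumns(cm *immutable.ChunkMeta) {
	cols := immutable.NewUnorderedColumns()
	cols.Init(cm)

	// Visit every column's metadata, e.g. to build a read plan.
	cols.Walk(func(meta *immutable.ColumnMeta) {
		_ = meta
	})

	// After consuming rows of a column, advance its offsets.
	cols.IncrLineOffset("value", 10)
	cols.IncrSegOffset("value", 1)
	_ = cols.ReadCompleted() // assumed true once every column is consumed
}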
type UnorderedReader ¶ added in v1.0.0
type UnorderedReader struct {
// contains filtered or unexported fields
}
func NewUnorderedReader ¶ added in v1.0.0
func NewUnorderedReader(log *logger.Logger) *UnorderedReader
func (*UnorderedReader) AddFiles ¶ added in v1.0.0
func (r *UnorderedReader) AddFiles(files []TSSPFile)
func (*UnorderedReader) AllocNilCol ¶ added in v1.1.0
func (*UnorderedReader) ChangeColumn ¶ added in v1.3.0
func (r *UnorderedReader) ChangeColumn(sid uint64, ref *record.Field)
func (*UnorderedReader) ChangeSeries ¶ added in v1.3.0
func (r *UnorderedReader) ChangeSeries(sid uint64) error
func (*UnorderedReader) Close ¶ added in v1.0.0
func (r *UnorderedReader) Close()
func (*UnorderedReader) CloseFile ¶ added in v1.3.0
func (r *UnorderedReader) CloseFile()
func (*UnorderedReader) HasSeries ¶ added in v1.1.0
func (r *UnorderedReader) HasSeries(sid uint64) bool
func (*UnorderedReader) InitTimes ¶ added in v1.0.1
func (r *UnorderedReader) InitTimes(sid uint64, maxTime int64)
InitTimes initializes the time column of unordered data

func (*UnorderedReader) Read ¶ added in v1.0.0
Read reads data based on the series ID, column, and time range
func (*UnorderedReader) ReadAllTimes ¶ added in v1.0.1
func (r *UnorderedReader) ReadAllTimes() []int64
func (*UnorderedReader) ReadRemain ¶ added in v1.0.0
func (r *UnorderedReader) ReadRemain(sid uint64, cb remainCallback) error
func (*UnorderedReader) ReadSeriesSchemas ¶ added in v1.0.0
func (r *UnorderedReader) ReadSeriesSchemas(sid uint64, maxTime int64) record.Schemas
func (*UnorderedReader) ReadTimes ¶ added in v1.0.0
func (r *UnorderedReader) ReadTimes(maxTime int64) []int64
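A sketch of reading out-of-order data for a single series, composed only from the methods documented above; the call order and the lib/logger import path are assumptions:

package example

import (
	"github.com/openGemini/openGemini/engine/immutable" // import path assumed
	"github.com/openGemini/openGemini/lib/logger"       // import path assumed
)

func readUnordered(log *logger.Logger, files []immutable.TSSPFile, sid uint64, maxTime int64) error {
	r := immutable.NewUnorderedReader(log)
	r.AddFiles(files)
	defer r.Close()

	if err := r.ChangeSeries(sid); err != nil {
		return err
	}
	schemas := r.ReadSeriesSchemas(sid, maxTime) // columns present for this series
	times := r.ReadTimes(maxTime)                // time values up to maxTime
	_, _ = schemas, times
	return nil
}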
type UnorderedReaderContext ¶ added in v1.1.0
type UnorderedReaderContext struct {
// contains filtered or unexported fields
}
Source Files ¶
- chunk_iterators.go
- chunk_meta_cache.go
- chunkdata_builder.go
- chunkdata_builder_cs.go
- chunkdata_builder_ts.go
- colstore_compact.go
- column_builder.go
- column_iterator.go
- compact.go
- compaction_file_info.go
- config.go
- cs_mms_tables.go
- detached_cache.go
- detached_chunkmeta.go
- detached_meta_index.go
- detached_metadata.go
- detached_pk_data.go
- detached_pk_meta.go
- detached_pk_meta_info.go
- event_bus.go
- evict.go
- file_iterator.go
- first_last_reader.go
- index_swapper.go
- index_writer.go
- limiter.go
- location.go
- location_cursor.go
- mem_reader.go
- merge_out_of_order.go
- merge_performer.go
- merge_self.go
- merge_tool.go
- merge_util.go
- meta_data.go
- mms_loader.go
- mms_tables.go
- msbuilder.go
- pre_aggregation.go
- read_context.go
- reader.go
- segment_sequence_reader.go
- sequence_iterator.go
- sequencer.go
- show_tag_values.go
- sort_limit_cursor.go
- sort_limit_row_heap.go
- stream_compact.go
- stream_downsample.go
- table.go
- table_stat.go
- task.go
- task_parquet.go
- trailer.go
- ts_mms_tables.go
- tssp_file.go
- tssp_file_attached_reader.go
- tssp_file_detached_reader.go
- tssp_file_inmem.go
- tssp_file_meta.go
- tssp_file_name.go
- tssp_file_page_cache_reader.go
- tssp_reader.go
- unnest_func.go
- unordered_reader.go
- util.go
- writer.go