Documentation
¶
Index ¶
- Constants
- Variables
- func BitmapToIDs(bm *roaring.Bitmap) []int
- func BitmapToRanges(bm *roaring.Bitmap) fragment.FragmentRanges
- func ConvertToRPN(expr influxql.Expr) ([]interface{}, error)
- func GetBloomFilterFilePath(dir, msName, fieldName string) string
- func GetFullTextAttachFilePath(dir, msName, dataFilePath string) string
- func GetFullTextDetachFilePath(dir, msName string) string
- func GetLocalBloomFilterBlockCnts(dir, msName, lockPath string, recSchema record.Schemas, bfSchemaIdx int, ...) int64
- func IsCompareOp(op influxql.Token) bool
- func IsLogicalOp(op influxql.Token) bool
- func RegistrySKFileReaderCreator(oid uint32, creator SKFileReaderCreator) bool
- type BloomFilterFullTextIndexReader
- func (r *BloomFilterFullTextIndexReader) Close() error
- func (r *BloomFilterFullTextIndexReader) GetFragmentRowCount(fragId uint32) (int64, error)
- func (r *BloomFilterFullTextIndexReader) MayBeInFragment(fragId uint32) (bool, error)
- func (r *BloomFilterFullTextIndexReader) ReInit(file interface{}) (err error)
- func (r *BloomFilterFullTextIndexReader) StartSpan(span *tracing.Span)
- type BloomFilterFullTextReaderCreator
- type BloomFilterIndexReader
- func (r *BloomFilterIndexReader) Close() error
- func (r *BloomFilterIndexReader) GetFragmentRowCount(fragId uint32) (int64, error)
- func (r *BloomFilterIndexReader) MayBeInFragment(fragId uint32) (bool, error)
- func (r *BloomFilterIndexReader) ReInit(file interface{}) (err error)
- func (r *BloomFilterIndexReader) StartSpan(span *tracing.Span)
- type BloomFilterIpIndexWriter
- func (w BloomFilterIpIndexWriter) Close() error
- func (b *BloomFilterIpIndexWriter) CreateAttachIndex(writeRec *record.Record, schemaIdx, rowsPerSegment []int) error
- func (b *BloomFilterIpIndexWriter) CreateDetachIndex(writeRec *record.Record, schemaIdx, rowsPerSegment []int, dataBuf [][]byte) ([][]byte, []string)
- func (w BloomFilterIpIndexWriter) Files() []string
- func (b *BloomFilterIpIndexWriter) Flush() error
- func (b *BloomFilterIpIndexWriter) GenBloomFilterData(src *record.ColVal, rowsPerSegment []int, refType int) []byte
- func (w BloomFilterIpIndexWriter) GetWriter(file string) (*colstore.IndexWriter, error)
- func (w BloomFilterIpIndexWriter) Open()
- type BloomFilterIpReaderCreator
- type BloomFilterReaderCreator
- type BloomFilterWriter
- func (w BloomFilterWriter) Close() error
- func (b *BloomFilterWriter) CreateAttachIndex(writeRec *record.Record, schemaIdx, rowsPerSegment []int) error
- func (b *BloomFilterWriter) CreateDetachIndex(writeRec *record.Record, schemaIdx, rowsPerSegment []int, dataBuf [][]byte) ([][]byte, []string)
- func (w BloomFilterWriter) Files() []string
- func (b *BloomFilterWriter) Flush() error
- func (b *BloomFilterWriter) GenBloomFilterData(src *record.ColVal, rowsPerSegment []int, refType int) []byte
- func (w BloomFilterWriter) GetWriter(file string) (*colstore.IndexWriter, error)
- func (w BloomFilterWriter) Open()
- type ClusterIndex
- type ColumnRef
- type Condition
- type FieldCluster
- type FieldRef
- func (f *FieldRef) Equals(rhs *FieldRef) bool
- func (f *FieldRef) IsNegativeInfinity() bool
- func (f *FieldRef) IsNull() bool
- func (f *FieldRef) IsPositiveInfinity() bool
- func (f *FieldRef) Less(rhs *FieldRef) bool
- func (f *FieldRef) MatchedIndices(ColVal *record.ColVal, compOp influxql.Token, isFirstPK bool) ([]int, error)
- func (f *FieldRef) Set(cols []*ColumnRef, column, row int)
- func (f *FieldRef) SetNegativeInfinity()
- func (f *FieldRef) SetPositiveInfinity()
- type FieldToWrite
- type FullTextIdxWriter
- func (w FullTextIdxWriter) Close() error
- func (f *FullTextIdxWriter) CreateAttachIndex(writeRec *record.Record, schemaIdx, rowsPerSegment []int) error
- func (f *FullTextIdxWriter) CreateDetachIndex(writeRec *record.Record, schemaIdx, rowsPerSegment []int, dataBuf [][]byte) ([][]byte, []string)
- func (w FullTextIdxWriter) Files() []string
- func (f *FullTextIdxWriter) Flush() error
- func (w FullTextIdxWriter) GetWriter(file string) (*colstore.IndexWriter, error)
- func (w FullTextIdxWriter) Open()
- type FunctionBase
- type GroupPKIndexReaderImpl
- func (r *GroupPKIndexReaderImpl) Close() error
- func (r *GroupPKIndexReaderImpl) EvaluateCondition(pkRec *record.Record, keyCondition KeyCondition, fragmentCount uint32) (fragment.FragmentRanges, error)
- func (r *GroupPKIndexReaderImpl) Scan(fileName string, pkRec *record.Record, pkMark fragment.IndexFragment, ...) (fragment.FragmentRanges, error)
- type IndexProperty
- type KeyCondition
- type KeyConditionImpl
- func (kc *KeyConditionImpl) AlwaysInRange() (bool, error)
- func (kc *KeyConditionImpl) CanDoBinarySearch() bool
- func (kc *KeyConditionImpl) CheckInRange(rgs []*Range, dataTypes []int) (Mark, error)
- func (kc *KeyConditionImpl) GetMaxKeyIndex() int
- func (kc *KeyConditionImpl) GetRPN() []*RPNElement
- func (kc *KeyConditionImpl) HavePrimaryKey() bool
- func (kc *KeyConditionImpl) IsFirstPrimaryKey() bool
- func (kc *KeyConditionImpl) MayBeInRange(usedKeySize int, leftKeys []*FieldRef, rightKeys []*FieldRef, dataTypes []int) (bool, error)
- func (kc *KeyConditionImpl) SetRPN(rpn []*RPNElement)
- type Mark
- type MinMaxFilterReaders
- func (r *MinMaxFilterReaders) Close() error
- func (r *MinMaxFilterReaders) GetRowCount(blockId int64, elem *rpn.SKRPNElement) (int64, error)
- func (r *MinMaxFilterReaders) InitReaderAndQuery(elem *rpn.SKRPNElement) (*record.Record, error)
- func (r *MinMaxFilterReaders) IsExist(blockId int64, elem *rpn.SKRPNElement) (bool, error)
- func (r *MinMaxFilterReaders) StartSpan(span *tracing.Span)
- type MinMaxIndexReader
- func (r *MinMaxIndexReader) Close() error
- func (r *MinMaxIndexReader) GetFragmentRowCount(fragId uint32) (int64, error)
- func (r *MinMaxIndexReader) MayBeInFragment(fragId uint32) (bool, error)
- func (r *MinMaxIndexReader) ReInit(file interface{}) (err error)
- func (r *MinMaxIndexReader) StartSpan(span *tracing.Span)
- type MinMaxReaderCreator
- type MinMaxWriter
- func (m *MinMaxWriter) Close() error
- func (m *MinMaxWriter) CreateAttachIndex(writeRec *record.Record, schemaIdx, rowsPerSegment []int) error
- func (m *MinMaxWriter) CreateDetachIndex(writeRec *record.Record, schemaIdx, rowsPerSegment []int, dataBuf [][]byte) ([][]byte, []string)
- func (w MinMaxWriter) Files() []string
- func (m *MinMaxWriter) Flush() error
- func (w MinMaxWriter) GetWriter(file string) (*colstore.IndexWriter, error)
- func (w MinMaxWriter) Open()
- type Number
- type OBSFilterPath
- type PKIndexReader
- type PKIndexReaderImpl
- type PKIndexWriter
- type PKIndexWriterImpl
- type RPNElement
- type Range
- type SKCondition
- type SKConditionImpl
- type SKFileReader
- type SKFileReaderCreator
- type SKFileReaderCreatorFactory
- type SKIndexReader
- type SKIndexReaderImpl
- func (r *SKIndexReaderImpl) Close() error
- func (r *SKIndexReaderImpl) CreateSKFileReaders(option hybridqp.Options, mstInfo *influxql.Measurement, isCache bool) ([]SKFileReader, error)
- func (r *SKIndexReaderImpl) Scan(reader SKFileReader, rgs fragment.FragmentRanges) (fragment.FragmentRanges, fragment.FragmentRangeDetails, error)
- type SetIndexReader
- func (r *SetIndexReader) Close() error
- func (r *SetIndexReader) GetFragmentRowCount(fragId uint32) (int64, error)
- func (r *SetIndexReader) MayBeInFragment(fragId uint32) (bool, error)
- func (r *SetIndexReader) ReInit(file interface{}) (err error)
- func (r *SetIndexReader) StartSpan(span *tracing.Span)
- type SetReaderCreator
- type SetWriter
- func (w SetWriter) Close() error
- func (s *SetWriter) CreateAttachIndex(writeRec *record.Record, schemaIdx, rowsPerSegment []int) error
- func (s *SetWriter) CreateDetachIndex(writeRec *record.Record, schemaIdx, rowsPerSegment []int, dataBuf [][]byte) ([][]byte, []string)
- func (w SetWriter) Files() []string
- func (s *SetWriter) Flush() error
- func (w SetWriter) GetWriter(file string) (*colstore.IndexWriter, error)
- func (w SetWriter) Open()
- type SkInfo
- type TsspFile
- type UniversalBFCreator
- type UniversalBloomFilter
- type UniversalBloomFilterReader
- func (r *UniversalBloomFilterReader) Close() error
- func (r *UniversalBloomFilterReader) GetFragmentRowCount(fragId uint32) (int64, error)
- func (r *UniversalBloomFilterReader) GetRowCount(blockId int64, elem *rpn.SKRPNElement) (int64, error)
- func (r *UniversalBloomFilterReader) IsExist(blockId int64, elem *rpn.SKRPNElement) (bool, error)
- func (r *UniversalBloomFilterReader) MayBeInFragment(fragId uint32) (bool, error)
- func (r *UniversalBloomFilterReader) ReInit(file interface{}) error
- func (r *UniversalBloomFilterReader) StartSpan(span *tracing.Span)
- type UniversalBloomFilterWriter
- func (idx *UniversalBloomFilterWriter) BuildBloomFilterFromRecord(record *record.Record, colIdx int) (*UniversalBloomFilter, error)
- func (idx *UniversalBloomFilterWriter) BuildUniversalBloomFilter(record *record.Record, colIdx int, indexDir string) error
- func (w UniversalBloomFilterWriter) Close() error
- func (idx *UniversalBloomFilterWriter) CreateAttachIndex(writeRec *record.Record, schemaIdx, rowsPerSegment []int) error
- func (idx *UniversalBloomFilterWriter) CreateDetachIndex(writeRec *record.Record, schemaIdx, rowsPerSegment []int, dataBuf [][]byte) ([][]byte, []string)
- func (w UniversalBloomFilterWriter) Files() []string
- func (idx *UniversalBloomFilterWriter) Flush() error
- func (w UniversalBloomFilterWriter) GetWriter(file string) (*colstore.IndexWriter, error)
- func (idx *UniversalBloomFilterWriter) NewUniversalBloomFilter() *UniversalBloomFilter
- func (w UniversalBloomFilterWriter) Open()
Constants ¶
const ( FullTextIdxColumnCnt = 1 FullTextIndex = "fullText" BloomFilterFileSuffix = ".idx" BloomFilterFilePrefix = "bloomfilter_" // bloomfilter_${columnName}.idx )
const ( MinValueColumnName = "min_value" MaxValueColumnName = "max_value" MinValueColumnIndex = 0 MaxValueColumnIndex = 1 EquivalentAccuracy = 1e-7 )
const Continuous = "continuous"
const DefaultFalseRate = 0.001
const Empty = "empty"
Variables ¶
var ( ErrInvalidRPN = errors.New("invalid RPN expression") ErrInvalidRPNResult = errors.New("invalid RPN result count") ErrUnknownOperator = func(op string) error { return fmt.Errorf("unknown operator: %s", op) } ErrEmptyRPN = errors.New("RPN element should not be empty") ErrInvalidCondition = errors.New("invalid condition") ErrConditionType = func(cType interface{}) error { return fmt.Errorf("invalid condition type: expected [string|float64|int64|boolean], got [%T]", cType) } )
Error definitions
var ConsiderOnlyBeTrue = NewMark(false, true)
var EmptyRoaingBitMap = roaring.New()
var ( // InitIndexFragmentFixedSize means that each fragment is fixed in size except the last fragment. InitIndexFragmentFixedSize = true )
var NEGATIVE_INFINITY = &FieldRef{row: math.MinInt64}
var POSITIVE_INFINITY = &FieldRef{row: math.MaxInt64}
var SKFileReaderOnce sync.Once
Functions ¶
func BitmapToRanges ¶ added in v1.5.0
func BitmapToRanges(bm *roaring.Bitmap) fragment.FragmentRanges
Get Ranges from Bitmap
func ConvertToRPN ¶ added in v1.5.0
ConvertToRPN converts an InfluxQL expression to Reverse Polish Notation (RPN)
func GetBloomFilterFilePath ¶ added in v1.2.0
func GetFullTextAttachFilePath ¶ added in v1.2.0
func GetFullTextDetachFilePath ¶ added in v1.2.0
func GetLocalBloomFilterBlockCnts ¶ added in v1.2.0
func GetLocalBloomFilterBlockCnts(dir, msName, lockPath string, recSchema record.Schemas, bfSchemaIdx int, fullTextIdx bool) int64
GetLocalBloomFilterBlockCnts gets the block count of one local bloom filter column; if the filter does not exist, it returns 0.
func IsCompareOp ¶ added in v1.5.0
func IsLogicalOp ¶ added in v1.5.0
func RegistrySKFileReaderCreator ¶ added in v1.2.0
func RegistrySKFileReaderCreator(oid uint32, creator SKFileReaderCreator) bool
RegistrySKFileReaderCreator is used to register the SKFileReaderCreator.
Types ¶
type BloomFilterFullTextIndexReader ¶ added in v1.2.0
type BloomFilterFullTextIndexReader struct {
// contains filtered or unexported fields
}
func NewBloomFilterFullTextIndexReader ¶ added in v1.2.0
func (*BloomFilterFullTextIndexReader) Close ¶ added in v1.2.0
func (r *BloomFilterFullTextIndexReader) Close() error
func (*BloomFilterFullTextIndexReader) GetFragmentRowCount ¶ added in v1.5.0
func (r *BloomFilterFullTextIndexReader) GetFragmentRowCount(fragId uint32) (int64, error)
func (*BloomFilterFullTextIndexReader) MayBeInFragment ¶ added in v1.2.0
func (r *BloomFilterFullTextIndexReader) MayBeInFragment(fragId uint32) (bool, error)
func (*BloomFilterFullTextIndexReader) ReInit ¶ added in v1.2.0
func (r *BloomFilterFullTextIndexReader) ReInit(file interface{}) (err error)
func (*BloomFilterFullTextIndexReader) StartSpan ¶ added in v1.3.0
func (r *BloomFilterFullTextIndexReader) StartSpan(span *tracing.Span)
type BloomFilterFullTextReaderCreator ¶ added in v1.2.0
type BloomFilterFullTextReaderCreator struct {
}
func (*BloomFilterFullTextReaderCreator) CreateSKFileReader ¶ added in v1.2.0
func (index *BloomFilterFullTextReaderCreator) CreateSKFileReader(rpnExpr *rpn.RPNExpr, schema record.Schemas, option hybridqp.Options, isCache bool) (SKFileReader, error)
type BloomFilterIndexReader ¶ added in v1.2.0
type BloomFilterIndexReader struct {
// contains filtered or unexported fields
}
func NewBloomFilterIndexReader ¶ added in v1.2.0
func NewBloomFilterIndexReaderWithIndexType ¶ added in v1.4.0
func (*BloomFilterIndexReader) Close ¶ added in v1.2.0
func (r *BloomFilterIndexReader) Close() error
func (*BloomFilterIndexReader) GetFragmentRowCount ¶ added in v1.5.0
func (r *BloomFilterIndexReader) GetFragmentRowCount(fragId uint32) (int64, error)
func (*BloomFilterIndexReader) MayBeInFragment ¶ added in v1.2.0
func (r *BloomFilterIndexReader) MayBeInFragment(fragId uint32) (bool, error)
func (*BloomFilterIndexReader) ReInit ¶ added in v1.2.0
func (r *BloomFilterIndexReader) ReInit(file interface{}) (err error)
func (*BloomFilterIndexReader) StartSpan ¶ added in v1.3.0
func (r *BloomFilterIndexReader) StartSpan(span *tracing.Span)
type BloomFilterIpIndexWriter ¶ added in v1.4.0
type BloomFilterIpIndexWriter struct {
// contains filtered or unexported fields
}
func NewBloomFilterIpWriter ¶ added in v1.4.0
func NewBloomFilterIpWriter(dir, msName, dataFilePath, lockPath string, tokens string) *BloomFilterIpIndexWriter
func (BloomFilterIpIndexWriter) Close ¶ added in v1.4.0
func (w BloomFilterIpIndexWriter) Close() error
func (*BloomFilterIpIndexWriter) CreateAttachIndex ¶ added in v1.4.0
func (b *BloomFilterIpIndexWriter) CreateAttachIndex(writeRec *record.Record, schemaIdx, rowsPerSegment []int) error
func (*BloomFilterIpIndexWriter) CreateDetachIndex ¶ added in v1.4.0
func (BloomFilterIpIndexWriter) Files ¶ added in v1.5.0
func (w BloomFilterIpIndexWriter) Files() []string
func (*BloomFilterIpIndexWriter) Flush ¶ added in v1.5.0
func (b *BloomFilterIpIndexWriter) Flush() error
func (*BloomFilterIpIndexWriter) GenBloomFilterData ¶ added in v1.4.0
type BloomFilterIpReaderCreator ¶ added in v1.4.0
type BloomFilterIpReaderCreator struct{}
func (*BloomFilterIpReaderCreator) CreateSKFileReader ¶ added in v1.4.0
func (b *BloomFilterIpReaderCreator) CreateSKFileReader(rpnExpr *rpn.RPNExpr, schema record.Schemas, option hybridqp.Options, isCache bool) (SKFileReader, error)
type BloomFilterReaderCreator ¶ added in v1.2.0
type BloomFilterReaderCreator struct {
}
func (*BloomFilterReaderCreator) CreateSKFileReader ¶ added in v1.2.0
func (index *BloomFilterReaderCreator) CreateSKFileReader(rpnExpr *rpn.RPNExpr, schema record.Schemas, option hybridqp.Options, isCache bool) (SKFileReader, error)
type BloomFilterWriter ¶ added in v1.2.0
type BloomFilterWriter struct {
// contains filtered or unexported fields
}
func NewBloomFilterWriter ¶ added in v1.2.0
func NewBloomFilterWriter(dir, msName, dataFilePath, lockPath string, tokens string) *BloomFilterWriter
func (*BloomFilterWriter) CreateAttachIndex ¶ added in v1.3.0
func (b *BloomFilterWriter) CreateAttachIndex(writeRec *record.Record, schemaIdx, rowsPerSegment []int) error
func (*BloomFilterWriter) CreateDetachIndex ¶ added in v1.3.0
func (*BloomFilterWriter) Flush ¶ added in v1.5.0
func (b *BloomFilterWriter) Flush() error
func (*BloomFilterWriter) GenBloomFilterData ¶ added in v1.2.0
type ClusterIndex ¶ added in v1.5.0
type ClusterIndex struct {
// contains filtered or unexported fields
}
ClusterIndex represents the top-level clustered index structure, organized by fields
func NewClusterIndex ¶ added in v1.5.0
func NewClusterIndex() *ClusterIndex
NewClusterIndex creates a new ClusterIndex instance
func (*ClusterIndex) Query ¶ added in v1.5.0
Query executes a query and returns the resulting segment IDs
func (*ClusterIndex) Write ¶ added in v1.5.0
func (ci *ClusterIndex) Write(segNumID uint32, fields map[string]*FieldToWrite) error
Write indexes a document into the specified segment. Data from the same field must be of the same type.
func (*ClusterIndex) WritePkRec ¶ added in v1.5.0
func (ci *ClusterIndex) WritePkRec(pk *record.Record) error
WritePkRec indexes a primary record into the specified segment
type Condition ¶ added in v1.5.0
Condition represents a query condition (field name, operator, value)
type FieldCluster ¶ added in v1.5.0
type FieldCluster struct {
// Mapping from field values to bitmaps of segment IDs containing the value
ValueToSegments map[interface{}]*roaring.Bitmap
// Stores the field values in sorted order
SortedValues []interface{}
FieldType int
}
FieldCluster represents a clustered index for a specific field, organizing segments and documents by field values
func NewFieldCluster ¶ added in v1.5.0
func NewFieldCluster(fieldType int) *FieldCluster
NewFieldCluster creates a new FieldCluster instance
type FieldRef ¶
type FieldRef struct {
// contains filtered or unexported fields
}
func (*FieldRef) IsNegativeInfinity ¶
func (*FieldRef) IsPositiveInfinity ¶
func (*FieldRef) MatchedIndices ¶ added in v1.5.0
func (*FieldRef) SetNegativeInfinity ¶
func (f *FieldRef) SetNegativeInfinity()
func (*FieldRef) SetPositiveInfinity ¶
func (f *FieldRef) SetPositiveInfinity()
type FieldToWrite ¶ added in v1.5.0
type FieldToWrite struct {
// contains filtered or unexported fields
}
type FullTextIdxWriter ¶ added in v1.2.0
type FullTextIdxWriter struct {
// contains filtered or unexported fields
}
func NewBloomFilterFullTextWriter ¶ added in v1.3.0
func NewBloomFilterFullTextWriter(dir, msName, dataFilePath, lockPath string, tokens string) *FullTextIdxWriter
func (*FullTextIdxWriter) CreateAttachIndex ¶ added in v1.3.0
func (f *FullTextIdxWriter) CreateAttachIndex(writeRec *record.Record, schemaIdx, rowsPerSegment []int) error
func (*FullTextIdxWriter) CreateDetachIndex ¶ added in v1.3.0
func (*FullTextIdxWriter) Flush ¶ added in v1.5.0
func (f *FullTextIdxWriter) Flush() error
type FunctionBase ¶
type FunctionBase struct {
}
type GroupPKIndexReaderImpl ¶ added in v1.5.0
type GroupPKIndexReaderImpl struct {
// contains filtered or unexported fields
}
func NewGroupPKIndexReader ¶ added in v1.5.0
func NewGroupPKIndexReader(rowsNumPerFragment int, coarseIndexFragment int, minRowsForSeek int) *GroupPKIndexReaderImpl
func (*GroupPKIndexReaderImpl) Close ¶ added in v1.5.0
func (r *GroupPKIndexReaderImpl) Close() error
func (*GroupPKIndexReaderImpl) EvaluateCondition ¶ added in v1.5.0
func (r *GroupPKIndexReaderImpl) EvaluateCondition(pkRec *record.Record, keyCondition KeyCondition, fragmentCount uint32) (fragment.FragmentRanges, error)
func (*GroupPKIndexReaderImpl) Scan ¶ added in v1.5.0
func (r *GroupPKIndexReaderImpl) Scan( fileName string, pkRec *record.Record, pkMark fragment.IndexFragment, keyCondition KeyCondition, ) (fragment.FragmentRanges, error)
type IndexProperty ¶
func NewIndexProperty ¶
func NewIndexProperty(rowsNumPerFragment, coarseIndexFragment, minRowsForSeek int) *IndexProperty
type KeyCondition ¶
type KeyCondition interface {
HavePrimaryKey() bool
GetMaxKeyIndex() int
IsFirstPrimaryKey() bool
CanDoBinarySearch() bool
MayBeInRange(usedKeySize int, indexLeft []*FieldRef, indexRight []*FieldRef, dataTypes []int) (bool, error)
CheckInRange(rgs []*Range, dataTypes []int) (Mark, error)
AlwaysInRange() (bool, error)
GetRPN() []*RPNElement
}
type KeyConditionImpl ¶
type KeyConditionImpl struct {
// contains filtered or unexported fields
}
func NewKeyCondition ¶
func (*KeyConditionImpl) AlwaysInRange ¶ added in v1.2.0
func (kc *KeyConditionImpl) AlwaysInRange() (bool, error)
AlwaysInRange checks whether the index cannot be used, enabling pruning in advance to improve efficiency.
func (*KeyConditionImpl) CanDoBinarySearch ¶
func (kc *KeyConditionImpl) CanDoBinarySearch() bool
func (*KeyConditionImpl) CheckInRange ¶
func (kc *KeyConditionImpl) CheckInRange( rgs []*Range, dataTypes []int, ) (Mark, error)
CheckInRange checks whether the condition and its negation are feasible in the direct product of single-column ranges specified by the hyper-rectangle.
func (*KeyConditionImpl) GetMaxKeyIndex ¶
func (kc *KeyConditionImpl) GetMaxKeyIndex() int
func (*KeyConditionImpl) GetRPN ¶
func (kc *KeyConditionImpl) GetRPN() []*RPNElement
func (*KeyConditionImpl) HavePrimaryKey ¶
func (kc *KeyConditionImpl) HavePrimaryKey() bool
func (*KeyConditionImpl) IsFirstPrimaryKey ¶
func (kc *KeyConditionImpl) IsFirstPrimaryKey() bool
func (*KeyConditionImpl) MayBeInRange ¶
func (kc *KeyConditionImpl) MayBeInRange( usedKeySize int, leftKeys []*FieldRef, rightKeys []*FieldRef, dataTypes []int, ) (bool, error)
MayBeInRange is used to check whether the condition is likely to be in the target range.
func (*KeyConditionImpl) SetRPN ¶
func (kc *KeyConditionImpl) SetRPN(rpn []*RPNElement)
type Mark ¶
type Mark struct {
// contains filtered or unexported fields
}
Mark: these special constants are used to implement KeyCondition. When used as an initial_mask argument in KeyCondition.CheckInRange methods, they effectively prevent calculation of the discarded Mark component, as it is already set to true.
type MinMaxFilterReaders ¶ added in v1.5.0
type MinMaxFilterReaders struct {
// contains filtered or unexported fields
}
MinMaxFilterReaders manages multiple MinMaxFilterReaders for different fields
func NewMinMaxFilterReaders ¶ added in v1.5.0
func NewMinMaxFilterReaders(path, file string, schemas record.Schemas, ir *influxql.IndexRelation) *MinMaxFilterReaders
NewMinMaxFilterReaders creates a new MinMaxFilterReaders
func (*MinMaxFilterReaders) Close ¶ added in v1.5.0
func (r *MinMaxFilterReaders) Close() error
Close closes all readers
func (*MinMaxFilterReaders) GetRowCount ¶ added in v1.5.0
func (r *MinMaxFilterReaders) GetRowCount(blockId int64, elem *rpn.SKRPNElement) (int64, error)
GetRowCount returns the row count for the fragment
func (*MinMaxFilterReaders) InitReaderAndQuery ¶ added in v1.5.0
func (r *MinMaxFilterReaders) InitReaderAndQuery(elem *rpn.SKRPNElement) (*record.Record, error)
InitReaderAndQuery initializes a reader and gets the query string
func (*MinMaxFilterReaders) IsExist ¶ added in v1.5.0
func (r *MinMaxFilterReaders) IsExist(blockId int64, elem *rpn.SKRPNElement) (bool, error)
IsExist checks if the fragment might contain data matching the condition
func (*MinMaxFilterReaders) StartSpan ¶ added in v1.5.0
func (r *MinMaxFilterReaders) StartSpan(span *tracing.Span)
StartSpan starts a tracing span
type MinMaxIndexReader ¶ added in v1.2.0
type MinMaxIndexReader struct {
// read the data of the index according to the file and index fields.
ReadFunc func(file interface{}, rec *record.Record, isCache bool) (*record.Record, error)
// contains filtered or unexported fields
}
func NewMinMaxIndexReader ¶ added in v1.2.0
func (*MinMaxIndexReader) Close ¶ added in v1.2.0
func (r *MinMaxIndexReader) Close() error
func (*MinMaxIndexReader) GetFragmentRowCount ¶ added in v1.5.0
func (r *MinMaxIndexReader) GetFragmentRowCount(fragId uint32) (int64, error)
func (*MinMaxIndexReader) MayBeInFragment ¶ added in v1.2.0
func (r *MinMaxIndexReader) MayBeInFragment(fragId uint32) (bool, error)
func (*MinMaxIndexReader) ReInit ¶ added in v1.2.0
func (r *MinMaxIndexReader) ReInit(file interface{}) (err error)
func (*MinMaxIndexReader) StartSpan ¶ added in v1.3.0
func (r *MinMaxIndexReader) StartSpan(span *tracing.Span)
type MinMaxReaderCreator ¶ added in v1.2.0
type MinMaxReaderCreator struct {
}
func (*MinMaxReaderCreator) CreateSKFileReader ¶ added in v1.2.0
func (creator *MinMaxReaderCreator) CreateSKFileReader(rpnExpr *rpn.RPNExpr, schema record.Schemas, option hybridqp.Options, isCache bool) (SKFileReader, error)
type MinMaxWriter ¶ added in v1.2.0
type MinMaxWriter struct {
// contains filtered or unexported fields
}
func NewMinMaxWriter ¶ added in v1.2.0
func NewMinMaxWriter(dir, msName, dataFilePath, lockPath string, tokens string) *MinMaxWriter
func (*MinMaxWriter) Close ¶ added in v1.2.0
func (m *MinMaxWriter) Close() error
func (*MinMaxWriter) CreateAttachIndex ¶ added in v1.3.0
func (m *MinMaxWriter) CreateAttachIndex(writeRec *record.Record, schemaIdx, rowsPerSegment []int) error
func (*MinMaxWriter) CreateDetachIndex ¶ added in v1.3.0
func (*MinMaxWriter) Flush ¶ added in v1.5.0
func (m *MinMaxWriter) Flush() error
type OBSFilterPath ¶ added in v1.2.0
type OBSFilterPath struct {
// contains filtered or unexported fields
}
func NewOBSFilterPath ¶ added in v1.2.0
func NewOBSFilterPath(localPath, remotePath string, option *obs.ObsOptions) *OBSFilterPath
func (*OBSFilterPath) LocalPath ¶ added in v1.2.0
func (o *OBSFilterPath) LocalPath() string
func (*OBSFilterPath) Name ¶ added in v1.2.0
func (o *OBSFilterPath) Name() string
func (*OBSFilterPath) Option ¶ added in v1.2.0
func (o *OBSFilterPath) Option() *obs.ObsOptions
func (*OBSFilterPath) RemotePath ¶ added in v1.2.0
func (o *OBSFilterPath) RemotePath() string
type PKIndexReader ¶ added in v1.2.0
type PKIndexReader interface {
Scan(pkFile string,
pkRec *record.Record,
pkMark fragment.IndexFragment,
keyCondition KeyCondition,
) (fragment.FragmentRanges, error)
Close() error
}
type PKIndexReaderImpl ¶ added in v1.2.0
type PKIndexReaderImpl struct {
// contains filtered or unexported fields
}
func NewPKIndexReader ¶ added in v1.2.0
func NewPKIndexReader(rowsNumPerFragment int, coarseIndexFragment int, minRowsForSeek int) *PKIndexReaderImpl
func (*PKIndexReaderImpl) Close ¶ added in v1.2.0
func (r *PKIndexReaderImpl) Close() error
func (*PKIndexReaderImpl) Scan ¶ added in v1.2.0
func (r *PKIndexReaderImpl) Scan( pkFile string, pkRec *record.Record, pkMark fragment.IndexFragment, keyCondition KeyCondition, ) (fragment.FragmentRanges, error)
Scan is used to filter fragment ranges based on the primary key in the condition, and it determines whether to do a binary search or an exclusion search according to the sequence of keys in the primary key. The following gives a specific example to illustrate the usage of Scan.
origin record: x -> [1, 2, 1, 2, 1, 2, 2, 1] y -> [1, 1, 3, 4, 2, 2, 3, 4]
sorted record(sorted by x, y): x -> [1, 1, 1, 1, 2, 2, 2, 2] y -> [1, 2, 3, 4, 1, 2, 3, 4]
primary index record(fragment size is 2): x -> [1, 1, 2, 2, 2] y -> [1, 3, 1, 3, 4] fragment index -> [0, 1, 2, 3]
key condition: x > 1 and y < 3
scan results: fragment range -> [1, 3)
type PKIndexWriter ¶ added in v1.2.0
type PKIndexWriterImpl ¶ added in v1.2.0
type PKIndexWriterImpl struct {
}
func NewPKIndexWriter ¶ added in v1.2.0
func NewPKIndexWriter() *PKIndexWriterImpl
func (*PKIndexWriterImpl) Build ¶ added in v1.2.0
func (w *PKIndexWriterImpl) Build( srcRec *record.Record, pkSchema record.Schemas, rowsNumPerFragment []int, tcLocation int8, fixRowsPerSegment int, ) ( *record.Record, fragment.IndexFragment, error, )
Build generates sparse primary index based on sorted data to be flushed to disks.
func (*PKIndexWriterImpl) Close ¶ added in v1.2.0
func (w *PKIndexWriterImpl) Close() error
type RPNElement ¶
type RPNElement struct {
// contains filtered or unexported fields
}
RPNElement is an element of Reverse Polish notation (RPN), a method for conveying mathematical expressions without the use of separators such as brackets and parentheses. In this notation, the operators follow their operands, hence removing the need for brackets to define evaluation priority. More details: https://en.wikipedia.org/wiki/Reverse_Polish_notation.
func NewRPNElement ¶ added in v1.2.0
func NewRPNElement(op rpn.Op) *RPNElement
func (*RPNElement) IsFirstPrimaryKey ¶ added in v1.5.0
func (rpnElem *RPNElement) IsFirstPrimaryKey() bool
func (*RPNElement) MatchedIndices ¶ added in v1.5.0
func (rpnElem *RPNElement) MatchedIndices(pkRec *record.Record) ([]int, error)
type Range ¶
type Range struct {
// contains filtered or unexported fields
}
Range represents a range with open or closed ends, possibly unbounded.
type SKCondition ¶ added in v1.2.0
type SKCondition interface {
IsExist(blockId int64, reader rpn.SKBaseReader) (bool, error)
GetRowCount(blockId int64, reader rpn.SKBaseReader) (int64, error)
}
func NewSKCondition ¶ added in v1.2.0
type SKConditionImpl ¶ added in v1.2.0
type SKConditionImpl struct {
// contains filtered or unexported fields
}
func (*SKConditionImpl) GetRowCount ¶ added in v1.5.0
func (c *SKConditionImpl) GetRowCount(blockId int64, reader rpn.SKBaseReader) (int64, error)
func (*SKConditionImpl) IsExist ¶ added in v1.2.0
func (c *SKConditionImpl) IsExist(blockId int64, reader rpn.SKBaseReader) (bool, error)
type SKFileReader ¶ added in v1.2.0
type SKFileReader interface {
// MayBeInFragment determines whether a fragment in a file meets the query condition.
MayBeInFragment(fragId uint32) (bool, error)
GetFragmentRowCount(fragId uint32) (int64, error)
// ReInit is used so that a SKFileReader can be reused among multiple files.
ReInit(file interface{}) error
StartSpan(span *tracing.Span)
// Close is used to close the SKFileReader
Close() error
}
SKFileReader acts as an executor of skip index data reading that corresponds to the index field in the query.
type SKFileReaderCreator ¶ added in v1.2.0
type SKFileReaderCreator interface {
CreateSKFileReader(rpnExpr *rpn.RPNExpr, schema record.Schemas, option hybridqp.Options, isCache bool) (SKFileReader, error)
}
SKFileReaderCreator is used to abstract SKFileReader implementation of multiple skip indexes in factory mode.
type SKFileReaderCreatorFactory ¶ added in v1.2.0
type SKFileReaderCreatorFactory struct {
// contains filtered or unexported fields
}
var SKFileReaderInstance *SKFileReaderCreatorFactory
func GetSKFileReaderFactoryInstance ¶ added in v1.2.0
func GetSKFileReaderFactoryInstance() *SKFileReaderCreatorFactory
func NewSKFileReaderCreatorFactory ¶ added in v1.2.0
func NewSKFileReaderCreatorFactory() *SKFileReaderCreatorFactory
func (*SKFileReaderCreatorFactory) Add ¶ added in v1.2.0
func (s *SKFileReaderCreatorFactory) Add(oid uint32, creator SKFileReaderCreator)
func (*SKFileReaderCreatorFactory) Find ¶ added in v1.2.0
func (s *SKFileReaderCreatorFactory) Find(oid uint32) (SKFileReaderCreator, bool)
type SKIndexReader ¶ added in v1.2.0
type SKIndexReader interface {
// CreateSKFileReaders generates SKFileReaders for each index field based on the skip index information and condition
// which is used to quickly determine whether a fragment meets the condition.
CreateSKFileReaders(option hybridqp.Options, mstInfo *influxql.Measurement, isCache bool) ([]SKFileReader, error)
// Scan is used to filter fragment ranges based on the secondary key in the condition.
Scan(reader SKFileReader, rgs fragment.FragmentRanges) (fragment.FragmentRanges, fragment.FragmentRangeDetails, error)
// Close is used to close the SKIndexReader
Close() error
}
SKIndexReader is a skip index read interface.
type SKIndexReaderImpl ¶ added in v1.2.0
type SKIndexReaderImpl struct {
// contains filtered or unexported fields
}
func NewSKIndexReader ¶ added in v1.2.0
func NewSKIndexReader(rowsNumPerFragment int, coarseIndexFragment int, minRowsForSeek int) *SKIndexReaderImpl
func (*SKIndexReaderImpl) Close ¶ added in v1.2.0
func (r *SKIndexReaderImpl) Close() error
func (*SKIndexReaderImpl) CreateSKFileReaders ¶ added in v1.2.0
func (r *SKIndexReaderImpl) CreateSKFileReaders(option hybridqp.Options, mstInfo *influxql.Measurement, isCache bool) ([]SKFileReader, error)
func (*SKIndexReaderImpl) Scan ¶ added in v1.2.0
func (r *SKIndexReaderImpl) Scan(reader SKFileReader, rgs fragment.FragmentRanges) (fragment.FragmentRanges, fragment.FragmentRangeDetails, error)
type SetIndexReader ¶ added in v1.2.0
type SetIndexReader struct {
// contains filtered or unexported fields
}
func NewSetIndexReader ¶ added in v1.2.0
func (*SetIndexReader) Close ¶ added in v1.2.0
func (r *SetIndexReader) Close() error
func (*SetIndexReader) GetFragmentRowCount ¶ added in v1.5.0
func (r *SetIndexReader) GetFragmentRowCount(fragId uint32) (int64, error)
func (*SetIndexReader) MayBeInFragment ¶ added in v1.2.0
func (r *SetIndexReader) MayBeInFragment(fragId uint32) (bool, error)
func (*SetIndexReader) ReInit ¶ added in v1.2.0
func (r *SetIndexReader) ReInit(file interface{}) (err error)
func (*SetIndexReader) StartSpan ¶ added in v1.3.0
func (r *SetIndexReader) StartSpan(span *tracing.Span)
type SetReaderCreator ¶ added in v1.2.0
type SetReaderCreator struct {
}
func (*SetReaderCreator) CreateSKFileReader ¶ added in v1.2.0
func (index *SetReaderCreator) CreateSKFileReader(rpnExpr *rpn.RPNExpr, schema record.Schemas, option hybridqp.Options, isCache bool) (SKFileReader, error)
type SetWriter ¶ added in v1.2.0
type SetWriter struct {
// contains filtered or unexported fields
}
func NewSetWriter ¶ added in v1.2.0
func (*SetWriter) CreateAttachIndex ¶ added in v1.3.0
func (*SetWriter) CreateDetachIndex ¶ added in v1.3.0
type UniversalBFCreator ¶ added in v1.5.0
type UniversalBFCreator struct {
}
func (*UniversalBFCreator) CreateSKFileReader ¶ added in v1.5.0
func (creator *UniversalBFCreator) CreateSKFileReader(rpnExpr *rpn.RPNExpr, schema record.Schemas, option hybridqp.Options, isCache bool) (SKFileReader, error)
type UniversalBloomFilter ¶ added in v1.5.0
type UniversalBloomFilter struct {
Filter *bloom.BloomFilter
}
func DeserializeBloomFilter ¶ added in v1.5.0
func DeserializeBloomFilter(data []byte) (*UniversalBloomFilter, error)
func (*UniversalBloomFilter) Serialize ¶ added in v1.5.0
func (bf *UniversalBloomFilter) Serialize() ([]byte, error)
func (*UniversalBloomFilter) Test ¶ added in v1.5.0
func (bf *UniversalBloomFilter) Test(value []byte) bool
type UniversalBloomFilterReader ¶ added in v1.5.0
type UniversalBloomFilterReader struct {
// contains filtered or unexported fields
}
func NewUniversalBloomFilterReader ¶ added in v1.5.0
func (*UniversalBloomFilterReader) Close ¶ added in v1.5.0
func (r *UniversalBloomFilterReader) Close() error
func (*UniversalBloomFilterReader) GetFragmentRowCount ¶ added in v1.5.0
func (r *UniversalBloomFilterReader) GetFragmentRowCount(fragId uint32) (int64, error)
func (*UniversalBloomFilterReader) GetRowCount ¶ added in v1.5.0
func (r *UniversalBloomFilterReader) GetRowCount(blockId int64, elem *rpn.SKRPNElement) (int64, error)
func (*UniversalBloomFilterReader) IsExist ¶ added in v1.5.0
func (r *UniversalBloomFilterReader) IsExist(blockId int64, elem *rpn.SKRPNElement) (bool, error)
func (*UniversalBloomFilterReader) MayBeInFragment ¶ added in v1.5.0
func (r *UniversalBloomFilterReader) MayBeInFragment(fragId uint32) (bool, error)
func (*UniversalBloomFilterReader) ReInit ¶ added in v1.5.0
func (r *UniversalBloomFilterReader) ReInit(file interface{}) error
func (*UniversalBloomFilterReader) StartSpan ¶ added in v1.5.0
func (r *UniversalBloomFilterReader) StartSpan(span *tracing.Span)
type UniversalBloomFilterWriter ¶ added in v1.5.0
type UniversalBloomFilterWriter struct {
// contains filtered or unexported fields
}
func NewUniversalGeneralBloomFilterWriter ¶ added in v1.5.0
func NewUniversalGeneralBloomFilterWriter(dir, msName, dataFilePath, lockPath, tokens string, params *influxql.IndexParam) *UniversalBloomFilterWriter
func (*UniversalBloomFilterWriter) BuildBloomFilterFromRecord ¶ added in v1.5.0
func (idx *UniversalBloomFilterWriter) BuildBloomFilterFromRecord(record *record.Record, colIdx int) (*UniversalBloomFilter, error)
func (*UniversalBloomFilterWriter) BuildUniversalBloomFilter ¶ added in v1.5.0
func (UniversalBloomFilterWriter) Close ¶ added in v1.5.0
func (w UniversalBloomFilterWriter) Close() error
func (*UniversalBloomFilterWriter) CreateAttachIndex ¶ added in v1.5.0
func (idx *UniversalBloomFilterWriter) CreateAttachIndex(writeRec *record.Record, schemaIdx, rowsPerSegment []int) error
func (*UniversalBloomFilterWriter) CreateDetachIndex ¶ added in v1.5.0
func (UniversalBloomFilterWriter) Files ¶ added in v1.5.0
func (w UniversalBloomFilterWriter) Files() []string
func (*UniversalBloomFilterWriter) Flush ¶ added in v1.5.0
func (idx *UniversalBloomFilterWriter) Flush() error
func (UniversalBloomFilterWriter) GetWriter ¶ added in v1.5.0
func (w UniversalBloomFilterWriter) GetWriter(file string) (*colstore.IndexWriter, error)
func (*UniversalBloomFilterWriter) NewUniversalBloomFilter ¶ added in v1.5.0
func (idx *UniversalBloomFilterWriter) NewUniversalBloomFilter() *UniversalBloomFilter