sparseindex

package v1.5.2

Published: Jan 9, 2026 License: Apache-2.0 Imports: 34

Documentation

Index

Constants

const (
	FullTextIdxColumnCnt = 1

	FullTextIndex         = "fullText"
	BloomFilterFileSuffix = ".idx"
	BloomFilterFilePrefix = "bloomfilter_" // bloomfilter_${columnName}.idx
)

const (
	MinValueColumnName  = "min_value"
	MaxValueColumnName  = "max_value"
	MinValueColumnIndex = 0
	MaxValueColumnIndex = 1
	EquivalentAccuracy  = 1e-7
)

const Continuous = "continuous"

const DefaultFalseRate = 0.001

const Empty = "empty"

Variables

var (
	ErrInvalidRPN       = errors.New("invalid RPN expression")
	ErrInvalidRPNResult = errors.New("invalid RPN result count")
	ErrUnknownOperator  = func(op string) error { return fmt.Errorf("unknown operator: %s", op) }
	ErrEmptyRPN         = errors.New("RPN element should not be empty")
	ErrInvalidCondition = errors.New("invalid condition")
	ErrConditionType    = func(cType interface{}) error {
		return fmt.Errorf("invalid condition type: expected [string|float64|int64|boolean], got [%T]", cType)
	}
)

Error definitions

var ConsiderOnlyBeTrue = NewMark(false, true)

var EmptyRoaingBitMap = roaring.New()

var (
	// InitIndexFragmentFixedSize means that each fragment is fixed in size except the last fragment.
	InitIndexFragmentFixedSize = true
)

var NEGATIVE_INFINITY = &FieldRef{row: math.MinInt64}

var POSITIVE_INFINITY = &FieldRef{row: math.MaxInt64}

var SKFileReaderOnce sync.Once

Functions

func BitmapToIDs added in v1.5.0

func BitmapToIDs(bm *roaring.Bitmap) []int

BitmapToIDs returns the IDs contained in the bitmap.

func BitmapToRanges added in v1.5.0

func BitmapToRanges(bm *roaring.Bitmap) fragment.FragmentRanges

BitmapToRanges converts the bitmap into fragment ranges.

func ConvertToRPN added in v1.5.0

func ConvertToRPN(expr influxql.Expr) ([]interface{}, error)

ConvertToRPN converts an InfluxQL expression to Reverse Polish Notation (RPN)
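
A minimal usage sketch; the import paths below are illustrative (substitute the module's actual influxql fork and package path), and the printed postfix ordering is an assumption about the result shape.

package main

import (
	"fmt"

	"github.com/openGemini/openGemini/engine/index/sparseindex"        // illustrative path
	"github.com/openGemini/openGemini/lib/util/lifted/influx/influxql" // illustrative path
)

func main() {
	// Parse an infix condition, then convert it to postfix (RPN) order.
	expr, err := influxql.ParseExpr("x > 1 AND y < 3")
	if err != nil {
		panic(err)
	}
	rpnExpr, err := sparseindex.ConvertToRPN(expr)
	if err != nil {
		panic(err)
	}
	fmt.Println(rpnExpr) // operands precede operators, e.g. [x 1 > y 3 < AND]
}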

func GetBloomFilterFilePath added in v1.2.0

func GetBloomFilterFilePath(dir, msName, fieldName string) string
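
A hedged illustration of the bloomfilter_${columnName}.idx naming pattern from the constants above; the directory layout here is an assumption, not GetBloomFilterFilePath's verified output.

package main

import (
	"fmt"
	"path/filepath"
)

// bloomFilterFileName composes an index file name in the documented
// pattern; illustrative only, the real path is produced by
// GetBloomFilterFilePath.
func bloomFilterFileName(dir, fieldName string) string {
	return filepath.Join(dir, "bloomfilter_"+fieldName+".idx")
}

func main() {
	fmt.Println(bloomFilterFileName("/data/index", "host"))
	// prints: /data/index/bloomfilter_host.idx
}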

func GetFullTextAttachFilePath added in v1.2.0

func GetFullTextAttachFilePath(dir, msName, dataFilePath string) string

func GetFullTextDetachFilePath added in v1.2.0

func GetFullTextDetachFilePath(dir, msName string) string

func GetLocalBloomFilterBlockCnts added in v1.2.0

func GetLocalBloomFilterBlockCnts(dir, msName, lockPath string, recSchema record.Schemas, bfSchemaIdx int,
	fullTextIdx bool) int64

GetLocalBloomFilterBlockCnts returns the block count of one local bloom filter column; if the filter does not exist, it returns 0.

func IsCompareOp added in v1.5.0

func IsCompareOp(op influxql.Token) bool

func IsLogicalOp added in v1.5.0

func IsLogicalOp(op influxql.Token) bool

func RegistrySKFileReaderCreator added in v1.2.0

func RegistrySKFileReaderCreator(oid uint32, creator SKFileReaderCreator) bool

RegistrySKFileReaderCreator registers a SKFileReaderCreator for the given object identifier (oid).
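
A hedged registration sketch, typically run from an init function. The import paths, the oid value, and the assumption that the returned bool reports registration success are all illustrative.

package myindex

import (
	// Illustrative import paths; substitute the module's actual ones.
	"github.com/openGemini/openGemini/engine/hybridqp"
	"github.com/openGemini/openGemini/engine/index/sparseindex"
	"github.com/openGemini/openGemini/lib/record"
	"github.com/openGemini/openGemini/lib/rpn"
)

// MyCreator is a hypothetical SKFileReaderCreator implementation that
// delegates to the built-in bloom filter reader.
type MyCreator struct{}

func (c *MyCreator) CreateSKFileReader(rpnExpr *rpn.RPNExpr, schema record.Schemas,
	option hybridqp.Options, isCache bool) (sparseindex.SKFileReader, error) {
	return sparseindex.NewBloomFilterIndexReader(rpnExpr, schema, option, isCache)
}

const myIndexOid = 42 // hypothetical object identifier for this index type

func init() {
	// Assumed: the returned bool reports whether registration succeeded.
	if !sparseindex.RegistrySKFileReaderCreator(myIndexOid, &MyCreator{}) {
		panic("SKFileReaderCreator already registered for oid")
	}
}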

Types

type BloomFilterFullTextIndexReader added in v1.2.0

type BloomFilterFullTextIndexReader struct {
	// contains filtered or unexported fields
}

func NewBloomFilterFullTextIndexReader added in v1.2.0

func NewBloomFilterFullTextIndexReader(rpnExpr *rpn.RPNExpr, schema record.Schemas, option hybridqp.Options, isCache bool) (*BloomFilterFullTextIndexReader, error)

func (*BloomFilterFullTextIndexReader) Close added in v1.2.0

func (*BloomFilterFullTextIndexReader) GetFragmentRowCount added in v1.5.0

func (r *BloomFilterFullTextIndexReader) GetFragmentRowCount(fragId uint32) (int64, error)

func (*BloomFilterFullTextIndexReader) MayBeInFragment added in v1.2.0

func (r *BloomFilterFullTextIndexReader) MayBeInFragment(fragId uint32) (bool, error)

func (*BloomFilterFullTextIndexReader) ReInit added in v1.2.0

func (r *BloomFilterFullTextIndexReader) ReInit(file interface{}) (err error)

func (*BloomFilterFullTextIndexReader) StartSpan added in v1.3.0

func (r *BloomFilterFullTextIndexReader) StartSpan(span *tracing.Span)

type BloomFilterFullTextReaderCreator added in v1.2.0

type BloomFilterFullTextReaderCreator struct {
}

func (*BloomFilterFullTextReaderCreator) CreateSKFileReader added in v1.2.0

func (index *BloomFilterFullTextReaderCreator) CreateSKFileReader(rpnExpr *rpn.RPNExpr, schema record.Schemas, option hybridqp.Options, isCache bool) (SKFileReader, error)

type BloomFilterIndexReader added in v1.2.0

type BloomFilterIndexReader struct {
	// contains filtered or unexported fields
}

func NewBloomFilterIndexReader added in v1.2.0

func NewBloomFilterIndexReader(rpnExpr *rpn.RPNExpr, schema record.Schemas, option hybridqp.Options, isCache bool) (*BloomFilterIndexReader, error)

func NewBloomFilterIndexReaderWithIndexType added in v1.4.0

func NewBloomFilterIndexReaderWithIndexType(rpnExpr *rpn.RPNExpr, schema record.Schemas, option hybridqp.Options, isCache bool, indexType index.IndexType) (*BloomFilterIndexReader, error)

func (*BloomFilterIndexReader) Close added in v1.2.0

func (r *BloomFilterIndexReader) Close() error

func (*BloomFilterIndexReader) GetFragmentRowCount added in v1.5.0

func (r *BloomFilterIndexReader) GetFragmentRowCount(fragId uint32) (int64, error)

func (*BloomFilterIndexReader) MayBeInFragment added in v1.2.0

func (r *BloomFilterIndexReader) MayBeInFragment(fragId uint32) (bool, error)

func (*BloomFilterIndexReader) ReInit added in v1.2.0

func (r *BloomFilterIndexReader) ReInit(file interface{}) (err error)

func (*BloomFilterIndexReader) StartSpan added in v1.3.0

func (r *BloomFilterIndexReader) StartSpan(span *tracing.Span)

type BloomFilterIpIndexWriter added in v1.4.0

type BloomFilterIpIndexWriter struct {
	// contains filtered or unexported fields
}

func NewBloomFilterIpWriter added in v1.4.0

func NewBloomFilterIpWriter(dir, msName, dataFilePath, lockPath string, tokens string) *BloomFilterIpIndexWriter

func (BloomFilterIpIndexWriter) Close added in v1.4.0

func (w BloomFilterIpIndexWriter) Close() error

func (*BloomFilterIpIndexWriter) CreateAttachIndex added in v1.4.0

func (b *BloomFilterIpIndexWriter) CreateAttachIndex(writeRec *record.Record, schemaIdx, rowsPerSegment []int) error

func (*BloomFilterIpIndexWriter) CreateDetachIndex added in v1.4.0

func (b *BloomFilterIpIndexWriter) CreateDetachIndex(writeRec *record.Record, schemaIdx, rowsPerSegment []int,
	dataBuf [][]byte) ([][]byte, []string)

func (BloomFilterIpIndexWriter) Files added in v1.5.0

func (w BloomFilterIpIndexWriter) Files() []string

func (*BloomFilterIpIndexWriter) Flush added in v1.5.0

func (b *BloomFilterIpIndexWriter) Flush() error

func (*BloomFilterIpIndexWriter) GenBloomFilterData added in v1.4.0

func (b *BloomFilterIpIndexWriter) GenBloomFilterData(src *record.ColVal, rowsPerSegment []int, refType int) []byte

func (BloomFilterIpIndexWriter) GetWriter added in v1.5.0

func (w BloomFilterIpIndexWriter) GetWriter(file string) (*colstore.IndexWriter, error)

func (BloomFilterIpIndexWriter) Open added in v1.4.0

func (w BloomFilterIpIndexWriter) Open()

type BloomFilterIpReaderCreator added in v1.4.0

type BloomFilterIpReaderCreator struct{}

func (*BloomFilterIpReaderCreator) CreateSKFileReader added in v1.4.0

func (b *BloomFilterIpReaderCreator) CreateSKFileReader(rpnExpr *rpn.RPNExpr, schema record.Schemas, option hybridqp.Options, isCache bool) (SKFileReader, error)

type BloomFilterReaderCreator added in v1.2.0

type BloomFilterReaderCreator struct {
}

func (*BloomFilterReaderCreator) CreateSKFileReader added in v1.2.0

func (index *BloomFilterReaderCreator) CreateSKFileReader(rpnExpr *rpn.RPNExpr, schema record.Schemas, option hybridqp.Options, isCache bool) (SKFileReader, error)

type BloomFilterWriter added in v1.2.0

type BloomFilterWriter struct {
	// contains filtered or unexported fields
}

func NewBloomFilterWriter added in v1.2.0

func NewBloomFilterWriter(dir, msName, dataFilePath, lockPath string, tokens string) *BloomFilterWriter

func (BloomFilterWriter) Close added in v1.2.0

func (w BloomFilterWriter) Close() error

func (*BloomFilterWriter) CreateAttachIndex added in v1.3.0

func (b *BloomFilterWriter) CreateAttachIndex(writeRec *record.Record, schemaIdx, rowsPerSegment []int) error

func (*BloomFilterWriter) CreateDetachIndex added in v1.3.0

func (b *BloomFilterWriter) CreateDetachIndex(writeRec *record.Record, schemaIdx, rowsPerSegment []int,
	dataBuf [][]byte) ([][]byte, []string)

func (BloomFilterWriter) Files added in v1.5.0

func (w BloomFilterWriter) Files() []string

func (*BloomFilterWriter) Flush added in v1.5.0

func (b *BloomFilterWriter) Flush() error

func (*BloomFilterWriter) GenBloomFilterData added in v1.2.0

func (b *BloomFilterWriter) GenBloomFilterData(src *record.ColVal, rowsPerSegment []int, refType int) []byte

func (BloomFilterWriter) GetWriter added in v1.5.0

func (w BloomFilterWriter) GetWriter(file string) (*colstore.IndexWriter, error)

func (BloomFilterWriter) Open added in v1.2.0

func (w BloomFilterWriter) Open()

type ClusterIndex added in v1.5.0

type ClusterIndex struct {
	// contains filtered or unexported fields
}

ClusterIndex represents the top-level clustered index structure, organized by fields

func NewClusterIndex added in v1.5.0

func NewClusterIndex() *ClusterIndex

NewClusterIndex creates a new ClusterIndex instance

func (*ClusterIndex) Query added in v1.5.0

func (ci *ClusterIndex) Query(expr influxql.Expr) (*roaring.Bitmap, error)

Query executes a query and returns the resulting segment IDs

func (*ClusterIndex) Write added in v1.5.0

func (ci *ClusterIndex) Write(segNumID uint32, fields map[string]*FieldToWrite) error

Write indexes a document into the specified segment. Data from the same field must be of the same type.

func (*ClusterIndex) WritePkRec added in v1.5.0

func (ci *ClusterIndex) WritePkRec(pk *record.Record) error

WritePkRec indexes a primary record into the specified segment
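
A minimal query sketch under stated assumptions: the index was already populated via Write or WritePkRec (FieldToWrite has unexported fields, so its construction is internal to the package and omitted here), and the condition string is illustrative.

// querySegments returns the IDs of segments whose data may match cond.
func querySegments(ci *sparseindex.ClusterIndex, cond string) ([]int, error) {
	expr, err := influxql.ParseExpr(cond) // e.g. "region = 'eu'"
	if err != nil {
		return nil, err
	}
	bm, err := ci.Query(expr) // bitmap of candidate segment IDs
	if err != nil {
		return nil, err
	}
	return sparseindex.BitmapToIDs(bm), nil
}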

type ColumnRef

type ColumnRef struct {
	// contains filtered or unexported fields
}

func NewColumnRef

func NewColumnRef(name string, dataType int, column *record.ColVal) *ColumnRef

type Condition added in v1.5.0

type Condition struct {
	Field string
	Op    influxql.Token
	Value interface{}
}

Condition represents a query condition (field name, operator, value)

type FieldCluster added in v1.5.0

type FieldCluster struct {
	// Mapping from field values to bitmaps of segment IDs containing the value
	ValueToSegments map[interface{}]*roaring.Bitmap
	// Stores the field values in sorted order
	SortedValues []interface{}
	FieldType    int
}

FieldCluster represents a clustered index for a specific field, organizing segments and documents by field values

func NewFieldCluster added in v1.5.0

func NewFieldCluster(fieldType int) *FieldCluster

NewFieldCluster creates a new FieldCluster instance

type FieldRef

type FieldRef struct {
	// contains filtered or unexported fields
}

func NewFieldRef

func NewFieldRef(cols []*ColumnRef, column int, row int) *FieldRef

func (*FieldRef) Equals

func (f *FieldRef) Equals(rhs *FieldRef) bool

func (*FieldRef) IsNegativeInfinity

func (f *FieldRef) IsNegativeInfinity() bool

func (*FieldRef) IsNull

func (f *FieldRef) IsNull() bool

func (*FieldRef) IsPositiveInfinity

func (f *FieldRef) IsPositiveInfinity() bool

func (*FieldRef) Less

func (f *FieldRef) Less(rhs *FieldRef) bool

func (*FieldRef) MatchedIndices added in v1.5.0

func (f *FieldRef) MatchedIndices(ColVal *record.ColVal, compOp influxql.Token, isFirstPK bool) ([]int, error)

func (*FieldRef) Set

func (f *FieldRef) Set(cols []*ColumnRef, column, row int)

func (*FieldRef) SetNegativeInfinity

func (f *FieldRef) SetNegativeInfinity()

func (*FieldRef) SetPositiveInfinity

func (f *FieldRef) SetPositiveInfinity()

type FieldToWrite added in v1.5.0

type FieldToWrite struct {
	// contains filtered or unexported fields
}

type FullTextIdxWriter added in v1.2.0

type FullTextIdxWriter struct {
	// contains filtered or unexported fields
}

func NewBloomFilterFullTextWriter added in v1.3.0

func NewBloomFilterFullTextWriter(dir, msName, dataFilePath, lockPath string, tokens string) *FullTextIdxWriter

func (FullTextIdxWriter) Close added in v1.2.0

func (w FullTextIdxWriter) Close() error

func (*FullTextIdxWriter) CreateAttachIndex added in v1.3.0

func (f *FullTextIdxWriter) CreateAttachIndex(writeRec *record.Record, schemaIdx, rowsPerSegment []int) error

func (*FullTextIdxWriter) CreateDetachIndex added in v1.3.0

func (f *FullTextIdxWriter) CreateDetachIndex(writeRec *record.Record, schemaIdx, rowsPerSegment []int, dataBuf [][]byte) ([][]byte, []string)

func (FullTextIdxWriter) Files added in v1.5.0

func (w FullTextIdxWriter) Files() []string

func (*FullTextIdxWriter) Flush added in v1.5.0

func (f *FullTextIdxWriter) Flush() error

func (FullTextIdxWriter) GetWriter added in v1.5.0

func (w FullTextIdxWriter) GetWriter(file string) (*colstore.IndexWriter, error)

func (FullTextIdxWriter) Open added in v1.2.0

func (w FullTextIdxWriter) Open()

type FunctionBase

type FunctionBase struct {
}

type GroupPKIndexReaderImpl added in v1.5.0

type GroupPKIndexReaderImpl struct {
	// contains filtered or unexported fields
}

func NewGroupPKIndexReader added in v1.5.0

func NewGroupPKIndexReader(rowsNumPerFragment int, coarseIndexFragment int, minRowsForSeek int) *GroupPKIndexReaderImpl

func (*GroupPKIndexReaderImpl) Close added in v1.5.0

func (r *GroupPKIndexReaderImpl) Close() error

func (*GroupPKIndexReaderImpl) EvaluateCondition added in v1.5.0

func (r *GroupPKIndexReaderImpl) EvaluateCondition(pkRec *record.Record, keyCondition KeyCondition, fragmentCount uint32) (fragment.FragmentRanges, error)

func (*GroupPKIndexReaderImpl) Scan added in v1.5.0

func (r *GroupPKIndexReaderImpl) Scan(
	fileName string,
	pkRec *record.Record,
	pkMark fragment.IndexFragment,
	keyCondition KeyCondition,
) (fragment.FragmentRanges, error)

type IndexProperty

type IndexProperty struct {
	RowsNumPerFragment  int
	CoarseIndexFragment int
	MinRowsForSeek      int
}

func NewIndexProperty

func NewIndexProperty(rowsNumPerFragment, coarseIndexFragment, minRowsForSeek int) *IndexProperty

type KeyCondition

type KeyCondition interface {
	HavePrimaryKey() bool
	GetMaxKeyIndex() int
	IsFirstPrimaryKey() bool
	CanDoBinarySearch() bool
	MayBeInRange(usedKeySize int, indexLeft []*FieldRef, indexRight []*FieldRef, dataTypes []int) (bool, error)
	CheckInRange(rgs []*Range, dataTypes []int) (Mark, error)
	AlwaysInRange() (bool, error)
	GetRPN() []*RPNElement
}

type KeyConditionImpl

type KeyConditionImpl struct {
	// contains filtered or unexported fields
}

func NewKeyCondition

func NewKeyCondition(timeCondition, condition influxql.Expr, pkSchema record.Schemas) (*KeyConditionImpl, error)

func (*KeyConditionImpl) AlwaysInRange added in v1.2.0

func (kc *KeyConditionImpl) AlwaysInRange() (bool, error)

AlwaysInRange checks whether the index cannot be used, so that it can be pruned in advance to improve efficiency.

func (*KeyConditionImpl) CanDoBinarySearch

func (kc *KeyConditionImpl) CanDoBinarySearch() bool

func (*KeyConditionImpl) CheckInRange

func (kc *KeyConditionImpl) CheckInRange(
	rgs []*Range,
	dataTypes []int,
) (Mark, error)

CheckInRange checks whether the condition and its negation are feasible in the direct product of the single-column ranges specified by the hyper-rectangle.
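
For example, given the single-column ranges x in [1, 2] and y in [1, 4] and the condition x > 1 AND y < 3, the condition can be true (x = 2, y = 1) and its negation can also be true (x = 1, y = 3), so the returned Mark allows both outcomes.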

func (*KeyConditionImpl) GetMaxKeyIndex

func (kc *KeyConditionImpl) GetMaxKeyIndex() int

func (*KeyConditionImpl) GetRPN

func (kc *KeyConditionImpl) GetRPN() []*RPNElement

func (*KeyConditionImpl) HavePrimaryKey

func (kc *KeyConditionImpl) HavePrimaryKey() bool

func (*KeyConditionImpl) IsFirstPrimaryKey

func (kc *KeyConditionImpl) IsFirstPrimaryKey() bool

func (*KeyConditionImpl) MayBeInRange

func (kc *KeyConditionImpl) MayBeInRange(
	usedKeySize int,
	leftKeys []*FieldRef,
	rightKeys []*FieldRef,
	dataTypes []int,
) (bool, error)

MayBeInRange checks whether the condition may hold within the target range.

func (*KeyConditionImpl) SetRPN

func (kc *KeyConditionImpl) SetRPN(rpn []*RPNElement)

type Mark

type Mark struct {
	// contains filtered or unexported fields
}

Mark and these special constants are used to implement KeyCondition. When used as an initial mask argument to the KeyCondition.CheckInRange method, they effectively prevent calculation of a discarded Mark component, as it is already set to true.
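
A minimal sketch of Mark composition, assuming And, Or, and Not implement the usual three-valued logic over (canBeTrue, canBeFalse) pairs; the commented results follow from that assumption.

func markExamples() {
	maybe := sparseindex.NewMark(true, true)   // may be true, may be false
	always := sparseindex.NewMark(true, false) // always true

	_ = maybe.And(always) // (true, true): AND preserves the uncertainty
	_ = maybe.Or(always)  // (true, false): OR with always-true is always true
	_ = maybe.Not()       // (true, true): the negation of "maybe" is still "maybe"
}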

func NewMark

func NewMark(canBeTrue, canBeFalse bool) Mark

func (Mark) And

func (m Mark) And(mask Mark) Mark

func (Mark) Not

func (m Mark) Not() Mark

func (Mark) Or

func (m Mark) Or(mask Mark) Mark

type MinMaxFilterReaders added in v1.5.0

type MinMaxFilterReaders struct {
	// contains filtered or unexported fields
}

MinMaxFilterReaders manages multiple min-max filter readers for different fields

func NewMinMaxFilterReaders added in v1.5.0

func NewMinMaxFilterReaders(path, file string, schemas record.Schemas, ir *influxql.IndexRelation) *MinMaxFilterReaders

NewMinMaxFilterReaders creates a new MinMaxFilterReaders

func (*MinMaxFilterReaders) Close added in v1.5.0

func (r *MinMaxFilterReaders) Close() error

Close closes all readers

func (*MinMaxFilterReaders) GetRowCount added in v1.5.0

func (r *MinMaxFilterReaders) GetRowCount(blockId int64, elem *rpn.SKRPNElement) (int64, error)

GetRowCount returns the row count for the fragment

func (*MinMaxFilterReaders) InitReaderAndQuery added in v1.5.0

func (r *MinMaxFilterReaders) InitReaderAndQuery(elem *rpn.SKRPNElement) (*record.Record, error)

InitReaderAndQuery initializes a reader and gets the query string

func (*MinMaxFilterReaders) IsExist added in v1.5.0

func (r *MinMaxFilterReaders) IsExist(blockId int64, elem *rpn.SKRPNElement) (bool, error)

IsExist checks if the fragment might contain data matching the condition

func (*MinMaxFilterReaders) StartSpan added in v1.5.0

func (r *MinMaxFilterReaders) StartSpan(span *tracing.Span)

StartSpan starts a tracing span

type MinMaxIndexReader added in v1.2.0

type MinMaxIndexReader struct {

	// ReadFunc reads the index data according to the given file and index fields.
	ReadFunc func(file interface{}, rec *record.Record, isCache bool) (*record.Record, error)
	// contains filtered or unexported fields
}

func NewMinMaxIndexReader added in v1.2.0

func NewMinMaxIndexReader(rpnExpr *rpn.RPNExpr, schema record.Schemas, option hybridqp.Options, isCache bool) (*MinMaxIndexReader, error)

func (*MinMaxIndexReader) Close added in v1.2.0

func (r *MinMaxIndexReader) Close() error

func (*MinMaxIndexReader) GetFragmentRowCount added in v1.5.0

func (r *MinMaxIndexReader) GetFragmentRowCount(fragId uint32) (int64, error)

func (*MinMaxIndexReader) MayBeInFragment added in v1.2.0

func (r *MinMaxIndexReader) MayBeInFragment(fragId uint32) (bool, error)

func (*MinMaxIndexReader) ReInit added in v1.2.0

func (r *MinMaxIndexReader) ReInit(file interface{}) (err error)

func (*MinMaxIndexReader) StartSpan added in v1.3.0

func (r *MinMaxIndexReader) StartSpan(span *tracing.Span)

type MinMaxReaderCreator added in v1.2.0

type MinMaxReaderCreator struct {
}

func (*MinMaxReaderCreator) CreateSKFileReader added in v1.2.0

func (creator *MinMaxReaderCreator) CreateSKFileReader(rpnExpr *rpn.RPNExpr, schema record.Schemas, option hybridqp.Options, isCache bool) (SKFileReader, error)

type MinMaxWriter added in v1.2.0

type MinMaxWriter struct {
	// contains filtered or unexported fields
}

func NewMinMaxWriter added in v1.2.0

func NewMinMaxWriter(dir, msName, dataFilePath, lockPath string, tokens string) *MinMaxWriter

func (*MinMaxWriter) Close added in v1.2.0

func (m *MinMaxWriter) Close() error

func (*MinMaxWriter) CreateAttachIndex added in v1.3.0

func (m *MinMaxWriter) CreateAttachIndex(writeRec *record.Record, schemaIdx, rowsPerSegment []int) error

func (*MinMaxWriter) CreateDetachIndex added in v1.3.0

func (m *MinMaxWriter) CreateDetachIndex(writeRec *record.Record, schemaIdx, rowsPerSegment []int, dataBuf [][]byte) ([][]byte, []string)

func (MinMaxWriter) Files added in v1.5.0

func (w MinMaxWriter) Files() []string

func (*MinMaxWriter) Flush added in v1.5.0

func (m *MinMaxWriter) Flush() error

func (MinMaxWriter) GetWriter added in v1.5.0

func (w MinMaxWriter) GetWriter(file string) (*colstore.IndexWriter, error)

func (MinMaxWriter) Open added in v1.2.0

func (w MinMaxWriter) Open()

type Number added in v1.5.0

type Number interface {
	~int64 | ~float64
}

type OBSFilterPath added in v1.2.0

type OBSFilterPath struct {
	// contains filtered or unexported fields
}

func NewOBSFilterPath added in v1.2.0

func NewOBSFilterPath(localPath, remotePath string, option *obs.ObsOptions) *OBSFilterPath

func (*OBSFilterPath) LocalPath added in v1.2.0

func (o *OBSFilterPath) LocalPath() string

func (*OBSFilterPath) Name added in v1.2.0

func (o *OBSFilterPath) Name() string

func (*OBSFilterPath) Option added in v1.2.0

func (o *OBSFilterPath) Option() *obs.ObsOptions

func (*OBSFilterPath) RemotePath added in v1.2.0

func (o *OBSFilterPath) RemotePath() string

type PKIndexReader added in v1.2.0

type PKIndexReader interface {
	Scan(pkFile string,
		pkRec *record.Record,
		pkMark fragment.IndexFragment,
		keyCondition KeyCondition,
	) (fragment.FragmentRanges, error)
	Close() error
}

type PKIndexReaderImpl added in v1.2.0

type PKIndexReaderImpl struct {
	// contains filtered or unexported fields
}

func NewPKIndexReader added in v1.2.0

func NewPKIndexReader(rowsNumPerFragment int, coarseIndexFragment int, minRowsForSeek int) *PKIndexReaderImpl

func (*PKIndexReaderImpl) Close added in v1.2.0

func (r *PKIndexReaderImpl) Close() error

func (*PKIndexReaderImpl) Scan added in v1.2.0

func (r *PKIndexReaderImpl) Scan(
	pkFile string,
	pkRec *record.Record,
	pkMark fragment.IndexFragment,
	keyCondition KeyCondition,
) (fragment.FragmentRanges, error)

Scan filters fragment ranges based on the primary key in the condition, and determines whether to do a binary search or an exclusion search according to the sequence of keys in the primary key. The following example illustrates the usage of Scan (a code sketch follows the example):

  1. origin record: x -> [1, 2, 1, 2, 1, 2, 2, 1] y -> [1, 1, 3, 4, 2, 2, 3, 4]

  2. sorted record(sorted by x, y): x -> [1, 1, 1, 1, 2, 2, 2, 2] y -> [1, 2, 3, 4, 1, 2, 3, 4]

  3. primary index record (fragment size is 2): x -> [1, 1, 2, 2, 2] y -> [1, 3, 1, 3, 4] fragment index -> [0, 1, 2, 3]

  4. key condition: x > 1 and y < 3

  5. scan results: fragment range -> [1, 3)
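
A hedged code sketch of driving the example above through Scan, assuming the module's record, fragment, and influxql imports. pkRec and pkMark are normally produced by PKIndexWriterImpl.Build at flush time; the file name, the nil time condition, and the reader parameters are illustrative assumptions.

// scanExample filters fragments for the condition x > 1 AND y < 3.
func scanExample(pkRec *record.Record, pkMark fragment.IndexFragment,
	pkSchema record.Schemas, cond influxql.Expr) (fragment.FragmentRanges, error) {
	// rowsNumPerFragment = 2 matches the example; the other two
	// parameters (coarseIndexFragment, minRowsForSeek) are illustrative.
	reader := sparseindex.NewPKIndexReader(2, 1, 0)
	defer reader.Close()

	// A nil time condition is an assumption for this sketch.
	kc, err := sparseindex.NewKeyCondition(nil, cond, pkSchema)
	if err != nil {
		return nil, err
	}
	// With the data above, the result covers fragment range [1, 3).
	return reader.Scan("path/to/primary.idx", pkRec, pkMark, kc)
}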

type PKIndexWriter added in v1.2.0

type PKIndexWriter interface {
	Build(srcRec *record.Record,
		pkSchema record.Schemas,
		rowsNumPerFragment []int,
		tcLocation int8,
		fixRowsPerSegment int,
	) (
		*record.Record, fragment.IndexFragment, error,
	)
	Close() error
}

type PKIndexWriterImpl added in v1.2.0

type PKIndexWriterImpl struct {
}

func NewPKIndexWriter added in v1.2.0

func NewPKIndexWriter() *PKIndexWriterImpl

func (*PKIndexWriterImpl) Build added in v1.2.0

func (w *PKIndexWriterImpl) Build(
	srcRec *record.Record,
	pkSchema record.Schemas,
	rowsNumPerFragment []int,
	tcLocation int8,
	fixRowsPerSegment int,
) (
	*record.Record,
	fragment.IndexFragment,
	error,
)

Build generates a sparse primary index based on sorted data to be flushed to disk.

func (*PKIndexWriterImpl) Close added in v1.2.0

func (w *PKIndexWriterImpl) Close() error

type RPNElement

type RPNElement struct {
	// contains filtered or unexported fields
}

RPNElement is an element of a Reverse Polish Notation (RPN) expression. RPN is a method for conveying mathematical expressions without the use of separators such as brackets and parentheses: operators follow their operands, removing the need for brackets to define evaluation priority. More details: https://en.wikipedia.org/wiki/Reverse_Polish_notation.
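
For example, the infix condition x > 1 AND y < 3 is written in RPN as x 1 > y 3 < AND: each comparison is evaluated once its operator is reached, and AND then combines the two intermediate results.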

func NewRPNElement added in v1.2.0

func NewRPNElement(op rpn.Op) *RPNElement

func (*RPNElement) IsFirstPrimaryKey added in v1.5.0

func (rpnElem *RPNElement) IsFirstPrimaryKey() bool

func (*RPNElement) MatchedIndices added in v1.5.0

func (rpnElem *RPNElement) MatchedIndices(pkRec *record.Record) ([]int, error)

type Range

type Range struct {
	// contains filtered or unexported fields
}

Range represents a range with open or closed ends, possibly unbounded.

func NewRange

func NewRange(left, right *FieldRef, li, ri bool) *Range

func (*Range) MatchedIndices added in v1.5.0

func (r *Range) MatchedIndices(ColVal *record.ColVal, isFirstPK bool) ([]int, error)

type SKCondition added in v1.2.0

type SKCondition interface {
	IsExist(blockId int64, reader rpn.SKBaseReader) (bool, error)
	GetRowCount(blockId int64, reader rpn.SKBaseReader) (int64, error)
}

func NewSKCondition added in v1.2.0

func NewSKCondition(rpnExpr *rpn.RPNExpr, schema record.Schemas) (SKCondition, error)

type SKConditionImpl added in v1.2.0

type SKConditionImpl struct {
	// contains filtered or unexported fields
}

func (*SKConditionImpl) GetRowCount added in v1.5.0

func (c *SKConditionImpl) GetRowCount(blockId int64, reader rpn.SKBaseReader) (int64, error)

func (*SKConditionImpl) IsExist added in v1.2.0

func (c *SKConditionImpl) IsExist(blockId int64, reader rpn.SKBaseReader) (bool, error)

type SKFileReader added in v1.2.0

type SKFileReader interface {
	// MayBeInFragment determines whether a fragment in a file meets the query condition.
	MayBeInFragment(fragId uint32) (bool, error)
	GetFragmentRowCount(fragId uint32) (int64, error)
	// ReInit reinitializes the reader so that a SKFileReader can be reused across multiple files.
	ReInit(file interface{}) error
	StartSpan(span *tracing.Span)
	// Close is used to close the SKFileReader
	Close() error
}

SKFileReader is an executor of skip-index data reading corresponding to an index field in the query.
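
A minimal filtering-loop sketch using only the interface above: the reader is re-pointed at each data file with ReInit, then queried fragment by fragment; dataFile and fragCount are placeholders supplied by the caller.

// filterFragments returns the fragments that may contain matching rows.
func filterFragments(reader sparseindex.SKFileReader, dataFile interface{}, fragCount uint32) ([]uint32, error) {
	if err := reader.ReInit(dataFile); err != nil {
		return nil, err
	}
	var candidates []uint32
	for fragId := uint32(0); fragId < fragCount; fragId++ {
		ok, err := reader.MayBeInFragment(fragId)
		if err != nil {
			return nil, err
		}
		if ok {
			// The fragment may contain matches; a false result would
			// mean it provably contains none and can be skipped.
			candidates = append(candidates, fragId)
		}
	}
	return candidates, nil
}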

type SKFileReaderCreator added in v1.2.0

type SKFileReaderCreator interface {
	CreateSKFileReader(rpnExpr *rpn.RPNExpr, schema record.Schemas, option hybridqp.Options, isCache bool) (SKFileReader, error)
}

SKFileReaderCreator abstracts the SKFileReader implementations of multiple skip indexes via the factory pattern.

type SKFileReaderCreatorFactory added in v1.2.0

type SKFileReaderCreatorFactory struct {
	// contains filtered or unexported fields
}
var SKFileReaderInstance *SKFileReaderCreatorFactory

func GetSKFileReaderFactoryInstance added in v1.2.0

func GetSKFileReaderFactoryInstance() *SKFileReaderCreatorFactory

func NewSKFileReaderCreatorFactory added in v1.2.0

func NewSKFileReaderCreatorFactory() *SKFileReaderCreatorFactory

func (*SKFileReaderCreatorFactory) Add added in v1.2.0

func (*SKFileReaderCreatorFactory) Find added in v1.2.0

type SKIndexReader added in v1.2.0

type SKIndexReader interface {
	// CreateSKFileReaders generates SKFileReaders for each index field based on the skip index information and condition
	// which is used to quickly determine whether a fragment meets the condition.
	CreateSKFileReaders(option hybridqp.Options, mstInfo *influxql.Measurement, isCache bool) ([]SKFileReader, error)
	// Scan is used to filter fragment ranges based on the secondary key in the condition.
	Scan(reader SKFileReader, rgs fragment.FragmentRanges) (fragment.FragmentRanges, fragment.FragmentRangeDetails, error)
	// Close is used to close the SKIndexReader
	Close() error
}

SKIndexReader is the skip-index read interface.

type SKIndexReaderImpl added in v1.2.0

type SKIndexReaderImpl struct {
	// contains filtered or unexported fields
}

func NewSKIndexReader added in v1.2.0

func NewSKIndexReader(rowsNumPerFragment int, coarseIndexFragment int, minRowsForSeek int) *SKIndexReaderImpl

func (*SKIndexReaderImpl) Close added in v1.2.0

func (r *SKIndexReaderImpl) Close() error

func (*SKIndexReaderImpl) CreateSKFileReaders added in v1.2.0

func (r *SKIndexReaderImpl) CreateSKFileReaders(option hybridqp.Options, mstInfo *influxql.Measurement, isCache bool) ([]SKFileReader, error)

func (*SKIndexReaderImpl) Scan added in v1.2.0

type SetIndexReader added in v1.2.0

type SetIndexReader struct {
	// contains filtered or unexported fields
}

func NewSetIndexReader added in v1.2.0

func NewSetIndexReader(rpnExpr *rpn.RPNExpr, schema record.Schemas, option hybridqp.Options, isCache bool) (*SetIndexReader, error)

func (*SetIndexReader) Close added in v1.2.0

func (r *SetIndexReader) Close() error

func (*SetIndexReader) GetFragmentRowCount added in v1.5.0

func (r *SetIndexReader) GetFragmentRowCount(fragId uint32) (int64, error)

func (*SetIndexReader) MayBeInFragment added in v1.2.0

func (r *SetIndexReader) MayBeInFragment(fragId uint32) (bool, error)

func (*SetIndexReader) ReInit added in v1.2.0

func (r *SetIndexReader) ReInit(file interface{}) (err error)

func (*SetIndexReader) StartSpan added in v1.3.0

func (r *SetIndexReader) StartSpan(span *tracing.Span)

type SetReaderCreator added in v1.2.0

type SetReaderCreator struct {
}

func (*SetReaderCreator) CreateSKFileReader added in v1.2.0

func (index *SetReaderCreator) CreateSKFileReader(rpnExpr *rpn.RPNExpr, schema record.Schemas, option hybridqp.Options, isCache bool) (SKFileReader, error)

type SetWriter added in v1.2.0

type SetWriter struct {
	// contains filtered or unexported fields
}

func NewSetWriter added in v1.2.0

func NewSetWriter(dir, msName, dataFilePath, lockPath string, token string) *SetWriter

func (SetWriter) Close added in v1.2.0

func (w SetWriter) Close() error

func (*SetWriter) CreateAttachIndex added in v1.3.0

func (s *SetWriter) CreateAttachIndex(writeRec *record.Record, schemaIdx, rowsPerSegment []int) error

func (*SetWriter) CreateDetachIndex added in v1.3.0

func (s *SetWriter) CreateDetachIndex(writeRec *record.Record, schemaIdx, rowsPerSegment []int, dataBuf [][]byte) ([][]byte, []string)

func (SetWriter) Files added in v1.5.0

func (w SetWriter) Files() []string

func (*SetWriter) Flush added in v1.5.0

func (s *SetWriter) Flush() error

func (SetWriter) GetWriter added in v1.5.0

func (w SetWriter) GetWriter(file string) (*colstore.IndexWriter, error)

func (SetWriter) Open added in v1.2.0

func (w SetWriter) Open()

type SkInfo added in v1.2.0

type SkInfo struct {
	// contains filtered or unexported fields
}

type TsspFile added in v1.2.0

type TsspFile interface {
	Name() string
	Path() string
}

type UniversalBFCreator added in v1.5.0

type UniversalBFCreator struct {
}

func (*UniversalBFCreator) CreateSKFileReader added in v1.5.0

func (creator *UniversalBFCreator) CreateSKFileReader(rpnExpr *rpn.RPNExpr, schema record.Schemas, option hybridqp.Options, isCache bool) (SKFileReader, error)

type UniversalBloomFilter added in v1.5.0

type UniversalBloomFilter struct {
	Filter *bloom.BloomFilter
}

func DeserializeBloomFilter added in v1.5.0

func DeserializeBloomFilter(data []byte) (*UniversalBloomFilter, error)

func (*UniversalBloomFilter) Serialize added in v1.5.0

func (bf *UniversalBloomFilter) Serialize() ([]byte, error)

func (*UniversalBloomFilter) Test added in v1.5.0

func (bf *UniversalBloomFilter) Test(value []byte) bool
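
A round-trip sketch under stated assumptions: Serialize and DeserializeBloomFilter are symmetric, and Test reports probable membership subject to the configured false-positive rate (see DefaultFalseRate = 0.001 above).

// roundTrip serializes a filter, restores it, and probes for a key.
func roundTrip(bf *sparseindex.UniversalBloomFilter, key []byte) (bool, error) {
	buf, err := bf.Serialize()
	if err != nil {
		return false, err
	}
	restored, err := sparseindex.DeserializeBloomFilter(buf)
	if err != nil {
		return false, err
	}
	// true: the key may be present (false positives possible, no
	// false negatives); false: the key is definitely absent.
	return restored.Test(key), nil
}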

type UniversalBloomFilterReader added in v1.5.0

type UniversalBloomFilterReader struct {
	// contains filtered or unexported fields
}

func NewUniversalBloomFilterReader added in v1.5.0

func NewUniversalBloomFilterReader(rpnExpr *rpn.RPNExpr, schema record.Schemas, option hybridqp.Options) (*UniversalBloomFilterReader, error)

func (*UniversalBloomFilterReader) Close added in v1.5.0

func (r *UniversalBloomFilterReader) Close() error

func (*UniversalBloomFilterReader) GetFragmentRowCount added in v1.5.0

func (r *UniversalBloomFilterReader) GetFragmentRowCount(fragId uint32) (int64, error)

func (*UniversalBloomFilterReader) GetRowCount added in v1.5.0

func (r *UniversalBloomFilterReader) GetRowCount(blockId int64, elem *rpn.SKRPNElement) (int64, error)

func (*UniversalBloomFilterReader) IsExist added in v1.5.0

func (r *UniversalBloomFilterReader) IsExist(blockId int64, elem *rpn.SKRPNElement) (bool, error)

func (*UniversalBloomFilterReader) MayBeInFragment added in v1.5.0

func (r *UniversalBloomFilterReader) MayBeInFragment(fragId uint32) (bool, error)

func (*UniversalBloomFilterReader) ReInit added in v1.5.0

func (r *UniversalBloomFilterReader) ReInit(file interface{}) error

func (*UniversalBloomFilterReader) StartSpan added in v1.5.0

func (r *UniversalBloomFilterReader) StartSpan(span *tracing.Span)

type UniversalBloomFilterWriter added in v1.5.0

type UniversalBloomFilterWriter struct {
	// contains filtered or unexported fields
}

func NewUniversalGeneralBloomFilterWriter added in v1.5.0

func NewUniversalGeneralBloomFilterWriter(dir, msName, dataFilePath, lockPath, tokens string, params *influxql.IndexParam) *UniversalBloomFilterWriter

func (*UniversalBloomFilterWriter) BuildBloomFilterFromRecord added in v1.5.0

func (idx *UniversalBloomFilterWriter) BuildBloomFilterFromRecord(record *record.Record, colIdx int) (*UniversalBloomFilter, error)

func (*UniversalBloomFilterWriter) BuildUniversalBloomFilter added in v1.5.0

func (idx *UniversalBloomFilterWriter) BuildUniversalBloomFilter(record *record.Record, colIdx int, indexDir string) error

func (UniversalBloomFilterWriter) Close added in v1.5.0

func (w UniversalBloomFilterWriter) Close() error

func (*UniversalBloomFilterWriter) CreateAttachIndex added in v1.5.0

func (idx *UniversalBloomFilterWriter) CreateAttachIndex(writeRec *record.Record, schemaIdx, rowsPerSegment []int) error

func (*UniversalBloomFilterWriter) CreateDetachIndex added in v1.5.0

func (idx *UniversalBloomFilterWriter) CreateDetachIndex(writeRec *record.Record, schemaIdx, rowsPerSegment []int, dataBuf [][]byte) ([][]byte, []string)

func (UniversalBloomFilterWriter) Files added in v1.5.0

func (w UniversalBloomFilterWriter) Files() []string

func (*UniversalBloomFilterWriter) Flush added in v1.5.0

func (idx *UniversalBloomFilterWriter) Flush() error

func (UniversalBloomFilterWriter) GetWriter added in v1.5.0

func (w UniversalBloomFilterWriter) GetWriter(file string) (*colstore.IndexWriter, error)

func (*UniversalBloomFilterWriter) NewUniversalBloomFilter added in v1.5.0

func (idx *UniversalBloomFilterWriter) NewUniversalBloomFilter() *UniversalBloomFilter

func (UniversalBloomFilterWriter) Open added in v1.5.0

func (w UniversalBloomFilterWriter) Open()
