package coordinator

v1.0.1
Published: Mar 29, 2023 License: Apache-2.0 Imports: 39 Imported by: 0

Documentation

Index

Constants

const (
	DDLRetryInternalSecond      = 1
	DDLTimeOutSecond            = 30
	DMLRetryInternalMillisecond = 200
	DMLTimeOutSecond            = 30
)
const MaxShardKey = 64 * 1024

Variables

var ErrDatabaseNameRequired = errors.New("database name required")

ErrDatabaseNameRequired is returned when a statement that requires a database is executed and no database has been provided.
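
For illustration, a caller could test for this sentinel error with errors.Is; executeStatement below is a hypothetical helper, not part of this package:

	if err := executeStatement(stmt); errors.Is(err, coordinator.ErrDatabaseNameRequired) {
		// ask the client to select a database and retry
	}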

Functions

func GetStreamCtx added in v1.0.0

func GetStreamCtx() *streamCtx

func IgnoreErrForNotHA added in v1.0.0

func IgnoreErrForNotHA(err error) bool

IgnoreErrForNotHA reports whether the error should be ignored in the non-HA case. Ignoring it keeps the query from failing, but query data may be lost.
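
A hedged sketch of how a non-HA deployment might use it (queryShard is a hypothetical helper):

	if err := queryShard(shardID); err != nil {
		if !coordinator.IgnoreErrForNotHA(err) {
			return err
		}
		// non-HA deployment: continue, accepting possibly incomplete results
		log.Println("ignoring error in non-HA mode:", err)
	}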

func IsRetriedError added in v1.0.0

func IsRetriedError(err error) (isSpecial bool)

func IsRetryErrorForPtView added in v1.0.0

func IsRetryErrorForPtView(err error) bool

IsRetryErrorForPtView returns true if the db pt is not on this node and the request should therefore be retried.
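
A hedged sketch of a retry loop that combines these predicates with the DML constants above; writeWithRetry and doWrite are hypothetical helpers, and the package's own retry logic may differ:

	func writeWithRetry(rows []influx.Row) error {
		deadline := time.After(coordinator.DMLTimeOutSecond * time.Second)
		for {
			err := doWrite(rows) // hypothetical single write attempt
			if err == nil {
				return nil
			}
			if !coordinator.IsRetriedError(err) && !coordinator.IsRetryErrorForPtView(err) {
				return err // not a retriable error
			}
			select {
			case <-deadline:
				return err // give up after the DML timeout
			case <-time.After(coordinator.DMLRetryInternalMillisecond * time.Millisecond):
				// wait one retry interval, then try again
			}
		}
	}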

func PutStreamCtx added in v1.0.0

func PutStreamCtx(s *streamCtx)
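
GetStreamCtx and PutStreamCtx borrow and return a pooled stream context (the streamCtx type itself is unexported). A minimal sketch:

	sc := coordinator.GetStreamCtx()
	defer coordinator.PutStreamCtx(sc)
	// use sc with stream-related calls such as (*Stream).GenerateGroupKey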

Types

type BuilderPool added in v1.0.0

type BuilderPool struct {
	// contains filtered or unexported fields
}

func NewBuilderPool added in v1.0.0

func NewBuilderPool() *BuilderPool

func (*BuilderPool) Get added in v1.0.0

func (p *BuilderPool) Get() *strings.Builder

func (*BuilderPool) Put added in v1.0.0

func (p *BuilderPool) Put(r *strings.Builder)
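
BuilderPool pools *strings.Builder values. A minimal Get/Put sketch; whether Get resets the builder is not documented, so the Reset call is a defensive assumption:

	pool := coordinator.NewBuilderPool()

	b := pool.Get()
	b.Reset() // defensive: a pooled builder may hold previous contents
	b.WriteString("cpu,hostname=server01")
	key := b.String()
	pool.Put(b)
	_ = key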

type ClusterShardMapper

type ClusterShardMapper struct {
	//Node   *meta.Node
	Logger *logger.Logger
	// Remote execution timeout
	Timeout time.Duration
	meta.MetaClient
	NetStore  netstorage.Storage
	SeriesKey []byte
}

ClusterShardMapper implements a ShardMapper for remote shards.
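
A hedged construction sketch using the exported fields listed above; metaClient and netStore are assumed to be set up elsewhere:

	csm := &coordinator.ClusterShardMapper{
		Timeout:  10 * time.Second, // remote execution timeout
		NetStore: netStore,         // a netstorage.Storage implementation
	}
	csm.MetaClient = metaClient // embedded meta.MetaClient
	defer csm.Close()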

func (*ClusterShardMapper) Close

func (csm *ClusterShardMapper) Close() error

func (*ClusterShardMapper) GetSeriesKey

func (csm *ClusterShardMapper) GetSeriesKey() []byte

func (*ClusterShardMapper) MapShards

type ClusterShardMapping

type ClusterShardMapping struct {
	//Node        *meta.Node
	ShardMapper *ClusterShardMapper
	NetStore    netstorage.Storage

	MetaClient meta.MetaClient

	// Remote execution timeout
	Timeout time.Duration

	ShardMap map[Source]map[uint32][]uint64

	// MinTime is the minimum time that this shard mapper will allow.
	// Any attempt to use a time before this one will automatically result in using
	// this time instead.
	MinTime time.Time

	// MaxTime is the maximum time that this shard mapper will allow.
	// Any attempt to use a time after this one will automatically result in using
	// this time instead.
	MaxTime time.Time

	ShardsTimeRage influxql.TimeRange
	Logger         *logger.Logger
}

ClusterShardMapping maps data sources to a list of shard information.
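
ShardMap keys shard IDs first by Source and then by a uint32 key (presumably a pt ID), so an already-populated *ClusterShardMapping can be walked like this sketch:

	for src, ptShards := range mapping.ShardMap {
		for pt, shardIDs := range ptShards {
			fmt.Printf("db=%s rp=%s pt=%d shards=%v\n",
				src.Database, src.RetentionPolicy, pt, shardIDs)
		}
	}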

func (*ClusterShardMapping) Close

func (csm *ClusterShardMapping) Close() error

Close clears out the list of mapped shards.

func (*ClusterShardMapping) CreateLogicalPlan

func (csm *ClusterShardMapping) CreateLogicalPlan(ctx context.Context, sources influxql.Sources, schema hybridqp.Catalog) (hybridqp.QueryNode, error)

func (*ClusterShardMapping) FieldDimensions

func (csm *ClusterShardMapping) FieldDimensions(m *influxql.Measurement) (fields map[string]influxql.DataType, dimensions map[string]struct{}, schema *influxql.Schema, err error)

func (*ClusterShardMapping) GetSources

func (csm *ClusterShardMapping) GetSources(sources influxql.Sources) influxql.Sources

func (*ClusterShardMapping) LogicalPlanCost

func (*ClusterShardMapping) MapType

func (*ClusterShardMapping) MapTypeBatch

func (csm *ClusterShardMapping) MapTypeBatch(m *influxql.Measurement, fields map[string]*influxql.FieldNameSpace, schema *influxql.Schema) error

func (*ClusterShardMapping) NodeNumbers

func (csm *ClusterShardMapping) NodeNumbers() int

func (*ClusterShardMapping) ShardsTimeRange

func (csm *ClusterShardMapping) ShardsTimeRange() influxql.TimeRange

type FieldCall added in v1.0.0

type FieldCall struct {
	// contains filtered or unexported fields
}

func BuildFieldCall added in v1.0.0

func BuildFieldCall(info *meta2.StreamInfo, srcSchema map[string]int32, destSchema map[string]int32) ([]FieldCall, error)

type IMetaExecutor

type IMetaExecutor interface {
	SetTimeOut(timeout time.Duration)
	EachDBNodes(database string, fn func(nodeID uint64, pts []uint32, hasErr *bool) error) error
	Close() error
}

type MetaExecutor

type MetaExecutor struct {
	Logger     *logger.Logger
	MetaClient meta.MetaClient
	// contains filtered or unexported fields
}

MetaExecutor executes meta queries on all data nodes.

func NewMetaExecutor

func NewMetaExecutor() *MetaExecutor

NewMetaExecutor returns a new initialized *MetaExecutor.

func (*MetaExecutor) Close

func (m *MetaExecutor) Close() error

func (*MetaExecutor) EachDBNodes

func (m *MetaExecutor) EachDBNodes(database string, fn func(nodeID uint64, pts []uint32, hasError *bool) error) error
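
A hedged usage sketch; metaClient is assumed to be an initialized meta.MetaClient, and the exact role of the hasError flag is an assumption:

	me := coordinator.NewMetaExecutor()
	me.MetaClient = metaClient
	me.SetTimeOut(10 * time.Second)
	defer me.Close()

	err := me.EachDBNodes("db0", func(nodeID uint64, pts []uint32, hasError *bool) error {
		// called per data node with the pts it owns
		fmt.Printf("node %d owns pts %v\n", nodeID, pts)
		return nil
	})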

func (*MetaExecutor) SetTimeOut

func (m *MetaExecutor) SetTimeOut(timeout time.Duration)

type PWMetaClient added in v1.0.0

type PWMetaClient interface {
	Database(name string) (di *meta2.DatabaseInfo, err error)
	RetentionPolicy(database, policy string) (*meta2.RetentionPolicyInfo, error)
	CreateShardGroup(database, policy string, timestamp time.Time) (*meta2.ShardGroupInfo, error)
	DBPtView(database string) (meta2.DBPtInfos, error)
	Measurement(database string, rpName string, mstName string) (*meta2.MeasurementInfo, error)
	UpdateSchema(database string, retentionPolicy string, mst string, fieldToCreate []*proto2.FieldSchema) error
	CreateMeasurement(database string, retentionPolicy string, mst string, shardKey *meta2.ShardKeyInfo, indexR *meta2.IndexRelation) (*meta2.MeasurementInfo, error)
	GetAliveShards(database string, sgi *meta2.ShardGroupInfo) []int
	GetStreamInfos() map[string]*meta2.StreamInfo
	GetDstStreamInfos(db, rp string, dstSis *[]*meta2.StreamInfo) bool
}

type PointsWriter

type PointsWriter struct {
	MetaClient PWMetaClient

	TSDBStore interface {
		WriteRows(nodeID uint64, database, rp string, pt uint32, shard uint64, streamShardIdList []uint64, rows *[]influx.Row, timeout time.Duration) error
	}
	// contains filtered or unexported fields
}

PointsWriter handles writes across multiple local and remote data nodes.

func NewPointsWriter

func NewPointsWriter(timeout time.Duration) *PointsWriter

NewPointsWriter returns a new instance of PointsWriter for a node.

func (*PointsWriter) ApplyTimeRangeLimit added in v1.0.0

func (w *PointsWriter) ApplyTimeRangeLimit(limit []toml.Duration)

func (*PointsWriter) Close added in v1.0.0

func (w *PointsWriter) Close()

func (*PointsWriter) MapRowToMeasurement added in v1.0.0

func (w *PointsWriter) MapRowToMeasurement(ctx *injestionCtx, id uint64, mst string, r *influx.Row) error

func (*PointsWriter) MapRowToShard

func (w *PointsWriter) MapRowToShard(ctx *injestionCtx, id string, r *influx.Row) error

func (*PointsWriter) RetryWritePointRows added in v1.0.0

func (w *PointsWriter) RetryWritePointRows(database, retentionPolicy string, rows []influx.Row) error

RetryWritePointRows makes sure the SQL client has the latest metadata before writing.
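
A hedged write-path sketch; metaClient, store, and the rows slice (parsed from line protocol upstream) are assumed to exist:

	w := coordinator.NewPointsWriter(10 * time.Second)
	w.MetaClient = metaClient // a PWMetaClient implementation
	w.TSDBStore = store       // anything with the WriteRows method shown above
	defer w.Close()

	if err := w.RetryWritePointRows("db0", "autogen", rows); err != nil {
		log.Println("write failed:", err)
	}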

type ShowTagValuesExecutor

type ShowTagValuesExecutor struct {
	// contains filtered or unexported fields
}

func NewShowTagValuesExecutor

func NewShowTagValuesExecutor(logger *logger.Logger, mc meta.MetaClient, me IMetaExecutor, store netstorage.Storage) *ShowTagValuesExecutor

func (*ShowTagValuesExecutor) Cardinality

func (e *ShowTagValuesExecutor) Cardinality(dimensions influxql.Dimensions)

func (*ShowTagValuesExecutor) Execute

type Source

type Source struct {
	Database        string
	RetentionPolicy string
}

Source contains the database and retention policy source for data.

type Stream added in v1.0.0

type Stream struct {
	TSDBStore TSDBStore

	MetaClient PWMetaClient
	// contains filtered or unexported fields
}

func NewStream added in v1.0.0

func NewStream(tsdbStore TSDBStore, metaClient PWMetaClient, logger *logger.Logger, timeout time.Duration) *Stream

func (*Stream) GenerateGroupKey added in v1.0.0

func (s *Stream) GenerateGroupKey(ctx *streamCtx, keys []string, value *influx.Row) (string, error)

type TSDBStore added in v1.0.0

type TSDBStore interface {
	WriteRows(nodeID uint64, database, rp string, pt uint32, shard uint64, streamShardIdList []uint64, rows *[]influx.Row, timeout time.Duration) error
}

type TagValuesSlice

type TagValuesSlice []netstorage.TableTagSets

func (TagValuesSlice) Len

func (a TagValuesSlice) Len() int

func (TagValuesSlice) Less

func (a TagValuesSlice) Less(i, j int) bool

func (TagValuesSlice) Swap

func (a TagValuesSlice) Swap(i, j int)
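
Len, Less, and Swap make TagValuesSlice satisfy sort.Interface, so tag-value results can be ordered with the standard library:

	sets := coordinator.TagValuesSlice(tableTagSets) // []netstorage.TableTagSets from the store (assumed)
	sort.Sort(sets)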
