Documentation ¶
Overview ¶
Package core Copyright 2022 PingCAP, Inc.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
Index ¶
- Constants
- Variables
- func AsSctx(pctx base.PlanContext) (sessionctx.Context, error)
- func BinaryPlanStrFromFlatPlan(explainCtx base.PlanContext, flat *FlatPhysicalPlan, briefBinaryPlan bool) string
- func BuildHandleColsForAnalyze(_ base.PlanContext, tblInfo *model.TableInfo, allColumns bool, ...) util.HandleCols
- func BuildLogicalPlanForTest(ctx context.Context, sctx sessionctx.Context, node *resolve.NodeW, ...) (base.Plan, error)
- func Cacheable(node ast.Node, is infoschema.InfoSchema) bool
- func CacheableWithCtx(sctx base.PlanContext, node ast.Node, is infoschema.InfoSchema) (bool, string)
- func CascadesOptimize(ctx context.Context, sctx base.PlanContext, flag uint64, ...) (base.LogicalPlan, base.PhysicalPlan, float64, error)
- func CheckCanConvertAggToProj(agg *logicalop.LogicalAggregation) bool
- func CheckParamTypeInt64orUint64(param *driver.ParamMarkerExpr) (bool, uint64)
- func CheckPrivilege(activeRoles []*auth.RoleIdentity, pm privilege.Manager, vs []visitInfo) error
- func CheckTableLock(ctx tablelock.TableLockReadContext, is infoschema.InfoSchema, vs []visitInfo) error
- func CheckTableMode(node *resolve.NodeW) error
- func CheckUpdateList(assignFlags []int, updt *physicalop.Update, ...) error
- func CollectFilters4MVIndexMutations(sctx base.PlanContext, filters []expression.Expression, ...) (accessFilters, remainingFilters []expression.Expression, mvColOffset int, ...)
- func ContainHeavyFunction(expr expression.Expression) bool
- func ConvertAggToProj(agg *logicalop.LogicalAggregation, schema *expression.Schema) (bool, *logicalop.LogicalProjection)
- func DoOptimize(ctx context.Context, sctx base.PlanContext, flag uint64, ...) (base.PhysicalPlan, float64, error)
- func EncodeFlatPlan(flat *FlatPhysicalPlan) string
- func EncodePlan(p base.Plan) string
- func EraseLastSemicolon(stmt ast.StmtNode)
- func EraseLastSemicolonInSQL(sql string) string
- func ExhaustPhysicalPlans4MockLogicalPlan(p *mockLogicalPlan4Test, prop *property.PhysicalProperty) ([]base.PhysicalPlan, bool, error)
- func ExplainFlatPlanInRowFormat(flat *FlatPhysicalPlan, format string, analyze bool, ...) (rows [][]string)
- func ExtractOuterApplyCorrelatedCols(p base.PhysicalPlan) []*expression.CorrelatedColumn
- func ExtractTableList(node *resolve.NodeW, asName bool) []*ast.TableName
- func FDToString(p base.LogicalPlan) string
- func FastClonePointGetForPlanCache(newCtx base.PlanContext, src, dst *physicalop.PointGetPlan) *physicalop.PointGetPlan
- func GenHintsFromFlatPlan(flat *FlatPhysicalPlan) []*ast.TableOptimizerHint
- func GenHintsFromPhysicalPlan(p base.Plan) []*ast.TableOptimizerHint
- func GetAnalyzeOptionDefaultV2ForTest() map[ast.AnalyzeOptionType]uint64
- func GetBriefBinaryPlan(p base.Plan) string
- func GetDBTableInfo(visitInfo []visitInfo) []stmtctx.TableEntry
- func GetExplainAnalyzeRowsForPlan(plan *Explain) (rows [][]string)
- func GetHashJoin(ge base.GroupExpression, la *logicalop.LogicalApply, ...) *physicalop.PhysicalHashJoin
- func GetMaxWriteSpeedFromExpression(opt *AlterDDLJobOpt) (maxWriteSpeed int64, err error)
- func GetParamSQLFromAST(stmt ast.StmtNode) (paramSQL string, paramVals []types.Datum, err error)
- func GetPhysicalIDsAndPartitionNames(tblInfo *model.TableInfo, partitionNames []ast.CIStr) ([]int64, []string, error)
- func GetPlanCost(p base.PhysicalPlan, taskType property.TaskType, ...) (float64, error)
- func GetPlanCostVer14PhysicalIndexMergeReader(pp base.PhysicalPlan, _ property.TaskType, option *costusage.PlanCostOption) (float64, error)
- func GetPlanCostVer24PhysicalIndexMergeReader(pp base.PhysicalPlan, taskType property.TaskType, ...) (costusage.CostVer2, error)
- func GetPlanFromPlanCache(ctx context.Context, sctx sessionctx.Context, isNonPrepared bool, ...) (plan base.Plan, names []*types.FieldName, err error)
- func GetStats4Test(p base.LogicalPlan) *property.StatsInfo
- func GetThreadOrBatchSizeFromExpression(opt *AlterDDLJobOpt) (int64, error)
- func GroupRangesByCols(ranges []*ranger.Range, groupByColIdxs []int) ([][]*ranger.Range, error)
- func InPrepare(p *preprocessor)
- func InTxnRetry(p *preprocessor)
- func InitTxnContextProvider(p *preprocessor)
- func InjectExtraProjection(plan base.PhysicalPlan) base.PhysicalPlan
- func InjectProjBelowAgg(aggPlan base.PhysicalPlan, aggFuncs []*aggregation.AggFuncDesc, ...) base.PhysicalPlan
- func InjectProjBelowSort(p base.PhysicalPlan, orderByItems []*util.ByItems) base.PhysicalPlan
- func IsASTCacheable(ctx context.Context, sctx base.PlanContext, node ast.Node, ...) (bool, string)
- func IsAutoCommitTxn(vars *variable.SessionVars) bool
- func IsPointGetWithPKOrUniqueKeyByAutoCommit(vars *variable.SessionVars, p base.Plan) bool
- func IsReadOnly(node ast.Node, vars *variable.SessionVars) bool
- func IsReadOnlyInternal(node ast.Node, vars *variable.SessionVars, checkGlobalVars bool) bool
- func IsSafeToReusePointGetExecutor(sctx sessionctx.Context, is infoschema.InfoSchema, stmt *PlanCacheStmt) bool
- func IsTiFlashContained(plan base.Plan) (tiFlashPushDown, tiFlashExchangePushDown bool)
- func JSONToString(j []*ExplainInfoForEncode) (string, error)
- func LogicalOptimizeTest(ctx context.Context, flag uint64, logic base.LogicalPlan) (base.LogicalPlan, error)
- func NewInstancePlanCache(softMemLimit, hardMemLimit int64) sessionctx.InstancePlanCache
- func NewPlanCacheKey(sctx sessionctx.Context, stmt *PlanCacheStmt) (key, binding string, cacheable bool, reason string, err error)
- func NewProjInjector() *projInjector
- func NonPreparedPlanCacheableWithCtx(sctx base.PlanContext, node ast.Node, is infoschema.InfoSchema) (ok bool, reason string)
- func NormalizeFlatPlan(flat *FlatPhysicalPlan) (normalized string, digest *parser.Digest)
- func NormalizePlan(p base.Plan) (normalized string, digest *parser.Digest)
- func ParameterizeAST(stmt ast.StmtNode) (paramSQL string, params []*driver.ValueExpr, err error)
- func Params2Expressions(params []types.Datum) []expression.Expression
- func ParseParameterizedSQL(sctx sessionctx.Context, paramSQL string) (ast.StmtNode, error)
- func PrepareIdxColsAndUnwrapArrayType(tableInfo *model.TableInfo, idxInfo *model.IndexInfo, ...) (idxCols []*expression.Column, ok bool)
- func Preprocess(ctx context.Context, sctx sessionctx.Context, node *resolve.NodeW, ...) error
- func RebuildPlan4CachedPlan(p base.Plan) (ok bool)
- func RecheckCTE(p base.LogicalPlan)
- func RecursiveDeriveStats4Test(p base.LogicalPlan) (*property.StatsInfo, bool, error)
- func RestoreASTWithParams(stmt ast.StmtNode, params []*driver.ValueExpr) error
- func SetParameterValuesIntoSCtx(sctx base.PlanContext, isNonPrep bool, markers []ast.ParamMarkerExpr, ...) error
- func SubstituteExpression(cond expression.Expression, lp base.LogicalPlan, exprToColumn ExprColumnMap, ...) bool
- func ToString(p base.Plan) string
- func TryAddExtraLimit(ctx sessionctx.Context, node ast.StmtNode) ast.StmtNode
- func TryFastPlan(ctx base.PlanContext, node *resolve.NodeW) (p base.Plan)
- func TurnNominalSortIntoProj(p base.PhysicalPlan, onlyColumn bool, orderByItems []*util.ByItems) base.PhysicalPlan
- func VisitInfo4PrivCheck(ctx context.Context, is infoschema.InfoSchema, node ast.Node, vs []visitInfo) (privVisitInfo []visitInfo)
- func VolcanoOptimize(ctx context.Context, sctx base.PlanContext, flag uint64, ...) (base.LogicalPlan, base.PhysicalPlan, float64, error)
- type AdminPlugins
- type AdminPluginsAction
- type AdminShowBDRRole
- type AggregateFuncExtractor
- type AggregationEliminator
- type AggregationPushDownSolver
- type AlterDDLJob
- type AlterDDLJobOpt
- type Analyze
- type AnalyzeColumnsTask
- type AnalyzeIndexTask
- type AnalyzeInfo
- type CancelDDLJobs
- type CheckIndexRange
- type CheckTable
- type ChecksumTable
- type CleanupIndex
- type ClusterLogTableExtractor
- type ClusterTableExtractor
- type CompactTable
- type ConvertOuterToInnerJoin
- type DDL
- type Deallocate
- type DecorrelateSolver
- type DeriveTopNFromWindow
- type DistributeTable
- type EliminateUnionAllDualItem
- type EmptySelectionEliminator
- type ErrExprLoc
- type Execute
- type Explain
- type ExplainInfoForEncode
- type ExprColumnMap
- type FlatOperator
- type FlatPhysicalPlan
- type FlatPlanTree
- type GcSubstituter
- type HotRegionsHistoryTableExtractor
- type ImportInto
- type IndexUsageIndexInfo
- type InfoSchemaBaseExtractor
- func (e *InfoSchemaBaseExtractor) ExplainInfo(_ base.PhysicalPlan) string
- func (e *InfoSchemaBaseExtractor) Extract(ctx base.PlanContext, schema *expression.Schema, names []*types.FieldName, ...) (remained []expression.Expression)
- func (e *InfoSchemaBaseExtractor) GetBase() *InfoSchemaBaseExtractor
- func (e *InfoSchemaBaseExtractor) ListSchemas(is infoschema.InfoSchema) []ast.CIStr
- func (e *InfoSchemaBaseExtractor) ListSchemasAndTables(ctx context.Context, is infoschema.InfoSchema) ([]ast.CIStr, []*model.TableInfo, error)
- type InfoSchemaCheckConstraintsExtractor
- type InfoSchemaColumnsExtractor
- type InfoSchemaDDLExtractor
- type InfoSchemaIndexesExtractor
- type InfoSchemaKeyColumnUsageExtractor
- type InfoSchemaPartitionsExtractor
- type InfoSchemaReferConstExtractor
- type InfoSchemaSchemataExtractor
- type InfoSchemaSequenceExtractor
- type InfoSchemaStatisticsExtractor
- type InfoSchemaTableConstraintsExtractor
- type InfoSchemaTablesExtractor
- type InfoSchemaTiDBCheckConstraintsExtractor
- type InfoSchemaTiDBIndexUsageExtractor
- type InfoSchemaViewsExtractor
- type InspectionResultTableExtractor
- type InspectionRuleTableExtractor
- type InspectionSummaryTableExtractor
- type JoinReOrderSolver
- type LRUPlanCache
- func (l *LRUPlanCache) Close()
- func (l *LRUPlanCache) Delete(key string)
- func (l *LRUPlanCache) DeleteAll()
- func (l *LRUPlanCache) Get(key string, paramTypes any) (value any, ok bool)
- func (l *LRUPlanCache) MemoryUsage() (sum int64)
- func (l *LRUPlanCache) Put(key string, value, paramTypes any)
- func (l *LRUPlanCache) SetCapacity(capacity uint) error
- func (l *LRUPlanCache) Size() int
- type LineFieldsInfo
- type LoadData
- type LoadDataOpt
- type LoadStats
- type LockStats
- type MetricSummaryTableExtractor
- type MetricTableExtractor
- func (e *MetricTableExtractor) ExplainInfo(pp base.PhysicalPlan) string
- func (e *MetricTableExtractor) Extract(ctx base.PlanContext, schema *expression.Schema, names []*types.FieldName, ...) []expression.Expression
- func (e *MetricTableExtractor) GetMetricTablePromQL(sctx base.PlanContext, lowerTableName string) string
- type OperatorLabel
- type OuterJoinEliminator
- type PBPlanBuilder
- type PPDSolver
- type PauseDDLJobs
- type PhysicalSimpleWrapper
- type PlanBuilder
- func (b *PlanBuilder) Build(ctx context.Context, node *resolve.NodeW) (base.Plan, error)
- func (b *PlanBuilder) BuildDataSourceFromView(ctx context.Context, dbName ast.CIStr, tableInfo *model.TableInfo, ...) (base.LogicalPlan, error)
- func (b *PlanBuilder) GetIsForUpdateRead() bool
- func (b *PlanBuilder) GetOptFlag() uint64
- func (b *PlanBuilder) GetVisitInfo() []visitInfo
- func (b *PlanBuilder) Init(sctx base.PlanContext, is infoschema.InfoSchema, processor *hint.QBHintHandler) (*PlanBuilder, []ast.HintTable)
- func (b *PlanBuilder) ResetForReuse() *PlanBuilder
- func (b *PlanBuilder) TableHints() *h.PlanHints
- type PlanBuilderOpt
- type PlanBuilderOptAllowCastArray
- type PlanBuilderOptNoExecution
- type PlanCacheKeyEnableInstancePlanCache
- type PlanCacheKeyTestClone
- type PlanCacheKeyTestIssue43667
- type PlanCacheKeyTestIssue46760
- type PlanCacheKeyTestIssue47133
- type PlanCacheStmt
- type PlanCacheValue
- type PlanReplayer
- type PointGetExecutorCache
- type PointPlanVal
- type Prepare
- type PreprocessOpt
- type PreprocessorReturn
- type ProjectionEliminator
- type PushDownSequenceSolver
- type PushDownTopNOptimizer
- type RecommendIndexPlan
- type RecoverIndex
- type ReloadExprPushdownBlacklist
- type ReloadOptRuleBlacklist
- type ResolveExpand
- type ResultReorder
- type ResumeDDLJobs
- type RuntimeFilterGenerator
- type SQLBindOpDetail
- type SQLBindOpType
- type SQLBindPlan
- type ScalarSubQueryExpr
- func (s *ScalarSubQueryExpr) CanonicalHashCode() []byte
- func (s *ScalarSubQueryExpr) Clone() expression.Expression
- func (*ScalarSubQueryExpr) ConstLevel() expression.ConstLevel
- func (s *ScalarSubQueryExpr) Decorrelate(*expression.Schema) expression.Expression
- func (s *ScalarSubQueryExpr) Equal(_ expression.EvalContext, e expression.Expression) bool
- func (s *ScalarSubQueryExpr) Equals(other any) bool
- func (s *ScalarSubQueryExpr) Eval(_ expression.EvalContext, _ chunk.Row) (types.Datum, error)
- func (*ScalarSubQueryExpr) EvalDecimal(_ expression.EvalContext, _ chunk.Row) (val *types.MyDecimal, isNull bool, err error)
- func (*ScalarSubQueryExpr) EvalDuration(_ expression.EvalContext, _ chunk.Row) (val types.Duration, isNull bool, err error)
- func (*ScalarSubQueryExpr) EvalInt(_ expression.EvalContext, _ chunk.Row) (val int64, isNull bool, err error)
- func (*ScalarSubQueryExpr) EvalJSON(_ expression.EvalContext, _ chunk.Row) (val types.BinaryJSON, isNull bool, err error)
- func (*ScalarSubQueryExpr) EvalReal(_ expression.EvalContext, _ chunk.Row) (val float64, isNull bool, err error)
- func (*ScalarSubQueryExpr) EvalString(_ expression.EvalContext, _ chunk.Row) (val string, isNull bool, err error)
- func (*ScalarSubQueryExpr) EvalTime(_ expression.EvalContext, _ chunk.Row) (val types.Time, isNull bool, err error)
- func (s *ScalarSubQueryExpr) ExplainInfo(expression.EvalContext) string
- func (s *ScalarSubQueryExpr) ExplainNormalizedInfo() string
- func (s *ScalarSubQueryExpr) GetType(_ expression.EvalContext) *types.FieldType
- func (s *ScalarSubQueryExpr) Hash64(h base2.Hasher)
- func (s *ScalarSubQueryExpr) HashCode() []byte
- func (*ScalarSubQueryExpr) IsCorrelated() bool
- func (s *ScalarSubQueryExpr) MemoryUsage() int64
- func (s *ScalarSubQueryExpr) RemapColumn(_ map[int64]*expression.Column) (expression.Expression, error)
- func (s *ScalarSubQueryExpr) ResolveIndices(_ *expression.Schema) (expression.Expression, error)
- func (s *ScalarSubQueryExpr) ResolveIndicesByVirtualExpr(_ expression.EvalContext, _ *expression.Schema) (expression.Expression, bool)
- func (s *ScalarSubQueryExpr) String() string
- func (s *ScalarSubQueryExpr) Traverse(_ expression.TraverseAction) expression.Expression
- func (*ScalarSubQueryExpr) VecEvalDecimal(_ expression.EvalContext, _ *chunk.Chunk, _ *chunk.Column) error
- func (*ScalarSubQueryExpr) VecEvalDuration(_ expression.EvalContext, _ *chunk.Chunk, _ *chunk.Column) error
- func (*ScalarSubQueryExpr) VecEvalInt(_ expression.EvalContext, _ *chunk.Chunk, _ *chunk.Column) error
- func (*ScalarSubQueryExpr) VecEvalJSON(_ expression.EvalContext, _ *chunk.Chunk, _ *chunk.Column) error
- func (*ScalarSubQueryExpr) VecEvalReal(_ expression.EvalContext, _ *chunk.Chunk, _ *chunk.Column) error
- func (*ScalarSubQueryExpr) VecEvalString(_ expression.EvalContext, _ *chunk.Chunk, _ *chunk.Column) error
- func (*ScalarSubQueryExpr) VecEvalTime(_ expression.EvalContext, _ *chunk.Chunk, _ *chunk.Column) error
- func (*ScalarSubQueryExpr) Vectorized() bool
- type ScalarSubqueryEvalCtx
- type SelectInto
- type SemiJoinRewriter
- type Set
- type SetConfig
- type ShowBaseExtractor
- type ShowDDL
- type ShowDDLJobQueries
- type ShowDDLJobQueriesWithRange
- type ShowNextRowID
- type ShowSlow
- type Simple
- type SkewDistinctAggRewriter
- type SlowQueryExtractor
- type SplitRegion
- type SplitRegionStatus
- type StatementsSummaryExtractor
- type TableStorageStatsExtractor
- type TiFlashSystemTableExtractor
- type TiKVRegionStatusExtractor
- type TikvRegionPeersExtractor
- type TimeRange
- type Trace
- type Traffic
- type UnlockStats
- type V2AnalyzeOptions
- type WindowFuncExtractor
- type WorkloadRepoCreate
Constants ¶
const (
	// AlterDDLJobThread alter reorg worker count
	AlterDDLJobThread = "thread"
	// AlterDDLJobBatchSize alter reorg batch size
	AlterDDLJobBatchSize = "batch_size"
	// AlterDDLJobMaxWriteSpeed alter reorg max write speed
	AlterDDLJobMaxWriteSpeed = "max_write_speed"
)
const (
	// ErrExprInSelect is in select fields for the error of ErrFieldNotInGroupBy
	ErrExprInSelect = "SELECT list"
	// ErrExprInOrderBy is in order by items for the error of ErrFieldNotInGroupBy
	ErrExprInOrderBy = "ORDER BY"
)
const (
	TableSchema      = "table_schema"
	TableName        = "table_name"
	TidbTableID      = "tidb_table_id"
	PartitionName    = "partition_name"
	TidbPartitionID  = "tidb_partition_id"
	IndexName        = "index_name"
	SchemaName       = "schema_name"
	DBName           = "db_name"
	ConstraintSchema = "constraint_schema"
	ConstraintName   = "constraint_name"
	TableID          = "table_id"
	SequenceSchema   = "sequence_schema"
	SequenceName     = "sequence_name"
	ColumnName       = "column_name"
	DDLStateName     = "state"
)
const (
	// HotRegionTypeRead hot read region.
	HotRegionTypeRead = "read"
	// HotRegionTypeWrite hot write region.
	HotRegionTypeWrite = "write"
)
const (
	// MinNumRows provides a minimum to avoid underestimation. As selectivity estimation approaches
	// zero, all plan choices result in a low cost - making it difficult to differentiate plan choices.
	// A low value of 1.0 here is used for most (non probe accesses) to reduce this risk.
	MinNumRows = 1.0
	// MinRowSize provides a minimum column length to ensure that any adjustment or calculation
	// in costing does not go below this value. 2.0 is used as a reasonable lowest column length.
	MinRowSize = 2.0
	// TiFlashStartupRowPenalty applies a startup penalty for TiFlash scan to encourage TiKV usage for small scans
	TiFlashStartupRowPenalty = 10000
	// MaxPenaltyRowCount applies a penalty for high risk scans
	MaxPenaltyRowCount = 1000
)
const (
	// TraceFormatRow indicates row tracing format.
	TraceFormatRow = "row"
	// TraceFormatJSON indicates json tracing format.
	TraceFormatJSON = "json"
	// TraceFormatLog indicates log tracing format.
	TraceFormatLog = "log"
	// TracePlanTargetEstimation indicates CE trace target for optimizer trace.
	TracePlanTargetEstimation = "estimation"
	// TracePlanTargetDebug indicates debug trace target for optimizer trace.
	TracePlanTargetDebug = "debug"
)
const (
	// TypeInvalid for unexpected types.
	TypeInvalid byte = iota
	// TypeSelect for SelectStmt.
	TypeSelect
	// TypeSetOpr for SetOprStmt.
	TypeSetOpr
	// TypeDelete for DeleteStmt.
	TypeDelete
	// TypeUpdate for UpdateStmt.
	TypeUpdate
	// TypeInsert for InsertStmt.
	TypeInsert
	// TypeDrop for DropStmt
	TypeDrop
	// TypeCreate for CreateStmt
	TypeCreate
	// TypeAlter for AlterStmt
	TypeAlter
	// TypeRename for RenameStmt
	TypeRename
	// TypeRepair for RepairStmt
	TypeRepair
	// TypeShow for ShowStmt
	TypeShow
	// TypeExecute for ExecuteStmt
	TypeExecute
	// TypeImportInto for ImportIntoStmt
	TypeImportInto
)
const GlobalWithoutColumnPos = -1
GlobalWithoutColumnPos marks that the index has no partition column.
const (
// MaxCacheableLimitCount is the max limit count for cacheable query.
MaxCacheableLimitCount = 10000
)
const PointPlanKey = stringutil.StringerStr("pointPlanKey")
PointPlanKey is used to get the point plan that is pre-built for a multi-statement query.
Variables ¶
var (
	// ImportIntoSchemaFTypes stores the field types of the show import jobs schema.
	ImportIntoSchemaFTypes = []byte{
		mysql.TypeLonglong, mysql.TypeString, mysql.TypeString, mysql.TypeString,
		mysql.TypeLonglong, mysql.TypeString, mysql.TypeString, mysql.TypeString,
		mysql.TypeLonglong, mysql.TypeString, mysql.TypeTimestamp, mysql.TypeTimestamp,
		mysql.TypeTimestamp, mysql.TypeString, mysql.TypeTimestamp, mysql.TypeString,
		mysql.TypeString, mysql.TypeString, mysql.TypeString, mysql.TypeString,
		mysql.TypeString,
	}

	// ImportIntoFieldMap stores the mapping from field names to their indices.
	// As there are many test cases that use the index to check the result from
	// `SHOW IMPORT JOBS`, this structure is used to avoid hardcoding these indexes,
	// so adding new fields does not require modifying all the tests.
	ImportIntoFieldMap = make(map[string]int)

	// ImportIntoDataSource is used in plannererrors.ErrLoadDataInvalidURI.
	ImportIntoDataSource = "data source"
)
var AllowCartesianProduct = atomic.NewBool(true)
AllowCartesianProduct controls whether TiDB allows a cartesian join without equal conditions.
var CMSketchSizeLimit = kv.TxnEntrySizeLimit.Load() / binary.MaxVarintLen32
CMSketchSizeLimit indicates the size limit of CMSketch.
var DefaultDisabledLogicalRulesList *atomic.Value
DefaultDisabledLogicalRulesList indicates the logical rules which should be banned.
var EvalSubqueryFirstRow func(ctx context.Context, p base.PhysicalPlan, is infoschema.InfoSchema, sctx base.PlanContext) (row []types.Datum, err error)
EvalSubqueryFirstRow evaluates an uncorrelated subquery once and gets its first row.
var GenPlanCostTrace func(p base.PhysicalPlan, costV *costusage.CostVer2, taskType property.TaskType, option *costusage.PlanCostOption)
GenPlanCostTrace defines a hook function to customize the cost calculation.
var HeavyFunctionNameMap = map[string]struct{}{
"vec_cosine_distance": {},
"vec_l1_distance": {},
"vec_l2_distance": {},
"vec_negative_inner_product": {},
"vec_dims": {},
"vec_l2_norm": {},
}
HeavyFunctionNameMap stores the names of functions that are worth applying HeavyFunctionOptimize to. Currently this only applies to Vector data types and their functions. HeavyFunctionOptimize eliminates the usage of the function in TopN operators to avoid re-calculating vector distances for the TopN in the root task.
var (
	// MaxMemoryLimitForOverlongType is the memory limit for the overlong type column check.
	// Why is it not 128?
	// Because many customers allocate a portion of memory to their management programs,
	// the actual amount of usable memory does not align to 128GB.
	// TODO: We are also lacking test data for instances with less than 128GB of memory, so we need to plan the rules here.
	// TODO: internal sql can force to use chunk reuse if we ensure the memory usage is safe.
	// TODO: We can consider the limit/Topn in the future.
	MaxMemoryLimitForOverlongType = 120 * size.GB
)
var OptimizeAstNode func(ctx context.Context, sctx sessionctx.Context, node *resolve.NodeW, is infoschema.InfoSchema) (base.Plan, types.NameSlice, error)
OptimizeAstNode optimizes the query to a physical plan directly.
var OptimizeAstNodeNoCache func(ctx context.Context, sctx sessionctx.Context, node *resolve.NodeW, is infoschema.InfoSchema) (base.Plan, types.NameSlice, error)
OptimizeAstNodeNoCache bypasses the plan cache and generates a physical plan directly.
var (
	// PreparedPlanCacheMaxMemory stores the max memory size defined in the global config "performance-server-memory-quota".
	PreparedPlanCacheMaxMemory = *atomic2.NewUint64(math.MaxUint64)
)
Functions ¶
func AsSctx ¶
func AsSctx(pctx base.PlanContext) (sessionctx.Context, error)
AsSctx converts PlanContext to sessionctx.Context.
func BinaryPlanStrFromFlatPlan ¶
func BinaryPlanStrFromFlatPlan(explainCtx base.PlanContext, flat *FlatPhysicalPlan, briefBinaryPlan bool) string
BinaryPlanStrFromFlatPlan generates the compressed and encoded binary plan from a FlatPhysicalPlan.
func BuildHandleColsForAnalyze ¶
func BuildHandleColsForAnalyze( _ base.PlanContext, tblInfo *model.TableInfo, allColumns bool, colsInfo []*model.ColumnInfo, ) util.HandleCols
BuildHandleColsForAnalyze returns HandleCols for ANALYZE.
func BuildLogicalPlanForTest ¶
func BuildLogicalPlanForTest(ctx context.Context, sctx sessionctx.Context, node *resolve.NodeW, infoSchema infoschema.InfoSchema) (base.Plan, error)
BuildLogicalPlanForTest builds a logical plan for testing purpose from ast.Node.
func Cacheable ¶
func Cacheable(node ast.Node, is infoschema.InfoSchema) bool
Cacheable checks whether the input AST (query) is cacheable with an empty session context, which is mainly for testing. TODO: only for test, remove this function later on.
func CacheableWithCtx ¶
func CacheableWithCtx(sctx base.PlanContext, node ast.Node, is infoschema.InfoSchema) (bool, string)
CacheableWithCtx checks whether the input AST (query) is cacheable. TODO: only for test, remove this function later on.
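A minimal sketch of how these checks might be called; `stmt` (a parsed ast.Node), `is` (an infoschema.InfoSchema), and `sctx` (a base.PlanContext) are placeholders assumed to be prepared by the caller:

	// Quick cacheability check without a session context (test-only helper).
	if Cacheable(stmt, is) {
		// the statement is eligible for the plan cache
	}

	// Check with a PlanContext to also get the reason when the statement is not cacheable.
	ok, reason := CacheableWithCtx(sctx, stmt, is)
	if !ok {
		// reason explains why the statement cannot be cached
		_ = reason
	}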
func CascadesOptimize ¶
func CascadesOptimize(ctx context.Context, sctx base.PlanContext, flag uint64, logic base.LogicalPlan) (base.LogicalPlan, base.PhysicalPlan, float64, error)
CascadesOptimize includes: normalization, cascadesOptimize, and physicalOptimize.
func CheckCanConvertAggToProj ¶
func CheckCanConvertAggToProj(agg *logicalop.LogicalAggregation) bool
CheckCanConvertAggToProj checks whether a special old aggregation (which has already been pushed down) can be converted to a projection. link: issue#44795
func CheckParamTypeInt64orUint64 ¶
func CheckParamTypeInt64orUint64(param *driver.ParamMarkerExpr) (bool, uint64)
CheckParamTypeInt64orUint64 checks the param type for the plan cache limit; only int64 and uint64 are allowed now, e.g. set @a = 1;
func CheckPrivilege ¶
func CheckPrivilege(activeRoles []*auth.RoleIdentity, pm privilege.Manager, vs []visitInfo) error
CheckPrivilege checks the privilege for a user.
func CheckTableLock ¶
func CheckTableLock(ctx tablelock.TableLockReadContext, is infoschema.InfoSchema, vs []visitInfo) error
CheckTableLock checks the table lock.
func CheckTableMode ¶
CheckTableMode checks if the table is accessible by table mode, only TableModeNormal can be accessed.
func CheckUpdateList ¶
func CheckUpdateList(assignFlags []int, updt *physicalop.Update, newTblID2Table map[int64]table.Table) error
CheckUpdateList checks that all related columns are in an updatable state.
func CollectFilters4MVIndexMutations ¶
func CollectFilters4MVIndexMutations(sctx base.PlanContext, filters []expression.Expression, idxCols []*expression.Column) (accessFilters, remainingFilters []expression.Expression, mvColOffset int, mvFilterMutations []expression.Expression)
CollectFilters4MVIndexMutations is exported for unit tests. For idx(x, cast(a as array), z), `x=1 and (2 member of a) and (1 member of a) and z=1 and x+z>0` is split into the following accessFilters combinations:
1: `x=1 and (2 member of a) and z=1`, remaining: `x+z>0`.
2: `x=1 and (1 member of a) and z=1`, remaining: `x+z>0`.
Q: For a case like idx(x, cast(a as array), z) with a condition like `x=1 and x=2 and (2 member of a)`, can we derive that x has an invalid range? A: That case never reaches here; the conditions derive an empty range on the table path, and the heuristic rule picks the table-dual path directly.
Theoretically, for idx(x, cast(a as array), z), `x=1 and x=2 and (2 member of a) and (1 member of a) and z=1 and x+z>0` should be split into:
1: `x=1 and x=2 and (2 member of a) and z=1`, remaining: `x+z>0`.
2: `x=1 and x=2 and (1 member of a) and z=1`, remaining: `x+z>0`.
Note: x=1 and x=2 derives an invalid range in ranger detach; because of the heuristic rule above, we ignore this case here for now.
This follows the 3rd point described in generateANDIndexMerge4ComposedIndex:
3: The predicate of an mv index cannot converge to a linear interval range at the physical phase like EQ and GT on a normal index. Among the mv-index predicates (member-of/contains/overlap), multiple conditions should each be built as a self-independent index path, deriving the final intersection/union of handles, which means an mv index path may be reused for multiple related conditions. Whether (2 member of a) AND (1 member of a) forms a valid composed range or an empty range cannot be told until the intersection/union is computed at runtime.
Therefore, for multiple conditions on a single mv-index virtual json column, such as (2 member of a) and (1 member of a), we should build an IndexMerge above them, and each of them can access the same mv index. That's why we should derive the mutations of the virtual json column's access condition and output one accessFilter combination for each mutation.
In the first case: the inputs will be:
filters:[x=1, (2 member of a), (1 member of a), z=1, x+z>0], idxCols: [x,a,z]
the output will be:
accessFilters: [x=1, (2 member of a), z=1], remainingFilters: [x+z>0], mvColOffset: 1, mvFilterMutations[(2 member of a), (1 member of a)]
the outer usage will be: accessFilter[mvColOffset] = each element of mvFilterMutations to get the mv access filters mutation combination.
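A hedged sketch of the shape of that outer usage; `sctx`, `filters`, and `idxCols` are assumed to be prepared by the caller as in the example above:

	accessFilters, remainingFilters, mvColOffset, mvFilterMutations :=
		CollectFilters4MVIndexMutations(sctx, filters, idxCols)
	// Build one access-filter combination per mutation of the mv-column condition.
	for _, mutation := range mvFilterMutations {
		accessFilters[mvColOffset] = mutation
		// ... build an index path from this combination; remainingFilters stay as table filters.
	}
	_ = remainingFilters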
func ContainHeavyFunction ¶
func ContainHeavyFunction(expr expression.Expression) bool
ContainHeavyFunction checks if the expr contains a function that needs HeavyFunctionOptimize. Currently this only applies to Vector data types and their functions. HeavyFunctionOptimize eliminates the usage of the function in TopN operators to avoid re-calculating vector distances for the TopN in the root task.
func ConvertAggToProj ¶
func ConvertAggToProj(agg *logicalop.LogicalAggregation, schema *expression.Schema) (bool, *logicalop.LogicalProjection)
ConvertAggToProj converts an aggregation to a projection.
func DoOptimize ¶
func DoOptimize( ctx context.Context, sctx base.PlanContext, flag uint64, logic base.LogicalPlan, ) (base.PhysicalPlan, float64, error)
DoOptimize optimizes a logical plan to a physical plan.
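A minimal sketch of a direct call, assuming `ctx`, a PlanContext `sctx`, the optimization `flag`, and an already-built logical plan `logic` are available (all placeholders from the caller):

	physical, cost, err := DoOptimize(ctx, sctx, flag, logic)
	if err != nil {
		// optimization failed
	}
	// cost is the estimated cost of the chosen physical plan.
	_, _ = physical, cost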
func EncodeFlatPlan ¶
func EncodeFlatPlan(flat *FlatPhysicalPlan) string
EncodeFlatPlan encodes a FlatPhysicalPlan with compression.
func EncodePlan ¶
EncodePlan encodes the plan to a plan tree string with compression. Deprecated: FlattenPhysicalPlan() + EncodeFlatPlan() is preferred.
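A sketch of the preferred replacement path mentioned above, assuming `p` is a base.Plan produced by the optimizer:

	flat := FlattenPhysicalPlan(p, true) // buildSideFirst keeps the build side before the probe side
	encoded := EncodeFlatPlan(flat)      // compressed, encoded plan string
	_ = encoded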
func EraseLastSemicolon ¶
EraseLastSemicolon removes the last semicolon of the SQL statement.
func EraseLastSemicolonInSQL ¶
EraseLastSemicolonInSQL removes the last semicolon of the SQL string.
func ExhaustPhysicalPlans4MockLogicalPlan ¶
func ExhaustPhysicalPlans4MockLogicalPlan(p *mockLogicalPlan4Test, prop *property.PhysicalProperty) ([]base.PhysicalPlan, bool, error)
ExhaustPhysicalPlans4MockLogicalPlan iterates physical implementations over the mock logical plan.
func ExplainFlatPlanInRowFormat ¶
func ExplainFlatPlanInRowFormat(flat *FlatPhysicalPlan, format string, analyze bool, runtimeStatsColl *execdetails.RuntimeStatsColl) (rows [][]string)
ExplainFlatPlanInRowFormat returns the explain result in row format.
func ExtractOuterApplyCorrelatedCols ¶
func ExtractOuterApplyCorrelatedCols(p base.PhysicalPlan) []*expression.CorrelatedColumn
ExtractOuterApplyCorrelatedCols only extracts the correlated columns whose corresponding Apply operator is outside the plan. For Plan-1, ExtractOuterApplyCorrelatedCols(CTE-1) will return cor_col_1. Plan-1:
Apply_1
 |_ outerSide
 |_ CTEExec(CTE-1)
CTE-1
 |_ Selection(cor_col_1)
For Plan-2, ExtractOuterApplyCorrelatedCols(CTE-2) will not return cor_col_3, because Apply_3 is inside CTE-2. Plan-2:
Apply_2
|_ outerSide
|_ Selection(cor_col_2)
|_CTEExec(CTE-2)
CTE-2
|_ Apply_3
|_ outerSide
|_ innerSide(cor_col_3)
func ExtractTableList ¶
ExtractTableList is a wrapper for tableListExtractor and removes duplicate TableNames. If asName is true, AsName is extracted prior to OrigName. Privilege checks should use OrigName, while expressions may use AsName.
func FDToString ¶
func FDToString(p base.LogicalPlan) string
FDToString explains fd transfer over a Plan, returns description string.
func FastClonePointGetForPlanCache ¶
func FastClonePointGetForPlanCache(newCtx base.PlanContext, src, dst *physicalop.PointGetPlan) *physicalop.PointGetPlan
FastClonePointGetForPlanCache is a fast path to clone a PointGetPlan for plan cache.
func GenHintsFromFlatPlan ¶
func GenHintsFromFlatPlan(flat *FlatPhysicalPlan) []*ast.TableOptimizerHint
GenHintsFromFlatPlan generates hints from a FlatPhysicalPlan.
func GenHintsFromPhysicalPlan ¶
func GenHintsFromPhysicalPlan(p base.Plan) []*ast.TableOptimizerHint
GenHintsFromPhysicalPlan generates hints from physical plan.
func GetAnalyzeOptionDefaultV2ForTest ¶
func GetAnalyzeOptionDefaultV2ForTest() map[ast.AnalyzeOptionType]uint64
GetAnalyzeOptionDefaultV2ForTest returns the default analyze options for test.
func GetBriefBinaryPlan ¶
GetBriefBinaryPlan returns the brief binary plan of the plan for EXPLAIN FOR.
func GetDBTableInfo ¶
func GetDBTableInfo(visitInfo []visitInfo) []stmtctx.TableEntry
GetDBTableInfo gets the accessed dbs and tables info.
func GetExplainAnalyzeRowsForPlan ¶
GetExplainAnalyzeRowsForPlan gets the explain rows for the plan.
func GetHashJoin ¶
func GetHashJoin(ge base.GroupExpression, la *logicalop.LogicalApply, prop *property.PhysicalProperty) *physicalop.PhysicalHashJoin
GetHashJoin is public for cascades planner.
func GetMaxWriteSpeedFromExpression ¶
func GetMaxWriteSpeedFromExpression(opt *AlterDDLJobOpt) (maxWriteSpeed int64, err error)
GetMaxWriteSpeedFromExpression gets the numeric value of the max write speed from the expression.
func GetParamSQLFromAST ¶
GetParamSQLFromAST returns the parameterized SQL of this AST. NOTICE: this function does not modify the original AST. paramVals are copied from this AST.
func GetPhysicalIDsAndPartitionNames ¶
func GetPhysicalIDsAndPartitionNames(tblInfo *model.TableInfo, partitionNames []ast.CIStr) ([]int64, []string, error)
GetPhysicalIDsAndPartitionNames returns physical IDs and names of these partitions.
func GetPlanCost ¶
func GetPlanCost(p base.PhysicalPlan, taskType property.TaskType, option *costusage.PlanCostOption) (float64, error)
GetPlanCost returns the cost of this plan.
func GetPlanCostVer14PhysicalIndexMergeReader ¶
func GetPlanCostVer14PhysicalIndexMergeReader(pp base.PhysicalPlan, _ property.TaskType, option *costusage.PlanCostOption) (float64, error)
GetPlanCostVer14PhysicalIndexMergeReader calculates the cost of the plan if it has not been calculated yet and returns the cost.
func GetPlanCostVer24PhysicalIndexMergeReader ¶
func GetPlanCostVer24PhysicalIndexMergeReader(pp base.PhysicalPlan, taskType property.TaskType, option *costusage.PlanCostOption, _ ...bool) (costusage.CostVer2, error)
GetPlanCostVer24PhysicalIndexMergeReader returns the plan-cost of this sub-plan, which is:
plan-cost = table-side-cost + sum(index-side-cost)
index-side-cost = (index-child-cost + index-net-cost) / dist-concurrency # same with IndexReader
table-side-cost = (table-child-cost + table-net-cost) / dist-concurrency # same with TableReader
func GetPlanFromPlanCache ¶
func GetPlanFromPlanCache(ctx context.Context, sctx sessionctx.Context, isNonPrepared bool, is infoschema.InfoSchema, stmt *PlanCacheStmt, params []expression.Expression) (plan base.Plan, names []*types.FieldName, err error)
GetPlanFromPlanCache is the entry point of Plan Cache. It tries to get a valid cached plan from plan cache. If there is no such a plan, it'll call the optimizer to generate a new one. isNonPrepared indicates whether to use the non-prepared plan cache or the prepared plan cache.
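A hedged sketch of the calling convention, assuming the surrounding executor already holds `ctx`, `sctx`, `is`, a prepared PlanCacheStmt `stmt`, and the current parameter values `paramVals` ([]types.Datum), all placeholders:

	params := Params2Expressions(paramVals)
	// false selects the prepared plan cache; pass true for the non-prepared path.
	plan, names, err := GetPlanFromPlanCache(ctx, sctx, false, is, stmt, params)
	if err != nil {
		// fall back or report the error
	}
	_, _ = plan, names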
func GetStats4Test ¶
func GetStats4Test(p base.LogicalPlan) *property.StatsInfo
GetStats4Test is an exporter just for tests.
func GetThreadOrBatchSizeFromExpression ¶
func GetThreadOrBatchSizeFromExpression(opt *AlterDDLJobOpt) (int64, error)
GetThreadOrBatchSizeFromExpression gets the numeric value of the thread or batch size from the expression.
func GroupRangesByCols ¶
GroupRangesByCols groups the ranges by the values of the columns specified by groupByColIdxs.
func InPrepare ¶
func InPrepare(p *preprocessor)
InPrepare is a PreprocessOpt that indicates preprocess is executing under prepare statement.
func InTxnRetry ¶
func InTxnRetry(p *preprocessor)
InTxnRetry is a PreprocessOpt that indicates preprocess is executing under transaction retry.
func InitTxnContextProvider ¶
func InitTxnContextProvider(p *preprocessor)
InitTxnContextProvider is a PreprocessOpt that indicates preprocess should init the transaction's context.
func InjectExtraProjection ¶
func InjectExtraProjection(plan base.PhysicalPlan) base.PhysicalPlan
InjectExtraProjection is used to extract the expressions of specific operators into a physical Projection operator and inject the Projection below the operators, so that we can accelerate expression evaluation by eager evaluation. This function is called in two situations: 1. In postOptimize. 2. TiDB can be used as a coprocessor: when a plan tree has been pushed down to TiDB, we need to inject extra projections for the plan tree as well.
func InjectProjBelowAgg ¶
func InjectProjBelowAgg(aggPlan base.PhysicalPlan, aggFuncs []*aggregation.AggFuncDesc, groupByItems []expression.Expression) base.PhysicalPlan
InjectProjBelowAgg injects a ProjOperator below the AggOperator, so that all scalar functions in the aggregation may be sped up by vectorized evaluation in the `proj`. If all the args of `aggFuncs` and all the items of `groupByItems` are columns or constants, we do not need to build the `proj`.
func InjectProjBelowSort ¶
func InjectProjBelowSort(p base.PhysicalPlan, orderByItems []*util.ByItems) base.PhysicalPlan
InjectProjBelowSort extracts the ScalarFunctions of `orderByItems` into a PhysicalProjection and injects it below PhysicalTopN/PhysicalSort. The schema of PhysicalSort and PhysicalTopN are the same as the schema of their children. When a projection is injected as the child of PhysicalSort and PhysicalTopN, some extra columns will be added into the schema of the Projection, thus we need to add another Projection upon them to prune the redundant columns.
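A minimal sketch of the call, assuming `p` is a PhysicalSort/PhysicalTopN plan and `byItems` are its order-by items (both placeholders):

	// Extract the scalar functions in byItems into a projection below the sort,
	// so they are evaluated once instead of once per comparison.
	p = InjectProjBelowSort(p, byItems)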
func IsASTCacheable ¶
func IsASTCacheable(ctx context.Context, sctx base.PlanContext, node ast.Node, is infoschema.InfoSchema) (bool, string)
IsASTCacheable checks whether the input AST (query) is cacheable. It handles the "ignore_plan_cache()" hint; if there are multiple hints, only one will take effect.
func IsAutoCommitTxn ¶
func IsAutoCommitTxn(vars *variable.SessionVars) bool
IsAutoCommitTxn checks if the session is in autocommit mode and not InTxn; it is used for fast plans like point get.
func IsPointGetWithPKOrUniqueKeyByAutoCommit ¶
func IsPointGetWithPKOrUniqueKeyByAutoCommit(vars *variable.SessionVars, p base.Plan) bool
IsPointGetWithPKOrUniqueKeyByAutoCommit returns true when the plan meets the following conditions (see the sketch after this list):
- ctx is auto commit tagged
- session is not InTxn
- plan is point get by pk, or point get by unique index (no double read)
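A sketch of the typical guard, assuming `vars` are the current session variables and `p` is the candidate plan (placeholders from the caller):

	if IsAutoCommitTxn(vars) && IsPointGetWithPKOrUniqueKeyByAutoCommit(vars, p) {
		// safe to take the fast point-get execution path
	}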
func IsReadOnly ¶
func IsReadOnly(node ast.Node, vars *variable.SessionVars) bool
IsReadOnly checks whether the ast.Node is a read-only statement.
func IsReadOnlyInternal ¶
IsReadOnlyInternal checks whether the ast.Node is a read-only statement; if checkGlobalVars is true, false is returned when there are updates to global variables.
func IsSafeToReusePointGetExecutor ¶
func IsSafeToReusePointGetExecutor(sctx sessionctx.Context, is infoschema.InfoSchema, stmt *PlanCacheStmt) bool
IsSafeToReusePointGetExecutor checks whether this is a PointGet Plan and safe to reuse its executor.
func IsTiFlashContained ¶
IsTiFlashContained returns whether the plan contains TiFlash related executors.
func JSONToString ¶
func JSONToString(j []*ExplainInfoForEncode) (string, error)
JSONToString converts JSON to a string.
func LogicalOptimizeTest ¶
func LogicalOptimizeTest(ctx context.Context, flag uint64, logic base.LogicalPlan) (base.LogicalPlan, error)
LogicalOptimizeTest is just exported for test.
func NewInstancePlanCache ¶
func NewInstancePlanCache(softMemLimit, hardMemLimit int64) sessionctx.InstancePlanCache
NewInstancePlanCache creates a new instance level plan cache.
func NewPlanCacheKey ¶
func NewPlanCacheKey(sctx sessionctx.Context, stmt *PlanCacheStmt) (key, binding string, cacheable bool, reason string, err error)
NewPlanCacheKey creates the plan cache key for this statement. Note: lastUpdatedSchemaVersion will only be set in the case of rc or for update read in order to differentiate the cache key. In other cases, it will be 0. All information that might affect the plan should be considered in this function.
func NonPreparedPlanCacheableWithCtx ¶
func NonPreparedPlanCacheableWithCtx(sctx base.PlanContext, node ast.Node, is infoschema.InfoSchema) (ok bool, reason string)
NonPreparedPlanCacheableWithCtx checks whether this SQL is cacheable for non-prepared plan cache.
func NormalizeFlatPlan ¶
func NormalizeFlatPlan(flat *FlatPhysicalPlan) (normalized string, digest *parser.Digest)
NormalizeFlatPlan normalizes a FlatPhysicalPlan and generates plan digest.
func NormalizePlan ¶
NormalizePlan is used to normalize the plan and generate plan digest. Deprecated: FlattenPhysicalPlan() + NormalizeFlatPlan() is preferred.
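A sketch of the preferred path mentioned above, assuming `p` is a base.Plan:

	flat := FlattenPhysicalPlan(p, false)
	normalized, digest := NormalizeFlatPlan(flat)
	_, _ = normalized, digest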
func ParameterizeAST ¶
ParameterizeAST parameterizes this StmtNode. e.g. `select * from t where a<10 and b<23` --> `select * from t where a<? and b<?`, [10, 23]. NOTICE: this function may modify the input stmt.
func Params2Expressions ¶
func Params2Expressions(params []types.Datum) []expression.Expression
Params2Expressions converts these parameters to an expression list.
func ParseParameterizedSQL ¶
ParseParameterizedSQL parses this parameterized SQL with the specified sctx.
func PrepareIdxColsAndUnwrapArrayType ¶
func PrepareIdxColsAndUnwrapArrayType( tableInfo *model.TableInfo, idxInfo *model.IndexInfo, tblColsByID map[int64]*expression.Column, checkOnly1ArrayTypeCol bool, ) (idxCols []*expression.Column, ok bool)
PrepareIdxColsAndUnwrapArrayType collects columns for an index and returns them as []*expression.Column. If any of them is an array type, we will use its underlying FieldType in the returned Column.RetType. If checkOnly1ArrayTypeCol is true, we will check if this index contains only one array type column. If not, it will return (nil, false). This check works as a sanity check for an MV index. Though this function is introduced for MV index, it can also be used for normal index if you pass false to checkOnly1ArrayTypeCol. This function is exported for test.
func Preprocess ¶
func Preprocess(ctx context.Context, sctx sessionctx.Context, node *resolve.NodeW, preprocessOpt ...PreprocessOpt) error
Preprocess resolves the table names of the node and validates some statements. preprocessReturn is used to extract the infoschema for the tableName and the timestamp from the AS OF clause.
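A sketch of a typical call under the prepare path, assuming `nodeW` is the *resolve.NodeW wrapping the parsed statement (a placeholder):

	// InPrepare is a documented PreprocessOpt; it marks the preprocess as running under PREPARE.
	if err := Preprocess(ctx, sctx, nodeW, InPrepare); err != nil {
		// name resolution or validation failed
	}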
func RebuildPlan4CachedPlan ¶
RebuildPlan4CachedPlan will rebuild this plan under current user parameters.
func RecheckCTE ¶
func RecheckCTE(p base.LogicalPlan)
RecheckCTE fills the IsOuterMostCTE field for CTEs. It's a temporary solution before we fully use the Sequence to optimize the CTEs. This func checks whether the CTE is referenced only by the main query or not.
func RecursiveDeriveStats4Test ¶
RecursiveDeriveStats4Test is an exporter just for tests.
func RestoreASTWithParams ¶
RestoreASTWithParams restores this parameterized AST with specific parameters. e.g. `select * from t where a<? and b<?`, [10, 23] --> `select * from t where a<10 and b<23`.
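A sketch of the round trip with ParameterizeAST, assuming `stmt` is a parsed ast.StmtNode (note that ParameterizeAST may modify it in place):

	paramSQL, params, err := ParameterizeAST(stmt)
	if err != nil {
		// handle error
	}
	// ... use paramSQL, e.g. as a cache key ...
	// Later, put the concrete values back into the parameterized AST.
	if err := RestoreASTWithParams(stmt, params); err != nil {
		// handle error
	}
	_ = paramSQL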
func SetParameterValuesIntoSCtx ¶
func SetParameterValuesIntoSCtx(sctx base.PlanContext, isNonPrep bool, markers []ast.ParamMarkerExpr, params []expression.Expression) error
SetParameterValuesIntoSCtx sets these parameters into session context.
func SubstituteExpression ¶
func SubstituteExpression(cond expression.Expression, lp base.LogicalPlan, exprToColumn ExprColumnMap, schema *expression.Schema) bool
SubstituteExpression is exported for benchmarks.
func TryAddExtraLimit ¶
TryAddExtraLimit tries to add an extra limit for a SELECT or UNION statement when sql_select_limit is set.
func TryFastPlan ¶
TryFastPlan tries to use the PointGetPlan for the query.
func TurnNominalSortIntoProj ¶
func TurnNominalSortIntoProj(p base.PhysicalPlan, onlyColumn bool, orderByItems []*util.ByItems) base.PhysicalPlan
TurnNominalSortIntoProj will turn nominal sort into two projections. This is to check if the scalar functions will overflow.
func VisitInfo4PrivCheck ¶
func VisitInfo4PrivCheck(ctx context.Context, is infoschema.InfoSchema, node ast.Node, vs []visitInfo) (privVisitInfo []visitInfo)
VisitInfo4PrivCheck generates privilege check infos because the privilege check of local temporary tables is different from that of normal tables. The `CREATE` statement needs the `CREATE TEMPORARY TABLE` privilege from the database, and subsequent statements do not need any privileges.
func VolcanoOptimize ¶
func VolcanoOptimize(ctx context.Context, sctx base.PlanContext, flag uint64, logic base.LogicalPlan) (base.LogicalPlan, base.PhysicalPlan, float64, error)
VolcanoOptimize includes: logicalOptimize and physicalOptimize.
Types ¶
type AdminPlugins ¶
type AdminPlugins struct {
physicalop.SimpleSchemaProducer
Action AdminPluginsAction
Plugins []string
}
AdminPlugins administers TiDB plugins.
type AdminPluginsAction ¶
type AdminPluginsAction int
AdminPluginsAction indicates the action to be taken on plugins.
const (
	// Enable indicates enable plugins.
	Enable AdminPluginsAction = iota + 1
	// Disable indicates disable plugins.
	Disable
)
type AdminShowBDRRole ¶
type AdminShowBDRRole struct {
physicalop.SimpleSchemaProducer
}
AdminShowBDRRole represents a show bdr role plan.
type AggregateFuncExtractor ¶
type AggregateFuncExtractor struct {
// AggFuncs is the collected AggregateFuncExprs.
AggFuncs []*ast.AggregateFuncExpr
// contains filtered or unexported fields
}
AggregateFuncExtractor visits Expr tree. It collects AggregateFuncExpr from AST Node.
type AggregationEliminator ¶
type AggregationEliminator struct {
// contains filtered or unexported fields
}
AggregationEliminator is used to eliminate aggregation grouped by unique key.
func (*AggregationEliminator) Name ¶
func (*AggregationEliminator) Name() string
Name implements the base.LogicalOptRule.<1st> interface.
func (*AggregationEliminator) Optimize ¶
func (a *AggregationEliminator) Optimize(ctx context.Context, p base.LogicalPlan) (base.LogicalPlan, bool, error)
Optimize implements the base.LogicalOptRule.<0th> interface.
type AggregationPushDownSolver ¶
type AggregationPushDownSolver struct {
// contains filtered or unexported fields
}
AggregationPushDownSolver is a rule that pushes down aggregation functions to the child of LogicalJoin.
func (*AggregationPushDownSolver) Name ¶
func (*AggregationPushDownSolver) Name() string
Name implements the base.LogicalOptRule.<1st> interface.
func (*AggregationPushDownSolver) Optimize ¶
func (a *AggregationPushDownSolver) Optimize(_ context.Context, p base.LogicalPlan) (base.LogicalPlan, bool, error)
Optimize implements the base.LogicalOptRule.<0th> interface.
type AlterDDLJob ¶
type AlterDDLJob struct {
physicalop.SimpleSchemaProducer
JobID int64
Options []*AlterDDLJobOpt
}
AlterDDLJob is the plan of admin alter ddl job
type AlterDDLJobOpt ¶
type AlterDDLJobOpt struct {
Name string
Value expression.Expression
}
AlterDDLJobOpt represents alter ddl job option.
type Analyze ¶
type Analyze struct {
physicalop.SimpleSchemaProducer
ColTasks []AnalyzeColumnsTask
IdxTasks []AnalyzeIndexTask
Opts map[ast.AnalyzeOptionType]uint64
// OptionsMap is used to store the options for each partition.
OptionsMap map[int64]V2AnalyzeOptions
}
Analyze represents an analyze plan
type AnalyzeColumnsTask ¶
type AnalyzeColumnsTask struct {
HandleCols util.HandleCols
CommonHandleInfo *model.IndexInfo
ColsInfo []*model.ColumnInfo
SkipColsInfo []*model.ColumnInfo
TblInfo *model.TableInfo
Indexes []*model.IndexInfo
AnalyzeInfo
}
AnalyzeColumnsTask is used for analyze columns.
type AnalyzeIndexTask ¶
type AnalyzeIndexTask struct {
IndexInfo *model.IndexInfo
TblInfo *model.TableInfo
AnalyzeInfo
}
AnalyzeIndexTask is used for analyze index.
type AnalyzeInfo ¶
type AnalyzeInfo struct {
DBName string
TableName string
PartitionName string
TableID statistics.AnalyzeTableID
StatsVersion int
V2Options *V2AnalyzeOptions
}
AnalyzeInfo is used to store the database name, table name and partition name of analyze task.
type CancelDDLJobs ¶
type CancelDDLJobs struct {
physicalop.SimpleSchemaProducer
JobIDs []int64
}
CancelDDLJobs represents a cancel DDL jobs plan.
type CheckIndexRange ¶
type CheckIndexRange struct {
physicalop.SimpleSchemaProducer
Table *ast.TableName
IndexName string
HandleRanges []ast.HandleRange
}
CheckIndexRange is used for checking index data, output the index values that handle within begin and end.
type CheckTable ¶
type CheckTable struct {
physicalop.SimpleSchemaProducer
DBName string
Table table.Table
IndexInfos []*model.IndexInfo
IndexLookUpReaders []*physicalop.PhysicalIndexLookUpReader
CheckIndex bool
}
CheckTable is used for checking table data, built from the 'admin check table' statement.
type ChecksumTable ¶
type ChecksumTable struct {
physicalop.SimpleSchemaProducer
Tables []*resolve.TableNameW
}
ChecksumTable is used for calculating table checksum, built from the `admin checksum table` statement.
type CleanupIndex ¶
type CleanupIndex struct {
physicalop.SimpleSchemaProducer
Table *resolve.TableNameW
IndexName string
}
CleanupIndex is used to delete dangling index data.
type ClusterLogTableExtractor ¶
type ClusterLogTableExtractor struct {
// SkipRequest means the where clause always false, we don't need to request any component
SkipRequest bool
// NodeTypes represents all components types we should send request to.
// e.g:
// 1. SELECT * FROM cluster_log WHERE type='tikv'
// 2. SELECT * FROM cluster_log WHERE type in ('tikv', 'tidb')
NodeTypes set.StringSet
// Instances represents all components instances we should send request to.
// e.g:
// 1. SELECT * FROM cluster_log WHERE instance='192.168.1.7:2379'
// 2. SELECT * FROM cluster_log WHERE instance in ('192.168.1.7:2379', '192.168.1.9:2379')
Instances set.StringSet
// StartTime represents the beginning time of log message
// e.g: SELECT * FROM cluster_log WHERE time>'2019-10-10 10:10:10.999'
StartTime int64
// EndTime represents the ending time of log message
// e.g: SELECT * FROM cluster_log WHERE time<'2019-10-11 10:10:10.999'
EndTime int64
// Pattern is used to filter the log message
// e.g:
// 1. SELECT * FROM cluster_log WHERE message like '%gc%'
// 2. SELECT * FROM cluster_log WHERE message regexp '.*'
Patterns []string
LogLevels set.StringSet
// contains filtered or unexported fields
}
ClusterLogTableExtractor is used to extract some predicates of `cluster_log`
func (*ClusterLogTableExtractor) ExplainInfo ¶
func (e *ClusterLogTableExtractor) ExplainInfo(pp base.PhysicalPlan) string
ExplainInfo implements base.MemTablePredicateExtractor interface.
func (*ClusterLogTableExtractor) Extract ¶
func (e *ClusterLogTableExtractor) Extract(ctx base.PlanContext, schema *expression.Schema, names []*types.FieldName, predicates []expression.Expression, ) []expression.Expression
Extract implements the MemTablePredicateExtractor Extract interface
type ClusterTableExtractor ¶
type ClusterTableExtractor struct {
// SkipRequest means the where clause always false, we don't need to request any component
SkipRequest bool
// NodeTypes represents all components types we should send request to.
// e.g:
// 1. SELECT * FROM cluster_config WHERE type='tikv'
// 2. SELECT * FROM cluster_config WHERE type in ('tikv', 'tidb')
NodeTypes set.StringSet
// Instances represents all components instances we should send request to.
// e.g:
// 1. SELECT * FROM cluster_config WHERE instance='192.168.1.7:2379'
// 2. SELECT * FROM cluster_config WHERE instance in ('192.168.1.7:2379', '192.168.1.9:2379')
Instances set.StringSet
// contains filtered or unexported fields
}
ClusterTableExtractor is used to extract some predicates of cluster table.
func (*ClusterTableExtractor) ExplainInfo ¶
func (e *ClusterTableExtractor) ExplainInfo(_ base.PhysicalPlan) string
ExplainInfo implements base.MemTablePredicateExtractor interface.
func (*ClusterTableExtractor) Extract ¶
func (e *ClusterTableExtractor) Extract(ctx base.PlanContext, schema *expression.Schema, names []*types.FieldName, predicates []expression.Expression, ) []expression.Expression
Extract implements the MemTablePredicateExtractor Extract interface
type CompactTable ¶
type CompactTable struct {
physicalop.SimpleSchemaProducer
ReplicaKind ast.CompactReplicaKind
TableInfo *model.TableInfo
PartitionNames []ast.CIStr
}
CompactTable represents a "ALTER TABLE [NAME] COMPACT ..." plan.
type ConvertOuterToInnerJoin ¶
type ConvertOuterToInnerJoin struct {
}
ConvertOuterToInnerJoin converts outer joins to inner joins if the unmatching rows are filtered out.
func (*ConvertOuterToInnerJoin) Name ¶
func (*ConvertOuterToInnerJoin) Name() string
Name implements base.LogicalOptRule.<1st> interface.
func (*ConvertOuterToInnerJoin) Optimize ¶
func (*ConvertOuterToInnerJoin) Optimize(_ context.Context, p base.LogicalPlan) (base.LogicalPlan, bool, error)
Optimize implements base.LogicalOptRule.<0th> interface. convertOuterToInnerJoin is a refactoring of the outer-to-inner join logic that used to be part of predicate push down. The rewrite passes down predicates from the selection (WHERE clause) and join predicates (ON clause). All nodes except LogicalJoin are pass-through: the rewrite is done for the child and nothing for the node itself. The main logic is applied for joins:
- Traversal is preorder and the passed-down predicate is checked for the left/right side after the join
- The ON clause and the passed-down predicate (from higher selects or joins) are combined and applied to the join children. This depends on the join type:
  - For left/right outer joins, the ON clause can be applied only on the inner side (null producing side)
  - For inner/semi joins, the ON clause can be applied on both children
  - For anti semi joins, the ON clause is applied only on the left side
  - For all other cases, do not pass the ON clause.
type DDL ¶
type DDL struct {
physicalop.SimpleSchemaProducer
Statement ast.DDLNode
}
DDL represents a DDL statement plan.
type Deallocate ¶
type Deallocate struct {
physicalop.SimpleSchemaProducer
Name string
}
Deallocate represents deallocate plan.
type DecorrelateSolver ¶
type DecorrelateSolver struct{}
DecorrelateSolver tries to convert apply plan to join plan.
func (*DecorrelateSolver) Name ¶
func (*DecorrelateSolver) Name() string
Name implements base.LogicalOptRule.<1st> interface.
func (*DecorrelateSolver) Optimize ¶
func (s *DecorrelateSolver) Optimize(ctx context.Context, p base.LogicalPlan) (base.LogicalPlan, bool, error)
Optimize implements base.LogicalOptRule.<0th> interface.
type DeriveTopNFromWindow ¶
type DeriveTopNFromWindow struct {
}
DeriveTopNFromWindow pushes down the topN or limit. In the future we will remove the limit from `requiredProperty` in CBO phase.
func (*DeriveTopNFromWindow) Name ¶
func (*DeriveTopNFromWindow) Name() string
Name implements base.LogicalOptRule.<1st> interface.
func (*DeriveTopNFromWindow) Optimize ¶
func (*DeriveTopNFromWindow) Optimize(_ context.Context, p base.LogicalPlan) (base.LogicalPlan, bool, error)
Optimize implements base.LogicalOptRule.<0th> interface.
type DistributeTable ¶
type DistributeTable struct {
physicalop.SimpleSchemaProducer
TableInfo *model.TableInfo
PartitionNames []ast.CIStr
Engine string
Rule string
Timeout string
}
DistributeTable represents a distribute table plan.
type EliminateUnionAllDualItem ¶
type EliminateUnionAllDualItem struct {
}
EliminateUnionAllDualItem tries to eliminate dual items (rowcount=0) in the UNION ALL case.
func (*EliminateUnionAllDualItem) Name ¶
func (*EliminateUnionAllDualItem) Name() string
Name implements the LogicalOptRule's name.
func (*EliminateUnionAllDualItem) Optimize ¶
func (*EliminateUnionAllDualItem) Optimize(_ context.Context, p base.LogicalPlan) (base.LogicalPlan, bool, error)
Optimize implements LogicalOptRule's Optimize.
type EmptySelectionEliminator ¶
type EmptySelectionEliminator struct{}
EmptySelectionEliminator is a logical optimization rule that removes empty selections
func (*EmptySelectionEliminator) Name ¶
func (*EmptySelectionEliminator) Name() string
Name implements base.LogicalOptRule.<1st> interface.
func (*EmptySelectionEliminator) Optimize ¶
func (e *EmptySelectionEliminator) Optimize(_ context.Context, p base.LogicalPlan) (base.LogicalPlan, bool, error)
Optimize implements base.LogicalOptRule.<0th> interface.
type ErrExprLoc ¶
ErrExprLoc is used to generate the ErrFieldNotInGroupBy error info.
type Execute ¶
type Execute struct {
physicalop.SimpleSchemaProducer
Name string
Params []expression.Expression
PrepStmt *PlanCacheStmt
Stmt ast.StmtNode
Plan base.Plan
}
Execute represents an execute plan for a prepared statement.
type Explain ¶
type Explain struct {
physicalop.SimpleSchemaProducer
TargetPlan base.Plan
Format string
Analyze bool
Explore bool // EXPLAIN EXPLORE statement
SQLDigest string // "EXPLAIN EXPLORE <sql_digest>"
ExecStmt ast.StmtNode
RuntimeStatsColl *execdetails.RuntimeStatsColl
Rows [][]string
BriefBinaryPlan string
}
Explain represents an explain plan.
func (*Explain) RenderResult ¶
RenderResult renders the explain result as specified format.
type ExplainInfoForEncode ¶
type ExplainInfoForEncode struct {
ID string `json:"id"`
EstRows string `json:"estRows"`
ActRows string `json:"actRows,omitempty"`
TaskType string `json:"taskType"`
AccessObject string `json:"accessObject,omitempty"`
ExecuteInfo string `json:"executeInfo,omitempty"`
OperatorInfo string `json:"operatorInfo,omitempty"`
EstCost string `json:"estCost,omitempty"`
CostFormula string `json:"costFormula,omitempty"`
MemoryInfo string `json:"memoryInfo,omitempty"`
DiskInfo string `json:"diskInfo,omitempty"`
TotalMemoryConsumed string `json:"totalMemoryConsumed,omitempty"`
SubOperators []*ExplainInfoForEncode `json:"subOperators,omitempty"`
}
ExplainInfoForEncode stores explain info for JSON encoding.
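Because every field of ExplainInfoForEncode carries a JSON struct tag, it can be marshaled directly with encoding/json. The following is a minimal, hedged sketch; the operator names, field values, and the import path github.com/pingcap/tidb/pkg/planner/core are assumptions made for illustration only.

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/pingcap/tidb/pkg/planner/core" // import path assumed from the package name
    )

    func main() {
        // The values below are made up; in practice this struct is filled from a real plan.
        info := core.ExplainInfoForEncode{
            ID:       "TableReader_7",
            EstRows:  "10000.00",
            TaskType: "root",
            SubOperators: []*core.ExplainInfoForEncode{
                {ID: "TableFullScan_6", EstRows: "10000.00", TaskType: "cop[tikv]"},
            },
        }
        out, err := json.Marshal(&info)
        if err != nil {
            panic(err)
        }
        // Fields tagged omitempty (actRows, accessObject, ...) are dropped when empty.
        fmt.Println(string(out))
    }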
type ExprColumnMap ¶
type ExprColumnMap map[expression.Expression]*expression.Column
ExprColumnMap is used to store all expressions of indexed generated columns in a table and map them to the generated columns, so that we can substitute an expression in a query with an indexed generated column.
type FlatOperator ¶
type FlatOperator struct {
// A reference to the original operator.
Origin base.Plan
// ChildrenIdx is the indexes of the children of this operator in the FlatPlanTree.
// It's ordered from small to large.
ChildrenIdx []int
// ChildrenEndIdx is the index of the last operator of children subtrees of this operator in the FlatPlanTree.
ChildrenEndIdx int
// NeedReverseDriverSide means if we need to reverse the order of children to keep build side before probe side.
//
// Specifically, it means if the below are all true:
// 1. this operator has two children
// 2. the first child's Label is the probe side and the second's is the build side.
//
// If you call FlattenPhysicalPlan with buildSideFirst true, NeedReverseDriverSide will be useless.
NeedReverseDriverSide bool
Depth uint32
Label OperatorLabel
IsRoot bool
StoreType kv.StoreType
// ReqType is only meaningful when IsRoot is false.
ReqType physicalop.ReadReqType
// The below two fields are mainly for text tree formatting. See texttree.PrettyIdentifier().
TextTreeIndent string
IsLastChild bool
// IsINLProbeChild indicates whether this operator is on the inner (probe) side of an indexLookupReader or indexMergeReader.
// For example, it will change the underlying tableScan to a rowIDScan.
IsINLProbeChild bool
IsPhysicalPlan bool
}
FlatOperator is a simplified operator. It contains a reference to the original operator and some usually needed information.
func (*FlatOperator) ExplainID ¶
func (f *FlatOperator) ExplainID() fmt.Stringer
ExplainID of FlatOperator is a wrapper that calls the original operator's ExplainID with IsINLProbeChild taken into account.
type FlatPhysicalPlan ¶
type FlatPhysicalPlan struct {
Main FlatPlanTree
CTEs []FlatPlanTree
ScalarSubQueries []FlatPlanTree
// InExecute means if the original plan tree contains Execute operator.
//
// Be careful when trying to use this, InExecute is true doesn't mean we are handling an EXECUTE statement.
// When collecting information from the plan in an EXECUTE statement, usually we directly use the plan
// in Execute.Plan, not Execute itself, so InExecute will be false.
//
// When will InExecute be true? When you're using "EXPLAIN FOR CONNECTION" to get the last plan of
// a connection (usually we will record Explain.TargetPlan for an EXPLAIN statement) and that plan
// is from an EXECUTE statement, we will collect from Execute itself, not directly from Execute.Plan,
// then InExecute will be true.
InExecute bool
// InExplain means if the original plan tree contains Explain operator.
InExplain bool
// contains filtered or unexported fields
}
FlatPhysicalPlan provides an easier structure to traverse a plan and collect needed information. Note: Although it's named FlatPhysicalPlan, there also could be Insert, Delete and Update at the beginning of Main.
func FlattenPhysicalPlan ¶
func FlattenPhysicalPlan(p base.Plan, buildSideFirst bool) *FlatPhysicalPlan
FlattenPhysicalPlan generates a FlatPhysicalPlan from a PhysicalPlan, Insert, Delete, Update, Explain or Execute.
type FlatPlanTree ¶
type FlatPlanTree []*FlatOperator
FlatPlanTree is a simplified plan tree. It arranges all operators in the tree as a slice, ordered by the order of traversing the tree, which means a depth-first traversal plus some special rule for some operators.
func (FlatPlanTree) GetSelectPlan ¶
func (e FlatPlanTree) GetSelectPlan() (FlatPlanTree, int)
GetSelectPlan skips Insert, Delete, and Update at the beginning of the FlatPlanTree and the foreign key check/cascade plan at the end of the FlatPlanTree. Note:
It returns a reference to the original FlatPlanTree, so please avoid modifying the returned value. The second return value is the offset. Because the returned FlatPlanTree is a part of the original slice, you need to subtract the offset from the returned FlatOperator.Depth and FlatOperator.ChildrenIdx when using them.
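A hedged sketch of that pattern follows: flatten an existing plan, skip any DML wrapper with GetSelectPlan, and subtract the returned offset before indexing back into the sub-slice. The function name and the parameter p (an existing base.Plan) are illustrative assumptions, and the base/core import paths are assumed.

    func walkSelectPlan(p base.Plan) {
        flat := core.FlattenPhysicalPlan(p, true) // buildSideFirst = true
        selectTree, offset := flat.Main.GetSelectPlan()
        for _, op := range selectTree {
            for _, childIdx := range op.ChildrenIdx {
                // ChildrenIdx refers to the original, un-sliced tree, hence the offset.
                child := selectTree[childIdx-offset]
                _ = child
            }
        }
    }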
type GcSubstituter ¶
type GcSubstituter struct {
}
GcSubstituter is used to substitute an expression with an indexed virtual generated column in the where, group by, order by, and field clauses.
func (*GcSubstituter) Name ¶
func (*GcSubstituter) Name() string
Name implements base.LogicalOptRule.<1st> interface.
func (*GcSubstituter) Optimize ¶
func (gc *GcSubstituter) Optimize(ctx context.Context, lp base.LogicalPlan) (base.LogicalPlan, bool, error)
Optimize implements base.LogicalOptRule.<0th> interface. It tries to replace an expression with an indexed virtual generated column in the where, group by, order by, and field clauses so that we can use the index on the expression. For example: select a+1 from t order by a+1, with a virtual generated column c as (a+1) and an index on c. We need to replace a+1 with c so that we can use the index on c. See also https://dev.mysql.com/doc/refman/8.0/en/generated-column-index-optimizations.html
type HotRegionsHistoryTableExtractor ¶
type HotRegionsHistoryTableExtractor struct {
// SkipRequest means the where clause is always false; we don't need to request any PD server.
SkipRequest bool
// StartTime represents the beginning time of update time.
// e.g: SELECT * FROM tidb_hot_regions_history WHERE update_time>'2019-10-10 10:10:10.999'
StartTime int64
// EndTime represents the ending time of update time.
// e.g: SELECT * FROM tidb_hot_regions_history WHERE update_time<'2019-10-11 10:10:10.999'
EndTime int64
// RegionIDs/StoreIDs/PeerIDs represents all region/store/peer ids we should filter in PD to reduce network IO.
// e.g:
// 1. SELECT * FROM tidb_hot_regions_history WHERE region_id=1
// 2. SELECT * FROM tidb_hot_regions_history WHERE table_id in (11, 22)
// Leave range operation to above selection executor.
RegionIDs []uint64
StoreIDs []uint64
PeerIDs []uint64
// IsLearners/IsLeaders represents whether we should request for learner/leader role in PD to reduce network IO.
// e.g:
// 1. SELECT * FROM tidb_hot_regions_history WHERE is_learner=1
// 2. SELECT * FROM tidb_hot_regions_history WHERE is_learner in (0,1) -> request all
IsLearners []bool
IsLeaders []bool
// HotRegionTypes represents all hot region types we should filter in PD to reduce network IO.
// e.g:
// 1. SELECT * FROM tidb_hot_regions_history WHERE type='read'
// 2. SELECT * FROM tidb_hot_regions_history WHERE type in ('read', 'write')
// 3. SELECT * FROM tidb_hot_regions_history WHERE type='read' and type='write' -> SkipRequest = true
HotRegionTypes set.StringSet
// contains filtered or unexported fields
}
HotRegionsHistoryTableExtractor is used to extract some predicates of `tidb_hot_regions_history`
func (*HotRegionsHistoryTableExtractor) ExplainInfo ¶
func (e *HotRegionsHistoryTableExtractor) ExplainInfo(pp base.PhysicalPlan) string
ExplainInfo implements the base.MemTablePredicateExtractor interface.
func (*HotRegionsHistoryTableExtractor) Extract ¶
func (e *HotRegionsHistoryTableExtractor) Extract(ctx base.PlanContext, schema *expression.Schema, names []*types.FieldName, predicates []expression.Expression) []expression.Expression
Extract implements the MemTablePredicateExtractor Extract interface
type ImportInto ¶
type ImportInto struct {
physicalop.SimpleSchemaProducer
Table *resolve.TableNameW
ColumnAssignments []*ast.Assignment
ColumnsAndUserVars []*ast.ColumnNameOrUserVar
Path string
Format *string
Options []*LoadDataOpt
GenCols physicalop.InsertGeneratedColumns
Stmt string
SelectPlan base.PhysicalPlan
}
ImportInto represents an ingest-into plan.
func (ImportInto) Init ¶
func (p ImportInto) Init(ctx base.PlanContext) *ImportInto
Init initializes ImportInto.
type IndexUsageIndexInfo ¶
IndexUsageIndexInfo is the necessary index info for information_schema.tidb_index_usage. It only includes the index name and ID in lower case.
type InfoSchemaBaseExtractor ¶
type InfoSchemaBaseExtractor struct {
// SkipRequest means the where clause is always false; we don't need to request any component.
SkipRequest bool
// ColPredicates records the columns that can be extracted from the predicates.
// For example, `select * from information_schema.SCHEMATA where schema_name='mysql' or schema_name='INFORMATION_SCHEMA'`
// {"schema_name": ["mysql", "INFORMATION_SCHEMA"]}
ColPredicates map[string]set.StringSet
// used for EXPLAIN only
LikePatterns map[string][]string
// contains filtered or unexported fields
}
InfoSchemaBaseExtractor is used to extract infoSchema tables related predicates.
func (*InfoSchemaBaseExtractor) ExplainInfo ¶
func (e *InfoSchemaBaseExtractor) ExplainInfo(_ base.PhysicalPlan) string
ExplainInfo implements base.MemTablePredicateExtractor interface.
func (*InfoSchemaBaseExtractor) Extract ¶
func (e *InfoSchemaBaseExtractor) Extract(ctx base.PlanContext, schema *expression.Schema, names []*types.FieldName, predicates []expression.Expression) (remained []expression.Expression)
Extract implements the MemTablePredicateExtractor Extract interface
func (*InfoSchemaBaseExtractor) GetBase ¶
func (e *InfoSchemaBaseExtractor) GetBase() *InfoSchemaBaseExtractor
GetBase is only used for test.
func (*InfoSchemaBaseExtractor) ListSchemas ¶
func (e *InfoSchemaBaseExtractor) ListSchemas(is infoschema.InfoSchema) []ast.CIStr
ListSchemas lists all schemas from predicate. If no schema is specified, it lists all schemas in the storage.
func (*InfoSchemaBaseExtractor) ListSchemasAndTables ¶
func (e *InfoSchemaBaseExtractor) ListSchemasAndTables(ctx context.Context, is infoschema.InfoSchema) ([]ast.CIStr, []*model.TableInfo, error)
ListSchemasAndTables lists related tables and their corresponding schemas from the predicate. If there is no error, the returned schema slice and table slice are guaranteed to have the same length.
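A hedged sketch of consuming that result: the two returned slices are index-aligned, so schemas[i] is the schema of tables[i]. The function name and the parameters (an existing extractor e and infoschema.InfoSchema is) are assumptions for illustration.

    func printRelatedTables(ctx context.Context, e *core.InfoSchemaBaseExtractor, is infoschema.InfoSchema) error {
        schemas, tables, err := e.ListSchemasAndTables(ctx, is)
        if err != nil {
            return err
        }
        for i, tbl := range tables {
            // The two slices have equal length, so indexing with i is safe here.
            fmt.Printf("%s.%s\n", schemas[i].O, tbl.Name.O)
        }
        return nil
    }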
type InfoSchemaCheckConstraintsExtractor ¶
type InfoSchemaCheckConstraintsExtractor struct {
InfoSchemaBaseExtractor
}
InfoSchemaCheckConstraintsExtractor is the predicate extractor for information_schema.check_constraints.
func NewInfoSchemaCheckConstraintsExtractor ¶
func NewInfoSchemaCheckConstraintsExtractor() *InfoSchemaCheckConstraintsExtractor
NewInfoSchemaCheckConstraintsExtractor creates a new InfoSchemaCheckConstraintsExtractor.
func (*InfoSchemaCheckConstraintsExtractor) HasConstraint ¶
func (e *InfoSchemaCheckConstraintsExtractor) HasConstraint(name string) bool
HasConstraint returns true if constraint name is specified in predicates.
type InfoSchemaColumnsExtractor ¶
type InfoSchemaColumnsExtractor struct {
InfoSchemaBaseExtractor
// contains filtered or unexported fields
}
InfoSchemaColumnsExtractor is the predicate extractor for information_schema.columns.
func NewInfoSchemaColumnsExtractor ¶
func NewInfoSchemaColumnsExtractor() *InfoSchemaColumnsExtractor
NewInfoSchemaColumnsExtractor creates a new InfoSchemaColumnsExtractor.
func (*InfoSchemaColumnsExtractor) ListColumns ¶
func (e *InfoSchemaColumnsExtractor) ListColumns(tbl *model.TableInfo) ([]*model.ColumnInfo, []int)
ListColumns lists unhidden columns and corresponding ordinal positions for the given table from the predicates. If no column is found in the predicates, it returns all visible columns.
func (*InfoSchemaColumnsExtractor) ListTables ¶
func (e *InfoSchemaColumnsExtractor) ListTables(ctx context.Context, s ast.CIStr, is infoschema.InfoSchema) ([]*model.TableInfo, error)
ListTables lists related tables for the given schema from the predicate. If no table is found in the predicate, it returns all tables. TODO(tangenta): remove this after streaming interface is supported.
type InfoSchemaDDLExtractor ¶
type InfoSchemaDDLExtractor struct {
InfoSchemaBaseExtractor
}
InfoSchemaDDLExtractor is the predicate extractor for information_schema.ddl_jobs.
func NewInfoSchemaDDLExtractor ¶
func NewInfoSchemaDDLExtractor() *InfoSchemaDDLExtractor
NewInfoSchemaDDLExtractor creates a new InfoSchemaDDLExtractor.
func (*InfoSchemaDDLExtractor) Extract ¶
func (e *InfoSchemaDDLExtractor) Extract(ctx base.PlanContext, schema *expression.Schema, names []*types.FieldName, predicates []expression.Expression) (remained []expression.Expression)
Extract implements the MemTablePredicateExtractor Extract interface
Different from other extractors, input predicates will not be pruned. For example, we use the state to determine whether to scan history DDL jobs, but we do not use these predicates to do filtering, so the Selection operator is still needed.
type InfoSchemaIndexesExtractor ¶
type InfoSchemaIndexesExtractor struct {
InfoSchemaBaseExtractor
}
InfoSchemaIndexesExtractor is the predicate extractor for information_schema.tidb_indexes.
func NewInfoSchemaIndexesExtractor ¶
func NewInfoSchemaIndexesExtractor() *InfoSchemaIndexesExtractor
NewInfoSchemaIndexesExtractor creates a new InfoSchemaIndexesExtractor.
type InfoSchemaKeyColumnUsageExtractor ¶
type InfoSchemaKeyColumnUsageExtractor struct {
InfoSchemaBaseExtractor
}
InfoSchemaKeyColumnUsageExtractor is the predicate extractor for information_schema.key_column_usage.
func NewInfoSchemaKeyColumnUsageExtractor ¶
func NewInfoSchemaKeyColumnUsageExtractor() *InfoSchemaKeyColumnUsageExtractor
NewInfoSchemaKeyColumnUsageExtractor creates a new InfoSchemaKeyColumnUsageExtractor.
func (*InfoSchemaKeyColumnUsageExtractor) HasConstraint ¶
func (e *InfoSchemaKeyColumnUsageExtractor) HasConstraint(name string) bool
HasConstraint returns true if constraint name is specified in predicates.
func (*InfoSchemaKeyColumnUsageExtractor) HasConstraintSchema ¶
func (e *InfoSchemaKeyColumnUsageExtractor) HasConstraintSchema(name string) bool
HasConstraintSchema returns true if constraint schema is specified in predicates.
func (*InfoSchemaKeyColumnUsageExtractor) HasPrimaryKey ¶
func (e *InfoSchemaKeyColumnUsageExtractor) HasPrimaryKey() bool
HasPrimaryKey returns true if primary key is specified in predicates.
type InfoSchemaPartitionsExtractor ¶
type InfoSchemaPartitionsExtractor struct {
InfoSchemaBaseExtractor
}
InfoSchemaPartitionsExtractor is the predicate extractor for information_schema.partitions.
func NewInfoSchemaPartitionsExtractor ¶
func NewInfoSchemaPartitionsExtractor() *InfoSchemaPartitionsExtractor
NewInfoSchemaPartitionsExtractor creates a new InfoSchemaPartitionsExtractor.
func (*InfoSchemaPartitionsExtractor) HasPartition ¶
func (e *InfoSchemaPartitionsExtractor) HasPartition(name string) bool
HasPartition returns true if partition name matches the one in predicates.
func (*InfoSchemaPartitionsExtractor) HasPartitionPred ¶
func (e *InfoSchemaPartitionsExtractor) HasPartitionPred() bool
HasPartitionPred returns true if partition name is specified in predicates.
type InfoSchemaReferConstExtractor ¶
type InfoSchemaReferConstExtractor struct {
InfoSchemaBaseExtractor
}
InfoSchemaReferConstExtractor is the predicate extractor for information_schema.referential_constraints.
func NewInfoSchemaReferConstExtractor ¶
func NewInfoSchemaReferConstExtractor() *InfoSchemaReferConstExtractor
NewInfoSchemaReferConstExtractor creates a new InfoSchemaReferConstExtractor.
func (*InfoSchemaReferConstExtractor) HasConstraint ¶
func (e *InfoSchemaReferConstExtractor) HasConstraint(name string) bool
HasConstraint returns true if constraint name is specified in predicates.
type InfoSchemaSchemataExtractor ¶
type InfoSchemaSchemataExtractor struct {
InfoSchemaBaseExtractor
}
InfoSchemaSchemataExtractor is the predicate extractor for information_schema.schemata.
func NewInfoSchemaSchemataExtractor ¶
func NewInfoSchemaSchemataExtractor() *InfoSchemaSchemataExtractor
NewInfoSchemaSchemataExtractor creates a new InfoSchemaSchemataExtractor.
type InfoSchemaSequenceExtractor ¶
type InfoSchemaSequenceExtractor struct {
InfoSchemaBaseExtractor
}
InfoSchemaSequenceExtractor is the predicate extractor for information_schema.sequences.
func NewInfoSchemaSequenceExtractor ¶
func NewInfoSchemaSequenceExtractor() *InfoSchemaSequenceExtractor
NewInfoSchemaSequenceExtractor creates a new InfoSchemaSequenceExtractor.
type InfoSchemaStatisticsExtractor ¶
type InfoSchemaStatisticsExtractor struct {
InfoSchemaBaseExtractor
}
InfoSchemaStatisticsExtractor is the predicate extractor for information_schema.statistics.
func NewInfoSchemaStatisticsExtractor ¶
func NewInfoSchemaStatisticsExtractor() *InfoSchemaStatisticsExtractor
NewInfoSchemaStatisticsExtractor creates a new InfoSchemaStatisticsExtractor.
func (*InfoSchemaStatisticsExtractor) HasIndex ¶
func (e *InfoSchemaStatisticsExtractor) HasIndex(val string) bool
HasIndex returns true if index name is specified in predicates.
func (*InfoSchemaStatisticsExtractor) HasPrimaryKey ¶
func (e *InfoSchemaStatisticsExtractor) HasPrimaryKey() bool
HasPrimaryKey returns true if primary key is specified in predicates.
type InfoSchemaTableConstraintsExtractor ¶
type InfoSchemaTableConstraintsExtractor struct {
InfoSchemaBaseExtractor
}
InfoSchemaTableConstraintsExtractor is the predicate extractor for information_schema.constraints.
func NewInfoSchemaTableConstraintsExtractor ¶
func NewInfoSchemaTableConstraintsExtractor() *InfoSchemaTableConstraintsExtractor
NewInfoSchemaTableConstraintsExtractor creates a new InfoSchemaTableConstraintsExtractor.
func (*InfoSchemaTableConstraintsExtractor) HasConstraint ¶
func (e *InfoSchemaTableConstraintsExtractor) HasConstraint(name string) bool
HasConstraint returns true if constraint is specified in predicates.
func (*InfoSchemaTableConstraintsExtractor) HasConstraintSchema ¶
func (e *InfoSchemaTableConstraintsExtractor) HasConstraintSchema(name string) bool
HasConstraintSchema returns true if constraint schema is specified in predicates.
func (*InfoSchemaTableConstraintsExtractor) HasPrimaryKey ¶
func (e *InfoSchemaTableConstraintsExtractor) HasPrimaryKey() bool
HasPrimaryKey returns true if primary key is specified in predicates.
type InfoSchemaTablesExtractor ¶
type InfoSchemaTablesExtractor struct {
InfoSchemaBaseExtractor
}
InfoSchemaTablesExtractor is the predicate extractor for information_schema.tables.
func NewInfoSchemaTablesExtractor ¶
func NewInfoSchemaTablesExtractor() *InfoSchemaTablesExtractor
NewInfoSchemaTablesExtractor creates a new InfoSchemaTablesExtractor.
func (*InfoSchemaTablesExtractor) HasTableName ¶
func (e *InfoSchemaTablesExtractor) HasTableName(name string) bool
HasTableName returns true if table name is specified in predicates.
func (*InfoSchemaTablesExtractor) HasTableSchema ¶
func (e *InfoSchemaTablesExtractor) HasTableSchema(name string) bool
HasTableSchema returns true if table schema is specified in predicates.
type InfoSchemaTiDBCheckConstraintsExtractor ¶
type InfoSchemaTiDBCheckConstraintsExtractor struct {
InfoSchemaBaseExtractor
}
InfoSchemaTiDBCheckConstraintsExtractor is the predicate extractor for information_schema.tidb_check_constraints.
func NewInfoSchemaTiDBCheckConstraintsExtractor ¶
func NewInfoSchemaTiDBCheckConstraintsExtractor() *InfoSchemaTiDBCheckConstraintsExtractor
NewInfoSchemaTiDBCheckConstraintsExtractor creates a new InfoSchemaTiDBCheckConstraintsExtractor.
func (*InfoSchemaTiDBCheckConstraintsExtractor) HasConstraint ¶
func (e *InfoSchemaTiDBCheckConstraintsExtractor) HasConstraint(name string) bool
HasConstraint returns true if constraint name is specified in predicates.
type InfoSchemaTiDBIndexUsageExtractor ¶
type InfoSchemaTiDBIndexUsageExtractor struct {
InfoSchemaBaseExtractor
// contains filtered or unexported fields
}
InfoSchemaTiDBIndexUsageExtractor is the predicate extractor for information_schema.tidb_index_usage.
func NewInfoSchemaTiDBIndexUsageExtractor ¶
func NewInfoSchemaTiDBIndexUsageExtractor() *InfoSchemaTiDBIndexUsageExtractor
NewInfoSchemaTiDBIndexUsageExtractor creates a new InfoSchemaTiDBIndexUsageExtractor.
func (*InfoSchemaTiDBIndexUsageExtractor) ListIndexes ¶
func (e *InfoSchemaTiDBIndexUsageExtractor) ListIndexes(tbl *model.TableInfo) []IndexUsageIndexInfo
ListIndexes lists related indexes for the given table from the predicate. If no index is found in the predicate, it returns all indexes.
type InfoSchemaViewsExtractor ¶
type InfoSchemaViewsExtractor struct {
InfoSchemaBaseExtractor
}
InfoSchemaViewsExtractor is the predicate extractor for information_schema.views.
func NewInfoSchemaViewsExtractor ¶
func NewInfoSchemaViewsExtractor() *InfoSchemaViewsExtractor
NewInfoSchemaViewsExtractor creates a new InfoSchemaViewsExtractor.
type InspectionResultTableExtractor ¶
type InspectionResultTableExtractor struct {
// SkipInspection means the where clause is always false; we don't need to request any component.
SkipInspection bool
// Rules represents the rules to apply; we should apply all inspection rules if no rules are specified.
// e.g: SELECT * FROM inspection_result WHERE rule in ('ddl', 'config')
Rules set.StringSet
// Items represents the items to apply; we should apply all inspection items if no items are specified.
// e.g: SELECT * FROM inspection_result WHERE item in ('ddl.lease', 'raftstore.threadpool')
Items set.StringSet
// contains filtered or unexported fields
}
InspectionResultTableExtractor is used to extract some predicates of `inspection_result`
func (*InspectionResultTableExtractor) ExplainInfo ¶
func (e *InspectionResultTableExtractor) ExplainInfo(_ base.PhysicalPlan) string
ExplainInfo implements base.MemTablePredicateExtractor interface.
func (*InspectionResultTableExtractor) Extract ¶
func (e *InspectionResultTableExtractor) Extract(ctx base.PlanContext, schema *expression.Schema, names []*types.FieldName, predicates []expression.Expression) (remained []expression.Expression)
Extract implements the MemTablePredicateExtractor Extract interface
type InspectionRuleTableExtractor ¶
type InspectionRuleTableExtractor struct {
SkipRequest bool
Types set.StringSet
// contains filtered or unexported fields
}
InspectionRuleTableExtractor is used to extract some predicates of `inspection_rules`
func (*InspectionRuleTableExtractor) ExplainInfo ¶
func (e *InspectionRuleTableExtractor) ExplainInfo(_ base.PhysicalPlan) string
ExplainInfo implements base.MemTablePredicateExtractor interface.
func (*InspectionRuleTableExtractor) Extract ¶
func (e *InspectionRuleTableExtractor) Extract(ctx base.PlanContext, schema *expression.Schema, names []*types.FieldName, predicates []expression.Expression) (remained []expression.Expression)
Extract implements the MemTablePredicateExtractor Extract interface
type InspectionSummaryTableExtractor ¶
type InspectionSummaryTableExtractor struct {
// SkipInspection means the where clause is always false; we don't need to request any component.
SkipInspection bool
// Rules represents the rules to apply; we should apply all inspection rules if no rules are specified.
// e.g: SELECT * FROM inspection_summary WHERE rule in ('ddl', 'config')
Rules set.StringSet
MetricNames set.StringSet
Quantiles []float64
// contains filtered or unexported fields
}
InspectionSummaryTableExtractor is used to extract some predicates of `inspection_summary`
func (*InspectionSummaryTableExtractor) ExplainInfo ¶
func (e *InspectionSummaryTableExtractor) ExplainInfo(_ base.PhysicalPlan) string
ExplainInfo implements base.MemTablePredicateExtractor interface.
func (*InspectionSummaryTableExtractor) Extract ¶
func (e *InspectionSummaryTableExtractor) Extract(ctx base.PlanContext, schema *expression.Schema, names []*types.FieldName, predicates []expression.Expression) (remained []expression.Expression)
Extract implements the MemTablePredicateExtractor Extract interface
type JoinReOrderSolver ¶
type JoinReOrderSolver struct {
}
JoinReOrderSolver is used to reorder the join nodes in a logical plan.
func (*JoinReOrderSolver) Name ¶
func (*JoinReOrderSolver) Name() string
Name implements the base.LogicalOptRule.<1st> interface.
func (*JoinReOrderSolver) Optimize ¶
func (s *JoinReOrderSolver) Optimize(_ context.Context, p base.LogicalPlan) (base.LogicalPlan, bool, error)
Optimize implements the base.LogicalOptRule.<0th> interface.
type LRUPlanCache ¶
type LRUPlanCache struct {
// contains filtered or unexported fields
}
LRUPlanCache is a dedicated least-recently-used cache, only used for the plan cache.
func NewLRUPlanCache ¶
func NewLRUPlanCache(capacity uint, guard float64, quota uint64, sctx sessionctx.Context, _ bool) *LRUPlanCache
NewLRUPlanCache creates an LRUPlanCache object whose capacity is "capacity". NOTE: "capacity" should be a positive value.
func (*LRUPlanCache) Close ¶
func (l *LRUPlanCache) Close()
Close does some cleanup work for LRUPlanCache when the session is closed.
func (*LRUPlanCache) Delete ¶
func (l *LRUPlanCache) Delete(key string)
Delete deletes all values of the given key from the LRU Cache.
func (*LRUPlanCache) DeleteAll ¶
func (l *LRUPlanCache) DeleteAll()
DeleteAll deletes all elements from the LRU Cache.
func (*LRUPlanCache) Get ¶
func (l *LRUPlanCache) Get(key string, paramTypes any) (value any, ok bool)
Get tries to find the corresponding value according to the given key.
func (*LRUPlanCache) MemoryUsage ¶
func (l *LRUPlanCache) MemoryUsage() (sum int64)
MemoryUsage returns the memory usage of LRUPlanCache.
func (*LRUPlanCache) Put ¶
func (l *LRUPlanCache) Put(key string, value, paramTypes any)
Put puts the (key, value) pair into the LRU Cache.
func (*LRUPlanCache) SetCapacity ¶
func (l *LRUPlanCache) SetCapacity(capacity uint) error
SetCapacity sets capacity of the cache.
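A hedged usage sketch of the LRUPlanCache methods listed above. The wrapper function, its parameters (an existing sessionctx.Context, a cache key, a cached plan value, and its parameter types), and the capacity/guard/quota arguments are all assumptions made for illustration; this shows the call pattern rather than a complete program.

    func cacheSketch(sctx sessionctx.Context, cacheKey string, cachedPlan, paramTypes any) {
        // capacity=100 entries, guard=0.1, quota=1MiB are placeholder values.
        cache := core.NewLRUPlanCache(100, 0.1, 1<<20, sctx, false)
        defer cache.Close()

        cache.Put(cacheKey, cachedPlan, paramTypes)
        if v, ok := cache.Get(cacheKey, paramTypes); ok {
            _ = v // reuse the cached value instead of re-planning
        }
    }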
type LineFieldsInfo ¶
type LineFieldsInfo struct {
FieldsTerminatedBy string
FieldsEnclosedBy string // length always <= 1, see parser.y
FieldsEscapedBy string // length always <= 1, see parser.y
FieldsOptEnclosed bool
LinesStartingBy string
LinesTerminatedBy string
}
LineFieldsInfo is used in load-data/select-into/index-advise statements.
func NewLineFieldsInfo ¶
func NewLineFieldsInfo(fieldsInfo *ast.FieldsClause, linesInfo *ast.LinesClause) LineFieldsInfo
NewLineFieldsInfo creates a new LineFieldsInfo from FIELDS/LINES info.
type LoadData ¶
type LoadData struct {
physicalop.SimpleSchemaProducer
FileLocRef ast.FileLocRefTp
OnDuplicate ast.OnDuplicateKeyHandlingType
Path string
Format *string
Table *resolve.TableNameW
Charset *string
Columns []*ast.ColumnName
FieldsInfo *ast.FieldsClause
LinesInfo *ast.LinesClause
IgnoreLines *uint64
ColumnAssignments []*ast.Assignment
ColumnsAndUserVars []*ast.ColumnNameOrUserVar
Options []*LoadDataOpt
GenCols physicalop.InsertGeneratedColumns
}
LoadData represents a loaddata plan.
type LoadDataOpt ¶
type LoadDataOpt struct {
// Name is the name of the option, converted to lower case during parse.
Name string
Value expression.Expression
}
LoadDataOpt represents load data option.
type LoadStats ¶
type LoadStats struct {
physicalop.SimpleSchemaProducer
Path string
}
LoadStats represents a load stats plan.
type LockStats ¶
type LockStats struct {
physicalop.SimpleSchemaProducer
Tables []*ast.TableName
}
LockStats represents a lock stats plan for tables.
type MetricSummaryTableExtractor ¶
type MetricSummaryTableExtractor struct {
// SkipRequest means the where clause is always false; we don't need to request any component.
SkipRequest bool
MetricsNames set.StringSet
Quantiles []float64
// contains filtered or unexported fields
}
MetricSummaryTableExtractor is used to extract some predicates of metrics_schema tables.
func (*MetricSummaryTableExtractor) ExplainInfo ¶
func (*MetricSummaryTableExtractor) ExplainInfo(_ base.PhysicalPlan) string
ExplainInfo implements base.MemTablePredicateExtractor interface.
func (*MetricSummaryTableExtractor) Extract ¶
func (e *MetricSummaryTableExtractor) Extract(ctx base.PlanContext, schema *expression.Schema, names []*types.FieldName, predicates []expression.Expression) (remained []expression.Expression)
Extract implements the MemTablePredicateExtractor Extract interface
type MetricTableExtractor ¶
type MetricTableExtractor struct {
// SkipRequest means the where clause is always false; we don't need to request any component.
SkipRequest bool
// StartTime represents the beginning time of metric data.
StartTime time.Time
// EndTime represents the ending time of metric data.
EndTime time.Time
// LabelConditions represents the label conditions of metric data.
LabelConditions map[string]set.StringSet
Quantiles []float64
// contains filtered or unexported fields
}
MetricTableExtractor is used to extract some predicates of metrics_schema tables.
func (*MetricTableExtractor) ExplainInfo ¶
func (e *MetricTableExtractor) ExplainInfo(pp base.PhysicalPlan) string
ExplainInfo implements the base.MemTablePredicateExtractor interface.
func (*MetricTableExtractor) Extract ¶
func (e *MetricTableExtractor) Extract(ctx base.PlanContext, schema *expression.Schema, names []*types.FieldName, predicates []expression.Expression) []expression.Expression
Extract implements the MemTablePredicateExtractor Extract interface
func (*MetricTableExtractor) GetMetricTablePromQL ¶
func (e *MetricTableExtractor) GetMetricTablePromQL(sctx base.PlanContext, lowerTableName string) string
GetMetricTablePromQL is used to get the PromQL of a metric table.
type OperatorLabel ¶
type OperatorLabel uint8
OperatorLabel provides some additional information besides the name; usually it describes the operator's relationship with its parent. It's useful for index join, apply, index lookup, cte and so on.
const (
	// Empty means OperatorLabel is meaningless for this operator.
	Empty OperatorLabel = iota
	// BuildSide means this operator is at the build side of its parent
	BuildSide
	// ProbeSide means this operator is at the probe side of its parent
	ProbeSide
	// SeedPart means this operator is the seed part of its parent (a cte)
	SeedPart
	// RecursivePart means this operator is the recursive part of its parent (a cte)
	RecursivePart
)
func (OperatorLabel) String ¶
func (d OperatorLabel) String() string
type OuterJoinEliminator ¶
type OuterJoinEliminator struct {
}
OuterJoinEliminator is used to eliminate outer join.
func (*OuterJoinEliminator) Name ¶
func (*OuterJoinEliminator) Name() string
Name implements base.LogicalOptRule.<1st> interface.
func (*OuterJoinEliminator) Optimize ¶
func (o *OuterJoinEliminator) Optimize(_ context.Context, p base.LogicalPlan) (base.LogicalPlan, bool, error)
Optimize implements base.LogicalOptRule.<0th> interface.
type PBPlanBuilder ¶
type PBPlanBuilder struct {
// contains filtered or unexported fields
}
PBPlanBuilder is used to build a physical plan from DAG protocol buffers.
func NewPBPlanBuilder ¶
func NewPBPlanBuilder(sctx base.PlanContext, is infoschema.InfoSchema, ranges []*coprocessor.KeyRange) *PBPlanBuilder
NewPBPlanBuilder creates a new pb plan builder.
func (*PBPlanBuilder) Build ¶
func (b *PBPlanBuilder) Build(executors []*tipb.Executor) (p base.PhysicalPlan, err error)
Build builds a physical plan from DAG protocol buffers.
type PPDSolver ¶
type PPDSolver struct{}
PPDSolver stands for Predicate Push Down.
type PauseDDLJobs ¶
type PauseDDLJobs struct {
physicalop.SimpleSchemaProducer
JobIDs []int64
}
PauseDDLJobs indicates a plan to pause the Running DDL Jobs.
type PhysicalSimpleWrapper ¶
type PhysicalSimpleWrapper struct {
physicalop.BasePhysicalPlan
Inner Simple
}
PhysicalSimpleWrapper is a wrapper of `Simple` to implement physical plan interface.
Used for simple statements executing in coprocessor.
func (*PhysicalSimpleWrapper) MemoryUsage ¶
func (p *PhysicalSimpleWrapper) MemoryUsage() (sum int64)
MemoryUsage returns the memory usage of PhysicalSimpleWrapper.
type PlanBuilder ¶
type PlanBuilder struct {
// contains filtered or unexported fields
}
PlanBuilder builds Plan from an ast.Node. It just builds the ast node straightforwardly.
func NewPlanBuilder ¶
func NewPlanBuilder(opts ...PlanBuilderOpt) *PlanBuilder
NewPlanBuilder creates a new PlanBuilder.
func (*PlanBuilder) BuildDataSourceFromView ¶
func (b *PlanBuilder) BuildDataSourceFromView(ctx context.Context, dbName ast.CIStr, tableInfo *model.TableInfo, qbNameMap4View map[string][]ast.HintTable, viewHints map[string][]*ast.TableOptimizerHint) (base.LogicalPlan, error)
BuildDataSourceFromView is used to build a base.LogicalPlan from a view. qbNameMap4View and viewHints are used for the view's hints: qbNameMap4View maps the query block name to the view table lists, and viewHints groups the view hints based on the view's query block name.
func (*PlanBuilder) GetIsForUpdateRead ¶
func (b *PlanBuilder) GetIsForUpdateRead() bool
GetIsForUpdateRead gets whether the PlanBuilder uses forUpdateRead.
func (*PlanBuilder) GetOptFlag ¶
func (b *PlanBuilder) GetOptFlag() uint64
GetOptFlag gets the OptFlag of the PlanBuilder.
func (*PlanBuilder) GetVisitInfo ¶
func (b *PlanBuilder) GetVisitInfo() []visitInfo
GetVisitInfo gets the visitInfo of the PlanBuilder.
func (*PlanBuilder) Init ¶
func (b *PlanBuilder) Init(sctx base.PlanContext, is infoschema.InfoSchema, processor *hint.QBHintHandler) (*PlanBuilder, []ast.HintTable)
Init initializes a PlanBuilder. It also returns the original PlannerSelectBlockAsName; callers decide whether PlannerSelectBlockAsName should be restored after using this builder. The common code pattern to use it is: NewPlanBuilder().Init(sctx, is, processor)
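A minimal sketch of that pattern, with hedging: the wrapper function and its parameters (an existing base.PlanContext, infoschema.InfoSchema, and *hint.QBHintHandler) are assumptions for illustration, and the hint import path is assumed to match the signature shown above.

    func buildWithPlanBuilder(sctx base.PlanContext, is infoschema.InfoSchema, processor *hint.QBHintHandler) {
        builder, origBlockNames := core.NewPlanBuilder().Init(sctx, is, processor)
        // ... build plans with builder ...
        _ = origBlockNames      // the caller decides whether to restore PlannerSelectBlockAsName afterwards
        builder.ResetForReuse() // optionally reset the builder before putting it back into a pool
    }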
func (*PlanBuilder) ResetForReuse ¶
func (b *PlanBuilder) ResetForReuse() *PlanBuilder
ResetForReuse resets the plan builder and puts it into the pool for reuse. After resetting, the object should be equal to an object returned by NewPlanBuilder().
func (*PlanBuilder) TableHints ¶
func (b *PlanBuilder) TableHints() *h.PlanHints
TableHints returns the *TableHintInfo of PlanBuilder.
type PlanBuilderOpt ¶
type PlanBuilderOpt interface {
Apply(builder *PlanBuilder)
}
PlanBuilderOpt is used to adjust the plan builder.
type PlanBuilderOptAllowCastArray ¶
type PlanBuilderOptAllowCastArray struct{}
PlanBuilderOptAllowCastArray means the plan builder should allow build cast(... as ... array).
func (PlanBuilderOptAllowCastArray) Apply ¶
func (PlanBuilderOptAllowCastArray) Apply(builder *PlanBuilder)
Apply implements the interface PlanBuilderOpt.
type PlanBuilderOptNoExecution ¶
type PlanBuilderOptNoExecution struct{}
PlanBuilderOptNoExecution means the plan builder should not run any executor during Build().
func (PlanBuilderOptNoExecution) Apply ¶
func (PlanBuilderOptNoExecution) Apply(builder *PlanBuilder)
Apply implements the interface PlanBuilderOpt.
type PlanCacheKeyEnableInstancePlanCache ¶
type PlanCacheKeyEnableInstancePlanCache struct{}
PlanCacheKeyEnableInstancePlanCache is only for test.
type PlanCacheKeyTestClone ¶
type PlanCacheKeyTestClone struct{}
PlanCacheKeyTestClone is only for test.
type PlanCacheKeyTestIssue43667 ¶
type PlanCacheKeyTestIssue43667 struct{}
PlanCacheKeyTestIssue43667 is only for test.
type PlanCacheKeyTestIssue46760 ¶
type PlanCacheKeyTestIssue46760 struct{}
PlanCacheKeyTestIssue46760 is only for test.
type PlanCacheKeyTestIssue47133 ¶
type PlanCacheKeyTestIssue47133 struct{}
PlanCacheKeyTestIssue47133 is only for test.
type PlanCacheStmt ¶
type PlanCacheStmt struct {
PreparedAst *ast.Prepared
ResolveCtx *resolve.Context
StmtDB string // which DB the statement will be processed over
VisitInfos []visitInfo
Params []ast.ParamMarkerExpr
PointGet PointGetExecutorCache
// below fields are for PointGet short path
SchemaVersion int64
// RelateVersion stores the true cache plan table schema version, since each table schema can be updated separately in transaction.
RelateVersion map[int64]uint64
StmtCacheable bool // Whether this stmt is cacheable.
UncacheableReason string // Why this stmt is uncacheable.
NormalizedSQL string
NormalizedPlan string
SQLDigest *parser.Digest
PlanDigest *parser.Digest
ForUpdateRead bool
SnapshotTSEvaluator func(context.Context, sessionctx.Context) (uint64, error)
BindingInfo bindinfo.BindingMatchInfo
// the difference between NormalizedSQL, NormalizedSQL4PC and StmtText:
// for the query `select * from t where a>1 and b<?`, then
// NormalizedSQL: select * from `t` where `a` > ? and `b` < ? --> constants are normalized to '?',
// NormalizedSQL4PC: select * from `test` . `t` where `a` > ? and `b` < ? --> schema name is added,
// StmtText: select * from t where a>1 and b <? --> just format the original query;
StmtText string
// contains filtered or unexported fields
}
PlanCacheStmt stores the prepared AST from PrepareExec and other related fields.
func GeneratePlanCacheStmtWithAST ¶
func GeneratePlanCacheStmtWithAST(ctx context.Context, sctx sessionctx.Context, isPrepStmt bool, paramSQL string, paramStmt ast.StmtNode, is infoschema.InfoSchema) (*PlanCacheStmt, base.Plan, int, error)
GeneratePlanCacheStmtWithAST generates the PlanCacheStmt structure for this AST. paramSQL is the corresponding parameterized sql like 'select * from t where a<? and b>?'. paramStmt is the Node of paramSQL.
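A hedged sketch of producing a PlanCacheStmt for a parameterized query. The wrapper function, its parameters (an existing sessionctx.Context and infoschema.InfoSchema), and the parser import path (github.com/pingcap/tidb/pkg/parser) are assumptions for illustration.

    func prepareForCache(ctx context.Context, sctx sessionctx.Context, is infoschema.InfoSchema) (*core.PlanCacheStmt, error) {
        paramSQL := "select * from t where a<? and b>?"
        p := parser.New()
        paramStmt, err := p.ParseOneStmt(paramSQL, "", "")
        if err != nil {
            return nil, err
        }
        // isPrepStmt=true marks this as coming from a PREPARE statement.
        stmt, _, _, err := core.GeneratePlanCacheStmtWithAST(ctx, sctx, true, paramSQL, paramStmt, is)
        if err != nil {
            return nil, err
        }
        return stmt, nil // fields such as stmt.NormalizedSQL and stmt.SQLDigest are now populated
    }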
func GetPreparedStmt ¶
func GetPreparedStmt(stmt *ast.ExecuteStmt, vars *variable.SessionVars) (*PlanCacheStmt, error)
GetPreparedStmt extracts the prepared statement from the execute statement.
type PlanCacheValue ¶
type PlanCacheValue struct {
// Meta Info, all are READ-ONLY once initialized.
SQLDigest string
SQLText string
StmtType string // select, update, insert, delete, etc.
ParseUser string // the user who parses/compiles this plan.
Binding string // the binding of this plan.
OptimizerEnvHash string // other environment information that might affect the plan like "time_zone", "sql_mode".
ParseValues string // the actual values used when parsing/compiling this plan.
PlanDigest string // digest of the plan, used to identify the plan in the cache.
BinaryPlan string // binary of this Plan, use tidb_decode_binary_plan to decode this.
Memory int64 // the memory usage of this plan, in bytes.
LoadTime time.Time // the time when this plan is loaded into the cache.
Plan base.Plan // READ-ONLY for Instance Cache, READ-WRITE for Session Cache.
OutputColumns types.NameSlice // output column names of this plan
ParamTypes []*types.FieldType // all parameters' types, different parameters may share same plan
StmtHints *hint.StmtHints // related hints of this plan, like 'max_execution_time'.
// contains filtered or unexported fields
}
PlanCacheValue stores the cached Statement and StmtNode.
func NewPlanCacheValue ¶
func NewPlanCacheValue(sctx sessionctx.Context, stmt *PlanCacheStmt, cacheKey string, binding string, plan base.Plan, names []*types.FieldName, paramTypes []*types.FieldType, stmtHints *hint.StmtHints) *PlanCacheValue
NewPlanCacheValue creates a PlanCacheValue.
func (*PlanCacheValue) MemoryUsage ¶
func (v *PlanCacheValue) MemoryUsage() (sum int64)
MemoryUsage returns the memory usage of PlanCacheValue.
func (*PlanCacheValue) RuntimeInfo ¶
func (v *PlanCacheValue) RuntimeInfo() (exec, procKeys, totKeys, sumLat int64, lastUsedTime time.Time)
RuntimeInfo returns the runtime information of the plan.
func (*PlanCacheValue) UpdateRuntimeInfo ¶
func (v *PlanCacheValue) UpdateRuntimeInfo(proKeys, totKeys, latency int64)
UpdateRuntimeInfo accumulates the runtime information of the plan.
type PlanReplayer ¶
type PlanReplayer struct {
physicalop.SimpleSchemaProducer
ExecStmt ast.StmtNode
Analyze bool
Load bool
File string
HistoricalStatsTS uint64
Capture bool
Remove bool
SQLDigest string
PlanDigest string
}
PlanReplayer represents a plan replayer plan.
type PointGetExecutorCache ¶
type PointGetExecutorCache struct {
ColumnInfos any
// Executor is only used for point get scene.
// Notice that we should only cache the PointGetExecutor that have a snapshot with MaxTS in it.
// If the current plan is not PointGet or does not use MaxTS optimization, this value should be nil here.
Executor any
// FastPlan is only used for instance plan cache.
// To ensure thread safety, we have to clone each plan before reusing it if using the instance plan cache.
// To reduce the memory allocation and increase performance, we cache the FastPlan here.
FastPlan *physicalop.PointGetPlan
}
PointGetExecutorCache caches the PointGetExecutor to further improve its performance. Don't forget to reset this executor when the prior plan is invalid.
type PointPlanVal ¶
PointPlanVal is used to store point plan that is pre-built for multi-statement query. Save the plan in a struct so even if the point plan is nil, we don't need to try again.
type Prepare ¶
type Prepare struct {
physicalop.SimpleSchemaProducer
Name string
SQLText string
}
Prepare represents a prepare plan.
type PreprocessOpt ¶
type PreprocessOpt func(*preprocessor)
PreprocessOpt presents optional parameters to `Preprocess` method.
func WithPreprocessorReturn ¶
func WithPreprocessorReturn(ret *PreprocessorReturn) PreprocessOpt
WithPreprocessorReturn returns a PreprocessOpt to initialize the PreprocessorReturn.
type PreprocessorReturn ¶
type PreprocessorReturn struct {
IsStaleness bool
SnapshotTSEvaluator func(context.Context, sessionctx.Context) (uint64, error)
// LastSnapshotTS is the last evaluated snapshotTS if any
// otherwise it defaults to zero
LastSnapshotTS uint64
InfoSchema infoschema.InfoSchema
// contains filtered or unexported fields
}
PreprocessorReturn is used to retain information obtained in the preprocessor.
type ProjectionEliminator ¶
type ProjectionEliminator struct {
}
ProjectionEliminator is for the update statement. Projection elimination in logical optimization has been forbidden. Projection elimination in post-optimization will optimize the projection under a projection, window, or agg (the condition is the same as in logical optimization).
func (*ProjectionEliminator) Name ¶
func (*ProjectionEliminator) Name() string
Name implements the logicalOptRule.<1st> interface.
func (*ProjectionEliminator) Optimize ¶
func (pe *ProjectionEliminator) Optimize(_ context.Context, lp base.LogicalPlan) (base.LogicalPlan, bool, error)
Optimize implements the logicalOptRule interface.
type PushDownSequenceSolver ¶
type PushDownSequenceSolver struct {
}
PushDownSequenceSolver is used to push down sequence.
func (*PushDownSequenceSolver) Name ¶
func (*PushDownSequenceSolver) Name() string
Name implements the base.LogicalOptRule.<1st> interface.
func (*PushDownSequenceSolver) Optimize ¶
func (pdss *PushDownSequenceSolver) Optimize(_ context.Context, lp base.LogicalPlan) (base.LogicalPlan, bool, error)
Optimize implements the base.LogicalOptRule.<0th> interface.
type PushDownTopNOptimizer ¶
type PushDownTopNOptimizer struct {
}
PushDownTopNOptimizer pushes down the topN or limit. In the future we will remove the limit from `requiredProperty` in CBO phase.
func (*PushDownTopNOptimizer) Name ¶
func (*PushDownTopNOptimizer) Name() string
Name implements the base.LogicalOptRule.<1st> interface.
func (*PushDownTopNOptimizer) Optimize ¶
func (*PushDownTopNOptimizer) Optimize(_ context.Context, p base.LogicalPlan) (base.LogicalPlan, bool, error)
Optimize implements the base.LogicalOptRule.<0th> interface.
type RecommendIndexPlan ¶
type RecommendIndexPlan struct {
physicalop.SimpleSchemaProducer
Action string
SQL string
AdviseID int64
Options []ast.RecommendIndexOption
}
RecommendIndexPlan represents a plan for recommend index stmt.
type RecoverIndex ¶
type RecoverIndex struct {
physicalop.SimpleSchemaProducer
Table *resolve.TableNameW
IndexName string
}
RecoverIndex is used for backfilling corrupted index data.
type ReloadExprPushdownBlacklist ¶
type ReloadExprPushdownBlacklist struct {
physicalop.SimpleSchemaProducer
}
ReloadExprPushdownBlacklist reloads the data from expr_pushdown_blacklist table.
type ReloadOptRuleBlacklist ¶
type ReloadOptRuleBlacklist struct {
physicalop.SimpleSchemaProducer
}
ReloadOptRuleBlacklist reloads the data from opt_rule_blacklist table.
type ResolveExpand ¶
type ResolveExpand struct {
}
ResolveExpand generates the Expand projection list when all the logical optimization is done.
func (*ResolveExpand) Name ¶
func (*ResolveExpand) Name() string
Name implements the base.LogicalOptRule.<1st> interface.
func (*ResolveExpand) Optimize ¶
func (*ResolveExpand) Optimize(_ context.Context, p base.LogicalPlan) (base.LogicalPlan, bool, error)
Optimize implements the base.LogicalOptRule.<0th> interface. By now, rollup syntax will build a LogicalExpand from bottom up. In LogicalExpand itself, its output schema consists of 3 parts:

    +---------------------------------------------------------------------+
    | child.output() + grouping sets columns + genCol(gid, gpos)          |
    +---------------------------------------------------------------------+

For example: select count(a) from t group by a, b+1, c with rollup

    Aggregation: (group by a, col#1, c, gid); aggFunc: count(a)
        |
        +------> Expand: schema[a, a', col#1, c, gid];
        |        L1-projection[a, a', col#1, c, 0],   L2-projection[a, a', col#1, null, 1],
        |        L3-projection[a, a', null, null, 2], L4-projection[a, null, null, null, 3]
        |
        +-------> Projection: a, a', b+1 as column#1, c
                  (upper required)   (grouping sets columns appended)

The Expand operator itself is much like a projection; the difference is that it has multiple projection lists, named leveled projections.
type ResultReorder ¶
type ResultReorder struct {
}
ResultReorder reorder query results. NOTE: it's not a common rule for all queries, it's specially implemented for a few customers.
Results of some queries are not ordered, for example:
create table t (a int); insert into t values (1), (2); select a from t;
In the case above, the result can be `1 2` or `2 1`, which is not ordered. This rule reorders results by modifying or injecting a Sort operator:
- iterate the plan from the root, and ignore all input-order operators (Sel/Proj/Limit);
- when meeting the first non-input-order operator:
  - if it's a Sort, update it by appending all output columns into its order-by list;
  - otherwise, inject a new Sort upon this operator.
func (*ResultReorder) Name ¶
func (*ResultReorder) Name() string
Name implements base.LogicalOptRule.<1st> interface.
func (*ResultReorder) Optimize ¶
func (rs *ResultReorder) Optimize(_ context.Context, lp base.LogicalPlan) (base.LogicalPlan, bool, error)
Optimize implements base.LogicalOptRule.<0th> interface.
type ResumeDDLJobs ¶
type ResumeDDLJobs struct {
physicalop.SimpleSchemaProducer
JobIDs []int64
}
ResumeDDLJobs indicates a plan to resume the Paused DDL Jobs.
type RuntimeFilterGenerator ¶
type RuntimeFilterGenerator struct {
// contains filtered or unexported fields
}
RuntimeFilterGenerator generates runtime filters for a plan; one plan has one generator.
func (*RuntimeFilterGenerator) GenerateRuntimeFilter ¶
func (generator *RuntimeFilterGenerator) GenerateRuntimeFilter(plan base.PhysicalPlan)
GenerateRuntimeFilter is the root method. It traverses the entire tree in preorder. It constructs an RF when encountering a hash join, and allocates the RF when encountering a table scan. It realizes the planning of RFs in the entire plan tree. For example:
PhysicalPlanTree:

        HashJoin
        /      \
  TableScan   ExchangeNode

- generateRuntimeFilter

        HashJoin (with RF1)
        /      \
  TableScan   ExchangeNode

- assignRuntimeFilter

        HashJoin (with RF1)
        /      \
  TableScan   ExchangeNode
  (assign RF1)
type SQLBindOpDetail ¶
type SQLBindOpDetail struct {
NormdOrigSQL string
BindSQL string
BindStmt ast.StmtNode
Db string
Charset string
Collation string
NewStatus string
Source string // Source indicate how this binding was created, eg: bindinfo.Manual or bindinfo.History
SQLDigest string
PlanDigest string
}
SQLBindOpDetail represents the detail of an operation on a single binding. Different SQLBindOpType use different fields in this struct.
type SQLBindOpType ¶
type SQLBindOpType int
SQLBindOpType represents the SQL bind type.
const (
	// OpSQLBindCreate represents the operation to create a SQL bind.
	OpSQLBindCreate SQLBindOpType = iota
	// OpSQLBindDrop represents the operation to drop a SQL bind.
	OpSQLBindDrop
	// OpFlushBindings is used to flush plan bindings.
	OpFlushBindings
	// OpCaptureBindings is used to capture plan bindings.
	OpCaptureBindings
	// OpReloadBindings is used to reload plan binding.
	OpReloadBindings
	// OpSetBindingStatus is used to set binding status.
	OpSetBindingStatus
	// OpSQLBindDropByDigest is used to drop SQL binds by digest
	OpSQLBindDropByDigest
	// OpSetBindingStatusByDigest represents the operation to set SQL binding status by sql digest.
	OpSetBindingStatusByDigest
)
type SQLBindPlan ¶
type SQLBindPlan struct {
physicalop.SimpleSchemaProducer
IsGlobal bool
SQLBindOp SQLBindOpType
Details []*SQLBindOpDetail
}
SQLBindPlan represents a plan for SQL bind. One SQLBindPlan can be either global or session, and can only contain one type of operation, but can contain multiple operations of that type.
type ScalarSubQueryExpr ¶
type ScalarSubQueryExpr struct {
expression.Constant
// contains filtered or unexported fields
}
ScalarSubQueryExpr is an expression placeholder for non-correlated scalar subqueries which can be evaluated during the optimizing phase. TODO: The methods related to evaluating the function will be revised in the next step.
func (*ScalarSubQueryExpr) CanonicalHashCode ¶
func (s *ScalarSubQueryExpr) CanonicalHashCode() []byte
CanonicalHashCode implements the Expression interface.
func (*ScalarSubQueryExpr) Clone ¶
func (s *ScalarSubQueryExpr) Clone() expression.Expression
Clone copies an expression totally.
func (*ScalarSubQueryExpr) ConstLevel ¶
func (*ScalarSubQueryExpr) ConstLevel() expression.ConstLevel
ConstLevel returns the const level for the expression
func (*ScalarSubQueryExpr) Decorrelate ¶
func (s *ScalarSubQueryExpr) Decorrelate(*expression.Schema) expression.Expression
Decorrelate implements the Expression interface.
func (*ScalarSubQueryExpr) Equal ¶
func (s *ScalarSubQueryExpr) Equal(_ expression.EvalContext, e expression.Expression) bool
Equal implements the Expression interface.
func (*ScalarSubQueryExpr) Equals ¶
func (s *ScalarSubQueryExpr) Equals(other any) bool
Equals implements the HashEquals.<1st> interface.
func (*ScalarSubQueryExpr) Eval ¶
func (s *ScalarSubQueryExpr) Eval(_ expression.EvalContext, _ chunk.Row) (types.Datum, error)
Eval implements the Expression interface.
func (*ScalarSubQueryExpr) EvalDecimal ¶
func (*ScalarSubQueryExpr) EvalDecimal(_ expression.EvalContext, _ chunk.Row) (val *types.MyDecimal, isNull bool, err error)
EvalDecimal returns the decimal representation of expression.
func (*ScalarSubQueryExpr) EvalDuration ¶
func (*ScalarSubQueryExpr) EvalDuration(_ expression.EvalContext, _ chunk.Row) (val types.Duration, isNull bool, err error)
EvalDuration returns the duration representation of expression.
func (*ScalarSubQueryExpr) EvalInt ¶
func (*ScalarSubQueryExpr) EvalInt(_ expression.EvalContext, _ chunk.Row) (val int64, isNull bool, err error)
EvalInt returns the int64 representation of expression.
func (*ScalarSubQueryExpr) EvalJSON ¶
func (*ScalarSubQueryExpr) EvalJSON(_ expression.EvalContext, _ chunk.Row) (val types.BinaryJSON, isNull bool, err error)
EvalJSON returns the JSON representation of expression.
func (*ScalarSubQueryExpr) EvalReal ¶
func (*ScalarSubQueryExpr) EvalReal(_ expression.EvalContext, _ chunk.Row) (val float64, isNull bool, err error)
EvalReal returns the float64 representation of expression.
func (*ScalarSubQueryExpr) EvalString ¶
func (*ScalarSubQueryExpr) EvalString(_ expression.EvalContext, _ chunk.Row) (val string, isNull bool, err error)
EvalString returns the string representation of expression.
func (*ScalarSubQueryExpr) EvalTime ¶
func (*ScalarSubQueryExpr) EvalTime(_ expression.EvalContext, _ chunk.Row) (val types.Time, isNull bool, err error)
EvalTime returns the DATE/DATETIME/TIMESTAMP representation of expression.
func (*ScalarSubQueryExpr) ExplainInfo ¶
func (s *ScalarSubQueryExpr) ExplainInfo(expression.EvalContext) string
ExplainInfo implements the Expression interface.
func (*ScalarSubQueryExpr) ExplainNormalizedInfo ¶
func (s *ScalarSubQueryExpr) ExplainNormalizedInfo() string
ExplainNormalizedInfo implements the Expression interface.
func (*ScalarSubQueryExpr) GetType ¶
func (s *ScalarSubQueryExpr) GetType(_ expression.EvalContext) *types.FieldType
GetType implements the Expression interface.
func (*ScalarSubQueryExpr) Hash64 ¶
func (s *ScalarSubQueryExpr) Hash64(h base2.Hasher)
Hash64 implements the HashEquals.<0th> interface.
func (*ScalarSubQueryExpr) HashCode ¶
func (s *ScalarSubQueryExpr) HashCode() []byte
HashCode implements the Expression interface.
func (*ScalarSubQueryExpr) IsCorrelated ¶
func (*ScalarSubQueryExpr) IsCorrelated() bool
IsCorrelated implements the Expression interface.
func (*ScalarSubQueryExpr) MemoryUsage ¶
func (s *ScalarSubQueryExpr) MemoryUsage() int64
MemoryUsage implements the Expression interface.
func (*ScalarSubQueryExpr) RemapColumn ¶
func (s *ScalarSubQueryExpr) RemapColumn(_ map[int64]*expression.Column) (expression.Expression, error)
RemapColumn implements the Expression interface.
func (*ScalarSubQueryExpr) ResolveIndices ¶
func (s *ScalarSubQueryExpr) ResolveIndices(_ *expression.Schema) (expression.Expression, error)
ResolveIndices implements the Expression interface.
func (*ScalarSubQueryExpr) ResolveIndicesByVirtualExpr ¶
func (s *ScalarSubQueryExpr) ResolveIndicesByVirtualExpr(_ expression.EvalContext, _ *expression.Schema) (expression.Expression, bool)
ResolveIndicesByVirtualExpr implements the Expression interface.
func (*ScalarSubQueryExpr) String ¶
func (s *ScalarSubQueryExpr) String() string
String implements the Stringer interface.
func (*ScalarSubQueryExpr) Traverse ¶
func (s *ScalarSubQueryExpr) Traverse(_ expression.TraverseAction) expression.Expression
Traverse implements the TraverseDown interface.
func (*ScalarSubQueryExpr) VecEvalDecimal ¶
func (*ScalarSubQueryExpr) VecEvalDecimal(_ expression.EvalContext, _ *chunk.Chunk, _ *chunk.Column) error
VecEvalDecimal evaluates this expression in a vectorized manner.
func (*ScalarSubQueryExpr) VecEvalDuration ¶
func (*ScalarSubQueryExpr) VecEvalDuration(_ expression.EvalContext, _ *chunk.Chunk, _ *chunk.Column) error
VecEvalDuration evaluates this expression in a vectorized manner.
func (*ScalarSubQueryExpr) VecEvalInt ¶
func (*ScalarSubQueryExpr) VecEvalInt(_ expression.EvalContext, _ *chunk.Chunk, _ *chunk.Column) error
VecEvalInt evaluates this expression in a vectorized manner.
func (*ScalarSubQueryExpr) VecEvalJSON ¶
func (*ScalarSubQueryExpr) VecEvalJSON(_ expression.EvalContext, _ *chunk.Chunk, _ *chunk.Column) error
VecEvalJSON evaluates this expression in a vectorized manner.
func (*ScalarSubQueryExpr) VecEvalReal ¶
func (*ScalarSubQueryExpr) VecEvalReal(_ expression.EvalContext, _ *chunk.Chunk, _ *chunk.Column) error
VecEvalReal evaluates this expression in a vectorized manner.
func (*ScalarSubQueryExpr) VecEvalString ¶
func (*ScalarSubQueryExpr) VecEvalString(_ expression.EvalContext, _ *chunk.Chunk, _ *chunk.Column) error
VecEvalString evaluates this expression in a vectorized manner.
func (*ScalarSubQueryExpr) VecEvalTime ¶
func (*ScalarSubQueryExpr) VecEvalTime(_ expression.EvalContext, _ *chunk.Chunk, _ *chunk.Column) error
VecEvalTime evaluates this expression in a vectorized manner.
func (*ScalarSubQueryExpr) Vectorized ¶
func (*ScalarSubQueryExpr) Vectorized() bool
Vectorized returns whether the expression can be vectorized.
type ScalarSubqueryEvalCtx ¶
ScalarSubqueryEvalCtx stores the plan for the subquery, used by ScalarSubQueryExpr.
func (*ScalarSubqueryEvalCtx) ExplainInfo ¶
func (ssctx *ScalarSubqueryEvalCtx) ExplainInfo() string
ExplainInfo implements the Plan interface.
func (ScalarSubqueryEvalCtx) Init ¶
func (p ScalarSubqueryEvalCtx) Init(ctx base.PlanContext, offset int) *ScalarSubqueryEvalCtx
Init initializes ScalarSubqueryEvalCtx
func (*ScalarSubqueryEvalCtx) Schema ¶
func (*ScalarSubqueryEvalCtx) Schema() *expression.Schema
Schema implements the Plan interface.
type SelectInto ¶
type SelectInto struct {
physicalop.SimpleSchemaProducer
TargetPlan base.Plan
IntoOpt *ast.SelectIntoOption
LineFieldsInfo
}
SelectInto represents a select-into plan.
type SemiJoinRewriter ¶
type SemiJoinRewriter struct {
}
SemiJoinRewriter rewrites semi join to inner join with aggregation. Note: This rewriter is only used for exists subquery. And it also requires the hint `SEMI_JOIN_REWRITE` or variable tidb_opt_enable_sem_join_rewrite to be set. For example:
select * from t where exists (select /*+ SEMI_JOIN_REWRITE() */ * from s where s.a = t.a);
will be rewritten to:
select * from t join (select a from s group by a) s on t.a = s.a;
func (*SemiJoinRewriter) Name ¶
func (*SemiJoinRewriter) Name() string
Name implements base.LogicalOptRule.<1st> interface.
func (*SemiJoinRewriter) Optimize ¶
func (smj *SemiJoinRewriter) Optimize(_ context.Context, p base.LogicalPlan) (base.LogicalPlan, bool, error)
Optimize implements base.LogicalOptRule.<0th> interface.
type Set ¶
type Set struct {
physicalop.SimpleSchemaProducer
VarAssigns []*expression.VarAssignment
}
Set represents a plan for set stmt.
type SetConfig ¶
type SetConfig struct {
physicalop.SimpleSchemaProducer
Type string
Instance string
Name string
Value expression.Expression
}
SetConfig represents a plan for set config stmt.
type ShowBaseExtractor ¶
ShowBaseExtractor is the definition of the base extractor for derived predicates.
func (*ShowBaseExtractor) ExplainInfo ¶
func (e *ShowBaseExtractor) ExplainInfo() string
ExplainInfo implements the base.ShowPredicateExtractor interface.
func (*ShowBaseExtractor) Extract ¶
func (e *ShowBaseExtractor) Extract() bool
Extract implements the ShowPredicateExtractor interface.
func (*ShowBaseExtractor) Field ¶
func (e *ShowBaseExtractor) Field() string
Field returns the variable `field` in ShowBaseExtractor.
func (*ShowBaseExtractor) FieldPatternLike ¶
func (e *ShowBaseExtractor) FieldPatternLike() collate.WildcardPattern
FieldPatternLike returns the compiled collate.WildcardPattern.
type ShowDDL ¶
type ShowDDL struct {
physicalop.SimpleSchemaProducer
}
ShowDDL is for showing DDL information.
type ShowDDLJobQueries ¶
type ShowDDLJobQueries struct {
physicalop.SimpleSchemaProducer
JobIDs []int64
}
ShowDDLJobQueries is for showing the SQL of DDL job queries.
type ShowDDLJobQueriesWithRange ¶
type ShowDDLJobQueriesWithRange struct {
physicalop.SimpleSchemaProducer
Limit uint64
Offset uint64
}
ShowDDLJobQueriesWithRange is for showing the SQL of DDL job queries with a specified limit and offset.
type ShowNextRowID ¶
type ShowNextRowID struct {
physicalop.SimpleSchemaProducer
TableName *ast.TableName
}
ShowNextRowID is for showing the next global row ID.
type ShowSlow ¶
type ShowSlow struct {
physicalop.SimpleSchemaProducer
*ast.ShowSlow
}
ShowSlow is for showing slow queries.
type Simple ¶
type Simple struct {
physicalop.SimpleSchemaProducer
Statement ast.StmtNode
// IsFromRemote indicates whether the statement comes from a remote TiDB instance in the cluster
// and is executing in the coprocessor.
// Used for `global kill`. See https://github.com/pingcap/tidb/blob/master/docs/design/2020-06-01-global-kill.md.
IsFromRemote bool
// StaleTxnStartTS is the StartTS used to build a stale-read transaction by the 'START TRANSACTION READ ONLY' statement.
StaleTxnStartTS uint64
ResolveCtx *resolve.Context
}
Simple represents a simple statement plan which doesn't need any optimization.
func (*Simple) MemoryUsage ¶
MemoryUsage returns the memory usage of Simple.
type SkewDistinctAggRewriter ¶
type SkewDistinctAggRewriter struct {
}
SkewDistinctAggRewriter rewrites a skewed group-by distinct aggregation into a two-level aggregation.
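As a hedged sketch of the idea only (hypothetical table t(a, b); the actual plan built by this rule may differ), a skewed distinct aggregation such as:
select a, count(distinct b) from t group by a;
is conceptually evaluated in two levels, first deduplicating (a, b) pairs and then counting:
select a, count(b) from (select a, b from t group by a, b) tmp group by a;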
func (*SkewDistinctAggRewriter) Name ¶
func (*SkewDistinctAggRewriter) Name() string
Name implements base.LogicalOptRule.<1st> interface.
func (*SkewDistinctAggRewriter) Optimize ¶
func (a *SkewDistinctAggRewriter) Optimize(ctx context.Context, p base.LogicalPlan) (base.LogicalPlan, bool, error)
Optimize implements base.LogicalOptRule.<0th> interface.
type SlowQueryExtractor ¶
type SlowQueryExtractor struct {
SkipRequest bool
TimeRanges []*TimeRange
// If Enable is true, the executor should use the time ranges to locate the slow-log files that need to be parsed.
// If Enable is false, the executor should keep the previous behavior and only parse the current slow-log file.
Enable bool
Desc bool
// contains filtered or unexported fields
}
SlowQueryExtractor is used to extract some predicates of the `slow_query` table.
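For example, a time-range predicate in a hypothetical query such as:
SELECT * FROM information_schema.slow_query WHERE time >= '2022-04-20 00:00:00' AND time <= '2022-04-21 00:00:00';
can be extracted into TimeRanges and used to decide which slow-log files need to be parsed.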
func (*SlowQueryExtractor) ExplainInfo ¶
func (e *SlowQueryExtractor) ExplainInfo(pp base.PhysicalPlan) string
ExplainInfo implements the base.MemTablePredicateExtractor interface.
func (*SlowQueryExtractor) Extract ¶
func (e *SlowQueryExtractor) Extract(ctx base.PlanContext, schema *expression.Schema, names []*types.FieldName, predicates []expression.Expression) []expression.Expression
Extract implements the MemTablePredicateExtractor Extract interface
type SplitRegion ¶
type SplitRegion struct {
physicalop.SimpleSchemaProducer
TableInfo *model.TableInfo
PartitionNames []ast.CIStr
IndexInfo *model.IndexInfo
Lower []types.Datum
Upper []types.Datum
Num int
ValueLists [][]types.Datum
}
SplitRegion represents a split regions plan.
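For reference (table, index, and values below are hypothetical), statements of the following forms produce this plan; the BETWEEN ... AND ... REGIONS form fills Lower, Upper, and Num, while the BY form fills ValueLists:
SPLIT TABLE t BETWEEN (0) AND (1000000) REGIONS 16;
SPLIT TABLE t INDEX idx_a BY ('a'), ('m'), ('z');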
type SplitRegionStatus ¶
type SplitRegionStatus struct {
physicalop.SimpleSchemaProducer
Table table.Table
IndexInfo *model.IndexInfo
}
SplitRegionStatus represents a split regions status plan.
type StatementsSummaryExtractor ¶
type StatementsSummaryExtractor struct {
// SkipRequest means the where clause is always false, so we don't need to request any component.
SkipRequest bool
// Digests represents the digests the query applies to; if no digest is specified, all digests should be included.
// e.g: SELECT * FROM STATEMENTS_SUMMARY WHERE digest='8019af26debae8aa7642c501dbc43212417b3fb14e6aec779f709976b7e521be'
Digests set.StringSet
// Coarse time range predicate extracted from the where clause as:
// SELECT ... WHERE summary_begin_time <= endTime AND summary_end_time >= startTime
//
// N.B. it is only used by v2, so the predicates must be kept unchanged when extracting the time range;
// otherwise it would affect correctness with v1.
CoarseTimeRange *TimeRange
// contains filtered or unexported fields
}
StatementsSummaryExtractor is used to extract some predicates of the statements summary table.
func (*StatementsSummaryExtractor) ExplainInfo ¶
func (e *StatementsSummaryExtractor) ExplainInfo(pp base.PhysicalPlan) string
ExplainInfo implements base.MemTablePredicateExtractor interface.
func (*StatementsSummaryExtractor) Extract ¶
func (e *StatementsSummaryExtractor) Extract(sctx base.PlanContext, schema *expression.Schema, names []*types.FieldName, predicates []expression.Expression) (remained []expression.Expression)
Extract implements the MemTablePredicateExtractor Extract interface
type TableStorageStatsExtractor ¶
type TableStorageStatsExtractor struct {
// SkipRequest means the where clause is always false, so we don't need to request any component.
SkipRequest bool
// TableSchema represents the table schemas the query applies to; if no schema is specified, disk usage for all tables should be included.
// e.g: SELECT * FROM information_schema.disk_usage WHERE table_schema in ('test', 'information_schema').
TableSchema set.StringSet
// TableName represents the table names the query applies to; if no table is specified, disk usage for all tables should be included.
// e.g: SELECT * FROM information_schema.disk_usage WHERE table in ('schemata', 'tables').
TableName set.StringSet
// contains filtered or unexported fields
}
TableStorageStatsExtractor is used to extract some predicates of `disk_usage`.
func (*TableStorageStatsExtractor) ExplainInfo ¶
func (e *TableStorageStatsExtractor) ExplainInfo(_ base.PhysicalPlan) string
ExplainInfo implements base.MemTablePredicateExtractor interface.
func (*TableStorageStatsExtractor) Extract ¶
func (e *TableStorageStatsExtractor) Extract(ctx base.PlanContext, schema *expression.Schema, names []*types.FieldName, predicates []expression.Expression) []expression.Expression
Extract implements the MemTablePredicateExtractor Extract interface.
type TiFlashSystemTableExtractor ¶
type TiFlashSystemTableExtractor struct {
// SkipRequest means the where clause is always false, so we don't need to request any component.
SkipRequest bool
// TiFlashInstances represents all TiFlash instances we should send requests to.
// e.g:
// 1. SELECT * FROM information_schema.<table_name> WHERE tiflash_instance='192.168.1.7:3930'
// 2. SELECT * FROM information_schema.<table_name> WHERE tiflash_instance in ('192.168.1.7:3930', '192.168.1.9:3930')
TiFlashInstances set.StringSet
// TiDBDatabases represents the TiDB databases the query applies to; if no database is specified, all TiDB databases should be included.
// e.g: SELECT * FROM information_schema.<table_name> WHERE tidb_database in ('test', 'test2').
TiDBDatabases string
// TiDBTables represents the TiDB tables the query applies to; if no table is specified, all TiDB tables should be included.
// e.g: SELECT * FROM information_schema.<table_name> WHERE tidb_table in ('t', 't2').
TiDBTables string
// contains filtered or unexported fields
}
TiFlashSystemTableExtractor is used to extract some predicates of the TiFlash system tables.
func (*TiFlashSystemTableExtractor) ExplainInfo ¶
func (e *TiFlashSystemTableExtractor) ExplainInfo(_ base.PhysicalPlan) string
ExplainInfo implements base.MemTablePredicateExtractor interface.
func (*TiFlashSystemTableExtractor) Extract ¶
func (e *TiFlashSystemTableExtractor) Extract(ctx base.PlanContext, schema *expression.Schema, names []*types.FieldName, predicates []expression.Expression) []expression.Expression
Extract implements the MemTablePredicateExtractor Extract interface
type TiKVRegionStatusExtractor ¶
type TiKVRegionStatusExtractor struct {
// contains filtered or unexported fields
}
TiKVRegionStatusExtractor is used to extract single-table region scan information from predicates.
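For example, table-ID predicates in hypothetical queries such as:
SELECT * FROM information_schema.tikv_region_status WHERE table_id = 11;
SELECT * FROM information_schema.tikv_region_status WHERE table_id IN (11, 22);
can be extracted, and GetTablesID then returns the collected table IDs.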
func (*TiKVRegionStatusExtractor) ExplainInfo ¶
func (e *TiKVRegionStatusExtractor) ExplainInfo(_ base.PhysicalPlan) string
ExplainInfo implements base.MemTablePredicateExtractor interface.
func (*TiKVRegionStatusExtractor) Extract ¶
func (e *TiKVRegionStatusExtractor) Extract(ctx base.PlanContext, schema *expression.Schema, names []*types.FieldName, predicates []expression.Expression) (remained []expression.Expression)
Extract implements the MemTablePredicateExtractor Extract interface
func (*TiKVRegionStatusExtractor) GetTablesID ¶
func (e *TiKVRegionStatusExtractor) GetTablesID() []int64
GetTablesID returns the extracted table IDs.
type TikvRegionPeersExtractor ¶
type TikvRegionPeersExtractor struct {
// SkipRequest means the where clause is always false, so we don't need to request any component.
SkipRequest bool
// RegionIDs/StoreIDs represent all region/store IDs we should filter in PD to reduce network IO.
// e.g:
// 1. SELECT * FROM tikv_region_peers WHERE region_id=1
// 2. SELECT * FROM tikv_region_peers WHERE table_id in (11, 22)
RegionIDs []uint64
StoreIDs []uint64
// contains filtered or unexported fields
}
TikvRegionPeersExtractor is used to extract some predicates of the `tikv_region_peers` cluster table.
func (*TikvRegionPeersExtractor) ExplainInfo ¶
func (e *TikvRegionPeersExtractor) ExplainInfo(_ base.PhysicalPlan) string
ExplainInfo implements base.MemTablePredicateExtractor interface.
func (*TikvRegionPeersExtractor) Extract ¶
func (e *TikvRegionPeersExtractor) Extract(ctx base.PlanContext, schema *expression.Schema, names []*types.FieldName, predicates []expression.Expression) []expression.Expression
Extract implements the MemTablePredicateExtractor Extract interface
type Trace ¶
type Trace struct {
physicalop.SimpleSchemaProducer
StmtNode ast.StmtNode
ResolveCtx *resolve.Context
Format string
// OptimizerTrace indicates the `trace plan target = 'xxx' <statement>` case.
OptimizerTrace bool
OptimizerTraceTarget string
}
Trace represents a trace plan.
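As a hedged illustration (the traced statement below is hypothetical), both the plain and the optimizer-trace forms map to this plan:
TRACE FORMAT='row' SELECT * FROM t;
TRACE PLAN TARGET='estimation' SELECT * FROM t;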
type Traffic ¶
type Traffic struct {
physicalop.SimpleSchemaProducer
OpType ast.TrafficOpType
Options []*ast.TrafficOption
Dir string
}
Traffic represents a traffic plan.
type UnlockStats ¶
type UnlockStats struct {
physicalop.SimpleSchemaProducer
Tables []*ast.TableName
}
UnlockStats represents an unlock-stats plan for tables.
type V2AnalyzeOptions ¶
type V2AnalyzeOptions struct {
PhyTableID int64
RawOpts map[ast.AnalyzeOptionType]uint64
FilledOpts map[ast.AnalyzeOptionType]uint64
ColChoice ast.ColumnChoice
ColumnList []*model.ColumnInfo
IsPartition bool
}
V2AnalyzeOptions is used to hold analyze options information.
type WindowFuncExtractor ¶
type WindowFuncExtractor struct {
// contains filtered or unexported fields
}
WindowFuncExtractor visits the Expr tree. It converts ColumnNameExpr into WindowFuncExpr and collects WindowFuncExprs.
type WorkloadRepoCreate ¶
type WorkloadRepoCreate struct {
physicalop.SimpleSchemaProducer
}
WorkloadRepoCreate is the plan for the `admin create workload snapshot` statement.
Source Files ¶
- access_object.go
- columnar_index_utils.go
- common_plans.go
- core_init.go
- encode.go
- exhaust_physical_plans.go
- expression_codec_fn.go
- expression_rewriter.go
- find_best_task.go
- flat_plan.go
- hint_utils.go
- index_join_path.go
- indexmerge_path.go
- indexmerge_unfinished_path.go
- initialize.go
- logical_initialize.go
- logical_plan_builder.go
- memtable_infoschema_extractor.go
- memtable_predicate_extractor.go
- optimizer.go
- pb_to_plan.go
- plan.go
- plan_cache.go
- plan_cache_instance.go
- plan_cache_lru.go
- plan_cache_param.go
- plan_cache_rebuild.go
- plan_cache_utils.go
- plan_cacheable_checker.go
- plan_clone_utils.go
- plan_cost_ver1.go
- plan_cost_ver2.go
- planbuilder.go
- point_get_plan.go
- preprocess.go
- property_cols_prune.go
- recheck_cte.go
- resolve_indices.go
- rule_aggregation_elimination.go
- rule_aggregation_push_down.go
- rule_aggregation_skew_rewrite.go
- rule_decorrelate.go
- rule_derive_topn_from_window.go
- rule_eliminate_empty_selection.go
- rule_eliminate_projection.go
- rule_eliminate_unionall_dual_item.go
- rule_generate_column_substitute.go
- rule_inject_extra_projection.go
- rule_join_elimination.go
- rule_join_reorder.go
- rule_join_reorder_dp.go
- rule_join_reorder_greedy.go
- rule_outer_to_inner_join.go
- rule_predicate_push_down.go
- rule_push_down_sequence.go
- rule_resolve_grouping_expand.go
- rule_result_reorder.go
- rule_semi_join_rewrite.go
- rule_topn_push_down.go
- runtime_filter_generator.go
- scalar_subq_expression.go
- show_predicate_extractor.go
- stats.go
- stringer.go
- task.go
- telemetry.go
- trace.go
- util.go
Directories ¶
| Path | Synopsis |
|---|---|
| generator | |
| hash64_equals | command |
| plan_cache | command |
| shallow_ref | command |
| operator | |
| resolve | Package resolve is used for semantic resolve of the AST tree. |