Documentation ¶
Index ¶
- Constants
- Variables
- func ConvertCheckDefToConstraint(ctx *sql.Context, check *sql.CheckDefinition) (*sql.CheckConstraint, error)
- func DeepCopyNode(node sql.Node) (sql.Node, error)
- func DefaultRuleSelector(id RuleId) bool
- func FixFieldIndexes(scope *Scope, a *Analyzer, schema sql.Schema, exp sql.Expression) (sql.Expression, transform.TreeIdentity, error)
- func FixFieldIndexesForExpressions(a *Analyzer, node sql.Node, scope *Scope) (sql.Node, transform.TreeIdentity, error)
- func FixFieldIndexesForNode(a *Analyzer, scope *Scope, n sql.Node) (sql.Node, transform.TreeIdentity, error)
- func FixFieldIndexesForTableNode(ctx *sql.Context, a *Analyzer, node sql.Node, scope *Scope) (sql.Node, transform.TreeIdentity, error)
- func FixFieldIndexesOnExpressions(scope *Scope, a *Analyzer, schema sql.Schema, expressions ...sql.Expression) ([]sql.Expression, transform.TreeIdentity, error)
- func GetTransactionDatabase(ctx *sql.Context, parsed sql.Node) string
- func NewDatabaseProvider(dbs ...sql.Database) sql.DatabaseProvider
- func SelectAllBatches(string) bool
- func SetPreparedStmts(v bool)
- func StripPassthroughNodes(n sql.Node) sql.Node
- type Analyzer
- func (a *Analyzer) Analyze(ctx *sql.Context, n sql.Node, scope *Scope) (sql.Node, error)
- func (a *Analyzer) AnalyzePrepared(ctx *sql.Context, n sql.Node, scope *Scope) (sql.Node, transform.TreeIdentity, error)
- func (a *Analyzer) Log(msg string, args ...interface{})
- func (a *Analyzer) LogDiff(prev, next sql.Node)
- func (a *Analyzer) LogNode(n sql.Node)
- func (a *Analyzer) PopDebugContext()
- func (a *Analyzer) PrepareQuery(ctx *sql.Context, n sql.Node, scope *Scope) (sql.Node, error)
- func (a *Analyzer) PushDebugContext(msg string)
- type Batch
- type BatchSelector
- type Builder
- func (ab *Builder) AddPostAnalyzeRule(id RuleId, fn RuleFunc) *Builder
- func (ab *Builder) AddPostValidationRule(id RuleId, fn RuleFunc) *Builder
- func (ab *Builder) AddPreAnalyzeRule(id RuleId, fn RuleFunc) *Builder
- func (ab *Builder) AddPreValidationRule(id RuleId, fn RuleFunc) *Builder
- func (ab *Builder) Build() *Analyzer
- func (ab *Builder) RemoveAfterAllRule(id RuleId) *Builder
- func (ab *Builder) RemoveDefaultRule(id RuleId) *Builder
- func (ab *Builder) RemoveOnceAfterRule(id RuleId) *Builder
- func (ab *Builder) RemoveOnceBeforeRule(id RuleId) *Builder
- func (ab *Builder) RemoveValidationRule(id RuleId) *Builder
- func (ab *Builder) WithDebug() *Builder
- func (ab *Builder) WithParallelism(parallelism int) *Builder
- type Carder
- type Catalog
- func (c *Catalog) AllDatabases(ctx *sql.Context) []sql.Database
- func (c *Catalog) CreateDatabase(ctx *sql.Context, dbName string, collation sql.CollationID) error
- func (c *Catalog) Database(ctx *sql.Context, db string) (sql.Database, error)
- func (c *Catalog) ExternalStoredProcedure(ctx *sql.Context, name string, numOfParams int) (*sql.ExternalStoredProcedureDetails, error)
- func (c *Catalog) ExternalStoredProcedures(ctx *sql.Context, name string) ([]sql.ExternalStoredProcedureDetails, error)
- func (c *Catalog) Function(ctx *sql.Context, name string) (sql.Function, error)
- func (c *Catalog) HasDB(ctx *sql.Context, db string) bool
- func (c *Catalog) LockTable(ctx *sql.Context, table string)
- func (c *Catalog) RegisterFunction(ctx *sql.Context, fns ...sql.Function)
- func (c *Catalog) RemoveDatabase(ctx *sql.Context, dbName string) error
- func (c *Catalog) Statistics(ctx *sql.Context) (sql.StatsReadWriter, error)
- func (c *Catalog) Table(ctx *sql.Context, dbName, tableName string) (sql.Table, sql.Database, error)
- func (c *Catalog) TableAsOf(ctx *sql.Context, dbName, tableName string, asOf interface{}) (sql.Table, sql.Database, error)
- func (c *Catalog) TableFunction(ctx *sql.Context, name string) (sql.TableFunction, error)
- func (c *Catalog) UnlockTables(ctx *sql.Context, id uint32) error
- type Coster
- type ExecBuilder
- type GroupId
- type Hint
- type HintType
- type Memo
- type ProcedureCache
- type RoutineTable
- type Rule
- type RuleFunc
- type RuleId
- type RuleSelector
- type Scope
- func (s *Scope) EnforcesReadOnly() bool
- func (s *Scope) InnerToOuter() []sql.Node
- func (s *Scope) IsEmpty() bool
- func (s *Scope) MemoNodes() []sql.Node
- func (s *Scope) OuterRelUnresolved() bool
- func (s *Scope) OuterToInner() []sql.Node
- func (s *Scope) RecursionDepth() int
- func (s *Scope) Schema() sql.Schema
- type TableAliases
- type TableId
Constants ¶
const MaxBytePrefix = 3072
Variables ¶
var (
	// ParallelQueryCounter describes a metric that accumulates
	// number of parallel queries monotonically.
	ParallelQueryCounter = discard.NewCounter()

	SingleThreadFeatureFlag = false
)
var (
	// ErrFieldMissing is returned when the field is not on the schema.
	ErrFieldMissing = errors.NewKind("field %q is not on schema")
	// ErrOrderByColumnIndex is returned when in an order clause there is a
	// column that is unknown.
	ErrOrderByColumnIndex = errors.NewKind("unknown column %d in order by clause")
)
var DefaultRules = []Rule{
{resolveNaturalJoinsId, resolveNaturalJoins},
{qualifyColumnsId, qualifyColumns},
{resolveOrderbyLiteralsId, resolveOrderByLiterals},
{resolveFunctionsId, resolveFunctions},
{validateStarExpressionsId, validateStarExpressions},
{replaceCountStarId, replaceCountStar},
{flattenTableAliasesId, flattenTableAliases},
{pushdownSortId, pushdownSort},
{pushdownGroupbyAliasesId, pushdownGroupByAliases},
{pushdownSubqueryAliasFiltersId, pushdownSubqueryAliasFilters},
{pruneTablesId, pruneTables},
{resolveColumnsId, resolveColumns},
{validateCheckConstraintId, validateCheckConstraints},
{expandStarsId, expandStars},
{transposeRightJoinsId, transposeRightJoins},
{resolveHavingId, resolveHaving},
{mergeUnionSchemasId, mergeUnionSchemas},
{flattenAggregationExprsId, flattenAggregationExpressions},
{reorderProjectionId, reorderProjection},
{resolveSubqueriesId, resolveSubqueries},
{resolveBarewordSetVariablesId, resolveBarewordSetVariables},
{replaceCrossJoinsId, replaceCrossJoins},
{moveJoinCondsToFilterId, moveJoinConditionsToFilter},
{evalFilterId, simplifyFilters},
{optimizeDistinctId, optimizeDistinct},
}
DefaultRules to apply when analyzing nodes.
var DefaultValidationRules = []Rule{
{validateResolvedId, validateIsResolved},
{validateOrderById, validateOrderBy},
{validateGroupById, validateGroupBy},
{validateSchemaSourceId, validateSchemaSource},
{validateIndexCreationId, validateIndexCreation},
{validateOperandsId, validateOperands},
{validateIntervalUsageId, validateIntervalUsage},
{validateSubqueryColumnsId, validateSubqueryColumns},
{validateUnionSchemasMatchId, validateUnionSchemasMatch},
{validateAggregationsId, validateAggregations},
}
DefaultValidationRules to apply while analyzing nodes.
var ErrInAnalysis = errors.NewKind("error in analysis: %s")
ErrInAnalysis is thrown for generic analyzer errors
var ErrInvalidNodeType = errors.NewKind("%s: invalid node of type: %T")
ErrInvalidNodeType is thrown when the analyzer can't handle a particular kind of node type
var ErrMaxAnalysisIters = errors.NewKind("exceeded max analysis iterations (%d)")
ErrMaxAnalysisIters is thrown when the analysis iterations are exceeded
var (
	// ErrUnionSchemasDifferentLength is returned when the two sides of a
	// UNION do not have the same number of columns in their schemas.
	ErrUnionSchemasDifferentLength = errors.NewKind(
		"cannot union two queries whose schemas are different lengths; left has %d column(s) right has %d column(s).",
	)
)
var ExprDefs support.GenDefs = []support.MemoDef{
	{Name: "crossJoin", IsJoin: true},
	{Name: "innerJoin", IsJoin: true},
	{Name: "leftJoin", IsJoin: true},
	{Name: "semiJoin", IsJoin: true},
	{Name: "antiJoin", IsJoin: true},
	{Name: "lookupJoin", IsJoin: true, Attrs: [][2]string{{"lookup", "*lookup"}}},
	{Name: "concatJoin", IsJoin: true, Attrs: [][2]string{{"concat", "[]*lookup"}}},
	{Name: "hashJoin", IsJoin: true, Attrs: [][2]string{{"innerAttrs", "[]sql.Expression"}, {"outerAttrs", "[]sql.Expression"}}},
	{Name: "mergeJoin", IsJoin: true, Attrs: [][2]string{{"innerScan", "*indexScan"}, {"outerScan", "*indexScan"}}},
	{Name: "fullOuterJoin", IsJoin: true},
	{Name: "tableScan", SourceType: "*plan.ResolvedTable"},
	{Name: "values", SourceType: "*plan.ValueDerivedTable"},
	{Name: "tableAlias", SourceType: "*plan.TableAlias"},
	{Name: "recursiveTable", SourceType: "*plan.RecursiveTable"},
	{Name: "recursiveCte", SourceType: "*plan.RecursiveCte"},
	{Name: "subqueryAlias", SourceType: "*plan.SubqueryAlias"},
	{Name: "max1Row", SourceType: "sql.NameableNode"},
	{Name: "tableFunc", SourceType: "sql.TableFunction"},
	{Name: "emptyTable", SourceType: "*plan.EmptyTable"},
	{Name: "project", IsUnary: true, Attrs: [][2]string{{"projections", "[]sql.Expression"}}},
	{Name: "distinct", IsUnary: true, SkipExec: true},
}
var OnceAfterAll = []Rule{
	{cacheSubqueryResultsId, cacheSubqueryResults},
	{cacheSubqueryAliasesInJoinsId, cacheSubqueryAliasesInJoins},
	{AutocommitId, addAutocommitNode},
	{TrackProcessId, trackProcess},
	{parallelizeId, parallelize},
	{clearWarningsId, clearWarnings},
}
OnceAfterAll contains the rules to be applied just once after all other rules have been applied.
var OnceAfterDefault = []Rule{
{hoistOutOfScopeFiltersId, hoistOutOfScopeFilters},
{transformJoinApplyId, transformJoinApply},
{hoistSelectExistsId, hoistSelectExists},
{finalizeUnionsId, finalizeUnions},
{loadTriggersId, loadTriggers},
{loadEventsId, loadEvents},
{processTruncateId, processTruncate},
{removeUnnecessaryConvertsId, removeUnnecessaryConverts},
{stripTableNameInDefaultsId, stripTableNamesFromColumnDefaults},
{foldEmptyJoinsId, foldEmptyJoins},
{optimizeJoinsId, constructJoinPlan},
{pushdownFiltersId, pushdownFilters},
{pruneColumnsId, pruneColumns},
{finalizeSubqueriesId, finalizeSubqueries},
{subqueryIndexesId, applyIndexesFromOuterScope},
{replaceSortPkId, replacePkSort},
{setJoinScopeLenId, setJoinScopeLen},
{eraseProjectionId, eraseProjection},
{insertTopNId, insertTopNNodes},
{applyHashInId, applyHashIn},
{resolveInsertRowsId, resolveInsertRows},
{resolvePreparedInsertId, resolvePreparedInsert},
{applyTriggersId, applyTriggers},
{applyProceduresId, applyProcedures},
{assignRoutinesId, assignRoutines},
{modifyUpdateExprsForJoinId, modifyUpdateExpressionsForJoin},
{applyRowUpdateAccumulatorsId, applyUpdateAccumulators},
{wrapWithRollbackId, wrapWritesWithRollback},
{applyFKsId, applyForeignKeys},
}
OnceAfterDefault contains the rules to be applied just once after the DefaultRules.
var OnceBeforeDefault = []Rule{
{applyDefaultSelectLimitId, applyDefaultSelectLimit},
{applyBinlogReplicaControllerId, applyBinlogReplicaController},
{validateOffsetAndLimitId, validateLimitAndOffset},
{validateCreateTableId, validateCreateTable},
{validateExprSemId, validateExprSem},
{resolveVariablesId, resolveVariables},
{resolveNamedWindowsId, replaceNamedWindows},
{resolveSetVariablesId, resolveSetVariables},
{resolveViewsId, resolveViews},
{liftCtesId, hoistCommonTableExpressions},
{resolveCtesId, resolveCommonTableExpressions},
{liftRecursiveCtesId, hoistRecursiveCte},
{validateCreateProcedureId, validateCreateProcedure},
{resolveDatabasesId, resolveDatabases},
{resolveTablesId, resolveTables},
{reresolveTablesId, reresolveTables},
{setInsertColumnsId, setInsertColumns},
{setTargetSchemasId, setTargetSchemas},
{loadCheckConstraintsId, loadChecks},
{resolveAlterColumnId, resolveAlterColumn},
{validateDropTablesId, validateDropTables},
{resolveCreateLikeId, resolveCreateLike},
{resolveAnalyzeTablesId, resolveAnalyzeTables},
{assignCatalogId, assignCatalog},
{parseColumnDefaultsId, parseColumnDefaults},
{resolveDropConstraintId, resolveDropConstraint},
{validateDropConstraintId, validateDropConstraint},
{resolveCreateSelectId, resolveCreateSelect},
{resolveSubqueriesId, resolveSubqueries},
{setViewTargetSchemaId, setViewTargetSchema},
{resolveUnionsId, resolveUnions},
{resolveDescribeQueryId, resolveDescribeQuery},
{disambiguateTableFunctionsId, disambiguateTableFunctions},
{checkUniqueTableNamesId, validateUniqueTableNames},
{resolveTableFunctionsId, resolveTableFunctions},
{resolveDeclarationsId, resolveDeclarations},
{validateCreateTriggerId, validateCreateTrigger},
{loadInfoSchemaId, loadInfoSchema},
{resolveColumnDefaultsId, resolveColumnDefaults},
{validateColumnDefaultsId, validateColumnDefaults},
{validateReadOnlyDatabaseId, validateReadOnlyDatabase},
{validateReadOnlyTransactionId, validateReadOnlyTransaction},
{validateDatabaseSetId, validateDatabaseSet},
{validateDeleteFromId, validateDeleteFrom},
{validatePrivilegesId, validatePrivileges},
}
OnceBeforeDefault contains the rules to be applied just once before the DefaultRules.
var PreparedStmtDisabled bool
Functions ¶
func ConvertCheckDefToConstraint ¶ added in v0.12.0
func ConvertCheckDefToConstraint(ctx *sql.Context, check *sql.CheckDefinition) (*sql.CheckConstraint, error)
func DefaultRuleSelector ¶ added in v0.12.0
func FixFieldIndexes ¶
func FixFieldIndexes(scope *Scope, a *Analyzer, schema sql.Schema, exp sql.Expression) (sql.Expression, transform.TreeIdentity, error)
FixFieldIndexes transforms the given expression by correcting the indexes of columns in GetField expressions, according to the schema given. Used when combining multiple tables together into a single join result, or when otherwise changing / combining schemas in the node tree.
func FixFieldIndexesForExpressions ¶
func FixFieldIndexesForExpressions(a *Analyzer, node sql.Node, scope *Scope) (sql.Node, transform.TreeIdentity, error)
FixFieldIndexesForExpressions transforms the expressions in the Node given, fixing the field indexes.
func FixFieldIndexesForNode ¶ added in v0.15.0
func FixFieldIndexesForTableNode ¶
func FixFieldIndexesForTableNode(ctx *sql.Context, a *Analyzer, node sql.Node, scope *Scope) (sql.Node, transform.TreeIdentity, error)
FixFieldIndexesForTableNode transforms the expressions in the Node given, fixing the field indexes. This is useful for Table nodes that have expressions but no children.
func FixFieldIndexesOnExpressions ¶
func FixFieldIndexesOnExpressions(scope *Scope, a *Analyzer, schema sql.Schema, expressions ...sql.Expression) ([]sql.Expression, transform.TreeIdentity, error)
FixFieldIndexesOnExpressions executes FixFieldIndexes on a list of exprs.
func GetTransactionDatabase ¶ added in v0.12.0
GetTransactionDatabase returns the name of the database that should be considered current for the transaction about to begin. The database is not guaranteed to exist. For USE DATABASE statements, we consider the transaction database to be the one being USEd
func NewDatabaseProvider ¶ added in v0.11.0
func NewDatabaseProvider(dbs ...sql.Database) sql.DatabaseProvider
TODO: kill this
func SelectAllBatches ¶ added in v0.12.0
func SetPreparedStmts ¶ added in v0.12.0
func SetPreparedStmts(v bool)
func StripPassthroughNodes ¶ added in v0.12.0
StripPassthroughNodes strips all top-level passthrough nodes meant to apply only to top-level queries (query tracking, transaction logic, etc.) from the node tree given and returns the first non-passthrough child element. This is useful when we invoke the analyzer recursively, e.g. when analyzing subqueries or triggers. TODO: instead of stripping this node off after analysis, it would be better to just not add it in the first place.
Types ¶
type Analyzer ¶
type Analyzer struct {
	// Whether to log various debugging messages
	Debug bool
	// Whether to output the query plan at each step of the analyzer
	Verbose bool

	Parallelism int
	// Batches of Rules to apply.
	Batches []*Batch
	// Catalog of databases and registered functions.
	Catalog *Catalog
	// BinlogReplicaController holds an optional controller that receives forwarded binlog
	// replication messages (e.g. "start replica").
	BinlogReplicaController binlogreplication.BinlogReplicaController
	// Carder estimates the number of rows returned by a relational expression.
	Carder Carder
	// Coster estimates the incremental CPU+memory cost for execution operators.
	Coster Coster
	// ExecBuilder converts a sql.Node tree into an executable iterator.
	ExecBuilder sql.NodeExecBuilder
	// contains filtered or unexported fields
}
Analyzer analyzes nodes of the execution plan and applies rules and validations to them.
func NewDefault ¶
func NewDefault(provider sql.DatabaseProvider) *Analyzer
NewDefault creates a default Analyzer instance with all default Rules and configuration. To add custom rules, the easiest way is to use the Builder.
func (*Analyzer) Analyze ¶
Analyze applies the transformation rules to the node given. In the case of an error, the last successfully transformed node is returned along with the error.
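A minimal end-to-end sketch of this flow. It assumes the companion memory and sql/parse packages from go-mysql-server (neither is part of this package), builds a default Analyzer over a single in-memory database, and analyzes a parsed query with a nil Scope, which is used for top-level queries:

package main

import (
	"fmt"

	"github.com/dolthub/go-mysql-server/memory"
	"github.com/dolthub/go-mysql-server/sql"
	"github.com/dolthub/go-mysql-server/sql/analyzer"
	"github.com/dolthub/go-mysql-server/sql/parse"
)

func main() {
	// Assumed helper from the memory package: an empty in-memory database.
	db := memory.NewDatabase("mydb")
	a := analyzer.NewDefault(analyzer.NewDatabaseProvider(db))

	ctx := sql.NewEmptyContext()
	ctx.SetCurrentDatabase("mydb")

	// parse.Parse is assumed to return the unresolved plan for the query text.
	node, err := parse.Parse(ctx, "SELECT 1")
	if err != nil {
		panic(err)
	}

	// A nil scope analyzes the node as a top-level query.
	analyzed, err := a.Analyze(ctx, node, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(analyzed.String())
}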
func (*Analyzer) AnalyzePrepared ¶ added in v0.12.0
func (a *Analyzer) AnalyzePrepared(ctx *sql.Context, n sql.Node, scope *Scope) (sql.Node, transform.TreeIdentity, error)
AnalyzePrepared runs a partial rule set against a previously analyzed plan.
func (*Analyzer) Log ¶
Log prints an INFO message to stdout with the given message and args if the analyzer is in debug mode.
func (*Analyzer) LogDiff ¶
LogDiff logs the diff between the query plans after a transformation rule has been applied. It can only print a diff when the string representations of the nodes differ, which isn't always the case.
func (*Analyzer) PopDebugContext ¶
func (a *Analyzer) PopDebugContext()
PopDebugContext pops a context message off the context stack.
func (*Analyzer) PrepareQuery ¶ added in v0.12.0
PrepareQuery applies a partial set of transformations to a prepared plan.
func (*Analyzer) PushDebugContext ¶
PushDebugContext pushes the given context string onto the context stack, to use when logging debug messages.
type Batch ¶
Batch executes a set of rules a specific number of times. When that number of iterations is reached, the current node and ErrMaxAnalysisIters are returned.
func (*Batch) Eval ¶
func (b *Batch) Eval(ctx *sql.Context, a *Analyzer, n sql.Node, scope *Scope, sel RuleSelector) (sql.Node, transform.TreeIdentity, error)
Eval executes the rules of the batch. On any error, the partially transformed node is returned along with the error. If the batch's max number of iterations is reached without achieving stabilization (batch evaluation no longer changes the node), then this method returns ErrMaxAnalysisIters.
func (*Batch) EvalWithSelector ¶ added in v0.12.0
type BatchSelector ¶ added in v0.12.0
BatchSelector filters analysis batches by name
type Builder ¶
type Builder struct {
// contains filtered or unexported fields
}
Builder provides an easy way to generate Analyzer with custom rules and options.
func NewBuilder ¶
func NewBuilder(pro sql.DatabaseProvider) *Builder
NewBuilder creates a new Builder from a specific catalog. This builder allows us to add custom Rules and modify some internal properties.
func (*Builder) AddPostAnalyzeRule ¶
AddPostAnalyzeRule adds a new rule to the analyzer after standard analyzer rules.
func (*Builder) AddPostValidationRule ¶
AddPostValidationRule adds a new rule to the analyzer after standard validation rules.
func (*Builder) AddPreAnalyzeRule ¶
AddPreAnalyzeRule adds a new rule to the analyzer before the standard analyzer rules.
func (*Builder) AddPreValidationRule ¶
AddPreValidationRule adds a new rule to the analyzer before standard validation rules.
func (*Builder) RemoveAfterAllRule ¶
RemoveAfterAllRule removes a default rule from the analyzer which would occur after all other rules
func (*Builder) RemoveDefaultRule ¶
RemoveDefaultRule removes a default rule from the analyzer that is executed as part of the analysis
func (*Builder) RemoveOnceAfterRule ¶
RemoveOnceAfterRule removes a default rule from the analyzer which would occur just once after the default analysis
func (*Builder) RemoveOnceBeforeRule ¶
RemoveOnceBeforeRule removes a default rule from the analyzer which would occur before other rules
func (*Builder) RemoveValidationRule ¶
RemoveValidationRule removes a default rule from the analyzer which would occur as part of the validation rules
func (*Builder) WithParallelism ¶
WithParallelism sets the parallelism level on the analyzer.
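Putting the Builder methods above together, here is a sketch of registering a custom post-analyze rule. The names customrules, myRuleId, logPlan, and newCustomAnalyzer are hypothetical, not part of this package:

package customrules

import (
	"github.com/dolthub/go-mysql-server/sql"
	"github.com/dolthub/go-mysql-server/sql/analyzer"
	"github.com/dolthub/go-mysql-server/sql/transform"
)

// myRuleId is a hypothetical id chosen to avoid colliding with built-in RuleIds.
const myRuleId analyzer.RuleId = 10000

// logPlan matches the RuleFunc signature and logs the analyzed plan without
// modifying it, reporting transform.SameTree.
func logPlan(ctx *sql.Context, a *analyzer.Analyzer, n sql.Node, scope *analyzer.Scope, sel analyzer.RuleSelector) (sql.Node, transform.TreeIdentity, error) {
	a.Log("analyzed plan: %s", n.String())
	return n, transform.SameTree, nil
}

// newCustomAnalyzer chains Builder options and builds the Analyzer.
func newCustomAnalyzer(provider sql.DatabaseProvider) *analyzer.Analyzer {
	return analyzer.NewBuilder(provider).
		AddPostAnalyzeRule(myRuleId, logPlan).
		WithParallelism(2).
		Build()
}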
type Carder ¶ added in v0.15.0
type Carder interface {
	// EstimateCard returns the estimated row count for a relational
	// expression. Cardinality is an expression group property.
	EstimateCard(*sql.Context, relExpr, sql.StatsReader) (float64, error)
}
Carder types can estimate the cardinality (row count) of relational expressions.
func NewDefaultCarder ¶ added in v0.15.0
func NewDefaultCarder() Carder
type Catalog ¶ added in v0.11.0
type Catalog struct {
	MySQLDb    *mysql_db.MySQLDb
	InfoSchema sql.Database
	Provider   sql.DatabaseProvider
	// contains filtered or unexported fields
}
func NewCatalog ¶ added in v0.11.0
func NewCatalog(provider sql.DatabaseProvider) *Catalog
NewCatalog returns a new empty Catalog with the given provider
func (*Catalog) AllDatabases ¶ added in v0.11.0
func (*Catalog) CreateDatabase ¶ added in v0.11.0
CreateDatabase creates a new Database and adds it to the catalog.
func (*Catalog) ExternalStoredProcedure ¶ added in v0.14.0
func (c *Catalog) ExternalStoredProcedure(ctx *sql.Context, name string, numOfParams int) (*sql.ExternalStoredProcedureDetails, error)
ExternalStoredProcedure implements sql.ExternalStoredProcedureProvider
func (*Catalog) ExternalStoredProcedures ¶ added in v0.14.0
func (c *Catalog) ExternalStoredProcedures(ctx *sql.Context, name string) ([]sql.ExternalStoredProcedureDetails, error)
ExternalStoredProcedures implements sql.ExternalStoredProcedureProvider
func (*Catalog) Function ¶ added in v0.11.0
Function returns the function with the name given, or sql.ErrFunctionNotFound if it doesn't exist
func (*Catalog) LockTable ¶ added in v0.11.0
LockTable adds a lock for the given table and session client. It is assumed the database is the current database in use.
func (*Catalog) RegisterFunction ¶ added in v0.11.0
RegisterFunction registers the functions given, adding them to the built-in functions. Integrators with custom functions should typically use the FunctionProvider interface instead.
func (*Catalog) RemoveDatabase ¶ added in v0.11.0
RemoveDatabase removes a database from the catalog.
func (*Catalog) Statistics ¶ added in v0.15.0
func (*Catalog) Table ¶ added in v0.11.0
func (c *Catalog) Table(ctx *sql.Context, dbName, tableName string) (sql.Table, sql.Database, error)
Table returns the table in the given database with the given name.
func (*Catalog) TableAsOf ¶ added in v0.11.0
func (c *Catalog) TableAsOf(ctx *sql.Context, dbName, tableName string, asOf interface{}) (sql.Table, sql.Database, error)
TableAsOf returns the table in the given database with the given name, as it existed at the time given. The database named must support timed queries.
func (*Catalog) TableFunction ¶ added in v0.12.0
TableFunction implements the TableFunctionProvider interface
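A sketch of basic Catalog lookups. It assumes memory.NewDatabase from the companion memory package, and the database and table names here ("mydb", "people") are purely illustrative:

package catalogexample

import (
	"fmt"

	"github.com/dolthub/go-mysql-server/memory"
	"github.com/dolthub/go-mysql-server/sql"
	"github.com/dolthub/go-mysql-server/sql/analyzer"
)

func lookupPeople(ctx *sql.Context) (sql.Table, error) {
	// Build a catalog over a single in-memory database.
	cat := analyzer.NewCatalog(analyzer.NewDatabaseProvider(memory.NewDatabase("mydb")))

	// HasDB reports whether the named database is known to the catalog.
	if !cat.HasDB(ctx, "mydb") {
		return nil, fmt.Errorf("database mydb not found")
	}

	// Table resolves a table by database and table name; the second return
	// value is the sql.Database the table was found in.
	tbl, _, err := cat.Table(ctx, "mydb", "people")
	return tbl, err
}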
type Coster ¶ added in v0.15.0
type Coster interface {
	// EstimateCost returns the incremental CPU and memory cost for an
	// operator, or an error. Cost depends on the physical operator type
	// and the cardinality of its inputs.
	EstimateCost(*sql.Context, relExpr, sql.StatsReader) (float64, error)
}
Coster types can estimate the CPU and memory cost of physical execution operators.
func NewDefaultCoster ¶ added in v0.15.0
func NewDefaultCoster() Coster
func NewHashBiasedCoster ¶ added in v0.15.0
func NewHashBiasedCoster() Coster
func NewInnerBiasedCoster ¶ added in v0.15.0
func NewInnerBiasedCoster() Coster
func NewLookupBiasedCoster ¶ added in v0.15.0
func NewLookupBiasedCoster() Coster
func NewMergeBiasedCoster ¶ added in v0.15.0
func NewMergeBiasedCoster() Coster
type ExecBuilder ¶ added in v0.14.0
type ExecBuilder struct{}
func NewExecBuilder ¶ added in v0.14.0
func NewExecBuilder() *ExecBuilder
type HintType ¶ added in v0.15.0
type HintType uint8
const (
	HintTypeUnknown HintType = iota
	HintTypeJoinOrder                // JOIN_ORDER
	HintTypeJoinFixedOrder           // JOIN_FIXED_ORDER
	HintTypeMergeJoin                // MERGE_JOIN
	HintTypeLookupJoin               // LOOKUP_JOIN
	HintTypeHashJoin                 // HASH_JOIN
	HintTypeSemiJoin                 // SEMI_JOIN
	HintTypeAntiJoin                 // ANTI_JOIN
	HintTypeInnerJoin                // INNER_JOIN
	HintTypeRightSemiLookupJoin      // RIGHT_SEMI_LOOKUP_JOIN
	HintTypeNoIndexConditionPushDown // NO_ICP
)
TODO implement NO_ICP and JOIN_FIXED_ORDER
type Memo ¶ added in v0.14.0
type Memo struct {
// contains filtered or unexported fields
}
Memo collects a forest of query plans structured by logical and physical equivalency. Logically equivalent plans, represented by an exprGroup, produce the same rows (possibly unordered) and schema. Physical plans are stored in a linked list within an expression group.
func (*Memo) WithJoinOp ¶ added in v0.15.0
func (*Memo) WithJoinOrder ¶ added in v0.14.0
type ProcedureCache ¶ added in v0.9.0
type ProcedureCache struct {
	IsPopulating bool
	// contains filtered or unexported fields
}
ProcedureCache contains all non-built-in stored procedures for each database.
func NewProcedureCache ¶ added in v0.9.0
func NewProcedureCache() *ProcedureCache
NewProcedureCache returns a *ProcedureCache.
func (*ProcedureCache) AllForDatabase ¶ added in v0.9.0
func (pc *ProcedureCache) AllForDatabase(dbName string) []*plan.Procedure
AllForDatabase returns all stored procedures for the given database, sorted by name and parameter count ascending. The database name is case-insensitive.
func (*ProcedureCache) Get ¶ added in v0.9.0
func (pc *ProcedureCache) Get(dbName, procedureName string, numOfParams int) *plan.Procedure
Get returns the stored procedure with the given name from the given database. All names are case-insensitive. If the procedure does not exist, then this returns nil. If the number of parameters does not match any given procedure, then the procedure with the largest number of parameters is returned.
func (*ProcedureCache) Register ¶ added in v0.9.0
func (pc *ProcedureCache) Register(dbName string, procedure *plan.Procedure) error
Register adds the given stored procedure to the cache. Will overwrite any procedures that already exist with the same name and same number of parameters for the given database name.
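A sketch of the cache's lifecycle, assuming proc is a *plan.Procedure produced elsewhere (for example by analyzing a CREATE PROCEDURE statement) and that plan.Procedure exposes an exported Name field:

package proccache

import (
	"github.com/dolthub/go-mysql-server/sql/analyzer"
	"github.com/dolthub/go-mysql-server/sql/plan"
)

func cacheProcedure(proc *plan.Procedure) ([]*plan.Procedure, error) {
	pc := analyzer.NewProcedureCache()

	// Register overwrites any existing procedure with the same name and
	// parameter count under the database name given.
	if err := pc.Register("mydb", proc); err != nil {
		return nil, err
	}

	// Get is case-insensitive; the parameter count 0 here is purely
	// illustrative, and a nil result means no procedure with that name exists.
	_ = pc.Get("mydb", proc.Name, 0)

	// AllForDatabase returns every cached procedure for "mydb", sorted by
	// name and parameter count.
	return pc.AllForDatabase("mydb"), nil
}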
type RoutineTable ¶ added in v0.12.0
type RoutineTable interface {
	sql.Table

	// AssignProcedures assigns a map of db-procedures to the routines table.
	AssignProcedures(p map[string][]*plan.Procedure) sql.Table
}
RoutineTable is a Table that depends on procedures and functions.
type RuleFunc ¶
type RuleFunc func(*sql.Context, *Analyzer, sql.Node, *Scope, RuleSelector) (sql.Node, transform.TreeIdentity, error)
RuleFunc is the function to be applied in a rule.
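Complementing the Builder sketch above, a RuleFunc will typically walk the plan with the sql/transform package. This no-op sketch (identityRule is a hypothetical name) shows the shape; a real rule would rewrite specific node types inside the callback:

package customrules

import (
	"github.com/dolthub/go-mysql-server/sql"
	"github.com/dolthub/go-mysql-server/sql/analyzer"
	"github.com/dolthub/go-mysql-server/sql/transform"
)

// identityRule satisfies RuleFunc but leaves the plan untouched.
func identityRule(ctx *sql.Context, a *analyzer.Analyzer, n sql.Node, scope *analyzer.Scope, sel analyzer.RuleSelector) (sql.Node, transform.TreeIdentity, error) {
	return transform.Node(n, func(n sql.Node) (sql.Node, transform.TreeIdentity, error) {
		// Match and rewrite specific node types here; returning SameTree
		// tells the analyzer this subtree was not modified.
		return n, transform.SameTree, nil
	})
}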
type RuleId ¶ added in v0.12.0
type RuleId int
const (
	AutocommitId   RuleId // addAutocommitNode
	TrackProcessId        // trackProcess
)
type RuleSelector ¶ added in v0.12.0
RuleSelector filters analysis rules by id
func NewFinalizeSubquerySel ¶ added in v0.15.0
func NewFinalizeSubquerySel(sel RuleSelector) RuleSelector
func NewFinalizeUnionSel ¶ added in v0.15.0
func NewFinalizeUnionSel(sel RuleSelector) RuleSelector
func NewProcRuleSelector ¶ added in v0.15.0
func NewProcRuleSelector(sel RuleSelector) RuleSelector
func NewResolveSubqueryExprSelector ¶ added in v0.14.0
func NewResolveSubqueryExprSelector(sel RuleSelector) RuleSelector
type Scope ¶
type Scope struct {
// contains filtered or unexported fields
}
Scope of the analysis being performed, used when analyzing subqueries to give such analysis access to outer scope.
func (*Scope) EnforcesReadOnly ¶ added in v0.15.0
func (*Scope) InnerToOuter ¶
InnerToOuter returns the scope Nodes in order of innermost scope to outermost scope. When using these nodes for analysis, always inspect the children of the nodes, rather than the nodes themselves. The children define the schema of the rows being processed by the scope node itself.
func (*Scope) OuterRelUnresolved ¶ added in v0.15.0
OuterRelUnresolved returns true if the relations in the outer scope are not qualified and resolved. note: a subquery in the outer scope is itself a scope, and by definition not an outer relation
func (*Scope) OuterToInner ¶
OuterToInner returns the scope nodes in order of outermost scope to innermost scope. When using these nodes for analysis, always inspect the children of the nodes, rather than the nodes themselves. The children define the schema of the rows being processed by the scope node itself.
func (*Scope) RecursionDepth ¶ added in v0.14.0
type TableAliases ¶
Source Files ¶
- aggregations.go
- aliases.go
- analyzer.go
- apply_binlog_controller.go
- apply_foreign_keys.go
- apply_hash_in.go
- apply_indexes_from_outer_scope.go
- apply_join.go
- apply_update_accumulators.go
- assign_catalog.go
- assign_info_schema.go
- assign_routines.go
- assign_update_join.go
- autocommit.go
- batch.go
- catalog.go
- check_constraints.go
- constraints.go
- coster.go
- declare.go
- describe.go
- exec_builder.go
- expand_stars.go
- filters.go
- fix_field_indexes.go
- hinttype_string.go
- hoist_filters.go
- hoist_select_exists.go
- index_analyzer.go
- indexed_joins.go
- indexes.go
- inserts.go
- join_order_builder.go
- load_events.go
- load_triggers.go
- memo.go
- memo.og.go
- optimization_rules.go
- parallelize.go
- privileges.go
- procedure_cache.go
- process.go
- process_truncate.go
- prune_columns.go
- pushdown.go
- quick_perm.go
- reorder_projections.go
- replace_cross_joins.go
- replace_window_names.go
- resolve_column_defaults.go
- resolve_columns.go
- resolve_create_like.go
- resolve_create_select.go
- resolve_ctes.go
- resolve_database.go
- resolve_external_stored_procedures.go
- resolve_functions.go
- resolve_having.go
- resolve_natural_joins.go
- resolve_orderby.go
- resolve_subqueries.go
- resolve_tables.go
- resolve_unions.go
- resolve_variables.go
- resolve_views.go
- rule_ids.go
- ruleid_string.go
- rules.go
- scope.go
- select_hints.go
- select_limit.go
- stored_procedures.go
- symbol_resolution.go
- tables.go
- topn.go
- triggers.go
- validate_create_table.go
- validation_rules.go
- warnings.go