Documentation ¶
Overview ¶
Package ddl is the core of the TiDB DDL layer. It is used to manage the schema of a TiDB cluster.
TiDB executes using the Online DDL algorithm, see docs/design/2018-10-08-online-DDL.md for more details.
DDL maintains the following invariant:
At any time, for each schema object such as a table, at most 2 versions can exist: the current version N loaded by all TiDB instances, and version N+1 pushed forward by DDL. Before we can finish the DDL or continue to the next operation, we need to make sure all TiDB instances have synchronized to version N+1. Note that we use a global version number for all schema objects, so the versions related to a given table might not be continuous, as DDLs are executed in parallel.
Index ¶
- Constants
- Variables
- func AddDelRangeJobInternal(ctx context.Context, wrapper DelRangeExecWrapper, job *model.Job) error
- func AddHistoryDDLJob(ctx context.Context, sess *sess.Session, t *meta.Mutator, job *model.Job, ...) error
- func AddIndexColumnFlag(tblInfo *model.TableInfo, indexInfo *model.IndexInfo)
- func AllocateColumnID(tblInfo *model.TableInfo) int64
- func AllocateIndexID(tblInfo *model.TableInfo) int64
- func AlterTableMode(de Executor, sctx sessionctx.Context, mode model.TableMode, ...) error
- func AppendPartitionDefs(partitionInfo *model.PartitionInfo, buf *bytes.Buffer, sqlMode mysql.SQLMode)
- func AppendPartitionInfo(partitionInfo *model.PartitionInfo, buf *bytes.Buffer, sqlMode mysql.SQLMode)
- func BackupFillerTypeCount() int
- func BuildAddedPartitionInfo(ctx expression.BuildContext, meta *model.TableInfo, spec *ast.AlterTableSpec) (*model.PartitionInfo, error)
- func BuildAffinityGroupDefinitionsForTest(codec tikv.Codec, tblInfo *model.TableInfo, ...) (map[string][]pdhttp.AffinityGroupKeyRange, error)
- func BuildElements(changingCol *model.ColumnInfo, changingIdxs []*model.IndexInfo) []*meta.Element
- func BuildHandle(pkDts []types.Datum, tblInfo *model.TableInfo, pkInfo *model.IndexInfo, ...) (kv.Handle, error)
- func BuildHiddenColumnInfo(ctx *metabuild.Context, indexPartSpecifications []*ast.IndexPartSpecification, ...) ([]*model.ColumnInfo, error)
- func BuildIndexInfo(ctx *metabuild.Context, tblInfo *model.TableInfo, indexName ast.CIStr, ...) (*model.IndexInfo, error)
- func BuildSessionTemporaryTableInfo(ctx *metabuild.Context, store kv.Storage, is infoschema.InfoSchema, ...) (*model.TableInfo, error)
- func BuildTableInfo(ctx *metabuild.Context, tableName ast.CIStr, cols []*table.Column, ...) (tbInfo *model.TableInfo, err error)
- func BuildTableInfoFromAST(ctx *metabuild.Context, s *ast.CreateTableStmt) (*model.TableInfo, error)
- func BuildTableInfoWithLike(ident ast.Ident, referTblInfo *model.TableInfo, s *ast.CreateTableStmt) (*model.TableInfo, error)
- func BuildTableInfoWithStmt(ctx *metabuild.Context, s *ast.CreateTableStmt, dbCharset, dbCollate string, ...) (*model.TableInfo, error)
- func BuildViewInfo(s *ast.CreateViewStmt) (*model.ViewInfo, error)
- func CalculateRegionBatch(totalRegionCnt int, nodeCnt int, useLocalDisk bool) int
- func CancelJobs(ctx context.Context, se sessionctx.Context, ids []int64) (errs []error, err error)
- func CancelJobsBySystem(se sessionctx.Context, ids []int64) (errs []error, err error)
- func CheckAfterPositionExists(tblInfo *model.TableInfo, pos *ast.ColumnPosition) error
- func CheckAndBuildIndexConditionString(tblInfo *model.TableInfo, indexConditionExpr ast.ExprNode) (string, error)
- func CheckDropTablePartition(meta *model.TableInfo, partLowerNames []string) error
- func CheckImportIntoTableIsEmpty(store kv.Storage, sessCtx sessionctx.Context, tbl table.Table) (bool, error)
- func CheckIsDropPrimaryKey(indexName ast.CIStr, indexInfo *model.IndexInfo, t table.Table) (bool, error)
- func CheckPKOnGeneratedColumn(tblInfo *model.TableInfo, ...) (*model.ColumnInfo, error)
- func CheckPlacementPolicyNotInUseFromInfoSchema(is infoschema.InfoSchema, policy *model.PolicyInfo) error
- func CheckPlacementPolicyNotInUseFromMeta(t *meta.Mutator, policy *model.PolicyInfo) error
- func CheckTableInfoValidWithStmt(ctx *metabuild.Context, tbInfo *model.TableInfo, s *ast.CreateTableStmt) (err error)
- func CloseOwnerManager(store kv.Storage)
- func ConvertBetweenCharAndVarchar(oldCol, newCol byte) bool
- func CreateNewColumn(ctx sessionctx.Context, schema *model.DBInfo, spec *ast.AlterTableSpec, ...) (*table.Column, error)
- func DeniedByBDR(role ast.BDRRole, action model.ActionType, args model.JobArgs) (denied bool)
- func DisableTiFlashPoll(d any)
- func DropIndexColumnFlag(tblInfo *model.TableInfo, indexInfo *model.IndexInfo)
- func EnableTiFlashPoll(d any)
- func ExtractDatumByOffsets(ctx expression.EvalContext, row chunk.Row, offsets []int, ...) []types.Datum
- func ExtractTblInfos(is infoschema.InfoSchema, oldIdent, newIdent ast.Ident, isAlterTable bool, ...) ([]*model.DBInfo, int64, error)
- func FindColumnNamesInExpr(expr ast.ExprNode) []*ast.ColumnName
- func FindRelatedIndexesToChange(tblInfo *model.TableInfo, colName ast.CIStr) []changingIndex
- func GeneratePartDefsFromInterval(ctx expression.BuildContext, tp ast.AlterTableType, tbInfo *model.TableInfo, ...) error
- func GetAllDDLJobs(ctx context.Context, se sessionctx.Context) ([]*model.Job, error)
- func GetAllHistoryDDLJobs(m meta.Reader) ([]*model.Job, error)
- func GetCharsetAndCollateInTableOption(startIdx int, options []*ast.TableOption, defaultUTF8MB4Coll string) (chs, coll string, err error)
- func GetColumnForeignKeyInfo(colName string, fkInfos []*model.FKInfo) *model.FKInfo
- func GetDefaultCollation(cs string, defaultUTF8MB4Collation string) (string, error)
- func GetDropOrTruncateTableInfoFromJobsByStore(jobs []*model.Job, gcSafePoint uint64, ...) (bool, error)
- func GetHistoryJobByID(sess sessionctx.Context, id int64) (*model.Job, error)
- func GetLastHistoryDDLJobsIterator(m meta.Reader) (meta.LastJobIterator, error)
- func GetLastNHistoryDDLJobs(t meta.Reader, maxNumJobs int) ([]*model.Job, error)
- func GetName4AnonymousIndex(t table.Table, colName ast.CIStr, idxName ast.CIStr) ast.CIStr
- func GetPartitionAffinityGroupID(tableID, partitionID int64) string
- func GetRangeEndKey(ctx *ReorgContext, store kv.Storage, priority int, keyPrefix kv.Key, ...) (kv.Key, error)
- func GetRangePlacementPolicyName(ctx context.Context, rangeBundleID string) (string, error)
- func GetTableAffinityGroupID(tableID int64) string
- func GetTableInfoAndCancelFaultJob(t *meta.Mutator, job *model.Job, schemaID int64) (*model.TableInfo, error)
- func GetTableMaxHandle(ctx *ReorgContext, store kv.Storage, startTS uint64, tbl table.PhysicalTable) (maxHandle kv.Handle, emptyTable bool, err error)
- func GetWaitTimeWhenErrorOccurred() time.Duration
- func HandleLockTablesOnFinish(ctx sessionctx.Context, jobW *JobWrapper, ddlErr error)
- func HandleLockTablesOnSuccessSubmit(ctx sessionctx.Context, jobW *JobWrapper)
- func InitAndAddColumnToTable(tblInfo *model.TableInfo, colInfo *model.ColumnInfo) *model.ColumnInfo
- func IsAutoRandomColumnID(tblInfo *model.TableInfo, colID int64) bool
- func IsColumnDroppableWithCheckConstraint(col ast.CIStr, tblInfo *model.TableInfo) error
- func IsColumnRenameableWithCheckConstraint(col ast.CIStr, tblInfo *model.TableInfo) error
- func IsElemsChangedToModifyColumn(oldElems, newElems []string) bool
- func IterAllDDLJobs(ctx sessionctx.Context, txn kv.Transaction, ...) error
- func IterHistoryDDLJobs(txn kv.Transaction, finishFn func([]*model.Job) (bool, error)) error
- func JobNeedGC(job *model.Job) bool
- func LoadTiFlashReplicaInfo(tblInfo *model.TableInfo, tableList *[]TiFlashReplicaStatus)
- func LocateOffsetToMove(currentOffset int, pos *ast.ColumnPosition, tblInfo *model.TableInfo) (destOffset int, err error)
- func MockTableInfo(sctx sessionctx.Context, stmt *ast.CreateTableStmt, tableID int64) (*model.TableInfo, error)
- func NeedToOverwriteColCharset(options []*ast.TableOption) bool
- func NewAddIndexIngestPipeline(ctx *workerpool.Context, store kv.Storage, sessPool opSessPool, ...) (*operator.AsyncPipeline, error)
- func NewBackfillingSchedulerForTest(d DDL) (scheduler.Extension, error)
- func NewDDL(ctx context.Context, options ...Option) (DDL, Executor)
- func NewDDLReorgMeta(ctx sessionctx.Context) *model.DDLReorgMeta
- func NewLocalWorkerCtx(ctx context.Context, jobID int64) *workerpool.Context
- func NewMetaBuildContextWithSctx(sctx sessionctx.Context, otherOpts ...metabuild.Option) *metabuild.Context
- func NewReorgCopContext(reorgMeta *model.DDLReorgMeta, tblInfo *model.TableInfo, ...) (copr.CopContext, error)
- func NewReorgHandlerForTest(se sessionctx.Context) *reorgHandler
- func NewWriteIndexToExternalStoragePipeline(ctx *workerpool.Context, store kv.Storage, extStore objstore.Storage, ...) (*operator.AsyncPipeline, error)
- func OverwriteCollationWithBinaryFlag(colDef *ast.ColumnDef, chs, coll string, defaultUTF8MB4Coll string) (newChs string, newColl string)
- func PauseAllJobsBySystem(se sessionctx.Context) (map[int64]error, error)
- func PauseJobs(ctx context.Context, se sessionctx.Context, ids []int64) ([]error, error)
- func PauseJobsBySystem(se sessionctx.Context, ids []int64) (errs []error, err error)
- func PollAvailableTableProgress(schemas infoschema.InfoSchema, _ sessionctx.Context, ...)
- func ProcessColumnCharsetAndCollation(ctx *metabuild.Context, col *table.Column, newCol *table.Column, ...) error
- func ProcessModifyColumnOptions(ctx sessionctx.Context, col *table.Column, options []*ast.ColumnOption) error
- func RemoveDependentHiddenColumns(tblInfo *model.TableInfo, idxInfo *model.IndexInfo)
- func ResolveAlterAlgorithm(alterSpec *ast.AlterTableSpec, specify ast.AlgorithmType) (ast.AlgorithmType, error)
- func ResolveAlterTableSpec(ctx sessionctx.Context, specs []*ast.AlterTableSpec) ([]*ast.AlterTableSpec, error)
- func ResolveCharsetCollation(charsetOpts []ast.CharsetOpt, utf8MB4DefaultColl string) (chs string, coll string, err error)
- func ResumeAllJobsBySystem(se sessionctx.Context) (map[int64]error, error)
- func ResumeJobs(ctx context.Context, se sessionctx.Context, ids []int64) ([]error, error)
- func ResumeJobsBySystem(se sessionctx.Context, ids []int64) (errs []error, err error)
- func ScanHistoryDDLJobs(m *meta.Mutator, startJobID int64, limit int) ([]*model.Job, error)
- func SendFlashbackToVersionRPC(ctx context.Context, s tikv.Storage, version uint64, startTS, commitTS uint64, ...) (rangetask.TaskStat, error)
- func SendPrepareFlashbackToVersionRPC(ctx context.Context, s tikv.Storage, flashbackTS, startTS uint64, ...) (rangetask.TaskStat, error)
- func SetBatchInsertDeleteRangeSize(i int)
- func SetDefaultValue(ctx expression.BuildContext, col *table.Column, option *ast.ColumnOption) (hasDefaultValue bool, err error)
- func SetDirectPlacementOpt(placementSettings *model.PlacementSettings, ...) error
- func SetDirectResourceGroupBackgroundOption(resourceGroupSettings *model.ResourceGroupSettings, ...) error
- func SetDirectResourceGroupRunawayOption(resourceGroupSettings *model.ResourceGroupSettings, ...) error
- func SetDirectResourceGroupSettings(groupInfo *model.ResourceGroupInfo, opt *ast.ResourceGroupOption) error
- func SetSchemaDiffForCreateTable(diff *model.SchemaDiff, job *model.Job, jobCtx *jobContext) error
- func SetSchemaDiffForCreateTables(diff *model.SchemaDiff, job *model.Job) error
- func SetSchemaDiffForCreateView(diff *model.SchemaDiff, job *model.Job) error
- func SetSchemaDiffForDropTable(diff *model.SchemaDiff, job *model.Job, jobCtx *jobContext)
- func SetSchemaDiffForDropTablePartition(diff *model.SchemaDiff, job *model.Job, jobCtx *jobContext)
- func SetSchemaDiffForExchangeTablePartition(diff *model.SchemaDiff, job *model.Job, multiInfos ...schemaIDAndTableInfo) error
- func SetSchemaDiffForFlashbackCluster(diff *model.SchemaDiff, job *model.Job)
- func SetSchemaDiffForMultiInfos(diff *model.SchemaDiff, multiInfos ...schemaIDAndTableInfo)
- func SetSchemaDiffForPartitionModify(diff *model.SchemaDiff, job *model.Job, jobCtx *jobContext)
- func SetSchemaDiffForRecoverSchema(diff *model.SchemaDiff, job *model.Job) error
- func SetSchemaDiffForRecoverTable(diff *model.SchemaDiff, job *model.Job, jobCtx *jobContext)
- func SetSchemaDiffForRenameTable(diff *model.SchemaDiff, job *model.Job, jobCtx *jobContext) error
- func SetSchemaDiffForRenameTables(diff *model.SchemaDiff, _ *model.Job, jobCtx *jobContext) error
- func SetSchemaDiffForReorganizePartition(diff *model.SchemaDiff, job *model.Job, jobCtx *jobContext)
- func SetSchemaDiffForTruncateTable(diff *model.SchemaDiff, job *model.Job, jobCtx *jobContext) error
- func SetSchemaDiffForTruncateTablePartition(diff *model.SchemaDiff, job *model.Job, jobCtx *jobContext)
- func SetWaitTimeWhenErrorOccurred(dur time.Duration)
- func ShouldBuildClusteredIndex(mode vardef.ClusteredIndexDefMode, opt *ast.IndexOption, isSingleIntPK bool) bool
- func SplitRecordRegion(ctx context.Context, store kv.SplittableStore, physicalTableID, tableID int64, ...) uint64
- func StartOwnerManager(ctx context.Context, store kv.Storage) error
- func TaskKey(jobID int64, mergeTempIdx bool) string
- func UpdateColsNull2NotNull(tblInfo *model.TableInfo, indexInfo *model.IndexInfo) error
- func UpdateIndexCol(idxCol *model.IndexColumn, changingCol *model.ColumnInfo)
- func ValidateFlashbackTS(ctx context.Context, sctx sessionctx.Context, flashBackTS uint64) error
- func ValidateRenameIndex(from, to ast.CIStr, tbl *model.TableInfo) (ignore bool, err error)
- func WaitScatterRegionFinish(ctx context.Context, store kv.SplittableStore, regionIDs ...uint64)
- type AlterAlgorithm
- type AvailableTableID
- type BackfillCleanUpS3
- type BackfillSubTaskMeta
- type BackfillTaskMeta
- type CreateTableConfig
- type CreateTableOption
- type DDL
- type DelRangeExecWrapper
- type Executor
- type ExecutorForTest
- type IndexIngestOperator
- type IndexRecordChunk
- type IndexWriteResult
- type Info
- type JobSubmitter
- type JobWrapper
- type LitBackfillScheduler
- func (sch *LitBackfillScheduler) Close()
- func (*LitBackfillScheduler) GetEligibleInstances(_ context.Context, _ *proto.Task) ([]string, error)
- func (sch *LitBackfillScheduler) GetNextStep(task *proto.TaskBase) proto.Step
- func (sch *LitBackfillScheduler) Init() (err error)
- func (*LitBackfillScheduler) IsRetryableErr(error) bool
- func (sch *LitBackfillScheduler) ModifyMeta(oldMeta []byte, modifies []proto.Modification) ([]byte, error)
- func (*LitBackfillScheduler) OnDone(_ context.Context, _ diststorage.TaskHandle, _ *proto.Task) error
- func (sch *LitBackfillScheduler) OnNextSubtasksBatch(ctx context.Context, taskHandle diststorage.TaskHandle, task *proto.Task, ...) (subtaskMeta [][]byte, err error)
- func (*LitBackfillScheduler) OnTick(_ context.Context, _ *proto.Task)
- type MergeTempIndexOperator
- type OnExist
- type Option
- func WithAutoIDClient(cli *autoid.ClientDiscover) Option
- func WithEtcdClient(client *clientv3.Client) Option
- func WithEventPublishStore(store notifier.Store) Option
- func WithInfoCache(ic *infoschema.InfoCache) Option
- func WithLease(lease time.Duration) Option
- func WithSchemaLoader(loader SchemaLoader) Option
- func WithStore(store kv.Storage) Option
- type Options
- type PollTiFlashBackoffContext
- func (b *PollTiFlashBackoffContext) Get(id int64) (*PollTiFlashBackoffElement, bool)
- func (b *PollTiFlashBackoffContext) Len() int
- func (b *PollTiFlashBackoffContext) Put(id int64) bool
- func (b *PollTiFlashBackoffContext) Remove(id int64) bool
- func (b *PollTiFlashBackoffContext) Tick(id int64) (grew bool, exist bool, cnt int)
- type PollTiFlashBackoffElement
- type ReorgContext
- type SchemaLoader
- type StartMode
- type TableScanOperator
- type TableScanTask
- type TableScanTaskSource
- type TaskKeyBuilder
- type TempIndexScanTaskSource
- type TiFlashManagementContext
- type TiFlashReplicaStatus
- type TiFlashTick
- type WriteExternalStoreOperator
Constants ¶
const (
	BackfillTaskMetaVersion0 = iota
	BackfillTaskMetaVersion1
)
Version constants for BackfillTaskMeta.
const (
	// DDLOwnerKey is the ddl owner path that is saved to etcd, and it's exported for testing.
	DDLOwnerKey = "/tidb/ddl/fg/owner"
	// Prompt is the prompt for ddl owner manager.
	Prompt = "ddl"
)
const (
	DefNumHistoryJobs = 10
	// DefNumGetDDLHistoryJobs is the max count for getting the ddl history once.
	DefNumGetDDLHistoryJobs = 2048
)
DefNumHistoryJobs is the default number of history jobs.
const (
	BRInsertDeleteRangeSQLPrefix = insertDeleteRangeSQLPrefix
	BRInsertDeleteRangeSQLValue  = insertDeleteRangeSQLValue
)
Only used in BR unit tests. If these constants are modified, please make sure they remain compatible with BR.
const GlobalScatterGroupID int64 = -1
GlobalScatterGroupID is used to indicate the global scatter group ID.
const (
// MaxCommentLength is exported for testing.
MaxCommentLength = 1024
)
Variables ¶
var (
	// PollTiFlashInterval is the interval between every pollTiFlashReplicaStatus call.
	PollTiFlashInterval = 2 * time.Second
	// PullTiFlashPdTick indicates the number of intervals before we fully sync all TiFlash pd rules and tables.
	PullTiFlashPdTick = atomicutil.NewUint64(30 * 5)
	// UpdateTiFlashStoreTick indicates the number of intervals before we fully update TiFlash stores.
	UpdateTiFlashStoreTick = atomicutil.NewUint64(5)
	// RefreshRulesTick indicates the number of intervals before we refresh TiFlash rules.
	RefreshRulesTick = atomicutil.NewUint64(10)
	// PollTiFlashBackoffMaxTick is the max tick before we try to update TiFlash replica availability for one table.
	PollTiFlashBackoffMaxTick TiFlashTick = 10
	// PollTiFlashBackoffMinTick is the min tick before we try to update TiFlash replica availability for one table.
	PollTiFlashBackoffMinTick TiFlashTick = 1
	// PollTiFlashBackoffCapacity is the cache size of backoff struct.
	PollTiFlashBackoffCapacity = 1000
	// PollTiFlashBackoffRate is growth rate of exponential backoff threshold.
	PollTiFlashBackoffRate TiFlashTick = 1.5
	// RefreshProgressMaxTableCount is the max count of table to refresh progress after available each poll.
	RefreshProgressMaxTableCount uint64 = 1000
)
var (
	CheckBackfillJobFinishInterval = 300 * time.Millisecond
	// UpdateBackfillJobRowCountInterval is the interval of updating the job row count.
	UpdateBackfillJobRowCountInterval = 3 * time.Second
)
CheckBackfillJobFinishInterval is exported for test.
var DDLBackfillers = map[model.ActionType]string{
	model.ActionAddIndex:            "add_index",
	model.ActionModifyColumn:        "modify_column",
	model.ActionDropIndex:           "drop_index",
	model.ActionReorganizePartition: "reorganize_partition",
}
DDLBackfillers contains the DDL actions that need a backfill step.
var DefaultAnalyzeCheckInterval = 10 * time.Second
DefaultAnalyzeCheckInterval is the interval for checking analyze status. exported for testing.
var DefaultCumulativeTimeout = 1 * time.Minute
DefaultCumulativeTimeout is the default cumulative timeout for analyze operation. exported for testing.
var (
	// EnableSplitTableRegion is a flag to decide whether to split a new region for
	// a newly created table. It takes effect only if the Storage supports split
	// region.
	EnableSplitTableRegion = uint32(0)
)
var EstimateTableRowSizeForTest = estimateTableRowSize
EstimateTableRowSizeForTest is used for test.
var LastReorgMetaFastReorgDisabled bool
LastReorgMetaFastReorgDisabled is used for test.
var MockDMLExecution func()
MockDMLExecution is only used for test.
var MockDMLExecutionMerging func()
MockDMLExecutionMerging is only used for test.
var ResultCounterForTest *atomic.Int32
ResultCounterForTest is used for test.
var (
	// RunInGoTest is used to identify whether ddl is running in the test.
	RunInGoTest bool
)
var TestReorgGoroutineRunning = make(chan struct{})
TestReorgGoroutineRunning is only used in test to indicate the reorg goroutine has been started.
var UpdateDDLJobReorgCfgInterval = 2 * time.Second
UpdateDDLJobReorgCfgInterval is the interval to check and update reorg configuration.
var (
	// WaitTimeWhenErrorOccurred is the waiting interval when processing DDL jobs encounters errors.
	WaitTimeWhenErrorOccurred = int64(1 * time.Second)
)
Functions ¶
func AddDelRangeJobInternal ¶
AddDelRangeJobInternal generates the delete ranges for the provided job and consumes them through delRangeExecWrapper.
func AddHistoryDDLJob ¶
func AddHistoryDDLJob(ctx context.Context, sess *sess.Session, t *meta.Mutator, job *model.Job, updateRawArgs bool) error
AddHistoryDDLJob records the history job.
func AddIndexColumnFlag ¶
AddIndexColumnFlag aligns the column flags of columns in TableInfo to IndexInfo.
func AllocateColumnID ¶
AllocateColumnID allocates next column ID from TableInfo.
func AllocateIndexID ¶
AllocateIndexID allocates an index ID from TableInfo.
func AlterTableMode ¶
func AlterTableMode(de Executor, sctx sessionctx.Context, mode model.TableMode, schemaID, tableID int64) error
AlterTableMode creates a DDL job for alter table mode.
func AppendPartitionDefs ¶
func AppendPartitionDefs(partitionInfo *model.PartitionInfo, buf *bytes.Buffer, sqlMode mysql.SQLMode)
AppendPartitionDefs generates a list of partition definitions needed for SHOW CREATE TABLE (in executor/show.go), for generating the ADD PARTITION query for INTERVAL partitioning of ALTER TABLE t LAST PARTITION, and for generating the CREATE TABLE query from CREATE TABLE ... INTERVAL.
func AppendPartitionInfo ¶
func AppendPartitionInfo(partitionInfo *model.PartitionInfo, buf *bytes.Buffer, sqlMode mysql.SQLMode)
AppendPartitionInfo is used in SHOW CREATE TABLE as well as for generating the SQL syntax for the PartitionInfo during validation of various DDL commands.
func BackupFillerTypeCount ¶
func BackupFillerTypeCount() int
BackupFillerTypeCount represents the count of ddl jobs that need to do backfill.
func BuildAddedPartitionInfo ¶
func BuildAddedPartitionInfo(ctx expression.BuildContext, meta *model.TableInfo, spec *ast.AlterTableSpec) (*model.PartitionInfo, error)
BuildAddedPartitionInfo builds the partition info for ALTER TABLE ... ADD PARTITION.
func BuildAffinityGroupDefinitionsForTest ¶
func BuildAffinityGroupDefinitionsForTest(
	codec tikv.Codec,
	tblInfo *model.TableInfo,
	partitionDefs []model.PartitionDefinition,
) (map[string][]pdhttp.AffinityGroupKeyRange, error)
BuildAffinityGroupDefinitionsForTest is exported for testing.
func BuildElements ¶
BuildElements is exported for testing.
func BuildHandle ¶
func BuildHandle(pkDts []types.Datum, tblInfo *model.TableInfo, pkInfo *model.IndexInfo, loc *time.Location, errCtx errctx.Context) (kv.Handle, error)
BuildHandle is exported for test.
func BuildHiddenColumnInfo ¶
func BuildHiddenColumnInfo(ctx *metabuild.Context, indexPartSpecifications []*ast.IndexPartSpecification, indexName ast.CIStr, tblInfo *model.TableInfo, existCols []*table.Column) ([]*model.ColumnInfo, error)
BuildHiddenColumnInfo builds hidden column info.
func BuildIndexInfo ¶
func BuildIndexInfo(
	ctx *metabuild.Context,
	tblInfo *model.TableInfo,
	indexName ast.CIStr,
	isPrimary, isUnique bool,
	columnarIndexType model.ColumnarIndexType,
	indexPartSpecifications []*ast.IndexPartSpecification,
	indexOption *ast.IndexOption,
	state model.SchemaState,
) (*model.IndexInfo, error)
BuildIndexInfo builds a new IndexInfo according to the index information.
func BuildSessionTemporaryTableInfo ¶
func BuildSessionTemporaryTableInfo(ctx *metabuild.Context, store kv.Storage, is infoschema.InfoSchema, s *ast.CreateTableStmt, dbCharset, dbCollate string, placementPolicyRef *model.PolicyRefInfo) (*model.TableInfo, error)
BuildSessionTemporaryTableInfo builds model.TableInfo from a SQL statement.
func BuildTableInfo ¶
func BuildTableInfo(
	ctx *metabuild.Context,
	tableName ast.CIStr,
	cols []*table.Column,
	constraints []*ast.Constraint,
	charset string,
	collate string,
) (tbInfo *model.TableInfo, err error)
BuildTableInfo creates a TableInfo.
func BuildTableInfoFromAST ¶
func BuildTableInfoFromAST(ctx *metabuild.Context, s *ast.CreateTableStmt) (*model.TableInfo, error)
BuildTableInfoFromAST builds model.TableInfo from a SQL statement. Note: TableID and PartitionID are left as uninitialized value.
func BuildTableInfoWithLike ¶
func BuildTableInfoWithLike(ident ast.Ident, referTblInfo *model.TableInfo, s *ast.CreateTableStmt) (*model.TableInfo, error)
BuildTableInfoWithLike builds a new table info according to CREATE TABLE ... LIKE statement.
func BuildTableInfoWithStmt ¶
func BuildTableInfoWithStmt(ctx *metabuild.Context, s *ast.CreateTableStmt, dbCharset, dbCollate string, placementPolicyRef *model.PolicyRefInfo) (*model.TableInfo, error)
BuildTableInfoWithStmt builds model.TableInfo from a SQL statement without performing validity checks.
func BuildViewInfo ¶
func BuildViewInfo(s *ast.CreateViewStmt) (*model.ViewInfo, error)
BuildViewInfo builds a ViewInfo structure from an ast.CreateViewStmt.
func CalculateRegionBatch ¶
CalculateRegionBatch is exported for test.
func CancelJobs ¶
CancelJobs cancels the DDL jobs according to user command.
func CancelJobsBySystem ¶
func CancelJobsBySystem(se sessionctx.Context, ids []int64) (errs []error, err error)
CancelJobsBySystem cancels Jobs because of internal reasons.
func CheckAfterPositionExists ¶
func CheckAfterPositionExists(tblInfo *model.TableInfo, pos *ast.ColumnPosition) error
CheckAfterPositionExists makes sure the column specified in the AFTER clause exists. For example, ALTER TABLE t ADD COLUMN c3 INT AFTER c1.
func CheckAndBuildIndexConditionString ¶
func CheckAndBuildIndexConditionString(tblInfo *model.TableInfo, indexConditionExpr ast.ExprNode) (string, error)
CheckAndBuildIndexConditionString validates whether the given expression is compatible with the table schema and returns a string representation of the expression.
func CheckDropTablePartition ¶
CheckDropTablePartition checks if the partition exists and does not allow deleting the last existing partition in the table.
func CheckImportIntoTableIsEmpty ¶
func CheckImportIntoTableIsEmpty(
	store kv.Storage,
	sessCtx sessionctx.Context,
	tbl table.Table,
) (bool, error)
CheckImportIntoTableIsEmpty checks whether the target table of IMPORT INTO is empty.
func CheckIsDropPrimaryKey ¶
func CheckIsDropPrimaryKey(indexName ast.CIStr, indexInfo *model.IndexInfo, t table.Table) (bool, error)
CheckIsDropPrimaryKey checks whether we will drop a PK; since there are many PK implementations, we provide this helper function.
func CheckPKOnGeneratedColumn ¶
func CheckPKOnGeneratedColumn(tblInfo *model.TableInfo, indexPartSpecifications []*ast.IndexPartSpecification) (*model.ColumnInfo, error)
CheckPKOnGeneratedColumn checks the specification of PK is valid.
func CheckPlacementPolicyNotInUseFromInfoSchema ¶
func CheckPlacementPolicyNotInUseFromInfoSchema(is infoschema.InfoSchema, policy *model.PolicyInfo) error
CheckPlacementPolicyNotInUseFromInfoSchema is exported for test.
func CheckPlacementPolicyNotInUseFromMeta ¶
func CheckPlacementPolicyNotInUseFromMeta(t *meta.Mutator, policy *model.PolicyInfo) error
CheckPlacementPolicyNotInUseFromMeta is exported for test.
func CheckTableInfoValidWithStmt ¶
func CheckTableInfoValidWithStmt(ctx *metabuild.Context, tbInfo *model.TableInfo, s *ast.CreateTableStmt) (err error)
CheckTableInfoValidWithStmt exposes checkTableInfoValidWithStmt to SchemaTracker. Maybe one day we can delete it.
func CloseOwnerManager ¶
CloseOwnerManager closes the global DDL owner manager.
func ConvertBetweenCharAndVarchar ¶
ConvertBetweenCharAndVarchar checks whether a column is converted between char and varchar. TODO: it is used by plugins, so change the plugins' usage and then remove it.
func CreateNewColumn ¶
func CreateNewColumn(ctx sessionctx.Context, schema *model.DBInfo, spec *ast.AlterTableSpec, t table.Table, specNewColumn *ast.ColumnDef) (*table.Column, error)
CreateNewColumn creates a new column according to the column information.
func DeniedByBDR ¶
DeniedByBDR checks whether the DDL is denied by BDR.
func DisableTiFlashPoll ¶
func DisableTiFlashPoll(d any)
DisableTiFlashPoll disables TiFlash poll loop aka PollTiFlashReplicaStatus.
func DropIndexColumnFlag ¶
DropIndexColumnFlag drops the column flag of columns in TableInfo according to the IndexInfo.
func EnableTiFlashPoll ¶
func EnableTiFlashPoll(d any)
EnableTiFlashPoll enables TiFlash poll loop aka PollTiFlashReplicaStatus.
func ExtractDatumByOffsets ¶
func ExtractDatumByOffsets(ctx expression.EvalContext, row chunk.Row, offsets []int, expCols []*expression.Column, buf []types.Datum) []types.Datum
ExtractDatumByOffsets is exported for test.
func ExtractTblInfos ¶
func ExtractTblInfos(is infoschema.InfoSchema, oldIdent, newIdent ast.Ident, isAlterTable bool, tables map[string]int64) ([]*model.DBInfo, int64, error)
ExtractTblInfos extracts the table information from the infoschema.
func FindColumnNamesInExpr ¶
func FindColumnNamesInExpr(expr ast.ExprNode) []*ast.ColumnName
FindColumnNamesInExpr returns a slice of ast.ColumnName which is referred in expr.
func FindRelatedIndexesToChange ¶
FindRelatedIndexesToChange finds the indexes that cover the given column. The normal one will be overwritten by the temp one.
func GeneratePartDefsFromInterval ¶
func GeneratePartDefsFromInterval(ctx expression.BuildContext, tp ast.AlterTableType, tbInfo *model.TableInfo, partitionOptions *ast.PartitionOptions) error
GeneratePartDefsFromInterval generates range partitions from INTERVAL partitioning. Handles
- CREATE TABLE: all partitions are generated
- ALTER TABLE FIRST PARTITION (expr): Drops all partitions before the partition matching the expr (i.e. sets that partition as the new first partition) i.e. will return the partitions from old FIRST partition to (and including) new FIRST partition
- ALTER TABLE LAST PARTITION (expr): Creates new partitions from (excluding) old LAST partition to (including) new LAST partition
partition definitions will be set on partitionOptions
func GetAllDDLJobs ¶
GetAllDDLJobs gets all DDL jobs and sorts them by job.ID.
func GetAllHistoryDDLJobs ¶
GetAllHistoryDDLJobs gets all the finished DDL jobs.
func GetCharsetAndCollateInTableOption ¶
func GetCharsetAndCollateInTableOption(startIdx int, options []*ast.TableOption, defaultUTF8MB4Coll string) (chs, coll string, err error)
GetCharsetAndCollateInTableOption iterates over the charset and collate in the options and returns the last charset and collate found. If there is no charset in the options, the returned charset will be "", and the same applies to collate.
func GetColumnForeignKeyInfo ¶
GetColumnForeignKeyInfo returns the foreign key info for the given column.
func GetDefaultCollation ¶
GetDefaultCollation returns the default collation for the charset and handles the default collation for UTF8MB4.
func GetDropOrTruncateTableInfoFromJobsByStore ¶
func GetDropOrTruncateTableInfoFromJobsByStore(jobs []*model.Job, gcSafePoint uint64, getTable func(uint64, int64, int64) (*model.TableInfo, error), fn func(*model.Job, *model.TableInfo) (bool, error)) (bool, error)
GetDropOrTruncateTableInfoFromJobsByStore implements GetDropOrTruncateTableInfoFromJobs
func GetHistoryJobByID ¶
GetHistoryJobByID returns the history DDL job by ID.
func GetLastHistoryDDLJobsIterator ¶
func GetLastHistoryDDLJobsIterator(m meta.Reader) (meta.LastJobIterator, error)
GetLastHistoryDDLJobsIterator gets latest N history DDL jobs iterator.
func GetLastNHistoryDDLJobs ¶
GetLastNHistoryDDLJobs returns the DDL history jobs and an error. The maximum count of history jobs is maxNumJobs.
func GetName4AnonymousIndex ¶
GetName4AnonymousIndex returns a valid name for anonymous index.
func GetPartitionAffinityGroupID ¶
GetPartitionAffinityGroupID returns the affinity group ID for a partition. Format: "_tidb_pt_{tableID}_p{partitionID}"
func GetRangeEndKey ¶
func GetRangeEndKey(ctx *ReorgContext, store kv.Storage, priority int, keyPrefix kv.Key, startKey, endKey kv.Key) (kv.Key, error)
GetRangeEndKey gets the actual end key for the range of [startKey, endKey).
func GetRangePlacementPolicyName ¶
GetRangePlacementPolicyName gets the placement policy name used by the range. rangeBundleID is limited to TiDBBundleRangePrefixForGlobal and TiDBBundleRangePrefixForMeta.
func GetTableAffinityGroupID ¶
GetTableAffinityGroupID returns the affinity group ID for a table. Format: "_tidb_t_{tableID}"
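For example, given the formats above, a table with ID 110 and one of its partitions with ID 112 map to the following group IDs (the IDs are illustrative):

groupID := ddl.GetTableAffinityGroupID(110)              // "_tidb_t_110"
partGroupID := ddl.GetPartitionAffinityGroupID(110, 112) // "_tidb_pt_110_p112"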
func GetTableInfoAndCancelFaultJob ¶
func GetTableInfoAndCancelFaultJob(t *meta.Mutator, job *model.Job, schemaID int64) (*model.TableInfo, error)
GetTableInfoAndCancelFaultJob is exported for test.
func GetTableMaxHandle ¶
func GetTableMaxHandle(ctx *ReorgContext, store kv.Storage, startTS uint64, tbl table.PhysicalTable) (maxHandle kv.Handle, emptyTable bool, err error)
GetTableMaxHandle gets the max handle of a PhysicalTable.
func GetWaitTimeWhenErrorOccurred ¶
GetWaitTimeWhenErrorOccurred returns the waiting interval used when processing DDL jobs encounters errors.
func HandleLockTablesOnFinish ¶
func HandleLockTablesOnFinish(ctx sessionctx.Context, jobW *JobWrapper, ddlErr error)
HandleLockTablesOnFinish handles the table lock for the job which is finished. exported for testing purpose.
func HandleLockTablesOnSuccessSubmit ¶
func HandleLockTablesOnSuccessSubmit(ctx sessionctx.Context, jobW *JobWrapper)
HandleLockTablesOnSuccessSubmit handles the table lock for the job which is submitted successfully. exported for testing purpose.
func InitAndAddColumnToTable ¶
func InitAndAddColumnToTable(tblInfo *model.TableInfo, colInfo *model.ColumnInfo) *model.ColumnInfo
InitAndAddColumnToTable initializes the ColumnInfo in-place and adds it to the table.
func IsAutoRandomColumnID ¶
IsAutoRandomColumnID returns true if the given column ID belongs to an auto_random column.
func IsColumnDroppableWithCheckConstraint ¶
IsColumnDroppableWithCheckConstraint checks whether the column appears in a check constraint that depends on more than one column (in which case it cannot be dropped).
func IsColumnRenameableWithCheckConstraint ¶
IsColumnRenameableWithCheckConstraint checks whether the column is referenced in a check constraint.
func IsElemsChangedToModifyColumn ¶
IsElemsChangedToModifyColumn checks whether the element list has changed.
func IterAllDDLJobs ¶
func IterAllDDLJobs(ctx sessionctx.Context, txn kv.Transaction, finishFn func([]*model.Job) (bool, error)) error
IterAllDDLJobs iterates over the running DDL jobs first and returns directly if `finishFn` returns true or an error; it then iterates over the history DDL jobs until `finishFn` returns true or an error.
func IterHistoryDDLJobs ¶
IterHistoryDDLJobs iterates over the history DDL jobs until `finishFn` returns true or an error.
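A minimal sketch of the finishFn callback pattern, assuming the caller already holds a kv.Transaction and wants the most recent finished drop-table job:

var dropJob *model.Job
err := ddl.IterHistoryDDLJobs(txn, func(jobs []*model.Job) (bool, error) {
	for _, job := range jobs {
		if job.Type == model.ActionDropTable {
			dropJob = job
			return true, nil // returning true stops the iteration early
		}
	}
	return false, nil // keep scanning older history jobs
})
if err == nil && dropJob != nil {
	fmt.Println("found drop-table job", dropJob.ID)
}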
func JobNeedGC ¶
JobNeedGC is called to determine whether delete-ranges need to be generated for the provided job.
NOTICE: BR also uses jobNeedGC to determine whether delete-ranges need to be generated for the provided job. Therefore, please make sure any modification is compatible with BR.
func LoadTiFlashReplicaInfo ¶
func LoadTiFlashReplicaInfo(tblInfo *model.TableInfo, tableList *[]TiFlashReplicaStatus)
LoadTiFlashReplicaInfo parses model.TableInfo into []TiFlashReplicaStatus.
func LocateOffsetToMove ¶
func LocateOffsetToMove(currentOffset int, pos *ast.ColumnPosition, tblInfo *model.TableInfo) (destOffset int, err error)
LocateOffsetToMove returns the offset of the column to move.
func MockTableInfo ¶
func MockTableInfo(sctx sessionctx.Context, stmt *ast.CreateTableStmt, tableID int64) (*model.TableInfo, error)
MockTableInfo mocks a table info from a CREATE TABLE statement AST and a specified table ID.
func NeedToOverwriteColCharset ¶
func NeedToOverwriteColCharset(options []*ast.TableOption) bool
NeedToOverwriteColCharset returns true when the charset is altered and CONVERT TO is specified.
func NewAddIndexIngestPipeline ¶
func NewAddIndexIngestPipeline(
	ctx *workerpool.Context,
	store kv.Storage,
	sessPool opSessPool,
	backendCtx ingest.BackendCtx,
	engines []ingest.Engine,
	jobID int64,
	tbl table.PhysicalTable,
	idxInfos []*model.IndexInfo,
	startKey, endKey kv.Key,
	reorgMeta *model.DDLReorgMeta,
	avgRowSize int,
	concurrency int,
	collector execute.Collector,
) (*operator.AsyncPipeline, error)
NewAddIndexIngestPipeline creates a pipeline for adding index in ingest mode.
func NewBackfillingSchedulerForTest ¶
NewBackfillingSchedulerForTest creates a new backfillingSchedulerExt, only used for test now.
func NewDDLReorgMeta ¶
func NewDDLReorgMeta(ctx sessionctx.Context) *model.DDLReorgMeta
NewDDLReorgMeta creates a DDL ReorgMeta.
func NewLocalWorkerCtx ¶
func NewLocalWorkerCtx(ctx context.Context, jobID int64) *workerpool.Context
NewLocalWorkerCtx is used for adding index with local ingest mode.
func NewMetaBuildContextWithSctx ¶
func NewMetaBuildContextWithSctx(sctx sessionctx.Context, otherOpts ...metabuild.Option) *metabuild.Context
NewMetaBuildContextWithSctx creates a new MetaBuildContext with the given session context.
func NewReorgCopContext ¶
func NewReorgCopContext(
	reorgMeta *model.DDLReorgMeta,
	tblInfo *model.TableInfo,
	allIdxInfo []*model.IndexInfo,
	requestSource string,
) (copr.CopContext, error)
NewReorgCopContext creates a CopContext for reorg
func NewReorgHandlerForTest ¶
func NewReorgHandlerForTest(se sessionctx.Context) *reorgHandler
NewReorgHandlerForTest creates a new reorgHandler, only used in test.
func NewWriteIndexToExternalStoragePipeline ¶
func NewWriteIndexToExternalStoragePipeline(
	ctx *workerpool.Context,
	store kv.Storage,
	extStore objstore.Storage,
	sessPool opSessPool,
	taskID, subtaskID int64,
	tbl table.PhysicalTable,
	idxInfos []*model.IndexInfo,
	startKey, endKey kv.Key,
	onClose external.OnWriterCloseFunc,
	reorgMeta *model.DDLReorgMeta,
	avgRowSize int,
	concurrency int,
	resource *proto.StepResource,
	collector execute.Collector,
	tikvCodec tikv.Codec,
) (*operator.AsyncPipeline, error)
NewWriteIndexToExternalStoragePipeline creates a pipeline for writing index to external storage.
func OverwriteCollationWithBinaryFlag ¶
func OverwriteCollationWithBinaryFlag(colDef *ast.ColumnDef, chs, coll string, defaultUTF8MB4Coll string) (newChs string, newColl string)
OverwriteCollationWithBinaryFlag is used to handle the case like
CREATE TABLE t (a VARCHAR(255) BINARY) CHARSET utf8 COLLATE utf8_general_ci;
The 'BINARY' sets the column collation to *_bin according to the table charset.
func PauseAllJobsBySystem ¶
func PauseAllJobsBySystem(se sessionctx.Context) (map[int64]error, error)
PauseAllJobsBySystem pauses all running Jobs because of internal reasons.
func PauseJobsBySystem ¶
func PauseJobsBySystem(se sessionctx.Context, ids []int64) (errs []error, err error)
PauseJobsBySystem pauses Jobs because of internal reasons.
func PollAvailableTableProgress ¶
func PollAvailableTableProgress(schemas infoschema.InfoSchema, _ sessionctx.Context, pollTiFlashContext *TiFlashManagementContext)
PollAvailableTableProgress will poll and check availability of available tables.
func ProcessColumnCharsetAndCollation ¶
func ProcessColumnCharsetAndCollation(ctx *metabuild.Context, col *table.Column, newCol *table.Column, meta *model.TableInfo, specNewColumn *ast.ColumnDef, schema *model.DBInfo) error
ProcessColumnCharsetAndCollation processes the column charset and collation.
func ProcessModifyColumnOptions ¶
func ProcessModifyColumnOptions(ctx sessionctx.Context, col *table.Column, options []*ast.ColumnOption) error
ProcessModifyColumnOptions processes column options. Exported for tiflow.
func RemoveDependentHiddenColumns ¶
RemoveDependentHiddenColumns removes hidden columns by the indexInfo.
func ResolveAlterAlgorithm ¶
func ResolveAlterAlgorithm(alterSpec *ast.AlterTableSpec, specify ast.AlgorithmType) (ast.AlgorithmType, error)
ResolveAlterAlgorithm resolves the algorithm of the alterSpec. If specify is ast.AlterAlgorithmDefault, the default algorithm of the alter action is returned. If the specified algorithm is not supported by the alter action, it tries to find a better algorithm in the order `INSTANT > INPLACE > COPY`, and errAlterOperationNotSupported will also be returned; e.g. INSTANT may be returned if specify=INPLACE. If no valid algorithm can be chosen, AlgorithmTypeDefault and errAlterOperationNotSupported are returned.
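A hedged sketch of the fallback behavior described above; the AlterTableSpec construction is illustrative only:

spec := &ast.AlterTableSpec{Tp: ast.AlterTableOption} // hypothetical spec
algo, err := ddl.ResolveAlterAlgorithm(spec, ast.AlgorithmTypeInplace)
// Per the description above, if INPLACE is not supported for this action, a better
// algorithm (e.g. INSTANT) may be returned together with errAlterOperationNotSupported.
_ = algo
_ = err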
func ResolveAlterTableSpec ¶
func ResolveAlterTableSpec(ctx sessionctx.Context, specs []*ast.AlterTableSpec) ([]*ast.AlterTableSpec, error)
ResolveAlterTableSpec resolves the alter table algorithm and removes ignored table specs from specs. It returns the valid specs and any error that occurred.
func ResolveCharsetCollation ¶
func ResolveCharsetCollation(charsetOpts []ast.CharsetOpt, utf8MB4DefaultColl string) (chs string, coll string, err error)
ResolveCharsetCollation will resolve the charset and collate by the order of parameters: * If any given ast.CharsetOpt is not empty, the resolved charset and collate will be returned. * If all ast.CharsetOpts are empty, the default charset and collate will be returned.
func ResumeAllJobsBySystem ¶
func ResumeAllJobsBySystem(se sessionctx.Context) (map[int64]error, error)
ResumeAllJobsBySystem resumes all paused Jobs because of internal reasons.
func ResumeJobs ¶
ResumeJobs resumes the DDL jobs according to user command.
func ResumeJobsBySystem ¶
func ResumeJobsBySystem(se sessionctx.Context, ids []int64) (errs []error, err error)
ResumeJobsBySystem resumes Jobs that are paused by TiDB itself.
func ScanHistoryDDLJobs ¶
ScanHistoryDDLJobs gets some of the finished DDL jobs. When the DDL history is quite large, the GetAllHistoryDDLJobs() API can't work well, because it can make the server OOM. The result is in descending order by job ID.
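A minimal sketch, assuming a startJobID of 0 means starting from the newest history job:

// Fetch up to 128 of the most recent history jobs (newest first) instead of
// loading the whole history with GetAllHistoryDDLJobs.
jobs, err := ddl.ScanHistoryDDLJobs(m, 0, 128)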
func SendFlashbackToVersionRPC ¶
func SendFlashbackToVersionRPC(
	ctx context.Context,
	s tikv.Storage,
	version uint64,
	startTS, commitTS uint64,
	r tikvstore.KeyRange,
) (rangetask.TaskStat, error)
SendFlashbackToVersionRPC flashes the MVCC keys back to the given version. This function is also called by BR for volume snapshot backup and restore.
func SendPrepareFlashbackToVersionRPC ¶
func SendPrepareFlashbackToVersionRPC(
	ctx context.Context,
	s tikv.Storage,
	flashbackTS, startTS uint64,
	r tikvstore.KeyRange,
) (rangetask.TaskStat, error)
SendPrepareFlashbackToVersionRPC prepares regions for flashback: the purpose is to put the regions into the flashback state, in which they stop serving writes. This function is also called by BR for volume snapshot backup and restore.
func SetBatchInsertDeleteRangeSize ¶
func SetBatchInsertDeleteRangeSize(i int)
SetBatchInsertDeleteRangeSize sets the batch insert/delete range size in the test
func SetDefaultValue ¶
func SetDefaultValue(ctx expression.BuildContext, col *table.Column, option *ast.ColumnOption) (hasDefaultValue bool, err error)
SetDefaultValue sets the default value of the column.
func SetDirectPlacementOpt ¶
func SetDirectPlacementOpt(placementSettings *model.PlacementSettings, placementOptionType ast.PlacementOptionType, stringVal string, uintVal uint64) error
SetDirectPlacementOpt tries to make the PlacementSettings assignments generic for Schema/Table/Partition
func SetDirectResourceGroupBackgroundOption ¶
func SetDirectResourceGroupBackgroundOption(resourceGroupSettings *model.ResourceGroupSettings, opt *ast.ResourceGroupBackgroundOption) error
SetDirectResourceGroupBackgroundOption set background configs of the ResourceGroupSettings.
func SetDirectResourceGroupRunawayOption ¶
func SetDirectResourceGroupRunawayOption(resourceGroupSettings *model.ResourceGroupSettings, opt *ast.ResourceGroupRunawayOption) error
SetDirectResourceGroupRunawayOption tries to set runaway part of the ResourceGroupSettings.
func SetDirectResourceGroupSettings ¶
func SetDirectResourceGroupSettings(groupInfo *model.ResourceGroupInfo, opt *ast.ResourceGroupOption) error
SetDirectResourceGroupSettings tries to set the ResourceGroupSettings.
func SetSchemaDiffForCreateTable ¶
func SetSchemaDiffForCreateTable(diff *model.SchemaDiff, job *model.Job, jobCtx *jobContext) error
SetSchemaDiffForCreateTable set SchemaDiff for ActionCreateTable.
func SetSchemaDiffForCreateTables ¶
func SetSchemaDiffForCreateTables(diff *model.SchemaDiff, job *model.Job) error
SetSchemaDiffForCreateTables set SchemaDiff for ActionCreateTables.
func SetSchemaDiffForCreateView ¶
func SetSchemaDiffForCreateView(diff *model.SchemaDiff, job *model.Job) error
SetSchemaDiffForCreateView set SchemaDiff for ActionCreateView.
func SetSchemaDiffForDropTable ¶
func SetSchemaDiffForDropTable(diff *model.SchemaDiff, job *model.Job, jobCtx *jobContext)
SetSchemaDiffForDropTable set SchemaDiff for ActionDropTable.
func SetSchemaDiffForDropTablePartition ¶
func SetSchemaDiffForDropTablePartition(diff *model.SchemaDiff, job *model.Job, jobCtx *jobContext)
SetSchemaDiffForDropTablePartition set SchemaDiff for ActionDropTablePartition.
func SetSchemaDiffForExchangeTablePartition ¶
func SetSchemaDiffForExchangeTablePartition(diff *model.SchemaDiff, job *model.Job, multiInfos ...schemaIDAndTableInfo) error
SetSchemaDiffForExchangeTablePartition set SchemaDiff for ActionExchangeTablePartition.
func SetSchemaDiffForFlashbackCluster ¶
func SetSchemaDiffForFlashbackCluster(diff *model.SchemaDiff, job *model.Job)
SetSchemaDiffForFlashbackCluster set SchemaDiff for ActionFlashbackCluster.
func SetSchemaDiffForMultiInfos ¶
func SetSchemaDiffForMultiInfos(diff *model.SchemaDiff, multiInfos ...schemaIDAndTableInfo)
SetSchemaDiffForMultiInfos set SchemaDiff for multiInfos.
func SetSchemaDiffForPartitionModify ¶
func SetSchemaDiffForPartitionModify(diff *model.SchemaDiff, job *model.Job, jobCtx *jobContext)
SetSchemaDiffForPartitionModify set SchemaDiff for ActionRemovePartitioning, ActionAlterTablePartitioning.
func SetSchemaDiffForRecoverSchema ¶
func SetSchemaDiffForRecoverSchema(diff *model.SchemaDiff, job *model.Job) error
SetSchemaDiffForRecoverSchema set SchemaDiff for ActionRecoverSchema.
func SetSchemaDiffForRecoverTable ¶
func SetSchemaDiffForRecoverTable(diff *model.SchemaDiff, job *model.Job, jobCtx *jobContext)
SetSchemaDiffForRecoverTable set SchemaDiff for ActionRecoverTable.
func SetSchemaDiffForRenameTable ¶
func SetSchemaDiffForRenameTable(diff *model.SchemaDiff, job *model.Job, jobCtx *jobContext) error
SetSchemaDiffForRenameTable set SchemaDiff for ActionRenameTable.
func SetSchemaDiffForRenameTables ¶
func SetSchemaDiffForRenameTables(diff *model.SchemaDiff, _ *model.Job, jobCtx *jobContext) error
SetSchemaDiffForRenameTables set SchemaDiff for ActionRenameTables.
func SetSchemaDiffForReorganizePartition ¶
func SetSchemaDiffForReorganizePartition(diff *model.SchemaDiff, job *model.Job, jobCtx *jobContext)
SetSchemaDiffForReorganizePartition set SchemaDiff for ActionReorganizePartition.
func SetSchemaDiffForTruncateTable ¶
func SetSchemaDiffForTruncateTable(diff *model.SchemaDiff, job *model.Job, jobCtx *jobContext) error
SetSchemaDiffForTruncateTable set SchemaDiff for ActionTruncateTable.
func SetSchemaDiffForTruncateTablePartition ¶
func SetSchemaDiffForTruncateTablePartition(diff *model.SchemaDiff, job *model.Job, jobCtx *jobContext)
SetSchemaDiffForTruncateTablePartition set SchemaDiff for ActionTruncateTablePartition.
func SetWaitTimeWhenErrorOccurred ¶
SetWaitTimeWhenErrorOccurred updates the waiting interval used when processing DDL jobs encounters errors.
func ShouldBuildClusteredIndex ¶
func ShouldBuildClusteredIndex(mode vardef.ClusteredIndexDefMode, opt *ast.IndexOption, isSingleIntPK bool) bool
ShouldBuildClusteredIndex is used to determine whether the CREATE TABLE statement should build a clustered index table.
func SplitRecordRegion ¶
func SplitRecordRegion(ctx context.Context, store kv.SplittableStore, physicalTableID, tableID int64, scatterScope string) uint64
SplitRecordRegion splits a region in the store by the table prefix.
func StartOwnerManager ¶
StartOwnerManager starts a global DDL owner manager.
func UpdateColsNull2NotNull ¶
UpdateColsNull2NotNull changes the null option of columns of an index.
func UpdateIndexCol ¶
func UpdateIndexCol(idxCol *model.IndexColumn, changingCol *model.ColumnInfo)
UpdateIndexCol sets index column name and offset from changing ColumnInfo.
func ValidateFlashbackTS ¶
ValidateFlashbackTS validates that flashBackTS is in the range [gcSafePoint, currentTS).
func ValidateRenameIndex ¶
ValidateRenameIndex checks if index name is ok to be renamed.
func WaitScatterRegionFinish ¶
func WaitScatterRegionFinish(ctx context.Context, store kv.SplittableStore, regionIDs ...uint64)
WaitScatterRegionFinish will block until all regions are scattered.
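A hedged sketch combining the two helpers above; the context, store, table IDs, and scatterScope value are assumed to be provided by the caller:

// Split a new region for the table's record key range, then block until region
// scattering has finished.
regionID := ddl.SplitRecordRegion(ctx, store, physicalTableID, tableID, scatterScope)
ddl.WaitScatterRegionFinish(ctx, store, regionID)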
Types ¶
type AlterAlgorithm ¶
type AlterAlgorithm struct {
// contains filtered or unexported fields
}
AlterAlgorithm is used to store the supported alter algorithms. For now, TiDB only supports AlterAlgorithmInplace and AlterAlgorithmInstant. Most alter operations use the instant algorithm, and only add index uses inplace (not really inplace, because we never block DML, but it takes some time to backfill the index data). See https://dev.mysql.com/doc/refman/8.0/en/alter-table.html#alter-table-performance.
type AvailableTableID ¶
AvailableTableID is the table ID info of an available table that is waiting for its TiFlash replica progress to be updated.
type BackfillCleanUpS3 ¶
type BackfillCleanUpS3 struct {
}
BackfillCleanUpS3 implements scheduler.CleanUpRoutine.
type BackfillSubTaskMeta ¶
type BackfillSubTaskMeta struct {
external.BaseExternalMeta
PhysicalTableID int64 `json:"physical_table_id"`
// Used by read index step.
RowStart []byte `json:"row_start"`
RowEnd []byte `json:"row_end"`
// Used by global sort write & ingest step.
RangeJobKeys [][]byte `json:"range_job_keys,omitempty" external:"true"`
RangeSplitKeys [][]byte `json:"range_split_keys,omitempty" external:"true"`
DataFiles []string `json:"data-files,omitempty" external:"true"`
StatFiles []string `json:"stat-files,omitempty" external:"true"`
// TS is used to make sure subtasks are idempotent.
// TODO(tangenta): support local sort.
TS uint64 `json:"ts,omitempty"`
// Each group of MetaGroups represents a different index kvs meta.
MetaGroups []*external.SortedKVMeta `json:"meta_groups,omitempty" external:"true"`
// EleIDs stands for the index/column IDs to backfill with distributed framework.
// After the subtask is finished, EleIDs should have the same length as
// MetaGroups, and they are in the same order.
EleIDs []int64 `json:"ele_ids,omitempty" external:"true"`
// Only used for adding one single index.
// Keep this for compatibility with v7.5.
external.SortedKVMeta `json:",inline" external:"true"`
}
BackfillSubTaskMeta is the sub-task meta for backfilling index.
func (*BackfillSubTaskMeta) Marshal ¶
func (m *BackfillSubTaskMeta) Marshal() ([]byte, error)
Marshal marshals the backfill subtask meta to JSON.
type BackfillTaskMeta ¶
type BackfillTaskMeta struct {
Job model.Job `json:"job"`
// EleIDs stands for the index/column IDs to backfill with distributed framework.
EleIDs []int64 `json:"ele_ids"`
// EleTypeKey is the type of the element to backfill with distributed framework.
// For now, only index type is supported.
EleTypeKey []byte `json:"ele_type_key"`
CloudStorageURI string `json:"cloud_storage_uri"`
EstimateRowSize int `json:"estimate_row_size"`
MergeTempIndex bool `json:"merge_temp_index"`
Version int `json:"version,omitempty"`
}
BackfillTaskMeta is the dist task meta for backfilling index.
type CreateTableConfig ¶
type CreateTableConfig struct {
OnExist OnExist
// IDAllocated indicates whether the job has allocated all IDs for tables affected
// in the job, if true, DDL will not allocate IDs for them again, it's only used
// by BR now. By reusing IDs BR can save a lot of works such as rewriting table
// IDs in backed up KVs.
IDAllocated bool
}
CreateTableConfig is the configuration of `CreateTableWithInfo`.
func GetCreateTableConfig ¶
func GetCreateTableConfig(cs []CreateTableOption) CreateTableConfig
GetCreateTableConfig applies the series of config options from default config and returns the final config.
type CreateTableOption ¶
type CreateTableOption func(*CreateTableConfig)
CreateTableOption is the option for creating table.
func WithIDAllocated ¶
func WithIDAllocated(idAllocated bool) CreateTableOption
WithIDAllocated applies the IDAllocated option. WARNING!!!: if idAllocated == true, DDL will NOT allocate IDs by itself. That means if the caller cannot guarantee that the IDs are unique, we get inconsistency. This option is only exposed to be used by BR.
func WithOnExist ¶
func WithOnExist(o OnExist) CreateTableOption
WithOnExist applies the OnExist option.
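A minimal sketch of how the options compose via GetCreateTableConfig; OnExistIgnore is assumed to be one of the OnExist values (they are not listed in this excerpt):

cfg := ddl.GetCreateTableConfig([]ddl.CreateTableOption{
	ddl.WithOnExist(ddl.OnExistIgnore), // assumed OnExist value: ignore existing tables
	ddl.WithIDAllocated(true),          // BR-only: caller must guarantee the IDs are unique
})
_ = cfg.IDAllocated // true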
type DDL ¶
type DDL interface {
// Start campaigns the owner and starts workers.
// ctxPool is used for the worker's delRangeManager and creates sessions.
Start(startMode StartMode, ctxPool *pools.ResourcePool) error
// Stats returns the DDL statistics.
Stats(vars *variable.SessionVars) (map[string]any, error)
// GetScope gets the status variables scope.
GetScope(status string) vardef.ScopeFlag
// Stop stops DDL worker.
Stop() error
// RegisterStatsHandle registers statistics handle and its corresponding event channel for ddl.
RegisterStatsHandle(*handle.Handle)
// SchemaSyncer gets the schema syncer.
SchemaSyncer() schemaver.Syncer
// StateSyncer gets the cluster state syncer.
StateSyncer() serverstate.Syncer
// OwnerManager gets the owner manager.
OwnerManager() owner.Manager
// GetID gets the ddl ID.
GetID() string
// GetMinJobIDRefresher gets the MinJobIDRefresher, this api only works after Start.
GetMinJobIDRefresher() *systable.MinJobIDRefresher
}
DDL is responsible for updating schema in data store and maintaining in-memory InfoSchema cache.
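A hedged sketch of constructing and starting a DDL instance with the Option constructors listed in the index; the lease value, start mode, and resource pool are illustrative and supplied by the caller:

func startDDL(ctx context.Context, store kv.Storage, ic *infoschema.InfoCache,
	mode ddl.StartMode, ctxPool *pools.ResourcePool) (ddl.DDL, ddl.Executor, error) {
	// NewDDL returns both the background DDL instance and the statement-level Executor.
	d, exec := ddl.NewDDL(ctx,
		ddl.WithStore(store),
		ddl.WithLease(45*time.Second), // schema lease; value is illustrative
		ddl.WithInfoCache(ic),
	)
	// Start campaigns for ownership and starts the workers.
	if err := d.Start(mode, ctxPool); err != nil {
		return nil, nil, err
	}
	return d, exec, nil
}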
type DelRangeExecWrapper ¶
type DelRangeExecWrapper interface {
// generate a new tso for the next job
UpdateTSOForJob() error
// initialize the paramsList
PrepareParamsList(sz int)
// rewrite table id if necessary, used for BR
RewriteTableID(tableID int64) (int64, bool)
// (job_id, element_id, start_key, end_key, ts)
// ts is generated by delRangeExecWrapper itself
AppendParamsList(jobID, elemID int64, startKey, endKey string)
// consume the delete range. For TiDB Server, it insert rows into mysql.gc_delete_range.
ConsumeDeleteRange(ctx context.Context, sql string) error
}
DelRangeExecWrapper consumes the delete ranges with the provided table ID(s) and index ID(s).
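A hedged sketch of a minimal DelRangeExecWrapper that only logs the generated delete ranges; the meaning of RewriteTableID's boolean result (whether to keep the range) is an assumption based on the BR use case:

type loggingDelRangeWrapper struct {
	params [][]any
}

func (w *loggingDelRangeWrapper) UpdateTSOForJob() error   { return nil } // no per-job TSO handling here
func (w *loggingDelRangeWrapper) PrepareParamsList(sz int) { w.params = make([][]any, 0, sz) }
func (w *loggingDelRangeWrapper) RewriteTableID(tableID int64) (int64, bool) {
	return tableID, true // keep the original table ID (assumption)
}
func (w *loggingDelRangeWrapper) AppendParamsList(jobID, elemID int64, startKey, endKey string) {
	w.params = append(w.params, []any{jobID, elemID, startKey, endKey})
}
func (w *loggingDelRangeWrapper) ConsumeDeleteRange(ctx context.Context, sql string) error {
	log.Printf("delete-range SQL: %s (%d param rows)", sql, len(w.params))
	return nil
}

Such a wrapper could then be passed to AddDelRangeJobInternal to have the delete ranges for a job generated and consumed through it.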
type Executor ¶
type Executor interface {
CreateSchema(ctx sessionctx.Context, stmt *ast.CreateDatabaseStmt) error
AlterSchema(sctx sessionctx.Context, stmt *ast.AlterDatabaseStmt) error
DropSchema(ctx sessionctx.Context, stmt *ast.DropDatabaseStmt) error
CreateTable(ctx sessionctx.Context, stmt *ast.CreateTableStmt) error
CreateView(ctx sessionctx.Context, stmt *ast.CreateViewStmt) error
DropTable(ctx sessionctx.Context, stmt *ast.DropTableStmt) (err error)
RecoverTable(ctx sessionctx.Context, recoverTableInfo *model.RecoverTableInfo) (err error)
RecoverSchema(ctx sessionctx.Context, recoverSchemaInfo *model.RecoverSchemaInfo) error
DropView(ctx sessionctx.Context, stmt *ast.DropTableStmt) (err error)
CreateIndex(ctx sessionctx.Context, stmt *ast.CreateIndexStmt) error
DropIndex(ctx sessionctx.Context, stmt *ast.DropIndexStmt) error
AlterTable(ctx context.Context, sctx sessionctx.Context, stmt *ast.AlterTableStmt) error
TruncateTable(ctx sessionctx.Context, tableIdent ast.Ident) error
RenameTable(ctx sessionctx.Context, stmt *ast.RenameTableStmt) error
LockTables(ctx sessionctx.Context, stmt *ast.LockTablesStmt) error
UnlockTables(ctx sessionctx.Context, lockedTables []model.TableLockTpInfo) error
AlterTableMode(ctx sessionctx.Context, args *model.AlterTableModeArgs) error
CleanupTableLock(ctx sessionctx.Context, tables []*ast.TableName) error
UpdateTableReplicaInfo(ctx sessionctx.Context, physicalID int64, available bool) error
RepairTable(ctx sessionctx.Context, createStmt *ast.CreateTableStmt) error
CreateSequence(ctx sessionctx.Context, stmt *ast.CreateSequenceStmt) error
DropSequence(ctx sessionctx.Context, stmt *ast.DropSequenceStmt) (err error)
AlterSequence(ctx sessionctx.Context, stmt *ast.AlterSequenceStmt) error
CreatePlacementPolicy(ctx sessionctx.Context, stmt *ast.CreatePlacementPolicyStmt) error
DropPlacementPolicy(ctx sessionctx.Context, stmt *ast.DropPlacementPolicyStmt) error
AlterPlacementPolicy(ctx sessionctx.Context, stmt *ast.AlterPlacementPolicyStmt) error
AddResourceGroup(ctx sessionctx.Context, stmt *ast.CreateResourceGroupStmt) error
AlterResourceGroup(ctx sessionctx.Context, stmt *ast.AlterResourceGroupStmt) error
DropResourceGroup(ctx sessionctx.Context, stmt *ast.DropResourceGroupStmt) error
FlashbackCluster(ctx sessionctx.Context, flashbackTS uint64) error
// RefreshMeta can only be called by BR during the log restore phase.
RefreshMeta(ctx sessionctx.Context, args *model.RefreshMetaArgs) error
// CreateSchemaWithInfo creates a database (schema) given its database info.
//
// WARNING: the DDL owns the `info` after calling this function, and will modify its fields
// in-place. If you want to keep using `info`, please call Clone() first.
CreateSchemaWithInfo(
ctx sessionctx.Context,
info *model.DBInfo,
onExist OnExist) error
// CreateTableWithInfo creates a table, view or sequence given its table info.
//
// WARNING: the DDL owns the `info` after calling this function, and will modify its fields
// in-place. If you want to keep using `info`, please call Clone() first.
CreateTableWithInfo(
ctx sessionctx.Context,
schema ast.CIStr,
info *model.TableInfo,
involvingRef []model.InvolvingSchemaInfo,
cs ...CreateTableOption) error
// BatchCreateTableWithInfo is like CreateTableWithInfo, but can handle multiple tables.
BatchCreateTableWithInfo(ctx sessionctx.Context,
schema ast.CIStr,
info []*model.TableInfo,
cs ...CreateTableOption) error
// CreatePlacementPolicyWithInfo creates a placement policy
//
// WARNING: the DDL owns the `policy` after calling this function, and will modify its fields
// in-place. If you want to keep using `policy`, please call Clone() first.
CreatePlacementPolicyWithInfo(ctx sessionctx.Context, policy *model.PolicyInfo, onExist OnExist) error
}
Executor is the interface for executing DDL statements. It is mostly called by the SQL executor. DDL statements are converted into DDL jobs; JobSubmitter submits the jobs to the DDL job table, then jobScheduler schedules them to run on workers asynchronously in parallel, and Executor waits for them to finish.
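As a hedged illustration of that flow (the helper name is made up and the import paths follow the current TiDB repository layout): parse a statement, then hand it to the Executor, which turns it into a job and waits for it.

import (
    "errors"

    "github.com/pingcap/tidb/pkg/ddl"
    "github.com/pingcap/tidb/pkg/parser"
    "github.com/pingcap/tidb/pkg/parser/ast"
    "github.com/pingcap/tidb/pkg/sessionctx"
)

// createTable parses a CREATE TABLE statement and submits it via the Executor.
// se and exec are assumed to come from the surrounding session/domain code.
func createTable(se sessionctx.Context, exec ddl.Executor, sql string) error {
    stmt, err := parser.New().ParseOneStmt(sql, "", "")
    if err != nil {
        return err
    }
    createStmt, ok := stmt.(*ast.CreateTableStmt)
    if !ok {
        return errors.New("not a CREATE TABLE statement")
    }
    return exec.CreateTable(se, createStmt)
}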
type ExecutorForTest ¶
type ExecutorForTest interface {
// DoDDLJob does the DDL job, it's exported for test.
DoDDLJob(ctx sessionctx.Context, job *model.Job) error
// DoDDLJobWrapper similar to DoDDLJob, but with JobWrapper as input.
DoDDLJobWrapper(ctx sessionctx.Context, jobW *JobWrapper) error
}
ExecutorForTest is the interface for executing DDL statements in tests. TODO remove it later
type IndexIngestOperator ¶
type IndexIngestOperator struct {
*operator.AsyncOperator[IndexRecordChunk, IndexWriteResult]
}
IndexIngestOperator writes index records to ingest engine.
func NewIndexIngestOperator ¶
func NewIndexIngestOperator(
    ctx *workerpool.Context,
    copCtx copr.CopContext,
    sessPool opSessPool,
    tbl table.PhysicalTable,
    indexes []table.Index,
    engines []ingest.Engine,
    srcChunkPool *sync.Pool,
    concurrency int,
    reorgMeta *model.DDLReorgMeta,
    collector execute.Collector,
) *IndexIngestOperator
NewIndexIngestOperator creates a new IndexIngestOperator.
type IndexRecordChunk ¶
type IndexRecordChunk struct {
ID int
Chunk *chunk.Chunk
Err error
Done bool
// contains filtered or unexported fields
}
IndexRecordChunk contains one of the chunks read from the corresponding TableScanTask.
func (IndexRecordChunk) RecoverArgs ¶
func (IndexRecordChunk) RecoverArgs() (metricsLabel string, funcInfo string, err error)
RecoverArgs implements workerpool.TaskMayPanic interface.
type IndexWriteResult ¶
IndexWriteResult contains the result of writing index records to ingest engine.
type Info ¶
type Info struct {
SchemaVer int64
ReorgHandle kv.Key // It's only used for DDL information.
Jobs []*model.Job // It's the currently running jobs.
}
Info is for DDL information.
func GetDDLInfo ¶
func GetDDLInfo(s sessionctx.Context) (*Info, error)
GetDDLInfo returns DDL information; it is only used for testing.
func GetDDLInfoWithNewTxn ¶
func GetDDLInfoWithNewTxn(s sessionctx.Context) (*Info, error)
GetDDLInfoWithNewTxn returns DDL information using a new txn.
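For instance (a test-only sketch, since GetDDLInfo is documented as testing-only; dumpRunningDDL is a made-up helper), the returned Info can be used to inspect the currently running jobs:

import (
    "fmt"

    "github.com/pingcap/tidb/pkg/ddl"
    "github.com/pingcap/tidb/pkg/sessionctx"
)

// dumpRunningDDL prints the current schema version and the running DDL jobs.
func dumpRunningDDL(se sessionctx.Context) error {
    info, err := ddl.GetDDLInfo(se)
    if err != nil {
        return err
    }
    fmt.Printf("schema version: %d\n", info.SchemaVer)
    for _, job := range info.Jobs {
        fmt.Printf("job %d: %s (%s)\n", job.ID, job.Type, job.State)
    }
    return nil
}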
type JobSubmitter ¶
type JobSubmitter struct {
// contains filtered or unexported fields
}
JobSubmitter collects the DDL jobs and submits them to job tables in batch; it is also responsible for allocating IDs for the jobs. When fast-create is enabled, it merges the create-table jobs into a single batch create-table job. Exported for testing.
func (*JobSubmitter) GenGIDAndInsertJobsWithRetry ¶
func (s *JobSubmitter) GenGIDAndInsertJobsWithRetry(ctx context.Context, ddlSe *sess.Session, jobWs []*JobWrapper) error
GenGIDAndInsertJobsWithRetry generates the job-related global IDs and inserts DDL jobs into the DDL job table with retry. Job ID allocation and job insertion are done in the same transaction, because we want DDL jobs to be inserted in ID order, so that we can query from a minimal job ID when scheduling DDL jobs, to mitigate https://github.com/pingcap/tidb/issues/52905. This function therefore has a side effect: it sets the table/db/job IDs of 'jobs'.
type JobWrapper ¶
type JobWrapper struct {
*model.Job
// IDAllocated see config of same name in CreateTableConfig.
// exported for test.
IDAllocated bool
JobArgs model.JobArgs
	// Job submission runs asynchronously; we use this channel to notify the caller.
	// When fast create table is enabled, we might combine multiple jobs into one, and
	// append the channel to this slice.
ResultCh []chan jobSubmitResult
// contains filtered or unexported fields
}
JobWrapper is used to wrap a job and some other information. Exported for testing.
func GetModifiableColumnJob ¶
func GetModifiableColumnJob(
    ctx context.Context,
    sctx sessionctx.Context,
    is infoschema.InfoSchema,
    ident ast.Ident,
    originalColName ast.CIStr,
    schema *model.DBInfo,
    t table.Table,
    spec *ast.AlterTableSpec,
) (*JobWrapper, error)
GetModifiableColumnJob returns a DDL job of model.ActionModifyColumn.
func NewJobWrapper ¶
func NewJobWrapper(job *model.Job, idAllocated bool) *JobWrapper
NewJobWrapper creates a new JobWrapper. Exported for testing.
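A minimal test-style sketch of how the wrapper is used together with ExecutorForTest (imports elided; submitJobForTest is a made-up helper and job is assumed to be a fully built *model.Job):

// submitJobForTest wraps a prebuilt job and runs it through the test-only entry point.
// idAllocated=false means the relevant IDs have not been pre-allocated
// (see the IDAllocated field and CreateTableConfig).
func submitJobForTest(se sessionctx.Context, exec ddl.ExecutorForTest, job *model.Job) error {
    jobW := ddl.NewJobWrapper(job, false)
    return exec.DoDDLJobWrapper(se, jobW)
}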
func NewJobWrapperWithArgs ¶
NewJobWrapperWithArgs creates a new JobWrapper with job args. TODO: merge with NewJobWrapper later.
func (*JobWrapper) FillArgsWithSubJobs ¶
func (jobW *JobWrapper) FillArgsWithSubJobs()
FillArgsWithSubJobs fills args for the job and its sub-jobs.
func (*JobWrapper) NotifyResult ¶
func (jobW *JobWrapper) NotifyResult(err error)
NotifyResult notifies the job submit result.
type LitBackfillScheduler ¶
type LitBackfillScheduler struct {
*scheduler.BaseScheduler
GlobalSort bool
MergeTempIndex bool
// contains filtered or unexported fields
}
LitBackfillScheduler wraps BaseScheduler.
func (*LitBackfillScheduler) Close ¶
func (sch *LitBackfillScheduler) Close()
Close implements BaseScheduler interface.
func (*LitBackfillScheduler) GetEligibleInstances ¶
func (*LitBackfillScheduler) GetEligibleInstances(_ context.Context, _ *proto.Task) ([]string, error)
GetEligibleInstances implements scheduler.Extension interface.
func (*LitBackfillScheduler) GetNextStep ¶
func (sch *LitBackfillScheduler) GetNextStep(task *proto.TaskBase) proto.Step
GetNextStep implements scheduler.Extension interface.
func (*LitBackfillScheduler) Init ¶
func (sch *LitBackfillScheduler) Init() (err error)
Init implements BaseScheduler interface.
func (*LitBackfillScheduler) IsRetryableErr ¶
func (*LitBackfillScheduler) IsRetryableErr(error) bool
IsRetryableErr implements scheduler.Extension interface.
func (*LitBackfillScheduler) ModifyMeta ¶
func (sch *LitBackfillScheduler) ModifyMeta(oldMeta []byte, modifies []proto.Modification) ([]byte, error)
ModifyMeta implements scheduler.Extension interface.
func (*LitBackfillScheduler) OnDone ¶
func (*LitBackfillScheduler) OnDone(_ context.Context, _ diststorage.TaskHandle, _ *proto.Task) error
OnDone implements scheduler.Extension interface.
func (*LitBackfillScheduler) OnNextSubtasksBatch ¶
func (sch *LitBackfillScheduler) OnNextSubtasksBatch(
    ctx context.Context,
    taskHandle diststorage.TaskHandle,
    task *proto.Task,
    execIDs []string,
    nextStep proto.Step,
) (subtaskMeta [][]byte, err error)
OnNextSubtasksBatch generates a batch of the next step's plan.
type MergeTempIndexOperator ¶
type MergeTempIndexOperator struct {
*operator.AsyncOperator[tempIndexScanTask, tempIdxResult]
// contains filtered or unexported fields
}
MergeTempIndexOperator merges the temporary index records into the original index.
func NewMergeTempIndexOperator ¶
func NewMergeTempIndexOperator(
    ctx *workerpool.Context,
    store kv.Storage,
    ptbl table.PhysicalTable,
    idxInfo *model.IndexInfo,
    jobID int64,
    concurrency int,
    batchSize int,
    reorgMeta *model.DDLReorgMeta,
) *MergeTempIndexOperator
NewMergeTempIndexOperator creates a new MergeTempIndexOperator.
func (*MergeTempIndexOperator) Close ¶
func (o *MergeTempIndexOperator) Close() error
Close implements operator.Operator interface.
type OnExist ¶
type OnExist uint8
OnExist specifies what to do when a new object has a name collision.
const (
    // OnExistError throws an error on name collision.
    OnExistError OnExist = iota
    // OnExistIgnore skips creating the new object.
    OnExistIgnore
    // OnExistReplace replaces the old object by the new object. This is only
    // supported by VIEWs at the moment. For other object types, this is
    // equivalent to OnExistError.
    OnExistReplace
)
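A hedged example of passing OnExist, using CreateSchemaWithInfo from the Executor interface above (imports elided; ensureSchema is a made-up helper and dbInfo is assumed to be a fully built *model.DBInfo):

// ensureSchema creates the schema and silently ignores a name collision
// instead of returning an error.
func ensureSchema(se sessionctx.Context, exec ddl.Executor, dbInfo *model.DBInfo) error {
    return exec.CreateSchemaWithInfo(se, dbInfo, ddl.OnExistIgnore)
}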
type Option ¶
type Option func(*Options)
Option represents an option to initialize the DDL module
func WithAutoIDClient ¶
func WithAutoIDClient(cli *autoid.ClientDiscover) Option
WithAutoIDClient specifies the autoid client used by the autoid service for those AUTO_ID_CACHE=1 tables.
func WithEtcdClient ¶
WithEtcdClient specifies the `clientv3.Client` of DDL used to request the etcd service
func WithEventPublishStore ¶
WithEventPublishStore specifies the store used to publish DDL events
func WithInfoCache ¶
func WithInfoCache(ic *infoschema.InfoCache) Option
WithInfoCache specifies the `infoschema.InfoCache`
func WithSchemaLoader ¶
func WithSchemaLoader(loader SchemaLoader) Option
WithSchemaLoader specifies the schema loader used to load schema from storage
type Options ¶
type Options struct {
EtcdCli *clientv3.Client
Store kv.Storage
AutoIDClient *autoid.ClientDiscover
InfoCache *infoschema.InfoCache
Lease time.Duration
SchemaLoader SchemaLoader
EventPublishStore notifier.Store
}
Options represents all the options that the DDL module needs.
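The Option helpers follow the standard functional-options pattern: each one fills a field of Options. A minimal sketch of that pattern (imports elided; the explicit apply loop and buildOptions are illustrative, as the package applies options itself when the DDL module is constructed):

// buildOptions shows how the Option functions populate an Options value.
func buildOptions(autoIDCli *autoid.ClientDiscover, infoCache *infoschema.InfoCache, loader ddl.SchemaLoader) *ddl.Options {
    opts := &ddl.Options{}
    for _, opt := range []ddl.Option{
        ddl.WithAutoIDClient(autoIDCli),
        ddl.WithInfoCache(infoCache),
        ddl.WithSchemaLoader(loader),
    } {
        opt(opts)
    }
    return opts
}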
type PollTiFlashBackoffContext ¶
type PollTiFlashBackoffContext struct {
MinThreshold TiFlashTick
MaxThreshold TiFlashTick
	// Capacity limits the number of tables a backoff pool can handle, in order to limit handling of big tables.
Capacity int
Rate TiFlashTick
// contains filtered or unexported fields
}
PollTiFlashBackoffContext is a collection of all backoff states.
func NewPollTiFlashBackoffContext ¶
func NewPollTiFlashBackoffContext(minThreshold, maxThreshold TiFlashTick, capacity int, rate TiFlashTick) (*PollTiFlashBackoffContext, error)
NewPollTiFlashBackoffContext creates an instance of PollTiFlashBackoffContext.
func (*PollTiFlashBackoffContext) Get ¶
func (b *PollTiFlashBackoffContext) Get(id int64) (*PollTiFlashBackoffElement, bool)
Get returns a pointer to the inner PollTiFlashBackoffElement. Only exported for test.
func (*PollTiFlashBackoffContext) Len ¶
func (b *PollTiFlashBackoffContext) Len() int
Len gets size of PollTiFlashBackoffContext.
func (*PollTiFlashBackoffContext) Put ¶
func (b *PollTiFlashBackoffContext) Put(id int64) bool
Put records the table into the backoff pool if there is enough room; otherwise it returns false.
func (*PollTiFlashBackoffContext) Remove ¶
func (b *PollTiFlashBackoffContext) Remove(id int64) bool
Remove removes the table from the backoff pool, resetting its backoff state.
func (*PollTiFlashBackoffContext) Tick ¶
func (b *PollTiFlashBackoffContext) Tick(id int64) (grew bool, exist bool, cnt int)
Tick first checks and increases the Counter. It returns: 1. a bool indicating whether the threshold grew during this tick; 2. a bool indicating whether this ID exists; 3. an int indicating how many ticks this ID has counted so far.
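Taken together, a hedged sketch of the backoff flow these methods support (imports elided; the thresholds passed to NewPollTiFlashBackoffContext and the helper names are illustrative):

// newBackoff builds a pool with min threshold 1, max threshold 64,
// capacity 1024 tables and growth rate 2 -- all illustrative values.
func newBackoff() (*ddl.PollTiFlashBackoffContext, error) {
    return ddl.NewPollTiFlashBackoffContext(1, 64, 1024, 2)
}

// tickTable advances the backoff state of one table for one polling round.
func tickTable(backoff *ddl.PollTiFlashBackoffContext, tableID int64) {
    if ok := backoff.Put(tableID); !ok {
        // Pool is full (Capacity reached); the caller could poll this table
        // every round instead of backing it off.
        return
    }
    if grew, exists, _ := backoff.Tick(tableID); exists && grew {
        // Threshold just grew: typically the round in which TiFlash is
        // actually queried for this table again.
    }
}

// markAvailable drops a table from the pool once its replica becomes available.
func markAvailable(backoff *ddl.PollTiFlashBackoffContext, tableID int64) {
    backoff.Remove(tableID)
}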
type PollTiFlashBackoffElement ¶
type PollTiFlashBackoffElement struct {
Counter int
Threshold TiFlashTick
TotalCounter int
}
PollTiFlashBackoffElement records the backoff state for each TiFlash table. `Counter` increases on every `Tick`; once it reaches `Threshold`, it is reset to 0 while `Threshold` grows. `TotalCounter` records the total number of `Tick`s this element has received since it was created.
func NewPollTiFlashBackoffElement ¶
func NewPollTiFlashBackoffElement() *PollTiFlashBackoffElement
NewPollTiFlashBackoffElement initializes a backoff element for a TiFlash table.
func (*PollTiFlashBackoffElement) MaybeGrow ¶
func (e *PollTiFlashBackoffElement) MaybeGrow(b *PollTiFlashBackoffContext) bool
MaybeGrow grows the threshold and resets the counter when needed.
func (*PollTiFlashBackoffElement) NeedGrow ¶
func (e *PollTiFlashBackoffElement) NeedGrow() bool
NeedGrow returns whether we need to grow. It is exported for testing.
type ReorgContext ¶
type ReorgContext struct {
// contains filtered or unexported fields
}
ReorgContext contains context info for reorg job. TODO there is another reorgCtx, merge them.
func NewReorgContext ¶
func NewReorgContext() *ReorgContext
NewReorgContext returns a new ddl job context.
type SchemaLoader ¶
type SchemaLoader interface {
Reload() error
}
SchemaLoader is used to reload the info schema; the only implementation is domain currently.
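Since the interface has a single method, a trivial stub is enough for tests that do not need real schema reloads (noopSchemaLoader is a made-up helper, not part of the package):

// noopSchemaLoader satisfies ddl.SchemaLoader without touching storage.
type noopSchemaLoader struct{}

func (noopSchemaLoader) Reload() error { return nil }

// It can then be wired in via the option above: ddl.WithSchemaLoader(noopSchemaLoader{})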
type StartMode ¶
type StartMode string
StartMode is an enum type for the start mode of the DDL.
const (
    // Normal mode, cluster is in normal state.
    Normal StartMode = "normal"
    // Bootstrap mode, cluster is during bootstrap.
    Bootstrap StartMode = "bootstrap"
    // Upgrade mode, cluster is during upgrade, we will force current node to be
    // the DDL owner, to make sure all upgrade related DDLs are run on new version
    // TiDB instance.
    Upgrade StartMode = "upgrade"
    // BR mode, Start DDL from br, with this mode can skip loadSystemStore in next-gen and initLogBackup.
    BR StartMode = "br"
)
type TableScanOperator ¶
type TableScanOperator struct {
*operator.AsyncOperator[TableScanTask, IndexRecordChunk]
// contains filtered or unexported fields
}
TableScanOperator scans table records in given key ranges from kv store.
func NewTableScanOperator ¶
func NewTableScanOperator(
    ctx *workerpool.Context,
    sessPool opSessPool,
    copCtx copr.CopContext,
    srcChkPool *sync.Pool,
    concurrency int,
    hintBatchSize int,
    reorgMeta *model.DDLReorgMeta,
    cpOp ingest.CheckpointOperator,
    collector execute.Collector,
) *TableScanOperator
NewTableScanOperator creates a new TableScanOperator.
func (*TableScanOperator) Close ¶
func (o *TableScanOperator) Close() error
Close implements operator.Operator interface.
type TableScanTask ¶
type TableScanTask struct {
ID int
Start kv.Key
End kv.Key
// contains filtered or unexported fields
}
TableScanTask contains the start key and the end key of a region.
func (TableScanTask) RecoverArgs ¶
func (TableScanTask) RecoverArgs() (metricsLabel string, funcInfo string, err error)
RecoverArgs implements workerpool.TaskMayPanic interface.
func (TableScanTask) String ¶
func (t TableScanTask) String() string
String implements fmt.Stringer interface.
type TableScanTaskSource ¶
type TableScanTaskSource struct {
// contains filtered or unexported fields
}
TableScanTaskSource produces TableScanTask by splitting table records into ranges.
func NewTableScanTaskSource ¶
func NewTableScanTaskSource(
    ctx *workerpool.Context,
    store kv.Storage,
    physicalTable table.PhysicalTable,
    startKey kv.Key,
    endKey kv.Key,
    cpOp ingest.CheckpointOperator,
) *TableScanTaskSource
NewTableScanTaskSource creates a new TableScanTaskSource.
func (*TableScanTaskSource) Close ¶
func (src *TableScanTaskSource) Close() error
Close implements Operator interface.
func (*TableScanTaskSource) Open ¶
func (src *TableScanTaskSource) Open() error
Open implements Operator interface.
func (*TableScanTaskSource) SetSink ¶
func (src *TableScanTaskSource) SetSink(sink operator.DataChannel[TableScanTask])
SetSink implements WithSink interface.
func (*TableScanTaskSource) String ¶
func (*TableScanTaskSource) String() string
String implements fmt.Stringer interface.
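A hedged sketch of the source's lifecycle (imports elided; runScanSource is a made-up helper). Constructing the operator.DataChannel sink is left to the surrounding pipeline code, so it appears here only as a parameter:

// runScanSource wires a TableScanTaskSource to its downstream channel and starts it.
// In the real pipeline, Open and Close are driven by the operator framework, not by hand.
func runScanSource(src *ddl.TableScanTaskSource, sink operator.DataChannel[ddl.TableScanTask]) error {
    src.SetSink(sink) // downstream operators receive TableScanTask from this channel
    if err := src.Open(); err != nil {
        return err
    }
    // ... the pipeline consumes tasks here ...
    return src.Close()
}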
type TaskKeyBuilder ¶
type TaskKeyBuilder struct {
// contains filtered or unexported fields
}
TaskKeyBuilder is used to build task key for the backfill job.
func NewTaskKeyBuilder ¶
func NewTaskKeyBuilder() *TaskKeyBuilder
NewTaskKeyBuilder creates a new TaskKeyBuilder.
func (*TaskKeyBuilder) Build ¶
func (b *TaskKeyBuilder) Build(jobID int64) string
Build builds the task key for the backfill job.
func (*TaskKeyBuilder) SetMergeTempIndex ¶
func (b *TaskKeyBuilder) SetMergeTempIndex(flag bool) *TaskKeyBuilder
SetMergeTempIndex sets whether to merge the temporary index.
func (*TaskKeyBuilder) SetMultiSchema ¶
func (b *TaskKeyBuilder) SetMultiSchema(info *model.MultiSchemaInfo) *TaskKeyBuilder
SetMultiSchema sets the multi-schema change information.
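For example (imports elided; taskKeyFor is a made-up helper, and whether to set the temp-index flag depends on the backfill stage):

// taskKeyFor derives the dist-task key for a backfill job, optionally marking
// the temp-index merge stage and carrying multi-schema-change info.
func taskKeyFor(jobID int64, multiSchema *model.MultiSchemaInfo, mergeTempIndex bool) string {
    return ddl.NewTaskKeyBuilder().
        SetMultiSchema(multiSchema).
        SetMergeTempIndex(mergeTempIndex).
        Build(jobID)
}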
type TempIndexScanTaskSource ¶
type TempIndexScanTaskSource struct {
// contains filtered or unexported fields
}
TempIndexScanTaskSource produces TempIndexScanTask by splitting regions of a temp index range.
func NewTempIndexScanTaskSource ¶
func NewTempIndexScanTaskSource(
    ctx *workerpool.Context,
    store kv.Storage,
    physicalTable table.PhysicalTable,
    startKey kv.Key,
    endKey kv.Key,
) *TempIndexScanTaskSource
NewTempIndexScanTaskSource creates a new TempIndexScanTaskSource.
func (*TempIndexScanTaskSource) Close ¶
func (src *TempIndexScanTaskSource) Close() error
Close implements Operator interface.
func (*TempIndexScanTaskSource) Open ¶
func (src *TempIndexScanTaskSource) Open() error
Open implements Operator interface.
func (*TempIndexScanTaskSource) SetSink ¶
func (src *TempIndexScanTaskSource) SetSink(sink operator.DataChannel[tempIndexScanTask])
SetSink implements WithSink interface.
func (*TempIndexScanTaskSource) String ¶
func (*TempIndexScanTaskSource) String() string
String implements fmt.Stringer interface.
type TiFlashManagementContext ¶
type TiFlashManagementContext struct {
// The latest TiFlash stores info. For Classic kernel, it contains all TiFlash nodes. For NextGen kernel, it contains only TiFlash write nodes.
TiFlashStores map[int64]pd.StoreInfo
PollCounter uint64
Backoff *PollTiFlashBackoffContext
	// Tables waiting for progress updates after becoming available.
UpdatingProgressTables *list.List
}
TiFlashManagementContext is the context for TiFlash Replica Management
func NewTiFlashManagementContext ¶
func NewTiFlashManagementContext() (*TiFlashManagementContext, error)
NewTiFlashManagementContext creates an instance for TiFlashManagementContext.
type TiFlashReplicaStatus ¶
type TiFlashReplicaStatus struct {
ID int64
Count uint64
LocationLabels []string
Available bool
LogicalTableAvailable bool
HighPriority bool
IsPartition bool
}
TiFlashReplicaStatus records status for each TiFlash replica.
type WriteExternalStoreOperator ¶
type WriteExternalStoreOperator struct {
*operator.AsyncOperator[IndexRecordChunk, IndexWriteResult]
// contains filtered or unexported fields
}
WriteExternalStoreOperator writes index records to external storage.
func NewWriteExternalStoreOperator ¶
func NewWriteExternalStoreOperator(
    ctx *workerpool.Context,
    copCtx copr.CopContext,
    sessPool opSessPool,
    taskID int64,
    subtaskID int64,
    tbl table.PhysicalTable,
    indexes []table.Index,
    store objstore.Storage,
    srcChunkPool *sync.Pool,
    concurrency int,
    onClose external.OnWriterCloseFunc,
    memoryQuota uint64,
    reorgMeta *model.DDLReorgMeta,
    tikvCodec tikv.Codec,
    collector execute.Collector,
) *WriteExternalStoreOperator
NewWriteExternalStoreOperator creates a new WriteExternalStoreOperator.
func (*WriteExternalStoreOperator) Close ¶
func (o *WriteExternalStoreOperator) Close() error
Close implements operator.Operator interface.
Source Files
¶
- add_column.go
- affinity.go
- backfilling.go
- backfilling_clean_s3.go
- backfilling_dist_executor.go
- backfilling_dist_scheduler.go
- backfilling_import_cloud.go
- backfilling_merge_sort.go
- backfilling_merge_temp.go
- backfilling_operators.go
- backfilling_read_index.go
- backfilling_txn_executor.go
- bdr.go
- cluster.go
- column.go
- constraint.go
- create_table.go
- ddl.go
- ddl_algorithm.go
- ddl_history.go
- ddl_running_jobs.go
- ddl_tiflash_api.go
- ddl_workerpool.go
- delete_range.go
- delete_range_util.go
- dist_owner.go
- doc.go
- executor.go
- foreign_key.go
- generated_column.go
- index.go
- index_cop.go
- index_merge_tmp.go
- index_presplit.go
- job_scheduler.go
- job_submitter.go
- job_worker.go
- metabuild.go
- mock.go
- modify_column.go
- multi_schema_change.go
- options.go
- owner_mgr.go
- partition.go
- placement_policy.go
- reorg.go
- reorg_util.go
- resource_group.go
- rollingback.go
- sanity_check.go
- schema.go
- schema_version.go
- sequence.go
- split_region.go
- stat.go
- table.go
- table_lock.go
- table_mode.go
- ttl.go
Directories
¶
| Path | Synopsis |
|---|---|
| mock | Package mock is a generated GoMock package. |
| systable | Package systable contains all constants/methods related accessing system tables related to DDL job execution |
| tests | |