forked from forgejo/forgejo
Integrate public as bindata optionally (#293)
* Dropped unused codekit config
* Integrated dynamic and static bindata for public
* Ignore public bindata
* Add a general generate make task
* Integrated flexible public assets into web command
* Updated vendoring, added all missing govendor deps
* Made the linter happy with the bindata and dynamic code
* Moved public bindata definition to modules directory
* Ignoring the new bindata path now
* Updated to the new public modules import path
* Updated public bindata command and drop the new prefix
parent 4680c349dd
commit b6a95a8cb3

691 changed files with 305318 additions and 1272 deletions
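The commit message above toggles the `public` assets between files on disk and assets embedded with go-bindata. Below is a minimal sketch of how such a build-tag switch is commonly wired in Go; the file names, the `Handler` function, and the use of go-bindata-assetfs are illustrative assumptions, not code from this commit (`Asset`, `AssetDir`, and `AssetInfo` would be emitted by the bindata generator):

// modules/public/static.go (hypothetical name), compiled with `-tags bindata`
// +build bindata

package public

import (
    "net/http"

    assetfs "github.com/elazarl/go-bindata-assetfs"
)

// Handler serves the assets that go-bindata compiled into the binary.
func Handler() http.Handler {
    return http.FileServer(&assetfs.AssetFS{
        Asset:     Asset,     // generated by go-bindata (assumption)
        AssetDir:  AssetDir,  // generated by go-bindata (assumption)
        AssetInfo: AssetInfo, // generated by go-bindata (assumption)
        Prefix:    "public",
    })
}

// modules/public/dynamic.go (hypothetical name), the default build
// +build !bindata

package public

import "net/http"

// Handler serves the assets straight from the public/ directory on disk.
func Handler() http.Handler {
    return http.FileServer(http.Dir("public"))
}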
vendor/github.com/pingcap/tidb/ddl/bg_worker.go (generated, vendored, new file, 178 lines)
@@ -0,0 +1,178 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package ddl

import (
    "time"

    "github.com/juju/errors"
    "github.com/ngaut/log"
    "github.com/pingcap/tidb/kv"
    "github.com/pingcap/tidb/meta"
    "github.com/pingcap/tidb/model"
    "github.com/pingcap/tidb/terror"
)

// handleBgJobQueue handles the background job queue.
func (d *ddl) handleBgJobQueue() error {
    if d.isClosed() {
        return nil
    }

    job := &model.Job{}
    err := kv.RunInNewTxn(d.store, false, func(txn kv.Transaction) error {
        t := meta.NewMeta(txn)
        owner, err := d.checkOwner(t, bgJobFlag)
        if terror.ErrorEqual(err, ErrNotOwner) {
            return nil
        }
        if err != nil {
            return errors.Trace(err)
        }

        // get the first background job and run
        job, err = d.getFirstBgJob(t)
        if err != nil {
            return errors.Trace(err)
        }
        if job == nil {
            return nil
        }

        d.runBgJob(t, job)
        if job.IsFinished() {
            err = d.finishBgJob(t, job)
        } else {
            err = d.updateBgJob(t, job)
        }
        if err != nil {
            return errors.Trace(err)
        }

        owner.LastUpdateTS = time.Now().UnixNano()
        err = t.SetBgJobOwner(owner)

        return errors.Trace(err)
    })

    if err != nil {
        return errors.Trace(err)
    }

    return nil
}

// runBgJob runs a background job.
func (d *ddl) runBgJob(t *meta.Meta, job *model.Job) {
    job.State = model.JobRunning

    var err error
    switch job.Type {
    case model.ActionDropSchema:
        err = d.delReorgSchema(t, job)
    case model.ActionDropTable:
        err = d.delReorgTable(t, job)
    default:
        job.State = model.JobCancelled
        err = errors.Errorf("invalid background job %v", job)
    }

    if err != nil {
        if job.State != model.JobCancelled {
            log.Errorf("run background job err %v", errors.ErrorStack(err))
        }

        job.Error = err.Error()
        job.ErrorCount++
    }
}

// prepareBgJob prepares a background job.
func (d *ddl) prepareBgJob(ddlJob *model.Job) error {
    job := &model.Job{
        ID:       ddlJob.ID,
        SchemaID: ddlJob.SchemaID,
        TableID:  ddlJob.TableID,
        Type:     ddlJob.Type,
        Args:     ddlJob.Args,
    }

    err := kv.RunInNewTxn(d.store, true, func(txn kv.Transaction) error {
        t := meta.NewMeta(txn)
        err1 := t.EnQueueBgJob(job)

        return errors.Trace(err1)
    })

    return errors.Trace(err)
}

// startBgJob starts a background job.
func (d *ddl) startBgJob(tp model.ActionType) {
    switch tp {
    case model.ActionDropSchema, model.ActionDropTable:
        asyncNotify(d.bgJobCh)
    }
}

// getFirstBgJob gets the first background job.
func (d *ddl) getFirstBgJob(t *meta.Meta) (*model.Job, error) {
    job, err := t.GetBgJob(0)
    return job, errors.Trace(err)
}

// updateBgJob updates a background job.
func (d *ddl) updateBgJob(t *meta.Meta, job *model.Job) error {
    err := t.UpdateBgJob(0, job)
    return errors.Trace(err)
}

// finishBgJob finishes a background job.
func (d *ddl) finishBgJob(t *meta.Meta, job *model.Job) error {
    log.Warnf("[ddl] finish background job %v", job)
    if _, err := t.DeQueueBgJob(); err != nil {
        return errors.Trace(err)
    }

    err := t.AddHistoryBgJob(job)

    return errors.Trace(err)
}

func (d *ddl) onBackgroundWorker() {
    defer d.wait.Done()

    // we use 4 * lease time to check owner's timeout, so here, we will update owner's status
    // every 2 * lease time, if lease is 0, we will use default 10s.
    checkTime := chooseLeaseTime(2*d.lease, 10*time.Second)

    ticker := time.NewTicker(checkTime)
    defer ticker.Stop()

    for {
        select {
        case <-ticker.C:
            log.Debugf("[ddl] wait %s to check background job status again", checkTime)
        case <-d.bgJobCh:
        case <-d.quitCh:
            return
        }

        err := d.handleBgJobQueue()
        if err != nil {
            log.Errorf("[ddl] handle background job err %v", errors.ErrorStack(err))
        }
    }
}
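bg_worker.go calls chooseLeaseTime, which this hunk does not include. From the call sites (use the lease-derived interval, but fall back to a 10s default when the lease is zero), the following is a plausible sketch of it, inferred from usage rather than the vendored implementation:

package ddl

import "time"

// chooseLeaseTime prefers the lease-derived interval and falls back to
// defaultTime when the lease is unset (a sketch inferred from usage).
func chooseLeaseTime(n, defaultTime time.Duration) time.Duration {
    if n > 0 {
        return n
    }
    return defaultTime
}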
vendor/github.com/pingcap/tidb/ddl/callback.go (generated, vendored, new file, 45 lines)
@@ -0,0 +1,45 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package ddl

import "github.com/pingcap/tidb/model"

// Callback is the interface supporting callback function when DDL changed.
type Callback interface {
    // OnChanged is called after schema is changed.
    OnChanged(err error) error
    // OnJobRunBefore is called before running job.
    OnJobRunBefore(job *model.Job)
    // OnJobUpdated is called after the running job is updated.
    OnJobUpdated(job *model.Job)
}

// BaseCallback implements Callback.OnChanged interface.
type BaseCallback struct {
}

// OnChanged implements Callback interface.
func (c *BaseCallback) OnChanged(err error) error {
    return err
}

// OnJobRunBefore implements Callback.OnJobRunBefore interface.
func (c *BaseCallback) OnJobRunBefore(job *model.Job) {
    // Nothing to do.
}

// OnJobUpdated implements Callback.OnJobUpdated interface.
func (c *BaseCallback) OnJobUpdated(job *model.Job) {
    // Nothing to do.
}
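Callback is the hook a caller hands to NewDDL (defined in ddl.go below); embedding BaseCallback means a custom callback only overrides what it needs. A small illustrative sketch, where the logging callback is an example and not code from this commit:

package ddl

import (
    "github.com/ngaut/log"
    "github.com/pingcap/tidb/model"
)

// loggingCallback logs every job update; the embedded BaseCallback
// supplies no-op implementations for the remaining Callback methods.
type loggingCallback struct {
    BaseCallback
}

// OnJobUpdated overrides the BaseCallback no-op.
func (c *loggingCallback) OnJobUpdated(job *model.Job) {
    log.Infof("[ddl] job %d is now in state %s", job.ID, job.State)
}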
vendor/github.com/pingcap/tidb/ddl/column.go (generated, vendored, new file, 430 lines)
@@ -0,0 +1,430 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package ddl

import (
    "github.com/juju/errors"
    "github.com/ngaut/log"
    "github.com/pingcap/tidb/ast"
    "github.com/pingcap/tidb/column"
    "github.com/pingcap/tidb/kv"
    "github.com/pingcap/tidb/meta"
    "github.com/pingcap/tidb/model"
    "github.com/pingcap/tidb/table"
    "github.com/pingcap/tidb/table/tables"
    "github.com/pingcap/tidb/terror"
)

func (d *ddl) adjustColumnOffset(columns []*model.ColumnInfo, indices []*model.IndexInfo, offset int, added bool) {
    offsetChanged := make(map[int]int)
    if added {
        for i := offset + 1; i < len(columns); i++ {
            offsetChanged[columns[i].Offset] = i
            columns[i].Offset = i
        }
        columns[offset].Offset = offset
    } else {
        for i := offset + 1; i < len(columns); i++ {
            offsetChanged[columns[i].Offset] = i - 1
            columns[i].Offset = i - 1
        }
        columns[offset].Offset = len(columns) - 1
    }

    // TODO: index can't cover the add/remove column with offset now, we may check this later.

    // Update index column offset info.
    for _, idx := range indices {
        for _, col := range idx.Columns {
            newOffset, ok := offsetChanged[col.Offset]
            if ok {
                col.Offset = newOffset
            }
        }
    }
}

func (d *ddl) addColumn(tblInfo *model.TableInfo, colInfo *model.ColumnInfo, pos *ast.ColumnPosition) (*model.ColumnInfo, int, error) {
    // Check column name duplicate.
    cols := tblInfo.Columns
    position := len(cols)

    // Get column position.
    if pos.Tp == ast.ColumnPositionFirst {
        position = 0
    } else if pos.Tp == ast.ColumnPositionAfter {
        c := findCol(cols, pos.RelativeColumn.Name.L)
        if c == nil {
            return nil, 0, errors.Errorf("No such column: %v", pos.RelativeColumn)
        }

        // Insert position is after the mentioned column.
        position = c.Offset + 1
    }

    colInfo.State = model.StateNone
    // To support add column asynchronous, we should mark its offset as the last column.
    // So that we can use origin column offset to get value from row.
    colInfo.Offset = len(cols)

    // Insert col into the right place of the column list.
    newCols := make([]*model.ColumnInfo, 0, len(cols)+1)
    newCols = append(newCols, cols[:position]...)
    newCols = append(newCols, colInfo)
    newCols = append(newCols, cols[position:]...)

    tblInfo.Columns = newCols
    return colInfo, position, nil
}

func (d *ddl) onAddColumn(t *meta.Meta, job *model.Job) error {
    schemaID := job.SchemaID
    tblInfo, err := d.getTableInfo(t, job)
    if err != nil {
        return errors.Trace(err)
    }

    col := &model.ColumnInfo{}
    pos := &ast.ColumnPosition{}
    offset := 0
    err = job.DecodeArgs(col, pos, &offset)
    if err != nil {
        job.State = model.JobCancelled
        return errors.Trace(err)
    }

    columnInfo := findCol(tblInfo.Columns, col.Name.L)
    if columnInfo != nil {
        if columnInfo.State == model.StatePublic {
            // we already have a column with same column name
            job.State = model.JobCancelled
            return errors.Errorf("ADD COLUMN: column already exist %s", col.Name.L)
        }
    } else {
        columnInfo, offset, err = d.addColumn(tblInfo, col, pos)
        if err != nil {
            job.State = model.JobCancelled
            return errors.Trace(err)
        }

        // Set offset arg to job.
        if offset != 0 {
            job.Args = []interface{}{columnInfo, pos, offset}
        }
    }

    _, err = t.GenSchemaVersion()
    if err != nil {
        return errors.Trace(err)
    }

    switch columnInfo.State {
    case model.StateNone:
        // none -> delete only
        job.SchemaState = model.StateDeleteOnly
        columnInfo.State = model.StateDeleteOnly
        err = t.UpdateTable(schemaID, tblInfo)
        return errors.Trace(err)
    case model.StateDeleteOnly:
        // delete only -> write only
        job.SchemaState = model.StateWriteOnly
        columnInfo.State = model.StateWriteOnly
        err = t.UpdateTable(schemaID, tblInfo)
        return errors.Trace(err)
    case model.StateWriteOnly:
        // write only -> reorganization
        job.SchemaState = model.StateWriteReorganization
        columnInfo.State = model.StateWriteReorganization
        // initialize SnapshotVer to 0 for later reorganization check.
        job.SnapshotVer = 0
        err = t.UpdateTable(schemaID, tblInfo)
        return errors.Trace(err)
    case model.StateWriteReorganization:
        // reorganization -> public
        // get the current version for reorganization if we don't have
        reorgInfo, err := d.getReorgInfo(t, job)
        if err != nil || reorgInfo.first {
            // if we run reorg firstly, we should update the job snapshot version
            // and then run the reorg next time.
            return errors.Trace(err)
        }

        tbl, err := d.getTable(schemaID, tblInfo)
        if err != nil {
            return errors.Trace(err)
        }

        err = d.runReorgJob(func() error {
            return d.backfillColumn(tbl, columnInfo, reorgInfo)
        })

        if terror.ErrorEqual(err, errWaitReorgTimeout) {
            // if timeout, we should return, check for the owner and re-wait job done.
            return nil
        }
        if err != nil {
            return errors.Trace(err)
        }

        // Adjust column offset.
        d.adjustColumnOffset(tblInfo.Columns, tblInfo.Indices, offset, true)

        columnInfo.State = model.StatePublic

        if err = t.UpdateTable(schemaID, tblInfo); err != nil {
            return errors.Trace(err)
        }

        // finish this job
        job.SchemaState = model.StatePublic
        job.State = model.JobDone
        return nil
    default:
        return errors.Errorf("invalid column state %v", columnInfo.State)
    }
}

func (d *ddl) onDropColumn(t *meta.Meta, job *model.Job) error {
    schemaID := job.SchemaID
    tblInfo, err := d.getTableInfo(t, job)
    if err != nil {
        return errors.Trace(err)
    }

    var colName model.CIStr
    err = job.DecodeArgs(&colName)
    if err != nil {
        job.State = model.JobCancelled
        return errors.Trace(err)
    }

    colInfo := findCol(tblInfo.Columns, colName.L)
    if colInfo == nil {
        job.State = model.JobCancelled
        return errors.Errorf("column %s doesn't exist", colName)
    }

    if len(tblInfo.Columns) == 1 {
        job.State = model.JobCancelled
        return errors.Errorf("can't drop only column %s in table %s", colName, tblInfo.Name)
    }

    // we don't support drop column with index covered now.
    // we must drop the index first, then drop the column.
    for _, indexInfo := range tblInfo.Indices {
        for _, col := range indexInfo.Columns {
            if col.Name.L == colName.L {
                job.State = model.JobCancelled
                return errors.Errorf("can't drop column %s with index %s covered now", colName, indexInfo.Name)
            }
        }
    }

    _, err = t.GenSchemaVersion()
    if err != nil {
        return errors.Trace(err)
    }

    switch colInfo.State {
    case model.StatePublic:
        // public -> write only
        job.SchemaState = model.StateWriteOnly
        colInfo.State = model.StateWriteOnly

        // set this column's offset to the last and reset all following columns' offset
        d.adjustColumnOffset(tblInfo.Columns, tblInfo.Indices, colInfo.Offset, false)

        err = t.UpdateTable(schemaID, tblInfo)
        return errors.Trace(err)
    case model.StateWriteOnly:
        // write only -> delete only
        job.SchemaState = model.StateDeleteOnly
        colInfo.State = model.StateDeleteOnly
        err = t.UpdateTable(schemaID, tblInfo)
        return errors.Trace(err)
    case model.StateDeleteOnly:
        // delete only -> reorganization
        job.SchemaState = model.StateDeleteReorganization
        colInfo.State = model.StateDeleteReorganization
        // initialize SnapshotVer to 0 for later reorganization check.
        job.SnapshotVer = 0
        err = t.UpdateTable(schemaID, tblInfo)
        return errors.Trace(err)
    case model.StateDeleteReorganization:
        // reorganization -> absent
        reorgInfo, err := d.getReorgInfo(t, job)
        if err != nil || reorgInfo.first {
            // if we run reorg firstly, we should update the job snapshot version
            // and then run the reorg next time.
            return errors.Trace(err)
        }

        tbl, err := d.getTable(schemaID, tblInfo)
        if err != nil {
            return errors.Trace(err)
        }

        err = d.runReorgJob(func() error {
            return d.dropTableColumn(tbl, colInfo, reorgInfo)
        })

        if terror.ErrorEqual(err, errWaitReorgTimeout) {
            // if timeout, we should return, check for the owner and re-wait job done.
            return nil
        }
        if err != nil {
            return errors.Trace(err)
        }

        // all reorganization jobs done, drop this column
        newColumns := make([]*model.ColumnInfo, 0, len(tblInfo.Columns))
        for _, col := range tblInfo.Columns {
            if col.Name.L != colName.L {
                newColumns = append(newColumns, col)
            }
        }
        tblInfo.Columns = newColumns
        if err = t.UpdateTable(schemaID, tblInfo); err != nil {
            return errors.Trace(err)
        }

        // finish this job
        job.SchemaState = model.StateNone
        job.State = model.JobDone
        return nil
    default:
        return errors.Errorf("invalid table state %v", tblInfo.State)
    }
}

// How to backfill column data in reorganization state?
// 1. Generate a snapshot with special version.
// 2. Traverse the snapshot, get every row in the table.
// 3. For one row, if the row has been already deleted, skip to next row.
// 4. If not deleted, check whether column data has existed, if existed, skip to next row.
// 5. If column data doesn't exist, backfill the column with default value and then continue to handle next row.
func (d *ddl) backfillColumn(t table.Table, columnInfo *model.ColumnInfo, reorgInfo *reorgInfo) error {
    seekHandle := reorgInfo.Handle
    version := reorgInfo.SnapshotVer

    for {
        handles, err := d.getSnapshotRows(t, version, seekHandle)
        if err != nil {
            return errors.Trace(err)
        } else if len(handles) == 0 {
            return nil
        }

        seekHandle = handles[len(handles)-1] + 1
        err = d.backfillColumnData(t, columnInfo, handles, reorgInfo)
        if err != nil {
            return errors.Trace(err)
        }
    }
}

func (d *ddl) backfillColumnData(t table.Table, columnInfo *model.ColumnInfo, handles []int64, reorgInfo *reorgInfo) error {
    for _, handle := range handles {
        log.Info("[ddl] backfill column...", handle)

        err := kv.RunInNewTxn(d.store, true, func(txn kv.Transaction) error {
            if err := d.isReorgRunnable(txn); err != nil {
                return errors.Trace(err)
            }

            // First check if row exists.
            exist, err := checkRowExist(txn, t, handle)
            if err != nil {
                return errors.Trace(err)
            } else if !exist {
                // If row doesn't exist, skip it.
                return nil
            }

            backfillKey := t.RecordKey(handle, &column.Col{ColumnInfo: *columnInfo})
            backfillValue, err := txn.Get(backfillKey)
            if err != nil && !kv.IsErrNotFound(err) {
                return errors.Trace(err)
            }
            if backfillValue != nil {
                return nil
            }

            value, _, err := table.GetColDefaultValue(nil, columnInfo)
            if err != nil {
                return errors.Trace(err)
            }

            // must convert to the column field type.
            v, err := value.ConvertTo(&columnInfo.FieldType)
            if err != nil {
                return errors.Trace(err)
            }

            err = lockRow(txn, t, handle)
            if err != nil {
                return errors.Trace(err)
            }

            err = tables.SetColValue(txn, backfillKey, v)
            if err != nil {
                return errors.Trace(err)
            }

            return errors.Trace(reorgInfo.UpdateHandle(txn, handle))
        })

        if err != nil {
            return errors.Trace(err)
        }
    }

    return nil
}

func (d *ddl) dropTableColumn(t table.Table, colInfo *model.ColumnInfo, reorgInfo *reorgInfo) error {
    version := reorgInfo.SnapshotVer
    seekHandle := reorgInfo.Handle

    col := &column.Col{ColumnInfo: *colInfo}
    for {
        handles, err := d.getSnapshotRows(t, version, seekHandle)
        if err != nil {
            return errors.Trace(err)
        } else if len(handles) == 0 {
            return nil
        }

        seekHandle = handles[len(handles)-1] + 1

        err = kv.RunInNewTxn(d.store, true, func(txn kv.Transaction) error {
            if err1 := d.isReorgRunnable(txn); err1 != nil {
                return errors.Trace(err1)
            }

            var h int64
            for _, h = range handles {
                key := t.RecordKey(h, col)
                err1 := txn.Delete(key)
                if err1 != nil && !terror.ErrorEqual(err1, kv.ErrNotExist) {
                    return errors.Trace(err1)
                }
            }
            return errors.Trace(reorgInfo.UpdateHandle(txn, h))
        })
        if err != nil {
            return errors.Trace(err)
        }
    }
}
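adjustColumnOffset above remaps the offset of every column behind the inserted or removed one. A self-contained toy on plain ints (not the vendored API) shows the remapping for the drop branch:

package main

import "fmt"

// Toy version of adjustColumnOffset's added=false branch: columns after
// the dropped one shift left by one, and the dropped column is parked
// at the last offset until reorganization removes it.
func main() {
    offsets := []int{0, 1, 2, 3} // columns a, b, c, d
    offset := 1                  // dropping column b
    for i := offset + 1; i < len(offsets); i++ {
        offsets[i] = i - 1
    }
    offsets[offset] = len(offsets) - 1
    fmt.Println(offsets) // [0 3 1 2]: c and d moved up, b parked last
}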
vendor/github.com/pingcap/tidb/ddl/ddl.go (generated, vendored, new file, 978 lines)
@@ -0,0 +1,978 @@
// Copyright 2013 The ql Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSES/QL-LICENSE file.

// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package ddl

import (
    "fmt"
    "strings"
    "sync"
    "time"

    "github.com/juju/errors"
    "github.com/ngaut/log"
    "github.com/pingcap/tidb/ast"
    "github.com/pingcap/tidb/column"
    "github.com/pingcap/tidb/context"
    "github.com/pingcap/tidb/evaluator"
    "github.com/pingcap/tidb/infoschema"
    "github.com/pingcap/tidb/kv"
    "github.com/pingcap/tidb/meta"
    "github.com/pingcap/tidb/meta/autoid"
    "github.com/pingcap/tidb/model"
    "github.com/pingcap/tidb/mysql"
    "github.com/pingcap/tidb/sessionctx/variable"
    "github.com/pingcap/tidb/table"
    "github.com/pingcap/tidb/util/charset"
    "github.com/pingcap/tidb/util/types"
    "github.com/twinj/uuid"
)

// DDL is responsible for updating schema in data store and maintaining in-memory InfoSchema cache.
type DDL interface {
    CreateSchema(ctx context.Context, name model.CIStr, charsetInfo *ast.CharsetOpt) error
    DropSchema(ctx context.Context, schema model.CIStr) error
    CreateTable(ctx context.Context, ident ast.Ident, cols []*ast.ColumnDef,
        constrs []*ast.Constraint, options []*ast.TableOption) error
    DropTable(ctx context.Context, tableIdent ast.Ident) (err error)
    CreateIndex(ctx context.Context, tableIdent ast.Ident, unique bool, indexName model.CIStr,
        columnNames []*ast.IndexColName) error
    DropIndex(ctx context.Context, tableIdent ast.Ident, indexName model.CIStr) error
    GetInformationSchema() infoschema.InfoSchema
    AlterTable(ctx context.Context, tableIdent ast.Ident, spec []*ast.AlterTableSpec) error
    // SetLease will reset the lease time for online DDL change,
    // it's a very dangerous function and you must guarantee that all servers have the same lease time.
    SetLease(lease time.Duration)
    // GetLease returns current schema lease time.
    GetLease() time.Duration
    // Stats returns the DDL statistics.
    Stats() (map[string]interface{}, error)
    // GetScope gets the status variables scope.
    GetScope(status string) variable.ScopeFlag
    // Stop stops DDL worker.
    Stop() error
    // Start starts DDL worker.
    Start() error
}

type ddl struct {
    m sync.RWMutex

    infoHandle *infoschema.Handle
    hook       Callback
    store      kv.Storage
    // schema lease seconds.
    lease        time.Duration
    uuid         string
    ddlJobCh     chan struct{}
    ddlJobDoneCh chan struct{}
    // drop database/table job runs in the background.
    bgJobCh chan struct{}
    // reorgDoneCh is for reorganization, if the reorganization job is done,
    // we will use this channel to notify outer.
    // TODO: now we use goroutine to simulate reorganization jobs, later we may
    // use a persistent job list.
    reorgDoneCh chan error

    quitCh chan struct{}
    wait   sync.WaitGroup
}

// NewDDL creates a new DDL.
func NewDDL(store kv.Storage, infoHandle *infoschema.Handle, hook Callback, lease time.Duration) DDL {
    return newDDL(store, infoHandle, hook, lease)
}

func newDDL(store kv.Storage, infoHandle *infoschema.Handle, hook Callback, lease time.Duration) *ddl {
    if hook == nil {
        hook = &BaseCallback{}
    }

    d := &ddl{
        infoHandle:   infoHandle,
        hook:         hook,
        store:        store,
        lease:        lease,
        uuid:         uuid.NewV4().String(),
        ddlJobCh:     make(chan struct{}, 1),
        ddlJobDoneCh: make(chan struct{}, 1),
        bgJobCh:      make(chan struct{}, 1),
    }

    d.start()

    variable.RegisterStatistics(d)

    return d
}

func (d *ddl) Stop() error {
    d.m.Lock()
    defer d.m.Unlock()

    d.close()

    err := kv.RunInNewTxn(d.store, true, func(txn kv.Transaction) error {
        t := meta.NewMeta(txn)
        owner, err1 := t.GetDDLJobOwner()
        if err1 != nil {
            return errors.Trace(err1)
        }
        if owner == nil || owner.OwnerID != d.uuid {
            return nil
        }

        // ddl job's owner is me, clean it so other servers can compete for it quickly.
        return t.SetDDLJobOwner(&model.Owner{})
    })
    if err != nil {
        return errors.Trace(err)
    }

    err = kv.RunInNewTxn(d.store, true, func(txn kv.Transaction) error {
        t := meta.NewMeta(txn)
        owner, err1 := t.GetBgJobOwner()
        if err1 != nil {
            return errors.Trace(err1)
        }
        if owner == nil || owner.OwnerID != d.uuid {
            return nil
        }

        // background job's owner is me, clean it so other servers can compete for it quickly.
        return t.SetBgJobOwner(&model.Owner{})
    })

    return errors.Trace(err)
}

func (d *ddl) Start() error {
    d.m.Lock()
    defer d.m.Unlock()

    if !d.isClosed() {
        return nil
    }

    d.start()

    return nil
}

func (d *ddl) start() {
    d.quitCh = make(chan struct{})
    d.wait.Add(2)
    go d.onBackgroundWorker()
    go d.onDDLWorker()
    // for every start, we will send a fake job to let worker
    // check owner first and try to find whether a job exists and run.
    asyncNotify(d.ddlJobCh)
    asyncNotify(d.bgJobCh)
}

func (d *ddl) close() {
    if d.isClosed() {
        return
    }

    close(d.quitCh)

    d.wait.Wait()
}

func (d *ddl) isClosed() bool {
    select {
    case <-d.quitCh:
        return true
    default:
        return false
    }
}

func (d *ddl) SetLease(lease time.Duration) {
    d.m.Lock()
    defer d.m.Unlock()

    if lease == d.lease {
        return
    }

    log.Warnf("[ddl] change schema lease %s -> %s", d.lease, lease)

    if d.isClosed() {
        // if already closed, just set lease and return
        d.lease = lease
        return
    }

    // close the running worker and start again
    d.close()
    d.lease = lease
    d.start()
}

func (d *ddl) GetLease() time.Duration {
    d.m.RLock()
    lease := d.lease
    d.m.RUnlock()
    return lease
}

func (d *ddl) GetInformationSchema() infoschema.InfoSchema {
    return d.infoHandle.Get()
}

func (d *ddl) genGlobalID() (int64, error) {
    var globalID int64
    err := kv.RunInNewTxn(d.store, true, func(txn kv.Transaction) error {
        var err error
        globalID, err = meta.NewMeta(txn).GenGlobalID()
        return errors.Trace(err)
    })

    return globalID, errors.Trace(err)
}

func (d *ddl) CreateSchema(ctx context.Context, schema model.CIStr, charsetInfo *ast.CharsetOpt) (err error) {
    is := d.GetInformationSchema()
    _, ok := is.SchemaByName(schema)
    if ok {
        return errors.Trace(infoschema.DatabaseExists)
    }

    schemaID, err := d.genGlobalID()
    if err != nil {
        return errors.Trace(err)
    }
    dbInfo := &model.DBInfo{
        Name: schema,
    }
    if charsetInfo != nil {
        dbInfo.Charset = charsetInfo.Chs
        dbInfo.Collate = charsetInfo.Col
    } else {
        dbInfo.Charset, dbInfo.Collate = getDefaultCharsetAndCollate()
    }

    job := &model.Job{
        SchemaID: schemaID,
        Type:     model.ActionCreateSchema,
        Args:     []interface{}{dbInfo},
    }

    err = d.startDDLJob(ctx, job)
    err = d.hook.OnChanged(err)
    return errors.Trace(err)
}

func (d *ddl) DropSchema(ctx context.Context, schema model.CIStr) (err error) {
    is := d.GetInformationSchema()
    old, ok := is.SchemaByName(schema)
    if !ok {
        return errors.Trace(infoschema.DatabaseNotExists)
    }

    job := &model.Job{
        SchemaID: old.ID,
        Type:     model.ActionDropSchema,
    }

    err = d.startDDLJob(ctx, job)
    err = d.hook.OnChanged(err)
    return errors.Trace(err)
}

func getDefaultCharsetAndCollate() (string, string) {
    // TODO: TableDefaultCharset-->DatabaseDefaultCharset-->SystemDefaultCharset.
    // TODO: change TableOption parser to parse collate.
    // This is a tmp solution.
    return "utf8", "utf8_unicode_ci"
}

func setColumnFlagWithConstraint(colMap map[string]*column.Col, v *ast.Constraint) {
    switch v.Tp {
    case ast.ConstraintPrimaryKey:
        for _, key := range v.Keys {
            c, ok := colMap[key.Column.Name.L]
            if !ok {
                // TODO: table constraint on unknown column.
                continue
            }
            c.Flag |= mysql.PriKeyFlag
            // Primary key can not be NULL.
            c.Flag |= mysql.NotNullFlag
        }
    case ast.ConstraintUniq, ast.ConstraintUniqIndex, ast.ConstraintUniqKey:
        for i, key := range v.Keys {
            c, ok := colMap[key.Column.Name.L]
            if !ok {
                // TODO: table constraint on unknown column.
                continue
            }
            if i == 0 {
                // Only the first column can be set
                // if unique index has multi columns,
                // the flag should be MultipleKeyFlag.
                // See: https://dev.mysql.com/doc/refman/5.7/en/show-columns.html
                if len(v.Keys) > 1 {
                    c.Flag |= mysql.MultipleKeyFlag
                } else {
                    c.Flag |= mysql.UniqueKeyFlag
                }
            }
        }
    case ast.ConstraintKey, ast.ConstraintIndex:
        for i, key := range v.Keys {
            c, ok := colMap[key.Column.Name.L]
            if !ok {
                // TODO: table constraint on unknown column.
                continue
            }
            if i == 0 {
                // Only the first column can be set.
                c.Flag |= mysql.MultipleKeyFlag
            }
        }
    }
}

func (d *ddl) buildColumnsAndConstraints(ctx context.Context, colDefs []*ast.ColumnDef,
    constraints []*ast.Constraint) ([]*column.Col, []*ast.Constraint, error) {
    var cols []*column.Col
    colMap := map[string]*column.Col{}
    for i, colDef := range colDefs {
        col, cts, err := d.buildColumnAndConstraint(ctx, i, colDef)
        if err != nil {
            return nil, nil, errors.Trace(err)
        }
        col.State = model.StatePublic
        constraints = append(constraints, cts...)
        cols = append(cols, col)
        colMap[colDef.Name.Name.L] = col
    }
    // traverse table Constraints and set col.flag
    for _, v := range constraints {
        setColumnFlagWithConstraint(colMap, v)
    }
    return cols, constraints, nil
}

func (d *ddl) buildColumnAndConstraint(ctx context.Context, offset int,
    colDef *ast.ColumnDef) (*column.Col, []*ast.Constraint, error) {
    // Set charset.
    if len(colDef.Tp.Charset) == 0 {
        switch colDef.Tp.Tp {
        case mysql.TypeString, mysql.TypeVarchar, mysql.TypeVarString, mysql.TypeBlob, mysql.TypeTinyBlob, mysql.TypeMediumBlob, mysql.TypeLongBlob:
            colDef.Tp.Charset, colDef.Tp.Collate = getDefaultCharsetAndCollate()
        default:
            colDef.Tp.Charset = charset.CharsetBin
            colDef.Tp.Collate = charset.CharsetBin
        }
    }

    col, cts, err := columnDefToCol(ctx, offset, colDef)
    if err != nil {
        return nil, nil, errors.Trace(err)
    }

    col.ID, err = d.genGlobalID()
    if err != nil {
        return nil, nil, errors.Trace(err)
    }

    return col, cts, nil
}

// columnDefToCol converts ColumnDef to Col and TableConstraints.
func columnDefToCol(ctx context.Context, offset int, colDef *ast.ColumnDef) (*column.Col, []*ast.Constraint, error) {
    constraints := []*ast.Constraint{}
    col := &column.Col{
        ColumnInfo: model.ColumnInfo{
            Offset:    offset,
            Name:      colDef.Name.Name,
            FieldType: *colDef.Tp,
        },
    }

    // Check and set TimestampFlag and OnUpdateNowFlag.
    if col.Tp == mysql.TypeTimestamp {
        col.Flag |= mysql.TimestampFlag
        col.Flag |= mysql.OnUpdateNowFlag
        col.Flag |= mysql.NotNullFlag
    }

    // If flen is not assigned, assigned it by type.
    if col.Flen == types.UnspecifiedLength {
        col.Flen = mysql.GetDefaultFieldLength(col.Tp)
    }
    if col.Decimal == types.UnspecifiedLength {
        col.Decimal = mysql.GetDefaultDecimal(col.Tp)
    }

    setOnUpdateNow := false
    hasDefaultValue := false
    if colDef.Options != nil {
        keys := []*ast.IndexColName{
            {
                Column: colDef.Name,
                Length: colDef.Tp.Flen,
            },
        }
        for _, v := range colDef.Options {
            switch v.Tp {
            case ast.ColumnOptionNotNull:
                col.Flag |= mysql.NotNullFlag
            case ast.ColumnOptionNull:
                col.Flag &= ^uint(mysql.NotNullFlag)
                removeOnUpdateNowFlag(col)
            case ast.ColumnOptionAutoIncrement:
                col.Flag |= mysql.AutoIncrementFlag
            case ast.ColumnOptionPrimaryKey:
                constraint := &ast.Constraint{Tp: ast.ConstraintPrimaryKey, Keys: keys}
                constraints = append(constraints, constraint)
                col.Flag |= mysql.PriKeyFlag
            case ast.ColumnOptionUniq:
                constraint := &ast.Constraint{Tp: ast.ConstraintUniq, Name: colDef.Name.Name.O, Keys: keys}
                constraints = append(constraints, constraint)
                col.Flag |= mysql.UniqueKeyFlag
            case ast.ColumnOptionIndex:
                constraint := &ast.Constraint{Tp: ast.ConstraintIndex, Name: colDef.Name.Name.O, Keys: keys}
                constraints = append(constraints, constraint)
            case ast.ColumnOptionUniqIndex:
                constraint := &ast.Constraint{Tp: ast.ConstraintUniqIndex, Name: colDef.Name.Name.O, Keys: keys}
                constraints = append(constraints, constraint)
                col.Flag |= mysql.UniqueKeyFlag
            case ast.ColumnOptionKey:
                constraint := &ast.Constraint{Tp: ast.ConstraintKey, Name: colDef.Name.Name.O, Keys: keys}
                constraints = append(constraints, constraint)
            case ast.ColumnOptionUniqKey:
                constraint := &ast.Constraint{Tp: ast.ConstraintUniqKey, Name: colDef.Name.Name.O, Keys: keys}
                constraints = append(constraints, constraint)
                col.Flag |= mysql.UniqueKeyFlag
            case ast.ColumnOptionDefaultValue:
                value, err := getDefaultValue(ctx, v, colDef.Tp.Tp, colDef.Tp.Decimal)
                if err != nil {
                    return nil, nil, errors.Errorf("invalid default value - %s", errors.Trace(err))
                }
                col.DefaultValue = value
                hasDefaultValue = true
                removeOnUpdateNowFlag(col)
            case ast.ColumnOptionOnUpdate:
                if !evaluator.IsCurrentTimeExpr(v.Expr) {
                    return nil, nil, errors.Errorf("invalid ON UPDATE for - %s", col.Name)
                }

                col.Flag |= mysql.OnUpdateNowFlag
                setOnUpdateNow = true
            case ast.ColumnOptionFulltext, ast.ColumnOptionComment:
                // Do nothing.
            }
        }
    }

    setTimestampDefaultValue(col, hasDefaultValue, setOnUpdateNow)

    // Set `NoDefaultValueFlag` if this field doesn't have a default value and
    // it is `not null` and not an `AUTO_INCREMENT` field or `TIMESTAMP` field.
    setNoDefaultValueFlag(col, hasDefaultValue)

    err := checkDefaultValue(col, hasDefaultValue)
    if err != nil {
        return nil, nil, errors.Trace(err)
    }
    if col.Charset == charset.CharsetBin {
        col.Flag |= mysql.BinaryFlag
    }
    return col, constraints, nil
}

func getDefaultValue(ctx context.Context, c *ast.ColumnOption, tp byte, fsp int) (interface{}, error) {
    if tp == mysql.TypeTimestamp || tp == mysql.TypeDatetime {
        value, err := evaluator.GetTimeValue(ctx, c.Expr, tp, fsp)
        if err != nil {
            return nil, errors.Trace(err)
        }

        // Value is nil means `default null`.
        if value == nil {
            return nil, nil
        }

        // If value is mysql.Time, convert it to string.
        if vv, ok := value.(mysql.Time); ok {
            return vv.String(), nil
        }

        return value, nil
    }
    v, err := evaluator.Eval(ctx, c.Expr)
    if err != nil {
        return nil, errors.Trace(err)
    }
    return v, nil
}

func removeOnUpdateNowFlag(c *column.Col) {
    // For timestamp Col, if it is set null or default value,
    // OnUpdateNowFlag should be removed.
    if mysql.HasTimestampFlag(c.Flag) {
        c.Flag &= ^uint(mysql.OnUpdateNowFlag)
    }
}

func setTimestampDefaultValue(c *column.Col, hasDefaultValue bool, setOnUpdateNow bool) {
    if hasDefaultValue {
        return
    }

    // For timestamp Col, if is not set default value or not set null, use current timestamp.
    if mysql.HasTimestampFlag(c.Flag) && mysql.HasNotNullFlag(c.Flag) {
        if setOnUpdateNow {
            c.DefaultValue = evaluator.ZeroTimestamp
        } else {
            c.DefaultValue = evaluator.CurrentTimestamp
        }
    }
}

func setNoDefaultValueFlag(c *column.Col, hasDefaultValue bool) {
    if hasDefaultValue {
        return
    }

    if !mysql.HasNotNullFlag(c.Flag) {
        return
    }

    // Check if it is an `AUTO_INCREMENT` field or `TIMESTAMP` field.
    if !mysql.HasAutoIncrementFlag(c.Flag) && !mysql.HasTimestampFlag(c.Flag) {
        c.Flag |= mysql.NoDefaultValueFlag
    }
}

func checkDefaultValue(c *column.Col, hasDefaultValue bool) error {
    if !hasDefaultValue {
        return nil
    }

    if c.DefaultValue != nil {
        return nil
    }

    // Set not null but default null is invalid.
    if mysql.HasNotNullFlag(c.Flag) {
        return errors.Errorf("invalid default value for %s", c.Name)
    }

    return nil
}

func checkDuplicateColumn(colDefs []*ast.ColumnDef) error {
    colNames := map[string]bool{}
    for _, colDef := range colDefs {
        nameLower := colDef.Name.Name.O
        if colNames[nameLower] {
            return errors.Errorf("CREATE TABLE: duplicate column %s", colDef.Name)
        }
        colNames[nameLower] = true
    }
    return nil
}

func checkConstraintNames(constraints []*ast.Constraint) error {
    constrNames := map[string]bool{}

    // Check not empty constraint name whether is duplicated.
    for _, constr := range constraints {
        if constr.Tp == ast.ConstraintForeignKey {
            // Ignore foreign key.
            continue
        }
        if constr.Name != "" {
            nameLower := strings.ToLower(constr.Name)
            if constrNames[nameLower] {
                return errors.Errorf("CREATE TABLE: duplicate key %s", constr.Name)
            }
            constrNames[nameLower] = true
        }
    }

    // Set empty constraint names.
    for _, constr := range constraints {
        if constr.Name == "" && len(constr.Keys) > 0 {
            colName := constr.Keys[0].Column.Name.O
            constrName := colName
            i := 2
            for constrNames[strings.ToLower(constrName)] {
                // We loop forever until we find constrName that haven't been used.
                constrName = fmt.Sprintf("%s_%d", colName, i)
                i++
            }
            constr.Name = constrName
            constrNames[constrName] = true
        }
    }
    return nil
}

func (d *ddl) buildTableInfo(tableName model.CIStr, cols []*column.Col, constraints []*ast.Constraint) (tbInfo *model.TableInfo, err error) {
    tbInfo = &model.TableInfo{
        Name: tableName,
    }
    tbInfo.ID, err = d.genGlobalID()
    if err != nil {
        return nil, errors.Trace(err)
    }
    for _, v := range cols {
        tbInfo.Columns = append(tbInfo.Columns, &v.ColumnInfo)
    }
    for _, constr := range constraints {
        if constr.Tp == ast.ConstraintPrimaryKey {
            if len(constr.Keys) == 1 {
                key := constr.Keys[0]
                col := column.FindCol(cols, key.Column.Name.O)
                if col == nil {
                    return nil, errors.Errorf("No such column: %v", key)
                }
                switch col.Tp {
                case mysql.TypeLong, mysql.TypeLonglong:
                    tbInfo.PKIsHandle = true
                    // Avoid creating index for PK handle column.
                    continue
                }
            }
        }

        // 1. check if the column is exists
        // 2. add index
        indexColumns := make([]*model.IndexColumn, 0, len(constr.Keys))
        for _, key := range constr.Keys {
            col := column.FindCol(cols, key.Column.Name.O)
            if col == nil {
                return nil, errors.Errorf("No such column: %v", key)
            }
            indexColumns = append(indexColumns, &model.IndexColumn{
                Name:   key.Column.Name,
                Offset: col.Offset,
                Length: key.Length,
            })
        }
        idxInfo := &model.IndexInfo{
            Name:    model.NewCIStr(constr.Name),
            Columns: indexColumns,
            State:   model.StatePublic,
        }
        switch constr.Tp {
        case ast.ConstraintPrimaryKey:
            idxInfo.Unique = true
            idxInfo.Primary = true
            idxInfo.Name = model.NewCIStr(column.PrimaryKeyName)
        case ast.ConstraintUniq, ast.ConstraintUniqKey, ast.ConstraintUniqIndex:
            idxInfo.Unique = true
        }
        if constr.Option != nil {
            idxInfo.Comment = constr.Option.Comment
            idxInfo.Tp = constr.Option.Tp
        } else {
            // Use btree as default index type.
            idxInfo.Tp = model.IndexTypeBtree
        }
        idxInfo.ID, err = d.genGlobalID()
        if err != nil {
            return nil, errors.Trace(err)
        }
        tbInfo.Indices = append(tbInfo.Indices, idxInfo)
    }
    return
}

func (d *ddl) CreateTable(ctx context.Context, ident ast.Ident, colDefs []*ast.ColumnDef,
    constraints []*ast.Constraint, options []*ast.TableOption) (err error) {
    is := d.GetInformationSchema()
    schema, ok := is.SchemaByName(ident.Schema)
    if !ok {
        return infoschema.DatabaseNotExists.Gen("database %s not exists", ident.Schema)
    }
    if is.TableExists(ident.Schema, ident.Name) {
        return errors.Trace(infoschema.TableExists)
    }
    if err = checkDuplicateColumn(colDefs); err != nil {
        return errors.Trace(err)
    }

    cols, newConstraints, err := d.buildColumnsAndConstraints(ctx, colDefs, constraints)
    if err != nil {
        return errors.Trace(err)
    }

    err = checkConstraintNames(newConstraints)
    if err != nil {
        return errors.Trace(err)
    }

    tbInfo, err := d.buildTableInfo(ident.Name, cols, newConstraints)
    if err != nil {
        return errors.Trace(err)
    }

    job := &model.Job{
        SchemaID: schema.ID,
        TableID:  tbInfo.ID,
        Type:     model.ActionCreateTable,
        Args:     []interface{}{tbInfo},
    }

    err = d.startDDLJob(ctx, job)
    if err == nil {
        err = d.handleTableOptions(options, tbInfo, schema.ID)
    }
    err = d.hook.OnChanged(err)
    return errors.Trace(err)
}

func (d *ddl) handleTableOptions(options []*ast.TableOption, tbInfo *model.TableInfo, schemaID int64) error {
    for _, op := range options {
        if op.Tp == ast.TableOptionAutoIncrement {
            alloc := autoid.NewAllocator(d.store, schemaID)
            tbInfo.State = model.StatePublic
            tb, err := table.TableFromMeta(alloc, tbInfo)
            if err != nil {
                return errors.Trace(err)
            }
            // The operation of the minus 1 to make sure that the current value doesn't be used,
            // the next Alloc operation will get this value.
            // Its behavior is consistent with MySQL.
            if err = tb.RebaseAutoID(int64(op.UintValue-1), false); err != nil {
                return errors.Trace(err)
            }
        }
    }

    return nil
}

func (d *ddl) AlterTable(ctx context.Context, ident ast.Ident, specs []*ast.AlterTableSpec) (err error) {
    // now we only allow one schema changes at the same time.
    if len(specs) != 1 {
        return errors.New("can't run multi schema changes in one DDL")
    }

    for _, spec := range specs {
        switch spec.Tp {
        case ast.AlterTableAddColumn:
            err = d.AddColumn(ctx, ident, spec)
        case ast.AlterTableDropColumn:
            err = d.DropColumn(ctx, ident, spec.DropColumn.Name)
        case ast.AlterTableDropIndex:
            err = d.DropIndex(ctx, ident, model.NewCIStr(spec.Name))
        case ast.AlterTableAddConstraint:
            constr := spec.Constraint
            switch spec.Constraint.Tp {
            case ast.ConstraintKey, ast.ConstraintIndex:
                err = d.CreateIndex(ctx, ident, false, model.NewCIStr(constr.Name), spec.Constraint.Keys)
            case ast.ConstraintUniq, ast.ConstraintUniqIndex, ast.ConstraintUniqKey:
                err = d.CreateIndex(ctx, ident, true, model.NewCIStr(constr.Name), spec.Constraint.Keys)
            default:
                // nothing to do now.
            }
        default:
            // nothing to do now.
        }

        if err != nil {
            return errors.Trace(err)
        }
    }

    return nil
}

func checkColumnConstraint(constraints []*ast.ColumnOption) error {
    for _, constraint := range constraints {
        switch constraint.Tp {
        case ast.ColumnOptionAutoIncrement, ast.ColumnOptionPrimaryKey, ast.ColumnOptionUniq, ast.ColumnOptionUniqKey:
            return errors.Errorf("unsupported add column constraint - %v", constraint.Tp)
        }
    }

    return nil
}

// AddColumn will add a new column to the table.
func (d *ddl) AddColumn(ctx context.Context, ti ast.Ident, spec *ast.AlterTableSpec) error {
    // Check whether the added column constraints are supported.
    err := checkColumnConstraint(spec.Column.Options)
    if err != nil {
        return errors.Trace(err)
    }

    is := d.infoHandle.Get()
    schema, ok := is.SchemaByName(ti.Schema)
    if !ok {
        return errors.Trace(infoschema.DatabaseNotExists)
    }

    t, err := is.TableByName(ti.Schema, ti.Name)
    if err != nil {
        return errors.Trace(infoschema.TableNotExists)
    }

    // Check whether added column has existed.
    colName := spec.Column.Name.Name.O
    col := column.FindCol(t.Cols(), colName)
    if col != nil {
        return errors.Errorf("column %s already exists", colName)
    }

    // ignore table constraints now, maybe return error later.
    // we use length(t.Cols()) as the default offset first, and will change
    // the column's offset later.
    col, _, err = d.buildColumnAndConstraint(ctx, len(t.Cols()), spec.Column)
    if err != nil {
        return errors.Trace(err)
    }

    job := &model.Job{
        SchemaID: schema.ID,
        TableID:  t.Meta().ID,
        Type:     model.ActionAddColumn,
        Args:     []interface{}{&col.ColumnInfo, spec.Position, 0},
    }

    err = d.startDDLJob(ctx, job)
    err = d.hook.OnChanged(err)
    return errors.Trace(err)
}

// DropColumn will drop a column from the table, now we don't support drop the column with index covered.
func (d *ddl) DropColumn(ctx context.Context, ti ast.Ident, colName model.CIStr) error {
    is := d.infoHandle.Get()
    schema, ok := is.SchemaByName(ti.Schema)
    if !ok {
        return errors.Trace(infoschema.DatabaseNotExists)
    }

    t, err := is.TableByName(ti.Schema, ti.Name)
    if err != nil {
        return errors.Trace(infoschema.TableNotExists)
    }

    // Check whether dropped column has existed.
    col := column.FindCol(t.Cols(), colName.L)
    if col == nil {
        return errors.Errorf("column %s doesn't exist", colName.L)
    }

    job := &model.Job{
        SchemaID: schema.ID,
        TableID:  t.Meta().ID,
        Type:     model.ActionDropColumn,
        Args:     []interface{}{colName},
    }

    err = d.startDDLJob(ctx, job)
    err = d.hook.OnChanged(err)
    return errors.Trace(err)
}

// DropTable will proceed even if some table in the list does not exists.
func (d *ddl) DropTable(ctx context.Context, ti ast.Ident) (err error) {
    is := d.GetInformationSchema()
    schema, ok := is.SchemaByName(ti.Schema)
    if !ok {
        return infoschema.DatabaseNotExists.Gen("database %s not exists", ti.Schema)
    }

    tb, err := is.TableByName(ti.Schema, ti.Name)
    if err != nil {
        return errors.Trace(infoschema.TableNotExists)
    }

    job := &model.Job{
        SchemaID: schema.ID,
        TableID:  tb.Meta().ID,
        Type:     model.ActionDropTable,
    }

    err = d.startDDLJob(ctx, job)
    err = d.hook.OnChanged(err)
    return errors.Trace(err)
}

func (d *ddl) CreateIndex(ctx context.Context, ti ast.Ident, unique bool, indexName model.CIStr, idxColNames []*ast.IndexColName) error {
    is := d.infoHandle.Get()
    schema, ok := is.SchemaByName(ti.Schema)
    if !ok {
        return infoschema.DatabaseNotExists.Gen("database %s not exists", ti.Schema)
    }

    t, err := is.TableByName(ti.Schema, ti.Name)
    if err != nil {
        return errors.Trace(infoschema.TableNotExists)
    }
    indexID, err := d.genGlobalID()
    if err != nil {
        return errors.Trace(err)
    }

    job := &model.Job{
        SchemaID: schema.ID,
        TableID:  t.Meta().ID,
        Type:     model.ActionAddIndex,
        Args:     []interface{}{unique, indexName, indexID, idxColNames},
    }

    err = d.startDDLJob(ctx, job)
    err = d.hook.OnChanged(err)
    return errors.Trace(err)
}

func (d *ddl) DropIndex(ctx context.Context, ti ast.Ident, indexName model.CIStr) error {
    is := d.infoHandle.Get()
    schema, ok := is.SchemaByName(ti.Schema)
    if !ok {
        return errors.Trace(infoschema.DatabaseNotExists)
    }

    t, err := is.TableByName(ti.Schema, ti.Name)
    if err != nil {
        return errors.Trace(infoschema.TableNotExists)
    }

    job := &model.Job{
        SchemaID: schema.ID,
        TableID:  t.Meta().ID,
        Type:     model.ActionDropIndex,
        Args:     []interface{}{indexName},
    }

    err = d.startDDLJob(ctx, job)
    err = d.hook.OnChanged(err)
    return errors.Trace(err)
}

// findCol finds column in cols by name.
func findCol(cols []*model.ColumnInfo, name string) *model.ColumnInfo {
    name = strings.ToLower(name)
    for _, col := range cols {
        if col.Name.L == name {
            return col
        }
    }

    return nil
}
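ddl.go is driven through the DDL interface above. The following is a hedged sketch of how a caller might wire it up, assuming the kv.Storage, infoschema.Handle, and session context are created elsewhere in TiDB (that setup is elided because it depends on the rest of the codebase):

package example

import (
    "time"

    "github.com/pingcap/tidb/context"
    "github.com/pingcap/tidb/ddl"
    "github.com/pingcap/tidb/infoschema"
    "github.com/pingcap/tidb/kv"
    "github.com/pingcap/tidb/model"
)

// createTestSchema drives the DDL interface shown above. Passing a nil
// hook makes newDDL fall back to BaseCallback.
func createTestSchema(store kv.Storage, h *infoschema.Handle, ctx context.Context) error {
    d := ddl.NewDDL(store, h, nil, 1*time.Second)
    defer d.Stop()
    // CreateSchema enqueues a DDL job and blocks until the worker has
    // recorded it in the history queue (see startDDLJob below).
    return d.CreateSchema(ctx, model.NewCIStr("test"), nil)
}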
vendor/github.com/pingcap/tidb/ddl/ddl_worker.go (generated, vendored, new file, 392 lines; listing truncated below)
|
@ -0,0 +1,392 @@
|
|||
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package ddl

import (
	"time"

	"github.com/juju/errors"
	"github.com/ngaut/log"
	"github.com/pingcap/tidb/context"
	"github.com/pingcap/tidb/kv"
	"github.com/pingcap/tidb/meta"
	"github.com/pingcap/tidb/model"
	"github.com/pingcap/tidb/terror"
)

func (d *ddl) startDDLJob(ctx context.Context, job *model.Job) error {
	// For every DDL we must commit the current transaction first.
	if err := ctx.FinishTxn(false); err != nil {
		return errors.Trace(err)
	}

	// Create a new job and queue it.
	err := kv.RunInNewTxn(d.store, true, func(txn kv.Transaction) error {
		t := meta.NewMeta(txn)
		var err error
		job.ID, err = t.GenGlobalID()
		if err != nil {
			return errors.Trace(err)
		}

		err = t.EnQueueDDLJob(job)
		return errors.Trace(err)
	})

	if err != nil {
		return errors.Trace(err)
	}

	// Notify the worker that we pushed a new job, then wait for the job to be done.
	asyncNotify(d.ddlJobCh)

	log.Warnf("[ddl] start DDL job %v", job)

	jobID := job.ID

	var historyJob *model.Job

	// From start to end a job moves through the states none -> delete only -> write only -> reorganization -> public.
	// Every state change waits up to 2 * lease time, so the ticker here checks every 10 * lease.
	ticker := time.NewTicker(chooseLeaseTime(10*d.lease, 10*time.Second))
	defer ticker.Stop()
	for {
		select {
		case <-d.ddlJobDoneCh:
		case <-ticker.C:
		}

		historyJob, err = d.getHistoryDDLJob(jobID)
		if err != nil {
			log.Errorf("[ddl] get history DDL job err %v, check again", err)
			continue
		} else if historyJob == nil {
			log.Warnf("[ddl] DDL job %d is not in history, maybe not run", jobID)
			continue
		}

		// If a job is in the history, its state must be JobDone or JobCancelled.
		if historyJob.State == model.JobDone {
			return nil
		}

		return errors.Errorf(historyJob.Error)
	}
}

func (d *ddl) getHistoryDDLJob(id int64) (*model.Job, error) {
	var job *model.Job

	err := kv.RunInNewTxn(d.store, false, func(txn kv.Transaction) error {
		t := meta.NewMeta(txn)
		var err1 error
		job, err1 = t.GetHistoryDDLJob(id)
		return errors.Trace(err1)
	})

	return job, errors.Trace(err)
}

func asyncNotify(ch chan struct{}) {
	select {
	case ch <- struct{}{}:
	default:
	}
}
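
asyncNotify above is the classic non-blocking send on a one-slot channel: if a wakeup signal is already pending, the new one is dropped, because a single pending signal is enough to wake the worker once. A standalone sketch of that coalescing behavior (a minimal program with illustrative names, not part of this vendored file):

package main

import "fmt"

// notify performs a non-blocking send: if a wakeup signal is already
// pending and the channel is full, the new one is coalesced (dropped)
// rather than blocking the sender.
func notify(ch chan struct{}) {
	select {
	case ch <- struct{}{}:
	default:
	}
}

func main() {
	ch := make(chan struct{}, 1)
	notify(ch) // queued
	notify(ch) // coalesced with the pending signal
	<-ch
	fmt.Println("worker woke up once for two notifications")
}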

func (d *ddl) checkOwner(t *meta.Meta, flag JobType) (*model.Owner, error) {
	var owner *model.Owner
	var err error

	switch flag {
	case ddlJobFlag:
		owner, err = t.GetDDLJobOwner()
	case bgJobFlag:
		owner, err = t.GetBgJobOwner()
	default:
		err = errInvalidJobFlag
	}
	if err != nil {
		return nil, errors.Trace(err)
	}

	if owner == nil {
		owner = &model.Owner{}
		// try to set owner
		owner.OwnerID = d.uuid
	}

	now := time.Now().UnixNano()
	// We must wait 2 * lease time to guarantee that other servers have updated the schema.
	// The owner updates its status every 2 * lease time, so here we use
	// 4 * lease to check for a timeout.
	maxTimeout := int64(4 * d.lease)
	if owner.OwnerID == d.uuid || now-owner.LastUpdateTS > maxTimeout {
		owner.OwnerID = d.uuid
		owner.LastUpdateTS = now
		// update status.
		switch flag {
		case ddlJobFlag:
			err = t.SetDDLJobOwner(owner)
		case bgJobFlag:
			err = t.SetBgJobOwner(owner)
		}
		if err != nil {
			return nil, errors.Trace(err)
		}
		log.Debugf("[ddl] become %s job owner %s", flag, owner.OwnerID)
	}

	if owner.OwnerID != d.uuid {
		log.Debugf("[ddl] not %s job owner, owner is %s", flag, owner.OwnerID)
		return nil, errors.Trace(ErrNotOwner)
	}

	return owner, nil
}
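
The ownership check above is purely time-based: a healthy owner refreshes LastUpdateTS every 2 * lease, and a peer may take over only after 4 * lease of silence. A minimal sketch of just that takeover decision, with simplified types standing in for model.Owner:

package main

import (
	"fmt"
	"time"
)

// canTakeOver reports whether uuid may claim ownership: either it already
// owns the queue, or the current owner has not refreshed its timestamp
// within 4 * lease (a healthy owner refreshes every 2 * lease).
func canTakeOver(ownerID, uuid string, lastUpdateTS int64, lease time.Duration) bool {
	now := time.Now().UnixNano()
	maxTimeout := int64(4 * lease)
	return ownerID == uuid || now-lastUpdateTS > maxTimeout
}

func main() {
	lease := 100 * time.Millisecond
	stale := time.Now().Add(-time.Second).UnixNano()
	// true: the owner has been silent for far longer than 4 * lease.
	fmt.Println(canTakeOver("server-a", "server-b", stale, lease))
}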

func (d *ddl) getFirstDDLJob(t *meta.Meta) (*model.Job, error) {
	job, err := t.GetDDLJob(0)
	return job, errors.Trace(err)
}

// Every time we enter another state, except the final one, we must call this function.
func (d *ddl) updateDDLJob(t *meta.Meta, job *model.Job) error {
	err := t.UpdateDDLJob(0, job)
	return errors.Trace(err)
}

func (d *ddl) finishDDLJob(t *meta.Meta, job *model.Job) error {
	log.Warnf("[ddl] finish DDL job %v", job)
	// The job is done; notify and run the next job.
	_, err := t.DeQueueDDLJob()
	if err != nil {
		return errors.Trace(err)
	}
	switch job.Type {
	case model.ActionDropSchema, model.ActionDropTable:
		if err = d.prepareBgJob(job); err != nil {
			return errors.Trace(err)
		}
	}

	err = t.AddHistoryDDLJob(job)
	return errors.Trace(err)
}

// ErrNotOwner means we are not the owner and can't handle DDL jobs.
var ErrNotOwner = errors.New("DDL: not owner")

// ErrWorkerClosed means we have already closed the DDL worker.
var ErrWorkerClosed = errors.New("DDL: worker is closed")

var errInvalidJobFlag = errors.New("DDL: invalid job flag")

// JobType is the job type: ddl or background.
type JobType int

const (
	ddlJobFlag = iota + 1
	bgJobFlag
)

func (j JobType) String() string {
	switch j {
	case ddlJobFlag:
		return "ddl"
	case bgJobFlag:
		return "background"
	}

	return "unknown"
}

func (d *ddl) handleDDLJobQueue() error {
	for {
		if d.isClosed() {
			return nil
		}

		waitTime := 2 * d.lease

		var job *model.Job
		err := kv.RunInNewTxn(d.store, false, func(txn kv.Transaction) error {
			t := meta.NewMeta(txn)
			owner, err := d.checkOwner(t, ddlJobFlag)
			if terror.ErrorEqual(err, ErrNotOwner) {
				// We are not the owner; return and retry checking later.
				return nil
			} else if err != nil {
				return errors.Trace(err)
			}

			// We became the owner: get the first job and run it.
			job, err = d.getFirstDDLJob(t)
			if job == nil || err != nil {
				return errors.Trace(err)
			}

			if job.IsRunning() {
				// If we entered a new state, crashed while waiting the 2 * lease time, and
				// restarted quickly, we might run the job again immediately without having
				// waited the full 2 * lease time that lets other servers update the schema.
				// So check the time elapsed since the last update; if it is < 2 * lease,
				// wait again.
				elapsed := time.Duration(time.Now().UnixNano() - job.LastUpdateTS)
				if elapsed > 0 && elapsed < waitTime {
					log.Warnf("[ddl] the elapsed time from last update is %s < %s, wait again", elapsed, waitTime)
					waitTime -= elapsed
					return nil
				}
			}

			log.Warnf("[ddl] run DDL job %v", job)

			d.hook.OnJobRunBefore(job)

			// If running the job meets an error, we save the error in job.Error
			// and retry later if the job is not cancelled.
			d.runDDLJob(t, job)

			if job.IsFinished() {
				err = d.finishDDLJob(t, job)
			} else {
				err = d.updateDDLJob(t, job)
			}
			if err != nil {
				return errors.Trace(err)
			}

			// Running a job may take some time, so update the owner status here to
			// prevent others from becoming the owner.
			owner.LastUpdateTS = time.Now().UnixNano()
			err = t.SetDDLJobOwner(owner)

			return errors.Trace(err)
		})
		if err != nil {
			return errors.Trace(err)
		} else if job == nil {
			// There is no job now; return and retry later.
			return nil
		}

		d.hook.OnJobUpdated(job)

		// Here the job has entered another state (delete only, write only, public, etc.) or been cancelled.
		// If the job is done or still running, wait 2 * lease time to guarantee that other servers
		// have updated to the newest schema.
		if job.State == model.JobRunning || job.State == model.JobDone {
			d.waitSchemaChanged(waitTime)
		}

		if job.IsFinished() {
			d.startBgJob(job.Type)
			asyncNotify(d.ddlJobDoneCh)
		}
	}
}

func chooseLeaseTime(n1 time.Duration, n2 time.Duration) time.Duration {
	if n1 > 0 {
		return n1
	}

	return n2
}

// onDDLWorker is for async online schema change. It tries to become the owner first,
// then waits on or polls the job queue to handle a schema change job.
func (d *ddl) onDDLWorker() {
	defer d.wait.Done()

	// We use 4 * lease time to check the owner's timeout, so the owner updates its
	// status every 2 * lease time; if lease is 0, we use a default of 10s.
	checkTime := chooseLeaseTime(2*d.lease, 10*time.Second)

	ticker := time.NewTicker(checkTime)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			log.Debugf("[ddl] wait %s to check DDL status again", checkTime)
		case <-d.ddlJobCh:
		case <-d.quitCh:
			return
		}

		err := d.handleDDLJobQueue()
		if err != nil {
			log.Errorf("[ddl] handle ddl job err %v", errors.ErrorStack(err))
		}
	}
}

func (d *ddl) runDDLJob(t *meta.Meta, job *model.Job) {
	if job.IsFinished() {
		return
	}

	job.State = model.JobRunning

	var err error
	switch job.Type {
	case model.ActionCreateSchema:
		err = d.onCreateSchema(t, job)
	case model.ActionDropSchema:
		err = d.onDropSchema(t, job)
	case model.ActionCreateTable:
		err = d.onCreateTable(t, job)
	case model.ActionDropTable:
		err = d.onDropTable(t, job)
	case model.ActionAddColumn:
		err = d.onAddColumn(t, job)
	case model.ActionDropColumn:
		err = d.onDropColumn(t, job)
	case model.ActionAddIndex:
		err = d.onCreateIndex(t, job)
	case model.ActionDropIndex:
		err = d.onDropIndex(t, job)
	default:
		// invalid job, cancel it.
		job.State = model.JobCancelled
		err = errors.Errorf("invalid ddl job %v", job)
	}

	// Save the error in the job, so that others can know an error happened.
	if err != nil {
		// If the job is not cancelled, we should log this error.
		if job.State != model.JobCancelled {
			log.Errorf("run ddl job err %v", errors.ErrorStack(err))
		}

		job.Error = err.Error()
		job.ErrorCount++
	}
}

// The whole schema is re-loaded every lease seconds, so we wait 2 * lease time
// to guarantee that all servers have already updated the schema.
func (d *ddl) waitSchemaChanged(waitTime time.Duration) {
	if waitTime == 0 {
		return
	}

	select {
	case <-time.After(waitTime):
	case <-d.quitCh:
	}
}
473
vendor/github.com/pingcap/tidb/ddl/index.go
generated
vendored
Normal file
@@ -0,0 +1,473 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package ddl

import (
	"github.com/juju/errors"
	"github.com/ngaut/log"
	"github.com/pingcap/tidb/ast"
	"github.com/pingcap/tidb/kv"
	"github.com/pingcap/tidb/meta"
	"github.com/pingcap/tidb/model"
	"github.com/pingcap/tidb/mysql"
	"github.com/pingcap/tidb/table"
	"github.com/pingcap/tidb/table/tables"
	"github.com/pingcap/tidb/terror"
	"github.com/pingcap/tidb/util"
	"github.com/pingcap/tidb/util/types"
)

func buildIndexInfo(tblInfo *model.TableInfo, unique bool, indexName model.CIStr, indexID int64, idxColNames []*ast.IndexColName) (*model.IndexInfo, error) {
	// build offsets
	idxColumns := make([]*model.IndexColumn, 0, len(idxColNames))
	for _, ic := range idxColNames {
		col := findCol(tblInfo.Columns, ic.Column.Name.O)
		if col == nil {
			return nil, errors.Errorf("CREATE INDEX: column does not exist: %s", ic.Column.Name.O)
		}

		idxColumns = append(idxColumns, &model.IndexColumn{
			Name:   col.Name,
			Offset: col.Offset,
			Length: ic.Length,
		})
	}
	// create index info
	idxInfo := &model.IndexInfo{
		ID:      indexID,
		Name:    indexName,
		Columns: idxColumns,
		Unique:  unique,
		State:   model.StateNone,
	}
	return idxInfo, nil
}

func addIndexColumnFlag(tblInfo *model.TableInfo, indexInfo *model.IndexInfo) {
	col := indexInfo.Columns[0]

	if indexInfo.Unique && len(indexInfo.Columns) == 1 {
		tblInfo.Columns[col.Offset].Flag |= mysql.UniqueKeyFlag
	} else {
		tblInfo.Columns[col.Offset].Flag |= mysql.MultipleKeyFlag
	}
}

func dropIndexColumnFlag(tblInfo *model.TableInfo, indexInfo *model.IndexInfo) {
	col := indexInfo.Columns[0]

	if indexInfo.Unique && len(indexInfo.Columns) == 1 {
		tblInfo.Columns[col.Offset].Flag &= ^uint(mysql.UniqueKeyFlag)
	} else {
		tblInfo.Columns[col.Offset].Flag &= ^uint(mysql.MultipleKeyFlag)
	}

	// Another index may still cover this column.
	for _, index := range tblInfo.Indices {
		if index.Name.L == indexInfo.Name.L {
			continue
		}

		if index.Columns[0].Name.L != col.Name.L {
			continue
		}

		addIndexColumnFlag(tblInfo, index)
	}
}

func (d *ddl) onCreateIndex(t *meta.Meta, job *model.Job) error {
	schemaID := job.SchemaID
	tblInfo, err := d.getTableInfo(t, job)
	if err != nil {
		return errors.Trace(err)
	}

	var (
		unique      bool
		indexName   model.CIStr
		indexID     int64
		idxColNames []*ast.IndexColName
	)

	err = job.DecodeArgs(&unique, &indexName, &indexID, &idxColNames)
	if err != nil {
		job.State = model.JobCancelled
		return errors.Trace(err)
	}

	var indexInfo *model.IndexInfo
	for _, idx := range tblInfo.Indices {
		if idx.Name.L == indexName.L {
			if idx.State == model.StatePublic {
				// An index with the same name already exists.
				job.State = model.JobCancelled
				return errors.Errorf("CREATE INDEX: index already exist %s", indexName)
			}

			indexInfo = idx
		}
	}

	if indexInfo == nil {
		indexInfo, err = buildIndexInfo(tblInfo, unique, indexName, indexID, idxColNames)
		if err != nil {
			job.State = model.JobCancelled
			return errors.Trace(err)
		}
		tblInfo.Indices = append(tblInfo.Indices, indexInfo)
	}

	_, err = t.GenSchemaVersion()
	if err != nil {
		return errors.Trace(err)
	}

	switch indexInfo.State {
	case model.StateNone:
		// none -> delete only
		job.SchemaState = model.StateDeleteOnly
		indexInfo.State = model.StateDeleteOnly
		err = t.UpdateTable(schemaID, tblInfo)
		return errors.Trace(err)
	case model.StateDeleteOnly:
		// delete only -> write only
		job.SchemaState = model.StateWriteOnly
		indexInfo.State = model.StateWriteOnly
		err = t.UpdateTable(schemaID, tblInfo)
		return errors.Trace(err)
	case model.StateWriteOnly:
		// write only -> reorganization
		job.SchemaState = model.StateWriteReorganization
		indexInfo.State = model.StateWriteReorganization
		// initialize SnapshotVer to 0 for the later reorganization check.
		job.SnapshotVer = 0
		err = t.UpdateTable(schemaID, tblInfo)
		return errors.Trace(err)
	case model.StateWriteReorganization:
		// reorganization -> public
		reorgInfo, err := d.getReorgInfo(t, job)
		if err != nil || reorgInfo.first {
			// If this is the first reorg run, we only update the job snapshot version
			// here and run the reorg next time.
			return errors.Trace(err)
		}

		var tbl table.Table
		tbl, err = d.getTable(schemaID, tblInfo)
		if err != nil {
			return errors.Trace(err)
		}

		err = d.runReorgJob(func() error {
			return d.addTableIndex(tbl, indexInfo, reorgInfo)
		})

		if terror.ErrorEqual(err, errWaitReorgTimeout) {
			// On timeout we should return, check for the owner, and wait for the job to be done again.
			return nil
		}
		if err != nil {
			return errors.Trace(err)
		}

		indexInfo.State = model.StatePublic
		// set column index flag.
		addIndexColumnFlag(tblInfo, indexInfo)
		if err = t.UpdateTable(schemaID, tblInfo); err != nil {
			return errors.Trace(err)
		}

		// finish this job
		job.SchemaState = model.StatePublic
		job.State = model.JobDone
		return nil
	default:
		return errors.Errorf("invalid index state %v", indexInfo.State)
	}
}
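
onCreateIndex advances the index through none -> delete only -> write only -> write reorganization -> public, persisting the table info after every step so that all servers converge within the lease window. A simplified, standalone sketch of that staged transition (the state names mirror model.SchemaState; everything else is illustrative):

package main

import "fmt"

type schemaState int

const (
	stateNone schemaState = iota
	stateDeleteOnly
	stateWriteOnly
	stateWriteReorganization
	statePublic
)

// step performs one transition, as onCreateIndex does per invocation;
// in the real code each step is persisted via t.UpdateTable.
func step(s schemaState) schemaState {
	switch s {
	case stateNone:
		return stateDeleteOnly
	case stateDeleteOnly:
		return stateWriteOnly
	case stateWriteOnly:
		return stateWriteReorganization
	case stateWriteReorganization:
		return statePublic
	}
	return s
}

func main() {
	s := stateNone
	for s != statePublic {
		s = step(s)
		fmt.Println("persisted state", s) // each step would be a meta update
	}
}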

func (d *ddl) onDropIndex(t *meta.Meta, job *model.Job) error {
	schemaID := job.SchemaID
	tblInfo, err := d.getTableInfo(t, job)
	if err != nil {
		return errors.Trace(err)
	}

	var indexName model.CIStr
	if err = job.DecodeArgs(&indexName); err != nil {
		job.State = model.JobCancelled
		return errors.Trace(err)
	}

	var indexInfo *model.IndexInfo
	for _, idx := range tblInfo.Indices {
		if idx.Name.L == indexName.L {
			indexInfo = idx
		}
	}

	if indexInfo == nil {
		job.State = model.JobCancelled
		return errors.Errorf("index %s doesn't exist", indexName)
	}

	_, err = t.GenSchemaVersion()
	if err != nil {
		return errors.Trace(err)
	}

	switch indexInfo.State {
	case model.StatePublic:
		// public -> write only
		job.SchemaState = model.StateWriteOnly
		indexInfo.State = model.StateWriteOnly
		err = t.UpdateTable(schemaID, tblInfo)
		return errors.Trace(err)
	case model.StateWriteOnly:
		// write only -> delete only
		job.SchemaState = model.StateDeleteOnly
		indexInfo.State = model.StateDeleteOnly
		err = t.UpdateTable(schemaID, tblInfo)
		return errors.Trace(err)
	case model.StateDeleteOnly:
		// delete only -> reorganization
		job.SchemaState = model.StateDeleteReorganization
		indexInfo.State = model.StateDeleteReorganization
		err = t.UpdateTable(schemaID, tblInfo)
		return errors.Trace(err)
	case model.StateDeleteReorganization:
		// reorganization -> absent
		tbl, err := d.getTable(schemaID, tblInfo)
		if err != nil {
			return errors.Trace(err)
		}

		err = d.runReorgJob(func() error {
			return d.dropTableIndex(tbl, indexInfo)
		})

		if terror.ErrorEqual(err, errWaitReorgTimeout) {
			// On timeout we should return, check for the owner, and wait for the job to be done again.
			return nil
		}
		if err != nil {
			return errors.Trace(err)
		}

		// All reorganization jobs are done; drop this index.
		newIndices := make([]*model.IndexInfo, 0, len(tblInfo.Indices))
		for _, idx := range tblInfo.Indices {
			if idx.Name.L != indexName.L {
				newIndices = append(newIndices, idx)
			}
		}
		tblInfo.Indices = newIndices
		// clear the column index flag.
		dropIndexColumnFlag(tblInfo, indexInfo)
		if err = t.UpdateTable(schemaID, tblInfo); err != nil {
			return errors.Trace(err)
		}

		// finish this job
		job.SchemaState = model.StateNone
		job.State = model.JobDone
		return nil
	default:
		return errors.Errorf("invalid index state %v", indexInfo.State)
	}
}

func checkRowExist(txn kv.Transaction, t table.Table, handle int64) (bool, error) {
	_, err := txn.Get(t.RecordKey(handle, nil))
	if terror.ErrorEqual(err, kv.ErrNotExist) {
		// If the row doesn't exist, we may have deleted it already,
		// so there is no need to add the index again.
		return false, nil
	} else if err != nil {
		return false, errors.Trace(err)
	}

	return true, nil
}

func fetchRowColVals(txn kv.Transaction, t table.Table, handle int64, indexInfo *model.IndexInfo) ([]types.Datum, error) {
	// fetch data
	cols := t.Cols()
	vals := make([]types.Datum, 0, len(indexInfo.Columns))
	for _, v := range indexInfo.Columns {
		col := cols[v.Offset]
		k := t.RecordKey(handle, col)
		data, err := txn.Get(k)
		if err != nil {
			return nil, errors.Trace(err)
		}
		val, err := tables.DecodeValue(data, &col.FieldType)
		if err != nil {
			return nil, errors.Trace(err)
		}
		vals = append(vals, val)
	}

	return vals, nil
}

const maxBatchSize = 1024

// How do we add an index in the reorganization state?
// 1. Generate a snapshot with a special version.
// 2. Traverse the snapshot, getting every row in the table.
// 3. For each row, if the row has already been deleted, skip to the next row.
// 4. If not deleted, check whether the index already exists; if it does, skip to the next row.
// 5. If the index doesn't exist, create it, then continue with the next row.
func (d *ddl) addTableIndex(t table.Table, indexInfo *model.IndexInfo, reorgInfo *reorgInfo) error {
	seekHandle := reorgInfo.Handle
	version := reorgInfo.SnapshotVer
	for {
		handles, err := d.getSnapshotRows(t, version, seekHandle)
		if err != nil {
			return errors.Trace(err)
		} else if len(handles) == 0 {
			return nil
		}

		seekHandle = handles[len(handles)-1] + 1

		err = d.backfillTableIndex(t, indexInfo, handles, reorgInfo)
		if err != nil {
			return errors.Trace(err)
		}
	}
}
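
addTableIndex resumes from the last backfilled handle plus one, so a worker that crashes mid-reorganization continues where it left off instead of starting over. A toy sketch of the same resumable batching over plain ints, where the hypothetical getBatch stands in for getSnapshotRows:

package main

import "fmt"

const batchSize = 3

// getBatch returns up to batchSize handles >= seek, in order; an empty
// result means the scan is complete.
func getBatch(rows []int, seek int) []int {
	var out []int
	for _, h := range rows {
		if h >= seek {
			out = append(out, h)
			if len(out) == batchSize {
				break
			}
		}
	}
	return out
}

func main() {
	rows := []int{1, 2, 5, 7, 9, 12}
	seek := 0
	for {
		batch := getBatch(rows, seek)
		if len(batch) == 0 {
			return
		}
		fmt.Println("backfill batch", batch)
		seek = batch[len(batch)-1] + 1 // resume point, like seekHandle above
	}
}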

func (d *ddl) getSnapshotRows(t table.Table, version uint64, seekHandle int64) ([]int64, error) {
	ver := kv.Version{Ver: version}

	snap, err := d.store.GetSnapshot(ver)
	if err != nil {
		return nil, errors.Trace(err)
	}

	defer snap.Release()

	firstKey := t.RecordKey(seekHandle, nil)

	it, err := snap.Seek(firstKey)
	if err != nil {
		return nil, errors.Trace(err)
	}
	defer it.Close()

	handles := make([]int64, 0, maxBatchSize)

	for it.Valid() {
		if !it.Key().HasPrefix(t.RecordPrefix()) {
			break
		}

		var handle int64
		handle, err = tables.DecodeRecordKeyHandle(it.Key())
		if err != nil {
			return nil, errors.Trace(err)
		}

		rk := t.RecordKey(handle, nil)

		handles = append(handles, handle)
		if len(handles) == maxBatchSize {
			break
		}

		err = kv.NextUntil(it, util.RowKeyPrefixFilter(rk))
		if terror.ErrorEqual(err, kv.ErrNotExist) {
			break
		} else if err != nil {
			return nil, errors.Trace(err)
		}
	}

	return handles, nil
}

func lockRow(txn kv.Transaction, t table.Table, h int64) error {
	// Get the row lock key.
	lockKey := t.RecordKey(h, nil)
	// Set the row lock key in the current txn.
	err := txn.Set(lockKey, []byte(txn.String()))
	return errors.Trace(err)
}

func (d *ddl) backfillTableIndex(t table.Table, indexInfo *model.IndexInfo, handles []int64, reorgInfo *reorgInfo) error {
	kvX := kv.NewKVIndex(t.IndexPrefix(), indexInfo.Name.L, indexInfo.ID, indexInfo.Unique)

	for _, handle := range handles {
		log.Debug("[ddl] building index...", handle)

		err := kv.RunInNewTxn(d.store, true, func(txn kv.Transaction) error {
			if err := d.isReorgRunnable(txn); err != nil {
				return errors.Trace(err)
			}

			// First check that the row exists.
			exist, err := checkRowExist(txn, t, handle)
			if err != nil {
				return errors.Trace(err)
			} else if !exist {
				// The row doesn't exist; skip it.
				return nil
			}

			var vals []types.Datum
			vals, err = fetchRowColVals(txn, t, handle, indexInfo)
			if err != nil {
				return errors.Trace(err)
			}

			exist, _, err = kvX.Exist(txn, vals, handle)
			if err != nil {
				return errors.Trace(err)
			} else if exist {
				// The index entry already exists; skip it.
				return nil
			}

			err = lockRow(txn, t, handle)
			if err != nil {
				return errors.Trace(err)
			}

			// Create the index entry.
			err = kvX.Create(txn, vals, handle)
			if err != nil {
				return errors.Trace(err)
			}

			// Update the next reorg handle.
			return errors.Trace(reorgInfo.UpdateHandle(txn, handle))
		})

		if err != nil {
			return errors.Trace(err)
		}
	}

	return nil
}

func (d *ddl) dropTableIndex(t table.Table, indexInfo *model.IndexInfo) error {
	prefix := kv.GenIndexPrefix(t.IndexPrefix(), indexInfo.ID)
	err := d.delKeysWithPrefix(prefix)

	return errors.Trace(err)
}
250
vendor/github.com/pingcap/tidb/ddl/reorg.go
generated
vendored
Normal file
@@ -0,0 +1,250 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package ddl

import (
	"fmt"
	"time"

	"github.com/juju/errors"
	"github.com/pingcap/tidb/context"
	"github.com/pingcap/tidb/kv"
	"github.com/pingcap/tidb/meta"
	"github.com/pingcap/tidb/model"
	"github.com/pingcap/tidb/terror"
)

var _ context.Context = &reorgContext{}

// reorgContext implements the context.Context interface for reorganization use.
type reorgContext struct {
	store kv.Storage
	m     map[fmt.Stringer]interface{}
	txn   kv.Transaction
}

func (c *reorgContext) GetTxn(forceNew bool) (kv.Transaction, error) {
	if forceNew {
		if c.txn != nil {
			if err := c.txn.Commit(); err != nil {
				return nil, errors.Trace(err)
			}
			c.txn = nil
		}
	}

	if c.txn != nil {
		return c.txn, nil
	}

	txn, err := c.store.Begin()
	if err != nil {
		return nil, errors.Trace(err)
	}

	c.txn = txn
	return c.txn, nil
}

func (c *reorgContext) FinishTxn(rollback bool) error {
	if c.txn == nil {
		return nil
	}

	var err error
	if rollback {
		err = c.txn.Rollback()
	} else {
		err = c.txn.Commit()
	}

	c.txn = nil

	return errors.Trace(err)
}

func (c *reorgContext) SetValue(key fmt.Stringer, value interface{}) {
	c.m[key] = value
}

func (c *reorgContext) Value(key fmt.Stringer) interface{} {
	return c.m[key]
}

func (c *reorgContext) ClearValue(key fmt.Stringer) {
	delete(c.m, key)
}

func (d *ddl) newReorgContext() context.Context {
	c := &reorgContext{
		store: d.store,
		m:     make(map[fmt.Stringer]interface{}),
	}

	return c
}

const waitReorgTimeout = 10 * time.Second

var errWaitReorgTimeout = errors.New("wait for reorganization timeout")

func (d *ddl) runReorgJob(f func() error) error {
	if d.reorgDoneCh == nil {
		// start a reorganization job
		d.wait.Add(1)
		d.reorgDoneCh = make(chan error, 1)
		go func() {
			defer d.wait.Done()
			d.reorgDoneCh <- f()
		}()
	}

	waitTimeout := waitReorgTimeout
	// If d.lease is 0 we are using local storage and can wait here for the
	// reorganization to be done.
	// If d.lease > 0 we don't need to wait here, because the outer loop waits
	// 2 * lease and then checks again, so we use a very short timeout.
	if d.lease > 0 {
		waitTimeout = 1 * time.Millisecond
	}

	// wait for the reorganization job to be done, or time out
	select {
	case err := <-d.reorgDoneCh:
		d.reorgDoneCh = nil
		return errors.Trace(err)
	case <-d.quitCh:
		// We return errWaitReorgTimeout here too, so that the outer loop will break.
		return errWaitReorgTimeout
	case <-time.After(waitTimeout):
		// On timeout we return, check the owner, and retry waiting for the job to be done.
		return errWaitReorgTimeout
	}
}
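
runReorgJob starts the reorganization goroutine at most once, then each call waits on the shared done channel with a short timeout and returns errWaitReorgTimeout so the outer loop can re-check ownership before waiting again. A standalone sketch of that poll-until-done pattern (illustrative names only):

package main

import (
	"errors"
	"fmt"
	"time"
)

var errTimeout = errors.New("wait timeout")

// poll waits briefly for the background result; on timeout the caller is
// expected to come back and poll again, like runReorgJob with lease > 0.
func poll(done <-chan error, wait time.Duration) error {
	select {
	case err := <-done:
		return err
	case <-time.After(wait):
		return errTimeout
	}
}

func main() {
	done := make(chan error, 1)
	go func() {
		time.Sleep(30 * time.Millisecond) // the "reorganization" work
		done <- nil
	}()
	for {
		if err := poll(done, 10*time.Millisecond); err != errTimeout {
			fmt.Println("reorg finished:", err)
			return
		}
		fmt.Println("timeout, re-check owner and wait again")
	}
}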

func (d *ddl) isReorgRunnable(txn kv.Transaction) error {
	if d.isClosed() {
		// The worker is closed, so the reorganization can't run.
		return errors.Trace(ErrWorkerClosed)
	}

	t := meta.NewMeta(txn)
	owner, err := t.GetDDLJobOwner()
	if err != nil {
		return errors.Trace(err)
	} else if owner == nil || owner.OwnerID != d.uuid {
		// If there is no owner we will try later, so just return an error here;
		// if another server is the owner, return an error too.
		return errors.Trace(ErrNotOwner)
	}

	return nil
}

func (d *ddl) delKeysWithPrefix(prefix kv.Key) error {
	for {
		keys := make([]kv.Key, 0, maxBatchSize)
		err := kv.RunInNewTxn(d.store, true, func(txn kv.Transaction) error {
			if err1 := d.isReorgRunnable(txn); err1 != nil {
				return errors.Trace(err1)
			}

			iter, err := txn.Seek(prefix)
			if err != nil {
				return errors.Trace(err)
			}

			defer iter.Close()
			for i := 0; i < maxBatchSize; i++ {
				if iter.Valid() && iter.Key().HasPrefix(prefix) {
					keys = append(keys, iter.Key().Clone())
					err = iter.Next()
					if err != nil {
						return errors.Trace(err)
					}
				} else {
					break
				}
			}

			for _, key := range keys {
				err := txn.Delete(key)
				// If the key doesn't exist, skip ErrNotExist.
				if err != nil && !terror.ErrorEqual(err, kv.ErrNotExist) {
					return errors.Trace(err)
				}
			}

			return nil
		})

		if err != nil {
			return errors.Trace(err)
		}

		// No keys were deleted, so we are done.
		if len(keys) == 0 {
			return nil
		}
	}
}
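
delKeysWithPrefix deletes in bounded batches, one transaction per batch, and stops once a scan finds nothing, which keeps every single transaction small. A toy sketch of the same loop over an in-memory key set, with the hypothetical collectBatch playing the role of the prefix Seek:

package main

import (
	"fmt"
	"strings"
)

const maxBatch = 2

// collectBatch gathers up to maxBatch keys with the prefix, like the Seek loop above.
func collectBatch(keys map[string]bool, prefix string) []string {
	var batch []string
	for k := range keys {
		if strings.HasPrefix(k, prefix) {
			batch = append(batch, k)
			if len(batch) == maxBatch {
				break
			}
		}
	}
	return batch
}

func main() {
	keys := map[string]bool{"i_1": true, "i_2": true, "i_3": true, "t_1": true}
	for {
		batch := collectBatch(keys, "i_")
		if len(batch) == 0 { // no keys deleted: done
			break
		}
		for _, k := range batch {
			delete(keys, k) // one "transaction" per batch
		}
		fmt.Println("deleted batch", batch)
	}
	fmt.Println("remaining:", len(keys))
}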

type reorgInfo struct {
	*model.Job
	Handle int64
	d      *ddl
	first  bool
}

func (d *ddl) getReorgInfo(t *meta.Meta, job *model.Job) (*reorgInfo, error) {
	var err error

	info := &reorgInfo{
		Job:   job,
		d:     d,
		first: job.SnapshotVer == 0,
	}

	if info.first {
		// Get the current version for the reorganization, since we don't have one yet.
		var ver kv.Version
		ver, err = d.store.CurrentVersion()
		if err != nil {
			return nil, errors.Trace(err)
		} else if ver.Ver <= 0 {
			return nil, errors.Errorf("invalid storage current version %d", ver.Ver)
		}

		job.SnapshotVer = ver.Ver
	} else {
		info.Handle, err = t.GetDDLReorgHandle(job)
		if err != nil {
			return nil, errors.Trace(err)
		}
	}

	if info.Handle > 0 {
		// We have already handled this handle, so start from the next one.
		info.Handle++
	}

	return info, errors.Trace(err)
}

func (r *reorgInfo) UpdateHandle(txn kv.Transaction, handle int64) error {
	t := meta.NewMeta(txn)
	return errors.Trace(t.UpdateDDLReorgHandle(r.Job, handle))
}
163
vendor/github.com/pingcap/tidb/ddl/schema.go
generated
vendored
Normal file
@@ -0,0 +1,163 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package ddl

import (
	"github.com/juju/errors"
	"github.com/pingcap/tidb/infoschema"
	"github.com/pingcap/tidb/meta"
	"github.com/pingcap/tidb/meta/autoid"
	"github.com/pingcap/tidb/model"
	"github.com/pingcap/tidb/table"
	"github.com/pingcap/tidb/terror"
)

func (d *ddl) onCreateSchema(t *meta.Meta, job *model.Job) error {
	schemaID := job.SchemaID
	dbInfo := &model.DBInfo{}
	if err := job.DecodeArgs(dbInfo); err != nil {
		// arg error, cancel this job.
		job.State = model.JobCancelled
		return errors.Trace(err)
	}

	dbInfo.ID = schemaID
	dbInfo.State = model.StateNone

	dbs, err := t.ListDatabases()
	if err != nil {
		return errors.Trace(err)
	}

	for _, db := range dbs {
		if db.Name.L == dbInfo.Name.L {
			if db.ID != schemaID {
				// The database already exists and can't be created, so cancel this job now.
				job.State = model.JobCancelled
				return errors.Trace(infoschema.DatabaseExists)
			}
			dbInfo = db
		}
	}

	_, err = t.GenSchemaVersion()
	if err != nil {
		return errors.Trace(err)
	}

	switch dbInfo.State {
	case model.StateNone:
		// none -> public
		job.SchemaState = model.StatePublic
		dbInfo.State = model.StatePublic
		err = t.CreateDatabase(dbInfo)
		if err != nil {
			return errors.Trace(err)
		}
		// finish this job
		job.State = model.JobDone
		return nil
	default:
		// We can't reach here.
		return errors.Errorf("invalid db state %v", dbInfo.State)
	}
}

func (d *ddl) delReorgSchema(t *meta.Meta, job *model.Job) error {
	dbInfo := &model.DBInfo{}
	if err := job.DecodeArgs(dbInfo); err != nil {
		// arg error, cancel this job.
		job.State = model.JobCancelled
		return errors.Trace(err)
	}

	tables, err := t.ListTables(dbInfo.ID)
	if terror.ErrorEqual(meta.ErrDBNotExists, err) {
		job.State = model.JobDone
		return nil
	}
	if err != nil {
		return errors.Trace(err)
	}

	if err = d.dropSchemaData(dbInfo, tables); err != nil {
		return errors.Trace(err)
	}

	// finish this background job
	job.SchemaState = model.StateNone
	job.State = model.JobDone

	return nil
}

func (d *ddl) onDropSchema(t *meta.Meta, job *model.Job) error {
	dbInfo, err := t.GetDatabase(job.SchemaID)
	if err != nil {
		return errors.Trace(err)
	}
	if dbInfo == nil {
		job.State = model.JobCancelled
		return errors.Trace(infoschema.DatabaseNotExists)
	}

	_, err = t.GenSchemaVersion()
	if err != nil {
		return errors.Trace(err)
	}

	switch dbInfo.State {
	case model.StatePublic:
		// public -> write only
		job.SchemaState = model.StateWriteOnly
		dbInfo.State = model.StateWriteOnly
		err = t.UpdateDatabase(dbInfo)
	case model.StateWriteOnly:
		// write only -> delete only
		job.SchemaState = model.StateDeleteOnly
		dbInfo.State = model.StateDeleteOnly
		err = t.UpdateDatabase(dbInfo)
	case model.StateDeleteOnly:
		dbInfo.State = model.StateDeleteReorganization
		err = t.UpdateDatabase(dbInfo)
		if err = t.DropDatabase(dbInfo.ID); err != nil {
			break
		}
		// finish this job
		job.Args = []interface{}{dbInfo}
		job.State = model.JobDone
		job.SchemaState = model.StateNone
	default:
		// We can't reach here.
		err = errors.Errorf("invalid db state %v", dbInfo.State)
	}

	return errors.Trace(err)
}

func (d *ddl) dropSchemaData(dbInfo *model.DBInfo, tables []*model.TableInfo) error {
	for _, tblInfo := range tables {
		alloc := autoid.NewAllocator(d.store, dbInfo.ID)
		t, err := table.TableFromMeta(alloc, tblInfo)
		if err != nil {
			return errors.Trace(err)
		}

		err = d.dropTableData(t)
		if err != nil {
			return errors.Trace(err)
		}
	}
	return nil
}
123
vendor/github.com/pingcap/tidb/ddl/stat.go
generated
vendored
Normal file
@@ -0,0 +1,123 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package ddl

import (
	"github.com/juju/errors"
	"github.com/pingcap/tidb/inspectkv"
	"github.com/pingcap/tidb/kv"
	"github.com/pingcap/tidb/sessionctx/variable"
)

var (
	serverID             = "server_id"
	ddlSchemaVersion     = "ddl_schema_version"
	ddlOwnerID           = "ddl_owner_id"
	ddlOwnerLastUpdateTS = "ddl_owner_last_update_ts"
	ddlJobID             = "ddl_job_id"
	ddlJobAction         = "ddl_job_action"
	ddlJobLastUpdateTS   = "ddl_job_last_update_ts"
	ddlJobState          = "ddl_job_state"
	ddlJobError          = "ddl_job_error"
	ddlJobSchemaState    = "ddl_job_schema_state"
	ddlJobSchemaID       = "ddl_job_schema_id"
	ddlJobTableID        = "ddl_job_table_id"
	ddlJobSnapshotVer    = "ddl_job_snapshot_ver"
	ddlJobReorgHandle    = "ddl_job_reorg_handle"
	ddlJobArgs           = "ddl_job_args"
	bgSchemaVersion      = "bg_schema_version"
	bgOwnerID            = "bg_owner_id"
	bgOwnerLastUpdateTS  = "bg_owner_last_update_ts"
	bgJobID              = "bg_job_id"
	bgJobAction          = "bg_job_action"
	bgJobLastUpdateTS    = "bg_job_last_update_ts"
	bgJobState           = "bg_job_state"
	bgJobError           = "bg_job_error"
	bgJobSchemaState     = "bg_job_schema_state"
	bgJobSchemaID        = "bg_job_schema_id"
	bgJobTableID         = "bg_job_table_id"
	bgJobSnapshotVer     = "bg_job_snapshot_ver"
	bgJobReorgHandle     = "bg_job_reorg_handle"
	bgJobArgs            = "bg_job_args"
)

// GetScope gets the status variables scope.
func (d *ddl) GetScope(status string) variable.ScopeFlag {
	// Currently, all DDL status variables have the default scope.
	return variable.DefaultScopeFlag
}

// Stats returns the DDL statistics.
func (d *ddl) Stats() (map[string]interface{}, error) {
	m := make(map[string]interface{})
	m[serverID] = d.uuid
	var ddlInfo, bgInfo *inspectkv.DDLInfo

	err := kv.RunInNewTxn(d.store, false, func(txn kv.Transaction) error {
		var err1 error
		ddlInfo, err1 = inspectkv.GetDDLInfo(txn)
		if err1 != nil {
			return errors.Trace(err1)
		}
		bgInfo, err1 = inspectkv.GetBgDDLInfo(txn)

		return errors.Trace(err1)
	})
	if err != nil {
		return nil, errors.Trace(err)
	}

	m[ddlSchemaVersion] = ddlInfo.SchemaVer
	if ddlInfo.Owner != nil {
		m[ddlOwnerID] = ddlInfo.Owner.OwnerID
		// LastUpdateTS uses nanoseconds.
		m[ddlOwnerLastUpdateTS] = ddlInfo.Owner.LastUpdateTS / 1e9
	}
	if ddlInfo.Job != nil {
		m[ddlJobID] = ddlInfo.Job.ID
		m[ddlJobAction] = ddlInfo.Job.Type.String()
		m[ddlJobLastUpdateTS] = ddlInfo.Job.LastUpdateTS / 1e9
		m[ddlJobState] = ddlInfo.Job.State.String()
		m[ddlJobError] = ddlInfo.Job.Error
		m[ddlJobSchemaState] = ddlInfo.Job.SchemaState.String()
		m[ddlJobSchemaID] = ddlInfo.Job.SchemaID
		m[ddlJobTableID] = ddlInfo.Job.TableID
		m[ddlJobSnapshotVer] = ddlInfo.Job.SnapshotVer
		m[ddlJobReorgHandle] = ddlInfo.ReorgHandle
		m[ddlJobArgs] = ddlInfo.Job.Args
	}

	// background DDL info
	m[bgSchemaVersion] = bgInfo.SchemaVer
	if bgInfo.Owner != nil {
		m[bgOwnerID] = bgInfo.Owner.OwnerID
		// LastUpdateTS uses nanoseconds.
		m[bgOwnerLastUpdateTS] = bgInfo.Owner.LastUpdateTS / 1e9
	}
	if bgInfo.Job != nil {
		m[bgJobID] = bgInfo.Job.ID
		m[bgJobAction] = bgInfo.Job.Type.String()
		m[bgJobLastUpdateTS] = bgInfo.Job.LastUpdateTS / 1e9
		m[bgJobState] = bgInfo.Job.State.String()
		m[bgJobError] = bgInfo.Job.Error
		m[bgJobSchemaState] = bgInfo.Job.SchemaState.String()
		m[bgJobSchemaID] = bgInfo.Job.SchemaID
		m[bgJobTableID] = bgInfo.Job.TableID
		m[bgJobSnapshotVer] = bgInfo.Job.SnapshotVer
		m[bgJobReorgHandle] = bgInfo.ReorgHandle
		m[bgJobArgs] = bgInfo.Job.Args
	}

	return m, nil
}
194
vendor/github.com/pingcap/tidb/ddl/table.go
generated
vendored
Normal file
@@ -0,0 +1,194 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package ddl

import (
	"github.com/juju/errors"
	"github.com/pingcap/tidb/infoschema"
	"github.com/pingcap/tidb/meta"
	"github.com/pingcap/tidb/meta/autoid"
	"github.com/pingcap/tidb/model"
	"github.com/pingcap/tidb/table"
	"github.com/pingcap/tidb/terror"
)

func (d *ddl) onCreateTable(t *meta.Meta, job *model.Job) error {
	schemaID := job.SchemaID
	tbInfo := &model.TableInfo{}
	if err := job.DecodeArgs(tbInfo); err != nil {
		// arg error, cancel this job.
		job.State = model.JobCancelled
		return errors.Trace(err)
	}

	tbInfo.State = model.StateNone

	tables, err := t.ListTables(schemaID)
	if terror.ErrorEqual(err, meta.ErrDBNotExists) {
		job.State = model.JobCancelled
		return errors.Trace(infoschema.DatabaseNotExists)
	} else if err != nil {
		return errors.Trace(err)
	}

	for _, tbl := range tables {
		if tbl.Name.L == tbInfo.Name.L {
			if tbl.ID != tbInfo.ID {
				// The table already exists and can't be created, so cancel this job now.
				job.State = model.JobCancelled
				return errors.Trace(infoschema.TableExists)
			}

			tbInfo = tbl
		}
	}

	_, err = t.GenSchemaVersion()
	if err != nil {
		return errors.Trace(err)
	}

	switch tbInfo.State {
	case model.StateNone:
		// none -> public
		job.SchemaState = model.StatePublic
		tbInfo.State = model.StatePublic
		err = t.CreateTable(schemaID, tbInfo)
		if err != nil {
			return errors.Trace(err)
		}
		// finish this job
		job.State = model.JobDone
		return nil
	default:
		return errors.Errorf("invalid table state %v", tbInfo.State)
	}
}

func (d *ddl) delReorgTable(t *meta.Meta, job *model.Job) error {
	tblInfo := &model.TableInfo{}
	err := job.DecodeArgs(tblInfo)
	if err != nil {
		// arg error, cancel this job.
		job.State = model.JobCancelled
		return errors.Trace(err)
	}
	tblInfo.State = model.StateDeleteReorganization
	tbl, err := d.getTable(job.SchemaID, tblInfo)
	if err != nil {
		return errors.Trace(err)
	}

	err = d.dropTableData(tbl)
	if err != nil {
		return errors.Trace(err)
	}

	// finish this background job
	job.SchemaState = model.StateNone
	job.State = model.JobDone

	return nil
}

func (d *ddl) onDropTable(t *meta.Meta, job *model.Job) error {
	schemaID := job.SchemaID
	tableID := job.TableID

	tblInfo, err := t.GetTable(schemaID, tableID)
	if terror.ErrorEqual(err, meta.ErrDBNotExists) {
		job.State = model.JobCancelled
		return errors.Trace(infoschema.DatabaseNotExists)
	} else if err != nil {
		return errors.Trace(err)
	}

	if tblInfo == nil {
		job.State = model.JobCancelled
		return errors.Trace(infoschema.TableNotExists)
	}

	_, err = t.GenSchemaVersion()
	if err != nil {
		return errors.Trace(err)
	}

	switch tblInfo.State {
	case model.StatePublic:
		// public -> write only
		job.SchemaState = model.StateWriteOnly
		tblInfo.State = model.StateWriteOnly
		err = t.UpdateTable(schemaID, tblInfo)
	case model.StateWriteOnly:
		// write only -> delete only
		job.SchemaState = model.StateDeleteOnly
		tblInfo.State = model.StateDeleteOnly
		err = t.UpdateTable(schemaID, tblInfo)
	case model.StateDeleteOnly:
		tblInfo.State = model.StateNone
		err = t.UpdateTable(schemaID, tblInfo)
		if err = t.DropTable(job.SchemaID, job.TableID); err != nil {
			break
		}
		// finish this job
		job.Args = []interface{}{tblInfo}
		job.State = model.JobDone
		job.SchemaState = model.StateNone
	default:
		err = errors.Errorf("invalid table state %v", tblInfo.State)
	}

	return errors.Trace(err)
}

func (d *ddl) getTable(schemaID int64, tblInfo *model.TableInfo) (table.Table, error) {
	alloc := autoid.NewAllocator(d.store, schemaID)
	tbl, err := table.TableFromMeta(alloc, tblInfo)
	return tbl, errors.Trace(err)
}

func (d *ddl) getTableInfo(t *meta.Meta, job *model.Job) (*model.TableInfo, error) {
	schemaID := job.SchemaID
	tableID := job.TableID
	tblInfo, err := t.GetTable(schemaID, tableID)
	if terror.ErrorEqual(err, meta.ErrDBNotExists) {
		job.State = model.JobCancelled
		return nil, errors.Trace(infoschema.DatabaseNotExists)
	} else if err != nil {
		return nil, errors.Trace(err)
	} else if tblInfo == nil {
		job.State = model.JobCancelled
		return nil, errors.Trace(infoschema.TableNotExists)
	}

	if tblInfo.State != model.StatePublic {
		job.State = model.JobCancelled
		return nil, errors.Errorf("table %s is not in public state, but %s", tblInfo.Name.L, tblInfo.State)
	}

	return tblInfo, nil
}

func (d *ddl) dropTableData(t table.Table) error {
	// delete table data
	err := d.delKeysWithPrefix(t.RecordPrefix())
	if err != nil {
		return errors.Trace(err)
	}

	// delete table index
	err = d.delKeysWithPrefix(t.IndexPrefix())

	return errors.Trace(err)
}