1
0
Fork 0
forked from forgejo/forgejo

Integrate public as bindata optionally (#293)

* Dropped unused codekit config

* Integrated dynamic and static bindata for public

* Ignore public bindata

* Add a general generate make task

* Integrated flexible public assets into web command

* Updated vendoring, added all missing govendor deps

* Made the linter happy with the bindata and dynamic code

* Moved public bindata definition to modules directory

* Ignoring the new bindata path now

* Updated to the new public modules import path

* Updated public bindata command and drop the new prefix
This commit is contained in:
Thomas Boerger 2016-11-29 17:26:36 +01:00 committed by Lunny Xiao
parent 4680c349dd
commit b6a95a8cb3
691 changed files with 305318 additions and 1272 deletions

65
vendor/github.com/pingcap/tidb/CONTRIBUTING.md generated vendored Normal file
View file

@ -0,0 +1,65 @@
# How to contribute
This document outlines some of the conventions on development workflow, commit message formatting, contact points and other
resources to make it easier to get your contribution accepted.
## Getting started
- Fork the repository on GitHub.
- Read the README.md for build instructions.
- Play with the project, submit bugs, submit patches!
## Contribution flow
This is a rough outline of what a contributor's workflow looks like:
- Create a topic branch from where you want to base your work. This is usually master.
- Make commits of logical units and add test case if the change fixes a bug or adds new functionality.
- Run the tests and make sure all of them pass.
- Make sure your commit messages are in the proper format (see below).
- Push your changes to a topic branch in your fork of the repository.
- Submit a pull request to pingcap/tidb.
- Your PR must receive LGTMs from two maintainers found in the [MAINTAINERS](./docs/MAINTAINERS.md) file.
Thanks for your contributions!
### Code style
The coding style suggested by the Golang community is used in TiDB. See the [style doc](https://github.com/golang/go/wiki/CodeReviewComments) for details.
Please follow this style to make TiDB easy to review, maintain and develop.
### Format of the Commit Message
We follow a rough convention for commit messages that is designed to answer two
questions: what changed and why. The subject line should feature the what and
the body of the commit should describe the why.
```
store/localstore: add comment for variable declaration.
Improve documentation.
```
The format can be described more formally as follows:
```
<subsystem>: <what changed>
<BLANK LINE>
<why this change was made>
<BLANK LINE>
<footer>(optional)
```
The first line is the subject and should be no longer than 70 characters, the
second line is always blank, and other lines should be wrapped at 80 characters.
This allows the message to be easier to read on GitHub as well as in various
git tools.
If the change affects more than one subsystem, you can use comma to separate them like `util/codec,util/types:`.
If the change affects many subsystems, you can use ```*``` instead, like ```*:```.
For the why part, if there is no specific reason for the change,
you can use one of the generic reasons, such as "Improve documentation.",
"Improve performance.", "Improve robustness.", or "Improve test coverage."

15
vendor/github.com/pingcap/tidb/Dockerfile generated vendored Normal file
View file

@ -0,0 +1,15 @@
FROM golang
VOLUME /opt
RUN apt-get update && apt-get install -y wget git make ; \
cd /opt ; \
export PATH=$GOROOT/bin:$GOPATH/bin:$PATH ; \
go get -d github.com/pingcap/tidb ; \
cd $GOPATH/src/github.com/pingcap/tidb ; \
make ; make server ; cp tidb-server/tidb-server /usr/bin/
EXPOSE 4000
CMD ["/usr/bin/tidb-server"]

138
vendor/github.com/pingcap/tidb/Makefile generated vendored Normal file
View file

@ -0,0 +1,138 @@
### Makefile for tidb
# Ensure GOPATH is set before running build process.
ifeq "$(GOPATH)" ""
$(error Please set the environment variable GOPATH before running `make`)
endif
path_to_add := $(addsuffix /bin,$(subst :,/bin:,$(GOPATH)))
export PATH := $(path_to_add):$(PATH)
# Check the version of make and set env varirables/commands accordingly.
version_list := $(subst ., ,$(MAKE_VERSION))
major_version := $(firstword $(version_list))
old_versions := 0 1 2 3
ifeq "$(major_version)" "$(filter $(major_version),$(old_versions))"
# Old version of `make` installed. It fails to search golex/goyacc
# by using the modified `PATH`, so we specify these commands with full path.
GODEP = $$(which godep)
GOLEX = $$(which golex)
GOYACC = $$(which goyacc)
GOLINT = $$(which golint)
else
# After version 4, `make` could follow modified `PATH` to find
# golex/goyacc correctly.
GODEP := godep
GOLEX := golex
GOYACC := goyacc
GOLINT := golint
endif
GO := $(GODEP) go
ARCH := "`uname -s`"
LINUX := "Linux"
MAC := "Darwin"
LDFLAGS += -X "github.com/pingcap/tidb/util/printer.TiDBBuildTS=$(shell date -u '+%Y-%m-%d %I:%M:%S')"
LDFLAGS += -X "github.com/pingcap/tidb/util/printer.TiDBGitHash=$(shell git rev-parse HEAD)"
TARGET = ""
.PHONY: godep deps all build install update parser clean todo test gotest interpreter server
all: godep build test check
godep:
go get github.com/tools/godep
go get github.com/pingcap/go-hbase
go get github.com/pingcap/go-themis
go get github.com/pingcap/tso/client
build:
$(GO) build
install:
$(GO) install ./...
update:
go get -u github.com/pingcap/go-hbase
go get -u github.com/pingcap/go-themis
go get -u github.com/pingcap/tso/client
TEMP_FILE = temp_parser_file
parser:
go get github.com/qiuyesuifeng/goyacc
go get github.com/qiuyesuifeng/golex
$(GOYACC) -o /dev/null -xegen $(TEMP_FILE) parser/parser.y
$(GOYACC) -o parser/parser.go -xe $(TEMP_FILE) parser/parser.y 2>&1 | egrep "(shift|reduce)/reduce" | awk '{print} END {if (NR > 0) {print "Find conflict in parser.y. Please check y.output for more information."; system("rm -f $(TEMP_FILE)"); exit 1;}}'
rm -f $(TEMP_FILE)
rm -f y.output
@if [ $(ARCH) = $(LINUX) ]; \
then \
sed -i -e 's|//line.*||' -e 's/yyEofCode/yyEOFCode/' parser/parser.go; \
elif [ $(ARCH) = $(MAC) ]; \
then \
/usr/bin/sed -i "" 's|//line.*||' parser/parser.go; \
/usr/bin/sed -i "" 's/yyEofCode/yyEOFCode/' parser/parser.go; \
fi
$(GOLEX) -o parser/scanner.go parser/scanner.l
@awk 'BEGIN{print "// Code generated by goyacc"} {print $0}' parser/parser.go > tmp_parser.go && mv tmp_parser.go parser/parser.go;
@awk 'BEGIN{print "// Code generated by goyacc"} {print $0}' parser/scanner.go > tmp_scanner.go && mv tmp_scanner.go parser/scanner.go;
check:
bash gitcookie.sh
go get github.com/golang/lint/golint
@echo "vet"
@ go tool vet . 2>&1 | grep -vE 'Godeps|parser/scanner.*unreachable code' | awk '{print} END{if(NR>0) {exit 1}}'
@echo "vet --shadow"
@ go tool vet --shadow . 2>&1 | grep -vE 'Godeps' | awk '{print} END{if(NR>0) {exit 1}}'
@echo "golint"
@ $(GOLINT) ./... 2>&1 | grep -vE 'LastInsertId|NewLexer|\.pb\.go' | awk '{print} END{if(NR>0) {exit 1}}'
@echo "gofmt (simplify)"
@ gofmt -s -l . 2>&1 | grep -vE 'Godeps|parser/parser.go|parser/scanner.go' | awk '{print} END{if(NR>0) {exit 1}}'
deps:
go list -f '{{range .Deps}}{{printf "%s\n" .}}{{end}}{{range .TestImports}}{{printf "%s\n" .}}{{end}}' ./... | \
sort | uniq | grep -E '[^/]+\.[^/]+/' |grep -v "pingcap/tidb" | \
awk 'BEGIN{ print "#!/bin/bash" }{ printf("go get -u %s\n", $$1) }' > deps.sh
chmod +x deps.sh
bash deps.sh
clean:
$(GO) clean -i ./...
rm -rf *.out
rm -f deps.sh
todo:
@grep -n ^[[:space:]]*_[[:space:]]*=[[:space:]][[:alpha:]][[:alnum:]]* */*.go parser/scanner.l parser/parser.y || true
@grep -n TODO */*.go parser/scanner.l parser/parser.y || true
@grep -n BUG */*.go parser/scanner.l parser/parser.y || true
@grep -n println */*.go parser/scanner.l parser/parser.y || true
test: gotest
gotest:
$(GO) test -cover ./...
race:
$(GO) test --race -cover ./...
ddl_test:
$(GO) test ./ddl/... -skip_ddl=false
ddl_race_test:
$(GO) test --race ./ddl/... -skip_ddl=false
interpreter:
@cd interpreter && $(GO) build -ldflags '$(LDFLAGS)'
server:
ifeq ($(TARGET), "")
@cd tidb-server && $(GO) build -ldflags '$(LDFLAGS)'
else
@cd tidb-server && $(GO) build -ldflags '$(LDFLAGS)' -o '$(TARGET)'
endif

62
vendor/github.com/pingcap/tidb/README.md generated vendored Normal file
View file

@ -0,0 +1,62 @@
![logo](./docs/logo_with_text.png)
[![Build Status](https://travis-ci.org/pingcap/tidb.svg?branch=master)](https://travis-ci.org/pingcap/tidb)
## What is TiDB?
TiDB is a distributed SQL database.
Inspired by the design of Google [F1](http://research.google.com/pubs/pub41344.html), TiDB supports the best features of both traditional RDBMS and NoSQL.
- __Horizontal scalability__
Grow TiDB as your business grows. You can increase the capacity simply by adding more machines.
- __Asynchronous schema changes__
Evolve TiDB schemas as your requirement evolves. You can add new columns and indices without stopping or affecting the on-going operations.
- __Consistent distributed transactions__
Think TiDB as a single-machine RDBMS. You can start a transaction that crosses multiple machines without worrying about consistency. TiDB makes your application code simple and robust.
- __Compatible with MySQL protocol__
Use TiDB as MySQL. You can replace MySQL with TiDB to power your application without changing a single line of code in most cases.
- __Written in Go__
Enjoy TiDB as much as we love Go. We believe Go code is both easy and enjoyable to work with. Go makes us improve TiDB fast and makes it easy to dive into the codebase.
- __NewSQL over HBase__
Turn HBase into NewSQL database
- __Multiple storage engine support__
Power TiDB with your most favorite engines. TiDB supports many popular storage engines in single-machine mode. You can choose from GolevelDB, LevelDB, RocksDB, LMDB, BoltDB and even more to come.
## Status
TiDB is at an early age and under heavy development; all of the features mentioned above are fully implemented.
__Please do not use it in production.__
## Roadmap
Read the [Roadmap](./docs/ROADMAP.md).
## Quick start
Read the [Quick Start](./docs/QUICKSTART.md)
## Architecture
![architecture](./docs/architecture.png)
## Contributing
Contributions are welcomed and greatly appreciated. See [CONTRIBUTING.md](CONTRIBUTING.md)
for details on submitting patches and the contribution workflow.
## Follow us
Twitter: [@PingCAP](https://twitter.com/PingCAP)
## License
TiDB is under the Apache 2.0 license. See the [LICENSE](./LICENSES/LICENSE) file for details.
## Acknowledgments
- Thanks [cznic](https://github.com/cznic) for providing some great open source tools.
- Thanks [Xiaomi](https://github.com/XiaoMi/themis) for providing the great open source project.
- Thanks [HBase](https://hbase.apache.org), [GolevelDB](https://github.com/syndtr/goleveldb), [LMDB](https://github.com/LMDB/lmdb), [BoltDB](https://github.com/boltdb/bolt) and [RocksDB](https://github.com/facebook/rocksdb) for their powerful storage engines.

189
vendor/github.com/pingcap/tidb/ast/ast.go generated vendored Normal file
View file

@ -0,0 +1,189 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
// Package ast is the abstract syntax tree parsed from a SQL statement by parser.
// It can be analysed and transformed by optimizer.
package ast
import (
"github.com/pingcap/tidb/context"
"github.com/pingcap/tidb/model"
"github.com/pingcap/tidb/util/types"
)
// Node is the basic element of the AST.
// Interfaces embed Node should have 'Node' name suffix.
type Node interface {
// Accept accepts Visitor to visit itself.
// The returned node should replace original node.
// ok returns false to stop visiting.
//
// Implementation of this method should first call visitor.Enter,
// assign the returned node to its method receiver, if skipChildren returns true,
// children should be skipped. Otherwise, call its children in particular order that
// later elements depends on former elements. Finally, return visitor.Leave.
Accept(v Visitor) (node Node, ok bool)
// Text returns the original text of the element.
Text() string
// SetText sets original text to the Node.
SetText(text string)
}
// Flags indicates whether an expression contains certain types of expression.
const (
FlagConstant uint64 = 0
FlagHasParamMarker uint64 = 1 << iota
FlagHasFunc
FlagHasReference
FlagHasAggregateFunc
FlagHasSubquery
FlagHasVariable
FlagHasDefault
)
// ExprNode is a node that can be evaluated.
// Name of implementations should have 'Expr' suffix.
type ExprNode interface {
// Node is embeded in ExprNode.
Node
// SetType sets evaluation type to the expression.
SetType(tp *types.FieldType)
// GetType gets the evaluation type of the expression.
GetType() *types.FieldType
// SetValue sets value to the expression.
SetValue(val interface{})
// GetValue gets value of the expression.
GetValue() interface{}
// SetDatum sets datum to the expression.
SetDatum(datum types.Datum)
// GetDatum gets datum of the expression.
GetDatum() *types.Datum
// SetFlag sets flag to the expression.
// Flag indicates whether the expression contains
// parameter marker, reference, aggregate function...
SetFlag(flag uint64)
// GetFlag returns the flag of the expression.
GetFlag() uint64
}
// FuncNode represents function call expression node.
type FuncNode interface {
ExprNode
functionExpression()
}
// StmtNode represents statement node.
// Name of implementations should have 'Stmt' suffix.
type StmtNode interface {
Node
statement()
}
// DDLNode represents DDL statement node.
type DDLNode interface {
StmtNode
ddlStatement()
}
// DMLNode represents DML statement node.
type DMLNode interface {
StmtNode
dmlStatement()
}
// ResultField represents a result field which can be a column from a table,
// or an expression in select field. It is a generated property during
// binding process. ResultField is the key element to evaluate a ColumnNameExpr.
// After resolving process, every ColumnNameExpr will be resolved to a ResultField.
// During execution, every row retrieved from table will set the row value to
// ResultFields of that table, so ColumnNameExpr resolved to that ResultField can be
// easily evaluated.
type ResultField struct {
Column *model.ColumnInfo
ColumnAsName model.CIStr
Table *model.TableInfo
TableAsName model.CIStr
DBName model.CIStr
// The expression for the result field. If it is generated from a select field, it would
// be the expression of that select field, otherwise the type would be ValueExpr and value
// will be set for every retrieved row.
Expr ExprNode
TableName *TableName
}
// Row represents a single row from Recordset.
type Row struct {
Data []types.Datum
}
// RecordSet is an abstract result set interface to help get data from Plan.
type RecordSet interface {
// Fields gets result fields.
Fields() (fields []*ResultField, err error)
// Next returns the next row, nil row means there is no more to return.
Next() (row *Row, err error)
// Close closes the underlying iterator, call Next after Close will
// restart the iteration.
Close() error
}
// ResultSetNode interface has ResultFields property which is computed and set by
// optimizer.InfoBinder during binding process. Implementations include SelectStmt,
// SubqueryExpr, TableSource, TableName and Join.
type ResultSetNode interface {
Node
// GetResultFields gets result fields of the result set node.
GetResultFields() []*ResultField
// SetResultFields sets result fields of the result set node.
SetResultFields(fields []*ResultField)
}
// Statement is an interface for SQL execution.
// NOTE: all Statement implementations must be safe for
// concurrent using by multiple goroutines.
// If the Exec method requires any Execution domain local data,
// they must be held out of the implementing instance.
type Statement interface {
// Explain gets the execution plans.
//Explain(ctx context.Context, w format.Formatter)
// IsDDL shows whether the statement is an DDL operation.
IsDDL() bool
// OriginText gets the origin SQL text.
OriginText() string
// SetText sets the executive SQL text.
SetText(text string)
// Exec executes SQL and gets a Recordset.
Exec(ctx context.Context) (RecordSet, error)
}
// Visitor visits a Node.
type Visitor interface {
// Enter is called before children nodes are visited.
// The returned node must be the same type as the input node n.
// skipChildren returns true means children nodes should be skipped,
// this is useful when work is done in Enter and there is no need to visit children.
Enter(n Node) (node Node, skipChildren bool)
// Leave is called after children nodes have been visited.
// The returned node's type can be different from the input node if it is a ExprNode,
// Non-expression node must be the same type as the input node n.
// ok returns false to stop visiting.
Leave(n Node) (node Node, ok bool)
}

119
vendor/github.com/pingcap/tidb/ast/base.go generated vendored Normal file
View file

@ -0,0 +1,119 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package ast
import "github.com/pingcap/tidb/util/types"
// node is the struct implements node interface except for Accept method.
// Node implementations should embed it in.
type node struct {
text string
}
// SetText implements Node interface.
func (n *node) SetText(text string) {
n.text = text
}
// Text implements Node interface.
func (n *node) Text() string {
return n.text
}
// stmtNode implements StmtNode interface.
// Statement implementations should embed it in.
type stmtNode struct {
node
}
// statement implements StmtNode interface.
func (sn *stmtNode) statement() {}
// ddlNode implements DDLNode interface.
// DDL implementations should embed it in.
type ddlNode struct {
stmtNode
}
// ddlStatement implements DDLNode interface.
func (dn *ddlNode) ddlStatement() {}
// dmlNode is the struct implements DMLNode interface.
// DML implementations should embed it in.
type dmlNode struct {
stmtNode
}
// dmlStatement implements DMLNode interface.
func (dn *dmlNode) dmlStatement() {}
// expressionNode is the struct implements Expression interface.
// Expression implementations should embed it in.
type exprNode struct {
node
types.Datum
Type *types.FieldType
flag uint64
}
// SetDatum implements Expression interface.
func (en *exprNode) SetDatum(datum types.Datum) {
en.Datum = datum
}
// GetDatum implements Expression interface.
func (en *exprNode) GetDatum() *types.Datum {
return &en.Datum
}
// SetType implements Expression interface.
func (en *exprNode) SetType(tp *types.FieldType) {
en.Type = tp
}
// GetType implements Expression interface.
func (en *exprNode) GetType() *types.FieldType {
return en.Type
}
// SetFlag implements Expression interface.
func (en *exprNode) SetFlag(flag uint64) {
en.flag = flag
}
// GetFlag implements Expression interface.
func (en *exprNode) GetFlag() uint64 {
return en.flag
}
type funcNode struct {
exprNode
}
// FunctionExpression implements FounctionNode interface.
func (fn *funcNode) functionExpression() {}
type resultSetNode struct {
resultFields []*ResultField
}
// GetResultFields implements ResultSetNode interface.
func (rs *resultSetNode) GetResultFields() []*ResultField {
return rs.resultFields
}
// GetResultFields implements ResultSetNode interface.
func (rs *resultSetNode) SetResultFields(rfs []*ResultField) {
rs.resultFields = rfs
}

170
vendor/github.com/pingcap/tidb/ast/cloner.go generated vendored Normal file
View file

@ -0,0 +1,170 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package ast
import "fmt"
// Cloner is an ast visitor that clones a node.
type Cloner struct {
}
// Enter implements Visitor Enter interface.
func (c *Cloner) Enter(node Node) (Node, bool) {
return copyStruct(node), false
}
// Leave implements Visitor Leave interface.
func (c *Cloner) Leave(in Node) (out Node, ok bool) {
return in, true
}
// copyStruct copies a node's struct value, if the struct has slice member,
// make a new slice and copy old slice value to new slice.
func copyStruct(in Node) (out Node) {
switch v := in.(type) {
case *ValueExpr:
nv := *v
out = &nv
case *BetweenExpr:
nv := *v
out = &nv
case *BinaryOperationExpr:
nv := *v
out = &nv
case *WhenClause:
nv := *v
out = &nv
case *CaseExpr:
nv := *v
nv.WhenClauses = make([]*WhenClause, len(v.WhenClauses))
copy(nv.WhenClauses, v.WhenClauses)
out = &nv
case *SubqueryExpr:
nv := *v
out = &nv
case *CompareSubqueryExpr:
nv := *v
out = &nv
case *ColumnName:
nv := *v
out = &nv
case *ColumnNameExpr:
nv := *v
out = &nv
case *DefaultExpr:
nv := *v
out = &nv
case *ExistsSubqueryExpr:
nv := *v
out = &nv
case *PatternInExpr:
nv := *v
nv.List = make([]ExprNode, len(v.List))
copy(nv.List, v.List)
out = &nv
case *IsNullExpr:
nv := *v
out = &nv
case *IsTruthExpr:
nv := *v
out = &nv
case *PatternLikeExpr:
nv := *v
out = &nv
case *ParamMarkerExpr:
nv := *v
out = &nv
case *ParenthesesExpr:
nv := *v
out = &nv
case *PositionExpr:
nv := *v
out = &nv
case *PatternRegexpExpr:
nv := *v
out = &nv
case *RowExpr:
nv := *v
nv.Values = make([]ExprNode, len(v.Values))
copy(nv.Values, v.Values)
out = &nv
case *UnaryOperationExpr:
nv := *v
out = &nv
case *ValuesExpr:
nv := *v
out = &nv
case *VariableExpr:
nv := *v
out = &nv
case *Join:
nv := *v
out = &nv
case *TableName:
nv := *v
out = &nv
case *TableSource:
nv := *v
out = &nv
case *OnCondition:
nv := *v
out = &nv
case *WildCardField:
nv := *v
out = &nv
case *SelectField:
nv := *v
out = &nv
case *FieldList:
nv := *v
nv.Fields = make([]*SelectField, len(v.Fields))
copy(nv.Fields, v.Fields)
out = &nv
case *TableRefsClause:
nv := *v
out = &nv
case *ByItem:
nv := *v
out = &nv
case *GroupByClause:
nv := *v
nv.Items = make([]*ByItem, len(v.Items))
copy(nv.Items, v.Items)
out = &nv
case *HavingClause:
nv := *v
out = &nv
case *OrderByClause:
nv := *v
nv.Items = make([]*ByItem, len(v.Items))
copy(nv.Items, v.Items)
out = &nv
case *SelectStmt:
nv := *v
out = &nv
case *UnionSelectList:
nv := *v
nv.Selects = make([]*SelectStmt, len(v.Selects))
copy(nv.Selects, v.Selects)
out = &nv
case *UnionStmt:
nv := *v
out = &nv
default:
// We currently only handle expression and select statement.
// Will add more when we need to.
panic("unknown ast Node type " + fmt.Sprintf("%T", v))
}
return
}

641
vendor/github.com/pingcap/tidb/ast/ddl.go generated vendored Normal file
View file

@ -0,0 +1,641 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package ast
import (
"github.com/pingcap/tidb/model"
"github.com/pingcap/tidb/util/types"
)
var (
_ DDLNode = &AlterTableStmt{}
_ DDLNode = &CreateDatabaseStmt{}
_ DDLNode = &CreateIndexStmt{}
_ DDLNode = &CreateTableStmt{}
_ DDLNode = &DropDatabaseStmt{}
_ DDLNode = &DropIndexStmt{}
_ DDLNode = &DropTableStmt{}
_ DDLNode = &TruncateTableStmt{}
_ Node = &AlterTableSpec{}
_ Node = &ColumnDef{}
_ Node = &ColumnOption{}
_ Node = &ColumnPosition{}
_ Node = &Constraint{}
_ Node = &IndexColName{}
_ Node = &ReferenceDef{}
)
// CharsetOpt is used for parsing charset option from SQL.
type CharsetOpt struct {
Chs string
Col string
}
// DatabaseOptionType is the type for database options.
type DatabaseOptionType int
// Database option types.
const (
DatabaseOptionNone DatabaseOptionType = iota
DatabaseOptionCharset
DatabaseOptionCollate
)
// DatabaseOption represents database option.
type DatabaseOption struct {
Tp DatabaseOptionType
Value string
}
// CreateDatabaseStmt is a statement to create a database.
// See: https://dev.mysql.com/doc/refman/5.7/en/create-database.html
type CreateDatabaseStmt struct {
ddlNode
IfNotExists bool
Name string
Options []*DatabaseOption
}
// Accept implements Node Accept interface.
func (n *CreateDatabaseStmt) Accept(v Visitor) (Node, bool) {
newNode, skipChildren := v.Enter(n)
if skipChildren {
return v.Leave(newNode)
}
n = newNode.(*CreateDatabaseStmt)
return v.Leave(n)
}
// DropDatabaseStmt is a statement to drop a database and all tables in the database.
// See: https://dev.mysql.com/doc/refman/5.7/en/drop-database.html
type DropDatabaseStmt struct {
ddlNode
IfExists bool
Name string
}
// Accept implements Node Accept interface.
func (n *DropDatabaseStmt) Accept(v Visitor) (Node, bool) {
newNode, skipChildren := v.Enter(n)
if skipChildren {
return v.Leave(newNode)
}
n = newNode.(*DropDatabaseStmt)
return v.Leave(n)
}
// IndexColName is used for parsing index column name from SQL.
type IndexColName struct {
node
Column *ColumnName
Length int
}
// Accept implements Node Accept interface.
func (n *IndexColName) Accept(v Visitor) (Node, bool) {
newNode, skipChildren := v.Enter(n)
if skipChildren {
return v.Leave(newNode)
}
n = newNode.(*IndexColName)
node, ok := n.Column.Accept(v)
if !ok {
return n, false
}
n.Column = node.(*ColumnName)
return v.Leave(n)
}
// ReferenceDef is used for parsing foreign key reference option from SQL.
// See: http://dev.mysql.com/doc/refman/5.7/en/create-table-foreign-keys.html
type ReferenceDef struct {
node
Table *TableName
IndexColNames []*IndexColName
}
// Accept implements Node Accept interface.
func (n *ReferenceDef) Accept(v Visitor) (Node, bool) {
newNode, skipChildren := v.Enter(n)
if skipChildren {
return v.Leave(newNode)
}
n = newNode.(*ReferenceDef)
node, ok := n.Table.Accept(v)
if !ok {
return n, false
}
n.Table = node.(*TableName)
for i, val := range n.IndexColNames {
node, ok = val.Accept(v)
if !ok {
return n, false
}
n.IndexColNames[i] = node.(*IndexColName)
}
return v.Leave(n)
}
// ColumnOptionType is the type for ColumnOption.
type ColumnOptionType int
// ColumnOption types.
const (
ColumnOptionNoOption ColumnOptionType = iota
ColumnOptionPrimaryKey
ColumnOptionNotNull
ColumnOptionAutoIncrement
ColumnOptionDefaultValue
ColumnOptionUniq
ColumnOptionIndex
ColumnOptionUniqIndex
ColumnOptionKey
ColumnOptionUniqKey
ColumnOptionNull
ColumnOptionOnUpdate // For Timestamp and Datetime only.
ColumnOptionFulltext
ColumnOptionComment
)
// ColumnOption is used for parsing column constraint info from SQL.
type ColumnOption struct {
node
Tp ColumnOptionType
// The value For Default or On Update.
Expr ExprNode
}
// Accept implements Node Accept interface.
func (n *ColumnOption) Accept(v Visitor) (Node, bool) {
newNode, skipChildren := v.Enter(n)
if skipChildren {
return v.Leave(newNode)
}
n = newNode.(*ColumnOption)
if n.Expr != nil {
node, ok := n.Expr.Accept(v)
if !ok {
return n, false
}
n.Expr = node.(ExprNode)
}
return v.Leave(n)
}
// IndexOption is the index options.
// KEY_BLOCK_SIZE [=] value
// | index_type
// | WITH PARSER parser_name
// | COMMENT 'string'
// See: http://dev.mysql.com/doc/refman/5.7/en/create-table.html
type IndexOption struct {
node
KeyBlockSize uint64
Tp model.IndexType
Comment string
}
// Accept implements Node Accept interface.
func (n *IndexOption) Accept(v Visitor) (Node, bool) {
newNode, skipChildren := v.Enter(n)
if skipChildren {
return v.Leave(newNode)
}
n = newNode.(*IndexOption)
return v.Leave(n)
}
// ConstraintType is the type for Constraint.
type ConstraintType int
// ConstraintTypes
const (
ConstraintNoConstraint ConstraintType = iota
ConstraintPrimaryKey
ConstraintKey
ConstraintIndex
ConstraintUniq
ConstraintUniqKey
ConstraintUniqIndex
ConstraintForeignKey
ConstraintFulltext
)
// Constraint is constraint for table definition.
type Constraint struct {
node
Tp ConstraintType
Name string
// Used for PRIMARY KEY, UNIQUE, ......
Keys []*IndexColName
// Used for foreign key.
Refer *ReferenceDef
// Index Options
Option *IndexOption
}
// Accept implements Node Accept interface.
func (n *Constraint) Accept(v Visitor) (Node, bool) {
newNode, skipChildren := v.Enter(n)
if skipChildren {
return v.Leave(newNode)
}
n = newNode.(*Constraint)
for i, val := range n.Keys {
node, ok := val.Accept(v)
if !ok {
return n, false
}
n.Keys[i] = node.(*IndexColName)
}
if n.Refer != nil {
node, ok := n.Refer.Accept(v)
if !ok {
return n, false
}
n.Refer = node.(*ReferenceDef)
}
if n.Option != nil {
node, ok := n.Option.Accept(v)
if !ok {
return n, false
}
n.Option = node.(*IndexOption)
}
return v.Leave(n)
}
// ColumnDef is used for parsing column definition from SQL.
type ColumnDef struct {
node
Name *ColumnName
Tp *types.FieldType
Options []*ColumnOption
}
// Accept implements Node Accept interface.
func (n *ColumnDef) Accept(v Visitor) (Node, bool) {
newNode, skipChildren := v.Enter(n)
if skipChildren {
return v.Leave(newNode)
}
n = newNode.(*ColumnDef)
node, ok := n.Name.Accept(v)
if !ok {
return n, false
}
n.Name = node.(*ColumnName)
for i, val := range n.Options {
node, ok := val.Accept(v)
if !ok {
return n, false
}
n.Options[i] = node.(*ColumnOption)
}
return v.Leave(n)
}
// CreateTableStmt is a statement to create a table.
// See: https://dev.mysql.com/doc/refman/5.7/en/create-table.html
type CreateTableStmt struct {
ddlNode
IfNotExists bool
Table *TableName
Cols []*ColumnDef
Constraints []*Constraint
Options []*TableOption
}
// Accept implements Node Accept interface.
func (n *CreateTableStmt) Accept(v Visitor) (Node, bool) {
newNode, skipChildren := v.Enter(n)
if skipChildren {
return v.Leave(newNode)
}
n = newNode.(*CreateTableStmt)
node, ok := n.Table.Accept(v)
if !ok {
return n, false
}
n.Table = node.(*TableName)
for i, val := range n.Cols {
node, ok = val.Accept(v)
if !ok {
return n, false
}
n.Cols[i] = node.(*ColumnDef)
}
for i, val := range n.Constraints {
node, ok = val.Accept(v)
if !ok {
return n, false
}
n.Constraints[i] = node.(*Constraint)
}
return v.Leave(n)
}
// DropTableStmt is a statement to drop one or more tables.
// See: https://dev.mysql.com/doc/refman/5.7/en/drop-table.html
type DropTableStmt struct {
ddlNode
IfExists bool
Tables []*TableName
}
// Accept implements Node Accept interface.
func (n *DropTableStmt) Accept(v Visitor) (Node, bool) {
newNode, skipChildren := v.Enter(n)
if skipChildren {
return v.Leave(newNode)
}
n = newNode.(*DropTableStmt)
for i, val := range n.Tables {
node, ok := val.Accept(v)
if !ok {
return n, false
}
n.Tables[i] = node.(*TableName)
}
return v.Leave(n)
}
// CreateIndexStmt is a statement to create an index.
// See: https://dev.mysql.com/doc/refman/5.7/en/create-index.html
type CreateIndexStmt struct {
	ddlNode
	IndexName string
	Table     *TableName
	// Unique marks the index as a UNIQUE index.
	Unique        bool
	IndexColNames []*IndexColName
}

// Accept implements Node Accept interface. It visits the table name and
// then each indexed column name in order.
func (n *CreateIndexStmt) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*CreateIndexStmt)
	node, ok := n.Table.Accept(v)
	if !ok {
		return n, false
	}
	n.Table = node.(*TableName)
	for i, val := range n.IndexColNames {
		node, ok = val.Accept(v)
		if !ok {
			return n, false
		}
		n.IndexColNames[i] = node.(*IndexColName)
	}
	return v.Leave(n)
}

// DropIndexStmt is a statement to drop the index.
// See: https://dev.mysql.com/doc/refman/5.7/en/drop-index.html
type DropIndexStmt struct {
	ddlNode
	IfExists  bool
	IndexName string
	Table     *TableName
}

// Accept implements Node Accept interface. The table name is the only
// child node.
func (n *DropIndexStmt) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*DropIndexStmt)
	node, ok := n.Table.Accept(v)
	if !ok {
		return n, false
	}
	n.Table = node.(*TableName)
	return v.Leave(n)
}
// TableOptionType is the type for TableOption
type TableOptionType int

// TableOption types.
const (
	TableOptionNone TableOptionType = iota
	TableOptionEngine
	TableOptionCharset
	TableOptionCollate
	TableOptionAutoIncrement
	TableOptionComment
	TableOptionAvgRowLength
	TableOptionCheckSum
	TableOptionCompression
	TableOptionConnection
	TableOptionPassword
	TableOptionKeyBlockSize
	TableOptionMaxRows
	TableOptionMinRows
	TableOptionDelayKeyWrite
	TableOptionRowFormat
)

// RowFormat types
const (
	RowFormatDefault uint64 = iota + 1
	RowFormatDynamic
	RowFormatFixed
	RowFormatCompressed
	RowFormatRedundant
	RowFormatCompact
)

// TableOption is used for parsing table option from SQL.
type TableOption struct {
	Tp TableOptionType
	// StrValue holds the value for string-typed options.
	StrValue string
	// UintValue holds the value for numeric options (e.g. a RowFormat* constant
	// when Tp is TableOptionRowFormat).
	UintValue uint64
}

// ColumnPositionType is the type for ColumnPosition.
type ColumnPositionType int

// ColumnPosition Types
const (
	ColumnPositionNone ColumnPositionType = iota
	ColumnPositionFirst
	ColumnPositionAfter
)

// ColumnPosition represent the position of the newly added column
type ColumnPosition struct {
	node
	// ColumnPositionNone | ColumnPositionFirst | ColumnPositionAfter
	Tp ColumnPositionType
	// RelativeColumn is the column the newly added column after if type is ColumnPositionAfter
	RelativeColumn *ColumnName
}

// Accept implements Node Accept interface. RelativeColumn is the only
// child node and is visited only when it is set.
func (n *ColumnPosition) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*ColumnPosition)
	if n.RelativeColumn != nil {
		node, ok := n.RelativeColumn.Accept(v)
		if !ok {
			return n, false
		}
		n.RelativeColumn = node.(*ColumnName)
	}
	return v.Leave(n)
}
// AlterTableType is the type for AlterTableSpec.
type AlterTableType int

// AlterTable types.
const (
	AlterTableOption AlterTableType = iota + 1
	AlterTableAddColumn
	AlterTableAddConstraint
	AlterTableDropColumn
	AlterTableDropPrimaryKey
	AlterTableDropIndex
	AlterTableDropForeignKey
	// TODO: Add more actions
)

// AlterTableSpec represents alter table specification.
// Only the fields relevant to Tp are populated; the rest are left zero.
type AlterTableSpec struct {
	node
	Tp         AlterTableType
	Name       string
	Constraint *Constraint
	Options    []*TableOption
	Column     *ColumnDef
	DropColumn *ColumnName
	Position   *ColumnPosition
}

// Accept implements Node Accept interface. Each optional child
// (Constraint, Column, DropColumn, Position) is visited only when set;
// Options carry no child nodes and are not visited.
func (n *AlterTableSpec) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*AlterTableSpec)
	if n.Constraint != nil {
		node, ok := n.Constraint.Accept(v)
		if !ok {
			return n, false
		}
		n.Constraint = node.(*Constraint)
	}
	if n.Column != nil {
		node, ok := n.Column.Accept(v)
		if !ok {
			return n, false
		}
		n.Column = node.(*ColumnDef)
	}
	if n.DropColumn != nil {
		node, ok := n.DropColumn.Accept(v)
		if !ok {
			return n, false
		}
		n.DropColumn = node.(*ColumnName)
	}
	if n.Position != nil {
		node, ok := n.Position.Accept(v)
		if !ok {
			return n, false
		}
		n.Position = node.(*ColumnPosition)
	}
	return v.Leave(n)
}

// AlterTableStmt is a statement to change the structure of a table.
// See: https://dev.mysql.com/doc/refman/5.7/en/alter-table.html
type AlterTableStmt struct {
	ddlNode
	Table *TableName
	Specs []*AlterTableSpec
}

// Accept implements Node Accept interface. It visits the table name and
// then every alteration spec in order.
func (n *AlterTableStmt) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*AlterTableStmt)
	node, ok := n.Table.Accept(v)
	if !ok {
		return n, false
	}
	n.Table = node.(*TableName)
	for i, val := range n.Specs {
		node, ok = val.Accept(v)
		if !ok {
			return n, false
		}
		n.Specs[i] = node.(*AlterTableSpec)
	}
	return v.Leave(n)
}

// TruncateTableStmt is a statement to empty a table completely.
// See: https://dev.mysql.com/doc/refman/5.7/en/truncate-table.html
type TruncateTableStmt struct {
	ddlNode
	Table *TableName
}

// Accept implements Node Accept interface. The table name is the only
// child node.
func (n *TruncateTableStmt) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*TruncateTableStmt)
	node, ok := n.Table.Accept(v)
	if !ok {
		return n, false
	}
	n.Table = node.(*TableName)
	return v.Leave(n)
}

891
vendor/github.com/pingcap/tidb/ast/dml.go generated vendored Normal file
View file

@ -0,0 +1,891 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ast
import (
"github.com/pingcap/tidb/model"
)
// Compile-time assertions that every DML statement and clause type in this
// file satisfies the DMLNode / Node interface it claims to implement.
var (
	_ DMLNode = &DeleteStmt{}
	_ DMLNode = &InsertStmt{}
	_ DMLNode = &UnionStmt{}
	_ DMLNode = &UpdateStmt{}
	_ DMLNode = &SelectStmt{}
	_ DMLNode = &ShowStmt{}
	_ Node    = &Assignment{}
	_ Node    = &ByItem{}
	_ Node    = &FieldList{}
	_ Node    = &GroupByClause{}
	_ Node    = &HavingClause{}
	_ Node    = &Join{}
	_ Node    = &Limit{}
	_ Node    = &OnCondition{}
	_ Node    = &OrderByClause{}
	_ Node    = &SelectField{}
	_ Node    = &TableName{}
	_ Node    = &TableRefsClause{}
	_ Node    = &TableSource{}
	_ Node    = &UnionSelectList{}
	_ Node    = &WildCardField{}
)
// JoinType is join type, including cross/left/right/full.
type JoinType int

const (
	// CrossJoin is cross join type.
	CrossJoin JoinType = iota + 1
	// LeftJoin is left Join type.
	LeftJoin
	// RightJoin is right Join type.
	RightJoin
)

// Join represents table join.
type Join struct {
	node
	resultSetNode
	// Left table can be TableSource or JoinNode.
	Left ResultSetNode
	// Right table can be TableSource or JoinNode or nil.
	Right ResultSetNode
	// Tp represents join type.
	Tp JoinType
	// On represents join on condition.
	On *OnCondition
}

// Accept implements Node Accept interface. It visits Left unconditionally,
// then Right and On when they are set.
func (n *Join) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*Join)
	node, ok := n.Left.Accept(v)
	if !ok {
		return n, false
	}
	n.Left = node.(ResultSetNode)
	if n.Right != nil {
		node, ok = n.Right.Accept(v)
		if !ok {
			return n, false
		}
		n.Right = node.(ResultSetNode)
	}
	if n.On != nil {
		node, ok = n.On.Accept(v)
		if !ok {
			return n, false
		}
		n.On = node.(*OnCondition)
	}
	return v.Leave(n)
}

// TableName represents a table name.
type TableName struct {
	node
	resultSetNode
	// Schema is the database name; Name is the table name.
	Schema model.CIStr
	Name   model.CIStr
	// DBInfo/TableInfo hold resolved schema metadata.
	DBInfo    *model.DBInfo
	TableInfo *model.TableInfo
}

// Accept implements Node Accept interface. TableName is a leaf node with
// no children to visit.
func (n *TableName) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*TableName)
	return v.Leave(n)
}
// DeleteTableList is the tablelist used in delete statement multi-table mode.
type DeleteTableList struct {
	node
	Tables []*TableName
}

// Accept implements Node Accept interface. The explicit n != nil guard lets
// callers invoke Accept on a nil *DeleteTableList receiver (single-table
// DELETE leaves the list unset) without panicking on the range loop.
func (n *DeleteTableList) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*DeleteTableList)
	if n != nil {
		for i, t := range n.Tables {
			node, ok := t.Accept(v)
			if !ok {
				return n, false
			}
			n.Tables[i] = node.(*TableName)
		}
	}
	return v.Leave(n)
}

// OnCondition represetns JOIN on condition.
type OnCondition struct {
	node
	Expr ExprNode
}

// Accept implements Node Accept interface. The condition expression is the
// only child node.
func (n *OnCondition) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*OnCondition)
	node, ok := n.Expr.Accept(v)
	if !ok {
		return n, false
	}
	n.Expr = node.(ExprNode)
	return v.Leave(n)
}

// TableSource represents table source with a name.
type TableSource struct {
	node
	// Source is the source of the data, can be a TableName,
	// a SelectStmt, a UnionStmt, or a JoinNode.
	Source ResultSetNode
	// AsName is the alias name of the table source.
	AsName model.CIStr
}

// Accept implements Node Accept interface. The wrapped Source is the only
// child node.
func (n *TableSource) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*TableSource)
	node, ok := n.Source.Accept(v)
	if !ok {
		return n, false
	}
	n.Source = node.(ResultSetNode)
	return v.Leave(n)
}

// SetResultFields implements ResultSetNode interface by delegating to the
// wrapped Source.
func (n *TableSource) SetResultFields(rfs []*ResultField) {
	n.Source.SetResultFields(rfs)
}

// GetResultFields implements ResultSetNode interface by delegating to the
// wrapped Source.
func (n *TableSource) GetResultFields() []*ResultField {
	return n.Source.GetResultFields()
}
// SelectLockType is the lock type for SelectStmt.
type SelectLockType int

// Select lock types.
const (
	SelectLockNone SelectLockType = iota
	SelectLockForUpdate
	SelectLockInShareMode
)

// WildCardField is a special type of select field content.
type WildCardField struct {
	node
	// Table and Schema optionally qualify the wildcard (e.g. db.tbl.*).
	Table  model.CIStr
	Schema model.CIStr
}

// Accept implements Node Accept interface. WildCardField is a leaf node.
func (n *WildCardField) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*WildCardField)
	return v.Leave(n)
}

// SelectField represents fields in select statement.
// There are two type of select field: wildcard
// and expression with optional alias name.
type SelectField struct {
	node
	// Offset is used to get original text.
	Offset int
	// If WildCard is not nil, Expr will be nil.
	WildCard *WildCardField
	// If Expr is not nil, WildCard will be nil.
	Expr ExprNode
	// Alias name for Expr.
	AsName model.CIStr
}

// Accept implements Node Accept interface. Only Expr is visited; the
// WildCard alternative has no visitable children.
func (n *SelectField) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*SelectField)
	if n.Expr != nil {
		node, ok := n.Expr.Accept(v)
		if !ok {
			return n, false
		}
		n.Expr = node.(ExprNode)
	}
	return v.Leave(n)
}

// FieldList represents field list in select statement.
type FieldList struct {
	node
	Fields []*SelectField
}

// Accept implements Node Accept interface. Each select field is visited in
// order.
func (n *FieldList) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*FieldList)
	for i, val := range n.Fields {
		node, ok := val.Accept(v)
		if !ok {
			return n, false
		}
		n.Fields[i] = node.(*SelectField)
	}
	return v.Leave(n)
}
// TableRefsClause represents table references clause in dml statement.
type TableRefsClause struct {
	node
	TableRefs *Join
}

// Accept implements Node Accept interface. The join tree is the only child
// node.
func (n *TableRefsClause) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*TableRefsClause)
	node, ok := n.TableRefs.Accept(v)
	if !ok {
		return n, false
	}
	n.TableRefs = node.(*Join)
	return v.Leave(n)
}

// ByItem represents an item in order by or group by.
type ByItem struct {
	node
	Expr ExprNode
	// Desc selects descending order when true.
	Desc bool
}

// Accept implements Node Accept interface. The ordering expression is the
// only child node.
func (n *ByItem) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*ByItem)
	node, ok := n.Expr.Accept(v)
	if !ok {
		return n, false
	}
	n.Expr = node.(ExprNode)
	return v.Leave(n)
}

// GroupByClause represents group by clause.
type GroupByClause struct {
	node
	Items []*ByItem
}

// Accept implements Node Accept interface. Each group-by item is visited
// in order.
func (n *GroupByClause) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*GroupByClause)
	for i, val := range n.Items {
		node, ok := val.Accept(v)
		if !ok {
			return n, false
		}
		n.Items[i] = node.(*ByItem)
	}
	return v.Leave(n)
}

// HavingClause represents having clause.
type HavingClause struct {
	node
	Expr ExprNode
}

// Accept implements Node Accept interface. The filter expression is the
// only child node.
func (n *HavingClause) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*HavingClause)
	node, ok := n.Expr.Accept(v)
	if !ok {
		return n, false
	}
	n.Expr = node.(ExprNode)
	return v.Leave(n)
}

// OrderByClause represents order by clause.
type OrderByClause struct {
	node
	Items []*ByItem
	// ForUnion marks an ORDER BY that applies to a whole UNION result.
	ForUnion bool
}

// Accept implements Node Accept interface. Each order-by item is visited
// in order.
func (n *OrderByClause) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*OrderByClause)
	for i, val := range n.Items {
		node, ok := val.Accept(v)
		if !ok {
			return n, false
		}
		n.Items[i] = node.(*ByItem)
	}
	return v.Leave(n)
}
// SelectStmt represents the select query node.
// See: https://dev.mysql.com/doc/refman/5.7/en/select.html
type SelectStmt struct {
	dmlNode
	resultSetNode
	// Distinct represents if the select has distinct option.
	Distinct bool
	// From is the from clause of the query.
	From *TableRefsClause
	// Where is the where clause in select statement.
	Where ExprNode
	// Fields is the select expression list.
	Fields *FieldList
	// GroupBy is the group by expression list.
	GroupBy *GroupByClause
	// Having is the having condition.
	Having *HavingClause
	// OrderBy is the ordering expression list.
	OrderBy *OrderByClause
	// Limit is the limit clause.
	Limit *Limit
	// Lock is the lock type
	LockTp SelectLockType
}

// Accept implements Node Accept interface. Clauses are visited in the
// fixed order From, Where, Fields, GroupBy, Having, OrderBy, Limit, each
// only when present.
func (n *SelectStmt) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*SelectStmt)
	if n.From != nil {
		node, ok := n.From.Accept(v)
		if !ok {
			return n, false
		}
		n.From = node.(*TableRefsClause)
	}
	if n.Where != nil {
		node, ok := n.Where.Accept(v)
		if !ok {
			return n, false
		}
		n.Where = node.(ExprNode)
	}
	if n.Fields != nil {
		node, ok := n.Fields.Accept(v)
		if !ok {
			return n, false
		}
		n.Fields = node.(*FieldList)
	}
	if n.GroupBy != nil {
		node, ok := n.GroupBy.Accept(v)
		if !ok {
			return n, false
		}
		n.GroupBy = node.(*GroupByClause)
	}
	if n.Having != nil {
		node, ok := n.Having.Accept(v)
		if !ok {
			return n, false
		}
		n.Having = node.(*HavingClause)
	}
	if n.OrderBy != nil {
		node, ok := n.OrderBy.Accept(v)
		if !ok {
			return n, false
		}
		n.OrderBy = node.(*OrderByClause)
	}
	if n.Limit != nil {
		node, ok := n.Limit.Accept(v)
		if !ok {
			return n, false
		}
		n.Limit = node.(*Limit)
	}
	return v.Leave(n)
}
// UnionSelectList represents the select list in a union statement.
type UnionSelectList struct {
	node
	Selects []*SelectStmt
}

// Accept implements Node Accept interface. Each member SELECT is visited
// in order.
func (n *UnionSelectList) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*UnionSelectList)
	for i, sel := range n.Selects {
		node, ok := sel.Accept(v)
		if !ok {
			return n, false
		}
		n.Selects[i] = node.(*SelectStmt)
	}
	return v.Leave(n)
}

// UnionStmt represents "union statement"
// See: https://dev.mysql.com/doc/refman/5.7/en/union.html
type UnionStmt struct {
	dmlNode
	resultSetNode
	// Distinct is true for UNION [DISTINCT], false for UNION ALL.
	Distinct   bool
	SelectList *UnionSelectList
	OrderBy    *OrderByClause
	Limit      *Limit
}

// Accept implements Node Accept interface. SelectList, OrderBy and Limit
// are visited in that order, each only when present.
func (n *UnionStmt) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*UnionStmt)
	if n.SelectList != nil {
		node, ok := n.SelectList.Accept(v)
		if !ok {
			return n, false
		}
		n.SelectList = node.(*UnionSelectList)
	}
	if n.OrderBy != nil {
		node, ok := n.OrderBy.Accept(v)
		if !ok {
			return n, false
		}
		n.OrderBy = node.(*OrderByClause)
	}
	if n.Limit != nil {
		node, ok := n.Limit.Accept(v)
		if !ok {
			return n, false
		}
		n.Limit = node.(*Limit)
	}
	return v.Leave(n)
}
// Assignment is the expression for assignment, like a = 1.
type Assignment struct {
	node
	// Column is the column name to be assigned.
	Column *ColumnName
	// Expr is the expression assigning to ColName.
	Expr ExprNode
}

// Accept implements Node Accept interface. The target column is visited
// first, then the assigned expression.
func (n *Assignment) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*Assignment)
	node, ok := n.Column.Accept(v)
	if !ok {
		return n, false
	}
	n.Column = node.(*ColumnName)
	node, ok = n.Expr.Accept(v)
	if !ok {
		return n, false
	}
	n.Expr = node.(ExprNode)
	return v.Leave(n)
}

// Priority const values.
// See: https://dev.mysql.com/doc/refman/5.7/en/insert.html
// NOTE(review): these constants are untyped ints (no dedicated type) —
// confirm whether that is intentional before changing.
const (
	NoPriority = iota
	LowPriority
	HighPriority
	DelayedPriority
)
// InsertStmt is a statement to insert new rows into an existing table.
// See: https://dev.mysql.com/doc/refman/5.7/en/insert.html
type InsertStmt struct {
	dmlNode
	// IsReplace is true for REPLACE statements, which share this node.
	IsReplace bool
	Table     *TableRefsClause
	Columns   []*ColumnName
	// Lists holds the VALUES row lists; Setlist holds SET assignments.
	Lists       [][]ExprNode
	Setlist     []*Assignment
	Priority    int
	OnDuplicate []*Assignment
	// Select is set for INSERT ... SELECT.
	Select ResultSetNode
}

// Accept implements Node Accept interface. Note the Select source (when
// present) is visited before the target table; then columns, value lists,
// SET assignments and ON DUPLICATE KEY assignments follow in order.
func (n *InsertStmt) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*InsertStmt)
	if n.Select != nil {
		node, ok := n.Select.Accept(v)
		if !ok {
			return n, false
		}
		n.Select = node.(ResultSetNode)
	}
	node, ok := n.Table.Accept(v)
	if !ok {
		return n, false
	}
	n.Table = node.(*TableRefsClause)
	for i, val := range n.Columns {
		node, ok := val.Accept(v)
		if !ok {
			return n, false
		}
		n.Columns[i] = node.(*ColumnName)
	}
	for i, list := range n.Lists {
		for j, val := range list {
			node, ok := val.Accept(v)
			if !ok {
				return n, false
			}
			n.Lists[i][j] = node.(ExprNode)
		}
	}
	for i, val := range n.Setlist {
		node, ok := val.Accept(v)
		if !ok {
			return n, false
		}
		n.Setlist[i] = node.(*Assignment)
	}
	for i, val := range n.OnDuplicate {
		node, ok := val.Accept(v)
		if !ok {
			return n, false
		}
		n.OnDuplicate[i] = node.(*Assignment)
	}
	return v.Leave(n)
}
// DeleteStmt is a statement to delete rows from table.
// See: https://dev.mysql.com/doc/refman/5.7/en/delete.html
type DeleteStmt struct {
	dmlNode
	// Used in both single table and multiple table delete statement.
	TableRefs *TableRefsClause
	// Only used in multiple table delete statement.
	Tables       *DeleteTableList
	Where        ExprNode
	Order        *OrderByClause
	Limit        *Limit
	LowPriority  bool
	Ignore       bool
	Quick        bool
	IsMultiTable bool
	BeforeFrom   bool
}

// Accept implements Node Accept interface. n.Tables is visited
// unconditionally even in single-table mode; DeleteTableList.Accept
// tolerates a nil receiver, which this relies on.
func (n *DeleteStmt) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*DeleteStmt)
	node, ok := n.TableRefs.Accept(v)
	if !ok {
		return n, false
	}
	n.TableRefs = node.(*TableRefsClause)
	node, ok = n.Tables.Accept(v)
	if !ok {
		return n, false
	}
	n.Tables = node.(*DeleteTableList)
	if n.Where != nil {
		node, ok = n.Where.Accept(v)
		if !ok {
			return n, false
		}
		n.Where = node.(ExprNode)
	}
	if n.Order != nil {
		node, ok = n.Order.Accept(v)
		if !ok {
			return n, false
		}
		n.Order = node.(*OrderByClause)
	}
	if n.Limit != nil {
		node, ok = n.Limit.Accept(v)
		if !ok {
			return n, false
		}
		n.Limit = node.(*Limit)
	}
	return v.Leave(n)
}

// UpdateStmt is a statement to update columns of existing rows in tables with new values.
// See: https://dev.mysql.com/doc/refman/5.7/en/update.html
type UpdateStmt struct {
	dmlNode
	TableRefs *TableRefsClause
	// List holds the SET assignments.
	List          []*Assignment
	Where         ExprNode
	Order         *OrderByClause
	Limit         *Limit
	LowPriority   bool
	Ignore        bool
	MultipleTable bool
}

// Accept implements Node Accept interface. It visits the table references,
// then each assignment, then the optional Where/Order/Limit clauses.
func (n *UpdateStmt) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*UpdateStmt)
	node, ok := n.TableRefs.Accept(v)
	if !ok {
		return n, false
	}
	n.TableRefs = node.(*TableRefsClause)
	for i, val := range n.List {
		node, ok = val.Accept(v)
		if !ok {
			return n, false
		}
		n.List[i] = node.(*Assignment)
	}
	if n.Where != nil {
		node, ok = n.Where.Accept(v)
		if !ok {
			return n, false
		}
		n.Where = node.(ExprNode)
	}
	if n.Order != nil {
		node, ok = n.Order.Accept(v)
		if !ok {
			return n, false
		}
		n.Order = node.(*OrderByClause)
	}
	if n.Limit != nil {
		node, ok = n.Limit.Accept(v)
		if !ok {
			return n, false
		}
		n.Limit = node.(*Limit)
	}
	return v.Leave(n)
}
// Limit is the limit clause.
type Limit struct {
	node
	// Offset is the number of rows to skip; Count is the row cap.
	Offset uint64
	Count  uint64
}

// Accept implements the Node Accept interface. Limit has no child nodes,
// so only Enter and Leave are invoked.
func (n *Limit) Accept(v Visitor) (Node, bool) {
	entered, skip := v.Enter(n)
	if skip {
		return v.Leave(entered)
	}
	return v.Leave(entered.(*Limit))
}
// ShowStmtType is the type for SHOW statement.
type ShowStmtType int

// Show statement types.
// NOTE(review): these constants are untyped ints rather than ShowStmtType —
// confirm whether that is intentional before changing.
const (
	ShowNone = iota
	ShowEngines
	ShowDatabases
	ShowTables
	ShowTableStatus
	ShowColumns
	ShowWarnings
	ShowCharset
	ShowVariables
	ShowStatus
	ShowCollation
	ShowCreateTable
	ShowGrants
	ShowTriggers
	ShowProcedureStatus
	ShowIndex
)

// ShowStmt is a statement to provide information about databases, tables, columns and so on.
// See: https://dev.mysql.com/doc/refman/5.7/en/show.html
type ShowStmt struct {
	dmlNode
	resultSetNode
	Tp     ShowStmtType // Databases/Tables/Columns/....
	DBName string
	Table  *TableName  // Used for showing columns.
	Column *ColumnName // Used for `desc table column`.
	Flag   int         // Some flag parsed from sql, such as FULL.
	Full   bool
	User   string // Used for show grants.
	// Used by show variables
	GlobalScope bool
	Pattern     *PatternLikeExpr
	Where       ExprNode
}

// Accept implements Node Accept interface. Table, Column, Pattern and
// Where are visited in that order, each only when present.
func (n *ShowStmt) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*ShowStmt)
	if n.Table != nil {
		node, ok := n.Table.Accept(v)
		if !ok {
			return n, false
		}
		n.Table = node.(*TableName)
	}
	if n.Column != nil {
		node, ok := n.Column.Accept(v)
		if !ok {
			return n, false
		}
		n.Column = node.(*ColumnName)
	}
	if n.Pattern != nil {
		node, ok := n.Pattern.Accept(v)
		if !ok {
			return n, false
		}
		n.Pattern = node.(*PatternLikeExpr)
	}
	if n.Where != nil {
		node, ok := n.Where.Accept(v)
		if !ok {
			return n, false
		}
		n.Where = node.(ExprNode)
	}
	return v.Leave(n)
}

749
vendor/github.com/pingcap/tidb/ast/expressions.go generated vendored Normal file
View file

@ -0,0 +1,749 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ast
import (
"regexp"
"github.com/pingcap/tidb/context"
"github.com/pingcap/tidb/model"
"github.com/pingcap/tidb/mysql"
"github.com/pingcap/tidb/parser/opcode"
"github.com/pingcap/tidb/util/types"
)
// Compile-time assertions that every expression and clause type in this
// file satisfies the ExprNode / Node interface it claims to implement.
var (
	_ ExprNode = &BetweenExpr{}
	_ ExprNode = &BinaryOperationExpr{}
	_ ExprNode = &CaseExpr{}
	_ ExprNode = &ColumnNameExpr{}
	_ ExprNode = &CompareSubqueryExpr{}
	_ ExprNode = &DefaultExpr{}
	_ ExprNode = &ExistsSubqueryExpr{}
	_ ExprNode = &IsNullExpr{}
	_ ExprNode = &IsTruthExpr{}
	_ ExprNode = &ParamMarkerExpr{}
	_ ExprNode = &ParenthesesExpr{}
	_ ExprNode = &PatternInExpr{}
	_ ExprNode = &PatternLikeExpr{}
	_ ExprNode = &PatternRegexpExpr{}
	_ ExprNode = &PositionExpr{}
	_ ExprNode = &RowExpr{}
	_ ExprNode = &SubqueryExpr{}
	_ ExprNode = &UnaryOperationExpr{}
	_ ExprNode = &ValueExpr{}
	_ ExprNode = &ValuesExpr{}
	_ ExprNode = &VariableExpr{}
	_ Node     = &ColumnName{}
	_ Node     = &WhenClause{}
)
// ValueExpr is the simple value expression.
type ValueExpr struct {
	exprNode
}

// NewValueExpr creates a ValueExpr with value, and sets default field type.
// Passing an existing *ValueExpr returns it unchanged; an UnquoteString
// value gets a varchar field type with the default charset/collation; any
// other value gets the default field type inferred from the value itself.
func NewValueExpr(value interface{}) *ValueExpr {
	if ve, ok := value.(*ValueExpr); ok {
		return ve
	}
	ve := &ValueExpr{}
	ve.SetValue(value)
	if _, ok := value.(UnquoteString); ok {
		ve.Type = types.NewFieldType(mysql.TypeVarchar)
		ve.Type.Charset = mysql.DefaultCharset
		ve.Type.Collate = mysql.DefaultCollationName
		return ve
	}
	ve.Type = types.DefaultTypeForValue(value)
	return ve
}

// Accept implements Node interface. ValueExpr is a leaf node with no
// children to visit.
func (n *ValueExpr) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*ValueExpr)
	return v.Leave(n)
}
// BetweenExpr is for "between and" or "not between and" expression.
type BetweenExpr struct {
	exprNode
	// Expr is the expression to be checked.
	Expr ExprNode
	// Left is the expression for minimal value in the range.
	Left ExprNode
	// Right is the expression for maximum value in the range.
	Right ExprNode
	// Not is true, the expression is "not between and".
	Not bool
}

// Accept implements Node interface. Expr, Left and Right are visited in
// that order.
func (n *BetweenExpr) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*BetweenExpr)
	node, ok := n.Expr.Accept(v)
	if !ok {
		return n, false
	}
	n.Expr = node.(ExprNode)
	node, ok = n.Left.Accept(v)
	if !ok {
		return n, false
	}
	n.Left = node.(ExprNode)
	node, ok = n.Right.Accept(v)
	if !ok {
		return n, false
	}
	n.Right = node.(ExprNode)
	return v.Leave(n)
}

// BinaryOperationExpr is for binary operation like `1 + 1`, `1 - 1`, etc.
type BinaryOperationExpr struct {
	exprNode
	// Op is the operator code for BinaryOperation.
	Op opcode.Op
	// L is the left expression in BinaryOperation.
	L ExprNode
	// R is the right expression in BinaryOperation.
	R ExprNode
}

// Accept implements Node interface. The left operand is visited before the
// right one.
func (n *BinaryOperationExpr) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*BinaryOperationExpr)
	node, ok := n.L.Accept(v)
	if !ok {
		return n, false
	}
	n.L = node.(ExprNode)
	node, ok = n.R.Accept(v)
	if !ok {
		return n, false
	}
	n.R = node.(ExprNode)
	return v.Leave(n)
}
// WhenClause is the when clause in Case expression for "when condition then result".
type WhenClause struct {
	node
	// Expr is the condition expression in WhenClause.
	Expr ExprNode
	// Result is the result expression in WhenClause.
	Result ExprNode
}

// Accept implements Node Accept interface. The condition is visited before
// the result expression.
func (n *WhenClause) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*WhenClause)
	node, ok := n.Expr.Accept(v)
	if !ok {
		return n, false
	}
	n.Expr = node.(ExprNode)
	node, ok = n.Result.Accept(v)
	if !ok {
		return n, false
	}
	n.Result = node.(ExprNode)
	return v.Leave(n)
}

// CaseExpr is the case expression.
type CaseExpr struct {
	exprNode
	// Value is the compare value expression.
	Value ExprNode
	// WhenClauses is the condition check expression.
	WhenClauses []*WhenClause
	// ElseClause is the else result expression.
	ElseClause ExprNode
}

// Accept implements Node Accept interface. The optional Value is visited
// first, then each WHEN clause in order, then the optional ELSE clause.
func (n *CaseExpr) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*CaseExpr)
	if n.Value != nil {
		node, ok := n.Value.Accept(v)
		if !ok {
			return n, false
		}
		n.Value = node.(ExprNode)
	}
	for i, val := range n.WhenClauses {
		node, ok := val.Accept(v)
		if !ok {
			return n, false
		}
		n.WhenClauses[i] = node.(*WhenClause)
	}
	if n.ElseClause != nil {
		node, ok := n.ElseClause.Accept(v)
		if !ok {
			return n, false
		}
		n.ElseClause = node.(ExprNode)
	}
	return v.Leave(n)
}
// SubqueryExec represents a subquery executor interface.
// This interface is implemented in executor and used in plan/evaluator.
// It will execute the subselect and get the result.
type SubqueryExec interface {
	ExprNode
	// EvalRows executes the subquery and returns the multi rows with rowCount.
	// rowCount < 0 means no limit.
	// If the ColumnCount is 1, we will return a column result like {1, 2, 3},
	// otherwise, we will return a table result like {{1, 1}, {2, 2}}.
	EvalRows(ctx context.Context, rowCount int) ([]interface{}, error)
	// ColumnCount returns column count for the sub query.
	ColumnCount() (int, error)
}

// SubqueryExpr represents a subquery.
type SubqueryExpr struct {
	exprNode
	// Query is the query SelectNode.
	Query ResultSetNode
	// SubqueryExec, when set, takes over evaluation of the subquery.
	SubqueryExec SubqueryExec
	Evaluated    bool
	// UseOuterContext marks a correlated subquery referencing the outer query.
	UseOuterContext bool
}

// Accept implements Node Accept interface. When SubqueryExec is set it is
// visited INSTEAD of Query (early return); the visitor's replacement must
// still satisfy SubqueryExec or traversal fails.
func (n *SubqueryExpr) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*SubqueryExpr)
	if n.SubqueryExec != nil {
		t, ok := n.SubqueryExec.Accept(v)
		if !ok {
			return n, false
		}
		sq, ok := t.(SubqueryExec)
		if !ok {
			return n, false
		}
		n.SubqueryExec = sq
		return v.Leave(n)
	}
	node, ok := n.Query.Accept(v)
	if !ok {
		return n, false
	}
	n.Query = node.(ResultSetNode)
	return v.Leave(n)
}

// SetResultFields implements ResultSetNode interface by delegating to the
// wrapped Query.
func (n *SubqueryExpr) SetResultFields(rfs []*ResultField) {
	n.Query.SetResultFields(rfs)
}

// GetResultFields implements ResultSetNode interface by delegating to the
// wrapped Query.
func (n *SubqueryExpr) GetResultFields() []*ResultField {
	return n.Query.GetResultFields()
}
// CompareSubqueryExpr is the expression for "expr cmp (select ...)".
// See: https://dev.mysql.com/doc/refman/5.7/en/comparisons-using-subqueries.html
// See: https://dev.mysql.com/doc/refman/5.7/en/any-in-some-subqueries.html
// See: https://dev.mysql.com/doc/refman/5.7/en/all-subqueries.html
type CompareSubqueryExpr struct {
	exprNode
	// L is the left expression
	L ExprNode
	// Op is the comparison opcode.
	Op opcode.Op
	// R is the subquery for right expression, may be rewritten to other type of expression.
	R ExprNode
	// All is true, we should compare all records in subquery.
	All bool
}

// Accept implements Node Accept interface. The left expression is visited
// before the subquery side.
func (n *CompareSubqueryExpr) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*CompareSubqueryExpr)
	node, ok := n.L.Accept(v)
	if !ok {
		return n, false
	}
	n.L = node.(ExprNode)
	node, ok = n.R.Accept(v)
	if !ok {
		return n, false
	}
	n.R = node.(ExprNode)
	return v.Leave(n)
}

// ColumnName represents column name.
type ColumnName struct {
	node
	// Schema and Table optionally qualify the column Name.
	Schema model.CIStr
	Table  model.CIStr
	Name   model.CIStr
}

// Accept implements Node Accept interface. ColumnName is a leaf node.
func (n *ColumnName) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*ColumnName)
	return v.Leave(n)
}
// ColumnNameExpr represents a column name expression.
type ColumnNameExpr struct {
	exprNode
	// Name is the referenced column name.
	Name *ColumnName
	// Refer is the result field the column name refers to.
	// The value of Refer.Expr is used as the value of the expression.
	Refer *ResultField
}

// Accept implements Node Accept interface. The column name is the only
// child node; Refer is resolved metadata and is not visited.
func (n *ColumnNameExpr) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*ColumnNameExpr)
	node, ok := n.Name.Accept(v)
	if !ok {
		return n, false
	}
	n.Name = node.(*ColumnName)
	return v.Leave(n)
}

// DefaultExpr is the default expression using default value for a column.
type DefaultExpr struct {
	exprNode
	// Name is the column name.
	Name *ColumnName
}

// Accept implements Node Accept interface. The optional column name is the
// only child node.
func (n *DefaultExpr) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*DefaultExpr)
	if n.Name != nil {
		node, ok := n.Name.Accept(v)
		if !ok {
			return n, false
		}
		n.Name = node.(*ColumnName)
	}
	return v.Leave(n)
}

// ExistsSubqueryExpr is the expression for "exists (select ...)".
// https://dev.mysql.com/doc/refman/5.7/en/exists-and-not-exists-subqueries.html
type ExistsSubqueryExpr struct {
	exprNode
	// Sel is the subquery, may be rewritten to other type of expression.
	Sel ExprNode
}

// Accept implements Node Accept interface. The subquery expression is the
// only child node.
func (n *ExistsSubqueryExpr) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*ExistsSubqueryExpr)
	node, ok := n.Sel.Accept(v)
	if !ok {
		return n, false
	}
	n.Sel = node.(ExprNode)
	return v.Leave(n)
}
// PatternInExpr is the expression for in operator, like "expr in (1, 2, 3)" or "expr in (select c from t)".
type PatternInExpr struct {
	exprNode
	// Expr is the value expression to be compared.
	Expr ExprNode
	// List is the list expression in compare list.
	List []ExprNode
	// Not is true, the expression is "not in".
	Not bool
	// Sel is the subquery, may be rewritten to other type of expression.
	Sel ExprNode
}

// Accept implements Node Accept interface.
// Children are visited in order: Expr, each List element, then Sel
// (when present). The walk aborts on the first visitor failure.
func (n *PatternInExpr) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*PatternInExpr)
	node, ok := n.Expr.Accept(v)
	if !ok {
		return n, false
	}
	n.Expr = node.(ExprNode)
	for i, val := range n.List {
		node, ok = val.Accept(v)
		if !ok {
			return n, false
		}
		n.List[i] = node.(ExprNode)
	}
	if n.Sel != nil {
		node, ok = n.Sel.Accept(v)
		if !ok {
			return n, false
		}
		n.Sel = node.(ExprNode)
	}
	return v.Leave(n)
}
// IsNullExpr is the expression for null check.
type IsNullExpr struct {
	exprNode
	// Expr is the expression to be checked.
	Expr ExprNode
	// Not is true, the expression is "is not null".
	Not bool
}

// Accept implements Node Accept interface.
func (n *IsNullExpr) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*IsNullExpr)
	node, ok := n.Expr.Accept(v)
	if !ok {
		return n, false
	}
	n.Expr = node.(ExprNode)
	return v.Leave(n)
}
// IsTruthExpr is the expression for true/false check.
type IsTruthExpr struct {
	exprNode
	// Expr is the expression to be checked.
	Expr ExprNode
	// Not is true, the expression is "is not true/false".
	Not bool
	// True indicates checking true or false.
	True int64
}

// Accept implements Node Accept interface.
func (n *IsTruthExpr) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*IsTruthExpr)
	node, ok := n.Expr.Accept(v)
	if !ok {
		return n, false
	}
	n.Expr = node.(ExprNode)
	return v.Leave(n)
}
// PatternLikeExpr is the expression for like operator, e.g, expr like "%123%"
type PatternLikeExpr struct {
	exprNode
	// Expr is the expression to be checked.
	Expr ExprNode
	// Pattern is the like expression.
	Pattern ExprNode
	// Not is true, the expression is "not like".
	Not bool
	// Escape is the escape character for the pattern.
	Escape byte
	// PatChars/PatTypes hold the compiled form of the pattern.
	PatChars []byte
	PatTypes []byte
}

// Accept implements Node Accept interface.
// Both Expr and Pattern are nil-checked before visiting, since either
// side may be absent on this node.
func (n *PatternLikeExpr) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*PatternLikeExpr)
	if n.Expr != nil {
		node, ok := n.Expr.Accept(v)
		if !ok {
			return n, false
		}
		n.Expr = node.(ExprNode)
	}
	if n.Pattern != nil {
		node, ok := n.Pattern.Accept(v)
		if !ok {
			return n, false
		}
		n.Pattern = node.(ExprNode)
	}
	return v.Leave(n)
}
// ParamMarkerExpr expression holds a place for another expression.
// Used in parsing prepare statement.
type ParamMarkerExpr struct {
	exprNode
	// Offset is the position of the marker in the original SQL text.
	Offset int
}

// Accept implements Node Accept interface.
// ParamMarkerExpr is a leaf node with no children to visit.
func (n *ParamMarkerExpr) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*ParamMarkerExpr)
	return v.Leave(n)
}
// ParenthesesExpr is the parentheses expression.
type ParenthesesExpr struct {
	exprNode
	// Expr is the expression in parentheses.
	Expr ExprNode
}

// Accept implements Node Accept interface.
func (n *ParenthesesExpr) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*ParenthesesExpr)
	if n.Expr != nil {
		node, ok := n.Expr.Accept(v)
		if !ok {
			return n, false
		}
		n.Expr = node.(ExprNode)
	}
	return v.Leave(n)
}
// PositionExpr is the expression for order by and group by position.
// MySQL use position expression started from 1, it looks a little confused inner.
// maybe later we will use 0 at first.
type PositionExpr struct {
	exprNode
	// N is the position, started from 1 now.
	N int
	// Refer is the result field the position refers to.
	Refer *ResultField
}
// Accept implements Node Accept interface.
// PositionExpr is a leaf node, so only Enter/Leave are invoked.
func (n *PositionExpr) Accept(v Visitor) (Node, bool) {
	nn, skip := v.Enter(n)
	if skip {
		return v.Leave(nn)
	}
	return v.Leave(nn.(*PositionExpr))
}
// PatternRegexpExpr is the pattern expression for pattern match.
type PatternRegexpExpr struct {
	exprNode
	// Expr is the expression to be checked.
	Expr ExprNode
	// Pattern is the expression for pattern.
	Pattern ExprNode
	// Not is true, the expression is "not rlike",
	Not bool
	// Re is the compiled regexp.
	Re *regexp.Regexp
	// Sexpr is the string for Expr expression.
	Sexpr *string
}

// Accept implements Node Accept interface.
// Children are visited in order: Expr, then Pattern. Unlike
// PatternLikeExpr, both children are assumed non-nil here.
func (n *PatternRegexpExpr) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*PatternRegexpExpr)
	node, ok := n.Expr.Accept(v)
	if !ok {
		return n, false
	}
	n.Expr = node.(ExprNode)
	node, ok = n.Pattern.Accept(v)
	if !ok {
		return n, false
	}
	n.Pattern = node.(ExprNode)
	return v.Leave(n)
}
// RowExpr is the expression for row constructor.
// See https://dev.mysql.com/doc/refman/5.7/en/row-subqueries.html
type RowExpr struct {
	exprNode
	// Values are the element expressions of the row constructor.
	Values []ExprNode
}

// Accept implements Node Accept interface.
// Each element of Values is visited in order; the walk aborts on the
// first visitor failure.
func (n *RowExpr) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*RowExpr)
	for i, val := range n.Values {
		node, ok := val.Accept(v)
		if !ok {
			return n, false
		}
		n.Values[i] = node.(ExprNode)
	}
	return v.Leave(n)
}
// UnaryOperationExpr is the expression for unary operator.
type UnaryOperationExpr struct {
	exprNode
	// Op is the operator opcode.
	Op opcode.Op
	// V is the unary expression.
	V ExprNode
}

// Accept implements Node Accept interface.
func (n *UnaryOperationExpr) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*UnaryOperationExpr)
	node, ok := n.V.Accept(v)
	if !ok {
		return n, false
	}
	n.V = node.(ExprNode)
	return v.Leave(n)
}
// ValuesExpr is the expression used in INSERT VALUES
type ValuesExpr struct {
	exprNode
	// Column is the column name expression the VALUES() call refers to.
	Column *ColumnNameExpr
}

// Accept implements Node Accept interface.
func (n *ValuesExpr) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*ValuesExpr)
	node, ok := n.Column.Accept(v)
	if !ok {
		return n, false
	}
	n.Column = node.(*ColumnNameExpr)
	return v.Leave(n)
}
// VariableExpr is the expression for variable.
type VariableExpr struct {
	exprNode
	// Name is the variable name.
	Name string
	// IsGlobal indicates whether this variable is global.
	IsGlobal bool
	// IsSystem indicates whether this variable is a system variable in current session.
	IsSystem bool
}

// Accept implements Node Accept interface.
// VariableExpr is a leaf node with no children to visit.
func (n *VariableExpr) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*VariableExpr)
	return v.Leave(n)
}

165
vendor/github.com/pingcap/tidb/ast/flag.go generated vendored Normal file
View file

@ -0,0 +1,165 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package ast
// preEvaluable is the set of flags an expression may carry and still be
// evaluable before execution (parameter markers, functions, variables,
// column defaults).
const preEvaluable = FlagHasParamMarker | FlagHasFunc | FlagHasVariable | FlagHasDefault

// IsPreEvaluable checks if the expression can be evaluated before execution.
// flag|preEvaluable == preEvaluable holds exactly when the expression's
// flag has no bits outside the preEvaluable mask.
func IsPreEvaluable(expr ExprNode) bool {
	return expr.GetFlag()|preEvaluable == preEvaluable
}

// IsConstant checks if the expression is constant.
// A constant expression is safe to be rewritten to value expression.
func IsConstant(expr ExprNode) bool {
	return expr.GetFlag() == FlagConstant
}

// HasAggFlag checks if the expr contains FlagHasAggregateFunc.
func HasAggFlag(expr ExprNode) bool {
	return expr.GetFlag()&FlagHasAggregateFunc > 0
}
// SetFlag sets flag for expression.
// It walks the whole tree with a flagSetter, which computes each node's
// flag bottom-up in Leave.
func SetFlag(n Node) {
	var setter flagSetter
	n.Accept(&setter)
}

// flagSetter is a Visitor that derives every expression node's flag
// from the flags of its children.
type flagSetter struct {
}

// Enter implements Visitor interface. It never skips children; all flag
// computation happens in Leave.
func (f *flagSetter) Enter(in Node) (Node, bool) {
	return in, false
}
// Leave implements Visitor interface.
// Called bottom-up, so every child's flag is already final; each case
// combines the children's flags (plus the node's own contribution) into
// the node's flag. Node types not listed keep their existing flag.
func (f *flagSetter) Leave(in Node) (Node, bool) {
	switch x := in.(type) {
	case *AggregateFuncExpr:
		f.aggregateFunc(x)
	case *BetweenExpr:
		x.SetFlag(x.Expr.GetFlag() | x.Left.GetFlag() | x.Right.GetFlag())
	case *BinaryOperationExpr:
		x.SetFlag(x.L.GetFlag() | x.R.GetFlag())
	case *CaseExpr:
		f.caseExpr(x)
	case *ColumnNameExpr:
		x.SetFlag(FlagHasReference)
	case *CompareSubqueryExpr:
		x.SetFlag(x.L.GetFlag() | x.R.GetFlag())
	case *DefaultExpr:
		x.SetFlag(FlagHasDefault)
	case *ExistsSubqueryExpr:
		x.SetFlag(x.Sel.GetFlag())
	case *FuncCallExpr:
		f.funcCall(x)
	case *FuncCastExpr:
		x.SetFlag(FlagHasFunc | x.Expr.GetFlag())
	case *IsNullExpr:
		x.SetFlag(x.Expr.GetFlag())
	case *IsTruthExpr:
		x.SetFlag(x.Expr.GetFlag())
	case *ParamMarkerExpr:
		x.SetFlag(FlagHasParamMarker)
	case *ParenthesesExpr:
		x.SetFlag(x.Expr.GetFlag())
	case *PatternInExpr:
		f.patternIn(x)
	case *PatternLikeExpr:
		f.patternLike(x)
	case *PatternRegexpExpr:
		f.patternRegexp(x)
	case *PositionExpr:
		x.SetFlag(FlagHasReference)
	case *RowExpr:
		f.row(x)
	case *SubqueryExpr:
		x.SetFlag(FlagHasSubquery)
	case *UnaryOperationExpr:
		x.SetFlag(x.V.GetFlag())
	case *ValueExpr:
		// A bare value contributes nothing; its flag is left unchanged.
	case *ValuesExpr:
		x.SetFlag(FlagHasReference)
	case *VariableExpr:
		x.SetFlag(FlagHasVariable)
	}
	return in, true
}
// caseExpr computes the flag of a CASE expression as the union of the
// flags of its operand (if any), every WHEN/THEN pair, and the ELSE
// clause (if any).
func (f *flagSetter) caseExpr(x *CaseExpr) {
	flag := uint64(0)
	if x.Value != nil {
		flag = x.Value.GetFlag()
	}
	for _, w := range x.WhenClauses {
		flag |= w.Expr.GetFlag() | w.Result.GetFlag()
	}
	if x.ElseClause != nil {
		flag |= x.ElseClause.GetFlag()
	}
	x.SetFlag(flag)
}
// patternIn sets the flag of an IN expression: union of the compared
// expression, every list element, and the subquery when present.
func (f *flagSetter) patternIn(x *PatternInExpr) {
	flag := x.Expr.GetFlag()
	for _, val := range x.List {
		flag |= val.GetFlag()
	}
	if x.Sel != nil {
		flag |= x.Sel.GetFlag()
	}
	x.SetFlag(flag)
}

// patternLike sets the flag of a LIKE expression: union of the pattern
// and, when present, the checked expression.
func (f *flagSetter) patternLike(x *PatternLikeExpr) {
	flag := x.Pattern.GetFlag()
	if x.Expr != nil {
		flag |= x.Expr.GetFlag()
	}
	x.SetFlag(flag)
}

// patternRegexp sets the flag of a REGEXP/RLIKE expression, mirroring
// patternLike.
func (f *flagSetter) patternRegexp(x *PatternRegexpExpr) {
	flag := x.Pattern.GetFlag()
	if x.Expr != nil {
		flag |= x.Expr.GetFlag()
	}
	x.SetFlag(flag)
}

// row sets the flag of a row constructor to the union of its elements.
func (f *flagSetter) row(x *RowExpr) {
	var flag uint64
	for _, val := range x.Values {
		flag |= val.GetFlag()
	}
	x.SetFlag(flag)
}

// funcCall sets the flag of a function call: FlagHasFunc plus the union
// of all argument flags.
func (f *flagSetter) funcCall(x *FuncCallExpr) {
	flag := FlagHasFunc
	for _, val := range x.Args {
		flag |= val.GetFlag()
	}
	x.SetFlag(flag)
}

// aggregateFunc sets the flag of an aggregate call: FlagHasAggregateFunc
// plus the union of all argument flags.
func (f *flagSetter) aggregateFunc(x *AggregateFuncExpr) {
	flag := FlagHasAggregateFunc
	for _, val := range x.Args {
		flag |= val.GetFlag()
	}
	x.SetFlag(flag)
}

403
vendor/github.com/pingcap/tidb/ast/functions.go generated vendored Normal file
View file

@ -0,0 +1,403 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package ast
import (
"bytes"
"fmt"
"strings"
"github.com/juju/errors"
"github.com/pingcap/tidb/model"
"github.com/pingcap/tidb/util/distinct"
"github.com/pingcap/tidb/util/types"
)
// Compile-time checks that all function expression types satisfy FuncNode.
var (
	_ FuncNode = &AggregateFuncExpr{}
	_ FuncNode = &FuncCallExpr{}
	_ FuncNode = &FuncCastExpr{}
)

// UnquoteString is not quoted when printed.
type UnquoteString string
// FuncCallExpr is for function expression.
type FuncCallExpr struct {
	funcNode
	// FnName is the function name.
	FnName model.CIStr
	// Args is the function args.
	Args []ExprNode
}

// Accept implements Node interface.
// Each argument is visited in order; the walk aborts on the first
// visitor failure.
func (n *FuncCallExpr) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*FuncCallExpr)
	for i, val := range n.Args {
		node, ok := val.Accept(v)
		if !ok {
			return n, false
		}
		n.Args[i] = node.(ExprNode)
	}
	return v.Leave(n)
}
// CastFunctionType is the type for cast function.
type CastFunctionType int

// CastFunction types
const (
	// CastFunction is CAST(expr AS type).
	CastFunction CastFunctionType = iota + 1
	// CastConvertFunction is CONVERT(expr, type).
	CastConvertFunction
	// CastBinaryOperator is the BINARY prefix operator.
	CastBinaryOperator
)

// FuncCastExpr is the cast function converting value to another type, e.g, cast(expr AS signed).
// See https://dev.mysql.com/doc/refman/5.7/en/cast-functions.html
type FuncCastExpr struct {
	funcNode
	// Expr is the expression to be converted.
	Expr ExprNode
	// Tp is the conversion type.
	Tp *types.FieldType
	// Cast, Convert and Binary share this struct.
	FunctionType CastFunctionType
}

// Accept implements Node Accept interface.
func (n *FuncCastExpr) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*FuncCastExpr)
	node, ok := n.Expr.Accept(v)
	if !ok {
		return n, false
	}
	n.Expr = node.(ExprNode)
	return v.Leave(n)
}
// TrimDirectionType is the type for trim direction.
type TrimDirectionType int

const (
	// TrimBothDefault trims from both direction by default.
	TrimBothDefault TrimDirectionType = iota
	// TrimBoth trims from both direction with explicit notation.
	TrimBoth
	// TrimLeading trims from left.
	TrimLeading
	// TrimTrailing trims from right.
	TrimTrailing
)

// DateArithType is type for DateArith type.
type DateArithType byte

const (
	// DateAdd is to run adddate or date_add function option.
	// See: https://dev.mysql.com/doc/refman/5.7/en/date-and-time-functions.html#function_adddate
	// See: https://dev.mysql.com/doc/refman/5.7/en/date-and-time-functions.html#function_date-add
	DateAdd DateArithType = iota + 1
	// DateSub is to run subdate or date_sub function option.
	// See: https://dev.mysql.com/doc/refman/5.7/en/date-and-time-functions.html#function_subdate
	// See: https://dev.mysql.com/doc/refman/5.7/en/date-and-time-functions.html#function_date-sub
	DateSub
)

// DateArithInterval is the struct of DateArith interval part.
type DateArithInterval struct {
	// Unit is the interval unit keyword (e.g. DAY, MONTH) as parsed.
	Unit string
	// Interval is the interval value expression.
	Interval ExprNode
}
// Names of the supported aggregate functions. These are matched
// case-insensitively in Update (see strings.ToLower there).
const (
	// AggFuncCount is the name of Count function.
	AggFuncCount = "count"
	// AggFuncSum is the name of Sum function.
	AggFuncSum = "sum"
	// AggFuncAvg is the name of Avg function.
	AggFuncAvg = "avg"
	// AggFuncFirstRow is the name of FirstRowColumn function.
	AggFuncFirstRow = "firstrow"
	// AggFuncMax is the name of max function.
	AggFuncMax = "max"
	// AggFuncMin is the name of min function.
	AggFuncMin = "min"
	// AggFuncGroupConcat is the name of group_concat function.
	AggFuncGroupConcat = "group_concat"
)

// AggregateFuncExpr represents aggregate function expression.
type AggregateFuncExpr struct {
	funcNode
	// F is the function name.
	F string
	// Args is the function args.
	Args []ExprNode
	// If distinct is true, the function only aggregate distinct values.
	// For example, column c1 values are "1", "2", "2", "sum(c1)" is "5",
	// but "sum(distinct c1)" is "3".
	Distinct bool
	// CurrentGroup selects which entry of contextPerGroupMap is updated.
	CurrentGroup string
	// contextPerGroupMap is used to store aggregate evaluation context.
	// Each entry for a group.
	contextPerGroupMap map[string](*AggEvaluateContext)
}
// Accept implements Node Accept interface.
// Each argument is visited in order; the walk aborts on the first
// visitor failure.
func (n *AggregateFuncExpr) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*AggregateFuncExpr)
	for i, val := range n.Args {
		node, ok := val.Accept(v)
		if !ok {
			return n, false
		}
		n.Args[i] = node.(ExprNode)
	}
	return v.Leave(n)
}

// Clear clears aggregate computing context.
// After Clear, GetContext will lazily rebuild an empty context map.
func (n *AggregateFuncExpr) Clear() {
	n.CurrentGroup = ""
	n.contextPerGroupMap = nil
}
// Update is used for update aggregate context.
// It dispatches on the lower-cased function name; unknown names are a
// silent no-op.
func (n *AggregateFuncExpr) Update() error {
	name := strings.ToLower(n.F)
	switch name {
	case AggFuncCount:
		return n.updateCount()
	case AggFuncFirstRow:
		return n.updateFirstRow()
	case AggFuncGroupConcat:
		return n.updateGroupConcat()
	case AggFuncMax:
		return n.updateMaxMin(true)
	case AggFuncMin:
		return n.updateMaxMin(false)
	case AggFuncSum, AggFuncAvg:
		// avg shares sum's context update; Count and Value together
		// provide what is needed to compute the average later.
		return n.updateSum()
	}
	return nil
}

// GetContext gets aggregate evaluation context for the current group.
// If it is nil, add a new context into contextPerGroupMap.
func (n *AggregateFuncExpr) GetContext() *AggEvaluateContext {
	if n.contextPerGroupMap == nil {
		n.contextPerGroupMap = make(map[string](*AggEvaluateContext))
	}
	if _, ok := n.contextPerGroupMap[n.CurrentGroup]; !ok {
		c := &AggEvaluateContext{}
		if n.Distinct {
			// Distinct aggregation needs a checker to drop duplicates.
			c.distinctChecker = distinct.CreateDistinctChecker()
		}
		n.contextPerGroupMap[n.CurrentGroup] = c
	}
	return n.contextPerGroupMap[n.CurrentGroup]
}
// updateCount increments the count for the current group.
// A row is skipped entirely when any argument evaluates to nil, and,
// under DISTINCT, when the argument tuple was already seen.
func (n *AggregateFuncExpr) updateCount() error {
	ctx := n.GetContext()
	vals := make([]interface{}, 0, len(n.Args))
	for _, a := range n.Args {
		value := a.GetValue()
		if value == nil {
			return nil
		}
		vals = append(vals, value)
	}
	if n.Distinct {
		d, err := ctx.distinctChecker.Check(vals)
		if err != nil {
			return errors.Trace(err)
		}
		if !d {
			return nil
		}
	}
	ctx.Count++
	return nil
}

// updateFirstRow records the single argument's value for the first row
// of the current group and ignores all later rows.
func (n *AggregateFuncExpr) updateFirstRow() error {
	ctx := n.GetContext()
	if ctx.evaluated {
		return nil
	}
	if len(n.Args) != 1 {
		return errors.New("Wrong number of args for AggFuncFirstRow")
	}
	ctx.Value = n.Args[0].GetValue()
	ctx.evaluated = true
	return nil
}
// updateMaxMin updates the max (max==true) or min (max==false) context
// with the single argument's value for the current row. The first
// evaluated value is kept as-is; a later value replaces it when it
// compares greater (max) or smaller (min).
func (n *AggregateFuncExpr) updateMaxMin(max bool) error {
	ctx := n.GetContext()
	if len(n.Args) != 1 {
		// Was "Wrong number of args for AggFuncFirstRow" — a copy-paste
		// slip from updateFirstRow; report the right function family.
		return errors.New("Wrong number of args for AggFuncMaxMin")
	}
	v := n.Args[0].GetValue()
	if !ctx.evaluated {
		ctx.Value = v
		ctx.evaluated = true
		return nil
	}
	c, err := types.Compare(ctx.Value, v)
	if err != nil {
		return errors.Trace(err)
	}
	if max {
		if c == -1 {
			ctx.Value = v
		}
	} else {
		if c == 1 {
			ctx.Value = v
		}
	}
	return nil
}
// updateSum accumulates the single argument's value into the group sum
// (also used for avg). Nil values are skipped; under DISTINCT, repeated
// values are skipped as well. Count is incremented alongside the sum so
// avg can be derived later.
func (n *AggregateFuncExpr) updateSum() error {
	ctx := n.GetContext()
	a := n.Args[0]
	value := a.GetValue()
	if value == nil {
		return nil
	}
	if n.Distinct {
		d, err := ctx.distinctChecker.Check([]interface{}{value})
		if err != nil {
			return errors.Trace(err)
		}
		if !d {
			return nil
		}
	}
	var err error
	ctx.Value, err = types.CalculateSum(ctx.Value, value)
	if err != nil {
		return errors.Trace(err)
	}
	ctx.Count++
	return nil
}

// updateGroupConcat appends the current row's argument values to the
// group's string buffer. Rows with any nil argument are skipped, as are
// duplicate tuples under DISTINCT. Rows are joined with ","; the values
// of one row are concatenated without a separator.
func (n *AggregateFuncExpr) updateGroupConcat() error {
	ctx := n.GetContext()
	vals := make([]interface{}, 0, len(n.Args))
	for _, a := range n.Args {
		value := a.GetValue()
		if value == nil {
			return nil
		}
		vals = append(vals, value)
	}
	if n.Distinct {
		d, err := ctx.distinctChecker.Check(vals)
		if err != nil {
			return errors.Trace(err)
		}
		if !d {
			return nil
		}
	}
	if ctx.Buffer == nil {
		ctx.Buffer = &bytes.Buffer{}
	} else {
		// now use comma separator
		ctx.Buffer.WriteString(",")
	}
	for _, val := range vals {
		ctx.Buffer.WriteString(fmt.Sprintf("%v", val))
	}
	// TODO: if total length is greater than global var group_concat_max_len, truncate it.
	return nil
}
// AggregateFuncExtractor visits Expr tree.
// It converts ColumnNameExpr to AggregateFuncExpr and collects AggregateFuncExpr.
type AggregateFuncExtractor struct {
	// inAggregateFuncExpr is true while walking inside an aggregate call.
	inAggregateFuncExpr bool
	// AggFuncs is the collected AggregateFuncExprs.
	AggFuncs []*AggregateFuncExpr
	// extracting is set after the first Enter so that nested statements
	// (subqueries) are skipped rather than re-extracted.
	extracting bool
}

// Enter implements Visitor interface.
func (a *AggregateFuncExtractor) Enter(n Node) (node Node, skipChildren bool) {
	switch n.(type) {
	case *AggregateFuncExpr:
		a.inAggregateFuncExpr = true
	case *SelectStmt, *InsertStmt, *DeleteStmt, *UpdateStmt:
		// Enter a new context, skip it.
		// For example: select sum(c) + c + exists(select c from t) from t;
		if a.extracting {
			return n, true
		}
	}
	a.extracting = true
	return n, false
}

// Leave implements Visitor interface.
// Column references outside any aggregate call are wrapped in a
// firstrow() aggregate so they are evaluated once per group.
func (a *AggregateFuncExtractor) Leave(n Node) (node Node, ok bool) {
	switch v := n.(type) {
	case *AggregateFuncExpr:
		a.inAggregateFuncExpr = false
		a.AggFuncs = append(a.AggFuncs, v)
	case *ColumnNameExpr:
		// compose new AggregateFuncExpr
		if !a.inAggregateFuncExpr {
			// For example: select sum(c) + c from t;
			// The c in sum() should be evaluated for each row.
			// The c after plus should be evaluated only once.
			agg := &AggregateFuncExpr{
				F:    AggFuncFirstRow,
				Args: []ExprNode{v},
			}
			a.AggFuncs = append(a.AggFuncs, agg)
			return agg, true
		}
	}
	return n, true
}

// AggEvaluateContext is used to store intermediate result when caculation aggregate functions.
type AggEvaluateContext struct {
	// distinctChecker filters duplicate values for DISTINCT aggregates.
	distinctChecker *distinct.Checker
	// Count is the number of accumulated rows (count/sum/avg).
	Count int64
	// Value is the accumulated value (sum/max/min/firstrow).
	Value interface{}
	Buffer *bytes.Buffer // Buffer is used for group_concat.
	// evaluated records whether Value has been set at least once.
	evaluated bool
}

507
vendor/github.com/pingcap/tidb/ast/misc.go generated vendored Normal file
View file

@ -0,0 +1,507 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package ast
import (
"fmt"
"github.com/pingcap/tidb/context"
"github.com/pingcap/tidb/model"
"github.com/pingcap/tidb/mysql"
"github.com/pingcap/tidb/sessionctx/db"
)
// Compile-time checks that the statement and node types below satisfy
// their respective interfaces.
var (
	_ StmtNode = &AdminStmt{}
	_ StmtNode = &BeginStmt{}
	_ StmtNode = &CommitStmt{}
	_ StmtNode = &CreateUserStmt{}
	_ StmtNode = &DeallocateStmt{}
	_ StmtNode = &DoStmt{}
	_ StmtNode = &ExecuteStmt{}
	_ StmtNode = &ExplainStmt{}
	_ StmtNode = &GrantStmt{}
	_ StmtNode = &PrepareStmt{}
	_ StmtNode = &RollbackStmt{}
	_ StmtNode = &SetCharsetStmt{}
	_ StmtNode = &SetPwdStmt{}
	_ StmtNode = &SetStmt{}
	_ StmtNode = &UseStmt{}
	_ Node     = &PrivElem{}
	_ Node     = &VariableAssignment{}
)

// TypeOpt is used for parsing data type option from SQL.
type TypeOpt struct {
	IsUnsigned bool
	IsZerofill bool
}

// FloatOpt is used for parsing floating-point type option from SQL.
// See: http://dev.mysql.com/doc/refman/5.7/en/floating-point-types.html
type FloatOpt struct {
	// Flen is the total display length.
	Flen    int
	// Decimal is the number of digits after the decimal point.
	Decimal int
}

// AuthOption is used for parsing create use statement.
type AuthOption struct {
	// AuthString/HashString can be empty, so we need to decide which one to use.
	ByAuthString bool
	AuthString   string
	HashString   string
	// TODO: support auth_plugin
}
// ExplainStmt is a statement to provide information about how is SQL statement executed
// or get columns information in a table.
// See: https://dev.mysql.com/doc/refman/5.7/en/explain.html
type ExplainStmt struct {
	stmtNode
	// Stmt is the statement being explained.
	Stmt StmtNode
}

// Accept implements Node Accept interface.
func (n *ExplainStmt) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*ExplainStmt)
	node, ok := n.Stmt.Accept(v)
	if !ok {
		return n, false
	}
	// The field is declared StmtNode; asserting to DMLNode here (as the
	// code previously did) would panic for any non-DML statement under
	// EXPLAIN, so assert to the field's own type instead.
	n.Stmt = node.(StmtNode)
	return v.Leave(n)
}
// PrepareStmt is a statement to prepares a SQL statement which contains placeholders,
// and it is executed with ExecuteStmt and released with DeallocateStmt.
// See: https://dev.mysql.com/doc/refman/5.7/en/prepare.html
type PrepareStmt struct {
	stmtNode
	// Name is the prepared statement name.
	Name string
	// SQLText is the literal statement text, when given directly.
	SQLText string
	// SQLVar holds the statement text when it comes from a user variable.
	SQLVar *VariableExpr
}

// Accept implements Node Accept interface.
// Only SQLVar is a child node; it is visited when present.
func (n *PrepareStmt) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*PrepareStmt)
	if n.SQLVar != nil {
		node, ok := n.SQLVar.Accept(v)
		if !ok {
			return n, false
		}
		n.SQLVar = node.(*VariableExpr)
	}
	return v.Leave(n)
}

// DeallocateStmt is a statement to release PreparedStmt.
// See: https://dev.mysql.com/doc/refman/5.7/en/deallocate-prepare.html
type DeallocateStmt struct {
	stmtNode
	// Name is the prepared statement name to release.
	Name string
}

// Accept implements Node Accept interface.
func (n *DeallocateStmt) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*DeallocateStmt)
	return v.Leave(n)
}
// ExecuteStmt is a statement to execute PreparedStmt.
// See: https://dev.mysql.com/doc/refman/5.7/en/execute.html
type ExecuteStmt struct {
	stmtNode
	// Name is the prepared statement name.
	Name string
	// UsingVars are the expressions bound to the statement's placeholders.
	UsingVars []ExprNode
}

// Accept implements Node Accept interface.
// Each USING expression is visited in order.
func (n *ExecuteStmt) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*ExecuteStmt)
	for i, val := range n.UsingVars {
		node, ok := val.Accept(v)
		if !ok {
			return n, false
		}
		n.UsingVars[i] = node.(ExprNode)
	}
	return v.Leave(n)
}

// BeginStmt is a statement to start a new transaction.
// See: https://dev.mysql.com/doc/refman/5.7/en/commit.html
type BeginStmt struct {
	stmtNode
}

// Accept implements Node Accept interface.
func (n *BeginStmt) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*BeginStmt)
	return v.Leave(n)
}
// CommitStmt is a statement to commit the current transaction.
// See: https://dev.mysql.com/doc/refman/5.7/en/commit.html
type CommitStmt struct {
	stmtNode
}

// Accept implements Node Accept interface.
func (n *CommitStmt) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*CommitStmt)
	return v.Leave(n)
}

// RollbackStmt is a statement to roll back the current transaction.
// See: https://dev.mysql.com/doc/refman/5.7/en/commit.html
type RollbackStmt struct {
	stmtNode
}

// Accept implements Node Accept interface.
func (n *RollbackStmt) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*RollbackStmt)
	return v.Leave(n)
}

// UseStmt is a statement to use the DBName database as the current database.
// See: https://dev.mysql.com/doc/refman/5.7/en/use.html
type UseStmt struct {
	stmtNode
	// DBName is the database to switch to.
	DBName string
}

// Accept implements Node Accept interface.
func (n *UseStmt) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*UseStmt)
	return v.Leave(n)
}
// VariableAssignment is a variable assignment struct.
type VariableAssignment struct {
	node
	// Name is the variable name.
	Name string
	// Value is the assigned expression.
	Value ExprNode
	// IsGlobal indicates a GLOBAL-scoped assignment.
	IsGlobal bool
	// IsSystem indicates a system (as opposed to user) variable.
	IsSystem bool
}

// Accept implements Node interface.
func (n *VariableAssignment) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*VariableAssignment)
	node, ok := n.Value.Accept(v)
	if !ok {
		return n, false
	}
	n.Value = node.(ExprNode)
	return v.Leave(n)
}

// SetStmt is the statement to set variables.
type SetStmt struct {
	stmtNode
	// Variables is the list of variable assignment.
	Variables []*VariableAssignment
}

// Accept implements Node Accept interface.
// Each assignment is visited in order.
func (n *SetStmt) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*SetStmt)
	for i, val := range n.Variables {
		node, ok := val.Accept(v)
		if !ok {
			return n, false
		}
		n.Variables[i] = node.(*VariableAssignment)
	}
	return v.Leave(n)
}
// SetCharsetStmt is a statement to assign values to character and collation variables.
// See: https://dev.mysql.com/doc/refman/5.7/en/set-statement.html
type SetCharsetStmt struct {
	stmtNode
	// Charset is the character set name.
	Charset string
	// Collate is the collation name.
	Collate string
}

// Accept implements Node Accept interface.
func (n *SetCharsetStmt) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*SetCharsetStmt)
	return v.Leave(n)
}

// SetPwdStmt is a statement to assign a password to user account.
// See: https://dev.mysql.com/doc/refman/5.7/en/set-password.html
type SetPwdStmt struct {
	stmtNode
	// User is the account whose password is changed.
	User string
	// Password is the new password.
	Password string
}

// Accept implements Node Accept interface.
func (n *SetPwdStmt) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*SetPwdStmt)
	return v.Leave(n)
}

// UserSpec is used for parsing create user statement.
type UserSpec struct {
	// User is the account name.
	User string
	// AuthOpt is the optional IDENTIFIED BY clause.
	AuthOpt *AuthOption
}
// CreateUserStmt creates user account.
// See: https://dev.mysql.com/doc/refman/5.7/en/create-user.html
type CreateUserStmt struct {
	stmtNode
	// IfNotExists suppresses the error for pre-existing accounts.
	IfNotExists bool
	// Specs are the accounts to create.
	Specs []*UserSpec
}

// Accept implements Node Accept interface.
// Specs are plain structs, not Nodes, so there are no children to visit.
func (n *CreateUserStmt) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*CreateUserStmt)
	return v.Leave(n)
}

// DoStmt is the struct for DO statement.
type DoStmt struct {
	stmtNode
	// Exprs are the expressions to evaluate (results are discarded by DO).
	Exprs []ExprNode
}

// Accept implements Node Accept interface.
// Each expression is visited in order.
func (n *DoStmt) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*DoStmt)
	for i, val := range n.Exprs {
		node, ok := val.Accept(v)
		if !ok {
			return n, false
		}
		n.Exprs[i] = node.(ExprNode)
	}
	return v.Leave(n)
}
// AdminStmtType is the type for admin statement.
type AdminStmtType int

// Admin statement types.
const (
	AdminShowDDL = iota + 1
	AdminCheckTable
)

// AdminStmt is the struct for Admin statement.
type AdminStmt struct {
	stmtNode
	// Tp selects the admin operation.
	Tp AdminStmtType
	// Tables are the tables operated on (used by AdminCheckTable).
	Tables []*TableName
}

// Accept implements Node Accept interface.
// Each table name is visited in order.
func (n *AdminStmt) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*AdminStmt)
	for i, val := range n.Tables {
		node, ok := val.Accept(v)
		if !ok {
			return n, false
		}
		n.Tables[i] = node.(*TableName)
	}
	return v.Leave(n)
}
// PrivElem is the privilege type and optional column list.
type PrivElem struct {
	node
	// Priv is the granted privilege.
	Priv mysql.PrivilegeType
	// Cols restricts the privilege to specific columns, when non-empty.
	Cols []*ColumnName
}

// Accept implements Node Accept interface.
// Each column name is visited in order.
func (n *PrivElem) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*PrivElem)
	for i, val := range n.Cols {
		node, ok := val.Accept(v)
		if !ok {
			return n, false
		}
		n.Cols[i] = node.(*ColumnName)
	}
	return v.Leave(n)
}

// ObjectTypeType is the type for object type.
type ObjectTypeType int

const (
	// ObjectTypeNone is for empty object type.
	ObjectTypeNone ObjectTypeType = iota + 1
	// ObjectTypeTable means the following object is a table.
	ObjectTypeTable
)

// GrantLevelType is the type for grant level.
type GrantLevelType int

const (
	// GrantLevelNone is the dummy const for default value.
	GrantLevelNone GrantLevelType = iota + 1
	// GrantLevelGlobal means the privileges are administrative or apply to all databases on a given server.
	GrantLevelGlobal
	// GrantLevelDB means the privileges apply to all objects in a given database.
	GrantLevelDB
	// GrantLevelTable means the privileges apply to all columns in a given table.
	GrantLevelTable
)

// GrantLevel is used for store the privilege scope.
type GrantLevel struct {
	Level     GrantLevelType
	DBName    string
	TableName string
}
// GrantStmt is the struct for GRANT statement.
type GrantStmt struct {
	stmtNode
	// Privs are the privileges being granted.
	Privs []*PrivElem
	// ObjectType is the optional object type clause.
	ObjectType ObjectTypeType
	// Level is the scope the grant applies to.
	Level *GrantLevel
	// Users are the grantee accounts.
	Users []*UserSpec
}

// Accept implements Node Accept interface.
// Only Privs are Node children; Level and Users are plain structs.
func (n *GrantStmt) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*GrantStmt)
	for i, val := range n.Privs {
		node, ok := val.Accept(v)
		if !ok {
			return n, false
		}
		n.Privs[i] = node.(*PrivElem)
	}
	return v.Leave(n)
}
// Ident is the table identifier composed of schema name and table name.
type Ident struct {
	Schema model.CIStr
	Name   model.CIStr
}

// Full returns an Ident which set schema to the current schema if it is empty.
func (i Ident) Full(ctx context.Context) (full Ident) {
	full.Name = i.Name
	if i.Schema.O != "" {
		full.Schema = i.Schema
	} else {
		// Fall back to the session's current schema.
		full.Schema = model.NewCIStr(db.GetCurrentSchema(ctx))
	}
	return
}

// String implements fmt.Stringer interface
// It renders "name" when the schema is empty, otherwise "schema.name".
func (i Ident) String() string {
	if i.Schema.O == "" {
		return i.Name.O
	}
	return fmt.Sprintf("%s.%s", i.Schema, i.Name)
}

57
vendor/github.com/pingcap/tidb/ast/stringer.go generated vendored Normal file
View file

@ -0,0 +1,57 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package ast
import (
"fmt"
"github.com/pingcap/tidb/util/types"
)
// ToString converts a node to a string for debugging purpose. It walks the
// node with a stringer visitor and returns the string recorded for the root.
func ToString(node Node) string {
	v := &stringer{strMap: make(map[Node]string)}
	node.Accept(v)
	return v.strMap[node]
}
// stringer is a Visitor that builds a debug string bottom-up: each Leave
// records the string form of a node in strMap from its children's entries.
type stringer struct {
	// strMap maps every visited node to its string representation.
	strMap map[Node]string
}

// Enter implements Visitor Enter interface. It never skips children; all
// the work happens in Leave.
func (c *stringer) Enter(node Node) (Node, bool) {
	return node, false
}
// Leave implements Visitor Leave interface. It composes the string form of
// the visited node from the strings already recorded for its children and
// stores the result under the node itself. Unknown node kinds fall back to
// their Go type name.
func (c *stringer) Leave(in Node) (out Node, ok bool) {
	switch x := in.(type) {
	case *BinaryOperationExpr:
		left := c.strMap[x.L]
		right := c.strMap[x.R]
		c.strMap[x] = left + " " + x.Op.String() + " " + right
	case *ValueExpr:
		str, _ := types.ToString(x.GetValue())
		c.strMap[x] = str
	case *ParenthesesExpr:
		c.strMap[x] = "(" + c.strMap[x.Expr] + ")"
	case *ColumnNameExpr:
		c.strMap[x] = x.Name.Table.O + "." + x.Name.Name.O
	case *BetweenExpr:
		// Fix: keyword was misspelled "BETWWEN" in the generated string.
		c.strMap[x] = c.strMap[x.Expr] + " BETWEEN " + c.strMap[x.Left] + " AND " + c.strMap[x.Right]
	default:
		c.strMap[in] = fmt.Sprintf("%T", in)
	}
	return in, true
}

218
vendor/github.com/pingcap/tidb/bootstrap.go generated vendored Normal file
View file

@ -0,0 +1,218 @@
// Copyright 2013 The ql Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSES/QL-LICENSE file.
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package tidb
import (
"fmt"
"runtime/debug"
"strings"
"github.com/juju/errors"
"github.com/ngaut/log"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/mysql"
"github.com/pingcap/tidb/sessionctx/variable"
)
// SQL statements executed during bootstrap to create the system tables.
// The schemas loosely mirror MySQL's mysql.* privilege tables.
const (
	// CreateUserTable is the SQL statement creates User table in system db.
	CreateUserTable = `CREATE TABLE if not exists mysql.user (
		Host			CHAR(64),
		User			CHAR(16),
		Password		CHAR(41),
		Select_priv		ENUM('N','Y') NOT NULL  DEFAULT 'N',
		Insert_priv		ENUM('N','Y') NOT NULL  DEFAULT 'N',
		Update_priv		ENUM('N','Y') NOT NULL  DEFAULT 'N',
		Delete_priv		ENUM('N','Y') NOT NULL  DEFAULT 'N',
		Create_priv		ENUM('N','Y') NOT NULL  DEFAULT 'N',
		Drop_priv		ENUM('N','Y') NOT NULL  DEFAULT 'N',
		Grant_priv		ENUM('N','Y') NOT NULL  DEFAULT 'N',
		Alter_priv		ENUM('N','Y') NOT NULL  DEFAULT 'N',
		Show_db_priv		ENUM('N','Y') NOT NULL  DEFAULT 'N',
		Execute_priv		ENUM('N','Y') NOT NULL  DEFAULT 'N',
		Index_priv		ENUM('N','Y') NOT NULL  DEFAULT 'N',
		Create_user_priv	ENUM('N','Y') NOT NULL  DEFAULT 'N',
		PRIMARY KEY (Host, User));`
	// CreateDBPrivTable is the SQL statement creates DB scope privilege table in system db.
	CreateDBPrivTable = `CREATE TABLE if not exists mysql.db (
		Host		CHAR(60),
		DB		CHAR(64),
		User		CHAR(16),
		Select_priv	ENUM('N','Y') Not Null DEFAULT 'N',
		Insert_priv	ENUM('N','Y') Not Null DEFAULT 'N',
		Update_priv	ENUM('N','Y') Not Null DEFAULT 'N',
		Delete_priv	ENUM('N','Y') Not Null DEFAULT 'N',
		Create_priv	ENUM('N','Y') Not Null DEFAULT 'N',
		Drop_priv	ENUM('N','Y') Not Null DEFAULT 'N',
		Grant_priv	ENUM('N','Y') Not Null DEFAULT 'N',
		Index_priv	ENUM('N','Y') Not Null DEFAULT 'N',
		Alter_priv	ENUM('N','Y') Not Null DEFAULT 'N',
		Execute_priv	ENUM('N','Y') Not Null DEFAULT 'N',
		PRIMARY KEY (Host, DB, User));`
	// CreateTablePrivTable is the SQL statement creates table scope privilege table in system db.
	CreateTablePrivTable = `CREATE TABLE if not exists mysql.tables_priv (
		Host		CHAR(60),
		DB		CHAR(64),
		User		CHAR(16),
		Table_name	CHAR(64),
		Grantor		CHAR(77),
		Timestamp	Timestamp DEFAULT CURRENT_TIMESTAMP,
		Table_priv	SET('Select','Insert','Update','Delete','Create','Drop','Grant', 'Index','Alter'),
		Column_priv	SET('Select','Insert','Update'),
		PRIMARY KEY (Host, DB, User, Table_name));`
	// CreateColumnPrivTable is the SQL statement creates column scope privilege table in system db.
	CreateColumnPrivTable = `CREATE TABLE if not exists mysql.columns_priv(
		Host		CHAR(60),
		DB		CHAR(64),
		User		CHAR(16),
		Table_name	CHAR(64),
		Column_name	CHAR(64),
		Timestamp	Timestamp DEFAULT CURRENT_TIMESTAMP,
		Column_priv	SET('Select','Insert','Update'),
		PRIMARY KEY (Host, DB, User, Table_name, Column_name));`
	// CreateGloablVariablesTable is the SQL statement creates global variable table in system db.
	// NOTE: the exported name misspells "Global"; it is kept for
	// backward compatibility with existing callers.
	// TODO: MySQL puts GLOBAL_VARIABLES table in INFORMATION_SCHEMA db.
	// INFORMATION_SCHEMA is a virtual db in TiDB. So we put this table in system db.
	// Maybe we will put it back to INFORMATION_SCHEMA.
	CreateGloablVariablesTable = `CREATE TABLE if not exists mysql.GLOBAL_VARIABLES(
		VARIABLE_NAME  VARCHAR(64) Not Null PRIMARY KEY,
		VARIABLE_VALUE VARCHAR(1024) DEFAULT Null);`
	// CreateTiDBTable is the SQL statement creates a table in system db.
	// This table is a key-value struct contains some information used by TiDB.
	// Currently we only put bootstrapped in it which indicates if the system is already bootstrapped.
	CreateTiDBTable = `CREATE TABLE if not exists mysql.tidb(
		VARIABLE_NAME  VARCHAR(64) Not Null PRIMARY KEY,
		VARIABLE_VALUE VARCHAR(1024) DEFAULT Null,
		COMMENT VARCHAR(1024));`
)
// bootstrap initiates the system DB for a store. It is a no-op when the
// store has already been bootstrapped; a failed check aborts the process.
func bootstrap(s Session) {
	alreadyDone, err := checkBootstrapped(s)
	if err != nil {
		log.Fatal(err)
	}
	if !alreadyDone {
		doDDLWorks(s)
		doDMLWorks(s)
	}
}
// Key and sentinel value of the bootstrapped flag row stored in mysql.tidb.
const (
	bootstrappedVar     = "bootstrapped"
	bootstrappedVarTrue = "True"
)
// checkBootstrapped reports whether the store was already bootstrapped.
// A failure to switch to the system DB (other than it simply not existing
// yet) aborts the process.
func checkBootstrapped(s Session) (bool, error) {
	// Check if system db exists.
	if _, err := s.Execute(fmt.Sprintf("USE %s;", mysql.SystemDB)); err != nil && infoschema.DatabaseNotExists.NotEqual(err) {
		log.Fatal(err)
	}
	// Check bootstrapped variable value in TiDB table.
	bootstrapped, err := checkBootstrappedVar(s)
	if err != nil {
		return false, errors.Trace(err)
	}
	return bootstrapped, nil
}
// checkBootstrappedVar reads the bootstrapped flag row from mysql.tidb.
// A missing table means the store is not bootstrapped yet (false, nil);
// any other failure is returned to the caller.
func checkBootstrappedVar(s Session) (bool, error) {
	sql := fmt.Sprintf(`SELECT VARIABLE_VALUE FROM %s.%s WHERE VARIABLE_NAME="%s"`,
		mysql.SystemDB, mysql.TiDBTable, bootstrappedVar)
	rs, err := s.Execute(sql)
	if err != nil {
		if infoschema.TableNotExists.Equal(err) {
			// Table absent: bootstrap has never run.
			return false, nil
		}
		return false, errors.Trace(err)
	}
	if len(rs) != 1 {
		return false, errors.New("Wrong number of Recordset")
	}
	r := rs[0]
	// row == nil (no matching row) also means not bootstrapped; err is nil
	// in that case, so Trace(nil) returns nil.
	row, err := r.Next()
	if err != nil || row == nil {
		return false, errors.Trace(err)
	}
	isBootstrapped := row.Data[0].GetString() == bootstrappedVarTrue
	if isBootstrapped {
		// Make sure that doesn't affect the following operations.
		if err = s.FinishTxn(false); err != nil {
			return false, errors.Trace(err)
		}
	}
	return isBootstrapped, nil
}
// doDDLWorks executes the DDL statements of the bootstrap stage: it creates
// the test database, the system database and every system table.
func doDDLWorks(s Session) {
	// Create a test database.
	mustExecute(s, "CREATE DATABASE IF NOT EXISTS test")
	// Create system db.
	mustExecute(s, fmt.Sprintf("CREATE DATABASE IF NOT EXISTS %s;", mysql.SystemDB))
	// Create user table.
	mustExecute(s, CreateUserTable)
	// Create privilege tables.
	mustExecute(s, CreateDBPrivTable)
	mustExecute(s, CreateTablePrivTable)
	mustExecute(s, CreateColumnPrivTable)
	// Create global system variable table.
	mustExecute(s, CreateGloablVariablesTable)
	// Create TiDB table.
	mustExecute(s, CreateTiDBTable)
}
// doDMLWorks executes the DML statements of the bootstrap stage. All of
// the statements run in a single transaction.
func doDMLWorks(s Session) {
	mustExecute(s, "BEGIN")
	// Seed the default root user with an empty password and all privileges.
	mustExecute(s, `INSERT INTO mysql.user VALUES
		("%", "root", "", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y")`)
	// Populate the global system variables table from the defaults.
	rows := make([]string, 0, len(variable.SysVars))
	for name, sysVar := range variable.SysVars {
		rows = append(rows, fmt.Sprintf(`("%s", "%s")`, strings.ToLower(name), sysVar.Value))
	}
	mustExecute(s, fmt.Sprintf("INSERT INTO %s.%s VALUES %s;",
		mysql.SystemDB, mysql.GlobalVariablesTable, strings.Join(rows, ", ")))
	// Record the bootstrapped flag so later startups skip this stage.
	mustExecute(s, fmt.Sprintf(`INSERT INTO %s.%s VALUES("%s", "%s", "Bootstrap flag. Do not delete.")
		ON DUPLICATE KEY UPDATE VARIABLE_VALUE="%s"`,
		mysql.SystemDB, mysql.TiDBTable, bootstrappedVar, bootstrappedVarTrue, bootstrappedVarTrue))
	mustExecute(s, "COMMIT")
}
// mustExecute runs sql on s and aborts the whole process (after dumping a
// stack trace) when execution fails; bootstrap cannot proceed past errors.
func mustExecute(s Session, sql string) {
	if _, err := s.Execute(sql); err != nil {
		debug.PrintStack()
		log.Fatal(err)
	}
}

232
vendor/github.com/pingcap/tidb/column/column.go generated vendored Normal file
View file

@ -0,0 +1,232 @@
// Copyright 2013 The ql Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSES/QL-LICENSE file.
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package column
import (
"strings"
"github.com/juju/errors"
"github.com/pingcap/tidb/context"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/model"
"github.com/pingcap/tidb/mysql"
"github.com/pingcap/tidb/util/types"
)
// Col provides meta data describing a table column. It embeds the model
// column info so all of its fields are directly accessible.
type Col struct {
	model.ColumnInfo
}

// PrimaryKeyName defines primary key name.
const PrimaryKeyName = "PRIMARY"

// IndexedCol defines an index with info. X is the underlying kv index
// implementation used to read/write index entries.
type IndexedCol struct {
	model.IndexInfo
	X kv.Index
}
// String implements the fmt.Stringer interface: column name, type, and any
// AUTO_INCREMENT / NOT NULL attributes, space separated.
func (c *Col) String() string {
	parts := []string{c.Name.O, types.TypeToStr(c.Tp, c.Charset)}
	if mysql.HasAutoIncrementFlag(c.Flag) {
		parts = append(parts, "AUTO_INCREMENT")
	}
	if mysql.HasNotNullFlag(c.Flag) {
		parts = append(parts, "NOT NULL")
	}
	return strings.Join(parts, " ")
}
// FindCol finds the column named name in cols (case-insensitive).
// It returns nil when no column matches.
func FindCol(cols []*Col, name string) *Col {
	for _, candidate := range cols {
		if strings.EqualFold(candidate.Name.O, name) {
			return candidate
		}
	}
	return nil
}
// FindCols resolves each name in names to a column in cols, failing on the
// first name that does not match any column.
func FindCols(cols []*Col, names []string) ([]*Col, error) {
	var found []*Col
	for _, name := range names {
		col := FindCol(cols, name)
		if col == nil {
			return nil, errors.Errorf("unknown column %s", name)
		}
		found = append(found, col)
	}
	return found, nil
}
// FindOnUpdateCols returns the subset of cols carrying the OnUpdateNow flag
// (ON UPDATE CURRENT_TIMESTAMP columns).
func FindOnUpdateCols(cols []*Col) []*Col {
	var onUpdate []*Col
	for _, col := range cols {
		if !mysql.HasOnUpdateNowFlag(col.Flag) {
			continue
		}
		onUpdate = append(onUpdate, col)
	}
	return onUpdate
}
// CastValues converts, in place, each datum of rec addressed by a column's
// Offset to that column's field type. The first conversion failure aborts
// the loop and is returned.
func CastValues(ctx context.Context, rec []types.Datum, cols []*Col) (err error) {
	for _, col := range cols {
		casted, convErr := rec[col.Offset].ConvertTo(&col.FieldType)
		if convErr != nil {
			return errors.Trace(convErr)
		}
		rec[col.Offset] = casted
	}
	return nil
}
// ColDesc describes column information like MySQL desc and show columns do.
type ColDesc struct {
	Field string
	Type  string
	// Collation of the column's charset.
	Collation string
	// Null is "YES" or "NO".
	Null string
	// Key is "PRI", "UNI", "MUL" or empty.
	Key          string
	DefaultValue interface{}
	// Extra holds attributes such as "auto_increment".
	Extra      string
	Privileges string
	Comment    string
}

// defaultPrivileges is the fixed privilege list reported for every column,
// matching MySQL's SHOW FULL COLUMNS output.
const defaultPrivileges string = "select,insert,update,references"
// GetTypeDesc gets the description for column type, appending " UNSIGNED"
// when the unsigned flag is set.
func (c *Col) GetTypeDesc() string {
	if mysql.HasUnsignedFlag(c.Flag) {
		return c.FieldType.CompactStr() + " UNSIGNED"
	}
	return c.FieldType.CompactStr()
}
// NewColDesc returns a new ColDesc describing col the way MySQL's
// SHOW COLUMNS does.
func NewColDesc(col *Col) *ColDesc {
	// TODO: if there is no primary key and a unique index whose columns are
	// all NOT NULL, those columns should get PriKeyFlag.
	// See https://dev.mysql.com/doc/refman/5.7/en/show-columns.html
	nullable := "YES"
	if mysql.HasNotNullFlag(col.Flag) {
		nullable = "NO"
	}
	var key string
	switch {
	case mysql.HasPriKeyFlag(col.Flag):
		key = "PRI"
	case mysql.HasUniKeyFlag(col.Flag):
		key = "UNI"
	case mysql.HasMultipleKeyFlag(col.Flag):
		key = "MUL"
	}
	var def interface{}
	if !mysql.HasNoDefaultValueFlag(col.Flag) {
		def = col.DefaultValue
	}
	var extra string
	switch {
	case mysql.HasAutoIncrementFlag(col.Flag):
		extra = "auto_increment"
	case mysql.HasOnUpdateNowFlag(col.Flag):
		extra = "on update CURRENT_TIMESTAMP"
	}
	return &ColDesc{
		Field:        col.Name.O,
		Type:         col.GetTypeDesc(),
		Collation:    col.Collate,
		Null:         nullable,
		Key:          key,
		DefaultValue: def,
		Extra:        extra,
		Privileges:   defaultPrivileges,
		Comment:      "",
	}
}
// ColDescFieldNames returns the result-set field names for DESC and
// SHOW COLUMNS; full selects the extended (SHOW FULL COLUMNS) list.
func ColDescFieldNames(full bool) []string {
	if !full {
		return []string{"Field", "Type", "Null", "Key", "Default", "Extra"}
	}
	return []string{"Field", "Type", "Collation", "Null", "Key", "Default", "Extra", "Privileges", "Comment"}
}
// CheckOnce returns an error when cols contains a duplicated column name;
// comparison uses the lowercase form, so it is case-insensitive.
func CheckOnce(cols []*Col) error {
	seen := make(map[string]struct{}, len(cols))
	for _, col := range cols {
		if _, dup := seen[col.Name.L]; dup {
			return errors.Errorf("column specified twice - %s", col.Name)
		}
		seen[col.Name.L] = struct{}{}
	}
	return nil
}
// CheckNotNull rejects a NULL datum when the column carries the NOT NULL
// flag; any other combination is accepted.
func (c *Col) CheckNotNull(data types.Datum) error {
	if data.Kind() != types.KindNull || !mysql.HasNotNullFlag(c.Flag) {
		return nil
	}
	return errors.Errorf("Column %s can't be null.", c.Name)
}
// IsPKHandleColumn checks if the column is the primary key handle column,
// i.e. it has the primary-key flag and the table uses the PK as row handle.
func (c *Col) IsPKHandleColumn(tbInfo *model.TableInfo) bool {
	return mysql.HasPriKeyFlag(c.Flag) && tbInfo.PKIsHandle
}
// CheckNotNull validates every NOT NULL column of row, indexing the row by
// each column's Offset; the first violation is returned.
func CheckNotNull(cols []*Col, row []types.Datum) error {
	for _, col := range cols {
		if nullErr := col.CheckNotNull(row[col.Offset]); nullErr != nil {
			return errors.Trace(nullErr)
		}
	}
	return nil
}
// FetchValues fetches the indexed column values from row r.
// It returns an error when any index column's offset falls outside r.
func (idx *IndexedCol) FetchValues(r []types.Datum) ([]types.Datum, error) {
	vals := make([]types.Datum, len(idx.Columns))
	for i, ic := range idx.Columns {
		// Fix: the bound check used `>`, letting Offset == len(r) through
		// and panicking on the index below; `>=` rejects it.
		if ic.Offset < 0 || ic.Offset >= len(r) {
			return nil, errors.New("Index column offset out of bound")
		}
		vals[i] = r[ic.Offset]
	}
	return vals, nil
}

38
vendor/github.com/pingcap/tidb/context/context.go generated vendored Normal file
View file

@ -0,0 +1,38 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package context
import (
"fmt"
"github.com/pingcap/tidb/kv"
)
// Context is an interface for transaction and executive args environment.
// It combines transaction lifecycle control with a typed key/value store
// scoped to the session.
type Context interface {
	// GetTxn gets a transaction for futher execution; forceNew requests a
	// fresh transaction.
	GetTxn(forceNew bool) (kv.Transaction, error)

	// FinishTxn commits or rolls back the current transaction depending on
	// rollback.
	FinishTxn(rollback bool) error

	// SetValue saves a value associated with this context for key.
	SetValue(key fmt.Stringer, value interface{})

	// Value returns the value associated with this context for key.
	Value(key fmt.Stringer) interface{}

	// ClearValue clears the value associated with this context for key.
	ClearValue(key fmt.Stringer)
}

178
vendor/github.com/pingcap/tidb/ddl/bg_worker.go generated vendored Normal file
View file

@ -0,0 +1,178 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package ddl
import (
"time"
"github.com/juju/errors"
"github.com/ngaut/log"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/meta"
"github.com/pingcap/tidb/model"
"github.com/pingcap/tidb/terror"
)
// handleBgJobQueue handles the background job queue: inside one
// transaction it takes ownership, pops the first background job, runs it,
// and either finishes or re-queues it. Non-owners return silently.
func (d *ddl) handleBgJobQueue() error {
	if d.isClosed() {
		return nil
	}
	// NOTE: job is reassigned inside the closure below; it is not read
	// after RunInNewTxn returns.
	job := &model.Job{}
	err := kv.RunInNewTxn(d.store, false, func(txn kv.Transaction) error {
		t := meta.NewMeta(txn)
		// Only the background-job owner may process the queue.
		owner, err := d.checkOwner(t, bgJobFlag)
		if terror.ErrorEqual(err, ErrNotOwner) {
			return nil
		}
		if err != nil {
			return errors.Trace(err)
		}
		// get the first background job and run
		job, err = d.getFirstBgJob(t)
		if err != nil {
			return errors.Trace(err)
		}
		if job == nil {
			// Queue is empty.
			return nil
		}
		d.runBgJob(t, job)
		if job.IsFinished() {
			err = d.finishBgJob(t, job)
		} else {
			err = d.updateBgJob(t, job)
		}
		if err != nil {
			return errors.Trace(err)
		}
		// Refresh the owner's heartbeat so it is not considered timed out.
		owner.LastUpdateTS = time.Now().UnixNano()
		err = t.SetBgJobOwner(owner)
		return errors.Trace(err)
	})
	if err != nil {
		return errors.Trace(err)
	}
	return nil
}
// runBgJob runs a background job, dispatching on its type. Failures are
// recorded on the job itself (Error / ErrorCount); an unknown type cancels
// the job.
func (d *ddl) runBgJob(t *meta.Meta, job *model.Job) {
	job.State = model.JobRunning
	var err error
	switch job.Type {
	case model.ActionDropSchema:
		err = d.delReorgSchema(t, job)
	case model.ActionDropTable:
		err = d.delReorgTable(t, job)
	default:
		job.State = model.JobCancelled
		err = errors.Errorf("invalid background job %v", job)
	}
	if err == nil {
		return
	}
	if job.State != model.JobCancelled {
		log.Errorf("run background job err %v", errors.ErrorStack(err))
	}
	job.Error = err.Error()
	job.ErrorCount++
}
// prepareBgJob derives a background job from ddlJob (sharing its ID and
// arguments) and enqueues it in its own transaction.
func (d *ddl) prepareBgJob(ddlJob *model.Job) error {
	bgJob := &model.Job{
		ID:       ddlJob.ID,
		SchemaID: ddlJob.SchemaID,
		TableID:  ddlJob.TableID,
		Type:     ddlJob.Type,
		Args:     ddlJob.Args,
	}
	err := kv.RunInNewTxn(d.store, true, func(txn kv.Transaction) error {
		return errors.Trace(meta.NewMeta(txn).EnQueueBgJob(bgJob))
	})
	return errors.Trace(err)
}
// startBgJob wakes the background worker for the job types that need
// background reorganization (schema/table drops); other types are ignored.
func (d *ddl) startBgJob(tp model.ActionType) {
	if tp == model.ActionDropSchema || tp == model.ActionDropTable {
		asyncNotify(d.bgJobCh)
	}
}
// getFirstBgJob gets the first background job in the queue (index 0),
// or nil when the queue is empty.
func (d *ddl) getFirstBgJob(t *meta.Meta) (*model.Job, error) {
	job, err := t.GetBgJob(0)
	return job, errors.Trace(err)
}

// updateBgJob writes job back at the head of the queue (index 0) so it can
// be resumed later.
func (d *ddl) updateBgJob(t *meta.Meta, job *model.Job) error {
	err := t.UpdateBgJob(0, job)
	return errors.Trace(err)
}
// finishBgJob finishes a background job: it is dequeued and archived into
// the background-job history.
func (d *ddl) finishBgJob(t *meta.Meta, job *model.Job) error {
	log.Warnf("[ddl] finish background job %v", job)
	_, err := t.DeQueueBgJob()
	if err != nil {
		return errors.Trace(err)
	}
	return errors.Trace(t.AddHistoryBgJob(job))
}
// onBackgroundWorker is the background worker goroutine's main loop. It
// processes the background job queue whenever it is notified via bgJobCh
// or a periodic tick fires, and exits when quitCh closes.
func (d *ddl) onBackgroundWorker() {
	defer d.wait.Done()
	// we use 4 * lease time to check owner's timeout, so here, we will update owner's status
	// every 2 * lease time, if lease is 0, we will use default 10s.
	checkTime := chooseLeaseTime(2*d.lease, 10*time.Second)
	ticker := time.NewTicker(checkTime)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			log.Debugf("[ddl] wait %s to check background job status again", checkTime)
		case <-d.bgJobCh:
			// Explicit wake-up from startBgJob.
		case <-d.quitCh:
			return
		}
		// Errors are logged, not fatal: the loop retries on the next tick.
		err := d.handleBgJobQueue()
		if err != nil {
			log.Errorf("[ddl] handle background job err %v", errors.ErrorStack(err))
		}
	}
}

45
vendor/github.com/pingcap/tidb/ddl/callback.go generated vendored Normal file
View file

@ -0,0 +1,45 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package ddl
import "github.com/pingcap/tidb/model"
// Callback is the interface supporting callback function when DDL changed.
type Callback interface {
	// OnChanged is called after schema is changed; err is the outcome of
	// the change and the returned error replaces it.
	OnChanged(err error) error
	// OnJobRunBefore is called before running job.
	OnJobRunBefore(job *model.Job)
	// OnJobUpdated is called after the running job is updated.
	OnJobUpdated(job *model.Job)
}

// BaseCallback is a no-op implementation of Callback, suitable for
// embedding by callbacks that only care about some of the hooks.
type BaseCallback struct {
}

// OnChanged implements Callback interface; it passes err through unchanged.
func (c *BaseCallback) OnChanged(err error) error {
	return err
}

// OnJobRunBefore implements Callback.OnJobRunBefore interface.
func (c *BaseCallback) OnJobRunBefore(job *model.Job) {
	// Nothing to do.
}

// OnJobUpdated implements Callback.OnJobUpdated interface.
func (c *BaseCallback) OnJobUpdated(job *model.Job) {
	// Nothing to do.
}

430
vendor/github.com/pingcap/tidb/ddl/column.go generated vendored Normal file
View file

@ -0,0 +1,430 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package ddl
import (
"github.com/juju/errors"
"github.com/ngaut/log"
"github.com/pingcap/tidb/ast"
"github.com/pingcap/tidb/column"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/meta"
"github.com/pingcap/tidb/model"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/table/tables"
"github.com/pingcap/tidb/terror"
)
// adjustColumnOffset renumbers column offsets after the column at `offset`
// was added (added=true) or is being dropped (added=false), and rewrites
// any index column offsets affected by the renumbering.
func (d *ddl) adjustColumnOffset(columns []*model.ColumnInfo, indices []*model.IndexInfo, offset int, added bool) {
	// offsetChanged maps old offset -> new offset for every moved column.
	offsetChanged := make(map[int]int)
	if added {
		// Columns after the insertion point shift right by one; the new
		// column takes `offset` itself.
		for i := offset + 1; i < len(columns); i++ {
			offsetChanged[columns[i].Offset] = i
			columns[i].Offset = i
		}
		columns[offset].Offset = offset
	} else {
		// Columns after the dropped one shift left; the dropped column is
		// parked at the last offset until it is physically removed.
		for i := offset + 1; i < len(columns); i++ {
			offsetChanged[columns[i].Offset] = i - 1
			columns[i].Offset = i - 1
		}
		columns[offset].Offset = len(columns) - 1
	}
	// TODO: index can't cover the add/remove column with offset now, we may check this later.
	// Update index column offset info.
	for _, idx := range indices {
		for _, col := range idx.Columns {
			newOffset, ok := offsetChanged[col.Offset]
			if ok {
				col.Offset = newOffset
			}
		}
	}
}
// addColumn inserts colInfo into tblInfo at the list position requested by
// pos (FIRST / AFTER col / default append) and returns the column plus that
// position. The column's Offset is deliberately set to the end of the row
// so existing rows keep decoding; offsets are adjusted when it goes public.
func (d *ddl) addColumn(tblInfo *model.TableInfo, colInfo *model.ColumnInfo, pos *ast.ColumnPosition) (*model.ColumnInfo, int, error) {
	// Check column name duplicate.
	cols := tblInfo.Columns
	position := len(cols)
	// Get column position.
	if pos.Tp == ast.ColumnPositionFirst {
		position = 0
	} else if pos.Tp == ast.ColumnPositionAfter {
		c := findCol(cols, pos.RelativeColumn.Name.L)
		if c == nil {
			return nil, 0, errors.Errorf("No such column: %v", pos.RelativeColumn)
		}
		// Insert position is after the mentioned column.
		position = c.Offset + 1
	}
	colInfo.State = model.StateNone
	// To support add column asynchronous, we should mark its offset as the last column.
	// So that we can use origin column offset to get value from row.
	colInfo.Offset = len(cols)
	// Insert col into the right place of the column list.
	newCols := make([]*model.ColumnInfo, 0, len(cols)+1)
	newCols = append(newCols, cols[:position]...)
	newCols = append(newCols, colInfo)
	newCols = append(newCols, cols[position:]...)
	tblInfo.Columns = newCols
	return colInfo, position, nil
}
// onAddColumn drives one step of the ADD COLUMN online-schema-change state
// machine: none -> delete only -> write only -> reorganization -> public.
// It is re-entered once per DDL worker round until the job is done.
func (d *ddl) onAddColumn(t *meta.Meta, job *model.Job) error {
	schemaID := job.SchemaID
	tblInfo, err := d.getTableInfo(t, job)
	if err != nil {
		return errors.Trace(err)
	}
	// Decode the column, its requested position and (on re-entry) the
	// already-computed insert position from the job args.
	col := &model.ColumnInfo{}
	pos := &ast.ColumnPosition{}
	offset := 0
	err = job.DecodeArgs(col, pos, &offset)
	if err != nil {
		job.State = model.JobCancelled
		return errors.Trace(err)
	}
	columnInfo := findCol(tblInfo.Columns, col.Name.L)
	if columnInfo != nil {
		if columnInfo.State == model.StatePublic {
			// we already have a column with same column name
			job.State = model.JobCancelled
			return errors.Errorf("ADD COLUMN: column already exist %s", col.Name.L)
		}
	} else {
		// First round: insert the column in a non-public state.
		columnInfo, offset, err = d.addColumn(tblInfo, col, pos)
		if err != nil {
			job.State = model.JobCancelled
			return errors.Trace(err)
		}
		// Set offset arg to job.
		if offset != 0 {
			job.Args = []interface{}{columnInfo, pos, offset}
		}
	}
	_, err = t.GenSchemaVersion()
	if err != nil {
		return errors.Trace(err)
	}
	switch columnInfo.State {
	case model.StateNone:
		// none -> delete only
		job.SchemaState = model.StateDeleteOnly
		columnInfo.State = model.StateDeleteOnly
		err = t.UpdateTable(schemaID, tblInfo)
		return errors.Trace(err)
	case model.StateDeleteOnly:
		// delete only -> write only
		job.SchemaState = model.StateWriteOnly
		columnInfo.State = model.StateWriteOnly
		err = t.UpdateTable(schemaID, tblInfo)
		return errors.Trace(err)
	case model.StateWriteOnly:
		// write only -> reorganization
		job.SchemaState = model.StateWriteReorganization
		columnInfo.State = model.StateWriteReorganization
		// initialize SnapshotVer to 0 for later reorganization check.
		job.SnapshotVer = 0
		err = t.UpdateTable(schemaID, tblInfo)
		return errors.Trace(err)
	case model.StateWriteReorganization:
		// reorganization -> public
		// get the current version for reorganization if we don't have
		reorgInfo, err := d.getReorgInfo(t, job)
		if err != nil || reorgInfo.first {
			// if we run reorg firstly, we should update the job snapshot version
			// and then run the reorg next time.
			return errors.Trace(err)
		}
		tbl, err := d.getTable(schemaID, tblInfo)
		if err != nil {
			return errors.Trace(err)
		}
		// Backfill the new column's value into every existing row.
		err = d.runReorgJob(func() error {
			return d.backfillColumn(tbl, columnInfo, reorgInfo)
		})
		if terror.ErrorEqual(err, errWaitReorgTimeout) {
			// if timeout, we should return, check for the owner and re-wait job done.
			return nil
		}
		if err != nil {
			return errors.Trace(err)
		}
		// Adjust column offset.
		d.adjustColumnOffset(tblInfo.Columns, tblInfo.Indices, offset, true)
		columnInfo.State = model.StatePublic
		if err = t.UpdateTable(schemaID, tblInfo); err != nil {
			return errors.Trace(err)
		}
		// finish this job
		job.SchemaState = model.StatePublic
		job.State = model.JobDone
		return nil
	default:
		return errors.Errorf("invalid column state %v", columnInfo.State)
	}
}
// onDropColumn drives one step of the DROP COLUMN online-schema-change
// state machine: public -> write only -> delete only -> reorganization ->
// absent. It cancels the job for a missing column, the table's last
// column, or a column still covered by an index.
func (d *ddl) onDropColumn(t *meta.Meta, job *model.Job) error {
	schemaID := job.SchemaID
	tblInfo, err := d.getTableInfo(t, job)
	if err != nil {
		return errors.Trace(err)
	}
	var colName model.CIStr
	err = job.DecodeArgs(&colName)
	if err != nil {
		job.State = model.JobCancelled
		return errors.Trace(err)
	}
	colInfo := findCol(tblInfo.Columns, colName.L)
	if colInfo == nil {
		job.State = model.JobCancelled
		return errors.Errorf("column %s doesn't exist", colName)
	}
	if len(tblInfo.Columns) == 1 {
		job.State = model.JobCancelled
		return errors.Errorf("can't drop only column %s in table %s", colName, tblInfo.Name)
	}
	// we don't support drop column with index covered now.
	// we must drop the index first, then drop the column.
	for _, indexInfo := range tblInfo.Indices {
		for _, col := range indexInfo.Columns {
			if col.Name.L == colName.L {
				job.State = model.JobCancelled
				return errors.Errorf("can't drop column %s with index %s covered now", colName, indexInfo.Name)
			}
		}
	}
	_, err = t.GenSchemaVersion()
	if err != nil {
		return errors.Trace(err)
	}
	switch colInfo.State {
	case model.StatePublic:
		// public -> write only
		job.SchemaState = model.StateWriteOnly
		colInfo.State = model.StateWriteOnly
		// set this column's offset to the last and reset all following columns' offset
		d.adjustColumnOffset(tblInfo.Columns, tblInfo.Indices, colInfo.Offset, false)
		err = t.UpdateTable(schemaID, tblInfo)
		return errors.Trace(err)
	case model.StateWriteOnly:
		// write only -> delete only
		job.SchemaState = model.StateDeleteOnly
		colInfo.State = model.StateDeleteOnly
		err = t.UpdateTable(schemaID, tblInfo)
		return errors.Trace(err)
	case model.StateDeleteOnly:
		// delete only -> reorganization
		job.SchemaState = model.StateDeleteReorganization
		colInfo.State = model.StateDeleteReorganization
		// initialize SnapshotVer to 0 for later reorganization check.
		job.SnapshotVer = 0
		err = t.UpdateTable(schemaID, tblInfo)
		return errors.Trace(err)
	case model.StateDeleteReorganization:
		// reorganization -> absent
		reorgInfo, err := d.getReorgInfo(t, job)
		if err != nil || reorgInfo.first {
			// if we run reorg firstly, we should update the job snapshot version
			// and then run the reorg next time.
			return errors.Trace(err)
		}
		tbl, err := d.getTable(schemaID, tblInfo)
		if err != nil {
			return errors.Trace(err)
		}
		// Physically delete the column's data from every row.
		err = d.runReorgJob(func() error {
			return d.dropTableColumn(tbl, colInfo, reorgInfo)
		})
		if terror.ErrorEqual(err, errWaitReorgTimeout) {
			// if timeout, we should return, check for the owner and re-wait job done.
			return nil
		}
		if err != nil {
			return errors.Trace(err)
		}
		// all reorganization jobs done, drop this column
		newColumns := make([]*model.ColumnInfo, 0, len(tblInfo.Columns))
		for _, col := range tblInfo.Columns {
			if col.Name.L != colName.L {
				newColumns = append(newColumns, col)
			}
		}
		tblInfo.Columns = newColumns
		if err = t.UpdateTable(schemaID, tblInfo); err != nil {
			return errors.Trace(err)
		}
		// finish this job
		job.SchemaState = model.StateNone
		job.State = model.JobDone
		return nil
	default:
		return errors.Errorf("invalid table state %v", tblInfo.State)
	}
}
// How to backfill column data in reorganization state?
// 1. Generate a snapshot with special version.
// 2. Traverse the snapshot, get every row in the table.
// 3. For one row, if the row has been already deleted, skip to next row.
// 4. If not deleted, check whether column data has existed, if existed, skip to next row.
// 5. If column data doesn't exist, backfill the column with default value and then continue to handle next row.
//
// backfillColumn walks the table in batches of handles, starting from the
// handle recorded in reorgInfo, and backfills columnInfo's default value
// into each row that lacks it. It returns nil when the scan is exhausted.
func (d *ddl) backfillColumn(t table.Table, columnInfo *model.ColumnInfo, reorgInfo *reorgInfo) error {
	seekHandle := reorgInfo.Handle
	version := reorgInfo.SnapshotVer
	for {
		handles, err := d.getSnapshotRows(t, version, seekHandle)
		if err != nil {
			return errors.Trace(err)
		} else if len(handles) == 0 {
			// No more rows: backfill complete.
			return nil
		}
		// Next batch starts just past the last handle we fetched.
		seekHandle = handles[len(handles)-1] + 1
		err = d.backfillColumnData(t, columnInfo, handles, reorgInfo)
		if err != nil {
			return errors.Trace(err)
		}
	}
}
// backfillColumnData backfills columnInfo's default value for each handle,
// one transaction per row. Rows that no longer exist, or that already have
// a value for the column, are skipped. Progress is persisted through
// reorgInfo.UpdateHandle within the same transaction as the write.
func (d *ddl) backfillColumnData(t table.Table, columnInfo *model.ColumnInfo, handles []int64, reorgInfo *reorgInfo) error {
	for _, handle := range handles {
		log.Info("[ddl] backfill column...", handle)
		err := kv.RunInNewTxn(d.store, true, func(txn kv.Transaction) error {
			// Abort if the reorg worker should no longer run (e.g. owner change).
			if err := d.isReorgRunnable(txn); err != nil {
				return errors.Trace(err)
			}
			// First check if row exists.
			exist, err := checkRowExist(txn, t, handle)
			if err != nil {
				return errors.Trace(err)
			} else if !exist {
				// If row doesn't exist, skip it.
				return nil
			}
			// Skip rows that already carry a value for this column.
			backfillKey := t.RecordKey(handle, &column.Col{ColumnInfo: *columnInfo})
			backfillValue, err := txn.Get(backfillKey)
			if err != nil && !kv.IsErrNotFound(err) {
				return errors.Trace(err)
			}
			if backfillValue != nil {
				return nil
			}
			value, _, err := table.GetColDefaultValue(nil, columnInfo)
			if err != nil {
				return errors.Trace(err)
			}
			// must convert to the column field type.
			v, err := value.ConvertTo(&columnInfo.FieldType)
			if err != nil {
				return errors.Trace(err)
			}
			// Lock the row to avoid racing with concurrent DML, then write.
			err = lockRow(txn, t, handle)
			if err != nil {
				return errors.Trace(err)
			}
			err = tables.SetColValue(txn, backfillKey, v)
			if err != nil {
				return errors.Trace(err)
			}
			return errors.Trace(reorgInfo.UpdateHandle(txn, handle))
		})
		if err != nil {
			return errors.Trace(err)
		}
	}
	return nil
}
// dropTableColumn deletes the stored data of colInfo for every row visible in
// the snapshot taken at reorgInfo.SnapshotVer. Each batch of handles is
// deleted in its own transaction and progress is persisted via
// reorgInfo.UpdateHandle so the job can resume after a restart.
func (d *ddl) dropTableColumn(t table.Table, colInfo *model.ColumnInfo, reorgInfo *reorgInfo) error {
	version := reorgInfo.SnapshotVer
	seekHandle := reorgInfo.Handle
	col := &column.Col{ColumnInfo: *colInfo}
	for {
		handles, err := d.getSnapshotRows(t, version, seekHandle)
		if err != nil {
			return errors.Trace(err)
		} else if len(handles) == 0 {
			// No rows left: the column data is fully removed.
			return nil
		}
		// The next batch starts right after the last handle of this one.
		seekHandle = handles[len(handles)-1] + 1
		err = kv.RunInNewTxn(d.store, true, func(txn kv.Transaction) error {
			// Abort early if the reorg job has been told to stop.
			if err1 := d.isReorgRunnable(txn); err1 != nil {
				return errors.Trace(err1)
			}
			var h int64
			for _, h = range handles {
				key := t.RecordKey(h, col)
				err1 := txn.Delete(key)
				// A missing key is fine: the row may have no data for this column.
				if err1 != nil && !terror.ErrorEqual(err1, kv.ErrNotExist) {
					return errors.Trace(err1)
				}
			}
			// h is the last handle processed in this batch.
			return errors.Trace(reorgInfo.UpdateHandle(txn, h))
		})
		if err != nil {
			return errors.Trace(err)
		}
	}
}

978
vendor/github.com/pingcap/tidb/ddl/ddl.go generated vendored Normal file
View file

@ -0,0 +1,978 @@
// Copyright 2013 The ql Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSES/QL-LICENSE file.
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ddl
import (
"fmt"
"strings"
"sync"
"time"
"github.com/juju/errors"
"github.com/ngaut/log"
"github.com/pingcap/tidb/ast"
"github.com/pingcap/tidb/column"
"github.com/pingcap/tidb/context"
"github.com/pingcap/tidb/evaluator"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/meta"
"github.com/pingcap/tidb/meta/autoid"
"github.com/pingcap/tidb/model"
"github.com/pingcap/tidb/mysql"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/util/charset"
"github.com/pingcap/tidb/util/types"
"github.com/twinj/uuid"
)
// DDL is responsible for updating schema in data store and maintaining in-memory InfoSchema cache.
type DDL interface {
	// CreateSchema creates a database. charsetInfo may be nil to use defaults.
	CreateSchema(ctx context.Context, name model.CIStr, charsetInfo *ast.CharsetOpt) error
	// DropSchema drops a database.
	DropSchema(ctx context.Context, schema model.CIStr) error
	// CreateTable creates a table with the given columns, constraints and options.
	CreateTable(ctx context.Context, ident ast.Ident, cols []*ast.ColumnDef,
		constrs []*ast.Constraint, options []*ast.TableOption) error
	// DropTable drops a table.
	DropTable(ctx context.Context, tableIdent ast.Ident) (err error)
	// CreateIndex builds an (optionally unique) index on the given columns.
	CreateIndex(ctx context.Context, tableIdent ast.Ident, unique bool, indexName model.CIStr,
		columnNames []*ast.IndexColName) error
	// DropIndex removes an index from a table.
	DropIndex(ctx context.Context, tableIdent ast.Ident, indexName model.CIStr) error
	// GetInformationSchema returns the cached InfoSchema.
	GetInformationSchema() infoschema.InfoSchema
	// AlterTable applies ALTER TABLE specs; only a single spec per call is supported.
	AlterTable(ctx context.Context, tableIdent ast.Ident, spec []*ast.AlterTableSpec) error
	// SetLease will reset the lease time for online DDL change,
	// it's a very dangerous function and you must guarantee that all servers have the same lease time.
	SetLease(lease time.Duration)
	// GetLease returns current schema lease time.
	GetLease() time.Duration
	// Stats returns the DDL statistics.
	Stats() (map[string]interface{}, error)
	// GetScope gets the status variables scope.
	GetScope(status string) variable.ScopeFlag
	// Stop stops DDL worker.
	Stop() error
	// Start starts DDL worker.
	Start() error
}
// ddl is the default DDL implementation. Mutating operations are turned into
// model.Job values queued in the store; worker goroutines (launched by
// start()) pick them up and execute the schema state changes.
type ddl struct {
	// m guards the worker lifecycle (Start/Stop/SetLease) and lease reads.
	m          sync.RWMutex
	infoHandle *infoschema.Handle
	// hook is invoked around job execution (OnChanged/OnJobRunBefore/OnJobUpdated).
	hook  Callback
	store kv.Storage
	// schema lease seconds.
	// NOTE(review): the field is a time.Duration, not seconds — confirm and
	// update the original comment upstream.
	lease time.Duration
	// uuid identifies this server when competing for job ownership.
	uuid string
	// ddlJobCh wakes the DDL worker; ddlJobDoneCh signals a finished job.
	ddlJobCh     chan struct{}
	ddlJobDoneCh chan struct{}
	// drop database/table job runs in the background.
	bgJobCh chan struct{}
	// reorgDoneCh is for reorganization, if the reorganization job is done,
	// we will use this channel to notify outer.
	// TODO: now we use goroutine to simulate reorganization jobs, later we may
	// use a persistent job list.
	reorgDoneCh chan error
	quitCh      chan struct{}
	wait        sync.WaitGroup
}
// NewDDL creates a new DDL, wrapping the unexported constructor so callers
// only see the DDL interface.
func NewDDL(store kv.Storage, infoHandle *infoschema.Handle, hook Callback, lease time.Duration) DDL {
	var d DDL = newDDL(store, infoHandle, hook, lease)
	return d
}
// newDDL builds the concrete ddl value, starts its worker goroutines and
// registers it as a statistics provider. A nil hook is replaced by the no-op
// BaseCallback.
func newDDL(store kv.Storage, infoHandle *infoschema.Handle, hook Callback, lease time.Duration) *ddl {
	if hook == nil {
		hook = &BaseCallback{}
	}
	d := new(ddl)
	d.infoHandle = infoHandle
	d.hook = hook
	d.store = store
	d.lease = lease
	// Each server instance gets a unique identity for owner election.
	d.uuid = uuid.NewV4().String()
	// Buffered with capacity 1 so notifications never block the sender.
	d.ddlJobCh = make(chan struct{}, 1)
	d.ddlJobDoneCh = make(chan struct{}, 1)
	d.bgJobCh = make(chan struct{}, 1)
	d.start()
	variable.RegisterStatistics(d)
	return d
}
// Stop shuts down the worker goroutines and, when this server currently owns
// the DDL or background job queue, clears that ownership so other servers can
// take over quickly instead of waiting for the owner lease to expire.
func (d *ddl) Stop() error {
	d.m.Lock()
	defer d.m.Unlock()
	d.close()
	err := kv.RunInNewTxn(d.store, true, func(txn kv.Transaction) error {
		t := meta.NewMeta(txn)
		owner, err1 := t.GetDDLJobOwner()
		if err1 != nil {
			return errors.Trace(err1)
		}
		if owner == nil || owner.OwnerID != d.uuid {
			return nil
		}
		// ddl job's owner is me, clean it so other servers can compete for it quickly.
		return t.SetDDLJobOwner(&model.Owner{})
	})
	if err != nil {
		return errors.Trace(err)
	}
	err = kv.RunInNewTxn(d.store, true, func(txn kv.Transaction) error {
		t := meta.NewMeta(txn)
		owner, err1 := t.GetBgJobOwner()
		if err1 != nil {
			return errors.Trace(err1)
		}
		if owner == nil || owner.OwnerID != d.uuid {
			return nil
		}
		// background job's owner is me, clean it so other servers can compete for it quickly.
		return t.SetBgJobOwner(&model.Owner{})
	})
	return errors.Trace(err)
}
// Start restarts the worker goroutines if they were previously stopped; it is
// a no-op on an already-running instance.
func (d *ddl) Start() error {
	d.m.Lock()
	defer d.m.Unlock()
	if d.isClosed() {
		d.start()
	}
	return nil
}
// start launches the background and DDL worker goroutines and nudges both so
// they immediately check ownership and any pending jobs. Caller holds d.m.
func (d *ddl) start() {
	d.quitCh = make(chan struct{})
	d.wait.Add(2)
	go d.onBackgroundWorker()
	go d.onDDLWorker()
	// For every start, send a fake job notification so each worker checks
	// ownership first and runs any job that already exists in the queue.
	for _, ch := range []chan struct{}{d.ddlJobCh, d.bgJobCh} {
		asyncNotify(ch)
	}
}
// close signals the workers to quit and blocks until both have exited; it is
// idempotent. Caller holds d.m.
func (d *ddl) close() {
	if !d.isClosed() {
		close(d.quitCh)
		d.wait.Wait()
	}
}
// isClosed reports whether quitCh has been closed, i.e. the workers have been
// told to stop.
func (d *ddl) isClosed() bool {
	select {
	case <-d.quitCh:
	default:
		return false
	}
	return true
}
// SetLease changes the schema lease. If the workers are running they are
// stopped and restarted so they pick up the new lease; if already closed,
// only the stored value changes.
func (d *ddl) SetLease(lease time.Duration) {
	d.m.Lock()
	defer d.m.Unlock()
	if lease == d.lease {
		return
	}
	log.Warnf("[ddl] change schema lease %s -> %s", d.lease, lease)
	if !d.isClosed() {
		// Restart the workers under the new lease.
		d.close()
		d.lease = lease
		d.start()
		return
	}
	// Already closed: just record the new lease.
	d.lease = lease
}
// GetLease returns the current schema lease time.
func (d *ddl) GetLease() time.Duration {
	d.m.RLock()
	defer d.m.RUnlock()
	return d.lease
}
// GetInformationSchema returns the latest InfoSchema snapshot held by the handle.
func (d *ddl) GetInformationSchema() infoschema.InfoSchema {
	return d.infoHandle.Get()
}
// genGlobalID allocates a new cluster-wide unique ID inside its own transaction.
func (d *ddl) genGlobalID() (int64, error) {
	var id int64
	txnErr := kv.RunInNewTxn(d.store, true, func(txn kv.Transaction) error {
		var genErr error
		id, genErr = meta.NewMeta(txn).GenGlobalID()
		return errors.Trace(genErr)
	})
	return id, errors.Trace(txnErr)
}
// CreateSchema queues an ActionCreateSchema job that adds a new database.
// It returns infoschema.DatabaseExists if the name is taken. When charsetInfo
// is nil the default charset/collation is used.
func (d *ddl) CreateSchema(ctx context.Context, schema model.CIStr, charsetInfo *ast.CharsetOpt) (err error) {
	is := d.GetInformationSchema()
	_, ok := is.SchemaByName(schema)
	if ok {
		return errors.Trace(infoschema.DatabaseExists)
	}
	schemaID, err := d.genGlobalID()
	if err != nil {
		return errors.Trace(err)
	}
	dbInfo := &model.DBInfo{
		Name: schema,
	}
	if charsetInfo != nil {
		dbInfo.Charset = charsetInfo.Chs
		dbInfo.Collate = charsetInfo.Col
	} else {
		dbInfo.Charset, dbInfo.Collate = getDefaultCharsetAndCollate()
	}
	job := &model.Job{
		SchemaID: schemaID,
		Type:     model.ActionCreateSchema,
		Args:     []interface{}{dbInfo},
	}
	err = d.startDDLJob(ctx, job)
	// Let the registered callback observe the outcome (success or failure).
	err = d.hook.OnChanged(err)
	return errors.Trace(err)
}
// DropSchema queues an ActionDropSchema job for the named database.
// It returns infoschema.DatabaseNotExists if the database is unknown.
func (d *ddl) DropSchema(ctx context.Context, schema model.CIStr) (err error) {
	is := d.GetInformationSchema()
	old, ok := is.SchemaByName(schema)
	if !ok {
		return errors.Trace(infoschema.DatabaseNotExists)
	}
	job := &model.Job{
		SchemaID: old.ID,
		Type:     model.ActionDropSchema,
	}
	err = d.startDDLJob(ctx, job)
	// Let the registered callback observe the outcome (success or failure).
	err = d.hook.OnChanged(err)
	return errors.Trace(err)
}
// getDefaultCharsetAndCollate returns the charset and collation applied when
// a definition does not specify its own.
func getDefaultCharsetAndCollate() (chs string, coll string) {
	// TODO: TableDefaultCharset-->DatabaseDefaultCharset-->SystemDefaultCharset.
	// TODO: change TableOption parser to parse collate.
	// This is a tmp solution.
	chs, coll = "utf8", "utf8_unicode_ci"
	return
}
// setColumnFlagWithConstraint propagates a table-level constraint onto the
// flags of the columns it covers (primary key, unique, or plain index flags),
// looking columns up by lowercased name in colMap.
func setColumnFlagWithConstraint(colMap map[string]*column.Col, v *ast.Constraint) {
	switch v.Tp {
	case ast.ConstraintPrimaryKey:
		for _, key := range v.Keys {
			c, ok := colMap[key.Column.Name.L]
			if !ok {
				// TODO: table constraint on unknown column.
				continue
			}
			c.Flag |= mysql.PriKeyFlag
			// Primary key can not be NULL.
			c.Flag |= mysql.NotNullFlag
		}
	case ast.ConstraintUniq, ast.ConstraintUniqIndex, ast.ConstraintUniqKey:
		for i, key := range v.Keys {
			c, ok := colMap[key.Column.Name.L]
			if !ok {
				// TODO: table constraint on unknown column.
				continue
			}
			if i == 0 {
				// Only the first column can be set
				// if unique index has multi columns,
				// the flag should be MultipleKeyFlag.
				// See: https://dev.mysql.com/doc/refman/5.7/en/show-columns.html
				if len(v.Keys) > 1 {
					c.Flag |= mysql.MultipleKeyFlag
				} else {
					c.Flag |= mysql.UniqueKeyFlag
				}
			}
		}
	case ast.ConstraintKey, ast.ConstraintIndex:
		for i, key := range v.Keys {
			c, ok := colMap[key.Column.Name.L]
			if !ok {
				// TODO: table constraint on unknown column.
				continue
			}
			if i == 0 {
				// Only the first column can be set.
				c.Flag |= mysql.MultipleKeyFlag
			}
		}
	}
}
// buildColumnsAndConstraints converts the parsed column definitions into
// column.Col values, collects the column-level constraints they imply
// (primary key, unique, index options) alongside the table-level ones, and
// then applies the combined constraint list to the columns' flags.
func (d *ddl) buildColumnsAndConstraints(ctx context.Context, colDefs []*ast.ColumnDef,
	constraints []*ast.Constraint) ([]*column.Col, []*ast.Constraint, error) {
	var cols []*column.Col
	colMap := map[string]*column.Col{}
	for i, colDef := range colDefs {
		col, cts, err := d.buildColumnAndConstraint(ctx, i, colDef)
		if err != nil {
			return nil, nil, errors.Trace(err)
		}
		// New columns created this way are immediately public (no online change).
		col.State = model.StatePublic
		constraints = append(constraints, cts...)
		cols = append(cols, col)
		colMap[colDef.Name.Name.L] = col
	}
	// traverse table Constraints and set col.flag
	for _, v := range constraints {
		setColumnFlagWithConstraint(colMap, v)
	}
	return cols, constraints, nil
}
// buildColumnAndConstraint fills in default charset/collation for the column
// type, converts the definition via columnDefToCol, and allocates a global ID
// for the new column. offset is the column's position in the table.
func (d *ddl) buildColumnAndConstraint(ctx context.Context, offset int,
	colDef *ast.ColumnDef) (*column.Col, []*ast.Constraint, error) {
	// Set charset: text types get the default charset, everything else binary.
	if len(colDef.Tp.Charset) == 0 {
		switch colDef.Tp.Tp {
		case mysql.TypeString, mysql.TypeVarchar, mysql.TypeVarString, mysql.TypeBlob, mysql.TypeTinyBlob, mysql.TypeMediumBlob, mysql.TypeLongBlob:
			colDef.Tp.Charset, colDef.Tp.Collate = getDefaultCharsetAndCollate()
		default:
			colDef.Tp.Charset = charset.CharsetBin
			colDef.Tp.Collate = charset.CharsetBin
		}
	}
	col, cts, err := columnDefToCol(ctx, offset, colDef)
	if err != nil {
		return nil, nil, errors.Trace(err)
	}
	col.ID, err = d.genGlobalID()
	if err != nil {
		return nil, nil, errors.Trace(err)
	}
	return col, cts, nil
}
// columnDefToCol converts ColumnDef to Col and TableConstraints.
// It applies each column option (NULL/NOT NULL, AUTO_INCREMENT, key options,
// DEFAULT, ON UPDATE), turning key options into table-level constraints over
// this single column, and finally validates the default-value/flag
// combination.
func columnDefToCol(ctx context.Context, offset int, colDef *ast.ColumnDef) (*column.Col, []*ast.Constraint, error) {
	constraints := []*ast.Constraint{}
	col := &column.Col{
		ColumnInfo: model.ColumnInfo{
			Offset:    offset,
			Name:      colDef.Name.Name,
			FieldType: *colDef.Tp,
		},
	}
	// Check and set TimestampFlag and OnUpdateNowFlag.
	if col.Tp == mysql.TypeTimestamp {
		col.Flag |= mysql.TimestampFlag
		col.Flag |= mysql.OnUpdateNowFlag
		col.Flag |= mysql.NotNullFlag
	}
	// If flen is not assigned, assigned it by type.
	if col.Flen == types.UnspecifiedLength {
		col.Flen = mysql.GetDefaultFieldLength(col.Tp)
	}
	if col.Decimal == types.UnspecifiedLength {
		col.Decimal = mysql.GetDefaultDecimal(col.Tp)
	}
	setOnUpdateNow := false
	hasDefaultValue := false
	if colDef.Options != nil {
		// keys is the single-column key list reused by every key-ish option below.
		keys := []*ast.IndexColName{
			{
				Column: colDef.Name,
				Length: colDef.Tp.Flen,
			},
		}
		for _, v := range colDef.Options {
			switch v.Tp {
			case ast.ColumnOptionNotNull:
				col.Flag |= mysql.NotNullFlag
			case ast.ColumnOptionNull:
				col.Flag &= ^uint(mysql.NotNullFlag)
				removeOnUpdateNowFlag(col)
			case ast.ColumnOptionAutoIncrement:
				col.Flag |= mysql.AutoIncrementFlag
			case ast.ColumnOptionPrimaryKey:
				constraint := &ast.Constraint{Tp: ast.ConstraintPrimaryKey, Keys: keys}
				constraints = append(constraints, constraint)
				col.Flag |= mysql.PriKeyFlag
			case ast.ColumnOptionUniq:
				constraint := &ast.Constraint{Tp: ast.ConstraintUniq, Name: colDef.Name.Name.O, Keys: keys}
				constraints = append(constraints, constraint)
				col.Flag |= mysql.UniqueKeyFlag
			case ast.ColumnOptionIndex:
				constraint := &ast.Constraint{Tp: ast.ConstraintIndex, Name: colDef.Name.Name.O, Keys: keys}
				constraints = append(constraints, constraint)
			case ast.ColumnOptionUniqIndex:
				constraint := &ast.Constraint{Tp: ast.ConstraintUniqIndex, Name: colDef.Name.Name.O, Keys: keys}
				constraints = append(constraints, constraint)
				col.Flag |= mysql.UniqueKeyFlag
			case ast.ColumnOptionKey:
				constraint := &ast.Constraint{Tp: ast.ConstraintKey, Name: colDef.Name.Name.O, Keys: keys}
				constraints = append(constraints, constraint)
			case ast.ColumnOptionUniqKey:
				constraint := &ast.Constraint{Tp: ast.ConstraintUniqKey, Name: colDef.Name.Name.O, Keys: keys}
				constraints = append(constraints, constraint)
				col.Flag |= mysql.UniqueKeyFlag
			case ast.ColumnOptionDefaultValue:
				value, err := getDefaultValue(ctx, v, colDef.Tp.Tp, colDef.Tp.Decimal)
				if err != nil {
					return nil, nil, errors.Errorf("invalid default value - %s", errors.Trace(err))
				}
				col.DefaultValue = value
				hasDefaultValue = true
				// An explicit DEFAULT cancels the implicit ON UPDATE NOW of timestamps.
				removeOnUpdateNowFlag(col)
			case ast.ColumnOptionOnUpdate:
				// Only ON UPDATE CURRENT_TIMESTAMP (and aliases) is allowed.
				if !evaluator.IsCurrentTimeExpr(v.Expr) {
					return nil, nil, errors.Errorf("invalid ON UPDATE for - %s", col.Name)
				}
				col.Flag |= mysql.OnUpdateNowFlag
				setOnUpdateNow = true
			case ast.ColumnOptionFulltext, ast.ColumnOptionComment:
				// Do nothing.
			}
		}
	}
	setTimestampDefaultValue(col, hasDefaultValue, setOnUpdateNow)
	// Set `NoDefaultValueFlag` if this field doesn't have a default value and
	// it is `not null` and not an `AUTO_INCREMENT` field or `TIMESTAMP` field.
	setNoDefaultValueFlag(col, hasDefaultValue)
	err := checkDefaultValue(col, hasDefaultValue)
	if err != nil {
		return nil, nil, errors.Trace(err)
	}
	if col.Charset == charset.CharsetBin {
		col.Flag |= mysql.BinaryFlag
	}
	return col, constraints, nil
}
// getDefaultValue evaluates a DEFAULT option expression. Timestamp/datetime
// types are handled specially: the value may be nil (DEFAULT NULL) and a
// mysql.Time result is converted to its string form; other types are simply
// evaluated.
func getDefaultValue(ctx context.Context, c *ast.ColumnOption, tp byte, fsp int) (interface{}, error) {
	if tp == mysql.TypeTimestamp || tp == mysql.TypeDatetime {
		value, err := evaluator.GetTimeValue(ctx, c.Expr, tp, fsp)
		if err != nil {
			return nil, errors.Trace(err)
		}
		// Value is nil means `default null`.
		if value == nil {
			return nil, nil
		}
		// If value is mysql.Time, convert it to string.
		if vv, ok := value.(mysql.Time); ok {
			return vv.String(), nil
		}
		return value, nil
	}
	v, err := evaluator.Eval(ctx, c.Expr)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return v, nil
}
// removeOnUpdateNowFlag clears OnUpdateNowFlag on a timestamp column; it is
// called when the column is declared NULL or given an explicit default value.
// Non-timestamp columns are left untouched.
func removeOnUpdateNowFlag(c *column.Col) {
	if !mysql.HasTimestampFlag(c.Flag) {
		return
	}
	c.Flag &= ^uint(mysql.OnUpdateNowFlag)
}
// setTimestampDefaultValue gives a NOT NULL timestamp column without an
// explicit default a synthetic one: zero timestamp when ON UPDATE NOW was
// set, otherwise the current timestamp.
func setTimestampDefaultValue(c *column.Col, hasDefaultValue bool, setOnUpdateNow bool) {
	if hasDefaultValue || !mysql.HasTimestampFlag(c.Flag) || !mysql.HasNotNullFlag(c.Flag) {
		return
	}
	if setOnUpdateNow {
		c.DefaultValue = evaluator.ZeroTimestamp
		return
	}
	c.DefaultValue = evaluator.CurrentTimestamp
}
// setNoDefaultValueFlag marks a column as having no default when it is
// NOT NULL, lacks an explicit default, and is neither AUTO_INCREMENT nor a
// TIMESTAMP (both of which supply values implicitly).
func setNoDefaultValueFlag(c *column.Col, hasDefaultValue bool) {
	needsFlag := !hasDefaultValue &&
		mysql.HasNotNullFlag(c.Flag) &&
		!mysql.HasAutoIncrementFlag(c.Flag) &&
		!mysql.HasTimestampFlag(c.Flag)
	if needsFlag {
		c.Flag |= mysql.NoDefaultValueFlag
	}
}
// checkDefaultValue rejects the contradictory combination NOT NULL with
// DEFAULT NULL; every other combination is accepted.
func checkDefaultValue(c *column.Col, hasDefaultValue bool) error {
	if hasDefaultValue && c.DefaultValue == nil && mysql.HasNotNullFlag(c.Flag) {
		return errors.Errorf("invalid default value for %s", c.Name)
	}
	return nil
}
// checkDuplicateColumn returns an error when two column definitions share a
// name. MySQL column names are case-insensitive, so comparison uses the
// lowercased form (.L); the previous code compared the original-cased name
// (.O) — despite the variable being named nameLower — letting "A" and "a"
// slip through as distinct columns.
func checkDuplicateColumn(colDefs []*ast.ColumnDef) error {
	colNames := map[string]bool{}
	for _, colDef := range colDefs {
		nameLower := colDef.Name.Name.L
		if colNames[nameLower] {
			return errors.Errorf("CREATE TABLE: duplicate column %s", colDef.Name)
		}
		colNames[nameLower] = true
	}
	return nil
}
// checkConstraintNames verifies that explicit constraint names are unique
// (case-insensitively) and generates names for anonymous constraints using
// the MySQL pattern "col", "col_2", ... Foreign keys are skipped.
func checkConstraintNames(constraints []*ast.Constraint) error {
	constrNames := map[string]bool{}
	// Check not empty constraint name whether is duplicated.
	for _, constr := range constraints {
		if constr.Tp == ast.ConstraintForeignKey {
			// Ignore foreign key.
			continue
		}
		if constr.Name != "" {
			nameLower := strings.ToLower(constr.Name)
			if constrNames[nameLower] {
				return errors.Errorf("CREATE TABLE: duplicate key %s", constr.Name)
			}
			constrNames[nameLower] = true
		}
	}
	// Set empty constraint names.
	for _, constr := range constraints {
		if constr.Name == "" && len(constr.Keys) > 0 {
			colName := constr.Keys[0].Column.Name.O
			constrName := colName
			i := 2
			for constrNames[strings.ToLower(constrName)] {
				// We loop forever until we find constrName that haven't been used.
				constrName = fmt.Sprintf("%s_%d", colName, i)
				i++
			}
			constr.Name = constrName
			// Record the lowercased key so later lookups (which lowercase)
			// see it; the previous code stored the original-cased key,
			// allowing case-variant generated names to collide.
			constrNames[strings.ToLower(constrName)] = true
		}
	}
	return nil
}
// buildTableInfo assembles a model.TableInfo from the built columns and
// constraints: it allocates the table ID, copies column metadata, decides
// whether the primary key can be used directly as the row handle
// (single integer PK), and builds an IndexInfo per remaining constraint.
func (d *ddl) buildTableInfo(tableName model.CIStr, cols []*column.Col, constraints []*ast.Constraint) (tbInfo *model.TableInfo, err error) {
	tbInfo = &model.TableInfo{
		Name: tableName,
	}
	tbInfo.ID, err = d.genGlobalID()
	if err != nil {
		return nil, errors.Trace(err)
	}
	for _, v := range cols {
		tbInfo.Columns = append(tbInfo.Columns, &v.ColumnInfo)
	}
	for _, constr := range constraints {
		if constr.Tp == ast.ConstraintPrimaryKey {
			if len(constr.Keys) == 1 {
				key := constr.Keys[0]
				col := column.FindCol(cols, key.Column.Name.O)
				if col == nil {
					return nil, errors.Errorf("No such column: %v", key)
				}
				switch col.Tp {
				case mysql.TypeLong, mysql.TypeLonglong:
					// A single integer PK doubles as the row handle.
					tbInfo.PKIsHandle = true
					// Avoid creating index for PK handle column.
					continue
				}
			}
		}
		// 1. check if the column is exists
		// 2. add index
		indexColumns := make([]*model.IndexColumn, 0, len(constr.Keys))
		for _, key := range constr.Keys {
			col := column.FindCol(cols, key.Column.Name.O)
			if col == nil {
				return nil, errors.Errorf("No such column: %v", key)
			}
			indexColumns = append(indexColumns, &model.IndexColumn{
				Name:   key.Column.Name,
				Offset: col.Offset,
				Length: key.Length,
			})
		}
		idxInfo := &model.IndexInfo{
			Name:    model.NewCIStr(constr.Name),
			Columns: indexColumns,
			State:   model.StatePublic,
		}
		switch constr.Tp {
		case ast.ConstraintPrimaryKey:
			idxInfo.Unique = true
			idxInfo.Primary = true
			idxInfo.Name = model.NewCIStr(column.PrimaryKeyName)
		case ast.ConstraintUniq, ast.ConstraintUniqKey, ast.ConstraintUniqIndex:
			idxInfo.Unique = true
		}
		if constr.Option != nil {
			idxInfo.Comment = constr.Option.Comment
			idxInfo.Tp = constr.Option.Tp
		} else {
			// Use btree as default index type.
			idxInfo.Tp = model.IndexTypeBtree
		}
		idxInfo.ID, err = d.genGlobalID()
		if err != nil {
			return nil, errors.Trace(err)
		}
		tbInfo.Indices = append(tbInfo.Indices, idxInfo)
	}
	return
}
// CreateTable validates the definition (duplicate columns, constraint names),
// builds the TableInfo, queues an ActionCreateTable job, and finally applies
// table options (currently AUTO_INCREMENT) once the table exists.
func (d *ddl) CreateTable(ctx context.Context, ident ast.Ident, colDefs []*ast.ColumnDef,
	constraints []*ast.Constraint, options []*ast.TableOption) (err error) {
	is := d.GetInformationSchema()
	schema, ok := is.SchemaByName(ident.Schema)
	if !ok {
		return infoschema.DatabaseNotExists.Gen("database %s not exists", ident.Schema)
	}
	if is.TableExists(ident.Schema, ident.Name) {
		return errors.Trace(infoschema.TableExists)
	}
	if err = checkDuplicateColumn(colDefs); err != nil {
		return errors.Trace(err)
	}
	cols, newConstraints, err := d.buildColumnsAndConstraints(ctx, colDefs, constraints)
	if err != nil {
		return errors.Trace(err)
	}
	err = checkConstraintNames(newConstraints)
	if err != nil {
		return errors.Trace(err)
	}
	tbInfo, err := d.buildTableInfo(ident.Name, cols, newConstraints)
	if err != nil {
		return errors.Trace(err)
	}
	job := &model.Job{
		SchemaID: schema.ID,
		TableID:  tbInfo.ID,
		Type:     model.ActionCreateTable,
		Args:     []interface{}{tbInfo},
	}
	err = d.startDDLJob(ctx, job)
	if err == nil {
		// Options are applied only after the table has been created successfully.
		err = d.handleTableOptions(options, tbInfo, schema.ID)
	}
	err = d.hook.OnChanged(err)
	return errors.Trace(err)
}
// handleTableOptions applies CREATE TABLE options to a freshly created table.
// Only AUTO_INCREMENT is handled: it rebases the table's auto-ID allocator to
// the requested start value.
func (d *ddl) handleTableOptions(options []*ast.TableOption, tbInfo *model.TableInfo, schemaID int64) error {
	for _, op := range options {
		if op.Tp == ast.TableOptionAutoIncrement {
			alloc := autoid.NewAllocator(d.store, schemaID)
			tbInfo.State = model.StatePublic
			tb, err := table.TableFromMeta(alloc, tbInfo)
			if err != nil {
				return errors.Trace(err)
			}
			// The operation of the minus 1 to make sure that the current value doesn't be used,
			// the next Alloc operation will get this value.
			// Its behavior is consistent with MySQL.
			if err = tb.RebaseAutoID(int64(op.UintValue-1), false); err != nil {
				return errors.Trace(err)
			}
		}
	}
	return nil
}
// AlterTable dispatches each ALTER TABLE spec to the matching operation
// (add/drop column, add constraint, drop index). Only a single spec per
// statement is supported; multi-change statements are rejected up front.
func (d *ddl) AlterTable(ctx context.Context, ident ast.Ident, specs []*ast.AlterTableSpec) (err error) {
	// now we only allow one schema changes at the same time.
	if len(specs) != 1 {
		return errors.New("can't run multi schema changes in one DDL")
	}
	for _, spec := range specs {
		switch spec.Tp {
		case ast.AlterTableAddColumn:
			err = d.AddColumn(ctx, ident, spec)
		case ast.AlterTableDropColumn:
			err = d.DropColumn(ctx, ident, spec.DropColumn.Name)
		case ast.AlterTableDropIndex:
			err = d.DropIndex(ctx, ident, model.NewCIStr(spec.Name))
		case ast.AlterTableAddConstraint:
			constr := spec.Constraint
			switch spec.Constraint.Tp {
			case ast.ConstraintKey, ast.ConstraintIndex:
				err = d.CreateIndex(ctx, ident, false, model.NewCIStr(constr.Name), spec.Constraint.Keys)
			case ast.ConstraintUniq, ast.ConstraintUniqIndex, ast.ConstraintUniqKey:
				err = d.CreateIndex(ctx, ident, true, model.NewCIStr(constr.Name), spec.Constraint.Keys)
			default:
				// nothing to do now.
			}
		default:
			// nothing to do now.
		}
		if err != nil {
			return errors.Trace(err)
		}
	}
	return nil
}
// checkColumnConstraint rejects column options that ADD COLUMN cannot handle
// yet: AUTO_INCREMENT, PRIMARY KEY and the unique-key variants.
func checkColumnConstraint(constraints []*ast.ColumnOption) error {
	for _, opt := range constraints {
		switch opt.Tp {
		case ast.ColumnOptionAutoIncrement,
			ast.ColumnOptionPrimaryKey,
			ast.ColumnOptionUniq,
			ast.ColumnOptionUniqKey:
			return errors.Errorf("unsupported add column constraint - %v", opt.Tp)
		}
	}
	return nil
}
// AddColumn will add a new column to the table.
// It validates the column's options, rejects duplicates, builds the column
// metadata and queues an ActionAddColumn job.
func (d *ddl) AddColumn(ctx context.Context, ti ast.Ident, spec *ast.AlterTableSpec) error {
	// Check whether the added column constraints are supported.
	err := checkColumnConstraint(spec.Column.Options)
	if err != nil {
		return errors.Trace(err)
	}
	is := d.infoHandle.Get()
	schema, ok := is.SchemaByName(ti.Schema)
	if !ok {
		return errors.Trace(infoschema.DatabaseNotExists)
	}
	t, err := is.TableByName(ti.Schema, ti.Name)
	if err != nil {
		return errors.Trace(infoschema.TableNotExists)
	}
	// Check whether the added column already exists.
	colName := spec.Column.Name.Name.O
	col := column.FindCol(t.Cols(), colName)
	if col != nil {
		return errors.Errorf("column %s already exists", colName)
	}
	// Ignore table constraints now; maybe return an error later.
	// We use len(t.Cols()) as the default offset first; the worker will fix
	// the column's real offset later (per spec.Position).
	col, _, err = d.buildColumnAndConstraint(ctx, len(t.Cols()), spec.Column)
	if err != nil {
		return errors.Trace(err)
	}
	job := &model.Job{
		SchemaID: schema.ID,
		TableID:  t.Meta().ID,
		Type:     model.ActionAddColumn,
		Args:     []interface{}{&col.ColumnInfo, spec.Position, 0},
	}
	err = d.startDDLJob(ctx, job)
	err = d.hook.OnChanged(err)
	return errors.Trace(err)
}
// DropColumn will drop a column from the table; dropping a column covered by
// an index is not supported yet. It validates that the column exists and
// queues an ActionDropColumn job.
func (d *ddl) DropColumn(ctx context.Context, ti ast.Ident, colName model.CIStr) error {
	is := d.infoHandle.Get()
	schema, ok := is.SchemaByName(ti.Schema)
	if !ok {
		return errors.Trace(infoschema.DatabaseNotExists)
	}
	t, err := is.TableByName(ti.Schema, ti.Name)
	if err != nil {
		return errors.Trace(infoschema.TableNotExists)
	}
	// Check whether the dropped column exists.
	col := column.FindCol(t.Cols(), colName.L)
	if col == nil {
		// Fixed error-message typo: "doesnt" -> "doesn't".
		return errors.Errorf("column %s doesn't exist", colName.L)
	}
	job := &model.Job{
		SchemaID: schema.ID,
		TableID:  t.Meta().ID,
		Type:     model.ActionDropColumn,
		Args:     []interface{}{colName},
	}
	err = d.startDDLJob(ctx, job)
	err = d.hook.OnChanged(err)
	return errors.Trace(err)
}
// DropTable will proceed even if some table in the list does not exists.
// It queues an ActionDropTable job; the actual data removal happens later in
// a background job.
func (d *ddl) DropTable(ctx context.Context, ti ast.Ident) (err error) {
	is := d.GetInformationSchema()
	schema, ok := is.SchemaByName(ti.Schema)
	if !ok {
		return infoschema.DatabaseNotExists.Gen("database %s not exists", ti.Schema)
	}
	tb, err := is.TableByName(ti.Schema, ti.Name)
	if err != nil {
		return errors.Trace(infoschema.TableNotExists)
	}
	job := &model.Job{
		SchemaID: schema.ID,
		TableID:  tb.Meta().ID,
		Type:     model.ActionDropTable,
	}
	err = d.startDDLJob(ctx, job)
	err = d.hook.OnChanged(err)
	return errors.Trace(err)
}
// CreateIndex allocates an index ID and queues an ActionAddIndex job building
// an index (unique when requested) over the given columns.
func (d *ddl) CreateIndex(ctx context.Context, ti ast.Ident, unique bool, indexName model.CIStr, idxColNames []*ast.IndexColName) error {
	is := d.infoHandle.Get()
	schema, ok := is.SchemaByName(ti.Schema)
	if !ok {
		return infoschema.DatabaseNotExists.Gen("database %s not exists", ti.Schema)
	}
	t, err := is.TableByName(ti.Schema, ti.Name)
	if err != nil {
		return errors.Trace(infoschema.TableNotExists)
	}
	indexID, err := d.genGlobalID()
	if err != nil {
		return errors.Trace(err)
	}
	job := &model.Job{
		SchemaID: schema.ID,
		TableID:  t.Meta().ID,
		Type:     model.ActionAddIndex,
		Args:     []interface{}{unique, indexName, indexID, idxColNames},
	}
	err = d.startDDLJob(ctx, job)
	err = d.hook.OnChanged(err)
	return errors.Trace(err)
}
// DropIndex queues an ActionDropIndex job removing the named index from the
// table.
func (d *ddl) DropIndex(ctx context.Context, ti ast.Ident, indexName model.CIStr) error {
	is := d.infoHandle.Get()
	schema, ok := is.SchemaByName(ti.Schema)
	if !ok {
		return errors.Trace(infoschema.DatabaseNotExists)
	}
	t, err := is.TableByName(ti.Schema, ti.Name)
	if err != nil {
		return errors.Trace(infoschema.TableNotExists)
	}
	job := &model.Job{
		SchemaID: schema.ID,
		TableID:  t.Meta().ID,
		Type:     model.ActionDropIndex,
		Args:     []interface{}{indexName},
	}
	err = d.startDDLJob(ctx, job)
	err = d.hook.OnChanged(err)
	return errors.Trace(err)
}
// findCol finds a column in cols by name, case-insensitively (column names
// are stored with their lowercased form in Name.L). Returns nil when absent.
func findCol(cols []*model.ColumnInfo, name string) *model.ColumnInfo {
	lower := strings.ToLower(name)
	for _, c := range cols {
		if c.Name.L == lower {
			return c
		}
	}
	return nil
}

392
vendor/github.com/pingcap/tidb/ddl/ddl_worker.go generated vendored Normal file
View file

@ -0,0 +1,392 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ddl
import (
"time"
"github.com/juju/errors"
"github.com/ngaut/log"
"github.com/pingcap/tidb/context"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/meta"
"github.com/pingcap/tidb/model"
"github.com/pingcap/tidb/terror"
)
// startDDLJob commits the caller's transaction, enqueues job in the store,
// notifies the worker, and then blocks — polling the history queue — until
// the job has finished (JobDone) or been cancelled, in which case the stored
// error message is returned.
func (d *ddl) startDDLJob(ctx context.Context, job *model.Job) error {
	// For every DDL, we must commit current transaction.
	if err := ctx.FinishTxn(false); err != nil {
		return errors.Trace(err)
	}
	// Create a new job and queue it.
	err := kv.RunInNewTxn(d.store, true, func(txn kv.Transaction) error {
		t := meta.NewMeta(txn)
		var err error
		job.ID, err = t.GenGlobalID()
		if err != nil {
			return errors.Trace(err)
		}
		err = t.EnQueueDDLJob(job)
		return errors.Trace(err)
	})
	if err != nil {
		return errors.Trace(err)
	}
	// notice worker that we push a new job and wait the job done.
	asyncNotify(d.ddlJobCh)
	log.Warnf("[ddl] start DDL job %v", job)
	jobID := job.ID
	var historyJob *model.Job
	// for a job from start to end, the state of it will be none -> delete only -> write only -> reorganization -> public
	// for every state changes, we will wait as lease 2 * lease time, so here the ticker check is 10 * lease.
	ticker := time.NewTicker(chooseLeaseTime(10*d.lease, 10*time.Second))
	defer ticker.Stop()
	for {
		select {
		case <-d.ddlJobDoneCh:
		case <-ticker.C:
		}
		historyJob, err = d.getHistoryDDLJob(jobID)
		if err != nil {
			log.Errorf("[ddl] get history DDL job err %v, check again", err)
			continue
		} else if historyJob == nil {
			log.Warnf("[ddl] DDL job %d is not in history, maybe not run", jobID)
			continue
		}
		// if a job is a history table, the state must be JobDone or JobCancel.
		if historyJob.State == model.JobDone {
			return nil
		}
		// BUG FIX: historyJob.Error is an arbitrary message; passing it as the
		// format string of Errorf would misinterpret any '%' verbs in it.
		return errors.New(historyJob.Error)
	}
}
// getHistoryDDLJob loads job id from the history queue in a read-only
// transaction; a nil job with nil error means the job isn't in history yet.
func (d *ddl) getHistoryDDLJob(id int64) (*model.Job, error) {
	var job *model.Job
	txnErr := kv.RunInNewTxn(d.store, false, func(txn kv.Transaction) error {
		var getErr error
		job, getErr = meta.NewMeta(txn).GetHistoryDDLJob(id)
		return errors.Trace(getErr)
	})
	return job, errors.Trace(txnErr)
}
// asyncNotify sends a non-blocking signal on ch. If a notification is already
// pending (the 1-slot buffer is full), the signal is dropped — one pending
// wake-up is enough.
func asyncNotify(ch chan struct{}) {
	select {
	case ch <- struct{}{}:
	default:
	}
}
// checkOwner loads the owner record for the given queue (DDL or background)
// and tries to claim or keep ownership for this server: ownership is taken
// when there is no owner, when we already own it, or when the current owner's
// heartbeat is older than 4 * lease. Returns ErrNotOwner when another live
// server owns the queue.
func (d *ddl) checkOwner(t *meta.Meta, flag JobType) (*model.Owner, error) {
	var owner *model.Owner
	var err error
	switch flag {
	case ddlJobFlag:
		owner, err = t.GetDDLJobOwner()
	case bgJobFlag:
		owner, err = t.GetBgJobOwner()
	default:
		err = errInvalidJobFlag
	}
	if err != nil {
		return nil, errors.Trace(err)
	}
	if owner == nil {
		owner = &model.Owner{}
		// try to set owner
		owner.OwnerID = d.uuid
	}
	now := time.Now().UnixNano()
	// we must wait 2 * lease time to guarantee other servers update the schema,
	// the owner will update its owner status every 2 * lease time, so here we use
	// 4 * lease to check its timeout.
	maxTimeout := int64(4 * d.lease)
	if owner.OwnerID == d.uuid || now-owner.LastUpdateTS > maxTimeout {
		owner.OwnerID = d.uuid
		owner.LastUpdateTS = now
		// update status.
		switch flag {
		case ddlJobFlag:
			err = t.SetDDLJobOwner(owner)
		case bgJobFlag:
			err = t.SetBgJobOwner(owner)
		}
		if err != nil {
			return nil, errors.Trace(err)
		}
		log.Debugf("[ddl] become %s job owner %s", flag, owner.OwnerID)
	}
	if owner.OwnerID != d.uuid {
		log.Debugf("[ddl] not %s job owner, owner is %s", flag, owner.OwnerID)
		return nil, errors.Trace(ErrNotOwner)
	}
	return owner, nil
}
// getFirstDDLJob peeks at the head (index 0) of the DDL job queue.
func (d *ddl) getFirstDDLJob(t *meta.Meta) (*model.Job, error) {
	head, err := t.GetDDLJob(0)
	return head, errors.Trace(err)
}
// updateDDLJob persists the (possibly mutated) job back at the head of the
// queue. Every time the job enters a new non-final state, this must be called.
func (d *ddl) updateDDLJob(t *meta.Meta, job *model.Job) error {
	return errors.Trace(t.UpdateDDLJob(0, job))
}
// finishDDLJob dequeues the finished job and records it in the history queue.
// For drop schema/table jobs it first prepares the background job that will
// delete the actual data asynchronously.
func (d *ddl) finishDDLJob(t *meta.Meta, job *model.Job) error {
	log.Warnf("[ddl] finish DDL job %v", job)
	// done, notice and run next job.
	_, err := t.DeQueueDDLJob()
	if err != nil {
		return errors.Trace(err)
	}
	switch job.Type {
	case model.ActionDropSchema, model.ActionDropTable:
		// Data removal is deferred to a background job.
		if err = d.prepareBgJob(job); err != nil {
			return errors.Trace(err)
		}
	}
	err = t.AddHistoryDDLJob(job)
	return errors.Trace(err)
}
// ErrNotOwner means we are not owner and can't handle DDL jobs.
var ErrNotOwner = errors.New("DDL: not owner")

// ErrWorkerClosed means we have already closed the DDL worker.
var ErrWorkerClosed = errors.New("DDL: worker is closed")

// errInvalidJobFlag is returned by checkOwner for a JobType that is neither
// ddlJobFlag nor bgJobFlag.
var errInvalidJobFlag = errors.New("DDL: invalid job flag")
// JobType is job type, including ddl/background.
type JobType int

const (
	ddlJobFlag = iota + 1
	bgJobFlag
)

// String returns the human-readable queue name for the job type;
// unrecognized values map to "unknown".
func (j JobType) String() string {
	names := map[JobType]string{
		ddlJobFlag: "ddl",
		bgJobFlag:  "background",
	}
	if s, ok := names[j]; ok {
		return s
	}
	return "unknown"
}
// handleDDLJobQueue is the worker loop body: while this server owns the DDL
// queue it repeatedly takes the head job, runs one state transition, persists
// the result, and waits 2 * lease so every server observes the new schema
// before the next transition. It returns when there is no job or this server
// is not (or no longer) the owner.
func (d *ddl) handleDDLJobQueue() error {
	for {
		if d.isClosed() {
			return nil
		}
		waitTime := 2 * d.lease
		var job *model.Job
		err := kv.RunInNewTxn(d.store, false, func(txn kv.Transaction) error {
			t := meta.NewMeta(txn)
			owner, err := d.checkOwner(t, ddlJobFlag)
			if terror.ErrorEqual(err, ErrNotOwner) {
				// we are not owner, return and retry checking later.
				return nil
			} else if err != nil {
				return errors.Trace(err)
			}
			// become the owner
			// get the first job and run
			job, err = d.getFirstDDLJob(t)
			if job == nil || err != nil {
				return errors.Trace(err)
			}
			if job.IsRunning() {
				// if we enter a new state, crash when waiting 2 * lease time, and restart quickly,
				// we may run the job immediately again, but we don't wait enough 2 * lease time to
				// let other servers update the schema.
				// so here we must check the elapsed time from last update, if < 2 * lease, we must
				// wait again.
				elapsed := time.Duration(time.Now().UnixNano() - job.LastUpdateTS)
				if elapsed > 0 && elapsed < waitTime {
					log.Warnf("[ddl] the elapsed time from last update is %s < %s, wait again", elapsed, waitTime)
					waitTime -= elapsed
					return nil
				}
			}
			log.Warnf("[ddl] run DDL job %v", job)
			d.hook.OnJobRunBefore(job)
			// if run job meets error, we will save this error in job Error
			// and retry later if the job is not cancelled.
			d.runDDLJob(t, job)
			if job.IsFinished() {
				err = d.finishDDLJob(t, job)
			} else {
				err = d.updateDDLJob(t, job)
			}
			if err != nil {
				return errors.Trace(err)
			}
			// running job may cost some time, so here we must update owner status to
			// prevent other become the owner.
			owner.LastUpdateTS = time.Now().UnixNano()
			err = t.SetDDLJobOwner(owner)
			return errors.Trace(err)
		})
		if err != nil {
			return errors.Trace(err)
		} else if job == nil {
			// no job now, return and retry get later.
			return nil
		}
		d.hook.OnJobUpdated(job)
		// here means the job enters another state (delete only, write only, public, etc...) or is cancelled.
		// if the job is done or still running, we will wait 2 * lease time to guarantee other servers to update
		// the newest schema.
		if job.State == model.JobRunning || job.State == model.JobDone {
			d.waitSchemaChanged(waitTime)
		}
		if job.IsFinished() {
			d.startBgJob(job.Type)
			asyncNotify(d.ddlJobDoneCh)
		}
	}
}
func chooseLeaseTime(n1 time.Duration, n2 time.Duration) time.Duration {
if n1 > 0 {
return n1
}
return n2
}
// onDDLWorker is for async online schema change, it will try to become the owner first,
// then wait or pull the job queue to handle a schema change job.
// It runs until quitCh is closed and signals completion via d.wait.
func (d *ddl) onDDLWorker() {
	defer d.wait.Done()
	// we use 4 * lease time to check owner's timeout, so here, we will update owner's status
	// every 2 * lease time, if lease is 0, we will use default 10s.
	checkTime := chooseLeaseTime(2*d.lease, 10*time.Second)
	ticker := time.NewTicker(checkTime)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			log.Debugf("[ddl] wait %s to check DDL status again", checkTime)
		case <-d.ddlJobCh:
			// A job was enqueued locally; handle it without waiting for the tick.
		case <-d.quitCh:
			return
		}
		err := d.handleDDLJobQueue()
		if err != nil {
			log.Errorf("[ddl] handle ddl job err %v", errors.ErrorStack(err))
		}
	}
}
// runDDLJob dispatches the job to the handler for its action type and marks
// it running. Handler errors are recorded on the job (Error/ErrorCount) so
// the job can be retried later; unknown action types cancel the job.
func (d *ddl) runDDLJob(t *meta.Meta, job *model.Job) {
	if job.IsFinished() {
		return
	}
	job.State = model.JobRunning
	var err error
	switch job.Type {
	case model.ActionCreateSchema:
		err = d.onCreateSchema(t, job)
	case model.ActionDropSchema:
		err = d.onDropSchema(t, job)
	case model.ActionCreateTable:
		err = d.onCreateTable(t, job)
	case model.ActionDropTable:
		err = d.onDropTable(t, job)
	case model.ActionAddColumn:
		err = d.onAddColumn(t, job)
	case model.ActionDropColumn:
		err = d.onDropColumn(t, job)
	case model.ActionAddIndex:
		err = d.onCreateIndex(t, job)
	case model.ActionDropIndex:
		err = d.onDropIndex(t, job)
	default:
		// invalid job, cancel it.
		job.State = model.JobCancelled
		err = errors.Errorf("invalid ddl job %v", job)
	}
	// saves error in job, so that others can know error happens.
	if err != nil {
		// if job is not cancelled, we should log this error.
		if job.State != model.JobCancelled {
			log.Errorf("run ddl job err %v", errors.ErrorStack(err))
		}
		job.Error = err.Error()
		job.ErrorCount++
	}
}
// waitSchemaChanged blocks for waitTime (normally 2 * lease) so every server
// has re-loaded the schema before the next state transition. A zero waitTime
// means local storage, where no cross-server wait is needed. The wait is cut
// short when the worker is shutting down.
func (d *ddl) waitSchemaChanged(waitTime time.Duration) {
	if waitTime == 0 {
		return
	}
	timeout := time.After(waitTime)
	select {
	case <-timeout:
	case <-d.quitCh:
	}
}

473
vendor/github.com/pingcap/tidb/ddl/index.go generated vendored Normal file
View file

@ -0,0 +1,473 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package ddl
import (
"github.com/juju/errors"
"github.com/ngaut/log"
"github.com/pingcap/tidb/ast"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/meta"
"github.com/pingcap/tidb/model"
"github.com/pingcap/tidb/mysql"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/table/tables"
"github.com/pingcap/tidb/terror"
"github.com/pingcap/tidb/util"
"github.com/pingcap/tidb/util/types"
)
// buildIndexInfo constructs a new IndexInfo in StateNone from the parsed
// column list, resolving each referenced column to its offset in the table.
// It fails when a referenced column does not exist.
func buildIndexInfo(tblInfo *model.TableInfo, unique bool, indexName model.CIStr, indexID int64, idxColNames []*ast.IndexColName) (*model.IndexInfo, error) {
	columns := make([]*model.IndexColumn, 0, len(idxColNames))
	for _, colName := range idxColNames {
		tblCol := findCol(tblInfo.Columns, colName.Column.Name.O)
		if tblCol == nil {
			return nil, errors.Errorf("CREATE INDEX: column does not exist: %s", colName.Column.Name.O)
		}
		columns = append(columns, &model.IndexColumn{
			Name:   tblCol.Name,
			Offset: tblCol.Offset,
			Length: colName.Length,
		})
	}
	// A new index always starts in StateNone and walks the schema-change states.
	return &model.IndexInfo{
		ID:      indexID,
		Name:    indexName,
		Columns: columns,
		Unique:  unique,
		State:   model.StateNone,
	}, nil
}
// addIndexColumnFlag marks the index's first column in the table metadata:
// a single-column unique index sets UniqueKeyFlag, anything else MultipleKeyFlag.
func addIndexColumnFlag(tblInfo *model.TableInfo, indexInfo *model.IndexInfo) {
	flag := uint(mysql.MultipleKeyFlag)
	if indexInfo.Unique && len(indexInfo.Columns) == 1 {
		flag = uint(mysql.UniqueKeyFlag)
	}
	tblInfo.Columns[indexInfo.Columns[0].Offset].Flag |= flag
}
// dropIndexColumnFlag clears the key flag set by addIndexColumnFlag on the
// index's first column, then re-applies flags from any remaining index whose
// leading column is the same one.
func dropIndexColumnFlag(tblInfo *model.TableInfo, indexInfo *model.IndexInfo) {
	firstCol := indexInfo.Columns[0]
	mask := uint(mysql.MultipleKeyFlag)
	if indexInfo.Unique && len(indexInfo.Columns) == 1 {
		mask = uint(mysql.UniqueKeyFlag)
	}
	tblInfo.Columns[firstCol.Offset].Flag &= ^mask
	// other index may still cover this col
	for _, idx := range tblInfo.Indices {
		if idx.Name.L == indexInfo.Name.L {
			continue
		}
		if idx.Columns[0].Name.L != firstCol.Name.L {
			continue
		}
		addIndexColumnFlag(tblInfo, idx)
	}
}
// onCreateIndex advances a CREATE INDEX job one step through the online
// schema-change states: none -> delete only -> write only -> write
// reorganization -> public. The reorganization step may span several calls
// while existing rows are backfilled into the new index.
func (d *ddl) onCreateIndex(t *meta.Meta, job *model.Job) error {
	schemaID := job.SchemaID
	tblInfo, err := d.getTableInfo(t, job)
	if err != nil {
		return errors.Trace(err)
	}
	var (
		unique      bool
		indexName   model.CIStr
		indexID     int64
		idxColNames []*ast.IndexColName
	)
	err = job.DecodeArgs(&unique, &indexName, &indexID, &idxColNames)
	if err != nil {
		job.State = model.JobCancelled
		return errors.Trace(err)
	}
	var indexInfo *model.IndexInfo
	for _, idx := range tblInfo.Indices {
		if idx.Name.L == indexName.L {
			if idx.State == model.StatePublic {
				// we already have a index with same index name
				job.State = model.JobCancelled
				return errors.Errorf("CREATE INDEX: index already exist %s", indexName)
			}
			// A non-public index with this name means we are resuming this job.
			indexInfo = idx
		}
	}
	if indexInfo == nil {
		indexInfo, err = buildIndexInfo(tblInfo, unique, indexName, indexID, idxColNames)
		if err != nil {
			job.State = model.JobCancelled
			return errors.Trace(err)
		}
		tblInfo.Indices = append(tblInfo.Indices, indexInfo)
	}
	_, err = t.GenSchemaVersion()
	if err != nil {
		return errors.Trace(err)
	}
	switch indexInfo.State {
	case model.StateNone:
		// none -> delete only
		job.SchemaState = model.StateDeleteOnly
		indexInfo.State = model.StateDeleteOnly
		err = t.UpdateTable(schemaID, tblInfo)
		return errors.Trace(err)
	case model.StateDeleteOnly:
		// delete only -> write only
		job.SchemaState = model.StateWriteOnly
		indexInfo.State = model.StateWriteOnly
		err = t.UpdateTable(schemaID, tblInfo)
		return errors.Trace(err)
	case model.StateWriteOnly:
		// write only -> reorganization
		job.SchemaState = model.StateWriteReorganization
		indexInfo.State = model.StateWriteReorganization
		// initialize SnapshotVer to 0 for later reorganization check.
		job.SnapshotVer = 0
		err = t.UpdateTable(schemaID, tblInfo)
		return errors.Trace(err)
	case model.StateWriteReorganization:
		// reorganization -> public
		reorgInfo, err := d.getReorgInfo(t, job)
		if err != nil || reorgInfo.first {
			// if we run reorg firstly, we should update the job snapshot version
			// and then run the reorg next time.
			return errors.Trace(err)
		}
		var tbl table.Table
		tbl, err = d.getTable(schemaID, tblInfo)
		if err != nil {
			return errors.Trace(err)
		}
		err = d.runReorgJob(func() error {
			return d.addTableIndex(tbl, indexInfo, reorgInfo)
		})
		if terror.ErrorEqual(err, errWaitReorgTimeout) {
			// if timeout, we should return, check for the owner and re-wait job done.
			return nil
		}
		if err != nil {
			return errors.Trace(err)
		}
		indexInfo.State = model.StatePublic
		// set column index flag.
		addIndexColumnFlag(tblInfo, indexInfo)
		if err = t.UpdateTable(schemaID, tblInfo); err != nil {
			return errors.Trace(err)
		}
		// finish this job
		job.SchemaState = model.StatePublic
		job.State = model.JobDone
		return nil
	default:
		// Fix: the switch is on indexInfo.State, but the original message
		// reported tblInfo.State, which would mislead debugging.
		return errors.Errorf("invalid index state %v", indexInfo.State)
	}
}
// onDropIndex advances a DROP INDEX job one step through the states:
// public -> write only -> delete only -> delete reorganization -> absent.
// The reorganization step deletes the index entries and may span several
// calls before the index metadata is finally removed.
func (d *ddl) onDropIndex(t *meta.Meta, job *model.Job) error {
	schemaID := job.SchemaID
	tblInfo, err := d.getTableInfo(t, job)
	if err != nil {
		return errors.Trace(err)
	}
	var indexName model.CIStr
	if err = job.DecodeArgs(&indexName); err != nil {
		job.State = model.JobCancelled
		return errors.Trace(err)
	}
	var indexInfo *model.IndexInfo
	for _, idx := range tblInfo.Indices {
		if idx.Name.L == indexName.L {
			indexInfo = idx
		}
	}
	if indexInfo == nil {
		job.State = model.JobCancelled
		return errors.Errorf("index %s doesn't exist", indexName)
	}
	_, err = t.GenSchemaVersion()
	if err != nil {
		return errors.Trace(err)
	}
	switch indexInfo.State {
	case model.StatePublic:
		// public -> write only
		job.SchemaState = model.StateWriteOnly
		indexInfo.State = model.StateWriteOnly
		err = t.UpdateTable(schemaID, tblInfo)
		return errors.Trace(err)
	case model.StateWriteOnly:
		// write only -> delete only
		job.SchemaState = model.StateDeleteOnly
		indexInfo.State = model.StateDeleteOnly
		err = t.UpdateTable(schemaID, tblInfo)
		return errors.Trace(err)
	case model.StateDeleteOnly:
		// delete only -> reorganization
		job.SchemaState = model.StateDeleteReorganization
		indexInfo.State = model.StateDeleteReorganization
		err = t.UpdateTable(schemaID, tblInfo)
		return errors.Trace(err)
	case model.StateDeleteReorganization:
		// reorganization -> absent
		tbl, err := d.getTable(schemaID, tblInfo)
		if err != nil {
			return errors.Trace(err)
		}
		err = d.runReorgJob(func() error {
			return d.dropTableIndex(tbl, indexInfo)
		})
		if terror.ErrorEqual(err, errWaitReorgTimeout) {
			// if timeout, we should return, check for the owner and re-wait job done.
			return nil
		}
		if err != nil {
			return errors.Trace(err)
		}
		// all reorganization jobs done, drop this index
		newIndices := make([]*model.IndexInfo, 0, len(tblInfo.Indices))
		for _, idx := range tblInfo.Indices {
			if idx.Name.L != indexName.L {
				newIndices = append(newIndices, idx)
			}
		}
		tblInfo.Indices = newIndices
		// set column index flag.
		dropIndexColumnFlag(tblInfo, indexInfo)
		if err = t.UpdateTable(schemaID, tblInfo); err != nil {
			return errors.Trace(err)
		}
		// finish this job
		job.SchemaState = model.StateNone
		job.State = model.JobDone
		return nil
	default:
		// Fix: the switch is on indexInfo.State, but the original message said
		// "invalid table state" and printed tblInfo.State — report the index state.
		return errors.Errorf("invalid index state %v", indexInfo.State)
	}
}
// checkRowExist reports whether the row with the given handle is present in
// the transaction's view. A missing row is not an error — it just means the
// row was deleted and needs no index entry.
func checkRowExist(txn kv.Transaction, t table.Table, handle int64) (bool, error) {
	_, err := txn.Get(t.RecordKey(handle, nil))
	switch {
	case err == nil:
		return true, nil
	case terror.ErrorEqual(err, kv.ErrNotExist):
		// If row doesn't exist, we may have deleted the row already,
		// no need to add index again.
		return false, nil
	default:
		return false, errors.Trace(err)
	}
}
// fetchRowColVals reads and decodes the values of the indexed columns for the
// row identified by handle, in index-column order.
func fetchRowColVals(txn kv.Transaction, t table.Table, handle int64, indexInfo *model.IndexInfo) ([]types.Datum, error) {
	tblCols := t.Cols()
	row := make([]types.Datum, 0, len(indexInfo.Columns))
	for _, idxCol := range indexInfo.Columns {
		col := tblCols[idxCol.Offset]
		// Each column value lives under its own key for this handle.
		raw, err := txn.Get(t.RecordKey(handle, col))
		if err != nil {
			return nil, errors.Trace(err)
		}
		datum, err := tables.DecodeValue(raw, &col.FieldType)
		if err != nil {
			return nil, errors.Trace(err)
		}
		row = append(row, datum)
	}
	return row, nil
}
// maxBatchSize caps how many row handles are collected (and how many keys are
// deleted) per transaction during reorganization, bounding transaction size.
const maxBatchSize = 1024
// How to add index in reorganization state?
// 1. Generate a snapshot with special version.
// 2. Traverse the snapshot, get every row in the table.
// 3. For one row, if the row has been already deleted, skip to next row.
// 4. If not deleted, check whether index has existed, if existed, skip to next row.
// 5. If index doesn't exist, create the index and then continue to handle next row.
func (d *ddl) addTableIndex(t table.Table, indexInfo *model.IndexInfo, reorgInfo *reorgInfo) error {
	nextHandle := reorgInfo.Handle
	snapshotVer := reorgInfo.SnapshotVer
	for {
		// Pull one batch of row handles from the snapshot, then backfill them.
		batch, err := d.getSnapshotRows(t, snapshotVer, nextHandle)
		if err != nil {
			return errors.Trace(err)
		}
		if len(batch) == 0 {
			// No rows past nextHandle: the whole table has been processed.
			return nil
		}
		nextHandle = batch[len(batch)-1] + 1
		if err = d.backfillTableIndex(t, indexInfo, batch, reorgInfo); err != nil {
			return errors.Trace(err)
		}
	}
}
// getSnapshotRows scans the table's record keys in the snapshot at the given
// version, starting at seekHandle, and returns up to maxBatchSize row handles
// in ascending order. An empty result means no rows remain.
func (d *ddl) getSnapshotRows(t table.Table, version uint64, seekHandle int64) ([]int64, error) {
	ver := kv.Version{Ver: version}
	snap, err := d.store.GetSnapshot(ver)
	if err != nil {
		return nil, errors.Trace(err)
	}
	defer snap.Release()
	firstKey := t.RecordKey(seekHandle, nil)
	it, err := snap.Seek(firstKey)
	if err != nil {
		return nil, errors.Trace(err)
	}
	defer it.Close()
	handles := make([]int64, 0, maxBatchSize)
	for it.Valid() {
		// Stop once the iterator leaves this table's record-key range.
		if !it.Key().HasPrefix(t.RecordPrefix()) {
			break
		}
		var handle int64
		handle, err = tables.DecodeRecordKeyHandle(it.Key())
		if err != nil {
			return nil, errors.Trace(err)
		}
		rk := t.RecordKey(handle, nil)
		handles = append(handles, handle)
		if len(handles) == maxBatchSize {
			break
		}
		// Skip the remaining column keys of this row to reach the next row.
		err = kv.NextUntil(it, util.RowKeyPrefixFilter(rk))
		if terror.ErrorEqual(err, kv.ErrNotExist) {
			break
		} else if err != nil {
			return nil, errors.Trace(err)
		}
	}
	return handles, nil
}
// lockRow writes the row key into the transaction so the row becomes part of
// the write set, causing a conflict with any concurrent writer at commit time.
func lockRow(txn kv.Transaction, t table.Table, h int64) error {
	lockKey := t.RecordKey(h, nil)
	return errors.Trace(txn.Set(lockKey, []byte(txn.String())))
}
// backfillTableIndex creates index entries for the given row handles, one
// transaction per handle. Each step is idempotent: deleted rows and already
// indexed rows are skipped, and the reorg progress handle is updated in the
// same transaction as the index write.
func (d *ddl) backfillTableIndex(t table.Table, indexInfo *model.IndexInfo, handles []int64, reorgInfo *reorgInfo) error {
	kvX := kv.NewKVIndex(t.IndexPrefix(), indexInfo.Name.L, indexInfo.ID, indexInfo.Unique)
	for _, handle := range handles {
		log.Debug("[ddl] building index...", handle)
		err := kv.RunInNewTxn(d.store, true, func(txn kv.Transaction) error {
			// Abort if the worker is closing or we lost ownership.
			if err := d.isReorgRunnable(txn); err != nil {
				return errors.Trace(err)
			}
			// first check row exists
			exist, err := checkRowExist(txn, t, handle)
			if err != nil {
				return errors.Trace(err)
			} else if !exist {
				// row doesn't exist, skip it.
				return nil
			}
			var vals []types.Datum
			vals, err = fetchRowColVals(txn, t, handle, indexInfo)
			if err != nil {
				return errors.Trace(err)
			}
			exist, _, err = kvX.Exist(txn, vals, handle)
			if err != nil {
				return errors.Trace(err)
			} else if exist {
				// index already exists, skip it.
				return nil
			}
			// Conflict with concurrent writers of this row (see lockRow).
			err = lockRow(txn, t, handle)
			if err != nil {
				return errors.Trace(err)
			}
			// create the index.
			err = kvX.Create(txn, vals, handle)
			if err != nil {
				return errors.Trace(err)
			}
			// update reorg next handle
			return errors.Trace(reorgInfo.UpdateHandle(txn, handle))
		})
		if err != nil {
			return errors.Trace(err)
		}
	}
	return nil
}
// dropTableIndex deletes every key of the index; all of them share the
// index's key prefix, so a prefix delete removes the whole index.
func (d *ddl) dropTableIndex(t table.Table, indexInfo *model.IndexInfo) error {
	prefix := kv.GenIndexPrefix(t.IndexPrefix(), indexInfo.ID)
	return errors.Trace(d.delKeysWithPrefix(prefix))
}

250
vendor/github.com/pingcap/tidb/ddl/reorg.go generated vendored Normal file
View file

@ -0,0 +1,250 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package ddl
import (
"fmt"
"time"
"github.com/juju/errors"
"github.com/pingcap/tidb/context"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/meta"
"github.com/pingcap/tidb/model"
"github.com/pingcap/tidb/terror"
)
// Compile-time assertion that reorgContext satisfies context.Context.
var _ context.Context = &reorgContext{}

// reorgContext implements context.Context interface for reorganization use.
type reorgContext struct {
	store kv.Storage // storage used to begin transactions in GetTxn
	// m backs SetValue/Value/ClearValue.
	m   map[fmt.Stringer]interface{}
	txn kv.Transaction // current transaction; nil when none is open
}
// GetTxn returns the context's current transaction, lazily starting one when
// none is open. With forceNew, any open transaction is committed first and a
// fresh one is begun.
func (c *reorgContext) GetTxn(forceNew bool) (kv.Transaction, error) {
	if forceNew && c.txn != nil {
		if err := c.txn.Commit(); err != nil {
			return nil, errors.Trace(err)
		}
		c.txn = nil
	}
	if c.txn == nil {
		txn, err := c.store.Begin()
		if err != nil {
			return nil, errors.Trace(err)
		}
		c.txn = txn
	}
	return c.txn, nil
}
// FinishTxn closes the current transaction — rolling back when rollback is
// true, committing otherwise. It is a no-op when no transaction is open.
func (c *reorgContext) FinishTxn(rollback bool) error {
	txn := c.txn
	if txn == nil {
		return nil
	}
	c.txn = nil
	if rollback {
		return errors.Trace(txn.Rollback())
	}
	return errors.Trace(txn.Commit())
}
// SetValue stores a key/value pair on the context.
func (c *reorgContext) SetValue(key fmt.Stringer, value interface{}) {
	c.m[key] = value
}

// Value returns the value stored under key, or nil when absent.
func (c *reorgContext) Value(key fmt.Stringer) interface{} {
	return c.m[key]
}

// ClearValue removes key from the context.
func (c *reorgContext) ClearValue(key fmt.Stringer) {
	delete(c.m, key)
}
// newReorgContext builds a fresh context.Context for reorganization work,
// backed by the ddl's store and an empty value map.
func (d *ddl) newReorgContext() context.Context {
	return &reorgContext{
		store: d.store,
		m:     map[fmt.Stringer]interface{}{},
	}
}
// waitReorgTimeout is how long runReorgJob waits for the reorganization
// goroutine when the lease is 0 (local storage).
const waitReorgTimeout = 10 * time.Second

// errWaitReorgTimeout signals that the reorganization is still in progress
// and the caller should re-check ownership and wait again.
var errWaitReorgTimeout = errors.New("wait for reorganization timeout")
// runReorgJob runs f in a background goroutine (starting it only once; later
// calls re-wait on the same goroutine) and waits for it to finish, time out,
// or for the worker to quit. Timeouts surface as errWaitReorgTimeout.
func (d *ddl) runReorgJob(f func() error) error {
	if d.reorgDoneCh == nil {
		// start a reorganization job
		d.wait.Add(1)
		// Buffered so the goroutine can finish even if nobody is receiving.
		d.reorgDoneCh = make(chan error, 1)
		go func() {
			defer d.wait.Done()
			d.reorgDoneCh <- f()
		}()
	}
	waitTimeout := waitReorgTimeout
	// if d.lease is 0, we are using a local storage,
	// and we can wait the reorganization to be done here.
	// if d.lease > 0, we don't need to wait here because
	// we will wait 2 * lease outer and try checking again,
	// so we use a very little timeout here.
	if d.lease > 0 {
		waitTimeout = 1 * time.Millisecond
	}
	// wait reorganization job done or timeout
	select {
	case err := <-d.reorgDoneCh:
		// Clearing the channel lets the next reorg start a new goroutine.
		d.reorgDoneCh = nil
		return errors.Trace(err)
	case <-d.quitCh:
		// we return errWaitReorgTimeout here too, so that outer loop will break.
		return errWaitReorgTimeout
	case <-time.After(waitTimeout):
		// if timeout, we will return, check the owner and retry to wait job done again.
		return errWaitReorgTimeout
	}
}
// isReorgRunnable reports whether this server may keep reorganizing: the
// worker must still be open and this server must own the DDL job queue.
func (d *ddl) isReorgRunnable(txn kv.Transaction) error {
	if d.isClosed() {
		// worker is closed, can't run reorganization.
		return errors.Trace(ErrWorkerClosed)
	}
	owner, err := meta.NewMeta(txn).GetDDLJobOwner()
	if err != nil {
		return errors.Trace(err)
	}
	if owner == nil || owner.OwnerID != d.uuid {
		// if no owner, we will try later, so here just return error.
		// or another server is owner, return error too.
		return errors.Trace(ErrNotOwner)
	}
	return nil
}
// delKeysWithPrefix deletes every key starting with prefix, in batches of at
// most maxBatchSize keys per transaction, until a scan finds no more keys.
// Each batch re-checks that reorganization is still allowed to run.
func (d *ddl) delKeysWithPrefix(prefix kv.Key) error {
	for {
		// keys is captured by the closure so the outer loop can see how many
		// keys the last transaction collected.
		keys := make([]kv.Key, 0, maxBatchSize)
		err := kv.RunInNewTxn(d.store, true, func(txn kv.Transaction) error {
			if err1 := d.isReorgRunnable(txn); err1 != nil {
				return errors.Trace(err1)
			}
			iter, err := txn.Seek(prefix)
			if err != nil {
				return errors.Trace(err)
			}
			defer iter.Close()
			// Collect up to maxBatchSize keys under the prefix.
			for i := 0; i < maxBatchSize; i++ {
				if iter.Valid() && iter.Key().HasPrefix(prefix) {
					keys = append(keys, iter.Key().Clone())
					err = iter.Next()
					if err != nil {
						return errors.Trace(err)
					}
				} else {
					break
				}
			}
			for _, key := range keys {
				err := txn.Delete(key)
				// must skip ErrNotExist
				// if key doesn't exist, skip this error.
				if err != nil && !terror.ErrorEqual(err, kv.ErrNotExist) {
					return errors.Trace(err)
				}
			}
			return nil
		})
		if err != nil {
			return errors.Trace(err)
		}
		// delete no keys, return.
		if len(keys) == 0 {
			return nil
		}
	}
}
// reorgInfo tracks the progress of a reorganization for one job.
type reorgInfo struct {
	*model.Job
	Handle int64 // next row handle to process
	d      *ddl
	first  bool // true on the first pass, i.e. the job had no snapshot version yet
}
// getReorgInfo loads reorganization progress for the job. On the first call
// (SnapshotVer == 0) it records the current storage version on the job and
// marks the info as first; later calls restore the saved handle and resume
// just past it.
func (d *ddl) getReorgInfo(t *meta.Meta, job *model.Job) (*reorgInfo, error) {
	var err error
	info := &reorgInfo{
		Job:   job,
		d:     d,
		first: job.SnapshotVer == 0,
	}
	if info.first {
		// get the current version for reorganization if we don't have
		var ver kv.Version
		ver, err = d.store.CurrentVersion()
		if err != nil {
			return nil, errors.Trace(err)
		} else if ver.Ver <= 0 {
			return nil, errors.Errorf("invalid storage current version %d", ver.Ver)
		}
		job.SnapshotVer = ver.Ver
	} else {
		info.Handle, err = t.GetDDLReorgHandle(job)
		if err != nil {
			return nil, errors.Trace(err)
		}
	}
	if info.Handle > 0 {
		// we have already handled this handle, so use next
		info.Handle++
	}
	return info, errors.Trace(err)
}
// UpdateHandle records handle as the last processed row for this job, inside
// the given transaction, so a restart resumes from the next handle.
func (r *reorgInfo) UpdateHandle(txn kv.Transaction, handle int64) error {
	m := meta.NewMeta(txn)
	err := m.UpdateDDLReorgHandle(r.Job, handle)
	return errors.Trace(err)
}

163
vendor/github.com/pingcap/tidb/ddl/schema.go generated vendored Normal file
View file

@ -0,0 +1,163 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package ddl
import (
"github.com/juju/errors"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/meta"
"github.com/pingcap/tidb/meta/autoid"
"github.com/pingcap/tidb/model"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/terror"
)
// onCreateSchema handles a CREATE DATABASE job: the schema goes straight from
// none to public in a single step. A name clash with a different schema ID
// cancels the job; the same ID means we are resuming this job.
func (d *ddl) onCreateSchema(t *meta.Meta, job *model.Job) error {
	schemaID := job.SchemaID
	dbInfo := &model.DBInfo{}
	if err := job.DecodeArgs(dbInfo); err != nil {
		// arg error, cancel this job.
		job.State = model.JobCancelled
		return errors.Trace(err)
	}
	dbInfo.ID = schemaID
	dbInfo.State = model.StateNone
	dbs, err := t.ListDatabases()
	if err != nil {
		return errors.Trace(err)
	}
	for _, db := range dbs {
		if db.Name.L == dbInfo.Name.L {
			if db.ID != schemaID {
				// database exists, can't create, we should cancel this job now.
				job.State = model.JobCancelled
				return errors.Trace(infoschema.DatabaseExists)
			}
			// Same ID: the job already ran partially; continue with stored info.
			dbInfo = db
		}
	}
	_, err = t.GenSchemaVersion()
	if err != nil {
		return errors.Trace(err)
	}
	switch dbInfo.State {
	case model.StateNone:
		// none -> public
		job.SchemaState = model.StatePublic
		dbInfo.State = model.StatePublic
		err = t.CreateDatabase(dbInfo)
		if err != nil {
			return errors.Trace(err)
		}
		// finish this job
		job.State = model.JobDone
		return nil
	default:
		// we can't enter here.
		return errors.Errorf("invalid db state %v", dbInfo.State)
	}
}
// delReorgSchema is the background half of DROP DATABASE: it deletes the data
// of every table that belonged to the dropped schema. A schema that is
// already gone from meta finishes the job immediately.
func (d *ddl) delReorgSchema(t *meta.Meta, job *model.Job) error {
	dbInfo := &model.DBInfo{}
	if err := job.DecodeArgs(dbInfo); err != nil {
		// arg error, cancel this job.
		job.State = model.JobCancelled
		return errors.Trace(err)
	}
	tables, err := t.ListTables(dbInfo.ID)
	if terror.ErrorEqual(meta.ErrDBNotExists, err) {
		// Nothing left to delete.
		job.State = model.JobDone
		return nil
	}
	if err != nil {
		return errors.Trace(err)
	}
	if err = d.dropSchemaData(dbInfo, tables); err != nil {
		return errors.Trace(err)
	}
	// finish this background job
	job.SchemaState = model.StateNone
	job.State = model.JobDone
	return nil
}
// onDropSchema advances a DROP DATABASE job one step through the states:
// public -> write only -> delete only -> dropped. The final step removes the
// schema from meta; the data itself is deleted later by delReorgSchema, which
// receives dbInfo via job.Args.
func (d *ddl) onDropSchema(t *meta.Meta, job *model.Job) error {
	dbInfo, err := t.GetDatabase(job.SchemaID)
	if err != nil {
		return errors.Trace(err)
	}
	if dbInfo == nil {
		job.State = model.JobCancelled
		return errors.Trace(infoschema.DatabaseNotExists)
	}
	_, err = t.GenSchemaVersion()
	if err != nil {
		return errors.Trace(err)
	}
	switch dbInfo.State {
	case model.StatePublic:
		// public -> write only
		job.SchemaState = model.StateWriteOnly
		dbInfo.State = model.StateWriteOnly
		err = t.UpdateDatabase(dbInfo)
	case model.StateWriteOnly:
		// write only -> delete only
		job.SchemaState = model.StateDeleteOnly
		dbInfo.State = model.StateDeleteOnly
		err = t.UpdateDatabase(dbInfo)
	case model.StateDeleteOnly:
		dbInfo.State = model.StateDeleteReorganization
		err = t.UpdateDatabase(dbInfo)
		// Fix: the original overwrote this error with the DropDatabase result,
		// silently ignoring a failed update.
		if err != nil {
			break
		}
		if err = t.DropDatabase(dbInfo.ID); err != nil {
			break
		}
		// finish this job
		job.Args = []interface{}{dbInfo}
		job.State = model.JobDone
		job.SchemaState = model.StateNone
	default:
		// we can't enter here.
		err = errors.Errorf("invalid db state %v", dbInfo.State)
	}
	return errors.Trace(err)
}
// dropSchemaData deletes all row and index data of every table that belonged
// to the dropped schema.
func (d *ddl) dropSchemaData(dbInfo *model.DBInfo, tables []*model.TableInfo) error {
	for _, tblInfo := range tables {
		tbl, err := table.TableFromMeta(autoid.NewAllocator(d.store, dbInfo.ID), tblInfo)
		if err != nil {
			return errors.Trace(err)
		}
		if err = d.dropTableData(tbl); err != nil {
			return errors.Trace(err)
		}
	}
	return nil
}

123
vendor/github.com/pingcap/tidb/ddl/stat.go generated vendored Normal file
View file

@ -0,0 +1,123 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package ddl
import (
"github.com/juju/errors"
"github.com/pingcap/tidb/inspectkv"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/sessionctx/variable"
)
// Keys under which Stats reports DDL status; the ddl* group describes the DDL
// job queue and its owner, the bg* group mirrors it for background jobs.
var (
	serverID             = "server_id"
	ddlSchemaVersion     = "ddl_schema_version"
	ddlOwnerID           = "ddl_owner_id"
	ddlOwnerLastUpdateTS = "ddl_owner_last_update_ts"
	ddlJobID             = "ddl_job_id"
	ddlJobAction         = "ddl_job_action"
	ddlJobLastUpdateTS   = "ddl_job_last_update_ts"
	ddlJobState          = "ddl_job_state"
	ddlJobError          = "ddl_job_error"
	ddlJobSchemaState    = "ddl_job_schema_state"
	ddlJobSchemaID       = "ddl_job_schema_id"
	ddlJobTableID        = "ddl_job_table_id"
	ddlJobSnapshotVer    = "ddl_job_snapshot_ver"
	ddlJobReorgHandle    = "ddl_job_reorg_handle"
	ddlJobArgs           = "ddl_job_args"
	bgSchemaVersion      = "bg_schema_version"
	bgOwnerID            = "bg_owner_id"
	bgOwnerLastUpdateTS  = "bg_owner_last_update_ts"
	bgJobID              = "bg_job_id"
	bgJobAction          = "bg_job_action"
	bgJobLastUpdateTS    = "bg_job_last_update_ts"
	bgJobState           = "bg_job_state"
	bgJobError           = "bg_job_error"
	bgJobSchemaState     = "bg_job_schema_state"
	bgJobSchemaID        = "bg_job_schema_id"
	bgJobTableID         = "bg_job_table_id"
	bgJobSnapshotVer     = "bg_job_snapshot_ver"
	bgJobReorgHandle     = "bg_job_reorg_handle"
	bgJobArgs            = "bg_job_args"
)
// GetScope gets the status variables scope. The status name is currently
// ignored because every DDL status variable uses the default scope.
func (d *ddl) GetScope(status string) variable.ScopeFlag {
	// Now ddl status variables scope are all default scope.
	return variable.DefaultScopeFlag
}
// Stats returns the DDL statistics: schema version, owner, and current job
// details for both the DDL queue and the background queue, read in a single
// transaction. Owner/job entries are present only when one exists.
func (d *ddl) Stats() (map[string]interface{}, error) {
	m := make(map[string]interface{})
	m[serverID] = d.uuid
	var ddlInfo, bgInfo *inspectkv.DDLInfo
	err := kv.RunInNewTxn(d.store, false, func(txn kv.Transaction) error {
		var err1 error
		ddlInfo, err1 = inspectkv.GetDDLInfo(txn)
		if err1 != nil {
			return errors.Trace(err1)
		}
		bgInfo, err1 = inspectkv.GetBgDDLInfo(txn)
		return errors.Trace(err1)
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	m[ddlSchemaVersion] = ddlInfo.SchemaVer
	if ddlInfo.Owner != nil {
		m[ddlOwnerID] = ddlInfo.Owner.OwnerID
		// LastUpdateTS uses nanosecond.
		m[ddlOwnerLastUpdateTS] = ddlInfo.Owner.LastUpdateTS / 1e9
	}
	if ddlInfo.Job != nil {
		m[ddlJobID] = ddlInfo.Job.ID
		m[ddlJobAction] = ddlInfo.Job.Type.String()
		m[ddlJobLastUpdateTS] = ddlInfo.Job.LastUpdateTS / 1e9
		m[ddlJobState] = ddlInfo.Job.State.String()
		m[ddlJobError] = ddlInfo.Job.Error
		m[ddlJobSchemaState] = ddlInfo.Job.SchemaState.String()
		m[ddlJobSchemaID] = ddlInfo.Job.SchemaID
		m[ddlJobTableID] = ddlInfo.Job.TableID
		m[ddlJobSnapshotVer] = ddlInfo.Job.SnapshotVer
		m[ddlJobReorgHandle] = ddlInfo.ReorgHandle
		m[ddlJobArgs] = ddlInfo.Job.Args
	}
	// background DDL info
	m[bgSchemaVersion] = bgInfo.SchemaVer
	if bgInfo.Owner != nil {
		m[bgOwnerID] = bgInfo.Owner.OwnerID
		// LastUpdateTS uses nanosecond.
		m[bgOwnerLastUpdateTS] = bgInfo.Owner.LastUpdateTS / 1e9
	}
	if bgInfo.Job != nil {
		m[bgJobID] = bgInfo.Job.ID
		m[bgJobAction] = bgInfo.Job.Type.String()
		m[bgJobLastUpdateTS] = bgInfo.Job.LastUpdateTS / 1e9
		m[bgJobState] = bgInfo.Job.State.String()
		m[bgJobError] = bgInfo.Job.Error
		m[bgJobSchemaState] = bgInfo.Job.SchemaState.String()
		m[bgJobSchemaID] = bgInfo.Job.SchemaID
		m[bgJobTableID] = bgInfo.Job.TableID
		m[bgJobSnapshotVer] = bgInfo.Job.SnapshotVer
		m[bgJobReorgHandle] = bgInfo.ReorgHandle
		m[bgJobArgs] = bgInfo.Job.Args
	}
	return m, nil
}

194
vendor/github.com/pingcap/tidb/ddl/table.go generated vendored Normal file
View file

@ -0,0 +1,194 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package ddl
import (
"github.com/juju/errors"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/meta"
"github.com/pingcap/tidb/meta/autoid"
"github.com/pingcap/tidb/model"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/terror"
)
// onCreateTable handles a CREATE TABLE job: the table goes straight from none
// to public in one step. A name clash with a different table ID cancels the
// job; the same ID means we are resuming this job.
func (d *ddl) onCreateTable(t *meta.Meta, job *model.Job) error {
	schemaID := job.SchemaID
	tbInfo := &model.TableInfo{}
	if err := job.DecodeArgs(tbInfo); err != nil {
		// arg error, cancel this job.
		job.State = model.JobCancelled
		return errors.Trace(err)
	}
	tbInfo.State = model.StateNone
	tables, err := t.ListTables(schemaID)
	if terror.ErrorEqual(err, meta.ErrDBNotExists) {
		job.State = model.JobCancelled
		return errors.Trace(infoschema.DatabaseNotExists)
	} else if err != nil {
		return errors.Trace(err)
	}
	for _, tbl := range tables {
		if tbl.Name.L == tbInfo.Name.L {
			if tbl.ID != tbInfo.ID {
				// table exists, can't create, we should cancel this job now.
				job.State = model.JobCancelled
				return errors.Trace(infoschema.TableExists)
			}
			// Same ID: the job already ran partially; continue with stored info.
			tbInfo = tbl
		}
	}
	_, err = t.GenSchemaVersion()
	if err != nil {
		return errors.Trace(err)
	}
	switch tbInfo.State {
	case model.StateNone:
		// none -> public
		job.SchemaState = model.StatePublic
		tbInfo.State = model.StatePublic
		err = t.CreateTable(schemaID, tbInfo)
		if err != nil {
			return errors.Trace(err)
		}
		// finish this job
		job.State = model.JobDone
		return nil
	default:
		return errors.Errorf("invalid table state %v", tbInfo.State)
	}
}
// delReorgTable is the background half of DROP TABLE: it deletes the row and
// index data of the dropped table, whose metadata is passed via job args.
func (d *ddl) delReorgTable(t *meta.Meta, job *model.Job) error {
	tblInfo := &model.TableInfo{}
	err := job.DecodeArgs(tblInfo)
	if err != nil {
		// arg error, cancel this job.
		job.State = model.JobCancelled
		return errors.Trace(err)
	}
	tblInfo.State = model.StateDeleteReorganization
	tbl, err := d.getTable(job.SchemaID, tblInfo)
	if err != nil {
		return errors.Trace(err)
	}
	err = d.dropTableData(tbl)
	if err != nil {
		return errors.Trace(err)
	}
	// finish this background job
	job.SchemaState = model.StateNone
	job.State = model.JobDone
	return nil
}
// onDropTable advances a DROP TABLE job one step through the states:
// public -> write only -> delete only -> dropped. The final step removes the
// table from meta; the data itself is deleted later by delReorgTable, which
// receives tblInfo via job.Args.
func (d *ddl) onDropTable(t *meta.Meta, job *model.Job) error {
	schemaID := job.SchemaID
	tableID := job.TableID
	tblInfo, err := t.GetTable(schemaID, tableID)
	if terror.ErrorEqual(err, meta.ErrDBNotExists) {
		job.State = model.JobCancelled
		return errors.Trace(infoschema.DatabaseNotExists)
	} else if err != nil {
		return errors.Trace(err)
	}
	if tblInfo == nil {
		job.State = model.JobCancelled
		return errors.Trace(infoschema.TableNotExists)
	}
	_, err = t.GenSchemaVersion()
	if err != nil {
		return errors.Trace(err)
	}
	switch tblInfo.State {
	case model.StatePublic:
		// public -> write only
		job.SchemaState = model.StateWriteOnly
		tblInfo.State = model.StateWriteOnly
		err = t.UpdateTable(schemaID, tblInfo)
	case model.StateWriteOnly:
		// write only -> delete only
		job.SchemaState = model.StateDeleteOnly
		tblInfo.State = model.StateDeleteOnly
		err = t.UpdateTable(schemaID, tblInfo)
	case model.StateDeleteOnly:
		tblInfo.State = model.StateNone
		err = t.UpdateTable(schemaID, tblInfo)
		// Fix: the original overwrote this error with the DropTable result,
		// silently ignoring a failed update.
		if err != nil {
			break
		}
		if err = t.DropTable(job.SchemaID, job.TableID); err != nil {
			break
		}
		// finish this job
		job.Args = []interface{}{tblInfo}
		job.State = model.JobDone
		job.SchemaState = model.StateNone
	default:
		err = errors.Errorf("invalid table state %v", tblInfo.State)
	}
	return errors.Trace(err)
}
// getTable builds a table.Table from its meta info, wiring up an
// auto-increment ID allocator scoped to the owning schema.
// On error the table value is whatever TableFromMeta returned.
func (d *ddl) getTable(schemaID int64, tblInfo *model.TableInfo) (table.Table, error) {
	alloc := autoid.NewAllocator(d.store, schemaID)
	tbl, err := table.TableFromMeta(alloc, tblInfo)
	return tbl, errors.Trace(err)
}
// getTableInfo loads the table info referenced by job and validates it:
// the database and table must exist and the table must be in public state.
// Any violation cancels the job and returns the corresponding error.
func (d *ddl) getTableInfo(t *meta.Meta, job *model.Job) (*model.TableInfo, error) {
	tblInfo, err := t.GetTable(job.SchemaID, job.TableID)
	switch {
	case terror.ErrorEqual(err, meta.ErrDBNotExists):
		job.State = model.JobCancelled
		return nil, errors.Trace(infoschema.DatabaseNotExists)
	case err != nil:
		return nil, errors.Trace(err)
	case tblInfo == nil:
		job.State = model.JobCancelled
		return nil, errors.Trace(infoschema.TableNotExists)
	}
	if tblInfo.State != model.StatePublic {
		// Only public tables may be altered by this job.
		job.State = model.JobCancelled
		return nil, errors.Errorf("table %s is not in public, but %s", tblInfo.Name.L, tblInfo.State)
	}
	return tblInfo, nil
}
// dropTableData removes every key belonging to the table: first all row
// records, then all index entries.
func (d *ddl) dropTableData(t table.Table) error {
	// Delete the row records.
	if err := d.delKeysWithPrefix(t.RecordPrefix()); err != nil {
		return errors.Trace(err)
	}
	// Delete the index entries.
	return errors.Trace(d.delKeysWithPrefix(t.IndexPrefix()))
}

270
vendor/github.com/pingcap/tidb/domain/domain.go generated vendored Normal file
View file

@ -0,0 +1,270 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package domain
import (
"sync"
"sync/atomic"
"time"
"github.com/juju/errors"
"github.com/ngaut/log"
"github.com/pingcap/tidb/ddl"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/meta"
"github.com/pingcap/tidb/model"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/store/localstore"
"github.com/pingcap/tidb/terror"
)
var ddlLastReloadSchemaTS = "ddl_last_reload_schema_ts"
// Domain represents a storage space. Different domains can use the same database name.
// Multiple domains can be used in parallel without synchronization.
type Domain struct {
	store      kv.Storage         // underlying KV storage
	infoHandle *infoschema.Handle // holds the currently loaded information schema
	ddl        ddl.DDL            // DDL executor bound to this domain
	leaseCh    chan time.Duration // receives new lease durations from SetLease
	// lastLeaseTS records the time of the last successful schema reload,
	// in nanoseconds; accessed atomically.
	lastLeaseTS int64
	m           sync.Mutex // serializes reload
}
// loadInfoSchema reads all public databases and tables from the meta layer
// inside txn and installs them into the info schema handle. It is a no-op
// when the stored schema version is not newer than the cached one.
func (do *Domain) loadInfoSchema(txn kv.Transaction) (err error) {
	m := meta.NewMeta(txn)
	schemaMetaVersion, err := m.GetSchemaVersion()
	if err != nil {
		return errors.Trace(err)
	}
	info := do.infoHandle.Get()
	if info != nil && schemaMetaVersion <= info.SchemaMetaVersion() {
		// info may be changed by other txn, so here its version may be bigger than schema version,
		// so we don't need to reload.
		log.Debugf("[ddl] schema version is still %d, no need reload", schemaMetaVersion)
		return nil
	}
	schemas, err := m.ListDatabases()
	if err != nil {
		return errors.Trace(err)
	}
	for _, di := range schemas {
		if di.State != model.StatePublic {
			// schema is not public, can't be used outside.
			continue
		}
		tables, err1 := m.ListTables(di.ID)
		if err1 != nil {
			return errors.Trace(err1)
		}
		di.Tables = make([]*model.TableInfo, 0, len(tables))
		for _, tbl := range tables {
			if tbl.State != model.StatePublic {
				// table is not public, can't be used outside.
				continue
			}
			di.Tables = append(di.Tables, tbl)
		}
	}
	log.Infof("[ddl] loadInfoSchema %d", schemaMetaVersion)
	err = do.infoHandle.Set(schemas, schemaMetaVersion)
	return errors.Trace(err)
}
// InfoSchema gets information schema from domain, reloading it first if the
// cached copy has gone stale (older than the DDL lease).
func (do *Domain) InfoSchema() infoschema.InfoSchema {
	// try reload if possible.
	do.tryReload()
	return do.infoHandle.Get()
}
// DDL gets DDL from domain.
func (do *Domain) DDL() ddl.DDL {
	return do.ddl
}
// Store gets KV store from domain.
func (do *Domain) Store() kv.Storage {
	return do.store
}
// SetLease will reset the lease time for online DDL change. The new lease is
// delivered to the schema-reload loop via leaseCh and forwarded to the DDL
// worker as well.
func (do *Domain) SetLease(lease time.Duration) {
	do.leaseCh <- lease
	// let ddl to reset lease too.
	do.ddl.SetLease(lease)
}
// Stats returns the domain statistic. Currently it only reports the time of
// the last schema reload, converted from nanoseconds to seconds.
func (do *Domain) Stats() (map[string]interface{}, error) {
	m := make(map[string]interface{})
	m[ddlLastReloadSchemaTS] = atomic.LoadInt64(&do.lastLeaseTS) / 1e9
	return m, nil
}
// GetScope gets the status variables scope.
func (do *Domain) GetScope(status string) variable.ScopeFlag {
	// Now domain status variables scope are all default scope.
	return variable.DefaultScopeFlag
}
// tryReload forces a schema reload when the cached schema is older than one
// DDL lease, compensating for a delayed or skipped periodic reload.
func (do *Domain) tryReload() {
	// if we don't have update the schema for a long time > lease, we must force reloading it.
	// Although we try to reload schema every lease time in a goroutine, sometimes it may not
	// run accurately, e.g, the machine has a very high load, running the ticker is delayed.
	last := atomic.LoadInt64(&do.lastLeaseTS)
	lease := do.ddl.GetLease()
	// if lease is 0, we use the local store, so no need to reload.
	if lease > 0 && time.Now().UnixNano()-last > lease.Nanoseconds() {
		do.mustReload()
	}
}
// minReloadTimeout is the floor for the reload deadline, regardless of lease.
const minReloadTimeout = 20 * time.Second

// reload loads the info schema in a fresh transaction, retrying on failure
// until it succeeds, the store reports closed, or the timeout (half the DDL
// lease, at least minReloadTimeout) elapses. Only one reload runs at a time.
func (do *Domain) reload() error {
	// lock here for only once at same time.
	do.m.Lock()
	defer do.m.Unlock()
	timeout := do.ddl.GetLease() / 2
	if timeout < minReloadTimeout {
		timeout = minReloadTimeout
	}
	// Buffered so the goroutine can exit even after we time out below.
	done := make(chan error, 1)
	go func() {
		var err error
		for {
			err = kv.RunInNewTxn(do.store, false, do.loadInfoSchema)
			// if err is db closed, we will return it directly, otherwise, we will
			// check reloading again.
			if terror.ErrorEqual(err, localstore.ErrDBClosed) {
				break
			}
			if err != nil {
				log.Errorf("[ddl] load schema err %v, retry again", errors.ErrorStack(err))
				// TODO: use a backoff algorithm.
				time.Sleep(500 * time.Millisecond)
				continue
			}
			// Record the reload time so tryReload can judge staleness.
			atomic.StoreInt64(&do.lastLeaseTS, time.Now().UnixNano())
			break
		}
		done <- err
	}()
	select {
	case err := <-done:
		return errors.Trace(err)
	case <-time.After(timeout):
		return errors.New("reload schema timeout")
	}
}
// mustReload reloads the schema and terminates the process on failure, since
// serving queries against a stale or unknown schema could corrupt data.
func (do *Domain) mustReload() {
	// if reload error, we will terminate whole program to guarantee data safe.
	err := do.reload()
	if err != nil {
		log.Fatalf("[ddl] reload schema err %v", errors.ErrorStack(err))
	}
}
// check schema every 300 seconds default.
const defaultLoadTime = 300 * time.Second

// loadSchemaInLoop periodically reloads the info schema, once per lease
// (defaultLoadTime when lease <= 0). It also services lease updates sent
// through leaseCh, resetting the ticker when the lease changes.
func (do *Domain) loadSchemaInLoop(lease time.Duration) {
	if lease <= 0 {
		lease = defaultLoadTime
	}
	ticker := time.NewTicker(lease)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			err := do.reload()
			// we may close store in test, but the domain load schema loop is still checking,
			// so we can't panic for ErrDBClosed and just return here.
			if terror.ErrorEqual(err, localstore.ErrDBClosed) {
				return
			} else if err != nil {
				log.Fatalf("[ddl] reload schema err %v", errors.ErrorStack(err))
			}
		case newLease := <-do.leaseCh:
			if newLease <= 0 {
				newLease = defaultLoadTime
			}
			if lease == newLease {
				// nothing to do
				continue
			}
			lease = newLease
			// reset ticker too.
			ticker.Stop()
			ticker = time.NewTicker(lease)
		}
	}
}
// ddlCallback hooks DDL events back into the owning domain so that every
// completed DDL change triggers a schema reload.
type ddlCallback struct {
	ddl.BaseCallback
	do *Domain
}
// OnChanged is invoked after a DDL job finishes; on success it forces the
// domain to reload the info schema (terminating the process if that fails).
func (c *ddlCallback) OnChanged(err error) error {
	if err != nil {
		return err
	}
	log.Warnf("[ddl] on DDL change")
	c.do.mustReload()
	return nil
}
// NewDomain creates a new domain. It performs an initial (mandatory) schema
// load, registers the domain for statistics reporting, and starts the
// background schema-reload loop.
func NewDomain(store kv.Storage, lease time.Duration) (d *Domain, err error) {
	d = &Domain{
		store: store,
		// Buffered so SetLease does not block when the loop is busy.
		leaseCh: make(chan time.Duration, 1),
	}
	d.infoHandle = infoschema.NewHandle(d.store)
	d.ddl = ddl.NewDDL(d.store, d.infoHandle, &ddlCallback{do: d}, lease)
	d.mustReload()
	variable.RegisterStatistics(d)
	go d.loadSchemaInLoop(lease)
	return d, nil
}

560
vendor/github.com/pingcap/tidb/driver.go generated vendored Normal file
View file

@ -0,0 +1,560 @@
// Copyright 2013 The ql Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSES/QL-LICENSE file.
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
// database/sql/driver
package tidb
import (
"database/sql"
"database/sql/driver"
"io"
"net/url"
"path/filepath"
"strings"
"sync"
"github.com/juju/errors"
"github.com/pingcap/tidb/ast"
"github.com/pingcap/tidb/model"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/terror"
"github.com/pingcap/tidb/util/types"
)
const (
	// DriverName is name of TiDB driver.
	DriverName = "tidb"
)
var (
	// Compile-time checks that our types satisfy the database/sql/driver
	// interfaces.
	_ driver.Conn    = (*driverConn)(nil)
	_ driver.Execer  = (*driverConn)(nil)
	_ driver.Queryer = (*driverConn)(nil)
	_ driver.Tx      = (*driverConn)(nil)
	_ driver.Result  = (*driverResult)(nil)
	_ driver.Rows    = (*driverRows)(nil)
	_ driver.Stmt    = (*driverStmt)(nil)
	_ driver.Driver  = (*sqlDriver)(nil)

	// SQL text used to drive transaction control through the session.
	txBeginSQL    = "BEGIN;"
	txCommitSQL   = "COMMIT;"
	txRollbackSQL = "ROLLBACK;"

	errNoResult = errors.New("query statement does not produce a result set (no top level SELECT)")
)
type errList []error
// driverParams holds the values parsed out of a DSN.
type driverParams struct {
	storePath string // storage URL with the DB name stripped off
	dbName    string // database name, the last path component of the DSN
	// when set to true `mysql.Time` isn't encoded as string but passed as `time.Time`
	// this option is named for compatibility the same as in the mysql driver
	// while we actually do not have additional parsing to do
	parseTime bool
}
// append records err in the list; nil errors are ignored.
func (e *errList) append(err error) {
	if err == nil {
		return
	}
	*e = append(*e, err)
}
// error converts the list into an error value: nil when empty, the list
// itself otherwise.
func (e errList) error() error {
	if len(e) > 0 {
		return e
	}
	return nil
}
// Error implements the error interface by joining every message with a
// newline.
func (e errList) Error() string {
	msgs := make([]string, 0, len(e))
	for _, err := range e {
		msgs = append(msgs, err.Error())
	}
	return strings.Join(msgs, "\n")
}
func params(args []driver.Value) []interface{} {
r := make([]interface{}, len(args))
for i, v := range args {
r[i] = interface{}(v)
}
return r
}
var (
	// tidbDriver is the single driver instance registered with database/sql.
	tidbDriver = &sqlDriver{}
	// driverOnce guards registration so RegisterDriver is idempotent.
	driverOnce sync.Once
)
// RegisterDriver registers TiDB driver.
// The name argument can be optionally prefixed by "engine://". In that case the
// prefix is recognized as a storage engine name.
//
// The name argument can be optionally prefixed by "memory://". In that case
// the prefix is stripped before interpreting it as a name of a memory-only,
// volatile DB.
//
// [0]: http://golang.org/pkg/database/sql/driver/
func RegisterDriver() {
	// sync.Once makes repeated registration safe (sql.Register panics on
	// duplicate names).
	driverOnce.Do(func() { sql.Register(DriverName, tidbDriver) })
}
// sqlDriver implements the interface required by database/sql/driver.
type sqlDriver struct {
	mu sync.Mutex // guards connection open/close bookkeeping
}
// lock acquires the driver-wide mutex.
func (d *sqlDriver) lock() {
	d.mu.Lock()
}

// unlock releases the driver-wide mutex.
func (d *sqlDriver) unlock() {
	d.mu.Unlock()
}
// parseDriverDSN cuts off DB name from dsn. It returns error if the dsn is not
// valid. The last path component is the database name; everything before it
// is the storage path handed to NewStore.
func parseDriverDSN(dsn string) (params *driverParams, err error) {
	u, err := url.Parse(dsn)
	if err != nil {
		return nil, errors.Trace(err)
	}
	fullPath := filepath.Join(u.Host, u.Path)
	dbName := filepath.Clean(filepath.Base(fullPath))
	if dbName == "" || dbName == "." || dbName == string(filepath.Separator) {
		return nil, errors.Errorf("invalid DB name %q", dbName)
	}
	// Strip the DB name to obtain the storage path.
	storePath := filepath.Clean(filepath.Dir(fullPath))
	if storePath == "" || storePath == "." || storePath == string(filepath.Separator) {
		return nil, errors.Errorf("invalid dsn %q", dsn)
	}
	u.Path, u.Host = storePath, ""
	result := &driverParams{
		storePath: u.String(),
		dbName:    dbName,
	}
	// Parse additional driver params from the query string.
	if u.Query().Get("parseTime") == "true" {
		result.parseTime = true
	}
	return result, nil
}
// Open returns a new connection to the database.
//
// The dsn must be a URL format 'engine://path/dbname?params'.
// Engine is the storage name registered with RegisterStore.
// Path is the storage specific format.
// Params is key-value pairs split by '&', optional params are storage specific.
// Examples:
//    goleveldb://relative/path/test
//    boltdb:///absolute/path/test
//    hbase://zk1,zk2,zk3/hbasetbl/test?tso=zk
//
// Open may return a cached connection (one previously closed), but doing so is
// unnecessary; the sql package maintains a pool of idle connections for
// efficient re-use.
//
// The behavior of the mysql driver regarding time parsing can also be imitated
// by passing ?parseTime
//
// The returned connection is only used by one goroutine at a time.
func (d *sqlDriver) Open(dsn string) (driver.Conn, error) {
	params, err := parseDriverDSN(dsn)
	if err != nil {
		return nil, errors.Trace(err)
	}
	store, err := NewStore(params.storePath)
	if err != nil {
		return nil, errors.Trace(err)
	}
	sess, err := CreateSession(store)
	if err != nil {
		return nil, errors.Trace(err)
	}
	s := sess.(*session)
	d.lock()
	defer d.unlock()
	DBName := model.NewCIStr(params.dbName)
	domain := sessionctx.GetDomain(s)
	cs := &ast.CharsetOpt{
		Chs: "utf8",
		Col: "utf8_bin",
	}
	// Create the target database on first use.
	if !domain.InfoSchema().SchemaExists(DBName) {
		err = domain.DDL().CreateSchema(s, DBName, cs)
		if err != nil {
			return nil, errors.Trace(err)
		}
	}
	// Fix: pass the receiver d so the connection shares this driver's mutex.
	// Previously a fresh &sqlDriver{} was created here (also shadowing the
	// `driver` package), so driverConn.Close locked a mutex unrelated to the
	// one Open uses, making the lock ineffective.
	return newDriverConn(s, d, DBName.O, params)
}
// driverConn is a connection to a database. It is not used concurrently by
// multiple goroutines.
//
// Conn is assumed to be stateful.
type driverConn struct {
	s      Session                // underlying TiDB session executing all SQL
	driver *sqlDriver             // owning driver, used for its mutex on Close
	stmts  map[string]driver.Stmt // prepared statements cached by query text
	params *driverParams          // DSN options (e.g. parseTime)
}
// newDriverConn wraps a session in a driver.Conn and switches it to the
// given schema with a USE statement.
func newDriverConn(sess *session, d *sqlDriver, schema string, params *driverParams) (driver.Conn, error) {
	conn := &driverConn{
		driver: d,
		stmts:  map[string]driver.Stmt{},
		s:      sess,
		params: params,
	}
	if _, err := conn.s.Execute("use " + schema); err != nil {
		return nil, errors.Trace(err)
	}
	return conn, nil
}
// Prepare returns a prepared statement, bound to this connection. The
// statement is also cached by query text for reuse in Exec/Query.
func (c *driverConn) Prepare(query string) (driver.Stmt, error) {
	stmtID, paramCount, fields, err := c.s.PrepareStmt(query)
	if err != nil {
		return nil, err
	}
	stmt := &driverStmt{
		conn:       c,
		query:      query,
		stmtID:     stmtID,
		paramCount: paramCount,
		// A non-nil field list means the statement produces rows.
		isQuery: fields != nil,
	}
	c.stmts[query] = stmt
	return stmt, nil
}
// Close invalidates and potentially stops any current prepared statements and
// transactions, marking this connection as no longer in use.
//
// Because the sql package maintains a free pool of connections and only calls
// Close when there's a surplus of idle connections, it shouldn't be necessary
// for drivers to do their own connection caching.
func (c *driverConn) Close() error {
	var err errList
	// Drop every cached prepared statement, collecting any errors.
	for _, s := range c.stmts {
		stmt := s.(*driverStmt)
		err.append(stmt.conn.s.DropPreparedStmt(stmt.stmtID))
	}
	// NOTE(review): the lock is taken after the statements are dropped and
	// nothing is mutated under it — presumably kept for ordering with Open;
	// verify whether it is still needed.
	c.driver.lock()
	defer c.driver.unlock()
	return err.error()
}
// Begin starts and returns a new transaction by issuing BEGIN on the
// underlying session; the connection itself acts as the driver.Tx.
func (c *driverConn) Begin() (driver.Tx, error) {
	if c.s == nil {
		return nil, errors.Errorf("Need init first")
	}
	_, err := c.s.Execute(txBeginSQL)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return c, nil
}
// Commit commits the current transaction: it issues COMMIT and then finishes
// the underlying KV transaction (rollback=false).
func (c *driverConn) Commit() error {
	if c.s == nil {
		return terror.CommitNotInTransaction
	}
	_, err := c.s.Execute(txCommitSQL)
	if err != nil {
		return errors.Trace(err)
	}
	err = c.s.FinishTxn(false)
	return errors.Trace(err)
}
// Rollback aborts the current transaction by issuing ROLLBACK.
func (c *driverConn) Rollback() error {
	if c.s == nil {
		return terror.RollbackNotInTransaction
	}
	_, err := c.s.Execute(txRollbackSQL)
	if err != nil {
		return errors.Trace(err)
	}
	return nil
}
// Execer is an optional interface that may be implemented by a Conn.
//
// If a Conn does not implement Execer, the sql package's DB.Exec will first
// prepare a query, execute the statement, and then close the statement.
//
// Exec may return driver.ErrSkip.
func (c *driverConn) Exec(query string, args []driver.Value) (driver.Result, error) {
	return c.driverExec(query, args)
}
// getStmt returns the cached prepared statement for query, preparing (and
// caching) it on first use.
func (c *driverConn) getStmt(query string) (driver.Stmt, error) {
	if cached, ok := c.stmts[query]; ok {
		return cached, nil
	}
	stmt, err := c.Prepare(query)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return stmt, nil
}
// driverExec runs a statement without a result set. With no arguments the
// query is executed directly on the session; otherwise it goes through a
// (cached) prepared statement.
func (c *driverConn) driverExec(query string, args []driver.Value) (driver.Result, error) {
	if len(args) > 0 {
		stmt, err := c.getStmt(query)
		if err != nil {
			return nil, errors.Trace(err)
		}
		return stmt.Exec(args)
	}
	if _, err := c.s.Execute(query); err != nil {
		return nil, errors.Trace(err)
	}
	res := &driverResult{
		lastInsertID: int64(c.s.LastInsertID()),
		rowsAffected: int64(c.s.AffectedRows()),
	}
	return res, nil
}
// Queryer is an optional interface that may be implemented by a Conn.
//
// If a Conn does not implement Queryer, the sql package's DB.Query will first
// prepare a query, execute the statement, and then close the statement.
//
// Query may return driver.ErrSkip.
func (c *driverConn) Query(query string, args []driver.Value) (driver.Rows, error) {
	return c.driverQuery(query, args)
}
// driverQuery runs a row-producing statement. With no arguments the query is
// executed directly on the session; otherwise it goes through a (cached)
// prepared statement. Only the first result set is returned.
func (c *driverConn) driverQuery(query string, args []driver.Value) (driver.Rows, error) {
	if len(args) > 0 {
		stmt, err := c.getStmt(query)
		if err != nil {
			return nil, errors.Trace(err)
		}
		return stmt.Query(args)
	}
	rss, err := c.s.Execute(query)
	if err != nil {
		return nil, errors.Trace(err)
	}
	if len(rss) == 0 {
		return nil, errors.Trace(errNoResult)
	}
	return &driverRows{params: c.params, rs: rss[0]}, nil
}
// driverResult is the result of a query execution.
type driverResult struct {
	lastInsertID int64 // auto-generated ID of the last INSERT
	rowsAffected int64 // number of rows changed by the statement
}
// LastInsertId returns the database's auto-generated ID after, for example, an
// INSERT into a table with primary key.
func (r *driverResult) LastInsertId() (int64, error) { // -golint
	return r.lastInsertID, nil
}
// RowsAffected returns the number of rows affected by the query.
func (r *driverResult) RowsAffected() (int64, error) {
	return r.rowsAffected, nil
}
// driverRows is an iterator over an executed query's results.
type driverRows struct {
	rs     ast.RecordSet // underlying result set; nil for statements with no rows
	params *driverParams // DSN options, consulted for time formatting in Next
}
// Columns returns the names of the columns. The number of columns of the
// result is inferred from the length of the slice. If a particular column
// name isn't known, an empty string should be returned for that entry.
func (r *driverRows) Columns() []string {
	if r.rs == nil {
		return []string{}
	}
	fields, _ := r.rs.Fields()
	cols := make([]string, 0, len(fields))
	for _, f := range fields {
		cols = append(cols, f.ColumnAsName.O)
	}
	return cols
}
// Close closes the rows iterator; a nil result set needs no cleanup.
func (r *driverRows) Close() error {
	if r.rs == nil {
		return nil
	}
	return r.rs.Close()
}
// Next is called to populate the next row of data into the provided slice. The
// provided slice will be the same size as the Columns() are wide.
//
// The dest slice may be populated only with a driver Value type, but excluding
// string. All string values must be converted to []byte.
//
// Next should return io.EOF when there are no more rows.
func (r *driverRows) Next(dest []driver.Value) error {
	if r.rs == nil {
		return io.EOF
	}
	row, err := r.rs.Next()
	if err != nil {
		return errors.Trace(err)
	}
	if row == nil {
		// Result set exhausted.
		return io.EOF
	}
	if len(row.Data) != len(dest) {
		return errors.Errorf("field count mismatch: got %d, need %d", len(row.Data), len(dest))
	}
	// Convert each internal Datum into a driver.Value.
	for i, xi := range row.Data {
		switch xi.Kind() {
		case types.KindNull:
			dest[i] = nil
		case types.KindInt64:
			dest[i] = xi.GetInt64()
		case types.KindUint64:
			dest[i] = xi.GetUint64()
		case types.KindFloat32:
			dest[i] = xi.GetFloat32()
		case types.KindFloat64:
			dest[i] = xi.GetFloat64()
		case types.KindString:
			dest[i] = xi.GetString()
		case types.KindBytes:
			dest[i] = xi.GetBytes()
		case types.KindMysqlBit:
			dest[i] = xi.GetMysqlBit().ToString()
		case types.KindMysqlDecimal:
			dest[i] = xi.GetMysqlDecimal().String()
		case types.KindMysqlDuration:
			dest[i] = xi.GetMysqlDuration().String()
		case types.KindMysqlEnum:
			dest[i] = xi.GetMysqlEnum().String()
		case types.KindMysqlHex:
			dest[i] = xi.GetMysqlHex().ToString()
		case types.KindMysqlSet:
			dest[i] = xi.GetMysqlSet().String()
		case types.KindMysqlTime:
			t := xi.GetMysqlTime()
			// Honor the mysql-driver-compatible parseTime DSN option:
			// hand back time.Time instead of the string form.
			if !r.params.parseTime {
				dest[i] = t.String()
			} else {
				dest[i] = t.Time
			}
		default:
			return errors.Errorf("unable to handle type %T", xi.GetValue())
		}
	}
	return nil
}
// driverStmt is a prepared statement. It is bound to a driverConn and not used
// by multiple goroutines concurrently.
type driverStmt struct {
	conn       *driverConn // owning connection
	query      string      // original query text, also the cache key on conn
	stmtID     uint32      // server-side prepared statement ID
	paramCount int         // number of placeholder parameters
	isQuery    bool        // true when the statement produces a result set
}
// Close closes the statement.
//
// As of Go 1.1, a Stmt will not be closed if it's in use by any queries.
func (s *driverStmt) Close() error {
	// NOTE(review): the error from DropPreparedStmt is silently discarded
	// here — presumably intentional best-effort cleanup; verify.
	s.conn.s.DropPreparedStmt(s.stmtID)
	delete(s.conn.stmts, s.query)
	return nil
}
// NumInput returns the number of placeholder parameters.
//
// If NumInput returns >= 0, the sql package will sanity check argument counts
// from callers and return errors to the caller before the statement's Exec or
// Query methods are called.
//
// NumInput may also return -1, if the driver doesn't know its number of
// placeholders. In that case, the sql package will not sanity check Exec or
// Query argument counts.
func (s *driverStmt) NumInput() int {
	return s.paramCount
}
// Exec executes a query that doesn't return rows, such as an INSERT or UPDATE.
func (s *driverStmt) Exec(args []driver.Value) (driver.Result, error) {
	c := s.conn
	_, err := c.s.ExecutePreparedStmt(s.stmtID, params(args)...)
	if err != nil {
		return nil, errors.Trace(err)
	}
	// Fix: the original wrapped the lines below in `if s != nil`, but s was
	// already dereferenced above (s.conn, s.stmtID), so the guard could never
	// be false; it has been removed as dead code.
	r := &driverResult{
		lastInsertID: int64(c.s.LastInsertID()),
		rowsAffected: int64(c.s.AffectedRows()),
	}
	return r, nil
}
// Query executes a query that may return rows, such as a SELECT.
func (s *driverStmt) Query(args []driver.Value) (driver.Rows, error) {
	c := s.conn
	rs, err := c.s.ExecutePreparedStmt(s.stmtID, params(args)...)
	if err != nil {
		return nil, errors.Trace(err)
	}
	if rs == nil {
		if s.isQuery {
			// A SELECT must produce a result set; nil here is an error.
			return nil, errors.Trace(errNoResult)
		}
		// The statement is not a query.
		return &driverRows{}, nil
	}
	return &driverRows{params: s.conn.params, rs: rs}, nil
}
// init registers the TiDB driver with database/sql at import time.
func init() {
	RegisterDriver()
}

130
vendor/github.com/pingcap/tidb/evaluator/builtin.go generated vendored Normal file
View file

@ -0,0 +1,130 @@
// Copyright 2013 The ql Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSES/QL-LICENSE file.
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package evaluator
import (
"github.com/pingcap/tidb/context"
"github.com/pingcap/tidb/util/types"
)
// OldFunc is for an old builtin function (interface{}-based calling
// convention).
type OldFunc struct {
	// F is the specific calling function.
	F func([]interface{}, context.Context) (interface{}, error)
	// MinArgs is the minimal arguments needed.
	MinArgs int
	// MaxArgs is the maximal arguments needed, -1 for infinity.
	MaxArgs int
	// IsStatic shows whether this function can be called statically.
	IsStatic bool
	// IsAggregate represents whether this function is an aggregate function or not.
	IsAggregate bool
}
// Func is for a builtin function (Datum-based calling convention).
type Func struct {
	// F is the specific calling function.
	F func([]types.Datum, context.Context) (types.Datum, error)
	// MinArgs is the minimal arguments needed.
	MinArgs int
	// MaxArgs is the maximal arguments needed, -1 for infinity.
	MaxArgs int
}
// OldFuncs holds all old-style registered builtin functions,
// keyed by lowercase function name.
var OldFuncs = map[string]OldFunc{
	// control functions
	"if":     {builtinIf, 3, 3, true, false},
	"ifnull": {builtinIfNull, 2, 2, true, false},
	"nullif": {builtinNullIf, 2, 2, true, false},

	// string functions
	"replace":         {builtinReplace, 3, 3, true, false},
	"strcmp":          {builtinStrcmp, 2, 2, true, false},
	"convert":         {builtinConvert, 2, 2, true, false},
	"substring":       {builtinSubstring, 2, 3, true, false},
	"substring_index": {builtinSubstringIndex, 3, 3, true, false},
	"locate":          {builtinLocate, 2, 3, true, false},
	"trim":            {builtinTrim, 1, 3, true, false},

	// information functions
	"current_user":  {builtinCurrentUser, 0, 0, false, false},
	"database":      {builtinDatabase, 0, 0, false, false},
	"found_rows":    {builtinFoundRows, 0, 0, false, false},
	"user":          {builtinUser, 0, 0, false, false},
	"connection_id": {builtinConnectionID, 0, 0, true, false},
	"version":       {builtinVersion, 0, 0, true, false},
}
// Funcs holds all registered builtin functions (Datum-based),
// keyed by lowercase function name.
var Funcs = map[string]Func{
	// common functions
	"coalesce": {builtinCoalesce, 1, -1},

	// math functions
	"abs":   {builtinAbs, 1, 1},
	"pow":   {builtinPow, 2, 2},
	"power": {builtinPow, 2, 2},
	"rand":  {builtinRand, 0, 1},

	// time functions
	"curdate":           {builtinCurrentDate, 0, 0},
	"current_date":      {builtinCurrentDate, 0, 0},
	"current_time":      {builtinCurrentTime, 0, 1},
	"current_timestamp": {builtinNow, 0, 1},
	"curtime":           {builtinCurrentTime, 0, 1},
	"date":              {builtinDate, 1, 1},
	"day":               {builtinDay, 1, 1},
	"dayname":           {builtinDayName, 1, 1},
	"dayofmonth":        {builtinDayOfMonth, 1, 1},
	"dayofweek":         {builtinDayOfWeek, 1, 1},
	"dayofyear":         {builtinDayOfYear, 1, 1},
	"hour":              {builtinHour, 1, 1},
	"microsecond":       {builtinMicroSecond, 1, 1},
	"minute":            {builtinMinute, 1, 1},
	"month":             {builtinMonth, 1, 1},
	"now":               {builtinNow, 0, 1},
	"second":            {builtinSecond, 1, 1},
	"sysdate":           {builtinSysDate, 0, 1},
	"week":              {builtinWeek, 1, 2},
	"weekday":           {builtinWeekDay, 1, 1},
	"weekofyear":        {builtinWeekOfYear, 1, 1},
	"year":              {builtinYear, 1, 1},
	"yearweek":          {builtinYearWeek, 1, 2},
	"extract":           {builtinExtract, 2, 2},
	"date_arith":        {builtinDateArith, 3, 3},

	// string functions
	"concat":    {builtinConcat, 1, -1},
	"concat_ws": {builtinConcatWS, 2, -1},
	"left":      {builtinLeft, 2, 2},
	"length":    {builtinLength, 1, 1},
	"lower":     {builtinLower, 1, 1},
	"repeat":    {builtinRepeat, 2, 2},
	"upper":     {builtinUpper, 1, 1},
}
// builtinCoalesce returns the first non-NULL argument; when every argument
// is NULL, the last NULL datum is returned.
// See: http://dev.mysql.com/doc/refman/5.7/en/comparison-operators.html#function_coalesce
func builtinCoalesce(args []types.Datum, ctx context.Context) (d types.Datum, err error) {
	for _, arg := range args {
		d = arg
		if d.Kind() != types.KindNull {
			break
		}
	}
	return d, nil
}

View file

@ -0,0 +1,76 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package evaluator
import (
"github.com/pingcap/tidb/context"
"github.com/pingcap/tidb/util/types"
)
// builtinIf implements IF(expr1, expr2, expr3): expr2 when expr1 is true,
// expr3 otherwise (including when expr1 is NULL).
// See https://dev.mysql.com/doc/refman/5.7/en/control-flow-functions.html#function_if
func builtinIf(args []interface{}, _ context.Context) (interface{}, error) {
	cond, ifTrue, ifFalse := args[0], args[1], args[2]
	if cond == nil {
		// NULL condition selects the else branch.
		return ifFalse, nil
	}
	b, err := types.ToBool(cond)
	if err != nil {
		return nil, err
	}
	// TODO: check return type, must be numeric or string
	if b == 1 {
		return ifTrue, nil
	}
	return ifFalse, nil
}
// builtinIfNull implements IFNULL(expr1, expr2): expr1 when it is not NULL,
// expr2 otherwise.
// See https://dev.mysql.com/doc/refman/5.7/en/control-flow-functions.html#function_ifnull
func builtinIfNull(args []interface{}, _ context.Context) (interface{}, error) {
	if args[0] != nil {
		return args[0], nil
	}
	return args[1], nil
}
// builtinNullIf implements NULLIF(expr1, expr2): NULL when expr1 = expr2,
// expr1 otherwise (including when either operand is NULL).
// See https://dev.mysql.com/doc/refman/5.7/en/control-flow-functions.html#function_nullif
func builtinNullIf(args []interface{}, _ context.Context) (interface{}, error) {
	lhs, rhs := args[0], args[1]
	if lhs == nil || rhs == nil {
		return lhs, nil
	}
	cmp, err := types.Compare(lhs, rhs)
	if err != nil {
		return nil, err
	}
	if cmp == 0 {
		return nil, nil
	}
	return lhs, nil
}

View file

@ -0,0 +1,78 @@
// Copyright 2013 The ql Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSES/QL-LICENSE file.
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package evaluator
import (
"github.com/juju/errors"
"github.com/pingcap/tidb/context"
"github.com/pingcap/tidb/mysql"
"github.com/pingcap/tidb/sessionctx/db"
"github.com/pingcap/tidb/sessionctx/variable"
)
// builtinDatabase returns the current schema name, or NULL when no schema
// is selected.
// See: https://dev.mysql.com/doc/refman/5.7/en/information-functions.html
func builtinDatabase(args []interface{}, ctx context.Context) (v interface{}, err error) {
	d := db.GetCurrentSchema(ctx)
	if d == "" {
		return nil, nil
	}
	return d, nil
}
// builtinFoundRows returns the FOUND_ROWS() value from the session variables.
func builtinFoundRows(arg []interface{}, ctx context.Context) (interface{}, error) {
	data := variable.GetSessionVars(ctx)
	if data == nil {
		return nil, errors.Errorf("Missing session variable when evalue builtin")
	}
	return data.FoundRows, nil
}
// builtinCurrentUser returns the session user.
// See: https://dev.mysql.com/doc/refman/5.7/en/information-functions.html#function_current-user
// TODO: The value of CURRENT_USER() can differ from the value of USER(). We will finish this after we support grant tables.
func builtinCurrentUser(args []interface{}, ctx context.Context) (v interface{}, err error) {
	data := variable.GetSessionVars(ctx)
	if data == nil {
		return nil, errors.Errorf("Missing session variable when evalue builtin")
	}
	return data.User, nil
}
// builtinUser returns the USER() value from the session variables.
func builtinUser(args []interface{}, ctx context.Context) (v interface{}, err error) {
	data := variable.GetSessionVars(ctx)
	if data == nil {
		return nil, errors.Errorf("Missing session variable when evalue builtin")
	}
	return data.User, nil
}
// builtinConnectionID returns the CONNECTION_ID() value from the session
// variables.
func builtinConnectionID(args []interface{}, ctx context.Context) (v interface{}, err error) {
	data := variable.GetSessionVars(ctx)
	if data == nil {
		return nil, errors.Errorf("Missing session variable when evalue builtin")
	}
	return data.ConnectionID, nil
}
// builtinVersion returns the MySQL-compatible server version string.
func builtinVersion(args []interface{}, ctx context.Context) (v interface{}, err error) {
	return mysql.ServerVersion, nil
}

View file

@ -0,0 +1,83 @@
// Copyright 2013 The ql Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSES/QL-LICENSE file.
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package evaluator
import (
"math"
"math/rand"
"github.com/juju/errors"
"github.com/pingcap/tidb/context"
"github.com/pingcap/tidb/util/types"
)
// builtinAbs implements ABS(). NULL and unsigned values pass through
// unchanged; signed integers are negated when negative; everything else is
// converted to float64 first.
// see https://dev.mysql.com/doc/refman/5.7/en/mathematical-functions.html
func builtinAbs(args []types.Datum, _ context.Context) (d types.Datum, err error) {
	d = args[0]
	switch d.Kind() {
	case types.KindNull:
		return d, nil
	case types.KindUint64:
		// Unsigned values are already non-negative.
		return d, nil
	case types.KindInt64:
		iv := d.GetInt64()
		if iv >= 0 {
			d.SetInt64(iv)
			return d, nil
		}
		d.SetInt64(-iv)
		return d, nil
	default:
		// we will try to convert other types to float
		// TODO: if time has no precision, it will be a integer
		f, err := d.ToFloat64()
		d.SetFloat64(math.Abs(f))
		return d, errors.Trace(err)
	}
}
// builtinRand implements RAND([seed]). With a non-NULL seed argument it
// reseeds the generator before drawing.
// NOTE(review): this seeds the package-global math/rand source, which affects
// every other user of the default source in the process — verify intended.
func builtinRand(args []types.Datum, _ context.Context) (d types.Datum, err error) {
	if len(args) == 1 && args[0].Kind() != types.KindNull {
		seed, err := args[0].ToInt64()
		if err != nil {
			d.SetNull()
			return d, errors.Trace(err)
		}
		rand.Seed(seed)
	}
	d.SetFloat64(rand.Float64())
	return d, nil
}
// builtinPow implements POW(x, y): x raised to the power y, with both
// operands coerced to float64. A coercion failure yields a NULL datum and
// a traced error.
func builtinPow(args []types.Datum, _ context.Context) (d types.Datum, err error) {
	var base, exp float64
	if base, err = args[0].ToFloat64(); err != nil {
		d.SetNull()
		return d, errors.Trace(err)
	}
	if exp, err = args[1].ToFloat64(); err != nil {
		d.SetNull()
		return d, errors.Trace(err)
	}
	d.SetFloat64(math.Pow(base, exp))
	return d, nil
}

View file

@ -0,0 +1,476 @@
// Copyright 2013 The ql Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSES/QL-LICENSE file.
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package evaluator
import (
"fmt"
"strings"
"github.com/juju/errors"
"github.com/ngaut/log"
"github.com/pingcap/tidb/ast"
"github.com/pingcap/tidb/context"
"github.com/pingcap/tidb/util/charset"
"github.com/pingcap/tidb/util/types"
"golang.org/x/text/transform"
)
// https://dev.mysql.com/doc/refman/5.7/en/string-functions.html
// builtinLength implements LENGTH(str): the length in bytes of the
// argument's string form, or NULL for a NULL argument.
func builtinLength(args []types.Datum, _ context.Context) (d types.Datum, err error) {
	if args[0].Kind() == types.KindNull {
		d.SetNull()
		return d, nil
	}
	str, err := args[0].ToString()
	if err != nil {
		d.SetNull()
		return d, errors.Trace(err)
	}
	d.SetInt64(int64(len(str)))
	return d, nil
}
// See: https://dev.mysql.com/doc/refman/5.7/en/string-functions.html#function_concat
// builtinConcat implements CONCAT(...): the byte-wise concatenation of all
// arguments' string forms. A single NULL argument makes the result NULL.
func builtinConcat(args []types.Datum, _ context.Context) (d types.Datum, err error) {
	var s []byte
	for _, a := range args {
		if a.Kind() == types.KindNull {
			d.SetNull()
			return d, nil
		}
		var ss string
		ss, err = a.ToString()
		if err != nil {
			d.SetNull()
			return d, errors.Trace(err)
		}
		s = append(s, []byte(ss)...)
	}
	d.SetBytesAsString(s)
	return d, nil
}
// See: https://dev.mysql.com/doc/refman/5.7/en/string-functions.html#function_concat-ws
// builtinConcatWS implements CONCAT_WS(sep, ...): joins the remaining
// arguments with the first as separator. A NULL separator makes the result
// NULL; NULL non-separator arguments are skipped.
func builtinConcatWS(args []types.Datum, _ context.Context) (d types.Datum, err error) {
	var sep string
	s := make([]string, 0, len(args))
	for i, a := range args {
		if a.Kind() == types.KindNull {
			if i == 0 {
				// NULL separator -> NULL result.
				d.SetNull()
				return d, nil
			}
			// Other NULL arguments are simply skipped.
			continue
		}
		ss, err := a.ToString()
		if err != nil {
			d.SetNull()
			return d, errors.Trace(err)
		}
		if i == 0 {
			sep = ss
			continue
		}
		s = append(s, ss)
	}
	d.SetString(strings.Join(s, sep))
	return d, nil
}
// See: https://dev.mysql.com/doc/refman/5.7/en/string-functions.html#function_left
// builtinLeft implements LEFT(str, len): the leftmost len bytes of str.
// A negative length yields the empty string; an oversized one yields str.
func builtinLeft(args []types.Datum, _ context.Context) (d types.Datum, err error) {
	str, err := args[0].ToString()
	if err != nil {
		d.SetNull()
		return d, errors.Trace(err)
	}
	n, err := args[1].ToInt64()
	if err != nil {
		d.SetNull()
		return d, errors.Trace(err)
	}
	take := int(n)
	switch {
	case take < 0:
		take = 0
	case take > len(str):
		take = len(str)
	}
	d.SetString(str[:take])
	return d, nil
}
// See: https://dev.mysql.com/doc/refman/5.7/en/string-functions.html#function_repeat
// builtinRepeat implements REPEAT(str, count): str concatenated count times,
// or the empty string when count is less than 1.
func builtinRepeat(args []types.Datum, _ context.Context) (d types.Datum, err error) {
	str, err := args[0].ToString()
	if err != nil {
		// Fix: make the error path consistent with the other string builtins —
		// the original returned an untraced error without nulling the datum.
		d.SetNull()
		return d, errors.Trace(err)
	}
	// Sprintf is a no-op here (str is already a string); kept as-is so the
	// file's fmt import stays in use.
	ch := fmt.Sprintf("%v", str)
	num := 0
	x := args[1]
	switch x.Kind() {
	case types.KindInt64:
		num = int(x.GetInt64())
	case types.KindUint64:
		num = int(x.GetUint64())
	}
	if num < 1 {
		d.SetString("")
		return d, nil
	}
	d.SetString(strings.Repeat(ch, num))
	return d, nil
}
// See: https://dev.mysql.com/doc/refman/5.7/en/string-functions.html#function_lower
// builtinLower implements LOWER(str): the argument lower-cased, or NULL
// for a NULL argument.
func builtinLower(args []types.Datum, _ context.Context) (d types.Datum, err error) {
	arg := args[0]
	if arg.Kind() == types.KindNull {
		d.SetNull()
		return d, nil
	}
	str, err := arg.ToString()
	if err != nil {
		d.SetNull()
		return d, errors.Trace(err)
	}
	d.SetString(strings.ToLower(str))
	return d, nil
}
// See: https://dev.mysql.com/doc/refman/5.7/en/string-functions.html#function_upper
// builtinUpper implements UPPER(str): the argument upper-cased, or NULL
// for a NULL argument.
func builtinUpper(args []types.Datum, _ context.Context) (d types.Datum, err error) {
	arg := args[0]
	if arg.Kind() == types.KindNull {
		d.SetNull()
		return d, nil
	}
	str, err := arg.ToString()
	if err != nil {
		d.SetNull()
		return d, errors.Trace(err)
	}
	d.SetString(strings.ToUpper(str))
	return d, nil
}
// See: https://dev.mysql.com/doc/refman/5.7/en/string-comparison-functions.html
// builtinStrcmp implements STRCMP(a, b): the sign of the comparison of the
// two arguments' string forms; NULL if either argument is NULL.
func builtinStrcmp(args []interface{}, _ context.Context) (interface{}, error) {
	if args[0] == nil || args[1] == nil {
		return nil, nil
	}
	lhs, err := types.ToString(args[0])
	if err != nil {
		return nil, errors.Trace(err)
	}
	rhs, err := types.ToString(args[1])
	if err != nil {
		return nil, errors.Trace(err)
	}
	return types.CompareString(lhs, rhs), nil
}
// See: https://dev.mysql.com/doc/refman/5.7/en/string-functions.html#function_replace
// builtinReplace implements REPLACE(str, from, to): str with every
// occurrence of from substituted by to; NULL if any argument is NULL.
func builtinReplace(args []interface{}, _ context.Context) (interface{}, error) {
	for _, a := range args {
		if a == nil {
			return nil, nil
		}
	}
	target, err := types.ToString(args[0])
	if err != nil {
		return nil, errors.Trace(err)
	}
	from, err := types.ToString(args[1])
	if err != nil {
		return nil, errors.Trace(err)
	}
	to, err := types.ToString(args[2])
	if err != nil {
		return nil, errors.Trace(err)
	}
	return strings.Replace(target, from, to, -1), nil
}
// See: https://dev.mysql.com/doc/refman/5.7/en/cast-functions.html#function_convert
// builtinConvert implements CONVERT(expr USING charset): re-decodes a string
// value using the named character set. NULL and non-string values pass
// through as NULL; unknown character sets are an error.
func builtinConvert(args []interface{}, _ context.Context) (interface{}, error) {
	value := args[0]
	cs := args[1].(string)
	// Casting nil to any type returns nil.
	if value == nil {
		return nil, nil
	}
	str, ok := value.(string)
	if !ok {
		return nil, nil
	}
	// ascii and utf8mb4 need no re-decoding; return the value untouched.
	switch strings.ToLower(cs) {
	case "ascii", "utf8mb4":
		return value, nil
	}
	encoding, _ := charset.Lookup(cs)
	if encoding == nil {
		return nil, errors.Errorf("unknown encoding: %s", cs)
	}
	target, _, err := transform.String(encoding.NewDecoder(), str)
	if err != nil {
		log.Errorf("Convert %s to %s with error: %v", str, cs, err)
		return nil, errors.Trace(err)
	}
	return target, nil
}
// builtinSubstring implements SUBSTRING(str, pos[, len]).
// A negative pos counts from the end of the string; pos 0 or out of range
// yields the empty string.
func builtinSubstring(args []interface{}, _ context.Context) (interface{}, error) {
	// The meaning of the elements of args.
	// arg[0] -> StrExpr
	// arg[1] -> Pos
	// arg[2] -> Len (Optional)
	str, err := types.ToString(args[0])
	if err != nil {
		return nil, errors.Errorf("Substring invalid args, need string but get %T", args[0])
	}
	t := args[1]
	p, ok := t.(int64)
	if !ok {
		return nil, errors.Errorf("Substring invalid pos args, need int but get %T", t)
	}
	pos := int(p)
	// length == -1 is the sentinel for "no explicit length".
	// NOTE(review): an explicit len of -1 is indistinguishable from the
	// sentinel and returns the tail instead of the empty string — TODO confirm.
	length := -1
	if len(args) == 3 {
		t = args[2]
		p, ok = t.(int64)
		if !ok {
			return nil, errors.Errorf("Substring invalid pos args, need int but get %T", t)
		}
		length = int(p)
	}
	// The forms without a len argument return a substring from string str starting at position pos.
	// The forms with a len argument return a substring len characters long from string str, starting at position pos.
	// The forms that use FROM are standard SQL syntax. It is also possible to use a negative value for pos.
	// In this case, the beginning of the substring is pos characters from the end of the string, rather than the beginning.
	// A negative value may be used for pos in any of the forms of this function.
	if pos < 0 {
		pos = len(str) + pos
	} else {
		pos--
	}
	if pos > len(str) || pos <= 0 {
		pos = len(str)
	}
	end := len(str)
	if length != -1 {
		end = pos + length
	}
	if end < pos {
		// Fix: a non-positive explicit length previously made end < pos and
		// panicked on the slice below; MySQL returns the empty string.
		end = pos
	}
	if end > len(str) {
		end = len(str)
	}
	return str[pos:end], nil
}
// See: https://dev.mysql.com/doc/refman/5.7/en/string-functions.html#function_substring-index
// builtinSubstringIndex implements SUBSTRING_INDEX(str, delim, count):
// the part of str before (count > 0) or after (count < 0) the count-th
// occurrence of delim. An empty delim yields the empty string.
func builtinSubstringIndex(args []interface{}, _ context.Context) (interface{}, error) {
	// The meaning of the elements of args.
	// args[0] -> StrExpr
	// args[1] -> Delim
	// args[2] -> Count
	fs := args[0]
	str, err := types.ToString(fs)
	if err != nil {
		return nil, errors.Errorf("Substring_Index invalid args, need string but get %T", fs)
	}
	t := args[1]
	delim, err := types.ToString(t)
	if err != nil {
		return nil, errors.Errorf("Substring_Index invalid delim, need string but get %T", t)
	}
	if len(delim) == 0 {
		return "", nil
	}
	t = args[2]
	c, err := types.ToInt64(t)
	if err != nil {
		return nil, errors.Trace(err)
	}
	count := int(c)
	strs := strings.Split(str, delim)
	// start/end select the window of delimiter-separated pieces to keep.
	var (
		start = 0
		end   = len(strs)
	)
	if count > 0 {
		// If count is positive, everything to the left of the final delimiter (counting from the left) is returned.
		if count < end {
			end = count
		}
	} else {
		// If count is negative, everything to the right of the final delimiter (counting from the right) is returned.
		count = -count
		if count < end {
			start = end - count
		}
	}
	substrs := strs[start:end]
	return strings.Join(substrs, delim), nil
}
// See: https://dev.mysql.com/doc/refman/5.7/en/string-functions.html#function_locate
// builtinLocate implements LOCATE(substr, str[, pos]): the 1-based byte
// position of the first occurrence of substr in str at or after pos, or 0
// when not found. NULL arguments propagate NULL.
func builtinLocate(args []interface{}, _ context.Context) (interface{}, error) {
	// The meaning of the elements of args.
	// args[0] -> SubStr
	// args[1] -> Str
	// args[2] -> Pos
	// eval str
	fs := args[1]
	if fs == nil {
		return nil, nil
	}
	str, err := types.ToString(fs)
	if err != nil {
		return nil, errors.Trace(err)
	}
	// eval substr
	fs = args[0]
	if fs == nil {
		return nil, nil
	}
	subStr, err := types.ToString(fs)
	if err != nil {
		return nil, errors.Trace(err)
	}
	// eval pos
	// pos is kept 0-based internally; the SQL argument is 1-based.
	pos := int64(0)
	if len(args) == 3 {
		t := args[2]
		p, err := types.ToInt64(t)
		if err != nil {
			return nil, errors.Trace(err)
		}
		pos = p - 1
		// A starting position outside str (or past the last possible match)
		// means "not found".
		if pos < 0 || pos > int64(len(str)) {
			return 0, nil
		}
		if pos > int64(len(str)-len(subStr)) {
			return 0, nil
		}
	}
	if len(subStr) == 0 {
		// The empty substring matches immediately at the starting position.
		return pos + 1, nil
	}
	i := strings.Index(str[pos:], subStr)
	if i == -1 {
		return 0, nil
	}
	return int64(i) + pos + 1, nil
}
// spaceChars are the characters stripped by TRIM when no remstr is given.
const spaceChars = "\n\t\r "
// See: https://dev.mysql.com/doc/refman/5.7/en/string-functions.html#function_trim
// builtinTrim implements TRIM([remstr FROM] str) with an optional direction
// (LEADING / TRAILING / BOTH, defaulting to BOTH). With no remstr it strips
// the whitespace characters in spaceChars; with a remstr it strips repeated
// whole occurrences of that string.
func builtinTrim(args []interface{}, _ context.Context) (interface{}, error) {
	// args[0] -> Str
	// args[1] -> RemStr
	// args[2] -> Direction
	// eval str
	fs := args[0]
	if fs == nil {
		return nil, nil
	}
	str, err := types.ToString(fs)
	if err != nil {
		return nil, errors.Trace(err)
	}
	remstr := ""
	// eval remstr
	if len(args) > 1 {
		fs = args[1]
		if fs != nil {
			remstr, err = types.ToString(fs)
			if err != nil {
				return nil, errors.Trace(err)
			}
		}
	}
	// do trim
	var result string
	var direction ast.TrimDirectionType
	if len(args) > 2 {
		direction = args[2].(ast.TrimDirectionType)
	} else {
		direction = ast.TrimBothDefault
	}
	if direction == ast.TrimLeading {
		if len(remstr) > 0 {
			result = trimLeft(str, remstr)
		} else {
			result = strings.TrimLeft(str, spaceChars)
		}
	} else if direction == ast.TrimTrailing {
		if len(remstr) > 0 {
			result = trimRight(str, remstr)
		} else {
			result = strings.TrimRight(str, spaceChars)
		}
	} else if len(remstr) > 0 {
		// BOTH with a remstr: strip it from the left, then from the right.
		x := trimLeft(str, remstr)
		result = trimRight(x, remstr)
	} else {
		result = strings.Trim(str, spaceChars)
	}
	return result, nil
}
// trimLeft strips every leading occurrence of remstr from str. An empty
// remstr leaves str unchanged.
func trimLeft(str, remstr string) string {
	if remstr == "" {
		return str
	}
	for strings.HasPrefix(str, remstr) {
		str = str[len(remstr):]
	}
	return str
}
// trimRight strips every trailing occurrence of remstr from str. An empty
// remstr leaves str unchanged.
func trimRight(str, remstr string) string {
	if remstr == "" {
		return str
	}
	for strings.HasSuffix(str, remstr) {
		str = str[:len(str)-len(remstr)]
	}
	return str
}

View file

@ -0,0 +1,555 @@
// Copyright 2013 The ql Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSES/QL-LICENSE file.
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package evaluator
import (
"fmt"
"regexp"
"strings"
"time"
"github.com/juju/errors"
"github.com/pingcap/tidb/ast"
"github.com/pingcap/tidb/context"
"github.com/pingcap/tidb/mysql"
"github.com/pingcap/tidb/util/types"
)
// convertToTime converts arg to a mysql time datum of the given type tp
// (date/datetime/timestamp) with maximum fractional precision. A NULL input
// stays NULL; a conversion that yields anything but a time value is an error.
func convertToTime(arg types.Datum, tp byte) (d types.Datum, err error) {
	f := types.NewFieldType(tp)
	f.Decimal = mysql.MaxFsp
	d, err = arg.ConvertTo(f)
	if err != nil {
		d.SetNull()
		return d, errors.Trace(err)
	}
	if d.Kind() == types.KindNull {
		return d, nil
	}
	if d.Kind() != types.KindMysqlTime {
		err = errors.Errorf("need time type, but got %T", d.GetValue())
		d.SetNull()
		return d, err
	}
	return d, nil
}
// convertToDuration converts arg to a mysql duration datum with the given
// fractional-seconds precision. A NULL input stays NULL; a conversion that
// yields anything but a duration is an error.
func convertToDuration(arg types.Datum, fsp int) (d types.Datum, err error) {
	f := types.NewFieldType(mysql.TypeDuration)
	f.Decimal = fsp
	d, err = arg.ConvertTo(f)
	if err != nil {
		d.SetNull()
		return d, errors.Trace(err)
	}
	if d.Kind() == types.KindNull {
		// Already NULL; SetNull is redundant but harmless.
		d.SetNull()
		return d, nil
	}
	if d.Kind() != types.KindMysqlDuration {
		err = errors.Errorf("need duration type, but got %T", d.GetValue())
		d.SetNull()
		return d, err
	}
	return d, nil
}
// builtinDate implements DATE(expr): the date part of the argument.
func builtinDate(args []types.Datum, _ context.Context) (types.Datum, error) {
	return convertToTime(args[0], mysql.TypeDate)
}
// See http://dev.mysql.com/doc/refman/5.7/en/date-and-time-functions.html#function_day
// day is a synonym for DayOfMonth
func builtinDay(args []types.Datum, ctx context.Context) (types.Datum, error) {
	return builtinDayOfMonth(args, ctx)
}
// See http://dev.mysql.com/doc/refman/5.7/en/date-and-time-functions.html#function_hour
// builtinHour implements HOUR(time): the hour component of the argument
// after conversion to a duration; NULL on a NULL or unconvertible argument.
func builtinHour(args []types.Datum, _ context.Context) (types.Datum, error) {
	d, err := convertToDuration(args[0], mysql.MaxFsp)
	if err != nil || d.Kind() == types.KindNull {
		d.SetNull()
		return d, errors.Trace(err)
	}
	d.SetInt64(int64(d.GetMysqlDuration().Hour()))
	return d, nil
}
// See http://dev.mysql.com/doc/refman/5.7/en/date-and-time-functions.html#function_minute
// builtinMinute implements MINUTE(time): the minute component of the
// argument after conversion to a duration; NULL on a NULL or unconvertible
// argument.
func builtinMinute(args []types.Datum, _ context.Context) (types.Datum, error) {
	d, err := convertToDuration(args[0], mysql.MaxFsp)
	if err != nil || d.Kind() == types.KindNull {
		d.SetNull()
		return d, errors.Trace(err)
	}
	d.SetInt64(int64(d.GetMysqlDuration().Minute()))
	return d, nil
}
// See http://dev.mysql.com/doc/refman/5.7/en/date-and-time-functions.html#function_second
// builtinSecond implements SECOND(time): the seconds component of the
// argument after conversion to a duration; NULL on a NULL or unconvertible
// argument.
func builtinSecond(args []types.Datum, _ context.Context) (types.Datum, error) {
	d, err := convertToDuration(args[0], mysql.MaxFsp)
	if err != nil || d.Kind() == types.KindNull {
		d.SetNull()
		return d, errors.Trace(err)
	}
	d.SetInt64(int64(d.GetMysqlDuration().Second()))
	return d, nil
}
// See http://dev.mysql.com/doc/refman/5.7/en/date-and-time-functions.html#function_microsecond
// builtinMicroSecond implements MICROSECOND(time): the microseconds
// component of the argument after conversion to a duration; NULL on a NULL
// or unconvertible argument.
func builtinMicroSecond(args []types.Datum, _ context.Context) (types.Datum, error) {
	d, err := convertToDuration(args[0], mysql.MaxFsp)
	if err != nil || d.Kind() == types.KindNull {
		d.SetNull()
		return d, errors.Trace(err)
	}
	d.SetInt64(int64(d.GetMysqlDuration().MicroSecond()))
	return d, nil
}
// See http://dev.mysql.com/doc/refman/5.7/en/date-and-time-functions.html#function_month
// builtinMonth implements MONTH(date): the month number of the argument,
// or 0 for a zero date; NULL on a NULL or unconvertible argument.
func builtinMonth(args []types.Datum, _ context.Context) (types.Datum, error) {
	d, err := convertToTime(args[0], mysql.TypeDate)
	if err != nil || d.Kind() == types.KindNull {
		d.SetNull()
		return d, errors.Trace(err)
	}
	t := d.GetMysqlTime()
	var month int64
	if !t.IsZero() {
		month = int64(t.Month())
	}
	d.SetInt64(month)
	return d, nil
}
// builtinNow implements NOW([fsp]): the current datetime rounded to the
// requested fractional-seconds precision (0 when omitted).
func builtinNow(args []types.Datum, _ context.Context) (d types.Datum, err error) {
	// TODO: if NOW is used in stored function or trigger, NOW will return the beginning time
	// of the execution.
	fsp := 0
	if len(args) == 1 && args[0].Kind() != types.KindNull {
		if fsp, err = checkFsp(args[0]); err != nil {
			d.SetNull()
			return d, errors.Trace(err)
		}
	}
	t := mysql.Time{
		Time: time.Now(),
		Type: mysql.TypeDatetime,
		// set unspecified for later round
		Fsp: mysql.UnspecifiedFsp,
	}
	tr, err := t.RoundFrac(int(fsp))
	if err != nil {
		d.SetNull()
		return d, errors.Trace(err)
	}
	d.SetMysqlTime(tr)
	return d, nil
}
// See http://dev.mysql.com/doc/refman/5.7/en/date-and-time-functions.html#function_dayname
// builtinDayName implements DAYNAME(date): the weekday name of the
// argument, derived from WEEKDAY's 0-based (Monday-first) index.
func builtinDayName(args []types.Datum, ctx context.Context) (types.Datum, error) {
	d, err := builtinWeekDay(args, ctx)
	if err != nil || d.Kind() == types.KindNull {
		d.SetNull()
		return d, errors.Trace(err)
	}
	weekday := d.GetInt64()
	if (weekday < 0) || (weekday >= int64(len(mysql.WeekdayNames))) {
		d.SetNull()
		return d, errors.Errorf("no name for invalid weekday: %d.", weekday)
	}
	d.SetString(mysql.WeekdayNames[weekday])
	return d, nil
}
// See http://dev.mysql.com/doc/refman/5.7/en/date-and-time-functions.html#function_dayofmonth
// builtinDayOfMonth implements DAYOFMONTH(date): the day-of-month of the
// argument, or 0 for a zero date.
func builtinDayOfMonth(args []types.Datum, _ context.Context) (d types.Datum, err error) {
	// TODO: some invalid format like 2000-00-00 will return 0 too.
	d, err = convertToTime(args[0], mysql.TypeDate)
	if err != nil || d.Kind() == types.KindNull {
		d.SetNull()
		return d, errors.Trace(err)
	}
	// No need to check type here.
	t := d.GetMysqlTime()
	if t.IsZero() {
		d.SetInt64(int64(0))
		return d, nil
	}
	d.SetInt64(int64(t.Day()))
	return d, nil
}
// See http://dev.mysql.com/doc/refman/5.7/en/date-and-time-functions.html#function_dayofweek
// builtinDayOfWeek implements DAYOFWEEK(date): the weekday index of the
// argument with 1 = Sunday, or NULL for a zero date.
func builtinDayOfWeek(args []types.Datum, _ context.Context) (d types.Datum, err error) {
	d, err = convertToTime(args[0], mysql.TypeDate)
	if err != nil || d.Kind() == types.KindNull {
		d.SetNull()
		return d, errors.Trace(err)
	}
	// No need to check type here.
	t := d.GetMysqlTime()
	if t.IsZero() {
		d.SetNull()
		// TODO: log warning or return error?
		return d, nil
	}
	// 1 is Sunday, 2 is Monday, .... 7 is Saturday
	d.SetInt64(int64(t.Weekday()) + 1)
	return d, nil
}
// See http://dev.mysql.com/doc/refman/5.7/en/date-and-time-functions.html#function_dayofyear
// builtinDayOfYear implements DAYOFYEAR(date): the day-of-year of the
// argument, or NULL for a zero or unconvertible date.
func builtinDayOfYear(args []types.Datum, _ context.Context) (types.Datum, error) {
	d, err := convertToTime(args[0], mysql.TypeDate)
	if err != nil || d.Kind() == types.KindNull {
		d.SetNull()
		return d, errors.Trace(err)
	}
	t := d.GetMysqlTime()
	if t.IsZero() {
		// TODO: log warning or return error?
		d.SetNull()
		return d, nil
	}
	d.SetInt64(int64(t.YearDay()))
	return d, nil
}
// See http://dev.mysql.com/doc/refman/5.7/en/date-and-time-functions.html#function_week
// builtinWeek implements WEEK(date): currently always the ISO-8601 week
// number; the mode argument is not yet honored.
func builtinWeek(args []types.Datum, _ context.Context) (types.Datum, error) {
	d, err := convertToTime(args[0], mysql.TypeDate)
	if err != nil || d.Kind() == types.KindNull {
		d.SetNull()
		return d, errors.Trace(err)
	}
	// No need to check type here.
	t := d.GetMysqlTime()
	if t.IsZero() {
		// TODO: log warning or return error?
		d.SetNull()
		return d, nil
	}
	// TODO: support multi mode for week
	_, week := t.ISOWeek()
	wi := int64(week)
	d.SetInt64(wi)
	return d, nil
}
// See http://dev.mysql.com/doc/refman/5.7/en/date-and-time-functions.html#function_weekday
// builtinWeekDay implements WEEKDAY(date): the weekday index of the
// argument with 0 = Monday, or NULL for a zero date.
func builtinWeekDay(args []types.Datum, _ context.Context) (types.Datum, error) {
	d, err := convertToTime(args[0], mysql.TypeDate)
	if err != nil || d.Kind() == types.KindNull {
		d.SetNull()
		return d, errors.Trace(err)
	}
	// No need to check type here.
	t := d.GetMysqlTime()
	if t.IsZero() {
		// TODO: log warning or return error?
		d.SetNull()
		return d, nil
	}
	// Monday is 0, ... Sunday = 6 in MySQL
	// but in go, Sunday is 0, ... Saturday is 6
	// w will do a conversion.
	w := (int64(t.Weekday()) + 6) % 7
	d.SetInt64(w)
	return d, nil
}
// See http://dev.mysql.com/doc/refman/5.7/en/date-and-time-functions.html#function_weekofyear
// builtinWeekOfYear implements WEEKOFYEAR(date).
func builtinWeekOfYear(args []types.Datum, ctx context.Context) (types.Datum, error) {
	// WeekOfYear is equivalent to Week(date, 3): the ISO-8601 week number.
	d := types.Datum{}
	d.SetInt64(3)
	return builtinWeek([]types.Datum{args[0], d}, ctx)
}
// See http://dev.mysql.com/doc/refman/5.7/en/date-and-time-functions.html#function_year
// builtinYear implements YEAR(date): the year of the argument, or 0 for a
// zero date; NULL propagates from the conversion.
func builtinYear(args []types.Datum, _ context.Context) (types.Datum, error) {
	d, err := convertToTime(args[0], mysql.TypeDate)
	if err != nil || d.Kind() == types.KindNull {
		return d, errors.Trace(err)
	}
	t := d.GetMysqlTime()
	var year int64
	if !t.IsZero() {
		year = int64(t.Year())
	}
	d.SetInt64(year)
	return d, nil
}
// See http://dev.mysql.com/doc/refman/5.7/en/date-and-time-functions.html#function_yearweek
// builtinYearWeek implements YEARWEEK(date): the ISO year*100 + ISO week
// number, or NULL for a zero date.
func builtinYearWeek(args []types.Datum, _ context.Context) (types.Datum, error) {
	d, err := convertToTime(args[0], mysql.TypeDate)
	if err != nil || d.Kind() == types.KindNull {
		d.SetNull()
		return d, errors.Trace(err)
	}
	// No need to check type here.
	t := d.GetMysqlTime()
	if t.IsZero() {
		d.SetNull()
		// TODO: log warning or return error?
		return d, nil
	}
	// TODO: support multi mode for week
	year, week := t.ISOWeek()
	d.SetInt64(int64(year*100 + week))
	return d, nil
}
// builtinSysDate implements SYSDATE([fsp]) by delegating to NOW.
func builtinSysDate(args []types.Datum, ctx context.Context) (types.Datum, error) {
	// SYSDATE is not the same as NOW if NOW is used in a stored function or trigger.
	// But here we can just think they are the same because we don't support stored function
	// and trigger now.
	return builtinNow(args, ctx)
}
// See https://dev.mysql.com/doc/refman/5.7/en/date-and-time-functions.html#function_curdate
// builtinCurrentDate implements CURDATE()/CURRENT_DATE: today's date in the
// local time zone with zero time-of-day.
func builtinCurrentDate(args []types.Datum, _ context.Context) (d types.Datum, err error) {
	year, month, day := time.Now().Date()
	t := mysql.Time{
		Time: time.Date(year, month, day, 0, 0, 0, 0, time.Local),
		Type: mysql.TypeDate, Fsp: 0}
	d.SetMysqlTime(t)
	return d, nil
}
// See https://dev.mysql.com/doc/refman/5.7/en/date-and-time-functions.html#function_curtime
// builtinCurrentTime implements CURTIME([fsp]): the current time of day as
// a duration with the requested fractional precision (0 when omitted).
func builtinCurrentTime(args []types.Datum, _ context.Context) (d types.Datum, err error) {
	fsp := 0
	if len(args) == 1 && args[0].Kind() != types.KindNull {
		if fsp, err = checkFsp(args[0]); err != nil {
			d.SetNull()
			return d, errors.Trace(err)
		}
	}
	// Format with full microsecond precision; convertToDuration then trims
	// to the requested fsp.
	d.SetString(time.Now().Format("15:04:05.000000"))
	return convertToDuration(d, fsp)
}
// See https://dev.mysql.com/doc/refman/5.7/en/date-and-time-functions.html#function_extract
// builtinExtract implements EXTRACT(unit FROM date): the numeric value of
// the named unit taken from the argument after conversion to datetime.
// args[0] is the unit name, args[1] the date value.
func builtinExtract(args []types.Datum, _ context.Context) (d types.Datum, err error) {
	unit := args[0].GetString()
	vd := args[1]
	if vd.Kind() == types.KindNull {
		d.SetNull()
		return d, nil
	}
	f := types.NewFieldType(mysql.TypeDatetime)
	f.Decimal = mysql.MaxFsp
	val, err := vd.ConvertTo(f)
	if err != nil {
		d.SetNull()
		return d, errors.Trace(err)
	}
	if val.Kind() == types.KindNull {
		d.SetNull()
		return d, nil
	}
	if val.Kind() != types.KindMysqlTime {
		err = errors.Errorf("need time type, but got %T", val)
		d.SetNull()
		return d, err
	}
	t := val.GetMysqlTime()
	n, err1 := mysql.ExtractTimeNum(unit, t)
	if err1 != nil {
		d.SetNull()
		return d, errors.Trace(err1)
	}
	d.SetInt64(n)
	return d, nil
}
// checkFsp validates a fractional-seconds-precision argument and returns it
// as an int. Valid values lie in [0, mysql.MaxFsp].
func checkFsp(arg types.Datum) (int, error) {
	fsp, err := arg.ToInt64()
	if err != nil {
		return 0, errors.Trace(err)
	}
	switch {
	case int(fsp) > mysql.MaxFsp:
		return 0, errors.Errorf("Too big precision %d specified. Maximum is 6.", fsp)
	case fsp < 0:
		return 0, errors.Errorf("Invalid negative %d specified, must in [0, 6].", fsp)
	}
	return int(fsp), nil
}
// builtinDateArith implements DATE_ADD and DATE_SUB: it adds or subtracts
// an interval (args[2]) from a date value (args[1]); args[0] carries the
// operation. NULL date or interval yields NULL. The result type is DATE
// unless the input or the interval unit carries time-of-day information,
// in which case it is DATETIME.
func builtinDateArith(args []types.Datum, ctx context.Context) (d types.Datum, err error) {
	// Op is used for distinguishing date_add and date_sub.
	// args[0] -> Op
	// args[1] -> Date
	// args[2] -> DateArithInterval
	// health check for date and interval
	if args[1].Kind() == types.KindNull {
		d.SetNull()
		return d, nil
	}
	nodeDate := args[1]
	nodeInterval := args[2].GetInterface().(ast.DateArithInterval)
	nodeIntervalIntervalDatum := nodeInterval.Interval.GetDatum()
	if nodeIntervalIntervalDatum.Kind() == types.KindNull {
		d.SetNull()
		return d, nil
	}
	// parse date
	// Decide whether the result keeps a time part (DATETIME) or not (DATE).
	fieldType := mysql.TypeDate
	var resultField *types.FieldType
	switch nodeDate.Kind() {
	case types.KindMysqlTime:
		x := nodeDate.GetMysqlTime()
		if (x.Type == mysql.TypeDatetime) || (x.Type == mysql.TypeTimestamp) {
			fieldType = mysql.TypeDatetime
		}
	case types.KindString:
		x := nodeDate.GetString()
		if !mysql.IsDateFormat(x) {
			fieldType = mysql.TypeDatetime
		}
	case types.KindInt64:
		x := nodeDate.GetInt64()
		if t, err1 := mysql.ParseTimeFromInt64(x); err1 == nil {
			if (t.Type == mysql.TypeDatetime) || (t.Type == mysql.TypeTimestamp) {
				fieldType = mysql.TypeDatetime
			}
		}
	}
	// A clock unit (HOUR, MINUTE, ...) forces a DATETIME result.
	if mysql.IsClockUnit(nodeInterval.Unit) {
		fieldType = mysql.TypeDatetime
	}
	resultField = types.NewFieldType(fieldType)
	resultField.Decimal = mysql.MaxFsp
	value, err := nodeDate.ConvertTo(resultField)
	if err != nil {
		d.SetNull()
		return d, ErrInvalidOperation.Gen("DateArith invalid args, need date but get %T", nodeDate)
	}
	if value.Kind() == types.KindNull {
		d.SetNull()
		return d, ErrInvalidOperation.Gen("DateArith invalid args, need date but get %v", value.GetValue())
	}
	if value.Kind() != types.KindMysqlTime {
		d.SetNull()
		return d, ErrInvalidOperation.Gen("DateArith need time type, but got %T", value.GetValue())
	}
	result := value.GetMysqlTime()
	// parse interval
	// Normalize the interval operand into a string for ExtractTimeValue.
	var interval string
	if strings.ToLower(nodeInterval.Unit) == "day" {
		day, err2 := parseDayInterval(*nodeIntervalIntervalDatum)
		if err2 != nil {
			d.SetNull()
			return d, ErrInvalidOperation.Gen("DateArith invalid day interval, need int but got %T", nodeIntervalIntervalDatum.GetString())
		}
		interval = fmt.Sprintf("%d", day)
	} else {
		if nodeIntervalIntervalDatum.Kind() == types.KindString {
			interval = fmt.Sprintf("%v", nodeIntervalIntervalDatum.GetString())
		} else {
			ii, err := nodeIntervalIntervalDatum.ToInt64()
			if err != nil {
				d.SetNull()
				return d, errors.Trace(err)
			}
			interval = fmt.Sprintf("%v", ii)
		}
	}
	year, month, day, duration, err := mysql.ExtractTimeValue(nodeInterval.Unit, interval)
	if err != nil {
		d.SetNull()
		return d, errors.Trace(err)
	}
	op := args[0].GetInterface().(ast.DateArithType)
	// DATE_SUB is DATE_ADD with the interval negated.
	if op == ast.DateSub {
		year, month, day, duration = -year, -month, -day, -duration
	}
	result.Time = result.Time.Add(duration)
	result.Time = result.Time.AddDate(int(year), int(month), int(day))
	// Drop the fractional display when the result has no sub-second part.
	if result.Time.Nanosecond() == 0 {
		result.Fsp = 0
	}
	d.SetMysqlTime(result)
	return d, nil
}
// reg extracts the first run of digits from a string-valued day interval.
var reg = regexp.MustCompile(`[\d]+`)
// parseDayInterval coerces a DAY interval operand to an int64. String
// values "true"/"false" (case-insensitive) map to 1/0; other strings are
// reduced to their first digit run before conversion.
func parseDayInterval(value types.Datum) (int64, error) {
	switch value.Kind() {
	case types.KindString:
		vs := value.GetString()
		s := strings.ToLower(vs)
		if s == "false" {
			return 0, nil
		} else if s == "true" {
			return 1, nil
		}
		value.SetString(reg.FindString(vs))
	}
	return value.ToInt64()
}

717
vendor/github.com/pingcap/tidb/evaluator/evaluator.go generated vendored Normal file
View file

@ -0,0 +1,717 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package evaluator
import (
"strings"
"github.com/juju/errors"
"github.com/pingcap/tidb/ast"
"github.com/pingcap/tidb/context"
"github.com/pingcap/tidb/mysql"
"github.com/pingcap/tidb/parser/opcode"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/terror"
"github.com/pingcap/tidb/util/types"
)
// Error instances.
var (
	// ErrInvalidOperation is returned when an expression cannot be evaluated.
	ErrInvalidOperation = terror.ClassEvaluator.New(CodeInvalidOperation, "invalid operation")
)
// Error codes.
const (
	// CodeInvalidOperation is the terror code behind ErrInvalidOperation.
	CodeInvalidOperation terror.ErrCode = 1
)
// Eval evaluates an expression to a value.
func Eval(ctx context.Context, expr ast.ExprNode) (interface{}, error) {
	visitor := &Evaluator{ctx: ctx}
	expr.Accept(visitor)
	if visitor.err != nil {
		return nil, errors.Trace(visitor.err)
	}
	return expr.GetValue(), nil
}
// EvalBool evaluates an expression to a boolean value. A NULL result
// evaluates to false.
func EvalBool(ctx context.Context, expr ast.ExprNode) (bool, error) {
	val, err := Eval(ctx, expr)
	if err != nil {
		return false, errors.Trace(err)
	}
	if val == nil {
		return false, nil
	}
	b, err := types.ToBool(val)
	if err != nil {
		return false, errors.Trace(err)
	}
	return b != 0, nil
}
// boolToInt64 maps true to 1 and false to 0.
func boolToInt64(v bool) int64 {
	var r int64
	if v {
		r = 1
	}
	return r
}
// Evaluator is an ast Visitor that evaluates an expression.
type Evaluator struct {
	ctx context.Context
	// err records the first evaluation failure; later visits are no-ops
	// for result purposes once it is set.
	err error
	// multipleRows is set while inside IN / compare-subquery expressions.
	multipleRows bool
	// existRow is set while inside an EXISTS subquery expression.
	existRow bool
}
// Enter implements ast.Visitor interface. It skips already-evaluated
// subqueries and flags multi-row / existence contexts for child visits.
func (e *Evaluator) Enter(in ast.Node) (out ast.Node, skipChildren bool) {
	switch v := in.(type) {
	case *ast.SubqueryExpr:
		// A subquery with a cached value and no outer-context dependency
		// need not be revisited.
		if v.Evaluated && !v.UseOuterContext {
			return in, true
		}
	case *ast.PatternInExpr, *ast.CompareSubqueryExpr:
		e.multipleRows = true
	case *ast.ExistsSubqueryExpr:
		e.existRow = true
	}
	return in, false
}
// Leave implements ast.Visitor interface. It dispatches each expression
// node to its evaluation helper on the way out of the tree; ok=false stops
// the walk (the helper has set e.err).
func (e *Evaluator) Leave(in ast.Node) (out ast.Node, ok bool) {
	switch v := in.(type) {
	case *ast.AggregateFuncExpr:
		ok = e.aggregateFunc(v)
	case *ast.BetweenExpr:
		ok = e.between(v)
	case *ast.BinaryOperationExpr:
		ok = e.binaryOperation(v)
	case *ast.CaseExpr:
		ok = e.caseExpr(v)
	case *ast.ColumnName:
		ok = true
	case *ast.ColumnNameExpr:
		ok = e.columnName(v)
	case *ast.CompareSubqueryExpr:
		// Leaving the node: clear the flag set in Enter.
		e.multipleRows = false
		ok = e.compareSubquery(v)
	case *ast.DefaultExpr:
		ok = e.defaultExpr(v)
	case *ast.ExistsSubqueryExpr:
		e.existRow = false
		ok = e.existsSubquery(v)
	case *ast.FuncCallExpr:
		ok = e.funcCall(v)
	case *ast.FuncCastExpr:
		ok = e.funcCast(v)
	case *ast.IsNullExpr:
		ok = e.isNull(v)
	case *ast.IsTruthExpr:
		ok = e.isTruth(v)
	case *ast.ParamMarkerExpr:
		ok = e.paramMarker(v)
	case *ast.ParenthesesExpr:
		ok = e.parentheses(v)
	case *ast.PatternInExpr:
		e.multipleRows = false
		ok = e.patternIn(v)
	case *ast.PatternLikeExpr:
		ok = e.patternLike(v)
	case *ast.PatternRegexpExpr:
		ok = e.patternRegexp(v)
	case *ast.PositionExpr:
		ok = e.position(v)
	case *ast.RowExpr:
		ok = e.row(v)
	case *ast.SubqueryExpr:
		ok = e.subqueryExpr(v)
	case ast.SubqueryExec:
		ok = e.subqueryExec(v)
	case *ast.UnaryOperationExpr:
		ok = e.unaryOperation(v)
	case *ast.ValueExpr:
		ok = true
	case *ast.ValuesExpr:
		ok = e.values(v)
	case *ast.VariableExpr:
		ok = e.variable(v)
	case *ast.WhenClause:
		ok = true
	}
	out = in
	return
}
// between rewrites a BETWEEN/NOT BETWEEN expression into the equivalent
// pair of comparisons joined by AND/OR and evaluates that.
func (e *Evaluator) between(v *ast.BetweenExpr) bool {
	var l, r ast.ExprNode
	op := opcode.AndAnd
	if v.Not {
		// v < lv || v > rv
		op = opcode.OrOr
		l = &ast.BinaryOperationExpr{Op: opcode.LT, L: v.Expr, R: v.Left}
		r = &ast.BinaryOperationExpr{Op: opcode.GT, L: v.Expr, R: v.Right}
	} else {
		// v >= lv && v <= rv
		l = &ast.BinaryOperationExpr{Op: opcode.GE, L: v.Expr, R: v.Left}
		r = &ast.BinaryOperationExpr{Op: opcode.LE, L: v.Expr, R: v.Right}
	}
	ret := &ast.BinaryOperationExpr{Op: op, L: l, R: r}
	ret.Accept(e)
	if e.err != nil {
		return false
	}
	v.SetDatum(*ret.GetDatum())
	return true
}
// caseExpr evaluates a CASE expression: the first WHEN whose value
// compares equal to the target (the CASE operand, or TRUE for the
// searched form) supplies the result; otherwise ELSE, otherwise NULL.
func (e *Evaluator) caseExpr(v *ast.CaseExpr) bool {
	tmp := types.NewDatum(boolToInt64(true))
	target := &tmp
	if v.Value != nil {
		target = v.Value.GetDatum()
	}
	// A NULL target never matches any WHEN clause.
	if target.Kind() != types.KindNull {
		for _, val := range v.WhenClauses {
			cmp, err := target.CompareDatum(*val.Expr.GetDatum())
			if err != nil {
				e.err = errors.Trace(err)
				return false
			}
			if cmp == 0 {
				v.SetDatum(*val.Result.GetDatum())
				return true
			}
		}
	}
	if v.ElseClause != nil {
		v.SetDatum(*v.ElseClause.GetDatum())
	} else {
		v.SetNull()
	}
	return true
}
// columnName copies the referred column expression's current value onto
// the column-name node.
func (e *Evaluator) columnName(v *ast.ColumnNameExpr) bool {
	v.SetDatum(*v.Refer.Expr.GetDatum())
	return true
}
// defaultExpr handles DEFAULT expressions; it is currently a no-op and
// leaves the node's value untouched.
func (e *Evaluator) defaultExpr(v *ast.DefaultExpr) bool {
	return true
}
// compareSubquery evaluates `expr op ALL/ANY (subquery)`. A NULL left
// operand yields NULL; otherwise the left value is compared against every
// row of the subquery result.
func (e *Evaluator) compareSubquery(cs *ast.CompareSubqueryExpr) bool {
	lvDatum := cs.L.GetDatum()
	if lvDatum.Kind() == types.KindNull {
		cs.SetNull()
		return true
	}
	lv := lvDatum.GetValue()
	x, err := e.checkResult(cs, lv, cs.R.GetValue().([]interface{}))
	if err != nil {
		e.err = errors.Trace(err)
		return false
	}
	cs.SetValue(x)
	return true
}
// checkResult dispatches an ALL/ANY subquery comparison to the proper checker.
func (e *Evaluator) checkResult(cs *ast.CompareSubqueryExpr, lv interface{}, result []interface{}) (interface{}, error) {
	if !cs.All {
		return e.checkAnyResult(cs, lv, result)
	}
	return e.checkAllResult(cs, lv, result)
}
// checkAllResult reports whether lv satisfies cs.Op against every non-nil
// value in result. If nothing failed but a NULL was seen, the answer is
// NULL (nil), matching MySQL's ALL semantics.
func (e *Evaluator) checkAllResult(cs *ast.CompareSubqueryExpr, lv interface{}, result []interface{}) (interface{}, error) {
	sawNull := false
	for _, rv := range result {
		if rv == nil {
			sawNull = true
			continue
		}
		cmp, err := types.Compare(lv, rv)
		if err != nil {
			return nil, errors.Trace(err)
		}
		ok, err := getCompResult(cs.Op, cmp)
		if err != nil {
			return nil, errors.Trace(err)
		}
		if !ok {
			return false, nil
		}
	}
	if sawNull {
		// e.g. with `insert t (c) values (1),(2),(null)`,
		// `select 3 > all (select c from t)` yields NULL, not true.
		return nil, nil
	}
	return true, nil
}
// checkAnyResult reports whether lv satisfies cs.Op against at least one
// non-nil value in result. No match plus a NULL in the set yields NULL
// (nil), matching MySQL's ANY semantics.
func (e *Evaluator) checkAnyResult(cs *ast.CompareSubqueryExpr, lv interface{}, result []interface{}) (interface{}, error) {
	sawNull := false
	for _, rv := range result {
		if rv == nil {
			sawNull = true
			continue
		}
		cmp, err := types.Compare(lv, rv)
		if err != nil {
			return nil, errors.Trace(err)
		}
		ok, err := getCompResult(cs.Op, cmp)
		if err != nil {
			return nil, errors.Trace(err)
		}
		if ok {
			return true, nil
		}
	}
	if sawNull {
		// e.g. with `insert t (c) values (1),(2),(null)`,
		// `select 0 > any (select c from t)` yields NULL, not false.
		return nil, nil
	}
	return false, nil
}
// existsSubquery evaluates EXISTS (subquery): 1 when the subquery produced
// at least one row, 0 otherwise (including a NULL result).
func (e *Evaluator) existsSubquery(v *ast.ExistsSubqueryExpr) bool {
	d := v.Sel.GetDatum()
	if d.Kind() == types.KindNull {
		v.SetInt64(0)
		return true
	}
	rows, _ := d.GetValue().([]interface{})
	v.SetInt64(boolToInt64(len(rows) > 0))
	return true
}
// subqueryExpr evaluates a SubqueryExpr: the already-computed result datum
// of the subquery executor (if any) is copied onto the expression node and
// the node is marked evaluated.
func (e *Evaluator) subqueryExpr(v *ast.SubqueryExpr) bool {
	if v.SubqueryExec != nil {
		v.SetDatum(*v.SubqueryExec.GetDatum())
	}
	v.Evaluated = true
	return true
}
// subqueryExec does the real work of running a subquery. The number of
// rows fetched depends on context: unlimited (-1) for multi-row operators
// (IN/ANY/ALL), one row for EXISTS, otherwise two so that a multi-row
// scalar subquery can be detected and rejected.
func (e *Evaluator) subqueryExec(v ast.SubqueryExec) bool {
	rowCount := 2
	if e.multipleRows {
		rowCount = -1
	} else if e.existRow {
		rowCount = 1
	}
	rows, err := v.EvalRows(e.ctx, rowCount)
	if err != nil {
		e.err = errors.Trace(err)
		return false
	}
	if e.multipleRows || e.existRow {
		// The caller consumes the row set itself.
		v.SetValue(rows)
		return true
	}
	switch len(rows) {
	case 0:
		// A scalar subquery with no rows yields NULL.
		v.GetDatum().SetNull()
	case 1:
		v.SetValue(rows[0])
	default:
		e.err = errors.New("Subquery returns more than 1 row")
		return false
	}
	return true
}
// checkInList implements the membership test for IN/NOT IN over a value
// list. It returns 1/0 for a definite answer, or nil (NULL) when there was
// no match but the list contained a NULL. Errors are reported via e.err
// with a nil result.
func (e *Evaluator) checkInList(not bool, in interface{}, list []interface{}) interface{} {
	hasNull := false
	for _, v := range list {
		if v == nil {
			hasNull = true
			continue
		}
		// Coerce both operands to a common comparable type first.
		r, err := types.Compare(types.Coerce(in, v))
		if err != nil {
			e.err = errors.Trace(err)
			return nil
		}
		if r == 0 {
			if !not {
				return 1
			}
			return 0
		}
	}
	if hasNull {
		// No match but a NULL was present, e.g. 1 IN (NULL, 2, 3) is NULL.
		return nil
	}
	if not {
		return 1
	}
	return 0
}
// patternIn evaluates `expr [NOT] IN (list)` and `expr [NOT] IN (subquery)`.
// A NULL left operand yields NULL immediately.
func (e *Evaluator) patternIn(n *ast.PatternInExpr) bool {
	lhs := n.Expr.GetDatum()
	if lhs.Kind() == types.KindNull {
		n.SetNull()
		return true
	}
	if n.Sel == nil {
		// Plain value list.
		values := make([]interface{}, 0, len(n.List))
		for _, ei := range n.List {
			values = append(values, ei.GetValue())
		}
		x := e.checkInList(n.Not, lhs.GetValue(), values)
		if e.err != nil {
			return false
		}
		n.SetValue(x)
		return true
	}
	// Subquery form: the subquery was already executed; its row set is
	// checked just like a value list.
	se := n.Sel.(*ast.SubqueryExpr)
	sel := se.SubqueryExec
	res := sel.GetValue().([]interface{})
	x := e.checkInList(n.Not, lhs.GetValue(), res)
	if e.err != nil {
		return false
	}
	n.SetValue(x)
	return true
}
// isNull evaluates `expr IS [NOT] NULL` to 1 or 0.
func (e *Evaluator) isNull(v *ast.IsNullExpr) bool {
	null := v.Expr.GetDatum().Kind() == types.KindNull
	if v.Not {
		null = !null
	}
	v.SetInt64(boolToInt64(null))
	return true
}
// isTruth evaluates `expr IS [NOT] TRUE/FALSE`. A NULL operand is neither
// true nor false, so boolVal stays false for it.
func (e *Evaluator) isTruth(v *ast.IsTruthExpr) bool {
	var boolVal bool
	datum := v.Expr.GetDatum()
	if datum.Kind() != types.KindNull {
		ival, err := datum.ToBool()
		if err != nil {
			e.err = errors.Trace(err)
			return false
		}
		// v.True is 1 for IS TRUE and 0 for IS FALSE.
		if ival == v.True {
			boolVal = true
		}
	}
	if v.Not {
		boolVal = !boolVal
	}
	v.GetDatum().SetInt64(boolToInt64(boolVal))
	return true
}
// paramMarker evaluates a prepared-statement parameter marker. Presumably
// the bound value is filled in elsewhere — nothing to compute here.
func (e *Evaluator) paramMarker(v *ast.ParamMarkerExpr) bool {
	return true
}
// parentheses evaluates (expr) by propagating the inner expression's datum.
func (e *Evaluator) parentheses(v *ast.ParenthesesExpr) bool {
	v.SetDatum(*v.Expr.GetDatum())
	return true
}
// position evaluates a positional reference (e.g. ORDER BY 2) by copying
// the datum of the expression it refers to.
func (e *Evaluator) position(v *ast.PositionExpr) bool {
	v.SetDatum(*v.Refer.Expr.GetDatum())
	return true
}
// row evaluates a row constructor (a, b, ...) into a []interface{} value.
func (e *Evaluator) row(v *ast.RowExpr) bool {
	vals := make([]interface{}, len(v.Values))
	for i, item := range v.Values {
		vals[i] = item.GetValue()
	}
	v.SetValue(vals)
	return true
}
// unaryOperation evaluates NOT, ~ (bit negation), unary + and unary -.
// A NULL operand yields NULL for every operator. The deferred recover
// converts any panic from the underlying arithmetic into an evaluation
// error instead of crashing the session.
func (e *Evaluator) unaryOperation(u *ast.UnaryOperationExpr) bool {
	defer func() {
		if er := recover(); er != nil {
			e.err = errors.Errorf("%v", er)
		}
	}()
	aDatum := u.V.GetDatum()
	if aDatum.Kind() == types.KindNull {
		u.SetNull()
		return true
	}
	switch op := u.Op; op {
	case opcode.Not:
		n, err := aDatum.ToBool()
		if err != nil {
			// NOTE(review): this branch records e.err but still returns
			// true, unlike BitNeg below which returns false — confirm
			// this asymmetry is intended.
			e.err = errors.Trace(err)
		} else if n == 0 {
			u.SetInt64(1)
		} else {
			u.SetInt64(0)
		}
	case opcode.BitNeg:
		// For bit operations, compute on int64 and return uint64.
		n, err := aDatum.ToInt64()
		if err != nil {
			e.err = errors.Trace(err)
			return false
		}
		u.SetUint64(uint64(^n))
	case opcode.Plus:
		// Unary plus is the identity for every numeric-like kind.
		switch aDatum.Kind() {
		case types.KindInt64,
			types.KindUint64,
			types.KindFloat64,
			types.KindFloat32,
			types.KindMysqlDuration,
			types.KindMysqlTime,
			types.KindString,
			types.KindMysqlDecimal,
			types.KindBytes,
			types.KindMysqlHex,
			types.KindMysqlBit,
			types.KindMysqlEnum,
			types.KindMysqlSet:
			u.SetDatum(*aDatum)
		default:
			e.err = ErrInvalidOperation
			return false
		}
	case opcode.Minus:
		switch aDatum.Kind() {
		case types.KindInt64:
			u.SetInt64(-aDatum.GetInt64())
		case types.KindUint64:
			// NOTE(review): may overflow for values > MaxInt64; presumably
			// the deferred recover is relied upon — confirm.
			u.SetInt64(-int64(aDatum.GetUint64()))
		case types.KindFloat64:
			u.SetFloat64(-aDatum.GetFloat64())
		case types.KindFloat32:
			u.SetFloat32(-aDatum.GetFloat32())
		case types.KindMysqlDuration:
			u.SetValue(mysql.ZeroDecimal.Sub(aDatum.GetMysqlDuration().ToNumber()))
		case types.KindMysqlTime:
			u.SetValue(mysql.ZeroDecimal.Sub(aDatum.GetMysqlTime().ToNumber()))
		case types.KindString:
			// NOTE(review): a StrToFloat failure sets e.err but execution
			// continues and true is returned — confirm intended.
			f, err := types.StrToFloat(aDatum.GetString())
			e.err = errors.Trace(err)
			u.SetFloat64(-f)
		case types.KindMysqlDecimal:
			f, _ := aDatum.GetMysqlDecimal().Float64()
			u.SetValue(mysql.NewDecimalFromFloat(-f))
		case types.KindBytes:
			f, err := types.StrToFloat(string(aDatum.GetBytes()))
			e.err = errors.Trace(err)
			u.SetFloat64(-f)
		case types.KindMysqlHex:
			u.SetFloat64(-aDatum.GetMysqlHex().ToNumber())
		case types.KindMysqlBit:
			u.SetFloat64(-aDatum.GetMysqlBit().ToNumber())
		case types.KindMysqlEnum:
			u.SetFloat64(-aDatum.GetMysqlEnum().ToNumber())
		case types.KindMysqlSet:
			u.SetFloat64(-aDatum.GetMysqlSet().ToNumber())
		default:
			e.err = ErrInvalidOperation
			return false
		}
	default:
		e.err = ErrInvalidOperation
		return false
	}
	return true
}
// values evaluates a VALUES(col) reference (INSERT ... ON DUPLICATE KEY
// UPDATE) by copying the referenced column's datum.
func (e *Evaluator) values(v *ast.ValuesExpr) bool {
	v.SetDatum(*v.Column.GetDatum())
	return true
}
// variable evaluates @user and @@system variable references. Unset user
// variables evaluate to NULL; system variables must exist, and the session
// value is preferred over the global one unless @@global. was requested.
func (e *Evaluator) variable(v *ast.VariableExpr) bool {
	name := strings.ToLower(v.Name)
	sessionVars := variable.GetSessionVars(e.ctx)
	globalVars := variable.GetGlobalVarAccessor(e.ctx)
	if !v.IsSystem {
		// user vars
		if value, ok := sessionVars.Users[name]; ok {
			v.SetString(value)
			return true
		}
		// Selecting an unset user variable is permitted and yields NULL.
		v.SetNull()
		return true
	}
	_, ok := variable.SysVars[name]
	if !ok {
		// Unknown system variables are an error, not NULL.
		e.err = variable.UnknownSystemVar.Gen("Unknown system variable '%s'", name)
		return false
	}
	if !v.IsGlobal {
		if value, ok := sessionVars.Systems[name]; ok {
			v.SetString(value)
			return true
		}
	}
	// Fall back to the global value.
	value, err := globalVars.GetGlobalSysVar(e.ctx, name)
	if err != nil {
		e.err = errors.Trace(err)
		return false
	}
	v.SetString(value)
	return true
}
// funcCall evaluates a builtin function call. Legacy functions registered
// in OldFuncs take plain interface{} arguments; newer functions in Funcs
// take Datums. Both paths validate the argument count against the
// function's [MinArgs, MaxArgs] range (MaxArgs == -1 means unbounded).
func (e *Evaluator) funcCall(v *ast.FuncCallExpr) bool {
	of, ok := OldFuncs[v.FnName.L]
	if ok {
		if len(v.Args) < of.MinArgs || (of.MaxArgs != -1 && len(v.Args) > of.MaxArgs) {
			e.err = ErrInvalidOperation.Gen("number of function arguments must in [%d, %d].", of.MinArgs, of.MaxArgs)
			return false
		}
		a := make([]interface{}, len(v.Args))
		for i, arg := range v.Args {
			a[i] = arg.GetValue()
		}
		val, err := of.F(a, e.ctx)
		if err != nil {
			e.err = errors.Trace(err)
			return false
		}
		v.SetValue(val)
		return true
	}
	f, ok := Funcs[v.FnName.L]
	if !ok {
		e.err = ErrInvalidOperation.Gen("unknown function %s", v.FnName.O)
		return false
	}
	if len(v.Args) < f.MinArgs || (f.MaxArgs != -1 && len(v.Args) > f.MaxArgs) {
		e.err = ErrInvalidOperation.Gen("number of function arguments must in [%d, %d].", f.MinArgs, f.MaxArgs)
		return false
	}
	a := make([]types.Datum, len(v.Args))
	for i, arg := range v.Args {
		a[i] = *arg.GetDatum()
	}
	val, err := f.F(a, e.ctx)
	if err != nil {
		e.err = errors.Trace(err)
		return false
	}
	v.SetDatum(val)
	return true
}
// funcCast evaluates CAST(expr AS type). Casting NULL to any type yields
// NULL.
func (e *Evaluator) funcCast(v *ast.FuncCastExpr) bool {
	src := v.Expr.GetValue()
	if src == nil {
		v.SetNull()
		return true
	}
	casted, err := types.Cast(src, v.Tp)
	if err != nil {
		e.err = errors.Trace(err)
		return false
	}
	v.SetValue(casted)
	return true
}
// aggregateFunc finalizes an aggregate function from its accumulated
// evaluation context. Returns false iff a sub-step recorded an error.
func (e *Evaluator) aggregateFunc(v *ast.AggregateFuncExpr) bool {
	name := strings.ToLower(v.F)
	switch name {
	case ast.AggFuncAvg:
		e.evalAggAvg(v)
	case ast.AggFuncCount:
		e.evalAggCount(v)
	case ast.AggFuncFirstRow, ast.AggFuncMax, ast.AggFuncMin, ast.AggFuncSum:
		e.evalAggSetValue(v)
	case ast.AggFuncGroupConcat:
		e.evalAggGroupConcat(v)
	}
	return e.err == nil
}
// evalAggCount finalizes COUNT: the accumulated row count.
func (e *Evaluator) evalAggCount(v *ast.AggregateFuncExpr) {
	ctx := v.GetContext()
	v.SetInt64(ctx.Count)
}
// evalAggSetValue finalizes FIRST_ROW/MAX/MIN/SUM: the accumulated value
// is used as-is.
func (e *Evaluator) evalAggSetValue(v *ast.AggregateFuncExpr) {
	ctx := v.GetContext()
	v.SetValue(ctx.Value)
}
// evalAggAvg finalizes AVG by dividing the accumulated sum by the row
// count; the sum is either a float64 or a mysql.Decimal.
func (e *Evaluator) evalAggAvg(v *ast.AggregateFuncExpr) {
	ctx := v.GetContext()
	switch x := ctx.Value.(type) {
	case float64:
		ctx.Value = x / float64(ctx.Count)
	case mysql.Decimal:
		ctx.Value = x.Div(mysql.NewDecimalFromUint(uint64(ctx.Count), 0))
	}
	v.SetValue(ctx.Value)
}
// evalAggGroupConcat finalizes GROUP_CONCAT: the accumulated buffer as a
// string, or NULL when no rows contributed.
func (e *Evaluator) evalAggGroupConcat(v *ast.AggregateFuncExpr) {
	ctx := v.GetContext()
	if ctx.Buffer == nil {
		v.SetValue(nil)
		return
	}
	v.SetValue(ctx.Buffer.String())
}

View file

@ -0,0 +1,564 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package evaluator
import (
"math"
"github.com/juju/errors"
"github.com/pingcap/tidb/ast"
"github.com/pingcap/tidb/mysql"
"github.com/pingcap/tidb/parser/opcode"
"github.com/pingcap/tidb/util/types"
)
// Shared int64 constants for boolean comparison results.
const (
	zeroI64 int64 = 0
	oneI64  int64 = 1
)
// binaryOperation dispatches a binary operator to the handler for its
// category: logical, comparison, bitwise or arithmetic.
func (e *Evaluator) binaryOperation(o *ast.BinaryOperationExpr) bool {
	switch o.Op {
	case opcode.AndAnd, opcode.OrOr, opcode.LogicXor:
		return e.handleLogicOperation(o)
	case opcode.LT, opcode.LE, opcode.GE, opcode.GT, opcode.EQ, opcode.NE, opcode.NullEQ:
		return e.handleComparisonOp(o)
	case opcode.RightShift, opcode.LeftShift, opcode.And, opcode.Or, opcode.Xor:
		return e.handleBitOp(o)
	case opcode.Plus, opcode.Minus, opcode.Mod, opcode.Div, opcode.Mul, opcode.IntDiv:
		return e.handleArithmeticOp(o)
	default:
		e.err = ErrInvalidOperation
		return false
	}
}
// handleLogicOperation evaluates the logical operators AND, OR and XOR by
// delegating to the per-operator handlers.
func (e *Evaluator) handleLogicOperation(o *ast.BinaryOperationExpr) bool {
	switch o.Op {
	case opcode.AndAnd:
		return e.handleAndAnd(o)
	case opcode.OrOr:
		return e.handleOrOr(o)
	case opcode.LogicXor:
		return e.handleXor(o)
	default:
		// Fixed typo in the error message ("unkown" -> "unknown").
		e.err = ErrInvalidOperation.Gen("unknown operator %s", o.Op)
		return false
	}
}
// handleAndAnd evaluates SQL logical AND with three-valued logic:
// false AND anything is false; otherwise a NULL operand makes the result
// NULL; only true AND true yields 1.
func (e *Evaluator) handleAndAnd(o *ast.BinaryOperationExpr) bool {
	leftVal := o.L.GetValue()
	rightVal := o.R.GetValue()
	if leftVal != nil {
		lb, err := types.ToBool(leftVal)
		if err != nil {
			e.err = errors.Trace(err)
			return false
		}
		if lb == 0 {
			// false && any other value is false.
			o.SetValue(lb)
			return true
		}
	}
	if rightVal != nil {
		rb, err := types.ToBool(rightVal)
		if err != nil {
			e.err = errors.Trace(err)
			return false
		}
		if rb == 0 {
			o.SetValue(rb)
			return true
		}
	}
	if leftVal == nil || rightVal == nil {
		o.SetValue(nil)
		return true
	}
	o.SetValue(int64(1))
	return true
}
// handleOrOr evaluates SQL logical OR with three-valued logic:
// true OR anything is true; otherwise a NULL operand makes the result
// NULL; only false OR false yields 0.
func (e *Evaluator) handleOrOr(o *ast.BinaryOperationExpr) bool {
	leftVal := o.L.GetValue()
	rightVal := o.R.GetValue()
	if leftVal != nil {
		lb, err := types.ToBool(leftVal)
		if err != nil {
			e.err = errors.Trace(err)
			return false
		}
		if lb == 1 {
			// true || any other value is true.
			o.SetValue(lb)
			return true
		}
	}
	if rightVal != nil {
		rb, err := types.ToBool(rightVal)
		if err != nil {
			e.err = errors.Trace(err)
			return false
		}
		if rb == 1 {
			o.SetValue(rb)
			return true
		}
	}
	if leftVal == nil || rightVal == nil {
		o.SetValue(nil)
		return true
	}
	o.SetValue(int64(0))
	return true
}
// handleXor evaluates logical XOR: NULL when either side is NULL,
// otherwise 1 when exactly one operand is true.
func (e *Evaluator) handleXor(o *ast.BinaryOperationExpr) bool {
	leftVal := o.L.GetValue()
	rightVal := o.R.GetValue()
	if leftVal == nil || rightVal == nil {
		o.SetValue(nil)
		return true
	}
	lb, err := types.ToBool(leftVal)
	if err != nil {
		e.err = errors.Trace(err)
		return false
	}
	rb, err := types.ToBool(rightVal)
	if err != nil {
		e.err = errors.Trace(err)
		return false
	}
	if lb != rb {
		o.SetValue(int64(1))
	} else {
		o.SetValue(int64(0))
	}
	return true
}
// handleComparisonOp evaluates <, <=, >=, >, =, != and <=>. Operands are
// coerced to a common type first. A NULL operand yields NULL except for
// <=> (NullEQ), which treats two NULLs as equal.
func (e *Evaluator) handleComparisonOp(o *ast.BinaryOperationExpr) bool {
	a, b := types.Coerce(o.L.GetValue(), o.R.GetValue())
	if a == nil || b == nil {
		// For <=>: both NULL compares equal (1), one NULL unequal (0).
		// Every other operator with a NULL operand yields NULL.
		if o.Op == opcode.NullEQ {
			if a == nil && b == nil {
				o.SetValue(oneI64)
			} else {
				o.SetValue(zeroI64)
			}
		} else {
			o.SetValue(nil)
		}
		return true
	}
	n, err := types.Compare(a, b)
	if err != nil {
		e.err = errors.Trace(err)
		return false
	}
	r, err := getCompResult(o.Op, n)
	if err != nil {
		e.err = errors.Trace(err)
		return false
	}
	if r {
		o.SetValue(oneI64)
	} else {
		o.SetValue(zeroI64)
	}
	return true
}
// getCompResult translates a three-way comparison result (value < 0,
// == 0, > 0) into the boolean outcome of the comparison operator op.
// NullEQ (<=>) behaves like EQ here; its NULL handling is done by the
// caller, so the two cases are merged.
func getCompResult(op opcode.Op, value int) (bool, error) {
	switch op {
	case opcode.LT:
		return value < 0, nil
	case opcode.LE:
		return value <= 0, nil
	case opcode.GE:
		return value >= 0, nil
	case opcode.GT:
		return value > 0, nil
	case opcode.EQ, opcode.NullEQ:
		return value == 0, nil
	case opcode.NE:
		return value != 0, nil
	default:
		// Fixed typo in the error message ("comparision" -> "comparison").
		return false, ErrInvalidOperation.Gen("invalid op %v in comparison operation", op)
	}
}
// handleBitOp evaluates &, |, ^, >> and <<. A NULL operand yields NULL.
// Operands are converted to int64 for computation and the result is
// returned as uint64, matching MySQL's unsigned 64-bit bit semantics.
func (e *Evaluator) handleBitOp(o *ast.BinaryOperationExpr) bool {
	a, b := types.Coerce(o.L.GetValue(), o.R.GetValue())
	if a == nil || b == nil {
		o.SetValue(nil)
		return true
	}
	x, err := types.ToInt64(a)
	if err != nil {
		e.err = errors.Trace(err)
		return false
	}
	y, err := types.ToInt64(b)
	if err != nil {
		e.err = errors.Trace(err)
		return false
	}
	// Compute on int64 for the bit operators, return uint64.
	switch o.Op {
	case opcode.And:
		o.SetValue(uint64(x & y))
	case opcode.Or:
		o.SetValue(uint64(x | y))
	case opcode.Xor:
		o.SetValue(uint64(x ^ y))
	case opcode.RightShift:
		o.SetValue(uint64(x) >> uint64(y))
	case opcode.LeftShift:
		o.SetValue(uint64(x) << uint64(y))
	default:
		e.err = ErrInvalidOperation.Gen("invalid op %v in bit operation", o.Op)
		return false
	}
	return true
}
// handleArithmeticOp evaluates +, -, *, /, % and DIV. Both operands are
// first normalized by coerceArithmetic (strings to float, temporals to
// numbers), then coerced to a common type; a NULL on either side yields
// NULL.
func (e *Evaluator) handleArithmeticOp(o *ast.BinaryOperationExpr) bool {
	a, err := coerceArithmetic(o.L.GetValue())
	if err != nil {
		e.err = errors.Trace(err)
		return false
	}
	b, err := coerceArithmetic(o.R.GetValue())
	if err != nil {
		e.err = errors.Trace(err)
		return false
	}
	a, b = types.Coerce(a, b)
	if a == nil || b == nil {
		o.SetValue(nil)
		return true
	}
	var result interface{}
	switch o.Op {
	case opcode.Plus:
		result, e.err = computePlus(a, b)
	case opcode.Minus:
		result, e.err = computeMinus(a, b)
	case opcode.Mul:
		result, e.err = computeMul(a, b)
	case opcode.Div:
		result, e.err = computeDiv(a, b)
	case opcode.Mod:
		result, e.err = computeMod(a, b)
	case opcode.IntDiv:
		result, e.err = computeIntDiv(a, b)
	default:
		e.err = ErrInvalidOperation.Gen("invalid op %v in arithmetic operation", o.Op)
		return false
	}
	o.SetValue(result)
	return e.err == nil
}
// computePlus adds two coerced operands. Supported pairs are the
// int64/uint64 combinations (overflow-checked by the types helpers),
// float64 and mysql.Decimal; anything else is an invalid-operation error.
func computePlus(a, b interface{}) (interface{}, error) {
	switch x := a.(type) {
	case int64:
		switch y := b.(type) {
		case int64:
			return types.AddInt64(x, y)
		case uint64:
			// Addition commutes, so reuse the (uint64, int64) helper.
			return types.AddInteger(y, x)
		}
	case uint64:
		switch y := b.(type) {
		case int64:
			return types.AddInteger(x, y)
		case uint64:
			return types.AddUint64(x, y)
		}
	case float64:
		switch y := b.(type) {
		case float64:
			return x + y, nil
		}
	case mysql.Decimal:
		switch y := b.(type) {
		case mysql.Decimal:
			return x.Add(y), nil
		}
	}
	return types.InvOp2(a, b, opcode.Plus)
}
// computeMinus subtracts b from a. Supported pairs mirror computePlus:
// int64/uint64 combinations (overflow-checked), float64 and mysql.Decimal.
func computeMinus(a, b interface{}) (interface{}, error) {
	switch x := a.(type) {
	case int64:
		switch y := b.(type) {
		case int64:
			return types.SubInt64(x, y)
		case uint64:
			return types.SubIntWithUint(x, y)
		}
	case uint64:
		switch y := b.(type) {
		case int64:
			return types.SubUintWithInt(x, y)
		case uint64:
			return types.SubUint64(x, y)
		}
	case float64:
		switch y := b.(type) {
		case float64:
			return x - y, nil
		}
	case mysql.Decimal:
		switch y := b.(type) {
		case mysql.Decimal:
			return x.Sub(y), nil
		}
	}
	return types.InvOp2(a, b, opcode.Minus)
}
// computeMul multiplies two coerced operands. Supported pairs mirror
// computePlus: int64/uint64 combinations (overflow-checked), float64 and
// mysql.Decimal.
func computeMul(a, b interface{}) (interface{}, error) {
	switch x := a.(type) {
	case int64:
		switch y := b.(type) {
		case int64:
			return types.MulInt64(x, y)
		case uint64:
			// Multiplication commutes, so reuse the (uint64, int64) helper.
			return types.MulInteger(y, x)
		}
	case uint64:
		switch y := b.(type) {
		case int64:
			return types.MulInteger(x, y)
		case uint64:
			return types.MulUint64(x, y)
		}
	case float64:
		switch y := b.(type) {
		case float64:
			return x * y, nil
		}
	case mysql.Decimal:
		switch y := b.(type) {
		case mysql.Decimal:
			return x.Mul(y), nil
		}
	}
	return types.InvOp2(a, b, opcode.Mul)
}
// computeDiv implements the division operator /. Division by zero returns
// NULL (nil, nil), matching MySQL. Float operands divide as float64;
// everything else goes through decimal arithmetic.
func computeDiv(a, b interface{}) (interface{}, error) {
	// MySQL supports both integer division DIV and the division operator /.
	// opcode.Div is the / operator; integer division is computeIntDiv.
	// For /, float operands are computed as float64.
	switch x := a.(type) {
	case float64:
		y, err := types.ToFloat64(b)
		if err != nil {
			return nil, errors.Trace(err)
		}
		if y == 0 {
			return nil, nil
		}
		return x / y, nil
	default:
		// The scale of the result is the scale of the first operand plus
		// the div_precision_increment system variable (4 by default);
		// the decimal implementation handles that here.
		xa, err := types.ToDecimal(a)
		if err != nil {
			return nil, errors.Trace(err)
		}
		xb, err := types.ToDecimal(b)
		if err != nil {
			return nil, errors.Trace(err)
		}
		if f, _ := xb.Float64(); f == 0 {
			// Division by zero returns NULL.
			return nil, nil
		}
		return xa.Div(xb), nil
	}
}
// computeMod implements the % operator. A zero divisor returns NULL
// (nil, nil). Mixed int64/uint64 operands preserve the signedness of the
// first operand; decimals fall back to float math.
func computeMod(a, b interface{}) (interface{}, error) {
	switch x := a.(type) {
	case int64:
		switch y := b.(type) {
		case int64:
			if y == 0 {
				return nil, nil
			}
			return x % y, nil
		case uint64:
			if y == 0 {
				return nil, nil
			} else if x < 0 {
				// First operand is int64, so the result stays int64.
				return -int64(uint64(-x) % y), nil
			}
			return int64(uint64(x) % y), nil
		}
	case uint64:
		switch y := b.(type) {
		case int64:
			if y == 0 {
				return nil, nil
			} else if y < 0 {
				// First operand is uint64, so the result stays uint64.
				return uint64(x % uint64(-y)), nil
			}
			return x % uint64(y), nil
		case uint64:
			if y == 0 {
				return nil, nil
			}
			return x % y, nil
		}
	case float64:
		switch y := b.(type) {
		case float64:
			if y == 0 {
				return nil, nil
			}
			return math.Mod(x, y), nil
		}
	case mysql.Decimal:
		switch y := b.(type) {
		case mysql.Decimal:
			// NOTE(review): decimal mod is computed via float64, which can
			// lose precision for large decimals — confirm acceptable.
			xf, _ := x.Float64()
			yf, _ := y.Float64()
			if yf == 0 {
				return nil, nil
			}
			return math.Mod(xf, yf), nil
		}
	}
	return types.InvOp2(a, b, opcode.Mod)
}
// computeIntDiv implements the DIV operator (integer division). A zero
// divisor returns NULL (nil, nil). Integer operand pairs use the checked
// helpers; any other pair is computed as decimal and truncated.
func computeIntDiv(a, b interface{}) (interface{}, error) {
	switch x := a.(type) {
	case int64:
		switch y := b.(type) {
		case int64:
			if y == 0 {
				return nil, nil
			}
			return types.DivInt64(x, y)
		case uint64:
			if y == 0 {
				return nil, nil
			}
			return types.DivIntWithUint(x, y)
		}
	case uint64:
		switch y := b.(type) {
		case int64:
			if y == 0 {
				return nil, nil
			}
			return types.DivUintWithInt(x, y)
		case uint64:
			if y == 0 {
				return nil, nil
			}
			return x / y, nil
		}
	}
	// Non-integer operands: compute with decimal and keep the integer part.
	x, err := types.ToDecimal(a)
	if err != nil {
		return nil, errors.Trace(err)
	}
	y, err := types.ToDecimal(b)
	if err != nil {
		return nil, errors.Trace(err)
	}
	if f, _ := y.Float64(); f == 0 {
		return nil, nil
	}
	return x.Div(y).IntPart(), nil
}
// coerceArithmetic converts a value into a type suitable for arithmetic:
// strings and []byte become float64 (MySQL semantics), temporal values
// become their numeric form (the integer part when they carry no
// fractional seconds), and the MySQL wrapper types are unwrapped via
// ToNumber. Everything else passes through unchanged.
//
// Fix: the success paths previously returned `f, errors.Trace(err)` with
// an err already known to be nil — replaced with an explicit nil error.
func coerceArithmetic(a interface{}) (interface{}, error) {
	switch x := a.(type) {
	case string:
		// MySQL converts strings to float for arithmetic operations.
		f, err := types.StrToFloat(x)
		if err != nil {
			return nil, errors.Trace(err)
		}
		return f, nil
	case mysql.Time:
		// Without fractional-second precision, use the integer form.
		v := x.ToNumber()
		if x.Fsp == 0 {
			return v.IntPart(), nil
		}
		return v, nil
	case mysql.Duration:
		// Same rule as mysql.Time.
		v := x.ToNumber()
		if x.Fsp == 0 {
			return v.IntPart(), nil
		}
		return v, nil
	case []byte:
		// []byte behaves like string: converted to float.
		f, err := types.StrToFloat(string(x))
		if err != nil {
			return nil, errors.Trace(err)
		}
		return f, nil
	case mysql.Hex:
		return x.ToNumber(), nil
	case mysql.Bit:
		return x.ToNumber(), nil
	case mysql.Enum:
		return x.ToNumber(), nil
	case mysql.Set:
		return x.ToNumber(), nil
	default:
		return x, nil
	}
}

View file

@ -0,0 +1,217 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package evaluator
import (
"regexp"
"github.com/juju/errors"
"github.com/pingcap/tidb/ast"
"github.com/pingcap/tidb/util/types"
)
// Pattern element types produced by compilePattern.
const (
	patMatch = iota + 1 // match one literal byte (case-insensitively)
	patOne              // '_': match exactly one byte
	patAny              // '%': match any run of bytes, including empty
)
// compilePattern translates a SQL LIKE pattern into two parallel byte
// slices: patChars holds the literal bytes and patTypes the element type
// for each position (patMatch/patOne/patAny). Consecutive '%' are
// collapsed, and "%_" is rewritten to "_%" so matching can consume the
// mandatory byte first.
func compilePattern(pattern string, escape byte) (patChars, patTypes []byte) {
	var lastAny bool
	patChars = make([]byte, len(pattern))
	patTypes = make([]byte, len(pattern))
	patLen := 0
	for i := 0; i < len(pattern); i++ {
		var tp byte
		var c = pattern[i]
		switch c {
		case escape:
			lastAny = false
			tp = patMatch
			if i < len(pattern)-1 {
				i++
				c = pattern[i]
				if c == escape || c == '_' || c == '%' {
					// valid escape.
				} else {
					// Invalid escape: fall back to the escape byte itself.
					// MySQL treats the escape character as its literal
					// value even when the escape sequence is invalid in Go
					// or C: e.g. \m is invalid in Go, but MySQL yields "m"
					// for select '\m'. This is correct only for the escape
					// \, not for others like +.
					// TODO: add more checks for other escapes.
					i--
					c = escape
				}
			}
		case '_':
			lastAny = false
			tp = patOne
		case '%':
			if lastAny {
				// Collapse consecutive '%' into one.
				continue
			}
			lastAny = true
			tp = patAny
		default:
			lastAny = false
			tp = patMatch
		}
		patChars[patLen] = c
		patTypes[patLen] = tp
		patLen++
	}
	// Rewrite "%_" as "_%" so the matcher consumes the required byte first.
	for i := 0; i < patLen-1; i++ {
		if (patTypes[i] == patAny) && (patTypes[i+1] == patOne) {
			patTypes[i] = patOne
			patTypes[i+1] = patAny
		}
	}
	patChars = patChars[:patLen]
	patTypes = patTypes[:patLen]
	return
}
// caseDiff is the distance between lowercase and uppercase ASCII letters.
const caseDiff = 'a' - 'A'

// matchByteCI reports whether bytes a and b are equal, ignoring ASCII case.
func matchByteCI(a, b byte) bool {
	if a == b {
		return true
	}
	switch {
	case a >= 'a' && a <= 'z':
		return a-caseDiff == b
	case a >= 'A' && a <= 'Z':
		return a+caseDiff == b
	default:
		return false
	}
}
// doMatch reports whether str matches the compiled pattern. patAny ('%')
// triggers a scan with recursive backtracking over the rest of the
// pattern; literal bytes are compared case-insensitively.
func doMatch(str string, patChars, patTypes []byte) bool {
	var sIdx int
	for i := 0; i < len(patChars); i++ {
		switch patTypes[i] {
		case patMatch:
			if sIdx >= len(str) || !matchByteCI(str[sIdx], patChars[i]) {
				return false
			}
			sIdx++
		case patOne:
			sIdx++
			if sIdx > len(str) {
				return false
			}
		case patAny:
			i++
			if i == len(patChars) {
				// A trailing '%' matches the remainder of str.
				return true
			}
			// Try each suffix of str as a starting point for the rest of
			// the pattern (backtracking).
			for sIdx < len(str) {
				if matchByteCI(patChars[i], str[sIdx]) && doMatch(str[sIdx:], patChars[i:], patTypes[i:]) {
					return true
				}
				sIdx++
			}
			return false
		}
	}
	// Pattern exhausted; match only if str was fully consumed too.
	return sIdx == len(str)
}
// patternLike evaluates `expr [NOT] LIKE pattern`. A NULL expression or
// pattern yields NULL. The compiled pattern is cached on the node when the
// pattern is a constant.
func (e *Evaluator) patternLike(p *ast.PatternLikeExpr) bool {
	expr := p.Expr.GetValue()
	if expr == nil {
		p.SetValue(nil)
		return true
	}
	sexpr, err := types.ToString(expr)
	if err != nil {
		e.err = errors.Trace(err)
		return false
	}
	// We need to compile the pattern if it has not been compiled yet or it
	// is not static.
	var needCompile = len(p.PatChars) == 0 || !ast.IsConstant(p.Pattern)
	if needCompile {
		pattern := p.Pattern.GetValue()
		if pattern == nil {
			p.SetValue(nil)
			return true
		}
		spattern, err := types.ToString(pattern)
		if err != nil {
			e.err = errors.Trace(err)
			return false
		}
		p.PatChars, p.PatTypes = compilePattern(spattern, p.Escape)
	}
	match := doMatch(sexpr, p.PatChars, p.PatTypes)
	if p.Not {
		match = !match
	}
	p.SetValue(boolToInt64(match))
	return true
}
// patternRegexp evaluates `expr [NOT] REGEXP pattern`. A NULL expression
// or pattern yields NULL. Both the stringified expression (Sexpr) and the
// compiled regexp (Re) are cached on the node when constant.
func (e *Evaluator) patternRegexp(p *ast.PatternRegexpExpr) bool {
	var sexpr string
	if p.Sexpr != nil {
		sexpr = *p.Sexpr
	} else {
		expr := p.Expr.GetValue()
		if expr == nil {
			p.SetValue(nil)
			return true
		}
		var err error
		sexpr, err = types.ToString(expr)
		if err != nil {
			e.err = errors.Errorf("non-string Expression in LIKE: %v (Value of type %T)", expr, expr)
			return false
		}
		if ast.IsConstant(p.Expr) {
			// Cache the stringified constant expression.
			p.Sexpr = new(string)
			*p.Sexpr = sexpr
		}
	}
	re := p.Re
	if re == nil {
		pattern := p.Pattern.GetValue()
		if pattern == nil {
			p.SetValue(nil)
			return true
		}
		spattern, err := types.ToString(pattern)
		if err != nil {
			e.err = errors.Errorf("non-string pattern in LIKE: %v (Value of type %T)", pattern, pattern)
			return false
		}
		if re, err = regexp.Compile(spattern); err != nil {
			e.err = errors.Trace(err)
			return false
		}
		if ast.IsConstant(p.Pattern) {
			// Cache the compiled regexp for constant patterns.
			p.Re = re
		}
	}
	match := re.MatchString(sexpr)
	if p.Not {
		match = !match
	}
	p.SetValue(boolToInt64(match))
	return true
}

136
vendor/github.com/pingcap/tidb/evaluator/helper.go generated vendored Normal file
View file

@ -0,0 +1,136 @@
package evaluator
import (
"strconv"
"strings"
"time"
"github.com/juju/errors"
"github.com/pingcap/tidb/ast"
"github.com/pingcap/tidb/context"
"github.com/pingcap/tidb/mysql"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/util/types"
)
var (
	// CurrentTimestamp is the keyword giving the default value for
	// datetime and timestamp columns.
	CurrentTimestamp = "CURRENT_TIMESTAMP"
	// currentTimestampL is the lowercase form, used to match function names.
	currentTimestampL = "current_timestamp"
	// ZeroTimestamp shows the zero datetime and timestamp.
	ZeroTimestamp = "0000-00-00 00:00:00"
)

var (
	// errDefaultValue reports an invalid default value expression.
	errDefaultValue = errors.New("invalid default value")
)
// GetTimeValue gets the time value with type tp and fractional-second
// precision fsp. v may be a keyword string (CURRENT_TIMESTAMP or the zero
// timestamp), a literal value expression, or a limited set of expressions.
func GetTimeValue(ctx context.Context, v interface{}, tp byte, fsp int) (interface{}, error) {
	return getTimeValue(ctx, v, tp, fsp)
}
// getTimeValue builds a mysql.Time of type tp with precision fsp from v,
// which may be a keyword string, a literal ValueExpr, a CURRENT_TIMESTAMP
// function call, or a numeric unary expression. Unrecognized inputs yield
// (nil, nil).
func getTimeValue(ctx context.Context, v interface{}, tp byte, fsp int) (interface{}, error) {
	value := mysql.Time{
		Type: tp,
		Fsp:  fsp,
	}
	defaultTime, err := getSystemTimestamp(ctx)
	if err != nil {
		return nil, errors.Trace(err)
	}
	switch x := v.(type) {
	case string:
		upperX := strings.ToUpper(x)
		if upperX == CurrentTimestamp {
			value.Time = defaultTime
		} else if upperX == ZeroTimestamp {
			value, _ = mysql.ParseTimeFromNum(0, tp, fsp)
		} else {
			value, err = mysql.ParseTime(x, tp, fsp)
			if err != nil {
				return nil, errors.Trace(err)
			}
		}
	case *ast.ValueExpr:
		switch x.Kind() {
		case types.KindString:
			value, err = mysql.ParseTime(x.GetString(), tp, fsp)
			if err != nil {
				return nil, errors.Trace(err)
			}
		case types.KindInt64:
			value, err = mysql.ParseTimeFromNum(x.GetInt64(), tp, fsp)
			if err != nil {
				return nil, errors.Trace(err)
			}
		case types.KindNull:
			return nil, nil
		default:
			return nil, errors.Trace(errDefaultValue)
		}
	case *ast.FuncCallExpr:
		// Only CURRENT_TIMESTAMP() is accepted as a default expression.
		if x.FnName.L == currentTimestampL {
			return CurrentTimestamp, nil
		}
		return nil, errors.Trace(errDefaultValue)
	case *ast.UnaryOperationExpr:
		// Support simple expressions like `-1`.
		v, err := Eval(ctx, x)
		if err != nil {
			return nil, errors.Trace(err)
		}
		ft := types.NewFieldType(mysql.TypeLonglong)
		xval, err := types.Convert(v, ft)
		if err != nil {
			return nil, errors.Trace(err)
		}
		value, err = mysql.ParseTimeFromNum(xval.(int64), tp, fsp)
		if err != nil {
			return nil, errors.Trace(err)
		}
	default:
		return nil, nil
	}
	return value, nil
}
// IsCurrentTimeExpr returns whether e is a CURRENT_TIMESTAMP function call.
func IsCurrentTimeExpr(e ast.ExprNode) bool {
	fc, ok := e.(*ast.FuncCallExpr)
	return ok && fc.FnName.L == currentTimestampL
}
// getSystemTimestamp returns the current time, honoring the session's
// "timestamp" system variable when it is set to a positive integer.
func getSystemTimestamp(ctx context.Context) (time.Time, error) {
	now := time.Now()
	if ctx == nil {
		return now, nil
	}
	// Check whether the session overrides the timestamp variable.
	sessionVars := variable.GetSessionVars(ctx)
	v, ok := sessionVars.Systems["timestamp"]
	if !ok || v == "" {
		return now, nil
	}
	ts, err := strconv.ParseInt(v, 10, 64)
	if err != nil {
		return time.Time{}, errors.Trace(err)
	}
	if ts <= 0 {
		return now, nil
	}
	return time.Unix(ts, 0), nil
}

120
vendor/github.com/pingcap/tidb/executor/adapter.go generated vendored Normal file
View file

@ -0,0 +1,120 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package executor
import (
"github.com/juju/errors"
"github.com/pingcap/tidb/ast"
"github.com/pingcap/tidb/context"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/optimizer/plan"
)
// recordSet wraps an executor and implements the ast.RecordSet interface.
type recordSet struct {
	// fields are the result fields exposed to the client.
	fields []*ast.ResultField
	// executor produces the rows.
	executor Executor
}
// Fields returns the result fields of the wrapped executor.
func (a *recordSet) Fields() ([]*ast.ResultField, error) {
	return a.fields, nil
}
// Next pulls the next row from the executor; a nil row (with nil error)
// signals exhaustion.
func (a *recordSet) Next() (*ast.Row, error) {
	row, err := a.executor.Next()
	if err != nil || row == nil {
		return nil, errors.Trace(err)
	}
	return &ast.Row{Data: row.Data}, nil
}
// Close releases the wrapped executor's resources.
func (a *recordSet) Close() error {
	return a.executor.Close()
}
// statement adapts an optimizer plan to the ast.Statement interface.
type statement struct {
	// is is the schema snapshot the plan was built against.
	is infoschema.InfoSchema
	// plan is the optimized plan to execute.
	plan plan.Plan
}
// OriginText implements ast.Statement; this adapter keeps no original SQL
// text, so it always returns the empty string.
func (a *statement) OriginText() string {
	return ""
}
// SetText implements ast.Statement; this adapter does not retain the
// statement text, so it is a no-op. (Removed the redundant bare `return`
// at the end of a void function.)
func (a *statement) SetText(text string) {
}
// IsDDL implements ast.Statement; plans handled by this adapter are never
// DDL.
func (a *statement) IsDDL() bool {
	return false
}
// Exec builds an executor from the plan and runs it. Statements that
// produce no result fields (INSERT/UPDATE/...) are drained eagerly and
// return a nil RecordSet; otherwise a lazy recordSet wrapping the
// executor is returned.
func (a *statement) Exec(ctx context.Context) (ast.RecordSet, error) {
	b := newExecutorBuilder(ctx, a.is)
	e := b.build(a.plan)
	if b.err != nil {
		return nil, errors.Trace(b.err)
	}
	// EXECUTE statements are unwrapped to the prepared statement's executor.
	if executorExec, ok := e.(*ExecuteExec); ok {
		err := executorExec.Build()
		if err != nil {
			return nil, errors.Trace(err)
		}
		e = executorExec.StmtExec
	}
	if len(e.Fields()) == 0 {
		// No result fields means no RecordSet; drain the executor now so
		// its side effects happen before returning.
		defer e.Close()
		for {
			row, err := e.Next()
			if err != nil {
				return nil, errors.Trace(err)
			}
			if row == nil {
				// Record the expanded value lists for insert retry.
				changeInsertValueForRetry(a.plan, e)
				return nil, nil
			}
		}
	}
	// Fill in default column names where no alias was given.
	fs := e.Fields()
	for _, f := range fs {
		if len(f.ColumnAsName.O) == 0 {
			f.ColumnAsName = f.Column.Name
		}
	}
	return &recordSet{
		executor: e,
		fields:   fs,
	}, nil
}
// changeInsertValueForRetry copies the expanded column/value lists from a
// finished INSERT or REPLACE executor back onto the plan, so a retried
// transaction re-runs with the same values. Non-insert plans are ignored.
func changeInsertValueForRetry(p plan.Plan, e Executor) {
	if v, ok := p.(*plan.Insert); ok {
		var insertValue *InsertValues
		if !v.IsReplace {
			insertValue = e.(*InsertExec).InsertValues
		} else {
			insertValue = e.(*ReplaceExec).InsertValues
		}
		v.Columns = insertValue.Columns
		v.Setlist = insertValue.Setlist
		// SET-list inserts and VALUES-list inserts are mutually exclusive.
		if len(v.Setlist) == 0 {
			v.Lists = insertValue.Lists
		}
	}
}

438
vendor/github.com/pingcap/tidb/executor/builder.go generated vendored Normal file
View file

@ -0,0 +1,438 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package executor
import (
"math"
"github.com/juju/errors"
"github.com/pingcap/tidb/ast"
"github.com/pingcap/tidb/column"
"github.com/pingcap/tidb/context"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/model"
"github.com/pingcap/tidb/optimizer/plan"
"github.com/pingcap/tidb/parser/opcode"
"github.com/pingcap/tidb/sessionctx/autocommit"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/util/types"
)
// executorBuilder builds an Executor from a Plan.
// The InfoSchema must be the same one used in InfoBinder.
type executorBuilder struct {
	ctx context.Context       // session context the built executors run in
	is  infoschema.InfoSchema // schema snapshot used to resolve tables and indices
	err error                 // first error encountered while building; callers must check it
}

// newExecutorBuilder creates an executorBuilder bound to the given session
// context and InfoSchema snapshot.
func newExecutorBuilder(ctx context.Context, is infoschema.InfoSchema) *executorBuilder {
	return &executorBuilder{
		ctx: ctx,
		is:  is,
	}
}
// build dispatches on the concrete plan node type and constructs the matching
// executor. A nil plan yields a nil executor. An unknown plan type records
// ErrUnknownPlan in b.err and returns nil, so callers must check b.err.
func (b *executorBuilder) build(p plan.Plan) Executor {
	switch v := p.(type) {
	case nil:
		return nil
	case *plan.Aggregate:
		return b.buildAggregate(v)
	case *plan.CheckTable:
		return b.buildCheckTable(v)
	case *plan.DDL:
		return b.buildDDL(v)
	case *plan.Deallocate:
		return b.buildDeallocate(v)
	case *plan.Delete:
		return b.buildDelete(v)
	case *plan.Distinct:
		return b.buildDistinct(v)
	case *plan.Execute:
		return b.buildExecute(v)
	case *plan.Explain:
		return b.buildExplain(v)
	case *plan.Filter:
		// Filter has no dedicated builder: build the child first, then wrap it.
		src := b.build(v.Src())
		return b.buildFilter(src, v.Conditions)
	case *plan.Having:
		return b.buildHaving(v)
	case *plan.IndexScan:
		return b.buildIndexScan(v)
	case *plan.Insert:
		return b.buildInsert(v)
	case *plan.JoinInner:
		return b.buildJoinInner(v)
	case *plan.JoinOuter:
		return b.buildJoinOuter(v)
	case *plan.Limit:
		return b.buildLimit(v)
	case *plan.Prepare:
		return b.buildPrepare(v)
	case *plan.SelectFields:
		return b.buildSelectFields(v)
	case *plan.SelectLock:
		return b.buildSelectLock(v)
	case *plan.ShowDDL:
		return b.buildShowDDL(v)
	case *plan.Show:
		return b.buildShow(v)
	case *plan.Simple:
		return b.buildSimple(v)
	case *plan.Sort:
		return b.buildSort(v)
	case *plan.TableScan:
		return b.buildTableScan(v)
	case *plan.Union:
		return b.buildUnion(v)
	case *plan.Update:
		return b.buildUpdate(v)
	default:
		b.err = ErrUnknownPlan.Gen("Unknown Plan %T", p)
		return nil
	}
}
// buildFilter wraps src with a FilterExec evaluating the AND of the given
// conditions. With no conditions the source executor is returned unchanged.
func (b *executorBuilder) buildFilter(src Executor, conditions []ast.ExprNode) Executor {
	if len(conditions) > 0 {
		return &FilterExec{
			Src:       src,
			Condition: b.joinConditions(conditions),
			ctx:       b.ctx,
		}
	}
	return src
}
// buildTableScan creates a table scan executor over the plan's table and
// ranges, wrapped with any pushed-down filter conditions.
func (b *executorBuilder) buildTableScan(v *plan.TableScan) Executor {
	table, _ := b.is.TableByID(v.Table.ID)
	e := &TableScanExec{
		t:      table,
		fields: v.Fields(),
		ctx:    b.ctx,
		ranges: v.Ranges,
		// Start seeking from the smallest possible handle.
		seekHandle: math.MinInt64,
	}
	return b.buildFilter(e, v.FilterConditions)
}

// buildShowDDL creates the executor that reports DDL status.
func (b *executorBuilder) buildShowDDL(v *plan.ShowDDL) Executor {
	return &ShowDDLExec{
		fields: v.Fields(),
		ctx:    b.ctx,
	}
}

// buildCheckTable creates the executor for checking the given tables.
func (b *executorBuilder) buildCheckTable(v *plan.CheckTable) Executor {
	return &CheckTableExec{
		tables: v.Tables,
		ctx:    b.ctx,
	}
}

// buildDeallocate creates the executor that drops a prepared statement by name.
func (b *executorBuilder) buildDeallocate(v *plan.Deallocate) Executor {
	return &DeallocateExec{
		ctx:  b.ctx,
		Name: v.Name,
	}
}
// buildIndexScan creates an index scan executor for the index chosen by the
// planner, materializes one IndexRangeExec per plan range, and wraps the
// result with any pushed-down filter conditions.
func (b *executorBuilder) buildIndexScan(v *plan.IndexScan) Executor {
	tbl, _ := b.is.TableByID(v.Table.ID)
	var idx *column.IndexedCol
	for _, val := range tbl.Indices() {
		if val.IndexInfo.Name.L == v.Index.Name.L {
			idx = val
			break
		}
	}
	if idx == nil {
		// Fix: the original dereferenced idx unconditionally below and
		// panicked with a nil pointer if the planner's index was not found
		// among the table's indices. Report it via b.err instead.
		b.err = errors.Errorf("index %s not found on table %s", v.Index.Name.O, v.Table.Name.O)
		return nil
	}
	e := &IndexScanExec{
		tbl:        tbl,
		idx:        idx,
		fields:     v.Fields(),
		ctx:        b.ctx,
		Desc:       v.Desc,
		valueTypes: make([]*types.FieldType, len(idx.Columns)),
	}
	// Record the field type of every indexed column so encoded index values
	// can be decoded with the correct types.
	for i, ic := range idx.Columns {
		col := tbl.Cols()[ic.Offset]
		e.valueTypes[i] = &col.FieldType
	}
	e.Ranges = make([]*IndexRangeExec, len(v.Ranges))
	for i, val := range v.Ranges {
		e.Ranges[i] = b.buildIndexRange(e, val)
	}
	return b.buildFilter(e, v.FilterConditions)
}
// buildIndexRange converts a planner index range into its executor form,
// bound to the owning index scan.
func (b *executorBuilder) buildIndexRange(scan *IndexScanExec, v *plan.IndexRange) *IndexRangeExec {
	ran := &IndexRangeExec{
		scan:        scan,
		lowVals:     v.LowVal,
		lowExclude:  v.LowExclude,
		highVals:    v.HighVal,
		highExclude: v.HighExclude,
	}
	return ran
}

// buildJoinOuter creates an outer-join executor. Only the outer side is built
// eagerly; the inner plan is kept so the executor can rebuild it per outer
// row via the stored builder.
func (b *executorBuilder) buildJoinOuter(v *plan.JoinOuter) *JoinOuterExec {
	e := &JoinOuterExec{
		OuterExec: b.build(v.Outer),
		InnerPlan: v.Inner,
		fields:    v.Fields(),
		builder:   b,
	}
	return e
}
// buildJoinInner creates an inner-join executor. Inner plans are not built
// here; the executor builds them lazily through the stored builder, and the
// join conditions are folded into a single AND expression.
func (b *executorBuilder) buildJoinInner(v *plan.JoinInner) *JoinInnerExec {
	e := &JoinInnerExec{
		InnerPlans: v.Inners,
		innerExecs: make([]Executor, len(v.Inners)),
		Condition:  b.joinConditions(v.Conditions),
		fields:     v.Fields(),
		ctx:        b.ctx,
		builder:    b,
	}
	return e
}
// joinConditions folds a condition list into a single expression by chaining
// the items with logical AND. It returns nil for an empty list and the sole
// item for a one-element list. The result is right-associative,
// c0 AND (c1 AND (c2 ...)), matching the recursive formulation.
func (b *executorBuilder) joinConditions(conditions []ast.ExprNode) ast.ExprNode {
	if len(conditions) == 0 {
		return nil
	}
	combined := conditions[len(conditions)-1]
	for i := len(conditions) - 2; i >= 0; i-- {
		combined = &ast.BinaryOperationExpr{
			Op: opcode.AndAnd,
			L:  conditions[i],
			R:  combined,
		}
	}
	return combined
}
// buildSelectLock creates the SELECT ... FOR UPDATE / LOCK IN SHARE MODE
// executor. Under autocommit the lock wrapper is skipped entirely.
func (b *executorBuilder) buildSelectLock(v *plan.SelectLock) Executor {
	src := b.build(v.Src())
	if autocommit.ShouldAutocommit(b.ctx) {
		// Locking of rows for update using SELECT FOR UPDATE only applies when autocommit
		// is disabled (either by beginning transaction with START TRANSACTION or by setting
		// autocommit to 0. If autocommit is enabled, the rows matching the specification are not locked.
		// See: https://dev.mysql.com/doc/refman/5.7/en/innodb-locking-reads.html
		return src
	}
	e := &SelectLockExec{
		Src:  src,
		Lock: v.Lock,
		ctx:  b.ctx,
	}
	return e
}

// buildSelectFields creates the projection executor over the child plan.
func (b *executorBuilder) buildSelectFields(v *plan.SelectFields) Executor {
	src := b.build(v.Src())
	e := &SelectFieldsExec{
		Src:          src,
		ResultFields: v.Fields(),
		ctx:          b.ctx,
	}
	return e
}

// buildAggregate creates the aggregation executor with its aggregate
// functions and GROUP BY items.
func (b *executorBuilder) buildAggregate(v *plan.Aggregate) Executor {
	src := b.build(v.Src())
	e := &AggregateExec{
		Src:          src,
		ResultFields: v.Fields(),
		ctx:          b.ctx,
		AggFuncs:     v.AggFuncs,
		GroupByItems: v.GroupByItems,
	}
	return e
}

// buildHaving implements HAVING as an ordinary filter over the child.
func (b *executorBuilder) buildHaving(v *plan.Having) Executor {
	src := b.build(v.Src())
	return b.buildFilter(src, v.Conditions)
}
// buildSort creates the ORDER BY executor over the child plan.
func (b *executorBuilder) buildSort(v *plan.Sort) Executor {
	src := b.build(v.Src())
	e := &SortExec{
		Src:     src,
		ByItems: v.ByItems,
		ctx:     b.ctx,
	}
	return e
}

// buildLimit creates the LIMIT/OFFSET executor over the child plan.
func (b *executorBuilder) buildLimit(v *plan.Limit) Executor {
	src := b.build(v.Src())
	e := &LimitExec{
		Src:    src,
		Offset: v.Offset,
		Count:  v.Count,
	}
	return e
}

// buildUnion creates the UNION executor, building one child executor per
// participating SELECT.
func (b *executorBuilder) buildUnion(v *plan.Union) Executor {
	e := &UnionExec{
		fields: v.Fields(),
		Sels:   make([]Executor, len(v.Selects)),
	}
	for i, sel := range v.Selects {
		selExec := b.build(sel)
		e.Sels[i] = selExec
	}
	return e
}

// buildDistinct creates the DISTINCT de-duplication executor over the child.
func (b *executorBuilder) buildDistinct(v *plan.Distinct) Executor {
	return &DistinctExec{Src: b.build(v.Src())}
}
// buildPrepare creates the PREPARE executor, carrying the statement name and
// raw SQL text to be parsed at execution time.
func (b *executorBuilder) buildPrepare(v *plan.Prepare) Executor {
	return &PrepareExec{
		Ctx:     b.ctx,
		IS:      b.is,
		Name:    v.Name,
		SQLText: v.SQLText,
	}
}

// buildExecute creates the EXECUTE executor that runs a previously prepared
// statement, identified by name or ID, with the given USING variables.
func (b *executorBuilder) buildExecute(v *plan.Execute) Executor {
	return &ExecuteExec{
		Ctx:       b.ctx,
		IS:        b.is,
		Name:      v.Name,
		UsingVars: v.UsingVars,
		ID:        v.ID,
	}
}

// buildUpdate creates the UPDATE executor; the rows to modify come from the
// plan's embedded select.
func (b *executorBuilder) buildUpdate(v *plan.Update) Executor {
	selExec := b.build(v.SelectPlan)
	return &UpdateExec{ctx: b.ctx, SelectExec: selExec, OrderedList: v.OrderedList}
}
// buildDelete creates the DELETE executor; the rows to remove come from the
// plan's embedded select. IsMultiTable distinguishes multi-table DELETE.
func (b *executorBuilder) buildDelete(v *plan.Delete) Executor {
	selExec := b.build(v.SelectPlan)
	return &DeleteExec{
		ctx:          b.ctx,
		SelectExec:   selExec,
		Tables:       v.Tables,
		IsMultiTable: v.IsMultiTable,
	}
}

// buildShow creates the executor for the various SHOW statements, copying the
// statement's target (db/table/column/user) and flags from the plan.
func (b *executorBuilder) buildShow(v *plan.Show) Executor {
	e := &ShowExec{
		Tp:          v.Tp,
		DBName:      model.NewCIStr(v.DBName),
		Table:       v.Table,
		Column:      v.Column,
		User:        v.User,
		Flag:        v.Flag,
		Full:        v.Full,
		GlobalScope: v.GlobalScope,
		ctx:         b.ctx,
		is:          b.is,
		fields:      v.Fields(),
	}
	// SHOW GRANTS without an explicit user defaults to the session user.
	if e.Tp == ast.ShowGrants && len(e.User) == 0 {
		e.User = variable.GetSessionVars(e.ctx).User
	}
	return e
}

// buildSimple creates the executor for "simple" statements. GRANT gets a
// dedicated executor; everything else runs through SimpleExec.
func (b *executorBuilder) buildSimple(v *plan.Simple) Executor {
	switch s := v.Statement.(type) {
	case *ast.GrantStmt:
		return b.buildGrant(s)
	}
	return &SimpleExec{Statement: v.Statement, ctx: b.ctx}
}
// buildInsert creates the INSERT or REPLACE executor. It resolves the target
// table from the statement's table reference and, for INSERT ... SELECT,
// builds the source select executor. Failures are recorded in b.err.
func (b *executorBuilder) buildInsert(v *plan.Insert) Executor {
	ivs := &InsertValues{
		ctx:     b.ctx,
		Columns: v.Columns,
		Lists:   v.Lists,
		Setlist: v.Setlist,
	}
	if v.SelectPlan != nil {
		ivs.SelectExec = b.build(v.SelectPlan)
	}
	// Get Table: the single target must be a plain table source wrapping a
	// table name.
	ts, ok := v.Table.TableRefs.Left.(*ast.TableSource)
	if !ok {
		b.err = errors.New("Can not get table")
		return nil
	}
	tn, ok := ts.Source.(*ast.TableName)
	if !ok {
		b.err = errors.New("Can not get table")
		return nil
	}
	tableInfo := tn.TableInfo
	tbl, ok := b.is.TableByID(tableInfo.ID)
	if !ok {
		b.err = errors.Errorf("Can not get table %d", tableInfo.ID)
		return nil
	}
	ivs.Table = tbl
	if v.IsReplace {
		return b.buildReplace(ivs)
	}
	insert := &InsertExec{
		InsertValues: ivs,
		OnDuplicate:  v.OnDuplicate,
		Priority:     v.Priority,
	}
	// fields is used to evaluate values expr.
	insert.fields = ts.GetResultFields()
	return insert
}
// buildReplace wraps prepared insert values into a REPLACE executor.
func (b *executorBuilder) buildReplace(vals *InsertValues) Executor {
	return &ReplaceExec{
		InsertValues: vals,
	}
}

// buildGrant creates the GRANT executor from the statement's privilege list,
// object type, level, and grantees.
func (b *executorBuilder) buildGrant(grant *ast.GrantStmt) Executor {
	return &GrantExec{
		ctx:        b.ctx,
		Privs:      grant.Privs,
		ObjectType: grant.ObjectType,
		Level:      grant.Level,
		Users:      grant.Users,
	}
}

// buildDDL creates the DDL executor carrying the raw statement node.
func (b *executorBuilder) buildDDL(v *plan.DDL) Executor {
	return &DDLExec{Statement: v.Statement, ctx: b.ctx, is: b.is}
}

// buildExplain creates the EXPLAIN executor over the explained plan.
func (b *executorBuilder) buildExplain(v *plan.Explain) Executor {
	return &ExplainExec{
		StmtPlan: v.StmtPlan,
		fields:   v.Fields(),
	}
}

60
vendor/github.com/pingcap/tidb/executor/compiler.go generated vendored Normal file
View file

@ -0,0 +1,60 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package executor
import (
"github.com/juju/errors"
"github.com/pingcap/tidb/ast"
"github.com/pingcap/tidb/context"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/optimizer"
"github.com/pingcap/tidb/optimizer/plan"
"github.com/pingcap/tidb/sessionctx"
)
// Compiler compiles an ast.StmtNode to a stmt.Statement.
type Compiler struct {
}

// Compile compiles an ast.StmtNode to a stmt.Statement.
// If it is supported to use new plan and executer, it optimizes the node to
// a plan, and we wrap the plan in an adapter as stmt.Statement.
// If it is not supported, the node will be converted to old statement.
func (c *Compiler) Compile(ctx context.Context, node ast.StmtNode) (ast.Statement, error) {
	ast.SetFlag(node)
	// Resolve names against the current InfoSchema snapshot of the domain.
	is := sessionctx.GetDomain(ctx).InfoSchema()
	if err := optimizer.Preprocess(node, is, ctx); err != nil {
		return nil, errors.Trace(err)
	}
	// Validate should be after NameResolve.
	if err := optimizer.Validate(node, false); err != nil {
		return nil, errors.Trace(err)
	}
	sb := NewSubQueryBuilder(is)
	p, err := optimizer.Optimize(ctx, node, sb)
	if err != nil {
		return nil, errors.Trace(err)
	}
	sa := &statement{
		is:   is,
		plan: p,
	}
	return sa, nil
}

// NewSubQueryBuilder builds and returns a new SubQuery builder.
func NewSubQueryBuilder(is infoschema.InfoSchema) plan.SubQueryBuilder {
	return &subqueryBuilder{is: is}
}

1157
vendor/github.com/pingcap/tidb/executor/executor.go generated vendored Normal file

File diff suppressed because it is too large Load diff

205
vendor/github.com/pingcap/tidb/executor/executor_ddl.go generated vendored Normal file
View file

@ -0,0 +1,205 @@
// Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package executor
import (
"strings"
"github.com/juju/errors"
"github.com/pingcap/tidb/ast"
"github.com/pingcap/tidb/context"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/model"
"github.com/pingcap/tidb/mysql"
"github.com/pingcap/tidb/privilege"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/terror"
)
// DDLExec represents a DDL executor.
type DDLExec struct {
	Statement ast.StmtNode          // the DDL statement to run
	ctx       context.Context       // session context
	is        infoschema.InfoSchema // schema snapshot
	done      bool                  // set after the statement has executed once
}

// Fields implements Executor Fields interface.
// DDL statements produce no result set.
func (e *DDLExec) Fields() []*ast.ResultField {
	return nil
}

// Next implements Execution Next interface.
// The whole DDL statement runs on the first call; subsequent calls are no-ops
// returning (nil, nil). Unrecognized statement types fall through with no
// error.
func (e *DDLExec) Next() (*Row, error) {
	if e.done {
		return nil, nil
	}
	var err error
	switch x := e.Statement.(type) {
	case *ast.TruncateTableStmt:
		err = e.executeTruncateTable(x)
	case *ast.CreateDatabaseStmt:
		err = e.executeCreateDatabase(x)
	case *ast.CreateTableStmt:
		err = e.executeCreateTable(x)
	case *ast.CreateIndexStmt:
		err = e.executeCreateIndex(x)
	case *ast.DropDatabaseStmt:
		err = e.executeDropDatabase(x)
	case *ast.DropTableStmt:
		err = e.executeDropTable(x)
	case *ast.DropIndexStmt:
		err = e.executeDropIndex(x)
	case *ast.AlterTableStmt:
		err = e.executeAlterTable(x)
	}
	if err != nil {
		return nil, errors.Trace(err)
	}
	e.done = true
	return nil, nil
}

// Close implements Executor Close interface. DDLExec holds no resources.
func (e *DDLExec) Close() error {
	return nil
}
// executeTruncateTable handles TRUNCATE TABLE via the table's own Truncate.
func (e *DDLExec) executeTruncateTable(s *ast.TruncateTableStmt) error {
	table, ok := e.is.TableByID(s.Table.TableInfo.ID)
	if !ok {
		// The table was resolved during planning, so it should exist here.
		return errors.New("table not found, should never happen")
	}
	return table.Truncate(e.ctx)
}

// executeCreateDatabase handles CREATE DATABASE, collecting any CHARACTER
// SET / COLLATE options. IF NOT EXISTS suppresses the duplicate-schema error.
func (e *DDLExec) executeCreateDatabase(s *ast.CreateDatabaseStmt) error {
	var opt *ast.CharsetOpt
	if len(s.Options) != 0 {
		opt = &ast.CharsetOpt{}
		for _, val := range s.Options {
			switch val.Tp {
			case ast.DatabaseOptionCharset:
				opt.Chs = val.Value
			case ast.DatabaseOptionCollate:
				opt.Col = val.Value
			}
		}
	}
	err := sessionctx.GetDomain(e.ctx).DDL().CreateSchema(e.ctx, model.NewCIStr(s.Name), opt)
	if err != nil {
		if terror.ErrorEqual(err, infoschema.DatabaseExists) && s.IfNotExists {
			err = nil
		}
	}
	return errors.Trace(err)
}
// executeCreateTable handles CREATE TABLE. IF NOT EXISTS turns the
// table-exists error into success; otherwise it is re-raised with context.
func (e *DDLExec) executeCreateTable(s *ast.CreateTableStmt) error {
	ident := ast.Ident{Schema: s.Table.Schema, Name: s.Table.Name}
	err := sessionctx.GetDomain(e.ctx).DDL().CreateTable(e.ctx, ident, s.Cols, s.Constraints, s.Options)
	if terror.ErrorEqual(err, infoschema.TableExists) {
		if s.IfNotExists {
			return nil
		}
		return infoschema.TableExists.Gen("CREATE TABLE: table exists %s", ident)
	}
	return errors.Trace(err)
}

// executeCreateIndex handles CREATE [UNIQUE] INDEX.
func (e *DDLExec) executeCreateIndex(s *ast.CreateIndexStmt) error {
	ident := ast.Ident{Schema: s.Table.Schema, Name: s.Table.Name}
	err := sessionctx.GetDomain(e.ctx).DDL().CreateIndex(e.ctx, ident, s.Unique, model.NewCIStr(s.IndexName), s.IndexColNames)
	return errors.Trace(err)
}

// executeDropDatabase handles DROP DATABASE. IF EXISTS suppresses the
// not-exists error; otherwise it is converted to a drop-exists error.
func (e *DDLExec) executeDropDatabase(s *ast.DropDatabaseStmt) error {
	err := sessionctx.GetDomain(e.ctx).DDL().DropSchema(e.ctx, model.NewCIStr(s.Name))
	if terror.ErrorEqual(err, infoschema.DatabaseNotExists) {
		if s.IfExists {
			err = nil
		} else {
			err = infoschema.DatabaseDropExists.Gen("Can't drop database '%s'; database doesn't exist", s.Name)
		}
	}
	return errors.Trace(err)
}
// executeDropTable handles DROP TABLE for one or more tables. Missing tables
// are collected and only reported as an error when IF EXISTS is absent. Each
// table is privilege-checked before dropping.
func (e *DDLExec) executeDropTable(s *ast.DropTableStmt) error {
	var notExistTables []string
	for _, tn := range s.Tables {
		fullti := ast.Ident{Schema: tn.Schema, Name: tn.Name}
		schema, ok := e.is.SchemaByName(tn.Schema)
		if !ok {
			// TODO: we should return special error for table not exist, checking "not exist" is not enough,
			// because some other errors may contain this error string too.
			notExistTables = append(notExistTables, fullti.String())
			continue
		}
		tb, err := e.is.TableByName(tn.Schema, tn.Name)
		if err != nil && strings.HasSuffix(err.Error(), "not exist") {
			notExistTables = append(notExistTables, fullti.String())
			continue
		} else if err != nil {
			return errors.Trace(err)
		}
		// Check Privilege: the session user needs DROP on this table.
		privChecker := privilege.GetPrivilegeChecker(e.ctx)
		hasPriv, err := privChecker.Check(e.ctx, schema, tb.Meta(), mysql.DropPriv)
		if err != nil {
			return errors.Trace(err)
		}
		if !hasPriv {
			return errors.Errorf("You do not have the privilege to drop table %s.%s.", tn.Schema, tn.Name)
		}
		err = sessionctx.GetDomain(e.ctx).DDL().DropTable(e.ctx, fullti)
		if infoschema.DatabaseNotExists.Equal(err) || infoschema.TableNotExists.Equal(err) {
			notExistTables = append(notExistTables, fullti.String())
		} else if err != nil {
			return errors.Trace(err)
		}
	}
	if len(notExistTables) > 0 && !s.IfExists {
		return infoschema.TableDropExists.Gen("DROP TABLE: table %s does not exist", strings.Join(notExistTables, ","))
	}
	return nil
}
// executeDropIndex handles DROP INDEX. IF EXISTS suppresses both the
// database-not-exists and table-not-exists errors.
func (e *DDLExec) executeDropIndex(s *ast.DropIndexStmt) error {
	ti := ast.Ident{Schema: s.Table.Schema, Name: s.Table.Name}
	err := sessionctx.GetDomain(e.ctx).DDL().DropIndex(e.ctx, ti, model.NewCIStr(s.IndexName))
	if (infoschema.DatabaseNotExists.Equal(err) || infoschema.TableNotExists.Equal(err)) && s.IfExists {
		err = nil
	}
	return errors.Trace(err)
}

// executeAlterTable handles ALTER TABLE by delegating the spec list to the
// DDL layer.
func (e *DDLExec) executeAlterTable(s *ast.AlterTableStmt) error {
	ti := ast.Ident{Schema: s.Table.Schema, Name: s.Table.Name}
	err := sessionctx.GetDomain(e.ctx).DDL().AlterTable(e.ctx, ti, s.Specs)
	return errors.Trace(err)
}
// joinColumnName renders a column reference as a dotted path, including the
// schema and table qualifiers only when present, e.g. "db.tbl.col" or "col".
func joinColumnName(columnName *ast.ColumnName) string {
	parts := make([]string, 0, 3)
	if columnName.Schema.O != "" {
		parts = append(parts, columnName.Schema.O)
	}
	if columnName.Table.O != "" {
		parts = append(parts, columnName.Table.O)
	}
	parts = append(parts, columnName.Name.O)
	return strings.Join(parts, ".")
}

View file

@ -0,0 +1,279 @@
// Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package executor
import (
"fmt"
"strings"
"github.com/juju/errors"
"github.com/pingcap/tidb/ast"
"github.com/pingcap/tidb/context"
"github.com/pingcap/tidb/evaluator"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/model"
"github.com/pingcap/tidb/mysql"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/db"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/util"
"github.com/pingcap/tidb/util/charset"
"github.com/pingcap/tidb/util/sqlexec"
"github.com/pingcap/tidb/util/types"
)
// SimpleExec represents simple statement executor.
// For statements do simple execution.
// includes `UseStmt`, 'SetStmt`, `SetCharsetStmt`.
// `DoStmt`, `BeginStmt`, `CommitStmt`, `RollbackStmt`.
// TODO: list all simple statements.
type SimpleExec struct {
	Statement ast.StmtNode    // the statement to run
	ctx       context.Context // session context
	done      bool            // set after the statement has executed once
}

// Fields implements Executor Fields interface.
// Simple statements produce no result set.
func (e *SimpleExec) Fields() []*ast.ResultField {
	return nil
}

// Next implements Execution Next interface.
// The whole statement runs on the first call; subsequent calls are no-ops
// returning (nil, nil). Unrecognized statement types fall through with no
// error.
func (e *SimpleExec) Next() (*Row, error) {
	if e.done {
		return nil, nil
	}
	var err error
	switch x := e.Statement.(type) {
	case *ast.UseStmt:
		err = e.executeUse(x)
	case *ast.SetStmt:
		err = e.executeSet(x)
	case *ast.SetCharsetStmt:
		err = e.executeSetCharset(x)
	case *ast.DoStmt:
		err = e.executeDo(x)
	case *ast.BeginStmt:
		err = e.executeBegin(x)
	case *ast.CommitStmt:
		err = e.executeCommit(x)
	case *ast.RollbackStmt:
		err = e.executeRollback(x)
	case *ast.CreateUserStmt:
		err = e.executeCreateUser(x)
	case *ast.SetPwdStmt:
		err = e.executeSetPwd(x)
	}
	if err != nil {
		return nil, errors.Trace(err)
	}
	e.done = true
	return nil, nil
}

// Close implements Executor Close interface. SimpleExec holds no resources.
func (e *SimpleExec) Close() error {
	return nil
}
// executeUse handles USE db: it validates the schema exists, binds it as the
// session's current schema, and updates the per-database charset variables.
func (e *SimpleExec) executeUse(s *ast.UseStmt) error {
	dbname := model.NewCIStr(s.DBName)
	dbinfo, exists := sessionctx.GetDomain(e.ctx).InfoSchema().SchemaByName(dbname)
	if !exists {
		return infoschema.DatabaseNotExists.Gen("database %s not exists", dbname)
	}
	db.BindCurrentSchema(e.ctx, dbname.O)
	// character_set_database is the character set used by the default database.
	// The server sets this variable whenever the default database changes.
	// See: http://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_character_set_database
	sessionVars := variable.GetSessionVars(e.ctx)
	sessionVars.Systems[variable.CharsetDatabase] = dbinfo.Charset
	sessionVars.Systems[variable.CollationDatabase] = dbinfo.Collate
	return nil
}
// executeSet handles SET for both user variables (@x) and system variables,
// at session or global scope.
//
// Bug fix: the original returned from inside the loop once the FIRST variable
// was applied (`return nil` after the user-variable and session-variable
// branches, and `return errors.Trace(err)` after a successful global set), so
// a statement like "SET @a = 1, @b = 2" silently dropped every assignment
// after the first. Successful assignments now continue to the next variable.
func (e *SimpleExec) executeSet(s *ast.SetStmt) error {
	sessionVars := variable.GetSessionVars(e.ctx)
	globalVars := variable.GetGlobalVarAccessor(e.ctx)
	for _, v := range s.Variables {
		// Variable is case insensitive, we use lower case.
		name := strings.ToLower(v.Name)
		if !v.IsSystem {
			// User variable.
			value, err := evaluator.Eval(e.ctx, v.Value)
			if err != nil {
				return errors.Trace(err)
			}
			if value == nil {
				// Setting a user variable to NULL removes it.
				delete(sessionVars.Users, name)
			} else {
				sessionVars.Users[name] = fmt.Sprintf("%v", value)
			}
			continue
		}
		sysVar := variable.GetSysVar(name)
		if sysVar == nil {
			return variable.UnknownSystemVar.Gen("Unknown system variable '%s'", name)
		}
		if sysVar.Scope == variable.ScopeNone {
			return errors.Errorf("Variable '%s' is a read only variable", name)
		}
		if v.IsGlobal {
			if sysVar.Scope&variable.ScopeGlobal == 0 {
				return errors.Errorf("Variable '%s' is a SESSION variable and can't be used with SET GLOBAL", name)
			}
			value, err := evaluator.Eval(e.ctx, v.Value)
			if err != nil {
				return errors.Trace(err)
			}
			if value == nil {
				// A NULL global value is stored as the empty string.
				value = ""
			}
			svalue, err := types.ToString(value)
			if err != nil {
				return errors.Trace(err)
			}
			if err = globalVars.SetGlobalSysVar(e.ctx, name, svalue); err != nil {
				return errors.Trace(err)
			}
			continue
		}
		if sysVar.Scope&variable.ScopeSession == 0 {
			return errors.Errorf("Variable '%s' is a GLOBAL variable and should be set with SET GLOBAL", name)
		}
		if value, err := evaluator.Eval(e.ctx, v.Value); err != nil {
			return errors.Trace(err)
		} else if value == nil {
			sessionVars.Systems[name] = ""
		} else {
			sessionVars.Systems[name] = fmt.Sprintf("%v", value)
		}
	}
	return nil
}
// executeSetCharset handles SET NAMES / SET CHARACTER SET: the charset is
// applied to all SetNamesVariables and the collation (defaulted from the
// charset when omitted) to collation_connection.
func (e *SimpleExec) executeSetCharset(s *ast.SetCharsetStmt) error {
	collation := s.Collate
	if len(collation) == 0 {
		var err error
		collation, err = charset.GetDefaultCollation(s.Charset)
		if err != nil {
			return errors.Trace(err)
		}
	}
	sessionVars := variable.GetSessionVars(e.ctx)
	for _, v := range variable.SetNamesVariables {
		sessionVars.Systems[v] = s.Charset
	}
	sessionVars.Systems[variable.CollationConnection] = collation
	return nil
}

// executeDo handles DO expr[, expr...]: each expression is evaluated for its
// side effects and the results are discarded.
func (e *SimpleExec) executeDo(s *ast.DoStmt) error {
	for _, expr := range s.Exprs {
		_, err := evaluator.Eval(e.ctx, expr)
		if err != nil {
			return errors.Trace(err)
		}
	}
	return nil
}
// executeBegin handles BEGIN / START TRANSACTION: it forces a new transaction
// and marks the session as in-transaction.
func (e *SimpleExec) executeBegin(s *ast.BeginStmt) error {
	_, err := e.ctx.GetTxn(true)
	if err != nil {
		return errors.Trace(err)
	}
	// With START TRANSACTION, autocommit remains disabled until you end
	// the transaction with COMMIT or ROLLBACK. The autocommit mode then
	// reverts to its previous state.
	variable.GetSessionVars(e.ctx).SetStatusFlag(mysql.ServerStatusInTrans, true)
	return nil
}

// executeCommit handles COMMIT: the in-transaction flag is cleared even when
// finishing the transaction fails.
func (e *SimpleExec) executeCommit(s *ast.CommitStmt) error {
	err := e.ctx.FinishTxn(false)
	variable.GetSessionVars(e.ctx).SetStatusFlag(mysql.ServerStatusInTrans, false)
	return errors.Trace(err)
}

// executeRollback handles ROLLBACK, mirroring executeCommit with rollback=true.
func (e *SimpleExec) executeRollback(s *ast.RollbackStmt) error {
	err := e.ctx.FinishTxn(true)
	variable.GetSessionVars(e.ctx).SetStatusFlag(mysql.ServerStatusInTrans, false)
	return errors.Trace(err)
}
// executeCreateUser handles CREATE USER. Each spec is checked against the
// mysql.user table: existing users are skipped under IF NOT EXISTS or
// rejected otherwise, and all remaining users are inserted in one statement.
func (e *SimpleExec) executeCreateUser(s *ast.CreateUserStmt) error {
	users := make([]string, 0, len(s.Specs))
	for _, spec := range s.Specs {
		userName, host := parseUser(spec.User)
		exists, err1 := userExists(e.ctx, userName, host)
		if err1 != nil {
			return errors.Trace(err1)
		}
		if exists {
			if !s.IfNotExists {
				return errors.New("Duplicate user")
			}
			continue
		}
		// NOTE(review): spec.AuthOpt is dereferenced without a nil check —
		// confirm the parser always populates it for CREATE USER.
		pwd := ""
		if spec.AuthOpt.ByAuthString {
			pwd = util.EncodePassword(spec.AuthOpt.AuthString)
		} else {
			pwd = util.EncodePassword(spec.AuthOpt.HashString)
		}
		user := fmt.Sprintf(`("%s", "%s", "%s")`, host, userName, pwd)
		users = append(users, user)
	}
	if len(users) == 0 {
		return nil
	}
	// NOTE(review): the INSERT is assembled with Sprintf, so user names and
	// hosts are interpolated unescaped into SQL — verify they are validated
	// upstream before this point.
	sql := fmt.Sprintf(`INSERT INTO %s.%s (Host, User, Password) VALUES %s;`, mysql.SystemDB, mysql.UserTable, strings.Join(users, ", "))
	_, err := e.ctx.(sqlexec.RestrictedSQLExecutor).ExecRestrictedSQL(e.ctx, sql)
	if err != nil {
		return errors.Trace(err)
	}
	return nil
}
// parseUser splits a MySQL account string of the form "name@host" into its
// user-name and host parts, e.g. "root@localhost" -> ("root", "localhost").
// The input must contain an '@'; with several '@' characters only the first
// two segments are returned.
func parseUser(user string) (string, string) {
	parts := strings.Split(user, "@")
	return parts[0], parts[1]
}
// userExists reports whether a (User, Host) pair is present in mysql.user,
// by issuing a restricted internal query and checking for a first row.
func userExists(ctx context.Context, name string, host string) (bool, error) {
	// NOTE(review): name and host are interpolated unescaped into the query —
	// verify they are validated upstream.
	sql := fmt.Sprintf(`SELECT * FROM %s.%s WHERE User="%s" AND Host="%s";`, mysql.SystemDB, mysql.UserTable, name, host)
	rs, err := ctx.(sqlexec.RestrictedSQLExecutor).ExecRestrictedSQL(ctx, sql)
	if err != nil {
		return false, errors.Trace(err)
	}
	defer rs.Close()
	row, err := rs.Next()
	if err != nil {
		return false, errors.Trace(err)
	}
	return row != nil, nil
}

// executeSetPwd handles SET PASSWORD by updating the stored password hash in
// mysql.user for the given user@host.
func (e *SimpleExec) executeSetPwd(s *ast.SetPwdStmt) error {
	// TODO: If len(s.User) == 0, use CURRENT_USER()
	userName, host := parseUser(s.User)
	// Update mysql.user
	sql := fmt.Sprintf(`UPDATE %s.%s SET password="%s" WHERE User="%s" AND Host="%s";`, mysql.SystemDB, mysql.UserTable, util.EncodePassword(s.Password), userName, host)
	_, err := e.ctx.(sqlexec.RestrictedSQLExecutor).ExecRestrictedSQL(e.ctx, sql)
	return errors.Trace(err)
}

View file

@ -0,0 +1,932 @@
// Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package executor
import (
"fmt"
"strings"
"github.com/juju/errors"
"github.com/pingcap/tidb/ast"
"github.com/pingcap/tidb/column"
"github.com/pingcap/tidb/context"
"github.com/pingcap/tidb/evaluator"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/mysql"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/terror"
"github.com/pingcap/tidb/util/types"
)
// Compile-time checks that the write executors satisfy the Executor interface.
var (
	_ Executor = &UpdateExec{}
	_ Executor = &DeleteExec{}
	_ Executor = &InsertExec{}
)

// UpdateExec represents an update executor.
type UpdateExec struct {
	SelectExec  Executor          // source of the rows to update
	OrderedList []*ast.Assignment // SET assignments aligned with the select's fields
	// Map for unique (Table, handle) pair.
	updatedRowKeys map[table.Table]map[int64]struct{}
	ctx            context.Context
	rows           []*Row          // The rows fetched from TableExec.
	newRowsData    [][]types.Datum // The new values to be set.
	fetched        bool            // whether all source rows have been buffered
	cursor         int             // index of the next buffered row to apply
}
// Next implements Executor Next interface.
// On the first call all source rows are buffered; each subsequent call applies
// the update for one buffered row and returns an empty Row, or nil when all
// rows have been applied. A (table, handle) pair is updated at most once.
func (e *UpdateExec) Next() (*Row, error) {
	if !e.fetched {
		err := e.fetchRows()
		if err != nil {
			return nil, errors.Trace(err)
		}
		e.fetched = true
	}
	columns, err := getUpdateColumns(e.OrderedList)
	if err != nil {
		return nil, errors.Trace(err)
	}
	if e.cursor >= len(e.rows) {
		return nil, nil
	}
	if e.updatedRowKeys == nil {
		e.updatedRowKeys = make(map[table.Table]map[int64]struct{})
	}
	row := e.rows[e.cursor]
	newData := e.newRowsData[e.cursor]
	for _, entry := range row.RowKeys {
		tbl := entry.Tbl
		if e.updatedRowKeys[tbl] == nil {
			e.updatedRowKeys[tbl] = make(map[int64]struct{})
		}
		// Slice out this table's columns from the joined row.
		offset := e.getTableOffset(tbl)
		handle := entry.Handle
		oldData := row.Data[offset : offset+len(tbl.Cols())]
		newTableData := newData[offset : offset+len(tbl.Cols())]
		_, ok := e.updatedRowKeys[tbl][handle]
		if ok {
			// Each matching row is updated once, even if it matches the conditions multiple times.
			continue
		}
		// Update row
		err1 := updateRecord(e.ctx, handle, oldData, newTableData, columns, tbl, offset, false)
		if err1 != nil {
			return nil, errors.Trace(err1)
		}
		e.updatedRowKeys[tbl][handle] = struct{}{}
	}
	e.cursor++
	return &Row{}, nil
}
// getUpdateColumns indexes the SET assignments by their position in the
// select field list (nil entries included). The error result is always nil;
// it is kept for signature stability with the caller.
func getUpdateColumns(assignList []*ast.Assignment) (map[int]*ast.Assignment, error) {
	columns := make(map[int]*ast.Assignment, len(assignList))
	for idx, assign := range assignList {
		columns[idx] = assign
	}
	return columns, nil
}
// fetchRows drains the source executor, buffering every row together with its
// post-assignment values. For each field position with a SET assignment the
// new value is evaluated; other positions keep the original value.
func (e *UpdateExec) fetchRows() error {
	for {
		row, err := e.SelectExec.Next()
		if err != nil {
			return errors.Trace(err)
		}
		if row == nil {
			// Source exhausted.
			return nil
		}
		data := make([]types.Datum, len(e.SelectExec.Fields()))
		newData := make([]types.Datum, len(e.SelectExec.Fields()))
		for i, f := range e.SelectExec.Fields() {
			data[i] = types.NewDatum(f.Expr.GetValue())
			newData[i] = data[i]
			if e.OrderedList[i] != nil {
				val, err := evaluator.Eval(e.ctx, e.OrderedList[i].Expr)
				if err != nil {
					return errors.Trace(err)
				}
				newData[i] = types.NewDatum(val)
			}
		}
		row.Data = data
		e.rows = append(e.rows, row)
		e.newRowsData = append(e.newRowsData, newData)
	}
}
// getTableOffset locates the first field position belonging to table t in the
// source executor's field list, stepping over each table's column span.
// Returns 0 if the table is not found.
func (e *UpdateExec) getTableOffset(t table.Table) int {
	fields := e.SelectExec.Fields()
	i := 0
	for i < len(fields) {
		field := fields[i]
		if field.Table.Name.L == t.Meta().Name.L {
			return i
		}
		// Skip the whole column span of this table.
		i += len(field.Table.Columns)
	}
	return 0
}
// updateRecord applies the SET assignments belonging to table t to the row
// identified by handle h. It locks the row, validates and casts the new
// values, skips the write entirely when nothing changed, and either rewrites
// the record (when the integer primary-key handle itself changed) or updates
// it in place. Affected-row accounting follows MySQL: 1 for a plain update,
// 2 when invoked from ON DUPLICATE KEY UPDATE.
func updateRecord(ctx context.Context, h int64, oldData, newData []types.Datum, updateColumns map[int]*ast.Assignment, t table.Table, offset int, onDuplicateUpdate bool) error {
	if err := t.LockRow(ctx, h, false); err != nil {
		return errors.Trace(err)
	}
	cols := t.Cols()
	touched := make(map[int]bool, len(cols))
	assignExists := false
	var newHandle types.Datum
	for i, asgn := range updateColumns {
		if asgn == nil {
			continue
		}
		if i < offset || i >= offset+len(cols) {
			// The assign expression is for another table, not this.
			continue
		}
		colIndex := i - offset
		col := cols[colIndex]
		if col.IsPKHandleColumn(t.Meta()) {
			// Changing the handle column means the record key changes too.
			newHandle = newData[i]
		}
		if mysql.HasAutoIncrementFlag(col.Flag) {
			if newData[i].Kind() == types.KindNull {
				return errors.Errorf("Column '%v' cannot be null", col.Name.O)
			}
			val, err := newData[i].ToInt64()
			if err != nil {
				return errors.Trace(err)
			}
			// Keep the auto-increment counter ahead of explicitly set values.
			t.RebaseAutoID(val, true)
		}
		touched[colIndex] = true
		assignExists = true
	}
	// If no assign list for this table, no need to update.
	if !assignExists {
		return nil
	}
	// Check whether new value is valid.
	if err := column.CastValues(ctx, newData, cols); err != nil {
		return errors.Trace(err)
	}
	if err := column.CheckNotNull(cols, newData); err != nil {
		return errors.Trace(err)
	}
	// If row is not changed, we should do nothing.
	rowChanged := false
	for i := range oldData {
		if !touched[i] {
			continue
		}
		n, err := newData[i].CompareDatum(oldData[i])
		if err != nil {
			return errors.Trace(err)
		}
		if n != 0 {
			rowChanged = true
			break
		}
	}
	if !rowChanged {
		// See: https://dev.mysql.com/doc/refman/5.7/en/mysql-real-connect.html CLIENT_FOUND_ROWS
		if variable.GetSessionVars(ctx).ClientCapability&mysql.ClientFoundRows > 0 {
			variable.GetSessionVars(ctx).AddAffectedRows(1)
		}
		return nil
	}
	var err error
	if newHandle.Kind() != types.KindNull {
		// The handle changed: remove the old record and insert under the new key.
		err = t.RemoveRecord(ctx, h, oldData)
		if err != nil {
			return errors.Trace(err)
		}
		_, err = t.AddRecord(ctx, newData)
	} else {
		// Update record to new value and update index.
		err = t.UpdateRecord(ctx, h, oldData, newData, touched)
	}
	if err != nil {
		return errors.Trace(err)
	}
	// Record affected rows.
	if !onDuplicateUpdate {
		variable.GetSessionVars(ctx).AddAffectedRows(1)
	} else {
		variable.GetSessionVars(ctx).AddAffectedRows(2)
	}
	return nil
}
// Fields implements Executor Fields interface.
// Returns nil to indicate there is no output.
func (e *UpdateExec) Fields() []*ast.ResultField {
	return nil
}

// Close implements Executor Close interface by closing the source executor.
func (e *UpdateExec) Close() error {
	return e.SelectExec.Close()
}
// DeleteExec represents a delete executor.
// See: https://dev.mysql.com/doc/refman/5.7/en/delete.html
type DeleteExec struct {
	SelectExec Executor // source of the rows to delete

	ctx          context.Context
	Tables       []*ast.TableName // target tables for multi-table DELETE
	IsMultiTable bool             // whether this is a multi-table DELETE

	finished bool // set after the delete has run once
}
// Next implements Executor Next interface.
// The whole delete runs on the first call: source rows are drained, the
// unique (table, handle) pairs to remove are collected (restricted to the
// listed tables for multi-table DELETE), and then each record is removed.
func (e *DeleteExec) Next() (*Row, error) {
	if e.finished {
		return nil, nil
	}
	defer func() {
		e.finished = true
	}()
	if e.IsMultiTable && len(e.Tables) == 0 {
		// Multi-table DELETE with an empty table list deletes nothing.
		return &Row{}, nil
	}
	tblIDMap := make(map[int64]bool, len(e.Tables))
	// Get table alias map.
	tblNames := make(map[string]string)
	// Map for unique (Table, handle) pair.
	rowKeyMap := make(map[table.Table]map[int64]struct{})
	if e.IsMultiTable {
		// Delete from multiple tables should consider table ident list.
		fs := e.SelectExec.Fields()
		for _, f := range fs {
			if len(f.TableAsName.L) > 0 {
				tblNames[f.TableAsName.L] = f.TableName.Name.L
			} else {
				tblNames[f.TableName.Name.L] = f.TableName.Name.L
			}
		}
		for _, t := range e.Tables {
			// Consider DBName.
			_, ok := tblNames[t.Name.L]
			if !ok {
				return nil, errors.Errorf("Unknown table '%s' in MULTI DELETE", t.Name.O)
			}
			tblIDMap[t.TableInfo.ID] = true
		}
	}
	for {
		row, err := e.SelectExec.Next()
		if err != nil {
			return nil, errors.Trace(err)
		}
		if row == nil {
			break
		}
		for _, entry := range row.RowKeys {
			if e.IsMultiTable {
				tid := entry.Tbl.Meta().ID
				if _, ok := tblIDMap[tid]; !ok {
					// Row belongs to a table not listed in the DELETE target list.
					continue
				}
			}
			if rowKeyMap[entry.Tbl] == nil {
				rowKeyMap[entry.Tbl] = make(map[int64]struct{})
			}
			rowKeyMap[entry.Tbl][entry.Handle] = struct{}{}
		}
	}
	for t, handleMap := range rowKeyMap {
		for handle := range handleMap {
			data, err := t.Row(e.ctx, handle)
			if err != nil {
				return nil, errors.Trace(err)
			}
			err = e.removeRow(e.ctx, t, handle, data)
			if err != nil {
				return nil, errors.Trace(err)
			}
		}
	}
	return nil, nil
}
// getTable resolves a table name against the current schema snapshot.
func (e *DeleteExec) getTable(ctx context.Context, tableName *ast.TableName) (table.Table, error) {
	is := sessionctx.GetDomain(ctx).InfoSchema()
	return is.TableByName(tableName.Schema, tableName.Name)
}

// removeRow deletes one record and bumps the session's affected-row counter.
func (e *DeleteExec) removeRow(ctx context.Context, t table.Table, h int64, data []types.Datum) error {
	if err := t.RemoveRecord(ctx, h, data); err != nil {
		return errors.Trace(err)
	}
	variable.GetSessionVars(ctx).AddAffectedRows(1)
	return nil
}
// Fields implements Executor Fields interface.
// Returns nil to indicate there is no output.
func (e *DeleteExec) Fields() []*ast.ResultField {
	return nil
}

// Close implements Executor Close interface.
// It closes the child SELECT executor that produced the rows to delete.
func (e *DeleteExec) Close() error {
	return e.SelectExec.Close()
}
// InsertValues is the data to insert.
// It is shared by InsertExec and ReplaceExec.
type InsertValues struct {
	// currRow is the index of the value list currently being evaluated.
	currRow int
	ctx     context.Context
	// SelectExec is non-nil for INSERT/REPLACE ... SELECT statements.
	SelectExec Executor

	Table   table.Table
	Columns []*ast.ColumnName
	// Lists holds the literal value lists of the VALUES form.
	Lists [][]ast.ExprNode
	// Setlist holds the assignments of the SET form.
	Setlist []*ast.Assignment
}

// InsertExec represents an insert executor.
type InsertExec struct {
	*InsertValues

	// OnDuplicate holds the ON DUPLICATE KEY UPDATE assignments, if any.
	OnDuplicate []*ast.Assignment

	fields []*ast.ResultField

	// Priority is the statement's parsed priority value.
	Priority int

	// finished guards against re-running the insert on a second Next call.
	finished bool
}
// Next implements Executor Next interface.
// It evaluates all rows to insert (from VALUES lists or a child SELECT),
// adds them to the table, and falls back to the ON DUPLICATE KEY UPDATE
// assignments when a duplicate-key error occurs. Insert statements produce
// no result rows, so it always returns nil data.
func (e *InsertExec) Next() (*Row, error) {
	if e.finished {
		return nil, nil
	}
	cols, err := e.getColumns(e.Table.Cols())
	if err != nil {
		return nil, errors.Trace(err)
	}
	txn, err := e.ctx.GetTxn(false)
	if err != nil {
		return nil, errors.Trace(err)
	}
	toUpdateColumns, err := getOnDuplicateUpdateColumns(e.OnDuplicate, e.Table)
	if err != nil {
		return nil, errors.Trace(err)
	}
	var rows [][]types.Datum
	if e.SelectExec != nil {
		rows, err = e.getRowsSelect(cols)
	} else {
		rows, err = e.getRows(cols)
	}
	if err != nil {
		return nil, errors.Trace(err)
	}
	for _, row := range rows {
		if len(e.OnDuplicate) == 0 {
			// Without ON DUPLICATE KEY UPDATE the key is presumed absent,
			// which lets the storage layer skip an existence check.
			txn.SetOption(kv.PresumeKeyNotExists, nil)
		}
		h, err := e.Table.AddRecord(e.ctx, row)
		txn.DelOption(kv.PresumeKeyNotExists)
		if err == nil {
			continue
		}
		// Only a duplicate-key error can be resolved by the update path.
		if len(e.OnDuplicate) == 0 || !terror.ErrorEqual(err, kv.ErrKeyExists) {
			return nil, errors.Trace(err)
		}
		if err = e.onDuplicateUpdate(row, h, toUpdateColumns); err != nil {
			return nil, errors.Trace(err)
		}
	}
	e.finished = true
	return nil, nil
}
// Fields implements Executor Fields interface.
// Returns nil to indicate there is no output.
func (e *InsertExec) Fields() []*ast.ResultField {
	return nil
}

// Close implements Executor Close interface.
// Only INSERT ... SELECT has a child executor to close.
func (e *InsertExec) Close() error {
	if e.SelectExec != nil {
		return e.SelectExec.Close()
	}
	return nil
}
// getColumns resolves the target column list of the insert.
// There are three statement shapes:
//  1. insert ... values(...)  --> named columns (or all columns if none given)
//  2. insert ... set x=y...   --> columns taken from the SET assignments
//  3. insert ... (select ..)  --> named columns
//
// See: https://dev.mysql.com/doc/refman/5.7/en/insert.html
func (e *InsertValues) getColumns(tableCols []*column.Col) ([]*column.Col, error) {
	var cols []*column.Col
	var err error
	if len(e.Setlist) > 0 {
		// SET form: the column list comes from the assignments.
		names := make([]string, 0, len(e.Setlist))
		for _, assign := range e.Setlist {
			names = append(names, assign.Column.Name.O)
		}
		cols, err = column.FindCols(tableCols, names)
		if err != nil {
			return nil, errors.Errorf("INSERT INTO %s: %s", e.Table.Meta().Name.O, err)
		}
		if len(cols) == 0 {
			return nil, errors.Errorf("INSERT INTO %s: empty column", e.Table.Meta().Name.O)
		}
	} else {
		// Named form: use the listed columns, or every column when none given.
		names := make([]string, 0, len(e.Columns))
		for _, cn := range e.Columns {
			names = append(names, cn.Name.O)
		}
		cols, err = column.FindCols(tableCols, names)
		if err != nil {
			return nil, errors.Errorf("INSERT INTO %s: %s", e.Table.Meta().Name.O, err)
		}
		// If cols are empty, use all columns instead.
		if len(cols) == 0 {
			cols = tableCols
		}
	}
	// Reject a column that is specified more than once.
	if err = column.CheckOnce(cols); err != nil {
		return nil, errors.Trace(err)
	}
	return cols, nil
}
// fillValueList converts a SET-style insert into a single value list in
// e.Lists so later evaluation can treat both forms the same way.
func (e *InsertValues) fillValueList() error {
	if len(e.Setlist) == 0 {
		return nil
	}
	if len(e.Lists) > 0 {
		return errors.Errorf("INSERT INTO %s: set type should not use values", e.Table)
	}
	var exprs []ast.ExprNode
	for _, assign := range e.Setlist {
		exprs = append(exprs, assign.Expr)
	}
	e.Lists = append(e.Lists, exprs)
	return nil
}
// checkValueCount validates that value list number num has the right arity:
// it must match the first list's length, and a non-empty list must match the
// resolved column count.
func (e *InsertValues) checkValueCount(insertValueCount, valueCount, num int, cols []*column.Col) error {
	// Every value list must have the same length as the first one:
	// "insert into t values (), ()" is valid; mixing () with (1), or
	// (1,2) with (1), is not.
	if insertValueCount != valueCount {
		return errors.Errorf("Column count doesn't match value count at row %d", num+1)
	}
	switch {
	case valueCount == 0 && len(e.Columns) > 0:
		// "insert into t (c1) values ()" is not valid.
		return errors.Errorf("INSERT INTO %s: expected %d value(s), have %d", e.Table.Meta().Name.O, len(e.Columns), 0)
	case valueCount > 0 && valueCount != len(cols):
		return errors.Errorf("INSERT INTO %s: expected %d value(s), have %d", e.Table.Meta().Name.O, len(cols), valueCount)
	}
	return nil
}
// getColumnDefaultValues collects the default values of the given columns,
// keyed by lowercase column name. Columns without a usable default are
// simply absent from the map.
func (e *InsertValues) getColumnDefaultValues(cols []*column.Col) (map[string]types.Datum, error) {
	defaultValMap := map[string]types.Datum{}
	for _, col := range cols {
		// The error is only examined when ok is true, mirroring the
		// three-value contract of GetColDefaultValue.
		if value, ok, err := table.GetColDefaultValue(e.ctx, &col.ColumnInfo); ok {
			if err != nil {
				return nil, errors.Trace(err)
			}
			defaultValMap[col.Name.L] = value
		}
	}
	return defaultValMap, nil
}
// getRows evaluates the literal VALUES (or SET) lists into full datum rows.
// NOTE(review): e.Lists[0] is read unconditionally, so e.Lists must be
// non-empty here (fillValueList guarantees this for the SET form; confirm
// the parser guarantees it for VALUES).
func (e *InsertValues) getRows(cols []*column.Col) (rows [][]types.Datum, err error) {
	// process `insert|replace ... set x=y...`
	if err = e.fillValueList(); err != nil {
		return nil, errors.Trace(err)
	}
	defaultVals, err := e.getColumnDefaultValues(e.Table.Cols())
	if err != nil {
		return nil, errors.Trace(err)
	}
	rows = make([][]types.Datum, len(e.Lists))
	// All value lists must match the length of the first one.
	length := len(e.Lists[0])
	for i, list := range e.Lists {
		if err = e.checkValueCount(length, len(list), i, cols); err != nil {
			return nil, errors.Trace(err)
		}
		// currRow is consumed by initDefaultValues for retry bookkeeping.
		e.currRow = i
		rows[i], err = e.getRow(cols, list, defaultVals)
		if err != nil {
			return nil, errors.Trace(err)
		}
	}
	return
}
// getRow evaluates one expression list into a full table row.
// DEFAULT and DEFAULT(col) expressions are resolved from defaultVals.
func (e *InsertValues) getRow(cols []*column.Col, list []ast.ExprNode, defaultVals map[string]types.Datum) ([]types.Datum, error) {
	vals := make([]types.Datum, len(list))
	var err error
	for i, expr := range list {
		if d, ok := expr.(*ast.DefaultExpr); ok {
			cn := d.Name
			if cn != nil {
				// DEFAULT(col): look up the named column's default.
				var found bool
				vals[i], found = defaultVals[cn.Name.L]
				if !found {
					return nil, errors.Errorf("default column not found - %s", cn.Name.O)
				}
			} else {
				// Bare DEFAULT: use the default of the column at this position.
				vals[i] = defaultVals[cols[i].Name.L]
			}
		} else {
			var val interface{}
			val, err = evaluator.Eval(e.ctx, expr)
			// NOTE(review): val is stored before err is checked; harmless
			// since we return on error, but checking err first would be clearer.
			vals[i].SetValue(val)
			if err != nil {
				return nil, errors.Trace(err)
			}
		}
	}
	return e.fillRowData(cols, vals)
}
// getRowsSelect drains the child SELECT executor and converts each of its
// rows into a full table row for `insert|replace into ... select ...`.
func (e *InsertValues) getRowsSelect(cols []*column.Col) ([][]types.Datum, error) {
	// process `insert|replace into ... select ... from ...`
	if len(e.SelectExec.Fields()) != len(cols) {
		return nil, errors.Errorf("Column count %d doesn't match value count %d", len(cols), len(e.SelectExec.Fields()))
	}
	var rows [][]types.Datum
	for {
		innerRow, err := e.SelectExec.Next()
		if err != nil {
			return nil, errors.Trace(err)
		}
		if innerRow == nil {
			break
		}
		// currRow is consumed by initDefaultValues for retry bookkeeping.
		e.currRow = len(rows)
		row, err := e.fillRowData(cols, innerRow.Data)
		if err != nil {
			return nil, errors.Trace(err)
		}
		rows = append(rows, row)
	}
	return rows, nil
}
// fillRowData scatters vals (ordered as cols) into a full-width table row,
// fills the unspecified columns with defaults, then casts and null-checks
// the result.
func (e *InsertValues) fillRowData(cols []*column.Col, vals []types.Datum) ([]types.Datum, error) {
	row := make([]types.Datum, len(e.Table.Cols()))
	marked := make(map[int]struct{}, len(vals))
	for i, v := range vals {
		off := cols[i].Offset
		row[off] = v
		marked[off] = struct{}{}
	}
	if err := e.initDefaultValues(row, marked); err != nil {
		return nil, errors.Trace(err)
	}
	if err := column.CastValues(e.ctx, row, cols); err != nil {
		return nil, errors.Trace(err)
	}
	if err := column.CheckNotNull(e.Table.Cols(), row); err != nil {
		return nil, errors.Trace(err)
	}
	return row, nil
}
// initDefaultValues fills the unspecified columns of row with default or
// auto-increment values, then rewrites e.Setlist/e.Lists/e.Columns with the
// concrete values so a retried statement reuses the same generated IDs.
func (e *InsertValues) initDefaultValues(row []types.Datum, marked map[int]struct{}) error {
	var rewriteValueCol *column.Col
	var defaultValueCols []*column.Col
	for i, c := range e.Table.Cols() {
		if row[i].Kind() != types.KindNull {
			// Column value isn't nil and column isn't auto-increment, continue.
			if !mysql.HasAutoIncrementFlag(c.Flag) {
				continue
			}
			val, err := row[i].ToInt64()
			if err != nil {
				return errors.Trace(err)
			}
			if val != 0 {
				// Explicit non-zero value for an auto-increment column:
				// bump the allocator so future IDs don't collide.
				// NOTE(review): any return value of RebaseAutoID is
				// discarded here — confirm its signature.
				e.Table.RebaseAutoID(val, true)
				continue
			}
		}
		// If the nil value is evaluated in insert list, we will use nil except auto increment column.
		if _, ok := marked[i]; ok && !mysql.HasAutoIncrementFlag(c.Flag) && !mysql.HasTimestampFlag(c.Flag) {
			continue
		}
		if mysql.HasAutoIncrementFlag(c.Flag) {
			recordID, err := e.Table.AllocAutoID()
			if err != nil {
				return errors.Trace(err)
			}
			row[i].SetInt64(recordID)
			// Notes: incompatible with mysql
			// MySQL will set last insert id to the first row, as follows:
			// `t(id int AUTO_INCREMENT, c1 int, PRIMARY KEY (id))`
			// `insert t (c1) values(1),(2),(3);`
			// Last insert id will be 1, not 3.
			variable.GetSessionVars(e.ctx).SetLastInsertID(uint64(recordID))
			// It's used for retry.
			rewriteValueCol = c
		} else {
			var err error
			row[i], _, err = table.GetColDefaultValue(e.ctx, &c.ColumnInfo)
			if err != nil {
				return errors.Trace(err)
			}
		}
		defaultValueCols = append(defaultValueCols, c)
	}
	if err := column.CastValues(e.ctx, row, defaultValueCols); err != nil {
		return errors.Trace(err)
	}
	// It's used for retry.
	if rewriteValueCol == nil {
		return nil
	}
	// SET form: splice the generated value into e.Setlist at the column's
	// offset, or append when the list is shorter than that offset.
	if len(e.Setlist) > 0 {
		val := &ast.Assignment{
			Column: &ast.ColumnName{Name: rewriteValueCol.Name},
			Expr:   ast.NewValueExpr(row[rewriteValueCol.Offset].GetValue())}
		if len(e.Setlist) < rewriteValueCol.Offset+1 {
			e.Setlist = append(e.Setlist, val)
			return nil
		}
		setlist := make([]*ast.Assignment, 0, len(e.Setlist)+1)
		setlist = append(setlist, e.Setlist[:rewriteValueCol.Offset]...)
		setlist = append(setlist, val)
		e.Setlist = append(setlist, e.Setlist[rewriteValueCol.Offset:]...)
		return nil
	}
	// records the values of each row.
	vals := make([]ast.ExprNode, len(row))
	for i, col := range row {
		vals[i] = ast.NewValueExpr(col.GetValue())
	}
	if len(e.Lists) <= e.currRow {
		e.Lists = append(e.Lists, vals)
	} else {
		e.Lists[e.currRow] = vals
	}
	// records the column name only once.
	if e.currRow != len(e.Lists)-1 {
		return nil
	}
	if len(e.Columns) < rewriteValueCol.Offset+1 {
		e.Columns = append(e.Columns, &ast.ColumnName{Name: rewriteValueCol.Name})
		return nil
	}
	cols := make([]*ast.ColumnName, 0, len(e.Columns)+1)
	cols = append(cols, e.Columns[:rewriteValueCol.Offset]...)
	cols = append(cols, &ast.ColumnName{Name: rewriteValueCol.Name})
	e.Columns = append(cols, e.Columns[rewriteValueCol.Offset:]...)
	return nil
}
// onDuplicateUpdate applies the ON DUPLICATE KEY UPDATE assignments to the
// existing row with handle h. The rejected insert values in row are used to
// evaluate VALUES(col) references.
func (e *InsertExec) onDuplicateUpdate(row []types.Datum, h int64, cols map[int]*ast.Assignment) error {
	// On duplicate key update the duplicate row.
	// Evaluate the updated value.
	// TODO: report rows affected and last insert id.
	data, err := e.Table.Row(e.ctx, h)
	if err != nil {
		return errors.Trace(err)
	}
	// For evaluate ValuesExpr
	// http://dev.mysql.com/doc/refman/5.7/en/miscellaneous-functions.html#function_values
	for i, rf := range e.fields {
		rf.Expr.SetValue(row[i].GetValue())
	}
	// Evaluate assignment
	newData := make([]types.Datum, len(data))
	for i, c := range row {
		asgn, ok := cols[i]
		if !ok {
			// NOTE(review): columns without an assignment take the attempted
			// insert value here, not the stored value; updateRecord's touched
			// map appears to limit what is actually written — confirm.
			newData[i] = c
			continue
		}
		var val interface{}
		val, err = evaluator.Eval(e.ctx, asgn.Expr)
		if err != nil {
			return errors.Trace(err)
		}
		newData[i].SetValue(val)
	}
	// onDuplicateUpdate=true makes updateRecord count 2 affected rows,
	// matching MySQL's ON DUPLICATE KEY UPDATE semantics.
	if err = updateRecord(e.ctx, h, data, newData, cols, e.Table, 0, true); err != nil {
		return errors.Trace(err)
	}
	return nil
}
// findColumnByName looks up a (possibly table-qualified) column name in t.
func findColumnByName(t table.Table, name string) (*column.Col, error) {
	_, tblName, colName := splitQualifiedName(name)
	if len(tblName) > 0 && tblName != t.Meta().Name.O {
		return nil, errors.Errorf("unknown field %s.%s", tblName, colName)
	}
	col := column.FindCol(t.Cols(), colName)
	if col == nil {
		return nil, errors.Errorf("unknown field %s", colName)
	}
	return col, nil
}

// getOnDuplicateUpdateColumns maps each assigned column's offset in t to its
// ON DUPLICATE KEY UPDATE assignment.
func getOnDuplicateUpdateColumns(assignList []*ast.Assignment, t table.Table) (map[int]*ast.Assignment, error) {
	res := make(map[int]*ast.Assignment, len(assignList))
	for _, assign := range assignList {
		qualified := joinQualifiedName("", assign.Column.Table.L, assign.Column.Name.L)
		c, err := findColumnByName(t, qualified)
		if err != nil {
			return nil, errors.Trace(err)
		}
		res[c.Offset] = assign
	}
	return res, nil
}
// ReplaceExec represents a replace executor.
type ReplaceExec struct {
	*InsertValues
	// Priority is the statement's parsed priority value.
	Priority int
	// finished guards against re-running the replace on a second Next call.
	finished bool
}

// Fields implements Executor Fields interface.
// Returns nil to indicate there is no output.
func (e *ReplaceExec) Fields() []*ast.ResultField {
	return nil
}

// Close implements Executor Close interface.
// Only REPLACE ... SELECT has a child executor to close.
func (e *ReplaceExec) Close() error {
	if e.SelectExec != nil {
		return e.SelectExec.Close()
	}
	return nil
}
// Next implements Executor Next interface.
// It evaluates all rows (from VALUES lists or a child SELECT) and adds them
// to the table; on a duplicate-key error the conflicting row is updated in
// place. Replace statements produce no result rows, so it always returns
// nil data.
func (e *ReplaceExec) Next() (*Row, error) {
	if e.finished {
		return nil, nil
	}
	cols, err := e.getColumns(e.Table.Cols())
	if err != nil {
		return nil, errors.Trace(err)
	}
	var rows [][]types.Datum
	if e.SelectExec != nil {
		rows, err = e.getRowsSelect(cols)
	} else {
		rows, err = e.getRows(cols)
	}
	if err != nil {
		return nil, errors.Trace(err)
	}
	for _, row := range rows {
		h, err := e.Table.AddRecord(e.ctx, row)
		if err == nil {
			continue
		}
		// err is guaranteed non-nil here, so the previous `err != nil &&`
		// guard was redundant; only a duplicate-key error can be handled.
		if !terror.ErrorEqual(err, kv.ErrKeyExists) {
			return nil, errors.Trace(err)
		}
		// While the insertion fails because a duplicate-key error occurs for a primary key or unique index,
		// a storage engine may perform the REPLACE as an update rather than a delete plus insert.
		// See: http://dev.mysql.com/doc/refman/5.7/en/replace.html.
		if err = e.replaceRow(h, row); err != nil {
			return nil, errors.Trace(err)
		}
		variable.GetSessionVars(e.ctx).AddAffectedRows(1)
	}
	e.finished = true
	return nil, nil
}
// replaceRow overwrites the existing row at handle with replaceRow values,
// updating only the columns whose values actually differ. When anything
// changed, one extra affected row is counted, which together with the count
// the caller adds yields REPLACE's two-rows-affected semantics.
func (e *ReplaceExec) replaceRow(handle int64, replaceRow []types.Datum) error {
	row, err := e.Table.Row(e.ctx, handle)
	if err != nil {
		return errors.Trace(err)
	}
	isReplace := false
	touched := make(map[int]bool, len(row))
	for i, val := range row {
		v, err1 := val.CompareDatum(replaceRow[i])
		if err1 != nil {
			return errors.Trace(err1)
		}
		if v != 0 {
			// Value differs: mark the column so UpdateRecord rewrites it.
			touched[i] = true
			isReplace = true
		}
	}
	if isReplace {
		variable.GetSessionVars(e.ctx).AddAffectedRows(1)
		if err = e.Table.UpdateRecord(e.ctx, handle, row, replaceRow, touched); err != nil {
			return errors.Trace(err)
		}
	}
	return nil
}
// splitQualifiedName splits an identifier name into db, table and field
// parts. One component is just a field, two are table.field, and three or
// more yield the last three components as db.table.field.
func splitQualifiedName(name string) (db string, table string, field string) {
	parts := strings.Split(name, ".")
	switch n := len(parts); n {
	case 1:
		field = parts[0]
	case 2:
		table, field = parts[0], parts[1]
	default:
		db, table, field = parts[n-3], parts[n-2], parts[n-1]
	}
	return
}
// joinQualifiedName builds a dotted qualified name from db, table and field,
// omitting the leading components that are empty.
func joinQualifiedName(db string, table string, field string) string {
	switch {
	case len(db) > 0:
		return fmt.Sprintf("%s.%s.%s", db, table, field)
	case len(table) > 0:
		return fmt.Sprintf("%s.%s", table, field)
	default:
		return field
	}
}

217
vendor/github.com/pingcap/tidb/executor/explain.go generated vendored Normal file
View file

@ -0,0 +1,217 @@
// Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package executor
import (
"strconv"
"strings"
"github.com/pingcap/tidb/ast"
"github.com/pingcap/tidb/optimizer/plan"
"github.com/pingcap/tidb/parser/opcode"
"github.com/pingcap/tidb/util/types"
)
// explainEntry is one output row of EXPLAIN.
// See: https://dev.mysql.com/doc/refman/5.7/en/explain-output.html
type explainEntry struct {
	ID           int64    // id column
	selectType   string   // select_type column, e.g. "SIMPLE"
	table        string   // table column
	joinType     string   // type column: access type (ALL, range, ref, ...)
	possibleKeys string   // possible_keys column
	key          string   // key column: the index actually chosen
	keyLen       string   // key_len column
	ref          string   // ref column
	rows         int64    // rows column: estimated rows examined
	extra        []string // Extra column, joined with "; " on output
}
// setJoinTypeForTableScan derives the EXPLAIN access type for a table scan:
// ALL with no access conditions, eq_ref for ref access, const when an
// equality condition is present, and range otherwise.
func (e *explainEntry) setJoinTypeForTableScan(p *plan.TableScan) {
	switch {
	case len(p.AccessConditions) == 0:
		e.joinType = "ALL"
	case p.RefAccess:
		e.joinType = "eq_ref"
	default:
		for _, cond := range p.AccessConditions {
			if binop, ok := cond.(*ast.BinaryOperationExpr); ok && binop.Op == opcode.EQ {
				e.joinType = "const"
				return
			}
		}
		e.joinType = "range"
	}
}
// setJoinTypeForIndexScan derives the EXPLAIN access type for an index scan
// from how many of the access conditions are equalities, whether ref access
// is used, and whether the index is unique.
func (e *explainEntry) setJoinTypeForIndexScan(p *plan.IndexScan) {
	if len(p.AccessConditions) == 0 {
		e.joinType = "index"
		return
	}
	if len(p.AccessConditions) != p.AccessEqualCount {
		// Some conditions are ranges, not equalities.
		e.joinType = "range"
		return
	}
	switch {
	case p.RefAccess && p.Index.Unique:
		e.joinType = "eq_ref"
	case p.RefAccess:
		e.joinType = "ref"
	case p.Index.Unique:
		e.joinType = "const"
	default:
		e.joinType = "range"
	}
}
// ExplainExec represents an explain executor.
// See: https://dev.mysql.com/doc/refman/5.7/en/explain-output.html
type ExplainExec struct {
	// StmtPlan is the plan of the statement being explained.
	StmtPlan plan.Plan
	fields   []*ast.ResultField
	// rows caches the EXPLAIN output, built lazily on the first Next call.
	rows   []*Row
	cursor int
}

// Fields implements Executor Fields interface.
func (e *ExplainExec) Fields() []*ast.ResultField {
	return e.fields
}
// Next implements Execution Next interface.
// The output rows are built once on the first call, then served one at a
// time; nil is returned once the cursor passes the last row.
func (e *ExplainExec) Next() (*Row, error) {
	if e.rows == nil {
		e.fetchRows()
	}
	if e.cursor < len(e.rows) {
		r := e.rows[e.cursor]
		e.cursor++
		return r, nil
	}
	return nil, nil
}
// fetchRows walks the statement plan and materializes one EXPLAIN output row
// per scanned table/index, converting empty string columns to NULL.
func (e *ExplainExec) fetchRows() {
	visitor := &explainVisitor{id: 1}
	e.StmtPlan.Accept(visitor)
	for _, entry := range visitor.entries {
		row := &Row{}
		// Column order follows MySQL EXPLAIN output:
		// id, select_type, table, type, possible_keys, key, key_len, ref, rows, Extra.
		row.Data = types.MakeDatums(
			entry.ID,
			entry.selectType,
			entry.table,
			entry.joinType,
			// Fixed: entry.key was emitted twice here and possibleKeys was
			// never output, so the possible_keys column showed the key value.
			entry.possibleKeys,
			entry.key,
			entry.keyLen,
			entry.ref,
			entry.rows,
			strings.Join(entry.extra, "; "),
		)
		for i := range row.Data {
			if row.Data[i].Kind() == types.KindString && row.Data[i].GetString() == "" {
				row.Data[i].SetNull()
			}
		}
		e.rows = append(e.rows, row)
	}
}
// Close implements Executor Close interface.
// ExplainExec holds no external resources, so there is nothing to release.
func (e *ExplainExec) Close() error {
	return nil
}
// explainVisitor walks a plan tree collecting one explainEntry per scan node.
type explainVisitor struct {
	// id is the EXPLAIN id assigned to entries; it is initialized to 1 and
	// never incremented here.
	id int64

	// Sort extra should be appended in the first table in a join.
	sort bool

	entries []*explainEntry
}

// Enter records an entry for each scan node and notes a pending Sort so the
// next scan entry gets the "Using filesort" extra. The plan is returned
// unchanged.
func (v *explainVisitor) Enter(p plan.Plan) (plan.Plan, bool) {
	switch x := p.(type) {
	case *plan.TableScan:
		v.entries = append(v.entries, v.newEntryForTableScan(x))
	case *plan.IndexScan:
		v.entries = append(v.entries, v.newEntryForIndexScan(x))
	case *plan.Sort:
		v.sort = true
	}
	return p, false
}

// Leave returns the plan unchanged.
func (v *explainVisitor) Leave(p plan.Plan) (plan.Plan, bool) {
	return p, true
}
// newEntryForTableScan builds the EXPLAIN row for a table scan.
func (v *explainVisitor) newEntryForTableScan(p *plan.TableScan) *explainEntry {
	entry := &explainEntry{
		ID:         v.id,
		selectType: "SIMPLE",
		table:      p.Table.Name.O,
	}
	entry.setJoinTypeForTableScan(p)
	if entry.joinType != "ALL" {
		// Anything better than a full scan goes through the PRIMARY key,
		// whose int64 handle is 8 bytes.
		entry.key = "PRIMARY"
		entry.keyLen = "8"
	}
	if len(p.AccessConditions)+len(p.FilterConditions) > 0 {
		entry.extra = append(entry.extra, "Using where")
	}
	v.setSortExtra(entry)
	return entry
}
// newEntryForIndexScan builds the EXPLAIN row for an index scan, estimating
// key_len from the index columns the access conditions can use.
func (v *explainVisitor) newEntryForIndexScan(p *plan.IndexScan) *explainEntry {
	entry := &explainEntry{
		ID:         v.id,
		selectType: "SIMPLE",
		table:      p.Table.Name.O,
		key:        p.Index.Name.O,
	}
	if len(p.AccessConditions) != 0 {
		keyLen := 0
		for i := 0; i < len(p.Index.Columns); i++ {
			if i < p.AccessEqualCount {
				// Columns matched by equality conditions all contribute.
				keyLen += p.Index.Columns[i].Length
			} else if i < len(p.AccessConditions) {
				// The first range-matched column contributes, then stop.
				keyLen += p.Index.Columns[i].Length
				break
			}
		}
		entry.keyLen = strconv.Itoa(keyLen)
	}
	entry.setJoinTypeForIndexScan(p)
	if len(p.AccessConditions)+len(p.FilterConditions) > 0 {
		entry.extra = append(entry.extra, "Using where")
	}
	v.setSortExtra(entry)
	return entry
}
// setSortExtra attaches "Using filesort" to the first entry created after a
// Sort node was seen, then clears the pending flag.
func (v *explainVisitor) setSortExtra(entry *explainEntry) {
	if !v.sort {
		return
	}
	entry.extra = append(entry.extra, "Using filesort")
	v.sort = false
}

520
vendor/github.com/pingcap/tidb/executor/grant.go generated vendored Normal file
View file

@ -0,0 +1,520 @@
// Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package executor
import (
"fmt"
"strings"
"github.com/juju/errors"
"github.com/pingcap/tidb/ast"
"github.com/pingcap/tidb/column"
"github.com/pingcap/tidb/context"
"github.com/pingcap/tidb/model"
"github.com/pingcap/tidb/mysql"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/db"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/util/sqlexec"
"github.com/pingcap/tidb/util/types"
)
/***
 * Grant Statement
 * See: https://dev.mysql.com/doc/refman/5.7/en/grant.html
 ************************************************************************************/

var (
	// Compile-time check that GrantExec implements Executor.
	_ Executor = (*GrantExec)(nil)
)

// GrantExec executes GrantStmt.
type GrantExec struct {
	// Privs are the privileges being granted.
	Privs []*ast.PrivElem
	// ObjectType is the type of the grant object.
	ObjectType ast.ObjectTypeType
	// Level is the grant scope (global, database, or table).
	Level *ast.GrantLevel
	// Users are the grantees.
	Users []*ast.UserSpec

	ctx context.Context
	// done guards against re-running the grant on a second Next call.
	done bool
}
// Fields implements Executor Fields interface.
// GRANT produces no result set, so this returns nil.
func (e *GrantExec) Fields() []*ast.ResultField {
	return nil
}
// Next implements Execution Next interface.
// For each grantee it verifies the user exists, ensures the scope-specific
// privilege table has an entry, then applies every granted privilege.
// GRANT produces no result rows, so it always returns nil data.
func (e *GrantExec) Next() (*Row, error) {
	if e.done {
		return nil, nil
	}
	// Grant for each user
	for _, user := range e.Users {
		// Check if user exists.
		userName, host := parseUser(user.User)
		exists, err := userExists(e.ctx, userName, host)
		if err != nil {
			return nil, errors.Trace(err)
		}
		if !exists {
			return nil, errors.Errorf("Unknown user: %s", user.User)
		}
		// If there is no privilege entry in corresponding table, insert a new one.
		// DB scope:     mysql.DB
		// Table scope:  mysql.Tables_priv
		// Column scope: mysql.Columns_priv
		switch e.Level.Level {
		case ast.GrantLevelDB:
			err := e.checkAndInitDBPriv(userName, host)
			if err != nil {
				return nil, errors.Trace(err)
			}
		case ast.GrantLevelTable:
			err := e.checkAndInitTablePriv(userName, host)
			if err != nil {
				return nil, errors.Trace(err)
			}
		}
		// Grant each priv to the user.
		for _, priv := range e.Privs {
			if len(priv.Cols) > 0 {
				// Check column scope privilege entry.
				// TODO: Check validity before insert new entry.
				err1 := e.checkAndInitColumnPriv(userName, host, priv.Cols)
				if err1 != nil {
					return nil, errors.Trace(err1)
				}
			}
			err2 := e.grantPriv(priv, user)
			if err2 != nil {
				return nil, errors.Trace(err2)
			}
		}
	}
	e.done = true
	return nil, nil
}
// Close implements Executor Close interface.
// GrantExec holds no external resources, so there is nothing to release.
func (e *GrantExec) Close() error {
	return nil
}
// checkAndInitDBPriv ensures a mysql.DB row exists for (user, host, target
// schema), inserting an empty-privilege entry when missing.
func (e *GrantExec) checkAndInitDBPriv(user string, host string) error {
	db, err := e.getTargetSchema()
	if err != nil {
		return errors.Trace(err)
	}
	exists, err := dbUserExists(e.ctx, user, host, db.Name.O)
	if err != nil {
		return errors.Trace(err)
	}
	if exists {
		return nil
	}
	// No entry yet for user-host-db: create one.
	return initDBPrivEntry(e.ctx, user, host, db.Name.O)
}

// checkAndInitTablePriv ensures a mysql.Tables_priv row exists for (user,
// host, db, table), inserting an empty-privilege entry when missing.
func (e *GrantExec) checkAndInitTablePriv(user string, host string) error {
	db, tbl, err := e.getTargetSchemaAndTable()
	if err != nil {
		return errors.Trace(err)
	}
	exists, err := tableUserExists(e.ctx, user, host, db.Name.O, tbl.Meta().Name.O)
	if err != nil {
		return errors.Trace(err)
	}
	if exists {
		return nil
	}
	// No entry yet for user-host-db-tbl: create one.
	return initTablePrivEntry(e.ctx, user, host, db.Name.O, tbl.Meta().Name.O)
}
// checkAndInitColumnPriv ensures a mysql.Columns_priv row exists for each of
// the given columns of the target table, inserting empty-privilege entries
// where missing.
func (e *GrantExec) checkAndInitColumnPriv(user string, host string, cols []*ast.ColumnName) error {
	db, tbl, err := e.getTargetSchemaAndTable()
	if err != nil {
		return errors.Trace(err)
	}
	for _, cn := range cols {
		col := column.FindCol(tbl.Cols(), cn.Name.L)
		if col == nil {
			return errors.Errorf("Unknown column: %s", cn.Name.O)
		}
		exists, err := columnPrivEntryExists(e.ctx, user, host, db.Name.O, tbl.Meta().Name.O, col.Name.O)
		if err != nil {
			return errors.Trace(err)
		}
		if exists {
			continue
		}
		// No entry yet for user-host-db-tbl-col: create one.
		if err = initColumnPrivEntry(e.ctx, user, host, db.Name.O, tbl.Meta().Name.O, col.Name.O); err != nil {
			return errors.Trace(err)
		}
	}
	return nil
}
// initDBPrivEntry inserts a new row into mysql.DB with empty privilege.
// NOTE(review): the SQL is built with fmt.Sprintf and the user/host/db
// values are interpolated unescaped — confirm they are validated upstream.
func initDBPrivEntry(ctx context.Context, user string, host string, db string) error {
	sql := fmt.Sprintf(`INSERT INTO %s.%s (Host, User, DB) VALUES ("%s", "%s", "%s")`, mysql.SystemDB, mysql.DBTable, host, user, db)
	_, err := ctx.(sqlexec.RestrictedSQLExecutor).ExecRestrictedSQL(ctx, sql)
	return errors.Trace(err)
}

// initTablePrivEntry inserts a new row into mysql.Tables_priv with empty
// privilege sets.
func initTablePrivEntry(ctx context.Context, user string, host string, db string, tbl string) error {
	sql := fmt.Sprintf(`INSERT INTO %s.%s (Host, User, DB, Table_name, Table_priv, Column_priv) VALUES ("%s", "%s", "%s", "%s", "", "")`, mysql.SystemDB, mysql.TablePrivTable, host, user, db, tbl)
	_, err := ctx.(sqlexec.RestrictedSQLExecutor).ExecRestrictedSQL(ctx, sql)
	return errors.Trace(err)
}

// initColumnPrivEntry inserts a new row into mysql.Columns_priv with empty
// privilege.
func initColumnPrivEntry(ctx context.Context, user string, host string, db string, tbl string, col string) error {
	sql := fmt.Sprintf(`INSERT INTO %s.%s (Host, User, DB, Table_name, Column_name, Column_priv) VALUES ("%s", "%s", "%s", "%s", "%s", "")`, mysql.SystemDB, mysql.ColumnPrivTable, host, user, db, tbl, col)
	_, err := ctx.(sqlexec.RestrictedSQLExecutor).ExecRestrictedSQL(ctx, sql)
	return errors.Trace(err)
}
// grantPriv dispatches one privilege grant to the handler for the
// statement's scope: global, database, table, or column.
func (e *GrantExec) grantPriv(priv *ast.PrivElem, user *ast.UserSpec) error {
	switch e.Level.Level {
	case ast.GrantLevelGlobal:
		return e.grantGlobalPriv(priv, user)
	case ast.GrantLevelDB:
		return e.grantDBPriv(priv, user)
	case ast.GrantLevelTable:
		// Column lists at table level mean column-scope privileges.
		if len(priv.Cols) > 0 {
			return e.grantColumnPriv(priv, user)
		}
		return e.grantTablePriv(priv, user)
	default:
		return errors.Errorf("Unknown grant level: %#v", e.Level)
	}
}
// grantGlobalPriv updates the grantee's privilege columns in mysql.user.
func (e *GrantExec) grantGlobalPriv(priv *ast.PrivElem, user *ast.UserSpec) error {
	asgns, err := composeGlobalPrivUpdate(priv.Priv)
	if err != nil {
		return errors.Trace(err)
	}
	name, host := parseUser(user.User)
	sql := fmt.Sprintf(`UPDATE %s.%s SET %s WHERE User="%s" AND Host="%s"`, mysql.SystemDB, mysql.UserTable, asgns, name, host)
	_, err = e.ctx.(sqlexec.RestrictedSQLExecutor).ExecRestrictedSQL(e.ctx, sql)
	return errors.Trace(err)
}
// grantDBPriv updates the grantee's privilege columns in mysql.db for the
// target schema.
func (e *GrantExec) grantDBPriv(priv *ast.PrivElem, user *ast.UserSpec) error {
	db, err := e.getTargetSchema()
	if err != nil {
		return errors.Trace(err)
	}
	asgns, err := composeDBPrivUpdate(priv.Priv)
	if err != nil {
		return errors.Trace(err)
	}
	userName, host := parseUser(user.User)
	sql := fmt.Sprintf(`UPDATE %s.%s SET %s WHERE User="%s" AND Host="%s" AND DB="%s";`, mysql.SystemDB, mysql.DBTable, asgns, userName, host, db.Name.O)
	_, err = e.ctx.(sqlexec.RestrictedSQLExecutor).ExecRestrictedSQL(e.ctx, sql)
	return errors.Trace(err)
}

// grantTablePriv updates the grantee's Table_priv/Column_priv sets in
// mysql.tables_priv for the target table.
func (e *GrantExec) grantTablePriv(priv *ast.PrivElem, user *ast.UserSpec) error {
	db, tbl, err := e.getTargetSchemaAndTable()
	if err != nil {
		return errors.Trace(err)
	}
	userName, host := parseUser(user.User)
	asgns, err := composeTablePrivUpdate(e.ctx, priv.Priv, userName, host, db.Name.O, tbl.Meta().Name.O)
	if err != nil {
		return errors.Trace(err)
	}
	sql := fmt.Sprintf(`UPDATE %s.%s SET %s WHERE User="%s" AND Host="%s" AND DB="%s" AND Table_name="%s";`, mysql.SystemDB, mysql.TablePrivTable, asgns, userName, host, db.Name.O, tbl.Meta().Name.O)
	_, err = e.ctx.(sqlexec.RestrictedSQLExecutor).ExecRestrictedSQL(e.ctx, sql)
	return errors.Trace(err)
}
// grantColumnPriv updates the grantee's Column_priv set in
// mysql.columns_priv for every column in the privilege element.
func (e *GrantExec) grantColumnPriv(priv *ast.PrivElem, user *ast.UserSpec) error {
	db, tbl, err := e.getTargetSchemaAndTable()
	if err != nil {
		return errors.Trace(err)
	}
	userName, host := parseUser(user.User)
	for _, c := range priv.Cols {
		col := column.FindCol(tbl.Cols(), c.Name.L)
		if col == nil {
			// Fixed: report the column's name (c.Name.O) instead of the AST
			// node itself, matching checkAndInitColumnPriv's message.
			return errors.Errorf("Unknown column: %s", c.Name.O)
		}
		asgns, err := composeColumnPrivUpdate(e.ctx, priv.Priv, userName, host, db.Name.O, tbl.Meta().Name.O, col.Name.O)
		if err != nil {
			return errors.Trace(err)
		}
		sql := fmt.Sprintf(`UPDATE %s.%s SET %s WHERE User="%s" AND Host="%s" AND DB="%s" AND Table_name="%s" AND Column_name="%s";`, mysql.SystemDB, mysql.ColumnPrivTable, asgns, userName, host, db.Name.O, tbl.Meta().Name.O, col.Name.O)
		_, err = e.ctx.(sqlexec.RestrictedSQLExecutor).ExecRestrictedSQL(e.ctx, sql)
		if err != nil {
			return errors.Trace(err)
		}
	}
	return nil
}
// composeGlobalPrivUpdate builds the SET clause for a global-scope GRANT.
// For ALL it enables every known privilege column; otherwise just the one.
func composeGlobalPrivUpdate(priv mysql.PrivilegeType) (string, error) {
	if priv != mysql.AllPriv {
		col, ok := mysql.Priv2UserCol[priv]
		if !ok {
			return "", errors.Errorf("Unknown priv: %v", priv)
		}
		return fmt.Sprintf(`%s="Y"`, col), nil
	}
	assigns := make([]string, 0, len(mysql.Priv2UserCol))
	for _, col := range mysql.Priv2UserCol {
		assigns = append(assigns, fmt.Sprintf(`%s="Y"`, col))
	}
	return strings.Join(assigns, ", "), nil
}
// composeDBPrivUpdate builds the SET clause for a database-scope GRANT.
// For ALL it enables every DB-level privilege column; otherwise just the one.
func composeDBPrivUpdate(priv mysql.PrivilegeType) (string, error) {
	if priv == mysql.AllPriv {
		strs := make([]string, 0, len(mysql.AllDBPrivs))
		for _, p := range mysql.AllDBPrivs {
			v, ok := mysql.Priv2UserCol[p]
			if !ok {
				// Fixed: report the privilege actually missing from the map
				// (p); the previous message printed priv, which is always
				// AllPriv in this branch.
				return "", errors.Errorf("Unknown db privilege %v", p)
			}
			strs = append(strs, fmt.Sprintf(`%s="Y"`, v))
		}
		return strings.Join(strs, ", "), nil
	}
	col, ok := mysql.Priv2UserCol[priv]
	if !ok {
		return "", errors.Errorf("Unknown priv: %v", priv)
	}
	return fmt.Sprintf(`%s="Y"`, col), nil
}
// composeTablePrivUpdate builds the SET clause for a table-scope GRANT,
// producing new Table_priv and Column_priv SET-column values plus Grantor.
// For ALL it enables every table/column privilege; otherwise it appends the
// single privilege to the user's current sets read from mysql.Tables_priv.
func composeTablePrivUpdate(ctx context.Context, priv mysql.PrivilegeType, name string, host string, db string, tbl string) (string, error) {
	var newTablePriv, newColumnPriv string
	if priv == mysql.AllPriv {
		for _, p := range mysql.AllTablePrivs {
			v, ok := mysql.Priv2SetStr[p]
			if !ok {
				return "", errors.Errorf("Unknown table privilege %v", p)
			}
			if len(newTablePriv) == 0 {
				newTablePriv = v
			} else {
				newTablePriv = fmt.Sprintf("%s,%s", newTablePriv, v)
			}
		}
		for _, p := range mysql.AllColumnPrivs {
			v, ok := mysql.Priv2SetStr[p]
			if !ok {
				return "", errors.Errorf("Unknown column privilege %v", p)
			}
			if len(newColumnPriv) == 0 {
				newColumnPriv = v
			} else {
				newColumnPriv = fmt.Sprintf("%s,%s", newColumnPriv, v)
			}
		}
	} else {
		// Merge the single granted privilege into the user's existing sets.
		currTablePriv, currColumnPriv, err := getTablePriv(ctx, name, host, db, tbl)
		if err != nil {
			return "", errors.Trace(err)
		}
		p, ok := mysql.Priv2SetStr[priv]
		if !ok {
			return "", errors.Errorf("Unknown priv: %v", priv)
		}
		if len(currTablePriv) == 0 {
			newTablePriv = p
		} else {
			newTablePriv = fmt.Sprintf("%s,%s", currTablePriv, p)
		}
		// Column_priv is only extended when the privilege is one of the
		// column-applicable privileges.
		for _, cp := range mysql.AllColumnPrivs {
			if priv == cp {
				if len(currColumnPriv) == 0 {
					newColumnPriv = p
				} else {
					newColumnPriv = fmt.Sprintf("%s,%s", currColumnPriv, p)
				}
				break
			}
		}
	}
	return fmt.Sprintf(`Table_priv="%s", Column_priv="%s", Grantor="%s"`, newTablePriv, newColumnPriv, variable.GetSessionVars(ctx).User), nil
}
// composeColumnPrivUpdate composes the UPDATE assignment list for a column
// scope privilege grant (mysql.Columns_priv).
// For AllPriv every known column privilege joins the set; otherwise the
// granted privilege is appended to the user's current Column_priv set.
func composeColumnPrivUpdate(ctx context.Context, priv mysql.PrivilegeType, name string, host string, db string, tbl string, col string) (string, error) {
	if priv == mysql.AllPriv {
		parts := make([]string, 0, len(mysql.AllColumnPrivs))
		for _, colPriv := range mysql.AllColumnPrivs {
			setStr, ok := mysql.Priv2SetStr[colPriv]
			if !ok {
				return "", errors.Errorf("Unknown column privilege %v", colPriv)
			}
			parts = append(parts, setStr)
		}
		return fmt.Sprintf(`Column_priv="%s"`, strings.Join(parts, ",")), nil
	}
	current, err := getColumnPriv(ctx, name, host, db, tbl, col)
	if err != nil {
		return "", errors.Trace(err)
	}
	setStr, ok := mysql.Priv2SetStr[priv]
	if !ok {
		return "", errors.Errorf("Unknown priv: %v", priv)
	}
	updated := setStr
	if len(current) != 0 {
		updated = fmt.Sprintf("%s,%s", current, setStr)
	}
	return fmt.Sprintf(`Column_priv="%s"`, updated), nil
}
// recordExists reports whether the given SQL query returns at least one row.
func recordExists(ctx context.Context, sql string) (bool, error) {
	executor := ctx.(sqlexec.RestrictedSQLExecutor)
	result, err := executor.ExecRestrictedSQL(ctx, sql)
	if err != nil {
		return false, errors.Trace(err)
	}
	defer result.Close()
	first, err := result.Next()
	if err != nil {
		return false, errors.Trace(err)
	}
	return first != nil, nil
}
// dbUserExists checks whether mysql.DB holds an entry keyed by user-host-db,
// i.e. whether the user already has a db scope privilege row there.
// NOTE(review): the arguments are interpolated into SQL without escaping —
// assumed to be trusted identifiers from the parsed statement; confirm.
func dbUserExists(ctx context.Context, name string, host string, db string) (bool, error) {
	sql := fmt.Sprintf(`SELECT * FROM %s.%s WHERE User="%s" AND Host="%s" AND DB="%s";`, mysql.SystemDB, mysql.DBTable, name, host, db)
	return recordExists(ctx, sql)
}

// tableUserExists checks whether mysql.Tables_priv holds an entry keyed by
// user-host-db-tbl.
func tableUserExists(ctx context.Context, name string, host string, db string, tbl string) (bool, error) {
	sql := fmt.Sprintf(`SELECT * FROM %s.%s WHERE User="%s" AND Host="%s" AND DB="%s" AND Table_name="%s";`, mysql.SystemDB, mysql.TablePrivTable, name, host, db, tbl)
	return recordExists(ctx, sql)
}

// columnPrivEntryExists checks whether mysql.Columns_priv holds an entry
// keyed by user-host-db-tbl-col.
func columnPrivEntryExists(ctx context.Context, name string, host string, db string, tbl string, col string) (bool, error) {
	sql := fmt.Sprintf(`SELECT * FROM %s.%s WHERE User="%s" AND Host="%s" AND DB="%s" AND Table_name="%s" AND Column_name="%s";`, mysql.SystemDB, mysql.ColumnPrivTable, name, host, db, tbl, col)
	return recordExists(ctx, sql)
}
// getTablePriv reads the current table scope privilege sets for the given
// user/host/db/table key from mysql.Tables_priv.
// It returns the Table_priv and Column_priv set values ("" when unset).
func getTablePriv(ctx context.Context, name string, host string, db string, tbl string) (string, string, error) {
	sql := fmt.Sprintf(`SELECT Table_priv, Column_priv FROM %s.%s WHERE User="%s" AND Host="%s" AND DB="%s" AND Table_name="%s";`, mysql.SystemDB, mysql.TablePrivTable, name, host, db, tbl)
	rs, err := ctx.(sqlexec.RestrictedSQLExecutor).ExecRestrictedSQL(ctx, sql)
	if err != nil {
		return "", "", errors.Trace(err)
	}
	defer rs.Close()
	row, err := rs.Next()
	if err != nil {
		return "", "", errors.Trace(err)
	}
	// Fix: Next returns a nil row for an empty result set (recordExists relies
	// on exactly that), and dereferencing row.Data here would panic.
	if row == nil {
		return "", "", errors.Errorf("get table privilege: no entry for %s@%s on %s.%s", name, host, db, tbl)
	}
	var tPriv, cPriv string
	if row.Data[0].Kind() == types.KindMysqlSet {
		tablePriv := row.Data[0].GetMysqlSet()
		tPriv = tablePriv.Name
	}
	if row.Data[1].Kind() == types.KindMysqlSet {
		columnPriv := row.Data[1].GetMysqlSet()
		cPriv = columnPriv.Name
	}
	return tPriv, cPriv, nil
}
// getColumnPriv reads the current column scope privilege set (Column_priv)
// for the given user/host/db/table/column key from mysql.Columns_priv.
// An unset privilege column yields "".
func getColumnPriv(ctx context.Context, name string, host string, db string, tbl string, col string) (string, error) {
	sql := fmt.Sprintf(`SELECT Column_priv FROM %s.%s WHERE User="%s" AND Host="%s" AND DB="%s" AND Table_name="%s" AND Column_name="%s";`, mysql.SystemDB, mysql.ColumnPrivTable, name, host, db, tbl, col)
	rs, err := ctx.(sqlexec.RestrictedSQLExecutor).ExecRestrictedSQL(ctx, sql)
	if err != nil {
		return "", errors.Trace(err)
	}
	defer rs.Close()
	row, err := rs.Next()
	if err != nil {
		return "", errors.Trace(err)
	}
	// Fix: Next returns a nil row for an empty result set; the original code
	// dereferenced row.Data unconditionally and would panic here.
	if row == nil {
		return "", errors.Errorf("get column privilege: no entry for %s@%s on %s.%s.%s", name, host, db, tbl, col)
	}
	cPriv := ""
	if row.Data[0].Kind() == types.KindMysqlSet {
		cPriv = row.Data[0].GetMysqlSet().Name
	}
	return cPriv, nil
}
// getTargetSchema resolves the schema the GRANT statement targets.
// An empty Level.DBName means "grant on *", which falls back to the
// session's current schema.
func (e *GrantExec) getTargetSchema() (*model.DBInfo, error) {
	dbName := e.Level.DBName
	if len(dbName) == 0 {
		// Grant *, use current schema
		dbName = db.GetCurrentSchema(e.ctx)
		if len(dbName) == 0 {
			return nil, errors.New("Miss DB name for grant privilege.")
		}
	}
	// Make sure the schema actually exists before returning it.
	schemaName := model.NewCIStr(dbName)
	infoSchema := sessionctx.GetDomain(e.ctx).InfoSchema()
	dbInfo, ok := infoSchema.SchemaByName(schemaName)
	if !ok {
		return nil, errors.Errorf("Unknown schema name: %s", dbName)
	}
	return dbInfo, nil
}
// getTargetSchemaAndTable resolves both the schema and the table that the
// GRANT statement targets, using Level.DBName and Level.TableName.
func (e *GrantExec) getTargetSchemaAndTable() (*model.DBInfo, table.Table, error) {
	dbInfo, err := e.getTargetSchema()
	if err != nil {
		return nil, nil, errors.Trace(err)
	}
	tblName := model.NewCIStr(e.Level.TableName)
	infoSchema := sessionctx.GetDomain(e.ctx).InfoSchema()
	tbl, err := infoSchema.TableByName(dbInfo.Name, tblName)
	if err != nil {
		return nil, nil, errors.Trace(err)
	}
	return dbInfo, tbl, nil
}

277
vendor/github.com/pingcap/tidb/executor/prepared.go generated vendored Normal file
View file

@ -0,0 +1,277 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package executor
import (
"sort"
"github.com/juju/errors"
"github.com/pingcap/tidb/ast"
"github.com/pingcap/tidb/context"
"github.com/pingcap/tidb/evaluator"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/optimizer"
"github.com/pingcap/tidb/optimizer/plan"
"github.com/pingcap/tidb/parser"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/variable"
)
// Interface compliance assertions for the prepare/execute executors.
var (
	_ Executor = &DeallocateExec{}
	_ Executor = &ExecuteExec{}
	_ Executor = &PrepareExec{}
)

// paramMarkerSorter implements sort.Interface to order parameter markers by
// their byte offset in the original query text.
type paramMarkerSorter struct {
	markers []*ast.ParamMarkerExpr
}

// Len implements sort.Interface.
func (p *paramMarkerSorter) Len() int {
	return len(p.markers)
}

// Less implements sort.Interface: markers compare by query offset.
func (p *paramMarkerSorter) Less(i, j int) bool {
	return p.markers[i].Offset < p.markers[j].Offset
}

// Swap implements sort.Interface.
func (p *paramMarkerSorter) Swap(i, j int) {
	p.markers[i], p.markers[j] = p.markers[j], p.markers[i]
}

// paramMarkerExtractor is an AST visitor that collects every parameter
// marker ("?") it encounters.
type paramMarkerExtractor struct {
	markers []*ast.ParamMarkerExpr
}

// Enter implements ast.Visitor: always descend into children.
func (e *paramMarkerExtractor) Enter(in ast.Node) (ast.Node, bool) {
	return in, false
}

// Leave implements ast.Visitor: record parameter markers on the way out.
func (e *paramMarkerExtractor) Leave(in ast.Node) (ast.Node, bool) {
	if x, ok := in.(*ast.ParamMarkerExpr); ok {
		e.markers = append(e.markers, x)
	}
	return in, true
}

// Prepared represents a prepared statement.
type Prepared struct {
	Stmt          ast.StmtNode           // the parsed statement
	Params        []*ast.ParamMarkerExpr // markers sorted by query offset
	SchemaVersion int64                  // schema version at prepare time
}
// PrepareExec represents a PREPARE executor.
// It parses the SQL text and registers the resulting Prepared statement in
// the session variables; failures are reported through Err.
type PrepareExec struct {
	IS      infoschema.InfoSchema
	Ctx     context.Context
	Name    string // statement name; may be empty
	SQLText string
	// ID is the prepared statement ID; 0 means not yet assigned.
	ID           uint32
	ResultFields []*ast.ResultField
	ParamCount   int
	Err          error
}

// Fields implements Executor Fields interface.
func (e *PrepareExec) Fields() []*ast.ResultField {
	// returns nil to indicate prepare will not return Recordset.
	return nil
}

// Next implements Executor Next interface.
// The prepare outcome is surfaced via e.Err, not via a row.
func (e *PrepareExec) Next() (*Row, error) {
	e.DoPrepare()
	return nil, e.Err
}

// Close implements plan.Plan Close interface.
func (e *PrepareExec) Close() error {
	return nil
}
// DoPrepare prepares the statement, it can be called multiple times without
// side effect. On success it records the statement ID, parameter count and
// result fields on the receiver and registers the Prepared statement in the
// session; on failure it sets e.Err.
func (e *PrepareExec) DoPrepare() {
	vars := variable.GetSessionVars(e.Ctx)
	if e.ID != 0 {
		// Must be the case when we retry a prepare.
		// Make sure it is idempotent.
		_, ok := vars.PreparedStmts[e.ID]
		if ok {
			return
		}
	}
	charset, collation := variable.GetCharsetInfo(e.Ctx)
	stmts, err := parser.Parse(e.SQLText, charset, collation)
	if err != nil {
		e.Err = errors.Trace(err)
		return
	}
	// PREPARE accepts exactly one statement.
	if len(stmts) != 1 {
		e.Err = ErrPrepareMulti
		return
	}
	stmt := stmts[0]
	var extractor paramMarkerExtractor
	stmt.Accept(&extractor)
	// The parameter markers are appended in visiting order, which may not
	// be the same as the position order in the query string. We need to
	// sort it by position.
	sorter := &paramMarkerSorter{markers: extractor.markers}
	sort.Sort(sorter)
	e.ParamCount = len(sorter.markers)
	prepared := &Prepared{
		Stmt:          stmt,
		Params:        sorter.markers,
		SchemaVersion: e.IS.SchemaMetaVersion(),
	}
	err = optimizer.Prepare(e.IS, e.Ctx, stmt)
	if err != nil {
		e.Err = errors.Trace(err)
		return
	}
	if resultSetNode, ok := stmt.(ast.ResultSetNode); ok {
		e.ResultFields = resultSetNode.GetResultFields()
	}
	// Allocate an ID on the first successful prepare, then register the
	// statement (and its name, if any) in the session.
	if e.ID == 0 {
		e.ID = vars.GetNextPreparedStmtID()
	}
	if e.Name != "" {
		vars.PreparedStmtNameToID[e.Name] = e.ID
	}
	vars.PreparedStmts[e.ID] = prepared
}
// ExecuteExec represents an EXECUTE executor.
// It executes a prepared statement. The real work happens in Build, which
// produces StmtExec; the Executor methods below are placeholders.
type ExecuteExec struct {
	IS        infoschema.InfoSchema
	Ctx       context.Context
	Name      string // statement name used to look up the ID; may be empty
	UsingVars []ast.ExprNode
	ID        uint32
	StmtExec  Executor // the built statement executor, set by Build
}

// Fields implements Executor Fields interface.
func (e *ExecuteExec) Fields() []*ast.ResultField {
	// Will never be called.
	return nil
}

// Next implements Executor Next interface.
func (e *ExecuteExec) Next() (*Row, error) {
	// Will never be called.
	return nil, nil
}

// Close implements plan.Plan Close interface.
func (e *ExecuteExec) Close() error {
	// Will never be called.
	return nil
}
// Build builds a prepared statement into an executor.
// It looks the statement up in the session (by Name, falling back to ID),
// binds the USING variables to its parameter markers, re-prepares when the
// schema version changed, then optimizes and builds the final executor into
// e.StmtExec.
func (e *ExecuteExec) Build() error {
	vars := variable.GetSessionVars(e.Ctx)
	if e.Name != "" {
		e.ID = vars.PreparedStmtNameToID[e.Name]
	}
	v := vars.PreparedStmts[e.ID]
	if v == nil {
		return ErrStmtNotFound
	}
	prepared := v.(*Prepared)
	if len(prepared.Params) != len(e.UsingVars) {
		return ErrWrongParamCount
	}
	// Evaluate the USING expressions and bind them to the markers, which are
	// stored in query-offset order.
	for i, usingVar := range e.UsingVars {
		val, err := evaluator.Eval(e.Ctx, usingVar)
		if err != nil {
			return errors.Trace(err)
		}
		prepared.Params[i].SetValue(val)
	}
	if prepared.SchemaVersion != e.IS.SchemaMetaVersion() {
		// If the schema version has changed we need to prepare it again,
		// if this time it failed, the real reason for the error is schema changed.
		err := optimizer.Prepare(e.IS, e.Ctx, prepared.Stmt)
		if err != nil {
			// Fix: "casued" -> "caused" in the user-visible error message.
			return ErrSchemaChanged.Gen("Schema change caused error: %s", err.Error())
		}
		prepared.SchemaVersion = e.IS.SchemaMetaVersion()
	}
	sb := &subqueryBuilder{is: e.IS}
	plan, err := optimizer.Optimize(e.Ctx, prepared.Stmt, sb)
	if err != nil {
		return errors.Trace(err)
	}
	b := newExecutorBuilder(e.Ctx, e.IS)
	stmtExec := b.build(plan)
	if b.err != nil {
		return errors.Trace(b.err)
	}
	e.StmtExec = stmtExec
	return nil
}
// DeallocateExec represent a DEALLOCATE executor.
type DeallocateExec struct {
	Name string
	ctx  context.Context
}

// Fields implements Executor Fields interface.
func (e *DeallocateExec) Fields() []*ast.ResultField {
	// DEALLOCATE produces no record set.
	return nil
}

// Next implements Executor Next interface.
// It removes the named prepared statement (name mapping and statement body)
// from the session.
func (e *DeallocateExec) Next() (*Row, error) {
	vars := variable.GetSessionVars(e.ctx)
	id, ok := vars.PreparedStmtNameToID[e.Name]
	if !ok {
		return nil, ErrStmtNotFound
	}
	delete(vars.PreparedStmtNameToID, e.Name)
	delete(vars.PreparedStmts, id)
	return nil, nil
}

// Close implements plan.Plan Close interface.
func (e *DeallocateExec) Close() error {
	return nil
}
// CompileExecutePreparedStmt compiles a session Execute command to a stmt.Statement.
// Each argument is wrapped into a value expression and attached to an
// Execute plan for the given statement ID.
func CompileExecutePreparedStmt(ctx context.Context, ID uint32, args ...interface{}) ast.Statement {
	usingVars := make([]ast.ExprNode, len(args))
	for i, arg := range args {
		usingVars[i] = ast.NewValueExpr(arg)
	}
	execPlan := &plan.Execute{ID: ID, UsingVars: usingVars}
	return &statement{
		is:   sessionctx.GetDomain(ctx).InfoSchema(),
		plan: execPlan,
	}
}

461
vendor/github.com/pingcap/tidb/executor/show.go generated vendored Normal file
View file

@ -0,0 +1,461 @@
// Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package executor
import (
"bytes"
"fmt"
"sort"
"strings"
"github.com/juju/errors"
"github.com/pingcap/tidb/ast"
"github.com/pingcap/tidb/column"
"github.com/pingcap/tidb/context"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/model"
"github.com/pingcap/tidb/mysql"
"github.com/pingcap/tidb/privilege"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/util/charset"
"github.com/pingcap/tidb/util/types"
)
// ShowExec represents a show executor.
// Rows are produced lazily: the first Next call runs fetchAll for the
// requested SHOW type, then rows are streamed out via cursor.
type ShowExec struct {
	Tp     ast.ShowStmtType // Databases/Tables/Columns/....
	DBName model.CIStr
	Table  *ast.TableName  // Used for showing columns.
	Column *ast.ColumnName // Used for `desc table column`.
	Flag   int             // Some flag parsed from sql, such as FULL.
	Full   bool
	User   string // Used for show grants.

	// Used by show variables
	GlobalScope bool

	fields  []*ast.ResultField
	ctx     context.Context
	is      infoschema.InfoSchema
	fetched bool
	rows    []*Row // materialized result rows
	cursor  int    // index of the next row Next will return
}

// Fields implements Executor Fields interface.
func (e *ShowExec) Fields() []*ast.ResultField {
	return e.fields
}

// Next implements Execution Next interface.
func (e *ShowExec) Next() (*Row, error) {
	if e.rows == nil {
		err := e.fetchAll()
		if err != nil {
			return nil, errors.Trace(err)
		}
	}
	if e.cursor >= len(e.rows) {
		return nil, nil
	}
	row := e.rows[e.cursor]
	// Propagate the row values into the result field expressions so readers
	// of the fields see the current row.
	for i, field := range e.fields {
		field.Expr.SetValue(row.Data[i].GetValue())
	}
	e.cursor++
	return row, nil
}
// fetchAll materializes the rows for the requested SHOW statement type into
// e.rows. Unhandled types (including SHOW WARNINGS) yield an empty result.
func (e *ShowExec) fetchAll() error {
	switch e.Tp {
	case ast.ShowCharset:
		return e.fetchShowCharset()
	case ast.ShowCollation:
		return e.fetchShowCollation()
	case ast.ShowColumns:
		return e.fetchShowColumns()
	case ast.ShowCreateTable:
		return e.fetchShowCreateTable()
	case ast.ShowDatabases:
		return e.fetchShowDatabases()
	case ast.ShowEngines:
		return e.fetchShowEngines()
	case ast.ShowGrants:
		return e.fetchShowGrants()
	case ast.ShowIndex:
		return e.fetchShowIndex()
	case ast.ShowProcedureStatus:
		return e.fetchShowProcedureStatus()
	case ast.ShowStatus:
		return e.fetchShowStatus()
	case ast.ShowTables:
		return e.fetchShowTables()
	case ast.ShowTableStatus:
		return e.fetchShowTableStatus()
	case ast.ShowTriggers:
		return e.fetchShowTriggers()
	case ast.ShowVariables:
		return e.fetchShowVariables()
	case ast.ShowWarnings:
		// empty result
	}
	return nil
}
// fetchShowEngines appends the single hard-coded InnoDB row that SHOW
// ENGINES reports.
func (e *ShowExec) fetchShowEngines() error {
	data := types.MakeDatums(
		"InnoDB",
		"DEFAULT",
		"Supports transactions, row-level locking, and foreign keys",
		"YES",
		"YES",
		"YES",
	)
	e.rows = append(e.rows, &Row{Data: data})
	return nil
}

// fetchShowDatabases appends one row per known schema, sorted by name.
func (e *ShowExec) fetchShowDatabases() error {
	names := e.is.AllSchemaNames()
	// TODO: let information_schema be the first database
	sort.Strings(names)
	for _, name := range names {
		e.rows = append(e.rows, &Row{Data: types.MakeDatums(name)})
	}
	return nil
}
// sortedTableNames returns the lowercase names of every table in e.DBName,
// sorted lexicographically. It fails when the schema does not exist.
// Shared by fetchShowTables and fetchShowTableStatus, which previously
// duplicated this lookup/sort logic.
func (e *ShowExec) sortedTableNames() ([]string, error) {
	if !e.is.SchemaExists(e.DBName) {
		return nil, errors.Errorf("Can not find DB: %s", e.DBName)
	}
	tables := e.is.SchemaTables(e.DBName)
	names := make([]string, 0, len(tables))
	for _, t := range tables {
		names = append(names, t.Meta().Name.L)
	}
	sort.Strings(names)
	return names, nil
}

// fetchShowTables appends one row per table in e.DBName; with FULL a second
// "table type" column is added.
func (e *ShowExec) fetchShowTables() error {
	tableNames, err := e.sortedTableNames()
	if err != nil {
		return err
	}
	for _, v := range tableNames {
		data := types.MakeDatums(v)
		if e.Full {
			// TODO: support "VIEW" later if we have supported view feature.
			// now, just use "BASE TABLE".
			data = append(data, types.NewDatum("BASE TABLE"))
		}
		e.rows = append(e.rows, &Row{Data: data})
	}
	return nil
}

// fetchShowTableStatus appends one SHOW TABLE STATUS row per table.
// The engine/version/row-count figures are fixed placeholders, not real
// statistics.
func (e *ShowExec) fetchShowTableStatus() error {
	tableNames, err := e.sortedTableNames()
	if err != nil {
		return err
	}
	for _, v := range tableNames {
		now := mysql.CurrentTime(mysql.TypeDatetime)
		data := types.MakeDatums(v, "InnoDB", "10", "Compact", 100, 100, 100, 100, 100, 100, 100,
			now, now, now, "utf8_general_ci", "", "", "")
		e.rows = append(e.rows, &Row{Data: data})
	}
	return nil
}
// fetchShowColumns appends one row per column of the target table.
// When e.Column is set (`desc table column`) only that column is emitted;
// with FULL extra collation/privileges/comment columns are included.
func (e *ShowExec) fetchShowColumns() error {
	tb, err := e.getTable()
	if err != nil {
		return errors.Trace(err)
	}
	cols := tb.Cols()
	for _, col := range cols {
		if e.Column != nil && e.Column.Name.L != col.Name.L {
			continue
		}
		desc := column.NewColDesc(col)
		// The FULL keyword causes the output to include the column collation and comments,
		// as well as the privileges you have for each column.
		row := &Row{}
		if e.Full {
			row.Data = types.MakeDatums(
				desc.Field,
				desc.Type,
				desc.Collation,
				desc.Null,
				desc.Key,
				desc.DefaultValue,
				desc.Extra,
				desc.Privileges,
				desc.Comment,
			)
		} else {
			row.Data = types.MakeDatums(
				desc.Field,
				desc.Type,
				desc.Null,
				desc.Key,
				desc.DefaultValue,
				desc.Extra,
			)
		}
		e.rows = append(e.rows, row)
	}
	return nil
}
// fetchShowIndex appends one row per (index, column) pair of the target
// table, mirroring MySQL's SHOW INDEX output columns.
// Collation, cardinality and Null are fixed placeholder values here.
func (e *ShowExec) fetchShowIndex() error {
	tb, err := e.getTable()
	if err != nil {
		return errors.Trace(err)
	}
	for _, idx := range tb.Indices() {
		for i, col := range idx.Columns {
			nonUniq := 1
			if idx.Unique {
				nonUniq = 0
			}
			// Sub_part is non-nil only for prefix index columns.
			var subPart interface{}
			if col.Length != types.UnspecifiedLength {
				subPart = col.Length
			}
			data := types.MakeDatums(
				tb.Meta().Name.O, // Table
				nonUniq,          // Non_unique
				idx.Name.O,       // Key_name
				i+1,              // Seq_in_index
				col.Name.O,       // Column_name
				"utf8_bin",       // Collation
				0,                // Cardinality
				subPart,          // Sub_part
				nil,              // Packed
				"YES",            // Null
				idx.Tp.String(),  // Index_type
				"",               // Comment
				idx.Comment,      // Index_comment
			)
			e.rows = append(e.rows, &Row{Data: data})
		}
	}
	return nil
}
// fetchShowCharset appends one row per supported character set.
// See: http://dev.mysql.com/doc/refman/5.7/en/show-character-set.html
func (e *ShowExec) fetchShowCharset() error {
	for _, cs := range charset.GetAllCharsets() {
		data := types.MakeDatums(
			cs.Name,
			cs.Desc,
			cs.DefaultCollation,
			cs.Maxlen,
		)
		e.rows = append(e.rows, &Row{Data: data})
	}
	return nil
}
// fetchShowVariables appends one (name, value) row per system variable.
// Without GLOBAL scope a session-scoped value takes precedence and the
// global value is the fallback; with GLOBAL only global values are read.
func (e *ShowExec) fetchShowVariables() error {
	sessionVars := variable.GetSessionVars(e.ctx)
	globalVars := variable.GetGlobalVarAccessor(e.ctx)
	for _, v := range variable.SysVars {
		var err error
		var value string
		if !e.GlobalScope {
			// Try to get Session Scope variable value first.
			sv, ok := sessionVars.Systems[v.Name]
			if ok {
				value = sv
			} else {
				// If session scope variable is not set, get the global scope value.
				value, err = globalVars.GetGlobalSysVar(e.ctx, v.Name)
				if err != nil {
					return errors.Trace(err)
				}
			}
		} else {
			value, err = globalVars.GetGlobalSysVar(e.ctx, v.Name)
			if err != nil {
				return errors.Trace(err)
			}
		}
		row := &Row{Data: types.MakeDatums(v.Name, value)}
		e.rows = append(e.rows, row)
	}
	return nil
}

// fetchShowStatus appends one (name, value) row per status variable,
// skipping session-scoped variables when GLOBAL was requested.
func (e *ShowExec) fetchShowStatus() error {
	statusVars, err := variable.GetStatusVars()
	if err != nil {
		return errors.Trace(err)
	}
	for status, v := range statusVars {
		if e.GlobalScope && v.Scope == variable.ScopeSession {
			continue
		}
		value, err := types.ToString(v.Value)
		if err != nil {
			return errors.Trace(err)
		}
		row := &Row{Data: types.MakeDatums(status, value)}
		e.rows = append(e.rows, row)
	}
	return nil
}
// fetchShowCreateTable renders a CREATE TABLE statement for the target table
// and appends a single (table name, DDL text) row.
func (e *ShowExec) fetchShowCreateTable() error {
	tb, err := e.getTable()
	if err != nil {
		return errors.Trace(err)
	}
	// TODO: let the result more like MySQL.
	var buf bytes.Buffer
	buf.WriteString(fmt.Sprintf("CREATE TABLE `%s` (\n", tb.Meta().Name.O))
	// Column definitions.
	for i, col := range tb.Cols() {
		buf.WriteString(fmt.Sprintf(" `%s` %s", col.Name.O, col.GetTypeDesc()))
		if mysql.HasAutoIncrementFlag(col.Flag) {
			buf.WriteString(" NOT NULL AUTO_INCREMENT")
		} else {
			if mysql.HasNotNullFlag(col.Flag) {
				buf.WriteString(" NOT NULL")
			}
			switch col.DefaultValue {
			case nil:
				buf.WriteString(" DEFAULT NULL")
			case "CURRENT_TIMESTAMP":
				buf.WriteString(" DEFAULT CURRENT_TIMESTAMP")
			default:
				buf.WriteString(fmt.Sprintf(" DEFAULT '%v'", col.DefaultValue))
			}
			if mysql.HasOnUpdateNowFlag(col.Flag) {
				buf.WriteString(" ON UPDATE CURRENT_TIMESTAMP")
			}
		}
		if i != len(tb.Cols())-1 {
			buf.WriteString(",\n")
		}
	}
	// Index definitions follow the column list.
	if len(tb.Indices()) > 0 {
		buf.WriteString(",\n")
	}
	for i, idx := range tb.Indices() {
		if idx.Primary {
			buf.WriteString(" PRIMARY KEY ")
		} else if idx.Unique {
			buf.WriteString(fmt.Sprintf(" UNIQUE KEY `%s` ", idx.Name.O))
		} else {
			buf.WriteString(fmt.Sprintf(" KEY `%s` ", idx.Name.O))
		}
		cols := make([]string, 0, len(idx.Columns))
		for _, c := range idx.Columns {
			cols = append(cols, c.Name.O)
		}
		buf.WriteString(fmt.Sprintf("(`%s`)", strings.Join(cols, "`,`")))
		if i != len(tb.Indices())-1 {
			buf.WriteString(",\n")
		}
	}
	buf.WriteString("\n")
	buf.WriteString(") ENGINE=InnoDB")
	// Charset defaults to latin1 when the table meta does not record one.
	if s := tb.Meta().Charset; len(s) > 0 {
		buf.WriteString(fmt.Sprintf(" DEFAULT CHARSET=%s", s))
	} else {
		buf.WriteString(" DEFAULT CHARSET=latin1")
	}
	data := types.MakeDatums(tb.Meta().Name.O, buf.String())
	e.rows = append(e.rows, &Row{Data: data})
	return nil
}
// fetchShowCollation appends one row per supported collation, mirroring
// MySQL's SHOW COLLATION output.
func (e *ShowExec) fetchShowCollation() error {
	for _, coll := range charset.GetCollations() {
		defaultMark := ""
		if coll.IsDefault {
			defaultMark = "Yes"
		}
		data := types.MakeDatums(
			coll.Name,
			coll.CharsetName,
			coll.ID,
			defaultMark,
			"Yes",
			1,
		)
		e.rows = append(e.rows, &Row{Data: data})
	}
	return nil
}
// fetchShowGrants appends one row per GRANT statement reported by the
// session's privilege checker for e.User.
func (e *ShowExec) fetchShowGrants() error {
	// Get checker
	checker := privilege.GetPrivilegeChecker(e.ctx)
	if checker == nil {
		return errors.New("Miss privilege checker!")
	}
	gs, err := checker.ShowGrants(e.ctx, e.User)
	if err != nil {
		return errors.Trace(err)
	}
	for _, g := range gs {
		data := types.MakeDatums(g)
		e.rows = append(e.rows, &Row{Data: data})
	}
	return nil
}

// fetchShowTriggers currently produces no rows.
func (e *ShowExec) fetchShowTriggers() error {
	return nil
}

// fetchShowProcedureStatus currently produces no rows.
func (e *ShowExec) fetchShowProcedureStatus() error {
	return nil
}

// getTable resolves e.Table to a table.Table via the info schema.
func (e *ShowExec) getTable() (table.Table, error) {
	if e.Table == nil {
		return nil, errors.New("table not found")
	}
	tb, ok := e.is.TableByID(e.Table.TableInfo.ID)
	if !ok {
		return nil, errors.Errorf("table %s not found", e.Table.Name)
	}
	return tb, nil
}

// Close implements Executor Close interface.
func (e *ShowExec) Close() error {
	return nil
}

145
vendor/github.com/pingcap/tidb/executor/subquery.go generated vendored Normal file
View file

@ -0,0 +1,145 @@
// Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package executor
import (
"github.com/juju/errors"
"github.com/pingcap/tidb/ast"
"github.com/pingcap/tidb/context"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/optimizer/plan"
"github.com/pingcap/tidb/util/types"
)
var _ ast.SubqueryExec = &subquery{}

// subquery is an exprNode with a plan.
// It carries its evaluated value in the embedded Datum and implements the
// ast expression interfaces through the accessors below.
type subquery struct {
	types.Datum
	Type *types.FieldType
	flag uint64
	text string
	plan plan.Plan
	is   infoschema.InfoSchema
}

// SetDatum implements Expression interface.
func (sq *subquery) SetDatum(datum types.Datum) {
	sq.Datum = datum
}

// GetDatum implements Expression interface.
func (sq *subquery) GetDatum() *types.Datum {
	return &sq.Datum
}

// SetFlag implements Expression interface.
func (sq *subquery) SetFlag(flag uint64) {
	sq.flag = flag
}

// GetFlag implements Expression interface.
func (sq *subquery) GetFlag() uint64 {
	return sq.flag
}

// SetText implements Node interface.
func (sq *subquery) SetText(text string) {
	sq.text = text
}

// Text implements Node interface.
func (sq *subquery) Text() string {
	return sq.text
}

// SetType implements Expression interface.
func (sq *subquery) SetType(tp *types.FieldType) {
	sq.Type = tp
}

// GetType implements Expression interface.
func (sq *subquery) GetType() *types.FieldType {
	return sq.Type
}

// Accept implements the ast.Node Accept interface.
func (sq *subquery) Accept(v ast.Visitor) (ast.Node, bool) {
	// SubQuery is not a normal ExprNode.
	newNode, skipChildren := v.Enter(sq)
	if skipChildren {
		return v.Leave(newNode)
	}
	sq = newNode.(*subquery)
	return v.Leave(sq)
}
// EvalRows builds and runs the subquery plan, returning at most rowCount row
// values (a negative rowCount returns all rows).
// Single-column rows are flattened to their bare value; multi-column rows
// become []interface{}.
func (sq *subquery) EvalRows(ctx context.Context, rowCount int) ([]interface{}, error) {
	b := newExecutorBuilder(ctx, sq.is)
	plan.Refine(sq.plan)
	e := b.build(sq.plan)
	if b.err != nil {
		return nil, errors.Trace(b.err)
	}
	defer e.Close()
	if len(e.Fields()) == 0 {
		// No result fields means no Recordset.
		// Drain the executor for its side effects and return no rows.
		for {
			row, err := e.Next()
			if err != nil {
				return nil, errors.Trace(err)
			}
			if row == nil {
				return nil, nil
			}
		}
	}
	var (
		err  error
		row  *Row
		rows = []interface{}{}
	)
	for rowCount != 0 {
		row, err = e.Next()
		if err != nil {
			return rows, errors.Trace(err)
		}
		if row == nil {
			break
		}
		if len(row.Data) == 1 {
			rows = append(rows, row.Data[0].GetValue())
		} else {
			rows = append(rows, types.DatumsToInterfaces(row.Data))
		}
		// Only count down when a limit was requested.
		if rowCount > 0 {
			rowCount--
		}
	}
	return rows, nil
}

// ColumnCount returns the number of columns the subquery plan produces.
func (sq *subquery) ColumnCount() (int, error) {
	return len(sq.plan.Fields()), nil
}
// subqueryBuilder builds subquery expression nodes bound to one info schema.
type subqueryBuilder struct {
	is infoschema.InfoSchema
}

// Build wraps the given plan in a subquery expression node carrying the
// builder's info schema.
func (sb *subqueryBuilder) Build(p plan.Plan) ast.SubqueryExec {
	return &subquery{
		is:   sb.is,
		plan: p,
	}
}

9
vendor/github.com/pingcap/tidb/gitcookie.sh generated vendored Normal file
View file

@ -0,0 +1,9 @@
# Configure git to authenticate against go.googlesource.com /
# go-review.googlesource.com via a cookie file.
# NOTE(review): this vendored script embeds a literal OAuth cookie value in
# the repository; treat the credential as compromised and rotate it.
touch ~/.gitcookies
chmod 0600 ~/.gitcookies
git config --global http.cookiefile ~/.gitcookies
# Append the cookie entries (comma-separated fields rewritten as tabs).
tr , \\t <<\__END__ >>~/.gitcookies
go.googlesource.com,FALSE,/,TRUE,2147483647,o,git-z.pingcap.com=1/Xv6CBlnVpdrhYBXT5i_VexGocQcbgkKsrW938zgjqx0
go-review.googlesource.com,FALSE,/,TRUE,2147483647,o,git-z.pingcap.com=1/Xv6CBlnVpdrhYBXT5i_VexGocQcbgkKsrW938zgjqx0
__END__

499
vendor/github.com/pingcap/tidb/infoschema/infoschema.go generated vendored Normal file
View file

@ -0,0 +1,499 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package infoschema
import (
"strings"
"sync/atomic"
"github.com/juju/errors"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/meta"
"github.com/pingcap/tidb/meta/autoid"
"github.com/pingcap/tidb/model"
"github.com/pingcap/tidb/mysql"
"github.com/pingcap/tidb/perfschema"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/terror"
// import table implementation to init table.TableFromMeta
_ "github.com/pingcap/tidb/table/tables"
"github.com/pingcap/tidb/util/types"
)
// InfoSchema is the interface used to retrieve the schema information.
// It works as a in memory cache and doesn't handle any schema change.
// InfoSchema is read-only, and the returned value is a copy.
// TODO: add more methods to retrieve tables and columns.
type InfoSchema interface {
	SchemaByName(schema model.CIStr) (*model.DBInfo, bool)
	SchemaExists(schema model.CIStr) bool
	TableByName(schema, table model.CIStr) (table.Table, error)
	TableExists(schema, table model.CIStr) bool
	ColumnByName(schema, table, column model.CIStr) (*model.ColumnInfo, bool)
	ColumnExists(schema, table, column model.CIStr) bool
	IndexByName(schema, table, index model.CIStr) (*model.IndexInfo, bool)
	SchemaByID(id int64) (*model.DBInfo, bool)
	TableByID(id int64) (table.Table, bool)
	AllocByID(id int64) (autoid.Allocator, bool)
	ColumnByID(id int64) (*model.ColumnInfo, bool)
	ColumnIndicesByID(id int64) ([]*model.IndexInfo, bool)
	AllSchemaNames() []string
	AllSchemas() []*model.DBInfo
	Clone() (result []*model.DBInfo)
	SchemaTables(schema model.CIStr) []table.Table
	SchemaMetaVersion() int64
}

// Information Schema Name.
const (
	Name = "INFORMATION_SCHEMA"
)
// infoSchema is the in-memory implementation of InfoSchema: a set of maps
// from names and IDs to schema objects.
type infoSchema struct {
	schemaNameToID  map[string]int64
	tableNameToID   map[tableName]int64
	columnNameToID  map[columnName]int64
	schemas         map[int64]*model.DBInfo
	tables          map[int64]table.Table
	tableAllocators map[int64]autoid.Allocator
	columns         map[int64]*model.ColumnInfo
	indices         map[indexName]*model.IndexInfo
	columnIndices   map[int64][]*model.IndexInfo

	// We should check version when change schema.
	schemaMetaVersion int64
}

var _ InfoSchema = (*infoSchema)(nil)

// tableName is the composite (schema, table) lookup key for table maps.
type tableName struct {
	schema string
	table  string
}

// columnName extends tableName with a column name for column maps.
type columnName struct {
	tableName
	name string
}

// indexName extends tableName with an index name for index maps.
type indexName struct {
	tableName
	name string
}
// SchemaByName looks a schema up by its (case-insensitive) name.
func (is *infoSchema) SchemaByName(schema model.CIStr) (val *model.DBInfo, ok bool) {
	id, ok := is.schemaNameToID[schema.L]
	if !ok {
		return
	}
	val, ok = is.schemas[id]
	return
}

// SchemaMetaVersion returns the schema version this snapshot was built from.
func (is *infoSchema) SchemaMetaVersion() int64 {
	return is.schemaMetaVersion
}

// SchemaExists reports whether a schema with the given name exists.
func (is *infoSchema) SchemaExists(schema model.CIStr) bool {
	_, ok := is.schemaNameToID[schema.L]
	return ok
}

// TableByName looks a table up by schema and table name; it returns
// TableNotExists when the pair is unknown.
func (is *infoSchema) TableByName(schema, table model.CIStr) (t table.Table, err error) {
	id, ok := is.tableNameToID[tableName{schema: schema.L, table: table.L}]
	if !ok {
		return nil, TableNotExists.Gen("table %s.%s does not exist", schema, table)
	}
	t = is.tables[id]
	return
}

// TableExists reports whether the (schema, table) pair is known.
func (is *infoSchema) TableExists(schema, table model.CIStr) bool {
	_, ok := is.tableNameToID[tableName{schema: schema.L, table: table.L}]
	return ok
}

// ColumnByName looks a column up by schema, table and column name.
func (is *infoSchema) ColumnByName(schema, table, column model.CIStr) (val *model.ColumnInfo, ok bool) {
	id, ok := is.columnNameToID[columnName{tableName: tableName{schema: schema.L, table: table.L}, name: column.L}]
	if !ok {
		return
	}
	val, ok = is.columns[id]
	return
}

// ColumnExists reports whether the (schema, table, column) triple is known.
func (is *infoSchema) ColumnExists(schema, table, column model.CIStr) bool {
	_, ok := is.columnNameToID[columnName{tableName: tableName{schema: schema.L, table: table.L}, name: column.L}]
	return ok
}

// IndexByName looks an index up by schema, table and index name.
func (is *infoSchema) IndexByName(schema, table, index model.CIStr) (val *model.IndexInfo, ok bool) {
	val, ok = is.indices[indexName{tableName: tableName{schema: schema.L, table: table.L}, name: index.L}]
	return
}

// SchemaByID looks a schema up by its ID.
func (is *infoSchema) SchemaByID(id int64) (val *model.DBInfo, ok bool) {
	val, ok = is.schemas[id]
	return
}

// TableByID looks a table up by its ID.
func (is *infoSchema) TableByID(id int64) (val table.Table, ok bool) {
	val, ok = is.tables[id]
	return
}

// AllocByID returns the auto-ID allocator associated with a table ID.
func (is *infoSchema) AllocByID(id int64) (val autoid.Allocator, ok bool) {
	val, ok = is.tableAllocators[id]
	return
}

// ColumnByID looks a column up by its ID.
func (is *infoSchema) ColumnByID(id int64) (val *model.ColumnInfo, ok bool) {
	val, ok = is.columns[id]
	return
}

// ColumnIndicesByID returns the indices that cover the column with this ID.
func (is *infoSchema) ColumnIndicesByID(id int64) (indices []*model.IndexInfo, ok bool) {
	indices, ok = is.columnIndices[id]
	return
}

// AllSchemaNames returns every schema name (map order, unsorted).
func (is *infoSchema) AllSchemaNames() (names []string) {
	for _, v := range is.schemas {
		names = append(names, v.Name.O)
	}
	return
}

// AllSchemas returns every schema's DBInfo (map order, unsorted).
func (is *infoSchema) AllSchemas() (schemas []*model.DBInfo) {
	for _, v := range is.schemas {
		schemas = append(schemas, v)
	}
	return
}

// SchemaTables returns all tables of the named schema; nil if it is unknown.
func (is *infoSchema) SchemaTables(schema model.CIStr) (tables []table.Table) {
	di, ok := is.SchemaByName(schema)
	if !ok {
		return
	}
	for _, ti := range di.Tables {
		tables = append(tables, is.tables[ti.ID])
	}
	return
}

// Clone returns deep copies of every schema's DBInfo.
func (is *infoSchema) Clone() (result []*model.DBInfo) {
	for _, v := range is.schemas {
		result = append(result, v.Clone())
	}
	return
}
// Handle handles information schema, including getting and setting.
type Handle struct {
	value atomic.Value // holds the current InfoSchema snapshot
	store kv.Storage
}

// NewHandle creates a new Handle.
func NewHandle(store kv.Storage) *Handle {
	h := &Handle{
		store: store,
	}
	// init memory tables
	// NOTE(review): initMemoryTables returns an error that is discarded
	// here — confirm whether failures should propagate.
	initMemoryTables(store)
	initPerfSchema(store)
	return h
}

// initPerfSchema initializes the package-level performance schema handle.
func initPerfSchema(store kv.Storage) {
	perfHandle = perfschema.NewPerfHandle(store)
}

// genGlobalID allocates a new globally unique ID in its own transaction.
func genGlobalID(store kv.Storage) (int64, error) {
	var globalID int64
	err := kv.RunInNewTxn(store, true, func(txn kv.Transaction) error {
		var err error
		globalID, err = meta.NewMeta(txn).GenGlobalID()
		return errors.Trace(err)
	})
	return globalID, errors.Trace(err)
}
// Package-level handles for the built-in INFORMATION_SCHEMA tables,
// populated by initMemoryTables.
var (
	// Information_Schema
	isDB          *model.DBInfo
	schemataTbl   table.Table
	tablesTbl     table.Table
	columnsTbl    table.Table
	statisticsTbl table.Table
	charsetTbl    table.Table
	collationsTbl table.Table
	filesTbl      table.Table
	defTbl        table.Table
	profilingTbl  table.Table
	// nameToTable maps lowercase INFORMATION_SCHEMA table names to their
	// in-memory tables.
	nameToTable map[string]table.Table

	perfHandle perfschema.PerfSchema
)
// setColumnID assigns a fresh global ID to every column of the given table
// meta, stopping at the first allocation error.
func setColumnID(meta *model.TableInfo, store kv.Storage) error {
	for _, col := range meta.Columns {
		id, err := genGlobalID(store)
		if err != nil {
			return errors.Trace(err)
		}
		col.ID = id
	}
	return nil
}
// initMemoryTables builds the in-memory tables backing Information_Schema:
// one memory table per entry in tableNameToColumns, each with freshly
// allocated global IDs for the table and its columns. Results are published
// in the package-level variables (nameToTable, schemataTbl, ...) and the
// Information_Schema DBInfo (isDB) is created last. The static
// CHARACTER_SETS and COLLATIONS tables are filled here; the data-bearing
// tables are refilled on every Handle.Set.
func initMemoryTables(store kv.Storage) error {
	// Init Information_Schema
	var (
		err error
		tbl table.Table
	)
	dbID, err := genGlobalID(store)
	if err != nil {
		return errors.Trace(err)
	}
	nameToTable = make(map[string]table.Table)
	isTables := make([]*model.TableInfo, 0, len(tableNameToColumns))
	for name, cols := range tableNameToColumns {
		meta := buildTableMeta(name, cols)
		isTables = append(isTables, meta)
		// Each table and each of its columns gets a fresh global ID.
		meta.ID, err = genGlobalID(store)
		if err != nil {
			return errors.Trace(err)
		}
		err = setColumnID(meta, store)
		if err != nil {
			return errors.Trace(err)
		}
		// Memory allocator is keyed by the Information_Schema db ID.
		alloc := autoid.NewMemoryAllocator(dbID)
		tbl, err = createMemoryTable(meta, alloc)
		if err != nil {
			return errors.Trace(err)
		}
		nameToTable[meta.Name.L] = tbl
	}
	schemataTbl = nameToTable[strings.ToLower(tableSchemata)]
	tablesTbl = nameToTable[strings.ToLower(tableTables)]
	columnsTbl = nameToTable[strings.ToLower(tableColumns)]
	statisticsTbl = nameToTable[strings.ToLower(tableStatistics)]
	charsetTbl = nameToTable[strings.ToLower(tableCharacterSets)]
	collationsTbl = nameToTable[strings.ToLower(tableCollations)]
	// CharacterSets/Collations contain static data. Init them now.
	err = insertData(charsetTbl, dataForCharacterSets())
	if err != nil {
		return errors.Trace(err)
	}
	err = insertData(collationsTbl, dataForColltions())
	if err != nil {
		return errors.Trace(err)
	}
	// create db
	isDB = &model.DBInfo{
		ID:      dbID,
		Name:    model.NewCIStr(Name),
		Charset: mysql.DefaultCharset,
		Collate: mysql.DefaultCollationName,
		Tables:  isTables,
	}
	return nil
}
// insertData appends the given rows to tbl, stopping at the first error.
func insertData(tbl table.Table, rows [][]types.Datum) error {
	for _, row := range rows {
		if _, err := tbl.AddRecord(nil, row); err != nil {
			return errors.Trace(err)
		}
	}
	return nil
}
// refillTable replaces the whole content of tbl with the given rows by
// truncating it first.
func refillTable(tbl table.Table, rows [][]types.Datum) error {
	if err := tbl.Truncate(nil); err != nil {
		return errors.Trace(err)
	}
	return insertData(tbl, rows)
}
// Set sets DBInfo to information schema.
// It rebuilds a complete *infoSchema snapshot from newInfo: it carries over
// per-table ID allocators from the previous snapshot (so auto-increment
// state survives schema reloads), splices in the synthetic
// Information_Schema and Performance_Schema databases, refills the
// data-bearing Information_Schema memory tables, and finally publishes the
// snapshot atomically so concurrent Get callers never observe a partial
// schema.
func (h *Handle) Set(newInfo []*model.DBInfo, schemaMetaVersion int64) error {
	info := &infoSchema{
		schemaNameToID:    map[string]int64{},
		tableNameToID:     map[tableName]int64{},
		columnNameToID:    map[columnName]int64{},
		schemas:           map[int64]*model.DBInfo{},
		tables:            map[int64]table.Table{},
		tableAllocators:   map[int64]autoid.Allocator{},
		columns:           map[int64]*model.ColumnInfo{},
		indices:           map[indexName]*model.IndexInfo{},
		columnIndices:     map[int64][]*model.IndexInfo{},
		schemaMetaVersion: schemaMetaVersion,
	}
	var err error
	var hasOldInfo bool
	infoschema := h.Get()
	if infoschema != nil {
		hasOldInfo = true
	}
	for _, di := range newInfo {
		info.schemas[di.ID] = di
		info.schemaNameToID[di.Name.L] = di.ID
		for _, t := range di.Tables {
			alloc := autoid.NewAllocator(h.store, di.ID)
			// Reuse the previous snapshot's allocator for this table,
			// if any, so allocation state is preserved across reloads.
			if hasOldInfo {
				val, ok := infoschema.AllocByID(t.ID)
				if ok {
					alloc = val
				}
			}
			info.tableAllocators[t.ID] = alloc
			info.tables[t.ID], err = table.TableFromMeta(alloc, t)
			if err != nil {
				return errors.Trace(err)
			}
			tname := tableName{di.Name.L, t.Name.L}
			info.tableNameToID[tname] = t.ID
			for _, c := range t.Columns {
				info.columns[c.ID] = c
				info.columnNameToID[columnName{tname, c.Name.L}] = c.ID
			}
			for _, idx := range t.Indices {
				info.indices[indexName{tname, idx.Name.L}] = idx
				// Record, per column, every index that covers it.
				for _, idxCol := range idx.Columns {
					columnID := t.Columns[idxCol.Offset].ID
					columnIndices := info.columnIndices[columnID]
					info.columnIndices[columnID] = append(columnIndices, idx)
				}
			}
		}
	}
	// Build Information_Schema
	info.schemaNameToID[isDB.Name.L] = isDB.ID
	info.schemas[isDB.ID] = isDB
	for _, t := range isDB.Tables {
		tbl, ok := nameToTable[t.Name.L]
		if !ok {
			return errors.Errorf("table `%s` is missing.", t.Name)
		}
		info.tables[t.ID] = tbl
		tname := tableName{isDB.Name.L, t.Name.L}
		info.tableNameToID[tname] = t.ID
		for _, c := range t.Columns {
			info.columns[c.ID] = c
			info.columnNameToID[columnName{tname, c.Name.L}] = c.ID
		}
	}
	// Add Performance_Schema
	psDB := perfHandle.GetDBMeta()
	info.schemaNameToID[psDB.Name.L] = psDB.ID
	info.schemas[psDB.ID] = psDB
	for _, t := range psDB.Tables {
		tbl, ok := perfHandle.GetTable(t.Name.O)
		if !ok {
			return errors.Errorf("table `%s` is missing.", t.Name)
		}
		info.tables[t.ID] = tbl
		tname := tableName{psDB.Name.L, t.Name.L}
		info.tableNameToID[tname] = t.ID
		for _, c := range t.Columns {
			info.columns[c.ID] = c
			info.columnNameToID[columnName{tname, c.Name.L}] = c.ID
		}
	}
	// Should refill some tables in Information_Schema.
	// schemata/tables/columns/statistics
	dbNames := make([]string, 0, len(info.schemas))
	dbInfos := make([]*model.DBInfo, 0, len(info.schemas))
	for _, v := range info.schemas {
		dbNames = append(dbNames, v.Name.L)
		dbInfos = append(dbInfos, v)
	}
	err = refillTable(schemataTbl, dataForSchemata(dbNames))
	if err != nil {
		return errors.Trace(err)
	}
	err = refillTable(tablesTbl, dataForTables(dbInfos))
	if err != nil {
		return errors.Trace(err)
	}
	err = refillTable(columnsTbl, dataForColumns(dbInfos))
	if err != nil {
		return errors.Trace(err)
	}
	err = refillTable(statisticsTbl, dataForStatistics(dbInfos))
	if err != nil {
		return errors.Trace(err)
	}
	// Publish the finished snapshot atomically.
	h.value.Store(info)
	return nil
}
// Get gets information schema from Handle.
// It returns nil before the first successful Set.
func (h *Handle) Get() InfoSchema {
	schema, _ := h.value.Load().(InfoSchema)
	return schema
}
// Schema error codes.
// All constants are explicitly typed terror.ErrCode; previously only the
// first was, leaving the rest as untyped ints.
const (
	CodeDbDropExists      terror.ErrCode = 1008
	CodeDatabaseNotExists terror.ErrCode = 1049
	CodeTableNotExists    terror.ErrCode = 1146
	CodeColumnNotExists   terror.ErrCode = 1054
	CodeDatabaseExists    terror.ErrCode = 1007
	CodeTableExists       terror.ErrCode = 1050
	CodeBadTable          terror.ErrCode = 1051
)
var (
	// DatabaseDropExists is returned when dropping a database that does not exist.
	DatabaseDropExists = terror.ClassSchema.New(CodeDbDropExists, "database doesn't exist")
	// DatabaseNotExists is returned when a referenced database does not exist.
	DatabaseNotExists = terror.ClassSchema.New(CodeDatabaseNotExists, "database not exists")
	// TableNotExists is returned when a referenced table does not exist.
	TableNotExists = terror.ClassSchema.New(CodeTableNotExists, "table not exists")
	// ColumnNotExists is returned when a referenced column does not exist.
	ColumnNotExists = terror.ClassSchema.New(CodeColumnNotExists, "field not exists")
	// DatabaseExists is returned when creating a database that already exists.
	DatabaseExists = terror.ClassSchema.New(CodeDatabaseExists, "database already exists")
	// TableExists is returned when creating a table that already exists.
	TableExists = terror.ClassSchema.New(CodeTableExists, "table already exists")
	// TableDropExists is returned when dropping a table that does not exist.
	TableDropExists = terror.ClassSchema.New(CodeBadTable, "unknown table")
)
// init registers the mapping from schema error codes to MySQL error
// numbers so errors are reported with MySQL-compatible codes.
func init() {
	codes := map[terror.ErrCode]uint16{
		CodeDbDropExists:      mysql.ErrDbDropExists,
		CodeDatabaseNotExists: mysql.ErrBadDb,
		CodeTableNotExists:    mysql.ErrNoSuchTable,
		CodeColumnNotExists:   mysql.ErrBadField,
		CodeDatabaseExists:    mysql.ErrDbCreateExists,
		CodeTableExists:       mysql.ErrTableExists,
		CodeBadTable:          mysql.ErrBadTable,
	}
	terror.ErrClassToMySQLCodes[terror.ClassSchema] = codes
}

458
vendor/github.com/pingcap/tidb/infoschema/tables.go generated vendored Normal file
View file

@ -0,0 +1,458 @@
// Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package infoschema
import (
"fmt"
"sort"
"github.com/pingcap/tidb/column"
"github.com/pingcap/tidb/meta/autoid"
"github.com/pingcap/tidb/model"
"github.com/pingcap/tidb/mysql"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/table/tables"
"github.com/pingcap/tidb/util/charset"
"github.com/pingcap/tidb/util/types"
)
// Names of the memory tables making up Information_Schema, plus the fixed
// catalog value MySQL reports for every object.
const (
	tableSchemata      = "SCHEMATA"
	tableTables        = "TABLES"
	tableColumns       = "COLUMNS"
	tableStatistics    = "STATISTICS"
	tableCharacterSets = "CHARACTER_SETS"
	tableCollations    = "COLLATIONS"
	tableFiles         = "FILES"
	// catalogVal is the constant CATALOG_NAME/TABLE_CATALOG value.
	catalogVal     = "def"
	tableProfiling = "PROFILING"
)
// columnInfo describes one column of an Information_Schema memory table.
type columnInfo struct {
	name  string      // column name exposed to clients
	tp    byte        // mysql type tag (mysql.Type*)
	size  int         // display/field length (becomes FieldType.Flen)
	flag  uint        // mysql column flags
	deflt interface{} // default value; NOTE(review): not read by buildColumnInfo
	elems []string    // enum/set elements; NOTE(review): not read by buildColumnInfo
}
// buildColumnInfo converts a columnInfo into a public model.ColumnInfo.
// Columns default to the binary charset/collation with the unsigned flag;
// text-like columns (varchar/blob) use the server default charset and
// collation with no flags. (tableName is currently unused.)
func buildColumnInfo(tableName string, col columnInfo) *model.ColumnInfo {
	chs := charset.CharsetBin
	coll := charset.CharsetBin
	flag := mysql.UnsignedFlag
	if col.tp == mysql.TypeVarchar || col.tp == mysql.TypeBlob {
		chs = mysql.DefaultCharset
		coll = mysql.DefaultCollationName
		flag = 0
	}
	return &model.ColumnInfo{
		Name: model.NewCIStr(col.name),
		FieldType: types.FieldType{
			Charset: chs,
			Collate: coll,
			Tp:      col.tp,
			Flen:    col.size,
			Flag:    uint(flag),
		},
		State: model.StatePublic,
	}
}
// buildTableMeta builds a public TableInfo for the named table from its
// column definitions. Column offsets are assigned in definition order.
// The original ran two passes (build, then set offsets); a single pass
// does both.
func buildTableMeta(tableName string, cs []columnInfo) *model.TableInfo {
	cols := make([]*model.ColumnInfo, 0, len(cs))
	for i, c := range cs {
		col := buildColumnInfo(tableName, c)
		col.Offset = i
		cols = append(cols, col)
	}
	return &model.TableInfo{
		Name:    model.NewCIStr(tableName),
		Columns: cols,
		State:   model.StatePublic,
	}
}
// schemataCols defines the columns of information_schema.SCHEMATA.
var schemataCols = []columnInfo{
	{"CATALOG_NAME", mysql.TypeVarchar, 512, 0, nil, nil},
	{"SCHEMA_NAME", mysql.TypeVarchar, 64, 0, nil, nil},
	{"DEFAULT_CHARACTER_SET_NAME", mysql.TypeVarchar, 64, 0, nil, nil},
	{"DEFAULT_COLLATION_NAME", mysql.TypeVarchar, 32, 0, nil, nil},
	{"SQL_PATH", mysql.TypeVarchar, 512, 0, nil, nil},
}

// tablesCols defines the columns of information_schema.TABLES.
var tablesCols = []columnInfo{
	{"TABLE_CATALOG", mysql.TypeVarchar, 512, 0, nil, nil},
	{"TABLE_SCHEMA", mysql.TypeVarchar, 64, 0, nil, nil},
	{"TABLE_NAME", mysql.TypeVarchar, 64, 0, nil, nil},
	{"TABLE_TYPE", mysql.TypeVarchar, 64, 0, nil, nil},
	{"ENGINE", mysql.TypeVarchar, 64, 0, nil, nil},
	{"VERSION", mysql.TypeLonglong, 21, 0, nil, nil},
	{"ROW_FORMAT", mysql.TypeVarchar, 10, 0, nil, nil},
	{"TABLE_ROWS", mysql.TypeLonglong, 21, 0, nil, nil},
	{"AVG_ROW_LENGTH", mysql.TypeLonglong, 21, 0, nil, nil},
	{"DATA_LENGTH", mysql.TypeLonglong, 21, 0, nil, nil},
	{"MAX_DATA_LENGTH", mysql.TypeLonglong, 21, 0, nil, nil},
	{"INDEX_LENGTH", mysql.TypeLonglong, 21, 0, nil, nil},
	{"DATA_FREE", mysql.TypeLonglong, 21, 0, nil, nil},
	{"AUTO_INCREMENT", mysql.TypeLonglong, 21, 0, nil, nil},
	{"CREATE_TIME", mysql.TypeDatetime, 19, 0, nil, nil},
	{"UPDATE_TIME", mysql.TypeDatetime, 19, 0, nil, nil},
	{"CHECK_TIME", mysql.TypeDatetime, 19, 0, nil, nil},
	{"TABLE_COLLATION", mysql.TypeVarchar, 32, 0, nil, nil},
	{"CHECK_SUM", mysql.TypeLonglong, 21, 0, nil, nil},
	{"CREATE_OPTIONS", mysql.TypeVarchar, 255, 0, nil, nil},
	{"TABLE_COMMENT", mysql.TypeVarchar, 2048, 0, nil, nil},
}

// columnsCols defines the columns of information_schema.COLUMNS.
// NOTE(review): "ORIGINAL_POSITION" and "CHARACTOR_OCTET_LENGTH" deviate
// from MySQL's standard column names (ORDINAL_POSITION,
// CHARACTER_OCTET_LENGTH). Renaming them would change the visible schema,
// so they are only flagged here — confirm against upstream before fixing.
var columnsCols = []columnInfo{
	{"TABLE_CATALOG", mysql.TypeVarchar, 512, 0, nil, nil},
	{"TABLE_SCHEMA", mysql.TypeVarchar, 64, 0, nil, nil},
	{"TABLE_NAME", mysql.TypeVarchar, 64, 0, nil, nil},
	{"COLUMN_NAME", mysql.TypeVarchar, 64, 0, nil, nil},
	{"ORIGINAL_POSITION", mysql.TypeLonglong, 64, 0, nil, nil},
	{"COLUMN_DEFAULT", mysql.TypeBlob, 196606, 0, nil, nil},
	{"IS_NULLABLE", mysql.TypeVarchar, 3, 0, nil, nil},
	{"DATA_TYPE", mysql.TypeVarchar, 64, 0, nil, nil},
	{"CHARACTER_MAXIMUM_LENGTH", mysql.TypeLonglong, 21, 0, nil, nil},
	{"CHARACTOR_OCTET_LENGTH", mysql.TypeLonglong, 21, 0, nil, nil},
	{"NUMERIC_PRECISION", mysql.TypeLonglong, 21, 0, nil, nil},
	{"NUMERIC_SCALE", mysql.TypeLonglong, 21, 0, nil, nil},
	{"DATETIME_PRECISION", mysql.TypeLonglong, 21, 0, nil, nil},
	{"CHARACTER_SET_NAME", mysql.TypeVarchar, 32, 0, nil, nil},
	{"COLLATION_NAME", mysql.TypeVarchar, 32, 0, nil, nil},
	{"COLUMN_TYPE", mysql.TypeBlob, 196606, 0, nil, nil},
	{"COLUMN_KEY", mysql.TypeVarchar, 3, 0, nil, nil},
	{"EXTRA", mysql.TypeVarchar, 30, 0, nil, nil},
	{"PRIVILEGES", mysql.TypeVarchar, 80, 0, nil, nil},
	{"COLUMN_COMMENT", mysql.TypeVarchar, 1024, 0, nil, nil},
}

// statisticsCols defines the columns of information_schema.STATISTICS.
var statisticsCols = []columnInfo{
	{"TABLE_CATALOG", mysql.TypeVarchar, 512, 0, nil, nil},
	{"TABLE_SCHEMA", mysql.TypeVarchar, 64, 0, nil, nil},
	{"TABLE_NAME", mysql.TypeVarchar, 64, 0, nil, nil},
	{"NON_UNIQUE", mysql.TypeVarchar, 1, 0, nil, nil},
	{"INDEX_SCHEMA", mysql.TypeVarchar, 64, 0, nil, nil},
	{"INDEX_NAME", mysql.TypeVarchar, 64, 0, nil, nil},
	{"SEQ_IN_INDEX", mysql.TypeLonglong, 2, 0, nil, nil},
	{"COLUMN_NAME", mysql.TypeVarchar, 21, 0, nil, nil},
	{"COLLATION", mysql.TypeVarchar, 1, 0, nil, nil},
	{"CARDINALITY", mysql.TypeLonglong, 21, 0, nil, nil},
	{"SUB_PART", mysql.TypeLonglong, 3, 0, nil, nil},
	{"PACKED", mysql.TypeVarchar, 10, 0, nil, nil},
	{"NULLABLE", mysql.TypeVarchar, 3, 0, nil, nil},
	{"INDEX_TYPE", mysql.TypeVarchar, 16, 0, nil, nil},
	{"COMMENT", mysql.TypeVarchar, 16, 0, nil, nil},
	{"INDEX_COMMENT", mysql.TypeVarchar, 1024, 0, nil, nil},
}

// profilingCols defines the columns of information_schema.PROFILING.
var profilingCols = []columnInfo{
	{"QUERY_ID", mysql.TypeLong, 20, 0, nil, nil},
	{"SEQ", mysql.TypeLong, 20, 0, nil, nil},
	{"STATE", mysql.TypeVarchar, 30, 0, nil, nil},
	{"DURATION", mysql.TypeNewDecimal, 9, 0, nil, nil},
	{"CPU_USER", mysql.TypeNewDecimal, 9, 0, nil, nil},
	{"CPU_SYSTEM", mysql.TypeNewDecimal, 9, 0, nil, nil},
	{"CONTEXT_VOLUNTARY", mysql.TypeLong, 20, 0, nil, nil},
	{"CONTEXT_INVOLUNTARY", mysql.TypeLong, 20, 0, nil, nil},
	{"BLOCK_OPS_IN", mysql.TypeLong, 20, 0, nil, nil},
	{"BLOCK_OPS_OUT", mysql.TypeLong, 20, 0, nil, nil},
	{"MESSAGES_SENT", mysql.TypeLong, 20, 0, nil, nil},
	{"MESSAGES_RECEIVED", mysql.TypeLong, 20, 0, nil, nil},
	{"PAGE_FAULTS_MAJOR", mysql.TypeLong, 20, 0, nil, nil},
	{"PAGE_FAULTS_MINOR", mysql.TypeLong, 20, 0, nil, nil},
	{"SWAPS", mysql.TypeLong, 20, 0, nil, nil},
	{"SOURCE_FUNCTION", mysql.TypeVarchar, 30, 0, nil, nil},
	{"SOURCE_FILE", mysql.TypeVarchar, 20, 0, nil, nil},
	{"SOURCE_LINE", mysql.TypeLong, 20, 0, nil, nil},
}

// charsetCols defines the columns of information_schema.CHARACTER_SETS.
var charsetCols = []columnInfo{
	{"CHARACTER_SET_NAME", mysql.TypeVarchar, 32, 0, nil, nil},
	{"DEFAULT_COLLATE_NAME", mysql.TypeVarchar, 32, 0, nil, nil},
	{"DESCRIPTION", mysql.TypeVarchar, 60, 0, nil, nil},
	{"MAXLEN", mysql.TypeLonglong, 3, 0, nil, nil},
}

// collationsCols defines the columns of information_schema.COLLATIONS.
var collationsCols = []columnInfo{
	{"COLLATION_NAME", mysql.TypeVarchar, 32, 0, nil, nil},
	{"CHARACTER_SET_NAME", mysql.TypeVarchar, 32, 0, nil, nil},
	{"ID", mysql.TypeLonglong, 11, 0, nil, nil},
	{"IS_DEFAULT", mysql.TypeVarchar, 3, 0, nil, nil},
	{"IS_COMPILED", mysql.TypeVarchar, 3, 0, nil, nil},
	{"SORTLEN", mysql.TypeLonglong, 3, 0, nil, nil},
}
// dataForCharacterSets returns the static rows served from
// information_schema.CHARACTER_SETS.
func dataForCharacterSets() (records [][]types.Datum) {
	records = [][]types.Datum{
		types.MakeDatums("ascii", "ascii_general_ci", "US ASCII", 1),
		types.MakeDatums("binary", "binary", "Binary pseudo charset", 1),
		types.MakeDatums("latin1", "latin1_swedish_ci", "cp1252 West European", 1),
		types.MakeDatums("utf8", "utf8_general_ci", "UTF-8 Unicode", 3),
		types.MakeDatums("utf8mb4", "utf8mb4_general_ci", "UTF-8 Unicode", 4),
	}
	return records
}
// dataForColltions returns the static rows served from
// information_schema.COLLATIONS. (The typo in the name is kept because
// callers reference it.)
func dataForColltions() (records [][]types.Datum) {
	records = [][]types.Datum{
		types.MakeDatums("ascii_general_ci", "ascii", 1, "Yes", "Yes", 1),
		types.MakeDatums("binary", "binary", 2, "Yes", "Yes", 1),
		types.MakeDatums("latin1_swedish_ci", "latin1", 3, "Yes", "Yes", 1),
		types.MakeDatums("utf8_general_ci", "utf8", 4, "Yes", "Yes", 1),
		types.MakeDatums("utf8mb4_general_ci", "utf8mb4", 5, "Yes", "Yes", 1),
	}
	return records
}
// filesCols defines the columns of information_schema.FILES.
var filesCols = []columnInfo{
	{"FILE_ID", mysql.TypeLonglong, 4, 0, nil, nil},
	{"FILE_NAME", mysql.TypeVarchar, 64, 0, nil, nil},
	{"TABLESPACE_NAME", mysql.TypeVarchar, 20, 0, nil, nil},
	{"TABLE_CATALOG", mysql.TypeVarchar, 64, 0, nil, nil},
	{"TABLE_SCHEMA", mysql.TypeVarchar, 64, 0, nil, nil},
	{"TABLE_NAME", mysql.TypeVarchar, 64, 0, nil, nil},
	{"LOGFILE_GROUP_NAME", mysql.TypeVarchar, 64, 0, nil, nil},
	{"LOGFILE_GROUP_NUMBER", mysql.TypeLonglong, 32, 0, nil, nil},
	{"ENGINE", mysql.TypeVarchar, 64, 0, nil, nil},
	{"FULLTEXT_KEYS", mysql.TypeVarchar, 64, 0, nil, nil},
	{"DELETED_ROWS", mysql.TypeLonglong, 4, 0, nil, nil},
	{"UPDATE_COUNT", mysql.TypeLonglong, 4, 0, nil, nil},
	{"FREE_EXTENTS", mysql.TypeLonglong, 4, 0, nil, nil},
	{"TOTAL_EXTENTS", mysql.TypeLonglong, 4, 0, nil, nil},
	{"EXTENT_SIZE", mysql.TypeLonglong, 4, 0, nil, nil},
	{"INITIAL_SIZE", mysql.TypeLonglong, 21, 0, nil, nil},
	{"MAXIMUM_SIZE", mysql.TypeLonglong, 21, 0, nil, nil},
	{"AUTOEXTEND_SIZE", mysql.TypeLonglong, 21, 0, nil, nil},
	// Datetime columns use -1 (unspecified length).
	{"CREATION_TIME", mysql.TypeDatetime, -1, 0, nil, nil},
	{"LAST_UPDATE_TIME", mysql.TypeDatetime, -1, 0, nil, nil},
	{"LAST_ACCESS_TIME", mysql.TypeDatetime, -1, 0, nil, nil},
	{"RECOVER_TIME", mysql.TypeLonglong, 4, 0, nil, nil},
	{"TRANSACTION_COUNTER", mysql.TypeLonglong, 4, 0, nil, nil},
	{"VERSION", mysql.TypeLonglong, 21, 0, nil, nil},
	{"ROW_FORMAT", mysql.TypeVarchar, 21, 0, nil, nil},
	{"TABLE_ROWS", mysql.TypeLonglong, 21, 0, nil, nil},
	{"AVG_ROW_LENGTH", mysql.TypeLonglong, 21, 0, nil, nil},
	{"DATA_FREE", mysql.TypeLonglong, 21, 0, nil, nil},
	{"CREATE_TIME", mysql.TypeDatetime, -1, 0, nil, nil},
	{"UPDATE_TIME", mysql.TypeDatetime, -1, 0, nil, nil},
	{"CHECK_TIME", mysql.TypeDatetime, -1, 0, nil, nil},
	{"CHECKSUM", mysql.TypeLonglong, 21, 0, nil, nil},
	{"STATUS", mysql.TypeVarchar, 20, 0, nil, nil},
	{"EXTRA", mysql.TypeVarchar, 255, 0, nil, nil},
}
// dataForSchemata builds the rows served from information_schema.SCHEMATA,
// one per schema, ordered by name. The input is copied before sorting so
// the caller's slice is not reordered as a side effect (the original
// called sort.Strings on the argument in place).
func dataForSchemata(schemas []string) [][]types.Datum {
	names := make([]string, len(schemas))
	copy(names, schemas)
	sort.Strings(names)
	rows := make([][]types.Datum, 0, len(names))
	for _, schema := range names {
		record := types.MakeDatums(
			catalogVal,                 // CATALOG_NAME
			schema,                     // SCHEMA_NAME
			mysql.DefaultCharset,       // DEFAULT_CHARACTER_SET_NAME
			mysql.DefaultCollationName, // DEFAULT_COLLATION_NAME
			nil,                        // SQL_PATH
		)
		rows = append(rows, record)
	}
	return rows
}
// dataForTables builds the rows served from information_schema.TABLES:
// one row per table of every given schema. Most statistics columns are
// fixed placeholder values, not real measurements.
func dataForTables(schemas []*model.DBInfo) [][]types.Datum {
	rows := [][]types.Datum{}
	for _, schema := range schemas {
		for _, table := range schema.Tables {
			record := types.MakeDatums(
				catalogVal,          // TABLE_CATALOG
				schema.Name.O,       // TABLE_SCHEMA
				table.Name.O,        // TABLE_NAME
				"BASE_TABLE",        // TABLE_TYPE
				"InnoDB",            // ENGINE
				uint64(10),          // VERSION
				"Compact",           // ROW_FORMAT
				uint64(0),           // TABLE_ROWS
				uint64(0),           // AVG_ROW_LENGTH
				uint64(16384),       // DATA_LENGTH
				uint64(0),           // MAX_DATA_LENGTH
				uint64(0),           // INDEX_LENGTH
				uint64(0),           // DATA_FREE
				nil,                 // AUTO_INCREMENT
				nil,                 // CREATE_TIME
				nil,                 // UPDATE_TIME
				nil,                 // CHECK_TIME
				"latin1_swedish_ci", // TABLE_COLLATION
				nil,                 // CHECKSUM
				"",                  // CREATE_OPTIONS
				"",                  // TABLE_COMMENT
			)
			rows = append(rows, record)
		}
	}
	return rows
}
// dataForColumns builds the rows served from information_schema.COLUMNS
// for every table of every given schema.
func dataForColumns(schemas []*model.DBInfo) [][]types.Datum {
	rows := [][]types.Datum{}
	for _, schema := range schemas {
		for _, table := range schema.Tables {
			// Bulk-append the per-table rows instead of copying
			// them element by element.
			rows = append(rows, dataForColumnsInTable(schema, table)...)
		}
	}
	return rows
}
// dataForColumnsInTable builds one information_schema.COLUMNS row per
// column of the given table.
func dataForColumnsInTable(schema *model.DBInfo, table *model.TableInfo) [][]types.Datum {
	rows := [][]types.Datum{}
	for i, col := range table.Columns {
		// Fall back to the type's default display length when unspecified.
		colLen := col.Flen
		if colLen == types.UnspecifiedLength {
			colLen = mysql.GetDefaultFieldLength(col.Tp)
		}
		decimal := col.Decimal
		if decimal == types.UnspecifiedLength {
			decimal = 0
		}
		columnType := col.FieldType.CompactStr()
		columnDesc := column.NewColDesc(&column.Col{ColumnInfo: *col})
		var columnDefault interface{}
		if columnDesc.DefaultValue != nil {
			columnDefault = fmt.Sprintf("%v", columnDesc.DefaultValue)
		}
		record := types.MakeDatums(
			catalogVal,      // TABLE_CATALOG
			schema.Name.O,   // TABLE_SCHEMA
			table.Name.O,    // TABLE_NAME
			col.Name.O,      // COLUMN_NAME
			i+1,             // ORIGINAL_POSITION
			columnDefault,   // COLUMN_DEFAULT
			columnDesc.Null, // IS_NULLABLE
			types.TypeToStr(col.Tp, col.Charset), // DATA_TYPE
			colLen, // CHARACTER_MAXIMUM_LENGTH
			colLen, // CHARACTOR_OCTET_LENGTH
			// NOTE(review): col.Decimal reported as NUMERIC_PRECISION while
			// NUMERIC_SCALE is fixed 0 — looks swapped; confirm upstream.
			decimal,        // NUMERIC_PRECISION
			0,              // NUMERIC_SCALE
			0,              // DATETIME_PRECISION
			col.Charset,    // CHARACTER_SET_NAME
			col.Collate,    // COLLATION_NAME
			columnType,     // COLUMN_TYPE
			columnDesc.Key, // COLUMN_KEY
			columnDesc.Extra,                   // EXTRA
			"select,insert,update,references", // PRIVILEGES
			"",                                // COLUMN_COMMENT
		)
		rows = append(rows, record)
	}
	return rows
}
// dataForStatistics builds the rows served from
// information_schema.STATISTICS for every table of every given schema.
func dataForStatistics(schemas []*model.DBInfo) [][]types.Datum {
	rows := [][]types.Datum{}
	for _, schema := range schemas {
		for _, table := range schema.Tables {
			// Bulk-append the per-table rows instead of copying
			// them element by element.
			rows = append(rows, dataForStatisticsInTable(schema, table)...)
		}
	}
	return rows
}
// dataForStatisticsInTable builds information_schema.STATISTICS rows for
// one table: a synthetic PRIMARY row when the primary key is the row
// handle, plus one row per column of every index.
func dataForStatisticsInTable(schema *model.DBInfo, table *model.TableInfo) [][]types.Datum {
	rows := [][]types.Datum{}
	if table.PKIsHandle {
		for _, col := range table.Columns {
			if mysql.HasPriKeyFlag(col.Flag) {
				record := types.MakeDatums(
					catalogVal,    // TABLE_CATALOG
					schema.Name.O, // TABLE_SCHEMA
					table.Name.O,  // TABLE_NAME
					"0",           // NON_UNIQUE
					schema.Name.O, // INDEX_SCHEMA
					"PRIMARY",     // INDEX_NAME
					1,             // SEQ_IN_INDEX
					col.Name.O,    // COLUMN_NAME
					"A",           // COLLATION
					0,             // CARDINALITY
					nil,           // SUB_PART
					nil,           // PACKED
					"",            // NULLABLE
					"BTREE",       // INDEX_TYPE
					"",            // COMMENT
					"",            // INDEX_COMMENT
				)
				rows = append(rows, record)
			}
		}
	}
	nameToCol := make(map[string]*model.ColumnInfo, len(table.Columns))
	for _, c := range table.Columns {
		nameToCol[c.Name.L] = c
	}
	for _, index := range table.Indices {
		nonUnique := "1"
		if index.Unique {
			nonUnique = "0"
		}
		for i, key := range index.Columns {
			// NOTE(review): a miss here yields a nil col and panics on
			// col.Flag below; presumably index columns always exist in
			// table.Columns — verify.
			col := nameToCol[key.Name.L]
			nullable := "YES"
			if mysql.HasNotNullFlag(col.Flag) {
				nullable = ""
			}
			record := types.MakeDatums(
				catalogVal,    // TABLE_CATALOG
				schema.Name.O, // TABLE_SCHEMA
				table.Name.O,  // TABLE_NAME
				nonUnique,     // NON_UNIQUE
				schema.Name.O, // INDEX_SCHEMA
				index.Name.O,  // INDEX_NAME
				i+1,           // SEQ_IN_INDEX
				key.Name.O,    // COLUMN_NAME
				"A",           // COLLATION
				0,             // CARDINALITY
				nil,           // SUB_PART
				nil,           // PACKED
				nullable,      // NULLABLE
				"BTREE",       // INDEX_TYPE
				"",            // COMMENT
				"",            // INDEX_COMMENT
			)
			rows = append(rows, record)
		}
	}
	return rows
}
// tableNameToColumns maps each Information_Schema table name to its column
// definitions; initMemoryTables builds one memory table per entry.
var tableNameToColumns = map[string]([]columnInfo){
	tableSchemata:      schemataCols,
	tableTables:        tablesCols,
	tableColumns:       columnsCols,
	tableStatistics:    statisticsCols,
	tableCharacterSets: charsetCols,
	tableCollations:    collationsCols,
	tableFiles:         filesCols,
	tableProfiling:     profilingCols,
}
// createMemoryTable creates an in-memory table from meta, using alloc for
// row IDs. The error from MemoryTableFromMeta is now propagated; the
// original discarded it and always returned nil, which could hand callers
// a nil table with a nil error.
func createMemoryTable(meta *model.TableInfo, alloc autoid.Allocator) (table.Table, error) {
	tbl, err := tables.MemoryTableFromMeta(alloc, meta)
	if err != nil {
		return nil, err
	}
	return tbl, nil
}

452
vendor/github.com/pingcap/tidb/inspectkv/inspectkv.go generated vendored Normal file
View file

@ -0,0 +1,452 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package inspectkv
import (
"io"
"reflect"
"github.com/juju/errors"
"github.com/ngaut/log"
"github.com/pingcap/tidb/column"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/meta"
"github.com/pingcap/tidb/model"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/table/tables"
"github.com/pingcap/tidb/terror"
"github.com/pingcap/tidb/util"
"github.com/pingcap/tidb/util/types"
)
// DDLInfo is for DDL information.
type DDLInfo struct {
	SchemaVer   int64        // current schema version
	ReorgHandle int64        // it's only used for DDL information.
	Owner       *model.Owner // current DDL job owner
	Job         *model.Job   // first pending DDL job, nil if none
}
// GetDDLInfo returns DDL information.
// It reads the DDL owner, the first queued job and the schema version from
// the meta data of txn; when a job exists, its reorganization handle is
// fetched as well.
func GetDDLInfo(txn kv.Transaction) (*DDLInfo, error) {
	var err error
	info := &DDLInfo{}
	t := meta.NewMeta(txn)
	info.Owner, err = t.GetDDLJobOwner()
	if err != nil {
		return nil, errors.Trace(err)
	}
	info.Job, err = t.GetDDLJob(0)
	if err != nil {
		return nil, errors.Trace(err)
	}
	info.SchemaVer, err = t.GetSchemaVersion()
	if err != nil {
		return nil, errors.Trace(err)
	}
	// No pending job: nothing to reorganize.
	if info.Job == nil {
		return info, nil
	}
	info.ReorgHandle, err = t.GetDDLReorgHandle(info.Job)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return info, nil
}
// GetBgDDLInfo returns background DDL information.
// Unlike GetDDLInfo it reads the background job queue and does not fetch a
// reorganization handle.
func GetBgDDLInfo(txn kv.Transaction) (*DDLInfo, error) {
	var err error
	info := &DDLInfo{}
	t := meta.NewMeta(txn)
	info.Owner, err = t.GetBgJobOwner()
	if err != nil {
		return nil, errors.Trace(err)
	}
	info.Job, err = t.GetBgJob(0)
	if err != nil {
		return nil, errors.Trace(err)
	}
	info.SchemaVer, err = t.GetSchemaVersion()
	if err != nil {
		return nil, errors.Trace(err)
	}
	return info, nil
}
// nextIndexVals returns the seek position immediately after the given
// index values by appending a zero Datum to the tuple.
func nextIndexVals(data []types.Datum) []types.Datum {
	var zero types.Datum
	return append(data, zero)
}
// RecordData is the record data composed of a handle and values.
type RecordData struct {
	Handle int64         // row handle
	Values []types.Datum // column values for that row
}
// GetIndexRecordsCount returns the total number of the index records from startVals.
// If startVals = nil, returns the total number of the index records.
func GetIndexRecordsCount(txn kv.Transaction, kvIndex kv.Index, startVals []types.Datum) (int64, error) {
	it, _, err := kvIndex.Seek(txn, startVals)
	if err != nil {
		return 0, errors.Trace(err)
	}
	defer it.Close()
	count := int64(0)
	for {
		if _, _, e := it.Next(); e != nil {
			if terror.ErrorEqual(e, io.EOF) {
				return count, nil
			}
			return 0, errors.Trace(e)
		}
		count++
	}
}
// ScanIndexData scans the index handles and values in a limited number, according to the index information.
// It returns data and the next startVals until it doesn't have data, then returns data is nil and
// the next startVals is the values which can't get data. If startVals = nil and limit = -1,
// it returns the index data of the whole.
func ScanIndexData(txn kv.Transaction, kvIndex kv.Index, startVals []types.Datum, limit int64) (
	[]*RecordData, []types.Datum, error) {
	it, _, err := kvIndex.Seek(txn, startVals)
	if err != nil {
		return nil, nil, errors.Trace(err)
	}
	defer it.Close()
	var idxRows []*RecordData
	var curVals []types.Datum
	// limit < 0 means unlimited: the loop condition never fails and only
	// EOF ends it.
	for limit != 0 {
		val, h, err1 := it.Next()
		if terror.ErrorEqual(err1, io.EOF) {
			return idxRows, nextIndexVals(curVals), nil
		} else if err1 != nil {
			return nil, nil, errors.Trace(err1)
		}
		idxRows = append(idxRows, &RecordData{Handle: h, Values: val})
		limit--
		curVals = val
	}
	// Limit exhausted: peek once more to compute the resume position.
	nextVals, _, err := it.Next()
	if terror.ErrorEqual(err, io.EOF) {
		return idxRows, nextIndexVals(curVals), nil
	} else if err != nil {
		return nil, nil, errors.Trace(err)
	}
	return idxRows, nextVals, nil
}
// CompareIndexData compares index data one by one.
// It returns nil if the data from the index is equal to the data from the table columns,
// otherwise it returns an error with a different set of records.
func CompareIndexData(txn kv.Transaction, t table.Table, idx *column.IndexedCol) error {
	// Check both directions: index entries against rows, then rows
	// against index entries.
	if err := checkIndexAndRecord(txn, t, idx); err != nil {
		return errors.Trace(err)
	}
	return checkRecordAndIndex(txn, t, idx)
}
// checkIndexAndRecord walks every entry of idx and verifies that the
// corresponding table row exists and carries identical column values.
// A missing row or mismatched values is reported as an
// "index != record" error.
func checkIndexAndRecord(txn kv.Transaction, t table.Table, idx *column.IndexedCol) error {
	kvIndex := kv.NewKVIndex(t.IndexPrefix(), idx.Name.L, idx.ID, idx.Unique)
	it, err := kvIndex.SeekFirst(txn)
	if err != nil {
		return errors.Trace(err)
	}
	defer it.Close()
	// Resolve the table columns covered by the index, in index order.
	cols := make([]*column.Col, len(idx.Columns))
	for i, col := range idx.Columns {
		cols[i] = t.Cols()[col.Offset]
	}
	for {
		vals1, h, err := it.Next()
		if terror.ErrorEqual(err, io.EOF) {
			break
		} else if err != nil {
			return errors.Trace(err)
		}
		vals2, err := rowWithCols(txn, t, h, cols)
		// A dangling index entry: row is gone but the index still points at it.
		if terror.ErrorEqual(err, kv.ErrNotExist) {
			record := &RecordData{Handle: h, Values: vals1}
			err = errors.Errorf("index:%v != record:%v", record, nil)
		}
		if err != nil {
			return errors.Trace(err)
		}
		if !reflect.DeepEqual(vals1, vals2) {
			record1 := &RecordData{Handle: h, Values: vals1}
			record2 := &RecordData{Handle: h, Values: vals2}
			return errors.Errorf("index:%v != record:%v", record1, record2)
		}
	}
	return nil
}
// checkRecordAndIndex walks every table row and verifies that a matching
// entry exists in idx for the row's indexed column values. A missing or
// conflicting entry is reported as an "index != record" error.
func checkRecordAndIndex(txn kv.Transaction, t table.Table, idx *column.IndexedCol) error {
	cols := make([]*column.Col, len(idx.Columns))
	for i, col := range idx.Columns {
		cols[i] = t.Cols()[col.Offset]
	}
	startKey := t.RecordKey(0, nil)
	kvIndex := kv.NewKVIndex(t.IndexPrefix(), idx.Name.L, idx.ID, idx.Unique)
	filterFunc := func(h1 int64, vals1 []types.Datum, cols []*column.Col) (bool, error) {
		isExist, h2, err := kvIndex.Exist(txn, vals1, h1)
		// Same values exist under a different handle: unique-index conflict.
		if terror.ErrorEqual(err, kv.ErrKeyExists) {
			record1 := &RecordData{Handle: h1, Values: vals1}
			record2 := &RecordData{Handle: h2, Values: vals1}
			return false, errors.Errorf("index:%v != record:%v", record2, record1)
		}
		if err != nil {
			return false, errors.Trace(err)
		}
		// Row has no index entry at all.
		if !isExist {
			record := &RecordData{Handle: h1, Values: vals1}
			return false, errors.Errorf("index:%v != record:%v", nil, record)
		}
		return true, nil
	}
	err := iterRecords(txn, t, startKey, cols, filterFunc)
	if err != nil {
		return errors.Trace(err)
	}
	return nil
}
// scanTableData collects up to limit rows (all rows when limit < 0) of t
// starting at startHandle, returning the rows and the next handle to
// resume from. When nothing was read, the next handle is startHandle
// itself.
func scanTableData(retriever kv.Retriever, t table.Table, cols []*column.Col, startHandle, limit int64) (
	[]*RecordData, int64, error) {
	var records []*RecordData
	startKey := t.RecordKey(startHandle, nil)
	filterFunc := func(h int64, d []types.Datum, cols []*column.Col) (bool, error) {
		// limit < 0 never reaches 0, so the scan runs to the end.
		if limit != 0 {
			r := &RecordData{
				Handle: h,
				Values: d,
			}
			records = append(records, r)
			limit--
			return true, nil
		}
		return false, nil
	}
	err := iterRecords(retriever, t, startKey, cols, filterFunc)
	if err != nil {
		return nil, 0, errors.Trace(err)
	}
	if len(records) == 0 {
		return records, startHandle, nil
	}
	// Resume just past the last handle actually read.
	nextHandle := records[len(records)-1].Handle + 1
	return records, nextHandle, nil
}
// ScanTableRecord scans table row handles and column values in a limited number.
// It returns data and the next startHandle until it doesn't have data, then returns data is nil and
// the next startHandle is the handle which can't get data. If startHandle = 0 and limit = -1,
// it returns the table data of the whole.
// It is a convenience wrapper around scanTableData using all of t's columns.
func ScanTableRecord(retriever kv.Retriever, t table.Table, startHandle, limit int64) (
	[]*RecordData, int64, error) {
	return scanTableData(retriever, t, t.Cols(), startHandle, limit)
}
// ScanSnapshotTableRecord scans the ver version of the table data in a limited number.
// It returns data and the next startHandle until it doesn't have data, then returns data is nil and
// the next startHandle is the handle which can't get data. If startHandle = 0 and limit = -1,
// it returns the table data of the whole.
func ScanSnapshotTableRecord(store kv.Storage, ver kv.Version, t table.Table, startHandle, limit int64) (
	[]*RecordData, int64, error) {
	// Read from a point-in-time snapshot instead of a live transaction.
	snap, err := store.GetSnapshot(ver)
	if err != nil {
		return nil, 0, errors.Trace(err)
	}
	defer snap.Release()
	records, nextHandle, err := ScanTableRecord(snap, t, startHandle, limit)
	return records, nextHandle, errors.Trace(err)
}
// CompareTableRecord compares data and the corresponding table data one by one.
// It returns nil if data is equal to the data that scans from table, otherwise
// it returns an error with a different set of records. If exact is false, only compares handle.
func CompareTableRecord(txn kv.Transaction, t table.Table, data []*RecordData, exact bool) error {
	// Index the expected rows by handle, rejecting duplicates.
	m := make(map[int64][]types.Datum, len(data))
	for _, r := range data {
		if _, ok := m[r.Handle]; ok {
			return errors.Errorf("handle:%d is repeated in data", r.Handle)
		}
		m[r.Handle] = r.Values
	}
	startKey := t.RecordKey(0, nil)
	filterFunc := func(h int64, vals []types.Datum, cols []*column.Col) (bool, error) {
		vals2, ok := m[h]
		if !ok {
			record := &RecordData{Handle: h, Values: vals}
			return false, errors.Errorf("data:%v != record:%v", nil, record)
		}
		if !exact {
			delete(m, h)
			return true, nil
		}
		if !reflect.DeepEqual(vals, vals2) {
			record1 := &RecordData{Handle: h, Values: vals2}
			record2 := &RecordData{Handle: h, Values: vals}
			return false, errors.Errorf("data:%v != record:%v", record1, record2)
		}
		// Matched entries are removed so leftovers signal rows missing
		// from the table.
		delete(m, h)
		return true, nil
	}
	err := iterRecords(txn, t, startKey, t.Cols(), filterFunc)
	if err != nil {
		return errors.Trace(err)
	}
	// Any entry still in m was never seen in the table; report one of them
	// (map iteration order makes the choice arbitrary).
	for h, vals := range m {
		record := &RecordData{Handle: h, Values: vals}
		return errors.Errorf("data:%v != record:%v", record, nil)
	}
	return nil
}
// GetTableRecordsCount returns the total number of table records from startHandle.
// If startHandle = 0, returns the total number of table records.
// It counts distinct row handles by seeking to handle+1 after each hit,
// which skips the per-column keys belonging to the same row.
func GetTableRecordsCount(txn kv.Transaction, t table.Table, startHandle int64) (int64, error) {
	startKey := t.RecordKey(startHandle, nil)
	it, err := txn.Seek(startKey)
	if err != nil {
		return 0, errors.Trace(err)
	}
	var cnt int64
	prefix := t.RecordPrefix()
	for it.Valid() && it.Key().HasPrefix(prefix) {
		handle, err := tables.DecodeRecordKeyHandle(it.Key())
		if err != nil {
			return 0, errors.Trace(err)
		}
		// Close the current iterator before re-seeking past this row.
		it.Close()
		rk := t.RecordKey(handle+1, nil)
		it, err = txn.Seek(rk)
		if err != nil {
			return 0, errors.Trace(err)
		}
		cnt++
	}
	it.Close()
	return cnt, nil
}
// rowWithCols fetches the values of cols for the row with handle h.
// All requested columns must be in the public schema state.
func rowWithCols(txn kv.Retriever, t table.Table, h int64, cols []*column.Col) ([]types.Datum, error) {
	v := make([]types.Datum, len(cols))
	for i, col := range cols {
		if col.State != model.StatePublic {
			return nil, errors.Errorf("Cannot use none public column - %v", cols)
		}
		if col.IsPKHandleColumn(t.Meta()) {
			// An integer primary key is the handle itself; no kv read needed.
			v[i].SetInt64(h)
			continue
		}

		// Each non-handle column is stored under its own record key.
		k := t.RecordKey(h, col)
		data, err := txn.Get(k)
		if err != nil {
			return nil, errors.Trace(err)
		}

		val, err := tables.DecodeValue(data, &col.FieldType)
		if err != nil {
			return nil, errors.Trace(err)
		}
		v[i] = val
	}
	return v, nil
}
// iterRecords walks the table rows starting at startKey and calls fn once per
// row with its handle and the decoded values of cols. Iteration stops when fn
// returns more == false, when fn or the store reports an error, or when the
// keys leave the table's record prefix.
func iterRecords(retriever kv.Retriever, t table.Table, startKey kv.Key, cols []*column.Col,
	fn table.RecordIterFunc) error {
	it, err := retriever.Seek(startKey)
	if err != nil {
		return errors.Trace(err)
	}
	defer it.Close()

	if !it.Valid() {
		return nil
	}

	log.Debugf("startKey:%q, key:%q, value:%q", startKey, it.Key(), it.Value())

	prefix := t.RecordPrefix()
	for it.Valid() && it.Key().HasPrefix(prefix) {
		// first kv pair is row lock information.
		// TODO: check valid lock
		// get row handle
		handle, err := tables.DecodeRecordKeyHandle(it.Key())
		if err != nil {
			return errors.Trace(err)
		}

		data, err := rowWithCols(retriever, t, handle, cols)
		if err != nil {
			return errors.Trace(err)
		}
		more, err := fn(handle, data, cols)
		if !more || err != nil {
			return errors.Trace(err)
		}

		// Skip the remaining kv pairs of this row and advance to the next handle.
		rk := t.RecordKey(handle, nil)
		err = kv.NextUntil(it, util.RowKeyPrefixFilter(rk))
		if err != nil {
			return errors.Trace(err)
		}
	}

	return nil
}

142
vendor/github.com/pingcap/tidb/kv/btree_buffer.go generated vendored Normal file
View file

@ -0,0 +1,142 @@
// Copyright 2015 PingCAP, Inc.
//
// Copyright 2015 Wenbin Xiao
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package kv
import (
"io"
"github.com/juju/errors"
"github.com/pingcap/tidb/kv/memkv"
"github.com/pingcap/tidb/terror"
"github.com/pingcap/tidb/util/types"
)
// btreeBuffer is a MemBuffer implementation backed by an in-memory B+tree.
type btreeBuffer struct {
	tree *memkv.Tree
}

// NewBTreeBuffer returns a btreeBuffer.
func NewBTreeBuffer() MemBuffer {
	return &btreeBuffer{
		tree: memkv.NewTree(types.Collators[true]),
	}
}
// Get returns the value associated with the key; ErrNotExist error if the key does not exist.
func (b *btreeBuffer) Get(k Key) ([]byte, error) {
	v, ok := b.tree.Get(toIfaces(k))
	if !ok {
		return nil, ErrNotExist
	}
	return fromIfaces(v), nil
}

// Set associates the key with the value.
// Empty values are rejected because a nil tree value is the deletion marker.
func (b *btreeBuffer) Set(k Key, v []byte) error {
	if len(v) == 0 {
		return errors.Trace(ErrCannotSetNilValue)
	}
	b.tree.Set(toIfaces(k), toIfaces(v))
	return nil
}

// Delete removes the entry from buffer with provided key.
// The key stays in the tree with a nil value acting as a tombstone.
func (b *btreeBuffer) Delete(k Key) error {
	b.tree.Set(toIfaces(k), nil)
	return nil
}

// Release clear the whole buffer.
func (b *btreeBuffer) Release() {
	b.tree.Clear()
}
// btreeIter is an Iterator over a btreeBuffer; it caches the current
// key/value pair and whether the position is valid.
type btreeIter struct {
	e *memkv.Enumerator
	k Key
	v []byte
	ok bool
}

// Seek creates a new Iterator based on the provided key.
// A nil key positions the iterator at the first entry of the buffer.
func (b *btreeBuffer) Seek(k Key) (Iterator, error) {
	var e *memkv.Enumerator
	var err error
	if k == nil {
		e, err = b.tree.SeekFirst()
		if err != nil {
			if terror.ErrorEqual(err, io.EOF) {
				// Empty tree: an exhausted iterator, not an error.
				return &btreeIter{ok: false}, nil
			}
			return &btreeIter{ok: false}, errors.Trace(err)
		}
	} else {
		key := toIfaces([]byte(k))
		e, _ = b.tree.Seek(key)
	}
	iter := &btreeIter{e: e}
	// the initial push...
	err = iter.Next()
	if err != nil {
		return &btreeIter{ok: false}, errors.Trace(err)
	}
	return iter, nil
}
// Close implements Iterator Close.
func (i *btreeIter) Close() {
	//noop
}

// Key implements Iterator Key.
func (i *btreeIter) Key() Key {
	return i.k
}

// Value implements Iterator Value.
func (i *btreeIter) Value() []byte {
	return i.v
}

// Next implements Iterator Next.
// Reaching the end of the tree (io.EOF) invalidates the iterator without
// reporting an error; any other enumerator error is propagated.
func (i *btreeIter) Next() error {
	k, v, err := i.e.Next()
	if err != nil {
		i.ok = false
		if terror.ErrorEqual(err, io.EOF) {
			return nil
		}
		return errors.Trace(err)
	}
	i.k, i.v, i.ok = fromIfaces(k), fromIfaces(v), true
	return nil
}

// Valid implements Iterator Valid.
func (i *btreeIter) Valid() bool {
	return i.ok
}
// toIfaces boxes a byte slice into the single-element []interface{} form
// that the memkv tree uses for keys and values.
func toIfaces(v []byte) []interface{} {
	return []interface{}{v}
}

// fromIfaces unboxes a value produced by toIfaces. A nil slice (the
// deletion marker) yields a nil byte slice.
func fromIfaces(v []interface{}) []byte {
	if v == nil {
		return nil
	}
	return v[0].([]byte)
}

93
vendor/github.com/pingcap/tidb/kv/buffer_store.go generated vendored Normal file
View file

@ -0,0 +1,93 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package kv
import (
"github.com/juju/errors"
)
// BufferStore wraps a Retriever for read and a MemBuffer for buffered write.
// Common usage pattern:
// bs := NewBufferStore(r) // use BufferStore to wrap a Retriever
// defer bs.Release() // make sure it will be released
// // ...
// // read/write on bs
// // ...
// bs.SaveTo(m) // save above operations to a Mutator
type BufferStore struct {
	MemBuffer
	r Retriever
}

// NewBufferStore creates a BufferStore using r for read.
// Writes go into a lazily-allocated MemBuffer.
func NewBufferStore(r Retriever) *BufferStore {
	return &BufferStore{
		r: r,
		MemBuffer: &lazyMemBuffer{},
	}
}
// Get implements the Retriever interface.
// It consults the write buffer first and falls back to the underlying
// Retriever on a miss; an empty value is treated as a deletion.
func (s *BufferStore) Get(k Key) ([]byte, error) {
	val, err := s.MemBuffer.Get(k)
	if IsErrNotFound(err) {
		val, err = s.r.Get(k)
	}
	if err != nil {
		return nil, errors.Trace(err)
	}
	if len(val) == 0 {
		return nil, errors.Trace(ErrNotExist)
	}
	return val, nil
}
// Seek implements the Retriever interface.
// The returned iterator merges buffered writes with the underlying data.
func (s *BufferStore) Seek(k Key) (Iterator, error) {
	bufferIt, err := s.MemBuffer.Seek(k)
	if err != nil {
		return nil, errors.Trace(err)
	}
	retrieverIt, err := s.r.Seek(k)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return newUnionIter(bufferIt, retrieverIt), nil
}
// WalkBuffer iterates all buffered kv pairs, invoking f for each one.
// Iteration stops at the first error returned by f or by the underlying
// iterator.
func (s *BufferStore) WalkBuffer(f func(k Key, v []byte) error) error {
	iter, err := s.MemBuffer.Seek(nil)
	if err != nil {
		return errors.Trace(err)
	}
	defer iter.Close()
	for iter.Valid() {
		if err := f(iter.Key(), iter.Value()); err != nil {
			return errors.Trace(err)
		}
		// Bug fix: the advance error was previously discarded in the loop
		// post-statement, which could turn an iterator failure into a
		// silent early stop.
		if err := iter.Next(); err != nil {
			return errors.Trace(err)
		}
	}
	return nil
}
// SaveTo replays every buffered mutation onto the Mutator m: empty values
// become deletes, everything else becomes sets.
func (s *BufferStore) SaveTo(m Mutator) error {
	replay := func(k Key, v []byte) error {
		if len(v) == 0 {
			return errors.Trace(m.Delete(k))
		}
		return errors.Trace(m.Set(k, v))
	}
	return errors.Trace(s.WalkBuffer(replay))
}

58
vendor/github.com/pingcap/tidb/kv/bufpool.go generated vendored Normal file
View file

@ -0,0 +1,58 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package kv
import (
"sync"
"github.com/ngaut/log"
)
// A cache holds a set of reusable objects.
// The slice is a stack (LIFO).
// If more are needed, the cache creates them by calling new.
type cache struct {
	mu sync.Mutex
	name string
	saved []MemBuffer
	// factory invoked when the stack is empty
	fact func() MemBuffer
}
// put returns x to the pool. When the pool is already at capacity the
// buffer is dropped with a warning instead of growing the stack.
func (c *cache) put(x MemBuffer) {
	c.mu.Lock()
	if len(c.saved) < cap(c.saved) {
		c.saved = append(c.saved, x)
	} else {
		log.Warnf("%s is full, size: %d, you may need to increase pool size", c.name, len(c.saved))
	}
	c.mu.Unlock()
}

// get pops a buffer from the pool, or builds a fresh one via the factory
// when the pool is empty. The factory deliberately runs after releasing
// the lock so a slow allocation does not block other callers.
func (c *cache) get() MemBuffer {
	c.mu.Lock()
	n := len(c.saved)
	if n == 0 {
		c.mu.Unlock()
		return c.fact()
	}
	x := c.saved[n-1]
	c.saved = c.saved[0 : n-1]
	c.mu.Unlock()
	return x
}
// newCache creates a cache that keeps at most size reusable MemBuffers
// and falls back to fact when the pool is empty.
// The second parameter was renamed from cap to size so it no longer
// shadows the built-in cap function.
func newCache(name string, size int, fact func() MemBuffer) *cache {
	return &cache{name: name, saved: make([]MemBuffer, 0, size), fact: fact}
}

91
vendor/github.com/pingcap/tidb/kv/error.go generated vendored Normal file
View file

@ -0,0 +1,91 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package kv
import (
"errors"
"strings"
"github.com/pingcap/go-themis"
"github.com/pingcap/tidb/mysql"
"github.com/pingcap/tidb/terror"
)
// KV error codes.
const (
	CodeIncompatibleDBFormat terror.ErrCode = 1
	CodeNoDataForHandle terror.ErrCode = 2
	CodeKeyExists terror.ErrCode = 3
)

var (
	// ErrClosed is used when close an already closed txn.
	ErrClosed = errors.New("Error: Transaction already closed")
	// ErrNotExist is used when try to get an entry with a nonexistent key from KV store.
	ErrNotExist = errors.New("Error: key not exist")
	// ErrConditionNotMatch is used when condition is not met.
	ErrConditionNotMatch = errors.New("Error: Condition not match")
	// ErrLockConflict is used when try to lock an already locked key.
	ErrLockConflict = errors.New("Error: Lock conflict")
	// ErrLazyConditionPairsNotMatch is used when value in store differs from expect pairs.
	ErrLazyConditionPairsNotMatch = errors.New("Error: Lazy condition pairs not match")
	// ErrRetryable is used when KV store occurs RPC error or some other
	// errors which SQL layer can safely retry.
	ErrRetryable = errors.New("Error: KV error safe to retry")
	// ErrCannotSetNilValue is the error when sets an empty value.
	ErrCannotSetNilValue = errors.New("can not set nil value")
	// ErrInvalidTxn is the error when commits or rollbacks in an invalid transaction.
	ErrInvalidTxn = errors.New("invalid transaction")
	// ErrNotCommitted is the error returned by CommitVersion when this
	// transaction is not committed.
	ErrNotCommitted = errors.New("this transaction has not committed")
	// ErrKeyExists returns when key is already exist.
	ErrKeyExists = terror.ClassKV.New(CodeKeyExists, "key already exist")
)
// init registers the mapping from KV-layer error codes to MySQL error
// codes so these errors surface to clients as proper MySQL errors.
func init() {
	kvMySQLErrCodes := map[terror.ErrCode]uint16{
		CodeKeyExists: mysql.ErrDupEntry,
	}
	terror.ErrClassToMySQLCodes[terror.ClassKV] = kvMySQLErrCodes
}
// IsRetryableError reports whether err is transient, so the caller may
// safely retry the failed operation.
func IsRetryableError(err error) bool {
	if err == nil {
		return false
	}
	switch {
	case terror.ErrorEqual(err, ErrRetryable),
		terror.ErrorEqual(err, ErrLockConflict),
		terror.ErrorEqual(err, ErrConditionNotMatch),
		terror.ErrorEqual(err, themis.ErrRetryable):
		return true
	}
	// HBase exception message will tell you if you should retry or not
	return strings.Contains(err.Error(), "try again later")
}
// IsErrNotFound checks if err is a kind of NotFound error (i.e. ErrNotExist).
// Collapsed the if/return-bool form into a direct boolean return.
func IsErrNotFound(err error) bool {
	return terror.ErrorEqual(err, ErrNotExist)
}

290
vendor/github.com/pingcap/tidb/kv/index_iter.go generated vendored Normal file
View file

@ -0,0 +1,290 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package kv
import (
"bytes"
"encoding/binary"
"io"
"github.com/juju/errors"
"github.com/pingcap/tidb/util/codec"
"github.com/pingcap/tidb/util/types"
)
var (
	// Compile-time interface satisfaction checks.
	_ Index = (*kvIndex)(nil)
	_ IndexIterator = (*indexIter)(nil)
)

// IndexIterator is the interface for iterator of index data on KV store.
type IndexIterator interface {
	// Next returns the next indexed values and the row handle; it returns
	// io.EOF when the iteration ends.
	Next() (k []types.Datum, h int64, err error)
	// Close releases the iterator.
	Close()
}

// Index is the interface for index data on KV store.
type Index interface {
	// Create supports insert into statement.
	Create(rm RetrieverMutator, indexedValues []types.Datum, h int64) error
	// Delete supports delete from statement.
	Delete(m Mutator, indexedValues []types.Datum, h int64) error
	// Drop supports drop table, drop index statements.
	Drop(rm RetrieverMutator) error
	// Exist supports check index exists or not.
	Exist(rm RetrieverMutator, indexedValues []types.Datum, h int64) (bool, int64, error)
	// GenIndexKey generates an index key.
	GenIndexKey(indexedValues []types.Datum, h int64) (key []byte, distinct bool, err error)
	// Seek supports where clause.
	Seek(r Retriever, indexedValues []types.Datum) (iter IndexIterator, hit bool, err error)
	// SeekFirst supports aggregate min and ascend order by.
	SeekFirst(r Retriever) (iter IndexIterator, err error)
}
// encodeHandle converts a row handle to its fixed 8-byte big-endian
// representation, which sorts in the same order as the integer.
// Rewritten to write directly into a preallocated slice: the previous
// bytes.Buffer + binary.Write form allocated twice and carried a dead
// panic path (writes to a bytes.Buffer cannot fail).
func encodeHandle(h int64) []byte {
	buf := make([]byte, 8)
	binary.BigEndian.PutUint64(buf, uint64(h))
	return buf
}
// decodeHandle decodes an 8-byte big-endian buffer produced by
// encodeHandle back into a row handle.
func decodeHandle(data []byte) (int64, error) {
	var h int64
	buf := bytes.NewBuffer(data)
	err := binary.Read(buf, binary.BigEndian, &h)
	return h, errors.Trace(err)
}
// indexIter is for KV store index iterator.
type indexIter struct {
	it Iterator
	idx *kvIndex
	prefix Key
}

// Close does the clean up works when KV store index iterator is closed.
// It is safe to call multiple times.
func (c *indexIter) Close() {
	if c.it != nil {
		c.it.Close()
		c.it = nil
	}
}
// Next returns current key and moves iterator to the next step.
// It returns io.EOF once the iterator leaves this index's key prefix.
func (c *indexIter) Next() (val []types.Datum, h int64, err error) {
	if !c.it.Valid() {
		return nil, 0, errors.Trace(io.EOF)
	}
	if !c.it.Key().HasPrefix(c.prefix) {
		return nil, 0, errors.Trace(io.EOF)
	}
	// get indexedValues
	buf := c.it.Key()[len(c.prefix):]
	vv, err := codec.Decode(buf)
	if err != nil {
		return nil, 0, errors.Trace(err)
	}
	// if index is *not* unique, the handle is in keybuf
	if !c.idx.unique {
		h = vv[len(vv)-1].GetInt64()
		val = vv[0 : len(vv)-1]
	} else {
		// otherwise handle is value
		h, err = decodeHandle(c.it.Value())
		if err != nil {
			return nil, 0, errors.Trace(err)
		}
		val = vv
	}
	// update new iter to next
	err = c.it.Next()
	if err != nil {
		return nil, 0, errors.Trace(err)
	}
	return
}
// kvIndex is the data structure for index data in the KV store.
type kvIndex struct {
	indexName string
	indexID int64
	unique bool
	// prefix is the storage key prefix shared by all entries of this index.
	prefix Key
}

// GenIndexPrefix generates the index prefix.
func GenIndexPrefix(indexPrefix Key, indexID int64) Key {
	buf := make([]byte, 0, len(indexPrefix)+8)
	buf = append(buf, indexPrefix...)
	buf = codec.EncodeInt(buf, indexID)
	return buf
}

// NewKVIndex builds a new kvIndex object.
func NewKVIndex(indexPrefix Key, indexName string, indexID int64, unique bool) Index {
	index := &kvIndex{
		indexName: indexName,
		indexID: indexID,
		unique: unique,
		prefix: GenIndexPrefix(indexPrefix, indexID),
	}
	return index
}
// GenIndexKey generates storage key for index values. Returned distinct indicates whether the
// indexed values should be distinct in storage (i.e. whether handle is encoded in the key).
func (c *kvIndex) GenIndexKey(indexedValues []types.Datum, h int64) (key []byte, distinct bool, err error) {
	if c.unique {
		// See: https://dev.mysql.com/doc/refman/5.7/en/create-index.html
		// A UNIQUE index creates a constraint such that all values in the index must be distinct.
		// An error occurs if you try to add a new row with a key value that matches an existing row.
		// For all engines, a UNIQUE index permits multiple NULL values for columns that can contain NULL.
		distinct = true
		for _, cv := range indexedValues {
			if cv.Kind() == types.KindNull {
				distinct = false
				break
			}
		}
	}

	key = append(key, c.prefix...)
	if distinct {
		key, err = codec.EncodeKey(key, indexedValues...)
	} else {
		// Append the handle so identical indexed values produce distinct
		// storage keys for a non-unique (or NULL-containing) entry.
		key, err = codec.EncodeKey(key, append(indexedValues, types.NewDatum(h))...)
	}
	if err != nil {
		return nil, false, errors.Trace(err)
	}
	return
}
// Create creates a new entry in the kvIndex data.
// If the index is unique and there is an existing entry with the same key, Create will return ErrKeyExists.
func (c *kvIndex) Create(rm RetrieverMutator, indexedValues []types.Datum, h int64) error {
	key, distinct, err := c.GenIndexKey(indexedValues, h)
	if err != nil {
		return errors.Trace(err)
	}
	if !distinct {
		// Non-distinct keys already embed the handle, so no existence check
		// is needed before writing.
		// TODO: reconsider value
		err = rm.Set(key, []byte("timestamp?"))
		return errors.Trace(err)
	}

	// Distinct entries store the handle as the value; only insert when the
	// key is absent.
	_, err = rm.Get(key)
	if IsErrNotFound(err) {
		err = rm.Set(key, encodeHandle(h))
		return errors.Trace(err)
	}

	return errors.Trace(ErrKeyExists)
}
// Delete removes the index entry for the row with handle h and the given
// indexed values.
func (c *kvIndex) Delete(m Mutator, indexedValues []types.Datum, h int64) error {
	key, _, err := c.GenIndexKey(indexedValues, h)
	if err != nil {
		return errors.Trace(err)
	}
	return errors.Trace(m.Delete(key))
}
// Drop removes the KV index from store.
// It scans every key under the index prefix and deletes them one by one.
func (c *kvIndex) Drop(rm RetrieverMutator) error {
	it, err := rm.Seek(c.prefix)
	if err != nil {
		return errors.Trace(err)
	}
	defer it.Close()

	// remove all indices
	for it.Valid() {
		if !it.Key().HasPrefix(c.prefix) {
			break
		}
		err := rm.Delete(it.Key())
		if err != nil {
			return errors.Trace(err)
		}
		err = it.Next()
		if err != nil {
			return errors.Trace(err)
		}
	}
	return nil
}
// Seek searches KV index for the entry with indexedValues.
// hit reports whether an entry with exactly that key was found.
func (c *kvIndex) Seek(r Retriever, indexedValues []types.Datum) (iter IndexIterator, hit bool, err error) {
	key, _, err := c.GenIndexKey(indexedValues, 0)
	if err != nil {
		return nil, false, errors.Trace(err)
	}
	it, err := r.Seek(key)
	if err != nil {
		return nil, false, errors.Trace(err)
	}
	// check if hit
	hit = false
	if it.Valid() && it.Key().Cmp(key) == 0 {
		hit = true
	}
	return &indexIter{it: it, idx: c, prefix: c.prefix}, hit, nil
}

// SeekFirst returns an iterator which points to the first entry of the KV index.
func (c *kvIndex) SeekFirst(r Retriever) (iter IndexIterator, err error) {
	it, err := r.Seek(c.prefix)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return &indexIter{it: it, idx: c, prefix: c.prefix}, nil
}
// Exist checks whether an index entry exists for indexedValues.
// For a distinct (unique) index it also returns the handle stored in the
// entry, and ErrKeyExists when that handle belongs to a different row.
func (c *kvIndex) Exist(rm RetrieverMutator, indexedValues []types.Datum, h int64) (bool, int64, error) {
	key, distinct, err := c.GenIndexKey(indexedValues, h)
	if err != nil {
		return false, 0, errors.Trace(err)
	}

	value, err := rm.Get(key)
	if IsErrNotFound(err) {
		return false, 0, nil
	}
	if err != nil {
		return false, 0, errors.Trace(err)
	}

	// For distinct index, the value of key is handle.
	if distinct {
		handle, err := decodeHandle(value)
		if err != nil {
			return false, 0, errors.Trace(err)
		}

		if handle != h {
			return true, handle, errors.Trace(ErrKeyExists)
		}

		return true, handle, nil
	}

	return true, h, nil
}

29
vendor/github.com/pingcap/tidb/kv/iter.go generated vendored Normal file
View file

@ -0,0 +1,29 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package kv
import "github.com/juju/errors"
// NextUntil advances it until fn reports true for the current key, the
// iterator becomes invalid, or advancing fails.
func NextUntil(it Iterator, fn FnKeyCmp) error {
	for it.Valid() && !fn(it.Key()) {
		if err := it.Next(); err != nil {
			return errors.Trace(err)
		}
	}
	return nil
}

57
vendor/github.com/pingcap/tidb/kv/key.go generated vendored Normal file
View file

@ -0,0 +1,57 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package kv
import "bytes"
// Key represents high-level Key type.
type Key []byte

// Next returns the smallest key strictly greater than k in byte order,
// formed by appending a zero byte.
func (k Key) Next() Key {
	buf := make([]byte, len(k)+1)
	copy(buf, k)
	return buf
}

// Cmp returns the comparison result of two key.
// The result will be 0 if a==b, -1 if a < b, and +1 if a > b.
func (k Key) Cmp(another Key) int {
	return bytes.Compare(k, another)
}

// HasPrefix tests whether the Key begins with prefix.
func (k Key) HasPrefix(prefix Key) bool {
	return bytes.HasPrefix(k, prefix)
}

// Clone returns an independent copy of the Key.
func (k Key) Clone() Key {
	return append([]byte(nil), k...)
}

// EncodedKey represents encoded key in low-level storage engine.
type EncodedKey []byte

// Cmp returns the comparison result of two key.
// The result will be 0 if a==b, -1 if a < b, and +1 if a > b.
func (k EncodedKey) Cmp(another EncodedKey) int {
	return bytes.Compare(k, another)
}

// Next returns the next key in byte-order, formed by appending a zero byte.
func (k EncodedKey) Next() EncodedKey {
	buf := make([]byte, len(k)+1)
	copy(buf, k)
	return buf
}

172
vendor/github.com/pingcap/tidb/kv/kv.go generated vendored Normal file
View file

@ -0,0 +1,172 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package kv
import "io"
const (
	// PresumeKeyNotExists directs that when dealing with a Get operation but failing to read data from cache,
	// we presume that the key does not exist in Store. The actual existence will be checked before the
	// transaction's commit.
	// This option is an optimization for frequent checks during a transaction, e.g. batch inserts.
	PresumeKeyNotExists Option = iota + 1
	// PresumeKeyNotExistsError is the option key for error.
	// When PresumeKeyNotExists is set and condition is not match, should throw the error.
	PresumeKeyNotExistsError
)

// Retriever is the interface that wraps the basic Get and Seek methods.
type Retriever interface {
	// Get gets the value for key k from kv store.
	// If corresponding kv pair does not exist, it returns nil and ErrNotExist.
	Get(k Key) ([]byte, error)
	// Seek creates an Iterator positioned on the first entry that k <= entry's key.
	// If such entry is not found, it returns an invalid Iterator with no error.
	// The Iterator must be Closed after use.
	Seek(k Key) (Iterator, error)
}

// Mutator is the interface that wraps the basic Set and Delete methods.
type Mutator interface {
	// Set sets the value for key k as v into kv store.
	// v must NOT be nil or empty, otherwise it returns ErrCannotSetNilValue.
	Set(k Key, v []byte) error
	// Delete removes the entry for key k from kv store.
	Delete(k Key) error
}

// RetrieverMutator is the interface that groups Retriever and Mutator interfaces.
type RetrieverMutator interface {
	Retriever
	Mutator
}

// MemBuffer is an in-memory kv collection. It should be released after use.
type MemBuffer interface {
	RetrieverMutator
	// Release releases the buffer.
	Release()
}
// Transaction defines the interface for operations inside a Transaction.
// This is not thread safe.
type Transaction interface {
	RetrieverMutator
	// Commit commits the transaction operations to KV store.
	Commit() error
	// Rollback undoes the transaction operations to KV store.
	Rollback() error
	// String implements fmt.Stringer interface.
	String() string
	// LockKeys tries to lock the entries with the keys in KV store.
	LockKeys(keys ...Key) error
	// SetOption sets an option with a value, when val is nil, uses the default
	// value of this option.
	SetOption(opt Option, val interface{})
	// DelOption deletes an option.
	DelOption(opt Option)
	// IsReadOnly checks if the transaction has only performed read operations.
	IsReadOnly() bool
	// GetClient gets a client instance.
	GetClient() Client
	// StartTS returns the transaction start timestamp.
	StartTS() int64
}

// Client is used to send request to KV layer.
type Client interface {
	// Send sends request to KV layer, returns a Response.
	Send(req *Request) Response
	// SupportRequestType checks if reqType and subType is supported.
	SupportRequestType(reqType, subType int64) bool
}

// ReqTypes are the supported request types for Client.Send.
const (
	ReqTypeSelect = 101
	ReqTypeIndex = 102
)

// KeyRange represents a range where StartKey <= key < EndKey.
type KeyRange struct {
	StartKey Key
	EndKey Key
}

// Request represents a kv request.
type Request struct {
	// Tp is the request type.
	Tp int64
	Data []byte
	// KeyRanges is the key ranges the request covers.
	KeyRanges []KeyRange
	// If Desc is true, the request is sent in descending order.
	Desc bool
	// If Concurrency is 1, it only sends the request to a single storage unit when
	// ResponseIterator.Next is called. If Concurrency is greater than 1, the request will be
	// sent to multiple storage units concurrently.
	Concurrency int
}

// Response represents the response returned from KV layer.
type Response interface {
	// Next returns a resultSubset from a single storage unit.
	// When full result set is returned, nil is returned.
	Next() (resultSubset io.ReadCloser, err error)
}
// Snapshot defines the interface for the snapshot fetched from KV store.
type Snapshot interface {
	Retriever
	// BatchGet gets a batch of values from snapshot.
	BatchGet(keys []Key) (map[string][]byte, error)
	// Release releases the snapshot to store.
	Release()
}

// Driver is the interface that must be implemented by a KV storage.
type Driver interface {
	// Open returns a new Storage.
	// The path is the string for storage specific format.
	Open(path string) (Storage, error)
}

// Storage defines the interface for storage.
// Isolation should be at least SI(SNAPSHOT ISOLATION)
type Storage interface {
	// Begin starts a new transaction.
	Begin() (Transaction, error)
	// GetSnapshot gets a snapshot that is able to read any data which data is <= ver.
	// if ver is MaxVersion or > current max committed version, we will use current version for this snapshot.
	GetSnapshot(ver Version) (Snapshot, error)
	// Close closes the store.
	Close() error
	// UUID returns the storage's unique ID.
	UUID() string
	// CurrentVersion returns current max committed version.
	CurrentVersion() (Version, error)
}

// FnKeyCmp is the type of the filter function used while iterating keys.
type FnKeyCmp func(key Key) bool

// Iterator is the interface for a iterator on KV store.
type Iterator interface {
	// Valid reports whether the iterator is positioned on an entry.
	Valid() bool
	// Key returns the key of the current entry.
	Key() Key
	// Value returns the value of the current entry.
	Value() []byte
	// Next advances the iterator.
	Next() error
	// Close releases the iterator.
	Close()
}

106
vendor/github.com/pingcap/tidb/kv/memdb_buffer.go generated vendored Normal file
View file

@ -0,0 +1,106 @@
// Copyright 2015 PingCAP, Inc.
//
// Copyright 2015 Wenbin Xiao
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package kv
import (
"github.com/juju/errors"
"github.com/pingcap/tidb/terror"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/comparer"
"github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/syndtr/goleveldb/leveldb/memdb"
"github.com/syndtr/goleveldb/leveldb/util"
)
// memDbBuffer is a MemBuffer backed by goleveldb's in-memory memdb.
type memDbBuffer struct {
	db *memdb.DB
}

// memDbIter adapts a goleveldb iterator to the Iterator interface.
type memDbIter struct {
	iter iterator.Iterator
}

// NewMemDbBuffer creates a new memDbBuffer.
// 4*1024 is the initial capacity passed to the underlying memdb.
func NewMemDbBuffer() MemBuffer {
	return &memDbBuffer{db: memdb.New(comparer.DefaultComparer, 4*1024)}
}
// Seek creates an Iterator positioned at the first entry >= k.
// A nil key iterates from the start of the buffer.
func (m *memDbBuffer) Seek(k Key) (Iterator, error) {
	var i Iterator
	if k == nil {
		i = &memDbIter{iter: m.db.NewIterator(&util.Range{})}
	} else {
		i = &memDbIter{iter: m.db.NewIterator(&util.Range{Start: []byte(k)})}
	}
	// Position the fresh iterator on its first entry.
	i.Next()
	return i, nil
}
// Get returns the value associated with key k, or ErrNotExist when the
// key is absent.
func (m *memDbBuffer) Get(k Key) ([]byte, error) {
	v, err := m.db.Get(k)
	if terror.ErrorEqual(err, leveldb.ErrNotFound) {
		return nil, ErrNotExist
	}
	if err != nil {
		// Bug fix: non-NotFound errors were previously discarded and a
		// nil value returned as success.
		return nil, errors.Trace(err)
	}
	return v, nil
}
// Set associates key with value.
// Empty values are rejected because nil is reserved as the deletion marker.
func (m *memDbBuffer) Set(k Key, v []byte) error {
	if len(v) == 0 {
		return errors.Trace(ErrCannotSetNilValue)
	}
	err := m.db.Put(k, v)
	return errors.Trace(err)
}

// Delete removes the entry from buffer with provided key.
// The key is kept with a nil value acting as a tombstone.
func (m *memDbBuffer) Delete(k Key) error {
	err := m.db.Put(k, nil)
	return errors.Trace(err)
}

// Release reset the buffer.
func (m *memDbBuffer) Release() {
	m.db.Reset()
}
// Next implements the Iterator Next.
// Underlying iterator errors are not surfaced here; callers observe the
// end of iteration through Valid.
func (i *memDbIter) Next() error {
	i.iter.Next()
	return nil
}

// Valid implements the Iterator Valid.
func (i *memDbIter) Valid() bool {
	return i.iter.Valid()
}

// Key implements the Iterator Key.
func (i *memDbIter) Key() Key {
	return i.iter.Key()
}

// Value implements the Iterator Value.
func (i *memDbIter) Value() []byte {
	return i.iter.Value()
}

// Close Implements the Iterator Close.
func (i *memDbIter) Close() {
	i.iter.Release()
}

739
vendor/github.com/pingcap/tidb/kv/memkv/btree.go generated vendored Normal file
View file

@ -0,0 +1,739 @@
// Copyright 2013 The ql Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSES/QL-LICENSE file.
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package memkv
import (
"io"
)
const (
	kx = 128 //DONE benchmark tune this number if using custom key/value type(s).
	kd = 64 //DONE benchmark tune this number if using custom key/value type(s).
)

type (
	// cmp compares a and b. Return value is:
	//
	// < 0 if a < b
	// 0 if a == b
	// > 0 if a > b
	//
	cmp func(a, b []interface{}) int

	// d is a leaf (data) page holding up to 2*kd+1 elements, doubly
	// linked to its neighbouring data pages.
	d struct { // data page
		c int
		d [2*kd + 1]de
		n *d
		p *d
	}

	// de is a single key/value element of a data page.
	de struct { // d element
		k []interface{}
		v []interface{}
	}

	// Enumerator is the iterator for btree
	Enumerator struct {
		err error
		hit bool
		i int
		k []interface{}
		q *d
		t *Tree
		ver int64
	}

	// Tree is a B+tree.
	Tree struct {
		c int
		cmp cmp
		first *d
		last *d
		r interface{}
		ver int64
	}

	// xe is a child/separator element of an index page.
	xe struct { // x element
		ch interface{}
		sep *d
	}

	// x is an interior (index) page with up to 2*kx+2 children.
	x struct { // index page
		c int
		x [2*kx + 2]xe
	}
)

var ( // R/O zero values
	zd d
	zde de
	zx x
	zxe xe
)
// clr recursively zeroes page q and every page below it so the garbage
// collector can reclaim the keys/values they reference.
func clr(q interface{}) {
	switch z := q.(type) {
	case *x:
		for i := 0; i <= z.c; i++ { // Ch0 Sep0 ... Chn-1 Sepn-1 Chn
			clr(z.x[i].ch)
		}
		*z = zx // GC
	case *d:
		*z = zd // GC
	}
}
// -------------------------------------------------------------------------- x

// newX returns a new index page whose single child is ch0.
func newX(ch0 interface{}) *x {
	r := &x{}
	r.x[0].ch = ch0
	return r
}

// extract removes separator i (and its following child slot) from q,
// shifting the remaining slots left and wiping the freed tail for the GC.
func (q *x) extract(i int) {
	q.c--
	if i < q.c {
		copy(q.x[i:], q.x[i+1:q.c+1])
		q.x[q.c].ch = q.x[q.c+1].ch
		q.x[q.c].sep = nil // GC
		q.x[q.c+1] = zxe   // GC
	}
}

// insert places separator d with child ch at slot i of q, shifting later
// slots right. q must have spare capacity. Returns q.
func (q *x) insert(i int, d *d, ch interface{}) *x {
	c := q.c
	if i < c {
		q.x[c+1].ch = q.x[c].ch
		copy(q.x[i+2:], q.x[i+1:c])
		q.x[i+1].sep = q.x[i].sep
	}
	c++
	q.c = c
	q.x[i].sep = d
	q.x[i+1].ch = ch
	return q
}

// siblings returns the data-page children immediately left and right of
// child slot i of q (nil where no such sibling exists). The children at this
// level must be data pages (*d).
func (q *x) siblings(i int) (l, r *d) {
	if i >= 0 {
		if i > 0 {
			l = q.x[i-1].ch.(*d)
		}
		if i < q.c {
			r = q.x[i+1].ch.(*d)
		}
	}
	return
}
// -------------------------------------------------------------------------- d

// mvL moves the first c elements of r to the end of l (rebalance leftwards).
func (l *d) mvL(r *d, c int) {
	copy(l.d[l.c:], r.d[:c])
	copy(r.d[:], r.d[c:r.c])
	l.c += c
	r.c -= c
}

// mvR moves the last c elements of l to the front of r (rebalance rightwards).
func (l *d) mvR(r *d, c int) {
	copy(r.d[c:], r.d[:r.c])
	copy(r.d[:c], l.d[l.c-c:])
	r.c += c
	l.c -= c
}
// ----------------------------------------------------------------------- tree

// NewTree returns a newly created, empty tree. The compare function is used
// for key collation.
func NewTree(cmp cmp) *Tree {
	return &Tree{cmp: cmp}
}

// Clear removes all K/V pairs from the tree.
// It zeroes every page so the GC can reclaim keys/values, resets the tree to
// its empty state, and bumps the version to invalidate live Enumerators.
func (t *Tree) Clear() {
	if t.r == nil {
		return
	}
	clr(t.r)
	t.c, t.first, t.last, t.r = 0, nil, nil, nil
	t.ver++
}
// cat concatenates underflowed data page r into its left sibling q, unlinks r
// from the page list, and removes it from parent p (separator index pi).
// When the parent would become empty, q becomes the new root.
func (t *Tree) cat(p *x, q, r *d, pi int) {
	t.ver++
	q.mvL(r, r.c)
	if r.n != nil {
		r.n.p = q
	} else {
		t.last = q
	}
	q.n = r.n
	if p.c > 1 {
		p.extract(pi)
		p.x[pi].ch = q
	} else {
		t.r = q
	}
}

// catX concatenates underflowed index page r — together with the separator
// between them, p.x[pi].sep — into its left sibling q, and removes r from
// parent p. When p would become empty, q becomes the new root.
func (t *Tree) catX(p, q, r *x, pi int) {
	t.ver++
	q.x[q.c].sep = p.x[pi].sep
	copy(q.x[q.c+1:], r.x[:r.c])
	q.c += r.c + 1
	q.x[q.c].ch = r.x[r.c].ch
	if p.c > 1 {
		p.c--
		pc := p.c
		if pi < pc {
			p.x[pi].sep = p.x[pi+1].sep
			copy(p.x[pi+1:], p.x[pi+2:pc+1])
			p.x[pc].ch = p.x[pc+1].ch
			p.x[pc].sep = nil  // GC
			p.x[pc+1].ch = nil // GC
		}
		return
	}
	t.r = q
}
// Delete removes the k's KV pair, if it exists, in which case Delete returns
// true. Pages are proactively rebalanced on the way down so the final removal
// never leaves a page below minimum occupancy.
func (t *Tree) Delete(k []interface{}) (ok bool) {
	pi := -1 // index of q within its parent p (-1 while q is the root)
	var p *x
	q := t.r
	if q == nil {
		return
	}
	for {
		var i int
		i, ok = t.find(q, k)
		if ok {
			switch z := q.(type) {
			case *x:
				// Exact match on a separator: the matching element is the
				// first element of the separator's data page.
				dp := z.x[i].sep
				switch {
				case dp.c > kd:
					t.extract(dp, 0)
				default:
					// Separator page at minimum occupancy: rebalance this
					// index page and descend towards the element instead.
					if z.c < kx && q != t.r {
						t.underflowX(p, &z, pi, &i)
					}
					pi = i + 1
					p = z
					q = z.x[pi].ch
					ok = false
					continue
				}
			case *d:
				t.extract(z, i)
				if z.c >= kd {
					return
				}
				if q != t.r {
					t.underflow(p, z, pi)
				} else if t.c == 0 {
					t.Clear()
				}
			}
			return
		}
		// No match at this level: keep descending, rebalancing thin index
		// pages as we pass through them.
		switch z := q.(type) {
		case *x:
			if z.c < kx && q != t.r {
				t.underflowX(p, &z, pi, &i)
			}
			pi = i
			p = z
			q = z.x[i].ch
		case *d:
			return
		}
	}
}

// extract removes element i from data page q, compacting the page, wiping the
// freed slot for the GC, and decrementing the tree's element count.
func (t *Tree) extract(q *d, i int) { // (r []interface{}) {
	t.ver++
	//r = q.d[i].v // prepared for Extract
	q.c--
	if i < q.c {
		copy(q.d[i:], q.d[i+1:q.c+1])
	}
	q.d[q.c] = zde // GC
	t.c--
	return
}
// find binary-searches page q for key k. It returns (index, true) on an exact
// match; otherwise (index, false) where index is the child slot to descend
// into (for an *x page) or the insertion position (for a *d page).
func (t *Tree) find(q interface{}, k []interface{}) (i int, ok bool) {
	var mk []interface{}
	l := 0
	switch z := q.(type) {
	case *x:
		h := z.c - 1
		for l <= h {
			m := (l + h) >> 1
			mk = z.x[m].sep.d[0].k // routing key of separator m
			switch cmp := t.cmp(k, mk); {
			case cmp > 0:
				l = m + 1
			case cmp == 0:
				return m, true
			default:
				h = m - 1
			}
		}
	case *d:
		h := z.c - 1
		for l <= h {
			m := (l + h) >> 1
			mk = z.d[m].k
			switch cmp := t.cmp(k, mk); {
			case cmp > 0:
				l = m + 1
			case cmp == 0:
				return m, true
			default:
				h = m - 1
			}
		}
	}
	return l, false
}
// First returns the first item of the tree in the key collating order, or
// (nil, nil) if the tree is empty.
func (t *Tree) First() (k []interface{}, v []interface{}) {
	if q := t.first; q != nil {
		q := &q.d[0]
		k, v = q.k, q.v
	}
	return
}

// Get returns the value associated with k and true if it exists. Otherwise Get
// returns (nil, false).
func (t *Tree) Get(k []interface{}) (v []interface{}, ok bool) {
	q := t.r
	if q == nil {
		return
	}
	for {
		var i int
		if i, ok = t.find(q, k); ok {
			switch z := q.(type) {
			case *x:
				// Exact hit on a separator: the separator data page's first
				// element is the matching pair.
				return z.x[i].sep.d[0].v, true
			case *d:
				return z.d[i].v, true
			}
		}
		switch z := q.(type) {
		case *x:
			q = z.x[i].ch // descend
		default:
			return // reached a data page without a match
		}
	}
}

// insert places the pair (k, v) at position i of data page q, which must have
// spare capacity. It updates the tree's element count and returns q.
func (t *Tree) insert(q *d, i int, k []interface{}, v []interface{}) *d {
	t.ver++
	c := q.c
	if i < c {
		copy(q.d[i+1:], q.d[i:c])
	}
	c++
	q.c = c
	q.d[i].k, q.d[i].v = k, v
	t.c++
	return q
}

// Last returns the last item of the tree in the key collating order, or (nil,
// nil) if the tree is empty.
func (t *Tree) Last() (k []interface{}, v []interface{}) {
	if q := t.last; q != nil {
		q := &q.d[q.c-1]
		k, v = q.k, q.v
	}
	return
}

// Len returns the number of items in the tree.
func (t *Tree) Len() int {
	return t.c
}
// overflow handles inserting (k, v) at position i of full data page q
// (parent p, child index pi): it first tries to shed one element to a
// non-full left or right sibling, and only splits q as a last resort.
func (t *Tree) overflow(p *x, q *d, pi, i int, k []interface{}, v []interface{}) {
	t.ver++
	l, r := p.siblings(pi)
	if l != nil && l.c < 2*kd {
		l.mvL(q, 1)
		t.insert(q, i-1, k, v) // position shifts left by the moved element
		return
	}
	if r != nil && r.c < 2*kd {
		if i < 2*kd {
			q.mvR(r, 1)
			t.insert(q, i, k, v)
		} else {
			// Inserting at the very end of q: place directly at the front of r.
			t.insert(r, 0, k, v)
		}
		return
	}
	t.split(p, q, pi, i, k, v)
}
// Seek returns an Enumerator positioned on an item such that k >= item's
// key. ok reports if k == item.key. The Enumerator's position is possibly
// after the last item in the tree.
func (t *Tree) Seek(k []interface{}) (e *Enumerator, ok bool) {
	q := t.r
	if q == nil {
		// Empty tree: an exhausted enumerator that remembers k.
		e = &Enumerator{nil, false, 0, k, nil, t, t.ver}
		return
	}
	for {
		var i int
		if i, ok = t.find(q, k); ok {
			switch z := q.(type) {
			case *x:
				// Exact hit on a separator: position on its data page.
				e = &Enumerator{nil, ok, 0, k, z.x[i].sep, t, t.ver}
				return
			case *d:
				e = &Enumerator{nil, ok, i, k, z, t, t.ver}
				return
			}
		}
		switch z := q.(type) {
		case *x:
			q = z.x[i].ch // descend
		case *d:
			// Not found: i is the smallest position with key >= k.
			e = &Enumerator{nil, ok, i, k, z, t, t.ver}
			return
		}
	}
}

// SeekFirst returns an Enumerator positioned on the first KV pair in the tree,
// if any. For an empty tree, err == io.EOF is returned and e will be nil.
func (t *Tree) SeekFirst() (e *Enumerator, err error) {
	q := t.first
	if q == nil {
		return nil, io.EOF
	}
	return &Enumerator{nil, true, 0, q.d[0].k, q, t, t.ver}, nil
}

// SeekLast returns an Enumerator positioned on the last KV pair in the tree,
// if any. For an empty tree, err == io.EOF is returned and e will be nil.
func (t *Tree) SeekLast() (e *Enumerator, err error) {
	q := t.last
	if q == nil {
		return nil, io.EOF
	}
	return &Enumerator{nil, true, q.c - 1, q.d[q.c-1].k, q, t, t.ver}, nil
}
// Set sets the value associated with k, inserting a new pair when the key is
// absent. Full index pages are split proactively on the way down so that a
// split never needs to propagate upwards.
func (t *Tree) Set(k []interface{}, v []interface{}) {
	pi := -1 // index of q within its parent p (-1 while q is the root)
	var p *x
	q := t.r
	if q != nil {
		for {
			i, ok := t.find(q, k)
			if ok {
				// Key exists: overwrite the value in place.
				switch z := q.(type) {
				case *x:
					z.x[i].sep.d[0].v = v
				case *d:
					z.d[i].v = v
				}
				return
			}
			switch z := q.(type) {
			case *x:
				if z.c > 2*kx {
					t.splitX(p, &z, pi, &i)
				}
				pi = i
				p = z
				q = z.x[i].ch
			case *d:
				switch {
				case z.c < 2*kd:
					t.insert(z, i, k, v)
				default:
					t.overflow(p, z, pi, i, k, v)
				}
				return
			}
		}
	}
	// Empty tree: create the first data page holding the single pair.
	z := t.insert(&d{}, 0, k, v)
	t.r, t.first, t.last = z, z, z
	return
}
// split divides full data page q into q plus a new right neighbour r (kd
// elements each), links r into the page list and into parent p (creating a
// new root when q was the root), then inserts (k, v) into whichever half
// position i falls in.
func (t *Tree) split(p *x, q *d, pi, i int, k []interface{}, v []interface{}) {
	t.ver++
	r := &d{}
	if q.n != nil {
		r.n = q.n
		r.n.p = r
	} else {
		t.last = r
	}
	q.n = r
	r.p = q
	copy(r.d[:], q.d[kd:2*kd])
	for i := range q.d[kd:] {
		q.d[kd+i] = zde // GC
	}
	q.c = kd
	r.c = kd
	if pi >= 0 {
		p.insert(pi, r, r)
	} else {
		t.r = newX(q).insert(0, r, r)
	}
	if i > kd {
		t.insert(r, i-kd, k, v)
		return
	}
	t.insert(q, i, k, v)
}

// splitX divides full index page *pp into *pp plus a new right neighbour r
// (kx separators each), pushing the middle separator up into parent p (or a
// new root). *i — the caller's descent index — and *pp are adjusted so the
// caller keeps descending into the correct half.
func (t *Tree) splitX(p *x, pp **x, pi int, i *int) {
	t.ver++
	q := *pp
	r := &x{}
	copy(r.x[:], q.x[kx+1:])
	q.c = kx
	r.c = kx
	if pi >= 0 {
		p.insert(pi, q.x[kx].sep, r)
	} else {
		t.r = newX(q).insert(0, q.x[kx].sep, r)
	}
	q.x[kx].sep = nil
	for i := range q.x[kx+1:] {
		q.x[kx+i+1] = zxe // GC
	}
	if *i > kx {
		*pp = r
		*i -= kx + 1
	}
}
// underflow rebalances data page q (parent p, child index pi) after its
// occupancy fell below kd: it borrows one element from a sibling when the
// pair can afford it, otherwise concatenates q with a sibling.
func (t *Tree) underflow(p *x, q *d, pi int) {
	t.ver++
	l, r := p.siblings(pi)
	if l != nil && l.c+q.c >= 2*kd {
		l.mvR(q, 1)
	} else if r != nil && q.c+r.c >= 2*kd {
		q.mvL(r, 1)
		r.d[r.c] = zde // GC
	} else if l != nil {
		t.cat(p, l, q, pi-1)
	} else {
		t.cat(p, q, r, pi)
	}
}

// underflowX rebalances index page *pp (parent p, child index pi) before
// descending through it: it rotates one separator/child from a sibling when
// possible, otherwise concatenates with a sibling. *i (and possibly *pp) are
// adjusted so the caller's descent index stays valid.
func (t *Tree) underflowX(p *x, pp **x, pi int, i *int) {
	t.ver++
	var l, r *x
	q := *pp
	if pi >= 0 {
		if pi > 0 {
			l = p.x[pi-1].ch.(*x)
		}
		if pi < p.c {
			r = p.x[pi+1].ch.(*x)
		}
	}
	if l != nil && l.c > kx {
		// Rotate right: the parent separator moves down in front of q's
		// slots and l's last child becomes q's first child.
		q.x[q.c+1].ch = q.x[q.c].ch
		copy(q.x[1:], q.x[:q.c])
		q.x[0].ch = l.x[l.c].ch
		q.x[0].sep = p.x[pi-1].sep
		q.c++
		*i++
		l.c--
		p.x[pi-1].sep = l.x[l.c].sep
		return
	}
	if r != nil && r.c > kx {
		// Rotate left: the parent separator moves down at the end of q and
		// r's first child becomes q's last child.
		q.x[q.c].sep = p.x[pi].sep
		q.c++
		q.x[q.c].ch = r.x[0].ch
		p.x[pi].sep = r.x[0].sep
		copy(r.x[:], r.x[1:r.c])
		r.c--
		rc := r.c
		r.x[rc].ch = r.x[rc+1].ch
		r.x[rc].sep = nil  // GC
		r.x[rc+1].ch = nil // GC
		return
	}
	if l != nil {
		*i += l.c + 1
		t.catX(p, l, q, pi-1)
		*pp = l
		return
	}
	t.catX(p, q, r, pi)
}
// ----------------------------------------------------------------- Enumerator

// Next returns the currently enumerated item, if it exists and moves to the
// next item in the key collation order. If there is no item to return, err ==
// io.EOF is returned.
func (e *Enumerator) Next() (k []interface{}, v []interface{}, err error) {
	if err = e.err; err != nil {
		return
	}
	if e.ver != e.t.ver {
		// The tree changed since this position was taken: re-seek to e.k.
		f, hit := e.t.Seek(e.k)
		if !e.hit && hit {
			// We had already passed e.k but the re-seek found it exactly;
			// step over it so it isn't returned twice.
			if err = f.next(); err != nil {
				return
			}
		}
		*e = *f
	}
	if e.q == nil {
		e.err, err = io.EOF, io.EOF
		return
	}
	if e.i >= e.q.c {
		if err = e.next(); err != nil {
			return
		}
	}
	i := e.q.d[e.i]
	k, v = i.k, i.v
	e.k, e.hit = k, false
	// Any advance error is recorded in e.err and surfaces on the next call.
	e.next()
	return
}

// next advances the enumerator one element forward, moving to the next data
// page when the current one is exhausted. It returns — and records in
// e.err — io.EOF when there is no next element.
func (e *Enumerator) next() error {
	if e.q == nil {
		e.err = io.EOF
		return io.EOF
	}
	switch {
	case e.i < e.q.c-1:
		e.i++
	default:
		if e.q, e.i = e.q.n, 0; e.q == nil {
			e.err = io.EOF
		}
	}
	return e.err
}
// Prev returns the currently enumerated item, if it exists and moves to the
// previous item in the key collation order. If there is no item to return, err
// == io.EOF is returned.
func (e *Enumerator) Prev() (k []interface{}, v []interface{}, err error) {
	if err = e.err; err != nil {
		return
	}
	if e.ver != e.t.ver {
		// The tree changed since this position was taken: re-seek to e.k.
		f, hit := e.t.Seek(e.k)
		if !e.hit && hit {
			if err = f.prev(); err != nil {
				return
			}
		}
		*e = *f
	}
	if e.q == nil {
		e.err, err = io.EOF, io.EOF
		return
	}
	if e.i >= e.q.c {
		// NOTE(review): this steps FORWARD with next() even though Prev
		// walks backwards; it only triggers when a Seek left e.i one past
		// the page's last element. Looks suspicious — confirm against the
		// upstream ql/b sources before changing.
		if err = e.next(); err != nil {
			return
		}
	}
	i := e.q.d[e.i]
	k, v = i.k, i.v
	e.k, e.hit = k, false
	// Any retreat error is recorded in e.err and surfaces on the next call.
	e.prev()
	return
}

// prev moves the enumerator one element backward, stepping to the previous
// data page when the front of the current one is reached. It returns — and
// records in e.err — io.EOF when there is no previous element.
func (e *Enumerator) prev() error {
	if e.q == nil {
		e.err = io.EOF
		return io.EOF
	}
	switch {
	case e.i > 0:
		e.i--
	default:
		if e.q = e.q.p; e.q == nil {
			e.err = io.EOF
			break
		}
		e.i = e.q.c - 1
	}
	return e.err
}

72
vendor/github.com/pingcap/tidb/kv/memkv/temp.go generated vendored Normal file
View file

@ -0,0 +1,72 @@
// Copyright 2013 The ql Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSES/QL-LICENSE file.
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package memkv
import (
"github.com/pingcap/tidb/util/types"
)
// btreeIterator is the forward iterator handed out by Temp.SeekFirst.
type btreeIterator interface {
	Next() (k, v []interface{}, err error)
}

// Temp is the interface of a memory kv storage
type Temp interface {
	// Drop releases the storage.
	Drop() (err error)
	// Get returns the value associated with k (nil when absent).
	Get(k []interface{}) (v []interface{}, err error)
	// SeekFirst returns an iterator positioned on the first pair.
	SeekFirst() (e btreeIterator, err error)
	// Set associates k with a copy of v.
	Set(k, v []interface{}) (err error)
}

// memtemp for join/groupby or any aggregation operation
type memTemp struct {
	// memory btree
	tree *Tree
}
// CreateTemp returns a new empty memory kv. asc selects the key collation
// by indexing types.Collators.
func CreateTemp(asc bool) (_ Temp, err error) {
	return &memTemp{
		tree: NewTree(types.Collators[asc]),
	}, nil
}

// Get implements the Temp interface. A missing key yields (nil, nil) rather
// than an error: the tree's "found" flag is deliberately discarded.
func (t *memTemp) Get(k []interface{}) (v []interface{}, err error) {
	v, _ = t.tree.Get(k)
	return
}

// Drop implements the Temp interface; the in-memory tree needs no explicit
// release, so this is a no-op.
func (t *memTemp) Drop() (err error) { return }

// Set implements the Temp interface. The key slice is copied and the value
// is cloned (types.Clone) so later mutation by the caller cannot corrupt
// the tree's contents.
func (t *memTemp) Set(k, v []interface{}) (err error) {
	vv, err := types.Clone(v)
	if err != nil {
		return err
	}
	t.tree.Set(append([]interface{}(nil), k...), vv.([]interface{}))
	return
}

// SeekFirst implements the Temp interface. For an empty tree the underlying
// io.EOF error is returned with a nil iterator.
func (t *memTemp) SeekFirst() (e btreeIterator, err error) {
	it, err := t.tree.SeekFirst()
	if err != nil {
		return
	}
	return it, nil
}

78
vendor/github.com/pingcap/tidb/kv/txn.go generated vendored Normal file
View file

@ -0,0 +1,78 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package kv
import (
"math"
"math/rand"
"time"
"github.com/juju/errors"
"github.com/ngaut/log"
)
// RunInNewTxn will run the f in a new transaction environment.
// When retryable is true and either f or the commit fails with a retryable
// error, the transaction is rolled back and retried, up to maxRetryCnt
// attempts; commit retries additionally sleep with jittered backoff (BackOff).
// Rollback errors are intentionally ignored — the original error is the one
// worth reporting.
// NOTE(review): exhausting all maxRetryCnt attempts on retryable errors falls
// out of the loop and returns nil; confirm whether an error should surface.
func RunInNewTxn(store Storage, retryable bool, f func(txn Transaction) error) error {
	for i := 0; i < maxRetryCnt; i++ {
		txn, err := store.Begin()
		if err != nil {
			log.Errorf("[kv] RunInNewTxn error - %v", err)
			return errors.Trace(err)
		}
		err = f(txn)
		if retryable && IsRetryableError(err) {
			log.Warnf("[kv] Retry txn %v", txn)
			txn.Rollback()
			continue
		}
		if err != nil {
			txn.Rollback()
			return errors.Trace(err)
		}
		err = txn.Commit()
		if retryable && IsRetryableError(err) {
			log.Warnf("[kv] Retry txn %v", txn)
			txn.Rollback()
			BackOff(i)
			continue
		}
		if err != nil {
			return errors.Trace(err)
		}
		break
	}
	return nil
}
var (
	// maxRetryCnt is the maximum number of attempts RunInNewTxn makes for a
	// retryable transaction before giving up.
	maxRetryCnt = 100
	// retryBackOffBase is the initial backoff duration, in milliseconds, a
	// failed transaction stays dormant before it retries.
	retryBackOffBase = 1
	// retryBackOffCap is the maximum backoff duration, in milliseconds, a
	// failed transaction stays dormant before it retries.
	retryBackOffCap = 100
)

// BackOff implements exponential backoff with full jitter.
// It sleeps a uniformly random number of milliseconds in
// [0, min(retryBackOffCap, retryBackOffBase*2^attempts)) and returns the
// actual slept duration in nanoseconds (the integer value of the
// time.Duration). The original comments claimed microseconds, which matched
// neither the sleep unit (milliseconds) nor the return value (nanoseconds).
// See: http://www.awsarchitectureblog.com/2015/03/backoff.html.
func BackOff(attempts int) int {
	upper := int(math.Min(float64(retryBackOffCap), float64(retryBackOffBase)*math.Pow(2.0, float64(attempts))))
	sleep := time.Duration(rand.Intn(upper)) * time.Millisecond
	time.Sleep(sleep)
	return int(sleep)
}

156
vendor/github.com/pingcap/tidb/kv/union_iter.go generated vendored Normal file
View file

@ -0,0 +1,156 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package kv
import "github.com/ngaut/log"
// UnionIter is the iterator on an UnionStore.
// It merges two sorted iterators: dirtyIt (buffered writes) and snapshotIt
// (the underlying snapshot). Dirty entries win on equal keys, and dirty
// entries with empty values act as deletion markers hiding snapshot entries.
type UnionIter struct {
	dirtyIt    Iterator
	snapshotIt Iterator

	dirtyValid    bool // whether dirtyIt still has entries
	snapshotValid bool // whether snapshotIt still has entries

	curIsDirty bool // whether the current entry comes from dirtyIt
	isValid    bool // whether the merged iterator has a current entry
}

// newUnionIter builds a UnionIter over the two iterators and positions it on
// the first merged entry.
func newUnionIter(dirtyIt Iterator, snapshotIt Iterator) *UnionIter {
	it := &UnionIter{
		dirtyIt:       dirtyIt,
		snapshotIt:    snapshotIt,
		dirtyValid:    dirtyIt.Valid(),
		snapshotValid: snapshotIt.Valid(),
	}
	it.updateCur()
	return it
}

// dirtyNext advances the dirty iterator and updates its valid status.
func (iter *UnionIter) dirtyNext() {
	iter.dirtyIt.Next()
	iter.dirtyValid = iter.dirtyIt.Valid()
}

// snapshotNext advances the snapshot iterator and updates its valid status.
func (iter *UnionIter) snapshotNext() {
	iter.snapshotIt.Next()
	iter.snapshotValid = iter.snapshotIt.Valid()
}
// updateCur decides which underlying iterator provides the current entry,
// skipping deletion markers (dirty entries with empty values). It sets
// isValid to false once both iterators are exhausted.
func (iter *UnionIter) updateCur() {
	iter.isValid = true
	for {
		if !iter.dirtyValid && !iter.snapshotValid {
			iter.isValid = false
			return
		}
		if !iter.dirtyValid {
			// Only snapshot entries remain.
			iter.curIsDirty = false
			return
		}
		if !iter.snapshotValid {
			iter.curIsDirty = true
			// if delete it
			if len(iter.dirtyIt.Value()) == 0 {
				iter.dirtyNext()
				continue
			}
			break
		}
		// Both valid. (This guard is always true at this point; kept as-is.)
		if iter.snapshotValid && iter.dirtyValid {
			snapshotKey := iter.snapshotIt.Key()
			dirtyKey := iter.dirtyIt.Key()
			cmp := dirtyKey.Cmp(snapshotKey)
			// if equal, means both have value
			if cmp == 0 {
				if len(iter.dirtyIt.Value()) == 0 {
					// snapshot has a record, but txn says we have deleted it
					// just go next
					iter.dirtyNext()
					iter.snapshotNext()
					continue
				}
				// The dirty write shadows the snapshot record: skip the
				// snapshot entry and surface the dirty one.
				iter.snapshotNext()
				iter.curIsDirty = true
				break
			} else if cmp > 0 {
				// record from snapshot comes first
				iter.curIsDirty = false
				break
			} else {
				// record from dirty comes first
				if len(iter.dirtyIt.Value()) == 0 {
					log.Warnf("[kv] delete a record not exists? k = %q", iter.dirtyIt.Key())
					// jump over this deletion
					iter.dirtyNext()
					continue
				}
				iter.curIsDirty = true
				break
			}
		}
	}
}
// Next implements the Iterator Next interface.
// It advances whichever underlying iterator produced the current entry, then
// recomputes which side is current.
func (iter *UnionIter) Next() error {
	if iter.curIsDirty {
		iter.dirtyNext()
	} else {
		iter.snapshotNext()
	}
	iter.updateCur()
	return nil
}
// Value implements the Iterator Value interface.
// Multi columns
// It reads from whichever store currently owns the cursor.
func (iter *UnionIter) Value() []byte {
	if iter.curIsDirty {
		return iter.dirtyIt.Value()
	}
	return iter.snapshotIt.Value()
}
// Key implements the Iterator Key interface.
// It reads from whichever store currently owns the cursor.
func (iter *UnionIter) Key() Key {
	if iter.curIsDirty {
		return iter.dirtyIt.Key()
	}
	return iter.snapshotIt.Key()
}
// Valid implements the Iterator Valid interface.
func (iter *UnionIter) Valid() bool {
	return iter.isValid
}

// Close implements the Iterator Close interface.
// It is idempotent: the underlying iterators are nilled out after closing.
func (iter *UnionIter) Close() {
	if iter.snapshotIt != nil {
		iter.snapshotIt.Close()
		iter.snapshotIt = nil
	}
	if iter.dirtyIt != nil {
		iter.dirtyIt.Close()
		iter.dirtyIt = nil
	}
}

212
vendor/github.com/pingcap/tidb/kv/union_store.go generated vendored Normal file
View file

@ -0,0 +1,212 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package kv
import (
"bytes"
"github.com/juju/errors"
)
// UnionStore is a store that wraps a snapshot for read and a BufferStore for buffered write.
// Also, it provides some transaction related utilities.
type UnionStore interface {
	MemBuffer
	// CheckLazyConditionPairs loads all lazy values from store then checks if all values are matched.
	// Lazy condition pairs should be checked before transaction commit.
	CheckLazyConditionPairs() error
	// WalkBuffer iterates all buffered kv pairs.
	WalkBuffer(f func(k Key, v []byte) error) error
	// SetOption sets an option with a value, when val is nil, uses the default
	// value of this option.
	SetOption(opt Option, val interface{})
	// DelOption deletes an option.
	DelOption(opt Option)
}

// Option is used for customizing kv store's behaviors during a transaction.
type Option int

// Options is an interface of a set of options. Each option is associated with a value.
type Options interface {
	// Get gets an option value.
	Get(opt Option) (v interface{}, ok bool)
}

var (
	// p pools MemBuffers so lazyMemBuffer can reuse them across transactions.
	p = newCache("memdb pool", 100, func() MemBuffer {
		return NewMemDbBuffer()
	})
)

// conditionPair is used to store lazy check condition.
// If condition not match (value is not equal as expected one), returns err.
type conditionPair struct {
	key   Key
	value []byte // expected value; empty means "key must not exist"
	err   error  // error returned when the expectation fails
}

// unionStore is an in-memory Store which contains a buffer for write and a
// snapshot for read.
type unionStore struct {
	*BufferStore
	snapshot           Snapshot                    // for read
	lazyConditionPairs map[string](*conditionPair) // for delay check
	opts               options
}
// NewUnionStore builds a new UnionStore on top of the given snapshot.
func NewUnionStore(snapshot Snapshot) UnionStore {
	return &unionStore{
		BufferStore:        NewBufferStore(snapshot),
		snapshot:           snapshot,
		lazyConditionPairs: make(map[string](*conditionPair)),
		opts:               make(map[Option]interface{}),
	}
}
// lazyMemBuffer is a MemBuffer that defers allocating its backing buffer
// (from pool p) until the first write/seek, and returns it to the pool on
// Release.
type lazyMemBuffer struct {
	mb MemBuffer // nil until first use
}

// Get returns the value for k; an unallocated buffer contains nothing.
func (lmb *lazyMemBuffer) Get(k Key) ([]byte, error) {
	if lmb.mb == nil {
		return nil, ErrNotExist
	}
	return lmb.mb.Get(k)
}

// Set stores value under key, allocating the buffer on first use.
func (lmb *lazyMemBuffer) Set(key Key, value []byte) error {
	if lmb.mb == nil {
		lmb.mb = p.get()
	}
	return lmb.mb.Set(key, value)
}

// Delete removes k, allocating the buffer on first use.
func (lmb *lazyMemBuffer) Delete(k Key) error {
	if lmb.mb == nil {
		lmb.mb = p.get()
	}
	return lmb.mb.Delete(k)
}

// Seek returns an iterator positioned at k, allocating the buffer on first use.
func (lmb *lazyMemBuffer) Seek(k Key) (Iterator, error) {
	if lmb.mb == nil {
		lmb.mb = p.get()
	}
	return lmb.mb.Seek(k)
}

// Release releases the underlying buffer back to the pool, if one was allocated.
func (lmb *lazyMemBuffer) Release() {
	if lmb.mb == nil {
		return
	}
	lmb.mb.Release()
	p.put(lmb.mb)
	lmb.mb = nil
}
// Get implements the Retriever interface.
// Lookup order: the write buffer first, then the snapshot (via BufferStore.r).
// When the PresumeKeyNotExists option is set, a buffer miss records a lazy
// condition pair asserting the key's absence (verified at commit time by
// CheckLazyConditionPairs) and reports ErrNotExist without reading the
// snapshot. An empty value is treated as "does not exist".
func (us *unionStore) Get(k Key) ([]byte, error) {
	v, err := us.MemBuffer.Get(k)
	if IsErrNotFound(err) {
		if _, ok := us.opts.Get(PresumeKeyNotExists); ok {
			e, ok := us.opts.Get(PresumeKeyNotExistsError)
			if ok && e != nil {
				us.markLazyConditionPair(k, nil, e.(error))
			} else {
				us.markLazyConditionPair(k, nil, ErrKeyExists)
			}
			return nil, errors.Trace(ErrNotExist)
		}
	}
	if IsErrNotFound(err) {
		v, err = us.BufferStore.r.Get(k)
	}
	if err != nil {
		return v, errors.Trace(err)
	}
	if len(v) == 0 {
		return nil, errors.Trace(ErrNotExist)
	}
	return v, nil
}

// markLazyConditionPair marks a kv pair for later check.
// If condition not match, should return e as error.
// The key is cloned so later mutation of k by the caller cannot affect the
// stored pair.
func (us *unionStore) markLazyConditionPair(k Key, v []byte, e error) {
	us.lazyConditionPairs[string(k)] = &conditionPair{
		key:   k.Clone(),
		value: v,
		err:   e,
	}
}
// CheckLazyConditionPairs implements the UnionStore interface.
// It batch-loads the current snapshot values of every key recorded by
// markLazyConditionPair and verifies each expectation:
//   - a pair with an empty expected value asserts the key must NOT exist in
//     the snapshot; if it does, the pair's recorded error is returned;
//   - otherwise the snapshot value must equal the expected value byte-wise;
//     a mismatch returns ErrLazyConditionPairsNotMatch.
func (us *unionStore) CheckLazyConditionPairs() error {
	if len(us.lazyConditionPairs) == 0 {
		return nil
	}
	keys := make([]Key, 0, len(us.lazyConditionPairs))
	for _, v := range us.lazyConditionPairs {
		keys = append(keys, v.key)
	}
	values, err := us.snapshot.BatchGet(keys)
	if err != nil {
		return errors.Trace(err)
	}
	for k, v := range us.lazyConditionPairs {
		if len(v.value) == 0 {
			if _, exist := values[k]; exist {
				return errors.Trace(v.err)
			}
		} else {
			// bytes.Equal is the idiomatic (and cheaper) equality test;
			// bytes.Compare is meant for ordering (staticcheck S1004).
			if !bytes.Equal(values[k], v.value) {
				return errors.Trace(ErrLazyConditionPairsNotMatch)
			}
		}
	}
	return nil
}
// SetOption implements the UnionStore SetOption interface.
func (us *unionStore) SetOption(opt Option, val interface{}) {
	us.opts[opt] = val
}

// DelOption implements the UnionStore DelOption interface.
func (us *unionStore) DelOption(opt Option) {
	delete(us.opts, opt)
}

// Release implements the UnionStore Release interface.
// It releases both the snapshot and the buffered writes.
func (us *unionStore) Release() {
	us.snapshot.Release()
	us.BufferStore.Release()
}

// options is the map-backed Options implementation used by unionStore.
type options map[Option]interface{}

// Get implements the Options interface.
func (opts options) Get(opt Option) (interface{}, bool) {
	v, ok := opts[opt]
	return v, ok
}

60
vendor/github.com/pingcap/tidb/kv/utils.go generated vendored Normal file
View file

@ -0,0 +1,60 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package kv
import (
"strconv"
"github.com/juju/errors"
)
// IncInt64 increases the value for key k in kv store by step.
// If k does not exist yet, the previous value is taken as 0 and step is
// stored directly. The new value is returned.
func IncInt64(rm RetrieverMutator, k Key, step int64) (int64, error) {
	val, err := rm.Get(k)
	if IsErrNotFound(err) {
		err = rm.Set(k, []byte(strconv.FormatInt(step, 10)))
		if err != nil {
			return 0, errors.Trace(err)
		}
		return step, nil
	}
	if err != nil {
		return 0, errors.Trace(err)
	}
	// bitSize must be 64: the counter is an int64, and bitSize 0 (platform
	// int width) would overflow large values on 32-bit builds.
	intVal, err := strconv.ParseInt(string(val), 10, 64)
	if err != nil {
		return 0, errors.Trace(err)
	}
	intVal += step
	err = rm.Set(k, []byte(strconv.FormatInt(intVal, 10)))
	if err != nil {
		return 0, errors.Trace(err)
	}
	return intVal, nil
}
// GetInt64 gets the int64 value which was created by the IncInt64 method.
// A missing key yields 0 with no error.
func GetInt64(r Retriever, k Key) (int64, error) {
	val, err := r.Get(k)
	if IsErrNotFound(err) {
		return 0, nil
	}
	if err != nil {
		return 0, errors.Trace(err)
	}
	// bitSize must be 64 to match IncInt64's int64 counter; bitSize 0
	// (platform int width) would overflow on 32-bit builds.
	intVal, err := strconv.ParseInt(string(val), 10, 64)
	return intVal, errors.Trace(err)
}

51
vendor/github.com/pingcap/tidb/kv/version.go generated vendored Normal file
View file

@ -0,0 +1,51 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package kv
import "math"
// VersionProvider provides increasing IDs.
type VersionProvider interface {
	CurrentVersion() (Version, error)
}

// Version is the wrapper of KV's version.
type Version struct {
	Ver uint64
}

var (
	// MaxVersion is the maximum version, notice that it's not a valid version.
	MaxVersion = Version{Ver: math.MaxUint64}
	// MinVersion is the minimum version, it's not a valid version, too.
	MinVersion = Version{Ver: 0}
)
// NewVersion creates a new Version struct wrapping the given ticket value.
func NewVersion(v uint64) Version {
	return Version{Ver: v}
}
// Cmp returns the comparison result of two versions.
// The result will be 0 if a==b, -1 if a < b, and +1 if a > b.
func (v Version) Cmp(another Version) int {
	switch {
	case v.Ver > another.Ver:
		return 1
	case v.Ver < another.Ver:
		return -1
	default:
		return 0
	}
}

32
vendor/github.com/pingcap/tidb/make.cmd generated vendored Normal file
View file

@ -0,0 +1,32 @@
@echo off
:: Windows build script: regenerates the SQL parser/lexer from their grammar
:: definitions, then builds, installs and tests TiDB through godep.

::go build option
set TiDBBuildTS=%date:~0,10% %time:~1,7%
for /f "delims=" %%i in ('git rev-parse HEAD') do (set TiDBGitHash=%%i)
set LDFLAGS="-X github.com/pingcap/tidb/util/printer.TiDBBuildTS=%TiDBBuildTS% -X github.com/pingcap/tidb/util/printer.TiDBGitHash=%TiDBGitHash%"

:: godep
go get github.com/tools/godep

@echo [Parser]
:: goyacc/golex generate parser/parser.go and parser/scanner.go from
:: parser/parser.y and parser/scanner.l; the temp file carries the xe tables.
go get github.com/qiuyesuifeng/goyacc
go get github.com/qiuyesuifeng/golex
type nul >>temp.XXXXXX
goyacc -o nul -xegen "temp.XXXXXX" parser/parser.y
goyacc -o parser/parser.go -xe "temp.XXXXXX" parser/parser.y
DEL /F /A /Q temp.XXXXXX
DEL /F /A /Q y.output
golex -o parser/scanner.go parser/scanner.l

@echo [Build]
godep go build -ldflags '%LDFLAGS%'

@echo [Install]
godep go install ./...

@echo [Test]
godep go test -cover ./...

::done
@echo [Done]

180
vendor/github.com/pingcap/tidb/meta/autoid/autoid.go generated vendored Normal file
View file

@ -0,0 +1,180 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package autoid
import (
"sync"
"github.com/juju/errors"
"github.com/ngaut/log"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/meta"
)
const (
	// step is the size of the ID batch an allocator claims from the meta
	// store at a time; IDs within a claimed batch are handed out without
	// touching storage.
	step = 1000
)

// Allocator is an auto increment id generator.
// Just keep id unique actually.
type Allocator interface {
	// Alloc allocs the next autoID for table with tableID.
	// It gets a batch of autoIDs at a time. So it does not need to access storage for each call.
	Alloc(tableID int64) (int64, error)
	// Rebase rebases the autoID base for table with tableID and the new base value.
	// If allocIDs is true, it will allocate some IDs and save to the cache.
	// If allocIDs is false, it will not allocate IDs.
	Rebase(tableID, newBase int64, allocIDs bool) error
}

// allocator is the storage-backed Allocator implementation.
type allocator struct {
	mu    sync.Mutex // guards base and end
	base  int64      // last ID handed out
	end   int64      // last ID of the currently claimed batch
	store kv.Storage
	dbID  int64 // database the tables belong to
}
// Rebase implements autoid.Allocator Rebase interface.
// It ensures subsequently allocated IDs are greater than newBase. When the
// cached range already covers newBase no storage access is needed; otherwise
// the persistent counter is advanced inside a transaction, also claiming a
// fresh batch of step IDs when allocIDs is true.
func (alloc *allocator) Rebase(tableID, newBase int64, allocIDs bool) error {
	if tableID == 0 {
		return errors.New("Invalid tableID")
	}
	alloc.mu.Lock()
	defer alloc.mu.Unlock()
	if newBase <= alloc.base {
		return nil
	}
	if newBase <= alloc.end {
		alloc.base = newBase
		return nil
	}
	return kv.RunInNewTxn(alloc.store, true, func(txn kv.Transaction) error {
		m := meta.NewMeta(txn)
		end, err := m.GetAutoTableID(alloc.dbID, tableID)
		if err != nil {
			return errors.Trace(err)
		}
		if newBase <= end {
			// The persistent counter is already past newBase; nothing to do.
			return nil
		}
		newStep := newBase - end + step
		if !allocIDs {
			newStep = newBase - end
		}
		end, err = m.GenAutoTableID(alloc.dbID, tableID, newStep)
		if err != nil {
			return errors.Trace(err)
		}
		alloc.end = end
		alloc.base = newBase
		if !allocIDs {
			// No cached IDs were claimed: mark the cache as exhausted.
			alloc.base = alloc.end
		}
		return nil
	})
}
// Alloc implements autoid.Allocator Alloc interface.
// When the cached batch is exhausted (base == end) it claims the next step
// IDs from the meta store inside a transaction before handing out base+1.
func (alloc *allocator) Alloc(tableID int64) (int64, error) {
	if tableID == 0 {
		return 0, errors.New("Invalid tableID")
	}
	alloc.mu.Lock()
	defer alloc.mu.Unlock()
	if alloc.base == alloc.end { // step
		err := kv.RunInNewTxn(alloc.store, true, func(txn kv.Transaction) error {
			m := meta.NewMeta(txn)
			base, err1 := m.GetAutoTableID(alloc.dbID, tableID)
			if err1 != nil {
				return errors.Trace(err1)
			}
			end, err1 := m.GenAutoTableID(alloc.dbID, tableID, step)
			if err1 != nil {
				return errors.Trace(err1)
			}
			alloc.end = end
			if end == step {
				// First batch ever claimed for this table.
				alloc.base = base
			} else {
				alloc.base = end - step
			}
			return nil
		})
		if err != nil {
			return 0, errors.Trace(err)
		}
	}
	alloc.base++
	log.Debugf("[kv] Alloc id %d, table ID:%d, from %p, database ID:%d", alloc.base, tableID, alloc, alloc.dbID)
	return alloc.base, nil
}
var (
	// memID is the process-wide counter shared by all memoryAllocators.
	memID     int64
	memIDLock sync.Mutex // guards memID
)

// memoryAllocator is an in-memory Allocator with no persistence.
type memoryAllocator struct {
	mu   sync.Mutex // guards base and end
	base int64      // last ID handed out
	end  int64      // last ID of the currently claimed batch
	dbID int64
}
// Rebase implements autoid.Allocator Rebase interface.
// It is currently a no-op for the in-memory allocator.
func (alloc *memoryAllocator) Rebase(tableID, newBase int64, allocIDs bool) error {
	// TODO: implement it.
	return nil
}

// Alloc implements autoid.Allocator Alloc interface.
// Batches of step IDs are claimed from the shared memID counter under
// memIDLock; IDs within a batch are then handed out under alloc.mu only.
func (alloc *memoryAllocator) Alloc(tableID int64) (int64, error) {
	if tableID == 0 {
		return 0, errors.New("Invalid tableID")
	}
	alloc.mu.Lock()
	defer alloc.mu.Unlock()
	if alloc.base == alloc.end { // step
		memIDLock.Lock()
		memID = memID + step
		alloc.end = memID
		alloc.base = alloc.end - step
		memIDLock.Unlock()
	}
	alloc.base++
	return alloc.base, nil
}
// NewAllocator returns a new auto increment id generator on the store.
func NewAllocator(store kv.Storage, dbID int64) Allocator {
	return &allocator{
		store: store,
		dbID:  dbID,
	}
}

// NewMemoryAllocator returns a new auto increment id generator in memory.
func NewMemoryAllocator(dbID int64) Allocator {
	return &memoryAllocator{
		dbID: dbID,
	}
}

650
vendor/github.com/pingcap/tidb/meta/meta.go generated vendored Normal file
View file

@ -0,0 +1,650 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package meta
import (
"encoding/binary"
"encoding/json"
"fmt"
"strconv"
"strings"
"sync"
"time"
"github.com/juju/errors"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/model"
"github.com/pingcap/tidb/structure"
)
var (
	// globalIDMutex serializes GenGlobalID calls within this process.
	globalIDMutex sync.Mutex
)

// Meta structure:
//	NextGlobalID -> int64
//	SchemaVersion -> int64
//	DBs -> {
//		DB:1 -> db meta data []byte
//		DB:2 -> db meta data []byte
//	}
//	DB:1 -> {
//		Table:1 -> table meta data []byte
//		Table:2 -> table meta data []byte
//		TID:1 -> int64
//		TID:2 -> int64
//	}
//
var (
	mNextGlobalIDKey = []byte("NextGlobalID")
	// Note: the stored key literal is "SchemaVersionKey", not "SchemaVersion".
	mSchemaVersionKey = []byte("SchemaVersionKey")
	mDBs              = []byte("DBs")
	mDBPrefix         = "DB"
	mTablePrefix      = "Table"
	mTableIDPrefix    = "TID"
	mBootstrapKey     = []byte("BootstrapKey")
)

var (
	// ErrDBExists is the error for db exists.
	ErrDBExists = errors.New("database already exists")
	// ErrDBNotExists is the error for db not exists.
	ErrDBNotExists = errors.New("database doesn't exist")
	// ErrTableExists is the error for table exists.
	ErrTableExists = errors.New("table already exists")
	// ErrTableNotExists is the error for table not exists.
	ErrTableNotExists = errors.New("table doesn't exist")
)
// Meta is for handling meta information in a transaction.
type Meta struct {
txn *structure.TxStructure
}
// NewMeta creates a Meta in transaction txn.
// Every meta key is namespaced under the 'm' prefix in the underlying store.
func NewMeta(txn kv.Transaction) *Meta {
	return &Meta{txn: structure.NewStructure(txn, []byte{'m'})}
}
// GenGlobalID generates next id globally.
// A process-wide mutex serializes concurrent callers; the counter itself is
// incremented under mNextGlobalIDKey inside the transaction.
func (m *Meta) GenGlobalID() (int64, error) {
	globalIDMutex.Lock()
	defer globalIDMutex.Unlock()

	return m.txn.Inc(mNextGlobalIDKey, 1)
}

// GetGlobalID gets current global id without advancing it.
func (m *Meta) GetGlobalID() (int64, error) {
	return m.txn.GetInt64(mNextGlobalIDKey)
}
// dbKey builds the hash key for a database, e.g. "DB:1".
func (m *Meta) dbKey(dbID int64) []byte {
	return []byte(fmt.Sprintf("%s:%d", mDBPrefix, dbID))
}

// parseDatabaseID extracts the numeric id from a "DB:<id>" key.
func (m *Meta) parseDatabaseID(key string) (int64, error) {
	seps := strings.Split(key, ":")
	if len(seps) != 2 {
		return 0, errors.Errorf("invalid db key %s", key)
	}

	n, err := strconv.ParseInt(seps[1], 10, 64)
	return n, errors.Trace(err)
}

// autoTalbeIDKey builds the auto-increment counter field, e.g. "TID:1".
// NOTE(review): "Talbe" is a typo for "Table"; kept as-is because renaming
// the unexported method would touch every call site in this file.
func (m *Meta) autoTalbeIDKey(tableID int64) []byte {
	return []byte(fmt.Sprintf("%s:%d", mTableIDPrefix, tableID))
}

// tableKey builds the table meta field, e.g. "Table:1".
func (m *Meta) tableKey(tableID int64) []byte {
	return []byte(fmt.Sprintf("%s:%d", mTablePrefix, tableID))
}

// parseTableID extracts the numeric id from a "Table:<id>" field.
// Mirrors parseDatabaseID except for the error message.
func (m *Meta) parseTableID(key string) (int64, error) {
	seps := strings.Split(key, ":")
	if len(seps) != 2 {
		return 0, errors.Errorf("invalid table meta key %s", key)
	}

	n, err := strconv.ParseInt(seps[1], 10, 64)
	return n, errors.Trace(err)
}
// GenAutoTableID adds step to the auto id of the table and returns the sum.
// Both the database and the table must already exist.
func (m *Meta) GenAutoTableID(dbID int64, tableID int64, step int64) (int64, error) {
	// Check if db exists.
	dbKey := m.dbKey(dbID)
	if err := m.checkDBExists(dbKey); err != nil {
		return 0, errors.Trace(err)
	}

	// Check if table exists.
	tableKey := m.tableKey(tableID)
	if err := m.checkTableExists(dbKey, tableKey); err != nil {
		return 0, errors.Trace(err)
	}

	return m.txn.HInc(dbKey, m.autoTalbeIDKey(tableID), step)
}

// GetAutoTableID gets current auto id with table id.
// NOTE(review): no existence checks here, unlike GenAutoTableID; behavior
// for a missing counter depends on HGetInt64 — confirm in structure package.
func (m *Meta) GetAutoTableID(dbID int64, tableID int64) (int64, error) {
	return m.txn.HGetInt64(m.dbKey(dbID), m.autoTalbeIDKey(tableID))
}

// GetSchemaVersion gets current global schema version.
func (m *Meta) GetSchemaVersion() (int64, error) {
	return m.txn.GetInt64(mSchemaVersionKey)
}

// GenSchemaVersion generates next schema version.
func (m *Meta) GenSchemaVersion() (int64, error) {
	return m.txn.Inc(mSchemaVersionKey, 1)
}
// checkDBExists returns nil when dbKey is present in the DBs directory
// hash and ErrDBNotExists when it is absent.
//
// Restructured to drop the `else if` after a terminating return, matching
// the guard-clause style of checkDBNotExists / checkTableExists below.
func (m *Meta) checkDBExists(dbKey []byte) error {
	v, err := m.txn.HGet(mDBs, dbKey)
	if err != nil {
		return errors.Trace(err)
	}

	if v == nil {
		return ErrDBNotExists
	}

	return nil
}
// checkDBNotExists returns ErrDBExists when dbKey is already present in the
// DBs directory hash, nil when it is free to use.
func (m *Meta) checkDBNotExists(dbKey []byte) error {
	v, err := m.txn.HGet(mDBs, dbKey)
	if err != nil {
		return errors.Trace(err)
	}

	if v != nil {
		return ErrDBExists
	}

	return nil
}

// checkTableExists returns ErrTableNotExists when tableKey is absent from
// the database's hash.
func (m *Meta) checkTableExists(dbKey []byte, tableKey []byte) error {
	v, err := m.txn.HGet(dbKey, tableKey)
	if err != nil {
		return errors.Trace(err)
	}

	if v == nil {
		return ErrTableNotExists
	}

	return nil
}

// checkTableNotExists returns ErrTableExists when tableKey is already
// present in the database's hash.
func (m *Meta) checkTableNotExists(dbKey []byte, tableKey []byte) error {
	v, err := m.txn.HGet(dbKey, tableKey)
	if err != nil {
		return errors.Trace(err)
	}

	if v != nil {
		return ErrTableExists
	}

	return nil
}
// CreateDatabase creates a database with db info.
// Fails with ErrDBExists when the database is already registered.
func (m *Meta) CreateDatabase(dbInfo *model.DBInfo) error {
	dbKey := m.dbKey(dbInfo.ID)

	if err := m.checkDBNotExists(dbKey); err != nil {
		return errors.Trace(err)
	}

	data, err := json.Marshal(dbInfo)
	if err != nil {
		return errors.Trace(err)
	}

	return m.txn.HSet(mDBs, dbKey, data)
}

// UpdateDatabase updates a database with db info.
// Same write path as CreateDatabase but requires the database to already
// exist (ErrDBNotExists otherwise).
func (m *Meta) UpdateDatabase(dbInfo *model.DBInfo) error {
	dbKey := m.dbKey(dbInfo.ID)

	if err := m.checkDBExists(dbKey); err != nil {
		return errors.Trace(err)
	}

	data, err := json.Marshal(dbInfo)
	if err != nil {
		return errors.Trace(err)
	}

	return m.txn.HSet(mDBs, dbKey, data)
}
// CreateTable creates a table with tableInfo in database.
// The database must exist and the table id must not already be registered.
func (m *Meta) CreateTable(dbID int64, tableInfo *model.TableInfo) error {
	// Check if db exists.
	dbKey := m.dbKey(dbID)
	if err := m.checkDBExists(dbKey); err != nil {
		return errors.Trace(err)
	}

	// Check if table exists.
	tableKey := m.tableKey(tableInfo.ID)
	if err := m.checkTableNotExists(dbKey, tableKey); err != nil {
		return errors.Trace(err)
	}

	data, err := json.Marshal(tableInfo)
	if err != nil {
		return errors.Trace(err)
	}

	return m.txn.HSet(dbKey, tableKey, data)
}
// DropDatabase drops whole database.
// NOTE(review): unlike DropTable, no existence check is performed first —
// the original "Check if db exists." comment was misleading; dropping an
// absent database is a silent no-op at this layer.
func (m *Meta) DropDatabase(dbID int64) error {
	// Clear the database's own hash (table meta and TID counters), then
	// remove its entry from the DBs directory hash.
	dbKey := m.dbKey(dbID)
	if err := m.txn.HClear(dbKey); err != nil {
		return errors.Trace(err)
	}

	if err := m.txn.HDel(mDBs, dbKey); err != nil {
		return errors.Trace(err)
	}

	return nil
}
// DropTable drops table in database.
// Removes both the table's meta entry and its auto-id counter.
func (m *Meta) DropTable(dbID int64, tableID int64) error {
	// Check if db exists.
	dbKey := m.dbKey(dbID)
	if err := m.checkDBExists(dbKey); err != nil {
		return errors.Trace(err)
	}

	// Check if table exists.
	tableKey := m.tableKey(tableID)
	if err := m.checkTableExists(dbKey, tableKey); err != nil {
		return errors.Trace(err)
	}

	if err := m.txn.HDel(dbKey, tableKey); err != nil {
		return errors.Trace(err)
	}

	// Also drop the "TID:<id>" auto-increment counter for this table.
	if err := m.txn.HDel(dbKey, m.autoTalbeIDKey(tableID)); err != nil {
		return errors.Trace(err)
	}

	return nil
}
// UpdateTable updates the table with table info.
// Both the database and the table must already exist.
func (m *Meta) UpdateTable(dbID int64, tableInfo *model.TableInfo) error {
	// Check if db exists.
	dbKey := m.dbKey(dbID)
	if err := m.checkDBExists(dbKey); err != nil {
		return errors.Trace(err)
	}

	// Check if table exists.
	tableKey := m.tableKey(tableInfo.ID)
	if err := m.checkTableExists(dbKey, tableKey); err != nil {
		return errors.Trace(err)
	}

	data, err := json.Marshal(tableInfo)
	if err != nil {
		return errors.Trace(err)
	}

	err = m.txn.HSet(dbKey, tableKey, data)
	return errors.Trace(err)
}
// ListTables shows all tables in database.
func (m *Meta) ListTables(dbID int64) ([]*model.TableInfo, error) {
	dbKey := m.dbKey(dbID)
	if err := m.checkDBExists(dbKey); err != nil {
		return nil, errors.Trace(err)
	}

	res, err := m.txn.HGetAll(dbKey)
	if err != nil {
		return nil, errors.Trace(err)
	}

	// Capacity heuristic: each table typically contributes a "Table:" meta
	// field plus a "TID:" counter field, hence len(res)/2.
	tables := make([]*model.TableInfo, 0, len(res)/2)
	for _, r := range res {
		// only handle table meta; skip "TID:" counter fields.
		tableKey := string(r.Field)
		if !strings.HasPrefix(tableKey, mTablePrefix) {
			continue
		}

		tbInfo := &model.TableInfo{}
		err = json.Unmarshal(r.Value, tbInfo)
		if err != nil {
			return nil, errors.Trace(err)
		}

		tables = append(tables, tbInfo)
	}

	return tables, nil
}
// ListDatabases shows all databases registered in the DBs directory hash.
func (m *Meta) ListDatabases() ([]*model.DBInfo, error) {
	res, err := m.txn.HGetAll(mDBs)
	if err != nil {
		return nil, errors.Trace(err)
	}

	dbs := make([]*model.DBInfo, 0, len(res))
	for _, r := range res {
		dbInfo := &model.DBInfo{}
		err = json.Unmarshal(r.Value, dbInfo)
		if err != nil {
			return nil, errors.Trace(err)
		}

		dbs = append(dbs, dbInfo)
	}

	return dbs, nil
}
// GetDatabase gets the database value with ID.
// Returns (nil, nil) when the database does not exist, so callers must
// nil-check the result rather than rely on the error.
func (m *Meta) GetDatabase(dbID int64) (*model.DBInfo, error) {
	dbKey := m.dbKey(dbID)
	value, err := m.txn.HGet(mDBs, dbKey)
	if err != nil || value == nil {
		return nil, errors.Trace(err)
	}

	dbInfo := &model.DBInfo{}
	err = json.Unmarshal(value, dbInfo)
	return dbInfo, errors.Trace(err)
}

// GetTable gets the table value in database with tableID.
// Returns (nil, nil) when the table does not exist; the database itself
// must exist (ErrDBNotExists otherwise).
func (m *Meta) GetTable(dbID int64, tableID int64) (*model.TableInfo, error) {
	// Check if db exists.
	dbKey := m.dbKey(dbID)
	if err := m.checkDBExists(dbKey); err != nil {
		return nil, errors.Trace(err)
	}

	tableKey := m.tableKey(tableID)
	value, err := m.txn.HGet(dbKey, tableKey)
	if err != nil || value == nil {
		return nil, errors.Trace(err)
	}

	tableInfo := &model.TableInfo{}
	err = json.Unmarshal(value, tableInfo)
	return tableInfo, errors.Trace(err)
}
// DDL job structure
//	DDLJobOwner: []byte
//	DDLJobList: list jobs
//	DDLJobHistory: hash
//	DDLJobReorg: hash
//
// for multi DDL workers, only one can become the owner
// to operate DDL jobs, and dispatch them to MR Jobs.
var (
	mDDLJobOwnerKey   = []byte("DDLJobOwner")
	mDDLJobListKey    = []byte("DDLJobList")
	mDDLJobHistoryKey = []byte("DDLJobHistory")
	mDDLJobReorgKey   = []byte("DDLJobReorg")
)
// getJobOwner unmarshals the owner stored at key.
// Returns (nil, nil) when the key is absent, so callers must nil-check.
func (m *Meta) getJobOwner(key []byte) (*model.Owner, error) {
	value, err := m.txn.Get(key)
	if err != nil || value == nil {
		return nil, errors.Trace(err)
	}

	owner := &model.Owner{}
	err = json.Unmarshal(value, owner)
	return owner, errors.Trace(err)
}

// GetDDLJobOwner gets the current owner for DDL.
func (m *Meta) GetDDLJobOwner() (*model.Owner, error) {
	return m.getJobOwner(mDDLJobOwnerKey)
}

// setJobOwner marshals o as JSON and stores it at key.
func (m *Meta) setJobOwner(key []byte, o *model.Owner) error {
	b, err := json.Marshal(o)
	if err != nil {
		return errors.Trace(err)
	}

	return m.txn.Set(key, b)
}

// SetDDLJobOwner sets the current owner for DDL.
func (m *Meta) SetDDLJobOwner(o *model.Owner) error {
	return m.setJobOwner(mDDLJobOwnerKey, o)
}

// enQueueDDLJob appends an encoded job to the tail of the list at key.
func (m *Meta) enQueueDDLJob(key []byte, job *model.Job) error {
	b, err := job.Encode()
	if err != nil {
		return errors.Trace(err)
	}

	return m.txn.RPush(key, b)
}

// EnQueueDDLJob adds a DDL job to the list.
func (m *Meta) EnQueueDDLJob(job *model.Job) error {
	return m.enQueueDDLJob(mDDLJobListKey, job)
}

// deQueueDDLJob pops a job from the head of the list at key.
// Returns (nil, nil) when LPop yields no value (empty queue).
func (m *Meta) deQueueDDLJob(key []byte) (*model.Job, error) {
	value, err := m.txn.LPop(key)
	if err != nil || value == nil {
		return nil, errors.Trace(err)
	}

	job := &model.Job{}
	err = job.Decode(value)
	return job, errors.Trace(err)
}

// DeQueueDDLJob pops a DDL job from the list.
func (m *Meta) DeQueueDDLJob() (*model.Job, error) {
	return m.deQueueDDLJob(mDDLJobListKey)
}
// getDDLJob reads and decodes the job at position index of the list at key.
// Returns (nil, nil) when no value exists at that index.
func (m *Meta) getDDLJob(key []byte, index int64) (*model.Job, error) {
	value, err := m.txn.LIndex(key, index)
	if err != nil || value == nil {
		return nil, errors.Trace(err)
	}

	job := &model.Job{}
	err = job.Decode(value)
	return job, errors.Trace(err)
}

// GetDDLJob returns the DDL job with index.
func (m *Meta) GetDDLJob(index int64) (*model.Job, error) {
	job, err := m.getDDLJob(mDDLJobListKey, index)
	return job, errors.Trace(err)
}

// updateDDLJob refreshes job.LastUpdateTS, re-encodes the job and
// overwrites position index of the list at key.
// NOTE(review): key is the LAST parameter here but the FIRST in getDDLJob —
// keep the inconsistent order in mind when calling.
func (m *Meta) updateDDLJob(index int64, job *model.Job, key []byte) error {
	// TODO: use timestamp allocated by TSO
	job.LastUpdateTS = time.Now().UnixNano()
	b, err := job.Encode()
	if err != nil {
		return errors.Trace(err)
	}

	return m.txn.LSet(key, index, b)
}

// UpdateDDLJob updates the DDL job with index.
func (m *Meta) UpdateDDLJob(index int64, job *model.Job) error {
	return m.updateDDLJob(index, job, mDDLJobListKey)
}

// DDLJobQueueLen returns the DDL job queue length.
func (m *Meta) DDLJobQueueLen() (int64, error) {
	return m.txn.LLen(mDDLJobListKey)
}
// jobIDKey encodes a job id as a fixed-width 8-byte big-endian hash field.
func (m *Meta) jobIDKey(id int64) []byte {
	b := make([]byte, 8)
	binary.BigEndian.PutUint64(b, uint64(id))
	return b
}

// addHistoryDDLJob stores an encoded job in the history hash at key,
// indexed by the job's id.
func (m *Meta) addHistoryDDLJob(key []byte, job *model.Job) error {
	b, err := job.Encode()
	if err != nil {
		return errors.Trace(err)
	}

	return m.txn.HSet(key, m.jobIDKey(job.ID), b)
}

// AddHistoryDDLJob adds DDL job to history.
func (m *Meta) AddHistoryDDLJob(job *model.Job) error {
	return m.addHistoryDDLJob(mDDLJobHistoryKey, job)
}

// getHistoryDDLJob reads a job from the history hash at key by id.
// Returns (nil, nil) when the job is not recorded there.
func (m *Meta) getHistoryDDLJob(key []byte, id int64) (*model.Job, error) {
	value, err := m.txn.HGet(key, m.jobIDKey(id))
	if err != nil || value == nil {
		return nil, errors.Trace(err)
	}

	job := &model.Job{}
	err = job.Decode(value)
	return job, errors.Trace(err)
}

// GetHistoryDDLJob gets a history DDL job.
func (m *Meta) GetHistoryDDLJob(id int64) (*model.Job, error) {
	return m.getHistoryDDLJob(mDDLJobHistoryKey, id)
}
// IsBootstrapped returns whether we have already run bootstrap or not.
// return true means we don't need doing any other bootstrap.
func (m *Meta) IsBootstrapped() (bool, error) {
	value, err := m.txn.GetInt64(mBootstrapKey)
	if err != nil {
		return false, errors.Trace(err)
	}

	return value == 1, nil
}

// FinishBootstrap finishes bootstrap by writing "1" under the bootstrap
// key; IsBootstrapped reads it back as the integer 1.
func (m *Meta) FinishBootstrap() error {
	err := m.txn.Set(mBootstrapKey, []byte("1"))
	return errors.Trace(err)
}

// UpdateDDLReorgHandle saves the job reorganization latest processed handle for later resuming.
// The handle is stored as its decimal string representation.
func (m *Meta) UpdateDDLReorgHandle(job *model.Job, handle int64) error {
	err := m.txn.HSet(mDDLJobReorgKey, m.jobIDKey(job.ID), []byte(strconv.FormatInt(handle, 10)))
	return errors.Trace(err)
}

// RemoveDDLReorgHandle removes the job reorganization handle.
func (m *Meta) RemoveDDLReorgHandle(job *model.Job) error {
	err := m.txn.HDel(mDDLJobReorgKey, m.jobIDKey(job.ID))
	return errors.Trace(err)
}

// GetDDLReorgHandle gets the latest processed handle.
func (m *Meta) GetDDLReorgHandle(job *model.Job) (int64, error) {
	value, err := m.txn.HGetInt64(mDDLJobReorgKey, m.jobIDKey(job.ID))
	return value, errors.Trace(err)
}
// DDL background job structure
//	BgJobOwner: []byte
//	BgJobList: list jobs
//	BgJobHistory: hash
//	BgJobReorg: hash
//
// for multi background worker, only one can become the owner
// to operate background job, and dispatch them to MR background job.
var (
	mBgJobOwnerKey   = []byte("BgJobOwner")
	mBgJobListKey    = []byte("BgJobList")
	mBgJobHistoryKey = []byte("BgJobHistory")
)

// The background job API below reuses the DDL job helpers with the Bg*
// keys, so queue/history/owner semantics are identical to the DDL ones.

// UpdateBgJob updates the background job with index.
func (m *Meta) UpdateBgJob(index int64, job *model.Job) error {
	return m.updateDDLJob(index, job, mBgJobListKey)
}

// GetBgJob returns the background job with index.
func (m *Meta) GetBgJob(index int64) (*model.Job, error) {
	job, err := m.getDDLJob(mBgJobListKey, index)
	return job, errors.Trace(err)
}

// EnQueueBgJob adds a background job to the list.
func (m *Meta) EnQueueBgJob(job *model.Job) error {
	return m.enQueueDDLJob(mBgJobListKey, job)
}

// BgJobQueueLen returns the background job queue length.
func (m *Meta) BgJobQueueLen() (int64, error) {
	return m.txn.LLen(mBgJobListKey)
}

// AddHistoryBgJob adds background job to history.
func (m *Meta) AddHistoryBgJob(job *model.Job) error {
	return m.addHistoryDDLJob(mBgJobHistoryKey, job)
}

// GetHistoryBgJob gets a history background job.
func (m *Meta) GetHistoryBgJob(id int64) (*model.Job, error) {
	return m.getHistoryDDLJob(mBgJobHistoryKey, id)
}

// DeQueueBgJob pops a background job from the list.
func (m *Meta) DeQueueBgJob() (*model.Job, error) {
	return m.deQueueDDLJob(mBgJobListKey)
}

// GetBgJobOwner gets the current background job owner.
func (m *Meta) GetBgJobOwner() (*model.Owner, error) {
	return m.getJobOwner(mBgJobOwnerKey)
}

// SetBgJobOwner sets the current background job owner.
func (m *Meta) SetBgJobOwner(o *model.Owner) error {
	return m.setJobOwner(mBgJobOwnerKey, o)
}

163
vendor/github.com/pingcap/tidb/model/ddl.go generated vendored Normal file
View file

@ -0,0 +1,163 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"encoding/json"
"fmt"
"github.com/juju/errors"
)
// ActionType is the type for DDL action.
type ActionType byte

// List DDL actions.
const (
	ActionNone ActionType = iota
	ActionCreateSchema
	ActionDropSchema
	ActionCreateTable
	ActionDropTable
	ActionAddColumn
	ActionDropColumn
	ActionAddIndex
	ActionDropIndex
)

// String implements fmt.Stringer interface.
// ActionNone and any unknown action render as "none".
func (action ActionType) String() string {
	names := [...]string{
		ActionNone:         "none",
		ActionCreateSchema: "create schema",
		ActionDropSchema:   "drop schema",
		ActionCreateTable:  "create table",
		ActionDropTable:    "drop table",
		ActionAddColumn:    "add column",
		ActionDropColumn:   "drop column",
		ActionAddIndex:     "add index",
		ActionDropIndex:    "drop index",
	}
	if int(action) < len(names) {
		return names[action]
	}
	return "none"
}
// Job is for a DDL operation.
type Job struct {
	ID       int64      `json:"id"`
	Type     ActionType `json:"type"`
	SchemaID int64      `json:"schema_id"`
	TableID  int64      `json:"table_id"`
	State    JobState   `json:"state"`
	Error    string     `json:"err"`
	// every time we meet an error when running job, we will increase it
	ErrorCount int64 `json:"err_count"`
	// Args is not serialized directly; Encode snapshots it into RawArgs.
	Args []interface{} `json:"-"`
	// we must use json raw message for delay parsing special args.
	RawArgs     json.RawMessage `json:"raw_args"`
	SchemaState SchemaState     `json:"schema_state"`
	// snapshot version for this job.
	SnapshotVer uint64 `json:"snapshot_ver"`
	// unix nano seconds
	// TODO: use timestamp allocated by TSO
	LastUpdateTS int64 `json:"last_update_ts"`
}
// Encode encodes job with json format.
// Args is marshaled into RawArgs first so type-specific arguments survive
// the round trip (see DecodeArgs).
func (job *Job) Encode() ([]byte, error) {
	var err error
	job.RawArgs, err = json.Marshal(job.Args)
	if err != nil {
		return nil, errors.Trace(err)
	}

	var b []byte
	b, err = json.Marshal(job)
	return b, errors.Trace(err)
}

// Decode decodes job from the json buffer, we must use DecodeArgs later to
// decode special args for this job.
func (job *Job) Decode(b []byte) error {
	err := json.Unmarshal(b, job)
	return errors.Trace(err)
}

// DecodeArgs decodes job args.
// Callers pass pointers whose concrete types depend on job.Type.
func (job *Job) DecodeArgs(args ...interface{}) error {
	job.Args = args
	err := json.Unmarshal(job.RawArgs, &job.Args)
	return errors.Trace(err)
}

// String implements fmt.Stringer interface.
func (job *Job) String() string {
	return fmt.Sprintf("ID:%d, Type:%s, State:%s, SchemaState:%s, SchemaID:%d, TableID:%d, Args:%s",
		job.ID, job.Type, job.State, job.SchemaState, job.SchemaID, job.TableID, job.RawArgs)
}

// IsFinished returns whether job is finished or not.
// If the job state is Done or Cancelled, it is finished.
func (job *Job) IsFinished() bool {
	return job.State == JobDone || job.State == JobCancelled
}

// IsRunning returns whether job is still running or not.
func (job *Job) IsRunning() bool {
	return job.State == JobRunning
}
// JobState is for job state.
type JobState byte

// List job states.
const (
	JobNone JobState = iota
	JobRunning
	JobDone
	JobCancelled
)

// String implements fmt.Stringer interface.
// JobNone and any unknown state render as "none".
func (s JobState) String() string {
	names := map[JobState]string{
		JobRunning:   "running",
		JobDone:      "done",
		JobCancelled: "cancelled",
	}
	if name, ok := names[s]; ok {
		return name
	}
	return "none"
}
// Owner is for DDL Owner.
type Owner struct {
	// OwnerID identifies the worker currently holding DDL ownership.
	OwnerID string `json:"owner_id"`
	// unix nano seconds
	// TODO: use timestamp allocated by TSO
	LastUpdateTS int64 `json:"last_update_ts"`
}

// String implements fmt.Stringer interface.
func (o *Owner) String() string {
	return fmt.Sprintf("ID:%s, LastUpdateTS:%d", o.OwnerID, o.LastUpdateTS)
}

199
vendor/github.com/pingcap/tidb/model/model.go generated vendored Normal file
View file

@ -0,0 +1,199 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"strings"
"github.com/pingcap/tidb/util/types"
)
// SchemaState is the state for schema elements.
type SchemaState byte

const (
	// StateNone means this schema element is absent and can't be used.
	StateNone SchemaState = iota
	// StateDeleteOnly means we can only delete items for this schema element.
	StateDeleteOnly
	// StateWriteOnly means we can use any write operation on this schema element,
	// but outer can't read the changed data.
	StateWriteOnly
	// StateWriteReorganization means we are re-organizing whole data after write only state.
	StateWriteReorganization
	// StateDeleteReorganization means we are re-organizing whole data after delete only state.
	StateDeleteReorganization
	// StatePublic means this schema element is ok for all write and read operations.
	StatePublic
)

// String implements fmt.Stringer interface.
// StateNone and any unknown state render as "none".
func (s SchemaState) String() string {
	names := [...]string{
		StateNone:                 "none",
		StateDeleteOnly:           "delete only",
		StateWriteOnly:            "write only",
		StateWriteReorganization:  "write reorganization",
		StateDeleteReorganization: "delete reorganization",
		StatePublic:               "public",
	}
	if int(s) < len(names) {
		return names[s]
	}
	return "none"
}
// ColumnInfo provides meta data describing of a table column.
type ColumnInfo struct {
	ID           int64       `json:"id"`
	Name         CIStr       `json:"name"`
	Offset       int         `json:"offset"`
	DefaultValue interface{} `json:"default"`
	// Embedded so the column's type attributes serialize under "type".
	types.FieldType `json:"type"`
	State           SchemaState `json:"state"`
}

// Clone clones ColumnInfo.
// This is a shallow copy; DefaultValue and FieldType internals are shared
// with the original.
func (c *ColumnInfo) Clone() *ColumnInfo {
	nc := *c
	return &nc
}
// TableInfo provides meta data describing a DB table.
type TableInfo struct {
	ID      int64  `json:"id"`
	Name    CIStr  `json:"name"`
	Charset string `json:"charset"`
	Collate string `json:"collate"`
	// Columns are listed in the order in which they appear in the schema.
	Columns    []*ColumnInfo `json:"cols"`
	Indices    []*IndexInfo  `json:"index_info"`
	State      SchemaState   `json:"state"`
	PKIsHandle bool          `json:"pk_is_handle"`
	Comment    string        `json:"comment"`
}

// Clone clones TableInfo.
// Column and index slices are cloned element by element so the copy can be
// mutated independently at that level.
func (t *TableInfo) Clone() *TableInfo {
	nt := *t
	nt.Columns = make([]*ColumnInfo, len(t.Columns))
	nt.Indices = make([]*IndexInfo, len(t.Indices))

	for i := range t.Columns {
		nt.Columns[i] = t.Columns[i].Clone()
	}

	for i := range t.Indices {
		nt.Indices[i] = t.Indices[i].Clone()
	}

	return &nt
}
// IndexColumn provides index column info.
type IndexColumn struct {
	Name   CIStr `json:"name"`   // Index name
	Offset int   `json:"offset"` // Index offset
	Length int   `json:"length"` // Index length
}

// Clone clones IndexColumn (a plain value copy suffices here).
func (i *IndexColumn) Clone() *IndexColumn {
	ni := *i
	return &ni
}
// IndexType is the type of index
type IndexType int

// IndexTypes
const (
	IndexTypeBtree IndexType = iota + 1
	IndexTypeHash
)

// String implements Stringer interface.
// Unknown index types render as the empty string.
func (t IndexType) String() string {
	if t == IndexTypeBtree {
		return "BTREE"
	}
	if t == IndexTypeHash {
		return "HASH"
	}
	return ""
}
// IndexInfo provides meta data describing a DB index.
// It corresponds to the statement `CREATE INDEX Name ON Table (Column);`
// See: https://dev.mysql.com/doc/refman/5.7/en/create-index.html
type IndexInfo struct {
	ID      int64          `json:"id"`
	Name    CIStr          `json:"idx_name"`  // Index name.
	Table   CIStr          `json:"tbl_name"`  // Table name.
	Columns []*IndexColumn `json:"idx_cols"`  // Index columns.
	Unique  bool           `json:"is_unique"` // Whether the index is unique.
	Primary bool           `json:"is_primary"` // Whether the index is primary key.
	State   SchemaState    `json:"state"`
	Comment string         `json:"comment"`    // Comment
	Tp      IndexType      `json:"index_type"` // Index type: Btree or Hash
}

// Clone clones IndexInfo.
// The Columns slice is cloned element by element.
func (index *IndexInfo) Clone() *IndexInfo {
	ni := *index
	ni.Columns = make([]*IndexColumn, len(index.Columns))
	for i := range index.Columns {
		ni.Columns[i] = index.Columns[i].Clone()
	}
	return &ni
}
// DBInfo provides meta data describing a DB.
type DBInfo struct {
	ID      int64        `json:"id"`      // Database ID
	Name    CIStr        `json:"db_name"` // DB name.
	Charset string       `json:"charset"`
	Collate string       `json:"collate"`
	Tables  []*TableInfo `json:"-"` // Tables in the DB.
	State   SchemaState  `json:"state"`
}

// Clone clones DBInfo.
// The Tables slice is cloned element by element via TableInfo.Clone.
func (db *DBInfo) Clone() *DBInfo {
	newInfo := *db
	newInfo.Tables = make([]*TableInfo, len(db.Tables))
	for i := range db.Tables {
		newInfo.Tables[i] = db.Tables[i].Clone()
	}
	return &newInfo
}
// CIStr is a case insensitive string: it keeps both the original form and
// the lower-cased form of the same text.
type CIStr struct {
	O string `json:"O"` // Original string.
	L string `json:"L"` // Lower case string.
}

// String implements fmt.Stringer interface, returning the original form.
func (cis CIStr) String() string {
	return cis.O
}

// NewCIStr creates a new CIStr from s.
func NewCIStr(s string) CIStr {
	return CIStr{O: s, L: strings.ToLower(s)}
}

106
vendor/github.com/pingcap/tidb/mysql/bit.go generated vendored Normal file
View file

@ -0,0 +1,106 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package mysql
import (
"fmt"
"strconv"
"strings"
"github.com/juju/errors"
)
// Bit is for mysql bit type.
type Bit struct {
	// Value holds the value for bit type.
	Value uint64
	// Width is the display width for the bit value.
	// e.g, when width is 8, 0 is rendered as 0b00000000.
	Width int
}

// String implements fmt.Stringer interface.
// The value is rendered as a zero-padded binary literal, e.g. 0b00000101.
func (b Bit) String() string {
	// %0*b takes the pad width as an extra operand.
	return fmt.Sprintf("0b%0*b", b.Width, b.Value)
}

// ToNumber changes bit type to float64 for numeric operation.
// MySQL treats bit as double type.
func (b Bit) ToNumber() float64 {
	return float64(b.Value)
}

// ToString returns the big-endian binary string for bit type, one byte per
// 8 bits of display width (rounded up).
func (b Bit) ToString() string {
	size := (b.Width + 7) / 8
	out := make([]byte, size)
	for pos := 0; pos < size; pos++ {
		shift := uint((size - 1 - pos) * 8)
		out[pos] = byte(b.Value >> shift)
	}
	return string(out)
}
// Min and Max bit width.
const (
	MinBitWidth = 1
	MaxBitWidth = 64
	// UnspecifiedBitWidth is the unspecified width if you want to calculate bit width dynamically.
	UnspecifiedBitWidth = -1
)
// ParseBit parses bit string.
// The string format can be b'val', B'val' or 0bval, val must be 0 or 1.
// Width is the display width for bit representation. -1 means calculating
// width dynamically, using following algorithm: (len("011101") + 7) & ^7,
// e.g, if bit string is 0b01, the above will return 8 for its bit width.
func ParseBit(s string, width int) (Bit, error) {
	if len(s) == 0 {
		return Bit{}, errors.Errorf("invalid empty string for parsing bit type")
	}

	if s[0] == 'b' || s[0] == 'B' {
		// format is b'val' or B'val'
		s = strings.Trim(s[1:], "'")
	} else if strings.HasPrefix(s, "0b") {
		s = s[2:]
	} else {
		// here means format is not b'val', B'val' or 0bval.
		return Bit{}, errors.Errorf("invalid bit type format %s", s)
	}

	if width == UnspecifiedBitWidth {
		// Round the literal's bit length up to a whole number of bytes.
		width = (len(s) + 7) & ^7
	}

	if width == 0 {
		// Treat a zero width (explicit or derived) as the minimum width.
		width = MinBitWidth
	}

	if width < MinBitWidth || width > MaxBitWidth {
		return Bit{}, errors.Errorf("invalid display width for bit type, must in [1, 64], but %d", width)
	}

	n, err := strconv.ParseUint(s, 2, 64)
	if err != nil {
		return Bit{}, errors.Trace(err)
	}

	// The parsed value must fit in the declared width. For width 64 the
	// shift wraps to 0 in Go, making the bound MaxUint64, so every value fits.
	if n > (uint64(1)<<uint64(width))-1 {
		return Bit{}, errors.Errorf("bit %s is too long for width %d", s, width)
	}

	return Bit{Value: n, Width: width}, nil
}

556
vendor/github.com/pingcap/tidb/mysql/charset.go generated vendored Normal file
View file

@ -0,0 +1,556 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package mysql
// CharsetIDs maps charset name to its default collation ID.
// The IDs mirror MySQL's collation numbering (see Collations below).
var CharsetIDs = map[string]uint8{
	"big5": 1,
	"dec8": 3,
	"cp850": 4,
	"hp8": 6,
	"koi8r": 7,
	"latin1": 8,
	"latin2": 9,
	"swe7": 10,
	"ascii": 11,
	"ujis": 12,
	"sjis": 13,
	"hebrew": 16,
	"tis620": 18,
	"euckr": 19,
	"koi8u": 22,
	"gb2312": 24,
	"greek": 25,
	"cp1250": 26,
	"gbk": 28,
	"latin5": 30,
	"armscii8": 32,
	"utf8": 33,
	"ucs2": 35,
	"cp866": 36,
	"keybcs2": 37,
	"macce": 38,
	"macroman": 39,
	"cp852": 40,
	"latin7": 41,
	"utf8mb4": 45,
	"cp1251": 51,
	"utf16": 54,
	"utf16le": 56,
	"cp1256": 57,
	"cp1257": 59,
	"utf32": 60,
	"binary": 63,
	"geostd8": 92,
	"cp932": 95,
	"eucjpms": 97,
}
// Charsets maps charset name to its default collation name.
// Keys match CharsetIDs; values match the Collations table.
var Charsets = map[string]string{
	"big5": "big5_chinese_ci",
	"dec8": "dec8_swedish_ci",
	"cp850": "cp850_general_ci",
	"hp8": "hp8_english_ci",
	"koi8r": "koi8r_general_ci",
	"latin1": "latin1_swedish_ci",
	"latin2": "latin2_general_ci",
	"swe7": "swe7_swedish_ci",
	"ascii": "ascii_general_ci",
	"ujis": "ujis_japanese_ci",
	"sjis": "sjis_japanese_ci",
	"hebrew": "hebrew_general_ci",
	"tis620": "tis620_thai_ci",
	"euckr": "euckr_korean_ci",
	"koi8u": "koi8u_general_ci",
	"gb2312": "gb2312_chinese_ci",
	"greek": "greek_general_ci",
	"cp1250": "cp1250_general_ci",
	"gbk": "gbk_chinese_ci",
	"latin5": "latin5_turkish_ci",
	"armscii8": "armscii8_general_ci",
	"utf8": "utf8_general_ci",
	"ucs2": "ucs2_general_ci",
	"cp866": "cp866_general_ci",
	"keybcs2": "keybcs2_general_ci",
	"macce": "macce_general_ci",
	"macroman": "macroman_general_ci",
	"cp852": "cp852_general_ci",
	"latin7": "latin7_general_ci",
	"utf8mb4": "utf8mb4_general_ci",
	"cp1251": "cp1251_general_ci",
	"utf16": "utf16_general_ci",
	"utf16le": "utf16le_general_ci",
	"cp1256": "cp1256_general_ci",
	"cp1257": "cp1257_general_ci",
	"utf32": "utf32_general_ci",
	"binary": "binary",
	"geostd8": "geostd8_general_ci",
	"cp932": "cp932_japanese_ci",
	"eucjpms": "eucjpms_japanese_ci",
}
// Collations maps MySQL default collation ID to its name.
// IDs are MySQL's wire-protocol collation identifiers; gaps are IDs MySQL
// does not assign.
var Collations = map[uint8]string{
	1: "big5_chinese_ci",
	2: "latin2_czech_cs",
	3: "dec8_swedish_ci",
	4: "cp850_general_ci",
	5: "latin1_german1_ci",
	6: "hp8_english_ci",
	7: "koi8r_general_ci",
	8: "latin1_swedish_ci",
	9: "latin2_general_ci",
	10: "swe7_swedish_ci",
	11: "ascii_general_ci",
	12: "ujis_japanese_ci",
	13: "sjis_japanese_ci",
	14: "cp1251_bulgarian_ci",
	15: "latin1_danish_ci",
	16: "hebrew_general_ci",
	18: "tis620_thai_ci",
	19: "euckr_korean_ci",
	20: "latin7_estonian_cs",
	21: "latin2_hungarian_ci",
	22: "koi8u_general_ci",
	23: "cp1251_ukrainian_ci",
	24: "gb2312_chinese_ci",
	25: "greek_general_ci",
	26: "cp1250_general_ci",
	27: "latin2_croatian_ci",
	28: "gbk_chinese_ci",
	29: "cp1257_lithuanian_ci",
	30: "latin5_turkish_ci",
	31: "latin1_german2_ci",
	32: "armscii8_general_ci",
	33: "utf8_general_ci",
	34: "cp1250_czech_cs",
	35: "ucs2_general_ci",
	36: "cp866_general_ci",
	37: "keybcs2_general_ci",
	38: "macce_general_ci",
	39: "macroman_general_ci",
	40: "cp852_general_ci",
	41: "latin7_general_ci",
	42: "latin7_general_cs",
	43: "macce_bin",
	44: "cp1250_croatian_ci",
	45: "utf8mb4_general_ci",
	46: "utf8mb4_bin",
	47: "latin1_bin",
	48: "latin1_general_ci",
	49: "latin1_general_cs",
	50: "cp1251_bin",
	51: "cp1251_general_ci",
	52: "cp1251_general_cs",
	53: "macroman_bin",
	54: "utf16_general_ci",
	55: "utf16_bin",
	56: "utf16le_general_ci",
	57: "cp1256_general_ci",
	58: "cp1257_bin",
	59: "cp1257_general_ci",
	60: "utf32_general_ci",
	61: "utf32_bin",
	62: "utf16le_bin",
	63: "binary",
	64: "armscii8_bin",
	65: "ascii_bin",
	66: "cp1250_bin",
	67: "cp1256_bin",
	68: "cp866_bin",
	69: "dec8_bin",
	70: "greek_bin",
	71: "hebrew_bin",
	72: "hp8_bin",
	73: "keybcs2_bin",
	74: "koi8r_bin",
	75: "koi8u_bin",
	77: "latin2_bin",
	78: "latin5_bin",
	79: "latin7_bin",
	80: "cp850_bin",
	81: "cp852_bin",
	82: "swe7_bin",
	83: "utf8_bin",
	84: "big5_bin",
	85: "euckr_bin",
	86: "gb2312_bin",
	87: "gbk_bin",
	88: "sjis_bin",
	89: "tis620_bin",
	90: "ucs2_bin",
	91: "ujis_bin",
	92: "geostd8_general_ci",
	93: "geostd8_bin",
	94: "latin1_spanish_ci",
	95: "cp932_japanese_ci",
	96: "cp932_bin",
	97: "eucjpms_japanese_ci",
	98: "eucjpms_bin",
	99: "cp1250_polish_ci",
	101: "utf16_unicode_ci",
	102: "utf16_icelandic_ci",
	103: "utf16_latvian_ci",
	104: "utf16_romanian_ci",
	105: "utf16_slovenian_ci",
	106: "utf16_polish_ci",
	107: "utf16_estonian_ci",
	108: "utf16_spanish_ci",
	109: "utf16_swedish_ci",
	110: "utf16_turkish_ci",
	111: "utf16_czech_ci",
	112: "utf16_danish_ci",
	113: "utf16_lithuanian_ci",
	114: "utf16_slovak_ci",
	115: "utf16_spanish2_ci",
	116: "utf16_roman_ci",
	117: "utf16_persian_ci",
	118: "utf16_esperanto_ci",
	119: "utf16_hungarian_ci",
	120: "utf16_sinhala_ci",
	121: "utf16_german2_ci",
	122: "utf16_croatian_ci",
	123: "utf16_unicode_520_ci",
	124: "utf16_vietnamese_ci",
	128: "ucs2_unicode_ci",
	129: "ucs2_icelandic_ci",
	130: "ucs2_latvian_ci",
	131: "ucs2_romanian_ci",
	132: "ucs2_slovenian_ci",
	133: "ucs2_polish_ci",
	134: "ucs2_estonian_ci",
	135: "ucs2_spanish_ci",
	136: "ucs2_swedish_ci",
	137: "ucs2_turkish_ci",
	138: "ucs2_czech_ci",
	139: "ucs2_danish_ci",
	140: "ucs2_lithuanian_ci",
	141: "ucs2_slovak_ci",
	142: "ucs2_spanish2_ci",
	143: "ucs2_roman_ci",
	144: "ucs2_persian_ci",
	145: "ucs2_esperanto_ci",
	146: "ucs2_hungarian_ci",
	147: "ucs2_sinhala_ci",
	148: "ucs2_german2_ci",
	149: "ucs2_croatian_ci",
	150: "ucs2_unicode_520_ci",
	151: "ucs2_vietnamese_ci",
	159: "ucs2_general_mysql500_ci",
	160: "utf32_unicode_ci",
	161: "utf32_icelandic_ci",
	162: "utf32_latvian_ci",
	163: "utf32_romanian_ci",
	164: "utf32_slovenian_ci",
	165: "utf32_polish_ci",
	166: "utf32_estonian_ci",
	167: "utf32_spanish_ci",
	168: "utf32_swedish_ci",
	169: "utf32_turkish_ci",
	170: "utf32_czech_ci",
	171: "utf32_danish_ci",
	172: "utf32_lithuanian_ci",
	173: "utf32_slovak_ci",
	174: "utf32_spanish2_ci",
	175: "utf32_roman_ci",
	176: "utf32_persian_ci",
	177: "utf32_esperanto_ci",
	178: "utf32_hungarian_ci",
	179: "utf32_sinhala_ci",
	180: "utf32_german2_ci",
	181: "utf32_croatian_ci",
	182: "utf32_unicode_520_ci",
	183: "utf32_vietnamese_ci",
	192: "utf8_unicode_ci",
	193: "utf8_icelandic_ci",
	194: "utf8_latvian_ci",
	195: "utf8_romanian_ci",
	196: "utf8_slovenian_ci",
	197: "utf8_polish_ci",
	198: "utf8_estonian_ci",
	199: "utf8_spanish_ci",
	200: "utf8_swedish_ci",
	201: "utf8_turkish_ci",
	202: "utf8_czech_ci",
	203: "utf8_danish_ci",
	204: "utf8_lithuanian_ci",
	205: "utf8_slovak_ci",
	206: "utf8_spanish2_ci",
	207: "utf8_roman_ci",
	208: "utf8_persian_ci",
	209: "utf8_esperanto_ci",
	210: "utf8_hungarian_ci",
	211: "utf8_sinhala_ci",
	212: "utf8_german2_ci",
	213: "utf8_croatian_ci",
	214: "utf8_unicode_520_ci",
	215: "utf8_vietnamese_ci",
	223: "utf8_general_mysql500_ci",
	224: "utf8mb4_unicode_ci",
	225: "utf8mb4_icelandic_ci",
	226: "utf8mb4_latvian_ci",
	227: "utf8mb4_romanian_ci",
	228: "utf8mb4_slovenian_ci",
	229: "utf8mb4_polish_ci",
	230: "utf8mb4_estonian_ci",
	231: "utf8mb4_spanish_ci",
	232: "utf8mb4_swedish_ci",
	233: "utf8mb4_turkish_ci",
	234: "utf8mb4_czech_ci",
	235: "utf8mb4_danish_ci",
	236: "utf8mb4_lithuanian_ci",
	237: "utf8mb4_slovak_ci",
	238: "utf8mb4_spanish2_ci",
	239: "utf8mb4_roman_ci",
	240: "utf8mb4_persian_ci",
	241: "utf8mb4_esperanto_ci",
	242: "utf8mb4_hungarian_ci",
	243: "utf8mb4_sinhala_ci",
	244: "utf8mb4_german2_ci",
	245: "utf8mb4_croatian_ci",
	246: "utf8mb4_unicode_520_ci",
	247: "utf8mb4_vietnamese_ci",
}
// CollationNames maps MySQL default collation name to its ID.
// It is the name->ID inverse of the ID->name table above; every assigned
// ID here fits in a uint8 (the largest value is 247, utf8mb4_vietnamese_ci).
var CollationNames = map[string]uint8{
	"big5_chinese_ci":          1,
	"latin2_czech_cs":          2,
	"dec8_swedish_ci":          3,
	"cp850_general_ci":         4,
	"latin1_german1_ci":        5,
	"hp8_english_ci":           6,
	"koi8r_general_ci":         7,
	"latin1_swedish_ci":        8,
	"latin2_general_ci":        9,
	"swe7_swedish_ci":          10,
	"ascii_general_ci":         11,
	"ujis_japanese_ci":         12,
	"sjis_japanese_ci":         13,
	"cp1251_bulgarian_ci":      14,
	"latin1_danish_ci":         15,
	"hebrew_general_ci":        16,
	"tis620_thai_ci":           18,
	"euckr_korean_ci":          19,
	"latin7_estonian_cs":       20,
	"latin2_hungarian_ci":      21,
	"koi8u_general_ci":         22,
	"cp1251_ukrainian_ci":      23,
	"gb2312_chinese_ci":        24,
	"greek_general_ci":         25,
	"cp1250_general_ci":        26,
	"latin2_croatian_ci":       27,
	"gbk_chinese_ci":           28,
	"cp1257_lithuanian_ci":     29,
	"latin5_turkish_ci":        30,
	"latin1_german2_ci":        31,
	"armscii8_general_ci":      32,
	"utf8_general_ci":          33,
	"cp1250_czech_cs":          34,
	"ucs2_general_ci":          35,
	"cp866_general_ci":         36,
	"keybcs2_general_ci":       37,
	"macce_general_ci":         38,
	"macroman_general_ci":      39,
	"cp852_general_ci":         40,
	"latin7_general_ci":        41,
	"latin7_general_cs":        42,
	"macce_bin":                43,
	"cp1250_croatian_ci":       44,
	"utf8mb4_general_ci":       45,
	"utf8mb4_bin":              46,
	"latin1_bin":               47,
	"latin1_general_ci":        48,
	"latin1_general_cs":        49,
	"cp1251_bin":               50,
	"cp1251_general_ci":        51,
	"cp1251_general_cs":        52,
	"macroman_bin":             53,
	"utf16_general_ci":         54,
	"utf16_bin":                55,
	"utf16le_general_ci":       56,
	"cp1256_general_ci":        57,
	"cp1257_bin":               58,
	"cp1257_general_ci":        59,
	"utf32_general_ci":         60,
	"utf32_bin":                61,
	"utf16le_bin":              62,
	"binary":                   63,
	"armscii8_bin":             64,
	"ascii_bin":                65,
	"cp1250_bin":               66,
	"cp1256_bin":               67,
	"cp866_bin":                68,
	"dec8_bin":                 69,
	"greek_bin":                70,
	"hebrew_bin":               71,
	"hp8_bin":                  72,
	"keybcs2_bin":              73,
	"koi8r_bin":                74,
	"koi8u_bin":                75,
	"latin2_bin":               77,
	"latin5_bin":               78,
	"latin7_bin":               79,
	"cp850_bin":                80,
	"cp852_bin":                81,
	"swe7_bin":                 82,
	"utf8_bin":                 83,
	"big5_bin":                 84,
	"euckr_bin":                85,
	"gb2312_bin":               86,
	"gbk_bin":                  87,
	"sjis_bin":                 88,
	"tis620_bin":               89,
	"ucs2_bin":                 90,
	"ujis_bin":                 91,
	"geostd8_general_ci":       92,
	"geostd8_bin":              93,
	"latin1_spanish_ci":        94,
	"cp932_japanese_ci":        95,
	"cp932_bin":                96,
	"eucjpms_japanese_ci":      97,
	"eucjpms_bin":              98,
	"cp1250_polish_ci":         99,
	"utf16_unicode_ci":         101,
	"utf16_icelandic_ci":       102,
	"utf16_latvian_ci":         103,
	"utf16_romanian_ci":        104,
	"utf16_slovenian_ci":       105,
	"utf16_polish_ci":          106,
	"utf16_estonian_ci":        107,
	"utf16_spanish_ci":         108,
	"utf16_swedish_ci":         109,
	"utf16_turkish_ci":         110,
	"utf16_czech_ci":           111,
	"utf16_danish_ci":          112,
	"utf16_lithuanian_ci":      113,
	"utf16_slovak_ci":          114,
	"utf16_spanish2_ci":        115,
	"utf16_roman_ci":           116,
	"utf16_persian_ci":         117,
	"utf16_esperanto_ci":       118,
	"utf16_hungarian_ci":       119,
	"utf16_sinhala_ci":         120,
	"utf16_german2_ci":         121,
	"utf16_croatian_ci":        122,
	"utf16_unicode_520_ci":     123,
	"utf16_vietnamese_ci":      124,
	"ucs2_unicode_ci":          128,
	"ucs2_icelandic_ci":        129,
	"ucs2_latvian_ci":          130,
	"ucs2_romanian_ci":         131,
	"ucs2_slovenian_ci":        132,
	"ucs2_polish_ci":           133,
	"ucs2_estonian_ci":         134,
	"ucs2_spanish_ci":          135,
	"ucs2_swedish_ci":          136,
	"ucs2_turkish_ci":          137,
	"ucs2_czech_ci":            138,
	"ucs2_danish_ci":           139,
	"ucs2_lithuanian_ci":       140,
	"ucs2_slovak_ci":           141,
	"ucs2_spanish2_ci":         142,
	"ucs2_roman_ci":            143,
	"ucs2_persian_ci":          144,
	"ucs2_esperanto_ci":        145,
	"ucs2_hungarian_ci":        146,
	"ucs2_sinhala_ci":          147,
	"ucs2_german2_ci":          148,
	"ucs2_croatian_ci":         149,
	"ucs2_unicode_520_ci":      150,
	"ucs2_vietnamese_ci":       151,
	"ucs2_general_mysql500_ci": 159,
	"utf32_unicode_ci":         160,
	"utf32_icelandic_ci":       161,
	"utf32_latvian_ci":         162,
	"utf32_romanian_ci":        163,
	"utf32_slovenian_ci":       164,
	"utf32_polish_ci":          165,
	"utf32_estonian_ci":        166,
	"utf32_spanish_ci":         167,
	"utf32_swedish_ci":         168,
	"utf32_turkish_ci":         169,
	"utf32_czech_ci":           170,
	"utf32_danish_ci":          171,
	"utf32_lithuanian_ci":      172,
	"utf32_slovak_ci":          173,
	"utf32_spanish2_ci":        174,
	"utf32_roman_ci":           175,
	"utf32_persian_ci":         176,
	"utf32_esperanto_ci":       177,
	"utf32_hungarian_ci":       178,
	"utf32_sinhala_ci":         179,
	"utf32_german2_ci":         180,
	"utf32_croatian_ci":        181,
	"utf32_unicode_520_ci":     182,
	"utf32_vietnamese_ci":      183,
	"utf8_unicode_ci":          192,
	"utf8_icelandic_ci":        193,
	"utf8_latvian_ci":          194,
	"utf8_romanian_ci":         195,
	"utf8_slovenian_ci":        196,
	"utf8_polish_ci":           197,
	"utf8_estonian_ci":         198,
	"utf8_spanish_ci":          199,
	"utf8_swedish_ci":          200,
	"utf8_turkish_ci":          201,
	"utf8_czech_ci":            202,
	"utf8_danish_ci":           203,
	"utf8_lithuanian_ci":       204,
	"utf8_slovak_ci":           205,
	"utf8_spanish2_ci":         206,
	"utf8_roman_ci":            207,
	"utf8_persian_ci":          208,
	"utf8_esperanto_ci":        209,
	"utf8_hungarian_ci":        210,
	"utf8_sinhala_ci":          211,
	"utf8_german2_ci":          212,
	"utf8_croatian_ci":         213,
	"utf8_unicode_520_ci":      214,
	"utf8_vietnamese_ci":       215,
	"utf8_general_mysql500_ci": 223,
	"utf8mb4_unicode_ci":       224,
	"utf8mb4_icelandic_ci":     225,
	"utf8mb4_latvian_ci":       226,
	"utf8mb4_romanian_ci":      227,
	"utf8mb4_slovenian_ci":     228,
	"utf8mb4_polish_ci":        229,
	"utf8mb4_estonian_ci":      230,
	"utf8mb4_spanish_ci":       231,
	"utf8mb4_swedish_ci":       232,
	"utf8mb4_turkish_ci":       233,
	"utf8mb4_czech_ci":         234,
	"utf8mb4_danish_ci":        235,
	"utf8mb4_lithuanian_ci":    236,
	"utf8mb4_slovak_ci":        237,
	"utf8mb4_spanish2_ci":      238,
	"utf8mb4_roman_ci":         239,
	"utf8mb4_persian_ci":       240,
	"utf8mb4_esperanto_ci":     241,
	"utf8mb4_hungarian_ci":     242,
	"utf8mb4_sinhala_ci":       243,
	"utf8mb4_german2_ci":       244,
	"utf8mb4_croatian_ci":      245,
	"utf8mb4_unicode_520_ci":   246,
	"utf8mb4_vietnamese_ci":    247,
}
// MySQL collation information: the charset/collation defaults used by the
// server.
const (
	// DefaultCharset is the character set assumed when none is specified.
	DefaultCharset = "utf8"
	// DefaultCollationID is 33, the ID of utf8_general_ci (see CollationNames).
	DefaultCollationID = 33
	// BinaryCollationID is 63, the ID of the "binary" collation.
	BinaryCollationID = 63
	// DefaultCollationName is the collation matching DefaultCollationID.
	DefaultCollationName = "utf8_general_ci"
)

261
vendor/github.com/pingcap/tidb/mysql/const.go generated vendored Normal file
View file

@ -0,0 +1,261 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mysql
// Version information.
const (
	// MinProtocolVersion is the minimum MySQL wire-protocol version supported.
	MinProtocolVersion byte = 10
	// MaxPayloadLen is the maximum payload of one protocol packet: 2^24 - 1 bytes.
	MaxPayloadLen int = 1<<24 - 1
	// ServerVersion is the version string reported to clients.
	ServerVersion string = "5.5.31-TiDB-1.0"
)

// Header information: the first byte of a server response packet
// identifies its kind.
const (
	OKHeader          byte = 0x00
	ErrHeader         byte = 0xff
	EOFHeader         byte = 0xfe
	LocalInFileHeader byte = 0xfb
)

// Server information: status bit flags (each a distinct uint16 bit).
const (
	ServerStatusInTrans            uint16 = 0x0001
	ServerStatusAutocommit         uint16 = 0x0002
	ServerMoreResultsExists        uint16 = 0x0008
	ServerStatusNoGoodIndexUsed    uint16 = 0x0010
	ServerStatusNoIndexUsed        uint16 = 0x0020
	ServerStatusCursorExists       uint16 = 0x0040
	ServerStatusLastRowSend        uint16 = 0x0080
	ServerStatusDBDropped          uint16 = 0x0100
	ServerStatusNoBackslashEscaped uint16 = 0x0200
	ServerStatusMetadataChanged    uint16 = 0x0400
	ServerStatusWasSlow            uint16 = 0x0800
	ServerPSOutParams              uint16 = 0x1000
)

// Command information: client command codes, numbered sequentially
// starting from ComSleep == 0.
const (
	ComSleep byte = iota
	ComQuit
	ComInitDB
	ComQuery
	ComFieldList
	ComCreateDB
	ComDropDB
	ComRefresh
	ComShutdown
	ComStatistics
	ComProcessInfo
	ComConnect
	ComProcessKill
	ComDebug
	ComPing
	ComTime
	ComDelayedInsert
	ComChangeUser
	ComBinlogDump
	ComTableDump
	ComConnectOut
	ComRegisterSlave
	ComStmtPrepare
	ComStmtExecute
	ComStmtSendLongData
	ComStmtClose
	ComStmtReset
	ComSetOption
	ComStmtFetch
	ComDaemon
	ComBinlogDumpGtid
	ComResetConnection
)
// Client information: capability bit flags exchanged during the
// handshake. Each constant is the next power of two, starting from
// ClientLongPassword == 1.
const (
	ClientLongPassword uint32 = 1 << iota
	ClientFoundRows
	ClientLongFlag
	ClientConnectWithDB
	ClientNoSchema
	ClientCompress
	ClientODBC
	ClientLocalFiles
	ClientIgnoreSpace
	ClientProtocol41
	ClientInteractive
	ClientSSL
	ClientIgnoreSigpipe
	ClientTransactions
	ClientReserved
	ClientSecureConnection
	ClientMultiStatements
	ClientMultiResults
	ClientPSMultiResults
	ClientPluginAuth
	ClientConnectAtts
	ClientPluginAuthLenencClientData
)

// Cache type information.
const (
	TypeNoCache byte = 0xff
)

// Auth name information.
const (
	// AuthName is the authentication plugin name announced to clients.
	AuthName = "mysql_native_password"
)

// MySQL database and tables.
const (
	// SystemDB is the name of system database.
	SystemDB = "mysql"
	// UserTable is the table in system db contains user info.
	UserTable = "User"
	// DBTable is the table in system db contains db scope privilege info.
	DBTable = "DB"
	// TablePrivTable is the table in system db contains table scope privilege info.
	TablePrivTable = "Tables_priv"
	// ColumnPrivTable is the table in system db contains column scope privilege info.
	ColumnPrivTable = "Columns_priv"
	// GlobalVariablesTable is the table contains global system variables.
	GlobalVariablesTable = "GLOBAL_VARIABLES"
	// GlobalStatusTable is the table contains global status variables.
	GlobalStatusTable = "GLOBAL_STATUS"
	// TiDBTable is the table contains tidb info.
	TiDBTable = "tidb"
)
// PrivilegeType is a single MySQL privilege, represented as one bit of a
// uint32 so privileges can be combined into bit sets.
type PrivilegeType uint32

// Note the blank first constant: the iota sequence starts at 1 << 0 which
// is deliberately unused, so CreatePriv == 1 << 1.
const (
	_ PrivilegeType = 1 << iota
	// CreatePriv is the privilege to create schema/table.
	CreatePriv
	// SelectPriv is the privilege to read from table.
	SelectPriv
	// InsertPriv is the privilege to insert data into table.
	InsertPriv
	// UpdatePriv is the privilege to update data in table.
	UpdatePriv
	// DeletePriv is the privilege to delete data from table.
	DeletePriv
	// ShowDBPriv is the privilege to run show databases statement.
	ShowDBPriv
	// CreateUserPriv is the privilege to create user.
	CreateUserPriv
	// DropPriv is the privilege to drop schema/table.
	DropPriv
	// GrantPriv is the privilege to grant privilege to user.
	GrantPriv
	// AlterPriv is the privilege to run alter statement.
	AlterPriv
	// ExecutePriv is the privilege to run execute statement.
	ExecutePriv
	// IndexPriv is the privilege to create/drop index.
	IndexPriv
	// AllPriv is the privilege for all actions.
	AllPriv
)
// Priv2UserCol is the privilege to mysql.user table column name.
var Priv2UserCol = map[PrivilegeType]string{
	CreatePriv:     "Create_priv",
	SelectPriv:     "Select_priv",
	InsertPriv:     "Insert_priv",
	UpdatePriv:     "Update_priv",
	DeletePriv:     "Delete_priv",
	ShowDBPriv:     "Show_db_priv",
	CreateUserPriv: "Create_user_priv",
	DropPriv:       "Drop_priv",
	GrantPriv:      "Grant_priv",
	AlterPriv:      "Alter_priv",
	ExecutePriv:    "Execute_priv",
	IndexPriv:      "Index_priv",
}

// Col2PrivType is the privilege tables column name to privilege type.
// It is the exact inverse of Priv2UserCol.
var Col2PrivType = map[string]PrivilegeType{
	"Create_priv":      CreatePriv,
	"Select_priv":      SelectPriv,
	"Insert_priv":      InsertPriv,
	"Update_priv":      UpdatePriv,
	"Delete_priv":      DeletePriv,
	"Show_db_priv":     ShowDBPriv,
	"Create_user_priv": CreateUserPriv,
	"Drop_priv":        DropPriv,
	"Grant_priv":       GrantPriv,
	"Alter_priv":       AlterPriv,
	"Execute_priv":     ExecutePriv,
	"Index_priv":       IndexPriv,
}

// AllGlobalPrivs is all the privileges in global scope.
var AllGlobalPrivs = []PrivilegeType{SelectPriv, InsertPriv, UpdatePriv, DeletePriv, CreatePriv, DropPriv, GrantPriv, AlterPriv, ShowDBPriv, ExecutePriv, IndexPriv, CreateUserPriv}

// Priv2Str is the map for privilege to string.
var Priv2Str = map[PrivilegeType]string{
	CreatePriv:     "Create",
	SelectPriv:     "Select",
	InsertPriv:     "Insert",
	UpdatePriv:     "Update",
	DeletePriv:     "Delete",
	ShowDBPriv:     "Show Databases",
	CreateUserPriv: "Create User",
	DropPriv:       "Drop",
	GrantPriv:      "Grant Option",
	AlterPriv:      "Alter",
	ExecutePriv:    "Execute",
	IndexPriv:      "Index",
}

// Priv2SetStr is the map for privilege to string.
// NOTE(review): ShowDBPriv and CreateUserPriv are intentionally absent
// here and in SetStr2Priv — presumably they have no SET-string form in the
// privilege tables; verify against the privilege-table schema before
// extending.
var Priv2SetStr = map[PrivilegeType]string{
	CreatePriv:  "Create",
	SelectPriv:  "Select",
	InsertPriv:  "Insert",
	UpdatePriv:  "Update",
	DeletePriv:  "Delete",
	DropPriv:    "Drop",
	GrantPriv:   "Grant",
	AlterPriv:   "Alter",
	ExecutePriv: "Execute",
	IndexPriv:   "Index",
}

// SetStr2Priv is the map for privilege set string to privilege type.
// It is the inverse of Priv2SetStr.
var SetStr2Priv = map[string]PrivilegeType{
	"Create":  CreatePriv,
	"Select":  SelectPriv,
	"Insert":  InsertPriv,
	"Update":  UpdatePriv,
	"Delete":  DeletePriv,
	"Drop":    DropPriv,
	"Grant":   GrantPriv,
	"Alter":   AlterPriv,
	"Execute": ExecutePriv,
	"Index":   IndexPriv,
}

// AllDBPrivs is all the privileges in database scope.
var AllDBPrivs = []PrivilegeType{SelectPriv, InsertPriv, UpdatePriv, DeletePriv, CreatePriv, DropPriv, GrantPriv, AlterPriv, ExecutePriv, IndexPriv}

// AllTablePrivs is all the privileges in table scope.
var AllTablePrivs = []PrivilegeType{SelectPriv, InsertPriv, UpdatePriv, DeletePriv, CreatePriv, DropPriv, GrantPriv, AlterPriv, IndexPriv}

// AllColumnPrivs is all the privileges in column scope.
var AllColumnPrivs = []PrivilegeType{SelectPriv, InsertPriv, UpdatePriv}

// AllPrivilegeLiteral is the string literal for All Privilege.
const AllPrivilegeLiteral = "ALL PRIVILEGES"

755
vendor/github.com/pingcap/tidb/mysql/decimal.go generated vendored Normal file
View file

@ -0,0 +1,755 @@
// The MIT License (MIT)
// Copyright (c) 2015 Spring, Inc.
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
// - Based on https://github.com/oguzbilgic/fpd, which has the following license:
// """
// The MIT License (MIT)
// Copyright (c) 2013 Oguz Bilgic
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software is furnished to do so,
// subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
// """
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mysql
// Decimal implements an arbitrary precision fixed-point decimal.
//
// To use as part of a struct:
//
// type Struct struct {
// Number Decimal
// }
//
// The zero-value of a Decimal is 0, as you would expect.
//
// The best way to create a new Decimal is to use decimal.NewFromString, ex:
//
// n, err := decimal.NewFromString("-123.4567")
// n.String() // output: "-123.4567"
//
// NOTE: this can "only" represent numbers with a maximum of 2^31 digits
// after the decimal point.
import (
"database/sql/driver"
"fmt"
"math"
"math/big"
"strconv"
"strings"
)
// Precision limits for Decimal arithmetic.
//
// NOTE(review): the original comment here documented a `DivisionPrecision`
// variable that does not exist in this file; the constants actually
// declared are described below.
//
//   - MaxFractionDigits caps how many digits are kept after the decimal
//     point (ParseDecimal rescales anything finer, and Mul/Div round or
//     truncate to this bound).
//   - DivIncreasePrecision is how many extra fractional digits a division
//     result is granted over its dividend (see fracDigitsDiv).
const (
	MaxFractionDigits    = 30
	DivIncreasePrecision = 4
)
// ZeroDecimal is zero constant, to make computations faster.
var ZeroDecimal = NewDecimalFromInt(0, 1)

// Small shared big.Int constants, allocated once to avoid repeated
// big.NewInt calls in the arithmetic below. They must never be mutated.
var zeroInt = big.NewInt(0)
var oneInt = big.NewInt(1)
var fiveInt = big.NewInt(5)
var tenInt = big.NewInt(10)
// Decimal represents a fixed-point decimal. It is immutable.
// number = value * 10 ^ exp
type Decimal struct {
	// value is the unscaled integer mantissa.
	value *big.Int
	// this must be an int32, because we cast it to float64 during
	// calculations. If exp is 64 bit, we might lose precision.
	// If we cared about being able to represent every possible decimal, we
	// could make exp a *big.Int but it would hurt performance and numbers
	// like that are unrealistic.
	exp int32
	// fracDigits controls how many fractional digits String() prints; it
	// is maintained independently of exp by the arithmetic methods.
	fracDigits int32 // Number of fractional digits for string result.
}
// ConvertToDecimal converts an arbitrary value into a Decimal. It accepts
// all signed and unsigned integer types, both float types, numeric
// strings, Decimal itself, and the package wrapper types Hex, Bit, Enum
// and Set; any other dynamic type yields an error.
func ConvertToDecimal(value interface{}) (Decimal, error) {
	switch v := value.(type) {
	case int8:
		return NewDecimalFromInt(int64(v), 0), nil
	case int16:
		return NewDecimalFromInt(int64(v), 0), nil
	case int32:
		return NewDecimalFromInt(int64(v), 0), nil
	case int64:
		return NewDecimalFromInt(v, 0), nil
	case int:
		return NewDecimalFromInt(int64(v), 0), nil
	case uint8:
		return NewDecimalFromUint(uint64(v), 0), nil
	case uint16:
		return NewDecimalFromUint(uint64(v), 0), nil
	case uint32:
		return NewDecimalFromUint(uint64(v), 0), nil
	case uint64:
		return NewDecimalFromUint(v, 0), nil
	case uint:
		return NewDecimalFromUint(uint64(v), 0), nil
	case float32:
		return NewDecimalFromFloat(float64(v)), nil
	case float64:
		return NewDecimalFromFloat(v), nil
	case string:
		return ParseDecimal(v)
	case Decimal:
		return v, nil
	case Hex:
		return NewDecimalFromInt(int64(v.Value), 0), nil
	case Bit:
		return NewDecimalFromUint(uint64(v.Value), 0), nil
	case Enum:
		return NewDecimalFromUint(uint64(v.Value), 0), nil
	case Set:
		return NewDecimalFromUint(uint64(v.Value), 0), nil
	default:
		return Decimal{}, fmt.Errorf("can't convert %v to decimal", value)
	}
}
// NewDecimalFromInt returns a new fixed-point decimal, value * 10 ^ exp.
// The fractional-digit count defaults to -exp (capped) for negative
// exponents and zero otherwise.
func NewDecimalFromInt(value int64, exp int32) Decimal {
	return Decimal{value: big.NewInt(value), exp: exp, fracDigits: fracDigitsDefault(exp)}
}
// NewDecimalFromUint returns a new fixed-point decimal, value * 10 ^ exp.
// It mirrors NewDecimalFromInt for unsigned mantissas.
func NewDecimalFromUint(value uint64, exp int32) Decimal {
	mantissa := new(big.Int).SetUint64(value)
	return Decimal{value: mantissa, exp: exp, fracDigits: fracDigitsDefault(exp)}
}
// ParseDecimal returns a new Decimal from a string representation.
// Plain decimals ("-123.45", ".0001") and scientific notation ("3.14e10")
// are accepted.
//
// Example:
//
//	d, err := ParseDecimal("-123.45")
//	d2, err := ParseDecimal(".0001")
func ParseDecimal(value string) (Decimal, error) {
	var (
		digits string
		exp    int32
	)
	if pos := strings.IndexAny(value, "eE"); pos > 0 {
		// Scientific notation, like 3.14e10: consume the exponent suffix
		// first, then parse the mantissa as a plain decimal.
		e, err := strconv.Atoi(value[pos+1:])
		if err != nil {
			return Decimal{}, fmt.Errorf("can't convert %s to decimal, incorrect exponent", value)
		}
		exp = int32(e)
		value = value[0:pos]
	}
	parts := strings.Split(value, ".")
	switch len(parts) {
	case 1:
		// No decimal point; the whole string is the mantissa.
		digits = value
	case 2:
		// Fold the fractional digits into the mantissa and compensate in
		// the exponent.
		digits = parts[0] + parts[1]
		exp += int32(-len(parts[1]))
	default:
		return Decimal{}, fmt.Errorf("can't convert %s to decimal: too many .s", value)
	}
	mantissa := new(big.Int)
	if _, ok := mantissa.SetString(digits, 10); !ok {
		return Decimal{}, fmt.Errorf("can't convert %s to decimal", value)
	}
	dec := Decimal{
		value:      mantissa,
		exp:        exp,
		fracDigits: fracDigitsDefault(exp),
	}
	if exp < -MaxFractionDigits {
		dec = dec.rescale(-MaxFractionDigits)
	}
	return dec, nil
}
// NewDecimalFromFloat converts a float64 to Decimal.
//
// Example:
//
//	NewDecimalFromFloat(123.45678901234567).String() // output: "123.4567890123456"
//	NewDecimalFromFloat(.00000000000000001).String() // output: "0.00000000000000001"
//
// NOTE: this will panic on NaN, +/-inf.
func NewDecimalFromFloat(value float64) Decimal {
	// Fast path: an exactly integral float needs no string round trip.
	if math.Floor(value) == value && !math.IsInf(value, 0) {
		return NewDecimalFromInt(int64(value), 0)
	}
	dec, err := ParseDecimal(strconv.FormatFloat(value, 'f', -1, 64))
	if err != nil {
		panic(err)
	}
	return dec
}
// NewDecimalFromFloatWithExponent converts a float64 to Decimal, with an arbitrary
// number of fractional digits.
//
// Example:
//
//	NewDecimalFromFloatWithExponent(123.456, -2).String() // output: "123.46"
func NewDecimalFromFloatWithExponent(value float64, exp int32) Decimal {
	// Shift the float so the desired precision lands on the integer part,
	// then round it to obtain the mantissa.
	scaled := value * math.Pow(10, -float64(exp))
	if math.IsNaN(scaled) || math.IsInf(scaled, 0) {
		panic(fmt.Sprintf("Cannot create a Decimal from %v", scaled))
	}
	return Decimal{
		value:      big.NewInt(round(scaled)),
		exp:        exp,
		fracDigits: fracDigitsDefault(exp),
	}
}
// rescale returns a rescaled version of the decimal. Returned
// decimal may be less precise if the given exponent is bigger
// than the initial exponent of the Decimal.
// NOTE: this will truncate, NOT round
//
// Example:
//
//	d := New(12345, -4)
//	d2 := d.rescale(-1)
//	d3 := d2.rescale(-4)
//	println(d1)
//	println(d2)
//	println(d3)
//
// Output:
//
//	1.2345
//	1.2
//	1.2000
func (d Decimal) rescale(exp int32) Decimal {
	d.ensureInitialized()
	if exp < -MaxFractionDigits-1 {
		// Limit the number of digits but we can not call Round here because it is called by Round.
		// Limit it to MaxFractionDigits + 1 to make sure the final result is correct.
		exp = -MaxFractionDigits - 1
	}
	// Must convert exps to float64 before - to prevent overflow.
	diff := math.Abs(float64(exp) - float64(d.exp))
	// Copy the mantissa so the receiver's big.Int is never mutated
	// (Decimal is documented as immutable).
	value := new(big.Int).Set(d.value)
	expScale := new(big.Int).Exp(tenInt, big.NewInt(int64(diff)), nil)
	if exp > d.exp {
		// Coarser target exponent: drop digits (truncating division).
		value = value.Quo(value, expScale)
	} else if exp < d.exp {
		// Finer target exponent: pad with zero digits.
		value = value.Mul(value, expScale)
	}
	// fracDigits is carried over unchanged; rescale only changes the
	// internal representation, not the printed precision.
	return Decimal{
		value:      value,
		exp:        exp,
		fracDigits: d.fracDigits,
	}
}
// Abs returns the absolute value of the decimal. Exponent and printed
// precision are preserved.
func (d Decimal) Abs() Decimal {
	d.ensureInitialized()
	return Decimal{
		value:      new(big.Int).Abs(d.value),
		exp:        d.exp,
		fracDigits: d.fracDigits,
	}
}
// Add returns d + d2. Both operands are first brought to the smaller of
// the two exponents so their mantissas can be summed directly; the result
// keeps the wider fractional-digit count of the two inputs.
func (d Decimal) Add(d2 Decimal) Decimal {
	exp := min(d.exp, d2.exp)
	sum := new(big.Int).Add(d.rescale(exp).value, d2.rescale(exp).value)
	return Decimal{
		value:      sum,
		exp:        exp,
		fracDigits: fracDigitsPlus(d.fracDigits, d2.fracDigits),
	}
}
// Sub returns d - d2. Both operands are first brought to the smaller of
// the two exponents; the result keeps the wider fractional-digit count.
func (d Decimal) Sub(d2 Decimal) Decimal {
	exp := min(d.exp, d2.exp)
	diff := new(big.Int).Sub(d.rescale(exp).value, d2.rescale(exp).value)
	return Decimal{
		value:      diff,
		exp:        exp,
		fracDigits: fracDigitsPlus(d.fracDigits, d2.fracDigits),
	}
}
// Mul returns d * d2. Mantissas are multiplied and exponents added; the
// result is rounded back to MaxFractionDigits if the combined exponent is
// finer than that.
func (d Decimal) Mul(d2 Decimal) Decimal {
	d.ensureInitialized()
	d2.ensureInitialized()
	// Add exponents in 64 bits so the overflow check below is reliable.
	expInt64 := int64(d.exp) + int64(d2.exp)
	if expInt64 > math.MaxInt32 || expInt64 < math.MinInt32 {
		// It is better to panic than to give incorrect results, as
		// decimals are usually used for money.
		panic(fmt.Sprintf("exponent %v overflows an int32!", expInt64))
	}
	d3Value := new(big.Int).Mul(d.value, d2.value)
	val := Decimal{
		value:      d3Value,
		exp:        int32(expInt64),
		fracDigits: fracDigitsMul(d.fracDigits, d2.fracDigits),
	}
	if val.exp < -(MaxFractionDigits) {
		val = val.Round(MaxFractionDigits)
	}
	return val
}
// Div returns d / d2. The result is computed exactly as a rational number
// and then truncated to MaxFractionDigits; its printed precision is the
// dividend's fractional digits plus DivIncreasePrecision (capped).
func (d Decimal) Div(d2 Decimal) Decimal {
	// Division is hard, use Rat to do it.
	ratNum := d.Rat()
	ratDenom := d2.Rat()
	quoRat := big.NewRat(0, 1).Quo(ratNum, ratDenom)
	// Converting from Rat to Decimal inefficiently for now.
	// FloatString rounds at MaxFractionDigits+1 so the Truncate below has
	// one guard digit to work with.
	ret, err := ParseDecimal(quoRat.FloatString(MaxFractionDigits + 1))
	if err != nil {
		panic(err) // This should never happen.
	}
	// To pass test "2 / 3 * 3 < 2" -> "1".
	ret = ret.Truncate(MaxFractionDigits)
	ret.fracDigits = fracDigitsDiv(d.fracDigits)
	return ret
}
// Cmp compares the numbers represented by d and d2, and returns:
//
//	-1 if d < d2
//	 0 if d == d2
//	+1 if d > d2
func (d Decimal) Cmp(d2 Decimal) int {
	// Bring both operands to a common exponent, then the mantissa
	// comparison decides.
	exp := min(d.exp, d2.exp)
	return d.rescale(exp).value.Cmp(d2.rescale(exp).value)
}
// Equals returns whether the numbers represented by d and d2 are equal
// (numeric equality via Cmp, so differing exponents for the same value
// still compare equal).
func (d Decimal) Equals(d2 Decimal) bool {
	return d.Cmp(d2) == 0
}
// Exponent returns the exponent, or scale component of the decimal
// (the number equals value * 10^exp).
func (d Decimal) Exponent() int32 {
	return d.exp
}
// FracDigits returns the number of fractional digits of the decimal, i.e.
// how many digits String() prints after the decimal point.
func (d Decimal) FracDigits() int32 {
	return d.fracDigits
}
// IntPart returns the integer component of the decimal (the fractional
// part is truncated by rescaling to exponent 0).
func (d Decimal) IntPart() int64 {
	return d.rescale(0).value.Int64()
}
// Rat returns a rational number representation of the decimal
// (value/10^-exp for negative exponents, value*10^exp otherwise).
func (d Decimal) Rat() *big.Rat {
	d.ensureInitialized()
	if d.exp <= 0 {
		// It must negate after casting to prevent int32 overflow.
		denom := new(big.Int).Exp(tenInt, big.NewInt(-int64(d.exp)), nil)
		return new(big.Rat).SetFrac(d.value, denom)
	}
	mul := new(big.Int).Exp(tenInt, big.NewInt(int64(d.exp)), nil)
	num := new(big.Int).Mul(d.value, mul)
	return new(big.Rat).SetFrac(num, oneInt)
}
// Float64 returns the nearest float64 value for d and a bool indicating
// whether f represents d exactly.
// For more details, see the documentation for big.Rat.Float64.
func (d Decimal) Float64() (f float64, exact bool) {
	return d.Rat().Float64()
}
// String returns the string representation of the decimal
// with the fixed point, printing exactly fracDigits fractional digits.
//
// Example:
//
//	d := New(-12345, -3)
//	println(d.String())
//
// Output:
//
//	-12.345
func (d Decimal) String() string {
	return d.StringFixed(d.fracDigits)
}
// StringFixed returns a rounded fixed-point string with places digits after
// the decimal point.
//
// Example:
//
//	NewFromFloat(0).StringFixed(2) // output: "0.00"
//	NewFromFloat(0).StringFixed(0) // output: "0"
//	NewFromFloat(5.45).StringFixed(0) // output: "5"
//	NewFromFloat(5.45).StringFixed(1) // output: "5.5"
//	NewFromFloat(5.45).StringFixed(2) // output: "5.45"
//	NewFromFloat(5.45).StringFixed(3) // output: "5.450"
//	NewFromFloat(545).StringFixed(-1) // output: "550"
func (d Decimal) StringFixed(places int32) string {
	return d.Round(places).string(false)
}
// Round rounds the decimal to places decimal places.
// If places < 0, it will round the integer part to the nearest 10^(-places).
// Halves are rounded away from zero.
//
// Example:
//
//	NewFromFloat(5.45).Round(1).String() // output: "5.5"
//	NewFromFloat(545).Round(-1).String() // output: "550"
func (d Decimal) Round(places int32) Decimal {
	// Truncate to places + 1 (one guard digit beyond the target).
	ret := d.rescale(-places - 1)
	// Add sign(d) * 0.5, i.e. 5 at the guard digit, so the division below
	// rounds half away from zero.
	if ret.value.Sign() < 0 {
		ret.value.Sub(ret.value, fiveInt)
	} else {
		ret.value.Add(ret.value, fiveInt)
	}
	// Floor for positive numbers, Ceil for negative numbers.
	// DivMod is Euclidean division (floored quotient, non-negative
	// remainder), so for a negative dividend with a nonzero remainder we
	// add one back to truncate toward zero.
	_, m := ret.value.DivMod(ret.value, tenInt, new(big.Int))
	ret.exp++
	if ret.value.Sign() < 0 && m.Cmp(zeroInt) != 0 {
		ret.value.Add(ret.value, oneInt)
	}
	ret.fracDigits = places
	return ret
}
// Floor returns the nearest integer value less than or equal to d.
func (d Decimal) Floor() Decimal {
	d.ensureInitialized()
	if d.exp >= 0 {
		// Already an integer (value * 10^exp with exp >= 0): just
		// normalize to exponent 0. The previous code fed a negative
		// exponent to big.Int.Exp, which returns 1 when y <= 0 and m is
		// nil, silently dividing the mantissa by 1 and dropping the scale.
		scaled := d.rescale(0)
		return Decimal{value: scaled.value, exp: 0}
	}
	exp := big.NewInt(10)
	// It must negate after casting to prevent int32 overflow.
	exp.Exp(exp, big.NewInt(-int64(d.exp)), nil)
	// big.Int.Div floors toward negative infinity, which is exactly Floor.
	z := new(big.Int).Div(d.value, exp)
	return Decimal{value: z, exp: 0}
}
// Ceil returns the nearest integer value greater than or equal to d.
func (d Decimal) Ceil() Decimal {
	d.ensureInitialized()
	if d.exp >= 0 {
		// Already an integer (value * 10^exp with exp >= 0): just
		// normalize to exponent 0. The previous code fed a negative
		// exponent to big.Int.Exp, which returns 1 when y <= 0 and m is
		// nil, silently dividing the mantissa by 1 and dropping the scale.
		scaled := d.rescale(0)
		return Decimal{value: scaled.value, exp: 0}
	}
	exp := big.NewInt(10)
	// It must negate after casting to prevent int32 overflow.
	exp.Exp(exp, big.NewInt(-int64(d.exp)), nil)
	// Euclidean DivMod floors the quotient; bump it by one whenever a
	// remainder was discarded to obtain the ceiling.
	z, m := new(big.Int).DivMod(d.value, exp, new(big.Int))
	if m.Cmp(zeroInt) != 0 {
		z.Add(z, oneInt)
	}
	return Decimal{value: z, exp: 0}
}
// Truncate truncates off digits from the number, without rounding.
//
// NOTE: precision is the last digit that will not be truncated (must be >= 0).
//
// Example:
//
//	decimal.NewFromString("123.456").Truncate(2).String() // "123.45"
func (d Decimal) Truncate(precision int32) Decimal {
	d.ensureInitialized()
	// Only rescale when the current representation is finer than the
	// requested precision; rescale truncates rather than rounds.
	if precision >= 0 && -precision > d.exp {
		d = d.rescale(-precision)
	}
	// The printed precision always reflects the request, even when no
	// digits were dropped.
	d.fracDigits = precision
	return d
}
// UnmarshalJSON implements the json.Unmarshaler interface. The input may
// be a bare number or a quoted numeric string.
func (d *Decimal) UnmarshalJSON(decimalBytes []byte) error {
	str, err := unquoteIfQuoted(decimalBytes)
	if err != nil {
		return fmt.Errorf("Error decoding string '%s': %s", decimalBytes, err)
	}
	decimal, err := ParseDecimal(str)
	if err != nil {
		return fmt.Errorf("Error decoding string '%s': %s", str, err)
	}
	// Only overwrite the receiver once parsing succeeded; the previous
	// code assigned the (zero) result before checking the error,
	// clobbering *d on failure.
	*d = decimal
	return nil
}
// MarshalJSON implements the json.Marshaler interface. The decimal is
// encoded as a quoted JSON string to preserve its exact value.
func (d Decimal) MarshalJSON() ([]byte, error) {
	return []byte("\"" + d.String() + "\""), nil
}
// Scan implements the sql.Scanner interface for database deserialization.
// The incoming value must be a []byte (optionally quoted); on any error
// the receiver is left untouched.
func (d *Decimal) Scan(value interface{}) error {
	str, err := unquoteIfQuoted(value)
	if err == nil {
		*d, err = ParseDecimal(str)
	}
	return err
}
// Value implements the driver.Valuer interface for database serialization;
// the decimal is stored as its string form.
func (d Decimal) Value() (driver.Value, error) {
	return d.String(), nil
}
// BigIntValue returns the *big.Int mantissa of the decimal. The returned
// pointer aliases internal state and must not be mutated by the caller.
func (d Decimal) BigIntValue() *big.Int {
	return d.value
}
// UnmarshalText implements the encoding.TextUnmarshaler interface for XML
// deserialization.
func (d *Decimal) UnmarshalText(text []byte) error {
	str := string(text)
	dec, err := ParseDecimal(str)
	if err != nil {
		return fmt.Errorf("Error decoding string '%s': %s", str, err)
	}
	// Only overwrite the receiver once parsing succeeded; the previous
	// code assigned the (zero) result before checking the error,
	// clobbering *d on failure.
	*d = dec
	return nil
}
// MarshalText implements the encoding.TextMarshaler interface for XML
// serialization; the decimal is emitted as its plain string form.
func (d Decimal) MarshalText() (text []byte, err error) {
	return []byte(d.String()), nil
}
// StringScaled first scales the decimal then calls .String() on it.
// NOTE: buggy, unintuitive, and DEPRECATED! Use StringFixed instead.
// (rescale truncates rather than rounds, which is rarely what callers
// expect.)
func (d Decimal) StringScaled(exp int32) string {
	return d.rescale(exp).String()
}
// string formats the decimal with a fixed point. When trimTrailingZeros
// is true, trailing zeros (and a then-empty fraction) are dropped.
// Callers are expected to have rounded/rescaled first (see StringFixed).
func (d Decimal) string(trimTrailingZeros bool) string {
	if d.exp >= 0 {
		// No fractional digits: normalize to exponent 0 and print the
		// mantissa directly.
		return d.rescale(0).value.String()
	}
	// Work on the absolute value; the sign is re-attached at the end.
	abs := new(big.Int).Abs(d.value)
	str := abs.String()
	var intPart, fractionalPart string
	// this cast to int will cause bugs if d.exp == INT_MIN
	// and you are on a 32-bit machine. Won't fix this super-edge case.
	dExpInt := int(d.exp)
	if len(str) > -dExpInt {
		// Enough digits for both parts: split -exp digits off the end.
		intPart = str[:len(str)+dExpInt]
		fractionalPart = str[len(str)+dExpInt:]
	} else {
		// Value is < 1: integer part is "0" and the fraction is
		// zero-padded on the left.
		intPart = "0"
		num0s := -dExpInt - len(str)
		fractionalPart = strings.Repeat("0", num0s) + str
	}
	if trimTrailingZeros {
		i := len(fractionalPart) - 1
		for ; i >= 0; i-- {
			if fractionalPart[i] != '0' {
				break
			}
		}
		fractionalPart = fractionalPart[:i+1]
	}
	number := intPart
	if len(fractionalPart) > 0 {
		number += "." + fractionalPart
	}
	if d.value.Sign() < 0 {
		return "-" + number
	}
	return number
}
// ensureInitialized lazily allocates the mantissa so methods work on the
// zero-value Decimal (whose value pointer is nil).
func (d *Decimal) ensureInitialized() {
	if d.value == nil {
		d.value = new(big.Int)
	}
}
// min returns the smaller of two int32 values.
func min(x, y int32) int32 {
	if x < y {
		return x
	}
	return y
}

// max returns the larger of two int32 values.
func max(x, y int32) int32 {
	if x < y {
		return y
	}
	return x
}

// round converts a float64 to the nearest int64, rounding halves away
// from zero.
func round(n float64) int64 {
	if n >= 0 {
		return int64(n + 0.5)
	}
	return int64(n - 0.5)
}

// unquoteIfQuoted coerces a raw database/JSON value to a string. Only
// []byte inputs are accepted; one surrounding pair of double quotes, if
// present, is stripped.
func unquoteIfQuoted(value interface{}) (string, error) {
	raw, ok := value.([]byte)
	if !ok {
		return "", fmt.Errorf("Could not convert value '%+v' to byte array",
			value)
	}
	// If the amount is quoted, strip the quotes.
	if len(raw) > 2 && raw[0] == '"' && raw[len(raw)-1] == '"' {
		raw = raw[1 : len(raw)-1]
	}
	return string(raw), nil
}
// fracDigitsDefault returns the default fraction-digit count for a decimal
// with exponent exp: zero for non-negative exponents, otherwise -exp capped
// at MaxFractionDigits.
func fracDigitsDefault(exp int32) int32 {
	if exp >= 0 {
		return 0
	}
	return min(MaxFractionDigits, -exp)
}
// fracDigitsPlus returns the fraction-digit count for addition/subtraction:
// the larger of the two operands' counts.
func fracDigitsPlus(x, y int32) int32 {
	return max(x, y)
}
// fracDigitsDiv returns the fraction-digit count for division: the dividend's
// count plus DivIncreasePrecision extra digits, capped at MaxFractionDigits.
func fracDigitsDiv(x int32) int32 {
	return min(x+DivIncreasePrecision, MaxFractionDigits)
}
// fracDigitsMul returns the fraction-digit count for multiplication: the sum
// of the operands' counts, capped at MaxFractionDigits.
func fracDigitsMul(a, b int32) int32 {
	return min(MaxFractionDigits, a+b)
}

62
vendor/github.com/pingcap/tidb/mysql/enum.go generated vendored Normal file
View file

@ -0,0 +1,62 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mysql
import (
"strconv"
"strings"
"github.com/juju/errors"
)
// Enum is for MySQL enum type.
// Name is the item's literal text and Value is its 1-based position within
// the enum's element list (see ParseEnumName/ParseEnumValue). The zero Enum
// has Value 0, which ParseEnumValue rejects as out of range.
type Enum struct {
	Name string
	Value uint64
}
// String implements fmt.Stringer interface.
// It returns the item name, not its numeric index.
func (e Enum) String() string {
	return e.Name
}
// ToNumber changes enum index to float64 for numeric operation.
// The 1-based Value is converted directly; no lookup is performed.
func (e Enum) ToNumber() float64 {
	return float64(e.Value)
}
// ParseEnumName creates a Enum with item name.
// The match against elems is case-insensitive; when name is not an item,
// it is retried as a numeric 1-based index via ParseEnumValue.
func ParseEnumName(elems []string, name string) (Enum, error) {
	for idx, elem := range elems {
		if strings.EqualFold(elem, name) {
			return Enum{Name: elem, Value: uint64(idx) + 1}, nil
		}
	}
	// name doesn't exist, maybe an integer?
	num, parseErr := strconv.ParseUint(name, 0, 64)
	if parseErr == nil {
		return ParseEnumValue(elems, num)
	}
	return Enum{}, errors.Errorf("item %s is not in enum %v", name, elems)
}
// ParseEnumValue creates a Enum with special number.
// number must lie in [1, len(elems)]; it selects elems[number-1].
func ParseEnumValue(elems []string, number uint64) (Enum, error) {
	limit := uint64(len(elems))
	if number < 1 || number > limit {
		return Enum{}, errors.Errorf("number %d overflow enum boundary [1, %d]", number, len(elems))
	}
	return Enum{Name: elems[number-1], Value: number}, nil
}

885
vendor/github.com/pingcap/tidb/mysql/errcode.go generated vendored Normal file
View file

@ -0,0 +1,885 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mysql
// MySQL error code.
// This value is numeric. It is not portable to other database systems.
// The table mirrors MySQL server error numbers: values run contiguously
// from ErrErrorFirst (1000) through ErrErrorLast (1863). This is generated
// vendored data — do not reorder or renumber entries by hand.
const (
	ErrErrorFirst uint16 = 1000 // lowest code in this table
	ErrHashchk = 1000
	ErrNisamchk = 1001
	ErrNo = 1002
	ErrYes = 1003
	ErrCantCreateFile = 1004
	ErrCantCreateTable = 1005
	ErrCantCreateDb = 1006
	ErrDbCreateExists = 1007
	ErrDbDropExists = 1008
	ErrDbDropDelete = 1009
	ErrDbDropRmdir = 1010
	ErrCantDeleteFile = 1011
	ErrCantFindSystemRec = 1012
	ErrCantGetStat = 1013
	ErrCantGetWd = 1014
	ErrCantLock = 1015
	ErrCantOpenFile = 1016
	ErrFileNotFound = 1017
	ErrCantReadDir = 1018
	ErrCantSetWd = 1019
	ErrCheckread = 1020
	ErrDiskFull = 1021
	ErrDupKey = 1022
	ErrErrorOnClose = 1023
	ErrErrorOnRead = 1024
	ErrErrorOnRename = 1025
	ErrErrorOnWrite = 1026
	ErrFileUsed = 1027
	ErrFilsortAbort = 1028
	ErrFormNotFound = 1029
	ErrGetErrno = 1030
	ErrIllegalHa = 1031
	ErrKeyNotFound = 1032
	ErrNotFormFile = 1033
	ErrNotKeyfile = 1034
	ErrOldKeyfile = 1035
	ErrOpenAsReadonly = 1036
	ErrOutofmemory = 1037
	ErrOutOfSortmemory = 1038
	ErrUnexpectedEOF = 1039
	ErrConCount = 1040
	ErrOutOfResources = 1041
	ErrBadHost = 1042
	ErrHandshake = 1043
	ErrDbaccessDenied = 1044
	ErrAccessDenied = 1045
	ErrNoDb = 1046
	ErrUnknownCom = 1047
	ErrBadNull = 1048
	ErrBadDb = 1049
	ErrTableExists = 1050
	ErrBadTable = 1051
	ErrNonUniq = 1052
	ErrServerShutdown = 1053
	ErrBadField = 1054
	ErrWrongFieldWithGroup = 1055
	ErrWrongGroupField = 1056
	ErrWrongSumSelect = 1057
	ErrWrongValueCount = 1058
	ErrTooLongIdent = 1059
	ErrDupFieldname = 1060
	ErrDupKeyname = 1061
	ErrDupEntry = 1062
	ErrWrongFieldSpec = 1063
	ErrParse = 1064
	ErrEmptyQuery = 1065
	ErrNonuniqTable = 1066
	ErrInvalidDefault = 1067
	ErrMultiplePriKey = 1068
	ErrTooManyKeys = 1069
	ErrTooManyKeyParts = 1070
	ErrTooLongKey = 1071
	ErrKeyColumnDoesNotExits = 1072
	ErrBlobUsedAsKey = 1073
	ErrTooBigFieldlength = 1074
	ErrWrongAutoKey = 1075
	ErrReady = 1076
	ErrNormalShutdown = 1077
	ErrGotSignal = 1078
	ErrShutdownComplete = 1079
	ErrForcingClose = 1080
	ErrIpsock = 1081
	ErrNoSuchIndex = 1082
	ErrWrongFieldTerminators = 1083
	ErrBlobsAndNoTerminated = 1084
	ErrTextfileNotReadable = 1085
	ErrFileExists = 1086
	ErrLoadInfo = 1087
	ErrAlterInfo = 1088
	ErrWrongSubKey = 1089
	ErrCantRemoveAllFields = 1090
	ErrCantDropFieldOrKey = 1091
	ErrInsertInfo = 1092
	ErrUpdateTableUsed = 1093
	ErrNoSuchThread = 1094
	ErrKillDenied = 1095
	ErrNoTablesUsed = 1096
	ErrTooBigSet = 1097
	ErrNoUniqueLogfile = 1098
	ErrTableNotLockedForWrite = 1099
	ErrTableNotLocked = 1100
	ErrBlobCantHaveDefault = 1101
	ErrWrongDbName = 1102
	ErrWrongTableName = 1103
	ErrTooBigSelect = 1104
	ErrUnknown = 1105
	ErrUnknownProcedure = 1106
	ErrWrongParamcountToProcedure = 1107
	ErrWrongParametersToProcedure = 1108
	ErrUnknownTable = 1109
	ErrFieldSpecifiedTwice = 1110
	ErrInvalidGroupFuncUse = 1111
	ErrUnsupportedExtension = 1112
	ErrTableMustHaveColumns = 1113
	ErrRecordFileFull = 1114
	ErrUnknownCharacterSet = 1115
	ErrTooManyTables = 1116
	ErrTooManyFields = 1117
	ErrTooBigRowsize = 1118
	ErrStackOverrun = 1119
	ErrWrongOuterJoin = 1120
	ErrNullColumnInIndex = 1121
	ErrCantFindUdf = 1122
	ErrCantInitializeUdf = 1123
	ErrUdfNoPaths = 1124
	ErrUdfExists = 1125
	ErrCantOpenLibrary = 1126
	ErrCantFindDlEntry = 1127
	ErrFunctionNotDefined = 1128
	ErrHostIsBlocked = 1129
	ErrHostNotPrivileged = 1130
	ErrPasswordAnonymousUser = 1131
	ErrPasswordNotAllowed = 1132
	ErrPasswordNoMatch = 1133
	ErrUpdateInfo = 1134
	ErrCantCreateThread = 1135
	ErrWrongValueCountOnRow = 1136
	ErrCantReopenTable = 1137
	ErrInvalidUseOfNull = 1138
	ErrRegexp = 1139
	ErrMixOfGroupFuncAndFields = 1140
	ErrNonexistingGrant = 1141
	ErrTableaccessDenied = 1142
	ErrColumnaccessDenied = 1143
	ErrIllegalGrantForTable = 1144
	ErrGrantWrongHostOrUser = 1145
	ErrNoSuchTable = 1146
	ErrNonexistingTableGrant = 1147
	ErrNotAllowedCommand = 1148
	ErrSyntax = 1149
	ErrDelayedCantChangeLock = 1150
	ErrTooManyDelayedThreads = 1151
	ErrAbortingConnection = 1152
	ErrNetPacketTooLarge = 1153
	ErrNetReadErrorFromPipe = 1154
	ErrNetFcntl = 1155
	ErrNetPacketsOutOfOrder = 1156
	ErrNetUncompress = 1157
	ErrNetRead = 1158
	ErrNetReadInterrupted = 1159
	ErrNetErrorOnWrite = 1160
	ErrNetWriteInterrupted = 1161
	ErrTooLongString = 1162
	ErrTableCantHandleBlob = 1163
	ErrTableCantHandleAutoIncrement = 1164
	ErrDelayedInsertTableLocked = 1165
	ErrWrongColumnName = 1166
	ErrWrongKeyColumn = 1167
	ErrWrongMrgTable = 1168
	ErrDupUnique = 1169
	ErrBlobKeyWithoutLength = 1170
	ErrPrimaryCantHaveNull = 1171
	ErrTooManyRows = 1172
	ErrRequiresPrimaryKey = 1173
	ErrNoRaidCompiled = 1174
	ErrUpdateWithoutKeyInSafeMode = 1175
	ErrKeyDoesNotExits = 1176
	ErrCheckNoSuchTable = 1177
	ErrCheckNotImplemented = 1178
	ErrCantDoThisDuringAnTransaction = 1179
	ErrErrorDuringCommit = 1180
	ErrErrorDuringRollback = 1181
	ErrErrorDuringFlushLogs = 1182
	ErrErrorDuringCheckpoint = 1183
	ErrNewAbortingConnection = 1184
	ErrDumpNotImplemented = 1185
	ErrFlushMasterBinlogClosed = 1186
	ErrIndexRebuild = 1187
	ErrMaster = 1188
	ErrMasterNetRead = 1189
	ErrMasterNetWrite = 1190
	ErrFtMatchingKeyNotFound = 1191
	ErrLockOrActiveTransaction = 1192
	ErrUnknownSystemVariable = 1193
	ErrCrashedOnUsage = 1194
	ErrCrashedOnRepair = 1195
	ErrWarningNotCompleteRollback = 1196
	ErrTransCacheFull = 1197
	ErrSlaveMustStop = 1198
	ErrSlaveNotRunning = 1199
	ErrBadSlave = 1200
	ErrMasterInfo = 1201
	ErrSlaveThread = 1202
	ErrTooManyUserConnections = 1203
	ErrSetConstantsOnly = 1204
	ErrLockWaitTimeout = 1205
	ErrLockTableFull = 1206
	ErrReadOnlyTransaction = 1207
	ErrDropDbWithReadLock = 1208
	ErrCreateDbWithReadLock = 1209
	ErrWrongArguments = 1210
	ErrNoPermissionToCreateUser = 1211
	ErrUnionTablesInDifferentDir = 1212
	ErrLockDeadlock = 1213
	ErrTableCantHandleFt = 1214
	ErrCannotAddForeign = 1215
	ErrNoReferencedRow = 1216
	ErrRowIsReferenced = 1217
	ErrConnectToMaster = 1218
	ErrQueryOnMaster = 1219
	ErrErrorWhenExecutingCommand = 1220
	ErrWrongUsage = 1221
	ErrWrongNumberOfColumnsInSelect = 1222
	ErrCantUpdateWithReadlock = 1223
	ErrMixingNotAllowed = 1224
	ErrDupArgument = 1225
	ErrUserLimitReached = 1226
	ErrSpecificAccessDenied = 1227
	ErrLocalVariable = 1228
	ErrGlobalVariable = 1229
	ErrNoDefault = 1230
	ErrWrongValueForVar = 1231
	ErrWrongTypeForVar = 1232
	ErrVarCantBeRead = 1233
	ErrCantUseOptionHere = 1234
	ErrNotSupportedYet = 1235
	ErrMasterFatalErrorReadingBinlog = 1236
	ErrSlaveIgnoredTable = 1237
	ErrIncorrectGlobalLocalVar = 1238
	ErrWrongFkDef = 1239
	ErrKeyRefDoNotMatchTableRef = 1240
	ErrOperandColumns = 1241
	ErrSubqueryNo1Row = 1242
	ErrUnknownStmtHandler = 1243
	ErrCorruptHelpDb = 1244
	ErrCyclicReference = 1245
	ErrAutoConvert = 1246
	ErrIllegalReference = 1247
	ErrDerivedMustHaveAlias = 1248
	ErrSelectReduced = 1249
	ErrTablenameNotAllowedHere = 1250
	ErrNotSupportedAuthMode = 1251
	ErrSpatialCantHaveNull = 1252
	ErrCollationCharsetMismatch = 1253
	ErrSlaveWasRunning = 1254
	ErrSlaveWasNotRunning = 1255
	ErrTooBigForUncompress = 1256
	ErrZlibZMem = 1257
	ErrZlibZBuf = 1258
	ErrZlibZData = 1259
	ErrCutValueGroupConcat = 1260
	ErrWarnTooFewRecords = 1261
	ErrWarnTooManyRecords = 1262
	ErrWarnNullToNotnull = 1263
	ErrWarnDataOutOfRange = 1264
	WarnDataTruncated = 1265
	ErrWarnUsingOtherHandler = 1266
	ErrCantAggregate2collations = 1267
	ErrDropUser = 1268
	ErrRevokeGrants = 1269
	ErrCantAggregate3collations = 1270
	ErrCantAggregateNcollations = 1271
	ErrVariableIsNotStruct = 1272
	ErrUnknownCollation = 1273
	ErrSlaveIgnoredSslParams = 1274
	ErrServerIsInSecureAuthMode = 1275
	ErrWarnFieldResolved = 1276
	ErrBadSlaveUntilCond = 1277
	ErrMissingSkipSlave = 1278
	ErrUntilCondIgnored = 1279
	ErrWrongNameForIndex = 1280
	ErrWrongNameForCatalog = 1281
	ErrWarnQcResize = 1282
	ErrBadFtColumn = 1283
	ErrUnknownKeyCache = 1284
	ErrWarnHostnameWontWork = 1285
	ErrUnknownStorageEngine = 1286
	ErrWarnDeprecatedSyntax = 1287
	ErrNonUpdatableTable = 1288
	ErrFeatureDisabled = 1289
	ErrOptionPreventsStatement = 1290
	ErrDuplicatedValueInType = 1291
	ErrTruncatedWrongValue = 1292
	ErrTooMuchAutoTimestampCols = 1293
	ErrInvalidOnUpdate = 1294
	ErrUnsupportedPs = 1295
	ErrGetErrmsg = 1296
	ErrGetTemporaryErrmsg = 1297
	ErrUnknownTimeZone = 1298
	ErrWarnInvalidTimestamp = 1299
	ErrInvalidCharacterString = 1300
	ErrWarnAllowedPacketOverflowed = 1301
	ErrConflictingDeclarations = 1302
	ErrSpNoRecursiveCreate = 1303
	ErrSpAlreadyExists = 1304
	ErrSpDoesNotExist = 1305
	ErrSpDropFailed = 1306
	ErrSpStoreFailed = 1307
	ErrSpLilabelMismatch = 1308
	ErrSpLabelRedefine = 1309
	ErrSpLabelMismatch = 1310
	ErrSpUninitVar = 1311
	ErrSpBadselect = 1312
	ErrSpBadreturn = 1313
	ErrSpBadstatement = 1314
	ErrUpdateLogDeprecatedIgnored = 1315
	ErrUpdateLogDeprecatedTranslated = 1316
	ErrQueryInterrupted = 1317
	ErrSpWrongNoOfArgs = 1318
	ErrSpCondMismatch = 1319
	ErrSpNoreturn = 1320
	ErrSpNoreturnend = 1321
	ErrSpBadCursorQuery = 1322
	ErrSpBadCursorSelect = 1323
	ErrSpCursorMismatch = 1324
	ErrSpCursorAlreadyOpen = 1325
	ErrSpCursorNotOpen = 1326
	ErrSpUndeclaredVar = 1327
	ErrSpWrongNoOfFetchArgs = 1328
	ErrSpFetchNoData = 1329
	ErrSpDupParam = 1330
	ErrSpDupVar = 1331
	ErrSpDupCond = 1332
	ErrSpDupCurs = 1333
	ErrSpCantAlter = 1334
	ErrSpSubselectNyi = 1335
	ErrStmtNotAllowedInSfOrTrg = 1336
	ErrSpVarcondAfterCurshndlr = 1337
	ErrSpCursorAfterHandler = 1338
	ErrSpCaseNotFound = 1339
	ErrFparserTooBigFile = 1340
	ErrFparserBadHeader = 1341
	ErrFparserEOFInComment = 1342
	ErrFparserErrorInParameter = 1343
	ErrFparserEOFInUnknownParameter = 1344
	ErrViewNoExplain = 1345
	ErrFrmUnknownType = 1346
	ErrWrongObject = 1347
	ErrNonupdateableColumn = 1348
	ErrViewSelectDerived = 1349
	ErrViewSelectClause = 1350
	ErrViewSelectVariable = 1351
	ErrViewSelectTmptable = 1352
	ErrViewWrongList = 1353
	ErrWarnViewMerge = 1354
	ErrWarnViewWithoutKey = 1355
	ErrViewInvalid = 1356
	ErrSpNoDropSp = 1357
	ErrSpGotoInHndlr = 1358
	ErrTrgAlreadyExists = 1359
	ErrTrgDoesNotExist = 1360
	ErrTrgOnViewOrTempTable = 1361
	ErrTrgCantChangeRow = 1362
	ErrTrgNoSuchRowInTrg = 1363
	ErrNoDefaultForField = 1364
	ErrDivisionByZero = 1365
	ErrTruncatedWrongValueForField = 1366
	ErrIllegalValueForType = 1367
	ErrViewNonupdCheck = 1368
	ErrViewCheckFailed = 1369
	ErrProcaccessDenied = 1370
	ErrRelayLogFail = 1371
	ErrPasswdLength = 1372
	ErrUnknownTargetBinlog = 1373
	ErrIoErrLogIndexRead = 1374
	ErrBinlogPurgeProhibited = 1375
	ErrFseekFail = 1376
	ErrBinlogPurgeFatalErr = 1377
	ErrLogInUse = 1378
	ErrLogPurgeUnknownErr = 1379
	ErrRelayLogInit = 1380
	ErrNoBinaryLogging = 1381
	ErrReservedSyntax = 1382
	ErrWsasFailed = 1383
	ErrDiffGroupsProc = 1384
	ErrNoGroupForProc = 1385
	ErrOrderWithProc = 1386
	ErrLoggingProhibitChangingOf = 1387
	ErrNoFileMapping = 1388
	ErrWrongMagic = 1389
	ErrPsManyParam = 1390
	ErrKeyPart0 = 1391
	ErrViewChecksum = 1392
	ErrViewMultiupdate = 1393
	ErrViewNoInsertFieldList = 1394
	ErrViewDeleteMergeView = 1395
	ErrCannotUser = 1396
	ErrXaerNota = 1397
	ErrXaerInval = 1398
	ErrXaerRmfail = 1399
	ErrXaerOutside = 1400
	ErrXaerRmerr = 1401
	ErrXaRbrollback = 1402
	ErrNonexistingProcGrant = 1403
	ErrProcAutoGrantFail = 1404
	ErrProcAutoRevokeFail = 1405
	ErrDataTooLong = 1406
	ErrSpBadSQLstate = 1407
	ErrStartup = 1408
	ErrLoadFromFixedSizeRowsToVar = 1409
	ErrCantCreateUserWithGrant = 1410
	ErrWrongValueForType = 1411
	ErrTableDefChanged = 1412
	ErrSpDupHandler = 1413
	ErrSpNotVarArg = 1414
	ErrSpNoRetset = 1415
	ErrCantCreateGeometryObject = 1416
	ErrFailedRoutineBreakBinlog = 1417
	ErrBinlogUnsafeRoutine = 1418
	ErrBinlogCreateRoutineNeedSuper = 1419
	ErrExecStmtWithOpenCursor = 1420
	ErrStmtHasNoOpenCursor = 1421
	ErrCommitNotAllowedInSfOrTrg = 1422
	ErrNoDefaultForViewField = 1423
	ErrSpNoRecursion = 1424
	ErrTooBigScale = 1425
	ErrTooBigPrecision = 1426
	ErrMBiggerThanD = 1427
	ErrWrongLockOfSystemTable = 1428
	ErrConnectToForeignDataSource = 1429
	ErrQueryOnForeignDataSource = 1430
	ErrForeignDataSourceDoesntExist = 1431
	ErrForeignDataStringInvalidCantCreate = 1432
	ErrForeignDataStringInvalid = 1433
	ErrCantCreateFederatedTable = 1434
	ErrTrgInWrongSchema = 1435
	ErrStackOverrunNeedMore = 1436
	ErrTooLongBody = 1437
	ErrWarnCantDropDefaultKeycache = 1438
	ErrTooBigDisplaywidth = 1439
	ErrXaerDupid = 1440
	ErrDatetimeFunctionOverflow = 1441
	ErrCantUpdateUsedTableInSfOrTrg = 1442
	ErrViewPreventUpdate = 1443
	ErrPsNoRecursion = 1444
	ErrSpCantSetAutocommit = 1445
	ErrMalformedDefiner = 1446
	ErrViewFrmNoUser = 1447
	ErrViewOtherUser = 1448
	ErrNoSuchUser = 1449
	ErrForbidSchemaChange = 1450
	ErrRowIsReferenced2 = 1451
	ErrNoReferencedRow2 = 1452
	ErrSpBadVarShadow = 1453
	ErrTrgNoDefiner = 1454
	ErrOldFileFormat = 1455
	ErrSpRecursionLimit = 1456
	ErrSpProcTableCorrupt = 1457
	ErrSpWrongName = 1458
	ErrTableNeedsUpgrade = 1459
	ErrSpNoAggregate = 1460
	ErrMaxPreparedStmtCountReached = 1461
	ErrViewRecursive = 1462
	ErrNonGroupingFieldUsed = 1463
	ErrTableCantHandleSpkeys = 1464
	ErrNoTriggersOnSystemSchema = 1465
	ErrRemovedSpaces = 1466
	ErrAutoincReadFailed = 1467
	ErrUsername = 1468
	ErrHostname = 1469
	ErrWrongStringLength = 1470
	ErrNonInsertableTable = 1471
	ErrAdminWrongMrgTable = 1472
	ErrTooHighLevelOfNestingForSelect = 1473
	ErrNameBecomesEmpty = 1474
	ErrAmbiguousFieldTerm = 1475
	ErrForeignServerExists = 1476
	ErrForeignServerDoesntExist = 1477
	ErrIllegalHaCreateOption = 1478
	ErrPartitionRequiresValues = 1479
	ErrPartitionWrongValues = 1480
	ErrPartitionMaxvalue = 1481
	ErrPartitionSubpartition = 1482
	ErrPartitionSubpartMix = 1483
	ErrPartitionWrongNoPart = 1484
	ErrPartitionWrongNoSubpart = 1485
	ErrWrongExprInPartitionFunc = 1486
	ErrNoConstExprInRangeOrList = 1487
	ErrFieldNotFoundPart = 1488
	ErrListOfFieldsOnlyInHash = 1489
	ErrInconsistentPartitionInfo = 1490
	ErrPartitionFuncNotAllowed = 1491
	ErrPartitionsMustBeDefined = 1492
	ErrRangeNotIncreasing = 1493
	ErrInconsistentTypeOfFunctions = 1494
	ErrMultipleDefConstInListPart = 1495
	ErrPartitionEntry = 1496
	ErrMixHandler = 1497
	ErrPartitionNotDefined = 1498
	ErrTooManyPartitions = 1499
	ErrSubpartition = 1500
	ErrCantCreateHandlerFile = 1501
	ErrBlobFieldInPartFunc = 1502
	ErrUniqueKeyNeedAllFieldsInPf = 1503
	ErrNoParts = 1504
	ErrPartitionMgmtOnNonpartitioned = 1505
	ErrForeignKeyOnPartitioned = 1506
	ErrDropPartitionNonExistent = 1507
	ErrDropLastPartition = 1508
	ErrCoalesceOnlyOnHashPartition = 1509
	ErrReorgHashOnlyOnSameNo = 1510
	ErrReorgNoParam = 1511
	ErrOnlyOnRangeListPartition = 1512
	ErrAddPartitionSubpart = 1513
	ErrAddPartitionNoNewPartition = 1514
	ErrCoalescePartitionNoPartition = 1515
	ErrReorgPartitionNotExist = 1516
	ErrSameNamePartition = 1517
	ErrNoBinlog = 1518
	ErrConsecutiveReorgPartitions = 1519
	ErrReorgOutsideRange = 1520
	ErrPartitionFunctionFailure = 1521
	ErrPartState = 1522
	ErrLimitedPartRange = 1523
	ErrPluginIsNotLoaded = 1524
	ErrWrongValue = 1525
	ErrNoPartitionForGivenValue = 1526
	ErrFilegroupOptionOnlyOnce = 1527
	ErrCreateFilegroupFailed = 1528
	ErrDropFilegroupFailed = 1529
	ErrTablespaceAutoExtend = 1530
	ErrWrongSizeNumber = 1531
	ErrSizeOverflow = 1532
	ErrAlterFilegroupFailed = 1533
	ErrBinlogRowLoggingFailed = 1534
	ErrBinlogRowWrongTableDef = 1535
	ErrBinlogRowRbrToSbr = 1536
	ErrEventAlreadyExists = 1537
	ErrEventStoreFailed = 1538
	ErrEventDoesNotExist = 1539
	ErrEventCantAlter = 1540
	ErrEventDropFailed = 1541
	ErrEventIntervalNotPositiveOrTooBig = 1542
	ErrEventEndsBeforeStarts = 1543
	ErrEventExecTimeInThePast = 1544
	ErrEventOpenTableFailed = 1545
	ErrEventNeitherMExprNorMAt = 1546
	ErrObsoleteColCountDoesntMatchCorrupted = 1547
	ErrObsoleteCannotLoadFromTable = 1548
	ErrEventCannotDelete = 1549
	ErrEventCompile = 1550
	ErrEventSameName = 1551
	ErrEventDataTooLong = 1552
	ErrDropIndexFk = 1553
	ErrWarnDeprecatedSyntaxWithVer = 1554
	ErrCantWriteLockLogTable = 1555
	ErrCantLockLogTable = 1556
	ErrForeignDuplicateKeyOldUnused = 1557
	ErrColCountDoesntMatchPleaseUpdate = 1558
	ErrTempTablePreventsSwitchOutOfRbr = 1559
	ErrStoredFunctionPreventsSwitchBinlogFormat = 1560
	ErrNdbCantSwitchBinlogFormat = 1561
	ErrPartitionNoTemporary = 1562
	ErrPartitionConstDomain = 1563
	ErrPartitionFunctionIsNotAllowed = 1564
	ErrDdlLog = 1565
	ErrNullInValuesLessThan = 1566
	ErrWrongPartitionName = 1567
	ErrCantChangeTxCharacteristics = 1568
	ErrDupEntryAutoincrementCase = 1569
	ErrEventModifyQueue = 1570
	ErrEventSetVar = 1571
	ErrPartitionMerge = 1572
	ErrCantActivateLog = 1573
	ErrRbrNotAvailable = 1574
	ErrBase64Decode = 1575
	ErrEventRecursionForbidden = 1576
	ErrEventsDb = 1577
	ErrOnlyIntegersAllowed = 1578
	ErrUnsuportedLogEngine = 1579
	ErrBadLogStatement = 1580
	ErrCantRenameLogTable = 1581
	ErrWrongParamcountToNativeFct = 1582
	ErrWrongParametersToNativeFct = 1583
	ErrWrongParametersToStoredFct = 1584
	ErrNativeFctNameCollision = 1585
	ErrDupEntryWithKeyName = 1586
	ErrBinlogPurgeEmfile = 1587
	ErrEventCannotCreateInThePast = 1588
	ErrEventCannotAlterInThePast = 1589
	ErrSlaveIncident = 1590
	ErrNoPartitionForGivenValueSilent = 1591
	ErrBinlogUnsafeStatement = 1592
	ErrSlaveFatal = 1593
	ErrSlaveRelayLogReadFailure = 1594
	ErrSlaveRelayLogWriteFailure = 1595
	ErrSlaveCreateEventFailure = 1596
	ErrSlaveMasterComFailure = 1597
	ErrBinlogLoggingImpossible = 1598
	ErrViewNoCreationCtx = 1599
	ErrViewInvalidCreationCtx = 1600
	ErrSrInvalidCreationCtx = 1601
	ErrTrgCorruptedFile = 1602
	ErrTrgNoCreationCtx = 1603
	ErrTrgInvalidCreationCtx = 1604
	ErrEventInvalidCreationCtx = 1605
	ErrTrgCantOpenTable = 1606
	ErrCantCreateSroutine = 1607
	ErrNeverUsed = 1608
	ErrNoFormatDescriptionEventBeforeBinlogStatement = 1609
	ErrSlaveCorruptEvent = 1610
	ErrLoadDataInvalidColumn = 1611
	ErrLogPurgeNoFile = 1612
	ErrXaRbtimeout = 1613
	ErrXaRbdeadlock = 1614
	ErrNeedReprepare = 1615
	ErrDelayedNotSupported = 1616
	WarnNoMasterInfo = 1617
	WarnOptionIgnored = 1618
	WarnPluginDeleteBuiltin = 1619
	WarnPluginBusy = 1620
	ErrVariableIsReadonly = 1621
	ErrWarnEngineTransactionRollback = 1622
	ErrSlaveHeartbeatFailure = 1623
	ErrSlaveHeartbeatValueOutOfRange = 1624
	ErrNdbReplicationSchema = 1625
	ErrConflictFnParse = 1626
	ErrExceptionsWrite = 1627
	ErrTooLongTableComment = 1628
	ErrTooLongFieldComment = 1629
	ErrFuncInexistentNameCollision = 1630
	ErrDatabaseName = 1631
	ErrTableName = 1632
	ErrPartitionName = 1633
	ErrSubpartitionName = 1634
	ErrTemporaryName = 1635
	ErrRenamedName = 1636
	ErrTooManyConcurrentTrxs = 1637
	WarnNonASCIISeparatorNotImplemented = 1638
	ErrDebugSyncTimeout = 1639
	ErrDebugSyncHitLimit = 1640
	ErrDupSignalSet = 1641
	ErrSignalWarn = 1642
	ErrSignalNotFound = 1643
	ErrSignalException = 1644
	ErrResignalWithoutActiveHandler = 1645
	ErrSignalBadConditionType = 1646
	WarnCondItemTruncated = 1647
	ErrCondItemTooLong = 1648
	ErrUnknownLocale = 1649
	ErrSlaveIgnoreServerIds = 1650
	ErrQueryCacheDisabled = 1651
	ErrSameNamePartitionField = 1652
	ErrPartitionColumnList = 1653
	ErrWrongTypeColumnValue = 1654
	ErrTooManyPartitionFuncFields = 1655
	ErrMaxvalueInValuesIn = 1656
	ErrTooManyValues = 1657
	ErrRowSinglePartitionField = 1658
	ErrFieldTypeNotAllowedAsPartitionField = 1659
	ErrPartitionFieldsTooLong = 1660
	ErrBinlogRowEngineAndStmtEngine = 1661
	ErrBinlogRowModeAndStmtEngine = 1662
	ErrBinlogUnsafeAndStmtEngine = 1663
	ErrBinlogRowInjectionAndStmtEngine = 1664
	ErrBinlogStmtModeAndRowEngine = 1665
	ErrBinlogRowInjectionAndStmtMode = 1666
	ErrBinlogMultipleEnginesAndSelfLoggingEngine = 1667
	ErrBinlogUnsafeLimit = 1668
	ErrBinlogUnsafeInsertDelayed = 1669
	ErrBinlogUnsafeSystemTable = 1670
	ErrBinlogUnsafeAutoincColumns = 1671
	ErrBinlogUnsafeUdf = 1672
	ErrBinlogUnsafeSystemVariable = 1673
	ErrBinlogUnsafeSystemFunction = 1674
	ErrBinlogUnsafeNontransAfterTrans = 1675
	ErrMessageAndStatement = 1676
	ErrSlaveConversionFailed = 1677
	ErrSlaveCantCreateConversion = 1678
	ErrInsideTransactionPreventsSwitchBinlogFormat = 1679
	ErrPathLength = 1680
	ErrWarnDeprecatedSyntaxNoReplacement = 1681
	ErrWrongNativeTableStructure = 1682
	ErrWrongPerfschemaUsage = 1683
	ErrWarnISSkippedTable = 1684
	ErrInsideTransactionPreventsSwitchBinlogDirect = 1685
	ErrStoredFunctionPreventsSwitchBinlogDirect = 1686
	ErrSpatialMustHaveGeomCol = 1687
	ErrTooLongIndexComment = 1688
	ErrLockAborted = 1689
	ErrDataOutOfRange = 1690
	ErrWrongSpvarTypeInLimit = 1691
	ErrBinlogUnsafeMultipleEnginesAndSelfLoggingEngine = 1692
	ErrBinlogUnsafeMixedStatement = 1693
	ErrInsideTransactionPreventsSwitchSQLLogBin = 1694
	ErrStoredFunctionPreventsSwitchSQLLogBin = 1695
	ErrFailedReadFromParFile = 1696
	ErrValuesIsNotIntType = 1697
	ErrAccessDeniedNoPassword = 1698
	ErrSetPasswordAuthPlugin = 1699
	ErrGrantPluginUserExists = 1700
	ErrTruncateIllegalFk = 1701
	ErrPluginIsPermanent = 1702
	ErrSlaveHeartbeatValueOutOfRangeMin = 1703
	ErrSlaveHeartbeatValueOutOfRangeMax = 1704
	ErrStmtCacheFull = 1705
	ErrMultiUpdateKeyConflict = 1706
	ErrTableNeedsRebuild = 1707
	WarnOptionBelowLimit = 1708
	ErrIndexColumnTooLong = 1709
	ErrErrorInTriggerBody = 1710
	ErrErrorInUnknownTriggerBody = 1711
	ErrIndexCorrupt = 1712
	ErrUndoRecordTooBig = 1713
	ErrBinlogUnsafeInsertIgnoreSelect = 1714
	ErrBinlogUnsafeInsertSelectUpdate = 1715
	ErrBinlogUnsafeReplaceSelect = 1716
	ErrBinlogUnsafeCreateIgnoreSelect = 1717
	ErrBinlogUnsafeCreateReplaceSelect = 1718
	ErrBinlogUnsafeUpdateIgnore = 1719
	ErrPluginNoUninstall = 1720
	ErrPluginNoInstall = 1721
	ErrBinlogUnsafeWriteAutoincSelect = 1722
	ErrBinlogUnsafeCreateSelectAutoinc = 1723
	ErrBinlogUnsafeInsertTwoKeys = 1724
	ErrTableInFkCheck = 1725
	ErrUnsupportedEngine = 1726
	ErrBinlogUnsafeAutoincNotFirst = 1727
	ErrCannotLoadFromTableV2 = 1728
	ErrMasterDelayValueOutOfRange = 1729
	ErrOnlyFdAndRbrEventsAllowedInBinlogStatement = 1730
	ErrPartitionExchangeDifferentOption = 1731
	ErrPartitionExchangePartTable = 1732
	ErrPartitionExchangeTempTable = 1733
	ErrPartitionInsteadOfSubpartition = 1734
	ErrUnknownPartition = 1735
	ErrTablesDifferentMetadata = 1736
	ErrRowDoesNotMatchPartition = 1737
	ErrBinlogCacheSizeGreaterThanMax = 1738
	ErrWarnIndexNotApplicable = 1739
	ErrPartitionExchangeForeignKey = 1740
	ErrNoSuchKeyValue = 1741
	ErrRplInfoDataTooLong = 1742
	ErrNetworkReadEventChecksumFailure = 1743
	ErrBinlogReadEventChecksumFailure = 1744
	ErrBinlogStmtCacheSizeGreaterThanMax = 1745
	ErrCantUpdateTableInCreateTableSelect = 1746
	ErrPartitionClauseOnNonpartitioned = 1747
	ErrRowDoesNotMatchGivenPartitionSet = 1748
	ErrNoSuchPartitionunused = 1749
	ErrChangeRplInfoRepositoryFailure = 1750
	ErrWarningNotCompleteRollbackWithCreatedTempTable = 1751
	ErrWarningNotCompleteRollbackWithDroppedTempTable = 1752
	ErrMtsFeatureIsNotSupported = 1753
	ErrMtsUpdatedDbsGreaterMax = 1754
	ErrMtsCantParallel = 1755
	ErrMtsInconsistentData = 1756
	ErrFulltextNotSupportedWithPartitioning = 1757
	ErrDaInvalidConditionNumber = 1758
	ErrInsecurePlainText = 1759
	ErrInsecureChangeMaster = 1760
	ErrForeignDuplicateKeyWithChildInfo = 1761
	ErrForeignDuplicateKeyWithoutChildInfo = 1762
	ErrSQLthreadWithSecureSlave = 1763
	ErrTableHasNoFt = 1764
	ErrVariableNotSettableInSfOrTrigger = 1765
	ErrVariableNotSettableInTransaction = 1766
	ErrGtidNextIsNotInGtidNextList = 1767
	ErrCantChangeGtidNextInTransactionWhenGtidNextListIsNull = 1768
	ErrSetStatementCannotInvokeFunction = 1769
	ErrGtidNextCantBeAutomaticIfGtidNextListIsNonNull = 1770
	ErrSkippingLoggedTransaction = 1771
	ErrMalformedGtidSetSpecification = 1772
	ErrMalformedGtidSetEncoding = 1773
	ErrMalformedGtidSpecification = 1774
	ErrGnoExhausted = 1775
	ErrBadSlaveAutoPosition = 1776
	ErrAutoPositionRequiresGtidModeOn = 1777
	ErrCantDoImplicitCommitInTrxWhenGtidNextIsSet = 1778
	ErrGtidMode2Or3RequiresEnforceGtidConsistencyOn = 1779
	ErrGtidModeRequiresBinlog = 1780
	ErrCantSetGtidNextToGtidWhenGtidModeIsOff = 1781
	ErrCantSetGtidNextToAnonymousWhenGtidModeIsOn = 1782
	ErrCantSetGtidNextListToNonNullWhenGtidModeIsOff = 1783
	ErrFoundGtidEventWhenGtidModeIsOff = 1784
	ErrGtidUnsafeNonTransactionalTable = 1785
	ErrGtidUnsafeCreateSelect = 1786
	ErrGtidUnsafeCreateDropTemporaryTableInTransaction = 1787
	ErrGtidModeCanOnlyChangeOneStepAtATime = 1788
	ErrMasterHasPurgedRequiredGtids = 1789
	ErrCantSetGtidNextWhenOwningGtid = 1790
	ErrUnknownExplainFormat = 1791
	ErrCantExecuteInReadOnlyTransaction = 1792
	ErrTooLongTablePartitionComment = 1793
	ErrSlaveConfiguration = 1794
	ErrInnodbFtLimit = 1795
	ErrInnodbNoFtTempTable = 1796
	ErrInnodbFtWrongDocidColumn = 1797
	ErrInnodbFtWrongDocidIndex = 1798
	ErrInnodbOnlineLogTooBig = 1799
	ErrUnknownAlterAlgorithm = 1800
	ErrUnknownAlterLock = 1801
	ErrMtsChangeMasterCantRunWithGaps = 1802
	ErrMtsRecoveryFailure = 1803
	ErrMtsResetWorkers = 1804
	ErrColCountDoesntMatchCorruptedV2 = 1805
	ErrSlaveSilentRetryTransaction = 1806
	ErrDiscardFkChecksRunning = 1807
	ErrTableSchemaMismatch = 1808
	ErrTableInSystemTablespace = 1809
	ErrIoRead = 1810
	ErrIoWrite = 1811
	ErrTablespaceMissing = 1812
	ErrTablespaceExists = 1813
	ErrTablespaceDiscarded = 1814
	ErrInternal = 1815
	ErrInnodbImport = 1816
	ErrInnodbIndexCorrupt = 1817
	ErrInvalidYearColumnLength = 1818
	ErrNotValidPassword = 1819
	ErrMustChangePassword = 1820
	ErrFkNoIndexChild = 1821
	ErrFkNoIndexParent = 1822
	ErrFkFailAddSystem = 1823
	ErrFkCannotOpenParent = 1824
	ErrFkIncorrectOption = 1825
	ErrFkDupName = 1826
	ErrPasswordFormat = 1827
	ErrFkColumnCannotDrop = 1828
	ErrFkColumnCannotDropChild = 1829
	ErrFkColumnNotNull = 1830
	ErrDupIndex = 1831
	ErrFkColumnCannotChange = 1832
	ErrFkColumnCannotChangeChild = 1833
	ErrFkCannotDeleteParent = 1834
	ErrMalformedPacket = 1835
	ErrReadOnlyMode = 1836
	ErrGtidNextTypeUndefinedGroup = 1837
	ErrVariableNotSettableInSp = 1838
	ErrCantSetGtidPurgedWhenGtidModeIsOff = 1839
	ErrCantSetGtidPurgedWhenGtidExecutedIsNotEmpty = 1840
	ErrCantSetGtidPurgedWhenOwnedGtidsIsNotEmpty = 1841
	ErrGtidPurgedWasChanged = 1842
	ErrGtidExecutedWasChanged = 1843
	ErrBinlogStmtModeAndNoReplTables = 1844
	ErrAlterOperationNotSupported = 1845
	ErrAlterOperationNotSupportedReason = 1846
	ErrAlterOperationNotSupportedReasonCopy = 1847
	ErrAlterOperationNotSupportedReasonPartition = 1848
	ErrAlterOperationNotSupportedReasonFkRename = 1849
	ErrAlterOperationNotSupportedReasonColumnType = 1850
	ErrAlterOperationNotSupportedReasonFkCheck = 1851
	ErrAlterOperationNotSupportedReasonIgnore = 1852
	ErrAlterOperationNotSupportedReasonNopk = 1853
	ErrAlterOperationNotSupportedReasonAutoinc = 1854
	ErrAlterOperationNotSupportedReasonHiddenFts = 1855
	ErrAlterOperationNotSupportedReasonChangeFts = 1856
	ErrAlterOperationNotSupportedReasonFts = 1857
	ErrSQLSlaveSkipCounterNotSettableInGtidMode = 1858
	ErrDupUnknownInIndex = 1859
	ErrIdentCausesTooLongPath = 1860
	ErrAlterOperationNotSupportedReasonNotNull = 1861
	ErrMustChangePasswordLogin = 1862
	ErrRowInWrongPartition = 1863
	ErrErrorLast = 1863 // highest code; intentionally equal to ErrRowInWrongPartition
)

882
vendor/github.com/pingcap/tidb/mysql/errname.go generated vendored Normal file
View file

@ -0,0 +1,882 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mysql
// MySQLErrName maps error code to MySQL error messages.
var MySQLErrName = map[uint16]string{
ErrHashchk: "hashchk",
ErrNisamchk: "isamchk",
ErrNo: "NO",
ErrYes: "YES",
ErrCantCreateFile: "Can't create file '%-.200s' (errno: %d - %s)",
ErrCantCreateTable: "Can't create table '%-.200s' (errno: %d)",
ErrCantCreateDb: "Can't create database '%-.192s' (errno: %d)",
ErrDbCreateExists: "Can't create database '%-.192s'; database exists",
ErrDbDropExists: "Can't drop database '%-.192s'; database doesn't exist",
ErrDbDropDelete: "Error dropping database (can't delete '%-.192s', errno: %d)",
ErrDbDropRmdir: "Error dropping database (can't rmdir '%-.192s', errno: %d)",
ErrCantDeleteFile: "Error on delete of '%-.192s' (errno: %d - %s)",
ErrCantFindSystemRec: "Can't read record in system table",
ErrCantGetStat: "Can't get status of '%-.200s' (errno: %d - %s)",
ErrCantGetWd: "Can't get working directory (errno: %d - %s)",
ErrCantLock: "Can't lock file (errno: %d - %s)",
ErrCantOpenFile: "Can't open file: '%-.200s' (errno: %d - %s)",
ErrFileNotFound: "Can't find file: '%-.200s' (errno: %d - %s)",
ErrCantReadDir: "Can't read dir of '%-.192s' (errno: %d - %s)",
ErrCantSetWd: "Can't change dir to '%-.192s' (errno: %d - %s)",
ErrCheckread: "Record has changed since last read in table '%-.192s'",
ErrDiskFull: "Disk full (%s); waiting for someone to free some space... (errno: %d - %s)",
ErrDupKey: "Can't write; duplicate key in table '%-.192s'",
ErrErrorOnClose: "Error on close of '%-.192s' (errno: %d - %s)",
ErrErrorOnRead: "Error reading file '%-.200s' (errno: %d - %s)",
ErrErrorOnRename: "Error on rename of '%-.210s' to '%-.210s' (errno: %d - %s)",
ErrErrorOnWrite: "Error writing file '%-.200s' (errno: %d - %s)",
ErrFileUsed: "'%-.192s' is locked against change",
ErrFilsortAbort: "Sort aborted",
ErrFormNotFound: "View '%-.192s' doesn't exist for '%-.192s'",
ErrGetErrno: "Got error %d from storage engine",
ErrIllegalHa: "Table storage engine for '%-.192s' doesn't have this option",
ErrKeyNotFound: "Can't find record in '%-.192s'",
ErrNotFormFile: "Incorrect information in file: '%-.200s'",
ErrNotKeyfile: "Incorrect key file for table '%-.200s'; try to repair it",
ErrOldKeyfile: "Old key file for table '%-.192s'; repair it!",
ErrOpenAsReadonly: "Table '%-.192s' is read only",
ErrOutofmemory: "Out of memory; restart server and try again (needed %d bytes)",
ErrOutOfSortmemory: "Out of sort memory, consider increasing server sort buffer size",
ErrUnexpectedEOF: "Unexpected EOF found when reading file '%-.192s' (errno: %d - %s)",
ErrConCount: "Too many connections",
ErrOutOfResources: "Out of memory; check if mysqld or some other process uses all available memory; if not, you may have to use 'ulimit' to allow mysqld to use more memory or you can add more swap space",
ErrBadHost: "Can't get hostname for your address",
ErrHandshake: "Bad handshake",
ErrDbaccessDenied: "Access denied for user '%-.48s'@'%-.64s' to database '%-.192s'",
ErrAccessDenied: "Access denied for user '%-.48s'@'%-.64s' (using password: %s)",
ErrNoDb: "No database selected",
ErrUnknownCom: "Unknown command",
ErrBadNull: "Column '%-.192s' cannot be null",
ErrBadDb: "Unknown database '%-.192s'",
ErrTableExists: "Table '%-.192s' already exists",
ErrBadTable: "Unknown table '%-.100s'",
ErrNonUniq: "Column '%-.192s' in %-.192s is ambiguous",
ErrServerShutdown: "Server shutdown in progress",
ErrBadField: "Unknown column '%-.192s' in '%-.192s'",
ErrWrongFieldWithGroup: "'%-.192s' isn't in GROUP BY",
ErrWrongGroupField: "Can't group on '%-.192s'",
ErrWrongSumSelect: "Statement has sum functions and columns in same statement",
ErrWrongValueCount: "Column count doesn't match value count",
ErrTooLongIdent: "Identifier name '%-.100s' is too long",
ErrDupFieldname: "Duplicate column name '%-.192s'",
ErrDupKeyname: "Duplicate key name '%-.192s'",
ErrDupEntry: "Duplicate entry '%-.192s' for key %d",
ErrWrongFieldSpec: "Incorrect column specifier for column '%-.192s'",
ErrParse: "%s near '%-.80s' at line %d",
ErrEmptyQuery: "Query was empty",
ErrNonuniqTable: "Not unique table/alias: '%-.192s'",
ErrInvalidDefault: "Invalid default value for '%-.192s'",
ErrMultiplePriKey: "Multiple primary key defined",
ErrTooManyKeys: "Too many keys specified; max %d keys allowed",
ErrTooManyKeyParts: "Too many key parts specified; max %d parts allowed",
ErrTooLongKey: "Specified key was too long; max key length is %d bytes",
ErrKeyColumnDoesNotExits: "Key column '%-.192s' doesn't exist in table",
ErrBlobUsedAsKey: "BLOB column '%-.192s' can't be used in key specification with the used table type",
ErrTooBigFieldlength: "Column length too big for column '%-.192s' (max = %lu); use BLOB or TEXT instead",
ErrWrongAutoKey: "Incorrect table definition; there can be only one auto column and it must be defined as a key",
ErrReady: "%s: ready for connections.\nVersion: '%s' socket: '%s' port: %d",
ErrNormalShutdown: "%s: Normal shutdown\n",
ErrGotSignal: "%s: Got signal %d. Aborting!\n",
ErrShutdownComplete: "%s: Shutdown complete\n",
ErrForcingClose: "%s: Forcing close of thread %ld user: '%-.48s'\n",
ErrIpsock: "Can't create IP socket",
ErrNoSuchIndex: "Table '%-.192s' has no index like the one used in CREATE INDEX; recreate the table",
ErrWrongFieldTerminators: "Field separator argument is not what is expected; check the manual",
ErrBlobsAndNoTerminated: "You can't use fixed rowlength with BLOBs; please use 'fields terminated by'",
ErrTextfileNotReadable: "The file '%-.128s' must be in the database directory or be readable by all",
ErrFileExists: "File '%-.200s' already exists",
ErrLoadInfo: "Records: %ld Deleted: %ld Skipped: %ld Warnings: %ld",
ErrAlterInfo: "Records: %ld Duplicates: %ld",
ErrWrongSubKey: "Incorrect prefix key; the used key part isn't a string, the used length is longer than the key part, or the storage engine doesn't support unique prefix keys",
ErrCantRemoveAllFields: "You can't delete all columns with ALTER TABLE; use DROP TABLE instead",
ErrCantDropFieldOrKey: "Can't DROP '%-.192s'; check that column/key exists",
ErrInsertInfo: "Records: %ld Duplicates: %ld Warnings: %ld",
ErrUpdateTableUsed: "You can't specify target table '%-.192s' for update in FROM clause",
ErrNoSuchThread: "Unknown thread id: %lu",
ErrKillDenied: "You are not owner of thread %lu",
ErrNoTablesUsed: "No tables used",
ErrTooBigSet: "Too many strings for column %-.192s and SET",
ErrNoUniqueLogfile: "Can't generate a unique log-filename %-.200s.(1-999)\n",
ErrTableNotLockedForWrite: "Table '%-.192s' was locked with a READ lock and can't be updated",
ErrTableNotLocked: "Table '%-.192s' was not locked with LOCK TABLES",
ErrBlobCantHaveDefault: "BLOB/TEXT column '%-.192s' can't have a default value",
ErrWrongDbName: "Incorrect database name '%-.100s'",
ErrWrongTableName: "Incorrect table name '%-.100s'",
ErrTooBigSelect: "The SELECT would examine more than MAXJOINSIZE rows; check your WHERE and use SET SQLBIGSELECTS=1 or SET MAXJOINSIZE=# if the SELECT is okay",
ErrUnknown: "Unknown error",
ErrUnknownProcedure: "Unknown procedure '%-.192s'",
ErrWrongParamcountToProcedure: "Incorrect parameter count to procedure '%-.192s'",
ErrWrongParametersToProcedure: "Incorrect parameters to procedure '%-.192s'",
ErrUnknownTable: "Unknown table '%-.192s' in %-.32s",
ErrFieldSpecifiedTwice: "Column '%-.192s' specified twice",
ErrInvalidGroupFuncUse: "Invalid use of group function",
ErrUnsupportedExtension: "Table '%-.192s' uses an extension that doesn't exist in this MySQL version",
ErrTableMustHaveColumns: "A table must have at least 1 column",
ErrRecordFileFull: "The table '%-.192s' is full",
ErrUnknownCharacterSet: "Unknown character set: '%-.64s'",
ErrTooManyTables: "Too many tables; MySQL can only use %d tables in a join",
ErrTooManyFields: "Too many columns",
ErrTooBigRowsize: "Row size too large. The maximum row size for the used table type, not counting BLOBs, is %ld. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs",
ErrStackOverrun: "Thread stack overrun: Used: %ld of a %ld stack. Use 'mysqld --threadStack=#' to specify a bigger stack if needed",
ErrWrongOuterJoin: "Cross dependency found in OUTER JOIN; examine your ON conditions",
ErrNullColumnInIndex: "Table handler doesn't support NULL in given index. Please change column '%-.192s' to be NOT NULL or use another handler",
ErrCantFindUdf: "Can't load function '%-.192s'",
ErrCantInitializeUdf: "Can't initialize function '%-.192s'; %-.80s",
ErrUdfNoPaths: "No paths allowed for shared library",
ErrUdfExists: "Function '%-.192s' already exists",
ErrCantOpenLibrary: "Can't open shared library '%-.192s' (errno: %d %-.128s)",
ErrCantFindDlEntry: "Can't find symbol '%-.128s' in library",
ErrFunctionNotDefined: "Function '%-.192s' is not defined",
ErrHostIsBlocked: "Host '%-.64s' is blocked because of many connection errors; unblock with 'mysqladmin flush-hosts'",
ErrHostNotPrivileged: "Host '%-.64s' is not allowed to connect to this MySQL server",
ErrPasswordAnonymousUser: "You are using MySQL as an anonymous user and anonymous users are not allowed to change passwords",
ErrPasswordNotAllowed: "You must have privileges to update tables in the mysql database to be able to change passwords for others",
ErrPasswordNoMatch: "Can't find any matching row in the user table",
ErrUpdateInfo: "Rows matched: %ld Changed: %ld Warnings: %ld",
ErrCantCreateThread: "Can't create a new thread (errno %d); if you are not out of available memory, you can consult the manual for a possible OS-dependent bug",
ErrWrongValueCountOnRow: "Column count doesn't match value count at row %ld",
ErrCantReopenTable: "Can't reopen table: '%-.192s'",
ErrInvalidUseOfNull: "Invalid use of NULL value",
ErrRegexp: "Got error '%-.64s' from regexp",
ErrMixOfGroupFuncAndFields: "Mixing of GROUP columns (MIN(),MAX(),COUNT(),...) with no GROUP columns is illegal if there is no GROUP BY clause",
ErrNonexistingGrant: "There is no such grant defined for user '%-.48s' on host '%-.64s'",
ErrTableaccessDenied: "%-.128s command denied to user '%-.48s'@'%-.64s' for table '%-.64s'",
ErrColumnaccessDenied: "%-.16s command denied to user '%-.48s'@'%-.64s' for column '%-.192s' in table '%-.192s'",
ErrIllegalGrantForTable: "Illegal GRANT/REVOKE command; please consult the manual to see which privileges can be used",
ErrGrantWrongHostOrUser: "The host or user argument to GRANT is too long",
ErrNoSuchTable: "Table '%-.192s.%-.192s' doesn't exist",
ErrNonexistingTableGrant: "There is no such grant defined for user '%-.48s' on host '%-.64s' on table '%-.192s'",
ErrNotAllowedCommand: "The used command is not allowed with this MySQL version",
ErrSyntax: "You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use",
ErrDelayedCantChangeLock: "Delayed insert thread couldn't get requested lock for table %-.192s",
ErrTooManyDelayedThreads: "Too many delayed threads in use",
ErrAbortingConnection: "Aborted connection %ld to db: '%-.192s' user: '%-.48s' (%-.64s)",
ErrNetPacketTooLarge: "Got a packet bigger than 'maxAllowedPacket' bytes",
ErrNetReadErrorFromPipe: "Got a read error from the connection pipe",
ErrNetFcntl: "Got an error from fcntl()",
ErrNetPacketsOutOfOrder: "Got packets out of order",
ErrNetUncompress: "Couldn't uncompress communication packet",
ErrNetRead: "Got an error reading communication packets",
ErrNetReadInterrupted: "Got timeout reading communication packets",
ErrNetErrorOnWrite: "Got an error writing communication packets",
ErrNetWriteInterrupted: "Got timeout writing communication packets",
ErrTooLongString: "Result string is longer than 'maxAllowedPacket' bytes",
ErrTableCantHandleBlob: "The used table type doesn't support BLOB/TEXT columns",
ErrTableCantHandleAutoIncrement: "The used table type doesn't support AUTOINCREMENT columns",
ErrDelayedInsertTableLocked: "INSERT DELAYED can't be used with table '%-.192s' because it is locked with LOCK TABLES",
ErrWrongColumnName: "Incorrect column name '%-.100s'",
ErrWrongKeyColumn: "The used storage engine can't index column '%-.192s'",
ErrWrongMrgTable: "Unable to open underlying table which is differently defined or of non-MyISAM type or doesn't exist",
ErrDupUnique: "Can't write, because of unique constraint, to table '%-.192s'",
ErrBlobKeyWithoutLength: "BLOB/TEXT column '%-.192s' used in key specification without a key length",
ErrPrimaryCantHaveNull: "All parts of a PRIMARY KEY must be NOT NULL; if you need NULL in a key, use UNIQUE instead",
ErrTooManyRows: "Result consisted of more than one row",
ErrRequiresPrimaryKey: "This table type requires a primary key",
ErrNoRaidCompiled: "This version of MySQL is not compiled with RAID support",
ErrUpdateWithoutKeyInSafeMode: "You are using safe update mode and you tried to update a table without a WHERE that uses a KEY column",
ErrKeyDoesNotExits: "Key '%-.192s' doesn't exist in table '%-.192s'",
ErrCheckNoSuchTable: "Can't open table",
ErrCheckNotImplemented: "The storage engine for the table doesn't support %s",
ErrCantDoThisDuringAnTransaction: "You are not allowed to execute this command in a transaction",
ErrErrorDuringCommit: "Got error %d during COMMIT",
ErrErrorDuringRollback: "Got error %d during ROLLBACK",
ErrErrorDuringFlushLogs: "Got error %d during FLUSHLOGS",
ErrErrorDuringCheckpoint: "Got error %d during CHECKPOINT",
ErrNewAbortingConnection: "Aborted connection %ld to db: '%-.192s' user: '%-.48s' host: '%-.64s' (%-.64s)",
ErrDumpNotImplemented: "The storage engine for the table does not support binary table dump",
ErrFlushMasterBinlogClosed: "Binlog closed, cannot RESET MASTER",
ErrIndexRebuild: "Failed rebuilding the index of dumped table '%-.192s'",
ErrMaster: "Error from master: '%-.64s'",
ErrMasterNetRead: "Net error reading from master",
ErrMasterNetWrite: "Net error writing to master",
ErrFtMatchingKeyNotFound: "Can't find FULLTEXT index matching the column list",
ErrLockOrActiveTransaction: "Can't execute the given command because you have active locked tables or an active transaction",
ErrUnknownSystemVariable: "Unknown system variable '%-.64s'",
ErrCrashedOnUsage: "Table '%-.192s' is marked as crashed and should be repaired",
ErrCrashedOnRepair: "Table '%-.192s' is marked as crashed and last (automatic?) repair failed",
ErrWarningNotCompleteRollback: "Some non-transactional changed tables couldn't be rolled back",
ErrTransCacheFull: "Multi-statement transaction required more than 'maxBinlogCacheSize' bytes of storage; increase this mysqld variable and try again",
ErrSlaveMustStop: "This operation cannot be performed with a running slave; run STOP SLAVE first",
ErrSlaveNotRunning: "This operation requires a running slave; configure slave and do START SLAVE",
ErrBadSlave: "The server is not configured as slave; fix in config file or with CHANGE MASTER TO",
ErrMasterInfo: "Could not initialize master info structure; more error messages can be found in the MySQL error log",
ErrSlaveThread: "Could not create slave thread; check system resources",
ErrTooManyUserConnections: "User %-.64s already has more than 'maxUserConnections' active connections",
ErrSetConstantsOnly: "You may only use constant expressions with SET",
ErrLockWaitTimeout: "Lock wait timeout exceeded; try restarting transaction",
ErrLockTableFull: "The total number of locks exceeds the lock table size",
ErrReadOnlyTransaction: "Update locks cannot be acquired during a READ UNCOMMITTED transaction",
ErrDropDbWithReadLock: "DROP DATABASE not allowed while thread is holding global read lock",
ErrCreateDbWithReadLock: "CREATE DATABASE not allowed while thread is holding global read lock",
ErrWrongArguments: "Incorrect arguments to %s",
ErrNoPermissionToCreateUser: "'%-.48s'@'%-.64s' is not allowed to create new users",
ErrUnionTablesInDifferentDir: "Incorrect table definition; all MERGE tables must be in the same database",
ErrLockDeadlock: "Deadlock found when trying to get lock; try restarting transaction",
ErrTableCantHandleFt: "The used table type doesn't support FULLTEXT indexes",
ErrCannotAddForeign: "Cannot add foreign key constraint",
ErrNoReferencedRow: "Cannot add or update a child row: a foreign key constraint fails",
ErrRowIsReferenced: "Cannot delete or update a parent row: a foreign key constraint fails",
ErrConnectToMaster: "Error connecting to master: %-.128s",
ErrQueryOnMaster: "Error running query on master: %-.128s",
ErrErrorWhenExecutingCommand: "Error when executing command %s: %-.128s",
ErrWrongUsage: "Incorrect usage of %s and %s",
ErrWrongNumberOfColumnsInSelect: "The used SELECT statements have a different number of columns",
ErrCantUpdateWithReadlock: "Can't execute the query because you have a conflicting read lock",
ErrMixingNotAllowed: "Mixing of transactional and non-transactional tables is disabled",
ErrDupArgument: "Option '%s' used twice in statement",
ErrUserLimitReached: "User '%-.64s' has exceeded the '%s' resource (current value: %ld)",
ErrSpecificAccessDenied: "Access denied; you need (at least one of) the %-.128s privilege(s) for this operation",
ErrLocalVariable: "Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL",
ErrGlobalVariable: "Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL",
ErrNoDefault: "Variable '%-.64s' doesn't have a default value",
ErrWrongValueForVar: "Variable '%-.64s' can't be set to the value of '%-.200s'",
ErrWrongTypeForVar: "Incorrect argument type to variable '%-.64s'",
ErrVarCantBeRead: "Variable '%-.64s' can only be set, not read",
ErrCantUseOptionHere: "Incorrect usage/placement of '%s'",
ErrNotSupportedYet: "This version of MySQL doesn't yet support '%s'",
ErrMasterFatalErrorReadingBinlog: "Got fatal error %d from master when reading data from binary log: '%-.320s'",
ErrSlaveIgnoredTable: "Slave SQL thread ignored the query because of replicate-*-table rules",
ErrIncorrectGlobalLocalVar: "Variable '%-.192s' is a %s variable",
ErrWrongFkDef: "Incorrect foreign key definition for '%-.192s': %s",
ErrKeyRefDoNotMatchTableRef: "Key reference and table reference don't match",
ErrOperandColumns: "Operand should contain %d column(s)",
ErrSubqueryNo1Row: "Subquery returns more than 1 row",
ErrUnknownStmtHandler: "Unknown prepared statement handler (%.*s) given to %s",
ErrCorruptHelpDb: "Help database is corrupt or does not exist",
ErrCyclicReference: "Cyclic reference on subqueries",
ErrAutoConvert: "Converting column '%s' from %s to %s",
ErrIllegalReference: "Reference '%-.64s' not supported (%s)",
ErrDerivedMustHaveAlias: "Every derived table must have its own alias",
ErrSelectReduced: "Select %u was reduced during optimization",
ErrTablenameNotAllowedHere: "Table '%-.192s' from one of the SELECTs cannot be used in %-.32s",
ErrNotSupportedAuthMode: "Client does not support authentication protocol requested by server; consider upgrading MySQL client",
ErrSpatialCantHaveNull: "All parts of a SPATIAL index must be NOT NULL",
ErrCollationCharsetMismatch: "COLLATION '%s' is not valid for CHARACTER SET '%s'",
ErrSlaveWasRunning: "Slave is already running",
ErrSlaveWasNotRunning: "Slave already has been stopped",
ErrTooBigForUncompress: "Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)",
ErrZlibZMem: "ZLIB: Not enough memory",
ErrZlibZBuf: "ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)",
ErrZlibZData: "ZLIB: Input data corrupted",
ErrCutValueGroupConcat: "Row %u was cut by GROUPCONCAT()",
ErrWarnTooFewRecords: "Row %ld doesn't contain data for all columns",
ErrWarnTooManyRecords: "Row %ld was truncated; it contained more data than there were input columns",
ErrWarnNullToNotnull: "Column set to default value; NULL supplied to NOT NULL column '%s' at row %ld",
ErrWarnDataOutOfRange: "Out of range value for column '%s' at row %ld",
WarnDataTruncated: "Data truncated for column '%s' at row %ld",
ErrWarnUsingOtherHandler: "Using storage engine %s for table '%s'",
ErrCantAggregate2collations: "Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'",
ErrDropUser: "Cannot drop one or more of the requested users",
ErrRevokeGrants: "Can't revoke all privileges for one or more of the requested users",
ErrCantAggregate3collations: "Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'",
ErrCantAggregateNcollations: "Illegal mix of collations for operation '%s'",
ErrVariableIsNotStruct: "Variable '%-.64s' is not a variable component (can't be used as XXXX.variableName)",
ErrUnknownCollation: "Unknown collation: '%-.64s'",
ErrSlaveIgnoredSslParams: "SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started",
ErrServerIsInSecureAuthMode: "Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format",
ErrWarnFieldResolved: "Field or reference '%-.192s%s%-.192s%s%-.192s' of SELECT #%d was resolved in SELECT #%d",
ErrBadSlaveUntilCond: "Incorrect parameter or combination of parameters for START SLAVE UNTIL",
ErrMissingSkipSlave: "It is recommended to use --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you will get problems if you get an unexpected slave's mysqld restart",
ErrUntilCondIgnored: "SQL thread is not to be started so UNTIL options are ignored",
ErrWrongNameForIndex: "Incorrect index name '%-.100s'",
ErrWrongNameForCatalog: "Incorrect catalog name '%-.100s'",
ErrWarnQcResize: "Query cache failed to set size %lu; new query cache size is %lu",
ErrBadFtColumn: "Column '%-.192s' cannot be part of FULLTEXT index",
ErrUnknownKeyCache: "Unknown key cache '%-.100s'",
ErrWarnHostnameWontWork: "MySQL is started in --skip-name-resolve mode; you must restart it without this switch for this grant to work",
ErrUnknownStorageEngine: "Unknown storage engine '%s'",
ErrWarnDeprecatedSyntax: "'%s' is deprecated and will be removed in a future release. Please use %s instead",
ErrNonUpdatableTable: "The target table %-.100s of the %s is not updatable",
ErrFeatureDisabled: "The '%s' feature is disabled; you need MySQL built with '%s' to have it working",
ErrOptionPreventsStatement: "The MySQL server is running with the %s option so it cannot execute this statement",
ErrDuplicatedValueInType: "Column '%-.100s' has duplicated value '%-.64s' in %s",
ErrTruncatedWrongValue: "Truncated incorrect %-.32s value: '%-.128s'",
ErrTooMuchAutoTimestampCols: "Incorrect table definition; there can be only one TIMESTAMP column with CURRENTTIMESTAMP in DEFAULT or ON UPDATE clause",
ErrInvalidOnUpdate: "Invalid ON UPDATE clause for '%-.192s' column",
ErrUnsupportedPs: "This command is not supported in the prepared statement protocol yet",
ErrGetErrmsg: "Got error %d '%-.100s' from %s",
ErrGetTemporaryErrmsg: "Got temporary error %d '%-.100s' from %s",
ErrUnknownTimeZone: "Unknown or incorrect time zone: '%-.64s'",
ErrWarnInvalidTimestamp: "Invalid TIMESTAMP value in column '%s' at row %ld",
ErrInvalidCharacterString: "Invalid %s character string: '%.64s'",
ErrWarnAllowedPacketOverflowed: "Result of %s() was larger than maxAllowedPacket (%ld) - truncated",
ErrConflictingDeclarations: "Conflicting declarations: '%s%s' and '%s%s'",
ErrSpNoRecursiveCreate: "Can't create a %s from within another stored routine",
ErrSpAlreadyExists: "%s %s already exists",
ErrSpDoesNotExist: "%s %s does not exist",
ErrSpDropFailed: "Failed to DROP %s %s",
ErrSpStoreFailed: "Failed to CREATE %s %s",
ErrSpLilabelMismatch: "%s with no matching label: %s",
ErrSpLabelRedefine: "Redefining label %s",
ErrSpLabelMismatch: "End-label %s without match",
ErrSpUninitVar: "Referring to uninitialized variable %s",
ErrSpBadselect: "PROCEDURE %s can't return a result set in the given context",
ErrSpBadreturn: "RETURN is only allowed in a FUNCTION",
ErrSpBadstatement: "%s is not allowed in stored procedures",
ErrUpdateLogDeprecatedIgnored: "The update log is deprecated and replaced by the binary log; SET SQLLOGUPDATE has been ignored.",
ErrUpdateLogDeprecatedTranslated: "The update log is deprecated and replaced by the binary log; SET SQLLOGUPDATE has been translated to SET SQLLOGBIN.",
ErrQueryInterrupted: "Query execution was interrupted",
ErrSpWrongNoOfArgs: "Incorrect number of arguments for %s %s; expected %u, got %u",
ErrSpCondMismatch: "Undefined CONDITION: %s",
ErrSpNoreturn: "No RETURN found in FUNCTION %s",
ErrSpNoreturnend: "FUNCTION %s ended without RETURN",
ErrSpBadCursorQuery: "Cursor statement must be a SELECT",
ErrSpBadCursorSelect: "Cursor SELECT must not have INTO",
ErrSpCursorMismatch: "Undefined CURSOR: %s",
ErrSpCursorAlreadyOpen: "Cursor is already open",
ErrSpCursorNotOpen: "Cursor is not open",
ErrSpUndeclaredVar: "Undeclared variable: %s",
ErrSpWrongNoOfFetchArgs: "Incorrect number of FETCH variables",
ErrSpFetchNoData: "No data - zero rows fetched, selected, or processed",
ErrSpDupParam: "Duplicate parameter: %s",
ErrSpDupVar: "Duplicate variable: %s",
ErrSpDupCond: "Duplicate condition: %s",
ErrSpDupCurs: "Duplicate cursor: %s",
ErrSpCantAlter: "Failed to ALTER %s %s",
ErrSpSubselectNyi: "Subquery value not supported",
ErrStmtNotAllowedInSfOrTrg: "%s is not allowed in stored function or trigger",
ErrSpVarcondAfterCurshndlr: "Variable or condition declaration after cursor or handler declaration",
ErrSpCursorAfterHandler: "Cursor declaration after handler declaration",
ErrSpCaseNotFound: "Case not found for CASE statement",
ErrFparserTooBigFile: "Configuration file '%-.192s' is too big",
ErrFparserBadHeader: "Malformed file type header in file '%-.192s'",
ErrFparserEOFInComment: "Unexpected end of file while parsing comment '%-.200s'",
ErrFparserErrorInParameter: "Error while parsing parameter '%-.192s' (line: '%-.192s')",
ErrFparserEOFInUnknownParameter: "Unexpected end of file while skipping unknown parameter '%-.192s'",
ErrViewNoExplain: "EXPLAIN/SHOW can not be issued; lacking privileges for underlying table",
ErrFrmUnknownType: "File '%-.192s' has unknown type '%-.64s' in its header",
ErrWrongObject: "'%-.192s.%-.192s' is not %s",
ErrNonupdateableColumn: "Column '%-.192s' is not updatable",
ErrViewSelectDerived: "View's SELECT contains a subquery in the FROM clause",
ErrViewSelectClause: "View's SELECT contains a '%s' clause",
ErrViewSelectVariable: "View's SELECT contains a variable or parameter",
ErrViewSelectTmptable: "View's SELECT refers to a temporary table '%-.192s'",
ErrViewWrongList: "View's SELECT and view's field list have different column counts",
ErrWarnViewMerge: "View merge algorithm can't be used here for now (assumed undefined algorithm)",
ErrWarnViewWithoutKey: "View being updated does not have complete key of underlying table in it",
ErrViewInvalid: "View '%-.192s.%-.192s' references invalid table(s) or column(s) or function(s) or definer/invoker of view lack rights to use them",
ErrSpNoDropSp: "Can't drop or alter a %s from within another stored routine",
ErrSpGotoInHndlr: "GOTO is not allowed in a stored procedure handler",
ErrTrgAlreadyExists: "Trigger already exists",
ErrTrgDoesNotExist: "Trigger does not exist",
ErrTrgOnViewOrTempTable: "Trigger's '%-.192s' is view or temporary table",
ErrTrgCantChangeRow: "Updating of %s row is not allowed in %strigger",
ErrTrgNoSuchRowInTrg: "There is no %s row in %s trigger",
ErrNoDefaultForField: "Field '%-.192s' doesn't have a default value",
ErrDivisionByZero: "Division by 0",
ErrTruncatedWrongValueForField: "Incorrect %-.32s value: '%-.128s' for column '%.192s' at row %ld",
ErrIllegalValueForType: "Illegal %s '%-.192s' value found during parsing",
ErrViewNonupdCheck: "CHECK OPTION on non-updatable view '%-.192s.%-.192s'",
ErrViewCheckFailed: "CHECK OPTION failed '%-.192s.%-.192s'",
ErrProcaccessDenied: "%-.16s command denied to user '%-.48s'@'%-.64s' for routine '%-.192s'",
ErrRelayLogFail: "Failed purging old relay logs: %s",
ErrPasswdLength: "Password hash should be a %d-digit hexadecimal number",
ErrUnknownTargetBinlog: "Target log not found in binlog index",
ErrIoErrLogIndexRead: "I/O error reading log index file",
ErrBinlogPurgeProhibited: "Server configuration does not permit binlog purge",
ErrFseekFail: "Failed on fseek()",
ErrBinlogPurgeFatalErr: "Fatal error during log purge",
ErrLogInUse: "A purgeable log is in use, will not purge",
ErrLogPurgeUnknownErr: "Unknown error during log purge",
ErrRelayLogInit: "Failed initializing relay log position: %s",
ErrNoBinaryLogging: "You are not using binary logging",
ErrReservedSyntax: "The '%-.64s' syntax is reserved for purposes internal to the MySQL server",
ErrWsasFailed: "WSAStartup Failed",
ErrDiffGroupsProc: "Can't handle procedures with different groups yet",
ErrNoGroupForProc: "Select must have a group with this procedure",
ErrOrderWithProc: "Can't use ORDER clause with this procedure",
ErrLoggingProhibitChangingOf: "Binary logging and replication forbid changing the global server %s",
ErrNoFileMapping: "Can't map file: %-.200s, errno: %d",
ErrWrongMagic: "Wrong magic in %-.64s",
ErrPsManyParam: "Prepared statement contains too many placeholders",
ErrKeyPart0: "Key part '%-.192s' length cannot be 0",
ErrViewChecksum: "View text checksum failed",
ErrViewMultiupdate: "Can not modify more than one base table through a join view '%-.192s.%-.192s'",
ErrViewNoInsertFieldList: "Can not insert into join view '%-.192s.%-.192s' without fields list",
ErrViewDeleteMergeView: "Can not delete from join view '%-.192s.%-.192s'",
ErrCannotUser: "Operation %s failed for %.256s",
ErrXaerNota: "XAERNOTA: Unknown XID",
ErrXaerInval: "XAERINVAL: Invalid arguments (or unsupported command)",
ErrXaerRmfail: "XAERRMFAIL: The command cannot be executed when global transaction is in the %.64s state",
ErrXaerOutside: "XAEROUTSIDE: Some work is done outside global transaction",
ErrXaerRmerr: "XAERRMERR: Fatal error occurred in the transaction branch - check your data for consistency",
ErrXaRbrollback: "XARBROLLBACK: Transaction branch was rolled back",
ErrNonexistingProcGrant: "There is no such grant defined for user '%-.48s' on host '%-.64s' on routine '%-.192s'",
ErrProcAutoGrantFail: "Failed to grant EXECUTE and ALTER ROUTINE privileges",
ErrProcAutoRevokeFail: "Failed to revoke all privileges to dropped routine",
ErrDataTooLong: "Data too long for column '%s' at row %ld",
ErrSpBadSQLstate: "Bad SQLSTATE: '%s'",
ErrStartup: "%s: ready for connections.\nVersion: '%s' socket: '%s' port: %d %s",
ErrLoadFromFixedSizeRowsToVar: "Can't load value from file with fixed size rows to variable",
ErrCantCreateUserWithGrant: "You are not allowed to create a user with GRANT",
ErrWrongValueForType: "Incorrect %-.32s value: '%-.128s' for function %-.32s",
ErrTableDefChanged: "Table definition has changed, please retry transaction",
ErrSpDupHandler: "Duplicate handler declared in the same block",
ErrSpNotVarArg: "OUT or INOUT argument %d for routine %s is not a variable or NEW pseudo-variable in BEFORE trigger",
ErrSpNoRetset: "Not allowed to return a result set from a %s",
ErrCantCreateGeometryObject: "Cannot get geometry object from data you send to the GEOMETRY field",
ErrFailedRoutineBreakBinlog: "A routine failed and has neither NO SQL nor READS SQL DATA in its declaration and binary logging is enabled; if non-transactional tables were updated, the binary log will miss their changes",
ErrBinlogUnsafeRoutine: "This function has none of DETERMINISTIC, NO SQL, or READS SQL DATA in its declaration and binary logging is enabled (you *might* want to use the less safe logBinTrustFunctionCreators variable)",
ErrBinlogCreateRoutineNeedSuper: "You do not have the SUPER privilege and binary logging is enabled (you *might* want to use the less safe logBinTrustFunctionCreators variable)",
ErrExecStmtWithOpenCursor: "You can't execute a prepared statement which has an open cursor associated with it. Reset the statement to re-execute it.",
ErrStmtHasNoOpenCursor: "The statement (%lu) has no open cursor.",
ErrCommitNotAllowedInSfOrTrg: "Explicit or implicit commit is not allowed in stored function or trigger.",
ErrNoDefaultForViewField: "Field of view '%-.192s.%-.192s' underlying table doesn't have a default value",
ErrSpNoRecursion: "Recursive stored functions and triggers are not allowed.",
ErrTooBigScale: "Too big scale %d specified for column '%-.192s'. Maximum is %lu.",
ErrTooBigPrecision: "Too big precision %d specified for column '%-.192s'. Maximum is %lu.",
ErrMBiggerThanD: "For float(M,D), double(M,D) or decimal(M,D), M must be >= D (column '%-.192s').",
ErrWrongLockOfSystemTable: "You can't combine write-locking of system tables with other tables or lock types",
ErrConnectToForeignDataSource: "Unable to connect to foreign data source: %.64s",
ErrQueryOnForeignDataSource: "There was a problem processing the query on the foreign data source. Data source : %-.64s",
ErrForeignDataSourceDoesntExist: "The foreign data source you are trying to reference does not exist. Data source : %-.64s",
ErrForeignDataStringInvalidCantCreate: "Can't create federated table. The data source connection string '%-.64s' is not in the correct format",
ErrForeignDataStringInvalid: "The data source connection string '%-.64s' is not in the correct format",
ErrCantCreateFederatedTable: "Can't create federated table. Foreign data src : %-.64s",
ErrTrgInWrongSchema: "Trigger in wrong schema",
ErrStackOverrunNeedMore: "Thread stack overrun: %ld bytes used of a %ld byte stack, and %ld bytes needed. Use 'mysqld --threadStack=#' to specify a bigger stack.",
ErrTooLongBody: "Routine body for '%-.100s' is too long",
ErrWarnCantDropDefaultKeycache: "Cannot drop default keycache",
ErrTooBigDisplaywidth: "Display width out of range for column '%-.192s' (max = %lu)",
ErrXaerDupid: "XAERDUPID: The XID already exists",
ErrDatetimeFunctionOverflow: "Datetime function: %-.32s field overflow",
ErrCantUpdateUsedTableInSfOrTrg: "Can't update table '%-.192s' in stored function/trigger because it is already used by statement which invoked this stored function/trigger.",
ErrViewPreventUpdate: "The definition of table '%-.192s' prevents operation %.192s on table '%-.192s'.",
ErrPsNoRecursion: "The prepared statement contains a stored routine call that refers to that same statement. It's not allowed to execute a prepared statement in such a recursive manner",
ErrSpCantSetAutocommit: "Not allowed to set autocommit from a stored function or trigger",
ErrMalformedDefiner: "Definer is not fully qualified",
ErrViewFrmNoUser: "View '%-.192s'.'%-.192s' has no definer information (old table format). Current user is used as definer. Please recreate the view!",
ErrViewOtherUser: "You need the SUPER privilege for creation view with '%-.192s'@'%-.192s' definer",
ErrNoSuchUser: "The user specified as a definer ('%-.64s'@'%-.64s') does not exist",
ErrForbidSchemaChange: "Changing schema from '%-.192s' to '%-.192s' is not allowed.",
ErrRowIsReferenced2: "Cannot delete or update a parent row: a foreign key constraint fails (%.192s)",
ErrNoReferencedRow2: "Cannot add or update a child row: a foreign key constraint fails (%.192s)",
ErrSpBadVarShadow: "Variable '%-.64s' must be quoted with `...`, or renamed",
ErrTrgNoDefiner: "No definer attribute for trigger '%-.192s'.'%-.192s'. The trigger will be activated under the authorization of the invoker, which may have insufficient privileges. Please recreate the trigger.",
ErrOldFileFormat: "'%-.192s' has an old format, you should re-create the '%s' object(s)",
ErrSpRecursionLimit: "Recursive limit %d (as set by the maxSpRecursionDepth variable) was exceeded for routine %.192s",
ErrSpProcTableCorrupt: "Failed to load routine %-.192s. The table mysql.proc is missing, corrupt, or contains bad data (internal code %d)",
ErrSpWrongName: "Incorrect routine name '%-.192s'",
ErrTableNeedsUpgrade: "Table upgrade required. Please do \"REPAIR TABLE `%-.32s`\"",
ErrSpNoAggregate: "AGGREGATE is not supported for stored functions",
ErrMaxPreparedStmtCountReached: "Can't create more than maxPreparedStmtCount statements (current value: %lu)",
ErrViewRecursive: "`%-.192s`.`%-.192s` contains view recursion",
ErrNonGroupingFieldUsed: "Non-grouping field '%-.192s' is used in %-.64s clause",
ErrTableCantHandleSpkeys: "The used table type doesn't support SPATIAL indexes",
ErrNoTriggersOnSystemSchema: "Triggers can not be created on system tables",
ErrRemovedSpaces: "Leading spaces are removed from name '%s'",
ErrAutoincReadFailed: "Failed to read auto-increment value from storage engine",
ErrUsername: "user name",
ErrHostname: "host name",
ErrWrongStringLength: "String '%-.70s' is too long for %s (should be no longer than %d)",
ErrNonInsertableTable: "The target table %-.100s of the %s is not insertable-into",
ErrAdminWrongMrgTable: "Table '%-.64s' is differently defined or of non-MyISAM type or doesn't exist",
ErrTooHighLevelOfNestingForSelect: "Too high level of nesting for select",
ErrNameBecomesEmpty: "Name '%-.64s' has become ''",
ErrAmbiguousFieldTerm: "First character of the FIELDS TERMINATED string is ambiguous; please use non-optional and non-empty FIELDS ENCLOSED BY",
ErrForeignServerExists: "The foreign server, %s, you are trying to create already exists.",
ErrForeignServerDoesntExist: "The foreign server name you are trying to reference does not exist. Data source : %-.64s",
ErrIllegalHaCreateOption: "Table storage engine '%-.64s' does not support the create option '%.64s'",
ErrPartitionRequiresValues: "Syntax : %-.64s PARTITIONING requires definition of VALUES %-.64s for each partition",
ErrPartitionWrongValues: "Only %-.64s PARTITIONING can use VALUES %-.64s in partition definition",
ErrPartitionMaxvalue: "MAXVALUE can only be used in last partition definition",
ErrPartitionSubpartition: "Subpartitions can only be hash partitions and by key",
ErrPartitionSubpartMix: "Must define subpartitions on all partitions if on one partition",
ErrPartitionWrongNoPart: "Wrong number of partitions defined, mismatch with previous setting",
ErrPartitionWrongNoSubpart: "Wrong number of subpartitions defined, mismatch with previous setting",
ErrWrongExprInPartitionFunc: "Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed",
ErrNoConstExprInRangeOrList: "Expression in RANGE/LIST VALUES must be constant",
ErrFieldNotFoundPart: "Field in list of fields for partition function not found in table",
ErrListOfFieldsOnlyInHash: "List of fields is only allowed in KEY partitions",
ErrInconsistentPartitionInfo: "The partition info in the frm file is not consistent with what can be written into the frm file",
ErrPartitionFuncNotAllowed: "The %-.192s function returns the wrong type",
ErrPartitionsMustBeDefined: "For %-.64s partitions each partition must be defined",
ErrRangeNotIncreasing: "VALUES LESS THAN value must be strictly increasing for each partition",
ErrInconsistentTypeOfFunctions: "VALUES value must be of same type as partition function",
ErrMultipleDefConstInListPart: "Multiple definition of same constant in list partitioning",
ErrPartitionEntry: "Partitioning can not be used stand-alone in query",
ErrMixHandler: "The mix of handlers in the partitions is not allowed in this version of MySQL",
ErrPartitionNotDefined: "For the partitioned engine it is necessary to define all %-.64s",
ErrTooManyPartitions: "Too many partitions (including subpartitions) were defined",
ErrSubpartition: "It is only possible to mix RANGE/LIST partitioning with HASH/KEY partitioning for subpartitioning",
ErrCantCreateHandlerFile: "Failed to create specific handler file",
ErrBlobFieldInPartFunc: "A BLOB field is not allowed in partition function",
ErrUniqueKeyNeedAllFieldsInPf: "A %-.192s must include all columns in the table's partitioning function",
ErrNoParts: "Number of %-.64s = 0 is not an allowed value",
ErrPartitionMgmtOnNonpartitioned: "Partition management on a not partitioned table is not possible",
ErrForeignKeyOnPartitioned: "Foreign key clause is not yet supported in conjunction with partitioning",
ErrDropPartitionNonExistent: "Error in list of partitions to %-.64s",
ErrDropLastPartition: "Cannot remove all partitions, use DROP TABLE instead",
ErrCoalesceOnlyOnHashPartition: "COALESCE PARTITION can only be used on HASH/KEY partitions",
ErrReorgHashOnlyOnSameNo: "REORGANIZE PARTITION can only be used to reorganize partitions not to change their numbers",
ErrReorgNoParam: "REORGANIZE PARTITION without parameters can only be used on auto-partitioned tables using HASH PARTITIONs",
ErrOnlyOnRangeListPartition: "%-.64s PARTITION can only be used on RANGE/LIST partitions",
ErrAddPartitionSubpart: "Trying to Add partition(s) with wrong number of subpartitions",
ErrAddPartitionNoNewPartition: "At least one partition must be added",
ErrCoalescePartitionNoPartition: "At least one partition must be coalesced",
ErrReorgPartitionNotExist: "More partitions to reorganize than there are partitions",
ErrSameNamePartition: "Duplicate partition name %-.192s",
ErrNoBinlog: "It is not allowed to shut off binlog on this command",
ErrConsecutiveReorgPartitions: "When reorganizing a set of partitions they must be in consecutive order",
ErrReorgOutsideRange: "Reorganize of range partitions cannot change total ranges except for last partition where it can extend the range",
ErrPartitionFunctionFailure: "Partition function not supported in this version for this handler",
ErrPartState: "Partition state cannot be defined from CREATE/ALTER TABLE",
ErrLimitedPartRange: "The %-.64s handler only supports 32 bit integers in VALUES",
ErrPluginIsNotLoaded: "Plugin '%-.192s' is not loaded",
ErrWrongValue: "Incorrect %-.32s value: '%-.128s'",
ErrNoPartitionForGivenValue: "Table has no partition for value %-.64s",
ErrFilegroupOptionOnlyOnce: "It is not allowed to specify %s more than once",
ErrCreateFilegroupFailed: "Failed to create %s",
ErrDropFilegroupFailed: "Failed to drop %s",
ErrTablespaceAutoExtend: "The handler doesn't support autoextend of tablespaces",
ErrWrongSizeNumber: "A size parameter was incorrectly specified, either number or on the form 10M",
ErrSizeOverflow: "The size number was correct but we don't allow the digit part to be more than 2 billion",
ErrAlterFilegroupFailed: "Failed to alter: %s",
ErrBinlogRowLoggingFailed: "Writing one row to the row-based binary log failed",
ErrBinlogRowWrongTableDef: "Table definition on master and slave does not match: %s",
ErrBinlogRowRbrToSbr: "Slave running with --log-slave-updates must use row-based binary logging to be able to replicate row-based binary log events",
ErrEventAlreadyExists: "Event '%-.192s' already exists",
ErrEventStoreFailed: "Failed to store event %s. Error code %d from storage engine.",
ErrEventDoesNotExist: "Unknown event '%-.192s'",
ErrEventCantAlter: "Failed to alter event '%-.192s'",
ErrEventDropFailed: "Failed to drop %s",
ErrEventIntervalNotPositiveOrTooBig: "INTERVAL is either not positive or too big",
ErrEventEndsBeforeStarts: "ENDS is either invalid or before STARTS",
ErrEventExecTimeInThePast: "Event execution time is in the past. Event has been disabled",
ErrEventOpenTableFailed: "Failed to open mysql.event",
ErrEventNeitherMExprNorMAt: "No datetime expression provided",
ErrObsoleteColCountDoesntMatchCorrupted: "Column count of mysql.%s is wrong. Expected %d, found %d. The table is probably corrupted",
ErrObsoleteCannotLoadFromTable: "Cannot load from mysql.%s. The table is probably corrupted",
ErrEventCannotDelete: "Failed to delete the event from mysql.event",
ErrEventCompile: "Error during compilation of event's body",
ErrEventSameName: "Same old and new event name",
ErrEventDataTooLong: "Data for column '%s' too long",
ErrDropIndexFk: "Cannot drop index '%-.192s': needed in a foreign key constraint",
ErrWarnDeprecatedSyntaxWithVer: "The syntax '%s' is deprecated and will be removed in MySQL %s. Please use %s instead",
ErrCantWriteLockLogTable: "You can't write-lock a log table. Only read access is possible",
ErrCantLockLogTable: "You can't use locks with log tables.",
ErrForeignDuplicateKeyOldUnused: "Upholding foreign key constraints for table '%.192s', entry '%-.192s', key %d would lead to a duplicate entry",
ErrColCountDoesntMatchPleaseUpdate: "Column count of mysql.%s is wrong. Expected %d, found %d. Created with MySQL %d, now running %d. Please use mysqlUpgrade to fix this error.",
ErrTempTablePreventsSwitchOutOfRbr: "Cannot switch out of the row-based binary log format when the session has open temporary tables",
ErrStoredFunctionPreventsSwitchBinlogFormat: "Cannot change the binary logging format inside a stored function or trigger",
ErrNdbCantSwitchBinlogFormat: "The NDB cluster engine does not support changing the binlog format on the fly yet",
ErrPartitionNoTemporary: "Cannot create temporary table with partitions",
ErrPartitionConstDomain: "Partition constant is out of partition function domain",
ErrPartitionFunctionIsNotAllowed: "This partition function is not allowed",
ErrDdlLog: "Error in DDL log",
ErrNullInValuesLessThan: "Not allowed to use NULL value in VALUES LESS THAN",
ErrWrongPartitionName: "Incorrect partition name",
ErrCantChangeTxCharacteristics: "Transaction characteristics can't be changed while a transaction is in progress",
ErrDupEntryAutoincrementCase: "ALTER TABLE causes autoIncrement resequencing, resulting in duplicate entry '%-.192s' for key '%-.192s'",
ErrEventModifyQueue: "Internal scheduler error %d",
ErrEventSetVar: "Error during starting/stopping of the scheduler. Error code %u",
ErrPartitionMerge: "Engine cannot be used in partitioned tables",
ErrCantActivateLog: "Cannot activate '%-.64s' log",
ErrRbrNotAvailable: "The server was not built with row-based replication",
ErrBase64Decode: "Decoding of base64 string failed",
ErrEventRecursionForbidden: "Recursion of EVENT DDL statements is forbidden when body is present",
ErrEventsDb: "Cannot proceed because system tables used by Event Scheduler were found damaged at server start",
ErrOnlyIntegersAllowed: "Only integers allowed as number here",
ErrUnsuportedLogEngine: "This storage engine cannot be used for log tables\"",
ErrBadLogStatement: "You cannot '%s' a log table if logging is enabled",
ErrCantRenameLogTable: "Cannot rename '%s'. When logging enabled, rename to/from log table must rename two tables: the log table to an archive table and another table back to '%s'",
ErrWrongParamcountToNativeFct: "Incorrect parameter count in the call to native function '%-.192s'",
ErrWrongParametersToNativeFct: "Incorrect parameters in the call to native function '%-.192s'",
ErrWrongParametersToStoredFct: "Incorrect parameters in the call to stored function '%-.192s'",
ErrNativeFctNameCollision: "This function '%-.192s' has the same name as a native function",
ErrDupEntryWithKeyName: "Duplicate entry '%-.64s' for key '%-.192s'",
ErrBinlogPurgeEmfile: "Too many files opened, please execute the command again",
ErrEventCannotCreateInThePast: "Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. The event was dropped immediately after creation.",
ErrEventCannotAlterInThePast: "Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. The event was not changed. Specify a time in the future.",
ErrSlaveIncident: "The incident %s occured on the master. Message: %-.64s",
ErrNoPartitionForGivenValueSilent: "Table has no partition for some existing values",
ErrBinlogUnsafeStatement: "Unsafe statement written to the binary log using statement format since BINLOGFORMAT = STATEMENT. %s",
ErrSlaveFatal: "Fatal : %s",
ErrSlaveRelayLogReadFailure: "Relay log read failure: %s",
ErrSlaveRelayLogWriteFailure: "Relay log write failure: %s",
ErrSlaveCreateEventFailure: "Failed to create %s",
ErrSlaveMasterComFailure: "Master command %s failed: %s",
ErrBinlogLoggingImpossible: "Binary logging not possible. Message: %s",
ErrViewNoCreationCtx: "View `%-.64s`.`%-.64s` has no creation context",
ErrViewInvalidCreationCtx: "Creation context of view `%-.64s`.`%-.64s' is invalid",
ErrSrInvalidCreationCtx: "Creation context of stored routine `%-.64s`.`%-.64s` is invalid",
ErrTrgCorruptedFile: "Corrupted TRG file for table `%-.64s`.`%-.64s`",
ErrTrgNoCreationCtx: "Triggers for table `%-.64s`.`%-.64s` have no creation context",
ErrTrgInvalidCreationCtx: "Trigger creation context of table `%-.64s`.`%-.64s` is invalid",
ErrEventInvalidCreationCtx: "Creation context of event `%-.64s`.`%-.64s` is invalid",
ErrTrgCantOpenTable: "Cannot open table for trigger `%-.64s`.`%-.64s`",
ErrCantCreateSroutine: "Cannot create stored routine `%-.64s`. Check warnings",
ErrNeverUsed: "Ambiguous slave modes combination. %s",
ErrNoFormatDescriptionEventBeforeBinlogStatement: "The BINLOG statement of type `%s` was not preceded by a format description BINLOG statement.",
ErrSlaveCorruptEvent: "Corrupted replication event was detected",
ErrLoadDataInvalidColumn: "Invalid column reference (%-.64s) in LOAD DATA",
ErrLogPurgeNoFile: "Being purged log %s was not found",
ErrXaRbtimeout: "XARBTIMEOUT: Transaction branch was rolled back: took too long",
ErrXaRbdeadlock: "XARBDEADLOCK: Transaction branch was rolled back: deadlock was detected",
ErrNeedReprepare: "Prepared statement needs to be re-prepared",
ErrDelayedNotSupported: "DELAYED option not supported for table '%-.192s'",
WarnNoMasterInfo: "The master info structure does not exist",
WarnOptionIgnored: "<%-.64s> option ignored",
WarnPluginDeleteBuiltin: "Built-in plugins cannot be deleted",
WarnPluginBusy: "Plugin is busy and will be uninstalled on shutdown",
ErrVariableIsReadonly: "%s variable '%s' is read-only. Use SET %s to assign the value",
ErrWarnEngineTransactionRollback: "Storage engine %s does not support rollback for this statement. Transaction rolled back and must be restarted",
ErrSlaveHeartbeatFailure: "Unexpected master's heartbeat data: %s",
ErrSlaveHeartbeatValueOutOfRange: "The requested value for the heartbeat period is either negative or exceeds the maximum allowed (%s seconds).",
ErrNdbReplicationSchema: "Bad schema for mysql.ndbReplication table. Message: %-.64s",
ErrConflictFnParse: "Error in parsing conflict function. Message: %-.64s",
ErrExceptionsWrite: "Write to exceptions table failed. Message: %-.128s\"",
ErrTooLongTableComment: "Comment for table '%-.64s' is too long (max = %lu)",
ErrTooLongFieldComment: "Comment for field '%-.64s' is too long (max = %lu)",
ErrFuncInexistentNameCollision: "FUNCTION %s does not exist. Check the 'Function Name Parsing and Resolution' section in the Reference Manual",
ErrDatabaseName: "Database",
ErrTableName: "Table",
ErrPartitionName: "Partition",
ErrSubpartitionName: "Subpartition",
ErrTemporaryName: "Temporary",
ErrRenamedName: "Renamed",
ErrTooManyConcurrentTrxs: "Too many active concurrent transactions",
WarnNonASCIISeparatorNotImplemented: "Non-ASCII separator arguments are not fully supported",
ErrDebugSyncTimeout: "debug sync point wait timed out",
ErrDebugSyncHitLimit: "debug sync point hit limit reached",
ErrDupSignalSet: "Duplicate condition information item '%s'",
ErrSignalWarn: "Unhandled user-defined warning condition",
ErrSignalNotFound: "Unhandled user-defined not found condition",
ErrSignalException: "Unhandled user-defined exception condition",
ErrResignalWithoutActiveHandler: "RESIGNAL when handler not active",
ErrSignalBadConditionType: "SIGNAL/RESIGNAL can only use a CONDITION defined with SQLSTATE",
WarnCondItemTruncated: "Data truncated for condition item '%s'",
ErrCondItemTooLong: "Data too long for condition item '%s'",
ErrUnknownLocale: "Unknown locale: '%-.64s'",
ErrSlaveIgnoreServerIds: "The requested server id %d clashes with the slave startup option --replicate-same-server-id",
ErrQueryCacheDisabled: "Query cache is disabled; restart the server with queryCacheType=1 to enable it",
ErrSameNamePartitionField: "Duplicate partition field name '%-.192s'",
ErrPartitionColumnList: "Inconsistency in usage of column lists for partitioning",
ErrWrongTypeColumnValue: "Partition column values of incorrect type",
ErrTooManyPartitionFuncFields: "Too many fields in '%-.192s'",
ErrMaxvalueInValuesIn: "Cannot use MAXVALUE as value in VALUES IN",
ErrTooManyValues: "Cannot have more than one value for this type of %-.64s partitioning",
ErrRowSinglePartitionField: "Row expressions in VALUES IN only allowed for multi-field column partitioning",
ErrFieldTypeNotAllowedAsPartitionField: "Field '%-.192s' is of a not allowed type for this type of partitioning",
ErrPartitionFieldsTooLong: "The total length of the partitioning fields is too large",
ErrBinlogRowEngineAndStmtEngine: "Cannot execute statement: impossible to write to binary log since both row-incapable engines and statement-incapable engines are involved.",
ErrBinlogRowModeAndStmtEngine: "Cannot execute statement: impossible to write to binary log since BINLOGFORMAT = ROW and at least one table uses a storage engine limited to statement-based logging.",
ErrBinlogUnsafeAndStmtEngine: "Cannot execute statement: impossible to write to binary log since statement is unsafe, storage engine is limited to statement-based logging, and BINLOGFORMAT = MIXED. %s",
ErrBinlogRowInjectionAndStmtEngine: "Cannot execute statement: impossible to write to binary log since statement is in row format and at least one table uses a storage engine limited to statement-based logging.",
ErrBinlogStmtModeAndRowEngine: "Cannot execute statement: impossible to write to binary log since BINLOGFORMAT = STATEMENT and at least one table uses a storage engine limited to row-based logging.%s",
ErrBinlogRowInjectionAndStmtMode: "Cannot execute statement: impossible to write to binary log since statement is in row format and BINLOGFORMAT = STATEMENT.",
ErrBinlogMultipleEnginesAndSelfLoggingEngine: "Cannot execute statement: impossible to write to binary log since more than one engine is involved and at least one engine is self-logging.",
ErrBinlogUnsafeLimit: "The statement is unsafe because it uses a LIMIT clause. This is unsafe because the set of rows included cannot be predicted.",
ErrBinlogUnsafeInsertDelayed: "The statement is unsafe because it uses INSERT DELAYED. This is unsafe because the times when rows are inserted cannot be predicted.",
ErrBinlogUnsafeSystemTable: "The statement is unsafe because it uses the general log, slow query log, or performanceSchema table(s). This is unsafe because system tables may differ on slaves.",
ErrBinlogUnsafeAutoincColumns: "Statement is unsafe because it invokes a trigger or a stored function that inserts into an AUTOINCREMENT column. Inserted values cannot be logged correctly.",
ErrBinlogUnsafeUdf: "Statement is unsafe because it uses a UDF which may not return the same value on the slave.",
ErrBinlogUnsafeSystemVariable: "Statement is unsafe because it uses a system variable that may have a different value on the slave.",
ErrBinlogUnsafeSystemFunction: "Statement is unsafe because it uses a system function that may return a different value on the slave.",
ErrBinlogUnsafeNontransAfterTrans: "Statement is unsafe because it accesses a non-transactional table after accessing a transactional table within the same transaction.",
ErrMessageAndStatement: "%s Statement: %s",
ErrSlaveConversionFailed: "Column %d of table '%-.192s.%-.192s' cannot be converted from type '%-.32s' to type '%-.32s'",
ErrSlaveCantCreateConversion: "Can't create conversion table for table '%-.192s.%-.192s'",
ErrInsideTransactionPreventsSwitchBinlogFormat: "Cannot modify @@session.binlogFormat inside a transaction",
ErrPathLength: "The path specified for %.64s is too long.",
ErrWarnDeprecatedSyntaxNoReplacement: "'%s' is deprecated and will be removed in a future release.",
ErrWrongNativeTableStructure: "Native table '%-.64s'.'%-.64s' has the wrong structure",
ErrWrongPerfschemaUsage: "Invalid performanceSchema usage.",
ErrWarnISSkippedTable: "Table '%s'.'%s' was skipped since its definition is being modified by concurrent DDL statement",
ErrInsideTransactionPreventsSwitchBinlogDirect: "Cannot modify @@session.binlogDirectNonTransactionalUpdates inside a transaction",
ErrStoredFunctionPreventsSwitchBinlogDirect: "Cannot change the binlog direct flag inside a stored function or trigger",
ErrSpatialMustHaveGeomCol: "A SPATIAL index may only contain a geometrical type column",
ErrTooLongIndexComment: "Comment for index '%-.64s' is too long (max = %lu)",
ErrLockAborted: "Wait on a lock was aborted due to a pending exclusive lock",
ErrDataOutOfRange: "%s value is out of range in '%s'",
ErrWrongSpvarTypeInLimit: "A variable of a non-integer based type in LIMIT clause",
ErrBinlogUnsafeMultipleEnginesAndSelfLoggingEngine: "Mixing self-logging and non-self-logging engines in a statement is unsafe.",
ErrBinlogUnsafeMixedStatement: "Statement accesses nontransactional table as well as transactional or temporary table, and writes to any of them.",
ErrInsideTransactionPreventsSwitchSQLLogBin: "Cannot modify @@session.sqlLogBin inside a transaction",
ErrStoredFunctionPreventsSwitchSQLLogBin: "Cannot change the sqlLogBin inside a stored function or trigger",
ErrFailedReadFromParFile: "Failed to read from the .par file",
ErrValuesIsNotIntType: "VALUES value for partition '%-.64s' must have type INT",
ErrAccessDeniedNoPassword: "Access denied for user '%-.48s'@'%-.64s'",
ErrSetPasswordAuthPlugin: "SET PASSWORD has no significance for users authenticating via plugins",
ErrGrantPluginUserExists: "GRANT with IDENTIFIED WITH is illegal because the user %-.*s already exists",
ErrTruncateIllegalFk: "Cannot truncate a table referenced in a foreign key constraint (%.192s)",
ErrPluginIsPermanent: "Plugin '%s' is forcePlusPermanent and can not be unloaded",
ErrSlaveHeartbeatValueOutOfRangeMin: "The requested value for the heartbeat period is less than 1 millisecond. The value is reset to 0, meaning that heartbeating will effectively be disabled.",
ErrSlaveHeartbeatValueOutOfRangeMax: "The requested value for the heartbeat period exceeds the value of `slaveNetTimeout' seconds. A sensible value for the period should be less than the timeout.",
ErrStmtCacheFull: "Multi-row statements required more than 'maxBinlogStmtCacheSize' bytes of storage; increase this mysqld variable and try again",
ErrMultiUpdateKeyConflict: "Primary key/partition key update is not allowed since the table is updated both as '%-.192s' and '%-.192s'.",
ErrTableNeedsRebuild: "Table rebuild required. Please do \"ALTER TABLE `%-.32s` FORCE\" or dump/reload to fix it!",
WarnOptionBelowLimit: "The value of '%s' should be no less than the value of '%s'",
ErrIndexColumnTooLong: "Index column size too large. The maximum column size is %lu bytes.",
ErrErrorInTriggerBody: "Trigger '%-.64s' has an error in its body: '%-.256s'",
ErrErrorInUnknownTriggerBody: "Unknown trigger has an error in its body: '%-.256s'",
ErrIndexCorrupt: "Index %s is corrupted",
ErrUndoRecordTooBig: "Undo log record is too big.",
ErrBinlogUnsafeInsertIgnoreSelect: "INSERT IGNORE... SELECT is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are ignored. This order cannot be predicted and may differ on master and the slave.",
ErrBinlogUnsafeInsertSelectUpdate: "INSERT... SELECT... ON DUPLICATE KEY UPDATE is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are updated. This order cannot be predicted and may differ on master and the slave.",
ErrBinlogUnsafeReplaceSelect: "REPLACE... SELECT is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are replaced. This order cannot be predicted and may differ on master and the slave.",
ErrBinlogUnsafeCreateIgnoreSelect: "CREATE... IGNORE SELECT is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are ignored. This order cannot be predicted and may differ on master and the slave.",
ErrBinlogUnsafeCreateReplaceSelect: "CREATE... REPLACE SELECT is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are replaced. This order cannot be predicted and may differ on master and the slave.",
ErrBinlogUnsafeUpdateIgnore: "UPDATE IGNORE is unsafe because the order in which rows are updated determines which (if any) rows are ignored. This order cannot be predicted and may differ on master and the slave.",
ErrPluginNoUninstall: "Plugin '%s' is marked as not dynamically uninstallable. You have to stop the server to uninstall it.",
ErrPluginNoInstall: "Plugin '%s' is marked as not dynamically installable. You have to stop the server to install it.",
ErrBinlogUnsafeWriteAutoincSelect: "Statements writing to a table with an auto-increment column after selecting from another table are unsafe because the order in which rows are retrieved determines what (if any) rows will be written. This order cannot be predicted and may differ on master and the slave.",
ErrBinlogUnsafeCreateSelectAutoinc: "CREATE TABLE... SELECT... on a table with an auto-increment column is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are inserted. This order cannot be predicted and may differ on master and the slave.",
ErrBinlogUnsafeInsertTwoKeys: "INSERT... ON DUPLICATE KEY UPDATE on a table with more than one UNIQUE KEY is unsafe",
ErrTableInFkCheck: "Table is being used in foreign key check.",
ErrUnsupportedEngine: "Storage engine '%s' does not support system tables. [%s.%s]",
ErrBinlogUnsafeAutoincNotFirst: "INSERT into autoincrement field which is not the first part in the composed primary key is unsafe.",
ErrCannotLoadFromTableV2: "Cannot load from %s.%s. The table is probably corrupted",
ErrMasterDelayValueOutOfRange: "The requested value %u for the master delay exceeds the maximum %u",
ErrOnlyFdAndRbrEventsAllowedInBinlogStatement: "Only FormatDescriptionLogEvent and row events are allowed in BINLOG statements (but %s was provided)",
ErrPartitionExchangeDifferentOption: "Non matching attribute '%-.64s' between partition and table",
ErrPartitionExchangePartTable: "Table to exchange with partition is partitioned: '%-.64s'",
ErrPartitionExchangeTempTable: "Table to exchange with partition is temporary: '%-.64s'",
ErrPartitionInsteadOfSubpartition: "Subpartitioned table, use subpartition instead of partition",
ErrUnknownPartition: "Unknown partition '%-.64s' in table '%-.64s'",
ErrTablesDifferentMetadata: "Tables have different definitions",
ErrRowDoesNotMatchPartition: "Found a row that does not match the partition",
ErrBinlogCacheSizeGreaterThanMax: "Option binlogCacheSize (%lu) is greater than maxBinlogCacheSize (%lu); setting binlogCacheSize equal to maxBinlogCacheSize.",
ErrWarnIndexNotApplicable: "Cannot use %-.64s access on index '%-.64s' due to type or collation conversion on field '%-.64s'",
ErrPartitionExchangeForeignKey: "Table to exchange with partition has foreign key references: '%-.64s'",
ErrNoSuchKeyValue: "Key value '%-.192s' was not found in table '%-.192s.%-.192s'",
ErrRplInfoDataTooLong: "Data for column '%s' too long",
ErrNetworkReadEventChecksumFailure: "Replication event checksum verification failed while reading from network.",
ErrBinlogReadEventChecksumFailure: "Replication event checksum verification failed while reading from a log file.",
ErrBinlogStmtCacheSizeGreaterThanMax: "Option binlogStmtCacheSize (%lu) is greater than maxBinlogStmtCacheSize (%lu); setting binlogStmtCacheSize equal to maxBinlogStmtCacheSize.",
ErrCantUpdateTableInCreateTableSelect: "Can't update table '%-.192s' while '%-.192s' is being created.",
ErrPartitionClauseOnNonpartitioned: "PARTITION () clause on non partitioned table",
ErrRowDoesNotMatchGivenPartitionSet: "Found a row not matching the given partition set",
ErrNoSuchPartitionunused: "partition '%-.64s' doesn't exist",
ErrChangeRplInfoRepositoryFailure: "Failure while changing the type of replication repository: %s.",
ErrWarningNotCompleteRollbackWithCreatedTempTable: "The creation of some temporary tables could not be rolled back.",
ErrWarningNotCompleteRollbackWithDroppedTempTable: "Some temporary tables were dropped, but these operations could not be rolled back.",
ErrMtsFeatureIsNotSupported: "%s is not supported in multi-threaded slave mode. %s",
ErrMtsUpdatedDbsGreaterMax: "The number of modified databases exceeds the maximum %d; the database names will not be included in the replication event metadata.",
ErrMtsCantParallel: "Cannot execute the current event group in the parallel mode. Encountered event %s, relay-log name %s, position %s which prevents execution of this event group in parallel mode. Reason: %s.",
ErrMtsInconsistentData: "%s",
ErrFulltextNotSupportedWithPartitioning: "FULLTEXT index is not supported for partitioned tables.",
ErrDaInvalidConditionNumber: "Invalid condition number",
ErrInsecurePlainText: "Sending passwords in plain text without SSL/TLS is extremely insecure.",
ErrInsecureChangeMaster: "Storing MySQL user name or password information in the master.info repository is not secure and is therefore not recommended. Please see the MySQL Manual for more about this issue and possible alternatives.",
ErrForeignDuplicateKeyWithChildInfo: "Foreign key constraint for table '%.192s', record '%-.192s' would lead to a duplicate entry in table '%.192s', key '%.192s'",
ErrForeignDuplicateKeyWithoutChildInfo: "Foreign key constraint for table '%.192s', record '%-.192s' would lead to a duplicate entry in a child table",
ErrSQLthreadWithSecureSlave: "Setting authentication options is not possible when only the Slave SQL Thread is being started.",
ErrTableHasNoFt: "The table does not have FULLTEXT index to support this query",
ErrVariableNotSettableInSfOrTrigger: "The system variable %.200s cannot be set in stored functions or triggers.",
ErrVariableNotSettableInTransaction: "The system variable %.200s cannot be set when there is an ongoing transaction.",
ErrGtidNextIsNotInGtidNextList: "The system variable @@SESSION.GTIDNEXT has the value %.200s, which is not listed in @@SESSION.GTIDNEXTLIST.",
ErrCantChangeGtidNextInTransactionWhenGtidNextListIsNull: "When @@SESSION.GTIDNEXTLIST == NULL, the system variable @@SESSION.GTIDNEXT cannot change inside a transaction.",
ErrSetStatementCannotInvokeFunction: "The statement 'SET %.200s' cannot invoke a stored function.",
ErrGtidNextCantBeAutomaticIfGtidNextListIsNonNull: "The system variable @@SESSION.GTIDNEXT cannot be 'AUTOMATIC' when @@SESSION.GTIDNEXTLIST is non-NULL.",
ErrSkippingLoggedTransaction: "Skipping transaction %.200s because it has already been executed and logged.",
ErrMalformedGtidSetSpecification: "Malformed GTID set specification '%.200s'.",
ErrMalformedGtidSetEncoding: "Malformed GTID set encoding.",
ErrMalformedGtidSpecification: "Malformed GTID specification '%.200s'.",
ErrGnoExhausted: "Impossible to generate Global Transaction Identifier: the integer component reached the maximal value. Restart the server with a new serverUuid.",
ErrBadSlaveAutoPosition: "Parameters MASTERLOGFILE, MASTERLOGPOS, RELAYLOGFILE and RELAYLOGPOS cannot be set when MASTERAUTOPOSITION is active.",
ErrAutoPositionRequiresGtidModeOn: "CHANGE MASTER TO MASTERAUTOPOSITION = 1 can only be executed when @@GLOBAL.GTIDMODE = ON.",
ErrCantDoImplicitCommitInTrxWhenGtidNextIsSet: "Cannot execute statements with implicit commit inside a transaction when @@SESSION.GTIDNEXT != AUTOMATIC or @@SESSION.GTIDNEXTLIST != NULL.",
ErrGtidMode2Or3RequiresEnforceGtidConsistencyOn: "@@GLOBAL.GTIDMODE = ON or UPGRADESTEP2 requires @@GLOBAL.ENFORCEGTIDCONSISTENCY = 1.",
ErrGtidModeRequiresBinlog: "@@GLOBAL.GTIDMODE = ON or UPGRADESTEP1 or UPGRADESTEP2 requires --log-bin and --log-slave-updates.",
ErrCantSetGtidNextToGtidWhenGtidModeIsOff: "@@SESSION.GTIDNEXT cannot be set to UUID:NUMBER when @@GLOBAL.GTIDMODE = OFF.",
ErrCantSetGtidNextToAnonymousWhenGtidModeIsOn: "@@SESSION.GTIDNEXT cannot be set to ANONYMOUS when @@GLOBAL.GTIDMODE = ON.",
ErrCantSetGtidNextListToNonNullWhenGtidModeIsOff: "@@SESSION.GTIDNEXTLIST cannot be set to a non-NULL value when @@GLOBAL.GTIDMODE = OFF.",
ErrFoundGtidEventWhenGtidModeIsOff: "Found a GtidLogEvent or PreviousGtidsLogEvent when @@GLOBAL.GTIDMODE = OFF.",
ErrGtidUnsafeNonTransactionalTable: "When @@GLOBAL.ENFORCEGTIDCONSISTENCY = 1, updates to non-transactional tables can only be done in either autocommitted statements or single-statement transactions, and never in the same statement as updates to transactional tables.",
ErrGtidUnsafeCreateSelect: "CREATE TABLE ... SELECT is forbidden when @@GLOBAL.ENFORCEGTIDCONSISTENCY = 1.",
ErrGtidUnsafeCreateDropTemporaryTableInTransaction: "When @@GLOBAL.ENFORCEGTIDCONSISTENCY = 1, the statements CREATE TEMPORARY TABLE and DROP TEMPORARY TABLE can be executed in a non-transactional context only, and require that AUTOCOMMIT = 1.",
ErrGtidModeCanOnlyChangeOneStepAtATime: "The value of @@GLOBAL.GTIDMODE can only change one step at a time: OFF <-> UPGRADESTEP1 <-> UPGRADESTEP2 <-> ON. Also note that this value must be stepped up or down simultaneously on all servers; see the Manual for instructions.",
ErrMasterHasPurgedRequiredGtids: "The slave is connecting using CHANGE MASTER TO MASTERAUTOPOSITION = 1, but the master has purged binary logs containing GTIDs that the slave requires.",
ErrCantSetGtidNextWhenOwningGtid: "@@SESSION.GTIDNEXT cannot be changed by a client that owns a GTID. The client owns %s. Ownership is released on COMMIT or ROLLBACK.",
ErrUnknownExplainFormat: "Unknown EXPLAIN format name: '%s'",
ErrCantExecuteInReadOnlyTransaction: "Cannot execute statement in a READ ONLY transaction.",
ErrTooLongTablePartitionComment: "Comment for table partition '%-.64s' is too long (max = %lu)",
ErrSlaveConfiguration: "Slave is not configured or failed to initialize properly. You must at least set --server-id to enable either a master or a slave. Additional error messages can be found in the MySQL error log.",
ErrInnodbFtLimit: "InnoDB presently supports one FULLTEXT index creation at a time",
ErrInnodbNoFtTempTable: "Cannot create FULLTEXT index on temporary InnoDB table",
ErrInnodbFtWrongDocidColumn: "Column '%-.192s' is of wrong type for an InnoDB FULLTEXT index",
ErrInnodbFtWrongDocidIndex: "Index '%-.192s' is of wrong type for an InnoDB FULLTEXT index",
ErrInnodbOnlineLogTooBig: "Creating index '%-.192s' required more than 'innodbOnlineAlterLogMaxSize' bytes of modification log. Please try again.",
ErrUnknownAlterAlgorithm: "Unknown ALGORITHM '%s'",
ErrUnknownAlterLock: "Unknown LOCK type '%s'",
ErrMtsChangeMasterCantRunWithGaps: "CHANGE MASTER cannot be executed when the slave was stopped with an error or killed in MTS mode. Consider using RESET SLAVE or START SLAVE UNTIL.",
ErrMtsRecoveryFailure: "Cannot recover after SLAVE errored out in parallel execution mode. Additional error messages can be found in the MySQL error log.",
ErrMtsResetWorkers: "Cannot clean up worker info tables. Additional error messages can be found in the MySQL error log.",
ErrColCountDoesntMatchCorruptedV2: "Column count of %s.%s is wrong. Expected %d, found %d. The table is probably corrupted",
ErrSlaveSilentRetryTransaction: "Slave must silently retry current transaction",
ErrDiscardFkChecksRunning: "There is a foreign key check running on table '%-.192s'. Cannot discard the table.",
ErrTableSchemaMismatch: "Schema mismatch (%s)",
ErrTableInSystemTablespace: "Table '%-.192s' in system tablespace",
ErrIoRead: "IO Read : (%lu, %s) %s",
ErrIoWrite: "IO Write : (%lu, %s) %s",
ErrTablespaceMissing: "Tablespace is missing for table '%-.192s'",
ErrTablespaceExists: "Tablespace for table '%-.192s' exists. Please DISCARD the tablespace before IMPORT.",
ErrTablespaceDiscarded: "Tablespace has been discarded for table '%-.192s'",
ErrInternal: "Internal : %s",
ErrInnodbImport: "ALTER TABLE '%-.192s' IMPORT TABLESPACE failed with error %lu : '%s'",
ErrInnodbIndexCorrupt: "Index corrupt: %s",
ErrInvalidYearColumnLength: "YEAR(%lu) column type is deprecated. Creating YEAR(4) column instead.",
ErrNotValidPassword: "Your password does not satisfy the current policy requirements",
ErrMustChangePassword: "You must SET PASSWORD before executing this statement",
ErrFkNoIndexChild: "Failed to add the foreign key constaint. Missing index for constraint '%s' in the foreign table '%s'",
ErrFkNoIndexParent: "Failed to add the foreign key constaint. Missing index for constraint '%s' in the referenced table '%s'",
ErrFkFailAddSystem: "Failed to add the foreign key constraint '%s' to system tables",
ErrFkCannotOpenParent: "Failed to open the referenced table '%s'",
ErrFkIncorrectOption: "Failed to add the foreign key constraint on table '%s'. Incorrect options in FOREIGN KEY constraint '%s'",
ErrFkDupName: "Duplicate foreign key constraint name '%s'",
ErrPasswordFormat: "The password hash doesn't have the expected format. Check if the correct password algorithm is being used with the PASSWORD() function.",
ErrFkColumnCannotDrop: "Cannot drop column '%-.192s': needed in a foreign key constraint '%-.192s'",
ErrFkColumnCannotDropChild: "Cannot drop column '%-.192s': needed in a foreign key constraint '%-.192s' of table '%-.192s'",
ErrFkColumnNotNull: "Column '%-.192s' cannot be NOT NULL: needed in a foreign key constraint '%-.192s' SET NULL",
ErrDupIndex: "Duplicate index '%-.64s' defined on the table '%-.64s.%-.64s'. This is deprecated and will be disallowed in a future release.",
ErrFkColumnCannotChange: "Cannot change column '%-.192s': used in a foreign key constraint '%-.192s'",
ErrFkColumnCannotChangeChild: "Cannot change column '%-.192s': used in a foreign key constraint '%-.192s' of table '%-.192s'",
ErrFkCannotDeleteParent: "Cannot delete rows from table which is parent in a foreign key constraint '%-.192s' of table '%-.192s'",
ErrMalformedPacket: "Malformed communication packet.",
ErrReadOnlyMode: "Running in read-only mode",
ErrGtidNextTypeUndefinedGroup: "When @@SESSION.GTIDNEXT is set to a GTID, you must explicitly set it again after a COMMIT or ROLLBACK. If you see this error message in the slave SQL thread, it means that a table in the current transaction is transactional on the master and non-transactional on the slave. In a client connection, it means that you executed SET @@SESSION.GTIDNEXT before a transaction and forgot to set @@SESSION.GTIDNEXT to a different identifier or to 'AUTOMATIC' after COMMIT or ROLLBACK. Current @@SESSION.GTIDNEXT is '%s'.",
ErrVariableNotSettableInSp: "The system variable %.200s cannot be set in stored procedures.",
ErrCantSetGtidPurgedWhenGtidModeIsOff: "@@GLOBAL.GTIDPURGED can only be set when @@GLOBAL.GTIDMODE = ON.",
ErrCantSetGtidPurgedWhenGtidExecutedIsNotEmpty: "@@GLOBAL.GTIDPURGED can only be set when @@GLOBAL.GTIDEXECUTED is empty.",
ErrCantSetGtidPurgedWhenOwnedGtidsIsNotEmpty: "@@GLOBAL.GTIDPURGED can only be set when there are no ongoing transactions (not even in other clients).",
ErrGtidPurgedWasChanged: "@@GLOBAL.GTIDPURGED was changed from '%s' to '%s'.",
ErrGtidExecutedWasChanged: "@@GLOBAL.GTIDEXECUTED was changed from '%s' to '%s'.",
ErrBinlogStmtModeAndNoReplTables: "Cannot execute statement: impossible to write to binary log since BINLOGFORMAT = STATEMENT, and both replicated and non replicated tables are written to.",
ErrAlterOperationNotSupported: "%s is not supported for this operation. Try %s.",
ErrAlterOperationNotSupportedReason: "%s is not supported. Reason: %s. Try %s.",
ErrAlterOperationNotSupportedReasonCopy: "COPY algorithm requires a lock",
ErrAlterOperationNotSupportedReasonPartition: "Partition specific operations do not yet support LOCK/ALGORITHM",
ErrAlterOperationNotSupportedReasonFkRename: "Columns participating in a foreign key are renamed",
ErrAlterOperationNotSupportedReasonColumnType: "Cannot change column type INPLACE",
ErrAlterOperationNotSupportedReasonFkCheck: "Adding foreign keys needs foreignKeyChecks=OFF",
ErrAlterOperationNotSupportedReasonIgnore: "Creating unique indexes with IGNORE requires COPY algorithm to remove duplicate rows",
ErrAlterOperationNotSupportedReasonNopk: "Dropping a primary key is not allowed without also adding a new primary key",
ErrAlterOperationNotSupportedReasonAutoinc: "Adding an auto-increment column requires a lock",
ErrAlterOperationNotSupportedReasonHiddenFts: "Cannot replace hidden FTSDOCID with a user-visible one",
ErrAlterOperationNotSupportedReasonChangeFts: "Cannot drop or rename FTSDOCID",
ErrAlterOperationNotSupportedReasonFts: "Fulltext index creation requires a lock",
ErrSQLSlaveSkipCounterNotSettableInGtidMode: "sqlSlaveSkipCounter can not be set when the server is running with @@GLOBAL.GTIDMODE = ON. Instead, for each transaction that you want to skip, generate an empty transaction with the same GTID as the transaction",
ErrDupUnknownInIndex: "Duplicate entry for key '%-.192s'",
ErrIdentCausesTooLongPath: "Long database name and identifier for object resulted in path length exceeding %d characters. Path: '%s'.",
ErrAlterOperationNotSupportedReasonNotNull: "cannot silently convert NULL values, as required in this SQLMODE",
ErrMustChangePasswordLogin: "Your password has expired. To log in you must change it using a client that supports expired passwords.",
ErrRowInWrongPartition: "Found a row in wrong partition %s",
}

71
vendor/github.com/pingcap/tidb/mysql/error.go generated vendored Normal file
View file

@ -0,0 +1,71 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package mysql
import (
"errors"
"fmt"
)
// Portable analogs of some common call errors.
var (
	// ErrBadConn reports a connection that is no longer usable.
	ErrBadConn = errors.New("connection was bad")
	// ErrMalformPacket reports a wire-protocol packet that could not be parsed.
	ErrMalformPacket = errors.New("Malform packet error")
)
// SQLError records an error information, from executing SQL.
type SQLError struct {
	Code    uint16 // MySQL-compatible error number
	Message string // human-readable description
	State   string // five-character SQLSTATE value
}

// Error implements the error interface; it renders the code, SQLSTATE
// and message in the classic MySQL client format.
func (se *SQLError) Error() string {
	return fmt.Sprintf("ERROR %d (%s): %s", se.Code, se.State, se.Message)
}
// NewErr generates a SQL error, with an error code and default format
// specifier defined in MySQLErrName. Unknown codes fall back to
// DefaultMySQLState and a plain fmt.Sprint of the arguments.
func NewErr(errCode uint16, args ...interface{}) *SQLError {
	state, known := MySQLState[errCode]
	if !known {
		state = DefaultMySQLState
	}
	var msg string
	if format, found := MySQLErrName[errCode]; found {
		msg = fmt.Sprintf(format, args...)
	} else {
		msg = fmt.Sprint(args...)
	}
	return &SQLError{Code: errCode, State: state, Message: msg}
}
// NewErrf creates a SQL error, with an error code and a caller-supplied
// format specifier (MySQLErrName is not consulted).
func NewErrf(errCode uint16, format string, args ...interface{}) *SQLError {
	state, known := MySQLState[errCode]
	if !known {
		state = DefaultMySQLState
	}
	return &SQLError{
		Code:    errCode,
		State:   state,
		Message: fmt.Sprintf(format, args...),
	}
}

92
vendor/github.com/pingcap/tidb/mysql/fsp.go generated vendored Normal file
View file

@ -0,0 +1,92 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package mysql
import (
"math"
"strconv"
"strings"
"github.com/juju/errors"
)
// Fractional-seconds-precision (fsp) limits for temporal values.
const (
	// UnspecifiedFsp is the unspecified fractional seconds part.
	UnspecifiedFsp int = -1
	// MaxFsp is the maximum digit of fractional seconds part.
	MaxFsp int = 6
	// MinFsp is the minimum digit of fractional seconds part.
	MinFsp int = 0
	// DefaultFsp is the default digit of fractional seconds part.
	// MySQL use 0 as the default Fsp.
	DefaultFsp int = 0
)
// checkFsp normalizes a requested fractional-seconds precision:
// UnspecifiedFsp maps to DefaultFsp, out-of-range values are rejected.
func checkFsp(fsp int) (int, error) {
	switch {
	case fsp == UnspecifiedFsp:
		return DefaultFsp, nil
	case fsp < MinFsp || fsp > MaxFsp:
		return DefaultFsp, errors.Errorf("Invalid fsp %d", fsp)
	default:
		return fsp, nil
	}
}
// parseFrac converts the textual fractional-seconds digits s (e.g. "123")
// into an integer scaled to MaxFsp digits (e.g. 123000), rounding
// half-up at the requested precision fsp.
func parseFrac(s string, fsp int) (int, error) {
	if len(s) == 0 {
		return 0, nil
	}
	checked, err := checkFsp(fsp)
	if err != nil {
		return 0, errors.Trace(err)
	}
	fsp = checked
	// Interpret the digits as a decimal fraction, e.g. "123" -> "0.123".
	if !strings.HasPrefix(s, ".") && !strings.HasPrefix(s, "0.") {
		s = "0." + s
	}
	frac, err := strconv.ParseFloat(s, 64)
	if err != nil {
		return 0, errors.Trace(err)
	}
	// Scale to fsp digits and round to the nearest integer (half-up).
	scaled := math.Pow(10, float64(fsp)) * frac
	_, remainder := math.Modf(scaled)
	rounded := math.Floor(scaled)
	if remainder >= 0.5 {
		rounded = math.Ceil(scaled)
	}
	// Pad back out to the full MaxFsp digit count:
	// 0.1236 at fsp 3 -> 124 -> 124000.
	return int(rounded * math.Pow10(MaxFsp-fsp)), nil
}
// alignFrac right-pads a fractional-digit string with zeros up to fsp
// digits, like `100` -> `100000`; longer strings are returned unchanged.
func alignFrac(s string, fsp int) string {
	if missing := fsp - len(s); missing > 0 {
		return s + strings.Repeat("0", missing)
	}
	return s
}

85
vendor/github.com/pingcap/tidb/mysql/hex.go generated vendored Normal file
View file

@ -0,0 +1,85 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package mysql
import (
"encoding/hex"
"fmt"
"strconv"
"strings"
"github.com/juju/errors"
)
// Hex is for mysql hexadecimal literal type.
type Hex struct {
	// Value holds numeric value for hexadecimal literal.
	Value int64
}

// String implements fmt.Stringer interface. Digits are upper-case and
// left-padded with a zero so the digit count is always even.
func (h Hex) String() string {
	digits := fmt.Sprintf("%X", h.Value)
	if len(digits)%2 == 0 {
		return "0x" + digits
	}
	return "0x0" + digits
}

// ToNumber changes hexadecimal type to float64 for numeric operation.
// MySQL treats hexadecimal literal as double type.
func (h Hex) ToNumber() float64 {
	return float64(h.Value)
}

// ToString returns the raw-bytes string representation for the
// hexadecimal literal: each pair of hex digits becomes one byte.
func (h Hex) ToString() string {
	digits := fmt.Sprintf("%x", h.Value)
	if len(digits)%2 == 1 {
		digits = "0" + digits
	}
	// Decoding cannot fail: digits is an even-length hex string.
	raw, _ := hex.DecodeString(digits)
	return string(raw)
}
// ParseHex parses hexadecimal literal string.
// The string format can be X'val', x'val' or 0xval.
// val must in (0...9, a...z, A...Z).
func ParseHex(s string) (Hex, error) {
	if len(s) == 0 {
		return Hex{}, errors.Errorf("invalid empty string for parsing hexadecimal literal")
	}
	switch {
	case s[0] == 'x' || s[0] == 'X':
		// Quoted form: strip the marker and the surrounding quotes.
		body := strings.Trim(s[1:], "'")
		if len(body)%2 != 0 {
			return Hex{}, errors.Errorf("invalid hexadecimal format, must even numbers, but %d", len(body))
		}
		s = "0x" + body
	case !strings.HasPrefix(s, "0x"):
		// Neither x'val', X'val' nor 0xval.
		return Hex{}, errors.Errorf("invalid hexadecimal format %s", s)
	}
	// Base 0 lets ParseInt honor the 0x prefix.
	n, err := strconv.ParseInt(s, 0, 64)
	if err != nil {
		return Hex{}, errors.Trace(err)
	}
	return Hex{Value: n}, nil
}

111
vendor/github.com/pingcap/tidb/mysql/set.go generated vendored Normal file
View file

@ -0,0 +1,111 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package mysql
import (
"strconv"
"strings"
"github.com/juju/errors"
)
// zeroSet is the canonical empty Set shared by the parse helpers.
var zeroSet = Set{Name: "", Value: 0}

// Set is for MySQL Set type.
type Set struct {
	Name  string // comma-separated member names
	Value uint64 // bitmask; bit i corresponds to the i-th element
}

// String implements fmt.Stringer interface; it returns the member list.
func (s Set) String() string {
	return s.Name
}

// ToNumber changes Set to float64 for numeric operation.
func (s Set) ToNumber() float64 {
	return float64(s.Value)
}
// ParseSetName creates a Set from a comma-separated member list such as
// "a,b" (matched case-insensitively against elems). A name that matches
// no element is retried as a numeric set value.
func ParseSetName(elems []string, name string) (Set, error) {
	if len(name) == 0 {
		return zeroSet, nil
	}
	// Collect the requested members, lower-cased for matching.
	parts := strings.Split(name, ",")
	requested := make(map[string]struct{}, len(parts))
	for _, part := range parts {
		requested[strings.ToLower(part)] = struct{}{}
	}
	var (
		value uint64
		items []string
	)
	for i, elem := range elems {
		lowered := strings.ToLower(elem)
		if _, ok := requested[lowered]; !ok {
			continue
		}
		value |= 1 << uint64(i)
		delete(requested, lowered)
		items = append(items, lowered)
	}
	if len(requested) == 0 {
		return Set{Name: strings.Join(items, ","), Value: value}, nil
	}
	// Some names were not elements; maybe the whole string is a number.
	if num, err := strconv.ParseUint(name, 0, 64); err == nil {
		return ParseSetValue(elems, num)
	}
	return Set{}, errors.Errorf("item %s is not in Set %v", name, elems)
}
var (
	// setIndexValue[i] holds the bitmask for element i (1 << i).
	setIndexValue []uint64
	// setIndexInvertValue[i] holds the complement mask used to clear bit i.
	setIndexInvertValue []uint64
)

// init precomputes the 64 bit masks and their complements used when
// packing/unpacking Set values.
func init() {
	setIndexValue = make([]uint64, 64)
	setIndexInvertValue = make([]uint64, 64)
	for i := 0; i < 64; i++ {
		setIndexValue[i] = 1 << uint64(i)
		setIndexInvertValue[i] = ^setIndexValue[i]
	}
}
// ParseSetValue creates a Set from its numeric bitmask: bit i selects
// elems[i]. Bits beyond len(elems) make the number invalid.
func ParseSetValue(elems []string, number uint64) (Set, error) {
	if number == 0 {
		return zeroSet, nil
	}
	value := number
	remaining := number
	var items []string
	for i := 0; i < len(elems); i++ {
		if remaining&setIndexValue[i] == 0 {
			continue
		}
		items = append(items, elems[i])
		remaining &= setIndexInvertValue[i]
	}
	if remaining != 0 {
		// Bits are left over that no element covers.
		return Set{}, errors.Errorf("invalid number %d for Set %v", remaining, elems)
	}
	return Set{Name: strings.Join(items, ","), Value: value}, nil
}

249
vendor/github.com/pingcap/tidb/mysql/state.go generated vendored Normal file
View file

@ -0,0 +1,249 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package mysql
const (
	// DefaultMySQLState is the fallback SQLSTATE ("HY000", general error)
	// used for any error code that MySQLState does not map explicitly.
	DefaultMySQLState = "HY000"
)

// MySQLState maps error code to MySQL SQLSTATE value.
// The values are taken from ANSI SQL and ODBC and are more standardized.
// Codes missing from this map fall back to DefaultMySQLState (see NewErr).
var MySQLState = map[uint16]string{
	ErrDupKey:                              "23000",
	ErrOutofmemory:                         "HY001",
	ErrOutOfSortmemory:                     "HY001",
	ErrConCount:                            "08004",
	ErrBadHost:                             "08S01",
	ErrHandshake:                           "08S01",
	ErrDbaccessDenied:                      "42000",
	ErrAccessDenied:                        "28000",
	ErrNoDb:                                "3D000",
	ErrUnknownCom:                          "08S01",
	ErrBadNull:                             "23000",
	ErrBadDb:                               "42000",
	ErrTableExists:                         "42S01",
	ErrBadTable:                            "42S02",
	ErrNonUniq:                             "23000",
	ErrServerShutdown:                      "08S01",
	ErrBadField:                            "42S22",
	ErrWrongFieldWithGroup:                 "42000",
	ErrWrongSumSelect:                      "42000",
	ErrWrongGroupField:                     "42000",
	ErrWrongValueCount:                     "21S01",
	ErrTooLongIdent:                        "42000",
	ErrDupFieldname:                        "42S21",
	ErrDupKeyname:                          "42000",
	ErrDupEntry:                            "23000",
	ErrWrongFieldSpec:                      "42000",
	ErrParse:                               "42000",
	ErrEmptyQuery:                          "42000",
	ErrNonuniqTable:                        "42000",
	ErrInvalidDefault:                      "42000",
	ErrMultiplePriKey:                      "42000",
	ErrTooManyKeys:                         "42000",
	ErrTooManyKeyParts:                     "42000",
	ErrTooLongKey:                          "42000",
	ErrKeyColumnDoesNotExits:               "42000",
	ErrBlobUsedAsKey:                       "42000",
	ErrTooBigFieldlength:                   "42000",
	ErrWrongAutoKey:                        "42000",
	ErrForcingClose:                        "08S01",
	ErrIpsock:                              "08S01",
	ErrNoSuchIndex:                         "42S12",
	ErrWrongFieldTerminators:               "42000",
	ErrBlobsAndNoTerminated:                "42000",
	ErrCantRemoveAllFields:                 "42000",
	ErrCantDropFieldOrKey:                  "42000",
	ErrBlobCantHaveDefault:                 "42000",
	ErrWrongDbName:                         "42000",
	ErrWrongTableName:                      "42000",
	ErrTooBigSelect:                        "42000",
	ErrUnknownProcedure:                    "42000",
	ErrWrongParamcountToProcedure:          "42000",
	ErrUnknownTable:                        "42S02",
	ErrFieldSpecifiedTwice:                 "42000",
	ErrUnsupportedExtension:                "42000",
	ErrTableMustHaveColumns:                "42000",
	ErrUnknownCharacterSet:                 "42000",
	ErrTooBigRowsize:                       "42000",
	ErrWrongOuterJoin:                      "42000",
	ErrNullColumnInIndex:                   "42000",
	ErrPasswordAnonymousUser:               "42000",
	ErrPasswordNotAllowed:                  "42000",
	ErrPasswordNoMatch:                     "42000",
	ErrWrongValueCountOnRow:                "21S01",
	ErrInvalidUseOfNull:                    "22004",
	ErrRegexp:                              "42000",
	ErrMixOfGroupFuncAndFields:             "42000",
	ErrNonexistingGrant:                    "42000",
	ErrTableaccessDenied:                   "42000",
	ErrColumnaccessDenied:                  "42000",
	ErrIllegalGrantForTable:                "42000",
	ErrGrantWrongHostOrUser:                "42000",
	ErrNoSuchTable:                         "42S02",
	ErrNonexistingTableGrant:               "42000",
	ErrNotAllowedCommand:                   "42000",
	ErrSyntax:                              "42000",
	ErrAbortingConnection:                  "08S01",
	ErrNetPacketTooLarge:                   "08S01",
	ErrNetReadErrorFromPipe:                "08S01",
	ErrNetFcntl:                            "08S01",
	ErrNetPacketsOutOfOrder:                "08S01",
	ErrNetUncompress:                       "08S01",
	ErrNetRead:                             "08S01",
	ErrNetReadInterrupted:                  "08S01",
	ErrNetErrorOnWrite:                     "08S01",
	ErrNetWriteInterrupted:                 "08S01",
	ErrTooLongString:                       "42000",
	ErrTableCantHandleBlob:                 "42000",
	ErrTableCantHandleAutoIncrement:        "42000",
	ErrWrongColumnName:                     "42000",
	ErrWrongKeyColumn:                      "42000",
	ErrDupUnique:                           "23000",
	ErrBlobKeyWithoutLength:                "42000",
	ErrPrimaryCantHaveNull:                 "42000",
	ErrTooManyRows:                         "42000",
	ErrRequiresPrimaryKey:                  "42000",
	ErrKeyDoesNotExits:                     "42000",
	ErrCheckNoSuchTable:                    "42000",
	ErrCheckNotImplemented:                 "42000",
	ErrCantDoThisDuringAnTransaction:       "25000",
	ErrNewAbortingConnection:               "08S01",
	ErrMasterNetRead:                       "08S01",
	ErrMasterNetWrite:                      "08S01",
	ErrTooManyUserConnections:              "42000",
	ErrReadOnlyTransaction:                 "25000",
	ErrNoPermissionToCreateUser:            "42000",
	ErrLockDeadlock:                        "40001",
	ErrNoReferencedRow:                     "23000",
	ErrRowIsReferenced:                     "23000",
	ErrConnectToMaster:                     "08S01",
	ErrWrongNumberOfColumnsInSelect:        "21000",
	ErrUserLimitReached:                    "42000",
	ErrSpecificAccessDenied:                "42000",
	ErrNoDefault:                           "42000",
	ErrWrongValueForVar:                    "42000",
	ErrWrongTypeForVar:                     "42000",
	ErrCantUseOptionHere:                   "42000",
	ErrNotSupportedYet:                     "42000",
	ErrWrongFkDef:                          "42000",
	ErrOperandColumns:                      "21000",
	ErrSubqueryNo1Row:                      "21000",
	ErrIllegalReference:                    "42S22",
	ErrDerivedMustHaveAlias:                "42000",
	ErrSelectReduced:                       "01000",
	ErrTablenameNotAllowedHere:             "42000",
	ErrNotSupportedAuthMode:                "08004",
	ErrSpatialCantHaveNull:                 "42000",
	ErrCollationCharsetMismatch:            "42000",
	ErrWarnTooFewRecords:                   "01000",
	ErrWarnTooManyRecords:                  "01000",
	ErrWarnNullToNotnull:                   "22004",
	ErrWarnDataOutOfRange:                  "22003",
	WarnDataTruncated:                      "01000",
	ErrWrongNameForIndex:                   "42000",
	ErrWrongNameForCatalog:                 "42000",
	ErrUnknownStorageEngine:                "42000",
	ErrTruncatedWrongValue:                 "22007",
	ErrSpNoRecursiveCreate:                 "2F003",
	ErrSpAlreadyExists:                     "42000",
	ErrSpDoesNotExist:                      "42000",
	ErrSpLilabelMismatch:                   "42000",
	ErrSpLabelRedefine:                     "42000",
	ErrSpLabelMismatch:                     "42000",
	ErrSpUninitVar:                         "01000",
	ErrSpBadselect:                         "0A000",
	ErrSpBadreturn:                         "42000",
	ErrSpBadstatement:                      "0A000",
	ErrUpdateLogDeprecatedIgnored:          "42000",
	ErrUpdateLogDeprecatedTranslated:       "42000",
	ErrQueryInterrupted:                    "70100",
	ErrSpWrongNoOfArgs:                     "42000",
	ErrSpCondMismatch:                      "42000",
	ErrSpNoreturn:                          "42000",
	ErrSpNoreturnend:                       "2F005",
	ErrSpBadCursorQuery:                    "42000",
	ErrSpBadCursorSelect:                   "42000",
	ErrSpCursorMismatch:                    "42000",
	ErrSpCursorAlreadyOpen:                 "24000",
	ErrSpCursorNotOpen:                     "24000",
	ErrSpUndeclaredVar:                     "42000",
	ErrSpFetchNoData:                       "02000",
	ErrSpDupParam:                          "42000",
	ErrSpDupVar:                            "42000",
	ErrSpDupCond:                           "42000",
	ErrSpDupCurs:                           "42000",
	ErrSpSubselectNyi:                      "0A000",
	ErrStmtNotAllowedInSfOrTrg:             "0A000",
	ErrSpVarcondAfterCurshndlr:             "42000",
	ErrSpCursorAfterHandler:                "42000",
	ErrSpCaseNotFound:                      "20000",
	ErrDivisionByZero:                      "22012",
	ErrIllegalValueForType:                 "22007",
	ErrProcaccessDenied:                    "42000",
	ErrXaerNota:                            "XAE04",
	ErrXaerInval:                           "XAE05",
	ErrXaerRmfail:                          "XAE07",
	ErrXaerOutside:                         "XAE09",
	ErrXaerRmerr:                           "XAE03",
	ErrXaRbrollback:                        "XA100",
	ErrNonexistingProcGrant:                "42000",
	ErrDataTooLong:                         "22001",
	ErrSpBadSQLstate:                       "42000",
	ErrCantCreateUserWithGrant:             "42000",
	ErrSpDupHandler:                        "42000",
	ErrSpNotVarArg:                         "42000",
	ErrSpNoRetset:                          "0A000",
	ErrCantCreateGeometryObject:            "22003",
	ErrTooBigScale:                         "42000",
	ErrTooBigPrecision:                     "42000",
	ErrMBiggerThanD:                        "42000",
	ErrTooLongBody:                         "42000",
	ErrTooBigDisplaywidth:                  "42000",
	ErrXaerDupid:                           "XAE08",
	ErrDatetimeFunctionOverflow:            "22008",
	ErrRowIsReferenced2:                    "23000",
	ErrNoReferencedRow2:                    "23000",
	ErrSpBadVarShadow:                      "42000",
	ErrSpWrongName:                         "42000",
	ErrSpNoAggregate:                       "42000",
	ErrMaxPreparedStmtCountReached:         "42000",
	ErrNonGroupingFieldUsed:                "42000",
	ErrForeignDuplicateKeyOldUnused:        "23000",
	ErrCantChangeTxCharacteristics:         "25001",
	ErrWrongParamcountToNativeFct:          "42000",
	ErrWrongParametersToNativeFct:          "42000",
	ErrWrongParametersToStoredFct:          "42000",
	ErrDupEntryWithKeyName:                 "23000",
	ErrXaRbtimeout:                         "XA106",
	ErrXaRbdeadlock:                        "XA102",
	ErrFuncInexistentNameCollision:         "42000",
	ErrDupSignalSet:                        "42000",
	ErrSignalWarn:                          "01000",
	ErrSignalNotFound:                      "02000",
	ErrSignalException:                     "HY000",
	ErrResignalWithoutActiveHandler:        "0K000",
	ErrSpatialMustHaveGeomCol:              "42000",
	ErrDataOutOfRange:                      "22003",
	ErrAccessDeniedNoPassword:              "28000",
	ErrTruncateIllegalFk:                   "42000",
	ErrDaInvalidConditionNumber:            "35000",
	ErrForeignDuplicateKeyWithChildInfo:    "23000",
	ErrForeignDuplicateKeyWithoutChildInfo: "23000",
	ErrCantExecuteInReadOnlyTransaction:    "25006",
	ErrAlterOperationNotSupported:          "0A000",
	ErrAlterOperationNotSupportedReason:    "0A000",
	ErrDupUnknownInIndex:                   "23000",
}

1422
vendor/github.com/pingcap/tidb/mysql/time.go generated vendored Normal file

File diff suppressed because it is too large Load diff

144
vendor/github.com/pingcap/tidb/mysql/type.go generated vendored Normal file
View file

@ -0,0 +1,144 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package mysql
// MySQL type codes for the base/protocol types (0x00 - 0x10).
const (
	TypeDecimal byte = iota // legacy code 0; not used by modern MySQL (see TypeUnspecified)
	TypeTiny
	TypeShort
	TypeLong
	TypeFloat
	TypeDouble
	TypeNull
	TypeTimestamp
	TypeLonglong
	TypeInt24
	TypeDate
	TypeDuration /* Original name was TypeTime, renamed to Duration to resolve the conflict with Go type Time.*/
	TypeDatetime
	TypeYear
	TypeNewDate
	TypeVarchar
	TypeBit
)

// TypeUnspecified is an uninitialized type. TypeDecimal is not used in MySQL.
var TypeUnspecified = TypeDecimal

// MySQL type codes for the extended types, starting at 0xf6.
const (
	TypeNewDecimal byte = iota + 0xf6
	TypeEnum
	TypeSet
	TypeTinyBlob
	TypeMediumBlob
	TypeLongBlob
	TypeBlob
	TypeVarString
	TypeString
	TypeGeometry
)
// IsUninitializedType reports whether tp is the legacy, uninitialized
// type code. TypeDecimal is the old decimal code and is not used in the
// new mysql version, so it doubles as the "not yet set" marker.
func IsUninitializedType(tp byte) bool {
	return TypeDecimal == tp
}
// Flag informations.
const (
NotNullFlag = 1 /* Field can't be NULL */
PriKeyFlag = 2 /* Field is part of a primary key */
UniqueKeyFlag = 4 /* Field is part of a unique key */
MultipleKeyFlag = 8 /* Field is part of a key */
BlobFlag = 16 /* Field is a blob */
UnsignedFlag = 32 /* Field is unsigned */
ZerofillFlag = 64 /* Field is zerofill */
BinaryFlag = 128 /* Field is binary */
EnumFlag = 256 /* Field is an enum */
AutoIncrementFlag = 512 /* Field is an auto increment field */
TimestampFlag = 1024 /* Field is a timestamp */
SetFlag = 2048 /* Field is a set */
NoDefaultValueFlag = 4096 /* Field doesn't have a default value */
OnUpdateNowFlag = 8192 /* Field is set to NOW on UPDATE */
NumFlag = 32768 /* Field is a num (for clients) */
PartKeyFlag = 16384 /* Intern: Part of some keys */
GroupFlag = 32768 /* Intern: Group field */
UniqueFlag = 65536 /* Intern: Used by sql_yacc */
BinCmpFlag = 131072 /* Intern: Used by sql_yacc */
)
// TypeInt24 bounds: the value range of MySQL's 3-byte MEDIUMINT type.
const (
	MaxUint24 = 1<<24 - 1
	MaxInt24  = 1<<23 - 1
	MinInt24  = -1 << 23
)
// HasNotNullFlag reports whether NotNullFlag is set in flag.
func HasNotNullFlag(flag uint) bool {
	return flag&NotNullFlag != 0
}
// HasNoDefaultValueFlag reports whether NoDefaultValueFlag is set in flag.
func HasNoDefaultValueFlag(flag uint) bool {
	return flag&NoDefaultValueFlag != 0
}
// HasAutoIncrementFlag reports whether AutoIncrementFlag is set in flag.
func HasAutoIncrementFlag(flag uint) bool {
	return flag&AutoIncrementFlag != 0
}
// HasUnsignedFlag reports whether UnsignedFlag is set in flag.
func HasUnsignedFlag(flag uint) bool {
	return flag&UnsignedFlag != 0
}
// HasZerofillFlag reports whether ZerofillFlag is set in flag.
func HasZerofillFlag(flag uint) bool {
	return flag&ZerofillFlag != 0
}
// HasBinaryFlag reports whether BinaryFlag is set in flag.
func HasBinaryFlag(flag uint) bool {
	return flag&BinaryFlag != 0
}
// HasPriKeyFlag reports whether PriKeyFlag is set in flag.
func HasPriKeyFlag(flag uint) bool {
	return flag&PriKeyFlag != 0
}
// HasUniKeyFlag reports whether UniqueKeyFlag is set in flag.
func HasUniKeyFlag(flag uint) bool {
	return flag&UniqueKeyFlag != 0
}
// HasMultipleKeyFlag reports whether MultipleKeyFlag is set in flag.
func HasMultipleKeyFlag(flag uint) bool {
	return flag&MultipleKeyFlag != 0
}
// HasTimestampFlag reports whether TimestampFlag is set in flag.
// (The original comment mistakenly said "HasTimestampFlag is set".)
func HasTimestampFlag(flag uint) bool {
	return flag&TimestampFlag != 0
}
// HasOnUpdateNowFlag reports whether OnUpdateNowFlag is set in flag.
func HasOnUpdateNowFlag(flag uint) bool {
	return flag&OnUpdateNowFlag != 0
}

54
vendor/github.com/pingcap/tidb/mysql/util.go generated vendored Normal file
View file

@ -0,0 +1,54 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package mysql
// GetDefaultFieldLength returns the default display length (Flen) for
// integer types. Call this when no Flen was assigned in DDL, or when a
// column value is calculated from an expression.
// For example: "select count(*) from t;" yields an int64 column whose
// Flen in ResultField will be 21.
// See: https://dev.mysql.com/doc/refman/5.7/en/storage-requirements.html
func GetDefaultFieldLength(tp byte) int {
	switch tp {
	case TypeTiny:
		return 4
	case TypeShort:
		return 6
	case TypeInt24:
		return 9
	case TypeLong:
		return 11
	case TypeLonglong:
		return 21
	case TypeDecimal:
		// See: https://dev.mysql.com/doc/refman/5.7/en/fixed-point-types.html
		return 10
	default:
		// TypeBit, TypeBlob and anything unhandled have no default length.
		// TODO: add more types.
		return -1
	}
}
// GetDefaultDecimal returns the default decimal (fractional digit) length
// for a column of the given type.
func GetDefaultDecimal(tp byte) int {
	if tp == TypeDecimal {
		// See: https://dev.mysql.com/doc/refman/5.7/en/fixed-point-types.html
		return 0
	}
	// TODO: add more types.
	return -1
}

65
vendor/github.com/pingcap/tidb/optimizer/logic.go generated vendored Normal file
View file

@ -0,0 +1,65 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package optimizer
import (
"github.com/pingcap/tidb/ast"
"github.com/pingcap/tidb/context"
"github.com/pingcap/tidb/evaluator"
)
// logicOptimize does logic optimization work on the AST.
// Currently the only logic pass is constant pre-evaluation.
func logicOptimize(ctx context.Context, node ast.Node) error {
	return preEvaluate(ctx, node)
}
// preEvaluate evaluates pre-evaluable expressions in the AST and rewrites
// constant expressions into value expressions. It returns the first
// evaluation error encountered, if any.
func preEvaluate(ctx context.Context, node ast.Node) error {
	v := preEvaluator{ctx: ctx}
	node.Accept(&v)
	return v.err
}
// preEvaluator is an ast.Visitor that evaluates pre-evaluable expressions
// during the Leave phase of the walk.
type preEvaluator struct {
	ctx context.Context // evaluation context passed to evaluator.Eval
	err error           // first evaluation error, surfaced by preEvaluate
}
// Enter implements ast.Visitor; it always descends into children.
func (r *preEvaluator) Enter(in ast.Node) (ast.Node, bool) {
	return in, false
}
// Leave implements ast.Visitor. For a pre-evaluable expression it computes
// the value; if the expression is additionally constant, the node is
// replaced by an equivalent *ast.ValueExpr, otherwise the value is just
// cached on the node via SetValue. Nodes that are already value
// expressions, and non-expression nodes, pass through untouched.
// On an evaluation error the walk is aborted (second return false) and
// the error is recorded in r.err.
func (r *preEvaluator) Leave(in ast.Node) (ast.Node, bool) {
	if expr, ok := in.(ast.ExprNode); ok {
		if _, ok = expr.(*ast.ValueExpr); ok {
			// Already a value; nothing to evaluate.
			return in, true
		} else if ast.IsPreEvaluable(expr) {
			val, err := evaluator.Eval(r.ctx, expr)
			if err != nil {
				r.err = err
				return in, false
			}
			if ast.IsConstant(expr) {
				// The expression is constant, rewrite the expression to a
				// value expression, preserving text and type.
				valExpr := &ast.ValueExpr{}
				valExpr.SetText(expr.Text())
				valExpr.SetType(expr.GetType())
				valExpr.SetValue(val)
				return valExpr, true
			}
			expr.SetValue(val)
		}
	}
	return in, true
}

90
vendor/github.com/pingcap/tidb/optimizer/optimizer.go generated vendored Normal file
View file

@ -0,0 +1,90 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package optimizer
import (
"github.com/juju/errors"
"github.com/pingcap/tidb/ast"
"github.com/pingcap/tidb/context"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/mysql"
"github.com/pingcap/tidb/optimizer/plan"
"github.com/pingcap/tidb/terror"
)
// Optimize does optimization and creates a Plan.
// The node must have been prepared (see Prepare) first.
func Optimize(ctx context.Context, node ast.Node, sb plan.SubQueryBuilder) (plan.Plan, error) {
	// Infer types again: once parameters are bound, expression types may
	// have changed since Prepare.
	if err := InferType(node); err != nil {
		return nil, errors.Trace(err)
	}
	if err := logicOptimize(ctx, node); err != nil {
		return nil, errors.Trace(err)
	}
	p, err := plan.BuildPlan(node, sb)
	if err != nil {
		return nil, errors.Trace(err)
	}
	if err = plan.Refine(p); err != nil {
		return nil, errors.Trace(err)
	}
	return p, nil
}
// Prepare prepares a raw statement parsed from the parser: it sets AST
// flags, preprocesses the node and validates it.
// The statement must be prepared before it can be passed to Optimize.
// We pass InfoSchema instead of getting it from Context in case it is
// changed after resolving names.
func Prepare(is infoschema.InfoSchema, ctx context.Context, node ast.Node) error {
	ast.SetFlag(node)
	if err := Preprocess(node, is, ctx); err != nil {
		return errors.Trace(err)
	}
	if err := Validate(node, true); err != nil {
		return errors.Trace(err)
	}
	return nil
}
// Optimizer error codes, local to terror.ClassOptimizer.
const (
	CodeOneColumn           terror.ErrCode = 1
	CodeSameColumns         terror.ErrCode = 2
	CodeMultiWildCard       terror.ErrCode = 3
	CodeUnsupported         terror.ErrCode = 4
	CodeInvalidGroupFuncUse terror.ErrCode = 5
	CodeIllegalReference    terror.ErrCode = 6
)
// Optimizer base errors, one per error code above.
var (
	ErrOneColumn           = terror.ClassOptimizer.New(CodeOneColumn, "Operand should contain 1 column(s)")
	ErrSameColumns         = terror.ClassOptimizer.New(CodeSameColumns, "Operands should contain same columns")
	ErrMultiWildCard       = terror.ClassOptimizer.New(CodeMultiWildCard, "wildcard field exist more than once")
	ErrUnSupported         = terror.ClassOptimizer.New(CodeUnsupported, "unsupported")
	ErrInvalidGroupFuncUse = terror.ClassOptimizer.New(CodeInvalidGroupFuncUse, "Invalid use of group function")
	ErrIllegalReference    = terror.ClassOptimizer.New(CodeIllegalReference, "Illegal reference")
)
// init registers the mapping from optimizer error codes to MySQL protocol
// error codes so terror can translate them for clients.
// NOTE(review): CodeUnsupported has no MySQL mapping here — confirm that
// is intentional (unsupported errors fall back to the default code).
func init() {
	mySQLErrCodes := map[terror.ErrCode]uint16{
		CodeOneColumn:           mysql.ErrOperandColumns,
		CodeSameColumns:         mysql.ErrOperandColumns,
		CodeMultiWildCard:       mysql.ErrParse,
		CodeInvalidGroupFuncUse: mysql.ErrInvalidGroupFuncUse,
		CodeIllegalReference:    mysql.ErrIllegalReference,
	}
	terror.ErrClassToMySQLCodes[terror.ClassOptimizer] = mySQLErrCodes
}

109
vendor/github.com/pingcap/tidb/optimizer/plan/cost.go generated vendored Normal file
View file

@ -0,0 +1,109 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package plan
import (
"math"
)
// Pre-defined cost factors used by the cost estimator. Row counts are
// heuristic defaults (no real statistics are consulted); the *Cost values
// are per-row weights.
const (
	FullRangeCount   = 10000
	HalfRangeCount   = 4000
	MiddleRangeCount = 100
	RowCost          = 1.0
	IndexCost        = 2.0
	SortCost         = 2.0
	FilterRate       = 0.5
)
// costEstimator is a plan Visitor that estimates the cost of a plan tree
// bottom-up; it is stateless.
type costEstimator struct {
}
// Enter implements the Visitor Enter interface. It does nothing on the way
// down; all estimation happens in Leave, after children costs are known.
func (c *costEstimator) Enter(p Plan) (Plan, bool) {
	return p, false
}
// Leave implements the Visitor Leave interface. It fills in startupCost,
// rowCount and totalCost for each supported plan node, using the already
// estimated costs of its source plan. Unknown node types are left as-is.
func (c *costEstimator) Leave(p Plan) (Plan, bool) {
	switch v := p.(type) {
	case *IndexScan:
		c.indexScan(v)
	case *Limit:
		// Limit is a pass-through for cost purposes; the limit itself was
		// pushed down via SetLimit before estimation.
		v.rowCount = v.Src().RowCount()
		v.startupCost = v.Src().StartupCost()
		v.totalCost = v.Src().TotalCost()
	case *SelectFields:
		// A SelectFields without a source (no FROM clause) keeps zero costs.
		if v.Src() != nil {
			v.startupCost = v.Src().StartupCost()
			v.rowCount = v.Src().RowCount()
			v.totalCost = v.Src().TotalCost()
		}
	case *SelectLock:
		v.startupCost = v.Src().StartupCost()
		v.rowCount = v.Src().RowCount()
		v.totalCost = v.Src().TotalCost()
	case *Sort:
		// Sort must retrieve all rows before returning the first row, so its
		// startup cost includes the full source cost plus the sorting work.
		v.startupCost = v.Src().TotalCost() + v.Src().RowCount()*SortCost
		if v.limit == 0 {
			// A limit of zero means no limit.
			v.rowCount = v.Src().RowCount()
		} else {
			v.rowCount = math.Min(v.Src().RowCount(), v.limit)
		}
		v.totalCost = v.startupCost + v.rowCount*RowCost
	case *TableScan:
		c.tableScan(v)
	}
	return p, true
}
// tableScan estimates the cost of a table scan: start from the heuristic
// full-range row count, discount by the selectivity of each access
// condition, then clamp to the pushed-down limit (zero means no limit).
func (c *costEstimator) tableScan(v *TableScan) {
	rows := float64(FullRangeCount)
	for _, cond := range v.AccessConditions {
		rows *= guesstimateFilterRate(cond)
	}
	v.startupCost = 0
	v.rowCount = rows
	if v.limit != 0 {
		v.rowCount = math.Min(rows, v.limit)
	}
	v.totalCost = v.rowCount * RowCost
}
// indexScan estimates the cost of an index scan, mirroring tableScan:
// heuristic full-range count discounted by access-condition selectivity,
// clamped to the pushed-down limit (zero means no limit).
func (c *costEstimator) indexScan(v *IndexScan) {
	rows := float64(FullRangeCount)
	for _, cond := range v.AccessConditions {
		rows *= guesstimateFilterRate(cond)
	}
	v.startupCost = 0
	v.rowCount = rows
	if v.limit != 0 {
		v.rowCount = math.Min(rows, v.limit)
	}
	v.totalCost = v.rowCount * RowCost
}
// EstimateCost walks the plan tree with a costEstimator and returns the
// estimated total cost of the root plan.
func EstimateCost(p Plan) float64 {
	var ce costEstimator
	p.Accept(&ce)
	return p.TotalCost()
}

View file

@ -0,0 +1,115 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package plan
import (
"github.com/pingcap/tidb/ast"
"github.com/pingcap/tidb/parser/opcode"
)
// Heuristic selectivities: the guessed fraction of rows that pass a filter
// of each shape. rateFull (1) means "filters nothing".
const (
	rateFull          float64 = 1
	rateEqual         float64 = 0.01
	rateNotEqual      float64 = 0.99
	rateBetween       float64 = 0.1
	rateGreaterOrLess float64 = 0.33
	rateIsFalse       float64 = 0.1
	rateIsNull        float64 = 0.1
	rateLike          float64 = 0.1
)
// guesstimateFilterRate guesstimates the filter rate for an expression:
// the fraction of rows expected to pass it. For example, on a table with
// 100 rows, if 'a between 0 and 9' keeps 10 rows, the filter rate is 0.1.
// The guess depends only on the expression's shape, never on its value,
// and the expression should reference only one column name.
func guesstimateFilterRate(expr ast.ExprNode) float64 {
	switch e := expr.(type) {
	case *ast.BetweenExpr:
		return rateBetween
	case *ast.BinaryOperationExpr:
		return guesstimateBinop(e)
	case *ast.ColumnNameExpr:
		return rateFull
	case *ast.IsNullExpr:
		return guesstimateIsNull(e)
	case *ast.IsTruthExpr:
		return guesstimateIsTrue(e)
	case *ast.ParenthesesExpr:
		return guesstimateFilterRate(e.Expr)
	case *ast.PatternInExpr:
		return guesstimatePatternIn(e)
	case *ast.PatternLikeExpr:
		return guesstimatePatternLike(e)
	default:
		return rateFull
	}
}
// guesstimateBinop guesstimates the filter rate of a binary operation,
// treating the two operands of AND/OR as independent events.
func guesstimateBinop(expr *ast.BinaryOperationExpr) float64 {
	switch expr.Op {
	case opcode.AndAnd:
		// P(A and B) = P(A) * P(B)
		return guesstimateFilterRate(expr.L) * guesstimateFilterRate(expr.R)
	case opcode.OrOr:
		// P(A or B) = P(A) + P(B) - P(A and B)
		l := guesstimateFilterRate(expr.L)
		r := guesstimateFilterRate(expr.R)
		return l + r - l*r
	case opcode.EQ:
		return rateEqual
	case opcode.NE:
		return rateNotEqual
	case opcode.GT, opcode.GE, opcode.LT, opcode.LE:
		return rateGreaterOrLess
	default:
		return rateFull
	}
}
// guesstimateIsNull guesstimates the filter rate of IS [NOT] NULL.
func guesstimateIsNull(expr *ast.IsNullExpr) float64 {
	rate := rateIsNull
	if expr.Not {
		rate = rateFull - rateIsNull
	}
	return rate
}
// guesstimateIsTrue guesstimates the filter rate of IS [NOT] TRUE/FALSE.
// IS NOT TRUE matches both false and NULL rows, hence the sum below.
func guesstimateIsTrue(expr *ast.IsTruthExpr) float64 {
	switch {
	case expr.True == 0 && expr.Not:
		return rateFull - rateIsFalse
	case expr.True == 0:
		return rateIsFalse
	case expr.Not:
		return rateIsFalse + rateIsNull
	default:
		return rateFull - rateIsFalse - rateIsNull
	}
}
// guesstimatePatternIn guesstimates the filter rate of [NOT] IN (...):
// one equality rate per list element, complemented for NOT IN.
// An empty list (subquery form) is treated as filtering nothing.
func guesstimatePatternIn(expr *ast.PatternInExpr) float64 {
	if len(expr.List) == 0 {
		return rateFull
	}
	rate := rateEqual * float64(len(expr.List))
	if expr.Not {
		rate = rateFull - rate
	}
	return rate
}
// guesstimatePatternLike guesstimates the filter rate of [NOT] LIKE.
func guesstimatePatternLike(expr *ast.PatternLikeExpr) float64 {
	rate := rateLike
	if expr.Not {
		rate = rateFull - rateLike
	}
	return rate
}

127
vendor/github.com/pingcap/tidb/optimizer/plan/plan.go generated vendored Normal file
View file

@ -0,0 +1,127 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package plan
import (
"math"
"github.com/pingcap/tidb/ast"
)
// Plan is a description of an execution flow.
// It is created from an ast.Node first, then optimized by the optimizer,
// then used by the executor to create a Cursor which executes the statement.
type Plan interface {
	// Accept a visitor; implementations should call Visitor.Enter first,
	// then the children's Accept methods, and finally Visitor.Leave.
	Accept(v Visitor) (out Plan, ok bool)
	// Fields returns the result fields of the plan.
	Fields() []*ast.ResultField
	// SetFields sets the result fields of the plan.
	SetFields(fields []*ast.ResultField)
	// StartupCost is the cost before returning the first row.
	StartupCost() float64
	// TotalCost is the cost after returning all the rows.
	TotalCost() float64
	// RowCount is the expected row count.
	RowCount() float64
	// SetLimit is used to push a limit upstream to estimate the cost.
	SetLimit(limit float64)
}
// WithSrcPlan is a Plan that has a single source Plan feeding it.
type WithSrcPlan interface {
	Plan
	Src() Plan
	SetSrc(src Plan)
}
// Visitor visits a Plan tree.
type Visitor interface {
	// Enter is called before visiting children.
	// The out plan should be of exactly the same type as the in plan.
	// If skipChildren is true, the children are not visited.
	Enter(in Plan) (out Plan, skipChildren bool)
	// Leave is called after children have been visited. The out Plan may
	// be of another type; unlike ast.Visitor Leave, this is safe because
	// plans only reference children through the Plan interface.
	Leave(in Plan) (out Plan, ok bool)
}
// basePlan implements the base Plan interface.
// It should be used as an embedded struct in Plan implementations.
type basePlan struct {
	fields      []*ast.ResultField // result fields of this plan
	startupCost float64            // cost before the first row is returned
	totalCost   float64            // cost after all rows are returned
	rowCount    float64            // expected number of output rows
	limit       float64            // pushed-down limit; 0 means no limit
}
// StartupCost implements the Plan StartupCost interface.
func (p *basePlan) StartupCost() float64 {
	return p.startupCost
}
// TotalCost implements the Plan TotalCost interface.
func (p *basePlan) TotalCost() float64 {
	return p.totalCost
}
// RowCount implements the Plan RowCount interface. A pushed-down limit of
// zero means "no limit"; otherwise the estimate is capped by the limit.
func (p *basePlan) RowCount() float64 {
	if p.limit != 0 {
		return math.Min(p.rowCount, p.limit)
	}
	return p.rowCount
}
// SetLimit implements the Plan SetLimit interface.
func (p *basePlan) SetLimit(limit float64) {
	p.limit = limit
}
// Fields implements the Plan Fields interface.
func (p *basePlan) Fields() []*ast.ResultField {
	return p.fields
}
// SetFields implements the Plan SetFields interface.
func (p *basePlan) SetFields(fields []*ast.ResultField) {
	p.fields = fields
}
// planWithSrc implements the WithSrcPlan interface.
// (The original comment called it "srcPlan", which does not match the name.)
type planWithSrc struct {
	basePlan
	src Plan // the single upstream plan feeding this one
}
// Src implements the WithSrcPlan interface.
func (p *planWithSrc) Src() Plan {
	return p.src
}
// SetSrc implements the WithSrcPlan interface.
func (p *planWithSrc) SetSrc(src Plan) {
	p.src = src
}
// SetLimit implements the Plan interface. The limit is recorded locally
// and also pushed down to the source plan for cost estimation.
func (p *planWithSrc) SetLimit(limit float64) {
	p.limit = limit
	p.src.SetLimit(limit)
}

View file

@ -0,0 +1,926 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package plan
import (
"github.com/juju/errors"
"github.com/ngaut/log"
"github.com/pingcap/tidb/ast"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/model"
"github.com/pingcap/tidb/mysql"
"github.com/pingcap/tidb/parser/opcode"
"github.com/pingcap/tidb/terror"
"github.com/pingcap/tidb/util/charset"
"github.com/pingcap/tidb/util/types"
)
// Error instances.
var (
	ErrUnsupportedType = terror.ClassOptimizerPlan.New(CodeUnsupportedType, "Unsupported type")
)
// Error codes, local to terror.ClassOptimizerPlan.
const (
	CodeUnsupportedType terror.ErrCode = 1
)
// BuildPlan builds a plan from an AST node.
// It returns ErrUnsupportedType if the ast.Node type is not supported yet.
func BuildPlan(node ast.Node, sb SubQueryBuilder) (Plan, error) {
	b := planBuilder{sb: sb}
	return b.build(node), b.err
}
// planBuilder builds a Plan from an ast.Node.
// It just translates the AST node straightforwardly; cost-based choices
// happen only for single-table access paths.
type planBuilder struct {
	err    error           // first build error; surfaced by BuildPlan
	hasAgg bool
	sb     SubQueryBuilder // used to turn subquery plans into executors
	obj    interface{}
}
// build dispatches an AST statement node to the matching plan builder.
// DDL statements all go through buildDDL; simple session/transaction
// statements go through buildSimple. On an unsupported node type it
// records ErrUnsupportedType in b.err and returns nil.
func (b *planBuilder) build(node ast.Node) Plan {
	switch x := node.(type) {
	case *ast.AdminStmt:
		return b.buildAdmin(x)
	case *ast.AlterTableStmt:
		return b.buildDDL(x)
	case *ast.CreateDatabaseStmt:
		return b.buildDDL(x)
	case *ast.CreateIndexStmt:
		return b.buildDDL(x)
	case *ast.CreateTableStmt:
		return b.buildDDL(x)
	case *ast.DeallocateStmt:
		return &Deallocate{Name: x.Name}
	case *ast.DeleteStmt:
		return b.buildDelete(x)
	case *ast.DropDatabaseStmt:
		return b.buildDDL(x)
	case *ast.DropIndexStmt:
		return b.buildDDL(x)
	case *ast.DropTableStmt:
		return b.buildDDL(x)
	case *ast.ExecuteStmt:
		return &Execute{Name: x.Name, UsingVars: x.UsingVars}
	case *ast.ExplainStmt:
		return b.buildExplain(x)
	case *ast.InsertStmt:
		return b.buildInsert(x)
	case *ast.PrepareStmt:
		return b.buildPrepare(x)
	case *ast.SelectStmt:
		return b.buildSelect(x)
	case *ast.UnionStmt:
		return b.buildUnion(x)
	case *ast.UpdateStmt:
		return b.buildUpdate(x)
	case *ast.UseStmt:
		return b.buildSimple(x)
	case *ast.SetCharsetStmt:
		return b.buildSimple(x)
	case *ast.SetStmt:
		return b.buildSimple(x)
	case *ast.ShowStmt:
		return b.buildShow(x)
	case *ast.DoStmt:
		return b.buildSimple(x)
	case *ast.BeginStmt:
		return b.buildSimple(x)
	case *ast.CommitStmt:
		return b.buildSimple(x)
	case *ast.RollbackStmt:
		return b.buildSimple(x)
	case *ast.CreateUserStmt:
		return b.buildSimple(x)
	case *ast.SetPwdStmt:
		return b.buildSimple(x)
	case *ast.GrantStmt:
		return b.buildSimple(x)
	case *ast.TruncateTableStmt:
		return b.buildDDL(x)
	}
	b.err = ErrUnsupportedType.Gen("Unsupported type %T", node)
	return nil
}
// detectSelectAgg reports whether the select needs aggregation: a GROUP BY
// clause, or an aggregate function anywhere in the result fields, HAVING
// clause or ORDER BY items.
func (b *planBuilder) detectSelectAgg(sel *ast.SelectStmt) bool {
	if sel.GroupBy != nil {
		return true
	}
	for _, rf := range sel.GetResultFields() {
		if ast.HasAggFlag(rf.Expr) {
			return true
		}
	}
	if sel.Having != nil && ast.HasAggFlag(sel.Having.Expr) {
		return true
	}
	if sel.OrderBy != nil {
		for _, item := range sel.OrderBy.Items {
			if ast.HasAggFlag(item.Expr) {
				return true
			}
		}
	}
	return false
}
// extractSelectAgg extracts aggregate functions from the result fields,
// HAVING clause and ORDER BY items, and returns them. Plain value
// expressions in the field list are wrapped in an AggFuncFirstRow
// aggregate so they produce one row in aggregated output.
func (b *planBuilder) extractSelectAgg(sel *ast.SelectStmt) []*ast.AggregateFuncExpr {
	extractor := &ast.AggregateFuncExtractor{AggFuncs: make([]*ast.AggregateFuncExpr, 0)}
	for _, f := range sel.GetResultFields() {
		n, ok := f.Expr.Accept(extractor)
		if !ok {
			b.err = errors.New("Failed to extract agg expr!")
			return nil
		}
		// NOTE(review): this inspects the pre-Accept f.Expr, not the
		// rewritten n — presumably equivalent for value expressions since
		// the extractor would not rewrite them; confirm.
		ve, ok := f.Expr.(*ast.ValueExpr)
		if ok && len(f.Column.Name.O) > 0 {
			agg := &ast.AggregateFuncExpr{
				F:    ast.AggFuncFirstRow,
				Args: []ast.ExprNode{ve},
			}
			extractor.AggFuncs = append(extractor.AggFuncs, agg)
			n = agg
		}
		f.Expr = n.(ast.ExprNode)
	}
	// Extract agg funcs from the having clause.
	if sel.Having != nil {
		n, ok := sel.Having.Expr.Accept(extractor)
		if !ok {
			b.err = errors.New("Failed to extract agg expr from having clause")
			return nil
		}
		sel.Having.Expr = n.(ast.ExprNode)
	}
	// Extract agg funcs from the orderby clause.
	if sel.OrderBy != nil {
		for _, item := range sel.OrderBy.Items {
			n, ok := item.Expr.Accept(extractor)
			if !ok {
				b.err = errors.New("Failed to extract agg expr from orderby clause")
				return nil
			}
			item.Expr = n.(ast.ExprNode)
			// If item is a PositionExpr, we need to rebind it:
			// a PositionExpr refers to a ResultField in the field list, and
			// after extracting AggExprs the field may have changed (see above).
			if pe, ok := item.Expr.(*ast.PositionExpr); ok {
				pe.Refer = sel.GetResultFields()[pe.N-1]
			}
		}
	}
	return extractor.AggFuncs
}
// buildSubquery walks the node and builds an executor for every subquery
// expression found (see subqueryVisitor). Failures are only logged.
func (b *planBuilder) buildSubquery(n ast.Node) {
	visitor := &subqueryVisitor{builder: b}
	if _, ok := n.Accept(visitor); !ok {
		log.Errorf("Extract subquery error")
	}
}
// buildSelect builds the plan pipeline for a SELECT statement, layering
// (bottom-up): source/aggregate, field selection, having, distinct, sort
// and limit. Aggregates are detected and extracted up front; subqueries
// are converted to expressions carrying their own plans.
func (b *planBuilder) buildSelect(sel *ast.SelectStmt) Plan {
	var aggFuncs []*ast.AggregateFuncExpr
	hasAgg := b.detectSelectAgg(sel)
	if hasAgg {
		aggFuncs = b.extractSelectAgg(sel)
	}
	// Build subqueries, converting each into an expr carrying a plan.
	b.buildSubquery(sel)
	var p Plan
	if sel.From != nil {
		p = b.buildFrom(sel)
		if b.err != nil {
			return nil
		}
		if sel.LockTp != ast.SelectLockNone {
			p = b.buildSelectLock(p, sel.LockTp)
			if b.err != nil {
				return nil
			}
		}
		if hasAgg {
			p = b.buildAggregate(p, aggFuncs, sel.GroupBy)
		}
		p = b.buildSelectFields(p, sel.GetResultFields())
		if b.err != nil {
			return nil
		}
	} else {
		// No FROM clause: aggregate (if any) has a nil source.
		if hasAgg {
			p = b.buildAggregate(p, aggFuncs, nil)
		}
		p = b.buildSelectFields(p, sel.GetResultFields())
		if b.err != nil {
			return nil
		}
	}
	if sel.Having != nil {
		p = b.buildHaving(p, sel.Having)
		if b.err != nil {
			return nil
		}
	}
	if sel.Distinct {
		p = b.buildDistinct(p)
		if b.err != nil {
			return nil
		}
	}
	// Add a Sort only if the source does not already produce the order.
	if sel.OrderBy != nil && !matchOrder(p, sel.OrderBy.Items) {
		p = b.buildSort(p, sel.OrderBy.Items)
		if b.err != nil {
			return nil
		}
	}
	if sel.Limit != nil {
		p = b.buildLimit(p, sel.Limit)
		if b.err != nil {
			return nil
		}
	}
	return p
}
// buildFrom builds the data-source plan for the FROM clause: a join plan
// when the table refs have a right side, a single-table plan otherwise.
func (b *planBuilder) buildFrom(sel *ast.SelectStmt) Plan {
	if sel.From.TableRefs.Right == nil {
		return b.buildSingleTable(sel)
	}
	return b.buildJoin(sel)
}
// buildSingleTable builds the cheapest access plan for a single-table
// FROM clause. For a sub-select source it builds the inner select plan.
// For a named table it enumerates all access methods (table scan plus one
// index scan per index) and keeps the candidate with the lowest estimated
// cost, measured on a pseudo plan that includes the query's order-by and
// limit.
func (b *planBuilder) buildSingleTable(sel *ast.SelectStmt) Plan {
	from := sel.From.TableRefs
	ts, ok := from.Left.(*ast.TableSource)
	if !ok {
		b.err = ErrUnsupportedType.Gen("Unsupported type %T", from.Left)
		return nil
	}
	var bestPlan Plan
	switch v := ts.Source.(type) {
	case *ast.TableName:
		// Handled below via access-method enumeration.
	case *ast.SelectStmt:
		bestPlan = b.buildSelect(v)
	}
	if bestPlan != nil {
		return bestPlan
	}
	tn, ok := ts.Source.(*ast.TableName)
	if !ok {
		b.err = ErrUnsupportedType.Gen("Unsupported type %T", ts.Source)
		return nil
	}
	conditions := splitWhere(sel.Where)
	path := &joinPath{table: tn, conditions: conditions}
	candidates := b.buildAllAccessMethodsPlan(path)
	var lowestCost float64
	for _, v := range candidates {
		cost := EstimateCost(b.buildPseudoSelectPlan(v, sel))
		// Fix: the original had two separate branches ("first candidate"
		// and "cheaper candidate") that assigned the same values; merged
		// into one condition.
		if bestPlan == nil || cost < lowestCost {
			bestPlan = v
			lowestCost = cost
		}
	}
	return bestPlan
}
// buildAllAccessMethodsPlan builds one candidate plan per access method:
// a full table scan, plus an index scan for every index on the table.
func (b *planBuilder) buildAllAccessMethodsPlan(path *joinPath) []Plan {
	candidates := []Plan{b.buildTableScanPlan(path)}
	for _, idx := range path.table.TableInfo.Indices {
		candidates = append(candidates, b.buildIndexScanPlan(idx, path))
	}
	return candidates
}
// buildTableScanPlan builds a TableScan candidate for the path's table.
// When the primary key is the row handle, conditions on the PK column are
// classified as access conditions (usable to restrict the scanned range);
// everything else becomes a filter condition applied per row.
func (b *planBuilder) buildTableScanPlan(path *joinPath) Plan {
	tn := path.table
	p := &TableScan{
		Table: tn.TableInfo,
	}
	// Equal condition contains a column from a previously joined table.
	p.RefAccess = len(path.eqConds) > 0
	p.SetFields(tn.GetResultFields())
	var pkName model.CIStr
	if p.Table.PKIsHandle {
		for _, colInfo := range p.Table.Columns {
			if mysql.HasPriKeyFlag(colInfo.Flag) {
				pkName = colInfo.Name
			}
		}
	}
	for _, con := range path.conditions {
		if pkName.L != "" {
			checker := conditionChecker{tableName: tn.TableInfo.Name, pkName: pkName}
			if checker.check(con) {
				p.AccessConditions = append(p.AccessConditions, con)
			} else {
				p.FilterConditions = append(p.FilterConditions, con)
			}
		} else {
			p.FilterConditions = append(p.FilterConditions, con)
		}
	}
	return p
}
// buildIndexScanPlan builds an IndexScan candidate for the given index.
// It first greedily collects equality conditions matching the index's
// column prefix (AccessEqualCount counts them), then classifies each
// remaining condition as an access condition (if it constrains the next
// index column) or a per-row filter condition.
func (b *planBuilder) buildIndexScanPlan(index *model.IndexInfo, path *joinPath) Plan {
	tn := path.table
	ip := &IndexScan{Table: tn.TableInfo, Index: index}
	ip.RefAccess = len(path.eqConds) > 0
	ip.SetFields(tn.GetResultFields())
	condMap := map[ast.ExprNode]bool{}
	for _, con := range path.conditions {
		condMap[con] = true
	}
out:
	// Build equal access conditions first.
	// Starting from the first index column: if an equal condition on it is
	// found, add it to the access conditions and proceed to the next index
	// column, until no equal condition matches the current column.
	for ip.AccessEqualCount < len(index.Columns) {
		for con := range condMap {
			binop, ok := con.(*ast.BinaryOperationExpr)
			if !ok || binop.Op != opcode.EQ {
				continue
			}
			// Normalize so the column is on the left, the constant on the right.
			if ast.IsPreEvaluable(binop.L) {
				binop.L, binop.R = binop.R, binop.L
			}
			if !ast.IsPreEvaluable(binop.R) {
				continue
			}
			cn, ok2 := binop.L.(*ast.ColumnNameExpr)
			if !ok2 || cn.Refer.Column.Name.L != index.Columns[ip.AccessEqualCount].Name.L {
				continue
			}
			ip.AccessConditions = append(ip.AccessConditions, con)
			delete(condMap, con)
			ip.AccessEqualCount++
			continue out
		}
		break
	}
	for con := range condMap {
		if ip.AccessEqualCount < len(ip.Index.Columns) {
			// Try to add a non-equal access condition for the index column
			// at position AccessEqualCount.
			checker := conditionChecker{tableName: tn.TableInfo.Name, idx: index, columnOffset: ip.AccessEqualCount}
			if checker.check(con) {
				ip.AccessConditions = append(ip.AccessConditions, con)
			} else {
				ip.FilterConditions = append(ip.FilterConditions, con)
			}
		} else {
			ip.FilterConditions = append(ip.FilterConditions, con)
		}
	}
	return ip
}
// buildPseudoSelectPlan pre-builds a more complete plan (sort + limit on
// top of the candidate) so cost estimation accounts for the work the
// candidate's ordering saves. Group-by queries are skipped because the
// aggregate destroys source ordering anyway.
func (b *planBuilder) buildPseudoSelectPlan(p Plan, sel *ast.SelectStmt) Plan {
	if sel.OrderBy == nil || sel.GroupBy != nil {
		return p
	}
	if !matchOrder(p, sel.OrderBy.Items) {
		sortPlan := &Sort{ByItems: sel.OrderBy.Items}
		sortPlan.SetSrc(p)
		p = sortPlan
	}
	if sel.Limit != nil {
		limitPlan := &Limit{Offset: sel.Limit.Offset, Count: sel.Limit.Count}
		limitPlan.SetSrc(p)
		limitPlan.SetLimit(0)
		p = limitPlan
	}
	return p
}
// buildSelectLock wraps src in a SelectLock plan carrying the lock type
// (SELECT ... FOR UPDATE / LOCK IN SHARE MODE).
func (b *planBuilder) buildSelectLock(src Plan, lock ast.SelectLockType) *SelectLock {
	p := &SelectLock{Lock: lock}
	p.SetSrc(src)
	p.SetFields(src.Fields())
	return p
}
// buildSelectFields wraps src in a SelectFields plan producing the given
// result fields.
func (b *planBuilder) buildSelectFields(src Plan, fields []*ast.ResultField) Plan {
	p := &SelectFields{}
	p.SetSrc(src)
	p.SetFields(fields)
	return p
}
// buildAggregate wraps src in an Aggregate plan with the extracted
// aggregate functions and optional GROUP BY items. src may be nil for a
// select without a FROM clause.
func (b *planBuilder) buildAggregate(src Plan, aggFuncs []*ast.AggregateFuncExpr, groupby *ast.GroupByClause) Plan {
	agg := &Aggregate{AggFuncs: aggFuncs}
	agg.SetSrc(src)
	if src != nil {
		agg.SetFields(src.Fields())
	}
	if groupby != nil {
		agg.GroupByItems = groupby.Items
	}
	return agg
}
// buildHaving wraps src in a Having plan, splitting the HAVING expression
// into a list of AND-ed conditions.
func (b *planBuilder) buildHaving(src Plan, having *ast.HavingClause) Plan {
	h := &Having{Conditions: splitWhere(having.Expr)}
	h.SetSrc(src)
	h.SetFields(src.Fields())
	return h
}
// buildSort wraps src in a Sort plan ordered by the given items.
func (b *planBuilder) buildSort(src Plan, byItems []*ast.ByItem) Plan {
	s := &Sort{ByItems: byItems}
	s.SetSrc(src)
	s.SetFields(src.Fields())
	return s
}
// buildLimit wraps src in a Limit plan with the clause's offset and count.
func (b *planBuilder) buildLimit(src Plan, limit *ast.Limit) Plan {
	l := &Limit{Offset: limit.Offset, Count: limit.Count}
	l.SetSrc(src)
	l.SetFields(src.Fields())
	return l
}
// buildPrepare builds a Prepare plan. The SQL text comes either directly
// from the statement or from a user variable (PREPARE s FROM @v).
func (b *planBuilder) buildPrepare(x *ast.PrepareStmt) Plan {
	p := &Prepare{Name: x.Name}
	if x.SQLVar == nil {
		p.SQLText = x.SQLText
	} else {
		p.SQLText, _ = x.SQLVar.GetValue().(string)
	}
	return p
}
// buildAdmin builds the plan for an ADMIN statement (CHECK TABLE or
// SHOW DDL); unsupported admin statements set b.err and return nil.
func (b *planBuilder) buildAdmin(as *ast.AdminStmt) Plan {
	switch as.Tp {
	case ast.AdminCheckTable:
		return &CheckTable{Tables: as.Tables}
	case ast.AdminShowDDL:
		p := &ShowDDL{}
		p.SetFields(buildShowDDLFields())
		return p
	default:
		b.err = ErrUnsupportedType.Gen("Unsupported type %T", as)
		return nil
	}
}
// buildShowDDLFields returns the result schema of ADMIN SHOW DDL.
func buildShowDDLFields() []*ast.ResultField {
	return []*ast.ResultField{
		buildResultField("", "SCHEMA_VER", mysql.TypeLonglong, 4),
		buildResultField("", "OWNER", mysql.TypeVarchar, 64),
		buildResultField("", "JOB", mysql.TypeVarchar, 128),
		buildResultField("", "BG_SCHEMA_VER", mysql.TypeLonglong, 4),
		buildResultField("", "BG_OWNER", mysql.TypeVarchar, 64),
		buildResultField("", "BG_JOB", mysql.TypeVarchar, 128),
	}
}
// buildResultField constructs a synthetic ResultField (for generated
// result sets such as ADMIN SHOW DDL). String-like types get the default
// charset/collation; everything else is binary and unsigned.
func buildResultField(tableName, name string, tp byte, size int) *ast.ResultField {
	cs, cl := charset.CharsetBin, charset.CharsetBin
	flag := mysql.UnsignedFlag
	if tp == mysql.TypeVarchar || tp == mysql.TypeBlob {
		cs, cl = mysql.DefaultCharset, mysql.DefaultCollationName
		flag = 0
	}
	fieldType := types.FieldType{
		Charset: cs,
		Collate: cl,
		Tp:      tp,
		Flen:    size,
		Flag:    uint(flag),
	}
	colInfo := &model.ColumnInfo{
		Name:      model.NewCIStr(name),
		FieldType: fieldType,
	}
	expr := &ast.ValueExpr{}
	expr.SetType(&fieldType)
	return &ast.ResultField{
		Column:       colInfo,
		ColumnAsName: colInfo.Name,
		TableAsName:  model.NewCIStr(tableName),
		DBName:       model.NewCIStr(infoschema.Name),
		Expr:         expr,
	}
}
// matchOrder checks if the plan already produces rows in the ordering
// described by items, so buildSelect/buildPseudoSelectPlan can skip adding
// a Sort. An IndexScan matches when the ascending items are a prefix of
// the index columns; a TableScan matches a single ascending item on the
// integer primary-key handle. Pass-through plans delegate to their source.
func matchOrder(p Plan, items []*ast.ByItem) bool {
	switch x := p.(type) {
	case *Aggregate:
		return false
	case *IndexScan:
		if len(items) > len(x.Index.Columns) {
			return false
		}
		for i, item := range items {
			if item.Desc {
				return false
			}
			var rf *ast.ResultField
			switch y := item.Expr.(type) {
			case *ast.ColumnNameExpr:
				rf = y.Refer
			case *ast.PositionExpr:
				rf = y.Refer
			default:
				return false
			}
			if rf.Table.Name.L != x.Table.Name.L || rf.Column.Name.L != x.Index.Columns[i].Name.L {
				return false
			}
		}
		return true
	case *TableScan:
		if len(items) != 1 || !x.Table.PKIsHandle {
			return false
		}
		if items[0].Desc {
			return false
		}
		var refer *ast.ResultField
		switch x := items[0].Expr.(type) {
		case *ast.ColumnNameExpr:
			refer = x.Refer
		case *ast.PositionExpr:
			refer = x.Refer
		default:
			return false
		}
		if mysql.HasPriKeyFlag(refer.Column.Flag) {
			return true
		}
		return false
	case *JoinOuter:
		return false
	case *JoinInner:
		return false
	case *Sort:
		// Sort plans are not checked here: there should only be one Sort
		// in a plan tree, added after this check.
		return false
	case WithSrcPlan:
		// Pass-through plans preserve their source's ordering.
		return matchOrder(x.Src(), items)
	}
	return true
}
// splitWhere splits a WHERE expression into a list of AND-ed conditions,
// recursing through AND operators and parentheses. A nil input yields a
// nil list.
func splitWhere(where ast.ExprNode) []ast.ExprNode {
	switch x := where.(type) {
	case nil:
		return nil
	case *ast.BinaryOperationExpr:
		if x.Op != opcode.AndAnd {
			return []ast.ExprNode{x}
		}
		return append(splitWhere(x.L), splitWhere(x.R)...)
	case *ast.ParenthesesExpr:
		return splitWhere(x.Expr)
	default:
		return []ast.ExprNode{where}
	}
}
// SubQueryBuilder is the interface for building SubQuery executor.
type SubQueryBuilder interface {
	Build(p Plan) ast.SubqueryExec
}

// subqueryVisitor visits AST and handles SubqueryExpr.
// For every subquery expression it builds a plan and attaches the resulting
// executor to the expression node in place.
type subqueryVisitor struct {
	builder *planBuilder
}

// Enter implements ast.Visitor. Subquery expressions get their executor
// built here; join subtrees are skipped because buildJoin handles them.
func (se *subqueryVisitor) Enter(in ast.Node) (out ast.Node, skipChildren bool) {
	switch x := in.(type) {
	case *ast.SubqueryExpr:
		p := se.builder.build(x.Query)
		// The expr pointer is copied into ResultField when running the name
		// resolver, so we cannot just replace the expr node in the AST; we
		// have to put the SubQuery executor into the existing expression.
		// See: optimizer.nameResolver.createResultFields()
		x.SubqueryExec = se.builder.sb.Build(p)
		return in, true
	case *ast.Join:
		// SubSelect in from clause will be handled in buildJoin().
		return in, true
	}
	return in, false
}

// Leave implements ast.Visitor and performs no post-processing.
func (se *subqueryVisitor) Leave(in ast.Node) (out ast.Node, ok bool) {
	return in, true
}
// buildUnion builds a Union plan over the statement's SELECTs and applies
// DISTINCT, ORDER BY and LIMIT on top when present. It also merges each
// SELECT's field lengths/types into the union's result fields.
func (b *planBuilder) buildUnion(union *ast.UnionStmt) Plan {
	sels := make([]Plan, len(union.SelectList.Selects))
	for i, sel := range union.SelectList.Selects {
		sels[i] = b.buildSelect(sel)
	}
	var p Plan
	p = &Union{
		Selects: sels,
	}
	unionFields := union.GetResultFields()
	for _, sel := range sels {
		for i, f := range sel.Fields() {
			// All operand SELECTs must produce the same number of columns.
			if i == len(unionFields) {
				b.err = errors.New("The used SELECT statements have a different number of columns")
				return nil
			}
			uField := unionFields[i]
			/*
			 * The lengths of the columns in the UNION result take into account the values retrieved by all of the SELECT statements
			 * SELECT REPEAT('a',1) UNION SELECT REPEAT('b',10);
			 * +---------------+
			 * | REPEAT('a',1) |
			 * +---------------+
			 * | a             |
			 * | bbbbbbbbbb    |
			 * +---------------+
			 */
			if f.Column.Flen > uField.Column.Flen {
				uField.Column.Flen = f.Column.Flen
			}
			// For SELECT NULL UNION SELECT "abc", we should not convert "abc" to nil,
			// and the result field type should be VARCHAR.
			if uField.Column.Tp == 0 || uField.Column.Tp == mysql.TypeNull {
				uField.Column.Tp = f.Column.Tp
			}
		}
	}
	// Propagate the merged column types onto the field expressions.
	for _, v := range unionFields {
		v.Expr.SetType(&v.Column.FieldType)
	}
	p.SetFields(unionFields)
	if union.Distinct {
		p = b.buildDistinct(p)
	}
	if union.OrderBy != nil {
		p = b.buildSort(p, union.OrderBy.Items)
	}
	if union.Limit != nil {
		p = b.buildLimit(p, union.Limit)
	}
	return p
}
// buildDistinct wraps src with a Distinct plan that exposes src's fields.
func (b *planBuilder) buildDistinct(src Plan) Plan {
	dist := new(Distinct)
	dist.SetFields(src.Fields())
	dist.src = src
	return dist
}
// buildUpdate builds an Update plan. The table references, WHERE, ORDER BY
// and LIMIT clauses are planned as a pseudo SELECT that feeds the update.
func (b *planBuilder) buildUpdate(update *ast.UpdateStmt) Plan {
	sel := &ast.SelectStmt{From: update.TableRefs, Where: update.Where, OrderBy: update.Order, Limit: update.Limit}
	p := b.buildFrom(sel)
	// Only add an explicit sort if the chosen access path does not already
	// produce the requested order.
	if sel.OrderBy != nil && !matchOrder(p, sel.OrderBy.Items) {
		p = b.buildSort(p, sel.OrderBy.Items)
		if b.err != nil {
			return nil
		}
	}
	if sel.Limit != nil {
		p = b.buildLimit(p, sel.Limit)
		if b.err != nil {
			return nil
		}
	}
	orderedList := b.buildUpdateLists(update.List, p.Fields())
	if b.err != nil {
		return nil
	}
	return &Update{OrderedList: orderedList, SelectPlan: p}
}

// buildUpdateLists aligns the SET assignments with the select plan's result
// fields: the returned slice has one slot per field, holding the assignment
// for that column or nil when the column is not assigned.
func (b *planBuilder) buildUpdateLists(list []*ast.Assignment, fields []*ast.ResultField) []*ast.Assignment {
	newList := make([]*ast.Assignment, len(fields))
	for _, assign := range list {
		offset, err := columnOffsetInFields(assign.Column, fields)
		if err != nil {
			b.err = errors.Trace(err)
			return nil
		}
		newList[offset] = assign
	}
	return newList
}
// buildDelete builds a Delete plan. Like buildUpdate, the row set to delete
// is described by a pseudo SELECT over the statement's clauses.
func (b *planBuilder) buildDelete(del *ast.DeleteStmt) Plan {
	sel := &ast.SelectStmt{From: del.TableRefs, Where: del.Where, OrderBy: del.Order, Limit: del.Limit}
	p := b.buildFrom(sel)
	// Only add an explicit sort if the access path is not already ordered.
	if sel.OrderBy != nil && !matchOrder(p, sel.OrderBy.Items) {
		p = b.buildSort(p, sel.OrderBy.Items)
		if b.err != nil {
			return nil
		}
	}
	if sel.Limit != nil {
		p = b.buildLimit(p, sel.Limit)
		if b.err != nil {
			return nil
		}
	}
	// For multi-table delete, Tables lists the tables rows are removed from.
	var tables []*ast.TableName
	if del.Tables != nil {
		tables = del.Tables.Tables
	}
	return &Delete{
		Tables:       tables,
		IsMultiTable: del.IsMultiTable,
		SelectPlan:   p,
	}
}
// columnOffsetInFields returns the index in fields of the column cn refers
// to, honoring table and column aliases. Without a table qualifier an
// ambiguous name is an error; with a qualifier the loop does not break, so
// the last matching field wins. An error is returned when nothing matches.
func columnOffsetInFields(cn *ast.ColumnName, fields []*ast.ResultField) (int, error) {
	offset := -1
	tableNameL := cn.Table.L
	columnNameL := cn.Name.L
	if tableNameL != "" {
		for i, f := range fields {
			// Check table name: the alias shadows the real table name.
			if f.TableAsName.L != "" {
				if tableNameL != f.TableAsName.L {
					continue
				}
			} else {
				if tableNameL != f.Table.Name.L {
					continue
				}
			}
			// Check column name: the alias shadows the real column name.
			if f.ColumnAsName.L != "" {
				if columnNameL != f.ColumnAsName.L {
					continue
				}
			} else {
				if columnNameL != f.Column.Name.L {
					continue
				}
			}
			offset = i
		}
	} else {
		// No table qualifier: the name must match exactly one field, either
		// by its alias or, when unaliased, by its real column name.
		for i, f := range fields {
			matchAsName := f.ColumnAsName.L != "" && f.ColumnAsName.L == columnNameL
			matchColumnName := f.ColumnAsName.L == "" && f.Column.Name.L == columnNameL
			if matchAsName || matchColumnName {
				if offset != -1 {
					return -1, errors.Errorf("column %s is ambiguous.", cn.Name.O)
				}
				offset = i
			}
		}
	}
	if offset == -1 {
		return -1, errors.Errorf("column %s not found", cn.Name.O)
	}
	return offset, nil
}
// buildShow builds a Show plan; a Filter is stacked on top when the
// statement carries a LIKE pattern or a WHERE clause.
func (b *planBuilder) buildShow(show *ast.ShowStmt) Plan {
	var p Plan
	p = &Show{
		Tp:     show.Tp,
		DBName: show.DBName,
		Table:  show.Table,
		Column: show.Column,
		Flag:   show.Flag,
		Full:   show.Full,
		User:   show.User,
	}
	p.SetFields(show.GetResultFields())
	var conditions []ast.ExprNode
	if show.Pattern != nil {
		conditions = append(conditions, show.Pattern)
	}
	if show.Where != nil {
		conditions = append(conditions, show.Where)
	}
	if len(conditions) != 0 {
		filter := &Filter{Conditions: conditions}
		filter.SetSrc(p)
		p = filter
	}
	return p
}

// buildSimple wraps a statement that needs no planning into a Simple plan.
func (b *planBuilder) buildSimple(node ast.StmtNode) Plan {
	return &Simple{Statement: node}
}
// buildInsert builds an Insert plan; for INSERT ... SELECT the source query
// is planned and attached as SelectPlan.
func (b *planBuilder) buildInsert(insert *ast.InsertStmt) Plan {
	insertPlan := &Insert{
		Table:       insert.Table,
		Columns:     insert.Columns,
		Lists:       insert.Lists,
		Setlist:     insert.Setlist,
		OnDuplicate: insert.OnDuplicate,
		IsReplace:   insert.IsReplace,
		Priority:    insert.Priority,
	}
	if insert.Select != nil {
		insertPlan.SelectPlan = b.build(insert.Select)
		if b.err != nil {
			return nil
		}
	}
	return insertPlan
}

// buildDDL wraps a DDL statement into a DDL plan; execution happens later.
func (b *planBuilder) buildDDL(node ast.DDLNode) Plan {
	return &DDL{Statement: node}
}
// buildExplain builds an Explain plan. EXPLAIN of a SHOW statement simply
// executes the SHOW itself; any other statement is planned and wrapped.
func (b *planBuilder) buildExplain(explain *ast.ExplainStmt) Plan {
	if show, ok := explain.Stmt.(*ast.ShowStmt); ok {
		return b.buildShow(show)
	}
	targetPlan := b.build(explain.Stmt)
	if b.err != nil {
		return nil
	}
	p := &Explain{StmtPlan: targetPlan}
	p.SetFields(buildExplainFields())
	return p
}

// buildExplainFields returns the fixed EXPLAIN output schema.
// See: https://dev.mysql.com/doc/refman/5.7/en/explain-output.html
func buildExplainFields() []*ast.ResultField {
	rfs := make([]*ast.ResultField, 0, 10)
	rfs = append(rfs, buildResultField("", "id", mysql.TypeLonglong, 4))
	rfs = append(rfs, buildResultField("", "select_type", mysql.TypeVarchar, 128))
	rfs = append(rfs, buildResultField("", "table", mysql.TypeVarchar, 128))
	rfs = append(rfs, buildResultField("", "type", mysql.TypeVarchar, 128))
	rfs = append(rfs, buildResultField("", "possible_keys", mysql.TypeVarchar, 128))
	rfs = append(rfs, buildResultField("", "key", mysql.TypeVarchar, 128))
	rfs = append(rfs, buildResultField("", "key_len", mysql.TypeVarchar, 128))
	rfs = append(rfs, buildResultField("", "ref", mysql.TypeVarchar, 128))
	rfs = append(rfs, buildResultField("", "rows", mysql.TypeVarchar, 128))
	rfs = append(rfs, buildResultField("", "Extra", mysql.TypeVarchar, 128))
	return rfs
}

View file

@ -0,0 +1,795 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package plan
import (
"strings"
"github.com/ngaut/log"
"github.com/pingcap/tidb/ast"
"github.com/pingcap/tidb/model"
"github.com/pingcap/tidb/mysql"
"github.com/pingcap/tidb/parser/opcode"
)
// equalCond represents an equivalent join condition, like "t1.c1 = t2.c1".
// leftIdx/rightIdx cache whether each side can be accessed via an index.
type equalCond struct {
	left     *ast.ResultField
	leftIdx  bool
	right    *ast.ResultField
	rightIdx bool
}

// newEqualCond creates an equalCond and caches index availability for both sides.
func newEqualCond(left, right *ast.ResultField) *equalCond {
	eq := &equalCond{left: left, right: right}
	eq.leftIdx = equivHasIndex(eq.left)
	eq.rightIdx = equivHasIndex(eq.right)
	return eq
}

// equivHasIndex reports whether rf can be looked up through an index: either
// it is the integer primary key used as the row handle, or it is the single
// column of some index on its table.
func equivHasIndex(rf *ast.ResultField) bool {
	if rf.Table.PKIsHandle && mysql.HasPriKeyFlag(rf.Column.Flag) {
		return true
	}
	for _, idx := range rf.Table.Indices {
		if len(idx.Columns) == 1 && idx.Columns[0].Name.L == rf.Column.Name.L {
			return true
		}
	}
	return false
}
// joinPath can be a single table path, inner join or outer join.
// Exactly one of the variant field groups below is populated per node.
type joinPath struct {
	// for table path
	table           *ast.TableName
	totalFilterRate float64

	// for subquery
	subquery ast.Node
	asName   model.CIStr

	neighborCount int // number of neighbor table.
	idxDepCount   int // number of paths this table depends on.
	ordering      *ast.ResultField
	orderingDesc  bool

	// for outer join path
	outer     *joinPath
	inner     *joinPath
	rightJoin bool

	// for inner join path
	inners []*joinPath

	// common
	parent     *joinPath
	filterRate float64
	conditions []ast.ExprNode
	eqConds    []*equalCond
	// The joinPaths that this path's index depends on.
	idxDeps   map[*joinPath]bool
	neighbors map[*joinPath]bool
}

// newTablePath creates a new table join path.
func newTablePath(table *ast.TableName) *joinPath {
	return &joinPath{
		table:      table,
		filterRate: rateFull,
	}
}

// newSubqueryPath creates a new subquery join path identified by its alias.
func newSubqueryPath(node ast.Node, asName model.CIStr) *joinPath {
	return &joinPath{
		subquery:   node,
		asName:     asName,
		filterRate: rateFull,
	}
}
// newOuterJoinPath creates a new outer join path and pushes the ON condition
// down to the children paths. It returns the new outer join path.
func newOuterJoinPath(isRightJoin bool, leftPath, rightPath *joinPath, on *ast.OnCondition) *joinPath {
	outerJoin := &joinPath{rightJoin: isRightJoin, outer: leftPath, inner: rightPath, filterRate: 1}
	leftPath.parent = outerJoin
	rightPath.parent = outerJoin
	// For a right join the driving (outer) side is the right operand.
	if isRightJoin {
		outerJoin.outer, outerJoin.inner = outerJoin.inner, outerJoin.outer
	}
	if on != nil {
		// ON conditions of an outer join may only filter the inner side;
		// the outer side is available to them but must not be filtered.
		conditions := splitWhere(on.Expr)
		availablePaths := []*joinPath{outerJoin.outer}
		for _, con := range conditions {
			if !outerJoin.inner.attachCondition(con, availablePaths) {
				log.Errorf("Inner failed to attach ON condition")
			}
		}
	}
	return outerJoin
}
// newInnerJoinPath creates inner join path and pushes on condition to children paths.
// If left path or right path is also an inner join, it is merged so that
// inner joins stay a single flat list of children.
func newInnerJoinPath(leftPath, rightPath *joinPath, on *ast.OnCondition) *joinPath {
	var innerJoin *joinPath
	// Reuse the left path when it is already an inner join; otherwise start
	// a fresh inner join wrapping the left path.
	if len(leftPath.inners) != 0 {
		innerJoin = leftPath
	} else {
		innerJoin = &joinPath{filterRate: leftPath.filterRate}
		innerJoin.inners = append(innerJoin.inners, leftPath)
	}
	// Merge (or append) the right path the same way.
	if len(rightPath.inners) != 0 {
		innerJoin.inners = append(innerJoin.inners, rightPath.inners...)
		innerJoin.conditions = append(innerJoin.conditions, rightPath.conditions...)
	} else {
		innerJoin.inners = append(innerJoin.inners, rightPath)
	}
	innerJoin.filterRate *= rightPath.filterRate
	for _, in := range innerJoin.inners {
		in.parent = innerJoin
	}
	if on != nil {
		// Inner-join ON conditions behave like WHERE conditions: push each
		// as deep as possible and keep the rest on the join node itself.
		conditions := splitWhere(on.Expr)
		for _, con := range conditions {
			if !innerJoin.attachCondition(con, nil) {
				innerJoin.conditions = append(innerJoin.conditions, con)
			}
		}
	}
	return innerJoin
}
// resultFields returns the result fields produced by this path, in the order
// the joined tables expose them.
func (p *joinPath) resultFields() []*ast.ResultField {
	switch {
	case p.table != nil:
		// Single table: delegate to the table's own result fields.
		return p.table.GetResultFields()
	case p.outer != nil:
		// Outer join: a right join lists the inner side first because it is
		// the syntactic left operand.
		first, second := p.outer, p.inner
		if p.rightJoin {
			first, second = second, first
		}
		return append(first.resultFields(), second.resultFields()...)
	default:
		// Inner join: concatenate all inner children in order.
		var fields []*ast.ResultField
		for _, in := range p.inners {
			fields = append(fields, in.resultFields()...)
		}
		return fields
	}
}
// attachCondition tries to attach a condition as deep as possible.
// availablePaths are paths joined before this path; their columns may be
// referenced by the condition even though they are outside this path.
// It returns false when the condition references columns from neither this
// path nor availablePaths.
func (p *joinPath) attachCondition(condition ast.ExprNode, availablePaths []*joinPath) (attached bool) {
	filterRate := guesstimateFilterRate(condition)
	// table or subquery leaf: the condition must be fully resolvable here.
	if p.table != nil || p.subquery != nil {
		attacher := conditionAttachChecker{targetPath: p, availablePaths: availablePaths}
		condition.Accept(&attacher)
		if attacher.invalid {
			return false
		}
		p.conditions = append(p.conditions, condition)
		p.filterRate *= filterRate
		return true
	}
	// inner join: try each child first, else keep it on the join itself.
	if len(p.inners) > 0 {
		for _, in := range p.inners {
			if in.attachCondition(condition, availablePaths) {
				p.filterRate *= filterRate
				return true
			}
		}
		attacher := &conditionAttachChecker{targetPath: p, availablePaths: availablePaths}
		condition.Accept(attacher)
		if attacher.invalid {
			return false
		}
		p.conditions = append(p.conditions, condition)
		p.filterRate *= filterRate
		return true
	}
	// outer join: the inner side may additionally reference the outer side.
	if p.outer.attachCondition(condition, availablePaths) {
		p.filterRate *= filterRate
		return true
	}
	if p.inner.attachCondition(condition, append(availablePaths, p.outer)) {
		p.filterRate *= filterRate
		return true
	}
	return false
}
// containsTable reports whether table occurs anywhere inside this path.
func (p *joinPath) containsTable(table *ast.TableName) bool {
	switch {
	case p.table != nil:
		// Leaf table path: identity comparison on the AST node.
		return p.table == table
	case p.subquery != nil:
		// Subqueries are matched by their alias name.
		return p.asName.L == table.Name.L
	case len(p.inners) != 0:
		for _, child := range p.inners {
			if child.containsTable(table) {
				return true
			}
		}
		return false
	default:
		return p.outer.containsTable(table) || p.inner.containsTable(table)
	}
}
// attachEqualCond tries to attach an equalCond deep into a table path if applicable.
// On success the condition is normalized so that its left side belongs to
// the table it was attached to.
func (p *joinPath) attachEqualCond(eqCon *equalCond, availablePaths []*joinPath) (attached bool) {
	// table: attachable only when the other side comes from an already
	// available (previously joined) path.
	if p.table != nil {
		var prevTable *ast.TableName
		var needSwap bool
		if eqCon.left.TableName == p.table {
			prevTable = eqCon.right.TableName
		} else if eqCon.right.TableName == p.table {
			prevTable = eqCon.left.TableName
			needSwap = true
		}
		if prevTable != nil {
			for _, prev := range availablePaths {
				if prev.containsTable(prevTable) {
					if needSwap {
						eqCon.left, eqCon.right = eqCon.right, eqCon.left
						eqCon.leftIdx, eqCon.rightIdx = eqCon.rightIdx, eqCon.leftIdx
					}
					p.eqConds = append(p.eqConds, eqCon)
					return true
				}
			}
		}
		return false
	}
	// inner join: delegate to the first child that accepts it.
	if len(p.inners) > 0 {
		for _, in := range p.inners {
			if in.attachEqualCond(eqCon, availablePaths) {
				p.filterRate *= rateEqual
				return true
			}
		}
		return false
	}
	// outer join: as in attachCondition, the inner side may also reference
	// the outer side's columns.
	if p.outer.attachEqualCond(eqCon, availablePaths) {
		p.filterRate *= rateEqual
		return true
	}
	if p.inner.attachEqualCond(eqCon, append(availablePaths, p.outer)) {
		p.filterRate *= rateEqual
		return true
	}
	return false
}
// extractEqualConditon splits p.conditions into equality join conditions
// (stored in p.eqConds) and the remaining filters, recursing into children.
// NOTE(review): "Conditon" is a typo in the original method name; kept
// because the method is called by name elsewhere in this package.
func (p *joinPath) extractEqualConditon() {
	var equivs []*equalCond
	var cons []ast.ExprNode
	for _, con := range p.conditions {
		eq := equivFromExpr(con)
		if eq != nil {
			equivs = append(equivs, eq)
			if p.table != nil {
				// Normalize so the left side refers to this table.
				if eq.right.TableName == p.table {
					eq.left, eq.right = eq.right, eq.left
					eq.leftIdx, eq.rightIdx = eq.rightIdx, eq.leftIdx
				}
			}
		} else {
			cons = append(cons, con)
		}
	}
	p.eqConds = equivs
	p.conditions = cons
	for _, in := range p.inners {
		in.extractEqualConditon()
	}
	if p.outer != nil {
		p.outer.extractEqualConditon()
		p.inner.extractEqualConditon()
	}
}
// addIndexDependency records, for every equality condition between two inner
// children, which child's index access depends on the other being joined
// first — or that both are mutual "neighbors" when both sides are indexed.
// The resulting dependency graph drives the join-order search.
func (p *joinPath) addIndexDependency() {
	if p.outer != nil {
		p.outer.addIndexDependency()
		p.inner.addIndexDependency()
		return
	}
	if p.table != nil {
		// Leaf table paths have no children to analyze.
		return
	}
	for _, eq := range p.eqConds {
		if !eq.leftIdx && !eq.rightIdx {
			continue
		}
		pathLeft := p.findInnerContains(eq.left.TableName)
		if pathLeft == nil {
			continue
		}
		pathRight := p.findInnerContains(eq.right.TableName)
		if pathRight == nil {
			continue
		}
		if eq.leftIdx && eq.rightIdx {
			// Either side could drive an index join: mark them neighbors.
			pathLeft.addNeighbor(pathRight)
			pathRight.addNeighbor(pathLeft)
		} else if eq.leftIdx {
			if !pathLeft.hasOuterIdxEqualCond() {
				pathLeft.addIndexDep(pathRight)
			}
		} else if eq.rightIdx {
			if !pathRight.hasOuterIdxEqualCond() {
				pathRight.addIndexDep(pathLeft)
			}
		}
	}
	for _, in := range p.inners {
		// Break circular dependencies before recursing into the child.
		in.removeIndexDepCycle(in)
		in.addIndexDependency()
	}
}
// hasOuterIdxEqualCond reports whether this path already has an attached
// equality condition whose indexed side belongs to this path (eqConds on a
// table path are normalized left = this table), meaning it can be
// index-joined without further dependencies.
func (p *joinPath) hasOuterIdxEqualCond() bool {
	if p.table != nil {
		for _, eq := range p.eqConds {
			if eq.leftIdx {
				return true
			}
		}
		return false
	}
	if p.outer != nil {
		return p.outer.hasOuterIdxEqualCond()
	}
	for _, in := range p.inners {
		if in.hasOuterIdxEqualCond() {
			return true
		}
	}
	return false
}

// findInnerContains returns the inner child path containing table, or nil.
func (p *joinPath) findInnerContains(table *ast.TableName) *joinPath {
	for _, in := range p.inners {
		if in.containsTable(table) {
			return in
		}
	}
	return nil
}
// addNeighbor records a path sharing a mutually-indexed equality condition
// with p. neighborCount is kept separately because entries are later deleted
// from the map as paths get ordered.
func (p *joinPath) addNeighbor(neighbor *joinPath) {
	if p.neighbors == nil {
		p.neighbors = map[*joinPath]bool{}
	}
	p.neighbors[neighbor] = true
	p.neighborCount++
}

// addIndexDep records that p's index access depends on dep being joined first.
func (p *joinPath) addIndexDep(dep *joinPath) {
	if p.idxDeps == nil {
		p.idxDeps = map[*joinPath]bool{}
	}
	p.idxDeps[dep] = true
	p.idxDepCount++
}
// removeIndexDepCycle removes origin from the transitive index-dependency
// set of p, breaking cycles that would otherwise leave no joinable candidate.
func (p *joinPath) removeIndexDepCycle(origin *joinPath) {
	if p.idxDeps == nil {
		return
	}
	for dep := range p.idxDeps {
		if dep == origin {
			delete(p.idxDeps, origin)
			continue
		}
		dep.removeIndexDepCycle(origin)
	}
}

// score returns the ordering score of the path: the more selective the
// filters (the smaller filterRate), the higher the score.
// NOTE(review): assumes filterRate is never zero — confirm upstream.
func (p *joinPath) score() float64 {
	return 1 / p.filterRate
}
// String renders the join path tree for debugging: a bare table name for a
// leaf, "outer{...}" for outer joins and "inner{...}" for inner joins.
func (p *joinPath) String() string {
	if p.table != nil {
		return p.table.TableInfo.Name.L
	}
	if p.outer != nil {
		return "outer{" + p.outer.String() + "," + p.inner.String() + "}"
	}
	names := make([]string, 0, len(p.inners))
	for _, child := range p.inners {
		names = append(names, child.String())
	}
	return "inner{" + strings.Join(names, ",") + "}"
}
// optimizeJoinOrder reorders the inner children greedily: each step picks the
// best joinable candidate (index joins preferred), then re-attaches
// conditions that became attachable once that path joined the available set.
func (p *joinPath) optimizeJoinOrder(availablePaths []*joinPath) {
	if p.table != nil {
		return
	}
	if p.outer != nil {
		p.outer.optimizeJoinOrder(availablePaths)
		// The inner side is joined after the outer side, so the outer side
		// counts as available for it.
		p.inner.optimizeJoinOrder(append(availablePaths, p.outer))
		return
	}
	var ordered []*joinPath
	pathMap := map[*joinPath]bool{}
	for _, in := range p.inners {
		pathMap[in] = true
	}
	for len(pathMap) > 0 {
		next := p.nextPath(pathMap, availablePaths)
		next.optimizeJoinOrder(availablePaths)
		ordered = append(ordered, next)
		delete(pathMap, next)
		availablePaths = append(availablePaths, next)
		// The chosen path no longer blocks the remaining candidates.
		for path := range pathMap {
			if path.idxDeps != nil {
				delete(path.idxDeps, next)
			}
			if path.neighbors != nil {
				delete(path.neighbors, next)
			}
		}
		p.reattach(pathMap, availablePaths)
	}
	p.inners = ordered
}
// reattach is called by an inner joinPath to retry attaching its remaining
// conditions and equality conditions to the not-yet-ordered paths after a
// new path has been added to availablePaths. Conditions that still do not
// attach stay on the join itself.
func (p *joinPath) reattach(pathMap map[*joinPath]bool, availablePaths []*joinPath) {
	if len(p.conditions) != 0 {
		remainedConds := make([]ast.ExprNode, 0, len(p.conditions))
		for _, con := range p.conditions {
			var attached bool
			for path := range pathMap {
				if path.attachCondition(con, availablePaths) {
					attached = true
					break
				}
			}
			if !attached {
				remainedConds = append(remainedConds, con)
			}
		}
		p.conditions = remainedConds
	}
	if len(p.eqConds) != 0 {
		remainedEqConds := make([]*equalCond, 0, len(p.eqConds))
		for _, eq := range p.eqConds {
			var attached bool
			for path := range pathMap {
				if path.attachEqualCond(eq, availablePaths) {
					attached = true
					break
				}
			}
			if !attached {
				remainedEqConds = append(remainedEqConds, eq)
			}
		}
		p.eqConds = remainedEqConds
	}
}
// nextPath picks the next inner path to join: prefer an index-joinable
// candidate, else the best-scoring one. When no candidate is free of index
// dependencies (a cycle survived), it falls back to an arbitrary remaining
// path after logging the state.
func (p *joinPath) nextPath(pathMap map[*joinPath]bool, availablePaths []*joinPath) *joinPath {
	cans := p.candidates(pathMap)
	if len(cans) == 0 {
		var v *joinPath
		for v = range pathMap {
			log.Errorf("index dep %v, prevs %v\n", v.idxDeps, len(availablePaths))
		}
		return v
	}
	indexPath := p.nextIndexPath(cans)
	if indexPath != nil {
		return indexPath
	}
	return p.pickPath(cans)
}

// candidates returns the paths whose index dependencies are all satisfied.
func (p *joinPath) candidates(pathMap map[*joinPath]bool) []*joinPath {
	var cans []*joinPath
	for t := range pathMap {
		if len(t.idxDeps) > 0 {
			continue
		}
		cans = append(cans, t)
	}
	return cans
}
// nextIndexPath returns the highest-scoring candidate that can be joined
// using an index, or nil when no candidate qualifies.
func (p *joinPath) nextIndexPath(candidates []*joinPath) *joinPath {
	var best *joinPath
	for _, can := range candidates {
		// Since we may not have equal conditions attached on the path, we
		// need to check neighborCount and idxDepCount to see if this path
		// can be joined with index.
		neighborIsAvailable := len(can.neighbors) < can.neighborCount
		idxDepIsAvailable := can.idxDepCount > 0
		if !can.hasOuterIdxEqualCond() && !neighborIsAvailable && !idxDepIsAvailable {
			continue
		}
		if best == nil || can.score() > best.score() {
			best = can
		}
	}
	return best
}

// pickPath returns the candidate with the highest score.
func (p *joinPath) pickPath(candidates []*joinPath) *joinPath {
	var best *joinPath
	for _, candidate := range candidates {
		if best == nil || candidate.score() > best.score() {
			best = candidate
		}
	}
	return best
}
// conditionAttachChecker checks if an expression is valid to
// attach to a path. attach is valid only if all the referenced tables in the
// expression are available: either the target path itself or one of the
// previously joined paths.
type conditionAttachChecker struct {
	targetPath     *joinPath
	availablePaths []*joinPath
	invalid        bool
}

// Enter implements ast.Visitor. It marks the check invalid when a column
// reference belongs to a table outside targetPath and availablePaths.
func (c *conditionAttachChecker) Enter(in ast.Node) (ast.Node, bool) {
	switch x := in.(type) {
	case *ast.ColumnNameExpr:
		table := x.Refer.TableName
		if c.targetPath.containsTable(table) {
			return in, false
		}
		// Assume invalid until the table is found among available paths.
		c.invalid = true
		for _, path := range c.availablePaths {
			if path.containsTable(table) {
				c.invalid = false
				return in, false
			}
		}
	}
	return in, false
}

// Leave implements ast.Visitor; traversal stops early once invalid.
func (c *conditionAttachChecker) Leave(in ast.Node) (ast.Node, bool) {
	return in, !c.invalid
}
// buildJoin builds a plan for the FROM clause of a select: it constructs a
// joinPath tree, pushes WHERE conditions as deep as possible, chooses a join
// order and converts the result to a Plan.
func (b *planBuilder) buildJoin(sel *ast.SelectStmt) Plan {
	// Collect tables whose NULL rows the WHERE clause rejects; outer joins
	// on those tables can degrade to inner joins.
	nrfinder := &nullRejectFinder{nullRejectTables: map[*ast.TableName]bool{}}
	if sel.Where != nil {
		sel.Where.Accept(nrfinder)
	}
	path := b.buildBasicJoinPath(sel.From.TableRefs, nrfinder.nullRejectTables)
	rfs := path.resultFields()
	whereConditions := splitWhere(sel.Where)
	for _, whereCond := range whereConditions {
		if !path.attachCondition(whereCond, nil) {
			// TODO: Find a better way to handle this condition.
			path.conditions = append(path.conditions, whereCond)
			// Fixed typo in log message ("condtion" -> "condition").
			log.Errorf("Failed to attach where condition.")
		}
	}
	path.extractEqualConditon()
	path.addIndexDependency()
	path.optimizeJoinOrder(nil)
	p := b.buildPlanFromJoinPath(path)
	p.SetFields(rfs)
	return p
}
// nullRejectFinder collects the tables for which the WHERE clause rejects
// NULL rows; outer joins against such tables degrade to inner joins.
type nullRejectFinder struct {
	nullRejectTables map[*ast.TableName]bool
}

// Enter implements ast.Visitor. Subtrees that cannot reject NULLs
// (NULL-safe equal, OR, IS NULL, IS NOT TRUE) are skipped entirely.
func (n *nullRejectFinder) Enter(in ast.Node) (ast.Node, bool) {
	switch x := in.(type) {
	case *ast.BinaryOperationExpr:
		if x.Op == opcode.NullEQ || x.Op == opcode.OrOr {
			return in, true
		}
	case *ast.IsNullExpr:
		if !x.Not {
			return in, true
		}
	case *ast.IsTruthExpr:
		if x.Not {
			return in, true
		}
	}
	return in, false
}

// Leave implements ast.Visitor. Any column reference reached here belongs
// to a null-rejecting predicate, so its table is recorded.
func (n *nullRejectFinder) Leave(in ast.Node) (ast.Node, bool) {
	switch x := in.(type) {
	case *ast.ColumnNameExpr:
		n.nullRejectTables[x.Refer.TableName] = true
	}
	return in, true
}
// buildBasicJoinPath recursively converts a result set node into a joinPath
// tree. nullRejectTables lists tables whose NULL rows are rejected by the
// WHERE clause, which lets outer joins degrade to inner joins.
func (b *planBuilder) buildBasicJoinPath(node ast.ResultSetNode, nullRejectTables map[*ast.TableName]bool) *joinPath {
	switch x := node.(type) {
	case nil:
		return nil
	case *ast.Join:
		leftPath := b.buildBasicJoinPath(x.Left, nullRejectTables)
		if x.Right == nil {
			// A join node without a right operand is just its left operand.
			return leftPath
		}
		rightPath := b.buildBasicJoinPath(x.Right, nullRejectTables)
		isOuter := b.isOuterJoin(x.Tp, leftPath, rightPath, nullRejectTables)
		if isOuter {
			return newOuterJoinPath(x.Tp == ast.RightJoin, leftPath, rightPath, x.On)
		}
		return newInnerJoinPath(leftPath, rightPath, x.On)
	case *ast.TableSource:
		switch v := x.Source.(type) {
		case *ast.TableName:
			return newTablePath(v)
		case *ast.SelectStmt, *ast.UnionStmt:
			return newSubqueryPath(v, x.AsName)
		default:
			// Report the actual source type (v), not the *ast.TableSource
			// wrapper: the original passed x, which always printed
			// "*ast.TableSource" and hid the offending type.
			b.err = ErrUnsupportedType.Gen("unsupported table source type %T", v)
			return nil
		}
	default:
		b.err = ErrUnsupportedType.Gen("unsupported table source type %T", x)
		return nil
	}
}
// isOuterJoin reports whether the join should really be treated as an outer
// join: a LEFT/RIGHT join degrades to an inner join when the WHERE clause
// rejects NULLs from any table on its inner side.
func (b *planBuilder) isOuterJoin(tp ast.JoinType, leftPaths, rightPaths *joinPath,
	nullRejectTables map[*ast.TableName]bool) bool {
	var inner *joinPath
	if tp == ast.LeftJoin {
		inner = rightPaths
	} else if tp == ast.RightJoin {
		inner = leftPaths
	} else {
		// Plain inner joins never qualify.
		return false
	}
	for t := range nullRejectTables {
		if inner.containsTable(t) {
			return false
		}
	}
	return true
}
// equivFromExpr extracts an equality join condition from expr, or returns
// nil when expr is not a simple "t1.c1 = t2.c2" between different tables.
func equivFromExpr(expr ast.ExprNode) *equalCond {
	binop, ok := expr.(*ast.BinaryOperationExpr)
	if !ok || binop.Op != opcode.EQ {
		return nil
	}
	ln, lOK := binop.L.(*ast.ColumnNameExpr)
	rn, rOK := binop.R.(*ast.ColumnNameExpr)
	if !lOK || !rOK {
		return nil
	}
	// Both sides must be table-qualified column references.
	if ln.Name.Table.L == "" || rn.Name.Table.L == "" {
		return nil
	}
	// Same-table equality is an ordinary filter, not a join condition.
	if ln.Name.Schema.L == rn.Name.Schema.L && ln.Name.Table.L == rn.Name.Table.L {
		return nil
	}
	return newEqualCond(ln.Refer, rn.Refer)
}
// buildPlanFromJoinPath converts an optimized joinPath tree into a Plan tree.
func (b *planBuilder) buildPlanFromJoinPath(path *joinPath) Plan {
	if path.table != nil {
		return b.buildTablePlanFromJoinPath(path)
	}
	if path.subquery != nil {
		return b.buildSubqueryJoinPath(path)
	}
	if path.outer != nil {
		join := &JoinOuter{
			Outer: b.buildPlanFromJoinPath(path.outer),
			Inner: b.buildPlanFromJoinPath(path.inner),
		}
		// Field order must follow the syntactic (left-to-right) order, so a
		// right join lists the inner side first.
		if path.rightJoin {
			join.SetFields(append(join.Inner.Fields(), join.Outer.Fields()...))
		} else {
			join.SetFields(append(join.Outer.Fields(), join.Inner.Fields()...))
		}
		return join
	}
	join := &JoinInner{}
	for _, in := range path.inners {
		join.Inners = append(join.Inners, b.buildPlanFromJoinPath(in))
		join.fields = append(join.fields, in.resultFields()...)
	}
	join.Conditions = path.conditions
	// Unattached equality conditions become ordinary join filters.
	for _, equiv := range path.eqConds {
		cond := &ast.BinaryOperationExpr{L: equiv.left.Expr, R: equiv.right.Expr, Op: opcode.EQ}
		join.Conditions = append(join.Conditions, cond)
	}
	return join
}
// buildTablePlanFromJoinPath builds the cheapest access plan for a single
// table path. Attached equality conditions are first materialized as binary
// expressions so the access-method planner can exploit them.
func (b *planBuilder) buildTablePlanFromJoinPath(path *joinPath) Plan {
	for _, equiv := range path.eqConds {
		columnNameExpr := &ast.ColumnNameExpr{}
		columnNameExpr.Name = &ast.ColumnName{}
		columnNameExpr.Name.Name = equiv.left.Column.Name
		columnNameExpr.Name.Table = equiv.left.Table.Name
		columnNameExpr.Refer = equiv.left
		condition := &ast.BinaryOperationExpr{L: columnNameExpr, R: equiv.right.Expr, Op: opcode.EQ}
		ast.SetFlag(condition)
		path.conditions = append(path.conditions, condition)
	}
	candidates := b.buildAllAccessMethodsPlan(path)
	// Pick the candidate with the lowest estimated cost.
	var p Plan
	var lowestCost float64
	for _, can := range candidates {
		cost := EstimateCost(can)
		if p == nil {
			p = can
			lowestCost = cost
		}
		if cost < lowestCost {
			p = can
			lowestCost = cost
		}
	}
	return p
}

// Build subquery join path plan. Attached equality conditions are converted
// to expressions and applied through a Filter stacked on the subquery's plan.
func (b *planBuilder) buildSubqueryJoinPath(path *joinPath) Plan {
	for _, equiv := range path.eqConds {
		columnNameExpr := &ast.ColumnNameExpr{}
		columnNameExpr.Name = &ast.ColumnName{}
		columnNameExpr.Name.Name = equiv.left.Column.Name
		columnNameExpr.Name.Table = equiv.left.Table.Name
		columnNameExpr.Refer = equiv.left
		condition := &ast.BinaryOperationExpr{L: columnNameExpr, R: equiv.right.Expr, Op: opcode.EQ}
		ast.SetFlag(condition)
		path.conditions = append(path.conditions, condition)
	}
	p := b.build(path.subquery)
	// No conditions attached: the subquery plan can be used directly.
	if len(path.conditions) == 0 {
		return p
	}
	filterPlan := &Filter{Conditions: path.conditions}
	filterPlan.SetSrc(p)
	filterPlan.SetFields(p.Fields())
	return filterPlan
}

677
vendor/github.com/pingcap/tidb/optimizer/plan/plans.go generated vendored Normal file
View file

@ -0,0 +1,677 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package plan
import (
"fmt"
"github.com/pingcap/tidb/ast"
"github.com/pingcap/tidb/model"
"github.com/pingcap/tidb/util/types"
)
// TableRange represents a range of row handle (the integer primary key).
type TableRange struct {
	LowVal  int64
	HighVal int64
}

// TableScan represents a table scan plan.
type TableScan struct {
	basePlan
	Table *model.TableInfo
	// Desc indicates whether the handles are scanned in descending order.
	Desc bool
	// Ranges are the handle ranges to be scanned.
	Ranges []TableRange
	// RefAccess indicates it references a previous joined table, used in explain.
	RefAccess bool
	// AccessConditions can be used to build index range.
	AccessConditions []ast.ExprNode
	// FilterConditions can be used to filter result.
	FilterConditions []ast.ExprNode
}

// Accept implements Plan Accept interface.
func (p *TableScan) Accept(v Visitor) (Plan, bool) {
	np, _ := v.Enter(p)
	return v.Leave(np)
}
// ShowDDL is for showing DDL information.
type ShowDDL struct {
	basePlan
}

// Accept implements Plan Accept interface.
func (p *ShowDDL) Accept(v Visitor) (Plan, bool) {
	np, _ := v.Enter(p)
	return v.Leave(np)
}

// CheckTable is for checking table data.
type CheckTable struct {
	basePlan
	// Tables are the tables to be checked.
	Tables []*ast.TableName
}

// Accept implements Plan Accept interface.
func (p *CheckTable) Accept(v Visitor) (Plan, bool) {
	np, _ := v.Enter(p)
	return v.Leave(np)
}
// IndexRange represents an index range to be scanned. The bound values are
// datum tuples covering a prefix of the index columns.
type IndexRange struct {
	LowVal      []types.Datum
	LowExclude  bool
	HighVal     []types.Datum
	HighExclude bool
}

// IsPoint returns if the index range is a point.
func (ir *IndexRange) IsPoint() bool {
	if len(ir.LowVal) != len(ir.HighVal) {
		return false
	}
	for i := range ir.LowVal {
		a := ir.LowVal[i]
		b := ir.HighVal[i]
		// Sentinel min/max bounds can never form a point.
		if a.Kind() == types.KindMinNotNull || b.Kind() == types.KindMaxValue {
			return false
		}
		cmp, err := a.CompareDatum(b)
		if err != nil {
			return false
		}
		if cmp != 0 {
			return false
		}
	}
	// Equal bounds form a point only when both ends are inclusive.
	return !ir.LowExclude && !ir.HighExclude
}
// IndexScan represents an index scan plan.
type IndexScan struct {
	basePlan
	// The index used.
	Index *model.IndexInfo
	// The table to lookup.
	Table *model.TableInfo
	// Ordered and non-overlapping ranges to be scanned.
	Ranges []*IndexRange
	// Desc indicates whether the index should be scanned in descending order.
	Desc bool
	// RefAccess indicates it references a previous joined table, used in explain.
	RefAccess bool
	// AccessConditions can be used to build index range.
	AccessConditions []ast.ExprNode
	// Number of leading equal access condition.
	// The offset of each equal condition correspond to the offset of index column.
	// For example, an index has column (a, b, c), condition is 'a = 0 and b = 0 and c > 0'
	// AccessEqualCount would be 2.
	AccessEqualCount int
	// FilterConditions can be used to filter result.
	FilterConditions []ast.ExprNode
}

// Accept implements Plan Accept interface.
func (p *IndexScan) Accept(v Visitor) (Plan, bool) {
	np, _ := v.Enter(p)
	return v.Leave(np)
}
// JoinOuter represents outer join plan.
type JoinOuter struct {
	basePlan
	Outer Plan
	Inner Plan
}

// Accept implements Plan interface. It visits this node, then the outer and
// inner children, propagating replacements from the visitor.
func (p *JoinOuter) Accept(v Visitor) (Plan, bool) {
	np, skip := v.Enter(p)
	if skip {
		return v.Leave(np)
	}
	p = np.(*JoinOuter)
	var ok bool
	p.Outer, ok = p.Outer.Accept(v)
	if !ok {
		return p, false
	}
	p.Inner, ok = p.Inner.Accept(v)
	if !ok {
		return p, false
	}
	return v.Leave(p)
}
// JoinInner represents inner join plan.
type JoinInner struct {
	basePlan
	// Inners are the child plans; Conditions are the join filters.
	Inners     []Plan
	Conditions []ast.ExprNode
}
// String implements fmt.Stringer.
func (p *JoinInner) String() string {
	// A constant string needs no fmt.Sprintf (staticcheck S1039).
	return "JoinInner()"
}
// Accept implements Plan interface. It visits this node and then each inner
// child plan in order, propagating replacements from the visitor.
func (p *JoinInner) Accept(v Visitor) (Plan, bool) {
	np, skip := v.Enter(p)
	if skip {
		return v.Leave(np)
	}
	p = np.(*JoinInner)
	for i := range p.Inners {
		child, ok := p.Inners[i].Accept(v)
		if !ok {
			return p, false
		}
		p.Inners[i] = child
	}
	return v.Leave(p)
}
// SelectLock represents a select lock plan (e.g. SELECT ... FOR UPDATE).
type SelectLock struct {
	planWithSrc
	Lock ast.SelectLockType
}

// Accept implements Plan Accept interface.
func (p *SelectLock) Accept(v Visitor) (Plan, bool) {
	np, skip := v.Enter(p)
	if skip {
		return v.Leave(np)
	}
	p = np.(*SelectLock)
	var ok bool
	p.src, ok = p.src.Accept(v)
	if !ok {
		return p, false
	}
	return v.Leave(p)
}

// SetLimit implements Plan SetLimit interface; the limit is forwarded to the source.
func (p *SelectLock) SetLimit(limit float64) {
	p.limit = limit
	p.src.SetLimit(p.limit)
}
// SelectFields represents a select fields plan.
type SelectFields struct {
	planWithSrc
}

// Accept implements Plan Accept interface.
func (p *SelectFields) Accept(v Visitor) (Plan, bool) {
	np, skip := v.Enter(p)
	if skip {
		return v.Leave(np)
	}
	p = np.(*SelectFields)
	// src may be nil for a source-less select (e.g. SELECT 1).
	if p.src != nil {
		var ok bool
		p.src, ok = p.src.Accept(v)
		if !ok {
			return p, false
		}
	}
	return v.Leave(p)
}

// SetLimit implements Plan SetLimit interface; the limit is forwarded to the
// source when one exists.
func (p *SelectFields) SetLimit(limit float64) {
	p.limit = limit
	if p.src != nil {
		p.src.SetLimit(limit)
	}
}
// Sort represents a sorting plan.
type Sort struct {
planWithSrc
ByItems []*ast.ByItem
}
// Accept implements the Plan Accept interface.
func (p *Sort) Accept(v Visitor) (Plan, bool) {
	node, skipChildren := v.Enter(p)
	if skipChildren {
		return v.Leave(node)
	}
	p = node.(*Sort)
	child, ok := p.src.Accept(v)
	p.src = child
	if !ok {
		return p, false
	}
	return v.Leave(p)
}
// SetLimit implements Plan SetLimit interface.
// It only records the limit on the sort node itself and does NOT push
// it to the source plan: whether the sort is bypassed has to be
// determined before this gets called.
func (p *Sort) SetLimit(limit float64) {
	p.limit = limit
}
// Limit represents offset and limit plan.
type Limit struct {
	planWithSrc
	// Offset is the number of rows to skip.
	Offset uint64
	// Count is the maximum number of rows to return after the offset.
	Count uint64
}
// Accept implements the Plan Accept interface.
func (p *Limit) Accept(v Visitor) (Plan, bool) {
	node, skipChildren := v.Enter(p)
	if skipChildren {
		return v.Leave(node)
	}
	p = node.(*Limit)
	child, ok := p.src.Accept(v)
	p.src = child
	if !ok {
		return p, false
	}
	return v.Leave(p)
}
// SetLimit implements Plan SetLimit interface.
// A Limit node already knows the exact upper bound on the rows it can
// produce, so the caller's hint is ignored: the real bound
// Offset+Count is recorded and pushed down to the source plan.
func (p *Limit) SetLimit(limit float64) {
	p.limit = float64(p.Offset + p.Count)
	p.src.SetLimit(p.limit)
}
// Union represents Union plan.
type Union struct {
	basePlan
	// Selects are the child plans, one per branch of the UNION.
	Selects []Plan
}
// Accept implements Plan Accept interface.
// It visits every SELECT branch and replaces it with the visitor's
// rewritten plan.
func (p *Union) Accept(v Visitor) (Plan, bool) {
	np, skip := v.Enter(p)
	if skip {
		// Fix: previously returned v.Leave(p), silently discarding the
		// node returned by v.Enter; every other Accept in this file
		// forwards np here.
		return v.Leave(np)
	}
	p = np.(*Union)
	for i, sel := range p.Selects {
		var ok bool
		p.Selects[i], ok = sel.Accept(v)
		if !ok {
			return p, false
		}
	}
	return v.Leave(p)
}
// Distinct represents Distinct plan.
// The child plan is held by the embedded planWithSrc.
type Distinct struct {
	planWithSrc
}
// Accept implements Plan Accept interface.
func (p *Distinct) Accept(v Visitor) (Plan, bool) {
	np, skip := v.Enter(p)
	if skip {
		// Fix: previously returned v.Leave(p), discarding the node
		// returned by v.Enter; forward np as the other Accepts do.
		return v.Leave(np)
	}
	p = np.(*Distinct)
	var ok bool
	p.src, ok = p.src.Accept(v)
	if !ok {
		return p, false
	}
	return v.Leave(p)
}
// SetLimit implements the Plan SetLimit interface; the limit is
// propagated to the source plan when one exists.
func (p *Distinct) SetLimit(limit float64) {
	p.limit = limit
	if src := p.src; src != nil {
		src.SetLimit(limit)
	}
}
// Prepare represents prepare plan.
type Prepare struct {
	basePlan
	// Name is the prepared statement's name.
	Name string
	// SQLText is the statement text to prepare.
	SQLText string
}
// Accept implements the Plan Accept interface. Prepare is a leaf
// plan, so only the node itself is visited.
func (p *Prepare) Accept(v Visitor) (Plan, bool) {
	node, skipChildren := v.Enter(p)
	if skipChildren {
		return v.Leave(node)
	}
	p = node.(*Prepare)
	return v.Leave(p)
}
// Execute represents an EXECUTE plan for a previously prepared statement.
type Execute struct {
	basePlan
	// Name is the prepared statement's name.
	Name string
	// UsingVars are the expressions bound via the USING clause.
	UsingVars []ast.ExprNode
	// ID identifies the prepared statement.
	ID uint32
}
// Accept implements the Plan Accept interface. Execute is a leaf
// plan, so only the node itself is visited.
func (p *Execute) Accept(v Visitor) (Plan, bool) {
	node, skipChildren := v.Enter(p)
	if skipChildren {
		return v.Leave(node)
	}
	p = node.(*Execute)
	return v.Leave(p)
}
// Deallocate represents deallocate plan.
type Deallocate struct {
	basePlan
	// Name is the prepared statement to deallocate.
	Name string
}
// Accept implements the Plan Accept interface. Deallocate is a leaf
// plan, so only the node itself is visited.
func (p *Deallocate) Accept(v Visitor) (Plan, bool) {
	node, skipChildren := v.Enter(p)
	if skipChildren {
		return v.Leave(node)
	}
	p = node.(*Deallocate)
	return v.Leave(p)
}
// Aggregate represents an aggregation plan.
type Aggregate struct {
	planWithSrc
	// AggFuncs are the aggregate function expressions to evaluate.
	AggFuncs []*ast.AggregateFuncExpr
	// GroupByItems are the GROUP BY items.
	GroupByItems []*ast.ByItem
}
// Accept implements the Plan Accept interface. The source plan may be
// nil and is then left untouched.
func (p *Aggregate) Accept(v Visitor) (Plan, bool) {
	node, skipChildren := v.Enter(p)
	if skipChildren {
		return v.Leave(node)
	}
	p = node.(*Aggregate)
	if p.src == nil {
		return v.Leave(p)
	}
	child, ok := p.src.Accept(v)
	p.src = child
	if !ok {
		return p, false
	}
	return v.Leave(p)
}
// SetLimit implements the Plan SetLimit interface; the limit is
// propagated to the source plan when one exists.
func (p *Aggregate) SetLimit(limit float64) {
	p.limit = limit
	if src := p.src; src != nil {
		src.SetLimit(limit)
	}
}
// Having represents a having plan.
// The having plan should after aggregate plan.
type Having struct {
	planWithSrc
	// Originally the WHERE or ON condition is parsed into a single expression,
	// but after we converted to CNF(Conjunctive normal form), it can be
	// split into a list of AND conditions.
	Conditions []ast.ExprNode
}
// Accept implements the Plan Accept interface.
func (p *Having) Accept(v Visitor) (Plan, bool) {
	node, skipChildren := v.Enter(p)
	if skipChildren {
		return v.Leave(node)
	}
	p = node.(*Having)
	child, ok := p.src.Accept(v)
	p.src = child
	if !ok {
		return p, false
	}
	return v.Leave(p)
}
// SetLimit implements the Plan SetLimit interface.
func (p *Having) SetLimit(limit float64) {
	p.limit = limit
	// The predicate is assumed to filter out about half of the source
	// rows, so the child is asked for twice as many.
	p.src.SetLimit(limit * 2)
}
// Update represents an update plan.
type Update struct {
	basePlan
	OrderedList []*ast.Assignment // OrderedList has the same offset as TablePlan's result fields.
	// SelectPlan produces the rows to be updated.
	SelectPlan Plan
}
// Accept implements the Plan Accept interface.
func (p *Update) Accept(v Visitor) (Plan, bool) {
	node, skipChildren := v.Enter(p)
	if skipChildren {
		return v.Leave(node)
	}
	p = node.(*Update)
	sel, ok := p.SelectPlan.Accept(v)
	p.SelectPlan = sel
	if !ok {
		return p, false
	}
	return v.Leave(p)
}
// Delete represents a delete plan.
type Delete struct {
	basePlan
	// SelectPlan produces the rows to be deleted.
	SelectPlan Plan
	// Tables lists the target tables for a multi-table delete.
	Tables []*ast.TableName
	// IsMultiTable is true for the multi-table DELETE form.
	IsMultiTable bool
}
// Accept implements the Plan Accept interface.
func (p *Delete) Accept(v Visitor) (Plan, bool) {
	node, skipChildren := v.Enter(p)
	if skipChildren {
		return v.Leave(node)
	}
	p = node.(*Delete)
	sel, ok := p.SelectPlan.Accept(v)
	p.SelectPlan = sel
	if !ok {
		return p, false
	}
	return v.Leave(p)
}
// Filter represents a plan that filters the source plan's result.
type Filter struct {
	planWithSrc
	// Originally the WHERE or ON condition is parsed into a single expression,
	// but after we converted to CNF(Conjunctive normal form), it can be
	// split into a list of AND conditions.
	Conditions []ast.ExprNode
}
// Accept implements the Plan Accept interface.
func (p *Filter) Accept(v Visitor) (Plan, bool) {
	node, skipChildren := v.Enter(p)
	if skipChildren {
		return v.Leave(node)
	}
	p = node.(*Filter)
	child, ok := p.src.Accept(v)
	p.src = child
	if !ok {
		return p, false
	}
	return v.Leave(p)
}
// SetLimit implements the Plan SetLimit interface.
func (p *Filter) SetLimit(limit float64) {
	p.limit = limit
	// The predicate is assumed to filter out about half of the source
	// rows, so the child is asked for twice as many.
	p.src.SetLimit(limit * 2)
}
// Show represents a show plan.
type Show struct {
	basePlan
	Tp     ast.ShowStmtType // Databases/Tables/Columns/....
	DBName string
	Table  *ast.TableName  // Used for showing columns.
	Column *ast.ColumnName // Used for `desc table column`.
	Flag   int             // Some flag parsed from sql, such as FULL.
	Full   bool
	User   string // Used for show grants.
	// Used by show variables
	GlobalScope bool
}
// Accept implements the Plan Accept interface. Show is a leaf plan,
// so only the node itself is visited.
func (p *Show) Accept(v Visitor) (Plan, bool) {
	node, skipChildren := v.Enter(p)
	if skipChildren {
		return v.Leave(node)
	}
	p = node.(*Show)
	return v.Leave(p)
}
// Simple represents a simple statement plan which doesn't need any optimization.
type Simple struct {
	basePlan
	// Statement is the AST node executed as-is.
	Statement ast.StmtNode
}
// Accept implements the Plan Accept interface. Simple is a leaf plan,
// so only the node itself is visited.
func (p *Simple) Accept(v Visitor) (Plan, bool) {
	node, skipChildren := v.Enter(p)
	if skipChildren {
		return v.Leave(node)
	}
	p = node.(*Simple)
	return v.Leave(p)
}
// Insert represents an insert plan.
type Insert struct {
	basePlan
	// Table is the target table reference.
	Table *ast.TableRefsClause
	// Columns are the explicit column names, if given.
	Columns []*ast.ColumnName
	// Lists holds the VALUES row expression lists.
	Lists [][]ast.ExprNode
	// Setlist holds assignments for the INSERT ... SET form.
	Setlist []*ast.Assignment
	// OnDuplicate holds ON DUPLICATE KEY UPDATE assignments.
	OnDuplicate []*ast.Assignment
	// SelectPlan is non-nil for the INSERT ... SELECT form.
	SelectPlan Plan
	// IsReplace is true when the statement is REPLACE rather than INSERT.
	IsReplace bool
	// Priority is the priority flag parsed from the statement.
	Priority int
}
// Accept implements the Plan Accept interface. The SELECT child, if
// any, is visited as well.
func (p *Insert) Accept(v Visitor) (Plan, bool) {
	node, skipChildren := v.Enter(p)
	if skipChildren {
		return v.Leave(node)
	}
	p = node.(*Insert)
	if p.SelectPlan == nil {
		return v.Leave(p)
	}
	sel, ok := p.SelectPlan.Accept(v)
	p.SelectPlan = sel
	if !ok {
		return p, false
	}
	return v.Leave(p)
}
// DDL represents a DDL statement plan.
type DDL struct {
	basePlan
	// Statement is the DDL AST node executed as-is.
	Statement ast.DDLNode
}
// Accept implements the Plan Accept interface. DDL is a leaf plan,
// so only the node itself is visited.
func (p *DDL) Accept(v Visitor) (Plan, bool) {
	node, skipChildren := v.Enter(p)
	if skipChildren {
		return v.Leave(node)
	}
	p = node.(*DDL)
	return v.Leave(p)
}
// Explain represents an explain plan.
type Explain struct {
	basePlan
	// StmtPlan is the plan being explained.
	StmtPlan Plan
}
// Accept implements Plan Accept interface.
func (p *Explain) Accept(v Visitor) (Plan, bool) {
	np, skip := v.Enter(p)
	if skip {
		// Bug fix: the result of v.Leave(np) was previously discarded
		// and execution fell through, so skipping had no effect.
		return v.Leave(np)
	}
	p = np.(*Explain)
	return v.Leave(p)
}

505
vendor/github.com/pingcap/tidb/optimizer/plan/range.go generated vendored Normal file
View file

@ -0,0 +1,505 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package plan
import (
"fmt"
"math"
"sort"
"github.com/juju/errors"
"github.com/pingcap/tidb/ast"
"github.com/pingcap/tidb/parser/opcode"
"github.com/pingcap/tidb/util/types"
)
// rangePoint is one endpoint of a one-dimensional range: a value,
// whether the endpoint is exclusive, and whether it opens (start=true)
// or closes (start=false) a range.
type rangePoint struct {
	value types.Datum
	excl  bool // exclude
	start bool
}
// String renders the endpoint in interval notation, e.g. "[3" for an
// inclusive start or "5)" for an exclusive end. The min-not-null and
// max sentinel values print as -inf and +inf.
func (rp rangePoint) String() string {
	val := rp.value.GetValue()
	switch rp.value.Kind() {
	case types.KindMinNotNull:
		val = "-inf"
	case types.KindMaxValue:
		val = "+inf"
	}
	if rp.start {
		bracket := "["
		if rp.excl {
			bracket = "("
		}
		return fmt.Sprintf("%s%v", bracket, val)
	}
	bracket := "]"
	if rp.excl {
		bracket = ")"
	}
	return fmt.Sprintf("%v%s", val, bracket)
}
// rangePointSorter sorts range points by value, implementing
// sort.Interface. Because sort.Sort cannot return an error, a
// comparison failure is stashed in err for the caller to inspect.
type rangePointSorter struct {
	points []rangePoint
	err    error
}
// Len implements sort.Interface.
func (r *rangePointSorter) Len() int {
	return len(r.points)
}
// Less implements sort.Interface. Points are ordered by value, with
// ties broken by equalValueLess. A Datum comparison error is recorded
// in r.err (the ordering result is then meaningless) since sort.Sort
// has no way to propagate it.
func (r *rangePointSorter) Less(i, j int) bool {
	a := r.points[i]
	b := r.points[j]
	cmp, err := a.value.CompareDatum(b.value)
	if err != nil {
		r.err = err
		return true
	}
	if cmp == 0 {
		return r.equalValueLess(a, b)
	}
	return cmp < 0
}
// equalValueLess breaks ties between two points whose values compare
// equal, ordering start/end and inclusive/exclusive endpoints so that
// the later merge pass opens and closes ranges in the right sequence.
// NOTE(review): the exact tie-break rules are subtle; verify each
// branch against rangeBuilder.merge's open/close counting.
func (r *rangePointSorter) equalValueLess(a, b rangePoint) bool {
	if a.start && b.start {
		// Two starts: an inclusive start sorts before an exclusive one.
		return !a.excl && b.excl
	} else if a.start {
		// Start vs end: start first only when the end is inclusive.
		return !b.excl
	} else if b.start {
		// End vs start: end first when either endpoint is exclusive.
		return a.excl || b.excl
	}
	// Two ends: an exclusive end sorts before an inclusive one.
	return a.excl && !b.excl
}
// Swap implements sort.Interface.
func (r *rangePointSorter) Swap(i, j int) {
	r.points[i], r.points[j] = r.points[j], r.points[i]
}
// rangeBuilder converts condition expressions into range points and
// then into index/table ranges. The first error encountered during
// building is recorded in err instead of being returned at each step.
type rangeBuilder struct {
	err error
}
// build dispatches on the expression type and converts the condition
// into a list of range points. Expression kinds that cannot narrow a
// range fall through to fullRange.
func (r *rangeBuilder) build(expr ast.ExprNode) []rangePoint {
	switch x := expr.(type) {
	case *ast.BinaryOperationExpr:
		return r.buildFromBinop(x)
	case *ast.PatternInExpr:
		return r.buildFromIn(x)
	case *ast.ParenthesesExpr:
		return r.build(x.Expr)
	case *ast.BetweenExpr:
		return r.buildFromBetween(x)
	case *ast.IsNullExpr:
		return r.buildFromIsNull(x)
	case *ast.IsTruthExpr:
		return r.buildFromIsTruth(x)
	case *ast.PatternLikeExpr:
		return r.buildFromPatternLike(x)
	case *ast.ColumnNameExpr:
		return r.buildFromColumnName(x)
	default:
		return fullRange
	}
}
// buildFromBinop converts a binary operation into range points.
// AND/OR recurse and combine children via intersection/union; any
// other operator is treated as a comparison between a column and a
// constant (the caller has already verified this shape).
func (r *rangeBuilder) buildFromBinop(x *ast.BinaryOperationExpr) []rangePoint {
	if x.Op == opcode.OrOr {
		return r.union(r.build(x.L), r.build(x.R))
	} else if x.Op == opcode.AndAnd {
		return r.intersection(r.build(x.L), r.build(x.R))
	}
	// This has been checked that the binary operation is comparison operation, and one of
	// the operand is column name expression.
	var value types.Datum
	var op opcode.Op
	if _, ok := x.L.(*ast.ValueExpr); ok {
		// The constant is on the left ("1 < col"), so mirror the
		// comparison operator to normalize it to "col > 1".
		value = types.NewDatum(x.L.GetValue())
		switch x.Op {
		case opcode.GE:
			op = opcode.LE
		case opcode.GT:
			op = opcode.LT
		case opcode.LT:
			op = opcode.GT
		case opcode.LE:
			op = opcode.GE
		default:
			// EQ/NE are symmetric; keep the operator as-is.
			op = x.Op
		}
	} else {
		value = types.NewDatum(x.R.GetValue())
		op = x.Op
	}
	if value.Kind() == types.KindNull {
		// Comparison with NULL matches nothing: empty range.
		return nil
	}
	switch op {
	case opcode.EQ:
		// [value, value]
		startPoint := rangePoint{value: value, start: true}
		endPoint := rangePoint{value: value}
		return []rangePoint{startPoint, endPoint}
	case opcode.NE:
		// [-inf, value) union (value, +inf]
		startPoint1 := rangePoint{value: types.MinNotNullDatum(), start: true}
		endPoint1 := rangePoint{value: value, excl: true}
		startPoint2 := rangePoint{value: value, start: true, excl: true}
		endPoint2 := rangePoint{value: types.MaxValueDatum()}
		return []rangePoint{startPoint1, endPoint1, startPoint2, endPoint2}
	case opcode.LT:
		// [-inf, value)
		startPoint := rangePoint{value: types.MinNotNullDatum(), start: true}
		endPoint := rangePoint{value: value, excl: true}
		return []rangePoint{startPoint, endPoint}
	case opcode.LE:
		// [-inf, value]
		startPoint := rangePoint{value: types.MinNotNullDatum(), start: true}
		endPoint := rangePoint{value: value}
		return []rangePoint{startPoint, endPoint}
	case opcode.GT:
		// (value, +inf]
		startPoint := rangePoint{value: value, start: true, excl: true}
		endPoint := rangePoint{value: types.MaxValueDatum()}
		return []rangePoint{startPoint, endPoint}
	case opcode.GE:
		// [value, +inf]
		startPoint := rangePoint{value: value, start: true}
		endPoint := rangePoint{value: types.MaxValueDatum()}
		return []rangePoint{startPoint, endPoint}
	}
	// Unsupported comparison operator: no range restriction derivable.
	return nil
}
// buildFromIn converts an IN list into a sorted list of point ranges
// (one [v, v] pair per list element), removing duplicate values.
// NOT IN is not supported and records an error.
func (r *rangeBuilder) buildFromIn(x *ast.PatternInExpr) []rangePoint {
	if x.Not {
		r.err = ErrUnsupportedType.Gen("NOT IN is not supported")
		return fullRange
	}
	var rangePoints []rangePoint
	for _, v := range x.List {
		startPoint := rangePoint{value: types.NewDatum(v.GetValue()), start: true}
		endPoint := rangePoint{value: types.NewDatum(v.GetValue())}
		rangePoints = append(rangePoints, startPoint, endPoint)
	}
	sorter := rangePointSorter{points: rangePoints}
	sort.Sort(&sorter)
	if sorter.err != nil {
		r.err = sorter.err
	}
	// check duplicates
	// After sorting, a well-formed list strictly alternates
	// start/end; two consecutive points with the same role mean a
	// duplicated value.
	hasDuplicate := false
	isStart := false
	for _, v := range rangePoints {
		if isStart == v.start {
			hasDuplicate = true
			break
		}
		isStart = v.start
	}
	if !hasDuplicate {
		return rangePoints
	}
	// remove duplicates
	// Keep only points that restore the start/end alternation.
	distinctRangePoints := make([]rangePoint, 0, len(rangePoints))
	isStart = false
	for i := 0; i < len(rangePoints); i++ {
		current := rangePoints[i]
		if isStart == current.start {
			continue
		}
		distinctRangePoints = append(distinctRangePoints, current)
		isStart = current.start
	}
	return distinctRangePoints
}
// buildFromBetween rewrites BETWEEN as two comparisons:
// expr BETWEEN a AND b     => (expr >= a) AND (expr <= b);
// expr NOT BETWEEN a AND b => (expr < a)  OR  (expr > b).
func (r *rangeBuilder) buildFromBetween(x *ast.BetweenExpr) []rangePoint {
	lowOp, highOp := opcode.GE, opcode.LE
	if x.Not {
		lowOp, highOp = opcode.LT, opcode.GT
	}
	low := r.buildFromBinop(&ast.BinaryOperationExpr{Op: lowOp, L: x.Expr, R: x.Left})
	high := r.buildFromBinop(&ast.BinaryOperationExpr{Op: highOp, L: x.Expr, R: x.Right})
	if x.Not {
		return r.union(low, high)
	}
	return r.intersection(low, high)
}
// buildFromIsNull converts IS [NOT] NULL into range points:
// IS NOT NULL covers every non-null value, IS NULL is the single
// point [null, null] (the zero-value Datum).
func (r *rangeBuilder) buildFromIsNull(x *ast.IsNullExpr) []rangePoint {
	if x.Not {
		return []rangePoint{
			{value: types.MinNotNullDatum(), start: true},
			{value: types.MaxValueDatum()},
		}
	}
	return []rangePoint{{start: true}, {}}
}
// buildFromIsTruth converts IS [NOT] TRUE / IS [NOT] FALSE into range
// points. A zero-value rangePoint carries the null Datum, so the
// {start, end} pairs below with unset values denote [null, null].
func (r *rangeBuilder) buildFromIsTruth(x *ast.IsTruthExpr) []rangePoint {
	if x.True != 0 {
		if x.Not {
			// NOT TRUE range is {[null null] [0, 0]}
			startPoint1 := rangePoint{start: true}
			endPoint1 := rangePoint{}
			startPoint2 := rangePoint{start: true}
			startPoint2.value.SetInt64(0)
			endPoint2 := rangePoint{}
			endPoint2.value.SetInt64(0)
			return []rangePoint{startPoint1, endPoint1, startPoint2, endPoint2}
		}
		// TRUE range is {[-inf 0) (0 +inf]}
		startPoint1 := rangePoint{value: types.MinNotNullDatum(), start: true}
		endPoint1 := rangePoint{excl: true}
		endPoint1.value.SetInt64(0)
		startPoint2 := rangePoint{excl: true, start: true}
		startPoint2.value.SetInt64(0)
		endPoint2 := rangePoint{value: types.MaxValueDatum()}
		return []rangePoint{startPoint1, endPoint1, startPoint2, endPoint2}
	}
	if x.Not {
		// NOT FALSE range is {[null, 0) (0, +inf]}.
		startPoint1 := rangePoint{start: true}
		endPoint1 := rangePoint{excl: true}
		endPoint1.value.SetInt64(0)
		startPoint2 := rangePoint{start: true, excl: true}
		startPoint2.value.SetInt64(0)
		endPoint2 := rangePoint{value: types.MaxValueDatum()}
		return []rangePoint{startPoint1, endPoint1, startPoint2, endPoint2}
	}
	// FALSE range is the single point [0, 0].
	startPoint := rangePoint{start: true}
	startPoint.value.SetInt64(0)
	endPoint := rangePoint{}
	endPoint.value.SetInt64(0)
	return []rangePoint{startPoint, endPoint}
}
// buildFromPatternLike converts a LIKE pattern into a prefix range:
// the literal prefix before the first wildcard becomes the lower
// bound, and the prefix with its last byte incremented becomes the
// exclusive upper bound. NOT LIKE is not supported.
func (r *rangeBuilder) buildFromPatternLike(x *ast.PatternLikeExpr) []rangePoint {
	if x.Not {
		// Pattern not like is not supported.
		r.err = ErrUnsupportedType.Gen("NOT LIKE is not supported.")
		return fullRange
	}
	pattern, err := types.ToString(x.Pattern.GetValue())
	if err != nil {
		r.err = errors.Trace(err)
		return fullRange
	}
	lowValue := make([]byte, 0, len(pattern))
	// unescape the pattern, collecting the literal prefix up to the
	// first unescaped wildcard.
	var exclude bool
	for i := 0; i < len(pattern); i++ {
		if pattern[i] == x.Escape {
			i++
			if i < len(pattern) {
				lowValue = append(lowValue, pattern[i])
			} else {
				// Trailing escape character: keep it literally.
				lowValue = append(lowValue, x.Escape)
			}
			continue
		}
		if pattern[i] == '%' {
			break
		} else if pattern[i] == '_' {
			// '_' matches exactly one character, so the bare prefix
			// itself cannot match: exclude the lower bound.
			exclude = true
			break
		}
		lowValue = append(lowValue, pattern[i])
	}
	if len(lowValue) == 0 {
		// Pattern starts with a wildcard: any non-null value may match.
		return []rangePoint{{value: types.MinNotNullDatum(), start: true}, {value: types.MaxValueDatum()}}
	}
	startPoint := rangePoint{start: true, excl: exclude}
	startPoint.value.SetBytesAsString(lowValue)
	highValue := make([]byte, len(lowValue))
	copy(highValue, lowValue)
	endPoint := rangePoint{excl: true}
	// Increment the prefix's last byte, carrying 0xff overflow toward
	// the front; if every byte overflows, the upper bound is +inf.
	for i := len(highValue) - 1; i >= 0; i-- {
		highValue[i]++
		if highValue[i] != 0 {
			endPoint.value.SetBytesAsString(highValue)
			break
		}
		if i == 0 {
			endPoint.value = types.MaxValueDatum()
			break
		}
	}
	ranges := make([]rangePoint, 2)
	ranges[0] = startPoint
	ranges[1] = endPoint
	return ranges
}
// buildFromColumnName handles a bare column reference, which is
// equivalent to "column IS TRUE": every non-null value except 0,
// i.e. {[-inf, 0) (0, +inf]}.
func (r *rangeBuilder) buildFromColumnName(x *ast.ColumnNameExpr) []rangePoint {
	var lowEnd, highStart rangePoint
	lowEnd.excl = true
	lowEnd.value.SetInt64(0)
	highStart.start = true
	highStart.excl = true
	highStart.value.SetInt64(0)
	return []rangePoint{
		{value: types.MinNotNullDatum(), start: true},
		lowEnd,
		highStart,
		{value: types.MaxValueDatum()},
	}
}
// intersection keeps only the points covered by both operands (AND).
func (r *rangeBuilder) intersection(a, b []rangePoint) []rangePoint {
	return r.merge(a, b, false)
}
// union keeps the points covered by at least one operand (OR).
func (r *rangeBuilder) union(a, b []rangePoint) []rangePoint {
	return r.merge(a, b, true)
}
// merge combines two point lists: with union=true the result covers
// points inside at least one operand, with union=false only points
// inside both. All endpoints are sorted and a running count of open
// ranges is kept; a merged range opens when the count reaches the
// required threshold (1 for union, 2 for intersection) and closes
// when it drops below it.
func (r *rangeBuilder) merge(a, b []rangePoint, union bool) []rangePoint {
	// Copy into a fresh slice instead of append(a, b...): appending
	// onto a could overwrite a's backing array (e.g. when a is the
	// shared fullRange slice) if it ever carries spare capacity.
	points := make([]rangePoint, 0, len(a)+len(b))
	points = append(points, a...)
	points = append(points, b...)
	sorter := rangePointSorter{points: points}
	sort.Sort(&sorter)
	if sorter.err != nil {
		r.err = sorter.err
		return nil
	}
	var (
		merged               []rangePoint
		inRangeCount         int
		requiredInRangeCount int
	)
	if union {
		requiredInRangeCount = 1
	} else {
		requiredInRangeCount = 2
	}
	for _, val := range sorter.points {
		if val.start {
			inRangeCount++
			if inRangeCount == requiredInRangeCount {
				// just reached the required in range count, a new range started.
				merged = append(merged, val)
			}
		} else {
			if inRangeCount == requiredInRangeCount {
				// just about to leave the required in range count, the range is ended.
				merged = append(merged, val)
			}
			inRangeCount--
		}
	}
	return merged
}
// buildIndexRanges build index ranges from range points.
// Only the first column in the index is built, extra column ranges will be appended by
// appendIndexRanges.
func (r *rangeBuilder) buildIndexRanges(rangePoints []rangePoint) []*IndexRange {
	indexRanges := make([]*IndexRange, 0, len(rangePoints)/2)
	for i := 0; i < len(rangePoints); i += 2 {
		start, end := rangePoints[i], rangePoints[i+1]
		indexRanges = append(indexRanges, &IndexRange{
			LowVal:      []types.Datum{start.value},
			LowExclude:  start.excl,
			HighVal:     []types.Datum{end.value},
			HighExclude: end.excl,
		})
	}
	return indexRanges
}
// appendIndexRanges appends additional column ranges for multi-column index.
// The additional column ranges can only be appended to point ranges;
// for example with an index (a, b) and the condition (a > 1 AND b = 2)
// no conjunctive range can be built, so the non-point range is kept as-is.
func (r *rangeBuilder) appendIndexRanges(origin []*IndexRange, rangePoints []rangePoint) []*IndexRange {
	var newIndexRanges []*IndexRange
	for _, oRange := range origin {
		if oRange.IsPoint() {
			newIndexRanges = append(newIndexRanges, r.appendIndexRange(oRange, rangePoints)...)
		} else {
			newIndexRanges = append(newIndexRanges, oRange)
		}
	}
	return newIndexRanges
}
// appendIndexRange expands one point range into one range per
// (start, end) pair in rangePoints, extending the low/high value
// tuples by the pair's endpoint values.
func (r *rangeBuilder) appendIndexRange(origin *IndexRange, rangePoints []rangePoint) []*IndexRange {
	newRanges := make([]*IndexRange, 0, len(rangePoints)/2)
	for i := 0; i < len(rangePoints); i += 2 {
		start, end := rangePoints[i], rangePoints[i+1]
		// Build fresh tuples so ranges never share backing arrays.
		low := make([]types.Datum, 0, len(origin.LowVal)+1)
		low = append(low, origin.LowVal...)
		low = append(low, start.value)
		high := make([]types.Datum, 0, len(origin.HighVal)+1)
		high = append(high, origin.HighVal...)
		high = append(high, end.value)
		newRanges = append(newRanges, &IndexRange{
			LowVal:      low,
			LowExclude:  start.excl,
			HighVal:     high,
			HighExclude: end.excl,
		})
	}
	return newRanges
}
// buildTableRanges converts range points into integer TableRanges
// (handle/row-id ranges). Sentinel endpoints are clamped to
// math.MinInt64/MaxInt64, exclusive endpoints and values rounded by
// the int64 conversion are tightened by one, and empty ranges are
// dropped. On a conversion/comparison error, r.err is set and the
// ranges built so far are returned.
func (r *rangeBuilder) buildTableRanges(rangePoints []rangePoint) []TableRange {
	tableRanges := make([]TableRange, 0, len(rangePoints)/2)
	for i := 0; i < len(rangePoints); i += 2 {
		startPoint := rangePoints[i]
		if startPoint.value.Kind() == types.KindNull || startPoint.value.Kind() == types.KindMinNotNull {
			startPoint.value.SetInt64(math.MinInt64)
		}
		startInt, err := types.ToInt64(startPoint.value.GetValue())
		if err != nil {
			r.err = errors.Trace(err)
			return tableRanges
		}
		startDatum := types.NewDatum(startInt)
		cmp, err := startDatum.CompareDatum(startPoint.value)
		if err != nil {
			r.err = errors.Trace(err)
			return tableRanges
		}
		// If the int64 conversion rounded the start down, or the start
		// is exclusive, the first included handle is one higher.
		if cmp < 0 || (cmp == 0 && startPoint.excl) {
			startInt++
		}
		endPoint := rangePoints[i+1]
		if endPoint.value.Kind() == types.KindNull {
			// A null end means the range covers only null, which no
			// handle matches; MinInt64 makes the range empty below.
			endPoint.value.SetInt64(math.MinInt64)
		} else if endPoint.value.Kind() == types.KindMaxValue {
			endPoint.value.SetInt64(math.MaxInt64)
		}
		endInt, err := types.ToInt64(endPoint.value.GetValue())
		if err != nil {
			r.err = errors.Trace(err)
			return tableRanges
		}
		endDatum := types.NewDatum(endInt)
		cmp, err = endDatum.CompareDatum(endPoint.value)
		if err != nil {
			r.err = errors.Trace(err)
			return tableRanges
		}
		// If the conversion rounded the end up, or the end is
		// exclusive, the last included handle is one lower.
		if cmp > 0 || (cmp == 0 && endPoint.excl) {
			endInt--
		}
		if startInt > endInt {
			// Empty after tightening; skip.
			continue
		}
		tableRanges = append(tableRanges, TableRange{LowVal: startInt, HighVal: endInt})
	}
	return tableRanges
}

Some files were not shown because too many files have changed in this diff Show more