forked from forgejo/forgejo

Integrate public as bindata optionally (#293)

* Dropped unused codekit config
* Integrated dynamic and static bindata for public
* Ignore public bindata
* Add a general generate make task
* Integrated flexible public assets into web command
* Updated vendoring, added all missing govendor deps
* Made the linter happy with the bindata and dynamic code
* Moved public bindata definition to modules directory
* Ignoring the new bindata path now
* Updated to the new public modules import path
* Updated public bindata command and drop the new prefix

parent 4680c349dd
commit b6a95a8cb3

691 changed files with 305318 additions and 1272 deletions
vendor/github.com/ngaut/deadline/rw.go (generated, vendored, new file, 50 lines)
@@ -0,0 +1,50 @@
package deadline

import (
	"io"
	"time"
)

type DeadlineReader interface {
	io.Reader
	SetReadDeadline(t time.Time) error
}

type DeadlineWriter interface {
	io.Writer
	SetWriteDeadline(t time.Time) error
}

type DeadlineReadWriter interface {
	io.ReadWriter
	SetReadDeadline(t time.Time) error
	SetWriteDeadline(t time.Time) error
}

type deadlineReader struct {
	DeadlineReader
	timeout time.Duration
}

func (r *deadlineReader) Read(p []byte) (int, error) {
	r.DeadlineReader.SetReadDeadline(time.Now().Add(r.timeout))
	return r.DeadlineReader.Read(p)
}

func NewDeadlineReader(r DeadlineReader, timeout time.Duration) io.Reader {
	return &deadlineReader{DeadlineReader: r, timeout: timeout}
}

type deadlineWriter struct {
	DeadlineWriter
	timeout time.Duration
}

func (r *deadlineWriter) Write(p []byte) (int, error) {
	r.DeadlineWriter.SetWriteDeadline(time.Now().Add(r.timeout))
	return r.DeadlineWriter.Write(p)
}

func NewDeadlineWriter(r DeadlineWriter, timeout time.Duration) io.Writer {
	return &deadlineWriter{DeadlineWriter: r, timeout: timeout}
}
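Editor's note: a minimal usage sketch of the deadline wrappers above (not part of this commit; the host and port are placeholders). net.Conn already satisfies DeadlineReader, so every Read below refreshes a five-second read deadline before delegating.

package main

import (
	"fmt"
	"net"
	"time"

	"github.com/ngaut/deadline"
)

func main() {
	conn, err := net.Dial("tcp", "example.com:80")
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// Each Read sets a fresh deadline, then delegates to conn.Read.
	r := deadline.NewDeadlineReader(conn, 5*time.Second)
	buf := make([]byte, 512)
	n, err := r.Read(buf) // blocks at most ~5s, then returns a timeout error
	fmt.Println(n, err)
}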
vendor/github.com/ngaut/go-zookeeper/LICENSE (generated, vendored, new file, 25 lines)
@@ -0,0 +1,25 @@
Copyright (c) 2013, Samuel Stauffer <samuel@descolada.com>
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright
  notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
  notice, this list of conditions and the following disclaimer in the
  documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the
  names of its contributors may be used to endorse or promote products
  derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
vendor/github.com/ngaut/go-zookeeper/zk/conn.go (generated, vendored, new file, 788 lines)
@@ -0,0 +1,788 @@
package zk

/*
TODO:
* make sure a ping response comes back in a reasonable time

Possible watcher events:
* Event{Type: EventNotWatching, State: StateDisconnected, Path: path, Err: err}
*/

import (
	"crypto/rand"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"log"
	"net"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"
)

var ErrNoServer = errors.New("zk: could not connect to a server")

const (
	bufferSize      = 10 * 1024 * 1024
	eventChanSize   = 6
	sendChanSize    = 16
	protectedPrefix = "_c_"
)

type watchType int

const (
	watchTypeData  = iota
	watchTypeExist = iota
	watchTypeChild = iota
)

type watchPathType struct {
	path  string
	wType watchType
}

type Dialer func(network, address string, timeout time.Duration) (net.Conn, error)

type Conn struct {
	lastZxid  int64
	sessionID int64
	state     State // must be 32-bit aligned
	xid       int32
	timeout   int32 // session timeout in seconds
	passwd    []byte

	dialer         Dialer
	servers        []string
	serverIndex    int
	conn           net.Conn
	eventChan      chan Event
	shouldQuit     chan bool
	pingInterval   time.Duration
	recvTimeout    time.Duration
	connectTimeout time.Duration

	sendChan     chan *request
	requests     map[int32]*request // Xid -> pending request
	requestsLock sync.Mutex
	watchers     map[watchPathType][]chan Event
	watchersLock sync.Mutex

	// Debug (used by unit tests)
	reconnectDelay time.Duration
}

type request struct {
	xid        int32
	opcode     int32
	pkt        interface{}
	recvStruct interface{}
	recvChan   chan response

	// Because sending and receiving happen in separate goroutines, there's
	// a possible race condition when creating watches from outside the read
	// loop. We must ensure that a watcher gets added to the list synchronously
	// with the response from the server on any request that creates a watch.
	// In order to not hard code the watch logic for each opcode in the recv
	// loop the caller can use recvFunc to insert some synchronous code
	// after a response.
	recvFunc func(*request, *responseHeader, error)
}

type response struct {
	zxid int64
	err  error
}

type Event struct {
	Type  EventType
	State State
	Path  string // For non-session events, the path of the watched node.
	Err   error
}

func Connect(servers []string, recvTimeout time.Duration) (*Conn, <-chan Event, error) {
	return ConnectWithDialer(servers, recvTimeout, nil)
}

func ConnectWithDialer(servers []string, recvTimeout time.Duration, dialer Dialer) (*Conn, <-chan Event, error) {
	// Randomize the order of the servers to avoid creating hotspots
	stringShuffle(servers)

	for i, addr := range servers {
		if !strings.Contains(addr, ":") {
			servers[i] = addr + ":" + strconv.Itoa(DefaultPort)
		}
	}
	ec := make(chan Event, eventChanSize)
	if dialer == nil {
		dialer = net.DialTimeout
	}
	conn := Conn{
		dialer:         dialer,
		servers:        servers,
		serverIndex:    0,
		conn:           nil,
		state:          StateDisconnected,
		eventChan:      ec,
		shouldQuit:     make(chan bool),
		recvTimeout:    recvTimeout,
		pingInterval:   time.Duration(int64(recvTimeout) / 2),
		connectTimeout: 1 * time.Second,
		sendChan:       make(chan *request, sendChanSize),
		requests:       make(map[int32]*request),
		watchers:       make(map[watchPathType][]chan Event),
		passwd:         emptyPassword,
		timeout:        30000,

		// Debug
		reconnectDelay: time.Second,
	}
	go func() {
		conn.loop()
		conn.flushRequests(ErrClosing)
		conn.invalidateWatches(ErrClosing)
		close(conn.eventChan)
	}()
	return &conn, ec, nil
}

func (c *Conn) Close() {
	close(c.shouldQuit)

	select {
	case <-c.queueRequest(opClose, &closeRequest{}, &closeResponse{}, nil):
	case <-time.After(time.Second):
	}
}

func (c *Conn) State() State {
	return State(atomic.LoadInt32((*int32)(&c.state)))
}

func (c *Conn) setState(state State) {
	atomic.StoreInt32((*int32)(&c.state), int32(state))
	select {
	case c.eventChan <- Event{Type: EventSession, State: state}:
	default:
		// panic("zk: event channel full - it must be monitored and never allowed to be full")
	}
}

func (c *Conn) connect() {
	c.serverIndex = (c.serverIndex + 1) % len(c.servers)
	startIndex := c.serverIndex
	c.setState(StateConnecting)
	for {
		zkConn, err := c.dialer("tcp", c.servers[c.serverIndex], c.connectTimeout)
		if err == nil {
			c.conn = zkConn
			c.setState(StateConnected)
			return
		}

		log.Printf("Failed to connect to %s: %+v", c.servers[c.serverIndex], err)

		c.serverIndex = (c.serverIndex + 1) % len(c.servers)
		if c.serverIndex == startIndex {
			c.flushUnsentRequests(ErrNoServer)
			time.Sleep(time.Second)
		}
	}
}

func (c *Conn) loop() {
	for {
		c.connect()
		err := c.authenticate()
		switch {
		case err == ErrSessionExpired:
			c.invalidateWatches(err)
		case err != nil && c.conn != nil:
			c.conn.Close()
		case err == nil:
			closeChan := make(chan bool) // channel to tell send loop to stop
			var wg sync.WaitGroup

			wg.Add(1)
			go func() {
				c.sendLoop(c.conn, closeChan)
				c.conn.Close() // causes recv loop to EOF/exit
				wg.Done()
			}()

			wg.Add(1)
			go func() {
				err = c.recvLoop(c.conn)
				if err == nil {
					panic("zk: recvLoop should never return nil error")
				}
				close(closeChan) // tell send loop to exit
				wg.Done()
			}()

			wg.Wait()
		}

		c.setState(StateDisconnected)

		// Yeesh
		if err != io.EOF && err != ErrSessionExpired && !strings.Contains(err.Error(), "use of closed network connection") {
			log.Println(err)
		}

		select {
		case <-c.shouldQuit:
			c.flushRequests(ErrClosing)
			return
		default:
		}

		if err != ErrSessionExpired {
			err = ErrConnectionClosed
		}
		c.flushRequests(err)

		if c.reconnectDelay > 0 {
			select {
			case <-c.shouldQuit:
				return
			case <-time.After(c.reconnectDelay):
			}
		}
	}
}

func (c *Conn) flushUnsentRequests(err error) {
	for {
		select {
		default:
			return
		case req := <-c.sendChan:
			req.recvChan <- response{-1, err}
		}
	}
}

// Send error to all pending requests and clear request map
func (c *Conn) flushRequests(err error) {
	c.requestsLock.Lock()
	for _, req := range c.requests {
		req.recvChan <- response{-1, err}
	}
	c.requests = make(map[int32]*request)
	c.requestsLock.Unlock()
}

// Send error to all watchers and clear watchers map
func (c *Conn) invalidateWatches(err error) {
	c.watchersLock.Lock()
	defer c.watchersLock.Unlock()

	if len(c.watchers) >= 0 {
		for pathType, watchers := range c.watchers {
			ev := Event{Type: EventNotWatching, State: StateDisconnected, Path: pathType.path, Err: err}
			for _, ch := range watchers {
				ch <- ev
				close(ch)
			}
		}
		c.watchers = make(map[watchPathType][]chan Event)
	}
}

func (c *Conn) sendSetWatches() {
	c.watchersLock.Lock()
	defer c.watchersLock.Unlock()

	if len(c.watchers) == 0 {
		return
	}

	req := &setWatchesRequest{
		RelativeZxid: c.lastZxid,
		DataWatches:  make([]string, 0),
		ExistWatches: make([]string, 0),
		ChildWatches: make([]string, 0),
	}
	n := 0
	for pathType, watchers := range c.watchers {
		if len(watchers) == 0 {
			continue
		}
		switch pathType.wType {
		case watchTypeData:
			req.DataWatches = append(req.DataWatches, pathType.path)
		case watchTypeExist:
			req.ExistWatches = append(req.ExistWatches, pathType.path)
		case watchTypeChild:
			req.ChildWatches = append(req.ChildWatches, pathType.path)
		}
		n++
	}
	if n == 0 {
		return
	}

	go func() {
		res := &setWatchesResponse{}
		_, err := c.request(opSetWatches, req, res, nil)
		if err != nil {
			log.Printf("Failed to set previous watches: %s", err.Error())
		}
	}()
}

func (c *Conn) authenticate() error {
	buf := make([]byte, 256)

	// connect request

	n, err := encodePacket(buf[4:], &connectRequest{
		ProtocolVersion: protocolVersion,
		LastZxidSeen:    c.lastZxid,
		TimeOut:         c.timeout,
		SessionID:       c.sessionID,
		Passwd:          c.passwd,
	})
	if err != nil {
		return err
	}

	binary.BigEndian.PutUint32(buf[:4], uint32(n))

	_, err = c.conn.Write(buf[:n+4])
	if err != nil {
		return err
	}

	c.sendSetWatches()

	// connect response

	// package length
	_, err = io.ReadFull(c.conn, buf[:4])
	if err != nil {
		return err
	}

	blen := int(binary.BigEndian.Uint32(buf[:4]))
	if cap(buf) < blen {
		buf = make([]byte, blen)
	}

	_, err = io.ReadFull(c.conn, buf[:blen])
	if err != nil {
		return err
	}

	r := connectResponse{}
	_, err = decodePacket(buf[:blen], &r)
	if err != nil {
		return err
	}
	if r.SessionID == 0 {
		c.sessionID = 0
		c.passwd = emptyPassword
		c.lastZxid = 0
		c.setState(StateExpired)
		return ErrSessionExpired
	}

	if c.sessionID != r.SessionID {
		atomic.StoreInt32(&c.xid, 0)
	}
	c.timeout = r.TimeOut
	c.sessionID = r.SessionID
	c.passwd = r.Passwd
	c.setState(StateHasSession)

	return nil
}

func (c *Conn) sendLoop(conn net.Conn, closeChan <-chan bool) error {
	pingTicker := time.NewTicker(c.pingInterval)
	defer pingTicker.Stop()

	buf := make([]byte, bufferSize)
	for {
		select {
		case req := <-c.sendChan:
			header := &requestHeader{req.xid, req.opcode}
			n, err := encodePacket(buf[4:], header)
			if err != nil {
				req.recvChan <- response{-1, err}
				continue
			}

			n2, err := encodePacket(buf[4+n:], req.pkt)
			if err != nil {
				req.recvChan <- response{-1, err}
				continue
			}

			n += n2

			binary.BigEndian.PutUint32(buf[:4], uint32(n))

			c.requestsLock.Lock()
			select {
			case <-closeChan:
				req.recvChan <- response{-1, ErrConnectionClosed}
				c.requestsLock.Unlock()
				return ErrConnectionClosed
			default:
			}
			c.requests[req.xid] = req
			c.requestsLock.Unlock()

			conn.SetWriteDeadline(time.Now().Add(c.recvTimeout))
			_, err = conn.Write(buf[:n+4])
			conn.SetWriteDeadline(time.Time{})
			if err != nil {
				req.recvChan <- response{-1, err}
				conn.Close()
				return err
			}
		case <-pingTicker.C:
			n, err := encodePacket(buf[4:], &requestHeader{Xid: -2, Opcode: opPing})
			if err != nil {
				panic("zk: opPing should never fail to serialize")
			}

			binary.BigEndian.PutUint32(buf[:4], uint32(n))

			conn.SetWriteDeadline(time.Now().Add(c.recvTimeout))
			_, err = conn.Write(buf[:n+4])
			conn.SetWriteDeadline(time.Time{})
			if err != nil {
				conn.Close()
				return err
			}
		case <-closeChan:
			return nil
		}
	}
}

func (c *Conn) recvLoop(conn net.Conn) error {
	buf := make([]byte, bufferSize)
	for {
		// package length
		conn.SetReadDeadline(time.Now().Add(c.recvTimeout))
		_, err := io.ReadFull(conn, buf[:4])
		if err != nil {
			return err
		}

		blen := int(binary.BigEndian.Uint32(buf[:4]))
		if cap(buf) < blen {
			buf = make([]byte, blen)
		}

		_, err = io.ReadFull(conn, buf[:blen])
		conn.SetReadDeadline(time.Time{})
		if err != nil {
			return err
		}

		res := responseHeader{}
		_, err = decodePacket(buf[:16], &res)
		if err != nil {
			return err
		}

		if res.Xid == -1 {
			res := &watcherEvent{}
			_, err := decodePacket(buf[16:16+blen], res)
			if err != nil {
				return err
			}
			ev := Event{
				Type:  res.Type,
				State: res.State,
				Path:  res.Path,
				Err:   nil,
			}
			select {
			case c.eventChan <- ev:
			default:
			}
			wTypes := make([]watchType, 0, 2)
			switch res.Type {
			case EventNodeCreated:
				wTypes = append(wTypes, watchTypeExist)
			case EventNodeDeleted, EventNodeDataChanged:
				wTypes = append(wTypes, watchTypeExist, watchTypeData, watchTypeChild)
			case EventNodeChildrenChanged:
				wTypes = append(wTypes, watchTypeChild)
			}
			c.watchersLock.Lock()
			for _, t := range wTypes {
				wpt := watchPathType{res.Path, t}
				if watchers := c.watchers[wpt]; watchers != nil && len(watchers) > 0 {
					for _, ch := range watchers {
						ch <- ev
						close(ch)
					}
					delete(c.watchers, wpt)
				}
			}
			c.watchersLock.Unlock()
		} else if res.Xid == -2 {
			// Ping response. Ignore.
		} else if res.Xid < 0 {
			log.Printf("Xid < 0 (%d) but not ping or watcher event", res.Xid)
		} else {
			if res.Zxid > 0 {
				c.lastZxid = res.Zxid
			}

			c.requestsLock.Lock()
			req, ok := c.requests[res.Xid]
			if ok {
				delete(c.requests, res.Xid)
			}
			c.requestsLock.Unlock()

			if !ok {
				log.Printf("Response for unknown request with xid %d", res.Xid)
			} else {
				if res.Err != 0 {
					err = res.Err.toError()
				} else {
					_, err = decodePacket(buf[16:16+blen], req.recvStruct)
				}
				if req.recvFunc != nil {
					req.recvFunc(req, &res, err)
				}
				req.recvChan <- response{res.Zxid, err}
				if req.opcode == opClose {
					return io.EOF
				}
			}
		}
	}
}

func (c *Conn) nextXid() int32 {
	return atomic.AddInt32(&c.xid, 1)
}

func (c *Conn) addWatcher(path string, watchType watchType) <-chan Event {
	c.watchersLock.Lock()
	defer c.watchersLock.Unlock()

	ch := make(chan Event, 1)
	wpt := watchPathType{path, watchType}
	c.watchers[wpt] = append(c.watchers[wpt], ch)
	return ch
}

func (c *Conn) queueRequest(opcode int32, req interface{}, res interface{}, recvFunc func(*request, *responseHeader, error)) <-chan response {
	rq := &request{
		xid:        c.nextXid(),
		opcode:     opcode,
		pkt:        req,
		recvStruct: res,
		recvChan:   make(chan response, 1),
		recvFunc:   recvFunc,
	}
	c.sendChan <- rq
	return rq.recvChan
}

func (c *Conn) request(opcode int32, req interface{}, res interface{}, recvFunc func(*request, *responseHeader, error)) (int64, error) {
	r := <-c.queueRequest(opcode, req, res, recvFunc)
	return r.zxid, r.err
}

func (c *Conn) AddAuth(scheme string, auth []byte) error {
	_, err := c.request(opSetAuth, &setAuthRequest{Type: 0, Scheme: scheme, Auth: auth}, &setAuthResponse{}, nil)
	return err
}

func (c *Conn) Children(path string) ([]string, Stat, error) {
	res := &getChildren2Response{}
	_, err := c.request(opGetChildren2, &getChildren2Request{Path: path, Watch: false}, res, nil)
	return res.Children, &res.Stat, err
}

func (c *Conn) ChildrenW(path string) ([]string, Stat, <-chan Event, error) {
	var ech <-chan Event
	res := &getChildren2Response{}
	_, err := c.request(opGetChildren2, &getChildren2Request{Path: path, Watch: true}, res, func(req *request, res *responseHeader, err error) {
		if err == nil {
			ech = c.addWatcher(path, watchTypeChild)
		}
	})
	if err != nil {
		return nil, nil, nil, err
	}
	return res.Children, &res.Stat, ech, err
}

func (c *Conn) Get(path string) ([]byte, Stat, error) {
	res := &getDataResponse{}
	_, err := c.request(opGetData, &getDataRequest{Path: path, Watch: false}, res, nil)
	return res.Data, &res.Stat, err
}

// GetW returns the contents of a znode and sets a watch
func (c *Conn) GetW(path string) ([]byte, Stat, <-chan Event, error) {
	var ech <-chan Event
	res := &getDataResponse{}
	_, err := c.request(opGetData, &getDataRequest{Path: path, Watch: true}, res, func(req *request, res *responseHeader, err error) {
		if err == nil {
			ech = c.addWatcher(path, watchTypeData)
		}
	})
	if err != nil {
		return nil, nil, nil, err
	}
	return res.Data, &res.Stat, ech, err
}

func (c *Conn) Set(path string, data []byte, version int32) (Stat, error) {
	res := &setDataResponse{}
	_, err := c.request(opSetData, &SetDataRequest{path, data, version}, res, nil)
	return &res.Stat, err
}

func (c *Conn) Create(path string, data []byte, flags int32, acl []ACL) (string, error) {
	res := &createResponse{}
	_, err := c.request(opCreate, &CreateRequest{path, data, acl, flags}, res, nil)
	return res.Path, err
}

// CreateProtectedEphemeralSequential fixes a race condition if the server crashes
// after it creates the node. On reconnect the session may still be valid so the
// ephemeral node still exists. Therefore, on reconnect we need to check if a node
// with a GUID generated on create exists.
func (c *Conn) CreateProtectedEphemeralSequential(path string, data []byte, acl []ACL) (string, error) {
	var guid [16]byte
	_, err := io.ReadFull(rand.Reader, guid[:16])
	if err != nil {
		return "", err
	}
	guidStr := fmt.Sprintf("%x", guid)

	parts := strings.Split(path, "/")
	parts[len(parts)-1] = fmt.Sprintf("%s%s-%s", protectedPrefix, guidStr, parts[len(parts)-1])
	rootPath := strings.Join(parts[:len(parts)-1], "/")
	protectedPath := strings.Join(parts, "/")

	var newPath string
	for i := 0; i < 3; i++ {
		newPath, err = c.Create(protectedPath, data, FlagEphemeral|FlagSequence, acl)
		switch err {
		case ErrSessionExpired:
			// No need to search for the node since it can't exist. Just try again.
		case ErrConnectionClosed:
			children, _, err := c.Children(rootPath)
			if err != nil {
				return "", err
			}
			for _, p := range children {
				parts := strings.Split(p, "/")
				if pth := parts[len(parts)-1]; strings.HasPrefix(pth, protectedPrefix) {
					if g := pth[len(protectedPrefix) : len(protectedPrefix)+32]; g == guidStr {
						return rootPath + "/" + p, nil
					}
				}
			}
		case nil:
			return newPath, nil
		default:
			return "", err
		}
	}
	return "", err
}

func (c *Conn) Delete(path string, version int32) error {
	_, err := c.request(opDelete, &DeleteRequest{path, version}, &deleteResponse{}, nil)
	return err
}

func (c *Conn) Exists(path string) (bool, Stat, error) {
	res := &existsResponse{}
	_, err := c.request(opExists, &existsRequest{Path: path, Watch: false}, res, nil)
	exists := true
	if err == ErrNoNode {
		exists = false
		err = nil
	}
	return exists, &res.Stat, err
}

func (c *Conn) ExistsW(path string) (bool, Stat, <-chan Event, error) {
	var ech <-chan Event
	res := &existsResponse{}
	_, err := c.request(opExists, &existsRequest{Path: path, Watch: true}, res, func(req *request, res *responseHeader, err error) {
		if err == nil {
			ech = c.addWatcher(path, watchTypeData)
		} else if err == ErrNoNode {
			ech = c.addWatcher(path, watchTypeExist)
		}
	})
	exists := true
	if err == ErrNoNode {
		exists = false
		err = nil
	}
	if err != nil {
		return false, nil, nil, err
	}
	return exists, &res.Stat, ech, err
}

func (c *Conn) GetACL(path string) ([]ACL, Stat, error) {
	res := &getAclResponse{}
	_, err := c.request(opGetAcl, &getAclRequest{Path: path}, res, nil)
	return res.Acl, &res.Stat, err
}

func (c *Conn) SetACL(path string, acl []ACL, version int32) (Stat, error) {
	res := &setAclResponse{}
	_, err := c.request(opSetAcl, &setAclRequest{Path: path, Acl: acl, Version: version}, res, nil)
	return &res.Stat, err
}

func (c *Conn) Sync(path string) (string, error) {
	res := &syncResponse{}
	_, err := c.request(opSync, &syncRequest{Path: path}, res, nil)
	return res.Path, err
}

type MultiOps struct {
	Create  []CreateRequest
	Delete  []DeleteRequest
	SetData []SetDataRequest
	Check   []CheckVersionRequest
}

func (c *Conn) Multi(ops MultiOps) error {
	req := &multiRequest{
		Ops:        make([]multiRequestOp, 0, len(ops.Create)+len(ops.Delete)+len(ops.SetData)+len(ops.Check)),
		DoneHeader: multiHeader{Type: -1, Done: true, Err: -1},
	}
	for _, r := range ops.Create {
		req.Ops = append(req.Ops, multiRequestOp{multiHeader{opCreate, false, -1}, r})
	}
	for _, r := range ops.SetData {
		req.Ops = append(req.Ops, multiRequestOp{multiHeader{opSetData, false, -1}, r})
	}
	for _, r := range ops.Delete {
		req.Ops = append(req.Ops, multiRequestOp{multiHeader{opDelete, false, -1}, r})
	}
	for _, r := range ops.Check {
		req.Ops = append(req.Ops, multiRequestOp{multiHeader{opCheck, false, -1}, r})
	}
	res := &multiResponse{}
	_, err := c.request(opMulti, req, res, nil)
	return err
}
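Editor's note: a minimal client sketch against the API above (not part of this commit; it assumes a ZooKeeper server on 127.0.0.1:2181, and imports the package by its vendored canonical path). The open "world:anyone" ACL is built directly from the ACL struct and PermAll constant defined in this package.

package main

import (
	"fmt"
	"time"

	"github.com/ngaut/go-zookeeper/zk"
)

func main() {
	conn, events, err := zk.Connect([]string{"127.0.0.1:2181"}, 15*time.Second)
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// Session and watch events arrive on the returned channel.
	go func() {
		for ev := range events {
			fmt.Println("event:", ev.Type, ev.State)
		}
	}()

	acl := []zk.ACL{{Perms: zk.PermAll, Scheme: "world", ID: "anyone"}}
	if _, err := conn.Create("/demo", []byte("hello"), 0, acl); err != nil && err != zk.ErrNodeExists {
		panic(err)
	}

	data, stat, err := conn.Get("/demo")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s (version %d)\n", data, stat.Version())
}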
vendor/github.com/ngaut/go-zookeeper/zk/constants.go (generated, vendored, new file, 217 lines)
@@ -0,0 +1,217 @@
package zk

import (
	"errors"
)

const (
	protocolVersion = 0

	DefaultPort = 2181
)

const (
	opNotify       = 0
	opCreate       = 1
	opDelete       = 2
	opExists       = 3
	opGetData      = 4
	opSetData      = 5
	opGetAcl       = 6
	opSetAcl       = 7
	opGetChildren  = 8
	opSync         = 9
	opPing         = 11
	opGetChildren2 = 12
	opCheck        = 13
	opMulti        = 14
	opClose        = -11
	opSetAuth      = 100
	opSetWatches   = 101
	// Not in protocol, used internally
	opWatcherEvent = -2
)

const (
	EventNodeCreated         = EventType(1)
	EventNodeDeleted         = EventType(2)
	EventNodeDataChanged     = EventType(3)
	EventNodeChildrenChanged = EventType(4)

	EventSession     = EventType(-1)
	EventNotWatching = EventType(-2)
)

var (
	eventNames = map[EventType]string{
		EventNodeCreated:         "EventNodeCreated",
		EventNodeDeleted:         "EventNodeDeleted",
		EventNodeDataChanged:     "EventNodeDataChanged",
		EventNodeChildrenChanged: "EventNodeChildrenChanged",
		EventSession:             "EventSession",
		EventNotWatching:         "EventNotWatching",
	}
)

const (
	StateUnknown           = State(-1)
	StateDisconnected      = State(0)
	StateConnecting        = State(1)
	StateSyncConnected     = State(3)
	StateAuthFailed        = State(4)
	StateConnectedReadOnly = State(5)
	StateSaslAuthenticated = State(6)
	StateExpired           = State(-112)
	// StateAuthFailed = State(-113)

	StateConnected  = State(100)
	StateHasSession = State(101)
)

const (
	FlagEphemeral = 1
	FlagSequence  = 2
)

var (
	stateNames = map[State]string{
		StateUnknown:           "StateUnknown",
		StateDisconnected:      "StateDisconnected",
		StateSyncConnected:     "StateSyncConnected",
		StateConnectedReadOnly: "StateConnectedReadOnly",
		StateSaslAuthenticated: "StateSaslAuthenticated",
		StateExpired:           "StateExpired",
		StateAuthFailed:        "StateAuthFailed",
		StateConnecting:        "StateConnecting",
		StateConnected:         "StateConnected",
		StateHasSession:        "StateHasSession",
	}
)

type State int32

func (s State) String() string {
	if name := stateNames[s]; name != "" {
		return name
	}
	return "Unknown"
}

type ErrCode int32

var (
	ErrConnectionClosed        = errors.New("zk: connection closed")
	ErrUnknown                 = errors.New("zk: unknown error")
	ErrAPIError                = errors.New("zk: api error")
	ErrNoNode                  = errors.New("zk: node does not exist")
	ErrNoAuth                  = errors.New("zk: not authenticated")
	ErrBadVersion              = errors.New("zk: version conflict")
	ErrNoChildrenForEphemerals = errors.New("zk: ephemeral nodes may not have children")
	ErrNodeExists              = errors.New("zk: node already exists")
	ErrNotEmpty                = errors.New("zk: node has children")
	ErrSessionExpired          = errors.New("zk: session has been expired by the server")
	ErrInvalidACL              = errors.New("zk: invalid ACL specified")
	ErrAuthFailed              = errors.New("zk: client authentication failed")
	ErrClosing                 = errors.New("zk: zookeeper is closing")
	ErrNothing                 = errors.New("zk: no server responses to process")
	ErrSessionMoved            = errors.New("zk: session moved to another server, so operation is ignored")

	// ErrInvalidCallback = errors.New("zk: invalid callback specified")
	errCodeToError = map[ErrCode]error{
		0:                          nil,
		errAPIError:                ErrAPIError,
		errNoNode:                  ErrNoNode,
		errNoAuth:                  ErrNoAuth,
		errBadVersion:              ErrBadVersion,
		errNoChildrenForEphemerals: ErrNoChildrenForEphemerals,
		errNodeExists:              ErrNodeExists,
		errNotEmpty:                ErrNotEmpty,
		errSessionExpired:          ErrSessionExpired,
		// errInvalidCallback: ErrInvalidCallback,
		errInvalidAcl:   ErrInvalidACL,
		errAuthFailed:   ErrAuthFailed,
		errClosing:      ErrClosing,
		errNothing:      ErrNothing,
		errSessionMoved: ErrSessionMoved,
	}
)

func (e ErrCode) toError() error {
	if err, ok := errCodeToError[e]; ok {
		return err
	}
	return ErrUnknown
}

const (
	errOk = 0
	// System and server-side errors
	errSystemError          = -1
	errRuntimeInconsistency = -2
	errDataInconsistency    = -3
	errConnectionLoss       = -4
	errMarshallingError     = -5
	errUnimplemented        = -6
	errOperationTimeout     = -7
	errBadArguments         = -8
	errInvalidState         = -9
	// API errors
	errAPIError                = ErrCode(-100)
	errNoNode                  = ErrCode(-101) // *
	errNoAuth                  = ErrCode(-102)
	errBadVersion              = ErrCode(-103) // *
	errNoChildrenForEphemerals = ErrCode(-108)
	errNodeExists              = ErrCode(-110) // *
	errNotEmpty                = ErrCode(-111)
	errSessionExpired          = ErrCode(-112)
	errInvalidCallback         = ErrCode(-113)
	errInvalidAcl              = ErrCode(-114)
	errAuthFailed              = ErrCode(-115)
	errClosing                 = ErrCode(-116)
	errNothing                 = ErrCode(-117)
	errSessionMoved            = ErrCode(-118)
)

// Constants for ACL permissions
const (
	PermRead = 1 << iota
	PermWrite
	PermCreate
	PermDelete
	PermAdmin
	PermAll = 0x1f
)

var (
	emptyPassword = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
	opNames       = map[int32]string{
		opNotify:       "notify",
		opCreate:       "create",
		opDelete:       "delete",
		opExists:       "exists",
		opGetData:      "getData",
		opSetData:      "setData",
		opGetAcl:       "getACL",
		opSetAcl:       "setACL",
		opGetChildren:  "getChildren",
		opSync:         "sync",
		opPing:         "ping",
		opGetChildren2: "getChildren2",
		opCheck:        "check",
		opMulti:        "multi",
		opClose:        "close",
		opSetAuth:      "setAuth",
		opSetWatches:   "setWatches",

		opWatcherEvent: "watcherEvent",
	}
)

type EventType int32

func (t EventType) String() string {
	if name := eventNames[t]; name != "" {
		return name
	}
	return "Unknown"
}
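Editor's note: a sketch (not part of this commit) of how the EventType constants above pair with the one-shot watches in conn.go. GetW arms a watch; exactly one Event is delivered on the channel (the watcher channel is then closed and removed in recvLoop), so the watch must be re-armed by calling GetW again.

package main

import (
	"fmt"
	"time"

	"github.com/ngaut/go-zookeeper/zk"
)

// watchNode prints the node's value and re-arms the data watch after
// every change, until the node is deleted.
func watchNode(conn *zk.Conn, path string) error {
	for {
		data, _, ch, err := conn.GetW(path)
		if err != nil {
			return err
		}
		fmt.Printf("current value: %s\n", data)
		ev := <-ch // fires once on the next change to this node
		if ev.Type == zk.EventNodeDeleted {
			return zk.ErrNoNode
		}
	}
}

func main() {
	conn, _, err := zk.Connect([]string{"127.0.0.1:2181"}, 15*time.Second)
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	if err := watchNode(conn, "/demo"); err != nil {
		fmt.Println("watch ended:", err)
	}
}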
vendor/github.com/ngaut/go-zookeeper/zk/lock.go (generated, vendored, new file, 131 lines)
@@ -0,0 +1,131 @@
package zk

import (
	"errors"
	"fmt"
	"strconv"
	"strings"
)

var (
	ErrDeadlock  = errors.New("zk: trying to acquire a lock twice")
	ErrNotLocked = errors.New("zk: not locked")
)

type Lock struct {
	c        *Conn
	path     string
	acl      []ACL
	lockPath string
	seq      int
}

func NewLock(c *Conn, path string, acl []ACL) *Lock {
	return &Lock{
		c:    c,
		path: path,
		acl:  acl,
	}
}

func parseSeq(path string) (int, error) {
	parts := strings.Split(path, "-")
	return strconv.Atoi(parts[len(parts)-1])
}

func (l *Lock) Lock() error {
	if l.lockPath != "" {
		return ErrDeadlock
	}

	prefix := fmt.Sprintf("%s/lock-", l.path)

	path := ""
	var err error
	for i := 0; i < 3; i++ {
		path, err = l.c.CreateProtectedEphemeralSequential(prefix, []byte{}, l.acl)
		if err == ErrNoNode {
			// Create parent node.
			parts := strings.Split(l.path, "/")
			pth := ""
			for _, p := range parts[1:] {
				pth += "/" + p
				_, err := l.c.Create(pth, []byte{}, 0, l.acl)
				if err != nil && err != ErrNodeExists {
					return err
				}
			}
		} else if err == nil {
			break
		} else {
			return err
		}
	}
	if err != nil {
		return err
	}

	seq, err := parseSeq(path)
	if err != nil {
		return err
	}

	for {
		children, _, err := l.c.Children(l.path)
		if err != nil {
			return err
		}

		lowestSeq := seq
		prevSeq := 0
		prevSeqPath := ""
		for _, p := range children {
			s, err := parseSeq(p)
			if err != nil {
				return err
			}
			if s < lowestSeq {
				lowestSeq = s
			}
			if s < seq && s > prevSeq {
				prevSeq = s
				prevSeqPath = p
			}
		}

		if seq == lowestSeq {
			// Acquired the lock
			break
		}

		// Wait on the node next in line for the lock
		_, _, ch, err := l.c.GetW(l.path + "/" + prevSeqPath)
		if err != nil && err != ErrNoNode {
			return err
		} else if err != nil && err == ErrNoNode {
			// try again
			continue
		}

		ev := <-ch
		if ev.Err != nil {
			return ev.Err
		}
	}

	l.seq = seq
	l.lockPath = path
	return nil
}

func (l *Lock) Unlock() error {
	if l.lockPath == "" {
		return ErrNotLocked
	}
	if err := l.c.Delete(l.lockPath, -1); err != nil {
		return err
	}
	l.lockPath = ""
	l.seq = 0
	return nil
}
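Editor's note: a sketch of typical use of the Lock recipe above (not part of this commit; server address and lock path are placeholders). Lock blocks until our candidate znode has the lowest sequence number under /locks/demo.

package main

import (
	"fmt"
	"time"

	"github.com/ngaut/go-zookeeper/zk"
)

func main() {
	conn, _, err := zk.Connect([]string{"127.0.0.1:2181"}, 15*time.Second)
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	acl := []zk.ACL{{Perms: zk.PermAll, Scheme: "world", ID: "anyone"}}
	l := zk.NewLock(conn, "/locks/demo", acl)

	// Lock creates missing parent nodes, then waits its turn.
	if err := l.Lock(); err != nil {
		panic(err)
	}
	fmt.Println("holding the lock")
	// ... critical section ...
	if err := l.Unlock(); err != nil {
		panic(err)
	}
}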
vendor/github.com/ngaut/go-zookeeper/zk/server_help.go (generated, vendored, new file, 113 lines)
@@ -0,0 +1,113 @@
package zk

import (
	"fmt"
	"io/ioutil"
	"math/rand"
	"os"
	"path/filepath"
	"time"
)

type TestServer struct {
	Port int
	Path string
	Srv  *Server
}

type TestCluster struct {
	Path    string
	Servers []TestServer
}

func StartTestCluster(size int) (*TestCluster, error) {
	tmpPath, err := ioutil.TempDir("", "gozk")
	if err != nil {
		return nil, err
	}
	success := false
	startPort := int(rand.Int31n(6000) + 10000)
	cluster := &TestCluster{Path: tmpPath}
	defer func() {
		if !success {
			cluster.Stop()
		}
	}()
	for serverN := 0; serverN < size; serverN++ {
		srvPath := filepath.Join(tmpPath, fmt.Sprintf("srv%d", serverN))
		if err := os.Mkdir(srvPath, 0700); err != nil {
			return nil, err
		}
		port := startPort + serverN*3
		cfg := ServerConfig{
			ClientPort: port,
			DataDir:    srvPath,
		}
		for i := 0; i < size; i++ {
			cfg.Servers = append(cfg.Servers, ServerConfigServer{
				ID:                 i + 1,
				Host:               "127.0.0.1",
				PeerPort:           startPort + i*3 + 1,
				LeaderElectionPort: startPort + i*3 + 2,
			})
		}
		cfgPath := filepath.Join(srvPath, "zoo.cfg")
		fi, err := os.Create(cfgPath)
		if err != nil {
			return nil, err
		}
		err = cfg.Marshall(fi)
		fi.Close()
		if err != nil {
			return nil, err
		}

		fi, err = os.Create(filepath.Join(srvPath, "myid"))
		if err != nil {
			return nil, err
		}
		_, err = fmt.Fprintf(fi, "%d\n", serverN+1)
		fi.Close()
		if err != nil {
			return nil, err
		}

		srv := &Server{
			ConfigPath: cfgPath,
		}
		if err := srv.Start(); err != nil {
			fmt.Println(err)
			return nil, err
		}
		cluster.Servers = append(cluster.Servers, TestServer{
			Path: srvPath,
			Port: cfg.ClientPort,
			Srv:  srv,
		})
	}
	success = true
	time.Sleep(time.Second) // Give the server time to become active. Should probably actually attempt to connect to verify.
	return cluster, nil
}

func (ts *TestCluster) Connect(idx int) (*Conn, error) {
	zk, _, err := Connect([]string{fmt.Sprintf("127.0.0.1:%d", ts.Servers[idx].Port)}, time.Second*15)
	return zk, err
}

func (ts *TestCluster) ConnectAll() (*Conn, error) {
	hosts := make([]string, len(ts.Servers))
	for i, srv := range ts.Servers {
		hosts[i] = fmt.Sprintf("127.0.0.1:%d", srv.Port)
	}
	zk, _, err := Connect(hosts, time.Second*15)
	return zk, err
}

func (ts *TestCluster) Stop() error {
	for _, srv := range ts.Servers {
		srv.Srv.Stop()
	}
	defer os.RemoveAll(ts.Path)
	return nil
}
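Editor's note: a hypothetical in-package test (not part of this commit; the test name is invented) showing how the helpers above fit together. It only runs where findZookeeperFatJar can locate a local ZooKeeper fatjar.

package zk

import "testing"

// TestClusterRoundTrip is a hypothetical example test: start a 3-node
// cluster, connect to all members, and issue one request.
func TestClusterRoundTrip(t *testing.T) {
	cluster, err := StartTestCluster(3)
	if err != nil {
		t.Fatal(err)
	}
	defer cluster.Stop()

	conn, err := cluster.ConnectAll()
	if err != nil {
		t.Fatal(err)
	}
	defer conn.Close()

	if _, _, err := conn.Exists("/"); err != nil {
		t.Fatal(err)
	}
}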
vendor/github.com/ngaut/go-zookeeper/zk/server_java.go (generated, vendored, new file, 142 lines)
@@ -0,0 +1,142 @@
package zk

import (
	"fmt"
	"io"
	"os"
	"os/exec"
	"path/filepath"
)

type ErrMissingServerConfigField string

func (e ErrMissingServerConfigField) Error() string {
	return fmt.Sprintf("zk: missing server config field '%s'", string(e))
}

const (
	DefaultServerTickTime                 = 2000
	DefaultServerInitLimit                = 10
	DefaultServerSyncLimit                = 5
	DefaultServerAutoPurgeSnapRetainCount = 3
	DefaultPeerPort                       = 2888
	DefaultLeaderElectionPort             = 3888
)

type ServerConfigServer struct {
	ID                 int
	Host               string
	PeerPort           int
	LeaderElectionPort int
}

type ServerConfig struct {
	TickTime                 int    // Number of milliseconds of each tick
	InitLimit                int    // Number of ticks that the initial synchronization phase can take
	SyncLimit                int    // Number of ticks that can pass between sending a request and getting an acknowledgement
	DataDir                  string // Directory where the snapshot is stored
	ClientPort               int    // Port at which clients will connect
	AutoPurgeSnapRetainCount int    // Number of snapshots to retain in dataDir
	AutoPurgePurgeInterval   int    // Purge task interval in hours (0 to disable auto purge)
	Servers                  []ServerConfigServer
}

func (sc ServerConfig) Marshall(w io.Writer) error {
	if sc.DataDir == "" {
		return ErrMissingServerConfigField("dataDir")
	}
	fmt.Fprintf(w, "dataDir=%s\n", sc.DataDir)
	if sc.TickTime <= 0 {
		sc.TickTime = DefaultServerTickTime
	}
	fmt.Fprintf(w, "tickTime=%d\n", sc.TickTime)
	if sc.InitLimit <= 0 {
		sc.InitLimit = DefaultServerInitLimit
	}
	fmt.Fprintf(w, "initLimit=%d\n", sc.InitLimit)
	if sc.SyncLimit <= 0 {
		sc.SyncLimit = DefaultServerSyncLimit
	}
	fmt.Fprintf(w, "syncLimit=%d\n", sc.SyncLimit)
	if sc.ClientPort <= 0 {
		sc.ClientPort = DefaultPort
	}
	fmt.Fprintf(w, "clientPort=%d\n", sc.ClientPort)
	if sc.AutoPurgePurgeInterval > 0 {
		if sc.AutoPurgeSnapRetainCount <= 0 {
			sc.AutoPurgeSnapRetainCount = DefaultServerAutoPurgeSnapRetainCount
		}
		fmt.Fprintf(w, "autopurge.snapRetainCount=%d\n", sc.AutoPurgeSnapRetainCount)
		fmt.Fprintf(w, "autopurge.purgeInterval=%d\n", sc.AutoPurgePurgeInterval)
	}
	if len(sc.Servers) > 0 {
		for _, srv := range sc.Servers {
			if srv.PeerPort <= 0 {
				srv.PeerPort = DefaultPeerPort
			}
			if srv.LeaderElectionPort <= 0 {
				srv.LeaderElectionPort = DefaultLeaderElectionPort
			}
			fmt.Fprintf(w, "server.%d=%s:%d:%d\n", srv.ID, srv.Host, srv.PeerPort, srv.LeaderElectionPort)
		}
	}
	return nil
}

var jarSearchPaths = []string{
	"zookeeper-*/contrib/fatjar/zookeeper-*-fatjar.jar",
	"../zookeeper-*/contrib/fatjar/zookeeper-*-fatjar.jar",
	"/usr/share/java/zookeeper-*.jar",
	"/usr/local/zookeeper-*/contrib/fatjar/zookeeper-*-fatjar.jar",
	"/usr/local/Cellar/zookeeper/*/libexec/contrib/fatjar/zookeeper-*-fatjar.jar",
}

func findZookeeperFatJar() string {
	var paths []string
	zkPath := os.Getenv("ZOOKEEPER_PATH")
	if zkPath == "" {
		paths = jarSearchPaths
	} else {
		paths = []string{filepath.Join(zkPath, "contrib/fatjar/zookeeper-*-fatjar.jar")}
	}
	for _, path := range paths {
		matches, _ := filepath.Glob(path)
		// TODO: could sort by version and pick latest
		if len(matches) > 0 {
			return matches[0]
		}
	}
	return ""
}

type Server struct {
	JarPath    string
	ConfigPath string

	cmd *exec.Cmd
}

func (srv *Server) Start() error {
	if srv.JarPath == "" {
		srv.JarPath = findZookeeperFatJar()
		if srv.JarPath == "" {
			return fmt.Errorf("zk: unable to find server jar")
		}
	}
	srv.cmd = exec.Command("java", "-jar", srv.JarPath, "server", srv.ConfigPath)
	// srv.cmd.Stdout = os.Stdout
	// srv.cmd.Stderr = os.Stderr
	err := srv.cmd.Start()
	if err != nil {
		fmt.Println("start failed", err)
	}

	fmt.Println("start zookeeper ok")

	return err
}

func (srv *Server) Stop() error {
	srv.cmd.Process.Signal(os.Kill)
	return srv.cmd.Wait()
}
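Editor's note: a sketch (not part of this commit) of what Marshall writes for a single-server config, derived directly from the Fprintf calls above; the paths and ports are placeholders.

package main

import (
	"os"

	"github.com/ngaut/go-zookeeper/zk"
)

func main() {
	cfg := zk.ServerConfig{
		DataDir:    "/tmp/zk-data",
		ClientPort: 2181,
		Servers: []zk.ServerConfigServer{
			{ID: 1, Host: "127.0.0.1", PeerPort: 2888, LeaderElectionPort: 3888},
		},
	}
	// Unset fields fall back to the defaults, so this prints:
	//   dataDir=/tmp/zk-data
	//   tickTime=2000
	//   initLimit=10
	//   syncLimit=5
	//   clientPort=2181
	//   server.1=127.0.0.1:2888:3888
	if err := cfg.Marshall(os.Stdout); err != nil {
		panic(err)
	}
}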
vendor/github.com/ngaut/go-zookeeper/zk/structs.go (generated, vendored, new file, 662 lines)
@@ -0,0 +1,662 @@
package zk

import (
	"encoding/binary"
	"errors"
	"reflect"
	"runtime"
	"time"
)

var (
	ErrUnhandledFieldType = errors.New("zk: unhandled field type")
	ErrPtrExpected        = errors.New("zk: encode/decode expect a non-nil pointer to struct")
	ErrShortBuffer        = errors.New("zk: buffer too small")
)

type ACL struct {
	Perms  int32
	Scheme string
	ID     string
}

type zkstat struct {
	ZCzxid          int64 // The zxid of the change that caused this znode to be created.
	ZMzxid          int64 // The zxid of the change that last modified this znode.
	ZCtime          int64 // The time in milliseconds from epoch when this znode was created.
	ZMtime          int64 // The time in milliseconds from epoch when this znode was last modified.
	ZVersion        int32 // The number of changes to the data of this znode.
	ZCversion       int32 // The number of changes to the children of this znode.
	ZAversion       int32 // The number of changes to the ACL of this znode.
	ZEphemeralOwner int64 // The session id of the owner of this znode if the znode is an ephemeral node. If it is not an ephemeral node, it will be zero.
	ZDataLength     int32 // The length of the data field of this znode.
	ZNumChildren    int32 // The number of children of this znode.
	ZPzxid          int64 // last modified children
}

type Stat interface {
	Czxid() int64
	Mzxid() int64
	CTime() time.Time
	MTime() time.Time
	Version() int
	CVersion() int
	AVersion() int
	EphemeralOwner() int64
	DataLength() int
	NumChildren() int
	Pzxid() int64
}

// Czxid returns the zxid of the change that caused the node to be created.
func (s *zkstat) Czxid() int64 {
	return s.ZCzxid
}

// Mzxid returns the zxid of the change that last modified the node.
func (s *zkstat) Mzxid() int64 {
	return s.ZMzxid
}

func millisec2time(ms int64) time.Time {
	return time.Unix(ms/1e3, ms%1e3*1e6)
}

// CTime returns the time (at millisecond resolution) when the node was
// created.
func (s *zkstat) CTime() time.Time {
	return millisec2time(s.ZCtime)
}

// MTime returns the time (at millisecond resolution) when the node was
// last modified.
func (s *zkstat) MTime() time.Time {
	return millisec2time(int64(s.ZMtime))
}

// Version returns the number of changes to the data of the node.
func (s *zkstat) Version() int {
	return int(s.ZVersion)
}

// CVersion returns the number of changes to the children of the node.
// This only changes when children are created or removed.
func (s *zkstat) CVersion() int {
	return int(s.ZCversion)
}

// AVersion returns the number of changes to the ACL of the node.
func (s *zkstat) AVersion() int {
	return int(s.ZAversion)
}

// If the node is an ephemeral node, EphemeralOwner returns the session id
// of the owner of the node; otherwise it will return zero.
func (s *zkstat) EphemeralOwner() int64 {
	return int64(s.ZEphemeralOwner)
}

// DataLength returns the length of the data in the node in bytes.
func (s *zkstat) DataLength() int {
	return int(s.ZDataLength)
}

// NumChildren returns the number of children of the node.
func (s *zkstat) NumChildren() int {
	return int(s.ZNumChildren)
}

// Pzxid returns the Pzxid of the node, whatever that is.
func (s *zkstat) Pzxid() int64 {
	return int64(s.ZPzxid)
}

type requestHeader struct {
	Xid    int32
	Opcode int32
}

type responseHeader struct {
	Xid  int32
	Zxid int64
	Err  ErrCode
}

type multiHeader struct {
	Type int32
	Done bool
	Err  ErrCode
}

type auth struct {
	Type   int32
	Scheme string
	Auth   []byte
}

// Generic request structs

type pathRequest struct {
	Path string
}

type PathVersionRequest struct {
	Path    string
	Version int32
}

type pathWatchRequest struct {
	Path  string
	Watch bool
}

type pathResponse struct {
	Path string
}

type statResponse struct {
	Stat zkstat
}

//

type CheckVersionRequest PathVersionRequest
type closeRequest struct{}
type closeResponse struct{}

type connectRequest struct {
	ProtocolVersion int32
	LastZxidSeen    int64
	TimeOut         int32
	SessionID       int64
	Passwd          []byte
}

type connectResponse struct {
	ProtocolVersion int32
	TimeOut         int32
	SessionID       int64
	Passwd          []byte
}

type CreateRequest struct {
	Path  string
	Data  []byte
	Acl   []ACL
	Flags int32
}

type createResponse pathResponse
type DeleteRequest PathVersionRequest
type deleteResponse struct{}

type errorResponse struct {
	Err int32
}

type existsRequest pathWatchRequest
type existsResponse statResponse
type getAclRequest pathRequest

type getAclResponse struct {
	Acl  []ACL
	Stat zkstat
}

type getChildrenRequest pathRequest

type getChildrenResponse struct {
	Children []string
}

type getChildren2Request pathWatchRequest

type getChildren2Response struct {
	Children []string
	Stat     zkstat
}

type getDataRequest pathWatchRequest

type getDataResponse struct {
	Data []byte
	Stat zkstat
}

type getMaxChildrenRequest pathRequest

type getMaxChildrenResponse struct {
	Max int32
}

type getSaslRequest struct {
	Token []byte
}

type pingRequest struct{}
type pingResponse struct{}

type setAclRequest struct {
	Path    string
	Acl     []ACL
	Version int32
}

type setAclResponse statResponse

type SetDataRequest struct {
	Path    string
	Data    []byte
	Version int32
}

type setDataResponse statResponse

type setMaxChildren struct {
	Path string
	Max  int32
}

type setSaslRequest struct {
	Token string
}

type setSaslResponse struct {
	Token string
}

type setWatchesRequest struct {
	RelativeZxid int64
	DataWatches  []string
	ExistWatches []string
	ChildWatches []string
}

type setWatchesResponse struct{}

type syncRequest pathRequest
type syncResponse pathResponse

type setAuthRequest auth
type setAuthResponse struct{}

type multiRequestOp struct {
	Header multiHeader
	Op     interface{}
}
type multiRequest struct {
	Ops        []multiRequestOp
	DoneHeader multiHeader
}
type multiResponseOp struct {
	Header multiHeader
	String string
	Stat   *zkstat
}
type multiResponse struct {
	Ops        []multiResponseOp
	DoneHeader multiHeader
}

func (r *multiRequest) Encode(buf []byte) (int, error) {
	total := 0
	for _, op := range r.Ops {
		op.Header.Done = false
		n, err := encodePacketValue(buf[total:], reflect.ValueOf(op))
		if err != nil {
			return total, err
		}
		total += n
	}
	r.DoneHeader.Done = true
	n, err := encodePacketValue(buf[total:], reflect.ValueOf(r.DoneHeader))
	if err != nil {
		return total, err
	}
	total += n

	return total, nil
}

func (r *multiRequest) Decode(buf []byte) (int, error) {
	r.Ops = make([]multiRequestOp, 0)
	r.DoneHeader = multiHeader{-1, true, -1}
	total := 0
	for {
		header := &multiHeader{}
		n, err := decodePacketValue(buf[total:], reflect.ValueOf(header))
		if err != nil {
			return total, err
		}
		total += n
		if header.Done {
			r.DoneHeader = *header
			break
		}

		req := requestStructForOp(header.Type)
		if req == nil {
			return total, ErrAPIError
		}
		n, err = decodePacketValue(buf[total:], reflect.ValueOf(req))
		if err != nil {
			return total, err
		}
		total += n
		r.Ops = append(r.Ops, multiRequestOp{*header, req})
	}
	return total, nil
}

func (r *multiResponse) Decode(buf []byte) (int, error) {
	r.Ops = make([]multiResponseOp, 0)
	r.DoneHeader = multiHeader{-1, true, -1}
	total := 0
	for {
		header := &multiHeader{}
		n, err := decodePacketValue(buf[total:], reflect.ValueOf(header))
		if err != nil {
			return total, err
		}
		total += n
		if header.Done {
			r.DoneHeader = *header
			break
		}

		res := multiResponseOp{Header: *header}
		var w reflect.Value
		switch header.Type {
		default:
			return total, ErrAPIError
		case opCreate:
			w = reflect.ValueOf(&res.String)
		case opSetData:
			res.Stat = new(zkstat)
			w = reflect.ValueOf(res.Stat)
		case opCheck, opDelete:
		}
		if w.IsValid() {
			n, err := decodePacketValue(buf[total:], w)
			if err != nil {
				return total, err
			}
			total += n
		}
		r.Ops = append(r.Ops, res)
	}
	return total, nil
}

type watcherEvent struct {
	Type  EventType
	State State
	Path  string
}

type decoder interface {
	Decode(buf []byte) (int, error)
}

type encoder interface {
	Encode(buf []byte) (int, error)
}

func decodePacket(buf []byte, st interface{}) (n int, err error) {
	defer func() {
		if r := recover(); r != nil {
			if e, ok := r.(runtime.Error); ok && e.Error() == "runtime error: slice bounds out of range" {
				err = ErrShortBuffer
			} else {
				panic(r)
			}
		}
	}()

	v := reflect.ValueOf(st)
	if v.Kind() != reflect.Ptr || v.IsNil() {
		return 0, ErrPtrExpected
	}
	return decodePacketValue(buf, v)
}

func decodePacketValue(buf []byte, v reflect.Value) (int, error) {
	rv := v
	kind := v.Kind()
	if kind == reflect.Ptr {
		if v.IsNil() {
			v.Set(reflect.New(v.Type().Elem()))
		}
		v = v.Elem()
		kind = v.Kind()
	}

	n := 0
	switch kind {
	default:
		return n, ErrUnhandledFieldType
	case reflect.Struct:
		if de, ok := rv.Interface().(decoder); ok {
			return de.Decode(buf)
		} else if de, ok := v.Interface().(decoder); ok {
			return de.Decode(buf)
		} else {
			for i := 0; i < v.NumField(); i++ {
				field := v.Field(i)
				n2, err := decodePacketValue(buf[n:], field)
				n += n2
				if err != nil {
					return n, err
				}
			}
		}
	case reflect.Bool:
		v.SetBool(buf[n] != 0)
		n++
	case reflect.Int32:
		v.SetInt(int64(binary.BigEndian.Uint32(buf[n : n+4])))
		n += 4
	case reflect.Int64:
		v.SetInt(int64(binary.BigEndian.Uint64(buf[n : n+8])))
		n += 8
	case reflect.String:
		ln := int(binary.BigEndian.Uint32(buf[n : n+4]))
		v.SetString(string(buf[n+4 : n+4+ln]))
		n += 4 + ln
	case reflect.Slice:
		switch v.Type().Elem().Kind() {
		default:
			count := int(binary.BigEndian.Uint32(buf[n : n+4]))
			n += 4
			values := reflect.MakeSlice(v.Type(), count, count)
			v.Set(values)
			for i := 0; i < count; i++ {
				n2, err := decodePacketValue(buf[n:], values.Index(i))
				n += n2
				if err != nil {
					return n, err
				}
			}
		case reflect.Uint8:
			ln := int(int32(binary.BigEndian.Uint32(buf[n : n+4])))
			if ln < 0 {
				n += 4
				v.SetBytes(nil)
			} else {
				bytes := make([]byte, ln)
				copy(bytes, buf[n+4:n+4+ln])
				v.SetBytes(bytes)
				n += 4 + ln
			}
		}
	}
	return n, nil
}

func encodePacket(buf []byte, st interface{}) (n int, err error) {
	defer func() {
		if r := recover(); r != nil {
			if e, ok := r.(runtime.Error); ok && e.Error() == "runtime error: slice bounds out of range" {
				err = ErrShortBuffer
			} else {
				panic(r)
			}
		}
	}()

	v := reflect.ValueOf(st)
	if v.Kind() != reflect.Ptr || v.IsNil() {
		return 0, ErrPtrExpected
	}
	return encodePacketValue(buf, v)
}

func encodePacketValue(buf []byte, v reflect.Value) (int, error) {
	rv := v
	for v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface {
		v = v.Elem()
	}

	n := 0
	switch v.Kind() {
	default:
		return n, ErrUnhandledFieldType
	case reflect.Struct:
		if en, ok := rv.Interface().(encoder); ok {
			return en.Encode(buf)
		} else if en, ok := v.Interface().(encoder); ok {
			return en.Encode(buf)
		} else {
			for i := 0; i < v.NumField(); i++ {
				field := v.Field(i)
				n2, err := encodePacketValue(buf[n:], field)
				n += n2
				if err != nil {
					return n, err
				}
			}
		}
	case reflect.Bool:
		if v.Bool() {
			buf[n] = 1
		} else {
			buf[n] = 0
		}
		n++
	case reflect.Int32:
		binary.BigEndian.PutUint32(buf[n:n+4], uint32(v.Int()))
		n += 4
	case reflect.Int64:
		binary.BigEndian.PutUint64(buf[n:n+8], uint64(v.Int()))
		n += 8
	case reflect.String:
		str := v.String()
		binary.BigEndian.PutUint32(buf[n:n+4], uint32(len(str)))
		copy(buf[n+4:n+4+len(str)], []byte(str))
		n += 4 + len(str)
	case reflect.Slice:
		switch v.Type().Elem().Kind() {
		default:
			count := v.Len()
			startN := n
			n += 4
			for i := 0; i < count; i++ {
				n2, err := encodePacketValue(buf[n:], v.Index(i))
				n += n2
				if err != nil {
					return n, err
				}
			}
			binary.BigEndian.PutUint32(buf[startN:startN+4], uint32(count))
		case reflect.Uint8:
			if v.IsNil() {
				binary.BigEndian.PutUint32(buf[n:n+4], uint32(0xffffffff))
				n += 4
			} else {
|
||||
bytes := v.Bytes()
|
||||
binary.BigEndian.PutUint32(buf[n:n+4], uint32(len(bytes)))
|
||||
copy(buf[n+4:n+4+len(bytes)], bytes)
|
||||
n += 4 + len(bytes)
|
||||
}
|
||||
}
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func requestStructForOp(op int32) interface{} {
|
||||
switch op {
|
||||
case opClose:
|
||||
return &closeRequest{}
|
||||
case opCreate:
|
||||
return &CreateRequest{}
|
||||
case opDelete:
|
||||
return &DeleteRequest{}
|
||||
case opExists:
|
||||
return &existsRequest{}
|
||||
case opGetAcl:
|
||||
return &getAclRequest{}
|
||||
case opGetChildren:
|
||||
return &getChildrenRequest{}
|
||||
case opGetChildren2:
|
||||
return &getChildren2Request{}
|
||||
case opGetData:
|
||||
return &getDataRequest{}
|
||||
case opPing:
|
||||
return &pingRequest{}
|
||||
case opSetAcl:
|
||||
return &setAclRequest{}
|
||||
case opSetData:
|
||||
return &SetDataRequest{}
|
||||
case opSetWatches:
|
||||
return &setWatchesRequest{}
|
||||
case opSync:
|
||||
return &syncRequest{}
|
||||
case opSetAuth:
|
||||
return &setAuthRequest{}
|
||||
case opCheck:
|
||||
return &CheckVersionRequest{}
|
||||
case opMulti:
|
||||
return &multiRequest{}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func responseStructForOp(op int32) interface{} {
|
||||
switch op {
|
||||
case opClose:
|
||||
return &closeResponse{}
|
||||
case opCreate:
|
||||
return &createResponse{}
|
||||
case opDelete:
|
||||
return &deleteResponse{}
|
||||
case opExists:
|
||||
return &existsResponse{}
|
||||
case opGetAcl:
|
||||
return &getAclResponse{}
|
||||
case opGetChildren:
|
||||
return &getChildrenResponse{}
|
||||
case opGetChildren2:
|
||||
return &getChildren2Response{}
|
||||
case opGetData:
|
||||
return &getDataResponse{}
|
||||
case opPing:
|
||||
return &pingResponse{}
|
||||
case opSetAcl:
|
||||
return &setAclResponse{}
|
||||
case opSetData:
|
||||
return &setDataResponse{}
|
||||
case opSetWatches:
|
||||
return &setWatchesResponse{}
|
||||
case opSync:
|
||||
return &syncResponse{}
|
||||
case opWatcherEvent:
|
||||
return &watcherEvent{}
|
||||
case opSetAuth:
|
||||
return &setAuthResponse{}
|
||||
// case opCheck:
|
||||
// return &checkVersionResponse{}
|
||||
case opMulti:
|
||||
return &multiResponse{}
|
||||
}
|
||||
return nil
|
||||
}
|
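The codec above walks request and response structs with reflection, writing big-endian scalars and length-prefixed strings and byte slices. A minimal round-trip sketch, assumed to live inside package zk; the CreateRequest field names and the PermAll constant are assumptions taken from the upstream samuel/go-zookeeper definitions, not shown in this diff:

// Hypothetical in-package sketch; field names and PermAll are assumptions.
func exampleRoundTrip() error {
	req := &CreateRequest{Path: "/demo", Data: []byte("x"), Acl: WorldACL(PermAll)}
	buf := make([]byte, 256)         // arbitrary scratch size for the sketch
	n, err := encodePacket(buf, req) // reflection walk writes big-endian fields
	if err != nil {
		return err
	}
	out := &CreateRequest{}
	_, err = decodePacket(buf[:n], out) // the symmetric walk reads them back
	return err
}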
149 vendor/github.com/ngaut/go-zookeeper/zk/tracer.go generated vendored Normal file
@ -0,0 +1,149 @@
package zk

import (
	"encoding/binary"
	"fmt"
	"io"
	"net"
	"sync"
)

var (
	requests     = make(map[int32]int32) // Map of Xid -> Opcode
	requestsLock = &sync.Mutex{}
)

func trace(conn1, conn2 net.Conn, client bool) {
	defer conn1.Close()
	defer conn2.Close()
	buf := make([]byte, 10*1024)
	init := true
	for {
		_, err := io.ReadFull(conn1, buf[:4])
		if err != nil {
			fmt.Println("1>", client, err)
			return
		}

		blen := int(binary.BigEndian.Uint32(buf[:4]))

		_, err = io.ReadFull(conn1, buf[4:4+blen])
		if err != nil {
			fmt.Println("2>", client, err)
			return
		}

		var cr interface{}
		opcode := int32(-1)
		readHeader := true
		if client {
			if init {
				cr = &connectRequest{}
				readHeader = false
			} else {
				xid := int32(binary.BigEndian.Uint32(buf[4:8]))
				opcode = int32(binary.BigEndian.Uint32(buf[8:12]))
				requestsLock.Lock()
				requests[xid] = opcode
				requestsLock.Unlock()
				cr = requestStructForOp(opcode)
				if cr == nil {
					fmt.Printf("Unknown opcode %d\n", opcode)
				}
			}
		} else {
			if init {
				cr = &connectResponse{}
				readHeader = false
			} else {
				xid := int32(binary.BigEndian.Uint32(buf[4:8]))
				zxid := int64(binary.BigEndian.Uint64(buf[8:16]))
				errnum := int32(binary.BigEndian.Uint32(buf[16:20]))
				if xid != -1 || zxid != -1 {
					requestsLock.Lock()
					found := false
					opcode, found = requests[xid]
					if !found {
						fmt.Printf("Response for unknown xid %d\n", xid)
						opcode = 0
					}
					delete(requests, xid)
					requestsLock.Unlock()
				} else {
					opcode = opWatcherEvent
				}
				cr = responseStructForOp(opcode)
				if cr == nil {
					fmt.Printf("Unknown opcode %d\n", opcode)
				}
				if errnum != 0 {
					cr = &struct{}{}
				}
			}
		}
		opname := "."
		if opcode != -1 {
			opname = opNames[opcode]
		}
		if cr == nil {
			fmt.Printf("%+v %s %+v\n", client, opname, buf[4:4+blen])
		} else {
			n := 4
			hdrStr := ""
			if readHeader {
				var hdr interface{}
				if client {
					hdr = &requestHeader{}
				} else {
					hdr = &responseHeader{}
				}
				if n2, err := decodePacket(buf[n:n+blen], hdr); err != nil {
					fmt.Println(err)
				} else {
					n += n2
				}
				hdrStr = fmt.Sprintf(" %+v", hdr)
			}
			if _, err := decodePacket(buf[n:n+blen], cr); err != nil {
				fmt.Println(err)
			}
			fmt.Printf("%+v %s%s %+v\n", client, opname, hdrStr, cr)
		}

		init = false

		written, err := conn2.Write(buf[:4+blen])
		if err != nil {
			fmt.Println("3>", client, err)
			return
		} else if written != 4+blen {
			fmt.Printf("Written != read: %d != %d\n", written, 4+blen)
			return
		}
	}
}

func handleConnection(addr string, conn net.Conn) {
	zkConn, err := net.Dial("tcp", addr)
	if err != nil {
		fmt.Println(err)
		return
	}
	go trace(conn, zkConn, true)
	trace(zkConn, conn, false)
}

func StartTracer(listenAddr, serverAddr string) {
	ln, err := net.Listen("tcp", listenAddr)
	if err != nil {
		panic(err)
	}
	for {
		conn, err := ln.Accept()
		if err != nil {
			fmt.Println(err)
			continue
		}
		go handleConnection(serverAddr, conn)
	}
}
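A minimal sketch of wiring the tracer up as a man-in-the-middle proxy; the addresses are placeholders, and StartTracer blocks, so it is the last call here:

package main

import "github.com/ngaut/go-zookeeper/zk"

func main() {
	// Clients connect to :2182; each packet is decoded, printed, and
	// forwarded to the real ZooKeeper server on :2181.
	zk.StartTracer("127.0.0.1:2182", "127.0.0.1:2181")
}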
40 vendor/github.com/ngaut/go-zookeeper/zk/util.go generated vendored Normal file
@ -0,0 +1,40 @@
package zk

import (
	"crypto/sha1"
	"encoding/base64"
	"fmt"
	"math/rand"
)

// AuthACL produces an ACL list containing a single ACL which uses the
// provided permissions, with the scheme "auth", and ID "", which is used
// by ZooKeeper to represent any authenticated user.
func AuthACL(perms int32) []ACL {
	return []ACL{{perms, "auth", ""}}
}

// WorldACL produces an ACL list containing a single ACL which uses the
// provided permissions, with the scheme "world", and ID "anyone", which
// is used by ZooKeeper to represent any user at all.
func WorldACL(perms int32) []ACL {
	return []ACL{{perms, "world", "anyone"}}
}

// DigestACL produces an ACL list containing a single ACL which uses the
// provided permissions, with the scheme "digest" and an ID of the form
// "user:base64(sha1(user:password))", matching ZooKeeper's digest scheme.
func DigestACL(perms int32, user, password string) []ACL {
	userPass := []byte(fmt.Sprintf("%s:%s", user, password))
	h := sha1.New()
	if n, err := h.Write(userPass); err != nil || n != len(userPass) {
		panic("SHA1 failed")
	}
	digest := base64.StdEncoding.EncodeToString(h.Sum(nil))
	return []ACL{{perms, "digest", fmt.Sprintf("%s:%s", user, digest)}}
}

// stringShuffle performs a Fisher-Yates shuffle on a slice of strings
func stringShuffle(s []string) {
	for i := len(s) - 1; i > 0; i-- {
		j := rand.Intn(i + 1)
		s[i], s[j] = s[j], s[i]
	}
}
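A short sketch of combining the ACL helpers above; the Perm* permission constants are assumed to be defined elsewhere in this package, as in upstream samuel/go-zookeeper:

// Build an ACL list: world-readable, full control for one digest user.
acl := zk.WorldACL(zk.PermRead) // PermRead/PermAll are assumed constants
acl = append(acl, zk.DigestACL(zk.PermAll, "bob", "s3cret")...)
// acl can then be passed to any call that accepts []ACL.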
165 vendor/github.com/ngaut/log/LICENSE generated vendored Normal file
@ -0,0 +1,165 @@
                   GNU LESSER GENERAL PUBLIC LICENSE
                       Version 3, 29 June 2007

 Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
 Everyone is permitted to copy and distribute verbatim copies
 of this license document, but changing it is not allowed.


  This version of the GNU Lesser General Public License incorporates
the terms and conditions of version 3 of the GNU General Public
License, supplemented by the additional permissions listed below.

  0. Additional Definitions.

  As used herein, "this License" refers to version 3 of the GNU Lesser
General Public License, and the "GNU GPL" refers to version 3 of the GNU
General Public License.

  "The Library" refers to a covered work governed by this License,
other than an Application or a Combined Work as defined below.

  An "Application" is any work that makes use of an interface provided
by the Library, but which is not otherwise based on the Library.
Defining a subclass of a class defined by the Library is deemed a mode
of using an interface provided by the Library.

  A "Combined Work" is a work produced by combining or linking an
Application with the Library.  The particular version of the Library
with which the Combined Work was made is also called the "Linked
Version".

  The "Minimal Corresponding Source" for a Combined Work means the
Corresponding Source for the Combined Work, excluding any source code
for portions of the Combined Work that, considered in isolation, are
based on the Application, and not on the Linked Version.

  The "Corresponding Application Code" for a Combined Work means the
object code and/or source code for the Application, including any data
and utility programs needed for reproducing the Combined Work from the
Application, but excluding the System Libraries of the Combined Work.

  1. Exception to Section 3 of the GNU GPL.

  You may convey a covered work under sections 3 and 4 of this License
without being bound by section 3 of the GNU GPL.

  2. Conveying Modified Versions.

  If you modify a copy of the Library, and, in your modifications, a
facility refers to a function or data to be supplied by an Application
that uses the facility (other than as an argument passed when the
facility is invoked), then you may convey a copy of the modified
version:

   a) under this License, provided that you make a good faith effort to
   ensure that, in the event an Application does not supply the
   function or data, the facility still operates, and performs
   whatever part of its purpose remains meaningful, or

   b) under the GNU GPL, with none of the additional permissions of
   this License applicable to that copy.

  3. Object Code Incorporating Material from Library Header Files.

  The object code form of an Application may incorporate material from
a header file that is part of the Library.  You may convey such object
code under terms of your choice, provided that, if the incorporated
material is not limited to numerical parameters, data structure
layouts and accessors, or small macros, inline functions and templates
(ten or fewer lines in length), you do both of the following:

   a) Give prominent notice with each copy of the object code that the
   Library is used in it and that the Library and its use are
   covered by this License.

   b) Accompany the object code with a copy of the GNU GPL and this license
   document.

  4. Combined Works.

  You may convey a Combined Work under terms of your choice that,
taken together, effectively do not restrict modification of the
portions of the Library contained in the Combined Work and reverse
engineering for debugging such modifications, if you also do each of
the following:

   a) Give prominent notice with each copy of the Combined Work that
   the Library is used in it and that the Library and its use are
   covered by this License.

   b) Accompany the Combined Work with a copy of the GNU GPL and this license
   document.

   c) For a Combined Work that displays copyright notices during
   execution, include the copyright notice for the Library among
   these notices, as well as a reference directing the user to the
   copies of the GNU GPL and this license document.

   d) Do one of the following:

       0) Convey the Minimal Corresponding Source under the terms of this
       License, and the Corresponding Application Code in a form
       suitable for, and under terms that permit, the user to
       recombine or relink the Application with a modified version of
       the Linked Version to produce a modified Combined Work, in the
       manner specified by section 6 of the GNU GPL for conveying
       Corresponding Source.

       1) Use a suitable shared library mechanism for linking with the
       Library.  A suitable mechanism is one that (a) uses at run time
       a copy of the Library already present on the user's computer
       system, and (b) will operate properly with a modified version
       of the Library that is interface-compatible with the Linked
       Version.

   e) Provide Installation Information, but only if you would otherwise
   be required to provide such information under section 6 of the
   GNU GPL, and only to the extent that such information is
   necessary to install and execute a modified version of the
   Combined Work produced by recombining or relinking the
   Application with a modified version of the Linked Version. (If
   you use option 4d0, the Installation Information must accompany
   the Minimal Corresponding Source and Corresponding Application
   Code. If you use option 4d1, you must provide the Installation
   Information in the manner specified by section 6 of the GNU GPL
   for conveying Corresponding Source.)

  5. Combined Libraries.

  You may place library facilities that are a work based on the
Library side by side in a single library together with other library
facilities that are not Applications and are not covered by this
License, and convey such a combined library under terms of your
choice, if you do both of the following:

   a) Accompany the combined library with a copy of the same work based
   on the Library, uncombined with any other library facilities,
   conveyed under the terms of this License.

   b) Give prominent notice with the combined library that part of it
   is a work based on the Library, and explaining where to find the
   accompanying uncombined form of the same work.

  6. Revised Versions of the GNU Lesser General Public License.

  The Free Software Foundation may publish revised and/or new versions
of the GNU Lesser General Public License from time to time. Such new
versions will be similar in spirit to the present version, but may
differ in detail to address new problems or concerns.

  Each version is given a distinguishing version number. If the
Library as you received it specifies that a certain numbered version
of the GNU Lesser General Public License "or any later version"
applies to it, you have the option of following the terms and
conditions either of that published version or of any later version
published by the Free Software Foundation. If the Library as you
received it does not specify a version number of the GNU Lesser
General Public License, you may choose any version of the GNU Lesser
General Public License ever published by the Free Software Foundation.

  If the Library as you received it specifies that a proxy can decide
whether future versions of the GNU Lesser General Public License shall
apply, that proxy's public statement of acceptance of any version is
permanent authorization for you to choose that version for the
Library.
2 vendor/github.com/ngaut/log/README.md generated vendored Normal file
@ -0,0 +1,2 @@
logging
=======
18 vendor/github.com/ngaut/log/crash_unix.go generated vendored Normal file
@ -0,0 +1,18 @@
// +build freebsd openbsd netbsd dragonfly darwin linux

package log

import (
	"log"
	"os"
	"syscall"
)

// CrashLog redirects the process's stderr (fd 2) to the given file, so that
// the stack trace of a later panic is captured on disk.
func CrashLog(file string) {
	f, err := os.OpenFile(file, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666)
	if err != nil {
		log.Println(err.Error())
	} else {
		syscall.Dup2(int(f.Fd()), 2)
	}
}
37 vendor/github.com/ngaut/log/crash_win.go generated vendored Normal file
@ -0,0 +1,37 @@
// +build windows

package log

import (
	"log"
	"os"
	"syscall"
)

var (
	kernel32         = syscall.MustLoadDLL("kernel32.dll")
	procSetStdHandle = kernel32.MustFindProc("SetStdHandle")
)

func setStdHandle(stdhandle int32, handle syscall.Handle) error {
	r0, _, e1 := syscall.Syscall(procSetStdHandle.Addr(), 2, uintptr(stdhandle), uintptr(handle), 0)
	if r0 == 0 {
		if e1 != 0 {
			return error(e1)
		}
		return syscall.EINVAL
	}
	return nil
}

// CrashLog redirects the process's standard error handle to the given file,
// so that the stack trace of a later panic is captured on disk.
func CrashLog(file string) {
	f, err := os.OpenFile(file, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666)
	if err != nil {
		log.Println(err.Error())
	} else {
		err = setStdHandle(syscall.STD_ERROR_HANDLE, syscall.Handle(f.Fd()))
		if err != nil {
			log.Println(err.Error())
		}
	}
}
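Both platform variants redirect the process's stderr handle to a file, so the pattern is the same on either OS. A minimal sketch (the path is a placeholder):

func main() {
	// Run this before anything can panic; stderr goes to the file from here on.
	log.CrashLog("/var/log/myapp-crash.log")
	// ... rest of the program ...
}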
380 vendor/github.com/ngaut/log/log.go generated vendored Normal file
@ -0,0 +1,380 @@
// Package log is a high-level wrapper around the standard log package that
// can emit different output based on the configured level.
package log

import (
	"fmt"
	"io"
	"log"
	"os"
	"runtime"
	"sync"
	"time"
)

const (
	Ldate         = log.Ldate
	Llongfile     = log.Llongfile
	Lmicroseconds = log.Lmicroseconds
	Lshortfile    = log.Lshortfile
	LstdFlags     = log.LstdFlags
	Ltime         = log.Ltime
)

type (
	LogLevel int
	LogType  int
)

const (
	LOG_FATAL   = LogType(0x1)
	LOG_ERROR   = LogType(0x2)
	LOG_WARNING = LogType(0x4)
	LOG_INFO    = LogType(0x8)
	LOG_DEBUG   = LogType(0x10)
)

const (
	LOG_LEVEL_NONE  = LogLevel(0x0)
	LOG_LEVEL_FATAL = LOG_LEVEL_NONE | LogLevel(LOG_FATAL)
	LOG_LEVEL_ERROR = LOG_LEVEL_FATAL | LogLevel(LOG_ERROR)
	LOG_LEVEL_WARN  = LOG_LEVEL_ERROR | LogLevel(LOG_WARNING)
	LOG_LEVEL_INFO  = LOG_LEVEL_WARN | LogLevel(LOG_INFO)
	LOG_LEVEL_DEBUG = LOG_LEVEL_INFO | LogLevel(LOG_DEBUG)
	LOG_LEVEL_ALL   = LOG_LEVEL_DEBUG
)

const FORMAT_TIME_DAY string = "20060102"
const FORMAT_TIME_HOUR string = "2006010215"

var _log *logger = New()

func init() {
	SetFlags(Ldate | Ltime | Lshortfile)
	SetHighlighting(runtime.GOOS != "windows")
}

func Logger() *log.Logger {
	return _log._log
}

func SetLevel(level LogLevel) {
	_log.SetLevel(level)
}

func GetLogLevel() LogLevel {
	return _log.level
}

func SetOutput(out io.Writer) {
	_log.SetOutput(out)
}

func SetOutputByName(path string) error {
	return _log.SetOutputByName(path)
}

func SetFlags(flags int) {
	_log._log.SetFlags(flags)
}

func Info(v ...interface{}) {
	_log.Info(v...)
}

func Infof(format string, v ...interface{}) {
	_log.Infof(format, v...)
}

func Debug(v ...interface{}) {
	_log.Debug(v...)
}

func Debugf(format string, v ...interface{}) {
	_log.Debugf(format, v...)
}

func Warn(v ...interface{}) {
	_log.Warning(v...)
}

func Warnf(format string, v ...interface{}) {
	_log.Warningf(format, v...)
}

func Warning(v ...interface{}) {
	_log.Warning(v...)
}

func Warningf(format string, v ...interface{}) {
	_log.Warningf(format, v...)
}

func Error(v ...interface{}) {
	_log.Error(v...)
}

func Errorf(format string, v ...interface{}) {
	_log.Errorf(format, v...)
}

func Fatal(v ...interface{}) {
	_log.Fatal(v...)
}

func Fatalf(format string, v ...interface{}) {
	_log.Fatalf(format, v...)
}

func SetLevelByString(level string) {
	_log.SetLevelByString(level)
}

func SetHighlighting(highlighting bool) {
	_log.SetHighlighting(highlighting)
}

func SetRotateByDay() {
	_log.SetRotateByDay()
}

func SetRotateByHour() {
	_log.SetRotateByHour()
}

type logger struct {
	_log         *log.Logger
	level        LogLevel
	highlighting bool

	dailyRolling bool
	hourRolling  bool

	fileName  string
	logSuffix string
	fd        *os.File

	lock sync.Mutex
}

func (l *logger) SetHighlighting(highlighting bool) {
	l.highlighting = highlighting
}

func (l *logger) SetLevel(level LogLevel) {
	l.level = level
}

func (l *logger) SetLevelByString(level string) {
	l.level = StringToLogLevel(level)
}

func (l *logger) SetRotateByDay() {
	l.dailyRolling = true
	l.logSuffix = genDayTime(time.Now())
}

func (l *logger) SetRotateByHour() {
	l.hourRolling = true
	l.logSuffix = genHourTime(time.Now())
}

func (l *logger) rotate() error {
	l.lock.Lock()
	defer l.lock.Unlock()

	var suffix string
	if l.dailyRolling {
		suffix = genDayTime(time.Now())
	} else if l.hourRolling {
		suffix = genHourTime(time.Now())
	} else {
		return nil
	}

	// Notice: if suffix is not equal to l.logSuffix, then rotate
	if suffix != l.logSuffix {
		err := l.doRotate(suffix)
		if err != nil {
			return err
		}
	}

	return nil
}

func (l *logger) doRotate(suffix string) error {
	// Notice: the Close error is not checked here; is this ok?
	l.fd.Close()

	lastFileName := l.fileName + "." + l.logSuffix
	err := os.Rename(l.fileName, lastFileName)
	if err != nil {
		return err
	}

	err = l.SetOutputByName(l.fileName)
	if err != nil {
		return err
	}

	l.logSuffix = suffix

	return nil
}

func (l *logger) SetOutput(out io.Writer) {
	l._log = log.New(out, l._log.Prefix(), l._log.Flags())
}

func (l *logger) SetOutputByName(path string) error {
	f, err := os.OpenFile(path, os.O_CREATE|os.O_APPEND|os.O_RDWR, 0666)
	if err != nil {
		log.Fatal(err)
	}

	l.SetOutput(f)

	l.fileName = path
	l.fd = f

	return err
}

func (l *logger) log(t LogType, v ...interface{}) {
	if l.level|LogLevel(t) != l.level {
		return
	}

	err := l.rotate()
	if err != nil {
		fmt.Fprintf(os.Stderr, "%s\n", err.Error())
		return
	}

	v1 := make([]interface{}, len(v)+2)
	logStr, logColor := LogTypeToString(t)
	if l.highlighting {
		v1[0] = "\033" + logColor + "m[" + logStr + "]"
		copy(v1[1:], v)
		v1[len(v)+1] = "\033[0m"
	} else {
		v1[0] = "[" + logStr + "]"
		copy(v1[1:], v)
		v1[len(v)+1] = ""
	}

	s := fmt.Sprintln(v1...)
	l._log.Output(4, s)
}

func (l *logger) logf(t LogType, format string, v ...interface{}) {
	if l.level|LogLevel(t) != l.level {
		return
	}

	err := l.rotate()
	if err != nil {
		fmt.Fprintf(os.Stderr, "%s\n", err.Error())
		return
	}

	logStr, logColor := LogTypeToString(t)
	var s string
	if l.highlighting {
		s = "\033" + logColor + "m[" + logStr + "] " + fmt.Sprintf(format, v...) + "\033[0m"
	} else {
		s = "[" + logStr + "] " + fmt.Sprintf(format, v...)
	}
	l._log.Output(4, s)
}

func (l *logger) Fatal(v ...interface{}) {
	l.log(LOG_FATAL, v...)
	os.Exit(-1)
}

func (l *logger) Fatalf(format string, v ...interface{}) {
	l.logf(LOG_FATAL, format, v...)
	os.Exit(-1)
}

func (l *logger) Error(v ...interface{}) {
	l.log(LOG_ERROR, v...)
}

func (l *logger) Errorf(format string, v ...interface{}) {
	l.logf(LOG_ERROR, format, v...)
}

func (l *logger) Warning(v ...interface{}) {
	l.log(LOG_WARNING, v...)
}

func (l *logger) Warningf(format string, v ...interface{}) {
	l.logf(LOG_WARNING, format, v...)
}

func (l *logger) Debug(v ...interface{}) {
	l.log(LOG_DEBUG, v...)
}

func (l *logger) Debugf(format string, v ...interface{}) {
	l.logf(LOG_DEBUG, format, v...)
}

func (l *logger) Info(v ...interface{}) {
	l.log(LOG_INFO, v...)
}

func (l *logger) Infof(format string, v ...interface{}) {
	l.logf(LOG_INFO, format, v...)
}

func StringToLogLevel(level string) LogLevel {
	switch level {
	case "fatal":
		return LOG_LEVEL_FATAL
	case "error":
		return LOG_LEVEL_ERROR
	case "warn":
		return LOG_LEVEL_WARN
	case "warning":
		return LOG_LEVEL_WARN
	case "debug":
		return LOG_LEVEL_DEBUG
	case "info":
		return LOG_LEVEL_INFO
	}
	return LOG_LEVEL_ALL
}

func LogTypeToString(t LogType) (string, string) {
	switch t {
	case LOG_FATAL:
		return "fatal", "[0;31"
	case LOG_ERROR:
		return "error", "[0;31"
	case LOG_WARNING:
		return "warning", "[0;33"
	case LOG_DEBUG:
		return "debug", "[0;36"
	case LOG_INFO:
		return "info", "[0;37"
	}
	return "unknown", "[0;37"
}

func genDayTime(t time.Time) string {
	return t.Format(FORMAT_TIME_DAY)
}

func genHourTime(t time.Time) string {
	return t.Format(FORMAT_TIME_HOUR)
}

func New() *logger {
	return Newlogger(os.Stdout, "")
}

func Newlogger(w io.Writer, prefix string) *logger {
	return &logger{_log: log.New(w, prefix, LstdFlags), level: LOG_LEVEL_ALL, highlighting: true}
}
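A small sketch of the package-level API defined above (the file path is a placeholder):

package main

import (
	"os"

	"github.com/ngaut/log"
)

func main() {
	log.SetLevelByString("info") // LOG_LEVEL_INFO: debug output is filtered
	log.SetRotateByDay()         // switch files when the day suffix changes
	if err := log.SetOutputByName("/tmp/app.log"); err != nil {
		log.Fatal(err)
	}
	log.Infof("started, pid=%d", os.Getpid())
	log.Debug("suppressed at the info level")
}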
72 vendor/github.com/ngaut/pools/id_pool.go generated vendored Normal file
@ -0,0 +1,72 @@
// Copyright 2014, Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package pools

import (
	"fmt"
	"sync"
)

// IDPool is used to ensure that the set of IDs in use concurrently never
// contains any duplicates. The IDs start at 1 and increase without bound, but
// will never be larger than the peak number of concurrent uses.
//
// IDPool's Get() and Set() methods can be used concurrently.
type IDPool struct {
	sync.Mutex

	// used holds the set of values that have been returned to us with Put().
	used map[uint32]bool
	// maxUsed remembers the largest value we've given out.
	maxUsed uint32
}

// NewIDPool creates and initializes an IDPool.
func NewIDPool() *IDPool {
	return &IDPool{
		used: make(map[uint32]bool),
	}
}

// Get returns an ID that is unique among currently active users of this pool.
func (pool *IDPool) Get() (id uint32) {
	pool.Lock()
	defer pool.Unlock()

	// Pick a value that's been returned, if any.
	for key := range pool.used {
		delete(pool.used, key)
		return key
	}

	// No recycled IDs are available, so increase the pool size.
	pool.maxUsed++
	return pool.maxUsed
}

// Put recycles an ID back into the pool for others to use. Putting back a value
// of 0, or a value that is not currently "checked out", will result in a panic
// because that should never happen except in the case of a programming error.
func (pool *IDPool) Put(id uint32) {
	pool.Lock()
	defer pool.Unlock()

	if id < 1 || id > pool.maxUsed {
		panic(fmt.Errorf("IDPool.Put(%v): invalid value, must be in the range [1,%v]", id, pool.maxUsed))
	}

	if pool.used[id] {
		panic(fmt.Errorf("IDPool.Put(%v): can't put value that was already recycled", id))
	}

	// If we're recycling maxUsed, just shrink the pool.
	if id == pool.maxUsed {
		pool.maxUsed = id - 1
		return
	}

	// Add it to the set of recycled IDs.
	pool.used[id] = true
}
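A minimal sketch of the IDPool contract, with every Get paired with a Put:

func exampleIDPool() {
	pool := pools.NewIDPool()
	a := pool.Get() // 1: the pool only grows as far as peak concurrency
	b := pool.Get() // 2
	pool.Put(a)     // 1 becomes recyclable
	c := pool.Get() // reuses 1 instead of growing to 3
	pool.Put(b)
	pool.Put(c)
}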
149 vendor/github.com/ngaut/pools/numbered.go generated vendored Normal file
@ -0,0 +1,149 @@
// Copyright 2012, Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package pools

import (
	"fmt"
	"sync"
	"time"
)

// Numbered allows you to manage resources by tracking them with numbers.
// There are no interface restrictions on what you can track.
type Numbered struct {
	mu        sync.Mutex
	empty     *sync.Cond // Broadcast when pool becomes empty
	resources map[int64]*numberedWrapper
}

type numberedWrapper struct {
	val         interface{}
	inUse       bool
	purpose     string
	timeCreated time.Time
	timeUsed    time.Time
}

func NewNumbered() *Numbered {
	n := &Numbered{resources: make(map[int64]*numberedWrapper)}
	n.empty = sync.NewCond(&n.mu)
	return n
}

// Register starts tracking a resource by the supplied id.
// It does not lock the object.
// It returns an error if the id already exists.
func (nu *Numbered) Register(id int64, val interface{}) error {
	nu.mu.Lock()
	defer nu.mu.Unlock()
	if _, ok := nu.resources[id]; ok {
		return fmt.Errorf("already present")
	}
	now := time.Now()
	nu.resources[id] = &numberedWrapper{
		val:         val,
		timeCreated: now,
		timeUsed:    now,
	}
	return nil
}

// Unregister forgets the specified resource.
// If the resource is not present, it's ignored.
func (nu *Numbered) Unregister(id int64) {
	nu.mu.Lock()
	defer nu.mu.Unlock()
	delete(nu.resources, id)
	if len(nu.resources) == 0 {
		nu.empty.Broadcast()
	}
}

// Get locks the resource for use. It accepts a purpose as a string.
// If it cannot be found, it returns a "not found" error. If in use,
// it returns an "in use: purpose" error.
func (nu *Numbered) Get(id int64, purpose string) (val interface{}, err error) {
	nu.mu.Lock()
	defer nu.mu.Unlock()
	nw, ok := nu.resources[id]
	if !ok {
		return nil, fmt.Errorf("not found")
	}
	if nw.inUse {
		return nil, fmt.Errorf("in use: %s", nw.purpose)
	}
	nw.inUse = true
	nw.purpose = purpose
	return nw.val, nil
}

// Put unlocks a resource for someone else to use.
func (nu *Numbered) Put(id int64) {
	nu.mu.Lock()
	defer nu.mu.Unlock()
	if nw, ok := nu.resources[id]; ok {
		nw.inUse = false
		nw.purpose = ""
		nw.timeUsed = time.Now()
	}
}

// GetOutdated returns a list of resources that are older than age, and locks them.
// It does not return any resources that are already locked.
func (nu *Numbered) GetOutdated(age time.Duration, purpose string) (vals []interface{}) {
	nu.mu.Lock()
	defer nu.mu.Unlock()
	now := time.Now()
	for _, nw := range nu.resources {
		if nw.inUse {
			continue
		}
		if nw.timeCreated.Add(age).Sub(now) <= 0 {
			nw.inUse = true
			nw.purpose = purpose
			vals = append(vals, nw.val)
		}
	}
	return vals
}

// GetIdle returns a list of resources that have been idle for longer
// than timeout, and locks them. It does not return any resources that
// are already locked.
func (nu *Numbered) GetIdle(timeout time.Duration, purpose string) (vals []interface{}) {
	nu.mu.Lock()
	defer nu.mu.Unlock()
	now := time.Now()
	for _, nw := range nu.resources {
		if nw.inUse {
			continue
		}
		if nw.timeUsed.Add(timeout).Sub(now) <= 0 {
			nw.inUse = true
			nw.purpose = purpose
			vals = append(vals, nw.val)
		}
	}
	return vals
}

// WaitForEmpty returns as soon as the pool becomes empty.
func (nu *Numbered) WaitForEmpty() {
	nu.mu.Lock()
	defer nu.mu.Unlock()
	for len(nu.resources) != 0 {
		nu.empty.Wait()
	}
}

func (nu *Numbered) StatsJSON() string {
	return fmt.Sprintf("{\"Size\": %v}", nu.Size())
}

func (nu *Numbered) Size() (size int64) {
	nu.mu.Lock()
	defer nu.mu.Unlock()
	return int64(len(nu.resources))
}
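A sketch of the Numbered lifecycle; the conn value and the id 42 are placeholders:

func exampleNumbered(conn interface{}) error {
	n := pools.NewNumbered()
	if err := n.Register(42, conn); err != nil {
		return err
	}
	v, err := n.Get(42, "query") // locks id 42; a concurrent Get fails with "in use: query"
	if err != nil {
		return err
	}
	_ = v            // use the resource
	n.Put(42)        // unlock and refresh timeUsed
	n.Unregister(42) // stop tracking; WaitForEmpty would now return
	return nil
}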
228 vendor/github.com/ngaut/pools/resource_pool.go generated vendored Normal file
@ -0,0 +1,228 @@
// Copyright 2012, Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package pools provides functionality to manage and reuse resources
// like connections.
package pools

import (
	"fmt"
	"time"

	"github.com/ngaut/sync2"
)

var (
	CLOSED_ERR = fmt.Errorf("ResourcePool is closed")
)

// Factory is a function that can be used to create a resource.
type Factory func() (Resource, error)

// Every resource needs to support the Resource interface.
// Thread synchronization between Close() and IsClosed()
// is the responsibility of the caller.
type Resource interface {
	Close()
}

// ResourcePool allows you to use a pool of resources.
type ResourcePool struct {
	resources   chan resourceWrapper
	factory     Factory
	capacity    sync2.AtomicInt64
	idleTimeout sync2.AtomicDuration

	// stats
	waitCount sync2.AtomicInt64
	waitTime  sync2.AtomicDuration
}

type resourceWrapper struct {
	resource Resource
	timeUsed time.Time
}

// NewResourcePool creates a new ResourcePool.
// capacity is the initial capacity of the pool.
// maxCap is the maximum capacity.
// If a resource is unused beyond idleTimeout, it's discarded.
// An idleTimeout of 0 means that there is no timeout.
func NewResourcePool(factory Factory, capacity, maxCap int, idleTimeout time.Duration) *ResourcePool {
	if capacity <= 0 || maxCap <= 0 || capacity > maxCap {
		panic(fmt.Errorf("Invalid/out of range capacity"))
	}
	rp := &ResourcePool{
		resources:   make(chan resourceWrapper, maxCap),
		factory:     factory,
		capacity:    sync2.AtomicInt64(capacity),
		idleTimeout: sync2.AtomicDuration(idleTimeout),
	}
	for i := 0; i < capacity; i++ {
		rp.resources <- resourceWrapper{}
	}
	return rp
}

// Close empties the pool calling Close on all its resources.
// You can call Close while there are outstanding resources.
// It waits for all resources to be returned (Put).
// After a Close, Get and TryGet are not allowed.
func (rp *ResourcePool) Close() {
	rp.SetCapacity(0)
}

func (rp *ResourcePool) IsClosed() (closed bool) {
	return rp.capacity.Get() == 0
}

// Get will return the next available resource. If capacity
// has not been reached, it will create a new one using the factory. Otherwise,
// it will wait indefinitely until the next resource becomes available.
func (rp *ResourcePool) Get() (resource Resource, err error) {
	return rp.get(true)
}

// TryGet will return the next available resource. If none is available, and capacity
// has not been reached, it will create a new one using the factory. Otherwise,
// it will return nil with no error.
func (rp *ResourcePool) TryGet() (resource Resource, err error) {
	return rp.get(false)
}

func (rp *ResourcePool) get(wait bool) (resource Resource, err error) {
	// Fetch
	var wrapper resourceWrapper
	var ok bool
	select {
	case wrapper, ok = <-rp.resources:
	default:
		if !wait {
			return nil, nil
		}
		startTime := time.Now()
		wrapper, ok = <-rp.resources
		rp.recordWait(startTime)
	}
	if !ok {
		return nil, CLOSED_ERR
	}

	// Unwrap
	timeout := rp.idleTimeout.Get()
	if wrapper.resource != nil && timeout > 0 && wrapper.timeUsed.Add(timeout).Sub(time.Now()) < 0 {
		wrapper.resource.Close()
		wrapper.resource = nil
	}
	if wrapper.resource == nil {
		wrapper.resource, err = rp.factory()
		if err != nil {
			rp.resources <- resourceWrapper{}
		}
	}
	return wrapper.resource, err
}

// Put will return a resource to the pool. For every successful Get,
// a corresponding Put is required. If you no longer need a resource,
// you will need to call Put(nil) instead of returning the closed resource.
// This will eventually cause a new resource to be created in its place.
func (rp *ResourcePool) Put(resource Resource) {
	var wrapper resourceWrapper
	if resource != nil {
		wrapper = resourceWrapper{resource, time.Now()}
	}
	select {
	case rp.resources <- wrapper:
	default:
		panic(fmt.Errorf("Attempt to Put into a full ResourcePool"))
	}
}

// SetCapacity changes the capacity of the pool.
// You can use it to shrink or expand, but not beyond
// the max capacity. If the change requires the pool
// to be shrunk, SetCapacity waits till the necessary
// number of resources are returned to the pool.
// A SetCapacity of 0 is equivalent to closing the ResourcePool.
func (rp *ResourcePool) SetCapacity(capacity int) error {
	if capacity < 0 || capacity > cap(rp.resources) {
		return fmt.Errorf("capacity %d is out of range", capacity)
	}

	// Atomically swap new capacity with old, but only
	// if old capacity is non-zero.
	var oldcap int
	for {
		oldcap = int(rp.capacity.Get())
		if oldcap == 0 {
			return CLOSED_ERR
		}
		if oldcap == capacity {
			return nil
		}
		if rp.capacity.CompareAndSwap(int64(oldcap), int64(capacity)) {
			break
		}
	}

	if capacity < oldcap {
		for i := 0; i < oldcap-capacity; i++ {
			wrapper := <-rp.resources
			if wrapper.resource != nil {
				wrapper.resource.Close()
			}
		}
	} else {
		for i := 0; i < capacity-oldcap; i++ {
			rp.resources <- resourceWrapper{}
		}
	}
	if capacity == 0 {
		close(rp.resources)
	}
	return nil
}

func (rp *ResourcePool) recordWait(start time.Time) {
	rp.waitCount.Add(1)
	rp.waitTime.Add(time.Now().Sub(start))
}

func (rp *ResourcePool) SetIdleTimeout(idleTimeout time.Duration) {
	rp.idleTimeout.Set(idleTimeout)
}

func (rp *ResourcePool) StatsJSON() string {
	c, a, mx, wc, wt, it := rp.Stats()
	return fmt.Sprintf(`{"Capacity": %v, "Available": %v, "MaxCapacity": %v, "WaitCount": %v, "WaitTime": %v, "IdleTimeout": %v}`, c, a, mx, wc, int64(wt), int64(it))
}

func (rp *ResourcePool) Stats() (capacity, available, maxCap, waitCount int64, waitTime, idleTimeout time.Duration) {
	return rp.Capacity(), rp.Available(), rp.MaxCap(), rp.WaitCount(), rp.WaitTime(), rp.IdleTimeout()
}

func (rp *ResourcePool) Capacity() int64 {
	return rp.capacity.Get()
}

func (rp *ResourcePool) Available() int64 {
	return int64(len(rp.resources))
}

func (rp *ResourcePool) MaxCap() int64 {
	return int64(cap(rp.resources))
}

func (rp *ResourcePool) WaitCount() int64 {
	return rp.waitCount.Get()
}

func (rp *ResourcePool) WaitTime() time.Duration {
	return rp.waitTime.Get()
}

func (rp *ResourcePool) IdleTimeout() time.Duration {
	return rp.idleTimeout.Get()
}
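A sketch of using the pool with a hypothetical connection type; Resource only requires a Close method:

type dbConn struct{} // hypothetical resource

func (c *dbConn) Close() {}

func examplePool() error {
	factory := func() (pools.Resource, error) { return &dbConn{}, nil }
	rp := pools.NewResourcePool(factory, 5, 10, time.Minute)
	defer rp.Close()

	r, err := rp.Get() // creates lazily, or blocks until one is returned
	if err != nil {
		return err
	}
	defer rp.Put(r) // every successful Get needs a matching Put
	conn := r.(*dbConn)
	_ = conn // use the connection
	return nil
}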
214 vendor/github.com/ngaut/pools/roundrobin.go generated vendored Normal file
@ -0,0 +1,214 @@
// Copyright 2012, Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package pools

import (
	"fmt"
	"sync"
	"time"
)

// RoundRobin is deprecated. Use ResourcePool instead.
// RoundRobin allows you to use a pool of resources in a round robin fashion.
type RoundRobin struct {
	mu          sync.Mutex
	available   *sync.Cond
	resources   chan fifoWrapper
	size        int64
	factory     Factory
	idleTimeout time.Duration

	// stats
	waitCount int64
	waitTime  time.Duration
}

type fifoWrapper struct {
	resource Resource
	timeUsed time.Time
}

// NewRoundRobin creates a new RoundRobin pool.
// capacity is the maximum number of resources RoundRobin will create.
// The factory used to create resources is supplied later via Open.
// If a resource is unused beyond idleTimeout, it's discarded.
func NewRoundRobin(capacity int, idleTimeout time.Duration) *RoundRobin {
	r := &RoundRobin{
		resources:   make(chan fifoWrapper, capacity),
		size:        0,
		idleTimeout: idleTimeout,
	}
	r.available = sync.NewCond(&r.mu)
	return r
}

// Open starts allowing the creation of resources
func (rr *RoundRobin) Open(factory Factory) {
	rr.mu.Lock()
	defer rr.mu.Unlock()
	rr.factory = factory
}

// Close empties the pool calling Close on all its resources.
// It waits for all resources to be returned (Put).
func (rr *RoundRobin) Close() {
	rr.mu.Lock()
	defer rr.mu.Unlock()
	for rr.size > 0 {
		select {
		case fw := <-rr.resources:
			go fw.resource.Close()
			rr.size--
		default:
			rr.available.Wait()
		}
	}
	rr.factory = nil
}

func (rr *RoundRobin) IsClosed() bool {
	return rr.factory == nil
}

// Get will return the next available resource. If none is available, and capacity
// has not been reached, it will create a new one using the factory. Otherwise,
// it will wait indefinitely until the next resource becomes available.
func (rr *RoundRobin) Get() (resource Resource, err error) {
	return rr.get(true)
}

// TryGet will return the next available resource. If none is available, and capacity
// has not been reached, it will create a new one using the factory. Otherwise,
// it will return nil with no error.
func (rr *RoundRobin) TryGet() (resource Resource, err error) {
	return rr.get(false)
}

func (rr *RoundRobin) get(wait bool) (resource Resource, err error) {
	rr.mu.Lock()
	defer rr.mu.Unlock()
	// Any waits in this loop will release the lock, and it will be
	// reacquired before the waits return.
	for {
		select {
		case fw := <-rr.resources:
			// Found a free resource in the channel
			if rr.idleTimeout > 0 && fw.timeUsed.Add(rr.idleTimeout).Sub(time.Now()) < 0 {
				// resource has been idle for too long. Discard & go for next.
				go fw.resource.Close()
				rr.size--
				// Nobody else should be waiting, but signal anyway.
				rr.available.Signal()
				continue
			}
			return fw.resource, nil
		default:
			// resource channel is empty
			if rr.size >= int64(cap(rr.resources)) {
				// The pool is full
				if wait {
					start := time.Now()
					rr.available.Wait()
					rr.recordWait(start)
					continue
				}
				return nil, nil
			}
			// Pool is not full. Create a resource.
			if resource, err = rr.waitForCreate(); err != nil {
				// size was decremented, and somebody could be waiting.
				rr.available.Signal()
				return nil, err
			}
			// Creation successful. Account for this by incrementing size.
			rr.size++
			return resource, err
		}
	}
}

func (rr *RoundRobin) recordWait(start time.Time) {
	rr.waitCount++
	rr.waitTime += time.Now().Sub(start)
}

func (rr *RoundRobin) waitForCreate() (resource Resource, err error) {
	// Prevent thundering herd: increment size before creating resource, and decrement after.
	rr.size++
	rr.mu.Unlock()
	defer func() {
		rr.mu.Lock()
		rr.size--
	}()
	return rr.factory()
}

// Put will return a resource to the pool. You MUST return every resource to the pool,
// even if it's closed. If a resource is closed, you should call Put(nil).
func (rr *RoundRobin) Put(resource Resource) {
	rr.mu.Lock()
	defer rr.available.Signal()
	defer rr.mu.Unlock()

	if rr.size > int64(cap(rr.resources)) {
		if resource != nil {
			go resource.Close()
		}
		rr.size--
	} else if resource == nil {
		rr.size--
	} else {
		if len(rr.resources) == cap(rr.resources) {
			panic("unexpected")
		}
		rr.resources <- fifoWrapper{resource, time.Now()}
	}
}

// SetCapacity changes the capacity of the pool.
// You can use it to expand or shrink.
func (rr *RoundRobin) SetCapacity(capacity int) error {
	rr.mu.Lock()
	defer rr.available.Broadcast()
	defer rr.mu.Unlock()

	nr := make(chan fifoWrapper, capacity)
	// This loop transfers resources from the old channel
	// to the new one, until it fills up or runs out.
	// It discards extras, if any.
	for {
		select {
		case fw := <-rr.resources:
			if len(nr) < cap(nr) {
				nr <- fw
			} else {
				go fw.resource.Close()
				rr.size--
			}
			continue
		default:
		}
		break
	}
	rr.resources = nr
	return nil
}

func (rr *RoundRobin) SetIdleTimeout(idleTimeout time.Duration) {
	rr.mu.Lock()
	defer rr.mu.Unlock()
	rr.idleTimeout = idleTimeout
}

func (rr *RoundRobin) StatsJSON() string {
	s, c, a, wc, wt, it := rr.Stats()
	return fmt.Sprintf("{\"Size\": %v, \"Capacity\": %v, \"Available\": %v, \"WaitCount\": %v, \"WaitTime\": %v, \"IdleTimeout\": %v}", s, c, a, wc, int64(wt), int64(it))
}

func (rr *RoundRobin) Stats() (size, capacity, available, waitCount int64, waitTime, idleTimeout time.Duration) {
	rr.mu.Lock()
	defer rr.mu.Unlock()
	return rr.size, int64(cap(rr.resources)), int64(len(rr.resources)), rr.waitCount, rr.waitTime, rr.idleTimeout
}
28 vendor/github.com/ngaut/pools/vitess_license generated vendored Normal file
@ -0,0 +1,28 @@
Copyright 2012, Google Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

    * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
    * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
114 vendor/github.com/ngaut/sync2/atomic.go generated vendored Normal file
@ -0,0 +1,114 @@
// Copyright 2013, Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package sync2

import (
	"sync"
	"sync/atomic"
	"time"
)

type AtomicInt32 int32

func (i *AtomicInt32) Add(n int32) int32 {
	return atomic.AddInt32((*int32)(i), n)
}

func (i *AtomicInt32) Set(n int32) {
	atomic.StoreInt32((*int32)(i), n)
}

func (i *AtomicInt32) Get() int32 {
	return atomic.LoadInt32((*int32)(i))
}

func (i *AtomicInt32) CompareAndSwap(oldval, newval int32) (swapped bool) {
	return atomic.CompareAndSwapInt32((*int32)(i), oldval, newval)
}

type AtomicUint32 uint32

func (i *AtomicUint32) Add(n uint32) uint32 {
	return atomic.AddUint32((*uint32)(i), n)
}

func (i *AtomicUint32) Set(n uint32) {
	atomic.StoreUint32((*uint32)(i), n)
}

func (i *AtomicUint32) Get() uint32 {
	return atomic.LoadUint32((*uint32)(i))
}

func (i *AtomicUint32) CompareAndSwap(oldval, newval uint32) (swapped bool) {
	return atomic.CompareAndSwapUint32((*uint32)(i), oldval, newval)
}

type AtomicInt64 int64

func (i *AtomicInt64) Add(n int64) int64 {
	return atomic.AddInt64((*int64)(i), n)
}

func (i *AtomicInt64) Set(n int64) {
	atomic.StoreInt64((*int64)(i), n)
}

func (i *AtomicInt64) Get() int64 {
	return atomic.LoadInt64((*int64)(i))
}

func (i *AtomicInt64) CompareAndSwap(oldval, newval int64) (swapped bool) {
	return atomic.CompareAndSwapInt64((*int64)(i), oldval, newval)
}

type AtomicDuration int64

func (d *AtomicDuration) Add(duration time.Duration) time.Duration {
	return time.Duration(atomic.AddInt64((*int64)(d), int64(duration)))
}

func (d *AtomicDuration) Set(duration time.Duration) {
	atomic.StoreInt64((*int64)(d), int64(duration))
}

func (d *AtomicDuration) Get() time.Duration {
	return time.Duration(atomic.LoadInt64((*int64)(d)))
}

func (d *AtomicDuration) CompareAndSwap(oldval, newval time.Duration) (swapped bool) {
	return atomic.CompareAndSwapInt64((*int64)(d), int64(oldval), int64(newval))
}

// AtomicString gives you atomic-style APIs for string, but
// it's only a convenience wrapper that uses a mutex. So, it's
// not as efficient as the rest of the atomic types.
type AtomicString struct {
	mu  sync.Mutex
	str string
}

func (s *AtomicString) Set(str string) {
	s.mu.Lock()
	s.str = str
	s.mu.Unlock()
}

func (s *AtomicString) Get() string {
	s.mu.Lock()
	str := s.str
	s.mu.Unlock()
	return str
}

func (s *AtomicString) CompareAndSwap(oldval, newval string) (swapped bool) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.str == oldval {
		s.str = newval
		return true
	}
	return false
}
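A minimal usage sketch for the sync2 atomics above; the counter and leader-address names are hypothetical, not part of the package:

package main

import (
	"fmt"

	"github.com/ngaut/sync2"
)

func main() {
	var requests sync2.AtomicInt64 // hypothetical counter
	requests.Add(1)
	requests.Add(1)
	fmt.Println(requests.Get()) // 2

	var leader sync2.AtomicString // hypothetical mutex-backed string
	leader.Set("10.0.0.1:1234")
	if leader.CompareAndSwap("10.0.0.1:1234", "10.0.0.2:1234") {
		fmt.Println("leader swapped to", leader.Get())
	}
}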
56
vendor/github.com/ngaut/sync2/cond.go
generated
vendored
Normal file
@@ -0,0 +1,56 @@
// Copyright 2013, Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package sync2

import (
	"sync"
)

// Cond is an alternate implementation of sync.Cond
type Cond struct {
	L       sync.Locker
	sema    chan struct{}
	waiters AtomicInt64
}

func NewCond(l sync.Locker) *Cond {
	return &Cond{L: l, sema: make(chan struct{})}
}

func (c *Cond) Wait() {
	c.waiters.Add(1)
	c.L.Unlock()
	<-c.sema
	c.L.Lock()
}

func (c *Cond) Signal() {
	for {
		w := c.waiters.Get()
		if w == 0 {
			return
		}
		if c.waiters.CompareAndSwap(w, w-1) {
			break
		}
	}
	c.sema <- struct{}{}
}

func (c *Cond) Broadcast() {
	var w int64
	for {
		w = c.waiters.Get()
		if w == 0 {
			return
		}
		if c.waiters.CompareAndSwap(w, 0) {
			break
		}
	}
	for i := int64(0); i < w; i++ {
		c.sema <- struct{}{}
	}
}
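This Cond follows the standard sync.Cond protocol: Wait must be called with the lock held, and the condition re-checked in a loop. A minimal sketch, with a hypothetical ready flag:

package main

import (
	"fmt"
	"sync"

	"github.com/ngaut/sync2"
)

func main() {
	var mu sync.Mutex
	cond := sync2.NewCond(&mu)
	ready := false // hypothetical condition guarded by mu

	go func() {
		mu.Lock()
		ready = true
		mu.Unlock()
		cond.Signal()
	}()

	mu.Lock()
	for !ready {
		cond.Wait() // releases mu while blocked, reacquires before returning
	}
	mu.Unlock()
	fmt.Println("condition met")
}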
55
vendor/github.com/ngaut/sync2/semaphore.go
generated
vendored
Normal file
@@ -0,0 +1,55 @@
// Copyright 2012, Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package sync2

// What's in a name? Channels have all you need to emulate a counting
// semaphore with a boatload of extra functionality. However, in some
// cases, you just want a familiar API.

import (
	"time"
)

// Semaphore is a counting semaphore with the option to
// specify a timeout.
type Semaphore struct {
	slots   chan struct{}
	timeout time.Duration
}

// NewSemaphore creates a Semaphore. The count parameter must be a positive
// number. A timeout of zero means that there is no timeout.
func NewSemaphore(count int, timeout time.Duration) *Semaphore {
	sem := &Semaphore{
		slots:   make(chan struct{}, count),
		timeout: timeout,
	}
	for i := 0; i < count; i++ {
		sem.slots <- struct{}{}
	}
	return sem
}

// Acquire returns true on successful acquisition, and
// false on a timeout.
func (sem *Semaphore) Acquire() bool {
	if sem.timeout == 0 {
		<-sem.slots
		return true
	}
	select {
	case <-sem.slots:
		return true
	case <-time.After(sem.timeout):
		return false
	}
}

// Release releases the acquired semaphore. You must
// not release more than the number of semaphores you've
// acquired.
func (sem *Semaphore) Release() {
	sem.slots <- struct{}{}
}
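A minimal usage sketch for the Semaphore above; the slot count and timeout are arbitrary example values:

package main

import (
	"fmt"
	"time"

	"github.com/ngaut/sync2"
)

func main() {
	// Two slots; Acquire gives up after 100ms if no slot frees up.
	sem := sync2.NewSemaphore(2, 100*time.Millisecond)

	if sem.Acquire() {
		defer sem.Release() // release exactly what was acquired
		fmt.Println("got a slot")
	} else {
		fmt.Println("timed out waiting for a slot")
	}
}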
121
vendor/github.com/ngaut/sync2/service_manager.go
generated
vendored
Normal file
@@ -0,0 +1,121 @@
// Copyright 2013, Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package sync2

import (
	"sync"
)

// These are the three predefined states of a service.
const (
	SERVICE_STOPPED = iota
	SERVICE_RUNNING
	SERVICE_SHUTTING_DOWN
)

var stateNames = []string{
	"Stopped",
	"Running",
	"ShuttingDown",
}

// ServiceManager manages the state of a service through its lifecycle.
type ServiceManager struct {
	mu    sync.Mutex
	wg    sync.WaitGroup
	err   error // err is the error returned from the service function.
	state AtomicInt64
	// shutdown is created when the service starts and is closed when the service
	// enters the SERVICE_SHUTTING_DOWN state.
	shutdown chan struct{}
}

// Go tries to change the state from SERVICE_STOPPED to SERVICE_RUNNING.
//
// If the current state is not SERVICE_STOPPED (already running), it returns
// false immediately.
//
// On successful transition, it launches the service as a goroutine and returns
// true. The service function is responsible for returning on its own when
// requested, either by regularly checking svc.IsRunning(), or by waiting for
// the svc.ShuttingDown channel to be closed.
//
// When the service func returns, the state is reverted to SERVICE_STOPPED.
func (svm *ServiceManager) Go(service func(svc *ServiceContext) error) bool {
	svm.mu.Lock()
	defer svm.mu.Unlock()
	if !svm.state.CompareAndSwap(SERVICE_STOPPED, SERVICE_RUNNING) {
		return false
	}
	svm.wg.Add(1)
	svm.err = nil
	svm.shutdown = make(chan struct{})
	go func() {
		svm.err = service(&ServiceContext{ShuttingDown: svm.shutdown})
		svm.state.Set(SERVICE_STOPPED)
		svm.wg.Done()
	}()
	return true
}

// Stop tries to change the state from SERVICE_RUNNING to SERVICE_SHUTTING_DOWN.
// If the current state is not SERVICE_RUNNING, it returns false immediately.
// On successful transition, it waits for the service to finish, and returns true.
// You are allowed to Go() again after a Stop().
func (svm *ServiceManager) Stop() bool {
	svm.mu.Lock()
	defer svm.mu.Unlock()
	if !svm.state.CompareAndSwap(SERVICE_RUNNING, SERVICE_SHUTTING_DOWN) {
		return false
	}
	// Signal the service that we've transitioned to SERVICE_SHUTTING_DOWN.
	close(svm.shutdown)
	svm.shutdown = nil
	svm.wg.Wait()
	return true
}

// Wait waits for the service to terminate if it's currently running.
func (svm *ServiceManager) Wait() {
	svm.wg.Wait()
}

// Join waits for the service to terminate and returns the value returned by the
// service function.
func (svm *ServiceManager) Join() error {
	svm.wg.Wait()
	return svm.err
}

// State returns the current state of the service.
// This should only be used to report the current state.
func (svm *ServiceManager) State() int64 {
	return svm.state.Get()
}

// StateName returns the name of the current state.
func (svm *ServiceManager) StateName() string {
	return stateNames[svm.State()]
}

// ServiceContext is passed into the service function to give it access to
// information about the running service.
type ServiceContext struct {
	// ShuttingDown is a channel that the service can select on to be notified
	// when it should shut down. The channel is closed when the state transitions
	// from SERVICE_RUNNING to SERVICE_SHUTTING_DOWN.
	ShuttingDown chan struct{}
}

// IsRunning returns true if the ServiceContext.ShuttingDown channel has not
// been closed yet.
func (svc *ServiceContext) IsRunning() bool {
	select {
	case <-svc.ShuttingDown:
		return false
	default:
		return true
	}
}
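A minimal sketch of the Go/Stop lifecycle described above; the polling loop and sleep interval are illustrative only:

package main

import (
	"fmt"
	"time"

	"github.com/ngaut/sync2"
)

func main() {
	var svm sync2.ServiceManager
	started := svm.Go(func(svc *sync2.ServiceContext) error {
		for svc.IsRunning() { // poll the shutdown signal
			time.Sleep(10 * time.Millisecond)
		}
		return nil // returning flips the state back to SERVICE_STOPPED
	})
	fmt.Println("started:", started, "state:", svm.StateName())

	svm.Stop() // closes ShuttingDown and waits for the func to return
	fmt.Println("state:", svm.StateName())
}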
28
vendor/github.com/ngaut/sync2/vitess_license
generated
vendored
Normal file
@@ -0,0 +1,28 @@
Copyright 2012, Google Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

    * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
    * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
202
vendor/github.com/ngaut/tso/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,202 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "{}"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright {yyyy} {name of copyright owner}

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
243
vendor/github.com/ngaut/tso/client/client.go
generated
vendored
Normal file
@@ -0,0 +1,243 @@
package client

import (
	"container/list"
	"time"

	"github.com/juju/errors"
	"github.com/ngaut/log"
	"github.com/ngaut/tso/proto"
	"github.com/ngaut/tso/util"
	"github.com/ngaut/zkhelper"
)

const (
	maxPipelineRequest = 100000
)

// Client is a timestamp oracle client.
type Client struct {
	requests chan *PipelineRequest

	pending *list.List
	conf    *Conf

	addr string

	leaderCh chan string
}

// Conf is the configuration.
type Conf struct {
	// ServerAddr is the tso server address; it will be deprecated later.
	ServerAddr string

	// ZKAddr is for zookeeper address, if set, client will ignore ServerAddr
	// and find the leader tso server address in zookeeper.
	// Later ServerAddr is just for simple test and backward compatibility.
	ZKAddr string

	// RootPath is the root path the tso server saves its data under in zookeeper, like /zk/tso.
	RootPath string
}

// PipelineRequest lets you get the timestamp with pipelining.
type PipelineRequest struct {
	done  chan error
	reply *proto.Response
}

func newPipelineRequest() *PipelineRequest {
	return &PipelineRequest{
		done: make(chan error, 1),
	}
}

// MarkDone sets the response for the current request.
func (pr *PipelineRequest) MarkDone(reply *proto.Response, err error) {
	if err != nil {
		pr.reply = nil
	} else {
		pr.reply = reply
	}
	pr.done <- errors.Trace(err)
}

// GetTS gets the timestamp.
func (pr *PipelineRequest) GetTS() (*proto.Timestamp, error) {
	err := <-pr.done
	if err != nil {
		return nil, errors.Trace(err)
	}

	return &pr.reply.Timestamp, nil
}

// NewClient creates a timestamp oracle client.
func NewClient(conf *Conf) *Client {
	c := &Client{
		requests: make(chan *PipelineRequest, maxPipelineRequest),
		pending:  list.New(),
		conf:     conf,
		leaderCh: make(chan string, 1),
	}

	if len(conf.ZKAddr) == 0 {
		c.leaderCh <- conf.ServerAddr
	} else {
		go c.watchLeader()
	}

	go c.workerLoop()

	return c
}

func (c *Client) cleanupPending(err error) {
	log.Warn(err)
	length := c.pending.Len()
	for i := 0; i < length; i++ {
		e := c.pending.Front()
		c.pending.Remove(e)
		e.Value.(*PipelineRequest).MarkDone(nil, err)
	}

	// clear requests in the channel too
	length = len(c.requests)
	for i := 0; i < length; i++ {
		req := <-c.requests
		req.MarkDone(nil, err)
	}
}

func (c *Client) notifyOne(reply *proto.Response) {
	e := c.pending.Front()
	c.pending.Remove(e)
	req := e.Value.(*PipelineRequest)
	req.MarkDone(reply, nil)
}

func (c *Client) writeRequests(session *Conn) error {
	var protoHdr [1]byte
	for i := 0; i < c.pending.Len(); i++ {
		session.Write(protoHdr[:])
	}
	return session.Flush()
}

func (c *Client) handleResponse(session *Conn) error {
	length := c.pending.Len()
	for i := 0; i < length; i++ {
		var resp proto.Response
		err := resp.Decode(session)
		if err != nil {
			return errors.Trace(err)
		}
		c.notifyOne(&resp)
	}

	return nil
}

func (c *Client) do() error {
	session, err := NewConnection(c.addr, time.Duration(1*time.Second))
	if err != nil {
		return errors.Trace(err)
	}

	log.Debugf("connect tso server %s ok", c.addr)

	defer session.Close()
	for {
		select {
		case req := <-c.requests:
			c.pending.PushBack(req)
			length := len(c.requests)
			for i := 0; i < length; i++ {
				req = <-c.requests
				c.pending.PushBack(req)
			}

			err = c.writeRequests(session)
			if err != nil {
				return errors.Trace(err)
			}
			err = c.handleResponse(session)
			if err != nil {
				return errors.Trace(err)
			}
		case addr := <-c.leaderCh:
			oldAddr := c.addr
			c.addr = addr
			return errors.Errorf("leader change %s -> %s", oldAddr, addr)
		}
	}
}

func (c *Client) workerLoop() {
	// first get the tso leader
	c.addr = <-c.leaderCh
	log.Debugf("try to connect tso server %s", c.addr)

	for {
		err := c.do()
		if err != nil {
			c.cleanupPending(err)
		}
		select {
		case <-time.After(1 * time.Second):
		case addr := <-c.leaderCh:
			// If the old tso server goes down, NewConnection will fail and return immediately in the do function,
			// so we must check for a leader change here.
			log.Warnf("leader change %s -> %s", c.addr, addr)
			c.addr = addr
			// Wait some time to let the tso server start accepting connections.
			time.Sleep(1 * time.Second)
		}
	}
}

func (c *Client) watchLeader() {
	var (
		conn zkhelper.Conn
		err  error
	)

	for {
		conn, err = zkhelper.ConnectToZkWithTimeout(c.conf.ZKAddr, time.Second)
		if err != nil {
			log.Errorf("connect zk err %v, retry later", err)
			time.Sleep(3 * time.Second)
			continue
		}
		break
	}

	defer conn.Close()

	var lastAddr string

	for {
		addr, watcher, err := util.GetWatchLeader(conn, c.conf.RootPath)
		if err != nil {
			log.Errorf("get tso leader err %v, retry later", err)
			time.Sleep(3 * time.Second)
			continue
		}

		if lastAddr != addr {
			log.Warnf("leader change %s -> %s", lastAddr, addr)
			lastAddr = addr
			c.leaderCh <- addr
		}

		// watch the leader changes.
		<-watcher
	}
}

// GoGetTimestamp returns a PipelineRequest so you can get the timestamp later.
func (c *Client) GoGetTimestamp() *PipelineRequest {
	pr := newPipelineRequest()
	c.requests <- pr
	return pr
}
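A minimal sketch of the pipelined request flow above, assuming a reachable zookeeper and tso server; the addresses and root path are hypothetical:

package main

import (
	"fmt"

	"github.com/ngaut/tso/client"
)

func main() {
	// ZKAddr takes precedence over ServerAddr, per the Conf comments above.
	c := client.NewClient(&client.Conf{
		ZKAddr:   "127.0.0.1:2181",
		RootPath: "/zk/tso",
	})

	pr := c.GoGetTimestamp() // enqueue the request (pipelined)
	ts, err := pr.GetTS()    // block until the response arrives
	if err != nil {
		panic(err)
	}
	fmt.Println("physical:", ts.Physical, "logical:", ts.Logical)
}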
50
vendor/github.com/ngaut/tso/client/conn.go
generated
vendored
Normal file
@@ -0,0 +1,50 @@
package client

import (
	"bufio"
	"net"
	"time"

	"github.com/ngaut/deadline"
)

// Conn is the connection for the timestamp oracle server; it is not thread safe.
type Conn struct {
	addr string
	net.Conn
	closed     bool
	r          *bufio.Reader
	w          *bufio.Writer
	netTimeout time.Duration
}

// NewConnection creates a conn.
func NewConnection(addr string, netTimeout time.Duration) (*Conn, error) {
	conn, err := net.DialTimeout("tcp", addr, netTimeout)
	if err != nil {
		return nil, err
	}

	return &Conn{
		addr:       addr,
		Conn:       conn,
		r:          bufio.NewReaderSize(deadline.NewDeadlineReader(conn, netTimeout), 512*1024),
		w:          bufio.NewWriterSize(deadline.NewDeadlineWriter(conn, netTimeout), 512*1024),
		netTimeout: netTimeout,
	}, nil
}

// Read reads data and stores it into p.
func (c *Conn) Read(p []byte) (int, error) {
	return c.r.Read(p)
}

// Flush flushes buffered data.
func (c *Conn) Flush() error {
	return c.w.Flush()
}

// Write writes p.
func (c *Conn) Write(p []byte) (int, error) {
	return c.w.Write(p)
}
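A sketch of the buffered, deadline-guarded I/O this Conn provides; the address is hypothetical, and Close comes from the embedded net.Conn:

package main

import (
	"time"

	"github.com/ngaut/tso/client"
)

func main() {
	// Every Read/Write is guarded by a fresh 1s deadline via the deadline package.
	conn, err := client.NewConnection("127.0.0.1:1234", time.Second)
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	conn.Write([]byte{0}) // writes land in the 512KB buffer first
	if err := conn.Flush(); err != nil {
		panic(err) // nothing hits the wire until Flush
	}

	buf := make([]byte, 16)
	if _, err := conn.Read(buf); err != nil {
		panic(err) // a stalled server surfaces as a deadline error here
	}
}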
45
vendor/github.com/ngaut/tso/proto/proto.go
generated
vendored
Normal file
@@ -0,0 +1,45 @@
package proto

import (
	"encoding/binary"
	"io"

	"github.com/juju/errors"
)

// RequestHeader is for the tso request proto.
type RequestHeader struct {
}

// Timestamp is for the tso timestamp.
type Timestamp struct {
	Physical int64
	Logical  int64
}

// Response is for the tso response proto.
type Response struct {
	Timestamp
}

// Encode encodes the response proto into w.
func (res *Response) Encode(w io.Writer) error {
	var buf [16]byte
	binary.BigEndian.PutUint64(buf[0:8], uint64(res.Physical))
	binary.BigEndian.PutUint64(buf[8:16], uint64(res.Logical))
	_, err := w.Write(buf[0:16])
	return errors.Trace(err)
}

// Decode decodes the response proto from r.
func (res *Response) Decode(r io.Reader) error {
	var buf [16]byte
	_, err := io.ReadFull(r, buf[0:16])
	if err != nil {
		return errors.Trace(err)
	}

	res.Physical = int64(binary.BigEndian.Uint64(buf[0:8]))
	res.Logical = int64(binary.BigEndian.Uint64(buf[8:16]))
	return nil
}
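The wire format is just two big-endian int64s (16 bytes). A round-trip sketch through a bytes.Buffer, with arbitrary example values:

package main

import (
	"bytes"
	"fmt"

	"github.com/ngaut/tso/proto"
)

func main() {
	in := proto.Response{Timestamp: proto.Timestamp{Physical: 1700000000000, Logical: 42}}

	var buf bytes.Buffer
	if err := in.Encode(&buf); err != nil { // writes 16 bytes
		panic(err)
	}

	var out proto.Response
	if err := out.Decode(&buf); err != nil {
		panic(err)
	}
	fmt.Println(out.Physical == in.Physical && out.Logical == in.Logical) // true
}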
81
vendor/github.com/ngaut/tso/util/util.go
generated
vendored
Normal file
@@ -0,0 +1,81 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package util

import (
	"encoding/json"
	"path"

	"github.com/juju/errors"
	"github.com/ngaut/go-zookeeper/zk"
	"github.com/ngaut/zkhelper"
)

func getLeader(data []byte) (string, error) {
	m := struct {
		Addr string `json:"Addr"`
	}{}

	err := json.Unmarshal(data, &m)
	if err != nil {
		return "", errors.Trace(err)
	}

	return m.Addr, nil
}

// getLeaderPath gets the leader path in zookeeper.
func getLeaderPath(rootPath string) string {
	return path.Join(rootPath, "leader")
}

// func checkLeaderExists(conn zkhelper.Conn) error {
// 	// the leader node is not ephemeral, so we may meet no any tso server but leader node
// 	// has the data for last closed tso server.
// 	// TODO: check children in /candidates, if no child, we will treat it as no leader too.

// 	return nil
// }

// GetLeader gets the leader tso address in zookeeper for outer use.
func GetLeader(conn zkhelper.Conn, rootPath string) (string, error) {
	data, _, err := conn.Get(getLeaderPath(rootPath))
	if err != nil {
		return "", errors.Trace(err)
	}

	// if err != checkLeaderExists(conn); err != nil {
	// 	return "", errors.Trace(err)
	// }

	return getLeader(data)
}

// GetWatchLeader gets the leader tso address in zookeeper and returns a watcher for leader changes.
func GetWatchLeader(conn zkhelper.Conn, rootPath string) (string, <-chan zk.Event, error) {
	data, _, watcher, err := conn.GetW(getLeaderPath(rootPath))
	if err != nil {
		return "", nil, errors.Trace(err)
	}
	addr, err := getLeader(data)
	if err != nil {
		return "", nil, errors.Trace(err)
	}

	// if err != checkLeaderExists(conn); err != nil {
	// 	return "", errors.Trace(err)
	// }

	return addr, watcher, nil
}
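A sketch of the two lookup styles above, assuming a reachable zookeeper; the address and root path are hypothetical. The leader node stores {"Addr": ...} JSON under <rootPath>/leader:

package main

import (
	"fmt"
	"time"

	"github.com/ngaut/tso/util"
	"github.com/ngaut/zkhelper"
)

func main() {
	conn, err := zkhelper.ConnectToZkWithTimeout("127.0.0.1:2181", time.Second)
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// One-shot read of the current leader.
	addr, err := util.GetLeader(conn, "/zk/tso")
	if err != nil {
		panic(err)
	}
	fmt.Println("current tso leader:", addr)

	// Or read it with a watcher that fires on leader change.
	addr, watcher, err := util.GetWatchLeader(conn, "/zk/tso")
	if err != nil {
		panic(err)
	}
	fmt.Println("leader:", addr)
	<-watcher // blocks until the leader node changes
}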
4
vendor/github.com/ngaut/zkhelper/README.md
generated
vendored
Normal file
@@ -0,0 +1,4 @@
Coordinator
========

Supports both ZooKeeper and etcd.
53
vendor/github.com/ngaut/zkhelper/conn.go
generated
vendored
Normal file
@@ -0,0 +1,53 @@
package zkhelper

import (
	zk "github.com/ngaut/go-zookeeper/zk"
)

/*
type Stat interface {
	Czxid() int64
	Mzxid() int64
	CTime() time.Time
	MTime() time.Time
	Version() int
	CVersion() int
	AVersion() int
	EphemeralOwner() int64
	DataLength() int
	NumChildren() int
	Pzxid() int64
}
*/

// This interface is really close to the zookeeper connection
// interface. It uses the Stat interface defined here instead of the
// zookeeper.Stat structure for stats. Everything else is the same as
// in zookeeper. So refer to the zookeeper docs for the conventions
// used here (for instance, using -1 as version to specify any
// version)
type Conn interface {
	Get(path string) (data []byte, stat zk.Stat, err error)
	GetW(path string) (data []byte, stat zk.Stat, watch <-chan zk.Event, err error)

	Children(path string) (children []string, stat zk.Stat, err error)
	ChildrenW(path string) (children []string, stat zk.Stat, watch <-chan zk.Event, err error)

	Exists(path string) (exist bool, stat zk.Stat, err error)
	ExistsW(path string) (exist bool, stat zk.Stat, watch <-chan zk.Event, err error)

	Create(path string, value []byte, flags int32, aclv []zk.ACL) (pathCreated string, err error)

	Set(path string, value []byte, version int32) (stat zk.Stat, err error)

	Delete(path string, version int32) (err error)

	Close()

	//RetryChange(path string, flags int, acl []ACL, changeFunc ChangeFunc) error

	GetACL(path string) ([]zk.ACL, zk.Stat, error)
	SetACL(path string, aclv []zk.ACL, version int32) (zk.Stat, error)

	Seq2Str(seq int64) string
}
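The in-memory fake (NewConn, defined in fakezk.go below) makes this interface easy to exercise without a real ZooKeeper. A minimal sketch:

package main

import (
	"fmt"

	"github.com/ngaut/zkhelper"
)

func main() {
	conn := zkhelper.NewConn() // in-memory fake; same Conn interface
	defer conn.Close()

	if _, err := conn.Create("/demo", []byte("hello"), 0, nil); err != nil {
		panic(err)
	}

	data, _, err := conn.Get("/demo")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // hello

	// -1 means "any version", per the zookeeper convention noted above.
	if _, err := conn.Set("/demo", []byte("world"), -1); err != nil {
		panic(err)
	}
}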
472
vendor/github.com/ngaut/zkhelper/etcd.go
generated
vendored
Normal file
@@ -0,0 +1,472 @@
package zkhelper

import (
	"errors"
	"fmt"
	"path"
	"strings"
	"sync"
	"time"

	etcderr "github.com/coreos/etcd/error"
	"github.com/coreos/go-etcd/etcd"
	zk "github.com/ngaut/go-zookeeper/zk"
	"github.com/ngaut/log"
	"github.com/ngaut/pools"
)

var (
	singleInstanceLock sync.Mutex
	etcdInstance       *etcdImpl
)

type PooledEtcdClient struct {
	c *etcd.Client
}

func (c *PooledEtcdClient) Close() {

}

func (e *etcdImpl) Seq2Str(seq int64) string {
	return fmt.Sprintf("%d", seq)
}

type etcdImpl struct {
	sync.Mutex
	cluster  string
	pool     *pools.ResourcePool
	indexMap map[string]uint64
}

func convertToZkError(err error) error {
	//todo: convert other errors
	if ec, ok := err.(*etcd.EtcdError); ok {
		switch ec.ErrorCode {
		case etcderr.EcodeKeyNotFound:
			return zk.ErrNoNode
		case etcderr.EcodeNotFile:
		case etcderr.EcodeNotDir:
		case etcderr.EcodeNodeExist:
			return zk.ErrNodeExists
		case etcderr.EcodeDirNotEmpty:
			return zk.ErrNotEmpty
		}
	}

	return err
}

func convertToZkEvent(watchPath string, resp *etcd.Response, err error) zk.Event {
	//log.Infof("convert event from path:%s, %+v, %+v", watchPath, resp, resp.Node.Key)
	var e zk.Event

	if err != nil {
		e.Err = convertToZkError(err)
		e.State = zk.StateDisconnected
		return e
	}

	e.State = zk.StateConnected

	e.Path = resp.Node.Key
	if len(resp.Node.Key) > len(watchPath) {
		e.Type = zk.EventNodeChildrenChanged
		return e
	}

	switch resp.Action {
	case "set":
		e.Type = zk.EventNodeDataChanged
	case "delete":
		e.Type = zk.EventNodeDeleted
	case "update":
		e.Type = zk.EventNodeDataChanged
	case "create":
		e.Type = zk.EventNodeCreated
	case "expire":
		e.Type = zk.EventNotWatching
	}

	return e
}

func NewEtcdConn(zkAddr string) (Conn, error) {
	singleInstanceLock.Lock()
	defer singleInstanceLock.Unlock()
	if etcdInstance != nil {
		return etcdInstance, nil
	}

	p := pools.NewResourcePool(func() (pools.Resource, error) {
		cluster := strings.Split(zkAddr, ",")
		for i, addr := range cluster {
			if !strings.HasPrefix(addr, "http://") {
				cluster[i] = "http://" + addr
			}
		}
		newClient := etcd.NewClient(cluster)
		newClient.SetConsistency(etcd.STRONG_CONSISTENCY)
		return &PooledEtcdClient{c: newClient}, nil
	}, 10, 10, 0)

	etcdInstance = &etcdImpl{
		cluster:  zkAddr,
		pool:     p,
		indexMap: make(map[string]uint64),
	}

	log.Infof("new etcd %s", zkAddr)
	if etcdInstance == nil {
		return nil, errors.New("unknown error")
	}

	return etcdInstance, nil
}

func (e *etcdImpl) Get(key string) (data []byte, stat zk.Stat, err error) {
	conn, err := e.pool.Get()
	if err != nil {
		return nil, nil, err
	}

	defer e.pool.Put(conn)
	c := conn.(*PooledEtcdClient).c

	resp, err := c.Get(key, true, false)
	if resp == nil {
		return nil, nil, convertToZkError(err)
	}

	return []byte(resp.Node.Value), nil, nil
}

func (e *etcdImpl) setIndex(key string, index uint64) {
	e.Lock()
	defer e.Unlock()

	e.indexMap[key] = index
}

func (e *etcdImpl) getIndex(key string) uint64 {
	e.Lock()
	defer e.Unlock()

	index := e.indexMap[key]

	return index
}

func (e *etcdImpl) watch(key string, children bool) (resp *etcd.Response, stat zk.Stat, watch <-chan zk.Event, err error) {
	conn, err := e.pool.Get()
	if err != nil {
		return nil, nil, nil, err
	}

	defer e.pool.Put(conn)
	c := conn.(*PooledEtcdClient).c
	index := e.getIndex(key)
	resp, err = c.Get(key, true, true)
	if resp == nil {
		return nil, nil, nil, convertToZkError(err)
	}

	if index < resp.Node.ModifiedIndex {
		index = resp.Node.ModifiedIndex
	}

	for _, n := range resp.Node.Nodes {
		if n.ModifiedIndex > index {
			index = n.ModifiedIndex
		}
	}

	log.Info("try watch", key)
	ch := make(chan zk.Event, 100)
	originVal := resp.Node.Value

	go func() {
		defer func() {
			e.setIndex(key, index)
		}()

		for {
			conn, err := e.pool.Get()
			if err != nil {
				log.Error(err)
				return
			}

			c := conn.(*PooledEtcdClient).c

			resp, err := c.Watch(key, index, children, nil, nil)
			e.pool.Put(conn)

			if err != nil {
				if ec, ok := err.(*etcd.EtcdError); ok {
					if ec.ErrorCode == etcderr.EcodeEventIndexCleared {
						index++
						continue
					}
				}

				log.Warning("watch", err)
				ch <- convertToZkEvent(key, resp, err)
				return
			}

			if key == resp.Node.Key && originVal == string(resp.Node.Value) { //keep alive event
				index++
				continue
			}

			ch <- convertToZkEvent(key, resp, err)
			//update index
			if index <= resp.Node.ModifiedIndex {
				index = resp.Node.ModifiedIndex + 1
			} else {
				index++
			}
			return
		}
	}()

	return resp, nil, ch, nil
}

func (e *etcdImpl) GetW(key string) (data []byte, stat zk.Stat, watch <-chan zk.Event, err error) {
	resp, stat, watch, err := e.watch(key, false)
	if err != nil {
		return
	}

	return []byte(resp.Node.Value), stat, watch, nil
}

func (e *etcdImpl) Children(key string) (children []string, stat zk.Stat, err error) {
	conn, err := e.pool.Get()
	if err != nil {
		return nil, nil, err
	}

	defer e.pool.Put(conn)
	c := conn.(*PooledEtcdClient).c

	resp, err := c.Get(key, true, false)
	if resp == nil {
		return nil, nil, convertToZkError(err)
	}

	for _, c := range resp.Node.Nodes {
		children = append(children, path.Base(c.Key))
	}

	return
}

func (e *etcdImpl) ChildrenW(key string) (children []string, stat zk.Stat, watch <-chan zk.Event, err error) {
	resp, stat, watch, err := e.watch(key, true)
	if err != nil {
		return nil, stat, nil, convertToZkError(err)
	}

	for _, c := range resp.Node.Nodes {
		children = append(children, path.Base(c.Key))
	}

	return children, stat, watch, nil
}

func (e *etcdImpl) Exists(key string) (exist bool, stat zk.Stat, err error) {
	conn, err := e.pool.Get()
	if err != nil {
		return false, nil, err
	}

	defer e.pool.Put(conn)
	c := conn.(*PooledEtcdClient).c

	_, err = c.Get(key, true, false)
	if err == nil {
		return true, nil, nil
	}

	if ec, ok := err.(*etcd.EtcdError); ok {
		if ec.ErrorCode == etcderr.EcodeKeyNotFound {
			return false, nil, nil
		}
	}

	return false, nil, convertToZkError(err)
}

func (e *etcdImpl) ExistsW(key string) (exist bool, stat zk.Stat, watch <-chan zk.Event, err error) {
	_, stat, watch, err = e.watch(key, false)
	if err != nil {
		return false, nil, nil, convertToZkError(err)
	}

	return true, nil, watch, nil
}

const MAX_TTL = 365 * 24 * 60 * 60

func (e *etcdImpl) doKeepAlive(key string, ttl uint64) error {
	conn, err := e.pool.Get()
	if err != nil {
		return err
	}

	defer e.pool.Put(conn)
	c := conn.(*PooledEtcdClient).c

	resp, err := c.Get(key, false, false)
	if err != nil {
		log.Error(err)
		return err
	}

	if resp.Node.Dir {
		return fmt.Errorf("can not set ttl to directory %s", key)
	}

	//log.Info("keep alive ", key)
	resp, err = c.CompareAndSwap(key, resp.Node.Value, ttl, resp.Node.Value, resp.Node.ModifiedIndex)
	if err == nil {
		return nil
	}

	if ec, ok := err.(*etcd.EtcdError); ok && ec.ErrorCode == etcderr.EcodeTestFailed {
		return nil
	}

	return err
}

//todo: add test for keepAlive
func (e *etcdImpl) keepAlive(key string, ttl uint64) {
	go func() {
		for {
			time.Sleep(1 * time.Second)
			err := e.doKeepAlive(key, ttl)
			if err != nil {
				log.Error(err)
				return
			}
		}
	}()
}

func (e *etcdImpl) Create(wholekey string, value []byte, flags int32, aclv []zk.ACL) (keyCreated string, err error) {
	seq := (flags & zk.FlagSequence) != 0
	tmp := (flags & zk.FlagEphemeral) != 0
	ttl := uint64(MAX_TTL)
	if tmp {
		ttl = 5
	}

	var resp *etcd.Response

	conn, err := e.pool.Get()
	if err != nil {
		return "", err
	}

	defer e.pool.Put(conn)
	c := conn.(*PooledEtcdClient).c

	fn := c.Create
	log.Info("create", wholekey)

	if seq {
		wholekey = path.Dir(wholekey)
		fn = c.CreateInOrder
	} else {
		for _, v := range aclv {
			if v.Perms == PERM_DIRECTORY {
				log.Info("etcdImpl:create directory", wholekey)
				fn = nil
				resp, err = c.CreateDir(wholekey, uint64(ttl))
				if err != nil {
					return "", convertToZkError(err)
				}
			}
		}
	}

	if fn == nil {
		if tmp {
			e.keepAlive(wholekey, ttl)
		}
		return resp.Node.Key, nil
	}

	resp, err = fn(wholekey, string(value), uint64(ttl))
	if err != nil {
		return "", convertToZkError(err)
	}

	if tmp {
		e.keepAlive(resp.Node.Key, ttl)
	}

	return resp.Node.Key, nil
}

func (e *etcdImpl) Set(key string, value []byte, version int32) (stat zk.Stat, err error) {
	if version == 0 {
		return nil, errors.New("invalid version")
	}

	conn, err := e.pool.Get()
	if err != nil {
		return nil, err
	}

	defer e.pool.Put(conn)
	c := conn.(*PooledEtcdClient).c

	resp, err := c.Get(key, true, false)
	if resp == nil {
		return nil, convertToZkError(err)
	}

	_, err = c.Set(key, string(value), uint64(resp.Node.TTL))
	return nil, convertToZkError(err)
}

func (e *etcdImpl) Delete(key string, version int32) (err error) {
	//todo: handle version
	conn, err := e.pool.Get()
	if err != nil {
		return err
	}

	defer e.pool.Put(conn)
	c := conn.(*PooledEtcdClient).c

	resp, err := c.Get(key, true, false)
	if resp == nil {
		return convertToZkError(err)
	}

	if resp.Node.Dir {
		_, err = c.DeleteDir(key)
	} else {
		_, err = c.Delete(key, false)
	}

	return convertToZkError(err)
}

func (e *etcdImpl) GetACL(key string) ([]zk.ACL, zk.Stat, error) {
	return nil, nil, nil
}

func (e *etcdImpl) SetACL(key string, aclv []zk.ACL, version int32) (zk.Stat, error) {
	return nil, nil
}

func (e *etcdImpl) Close() {
	//how to implement this
}
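A sketch of driving the etcd-backed Conn above, assuming a reachable single-node etcd; the address is hypothetical, and "http://" is prepended automatically for comma-separated addresses:

package main

import (
	"fmt"

	"github.com/ngaut/zkhelper"
)

func main() {
	conn, err := zkhelper.NewEtcdConn("127.0.0.1:4001")
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// The zookeeper-flavored API is backed by etcd v2 operations.
	if _, err := conn.Create("/zk/demo", []byte("hello"), 0, nil); err != nil {
		panic(err)
	}
	data, _, err := conn.Get("/zk/demo")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data))
}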
519
vendor/github.com/ngaut/zkhelper/fakezk.go
generated
vendored
Normal file
@@ -0,0 +1,519 @@
// Copyright 2013, Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package fakezk is a pretty complete mock implementation of a
|
||||
// Zookeper connection (see go/zk/zk.Conn). All operations
|
||||
// work as expected with the exceptions of zk.Conn.ACL and
|
||||
// zk.Conn.SetACL. zk.Conn.SetACL will succeed, but it is a noop (and
|
||||
// the ACLs won't be respected). zk.Conn.ACL will panic. It is OK to
|
||||
// access the connection from multiple goroutines, but the locking is
|
||||
// very naive (every operation locks the whole connection).
|
||||
package zkhelper
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ngaut/go-zookeeper/zk"
|
||||
)
|
||||
|
||||
type zconn struct {
|
||||
mu sync.Mutex
|
||||
root *stat
|
||||
zxid int64
|
||||
existWatches map[string][]chan zk.Event
|
||||
}
|
||||
|
||||
func (conn *zconn) getZxid() int64 {
|
||||
conn.zxid++
|
||||
return conn.zxid
|
||||
}
|
||||
|
||||
func (conn *zconn) Seq2Str(seq int64) string {
|
||||
return fmt.Sprintf("%0.10d", seq)
|
||||
}
|
||||
|
||||
// NewConn returns a fake zk.Conn implementation. Data is stored in
|
||||
// memory, and there's a global connection lock for concurrent access.
|
||||
func NewConn() Conn {
|
||||
return &zconn{
|
||||
root: &stat{
|
||||
name: "/",
|
||||
children: make(map[string]*stat),
|
||||
},
|
||||
existWatches: make(map[string][]chan zk.Event)}
|
||||
}
|
||||
|
||||
// NewConnFromFile returns a fake zk.Conn implementation, that is seeded
|
||||
// with the json data extracted from the input file.
|
||||
func NewConnFromFile(filename string) Conn {
|
||||
result := &zconn{
|
||||
root: &stat{
|
||||
name: "/",
|
||||
children: make(map[string]*stat),
|
||||
},
|
||||
existWatches: make(map[string][]chan zk.Event)}
|
||||
data, err := ioutil.ReadFile(filename)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("NewConnFromFile failed to read file %v: %v", filename, err))
|
||||
}
|
||||
values := make(map[string]interface{})
|
||||
if err := json.Unmarshal(data, &values); err != nil {
|
||||
panic(fmt.Errorf("NewConnFromFile failed to json.Unmarshal file %v: %v", filename, err))
|
||||
}
|
||||
for k, v := range values {
|
||||
jv, err := json.Marshal(v)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("NewConnFromFile failed to json.Marshal value %v: %v", k, err))
|
||||
}
|
||||
|
||||
// CreateRecursive will work for a leaf node where the parent
|
||||
// doesn't exist, but not for a node in the middle of a tree
|
||||
// that already exists. So have to use 'Set' as a backup.
|
||||
if _, err := CreateRecursive(result, k, string(jv), 0, nil); err != nil {
|
||||
if ZkErrorEqual(err, zk.ErrNodeExists) {
|
||||
_, err = result.Set(k, jv, -1)
|
||||
}
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("NewConnFromFile failed to zk.CreateRecursive value %v: %v", k, err))
|
||||
}
|
		}
	}
	return result
}

func (conn *zconn) GetACL(path string) ([]zk.ACL, zk.Stat, error) {
	return nil, nil, nil
}

func (conn *zconn) Get(zkPath string) (data []byte, stat zk.Stat, err error) {
	conn.mu.Lock()
	defer conn.mu.Unlock()
	node, _, rest, err := conn.getNode(zkPath, "get")
	if err != nil {
		return nil, nil, err
	}
	if len(rest) != 0 {
		return nil, nil, zkError(zk.ErrNoNode, "get", zkPath)
	}
	return []byte(node.content), node, nil
}

func (conn *zconn) GetW(zkPath string) (data []byte, stat zk.Stat, watch <-chan zk.Event, err error) {
	conn.mu.Lock()
	defer conn.mu.Unlock()
	node, _, rest, err := conn.getNode(zkPath, "getw")
	if err != nil {
		return nil, nil, nil, err
	}

	if len(rest) != 0 {
		return nil, nil, nil, zkError(zk.ErrNoNode, "getw", zkPath)
	}
	c := make(chan zk.Event, 1)
	node.changeWatches = append(node.changeWatches, c)
	return []byte(node.content), node, c, nil
}

func (conn *zconn) Children(zkPath string) (children []string, stat zk.Stat, err error) {
	conn.mu.Lock()
	defer conn.mu.Unlock()

	//println("Children:", conn.String())

	node, _, rest, err := conn.getNode(zkPath, "children")
	if err != nil {
		return nil, nil, err
	}

	if len(rest) != 0 {
		return nil, nil, zkError(zk.ErrNoNode, "children", zkPath)
	}
	for name := range node.children {
		children = append(children, name)
	}
	return children, node, nil
}

func (conn *zconn) ChildrenW(zkPath string) (children []string, stat zk.Stat, watch <-chan zk.Event, err error) {
	conn.mu.Lock()
	defer conn.mu.Unlock()

	//println("ChildrenW:", conn.String())

	node, _, rest, err := conn.getNode(zkPath, "childrenw")
	if err != nil {
		return nil, nil, nil, err
	}

	if len(rest) != 0 {
		return nil, nil, nil, zkError(zk.ErrNoNode, "childrenw", zkPath)
	}
	c := make(chan zk.Event, 1)
	node.childrenWatches = append(node.childrenWatches, c)
	for name := range node.children {
		children = append(children, name)
	}
	return children, node, c, nil
}

func (conn *zconn) Exists(zkPath string) (exist bool, stat zk.Stat, err error) {
	// FIXME(szopa): if the path is bad, Op will be "get."
	exist = false
	_, stat, err = conn.Get(zkPath)
	if err != nil {
		if ZkErrorEqual(err, zk.ErrNoNode) {
			err = nil
		}
	} else {
		exist = true
	}

	return exist, stat, err
}

func (conn *zconn) ExistsW(zkPath string) (exist bool, stat zk.Stat, watch <-chan zk.Event, err error) {
	conn.mu.Lock()
	defer conn.mu.Unlock()
	exist = false
	c := make(chan zk.Event, 1)
	node, _, rest, err := conn.getNode(zkPath, "existsw")
	if err != nil {
		return exist, nil, nil, err
	}

	if len(rest) != 0 {
		watches, ok := conn.existWatches[zkPath]
		if !ok {
			watches = make([]chan zk.Event, 0)
			conn.existWatches[zkPath] = watches
		}
		conn.existWatches[zkPath] = append(watches, c)
		return exist, nil, c, nil
	}

	exist = true
	node.existWatches = append(node.existWatches, c)
	return exist, node, c, nil
}

func (conn *zconn) Create(zkPath string, value []byte, flags int32, aclv []zk.ACL) (zkPathCreated string, err error) {
	conn.mu.Lock()
	defer conn.mu.Unlock()

	node, _, rest, err := conn.getNode(zkPath, "create")
	if err != nil {
		return "", err
	}
	if len(rest) == 0 {
		return "", zkError(zk.ErrNodeExists, "create", zkPath)
	}

	if len(rest) > 1 {
		return "", zkError(zk.ErrNoNode, "create", zkPath)
	}

	zxid := conn.getZxid()
	name := rest[0]
	if (flags & zk.FlagSequence) != 0 {
		sequence := node.nextSequence()
		name += sequence
		zkPath = zkPath + sequence
	}

	stat := &stat{
		name:         name,
		content:      string(value),
		children:     make(map[string]*stat),
		acl:          aclv,
		mtime:        time.Now(),
		ctime:        time.Now(),
		czxid:        zxid,
		mzxid:        zxid,
		existWatches: make([]chan zk.Event, 0),
	}
	node.children[name] = stat
	event := zk.Event{
		Type:  zk.EventNodeCreated,
		Path:  zkPath,
		State: zk.StateConnected,
	}
	if watches, ok := conn.existWatches[zkPath]; ok {
		delete(conn.existWatches, zkPath)
		for _, watch := range watches {
			watch <- event
		}
	}
	childrenEvent := zk.Event{
		Type:  zk.EventNodeChildrenChanged,
		Path:  zkPath,
		State: zk.StateConnected,
	}
	for _, watch := range node.childrenWatches {
		watch <- childrenEvent
		close(watch)
	}
	node.childrenWatches = nil

	node.cversion++

	return zkPath, nil
}
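A quick standalone illustration of the name this fake Create produces when zk.FlagSequence is set: nextSequence (defined further down) formats a per-node counter with %010d, mirroring real ZooKeeper sequence nodes. The loop counter below stands in for stat.sequence.

package main

import "fmt"

func main() {
	// seq stands in for stat.sequence; successive sequential creates of
	// "lock-" become lock-0000000001, lock-0000000002, ...
	for seq := 1; seq <= 3; seq++ {
		fmt.Printf("lock-%010d\n", seq)
	}
}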
func (conn *zconn) Set(zkPath string, value []byte, version int32) (stat zk.Stat, err error) {
	conn.mu.Lock()
	defer conn.mu.Unlock()

	node, _, rest, err := conn.getNode(zkPath, "set")
	if err != nil {
		return nil, err
	}

	if len(rest) != 0 {
		return nil, zkError(zk.ErrNoNode, "set", zkPath)
	}

	if version != -1 && node.version != int(version) {
		return nil, zkError(zk.ErrBadVersion, "set", zkPath)
	}
	node.content = string(value)
	node.version++
	for _, watch := range node.changeWatches {
		watch <- zk.Event{
			Type:  zk.EventNodeDataChanged,
			Path:  zkPath,
			State: zk.StateConnected,
		}
	}
	node.changeWatches = nil
	return node, nil
}

func (conn *zconn) Delete(zkPath string, version int32) (err error) {
	conn.mu.Lock()
	defer conn.mu.Unlock()

	node, parent, rest, err := conn.getNode(zkPath, "delete")
	if err != nil {
		return err
	}

	if len(rest) > 0 {
		return zkError(zk.ErrNoNode, "delete", zkPath)
	}
	if len(node.children) > 0 {
		return zkError(zk.ErrNotEmpty, "delete", zkPath)
	}
	delete(parent.children, node.name)
	event := zk.Event{
		Type:  zk.EventNodeDeleted,
		Path:  zkPath,
		State: zk.StateConnected,
	}
	for _, watch := range node.existWatches {
		watch <- event
	}
	for _, watch := range node.changeWatches {
		watch <- event
	}
	node.existWatches = nil
	node.changeWatches = nil
	childrenEvent := zk.Event{
		Type:  zk.EventNodeChildrenChanged,
		Path:  zkPath,
		State: zk.StateConnected,
	}

	for _, watch := range parent.childrenWatches {
		watch <- childrenEvent
	}
	return nil
}

func (conn *zconn) Close() {
	conn.mu.Lock()
	defer conn.mu.Unlock()
	for _, watches := range conn.existWatches {
		for _, c := range watches {
			close(c)
		}
	}
	conn.root.closeAllWatches()
}

/*
func (conn *zconn) RetryChange(path string, flags int, acl []zk.ACL, changeFunc zk.ChangeFunc) error {
	for {
		oldValue, oldStat, err := conn.Get(path)
		if err != nil && !ZkErrorEqual(err, zk.ErrNoNode) {
			return err
		}
		newValue, err := changeFunc(oldValue, oldStat)
		if err != nil {
			return err
		}
		if oldStat == nil {
			_, err := conn.Create(path, newValue, flags, acl)
			if err == nil || !ZkErrorEqual(err, zk.ZNODEEXISTS) {
				return err
			}
			continue
		}
		if newValue == oldValue {
			return nil // Nothing to do.
		}
		_, err = conn.Set(path, newValue, oldStat.Version())
		if err == nil || !ZkErrorEqual(err, zk.ZBADVERSION) && !ZkErrorEqual(err, zk.ErrNoNode) {
			return err
		}
	}
}
*/

func (conn *zconn) SetACL(zkPath string, aclv []zk.ACL, version int32) (zk.Stat, error) {
	return nil, nil
}

func (conn *zconn) getNode(zkPath string, op string) (node *stat, parent *stat, rest []string, err error) {
	// FIXME(szopa): Make sure the path starts with /.
	parts := strings.Split(zkPath, "/")
	if parts[0] != "" {
		// TODO: fix this; it should return a bad-arguments error.
		return nil, nil, nil, zkError(zk.ErrUnknown, op, zkPath)
	}
	elements := parts[1:]
	parent = nil
	current := conn.root
	for i, el := range elements {
		candidateParent := current
		candidate, ok := current.children[el]
		if !ok {
			return current, parent, elements[i:], nil
		}
		current, parent = candidate, candidateParent
	}
	return current, parent, []string{}, nil
}

type ZkError struct {
	Code error
	Op   string
	Path string
}

func (ze *ZkError) Error() string {
	return ze.Code.Error()
}

// zkError creates an appropriate error return from
// a ZooKeeper status.
func zkError(code error, op, path string) error {
	return &ZkError{
		Op:   op,
		Code: code,
		Path: path,
	}
}

type stat struct {
	name     string
	content  string
	children map[string]*stat
	acl      []zk.ACL
	mtime    time.Time
	ctime    time.Time
	czxid    int64
	mzxid    int64
	pzxid    int64
	version  int
	cversion int
	aversion int

	sequence int

	existWatches    []chan zk.Event
	changeWatches   []chan zk.Event
	childrenWatches []chan zk.Event
}

func (st stat) closeAllWatches() {
	for _, c := range st.existWatches {
		close(c)
	}
	for _, c := range st.changeWatches {
		close(c)
	}
	for _, c := range st.childrenWatches {
		close(c)
	}
	for _, child := range st.children {
		child.closeAllWatches()
	}
}

func (st stat) Czxid() int64 {
	return st.czxid
}
func (st stat) Mzxid() int64 {
	return st.mzxid
}
func (st stat) CTime() time.Time {
	return st.ctime
}
func (st stat) MTime() time.Time {
	return st.mtime
}
func (st stat) Version() int {
	return st.version
}
func (st stat) CVersion() int {
	return st.cversion
}
func (st stat) AVersion() int {
	return st.aversion
}
func (st stat) EphemeralOwner() int64 {
	return 0
}

func (st stat) DataLength() int {
	return len(st.content)
}

func (st stat) NumChildren() int {
	return len(st.children)
}

func (st stat) Pzxid() int64 {
	return st.pzxid
}

func (st *stat) nextSequence() string {
	st.sequence++
	return fmt.Sprintf("%010d", st.sequence)
}

func (st stat) fprintRecursive(level int, buf *bytes.Buffer) {
	start := strings.Repeat(" ", level)
	fmt.Fprintf(buf, "%v-%v:\n", start, st.name)
	if st.content != "" {
		fmt.Fprintf(buf, "%v content: %q\n\n", start, st.content)
	}
	if len(st.children) > 0 {
		for _, child := range st.children {
			child.fprintRecursive(level+1, buf)
		}
	}
}

func (conn *zconn) String() string {
	b := new(bytes.Buffer)
	conn.root.fprintRecursive(0, b)
	return b.String()
}
899
vendor/github.com/ngaut/zkhelper/zk.go
generated
vendored
Normal file
@ -0,0 +1,899 @@
// zk helper functions
// modified from Vitess project

package zkhelper

import (
	"encoding/json"
	"errors"
	"fmt"
	"math/rand"
	"os"
	"path"
	"sort"
	"strings"
	"sync"
	"time"

	"github.com/ngaut/go-zookeeper/zk"
	"github.com/ngaut/log"
)

var (
	// This error is returned by functions that wait for a result
	// when they are interrupted.
	ErrInterrupted = errors.New("zkutil: obtaining lock was interrupted")

	// This error is returned by functions that wait for a result
	// when the timeout value is reached.
	ErrTimeout = errors.New("zkutil: obtaining lock timed out")
)

const (
	// PERM_DIRECTORY are default permissions for a node.
	PERM_DIRECTORY = zk.PermAdmin | zk.PermCreate | zk.PermDelete | zk.PermRead | zk.PermWrite
	// PERM_FILE allows a zk node to emulate file behavior by disallowing child nodes.
	PERM_FILE = zk.PermAdmin | zk.PermRead | zk.PermWrite

	MagicPrefix = "zk"
)

func init() {
	rand.Seed(time.Now().UnixNano())
}

type MyZkConn struct {
	*zk.Conn
}

func (conn *MyZkConn) Seq2Str(seq int64) string {
	return fmt.Sprintf("%0.10d", seq)
}

func ConnectToZk(zkAddr string) (Conn, error) {
	zkConn, _, err := zk.Connect(strings.Split(zkAddr, ","), 3*time.Second)
	if err != nil {
		return nil, err
	}

	return &MyZkConn{Conn: zkConn}, nil
}

func ConnectToZkWithTimeout(zkAddr string, recvTime time.Duration) (Conn, error) {
	zkConn, _, err := zk.Connect(strings.Split(zkAddr, ","), recvTime)
	if err != nil {
		return nil, err
	}

	return &MyZkConn{Conn: zkConn}, nil
}
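A minimal connection sketch (not part of the vendored file); the address and timeout are illustrative assumptions:

package main

import (
	"fmt"
	"time"

	"github.com/ngaut/zkhelper"
)

func main() {
	// The address is a comma-separated server list; ConnectToZk would
	// use a fixed 3-second session timeout instead.
	conn, err := zkhelper.ConnectToZkWithTimeout("localhost:2181", 5*time.Second)
	if err != nil {
		fmt.Println("connect failed:", err)
		return
	}
	defer conn.Close()
	fmt.Println("connected")
}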
func DefaultACLs() []zk.ACL {
	return zk.WorldACL(zk.PermAll)
}

func DefaultDirACLs() []zk.ACL {
	return zk.WorldACL(PERM_DIRECTORY)
}

func DefaultFileACLs() []zk.ACL {
	return zk.WorldACL(PERM_FILE)
}

// IsDirectory returns if this node should be treated as a directory.
func IsDirectory(aclv []zk.ACL) bool {
	for _, acl := range aclv {
		if acl.Perms != PERM_DIRECTORY {
			return false
		}
	}
	return true
}

func ZkErrorEqual(a, b error) bool {
	if a != nil && b != nil {
		return a.Error() == b.Error()
	}

	return a == b
}

// Create a path and any pieces required, think mkdir -p.
// Intermediate znodes are always created empty.
func CreateRecursive(zconn Conn, zkPath, value string, flags int, aclv []zk.ACL) (pathCreated string, err error) {
	parts := strings.Split(zkPath, "/")
	if parts[1] != MagicPrefix {
		return "", fmt.Errorf("zkutil: non /%v path: %v", MagicPrefix, zkPath)
	}

	pathCreated, err = zconn.Create(zkPath, []byte(value), int32(flags), aclv)
	if ZkErrorEqual(err, zk.ErrNoNode) {
		// Make sure that nodes are either "file" or "directory" to mirror file system
		// semantics.
		dirAclv := make([]zk.ACL, len(aclv))
		for i, acl := range aclv {
			dirAclv[i] = acl
			dirAclv[i].Perms = PERM_DIRECTORY
		}
		_, err = CreateRecursive(zconn, path.Dir(zkPath), "", flags, dirAclv)
		if err != nil && !ZkErrorEqual(err, zk.ErrNodeExists) {
			return "", err
		}
		pathCreated, err = zconn.Create(zkPath, []byte(value), int32(flags), aclv)
	}
	return
}
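A usage sketch for CreateRecursive (illustrative only; assumes a reachable ZooKeeper, and note that paths must be rooted at /zk, per the MagicPrefix check above):

package main

import (
	"fmt"

	"github.com/ngaut/go-zookeeper/zk"
	"github.com/ngaut/zkhelper"
)

func main() {
	conn, err := zkhelper.ConnectToZk("localhost:2181") // illustrative address
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// Like `mkdir -p` for /zk/demo/a: intermediate nodes are created
	// empty with directory-style ACLs, and the leaf holds "hello".
	created, err := zkhelper.CreateRecursive(conn, "/zk/demo/a", "hello", 0, zk.WorldACL(zkhelper.PERM_FILE))
	fmt.Println(created, err)
}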
func CreateOrUpdate(zconn Conn, zkPath, value string, flags int, aclv []zk.ACL, recursive bool) (pathCreated string, err error) {
	if recursive {
		pathCreated, err = CreateRecursive(zconn, zkPath, value, 0, aclv)
	} else {
		pathCreated, err = zconn.Create(zkPath, []byte(value), 0, aclv)
	}
	if err != nil && ZkErrorEqual(err, zk.ErrNodeExists) {
		pathCreated = ""
		_, err = zconn.Set(zkPath, []byte(value), -1)
	}
	return
}

type pathItem struct {
	path string
	err  error
}

func ChildrenRecursive(zconn Conn, zkPath string) ([]string, error) {
	var err error
	mutex := sync.Mutex{}
	wg := sync.WaitGroup{}
	pathList := make([]string, 0, 32)
	children, _, err := zconn.Children(zkPath)
	if err != nil {
		return nil, err
	}

	for _, child := range children {
		wg.Add(1)
		go func(child string) {
			childPath := path.Join(zkPath, child)
			rChildren, zkErr := ChildrenRecursive(zconn, childPath)
			if zkErr != nil {
				// If other processes are deleting nodes, we need to ignore
				// the missing nodes.
				if !ZkErrorEqual(zkErr, zk.ErrNoNode) {
					mutex.Lock()
					err = zkErr
					mutex.Unlock()
				}
			} else {
				mutex.Lock()
				pathList = append(pathList, child)
				for _, rChild := range rChildren {
					pathList = append(pathList, path.Join(child, rChild))
				}
				mutex.Unlock()
			}
			wg.Done()
		}(child)
	}

	wg.Wait()

	mutex.Lock()
	defer mutex.Unlock()
	if err != nil {
		return nil, err
	}
	return pathList, nil
}
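A usage sketch for ChildrenRecursive under the same assumptions (local server, a pre-populated /zk/demo subtree); returned paths are relative to the starting node:

package main

import (
	"fmt"

	"github.com/ngaut/zkhelper"
)

func main() {
	conn, err := zkhelper.ConnectToZk("localhost:2181") // illustrative address
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// For a tree /zk/demo/a/b this prints paths relative to the start
	// node, e.g. [a a/b]; each level is fetched in its own goroutine.
	paths, err := zkhelper.ChildrenRecursive(conn, "/zk/demo")
	fmt.Println(paths, err)
}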
func HasWildcard(path string) bool {
	for i := 0; i < len(path); i++ {
		switch path[i] {
		case '\\':
			if i+1 >= len(path) {
				return true
			} else {
				i++
			}
		case '*', '?', '[':
			return true
		}
	}
	return false
}

func resolveRecursive(zconn Conn, parts []string, toplevel bool) ([]string, error) {
	for i, part := range parts {
		if HasWildcard(part) {
			var children []string
			zkParentPath := strings.Join(parts[:i], "/")
			var err error
			children, _, err = zconn.Children(zkParentPath)
			if err != nil {
				// we asked for something like
				// /zk/cell/aaa/* and
				// /zk/cell/aaa doesn't exist
				// -> return empty list, no error
				// (note we check both a regular zk
				// error and the error the test
				// produces)
				if ZkErrorEqual(err, zk.ErrNoNode) {
					return nil, nil
				}
				// otherwise we return the error
				return nil, err
			}
			sort.Strings(children)

			results := make([][]string, len(children))
			wg := &sync.WaitGroup{}
			mu := &sync.Mutex{}
			var firstError error

			for j, child := range children {
				matched, err := path.Match(part, child)
				if err != nil {
					return nil, err
				}
				if matched {
					// we have a match!
					wg.Add(1)
					newParts := make([]string, len(parts))
					copy(newParts, parts)
					newParts[i] = child
					go func(j int) {
						defer wg.Done()
						subResult, err := resolveRecursive(zconn, newParts, false)
						if err != nil {
							mu.Lock()
							if firstError != nil {
								log.Infof("Multiple error: %v", err)
							} else {
								firstError = err
							}
							mu.Unlock()
						} else {
							results[j] = subResult
						}
					}(j)
				}
			}

			wg.Wait()
			if firstError != nil {
				return nil, firstError
			}

			result := make([]string, 0, 32)
			for j := 0; j < len(children); j++ {
				subResult := results[j]
				if subResult != nil {
					result = append(result, subResult...)
				}
			}

			// we found a part that is a wildcard, we
			// added the children already, we're done
			return result, nil
		}
	}

	// no part contains a wildcard, add the path if it exists, and done
	path := strings.Join(parts, "/")
	if toplevel {
		// for whatever the user typed at the toplevel, we don't
		// check it exists or not, we just return it
		return []string{path}, nil
	}

	// this is an expanded path, we need to check if it exists
	_, stat, err := zconn.Exists(path)
	if err != nil {
		return nil, err
	}
	if stat != nil {
		return []string{path}, nil
	}
	return nil, nil
}

// resolve paths like:
// /zk/nyc/vt/tablets/*/action
// /zk/global/vt/keyspaces/*/shards/*/action
// /zk/*/vt/tablets/*/action
// into real existing paths
//
// If you send paths that don't contain any wildcard and
// don't exist, this function will return an empty array.
func ResolveWildcards(zconn Conn, zkPaths []string) ([]string, error) {
	// check all the paths start with /zk/ before doing anything
	// time consuming
	// relax this in case we are not talking to a metaconn and
	// just want to talk to a specified instance.
	// for _, zkPath := range zkPaths {
	// 	if _, err := ZkCellFromZkPath(zkPath); err != nil {
	// 		return nil, err
	// 	}
	// }

	results := make([][]string, len(zkPaths))
	wg := &sync.WaitGroup{}
	mu := &sync.Mutex{}
	var firstError error

	for i, zkPath := range zkPaths {
		wg.Add(1)
		parts := strings.Split(zkPath, "/")
		go func(i int) {
			defer wg.Done()
			subResult, err := resolveRecursive(zconn, parts, true)
			if err != nil {
				mu.Lock()
				if firstError != nil {
					log.Infof("Multiple error: %v", err)
				} else {
					firstError = err
				}
				mu.Unlock()
			} else {
				results[i] = subResult
			}
		}(i)
	}

	wg.Wait()
	if firstError != nil {
		return nil, firstError
	}

	result := make([]string, 0, 32)
	for i := 0; i < len(zkPaths); i++ {
		subResult := results[i]
		if subResult != nil {
			result = append(result, subResult...)
		}
	}

	return result, nil
}
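A usage sketch for ResolveWildcards with hypothetical paths; wildcard segments are matched with path.Match against each level's children:

package main

import (
	"fmt"

	"github.com/ngaut/zkhelper"
)

func main() {
	conn, err := zkhelper.ConnectToZk("localhost:2181") // illustrative address
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// Expands to every existing path of the form /zk/demo/<child>/b.
	// A toplevel path without wildcards is returned exactly as typed,
	// whether or not it exists.
	paths, err := zkhelper.ResolveWildcards(conn, []string{"/zk/demo/*/b"})
	fmt.Println(paths, err)
}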
func DeleteRecursive(zconn Conn, zkPath string, version int) error {
	// version: -1 delete any version of the node at path - only applies to the top node
	err := zconn.Delete(zkPath, int32(version))
	if err == nil {
		return nil
	}
	if !ZkErrorEqual(err, zk.ErrNotEmpty) {
		return err
	}
	// Remove the ability for other nodes to get created while we are trying to delete.
	// Otherwise, you can enter a race condition, or get starved out from deleting.
	_, err = zconn.SetACL(zkPath, zk.WorldACL(zk.PermAdmin|zk.PermDelete|zk.PermRead), int32(version))
	if err != nil {
		return err
	}
	children, _, err := zconn.Children(zkPath)
	if err != nil {
		return err
	}
	for _, child := range children {
		err := DeleteRecursive(zconn, path.Join(zkPath, child), -1)
		if err != nil && !ZkErrorEqual(err, zk.ErrNoNode) {
			return fmt.Errorf("zkutil: recursive delete failed: %v", err)
		}
	}

	err = zconn.Delete(zkPath, int32(version))
	if err != nil && !ZkErrorEqual(err, zk.ErrNotEmpty) {
		err = fmt.Errorf("zkutil: nodes getting recreated underneath delete (app race condition): %v", zkPath)
	}
	return err
}
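A usage sketch for DeleteRecursive with a hypothetical subtree; note the function first strips create permission via SetACL so the subtree cannot grow while it is being torn down:

package main

import (
	"fmt"

	"github.com/ngaut/zkhelper"
)

func main() {
	conn, err := zkhelper.ConnectToZk("localhost:2181") // illustrative address
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// Version -1 skips the optimistic version check on the top node;
	// children are always deleted with -1.
	if err := zkhelper.DeleteRecursive(conn, "/zk/demo", -1); err != nil {
		fmt.Println("delete failed:", err)
	}
}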
// The lexically lowest node is the lock holder - verify that this
// path holds the lock. Call this queue-lock because the semantics are
// a hybrid. Normal zk locks make assumptions about sequential
// numbering that don't hold when the data in a lock is modified.
// If the provided 'interrupted' chan is closed, we'll just stop waiting
// and return an interruption error.
func ObtainQueueLock(zconn Conn, zkPath string, wait time.Duration, interrupted chan struct{}) error {
	queueNode := path.Dir(zkPath)
	lockNode := path.Base(zkPath)

	timer := time.NewTimer(wait)
trylock:
	children, _, err := zconn.Children(queueNode)
	if err != nil {
		return fmt.Errorf("zkutil: trylock failed %v", err)
	}
	sort.Strings(children)
	if len(children) > 0 {
		if children[0] == lockNode {
			return nil
		}
		if wait > 0 {
			prevLock := ""
			for i := 1; i < len(children); i++ {
				if children[i] == lockNode {
					prevLock = children[i-1]
					break
				}
			}
			if prevLock == "" {
				return fmt.Errorf("zkutil: no previous queue node found: %v", zkPath)
			}

			zkPrevLock := path.Join(queueNode, prevLock)
			_, stat, watch, err := zconn.ExistsW(zkPrevLock)
			if err != nil {
				return fmt.Errorf("zkutil: unable to watch queued node %v %v", zkPrevLock, err)
			}
			if stat == nil {
				goto trylock
			}
			select {
			case <-timer.C:
				break
			case <-interrupted:
				return ErrInterrupted
			case <-watch:
				// The precise event doesn't matter - try to read again regardless.
				goto trylock
			}
		}
		return ErrTimeout
	}
	return fmt.Errorf("zkutil: empty queue node: %v", queueNode)
}
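A sketch of how ObtainQueueLock is meant to be driven: enqueue a sequential node under a queue directory, then wait to become lexically first. The paths and timeout are assumptions, and Conn's Create signature is taken from the implementations in this file:

package main

import (
	"fmt"
	"time"

	"github.com/ngaut/go-zookeeper/zk"
	"github.com/ngaut/zkhelper"
)

func main() {
	conn, err := zkhelper.ConnectToZk("localhost:2181") // illustrative address
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// Enqueue: ZooKeeper appends a sequence number to "q-", and the
	// ephemeral flag drops our entry if the session dies.
	me, err := conn.Create("/zk/demo/queue/q-", nil,
		zk.FlagSequence|zk.FlagEphemeral, zk.WorldACL(zkhelper.PERM_FILE))
	if err != nil {
		panic(err)
	}

	// Wait up to 10s until our node is lexically first in the queue;
	// closing `interrupted` would abort the wait with ErrInterrupted.
	interrupted := make(chan struct{})
	err = zkhelper.ObtainQueueLock(conn, me, 10*time.Second, interrupted)
	fmt.Println("lock result:", err)
}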
func ZkEventOk(e zk.Event) bool {
	return e.State == zk.StateConnected
}

func NodeExists(zconn Conn, zkPath string) (bool, error) {
	b, _, err := zconn.Exists(zkPath)
	return b, err
}

// Close the release channel when you want to clean up nicely.
func CreatePidNode(zconn Conn, zkPath string, contents string, done chan struct{}) error {
	// On the first try, assume the cluster is up and running, that will
	// help hunt down any config issues present at startup
	if _, err := zconn.Create(zkPath, []byte(contents), zk.FlagEphemeral, zk.WorldACL(PERM_FILE)); err != nil {
		if ZkErrorEqual(err, zk.ErrNodeExists) {
			err = zconn.Delete(zkPath, -1)
		}
		if err != nil {
			return fmt.Errorf("zkutil: failed deleting pid node: %v: %v", zkPath, err)
		}
		_, err = zconn.Create(zkPath, []byte(contents), zk.FlagEphemeral, zk.WorldACL(PERM_FILE))
		if err != nil {
			return fmt.Errorf("zkutil: failed creating pid node: %v: %v", zkPath, err)
		}
	}

	go func() {
		for {
			_, _, watch, err := zconn.GetW(zkPath)
			if err != nil {
				if ZkErrorEqual(err, zk.ErrNoNode) {
					_, err = zconn.Create(zkPath, []byte(contents), zk.FlagEphemeral, zk.WorldACL(zk.PermAll))
					if err != nil {
						log.Warningf("failed recreating pid node: %v: %v", zkPath, err)
					} else {
						log.Infof("recreated pid node: %v", zkPath)
						continue
					}
				} else {
					log.Warningf("failed reading pid node: %v", err)
				}
			} else {
				select {
				case event := <-watch:
					if ZkEventOk(event) && event.Type == zk.EventNodeDeleted {
						// Most likely another process has started up. However,
						// there is a chance that an ephemeral node is deleted by
						// the session expiring, yet that same session gets a watch
						// notification. This seems like buggy behavior, but rather
						// than race too hard on the node, just wait a bit and see
						// if the situation resolves itself.
						log.Warningf("pid deleted: %v", zkPath)
					} else {
						log.Infof("pid node event: %v", event)
					}
					// break here and wait for a bit before attempting
				case <-done:
					log.Infof("pid watcher stopped on done: %v", zkPath)
					return
				}
			}
			select {
			// No one likes a thundering herd, least of all zk.
			case <-time.After(5*time.Second + time.Duration(rand.Int63n(55e9))):
			case <-done:
				log.Infof("pid watcher stopped on done: %v", zkPath)
				return
			}
		}
	}()

	return nil
}
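A usage sketch for CreatePidNode with a hypothetical path; the background goroutine keeps the ephemeral node alive until the done channel is closed:

package main

import (
	"fmt"
	"os"

	"github.com/ngaut/zkhelper"
)

func main() {
	conn, err := zkhelper.ConnectToZk("localhost:2181") // illustrative address
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	done := make(chan struct{})
	contents := fmt.Sprintf("pid %d", os.Getpid())
	if err := zkhelper.CreatePidNode(conn, "/zk/demo/pid", contents, done); err != nil {
		panic(err)
	}
	// ... do work; the watcher recreates the node if it vanishes ...
	close(done) // stops the watcher on shutdown
}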
// ZLocker is an interface for a lock that can fail.
type ZLocker interface {
	Lock(desc string) error
	LockWithTimeout(wait time.Duration, desc string) error
	Unlock() error
	Interrupt()
}

// Experiment with a little bit of abstraction.
// FIXME(msolo) This object may need a mutex to ensure it can be shared
// across goroutines.
type zMutex struct {
	mu          sync.Mutex
	zconn       Conn
	path        string // Path under which we try to create lock nodes.
	contents    string
	interrupted chan struct{}
	name        string // The name of the specific lock node we created.
	ephemeral   bool
}

// CreateMutex initializes an unacquired mutex. A mutex is released only
// by Unlock. You can clean up a mutex with delete, but you should be
// careful doing so.
func CreateMutex(zconn Conn, zkPath string) ZLocker {
	zm, err := CreateMutexWithContents(zconn, zkPath, map[string]interface{}{})
	if err != nil {
		panic(err) // should never happen
	}
	return zm
}

// CreateMutexWithContents initializes an unacquired mutex with special content for this mutex.
// A mutex is released only by Unlock. You can clean up a mutex with delete, but you should be
// careful doing so.
func CreateMutexWithContents(zconn Conn, zkPath string, contents map[string]interface{}) (ZLocker, error) {
	hostname, err := os.Hostname()
	if err != nil {
		return nil, err
	}
	pid := os.Getpid()
	contents["hostname"] = hostname
	contents["pid"] = pid

	data, err := json.Marshal(contents)
	if err != nil {
		return nil, err
	}

	return &zMutex{zconn: zconn, path: zkPath, contents: string(data), interrupted: make(chan struct{})}, nil
}
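A usage sketch for the mutex API (hypothetical lock path; the desc string ends up in the lock node's JSON contents alongside hostname and pid):

package main

import (
	"fmt"
	"time"

	"github.com/ngaut/zkhelper"
)

func main() {
	conn, err := zkhelper.ConnectToZk("localhost:2181") // illustrative address
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// Candidates queue up as sequential "lock-" nodes under this path;
	// the lexically first one holds the lock.
	locker := zkhelper.CreateMutex(conn, "/zk/demo/locks/mylock")
	if err := locker.LockWithTimeout(30*time.Second, "batch job"); err != nil {
		fmt.Println("could not acquire lock:", err)
		return
	}
	defer locker.Unlock()
	// ... critical section ...
}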
// Interrupt releases a lock that's held.
func (zm *zMutex) Interrupt() {
	select {
	case zm.interrupted <- struct{}{}:
	default:
		log.Warningf("zmutex interrupt blocked")
	}
}

// Lock returns nil when the lock is acquired.
func (zm *zMutex) Lock(desc string) error {
	return zm.LockWithTimeout(365*24*time.Hour, desc)
}

// LockWithTimeout returns nil when the lock is acquired. A lock is
// held if the file exists and you are the creator. Setting the wait
// to zero makes this a nonblocking lock check.
//
// FIXME(msolo) Disallow non-super users from removing the lock?
func (zm *zMutex) LockWithTimeout(wait time.Duration, desc string) (err error) {
	timer := time.NewTimer(wait)
	defer func() {
		if panicErr := recover(); panicErr != nil || err != nil {
			zm.deleteLock()
		}
	}()
	// Ensure the rendezvous node is here.
	// FIXME(msolo) Assuming locks are contended, it will be cheaper to assume this just
	// exists.
	_, err = CreateRecursive(zm.zconn, zm.path, "", 0, zk.WorldACL(PERM_DIRECTORY))
	if err != nil && !ZkErrorEqual(err, zk.ErrNodeExists) {
		return err
	}

	lockPrefix := path.Join(zm.path, "lock-")
	zflags := zk.FlagSequence
	if zm.ephemeral {
		zflags = zflags | zk.FlagEphemeral
	}

	// update node content
	var lockContent map[string]interface{}
	err = json.Unmarshal([]byte(zm.contents), &lockContent)
	if err != nil {
		return err
	}
	lockContent["desc"] = desc
	newContent, err := json.Marshal(lockContent)
	if err != nil {
		return err
	}

createlock:
	lockCreated, err := zm.zconn.Create(lockPrefix, newContent, int32(zflags), zk.WorldACL(PERM_FILE))
	if err != nil {
		return err
	}
	name := path.Base(lockCreated)
	zm.mu.Lock()
	zm.name = name
	zm.mu.Unlock()

trylock:
	children, _, err := zm.zconn.Children(zm.path)
	if err != nil {
		return fmt.Errorf("zkutil: trylock failed %v", err)
	}
	sort.Strings(children)
	if len(children) == 0 {
		return fmt.Errorf("zkutil: empty lock: %v", zm.path)
	}

	if children[0] == name {
		// We are the lock owner.
		return nil
	}

	// This is the degenerate case of a nonblocking lock check. It's not optimal, but
	// also probably not worth optimizing.
	if wait == 0 {
		return ErrTimeout
	}
	prevLock := ""
	for i := 1; i < len(children); i++ {
		if children[i] == name {
			prevLock = children[i-1]
			break
		}
	}
	if prevLock == "" {
		// This is an interesting case. The node disappeared
		// underneath us, probably due to a session loss. We can
		// recreate the lock node (with a new sequence number) and
		// keep trying.
		log.Warningf("zkutil: no lock node found: %v/%v", zm.path, zm.name)
		goto createlock
	}

	zkPrevLock := path.Join(zm.path, prevLock)
	exist, stat, watch, err := zm.zconn.ExistsW(zkPrevLock)
	if err != nil {
		// FIXME(msolo) Should this be a retry?
		return fmt.Errorf("zkutil: unable to watch previous lock node %v %v", zkPrevLock, err)
	}
	if stat == nil || !exist {
		goto trylock
	}
	select {
	case <-timer.C:
		return ErrTimeout
	case <-zm.interrupted:
		return ErrInterrupted
	case event := <-watch:
		log.Infof("zkutil: lock event: %v", event)
		// The precise event doesn't matter - try to read again regardless.
		goto trylock
	}
	panic("unexpected")
}

// Unlock returns nil if the lock was successfully
// released. Otherwise, it is most likely a zk related error.
func (zm *zMutex) Unlock() error {
	return zm.deleteLock()
}

func (zm *zMutex) deleteLock() error {
	zm.mu.Lock()
	zpath := path.Join(zm.path, zm.name)
	zm.mu.Unlock()

	err := zm.zconn.Delete(zpath, -1)
	if err != nil && !ZkErrorEqual(err, zk.ErrNoNode) {
		return err
	}
	return nil
}

// ZElector stores basic state for running an election.
type ZElector struct {
	*zMutex
	path   string
	leader string
}

func (ze *ZElector) isLeader() bool {
	return ze.leader == ze.name
}

type electionEvent struct {
	Event int
	Err   error
}

type backoffDelay struct {
	min   time.Duration
	max   time.Duration
	delay time.Duration
}

func newBackoffDelay(min, max time.Duration) *backoffDelay {
	return &backoffDelay{min, max, min}
}

func (bd *backoffDelay) NextDelay() time.Duration {
	delay := bd.delay
	bd.delay = 2 * bd.delay
	if bd.delay > bd.max {
		bd.delay = bd.max
	}
	return delay
}

func (bd *backoffDelay) Reset() {
	bd.delay = bd.min
}
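The backoff doubles on each call until it reaches the cap. A standalone sketch of the delay shape (backoffDelay itself is unexported, so this defines its own copy):

package main

import (
	"fmt"
	"time"
)

// A copy of the unexported backoffDelay above: NextDelay doubles the
// wait until it hits the cap; Reset starts over at min.
type backoffDelay struct {
	min, max, delay time.Duration
}

func (bd *backoffDelay) NextDelay() time.Duration {
	d := bd.delay
	bd.delay *= 2
	if bd.delay > bd.max {
		bd.delay = bd.max
	}
	return d
}

func (bd *backoffDelay) Reset() { bd.delay = bd.min }

func main() {
	bd := &backoffDelay{min: 100 * time.Millisecond, max: time.Second, delay: 100 * time.Millisecond}
	for i := 0; i < 5; i++ {
		fmt.Println(bd.NextDelay()) // 100ms 200ms 400ms 800ms 1s
	}
}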
// ElectorTask is the interface for a task that runs essentially
// forever or until something bad happens. If a task must be stopped,
// it should be handled promptly - no second notification will be
// sent.
type ElectorTask interface {
	Run() error
	Stop()
	// Return true if interrupted, false if it died of natural causes.
	// An interrupted task indicates that the election should stop.
	Interrupted() bool
}

// CreateElection returns an initialized elector. An election is
// really a cycle of events. You are flip-flopping between leader and
// candidate. It's better to think of this as a stream of events that
// one needs to react to.
func CreateElection(zconn Conn, zkPath string) ZElector {
	zm, err := CreateElectionWithContents(zconn, zkPath, map[string]interface{}{})
	if err != nil {
		// should never happen
		panic(err)
	}
	return zm
}

// CreateElectionWithContents returns an initialized elector with special contents.
// An election is really a cycle of events. You are flip-flopping between leader
// and candidate. It's better to think of this as a stream of events that
// one needs to react to.
func CreateElectionWithContents(zconn Conn, zkPath string, contents map[string]interface{}) (ZElector, error) {
	l, err := CreateMutexWithContents(zconn, path.Join(zkPath, "candidates"), contents)
	if err != nil {
		return ZElector{}, err
	}
	zm := l.(*zMutex)
	zm.ephemeral = true
	return ZElector{zMutex: zm, path: zkPath}, nil
}

// RunTask returns nil when the underlying task ends or the error it
// generated.
func (ze *ZElector) RunTask(task ElectorTask) error {
	delay := newBackoffDelay(100*time.Millisecond, 1*time.Minute)
	leaderPath := path.Join(ze.path, "leader")
	for {
		_, err := CreateRecursive(ze.zconn, leaderPath, "", 0, zk.WorldACL(PERM_FILE))
		if err == nil || ZkErrorEqual(err, zk.ErrNodeExists) {
			break
		}
		log.Warningf("election leader create failed: %v", err)
		time.Sleep(delay.NextDelay())
	}

	for {
		err := ze.Lock("RunTask")
		if err != nil {
			log.Warningf("election lock failed: %v", err)
			if err == ErrInterrupted {
				return ErrInterrupted
			}
			continue
		}
		// Confirm your win and deliver acceptance speech. This notifies
		// listeners who will have been watching the leader node for
		// changes.
		_, err = ze.zconn.Set(leaderPath, []byte(ze.contents), -1)
		if err != nil {
			log.Warningf("election promotion failed: %v", err)
			continue
		}

		log.Infof("election promote leader %v", leaderPath)
		taskErrChan := make(chan error)
		go func() {
			taskErrChan <- task.Run()
		}()

	watchLeader:
		// Watch the leader so we can get notified if something goes wrong.
		data, _, watch, err := ze.zconn.GetW(leaderPath)
		if err != nil {
			log.Warningf("election unable to watch leader node %v %v", leaderPath, err)
			// FIXME(msolo) Add delay
			goto watchLeader
		}

		if string(data) != ze.contents {
			log.Warningf("election unable to promote leader")
			task.Stop()
			// We won the election, but we didn't become the leader. How is that possible?
			// (see Bush v. Gore for some inspiration)
			// It means:
			//   1. Someone isn't playing by the election rules (a bad actor).
			//      Hard to detect - let's assume we don't have this problem. :)
			//   2. We lost our connection somehow and the ephemeral lock was cleared,
			//      allowing someone else to win the election.
			continue
		}

		// This is where we start our target process and watch for its failure.
	waitForEvent:
		select {
		case <-ze.interrupted:
			log.Warning("election interrupted - stop child process")
			task.Stop()
			// Once the process dies from the signal, this will all tear down.
			goto waitForEvent
		case taskErr := <-taskErrChan:
			// If our code fails, unlock to trigger an election.
			log.Infof("election child process ended: %v", taskErr)
			ze.Unlock()
			if task.Interrupted() {
				log.Warningf("election child process interrupted - stepping down")
				return ErrInterrupted
			}
			continue
		case zevent := <-watch:
			// We had a zk connection hiccup. We have a few choices,
			// but it depends on the constraints and the events.
			//
			// If we get SESSION_EXPIRED our connection loss triggered an
			// election that we won't have won and thus the lock was
			// automatically freed. We have no choice but to start over.
			if zevent.State == zk.StateExpired {
				log.Warningf("election leader watch expired")
				task.Stop()
				continue
			}

			// Otherwise, we had an intermittent issue or something touched
			// the node. Either we lost our position or someone broke
			// protocol and touched the leader node. We just reconnect and
			// revalidate. In the meantime, assume we are still the leader
			// until we determine otherwise.
			//
			// On a reconnect we will be able to see the leader
			// information. If we still hold the position, great. If not, we
			// kill the associated process.
			//
			// On a leader node change, we need to perform the same
			// validation. It's possible an election completes without the
			// old leader realizing he is out of touch.
			log.Warningf("election leader watch event %v", zevent)
			goto watchLeader
		}
	}
	panic("unreachable")
}
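A sketch of driving an election with a hypothetical ElectorTask; every candidate calls RunTask, and only the current leader's Run is active at any one time:

package main

import (
	"fmt"
	"sync"
	"time"

	"github.com/ngaut/zkhelper"
)

// demoTask is a hypothetical ElectorTask: it "leads" until stopped or
// until a minute passes, then ends of natural causes.
type demoTask struct {
	stop chan struct{}
	once sync.Once
}

func (t *demoTask) Run() error {
	fmt.Println("acting as leader")
	select {
	case <-t.stop:
	case <-time.After(time.Minute):
	}
	return nil
}

func (t *demoTask) Stop()             { t.once.Do(func() { close(t.stop) }) }
func (t *demoTask) Interrupted() bool { return false }

func main() {
	conn, err := zkhelper.ConnectToZk("localhost:2181") // illustrative address
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// Candidates queue under /zk/demo/election/candidates; the winner
	// writes its contents into the leader node and runs the task.
	elector := zkhelper.CreateElection(conn, "/zk/demo/election")
	if err := elector.RunTask(&demoTask{stop: make(chan struct{})}); err != nil {
		fmt.Println("election ended:", err)
	}
}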