
chore: update github.com/couchbase/gomemcached and github.com/couchbase/go-couchbase (#9419)

Antoine GIRARD 2019-12-19 03:03:26 +01:00 committed by techknowlogick
parent 8873a80276
commit 559fb6ccf0
12 changed files with 1555 additions and 200 deletions

View file

@@ -0,0 +1,123 @@
package memcached
import (
"encoding/json"
"fmt"
)
// Collection based filter
type CollectionsFilter struct {
ManifestUid uint64
UseManifestUid bool
StreamId uint16
UseStreamId bool
// Use either ScopeId OR CollectionsList, not both
CollectionsList []uint32
ScopeId uint32
}
type nonStreamIdNonResumeScopeMeta struct {
ScopeId string `json:"scope"`
}
type nonStreamIdResumeScopeMeta struct {
ManifestId string `json:"uid"`
}
type nonStreamIdNonResumeCollectionsMeta struct {
CollectionsList []string `json:"collections"`
}
type nonStreamIdResumeCollectionsMeta struct {
ManifestId string `json:"uid"`
CollectionsList []string `json:"collections"`
}
type streamIdNonResumeCollectionsMeta struct {
CollectionsList []string `json:"collections"`
StreamId uint16 `json:"sid"`
}
type streamIdNonResumeScopeMeta struct {
ScopeId string `json:"scope"`
StreamId uint16 `json:"sid"`
}
func (c *CollectionsFilter) IsValid() error {
if c.UseManifestUid {
return fmt.Errorf("Not implemented yet")
}
if len(c.CollectionsList) > 0 && c.ScopeId > 0 {
return fmt.Errorf("Collection list is specified but scope ID is also specified")
}
return nil
}
func (c *CollectionsFilter) outputCollectionsFilterColList() (outputList []string) {
for _, collectionUint := range c.CollectionsList {
outputList = append(outputList, fmt.Sprintf("%x", collectionUint))
}
return
}
func (c *CollectionsFilter) outputScopeId() string {
return fmt.Sprintf("%x", c.ScopeId)
}
func (c *CollectionsFilter) ToStreamReqBody() ([]byte, error) {
if err := c.IsValid(); err != nil {
return nil, err
}
var output interface{}
switch c.UseStreamId {
case true:
switch c.UseManifestUid {
case true:
// TODO
return nil, fmt.Errorf("NotImplemented0")
case false:
switch len(c.CollectionsList) > 0 {
case true:
filter := &streamIdNonResumeCollectionsMeta{
StreamId: c.StreamId,
CollectionsList: c.outputCollectionsFilterColList(),
}
output = *filter
case false:
filter := &streamIdNonResumeScopeMeta{
StreamId: c.StreamId,
ScopeId: c.outputScopeId(),
}
output = *filter
}
}
case false:
switch c.UseManifestUid {
case true:
// TODO
return nil, fmt.Errorf("NotImplemented1")
case false:
switch len(c.CollectionsList) > 0 {
case true:
filter := &nonStreamIdNonResumeCollectionsMeta{
CollectionsList: c.outputCollectionsFilterColList(),
}
output = *filter
case false:
output = nonStreamIdNonResumeScopeMeta{ScopeId: c.outputScopeId()}
}
}
}
data, err := json.Marshal(output)
if err != nil {
return nil, err
} else {
return data, nil
}
}
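Usage sketch (not part of this commit): the filter above only serializes the scope/collection selection into the JSON body that accompanies a stream request. A minimal sketch, assuming the package is imported from github.com/couchbase/gomemcached/client and using made-up collection IDs:

package main

import (
	"fmt"
	"log"

	memcached "github.com/couchbase/gomemcached/client"
)

func main() {
	// Sketch only: a non-streamID, non-resume filter over two hypothetical
	// collection IDs. ScopeId is left zero, so the collection list is used.
	filter := &memcached.CollectionsFilter{
		CollectionsList: []uint32{0x9, 0xa},
	}
	body, err := filter.ToStreamReqBody()
	if err != nil {
		log.Fatal(err)
	}
	// Collection IDs are hex-encoded strings in the request body.
	fmt.Printf("%s\n", body) // {"collections":["9","a"]}
}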

View file

@@ -28,10 +28,12 @@ type ClientIface interface {
CASNext(vb uint16, k string, exp int, state *CASState) bool
CAS(vb uint16, k string, f CasFunc, initexp int) (*gomemcached.MCResponse, error)
CollectionsGetCID(scope string, collection string) (*gomemcached.MCResponse, error)
CollectionEnabled() bool
Close() error
Decr(vb uint16, key string, amt, def uint64, exp int) (uint64, error)
Del(vb uint16, key string) (*gomemcached.MCResponse, error)
EnableMutationToken() (*gomemcached.MCResponse, error)
EnableFeatures(features Features) (*gomemcached.MCResponse, error)
Get(vb uint16, key string) (*gomemcached.MCResponse, error)
GetCollectionsManifest() (*gomemcached.MCResponse, error)
GetFromCollection(vb uint16, cid uint32, key string) (*gomemcached.MCResponse, error)
@@ -76,9 +78,12 @@ var Healthy uint32 = 1
type Features []Feature
type Feature uint16
const FeatureMutationToken = Feature(0x04)
const FeatureTcpNoDelay = Feature(0x03)
const FeatureMutationToken = Feature(0x04) // XATTR bit in data type field with dcp mutations
const FeatureXattr = Feature(0x06)
const FeatureXerror = Feature(0x07)
const FeatureCollections = Feature(0x12)
const FeatureSnappyCompression = Feature(0x0a)
const FeatureDataType = Feature(0x0b)
type memcachedConnection interface {
@@ -96,6 +101,9 @@ type Client struct {
opaque uint32
hdrBuf []byte
featureMtx sync.RWMutex
sentHeloFeatures Features
}
var (
@@ -285,6 +293,10 @@ func (c *Client) EnableFeatures(features Features) (*gomemcached.MCResponse, err
binary.BigEndian.PutUint16(payload[len(payload)-2:], uint16(feature))
}
c.featureMtx.Lock()
c.sentHeloFeatures = features
c.featureMtx.Unlock()
return c.Send(&gomemcached.MCRequest{
Opcode: gomemcached.HELLO,
Key: []byte("GoMemcached"),
@@ -363,6 +375,18 @@ func (c *Client) CollectionsGetCID(scope string, collection string) (*gomemcache
return res, nil
}
func (c *Client) CollectionEnabled() bool {
c.featureMtx.RLock()
defer c.featureMtx.RUnlock()
for _, feature := range c.sentHeloFeatures {
if feature == FeatureCollections {
return true
}
}
return false
}
// Get the value for a key, and update expiry
func (c *Client) GetAndTouch(vb uint16, key string, exp int) (*gomemcached.MCResponse, error) {
extraBuf := make([]byte, 4)
@@ -1138,3 +1162,7 @@ func IfResStatusError(response *gomemcached.MCResponse) bool {
response.Status != gomemcached.SUBDOC_PATH_NOT_FOUND &&
response.Status != gomemcached.SUBDOC_MULTI_PATH_FAILURE_DELETED)
}
func (c *Client) Conn() io.ReadWriteCloser {
return c.conn
}
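Usage sketch (not part of this commit): the new sentHeloFeatures bookkeeping lets a caller ask, after EnableFeatures, whether collections were requested during HELO. A minimal sketch, assuming c is an already-connected *memcached.Client obtained elsewhere:

package example

import (
	memcached "github.com/couchbase/gomemcached/client"
)

// enableCollections negotiates HELO features on an already-connected client
// and reports whether collection-aware requests may be issued. Sketch only:
// obtaining and closing the client is out of scope here.
func enableCollections(c *memcached.Client) (bool, error) {
	features := memcached.Features{
		memcached.FeatureCollections,
		memcached.FeatureXerror,
	}
	if _, err := c.EnableFeatures(features); err != nil {
		return false, err
	}
	// CollectionEnabled reflects what this client sent in HELO (the new
	// sentHeloFeatures field), so it acts as a client-side capability gate.
	return c.CollectionEnabled(), nil
}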

View file

@@ -0,0 +1,346 @@
package memcached
import (
"encoding/binary"
"fmt"
"github.com/couchbase/gomemcached"
"math"
)
type SystemEventType int
const InvalidSysEvent SystemEventType = -1
const (
CollectionCreate SystemEventType = 0
CollectionDrop SystemEventType = iota
CollectionFlush SystemEventType = iota // KV did not implement
ScopeCreate SystemEventType = iota
ScopeDrop SystemEventType = iota
CollectionChanged SystemEventType = iota
)
type ScopeCreateEvent interface {
GetSystemEventName() (string, error)
GetScopeId() (uint32, error)
GetManifestId() (uint64, error)
}
type CollectionCreateEvent interface {
GetSystemEventName() (string, error)
GetScopeId() (uint32, error)
GetCollectionId() (uint32, error)
GetManifestId() (uint64, error)
GetMaxTTL() (uint32, error)
}
type CollectionDropEvent interface {
GetScopeId() (uint32, error)
GetCollectionId() (uint32, error)
GetManifestId() (uint64, error)
}
type ScopeDropEvent interface {
GetScopeId() (uint32, error)
GetManifestId() (uint64, error)
}
type CollectionChangedEvent interface {
GetCollectionId() (uint32, error)
GetManifestId() (uint64, error)
GetMaxTTL() (uint32, error)
}
var ErrorInvalidOp error = fmt.Errorf("Invalid Operation")
var ErrorInvalidVersion error = fmt.Errorf("Invalid version for parsing")
var ErrorValueTooShort error = fmt.Errorf("Value length is too short")
var ErrorNoMaxTTL error = fmt.Errorf("This event has no max TTL")
// UprEvent memcached events for UPR streams.
type UprEvent struct {
Opcode gomemcached.CommandCode // Type of event
Status gomemcached.Status // Response status
VBucket uint16 // VBucket this event applies to
DataType uint8 // data type
Opaque uint16 // 16 MSB of opaque
VBuuid uint64 // This field is set by downstream
Flags uint32 // Item flags
Expiry uint32 // Item expiration time
Key, Value []byte // Item key/value
OldValue []byte // TODO: TBD: old document value
Cas uint64 // CAS value of the item
Seqno uint64 // sequence number of the mutation
RevSeqno uint64 // rev sequence number : deletions
LockTime uint32 // Lock time
MetadataSize uint16 // Metadata size
SnapstartSeq uint64 // start sequence number of this snapshot
SnapendSeq uint64 // End sequence number of the snapshot
SnapshotType uint32 // 0: disk 1: memory
FailoverLog *FailoverLog // Failover log containing vbuuid and sequence number
Error error // Error value in case of a failure
ExtMeta []byte // Extended Metadata
AckSize uint32 // The number of bytes that can be Acked to DCP
SystemEvent SystemEventType // Only valid if IsSystemEvent() is true
SysEventVersion uint8 // Based on the version, the way Extra bytes is parsed is different
ValueLen int // Cache it to avoid len() calls for performance
CollectionId uint64 // Valid if Collection is in use
}
// FailoverLog containing vbuuid and sequence number
type FailoverLog [][2]uint64
func makeUprEvent(rq gomemcached.MCRequest, stream *UprStream, bytesReceivedFromDCP int) *UprEvent {
event := &UprEvent{
Opcode: rq.Opcode,
VBucket: stream.Vbucket,
VBuuid: stream.Vbuuid,
Value: rq.Body,
Cas: rq.Cas,
ExtMeta: rq.ExtMeta,
DataType: rq.DataType,
ValueLen: len(rq.Body),
SystemEvent: InvalidSysEvent,
CollectionId: math.MaxUint64,
}
event.PopulateFieldsBasedOnStreamType(rq, stream.StreamType)
// set AckSize for events that need to be acked to DCP,
// i.e., events with CommandCodes that need to be buffered in DCP
if _, ok := gomemcached.BufferedCommandCodeMap[rq.Opcode]; ok {
event.AckSize = uint32(bytesReceivedFromDCP)
}
// 16 LSBits are used by client library to encode vbucket number.
// 16 MSBits are left for application to multiplex on opaque value.
event.Opaque = appOpaque(rq.Opaque)
if len(rq.Extras) >= uprMutationExtraLen &&
event.Opcode == gomemcached.UPR_MUTATION {
event.Seqno = binary.BigEndian.Uint64(rq.Extras[:8])
event.RevSeqno = binary.BigEndian.Uint64(rq.Extras[8:16])
event.Flags = binary.BigEndian.Uint32(rq.Extras[16:20])
event.Expiry = binary.BigEndian.Uint32(rq.Extras[20:24])
event.LockTime = binary.BigEndian.Uint32(rq.Extras[24:28])
event.MetadataSize = binary.BigEndian.Uint16(rq.Extras[28:30])
} else if len(rq.Extras) >= uprDeletetionWithDeletionTimeExtraLen &&
event.Opcode == gomemcached.UPR_DELETION {
event.Seqno = binary.BigEndian.Uint64(rq.Extras[:8])
event.RevSeqno = binary.BigEndian.Uint64(rq.Extras[8:16])
event.Expiry = binary.BigEndian.Uint32(rq.Extras[16:20])
} else if len(rq.Extras) >= uprDeletetionExtraLen &&
event.Opcode == gomemcached.UPR_DELETION ||
event.Opcode == gomemcached.UPR_EXPIRATION {
event.Seqno = binary.BigEndian.Uint64(rq.Extras[:8])
event.RevSeqno = binary.BigEndian.Uint64(rq.Extras[8:16])
event.MetadataSize = binary.BigEndian.Uint16(rq.Extras[16:18])
} else if len(rq.Extras) >= uprSnapshotExtraLen &&
event.Opcode == gomemcached.UPR_SNAPSHOT {
event.SnapstartSeq = binary.BigEndian.Uint64(rq.Extras[:8])
event.SnapendSeq = binary.BigEndian.Uint64(rq.Extras[8:16])
event.SnapshotType = binary.BigEndian.Uint32(rq.Extras[16:20])
} else if event.IsSystemEvent() {
event.PopulateEvent(rq.Extras)
}
return event
}
func (event *UprEvent) PopulateFieldsBasedOnStreamType(rq gomemcached.MCRequest, streamType DcpStreamType) {
switch streamType {
case CollectionsNonStreamId:
switch rq.Opcode {
// Only these will have CID encoded within the key
case gomemcached.UPR_MUTATION,
gomemcached.UPR_DELETION,
gomemcached.UPR_EXPIRATION:
uleb128 := Uleb128(rq.Key)
result, bytesShifted := uleb128.ToUint64(rq.Keylen)
event.CollectionId = result
event.Key = rq.Key[bytesShifted:]
default:
event.Key = rq.Key
}
case CollectionsStreamId:
// TODO - not implemented
fallthrough
case NonCollectionStream:
// Let default behavior be legacy stream type
fallthrough
default:
event.Key = rq.Key
}
}
func (event *UprEvent) String() string {
name := gomemcached.CommandNames[event.Opcode]
if name == "" {
name = fmt.Sprintf("#%d", event.Opcode)
}
return name
}
func (event *UprEvent) IsSnappyDataType() bool {
return event.Opcode == gomemcached.UPR_MUTATION && (event.DataType&SnappyDataType > 0)
}
func (event *UprEvent) IsCollectionType() bool {
return event.IsSystemEvent() || event.CollectionId <= math.MaxUint32
}
func (event *UprEvent) IsSystemEvent() bool {
return event.Opcode == gomemcached.DCP_SYSTEM_EVENT
}
func (event *UprEvent) PopulateEvent(extras []byte) {
if len(extras) < dcpSystemEventExtraLen {
// Wrong length, don't parse
return
}
event.Seqno = binary.BigEndian.Uint64(extras[:8])
event.SystemEvent = SystemEventType(binary.BigEndian.Uint32(extras[8:12]))
var versionTemp uint16 = binary.BigEndian.Uint16(extras[12:14])
event.SysEventVersion = uint8(versionTemp >> 8)
}
func (event *UprEvent) GetSystemEventName() (string, error) {
switch event.SystemEvent {
case CollectionCreate:
fallthrough
case ScopeCreate:
return string(event.Key), nil
default:
return "", ErrorInvalidOp
}
}
func (event *UprEvent) GetManifestId() (uint64, error) {
switch event.SystemEvent {
// Version 0 only checks
case CollectionChanged:
fallthrough
case ScopeDrop:
fallthrough
case ScopeCreate:
fallthrough
case CollectionDrop:
if event.SysEventVersion > 0 {
return 0, ErrorInvalidVersion
}
fallthrough
case CollectionCreate:
// CollectionCreate supports version 1
if event.SysEventVersion > 1 {
return 0, ErrorInvalidVersion
}
if event.ValueLen < 8 {
return 0, ErrorValueTooShort
}
return binary.BigEndian.Uint64(event.Value[0:8]), nil
default:
return 0, ErrorInvalidOp
}
}
func (event *UprEvent) GetCollectionId() (uint32, error) {
switch event.SystemEvent {
case CollectionDrop:
if event.SysEventVersion > 0 {
return 0, ErrorInvalidVersion
}
fallthrough
case CollectionCreate:
if event.SysEventVersion > 1 {
return 0, ErrorInvalidVersion
}
if event.ValueLen < 16 {
return 0, ErrorValueTooShort
}
return binary.BigEndian.Uint32(event.Value[12:16]), nil
case CollectionChanged:
if event.SysEventVersion > 0 {
return 0, ErrorInvalidVersion
}
if event.ValueLen < 12 {
return 0, ErrorValueTooShort
}
return binary.BigEndian.Uint32(event.Value[8:12]), nil
default:
return 0, ErrorInvalidOp
}
}
func (event *UprEvent) GetScopeId() (uint32, error) {
switch event.SystemEvent {
// version 0 checks
case ScopeCreate:
fallthrough
case ScopeDrop:
fallthrough
case CollectionDrop:
if event.SysEventVersion > 0 {
return 0, ErrorInvalidVersion
}
fallthrough
case CollectionCreate:
// CollectionCreate could be either 0 or 1
if event.SysEventVersion > 1 {
return 0, ErrorInvalidVersion
}
if event.ValueLen < 12 {
return 0, ErrorValueTooShort
}
return binary.BigEndian.Uint32(event.Value[8:12]), nil
default:
return 0, ErrorInvalidOp
}
}
func (event *UprEvent) GetMaxTTL() (uint32, error) {
switch event.SystemEvent {
case CollectionCreate:
if event.SysEventVersion < 1 {
return 0, ErrorNoMaxTTL
}
if event.ValueLen < 20 {
return 0, ErrorValueTooShort
}
return binary.BigEndian.Uint32(event.Value[16:20]), nil
case CollectionChanged:
if event.SysEventVersion > 0 {
return 0, ErrorInvalidVersion
}
if event.ValueLen < 16 {
return 0, ErrorValueTooShort
}
return binary.BigEndian.Uint32(event.Value[12:16]), nil
default:
return 0, ErrorInvalidOp
}
}
type Uleb128 []byte
func (u Uleb128) ToUint64(cachedLen int) (result uint64, bytesShifted int) {
var shift uint = 0
for curByte := 0; curByte < cachedLen; curByte++ {
oneByte := u[curByte]
last7Bits := 0x7f & oneByte
result |= uint64(last7Bits) << shift
bytesShifted++
if oneByte&0x80 == 0 {
break
}
shift += 7
}
return
}
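Usage sketch (not part of this commit): on CollectionsNonStreamId streams the collection ID arrives ULEB128-prefixed onto mutation/deletion/expiration keys, and Uleb128.ToUint64 is what strips it. With a made-up key the decoding looks like this:

package main

import (
	"fmt"

	memcached "github.com/couchbase/gomemcached/client"
)

func main() {
	// Hypothetical DCP key: ULEB128-encoded collection ID 0x10d
	// (bytes 0x8d 0x02) followed by the document key "beer-1".
	raw := append([]byte{0x8d, 0x02}, []byte("beer-1")...)

	cid, n := memcached.Uleb128(raw).ToUint64(len(raw))
	fmt.Printf("collection id = %#x, doc key = %q\n", cid, raw[n:])
	// Output: collection id = 0x10d, doc key = "beer-1"
}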

View file

@@ -19,6 +19,7 @@ const uprMutationExtraLen = 30
const uprDeletetionExtraLen = 18
const uprDeletetionWithDeletionTimeExtraLen = 21
const uprSnapshotExtraLen = 20
const dcpSystemEventExtraLen = 13
const bufferAckThreshold = 0.2
const opaqueOpen = 0xBEAF0001
const opaqueFailover = 0xDEADBEEF
@@ -27,32 +28,6 @@ const uprDefaultNoopInterval = 120
// Counter on top of opaqueOpen that others can draw from for open and control msgs
var opaqueOpenCtrlWell uint32 = opaqueOpen
// UprEvent memcached events for UPR streams.
type UprEvent struct {
Opcode gomemcached.CommandCode // Type of event
Status gomemcached.Status // Response status
VBucket uint16 // VBucket this event applies to
DataType uint8 // data type
Opaque uint16 // 16 MSB of opaque
VBuuid uint64 // This field is set by downstream
Flags uint32 // Item flags
Expiry uint32 // Item expiration time
Key, Value []byte // Item key/value
OldValue []byte // TODO: TBD: old document value
Cas uint64 // CAS value of the item
Seqno uint64 // sequence number of the mutation
RevSeqno uint64 // rev sequence number : deletions
LockTime uint32 // Lock time
MetadataSize uint16 // Metadata size
SnapstartSeq uint64 // start sequence number of this snapshot
SnapendSeq uint64 // End sequence number of the snapshot
SnapshotType uint32 // 0: disk 1: memory
FailoverLog *FailoverLog // Failover log containing vbuuid and sequence number
Error error // Error value in case of a failure
ExtMeta []byte
AckSize uint32 // The number of bytes that can be Acked to DCP
}
type PriorityType string
// high > medium > disabled > low
@@ -63,13 +38,39 @@ const (
PriorityHigh PriorityType = "high"
)
type DcpStreamType int32
var UninitializedStream DcpStreamType = -1
const (
NonCollectionStream DcpStreamType = 0
CollectionsNonStreamId DcpStreamType = iota
CollectionsStreamId DcpStreamType = iota
)
func (t DcpStreamType) String() string {
switch t {
case UninitializedStream:
return "Un-Initialized Stream"
case NonCollectionStream:
return "Traditional Non-Collection Stream"
case CollectionsNonStreamId:
return "Collections Stream without StreamID"
case CollectionsStreamId:
return "Collection Stream with StreamID"
default:
return "Unknown Stream Type"
}
}
// UprStream is per stream data structure over an UPR Connection.
type UprStream struct {
Vbucket uint16 // Vbucket id
Vbuuid uint64 // vbucket uuid
StartSeq uint64 // start sequence number
EndSeq uint64 // end sequence number
connected bool
Vbucket uint16 // Vbucket id
Vbuuid uint64 // vbucket uuid
StartSeq uint64 // start sequence number
EndSeq uint64 // end sequence number
connected bool
StreamType DcpStreamType
}
type FeedState int
@@ -113,6 +114,7 @@ type UprFeatures struct {
IncludeDeletionTime bool
DcpPriority PriorityType
EnableExpiry bool
EnableStreamId bool
}
/**
@@ -274,9 +276,15 @@ type UprFeed struct {
// if flag is true, upr feed will use ack from client to determine whether/when to send ack to DCP
// if flag is false, upr feed will track how many bytes it has sent to client
// and use that to determine whether/when to send ack to DCP
ackByClient bool
feedState FeedState
muFeedState sync.RWMutex
ackByClient bool
feedState FeedState
muFeedState sync.RWMutex
activatedFeatures UprFeatures
collectionEnabled bool // This is needed separately because parsing depends on this
// DCP StreamID allows multiple filtered collection streams to share a single DCP Stream
// It is not allowed once a regular/legacy stream was started originally
streamsType DcpStreamType
initStreamTypeOnce sync.Once
}
// Exported interface - to allow for mocking
@@ -296,6 +304,9 @@ type UprFeedIface interface {
UprRequestStream(vbno, opaqueMSB uint16, flags uint32, vuuid, startSequence, endSequence, snapStart, snapEnd uint64) error
// Set DCP priority on an existing DCP connection. The command is sent asynchronously without waiting for a response
SetPriorityAsync(p PriorityType) error
// Various Collection-Type RequestStreams
UprRequestCollectionsStream(vbno, opaqueMSB uint16, flags uint32, vbuuid, startSeq, endSeq, snapStart, snapEnd uint64, filter *CollectionsFilter) error
}
type UprStats struct {
@@ -305,9 +316,6 @@ type UprStats struct {
TotalSnapShot uint64
}
// FailoverLog containing vbuuid and sequence number
type FailoverLog [][2]uint64
// error codes
var ErrorInvalidLog = errors.New("couchbase.errorInvalidLog")
@@ -320,76 +328,6 @@ func (flogp *FailoverLog) Latest() (vbuuid, seqno uint64, err error) {
return vbuuid, seqno, ErrorInvalidLog
}
func makeUprEvent(rq gomemcached.MCRequest, stream *UprStream, bytesReceivedFromDCP int) *UprEvent {
event := &UprEvent{
Opcode: rq.Opcode,
VBucket: stream.Vbucket,
VBuuid: stream.Vbuuid,
Key: rq.Key,
Value: rq.Body,
Cas: rq.Cas,
ExtMeta: rq.ExtMeta,
DataType: rq.DataType,
}
// set AckSize for events that need to be acked to DCP,
// i.e., events with CommandCodes that need to be buffered in DCP
if _, ok := gomemcached.BufferedCommandCodeMap[rq.Opcode]; ok {
event.AckSize = uint32(bytesReceivedFromDCP)
}
// 16 LSBits are used by client library to encode vbucket number.
// 16 MSBits are left for application to multiplex on opaque value.
event.Opaque = appOpaque(rq.Opaque)
if len(rq.Extras) >= uprMutationExtraLen &&
event.Opcode == gomemcached.UPR_MUTATION {
event.Seqno = binary.BigEndian.Uint64(rq.Extras[:8])
event.RevSeqno = binary.BigEndian.Uint64(rq.Extras[8:16])
event.Flags = binary.BigEndian.Uint32(rq.Extras[16:20])
event.Expiry = binary.BigEndian.Uint32(rq.Extras[20:24])
event.LockTime = binary.BigEndian.Uint32(rq.Extras[24:28])
event.MetadataSize = binary.BigEndian.Uint16(rq.Extras[28:30])
} else if len(rq.Extras) >= uprDeletetionWithDeletionTimeExtraLen &&
event.Opcode == gomemcached.UPR_DELETION {
event.Seqno = binary.BigEndian.Uint64(rq.Extras[:8])
event.RevSeqno = binary.BigEndian.Uint64(rq.Extras[8:16])
event.Expiry = binary.BigEndian.Uint32(rq.Extras[16:20])
} else if len(rq.Extras) >= uprDeletetionExtraLen &&
event.Opcode == gomemcached.UPR_DELETION ||
event.Opcode == gomemcached.UPR_EXPIRATION {
event.Seqno = binary.BigEndian.Uint64(rq.Extras[:8])
event.RevSeqno = binary.BigEndian.Uint64(rq.Extras[8:16])
event.MetadataSize = binary.BigEndian.Uint16(rq.Extras[16:18])
} else if len(rq.Extras) >= uprSnapshotExtraLen &&
event.Opcode == gomemcached.UPR_SNAPSHOT {
event.SnapstartSeq = binary.BigEndian.Uint64(rq.Extras[:8])
event.SnapendSeq = binary.BigEndian.Uint64(rq.Extras[8:16])
event.SnapshotType = binary.BigEndian.Uint32(rq.Extras[16:20])
}
return event
}
func (event *UprEvent) String() string {
name := gomemcached.CommandNames[event.Opcode]
if name == "" {
name = fmt.Sprintf("#%d", event.Opcode)
}
return name
}
func (event *UprEvent) IsSnappyDataType() bool {
return event.Opcode == gomemcached.UPR_MUTATION && (event.DataType&SnappyDataType > 0)
}
func (feed *UprFeed) sendCommands(mc *Client) {
transmitCh := feed.transmitCh
transmitCl := feed.transmitCl
@@ -420,6 +358,10 @@ func (feed *UprFeed) activateStream(vbno, opaque uint16, stream *UprStream) erro
feed.muVbstreams.Lock()
defer feed.muVbstreams.Unlock()
if feed.collectionEnabled {
stream.StreamType = feed.streamsType
}
// Set this stream as the officially connected stream for this vb
stream.connected = true
feed.vbstreams[vbno] = stream
@@ -440,14 +382,15 @@ func (mc *Client) NewUprFeedWithConfig(ackByClient bool) (*UprFeed, error) {
}
func (mc *Client) NewUprFeedWithConfig(ackByClient bool) (*UprFeed, error) {
feed := &UprFeed{
conn: mc,
closer: make(chan bool, 1),
vbstreams: make(map[uint16]*UprStream),
transmitCh: make(chan *gomemcached.MCRequest),
transmitCl: make(chan bool),
ackByClient: ackByClient,
conn: mc,
closer: make(chan bool, 1),
vbstreams: make(map[uint16]*UprStream),
transmitCh: make(chan *gomemcached.MCRequest),
transmitCl: make(chan bool),
ackByClient: ackByClient,
collectionEnabled: mc.CollectionEnabled(),
streamsType: UninitializedStream,
}
feed.negotiator.initialize()
@@ -642,7 +585,22 @@ func (feed *UprFeed) uprOpen(name string, sequence uint32, bufSize uint32, featu
activatedFeatures.EnableExpiry = true
}
if features.EnableStreamId {
rq := &gomemcached.MCRequest{
Opcode: gomemcached.UPR_CONTROL,
Key: []byte("enable_stream_id"),
Body: []byte("true"),
Opaque: getUprOpenCtrlOpaque(),
}
err = sendMcRequestSync(feed.conn, rq)
if err != nil {
return
}
activatedFeatures.EnableStreamId = true
}
// everything is ok so far, set upr feed to open state
feed.activatedFeatures = activatedFeatures
feed.setOpen()
return
}
@@ -689,10 +647,60 @@ func (mc *Client) UprGetFailoverLog(
func (feed *UprFeed) UprRequestStream(vbno, opaqueMSB uint16, flags uint32,
vuuid, startSequence, endSequence, snapStart, snapEnd uint64) error {
return feed.UprRequestCollectionsStream(vbno, opaqueMSB, flags, vuuid, startSequence, endSequence, snapStart, snapEnd, nil)
}
func (feed *UprFeed) initStreamType(filter *CollectionsFilter) (err error) {
if filter != nil && filter.UseStreamId && !feed.activatedFeatures.EnableStreamId {
err = fmt.Errorf("Cannot use streamID based filter if the feed was not started with the streamID feature")
return
}
streamInitFunc := func() {
if feed.streamsType != UninitializedStream {
// Shouldn't happen
err = fmt.Errorf("The current feed has already been started in %v mode", feed.streamsType.String())
} else {
if !feed.collectionEnabled {
feed.streamsType = NonCollectionStream
} else {
if filter != nil && filter.UseStreamId {
feed.streamsType = CollectionsStreamId
} else {
feed.streamsType = CollectionsNonStreamId
}
}
}
}
feed.initStreamTypeOnce.Do(streamInitFunc)
return
}
func (feed *UprFeed) UprRequestCollectionsStream(vbno, opaqueMSB uint16, flags uint32,
vbuuid, startSequence, endSequence, snapStart, snapEnd uint64, filter *CollectionsFilter) error {
err := feed.initStreamType(filter)
if err != nil {
return err
}
var mcRequestBody []byte
if filter != nil {
err = filter.IsValid()
if err != nil {
return err
}
mcRequestBody, err = filter.ToStreamReqBody()
if err != nil {
return err
}
}
rq := &gomemcached.MCRequest{
Opcode: gomemcached.UPR_STREAMREQ,
VBucket: vbno,
Opaque: composeOpaque(vbno, opaqueMSB),
Body: mcRequestBody,
}
rq.Extras = make([]byte, 48) // #Extras
@@ -700,15 +708,15 @@ func (feed *UprFeed) UprRequestStream(vbno, opaqueMSB uint16, flags uint32,
binary.BigEndian.PutUint32(rq.Extras[4:8], uint32(0))
binary.BigEndian.PutUint64(rq.Extras[8:16], startSequence)
binary.BigEndian.PutUint64(rq.Extras[16:24], endSequence)
binary.BigEndian.PutUint64(rq.Extras[24:32], vuuid)
binary.BigEndian.PutUint64(rq.Extras[24:32], vbuuid)
binary.BigEndian.PutUint64(rq.Extras[32:40], snapStart)
binary.BigEndian.PutUint64(rq.Extras[40:48], snapEnd)
feed.negotiator.registerRequest(vbno, opaqueMSB, vuuid, startSequence, endSequence)
feed.negotiator.registerRequest(vbno, opaqueMSB, vbuuid, startSequence, endSequence)
// Any client that has ever called this method, regardless of return code,
// should expect a potential UPR_CLOSESTREAM message due to this new map entry prior to Transmit.
if err := feed.conn.Transmit(rq); err != nil {
if err = feed.conn.Transmit(rq); err != nil {
logging.Errorf("Error in StreamRequest %s", err.Error())
// If an error occurs during transmit, then the UPRFeed will keep the stream
// in the vbstreams map. This is to prevent nil lookup from any previously
@@ -973,6 +981,12 @@ loop:
if err := feed.conn.TransmitResponse(noop); err != nil {
logging.Warnf("failed to transmit command %s. Error %s", noop.Opcode.String(), err.Error())
}
case gomemcached.DCP_SYSTEM_EVENT:
if stream == nil {
logging.Infof("Stream not found for vb %d: %#v", vb, pkt)
break loop
}
event = makeUprEvent(pkt, stream, bytes)
default:
logging.Infof("Recived an unknown response for vbucket %d", vb)
}
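
Usage sketch (not part of this commit): tying the feed changes together, a feed created from a collections-enabled client derives its stream type from the first request it sees, so a filtered stream is opened roughly as below. Vbucket, opaque, and sequence numbers are placeholders, and the UPR_OPEN/UPR_CONTROL handshake (touched in this diff only via the unexported uprOpen) is elided:

package example

import (
	memcached "github.com/couchbase/gomemcached/client"
)

// openFilteredStream sketches the new collection-aware request path.
// mc is assumed to have negotiated FeatureCollections via EnableFeatures;
// the feed open / control negotiation is elided.
func openFilteredStream(mc *memcached.Client) error {
	feed, err := mc.NewUprFeedWithConfig(false /* ackByClient */)
	if err != nil {
		return err
	}
	// ... feed open / control negotiation elided ...

	// Restrict the stream to a single hypothetical collection ID.
	filter := &memcached.CollectionsFilter{CollectionsList: []uint32{0x9}}

	const (
		vbno      uint16 = 0
		opaqueMSB uint16 = 1
	)
	// Placeholder vbuuid and sequence numbers; a real caller takes these
	// from the failover log and its last processed snapshot.
	return feed.UprRequestCollectionsStream(
		vbno, opaqueMSB, 0, /* flags */
		0 /* vbuuid */, 0 /* startSeq */, 0xffffffffffffffff, /* endSeq */
		0 /* snapStart */, 0, /* snapEnd */
		filter,
	)
}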