techknowlogick 2021-02-28 18:08:33 -05:00 committed by GitHub
parent 030646eea4
commit 47f6a4ec3f
947 changed files with 26119 additions and 7062 deletions


@@ -33,6 +33,7 @@ import (
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"unsafe"
@@ -122,6 +123,8 @@ func (b *Bucket) Do(k string, f func(mc *memcached.Client, vb uint16) error) (er
}
func (b *Bucket) Do2(k string, f func(mc *memcached.Client, vb uint16) error, deadline bool) (err error) {
var lastError error
if SlowServerCallWarningThreshold > 0 {
defer slowLog(time.Now(), "call to Do(%q)", k)
}
@@ -131,7 +134,7 @@ func (b *Bucket) Do2(k string, f func(mc *memcached.Client, vb uint16) error, de
for i := 0; i < maxTries; i++ {
conn, pool, err := b.getConnectionToVBucket(vb)
if err != nil {
if isConnError(err) && backOff(i, maxTries, backOffDuration, true) {
if (err == errNoPool || isConnError(err)) && backOff(i, maxTries, backOffDuration, true) {
b.Refresh()
continue
}
@@ -143,13 +146,13 @@ func (b *Bucket) Do2(k string, f func(mc *memcached.Client, vb uint16) error, de
} else {
conn.SetDeadline(noDeadline)
}
err = f(conn, uint16(vb))
lastError = f(conn, uint16(vb))
var retry bool
discard := isOutOfBoundsError(err)
retry := false
discard := isOutOfBoundsError(lastError) || IsReadTimeOutError(lastError)
// MB-30967 / MB-31001 implement back off for transient errors
if resp, ok := err.(*gomemcached.MCResponse); ok {
if resp, ok := lastError.(*gomemcached.MCResponse); ok {
switch resp.Status {
case gomemcached.NOT_MY_VBUCKET:
b.Refresh()
@@ -162,12 +165,10 @@ func (b *Bucket) Do2(k string, f func(mc *memcached.Client, vb uint16) error, de
retry = true
case gomemcached.ENOMEM:
fallthrough
case gomemcached.TMPFAIL:
case gomemcached.TMPFAIL, gomemcached.EBUSY:
retry = backOff(i, maxTries, backOffDuration, true)
default:
retry = false
}
} else if err != nil && isConnError(err) && backOff(i, maxTries, backOffDuration, true) {
} else if lastError != nil && isConnError(lastError) && backOff(i, maxTries, backOffDuration, true) {
retry = true
}
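For context, both retry paths above rely on the package's backOff helper, which is outside this hunk. A minimal sketch of an exponential backoff with the shape used by the backOff(i, maxTries, backOffDuration, true) calls above (the doubling strategy here is an assumption, not the vendored implementation):

	// backOffSketch is illustrative only: it mirrors how the calls above use the
	// helper, but it is not the vendored backOff function.
	func backOffSketch(attempt, maxAttempts int, duration time.Duration, exponential bool) bool {
		if attempt >= maxAttempts {
			return false // caller stops retrying and surfaces lastError
		}
		d := duration
		if exponential {
			d = time.Duration(1<<uint(attempt)) * duration // double the wait on each attempt
		}
		time.Sleep(d)
		return true
	}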
@@ -178,11 +179,11 @@ func (b *Bucket) Do2(k string, f func(mc *memcached.Client, vb uint16) error, de
}
if !retry {
return err
return lastError
}
}
return fmt.Errorf("unable to complete action after %v attemps", maxTries)
return fmt.Errorf("unable to complete action after %v attemps: ", maxTries, lastError)
}
type GatheredStats struct {
@@ -211,6 +212,20 @@ func getStatsParallel(sn string, b *Bucket, offset int, which string,
}
}
func getStatsParallelFunc(fn func(key, val []byte), sn string, b *Bucket, offset int, which string,
ch chan<- GatheredStats) {
pool := b.getConnPool(offset)
conn, err := pool.Get()
if err == nil {
conn.SetDeadline(getDeadline(time.Time{}, DefaultTimeout))
err = conn.StatsFunc(which, fn)
pool.Return(conn)
}
ch <- GatheredStats{Server: sn, Err: err}
}
// GetStats gets a set of stats from all servers.
//
// Returns a map of server ID -> map of stat key to stat value.
@@ -246,6 +261,108 @@ func (b *Bucket) GatherStats(which string) map[string]GatheredStats {
return rv
}
// GatherStatsFunc gathers stats from all servers, invoking fn for each stat key/value pair, and returns a map of server ID -> error for any servers that failed.
func (b *Bucket) GatherStatsFunc(which string, fn func(key, val []byte)) map[string]error {
var errMap map[string]error
vsm := b.VBServerMap()
if vsm.ServerList == nil {
return errMap
}
// Go grab all the things at once.
ch := make(chan GatheredStats, len(vsm.ServerList))
for i, sn := range vsm.ServerList {
go getStatsParallelFunc(fn, sn, b, i, which, ch)
}
// Gather the results
for range vsm.ServerList {
gs := <-ch
if gs.Err != nil {
if errMap == nil {
errMap = make(map[string]error)
}
errMap[gs.Server] = gs.Err
}
}
return errMap
}
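A short usage sketch of the callback-based stats API above; the function name and the "memory" stat group are illustrative, not part of this change:

	// exampleGatherStats streams one stat group from every server and logs any
	// per-server failures reported in the returned error map.
	func exampleGatherStats(b *Bucket) {
		errs := b.GatherStatsFunc("memory", func(key, val []byte) {
			fmt.Printf("%s = %s\n", key, val)
		})
		for server, err := range errs {
			logging.Errorf("stats from %s failed: %v", server, err)
		}
	}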
type BucketStats int
const (
StatCount = BucketStats(iota)
StatSize
)
var bucketStatString = []string{
"curr_items",
"ep_value_size",
}
var collectionStatString = []string{
"items",
"disk_size",
}
// Get selected bucket or collection stats
func (b *Bucket) GetIntStats(refresh bool, which []BucketStats, context ...*memcached.ClientContext) ([]int64, error) {
if refresh {
b.Refresh()
}
var vals []int64 = make([]int64, len(which))
if len(vals) == 0 {
return vals, nil
}
var outErr error
if len(context) > 0 {
collKey := fmt.Sprintf("collections-byid 0x%x", context[0].CollId)
errs := b.GatherStatsFunc(collKey, func(key, val []byte) {
for i, f := range which {
lk := len(key)
ls := len(collectionStatString[f])
if lk >= ls && string(key[lk-ls:]) == collectionStatString[f] {
v, err := strconv.ParseInt(string(val), 10, 64)
if err == nil {
atomic.AddInt64(&vals[i], v)
} else if outErr == nil {
outErr = err
}
}
}
})
// have to use a range to access any one element of a map
for _, err := range errs {
return nil, err
}
} else {
errs := b.GatherStatsFunc("", func(key, val []byte) {
for i, f := range which {
if string(key) == bucketStatString[f] {
v, err := strconv.ParseInt(string(val), 10, 64)
if err == nil {
atomic.AddInt64(&vals[i], v)
} else if outErr == nil {
outErr = err
}
}
}
})
// have to use a range to access any one element of a map
for _, err := range errs {
return nil, err
}
}
return vals, outErr
}
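Similarly, a sketch of how the new GetIntStats accessor above might be called; the helper name and log line are illustrative:

	// exampleBucketSize reads the item count and value size for the whole bucket,
	// refreshing the vbucket map first. Passing a *memcached.ClientContext would
	// scope the same call to a single collection.
	func exampleBucketSize(b *Bucket) {
		vals, err := b.GetIntStats(true, []BucketStats{StatCount, StatSize})
		if err == nil {
			logging.Infof("items=%v value-bytes=%v", vals[0], vals[1])
		}
	}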
// Get bucket count through the bucket stats
func (b *Bucket) GetCount(refresh bool, context ...*memcached.ClientContext) (count int64, err error) {
if refresh {
@@ -351,6 +468,9 @@ func isAuthError(err error) bool {
}
func IsReadTimeOutError(err error) bool {
if err == nil {
return false
}
estr := err.Error()
return strings.Contains(estr, "read tcp") ||
strings.Contains(estr, "i/o timeout")
@@ -456,6 +576,21 @@ func (b *Bucket) doBulkGet(vb uint16, keys []string, reqDeadline time.Time,
}
b.Refresh()
backOffAttempts++
} else if err == errNoPool {
if !backOff(backOffAttempts, MaxBackOffRetries, backOffDuration, true) {
logging.Errorf("Connection Error %v : %v", bname, err)
ech <- err
return err
}
err = b.Refresh()
if err != nil {
ech <- err
return err
}
backOffAttempts++
// retry, and make no noise
return nil
}
logging.Infof("Pool Get returned %v: %v", bname, err)
// retry
@@ -498,8 +633,8 @@ func (b *Bucket) doBulkGet(vb uint16, keys []string, reqDeadline time.Time,
ech <- err
return err
case error:
if isOutOfBoundsError(err) {
// We got an out of bound error, retry the operation
if isOutOfBoundsError(err) || IsReadTimeOutError(err) {
// We got an out of bounds error or a read timeout error; retry the operation
discard = true
return nil
} else if isConnError(err) && backOff(backOffAttempts, MaxBackOffRetries, backOffDuration, true) {
@@ -816,6 +951,14 @@ var ErrKeyExists = errors.New("key exists")
func (b *Bucket) Write(k string, flags, exp int, v interface{},
opt WriteOptions, context ...*memcached.ClientContext) (err error) {
_, err = b.WriteWithCAS(k, flags, exp, v, opt, context...)
return err
}
func (b *Bucket) WriteWithCAS(k string, flags, exp int, v interface{},
opt WriteOptions, context ...*memcached.ClientContext) (cas uint64, err error) {
if ClientOpCallback != nil {
defer func(t time.Time) {
ClientOpCallback(fmt.Sprintf("Write(%v)", opt), k, t, err)
@@ -826,7 +969,7 @@ func (b *Bucket) Write(k string, flags, exp int, v interface{},
if opt&Raw == 0 {
data, err = json.Marshal(v)
if err != nil {
return err
return cas, err
}
} else if v != nil {
data = v.([]byte)
@@ -852,14 +995,18 @@ func (b *Bucket) Write(k string, flags, exp int, v interface{},
res, err = mc.Set(vb, k, flags, exp, data, context...)
}
if err == nil {
cas = res.Cas
}
return err
})
if err == nil && (opt&(Persist|Indexable) != 0) {
err = b.WaitForPersistence(k, res.Cas, data == nil)
err = b.WaitForPersistence(k, cas, data == nil)
}
return err
return cas, err
}
func (b *Bucket) WriteWithMT(k string, flags, exp int, v interface{},
@@ -1018,6 +1165,11 @@ func (b *Bucket) Set(k string, exp int, v interface{}, context ...*memcached.Cli
return b.Write(k, 0, exp, v, 0, context...)
}
// Set a value in this bucket, returning the CAS value assigned by the server.
func (b *Bucket) SetWithCAS(k string, exp int, v interface{}, context ...*memcached.ClientContext) (uint64, error) {
return b.WriteWithCAS(k, 0, exp, v, 0, context...)
}
// Set a value in this bucket with flags
func (b *Bucket) SetWithMeta(k string, flags int, exp int, v interface{}, context ...*memcached.ClientContext) (*MutationToken, error) {
return b.WriteWithMT(k, flags, exp, v, 0, context...)
@@ -1039,6 +1191,16 @@ func (b *Bucket) Add(k string, exp int, v interface{}, context ...*memcached.Cli
return (err == nil), err
}
// AddWithCAS adds a value to this bucket; like Set except that nothing
// happens if the key exists. Returns the CAS value assigned on success.
func (b *Bucket) AddWithCAS(k string, exp int, v interface{}, context ...*memcached.ClientContext) (bool, uint64, error) {
cas, err := b.WriteWithCAS(k, 0, exp, v, AddOnly, context...)
if err == ErrKeyExists {
return false, 0, nil
}
return (err == nil), cas, err
}
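A brief sketch of the CAS-returning variants introduced above; the helper name and document key are illustrative:

	// exampleAddWithCAS stores a document only when the key is absent; added is
	// false with a nil error when the key already exists, and cas reports the
	// value assigned by the server on success.
	func exampleAddWithCAS(b *Bucket) {
		added, cas, err := b.AddWithCAS("doc::1", 0, map[string]interface{}{"n": 1})
		if err == nil && added {
			logging.Infof("stored with cas %v", cas)
		}
	}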
// AddRaw adds a value to this bucket; like SetRaw except that nothing
// happens if the key exists. The value will be stored as raw bytes.
func (b *Bucket) AddRaw(k string, exp int, v []byte, context ...*memcached.ClientContext) (added bool, err error) {


@@ -510,9 +510,11 @@ func (b *Bucket) GetRandomDoc(context ...*memcached.ClientContext) (*gomemcached
// We may need to select the bucket before GetRandomDoc()
// will work. This is sometimes done at startup (see defaultMkConn())
// but not always, depending on the auth type.
_, err = conn.SelectBucket(b.Name)
if err != nil {
return nil, err
if conn.LastBucket() != b.Name {
_, err = conn.SelectBucket(b.Name)
if err != nil {
return nil, err
}
}
// get a random document from the connection
@@ -533,7 +535,7 @@ func (b *Bucket) CreateScope(scope string) error {
client := pool.client
b.RUnlock()
args := map[string]interface{}{"name": scope}
return client.parsePostURLResponseTerse("/pools/default/buckets/"+uriAdj(b.Name)+"/collections", args, nil)
return client.parsePostURLResponseTerse("/pools/default/buckets/"+uriAdj(b.Name)+"/scopes", args, nil)
}
func (b *Bucket) DropScope(scope string) error {
@@ -541,7 +543,7 @@ func (b *Bucket) DropScope(scope string) error {
pool := b.pool
client := pool.client
b.RUnlock()
return client.parseDeleteURLResponseTerse("/pools/default/buckets/"+uriAdj(b.Name)+"/collections/"+uriAdj(scope), nil, nil)
return client.parseDeleteURLResponseTerse("/pools/default/buckets/"+uriAdj(b.Name)+"/scopes/"+uriAdj(scope), nil, nil)
}
func (b *Bucket) CreateCollection(scope string, collection string) error {
@@ -550,7 +552,7 @@ func (b *Bucket) CreateCollection(scope string, collection string) error {
client := pool.client
b.RUnlock()
args := map[string]interface{}{"name": collection}
return client.parsePostURLResponseTerse("/pools/default/buckets/"+uriAdj(b.Name)+"/collections/"+uriAdj(scope), args, nil)
return client.parsePostURLResponseTerse("/pools/default/buckets/"+uriAdj(b.Name)+"/scopes/"+uriAdj(scope)+"/collections", args, nil)
}
func (b *Bucket) DropCollection(scope string, collection string) error {
@@ -558,7 +560,7 @@ func (b *Bucket) DropCollection(scope string, collection string) error {
pool := b.pool
client := pool.client
b.RUnlock()
return client.parseDeleteURLResponseTerse("/pools/default/buckets/"+uriAdj(b.Name)+"/collections/"+uriAdj(scope)+"/"+uriAdj(collection), nil, nil)
return client.parseDeleteURLResponseTerse("/pools/default/buckets/"+uriAdj(b.Name)+"/scopes/"+uriAdj(scope)+"/collections/"+uriAdj(collection), nil, nil)
}
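For reference, a sketch of the scope/collection management calls whose REST paths are corrected above; the scope and collection names are illustrative:

	// exampleCreateCollection creates a scope and then a collection inside it via
	// the corrected /pools/default/buckets/<bucket>/scopes/... endpoints.
	func exampleCreateCollection(b *Bucket) error {
		if err := b.CreateScope("app"); err != nil {
			return err
		}
		return b.CreateCollection("app", "sessions")
	}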
func (b *Bucket) FlushCollection(scope string, collection string) error {
@@ -703,7 +705,8 @@ func doHTTPRequestForStreaming(req *http.Request) (*http.Response, error) {
if skipVerify {
tr = &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
MaxIdleConnsPerHost: MaxIdleConnsPerHost,
}
} else {
// Handle cases with cert
@@ -714,7 +717,8 @@ func doHTTPRequestForStreaming(req *http.Request) (*http.Response, error) {
}
tr = &http.Transport{
TLSClientConfig: cfg,
TLSClientConfig: cfg,
MaxIdleConnsPerHost: MaxIdleConnsPerHost,
}
}
@@ -751,7 +755,8 @@ func doHTTPRequest(req *http.Request) (*http.Response, error) {
if skipVerify {
tr = &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
MaxIdleConnsPerHost: MaxIdleConnsPerHost,
}
} else {
// Handle cases with cert
@@ -762,11 +767,12 @@ func doHTTPRequest(req *http.Request) (*http.Response, error) {
}
tr = &http.Transport{
TLSClientConfig: cfg,
TLSClientConfig: cfg,
MaxIdleConnsPerHost: MaxIdleConnsPerHost,
}
}
client = &http.Client{Transport: tr}
client = &http.Client{Transport: tr, Timeout: ClientTimeOut}
} else if client == nil {
client = HTTPClient
@@ -1346,6 +1352,10 @@ func (b *Bucket) GetCollectionsManifest() (*Manifest, error) {
b.RLock()
pools := b.getConnPools(true /* already locked */)
if len(pools) == 0 {
b.RUnlock()
return nil, fmt.Errorf("Unable to get connection to retrieve collections manifest: no connection pool. No collections access to bucket %s.", b.Name)
}
pool := pools[0] // Any pool will do, so use the first one.
b.RUnlock()
client, err := pool.Get()


@@ -6,7 +6,6 @@ import (
"github.com/couchbase/goutils/logging"
"io"
"io/ioutil"
"math/rand"
"net"
"net/http"
"time"
@@ -109,10 +108,7 @@ func (b *Bucket) UpdateBucket2(streamingFn StreamingFn) error {
return fmt.Errorf("No healthy nodes found")
}
startNode := rand.Intn(len(nodes))
node := nodes[(startNode)%len(nodes)]
streamUrl := fmt.Sprintf("http://%s/pools/default/bucketsStreaming/%s", node.Hostname, uriAdj(b.GetName()))
streamUrl := fmt.Sprintf("%s/pools/default/bucketsStreaming/%s", b.pool.client.BaseURL, uriAdj(b.GetName()))
logging.Infof(" Trying with %s", streamUrl)
req, err := http.NewRequest("GET", streamUrl, nil)
if err != nil {