[Vendor] Update go-redis to v8.5.0 (#13749)
* Update go-redis to v8.4.0
* github.com/go-redis/redis/v8 v8.4.0 -> v8.5.0
* Apply suggestions from code review
  Co-authored-by: zeripath <art27@cantab.net>
* TODO
* Use the Queue termination channel as the default context for pushes
  Signed-off-by: Andrew Thornton <art27@cantab.net>
* missed one
  Signed-off-by: Andrew Thornton <art27@cantab.net>

Co-authored-by: zeripath <art27@cantab.net>
This commit is contained in:
parent 4cffc46f65
commit ac97ea573c

139 changed files with 16117 additions and 4965 deletions
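The substance of this update is the go-redis v7 -> v8 major-version bump: in v8 every command takes a context.Context as its first argument, and per the commit message Gitea routes the queue termination channel through that context for pushes. A minimal before/after sketch of the API change (the key name is illustrative):

```go
package main

import (
	"context"

	"github.com/go-redis/redis/v8"
)

func main() {
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// v7: rdb.Set("key", "value", 0).Err()
	// v8: the same call, but with an explicit context that can cancel the operation.
	ctx := context.Background()
	if err := rdb.Set(ctx, "key", "value", 0).Err(); err != nil {
		panic(err)
	}
}
```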
21 vendor/github.com/dgryski/go-rendezvous/LICENSE generated vendored Normal file
@@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2017-2020 Damian Gryski <damian@gryski.com>

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
79 vendor/github.com/dgryski/go-rendezvous/rdv.go generated vendored Normal file
@@ -0,0 +1,79 @@
package rendezvous

type Rendezvous struct {
	nodes map[string]int
	nstr  []string
	nhash []uint64
	hash  Hasher
}

type Hasher func(s string) uint64

func New(nodes []string, hash Hasher) *Rendezvous {
	r := &Rendezvous{
		nodes: make(map[string]int, len(nodes)),
		nstr:  make([]string, len(nodes)),
		nhash: make([]uint64, len(nodes)),
		hash:  hash,
	}

	for i, n := range nodes {
		r.nodes[n] = i
		r.nstr[i] = n
		r.nhash[i] = hash(n)
	}

	return r
}

func (r *Rendezvous) Lookup(k string) string {
	// short-circuit if we're empty
	if len(r.nodes) == 0 {
		return ""
	}

	khash := r.hash(k)

	var midx int
	var mhash = xorshiftMult64(khash ^ r.nhash[0])

	for i, nhash := range r.nhash[1:] {
		if h := xorshiftMult64(khash ^ nhash); h > mhash {
			midx = i + 1
			mhash = h
		}
	}

	return r.nstr[midx]
}

func (r *Rendezvous) Add(node string) {
	r.nodes[node] = len(r.nstr)
	r.nstr = append(r.nstr, node)
	r.nhash = append(r.nhash, r.hash(node))
}

func (r *Rendezvous) Remove(node string) {
	// find index of node to remove
	nidx := r.nodes[node]

	// remove from the slices
	l := len(r.nstr) - 1 // index of the last element; swap it into the removed slot
	r.nstr[nidx] = r.nstr[l]
	r.nstr = r.nstr[:l]

	r.nhash[nidx] = r.nhash[l]
	r.nhash = r.nhash[:l]

	// update the map
	delete(r.nodes, node)
	moved := r.nstr[nidx]
	r.nodes[moved] = nidx
}

func xorshiftMult64(x uint64) uint64 {
	x ^= x >> 12 // a
	x ^= x << 25 // b
	x ^= x >> 27 // c
	return x * 2685821657736338717
}
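go-rendezvous is a new transitive dependency of go-redis v8 (see its go.mod later in this diff); rendezvous hashing lets every client pick the same node for a key without coordination. A minimal usage sketch — the shard names and the FNV-based hasher are illustrative, not taken from go-redis, which wires in its own hasher:

```go
package main

import (
	"fmt"
	"hash/fnv"

	rendezvous "github.com/dgryski/go-rendezvous"
)

// fnvHash adapts hash/fnv to the rendezvous.Hasher signature (func(string) uint64).
func fnvHash(s string) uint64 {
	h := fnv.New64a()
	_, _ = h.Write([]byte(s))
	return h.Sum64()
}

func main() {
	r := rendezvous.New([]string{"shard-a", "shard-b", "shard-c"}, fnvHash)

	// Lookup scores the key against every node and returns the highest scorer,
	// so all clients agree on placement, and removing a node only remaps the
	// keys that lived on it.
	fmt.Println(r.Lookup("user:42"))
}
```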
22 vendor/github.com/go-redis/redis/v7/.travis.yml generated vendored
@@ -1,22 +0,0 @@
dist: xenial
language: go

services:
  - redis-server

go:
  - 1.12.x
  - 1.13.x
  - tip

matrix:
  allow_failures:
    - go: tip

env:
  - GO111MODULE=on

go_import_path: github.com/go-redis/redis

before_install:
  - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.21.0
46 vendor/github.com/go-redis/redis/v7/CHANGELOG.md generated vendored
@@ -1,46 +0,0 @@
# Changelog

## v7.2

- Existing `HMSet` is renamed to `HSet` and old deprecated `HMSet` is restored for Redis 3 users.

## v7.1

- Existing `Cmd.String` is renamed to `Cmd.Text`. New `Cmd.String` implements `fmt.Stringer` interface.

## v7

- *Important*. Tx.Pipeline now returns a non-transactional pipeline. Use Tx.TxPipeline for a transactional pipeline.
- WrapProcess is replaced with more convenient AddHook that has access to context.Context.
- WithContext now can not be used to create a shallow copy of the client.
- New methods ProcessContext, DoContext, and ExecContext.
- Client respects Context.Deadline when setting net.Conn deadline.
- Client listens on Context.Done while waiting for a connection from the pool and returns an error when the context is cancelled.
- Add PubSub.ChannelWithSubscriptions that sends `*Subscription` in addition to `*Message` to allow detecting reconnections.
- `time.Time` is now marshalled in RFC3339 format. `rdb.Get("foo").Time()` helper is added to parse the time.
- `SetLimiter` is removed; `Options.Limiter` is added instead.
- `HMSet` is deprecated as of Redis v4.

## v6.15

- Cluster and Ring pipelines process commands for each node in its own goroutine.

## 6.14

- Added Options.MinIdleConns.
- Added Options.MaxConnAge.
- PoolStats.FreeConns is renamed to PoolStats.IdleConns.
- Add Client.Do to simplify creating custom commands.
- Add Cmd.String, Cmd.Int, Cmd.Int64, Cmd.Uint64, Cmd.Float64, and Cmd.Bool helpers.
- Lower memory usage.

## v6.13

- Ring got new options called `HashReplicas` and `Hash`. It is recommended to set `HashReplicas = 1000` for better key distribution between shards.
- Cluster client was optimized to use much less memory when reloading cluster state.
- PubSub.ReceiveMessage is re-worked to not use ReceiveTimeout so it does not lose data when a timeout occurs. In most cases it is recommended to use PubSub.Channel instead.
- Dialer.KeepAlive is set to 5 minutes by default.

## v6.12

- ClusterClient got a new option called `ClusterSlots` which allows building a cluster of normal Redis Servers that don't have cluster mode enabled. See https://godoc.org/github.com/go-redis/redis#example-NewClusterClient--ManualSetup
128 vendor/github.com/go-redis/redis/v7/README.md generated vendored
@@ -1,128 +0,0 @@
# Redis client for Golang

[Build Status](https://travis-ci.org/go-redis/redis)
[GoDoc](https://godoc.org/github.com/go-redis/redis)
[Airbrake](https://airbrake.io)

Supports:

- Redis 3 commands except QUIT, MONITOR, SLOWLOG and SYNC.
- Automatic connection pooling with [circuit breaker](https://en.wikipedia.org/wiki/Circuit_breaker_design_pattern) support.
- [Pub/Sub](https://godoc.org/github.com/go-redis/redis#PubSub).
- [Transactions](https://godoc.org/github.com/go-redis/redis#example-Client-TxPipeline).
- [Pipeline](https://godoc.org/github.com/go-redis/redis#example-Client-Pipeline) and [TxPipeline](https://godoc.org/github.com/go-redis/redis#example-Client-TxPipeline).
- [Scripting](https://godoc.org/github.com/go-redis/redis#Script).
- [Timeouts](https://godoc.org/github.com/go-redis/redis#Options).
- [Redis Sentinel](https://godoc.org/github.com/go-redis/redis#NewFailoverClient).
- [Redis Cluster](https://godoc.org/github.com/go-redis/redis#NewClusterClient).
- [Cluster of Redis Servers](https://godoc.org/github.com/go-redis/redis#example-NewClusterClient--ManualSetup) without using cluster mode and Redis Sentinel.
- [Ring](https://godoc.org/github.com/go-redis/redis#NewRing).
- [Instrumentation](https://godoc.org/github.com/go-redis/redis#ex-package--Instrumentation).
- [Cache friendly](https://github.com/go-redis/cache).
- [Rate limiting](https://github.com/go-redis/redis_rate).
- [Distributed Locks](https://github.com/bsm/redislock).

API docs: https://godoc.org/github.com/go-redis/redis.
Examples: https://godoc.org/github.com/go-redis/redis#pkg-examples.

## Installation

go-redis requires a Go version with [Modules](https://github.com/golang/go/wiki/Modules) support and uses import versioning. So please make sure to initialize a Go module before installing go-redis:

``` shell
go mod init github.com/my/repo
go get github.com/go-redis/redis/v7
```

Import:

``` go
import "github.com/go-redis/redis/v7"
```

## Quickstart

``` go
func ExampleNewClient() {
	client := redis.NewClient(&redis.Options{
		Addr:     "localhost:6379",
		Password: "", // no password set
		DB:       0,  // use default DB
	})

	pong, err := client.Ping().Result()
	fmt.Println(pong, err)
	// Output: PONG <nil>
}

func ExampleClient() {
	client := redis.NewClient(&redis.Options{
		Addr:     "localhost:6379",
		Password: "", // no password set
		DB:       0,  // use default DB
	})
	err := client.Set("key", "value", 0).Err()
	if err != nil {
		panic(err)
	}

	val, err := client.Get("key").Result()
	if err != nil {
		panic(err)
	}
	fmt.Println("key", val)

	val2, err := client.Get("key2").Result()
	if err == redis.Nil {
		fmt.Println("key2 does not exist")
	} else if err != nil {
		panic(err)
	} else {
		fmt.Println("key2", val2)
	}
	// Output: key value
	// key2 does not exist
}
```

## Howto

Please go through [examples](https://godoc.org/github.com/go-redis/redis#pkg-examples) to get an idea of how to use this package.

## Look and feel

Some corner cases:

``` go
// SET key value EX 10 NX
set, err := client.SetNX("key", "value", 10*time.Second).Result()

// SORT list LIMIT 0 2 ASC
vals, err := client.Sort("list", &redis.Sort{Offset: 0, Count: 2, Order: "ASC"}).Result()

// ZRANGEBYSCORE zset -inf +inf WITHSCORES LIMIT 0 2
vals, err := client.ZRangeByScoreWithScores("zset", &redis.ZRangeBy{
	Min:    "-inf",
	Max:    "+inf",
	Offset: 0,
	Count:  2,
}).Result()

// ZINTERSTORE out 2 zset1 zset2 WEIGHTS 2 3 AGGREGATE SUM
vals, err := client.ZInterStore("out", &redis.ZStore{
	Keys:    []string{"zset1", "zset2"},
	Weights: []int64{2, 3},
}).Result()

// EVAL "return {KEYS[1],ARGV[1]}" 1 "key" "hello"
vals, err := client.Eval("return {KEYS[1],ARGV[1]}", []string{"key"}, "hello").Result()

// custom command
res, err := client.Do("set", "key", "value").Result()
```

## See also

- [Golang PostgreSQL ORM](https://github.com/go-pg/pg)
- [Golang msgpack](https://github.com/vmihailenco/msgpack)
- [Golang message task queue](https://github.com/vmihailenco/taskq)
22 vendor/github.com/go-redis/redis/v7/cluster_commands.go generated vendored
@@ -1,22 +0,0 @@
package redis

import "sync/atomic"

func (c *ClusterClient) DBSize() *IntCmd {
	cmd := NewIntCmd("dbsize")
	var size int64
	err := c.ForEachMaster(func(master *Client) error {
		n, err := master.DBSize().Result()
		if err != nil {
			return err
		}
		atomic.AddInt64(&size, n)
		return nil
	})
	if err != nil {
		cmd.SetErr(err)
		return cmd
	}
	cmd.val = size
	return cmd
}
2643 vendor/github.com/go-redis/redis/v7/commands.go generated vendored
File diff suppressed because it is too large
15 vendor/github.com/go-redis/redis/v7/go.mod generated vendored
@@ -1,15 +0,0 @@
module github.com/go-redis/redis/v7

require (
	github.com/golang/protobuf v1.3.2 // indirect
	github.com/kr/pretty v0.1.0 // indirect
	github.com/onsi/ginkgo v1.10.1
	github.com/onsi/gomega v1.7.0
	golang.org/x/net v0.0.0-20190923162816-aa69164e4478 // indirect
	golang.org/x/sys v0.0.0-20191010194322-b09406accb47 // indirect
	golang.org/x/text v0.3.2 // indirect
	gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
	gopkg.in/yaml.v2 v2.2.4 // indirect
)

go 1.11
47 vendor/github.com/go-redis/redis/v7/go.sum generated vendored
@@ -1,47 +0,0 @@
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo=
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478 h1:l5EDrHhldLYb3ZRHDUhXF7Om7MvYXnkV9/iQNo1lX6g=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20191010194322-b09406accb47 h1:/XfQ9z7ib8eEJX2hdgFTZJ/ntt0swNk5oYBziWeTCvY=
golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
81 vendor/github.com/go-redis/redis/v7/internal/consistenthash/consistenthash.go generated vendored
@@ -1,81 +0,0 @@
/*
Copyright 2013 Google Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package consistenthash provides an implementation of a ring hash.
package consistenthash

import (
	"hash/crc32"
	"sort"
	"strconv"
)

type Hash func(data []byte) uint32

type Map struct {
	hash     Hash
	replicas int
	keys     []int // Sorted
	hashMap  map[int]string
}

func New(replicas int, fn Hash) *Map {
	m := &Map{
		replicas: replicas,
		hash:     fn,
		hashMap:  make(map[int]string),
	}
	if m.hash == nil {
		m.hash = crc32.ChecksumIEEE
	}
	return m
}

// Returns true if there are no items available.
func (m *Map) IsEmpty() bool {
	return len(m.keys) == 0
}

// Adds some keys to the hash.
func (m *Map) Add(keys ...string) {
	for _, key := range keys {
		for i := 0; i < m.replicas; i++ {
			hash := int(m.hash([]byte(strconv.Itoa(i) + key)))
			m.keys = append(m.keys, hash)
			m.hashMap[hash] = key
		}
	}
	sort.Ints(m.keys)
}

// Gets the closest item in the hash to the provided key.
func (m *Map) Get(key string) string {
	if m.IsEmpty() {
		return ""
	}

	hash := int(m.hash([]byte(key)))

	// Binary search for appropriate replica.
	idx := sort.Search(len(m.keys), func(i int) bool { return m.keys[i] >= hash })

	// Means we have cycled back to the first replica.
	if idx == len(m.keys) {
		idx = 0
	}

	return m.hashMap[m.keys[idx]]
}
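This ring hash is dropped along with the rest of the vendored v7 tree; v8 switches to the rendezvous hashing vendored above. The package is internal to go-redis, so the sketch below assumes you have copied the file into your own module under a hypothetical path; everything else matches the code as shown:

```go
package main

import (
	"fmt"

	// Hypothetical local copy; the real path
	// (github.com/go-redis/redis/v7/internal/consistenthash) is internal
	// to go-redis and cannot be imported directly.
	"example.com/demo/consistenthash"
)

func main() {
	// 100 virtual replicas per node smooth the key distribution;
	// passing nil selects the default crc32.ChecksumIEEE hash.
	m := consistenthash.New(100, nil)
	m.Add("shard-a", "shard-b", "shard-c")

	// Get walks clockwise from the key's hash to the nearest virtual replica,
	// so membership changes only remap a fraction of the keyspace.
	fmt.Println(m.Get("user:42"))
}
```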
24 vendor/github.com/go-redis/redis/v7/internal/internal.go generated vendored
@@ -1,24 +0,0 @@
package internal

import (
	"math/rand"
	"time"
)

// Retry backoff with jitter sleep to prevent overloaded conditions during intervals
// https://www.awsarchitectureblog.com/2015/03/backoff.html
func RetryBackoff(retry int, minBackoff, maxBackoff time.Duration) time.Duration {
	if retry < 0 {
		retry = 0
	}

	backoff := minBackoff << uint(retry)
	if backoff > maxBackoff || backoff < minBackoff {
		backoff = maxBackoff
	}

	if backoff == 0 {
		return 0
	}
	return time.Duration(rand.Int63n(int64(backoff)))
}
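For intuition about the function above: the retry window doubles from minBackoff on each attempt, caps at maxBackoff (the `backoff < minBackoff` test also catches shift overflow), and the actual sleep is drawn uniformly from [0, window) — the "full jitter" scheme from the linked AWS post. A small sketch printing the window per retry, assuming illustrative 8ms/512ms bounds:

```go
package main

import (
	"fmt"
	"time"
)

// backoffWindow mirrors the capping logic of the vendored RetryBackoff,
// without the final random draw.
func backoffWindow(retry int, min, max time.Duration) time.Duration {
	if retry < 0 {
		retry = 0
	}
	b := min << uint(retry)
	if b > max || b < min { // "< min" also catches overflow from the shift
		b = max
	}
	return b
}

func main() {
	for retry := 0; retry <= 7; retry++ {
		// Prints 8ms, 16ms, 32ms, ... pinned at 512ms from retry 6 onward.
		fmt.Printf("retry %d: sleep in [0, %v)\n",
			retry, backoffWindow(retry, 8*time.Millisecond, 512*time.Millisecond))
	}
}
```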
8 vendor/github.com/go-redis/redis/v7/internal/log.go generated vendored
@@ -1,8 +0,0 @@
package internal

import (
	"log"
	"os"
)

var Logger = log.New(os.Stderr, "redis: ", log.LstdFlags|log.Lshortfile)
112 vendor/github.com/go-redis/redis/v7/internal/pool/pool_sticky.go generated vendored
@@ -1,112 +0,0 @@
package pool

import (
	"context"
	"sync"
)

type StickyConnPool struct {
	pool     *ConnPool
	reusable bool

	cn     *Conn
	closed bool
	mu     sync.Mutex
}

var _ Pooler = (*StickyConnPool)(nil)

func NewStickyConnPool(pool *ConnPool, reusable bool) *StickyConnPool {
	return &StickyConnPool{
		pool:     pool,
		reusable: reusable,
	}
}

func (p *StickyConnPool) NewConn(context.Context) (*Conn, error) {
	panic("not implemented")
}

func (p *StickyConnPool) CloseConn(*Conn) error {
	panic("not implemented")
}

func (p *StickyConnPool) Get(ctx context.Context) (*Conn, error) {
	p.mu.Lock()
	defer p.mu.Unlock()

	if p.closed {
		return nil, ErrClosed
	}
	if p.cn != nil {
		return p.cn, nil
	}

	cn, err := p.pool.Get(ctx)
	if err != nil {
		return nil, err
	}

	p.cn = cn
	return cn, nil
}

func (p *StickyConnPool) putUpstream() {
	p.pool.Put(p.cn)
	p.cn = nil
}

func (p *StickyConnPool) Put(cn *Conn) {}

func (p *StickyConnPool) removeUpstream(reason error) {
	p.pool.Remove(p.cn, reason)
	p.cn = nil
}

func (p *StickyConnPool) Remove(cn *Conn, reason error) {
	p.removeUpstream(reason)
}

func (p *StickyConnPool) Len() int {
	p.mu.Lock()
	defer p.mu.Unlock()

	if p.cn == nil {
		return 0
	}
	return 1
}

func (p *StickyConnPool) IdleLen() int {
	p.mu.Lock()
	defer p.mu.Unlock()

	if p.cn == nil {
		return 1
	}
	return 0
}

func (p *StickyConnPool) Stats() *Stats {
	return nil
}

func (p *StickyConnPool) Close() error {
	p.mu.Lock()
	defer p.mu.Unlock()

	if p.closed {
		return ErrClosed
	}
	p.closed = true

	if p.cn != nil {
		if p.reusable {
			p.putUpstream()
		} else {
			p.removeUpstream(ErrClosed)
		}
	}

	return nil
}
56 vendor/github.com/go-redis/redis/v7/internal/util.go generated vendored
@@ -1,56 +0,0 @@
package internal

import (
	"context"
	"time"

	"github.com/go-redis/redis/v7/internal/util"
)

func Sleep(ctx context.Context, dur time.Duration) error {
	t := time.NewTimer(dur)
	defer t.Stop()

	select {
	case <-t.C:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func ToLower(s string) string {
	if isLower(s) {
		return s
	}

	b := make([]byte, len(s))
	for i := range b {
		c := s[i]
		if c >= 'A' && c <= 'Z' {
			c += 'a' - 'A'
		}
		b[i] = c
	}
	return util.BytesToString(b)
}

func isLower(s string) bool {
	for i := 0; i < len(s); i++ {
		c := s[i]
		if c >= 'A' && c <= 'Z' {
			return false
		}
	}
	return true
}

func Unwrap(err error) error {
	u, ok := err.(interface {
		Unwrap() error
	})
	if !ok {
		return nil
	}
	return u.Unwrap()
}
62 vendor/github.com/go-redis/redis/v7/script.go generated vendored
@@ -1,62 +0,0 @@
package redis

import (
	"crypto/sha1"
	"encoding/hex"
	"io"
	"strings"
)

type scripter interface {
	Eval(script string, keys []string, args ...interface{}) *Cmd
	EvalSha(sha1 string, keys []string, args ...interface{}) *Cmd
	ScriptExists(hashes ...string) *BoolSliceCmd
	ScriptLoad(script string) *StringCmd
}

var _ scripter = (*Client)(nil)
var _ scripter = (*Ring)(nil)
var _ scripter = (*ClusterClient)(nil)

type Script struct {
	src, hash string
}

func NewScript(src string) *Script {
	h := sha1.New()
	_, _ = io.WriteString(h, src)
	return &Script{
		src:  src,
		hash: hex.EncodeToString(h.Sum(nil)),
	}
}

func (s *Script) Hash() string {
	return s.hash
}

func (s *Script) Load(c scripter) *StringCmd {
	return c.ScriptLoad(s.src)
}

func (s *Script) Exists(c scripter) *BoolSliceCmd {
	return c.ScriptExists(s.hash)
}

func (s *Script) Eval(c scripter, keys []string, args ...interface{}) *Cmd {
	return c.Eval(s.src, keys, args...)
}

func (s *Script) EvalSha(c scripter, keys []string, args ...interface{}) *Cmd {
	return c.EvalSha(s.hash, keys, args...)
}

// Run optimistically uses EVALSHA to run the script. If script does not exist
// it is retried using EVAL.
func (s *Script) Run(c scripter, keys []string, args ...interface{}) *Cmd {
	r := s.EvalSha(c, keys, args...)
	if err := r.Err(); err != nil && strings.HasPrefix(err.Error(), "NOSCRIPT ") {
		return s.Eval(c, keys, args...)
	}
	return r
}
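A hedged usage sketch of the pattern Run implements — EVALSHA first, falling back to EVAL (which also caches the script server-side) on a NOSCRIPT error. The Lua body and key name are illustrative:

```go
package main

import (
	"fmt"

	"github.com/go-redis/redis/v7"
)

// The Lua script is illustrative; any script works the same way.
var incrBy = redis.NewScript(`return redis.call("INCRBY", KEYS[1], ARGV[1])`)

func main() {
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// The first call falls back to EVAL (loading the script on the server);
	// subsequent calls hit the cheaper EVALSHA path.
	n, err := incrBy.Run(client, []string{"counter"}, 42).Int64()
	if err != nil {
		panic(err)
	}
	fmt.Println(n)
}
```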
509 vendor/github.com/go-redis/redis/v7/sentinel.go generated vendored
@@ -1,509 +0,0 @@
package redis

import (
	"context"
	"crypto/tls"
	"errors"
	"net"
	"strings"
	"sync"
	"time"

	"github.com/go-redis/redis/v7/internal"
	"github.com/go-redis/redis/v7/internal/pool"
)

//------------------------------------------------------------------------------

// FailoverOptions are used to configure a failover client and should
// be passed to NewFailoverClient.
type FailoverOptions struct {
	// The master name.
	MasterName string
	// A seed list of host:port addresses of sentinel nodes.
	SentinelAddrs    []string
	SentinelUsername string
	SentinelPassword string

	// Following options are copied from Options struct.

	Dialer    func(ctx context.Context, network, addr string) (net.Conn, error)
	OnConnect func(*Conn) error

	Username string
	Password string
	DB       int

	MaxRetries      int
	MinRetryBackoff time.Duration
	MaxRetryBackoff time.Duration

	DialTimeout  time.Duration
	ReadTimeout  time.Duration
	WriteTimeout time.Duration

	PoolSize           int
	MinIdleConns       int
	MaxConnAge         time.Duration
	PoolTimeout        time.Duration
	IdleTimeout        time.Duration
	IdleCheckFrequency time.Duration

	TLSConfig *tls.Config
}

func (opt *FailoverOptions) options() *Options {
	return &Options{
		Addr:      "FailoverClient",
		Dialer:    opt.Dialer,
		OnConnect: opt.OnConnect,

		DB:       opt.DB,
		Username: opt.Username,
		Password: opt.Password,

		MaxRetries:      opt.MaxRetries,
		MinRetryBackoff: opt.MinRetryBackoff,
		MaxRetryBackoff: opt.MaxRetryBackoff,

		DialTimeout:  opt.DialTimeout,
		ReadTimeout:  opt.ReadTimeout,
		WriteTimeout: opt.WriteTimeout,

		PoolSize:           opt.PoolSize,
		PoolTimeout:        opt.PoolTimeout,
		IdleTimeout:        opt.IdleTimeout,
		IdleCheckFrequency: opt.IdleCheckFrequency,
		MinIdleConns:       opt.MinIdleConns,
		MaxConnAge:         opt.MaxConnAge,

		TLSConfig: opt.TLSConfig,
	}
}

// NewFailoverClient returns a Redis client that uses Redis Sentinel
// for automatic failover. It's safe for concurrent use by multiple
// goroutines.
func NewFailoverClient(failoverOpt *FailoverOptions) *Client {
	opt := failoverOpt.options()
	opt.init()

	failover := &sentinelFailover{
		masterName:    failoverOpt.MasterName,
		sentinelAddrs: failoverOpt.SentinelAddrs,
		username:      failoverOpt.SentinelUsername,
		password:      failoverOpt.SentinelPassword,

		opt: opt,
	}

	c := Client{
		baseClient: newBaseClient(opt, failover.Pool()),
		ctx:        context.Background(),
	}
	c.cmdable = c.Process
	c.onClose = failover.Close

	return &c
}

//------------------------------------------------------------------------------

type SentinelClient struct {
	*baseClient
	ctx context.Context
}

func NewSentinelClient(opt *Options) *SentinelClient {
	opt.init()
	c := &SentinelClient{
		baseClient: &baseClient{
			opt:      opt,
			connPool: newConnPool(opt),
		},
		ctx: context.Background(),
	}
	return c
}

func (c *SentinelClient) Context() context.Context {
	return c.ctx
}

func (c *SentinelClient) WithContext(ctx context.Context) *SentinelClient {
	if ctx == nil {
		panic("nil context")
	}
	clone := *c
	clone.ctx = ctx
	return &clone
}

func (c *SentinelClient) Process(cmd Cmder) error {
	return c.ProcessContext(c.ctx, cmd)
}

func (c *SentinelClient) ProcessContext(ctx context.Context, cmd Cmder) error {
	return c.baseClient.process(ctx, cmd)
}

func (c *SentinelClient) pubSub() *PubSub {
	pubsub := &PubSub{
		opt: c.opt,

		newConn: func(channels []string) (*pool.Conn, error) {
			return c.newConn(context.TODO())
		},
		closeConn: c.connPool.CloseConn,
	}
	pubsub.init()
	return pubsub
}

// Ping is used to test if a connection is still alive, or to
// measure latency.
func (c *SentinelClient) Ping() *StringCmd {
	cmd := NewStringCmd("ping")
	_ = c.Process(cmd)
	return cmd
}

// Subscribe subscribes the client to the specified channels.
// Channels can be omitted to create empty subscription.
func (c *SentinelClient) Subscribe(channels ...string) *PubSub {
	pubsub := c.pubSub()
	if len(channels) > 0 {
		_ = pubsub.Subscribe(channels...)
	}
	return pubsub
}

// PSubscribe subscribes the client to the given patterns.
// Patterns can be omitted to create empty subscription.
func (c *SentinelClient) PSubscribe(channels ...string) *PubSub {
	pubsub := c.pubSub()
	if len(channels) > 0 {
		_ = pubsub.PSubscribe(channels...)
	}
	return pubsub
}

func (c *SentinelClient) GetMasterAddrByName(name string) *StringSliceCmd {
	cmd := NewStringSliceCmd("sentinel", "get-master-addr-by-name", name)
	_ = c.Process(cmd)
	return cmd
}

func (c *SentinelClient) Sentinels(name string) *SliceCmd {
	cmd := NewSliceCmd("sentinel", "sentinels", name)
	_ = c.Process(cmd)
	return cmd
}

// Failover forces a failover as if the master was not reachable, and without
// asking for agreement to other Sentinels.
func (c *SentinelClient) Failover(name string) *StatusCmd {
	cmd := NewStatusCmd("sentinel", "failover", name)
	_ = c.Process(cmd)
	return cmd
}

// Reset resets all the masters with matching name. The pattern argument is a
// glob-style pattern. The reset process clears any previous state in a master
// (including a failover in progress), and removes every slave and sentinel
// already discovered and associated with the master.
func (c *SentinelClient) Reset(pattern string) *IntCmd {
	cmd := NewIntCmd("sentinel", "reset", pattern)
	_ = c.Process(cmd)
	return cmd
}

// FlushConfig forces Sentinel to rewrite its configuration on disk, including
// the current Sentinel state.
func (c *SentinelClient) FlushConfig() *StatusCmd {
	cmd := NewStatusCmd("sentinel", "flushconfig")
	_ = c.Process(cmd)
	return cmd
}

// Master shows the state and info of the specified master.
func (c *SentinelClient) Master(name string) *StringStringMapCmd {
	cmd := NewStringStringMapCmd("sentinel", "master", name)
	_ = c.Process(cmd)
	return cmd
}

// Masters shows a list of monitored masters and their state.
func (c *SentinelClient) Masters() *SliceCmd {
	cmd := NewSliceCmd("sentinel", "masters")
	_ = c.Process(cmd)
	return cmd
}

// Slaves shows a list of slaves for the specified master and their state.
func (c *SentinelClient) Slaves(name string) *SliceCmd {
	cmd := NewSliceCmd("sentinel", "slaves", name)
	_ = c.Process(cmd)
	return cmd
}

// CkQuorum checks if the current Sentinel configuration is able to reach the
// quorum needed to failover a master, and the majority needed to authorize the
// failover. This command should be used in monitoring systems to check if a
// Sentinel deployment is ok.
func (c *SentinelClient) CkQuorum(name string) *StringCmd {
	cmd := NewStringCmd("sentinel", "ckquorum", name)
	_ = c.Process(cmd)
	return cmd
}

// Monitor tells the Sentinel to start monitoring a new master with the specified
// name, ip, port, and quorum.
func (c *SentinelClient) Monitor(name, ip, port, quorum string) *StringCmd {
	cmd := NewStringCmd("sentinel", "monitor", name, ip, port, quorum)
	_ = c.Process(cmd)
	return cmd
}

// Set is used in order to change configuration parameters of a specific master.
func (c *SentinelClient) Set(name, option, value string) *StringCmd {
	cmd := NewStringCmd("sentinel", "set", name, option, value)
	_ = c.Process(cmd)
	return cmd
}

// Remove is used in order to remove the specified master: the master will no
// longer be monitored, and will totally be removed from the internal state of
// the Sentinel.
func (c *SentinelClient) Remove(name string) *StringCmd {
	cmd := NewStringCmd("sentinel", "remove", name)
	_ = c.Process(cmd)
	return cmd
}

type sentinelFailover struct {
	sentinelAddrs []string

	opt      *Options
	username string
	password string

	pool     *pool.ConnPool
	poolOnce sync.Once

	mu          sync.RWMutex
	masterName  string
	_masterAddr string
	sentinel    *SentinelClient
	pubsub      *PubSub
}

func (c *sentinelFailover) Close() error {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.sentinel != nil {
		return c.closeSentinel()
	}
	return nil
}

func (c *sentinelFailover) closeSentinel() error {
	firstErr := c.pubsub.Close()
	c.pubsub = nil

	err := c.sentinel.Close()
	if err != nil && firstErr == nil {
		firstErr = err
	}
	c.sentinel = nil

	return firstErr
}

func (c *sentinelFailover) Pool() *pool.ConnPool {
	c.poolOnce.Do(func() {
		opt := *c.opt
		opt.Dialer = c.dial
		c.pool = newConnPool(&opt)
	})
	return c.pool
}

func (c *sentinelFailover) dial(ctx context.Context, network, _ string) (net.Conn, error) {
	addr, err := c.MasterAddr()
	if err != nil {
		return nil, err
	}
	if c.opt.Dialer != nil {
		return c.opt.Dialer(ctx, network, addr)
	}
	return net.DialTimeout("tcp", addr, c.opt.DialTimeout)
}

func (c *sentinelFailover) MasterAddr() (string, error) {
	addr, err := c.masterAddr()
	if err != nil {
		return "", err
	}
	c.switchMaster(addr)
	return addr, nil
}

func (c *sentinelFailover) masterAddr() (string, error) {
	c.mu.RLock()
	sentinel := c.sentinel
	c.mu.RUnlock()

	if sentinel != nil {
		addr := c.getMasterAddr(sentinel)
		if addr != "" {
			return addr, nil
		}
	}

	c.mu.Lock()
	defer c.mu.Unlock()

	if c.sentinel != nil {
		addr := c.getMasterAddr(c.sentinel)
		if addr != "" {
			return addr, nil
		}
		_ = c.closeSentinel()
	}

	for i, sentinelAddr := range c.sentinelAddrs {
		sentinel := NewSentinelClient(&Options{
			Addr:   sentinelAddr,
			Dialer: c.opt.Dialer,

			Username: c.username,
			Password: c.password,

			MaxRetries: c.opt.MaxRetries,

			DialTimeout:  c.opt.DialTimeout,
			ReadTimeout:  c.opt.ReadTimeout,
			WriteTimeout: c.opt.WriteTimeout,

			PoolSize:           c.opt.PoolSize,
			PoolTimeout:        c.opt.PoolTimeout,
			IdleTimeout:        c.opt.IdleTimeout,
			IdleCheckFrequency: c.opt.IdleCheckFrequency,

			TLSConfig: c.opt.TLSConfig,
		})

		masterAddr, err := sentinel.GetMasterAddrByName(c.masterName).Result()
		if err != nil {
			internal.Logger.Printf("sentinel: GetMasterAddrByName master=%q failed: %s",
				c.masterName, err)
			_ = sentinel.Close()
			continue
		}

		// Push working sentinel to the top.
		c.sentinelAddrs[0], c.sentinelAddrs[i] = c.sentinelAddrs[i], c.sentinelAddrs[0]
		c.setSentinel(sentinel)

		addr := net.JoinHostPort(masterAddr[0], masterAddr[1])
		return addr, nil
	}

	return "", errors.New("redis: all sentinels are unreachable")
}

func (c *sentinelFailover) getMasterAddr(sentinel *SentinelClient) string {
	addr, err := sentinel.GetMasterAddrByName(c.masterName).Result()
	if err != nil {
		internal.Logger.Printf("sentinel: GetMasterAddrByName name=%q failed: %s",
			c.masterName, err)
		return ""
	}
	return net.JoinHostPort(addr[0], addr[1])
}

func (c *sentinelFailover) switchMaster(addr string) {
	c.mu.RLock()
	masterAddr := c._masterAddr
	c.mu.RUnlock()
	if masterAddr == addr {
		return
	}

	c.mu.Lock()
	defer c.mu.Unlock()

	if c._masterAddr == addr {
		return
	}

	internal.Logger.Printf("sentinel: new master=%q addr=%q",
		c.masterName, addr)
	_ = c.Pool().Filter(func(cn *pool.Conn) bool {
		return cn.RemoteAddr().String() != addr
	})
	c._masterAddr = addr
}

func (c *sentinelFailover) setSentinel(sentinel *SentinelClient) {
	if c.sentinel != nil {
		panic("not reached")
	}
	c.sentinel = sentinel
	c.discoverSentinels()

	c.pubsub = sentinel.Subscribe("+switch-master")
	go c.listen(c.pubsub)
}

func (c *sentinelFailover) discoverSentinels() {
	sentinels, err := c.sentinel.Sentinels(c.masterName).Result()
	if err != nil {
		internal.Logger.Printf("sentinel: Sentinels master=%q failed: %s", c.masterName, err)
		return
	}
	for _, sentinel := range sentinels {
		vals := sentinel.([]interface{})
		for i := 0; i < len(vals); i += 2 {
			key := vals[i].(string)
			if key == "name" {
				sentinelAddr := vals[i+1].(string)
				if !contains(c.sentinelAddrs, sentinelAddr) {
					internal.Logger.Printf("sentinel: discovered new sentinel=%q for master=%q",
						sentinelAddr, c.masterName)
					c.sentinelAddrs = append(c.sentinelAddrs, sentinelAddr)
				}
			}
		}
	}
}

func (c *sentinelFailover) listen(pubsub *PubSub) {
	ch := pubsub.Channel()
	for {
		msg, ok := <-ch
		if !ok {
			break
		}

		if msg.Channel == "+switch-master" {
			parts := strings.Split(msg.Payload, " ")
			if parts[0] != c.masterName {
				internal.Logger.Printf("sentinel: ignore addr for master=%q", parts[0])
				continue
			}
			addr := net.JoinHostPort(parts[3], parts[4])
			c.switchMaster(addr)
		}
	}
}

func contains(slice []string, str string) bool {
	for _, s := range slice {
		if s == str {
			return true
		}
	}
	return false
}
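The v8 tree carries an equivalent failover client. A minimal sketch of using it — the master name and sentinel addresses are placeholders — where the client asks the sentinels for the current master, subscribes to +switch-master, and re-dials transparently after a failover, as the deleted code above shows:

```go
package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	// Placeholder master name and sentinel seed addresses.
	rdb := redis.NewFailoverClient(&redis.FailoverOptions{
		MasterName:    "mymaster",
		SentinelAddrs: []string{"10.0.0.1:26379", "10.0.0.2:26379"},
	})

	// Commands go to whichever node the sentinels currently report as master.
	if err := rdb.Ping(context.Background()).Err(); err != nil {
		panic(err)
	}
	fmt.Println("connected to the current master")
}
```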
vendor/github.com/go-redis/redis/v8/.gitignore generated vendored
@@ -1,2 +1,3 @@
*.rdb
testdata/*/
.idea/
vendor/github.com/go-redis/redis/v8/.golangci.yml generated vendored
@@ -7,9 +7,18 @@ linters:
  disable:
    - funlen
    - gochecknoglobals
    - gochecknoinits
    - gocognit
    - goconst
    - godox
    - gosec
    - maligned
    - wsl
    - gomnd
    - goerr113
    - exhaustive
    - nestif
    - nlreturn
    - exhaustivestruct
    - wrapcheck
    - errorlint
4 vendor/github.com/go-redis/redis/v8/.prettierrc generated vendored Normal file
@@ -0,0 +1,4 @@
semi: false
singleQuote: true
proseWrap: always
printWidth: 100
20 vendor/github.com/go-redis/redis/v8/.travis.yml generated vendored Normal file
@@ -0,0 +1,20 @@
dist: xenial
language: go

services:
  - redis-server

go:
  - 1.14.x
  - 1.15.x
  - tip

matrix:
  allow_failures:
    - go: tip

go_import_path: github.com/go-redis/redis

before_install:
  - curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s --
    -b $(go env GOPATH)/bin v1.32.2
5 vendor/github.com/go-redis/redis/v8/CHANGELOG.md generated vendored Normal file
@@ -0,0 +1,5 @@
# Changelog

> :heart: [**Uptrace.dev** - distributed traces, logs, and errors in one place](https://uptrace.dev)

See https://redis.uptrace.dev/changelog/
vendor/github.com/go-redis/redis/v8/Makefile generated vendored
@@ -3,6 +3,7 @@ all: testdeps
	go test ./... -short -race
	go test ./... -run=NONE -bench=. -benchmem
	env GOOS=linux GOARCH=386 go test ./...
	go vet
	golangci-lint run

testdeps: testdata/redis/src/redis-server

@@ -18,3 +19,9 @@ testdata/redis:

testdata/redis/src/redis-server: testdata/redis
	cd $< && make all

tag:
	git tag $(VERSION)
	git tag extra/rediscmd/$(VERSION)
	git tag extra/redisotel/$(VERSION)
	git tag extra/rediscensus/$(VERSION)
159 vendor/github.com/go-redis/redis/v8/README.md generated vendored Normal file
@@ -0,0 +1,159 @@
# Redis client for Golang

[Build Status](https://travis-ci.org/go-redis/redis)
[PkgGoDev](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc)
[Documentation](https://redis.uptrace.dev/)
[Chat](https://discord.gg/rWtp5Aj)

> :heart: [**Uptrace.dev** - distributed traces, logs, and errors in one place](https://uptrace.dev)

- Join [Discord](https://discord.gg/rWtp5Aj) to ask questions.
- [Documentation](https://redis.uptrace.dev)
- [Reference](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc)
- [Examples](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#pkg-examples)
- [RealWorld example app](https://github.com/uptrace/go-treemux-realworld-example-app)

## Ecosystem

- [Redis Mock](https://github.com/go-redis/redismock).
- [Distributed Locks](https://github.com/bsm/redislock).
- [Redis Cache](https://github.com/go-redis/cache).
- [Rate limiting](https://github.com/go-redis/redis_rate).

## Features

- Redis 3 commands except QUIT, MONITOR, and SYNC.
- Automatic connection pooling with [circuit breaker](https://en.wikipedia.org/wiki/Circuit_breaker_design_pattern) support.
- [Pub/Sub](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#PubSub).
- [Transactions](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client-TxPipeline).
- [Pipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client-Pipeline) and [TxPipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client-TxPipeline).
- [Scripting](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#Script).
- [Timeouts](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#Options).
- [Redis Sentinel](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewFailoverClient).
- [Redis Cluster](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewClusterClient).
- [Cluster of Redis Servers](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-NewClusterClient--ManualSetup) without using cluster mode and Redis Sentinel.
- [Ring](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewRing).
- [Instrumentation](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#ex-package--Instrumentation).

## Installation

go-redis supports the last two Go versions and requires a Go version with [modules](https://github.com/golang/go/wiki/Modules) support, so make sure to initialize a Go module:

```shell
go mod init github.com/my/repo
```

And then install go-redis/v8 (note _v8_ in the import; omitting it is a popular mistake):

```shell
go get github.com/go-redis/redis/v8
```

## Quickstart

```go
import (
	"context"
	"github.com/go-redis/redis/v8"
)

var ctx = context.Background()

func ExampleClient() {
	rdb := redis.NewClient(&redis.Options{
		Addr:     "localhost:6379",
		Password: "", // no password set
		DB:       0,  // use default DB
	})

	err := rdb.Set(ctx, "key", "value", 0).Err()
	if err != nil {
		panic(err)
	}

	val, err := rdb.Get(ctx, "key").Result()
	if err != nil {
		panic(err)
	}
	fmt.Println("key", val)

	val2, err := rdb.Get(ctx, "key2").Result()
	if err == redis.Nil {
		fmt.Println("key2 does not exist")
	} else if err != nil {
		panic(err)
	} else {
		fmt.Println("key2", val2)
	}
	// Output: key value
	// key2 does not exist
}
```

## Look and feel

Some corner cases:

```go
// SET key value EX 10 NX
set, err := rdb.SetNX(ctx, "key", "value", 10*time.Second).Result()

// SET key value keepttl NX
set, err := rdb.SetNX(ctx, "key", "value", redis.KeepTTL).Result()

// SORT list LIMIT 0 2 ASC
vals, err := rdb.Sort(ctx, "list", &redis.Sort{Offset: 0, Count: 2, Order: "ASC"}).Result()

// ZRANGEBYSCORE zset -inf +inf WITHSCORES LIMIT 0 2
vals, err := rdb.ZRangeByScoreWithScores(ctx, "zset", &redis.ZRangeBy{
	Min:    "-inf",
	Max:    "+inf",
	Offset: 0,
	Count:  2,
}).Result()

// ZINTERSTORE out 2 zset1 zset2 WEIGHTS 2 3 AGGREGATE SUM
vals, err := rdb.ZInterStore(ctx, "out", &redis.ZStore{
	Keys:    []string{"zset1", "zset2"},
	Weights: []int64{2, 3},
}).Result()

// EVAL "return {KEYS[1],ARGV[1]}" 1 "key" "hello"
vals, err := rdb.Eval(ctx, "return {KEYS[1],ARGV[1]}", []string{"key"}, "hello").Result()

// custom command
res, err := rdb.Do(ctx, "set", "key", "value").Result()
```

## Run the test

go-redis will start a redis-server and run the test cases.

The paths of the redis-server binary and the redis config file are defined in `main_test.go`:

```
var (
	redisServerBin, _  = filepath.Abs(filepath.Join("testdata", "redis", "src", "redis-server"))
	redisServerConf, _ = filepath.Abs(filepath.Join("testdata", "redis", "redis.conf"))
)
```

For local testing, you can change the variables to refer to your local files, or create a soft link to the corresponding folder for redis-server and copy the config file to `testdata/redis/`:

```
ln -s /usr/bin/redis-server ./go-redis/testdata/redis/src
cp ./go-redis/testdata/redis.conf ./go-redis/testdata/redis/
```

Lastly, run:

```
go test
```

## See also

- [Fast and flexible HTTP router](https://github.com/vmihailenco/treemux)
- [Golang PostgreSQL ORM](https://github.com/go-pg/pg)
- [Golang msgpack](https://github.com/vmihailenco/msgpack)
- [Golang message task queue](https://github.com/vmihailenco/taskq)
File diff suppressed because it is too large
25 vendor/github.com/go-redis/redis/v8/cluster_commands.go generated vendored Normal file
@@ -0,0 +1,25 @@
package redis

import (
	"context"
	"sync/atomic"
)

func (c *ClusterClient) DBSize(ctx context.Context) *IntCmd {
	cmd := NewIntCmd(ctx, "dbsize")
	var size int64
	err := c.ForEachMaster(ctx, func(ctx context.Context, master *Client) error {
		n, err := master.DBSize(ctx).Result()
		if err != nil {
			return err
		}
		atomic.AddInt64(&size, n)
		return nil
	})
	if err != nil {
		cmd.SetErr(err)
		return cmd
	}
	cmd.val = size
	return cmd
}
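Usage of the method above is unchanged apart from the context parameter: DBSize fans out to every master and sums the per-node counts atomically. A sketch with placeholder addresses:

```go
package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	rdb := redis.NewClusterClient(&redis.ClusterOptions{
		Addrs: []string{":7000", ":7001", ":7002"}, // placeholder cluster nodes
	})

	// One IntCmd whose value is the sum of DBSIZE across all masters.
	n, err := rdb.DBSize(context.Background()).Result()
	if err != nil {
		panic(err)
	}
	fmt.Println("keys in cluster:", n)
}
```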
File diff suppressed because it is too large
2790 vendor/github.com/go-redis/redis/v8/commands.go generated vendored Normal file
File diff suppressed because it is too large
vendor/github.com/go-redis/redis/v8/error.go generated vendored
@@ -6,8 +6,8 @@ import (
 	"net"
 	"strings"

-	"github.com/go-redis/redis/v7/internal/pool"
-	"github.com/go-redis/redis/v7/internal/proto"
+	"github.com/go-redis/redis/v8/internal/pool"
+	"github.com/go-redis/redis/v8/internal/proto"
 )

 var ErrClosed = pool.ErrClosed
@@ -24,15 +24,16 @@ type Error interface {

 var _ Error = proto.RedisError("")

-func isRetryableError(err error, retryTimeout bool) bool {
+func shouldRetry(err error, retryTimeout bool) bool {
 	switch err {
+	case io.EOF, io.ErrUnexpectedEOF:
+		return true
 	case nil, context.Canceled, context.DeadlineExceeded:
 		return false
-	case io.EOF:
-		return true
 	}
-	if netErr, ok := err.(net.Error); ok {
-		if netErr.Timeout() {
+
+	if v, ok := err.(timeoutError); ok {
+		if v.Timeout() {
 			return retryTimeout
 		}
 		return true
@@ -51,6 +52,10 @@ func isRetryableError(err error, retryTimeout bool) bool {
 	if strings.HasPrefix(s, "CLUSTERDOWN ") {
 		return true
 	}
+	if strings.HasPrefix(s, "TRYAGAIN ") {
+		return true
+	}

 	return false
 }
@@ -60,19 +65,25 @@ func isRedisError(err error) bool {
 }

 func isBadConn(err error, allowTimeout bool) bool {
-	if err == nil {
+	switch err {
+	case nil:
 		return false
+	case context.Canceled, context.DeadlineExceeded:
+		return true
 	}
+
 	if isRedisError(err) {
 		// Close connections in read only state in case domain addr is used
 		// and domain resolves to a different Redis Server. See #790.
 		return isReadOnlyError(err)
 	}
+
 	if allowTimeout {
 		if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
-			return false
+			return !netErr.Temporary()
 		}
 	}
+
 	return true
 }
@@ -106,3 +117,9 @@ func isLoadingError(err error) bool {
 func isReadOnlyError(err error) bool {
 	return strings.HasPrefix(err.Error(), "READONLY ")
 }
+
+//------------------------------------------------------------------------------
+
+type timeoutError interface {
+	Timeout() bool
+}
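The new timeoutError interface above lets shouldRetry classify anything exposing a Timeout() bool method (net.Error satisfies it) without depending on net directly. A toy reimplementation of just that decision table, omitting the Redis-error-string cases that the full function also checks:

```go
package main

import (
	"context"
	"fmt"
	"io"
)

type timeoutError interface{ Timeout() bool }

// fakeTimeout is a stand-in for a net.Error-style timeout.
type fakeTimeout struct{}

func (fakeTimeout) Error() string { return "i/o timeout" }
func (fakeTimeout) Timeout() bool { return true }

// shouldRetry mirrors the vendored decision table: redial on broken
// connections, never retry when the caller's context is done, and retry
// timeouts only when the caller opted in.
func shouldRetry(err error, retryTimeout bool) bool {
	switch err {
	case io.EOF, io.ErrUnexpectedEOF:
		return true
	case nil, context.Canceled, context.DeadlineExceeded:
		return false
	}
	if v, ok := err.(timeoutError); ok {
		return !v.Timeout() || retryTimeout
	}
	return false // the real function goes on to check Redis error strings here
}

func main() {
	fmt.Println(shouldRetry(io.EOF, false))          // true: safe to redial
	fmt.Println(shouldRetry(context.Canceled, true)) // false: the caller gave up
	fmt.Println(shouldRetry(fakeTimeout{}, false))   // false unless retryTimeout is set
}
```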
11 vendor/github.com/go-redis/redis/v8/go.mod generated vendored Normal file
@@ -0,0 +1,11 @@
module github.com/go-redis/redis/v8

go 1.13

require (
	github.com/cespare/xxhash/v2 v2.1.1
	github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f
	github.com/onsi/ginkgo v1.15.0
	github.com/onsi/gomega v1.10.5
	go.opentelemetry.io/otel v0.16.0
)
97 vendor/github.com/go-redis/redis/v8/go.sum generated vendored Normal file
@@ -0,0 +1,97 @@
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.2 h1:8mVmC9kjFFmA8H4pKMUhcblgifdkOIXPvbhN1T36q1M=
github.com/onsi/ginkgo v1.14.2/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.15.0/go.mod h1:hF8qUzuuC8DJGygJH3726JnCZX4MYbRB8yFfISqnKUg=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.10.4 h1:NiTx7EEvBzu9sFOD1zORteLSt3o8gnlvZZwSE9TnY9U=
github.com/onsi/gomega v1.10.4/go.mod h1:g/HbgYopi++010VEqkFgJHKC09uJiW9UkXvMUuKHUCQ=
github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.opentelemetry.io/otel v0.16.0 h1:uIWEbdeb4vpKPGITLsRVUS44L5oDbDUCZxn8lkxhmgw=
go.opentelemetry.io/otel v0.16.0/go.mod h1:e4GKElweB8W2gWUqbghw0B8t5MCTccc9212eNHnOHwA=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb h1:eBmm0M9fYhWpKZLjQUUKka/LtIxf46G4fxeEz5KJr9U=
|
||||
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
|
||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
56 vendor/github.com/go-redis/redis/v8/internal/arg.go generated vendored Normal file

@@ -0,0 +1,56 @@
package internal

import (
	"fmt"
	"strconv"
	"time"
)

func AppendArg(b []byte, v interface{}) []byte {
	switch v := v.(type) {
	case nil:
		return append(b, "<nil>"...)
	case string:
		return appendUTF8String(b, Bytes(v))
	case []byte:
		return appendUTF8String(b, v)
	case int:
		return strconv.AppendInt(b, int64(v), 10)
	case int8:
		return strconv.AppendInt(b, int64(v), 10)
	case int16:
		return strconv.AppendInt(b, int64(v), 10)
	case int32:
		return strconv.AppendInt(b, int64(v), 10)
	case int64:
		return strconv.AppendInt(b, v, 10)
	case uint:
		return strconv.AppendUint(b, uint64(v), 10)
	case uint8:
		return strconv.AppendUint(b, uint64(v), 10)
	case uint16:
		return strconv.AppendUint(b, uint64(v), 10)
	case uint32:
		return strconv.AppendUint(b, uint64(v), 10)
	case uint64:
		return strconv.AppendUint(b, v, 10)
	case float32:
		return strconv.AppendFloat(b, float64(v), 'f', -1, 64)
	case float64:
		return strconv.AppendFloat(b, v, 'f', -1, 64)
	case bool:
		if v {
			return append(b, "true"...)
		}
		return append(b, "false"...)
	case time.Time:
		return v.AppendFormat(b, time.RFC3339Nano)
	default:
		return append(b, fmt.Sprint(v)...)
	}
}

func appendUTF8String(dst []byte, src []byte) []byte {
	dst = append(dst, src...)
	return dst
}
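Aside (not part of the vendored diff): AppendArg renders arbitrary command arguments into a readable byte form without reflection for the common scalar types. A trimmed-down sketch of the same type-switch technique, using only the standard library (names illustrative):

package main

import (
	"fmt"
	"strconv"
	"time"
)

// appendArg is a cut-down version of the dispatch above: append a
// human-readable encoding of v to b without intermediate allocations
// for the common scalar types.
func appendArg(b []byte, v interface{}) []byte {
	switch v := v.(type) {
	case nil:
		return append(b, "<nil>"...)
	case string:
		return append(b, v...)
	case int64:
		return strconv.AppendInt(b, v, 10)
	case float64:
		return strconv.AppendFloat(b, v, 'f', -1, 64)
	case bool:
		return strconv.AppendBool(b, v)
	case time.Time:
		return v.AppendFormat(b, time.RFC3339Nano)
	default:
		return append(b, fmt.Sprint(v)...)
	}
}

func main() {
	b := appendArg(nil, "get ")
	b = appendArg(b, int64(42))
	fmt.Println(string(b)) // get 42
}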
vendor/github.com/go-redis/redis/v8/internal/hashtag/hashtag.go

@@ -1,8 +1,9 @@
package hashtag

import (
-	"math/rand"
	"strings"
+
+	"github.com/go-redis/redis/v8/internal/rand"
)

const slotNumber = 16384
151 vendor/github.com/go-redis/redis/v8/internal/hscan/hscan.go generated vendored Normal file

@@ -0,0 +1,151 @@
package hscan

import (
	"errors"
	"fmt"
	"reflect"
	"strconv"
)

// decoderFunc represents decoding functions for default built-in types.
type decoderFunc func(reflect.Value, string) error

var (
	// List of built-in decoders indexed by their numeric constant values (eg: reflect.Bool = 1).
	decoders = []decoderFunc{
		reflect.Bool:          decodeBool,
		reflect.Int:           decodeInt,
		reflect.Int8:          decodeInt,
		reflect.Int16:         decodeInt,
		reflect.Int32:         decodeInt,
		reflect.Int64:         decodeInt,
		reflect.Uint:          decodeUint,
		reflect.Uint8:         decodeUint,
		reflect.Uint16:        decodeUint,
		reflect.Uint32:        decodeUint,
		reflect.Uint64:        decodeUint,
		reflect.Float32:       decodeFloat,
		reflect.Float64:       decodeFloat,
		reflect.Complex64:     decodeUnsupported,
		reflect.Complex128:    decodeUnsupported,
		reflect.Array:         decodeUnsupported,
		reflect.Chan:          decodeUnsupported,
		reflect.Func:          decodeUnsupported,
		reflect.Interface:     decodeUnsupported,
		reflect.Map:           decodeUnsupported,
		reflect.Ptr:           decodeUnsupported,
		reflect.Slice:         decodeSlice,
		reflect.String:        decodeString,
		reflect.Struct:        decodeUnsupported,
		reflect.UnsafePointer: decodeUnsupported,
	}

	// Global map of struct field specs that is populated once for every new
	// struct type that is scanned. This caches the field types and the corresponding
	// decoder functions to avoid iterating through struct fields on subsequent scans.
	globalStructMap = newStructMap()
)

func Struct(dst interface{}) (StructValue, error) {
	v := reflect.ValueOf(dst)

	// The destination to scan into should be a struct pointer.
	if v.Kind() != reflect.Ptr || v.IsNil() {
		return StructValue{}, fmt.Errorf("redis.Scan(non-pointer %T)", dst)
	}

	v = v.Elem()
	if v.Kind() != reflect.Struct {
		return StructValue{}, fmt.Errorf("redis.Scan(non-struct %T)", dst)
	}

	return StructValue{
		spec:  globalStructMap.get(v.Type()),
		value: v,
	}, nil
}

// Scan scans the results from a key-value Redis map result set to a destination struct.
// The Redis keys are matched to the struct's field with the `redis` tag.
func Scan(dst interface{}, keys []interface{}, vals []interface{}) error {
	if len(keys) != len(vals) {
		return errors.New("args should have the same number of keys and vals")
	}

	strct, err := Struct(dst)
	if err != nil {
		return err
	}

	// Iterate through the (key, value) sequence.
	for i := 0; i < len(vals); i++ {
		key, ok := keys[i].(string)
		if !ok {
			continue
		}

		val, ok := vals[i].(string)
		if !ok {
			continue
		}

		if err := strct.Scan(key, val); err != nil {
			return err
		}
	}

	return nil
}

func decodeBool(f reflect.Value, s string) error {
	b, err := strconv.ParseBool(s)
	if err != nil {
		return err
	}
	f.SetBool(b)
	return nil
}

func decodeInt(f reflect.Value, s string) error {
	v, err := strconv.ParseInt(s, 10, 0)
	if err != nil {
		return err
	}
	f.SetInt(v)
	return nil
}

func decodeUint(f reflect.Value, s string) error {
	v, err := strconv.ParseUint(s, 10, 0)
	if err != nil {
		return err
	}
	f.SetUint(v)
	return nil
}

func decodeFloat(f reflect.Value, s string) error {
	v, err := strconv.ParseFloat(s, 0)
	if err != nil {
		return err
	}
	f.SetFloat(v)
	return nil
}

func decodeString(f reflect.Value, s string) error {
	f.SetString(s)
	return nil
}

func decodeSlice(f reflect.Value, s string) error {
	// []byte slice ([]uint8).
	if f.Type().Elem().Kind() == reflect.Uint8 {
		f.SetBytes([]byte(s))
	}
	return nil
}

func decodeUnsupported(v reflect.Value, s string) error {
	return fmt.Errorf("redis.Scan(unsupported %s)", v.Type())
}
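Aside (not part of the vendored diff): this package backs struct scanning through `redis` field tags in the public API. A hedged usage sketch against the v8 client — it assumes a reachable server at localhost:6379 and that the HGetAll(...).Scan helper is present in the vendored release:

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

// Model's fields are matched to hash keys via the `redis` tag, using
// the decoder table from hscan.go above.
type Model struct {
	Name string `redis:"name"`
	Age  int    `redis:"age"`
}

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	if err := rdb.HSet(ctx, "user:1", "name", "alice", "age", "30").Err(); err != nil {
		panic(err)
	}

	var m Model
	// Scan decodes the HGetAll reply into m via hscan.Scan.
	if err := rdb.HGetAll(ctx, "user:1").Scan(&m); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", m) // {Name:alice Age:30}
}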
87 vendor/github.com/go-redis/redis/v8/internal/hscan/structmap.go generated vendored Normal file

@@ -0,0 +1,87 @@
package hscan

import (
	"reflect"
	"strings"
	"sync"
)

// structMap contains the map of struct fields for target structs
// indexed by the struct type.
type structMap struct {
	m sync.Map
}

func newStructMap() *structMap {
	return new(structMap)
}

func (s *structMap) get(t reflect.Type) *structSpec {
	if v, ok := s.m.Load(t); ok {
		return v.(*structSpec)
	}

	spec := newStructSpec(t, "redis")
	s.m.Store(t, spec)
	return spec
}

//------------------------------------------------------------------------------

// structSpec contains the list of all fields in a target struct.
type structSpec struct {
	m map[string]*structField
}

func (s *structSpec) set(tag string, sf *structField) {
	s.m[tag] = sf
}

func newStructSpec(t reflect.Type, fieldTag string) *structSpec {
	out := &structSpec{
		m: make(map[string]*structField),
	}

	num := t.NumField()
	for i := 0; i < num; i++ {
		f := t.Field(i)

		tag := f.Tag.Get(fieldTag)
		if tag == "" || tag == "-" {
			continue
		}

		tag = strings.Split(tag, ",")[0]
		if tag == "" {
			continue
		}

		// Use the built-in decoder.
		out.set(tag, &structField{index: i, fn: decoders[f.Type.Kind()]})
	}

	return out
}

//------------------------------------------------------------------------------

// structField represents a single field in a target struct.
type structField struct {
	index int
	fn    decoderFunc
}

//------------------------------------------------------------------------------

type StructValue struct {
	spec  *structSpec
	value reflect.Value
}

func (s StructValue) Scan(key string, value string) error {
	field, ok := s.spec.m[key]
	if !ok {
		return nil
	}
	return field.fn(s.value.Field(field.index), value)
}
33 vendor/github.com/go-redis/redis/v8/internal/instruments.go generated vendored Normal file

@@ -0,0 +1,33 @@
package internal

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
)

var (
	// WritesCounter is a count of write commands performed.
	WritesCounter metric.Int64Counter
	// NewConnectionsCounter is a count of new connections.
	NewConnectionsCounter metric.Int64Counter
)

func init() {
	defer func() {
		if r := recover(); r != nil {
			Logger.Printf(context.Background(), "Error creating meter github.com/go-redis/redis for Instruments", r)
		}
	}()

	meter := metric.Must(otel.Meter("github.com/go-redis/redis"))

	WritesCounter = meter.NewInt64Counter("redis.writes",
		metric.WithDescription("the number of writes initiated"),
	)

	NewConnectionsCounter = meter.NewInt64Counter("redis.new_connections",
		metric.WithDescription("the number of connections created"),
	)
}
29 vendor/github.com/go-redis/redis/v8/internal/internal.go generated vendored Normal file

@@ -0,0 +1,29 @@
package internal

import (
	"time"

	"github.com/go-redis/redis/v8/internal/rand"
)

func RetryBackoff(retry int, minBackoff, maxBackoff time.Duration) time.Duration {
	if retry < 0 {
		panic("not reached")
	}
	if minBackoff == 0 {
		return 0
	}

	d := minBackoff << uint(retry)
	if d < minBackoff {
		return maxBackoff
	}

	d = minBackoff + time.Duration(rand.Int63n(int64(d)))

	if d > maxBackoff || d < minBackoff {
		d = maxBackoff
	}

	return d
}
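Aside (not part of the vendored diff): RetryBackoff is capped exponential backoff with jitter — the window doubles per attempt, a random offset inside the window is added, and overflow or out-of-range results clamp to maxBackoff. An illustrative re-derivation with math/rand directly (the input guards from the original are omitted for brevity):

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// retryBackoff reproduces the algorithm above.
func retryBackoff(retry int, minBackoff, maxBackoff time.Duration) time.Duration {
	d := minBackoff << uint(retry) // exponential window
	if d < minBackoff {            // the shift overflowed
		return maxBackoff
	}
	d = minBackoff + time.Duration(rand.Int63n(int64(d))) // add jitter
	if d > maxBackoff || d < minBackoff {
		d = maxBackoff
	}
	return d
}

func main() {
	for retry := 0; retry < 6; retry++ {
		fmt.Println(retry, retryBackoff(retry, 8*time.Millisecond, 512*time.Millisecond))
	}
}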
24 vendor/github.com/go-redis/redis/v8/internal/log.go generated vendored Normal file

@@ -0,0 +1,24 @@
package internal

import (
	"context"
	"fmt"
	"log"
	"os"
)

type Logging interface {
	Printf(ctx context.Context, format string, v ...interface{})
}

type logger struct {
	log *log.Logger
}

func (l *logger) Printf(ctx context.Context, format string, v ...interface{}) {
	_ = l.log.Output(2, fmt.Sprintf(format, v...))
}

var Logger Logging = &logger{
	log: log.New(os.Stderr, "redis: ", log.LstdFlags|log.Lshortfile),
}
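Aside (not part of the vendored diff): since Logger is an exported variable of interface type, applications can substitute their own sink; the v8 client exposes redis.SetLogger for this (hedged: verify the helper exists in the release you vendor). A minimal adapter:

package main

import (
	"context"
	"log"
	"os"

	"github.com/go-redis/redis/v8"
)

// stdLogger satisfies the Logging interface shown above.
type stdLogger struct{ l *log.Logger }

func (s stdLogger) Printf(ctx context.Context, format string, v ...interface{}) {
	s.l.Printf(format, v...)
}

func main() {
	// Route go-redis internal messages to our own logger.
	redis.SetLogger(stdLogger{l: log.New(os.Stdout, "redis: ", log.LstdFlags)})
}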
vendor/github.com/go-redis/redis/v8/internal/pool/conn.go

@@ -1,26 +1,30 @@
package pool

import (
+	"bufio"
+	"context"
	"net"
	"sync/atomic"
	"time"

-	"github.com/go-redis/redis/v7/internal/proto"
+	"github.com/go-redis/redis/v8/internal"
+	"github.com/go-redis/redis/v8/internal/proto"
+	"go.opentelemetry.io/otel/trace"
)

var noDeadline = time.Time{}

type Conn struct {
+	usedAt  int64 // atomic
	netConn net.Conn

	rd *proto.Reader
+	bw *bufio.Writer
	wr *proto.Writer

	Inited    bool
	pooled    bool
	createdAt time.Time
-	usedAt    int64 // atomic
}

func NewConn(netConn net.Conn) *Conn {

@@ -29,7 +33,8 @@ func NewConn(netConn net.Conn) *Conn {
		createdAt: time.Now(),
	}
	cn.rd = proto.NewReader(netConn)
-	cn.wr = proto.NewWriter(netConn)
+	cn.bw = bufio.NewWriter(netConn)
+	cn.wr = proto.NewWriter(cn.bw)
	cn.SetUsedAt(time.Now())
	return cn
}

@@ -46,7 +51,7 @@ func (cn *Conn) SetUsedAt(tm time.Time) {
func (cn *Conn) SetNetConn(netConn net.Conn) {
	cn.netConn = netConn
	cn.rd.Reset(netConn)
-	cn.wr.Reset(netConn)
+	cn.bw.Reset(netConn)
}

func (cn *Conn) Write(b []byte) (int, error) {

@@ -54,35 +59,48 @@ func (cn *Conn) Write(b []byte) (int, error) {
}

func (cn *Conn) RemoteAddr() net.Addr {
-	return cn.netConn.RemoteAddr()
+	if cn.netConn != nil {
+		return cn.netConn.RemoteAddr()
+	}
+	return nil
}

func (cn *Conn) WithReader(ctx context.Context, timeout time.Duration, fn func(rd *proto.Reader) error) error {
-	err := cn.netConn.SetReadDeadline(cn.deadline(ctx, timeout))
-	if err != nil {
-		return err
-	}
-	return fn(cn.rd)
+	return internal.WithSpan(ctx, "redis.with_reader", func(ctx context.Context, span trace.Span) error {
+		if err := cn.netConn.SetReadDeadline(cn.deadline(ctx, timeout)); err != nil {
+			return internal.RecordError(ctx, span, err)
+		}
+		if err := fn(cn.rd); err != nil {
+			return internal.RecordError(ctx, span, err)
+		}
+		return nil
+	})
}

func (cn *Conn) WithWriter(
	ctx context.Context, timeout time.Duration, fn func(wr *proto.Writer) error,
) error {
-	err := cn.netConn.SetWriteDeadline(cn.deadline(ctx, timeout))
-	if err != nil {
-		return err
-	}
+	return internal.WithSpan(ctx, "redis.with_writer", func(ctx context.Context, span trace.Span) error {
+		if err := cn.netConn.SetWriteDeadline(cn.deadline(ctx, timeout)); err != nil {
+			return internal.RecordError(ctx, span, err)
+		}

-	if cn.wr.Buffered() > 0 {
-		cn.wr.Reset(cn.netConn)
-	}
+		if cn.bw.Buffered() > 0 {
+			cn.bw.Reset(cn.netConn)
+		}

-	err = fn(cn.wr)
-	if err != nil {
-		return err
-	}
+		if err := fn(cn.wr); err != nil {
+			return internal.RecordError(ctx, span, err)
+		}

-	return cn.wr.Flush()
+		if err := cn.bw.Flush(); err != nil {
+			return internal.RecordError(ctx, span, err)
+		}
+
+		internal.WritesCounter.Add(ctx, 1)
+
+		return nil
+	})
}

func (cn *Conn) Close() error {

vendor/github.com/go-redis/redis/v8/internal/pool/pool.go

@@ -8,11 +8,13 @@ import (
	"sync/atomic"
	"time"

-	"github.com/go-redis/redis/v7/internal"
+	"github.com/go-redis/redis/v8/internal"
)

-var ErrClosed = errors.New("redis: client is closed")
-var ErrPoolTimeout = errors.New("redis: connection pool timeout")
+var (
+	ErrClosed      = errors.New("redis: client is closed")
+	ErrPoolTimeout = errors.New("redis: connection pool timeout")
+)

var timers = sync.Pool{
	New: func() interface{} {

@@ -38,8 +40,8 @@ type Pooler interface {
	CloseConn(*Conn) error

	Get(context.Context) (*Conn, error)
-	Put(*Conn)
-	Remove(*Conn, error)
+	Put(context.Context, *Conn)
+	Remove(context.Context, *Conn, error)

	Len() int
	IdleLen() int

@@ -60,13 +62,16 @@ type Options struct {
	IdleCheckFrequency time.Duration
}

+type lastDialErrorWrap struct {
+	err error
+}
+
type ConnPool struct {
	opt *Options

	dialErrorsNum uint32 // atomic

-	lastDialErrorMu sync.RWMutex
-	lastDialError   error
+	lastDialError atomic.Value

	queue chan struct{}

@@ -158,6 +163,7 @@ func (p *ConnPool) newConn(ctx context.Context, pooled bool) (*Conn, error) {
		}
	}
	p.connsMu.Unlock()
+
	return cn, nil
}

@@ -179,6 +185,7 @@ func (p *ConnPool) dialConn(ctx context.Context, pooled bool) (*Conn, error) {
		return nil, err
	}

+	internal.NewConnectionsCounter.Add(ctx, 1)
	cn := NewConn(netConn)
	cn.pooled = pooled
	return cn, nil

@@ -204,16 +211,15 @@ func (p *ConnPool) tryDial() {
}

func (p *ConnPool) setLastDialError(err error) {
-	p.lastDialErrorMu.Lock()
-	p.lastDialError = err
-	p.lastDialErrorMu.Unlock()
+	p.lastDialError.Store(&lastDialErrorWrap{err: err})
}

func (p *ConnPool) getLastDialError() error {
-	p.lastDialErrorMu.RLock()
-	err := p.lastDialError
-	p.lastDialErrorMu.RUnlock()
-	return err
+	err, _ := p.lastDialError.Load().(*lastDialErrorWrap)
+	if err != nil {
+		return err.err
+	}
+	return nil
}

// Get returns existed connection from the pool or creates a new one.

@@ -313,15 +319,15 @@ func (p *ConnPool) popIdle() *Conn {
	return cn
}

-func (p *ConnPool) Put(cn *Conn) {
+func (p *ConnPool) Put(ctx context.Context, cn *Conn) {
	if cn.rd.Buffered() > 0 {
-		internal.Logger.Printf("Conn has unread data")
-		p.Remove(cn, BadConnError{})
+		internal.Logger.Printf(ctx, "Conn has unread data")
+		p.Remove(ctx, cn, BadConnError{})
		return
	}

	if !cn.pooled {
-		p.Remove(cn, nil)
+		p.Remove(ctx, cn, nil)
		return
	}

@@ -332,7 +338,7 @@ func (p *ConnPool) Put(cn *Conn) {
	p.freeTurn()
}

-func (p *ConnPool) Remove(cn *Conn, reason error) {
+func (p *ConnPool) Remove(ctx context.Context, cn *Conn, reason error) {
	p.removeConnWithLock(cn)
	p.freeTurn()
	_ = p.closeConn(cn)

@@ -403,8 +409,10 @@ func (p *ConnPool) closed() bool {
}

func (p *ConnPool) Filter(fn func(*Conn) bool) error {
-	var firstErr error
	p.connsMu.Lock()
+	defer p.connsMu.Unlock()
+
+	var firstErr error
	for _, cn := range p.conns {
		if fn(cn) {
			if err := p.closeConn(cn); err != nil && firstErr == nil {

@@ -412,7 +420,6 @@ func (p *ConnPool) Filter(fn func(*Conn) bool) error {
		}
	}
-	p.connsMu.Unlock()
	return firstErr
}

@@ -453,7 +460,7 @@ func (p *ConnPool) reaper(frequency time.Duration) {
			}
			_, err := p.ReapStaleConns()
			if err != nil {
-				internal.Logger.Printf("ReapStaleConns failed: %s", err)
+				internal.Logger.Printf(context.Background(), "ReapStaleConns failed: %s", err)
				continue
			}
		case <-p.closedCh:

@@ -470,6 +477,7 @@ func (p *ConnPool) ReapStaleConns() (int, error) {
		p.connsMu.Lock()
		cn := p.reapStaleConn()
		p.connsMu.Unlock()
+
		p.freeTurn()

		if cn != nil {
58 vendor/github.com/go-redis/redis/v8/internal/pool/pool_single.go generated vendored Normal file

@@ -0,0 +1,58 @@
package pool

import "context"

type SingleConnPool struct {
	pool      Pooler
	cn        *Conn
	stickyErr error
}

var _ Pooler = (*SingleConnPool)(nil)

func NewSingleConnPool(pool Pooler, cn *Conn) *SingleConnPool {
	return &SingleConnPool{
		pool: pool,
		cn:   cn,
	}
}

func (p *SingleConnPool) NewConn(ctx context.Context) (*Conn, error) {
	return p.pool.NewConn(ctx)
}

func (p *SingleConnPool) CloseConn(cn *Conn) error {
	return p.pool.CloseConn(cn)
}

func (p *SingleConnPool) Get(ctx context.Context) (*Conn, error) {
	if p.stickyErr != nil {
		return nil, p.stickyErr
	}
	return p.cn, nil
}

func (p *SingleConnPool) Put(ctx context.Context, cn *Conn) {}

func (p *SingleConnPool) Remove(ctx context.Context, cn *Conn, reason error) {
	p.cn = nil
	p.stickyErr = reason
}

func (p *SingleConnPool) Close() error {
	p.cn = nil
	p.stickyErr = ErrClosed
	return nil
}

func (p *SingleConnPool) Len() int {
	return 0
}

func (p *SingleConnPool) IdleLen() int {
	return 0
}

func (p *SingleConnPool) Stats() *Stats {
	return &Stats{}
}
vendor/github.com/go-redis/redis/v8/internal/pool/pool_sticky.go

@@ -2,6 +2,7 @@ package pool

import (
	"context"
+	"errors"
	"fmt"
	"sync/atomic"
)

@@ -30,9 +31,11 @@ func (e BadConnError) Unwrap() error {
	return e.wrapped
}

-type SingleConnPool struct {
-	pool  Pooler
-	level int32 // atomic
+//------------------------------------------------------------------------------
+
+type StickyConnPool struct {
+	pool   Pooler
+	shared int32 // atomic

	state uint32 // atomic
	ch    chan *Conn

@@ -40,37 +43,29 @@ type SingleConnPool struct {
	_badConnError atomic.Value
}

-var _ Pooler = (*SingleConnPool)(nil)
+var _ Pooler = (*StickyConnPool)(nil)

-func NewSingleConnPool(pool Pooler) *SingleConnPool {
-	p, ok := pool.(*SingleConnPool)
+func NewStickyConnPool(pool Pooler) *StickyConnPool {
+	p, ok := pool.(*StickyConnPool)
	if !ok {
-		p = &SingleConnPool{
+		p = &StickyConnPool{
			pool: pool,
			ch:   make(chan *Conn, 1),
		}
	}
-	atomic.AddInt32(&p.level, 1)
+	atomic.AddInt32(&p.shared, 1)
	return p
}

-func (p *SingleConnPool) SetConn(cn *Conn) {
-	if atomic.CompareAndSwapUint32(&p.state, stateDefault, stateInited) {
-		p.ch <- cn
-	} else {
-		panic("not reached")
-	}
-}
-
-func (p *SingleConnPool) NewConn(ctx context.Context) (*Conn, error) {
+func (p *StickyConnPool) NewConn(ctx context.Context) (*Conn, error) {
	return p.pool.NewConn(ctx)
}

-func (p *SingleConnPool) CloseConn(cn *Conn) error {
+func (p *StickyConnPool) CloseConn(cn *Conn) error {
	return p.pool.CloseConn(cn)
}

-func (p *SingleConnPool) Get(ctx context.Context) (*Conn, error) {
+func (p *StickyConnPool) Get(ctx context.Context) (*Conn, error) {
	// In worst case this races with Close which is not a very common operation.
	for i := 0; i < 1000; i++ {
		switch atomic.LoadUint32(&p.state) {

@@ -82,7 +77,7 @@ func (p *SingleConnPool) Get(ctx context.Context) (*Conn, error) {
			if atomic.CompareAndSwapUint32(&p.state, stateDefault, stateInited) {
				return cn, nil
			}
-			p.pool.Remove(cn, ErrClosed)
+			p.pool.Remove(ctx, cn, ErrClosed)
		case stateInited:
			if err := p.badConnError(); err != nil {
				return nil, err

@@ -98,60 +93,38 @@ func (p *SingleConnPool) Get(ctx context.Context) (*Conn, error) {
			panic("not reached")
		}
	}
-	return nil, fmt.Errorf("redis: SingleConnPool.Get: infinite loop")
+	return nil, fmt.Errorf("redis: StickyConnPool.Get: infinite loop")
}

-func (p *SingleConnPool) Put(cn *Conn) {
+func (p *StickyConnPool) Put(ctx context.Context, cn *Conn) {
	defer func() {
		if recover() != nil {
-			p.freeConn(cn)
+			p.freeConn(ctx, cn)
		}
	}()
	p.ch <- cn
}

-func (p *SingleConnPool) freeConn(cn *Conn) {
+func (p *StickyConnPool) freeConn(ctx context.Context, cn *Conn) {
	if err := p.badConnError(); err != nil {
-		p.pool.Remove(cn, err)
+		p.pool.Remove(ctx, cn, err)
	} else {
-		p.pool.Put(cn)
+		p.pool.Put(ctx, cn)
	}
}

-func (p *SingleConnPool) Remove(cn *Conn, reason error) {
+func (p *StickyConnPool) Remove(ctx context.Context, cn *Conn, reason error) {
	defer func() {
		if recover() != nil {
-			p.pool.Remove(cn, ErrClosed)
+			p.pool.Remove(ctx, cn, ErrClosed)
		}
	}()
	p._badConnError.Store(BadConnError{wrapped: reason})
	p.ch <- cn
}

-func (p *SingleConnPool) Len() int {
-	switch atomic.LoadUint32(&p.state) {
-	case stateDefault:
-		return 0
-	case stateInited:
-		return 1
-	case stateClosed:
-		return 0
-	default:
-		panic("not reached")
-	}
-}
-
-func (p *SingleConnPool) IdleLen() int {
-	return len(p.ch)
-}
-
-func (p *SingleConnPool) Stats() *Stats {
-	return &Stats{}
-}
-
-func (p *SingleConnPool) Close() error {
-	level := atomic.AddInt32(&p.level, -1)
-	if level > 0 {
+func (p *StickyConnPool) Close() error {
+	if shared := atomic.AddInt32(&p.shared, -1); shared > 0 {
		return nil
	}

@@ -164,16 +137,16 @@ func (p *SingleConnPool) Close() error {
		close(p.ch)
		cn, ok := <-p.ch
		if ok {
-			p.freeConn(cn)
+			p.freeConn(context.TODO(), cn)
		}
		return nil
	}
	}

-	return fmt.Errorf("redis: SingleConnPool.Close: infinite loop")
+	return errors.New("redis: StickyConnPool.Close: infinite loop")
}

-func (p *SingleConnPool) Reset() error {
+func (p *StickyConnPool) Reset(ctx context.Context) error {
	if p.badConnError() == nil {
		return nil
	}

@@ -183,21 +156,21 @@ func (p *SingleConnPool) Reset() error {
		if !ok {
			return ErrClosed
		}
-		p.pool.Remove(cn, ErrClosed)
+		p.pool.Remove(ctx, cn, ErrClosed)
		p._badConnError.Store(BadConnError{wrapped: nil})
	default:
-		return fmt.Errorf("redis: SingleConnPool does not have a Conn")
+		return errors.New("redis: StickyConnPool does not have a Conn")
	}

	if !atomic.CompareAndSwapUint32(&p.state, stateInited, stateDefault) {
		state := atomic.LoadUint32(&p.state)
-		return fmt.Errorf("redis: invalid SingleConnPool state: %d", state)
+		return fmt.Errorf("redis: invalid StickyConnPool state: %d", state)
	}

	return nil
}

-func (p *SingleConnPool) badConnError() error {
+func (p *StickyConnPool) badConnError() error {
	if v := p._badConnError.Load(); v != nil {
		err := v.(BadConnError)
		if err.wrapped != nil {

@@ -206,3 +179,24 @@ func (p *SingleConnPool) badConnError() error {
	}
	return nil
}
+
+func (p *StickyConnPool) Len() int {
+	switch atomic.LoadUint32(&p.state) {
+	case stateDefault:
+		return 0
+	case stateInited:
+		return 1
+	case stateClosed:
+		return 0
+	default:
+		panic("not reached")
+	}
+}
+
+func (p *StickyConnPool) IdleLen() int {
+	return len(p.ch)
+}
+
+func (p *StickyConnPool) Stats() *Stats {
+	return &Stats{}
+}
vendor/github.com/go-redis/redis/v8/internal/proto/reader.go

@@ -5,7 +5,7 @@ import (
	"fmt"
	"io"

-	"github.com/go-redis/redis/v7/internal/util"
+	"github.com/go-redis/redis/v8/internal/util"
)

const (

@@ -71,13 +71,25 @@ func (r *Reader) ReadLine() ([]byte, error) {
func (r *Reader) readLine() ([]byte, error) {
	b, err := r.rd.ReadSlice('\n')
	if err != nil {
-		return nil, err
+		if err != bufio.ErrBufferFull {
+			return nil, err
+		}
+
+		full := make([]byte, len(b))
+		copy(full, b)
+
+		b, err = r.rd.ReadBytes('\n')
+		if err != nil {
+			return nil, err
+		}
+
+		full = append(full, b...)
+		b = full
	}
	if len(b) <= 2 || b[len(b)-1] != '\n' || b[len(b)-2] != '\r' {
		return nil, fmt.Errorf("redis: invalid reply: %q", b)
	}
-	b = b[:len(b)-2]
-	return b, nil
+	return b[:len(b)-2], nil
}

func (r *Reader) ReadReply(m MultiBulkParse) (interface{}, error) {

@@ -181,7 +193,7 @@ func (r *Reader) ReadArrayReply(m MultiBulkParse) (interface{}, error) {
	}
}

-func (r *Reader) ReadArrayLen() (int64, error) {
+func (r *Reader) ReadArrayLen() (int, error) {
	line, err := r.ReadLine()
	if err != nil {
		return 0, err

@@ -190,7 +202,11 @@ func (r *Reader) ReadArrayLen() (int64, error) {
	case ErrorReply:
		return 0, ParseErrorReply(line)
	case ArrayReply:
-		return parseArrayLen(line)
+		n, err := parseArrayLen(line)
+		if err != nil {
+			return 0, err
+		}
+		return int(n), nil
	default:
		return 0, fmt.Errorf("redis: can't parse array reply: %.100q", line)
	}

@@ -216,7 +232,8 @@ func (r *Reader) ReadScanReply() ([]string, uint64, error) {
	}

	keys := make([]string, n)
-	for i := int64(0); i < n; i++ {
+
+	for i := 0; i < n; i++ {
		key, err := r.ReadString()
		if err != nil {
			return nil, 0, err
vendor/github.com/go-redis/redis/v8/internal/proto/scan.go

@@ -4,10 +4,13 @@ import (
	"encoding"
	"fmt"
	"reflect"
+	"time"

-	"github.com/go-redis/redis/v7/internal/util"
+	"github.com/go-redis/redis/v8/internal/util"
)

// Scan parses bytes `b` to `v` with appropriate type.
+// nolint: gocyclo
func Scan(b []byte, v interface{}) error {
	switch v := v.(type) {
	case nil:

@@ -99,6 +102,10 @@ func Scan(b []byte, v interface{}) error {
	case *bool:
		*v = len(b) == 1 && b[0] == '1'
		return nil
+	case *time.Time:
+		var err error
+		*v, err = time.Parse(time.RFC3339Nano, util.BytesToString(b))
+		return err
	case encoding.BinaryUnmarshaler:
		return v.UnmarshalBinary(b)
	default:

@@ -124,7 +131,7 @@ func ScanSlice(data []string, slice interface{}) error {
	for i, s := range data {
		elem := next()
		if err := Scan([]byte(s), elem.Addr().Interface()); err != nil {
-			err = fmt.Errorf("redis: ScanSlice index=%d value=%q failed: %s", i, s, err)
+			err = fmt.Errorf("redis: ScanSlice index=%d value=%q failed: %w", i, s, err)
			return err
		}
	}
vendor/github.com/go-redis/redis/v8/internal/proto/writer.go

@@ -1,26 +1,32 @@
package proto

import (
-	"bufio"
	"encoding"
	"fmt"
	"io"
	"strconv"
	"time"

-	"github.com/go-redis/redis/v7/internal/util"
+	"github.com/go-redis/redis/v8/internal/util"
)

+type writer interface {
+	io.Writer
+	io.ByteWriter
+	// io.StringWriter
+	WriteString(s string) (n int, err error)
+}
+
type Writer struct {
-	wr *bufio.Writer
+	writer

	lenBuf []byte
	numBuf []byte
}

-func NewWriter(wr io.Writer) *Writer {
+func NewWriter(wr writer) *Writer {
	return &Writer{
-		wr: bufio.NewWriter(wr),
+		writer: wr,

		lenBuf: make([]byte, 64),
		numBuf: make([]byte, 64),

@@ -28,19 +34,16 @@ func NewWriter(wr io.Writer) *Writer {
}

func (w *Writer) WriteArgs(args []interface{}) error {
-	err := w.wr.WriteByte(ArrayReply)
-	if err != nil {
+	if err := w.WriteByte(ArrayReply); err != nil {
		return err
	}

-	err = w.writeLen(len(args))
-	if err != nil {
+	if err := w.writeLen(len(args)); err != nil {
		return err
	}

	for _, arg := range args {
-		err := w.writeArg(arg)
-		if err != nil {
+		if err := w.WriteArg(arg); err != nil {
			return err
		}
	}

@@ -51,11 +54,11 @@ func (w *Writer) WriteArgs(args []interface{}) error {
func (w *Writer) writeLen(n int) error {
	w.lenBuf = strconv.AppendUint(w.lenBuf[:0], uint64(n), 10)
	w.lenBuf = append(w.lenBuf, '\r', '\n')
-	_, err := w.wr.Write(w.lenBuf)
+	_, err := w.Write(w.lenBuf)
	return err
}

-func (w *Writer) writeArg(v interface{}) error {
+func (w *Writer) WriteArg(v interface{}) error {
	switch v := v.(type) {
	case nil:
		return w.string("")

@@ -93,7 +96,8 @@ func (w *Writer) writeArg(v interface{}) error {
		}
		return w.int(0)
	case time.Time:
-		return w.string(v.Format(time.RFC3339Nano))
+		w.numBuf = v.AppendFormat(w.numBuf[:0], time.RFC3339Nano)
+		return w.bytes(w.numBuf)
	case encoding.BinaryMarshaler:
		b, err := v.MarshalBinary()
		if err != nil {

@@ -107,18 +111,15 @@ func (w *Writer) writeArg(v interface{}) error {
}

func (w *Writer) bytes(b []byte) error {
-	err := w.wr.WriteByte(StringReply)
-	if err != nil {
+	if err := w.WriteByte(StringReply); err != nil {
		return err
	}

-	err = w.writeLen(len(b))
-	if err != nil {
+	if err := w.writeLen(len(b)); err != nil {
		return err
	}

-	_, err = w.wr.Write(b)
-	if err != nil {
+	if _, err := w.Write(b); err != nil {
		return err
	}

@@ -145,21 +146,8 @@ func (w *Writer) float(f float64) error {
}

func (w *Writer) crlf() error {
-	err := w.wr.WriteByte('\r')
-	if err != nil {
+	if err := w.WriteByte('\r'); err != nil {
		return err
	}
-	return w.wr.WriteByte('\n')
-}
-
-func (w *Writer) Buffered() int {
-	return w.wr.Buffered()
-}
-
-func (w *Writer) Reset(wr io.Writer) {
-	w.wr.Reset(wr)
-}
-
-func (w *Writer) Flush() error {
-	return w.wr.Flush()
+	return w.WriteByte('\n')
}
45 vendor/github.com/go-redis/redis/v8/internal/rand/rand.go generated vendored Normal file

@@ -0,0 +1,45 @@
package rand

import (
	"math/rand"
	"sync"
)

// Int returns a non-negative pseudo-random int.
func Int() int { return pseudo.Int() }

// Intn returns, as an int, a non-negative pseudo-random number in [0,n).
// It panics if n <= 0.
func Intn(n int) int { return pseudo.Intn(n) }

// Int63n returns, as an int64, a non-negative pseudo-random number in [0,n).
// It panics if n <= 0.
func Int63n(n int64) int64 { return pseudo.Int63n(n) }

// Perm returns, as a slice of n ints, a pseudo-random permutation of the integers [0,n).
func Perm(n int) []int { return pseudo.Perm(n) }

// Seed uses the provided seed value to initialize the default Source to a
// deterministic state. If Seed is not called, the generator behaves as if
// seeded by Seed(1).
func Seed(n int64) { pseudo.Seed(n) }

var pseudo = rand.New(&source{src: rand.NewSource(1)})

type source struct {
	src rand.Source
	mu  sync.Mutex
}

func (s *source) Int63() int64 {
	s.mu.Lock()
	n := s.src.Int63()
	s.mu.Unlock()
	return n
}

func (s *source) Seed(seed int64) {
	s.mu.Lock()
	s.src.Seed(seed)
	s.mu.Unlock()
}
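Aside (not part of the vendored diff): the package wraps a single rand.Source in a mutex so one shared generator is safe across goroutines. The same pattern in isolation:

package main

import (
	"fmt"
	"math/rand"
	"sync"
)

// lockedSource is the same trick as the source type above: guard a
// rand.Source with a mutex so one rand.Rand can be shared safely.
type lockedSource struct {
	mu  sync.Mutex
	src rand.Source
}

func (s *lockedSource) Int63() int64 {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.src.Int63()
}

func (s *lockedSource) Seed(seed int64) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.src.Seed(seed)
}

func main() {
	r := rand.New(&lockedSource{src: rand.NewSource(1)})
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() { defer wg.Done(); _ = r.Intn(100) }() // safe concurrently
	}
	wg.Wait()
	fmt.Println("done")
}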
11 vendor/github.com/go-redis/redis/v8/internal/safe.go generated vendored Normal file

@@ -0,0 +1,11 @@
// +build appengine

package internal

func String(b []byte) string {
	return string(b)
}

func Bytes(s string) []byte {
	return []byte(s)
}
20 vendor/github.com/go-redis/redis/v8/internal/unsafe.go generated vendored Normal file

@@ -0,0 +1,20 @@
// +build !appengine

package internal

import "unsafe"

// String converts byte slice to string.
func String(b []byte) string {
	return *(*string)(unsafe.Pointer(&b))
}

// Bytes converts string to byte slice.
func Bytes(s string) []byte {
	return *(*[]byte)(unsafe.Pointer(
		&struct {
			string
			Cap int
		}{s, len(s)},
	))
}
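Aside (not part of the vendored diff): these unsafe variants skip the copy that string([]byte) makes; the trade-off is aliasing, so the input must never be mutated afterwards. A sketch of the string direction only (the []byte direction depends on matching header layouts and is best left to the vendored code):

package main

import (
	"fmt"
	"unsafe"
)

// bytesToString mirrors the String helper above: reinterpret the slice
// header as a string header, avoiding a copy. The caller must never
// mutate b afterwards, since the string now aliases its memory.
func bytesToString(b []byte) string {
	return *(*string)(unsafe.Pointer(&b))
}

func main() {
	b := []byte("PONG")
	s := bytesToString(b) // no allocation
	fmt.Println(s)
}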
73 vendor/github.com/go-redis/redis/v8/internal/util.go generated vendored Normal file

@@ -0,0 +1,73 @@
package internal

import (
	"context"
	"time"

	"github.com/go-redis/redis/v8/internal/proto"
	"github.com/go-redis/redis/v8/internal/util"
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/trace"
)

func Sleep(ctx context.Context, dur time.Duration) error {
	return WithSpan(ctx, "time.Sleep", func(ctx context.Context, span trace.Span) error {
		t := time.NewTimer(dur)
		defer t.Stop()

		select {
		case <-t.C:
			return nil
		case <-ctx.Done():
			return ctx.Err()
		}
	})
}

func ToLower(s string) string {
	if isLower(s) {
		return s
	}

	b := make([]byte, len(s))
	for i := range b {
		c := s[i]
		if c >= 'A' && c <= 'Z' {
			c += 'a' - 'A'
		}
		b[i] = c
	}
	return util.BytesToString(b)
}

func isLower(s string) bool {
	for i := 0; i < len(s); i++ {
		c := s[i]
		if c >= 'A' && c <= 'Z' {
			return false
		}
	}
	return true
}

//------------------------------------------------------------------------------

var tracer = otel.Tracer("github.com/go-redis/redis")

func WithSpan(ctx context.Context, name string, fn func(context.Context, trace.Span) error) error {
	if span := trace.SpanFromContext(ctx); !span.IsRecording() {
		return fn(ctx, span)
	}

	ctx, span := tracer.Start(ctx, name)
	defer span.End()

	return fn(ctx, span)
}

func RecordError(ctx context.Context, span trace.Span, err error) error {
	if err != proto.Nil {
		span.RecordError(err)
	}
	return err
}
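Aside (not part of the vendored diff): WithSpan starts a child span only when the incoming context is already recording, and RecordError deliberately skips proto.Nil so key misses are not flagged as failures. A hedged standalone sketch of the span-gating part, against the otel v0.16 tracing API pinned in go.mod:

package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/trace"
)

var tracer = otel.Tracer("example")

// withSpan follows the helper above: skip creating a child span when the
// parent isn't recording, so untraced calls stay allocation-light.
func withSpan(ctx context.Context, name string, fn func(context.Context, trace.Span) error) error {
	if span := trace.SpanFromContext(ctx); !span.IsRecording() {
		return fn(ctx, span)
	}
	ctx, span := tracer.Start(ctx, name)
	defer span.End()
	return fn(ctx, span)
}

func main() {
	// With the default no-op tracer provider, no span is ever recorded.
	err := withSpan(context.Background(), "demo", func(ctx context.Context, span trace.Span) error {
		fmt.Println("inside span body")
		return nil
	})
	fmt.Println(err)
}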
vendor/github.com/go-redis/redis/v8/iterator.go

@@ -1,6 +1,7 @@
package redis

import (
+	"context"
	"sync"
)

@@ -21,7 +22,7 @@ func (it *ScanIterator) Err() error {
}

// Next advances the cursor and returns true if more values can be read.
-func (it *ScanIterator) Next() bool {
+func (it *ScanIterator) Next(ctx context.Context) bool {
	it.mu.Lock()
	defer it.mu.Unlock()

@@ -43,13 +44,14 @@ func (it *ScanIterator) Next() bool {
	}

	// Fetch next page.
-	if it.cmd.args[0] == "scan" {
+	switch it.cmd.args[0] {
+	case "scan", "qscan":
		it.cmd.args[1] = it.cmd.cursor
-	} else {
+	default:
		it.cmd.args[2] = it.cmd.cursor
	}

-	err := it.cmd.process(it.cmd)
+	err := it.cmd.process(ctx, it.cmd)
	if err != nil {
		return false
	}
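Aside (not part of the vendored diff): callers now thread the context through iteration. A usage sketch with the public v8 API (assumes a local server; key pattern illustrative):

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// Next now takes the context, matching the signature change above.
	iter := rdb.Scan(ctx, 0, "user:*", 10).Iterator()
	for iter.Next(ctx) {
		fmt.Println("key:", iter.Val())
	}
	if err := iter.Err(); err != nil {
		panic(err)
	}
}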
vendor/github.com/go-redis/redis/v8/options.go

@@ -12,7 +12,10 @@ import (
	"strings"
	"time"

-	"github.com/go-redis/redis/v7/internal/pool"
+	"github.com/go-redis/redis/v8/internal"
+	"github.com/go-redis/redis/v8/internal/pool"
+	"go.opentelemetry.io/otel/label"
+	"go.opentelemetry.io/otel/trace"
)

// Limiter is the interface of a rate limiter or a circuit breaker.

@@ -26,6 +29,7 @@ type Limiter interface {
	ReportResult(result error)
}

+// Options keeps the settings to setup redis connection.
type Options struct {
	// The network type, either tcp or unix.
	// Default is tcp.

@@ -38,21 +42,23 @@ type Options struct {
	Dialer func(ctx context.Context, network, addr string) (net.Conn, error)

	// Hook that is called when new connection is established.
-	OnConnect func(*Conn) error
+	OnConnect func(ctx context.Context, cn *Conn) error

-	// Use the specified Username to authenticate the current connection with one of the connections defined in the ACL
-	// list when connecting to a Redis 6.0 instance, or greater, that is using the Redis ACL system.
+	// Use the specified Username to authenticate the current connection
+	// with one of the connections defined in the ACL list when connecting
+	// to a Redis 6.0 instance, or greater, that is using the Redis ACL system.
	Username string

	// Optional password. Must match the password specified in the
	// requirepass server configuration option (if connecting to a Redis 5.0 instance, or lower),
-	// or the User Password when connecting to a Redis 6.0 instance, or greater, that is using the Redis ACL system.
+	// or the User Password when connecting to a Redis 6.0 instance, or greater,
+	// that is using the Redis ACL system.
	Password string

	// Database to be selected after connecting to the server.
	DB int

	// Maximum number of retries before giving up.
-	// Default is to not retry failed commands.
+	// Default is 3 retries; -1 (not 0) disables retries.
	MaxRetries int
	// Minimum backoff between each retry.
	// Default is 8 milliseconds; -1 disables backoff.

@@ -117,6 +123,9 @@ func (opt *Options) init() {
			opt.Network = "tcp"
		}
	}
+	if opt.DialTimeout == 0 {
+		opt.DialTimeout = 5 * time.Second
+	}
	if opt.Dialer == nil {
		opt.Dialer = func(ctx context.Context, network, addr string) (net.Conn, error) {
			netDialer := &net.Dialer{

@@ -132,9 +141,6 @@ func (opt *Options) init() {
	if opt.PoolSize == 0 {
		opt.PoolSize = 10 * runtime.NumCPU()
	}
-	if opt.DialTimeout == 0 {
-		opt.DialTimeout = 5 * time.Second
-	}
	switch opt.ReadTimeout {
	case -1:
		opt.ReadTimeout = 0

@@ -159,6 +165,8 @@ func (opt *Options) init() {

	if opt.MaxRetries == -1 {
		opt.MaxRetries = 0
+	} else if opt.MaxRetries == 0 {
+		opt.MaxRetries = 3
	}
	switch opt.MinRetryBackoff {
	case -1:

@@ -180,26 +188,35 @@ func (opt *Options) clone() *Options {
}

// ParseURL parses an URL into Options that can be used to connect to Redis.
+// Scheme is required.
+// There are two connection types: by tcp socket and by unix socket.
+// Tcp connection:
+//		redis://<user>:<password>@<host>:<port>/<db_number>
+// Unix connection:
+//		unix://<user>:<password>@</path/to/redis.sock>?db=<db_number>
func ParseURL(redisURL string) (*Options, error) {
-	o := &Options{Network: "tcp"}
	u, err := url.Parse(redisURL)
	if err != nil {
		return nil, err
	}

-	if u.Scheme != "redis" && u.Scheme != "rediss" {
-		return nil, errors.New("invalid redis URL scheme: " + u.Scheme)
+	switch u.Scheme {
+	case "redis", "rediss":
+		return setupTCPConn(u)
+	case "unix":
+		return setupUnixConn(u)
+	default:
+		return nil, fmt.Errorf("redis: invalid URL scheme: %s", u.Scheme)
	}
}

-	if u.User != nil {
-		o.Username = u.User.Username()
-		if p, ok := u.User.Password(); ok {
-			o.Password = p
-		}
-	}
+func setupTCPConn(u *url.URL) (*Options, error) {
+	o := &Options{Network: "tcp"}
+
+	o.Username, o.Password = getUserPassword(u)

	if len(u.Query()) > 0 {
-		return nil, errors.New("no options supported")
+		return nil, errors.New("redis: no options supported")
	}

	h, p, err := net.SplitHostPort(u.Host)

@@ -222,22 +239,73 @@ func ParseURL(redisURL string) (*Options, error) {
		o.DB = 0
	case 1:
		if o.DB, err = strconv.Atoi(f[0]); err != nil {
-			return nil, fmt.Errorf("invalid redis database number: %q", f[0])
+			return nil, fmt.Errorf("redis: invalid database number: %q", f[0])
		}
	default:
-		return nil, errors.New("invalid redis URL path: " + u.Path)
+		return nil, fmt.Errorf("redis: invalid URL path: %s", u.Path)
	}

	if u.Scheme == "rediss" {
		o.TLSConfig = &tls.Config{ServerName: h}
	}

	return o, nil
}

+func setupUnixConn(u *url.URL) (*Options, error) {
+	o := &Options{
+		Network: "unix",
+	}
+
+	if strings.TrimSpace(u.Path) == "" { // path is required with unix connection
+		return nil, errors.New("redis: empty unix socket path")
+	}
+	o.Addr = u.Path
+
+	o.Username, o.Password = getUserPassword(u)
+
+	dbStr := u.Query().Get("db")
+	if dbStr == "" {
+		return o, nil // if database is not set, connect to 0 db.
+	}
+
+	db, err := strconv.Atoi(dbStr)
+	if err != nil {
+		return nil, fmt.Errorf("redis: invalid database number: %w", err)
+	}
+	o.DB = db
+
+	return o, nil
+}
+
+func getUserPassword(u *url.URL) (string, string) {
+	var user, password string
+	if u.User != nil {
+		user = u.User.Username()
+		if p, ok := u.User.Password(); ok {
+			password = p
+		}
+	}
+	return user, password
+}
+
func newConnPool(opt *Options) *pool.ConnPool {
	return pool.NewConnPool(&pool.Options{
		Dialer: func(ctx context.Context) (net.Conn, error) {
-			return opt.Dialer(ctx, opt.Network, opt.Addr)
+			var conn net.Conn
+			err := internal.WithSpan(ctx, "redis.dial", func(ctx context.Context, span trace.Span) error {
+				span.SetAttributes(
+					label.String("db.connection_string", opt.Addr),
+				)
+
+				var err error
+				conn, err = opt.Dialer(ctx, opt.Network, opt.Addr)
+				if err != nil {
+					_ = internal.RecordError(ctx, span, err)
+				}
+				return err
+			})
+			return conn, err
		},
		PoolSize:     opt.PoolSize,
		MinIdleConns: opt.MinIdleConns,
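Aside (not part of the vendored diff): ParseURL gains a unix:// scheme alongside redis:// and rediss://. A usage sketch (TCP form shown; credentials and address illustrative):

package main

import (
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	// The new setupUnixConn path also accepts e.g.
	// unix://user:pass@/tmp/redis.sock?db=2
	opt, err := redis.ParseURL("redis://user:secret@localhost:6379/1")
	if err != nil {
		panic(err)
	}
	fmt.Println(opt.Addr, opt.DB, opt.Username) // localhost:6379 1 user
	_ = redis.NewClient(opt)
}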
@ -4,7 +4,7 @@ import (
 	"context"
 	"sync"
 
-	"github.com/go-redis/redis/v7/internal/pool"
+	"github.com/go-redis/redis/v8/internal/pool"
 )
 
 type pipelineExecer func(context.Context, []Cmder) error
@ -24,12 +24,11 @@ type pipelineExecer func(context.Context, []Cmder) error
 // depends on your batch size and/or use TxPipeline.
 type Pipeliner interface {
 	StatefulCmdable
-	Do(args ...interface{}) *Cmd
-	Process(cmd Cmder) error
+	Do(ctx context.Context, args ...interface{}) *Cmd
+	Process(ctx context.Context, cmd Cmder) error
 	Close() error
 	Discard() error
-	Exec() ([]Cmder, error)
-	ExecContext(ctx context.Context) ([]Cmder, error)
+	Exec(ctx context.Context) ([]Cmder, error)
 }
 
 var _ Pipeliner = (*Pipeline)(nil)
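Every Pipeliner method now takes a context, and ExecContext is folded into Exec. A minimal usage sketch against the v8 API (rdb is assumed to be a *redis.Client created elsewhere):

ctx := context.Background()
cmds, err := rdb.Pipelined(ctx, func(pipe redis.Pipeliner) error {
	pipe.Set(ctx, "key", "value", 0) // queued, not yet sent
	pipe.Incr(ctx, "counter")
	return nil // returning nil triggers Exec(ctx)
})
if err != nil {
	// err is the error of the first failed command, if any
}
_ = cmds // one Cmder per queued command, in order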
|
||||
|
@ -54,14 +53,14 @@ func (c *Pipeline) init() {
 	c.statefulCmdable = c.Process
 }
 
-func (c *Pipeline) Do(args ...interface{}) *Cmd {
-	cmd := NewCmd(args...)
-	_ = c.Process(cmd)
+func (c *Pipeline) Do(ctx context.Context, args ...interface{}) *Cmd {
+	cmd := NewCmd(ctx, args...)
+	_ = c.Process(ctx, cmd)
 	return cmd
 }
 
 // Process queues the cmd for later execution.
-func (c *Pipeline) Process(cmd Cmder) error {
+func (c *Pipeline) Process(ctx context.Context, cmd Cmder) error {
 	c.mu.Lock()
 	c.cmds = append(c.cmds, cmd)
 	c.mu.Unlock()
@ -98,11 +97,7 @@ func (c *Pipeline) discard() error {
 //
 // Exec always returns list of commands and error of the first failed
 // command if any.
-func (c *Pipeline) Exec() ([]Cmder, error) {
-	return c.ExecContext(c.ctx)
-}
-
-func (c *Pipeline) ExecContext(ctx context.Context) ([]Cmder, error) {
+func (c *Pipeline) Exec(ctx context.Context) ([]Cmder, error) {
 	c.mu.Lock()
 	defer c.mu.Unlock()
 
@ -120,11 +115,11 @@ func (c *Pipeline) Exec(ctx context.Context) ([]Cmder, error) {
 	return cmds, c.exec(ctx, cmds)
 }
 
-func (c *Pipeline) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) {
+func (c *Pipeline) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
 	if err := fn(c); err != nil {
 		return nil, err
 	}
-	cmds, err := c.Exec()
+	cmds, err := c.Exec(ctx)
 	_ = c.Close()
 	return cmds, err
 }
@ -133,8 +128,8 @@ func (c *Pipeline) Pipeline() Pipeliner {
 	return c
 }
 
-func (c *Pipeline) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) {
-	return c.Pipelined(fn)
+func (c *Pipeline) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+	return c.Pipelined(ctx, fn)
 }
 
 func (c *Pipeline) TxPipeline() Pipeliner {
|
|
@ -8,12 +8,15 @@ import (
 	"sync"
 	"time"
 
-	"github.com/go-redis/redis/v7/internal"
-	"github.com/go-redis/redis/v7/internal/pool"
-	"github.com/go-redis/redis/v7/internal/proto"
+	"github.com/go-redis/redis/v8/internal"
+	"github.com/go-redis/redis/v8/internal/pool"
+	"github.com/go-redis/redis/v8/internal/proto"
 )
 
-const pingTimeout = 30 * time.Second
+const (
+	pingTimeout     = time.Second
+	chanSendTimeout = time.Minute
+)
 
 var errPingTimeout = errors.New("redis: ping timeout")
|
||||
|
||||
|
@ -26,7 +29,7 @@ var errPingTimeout = errors.New("redis: ping timeout")
|
|||
type PubSub struct {
|
||||
opt *Options
|
||||
|
||||
newConn func([]string) (*pool.Conn, error)
|
||||
newConn func(ctx context.Context, channels []string) (*pool.Conn, error)
|
||||
closeConn func(*pool.Conn) error
|
||||
|
||||
mu sync.Mutex
|
||||
|
@ -55,14 +58,14 @@ func (c *PubSub) init() {
|
|||
c.exit = make(chan struct{})
|
||||
}
|
||||
|
||||
func (c *PubSub) connWithLock() (*pool.Conn, error) {
|
||||
func (c *PubSub) connWithLock(ctx context.Context) (*pool.Conn, error) {
|
||||
c.mu.Lock()
|
||||
cn, err := c.conn(nil)
|
||||
cn, err := c.conn(ctx, nil)
|
||||
c.mu.Unlock()
|
||||
return cn, err
|
||||
}
|
||||
|
||||
func (c *PubSub) conn(newChannels []string) (*pool.Conn, error) {
|
||||
func (c *PubSub) conn(ctx context.Context, newChannels []string) (*pool.Conn, error) {
|
||||
if c.closed {
|
||||
return nil, pool.ErrClosed
|
||||
}
|
||||
|
@ -73,12 +76,12 @@ func (c *PubSub) conn(newChannels []string) (*pool.Conn, error) {
|
|||
channels := mapKeys(c.channels)
|
||||
channels = append(channels, newChannels...)
|
||||
|
||||
cn, err := c.newConn(channels)
|
||||
cn, err := c.newConn(ctx, channels)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := c.resubscribe(cn); err != nil {
|
||||
if err := c.resubscribe(ctx, cn); err != nil {
|
||||
_ = c.closeConn(cn)
|
||||
return nil, err
|
||||
}
|
||||
|
@ -93,15 +96,15 @@ func (c *PubSub) writeCmd(ctx context.Context, cn *pool.Conn, cmd Cmder) error {
|
|||
})
|
||||
}
|
||||
|
||||
func (c *PubSub) resubscribe(cn *pool.Conn) error {
|
||||
func (c *PubSub) resubscribe(ctx context.Context, cn *pool.Conn) error {
|
||||
var firstErr error
|
||||
|
||||
if len(c.channels) > 0 {
|
||||
firstErr = c._subscribe(cn, "subscribe", mapKeys(c.channels))
|
||||
firstErr = c._subscribe(ctx, cn, "subscribe", mapKeys(c.channels))
|
||||
}
|
||||
|
||||
if len(c.patterns) > 0 {
|
||||
err := c._subscribe(cn, "psubscribe", mapKeys(c.patterns))
|
||||
err := c._subscribe(ctx, cn, "psubscribe", mapKeys(c.patterns))
|
||||
if err != nil && firstErr == nil {
|
||||
firstErr = err
|
||||
}
|
||||
|
@ -121,35 +124,40 @@ func mapKeys(m map[string]struct{}) []string {
|
|||
}
|
||||
|
||||
func (c *PubSub) _subscribe(
|
||||
cn *pool.Conn, redisCmd string, channels []string,
|
||||
ctx context.Context, cn *pool.Conn, redisCmd string, channels []string,
|
||||
) error {
|
||||
args := make([]interface{}, 0, 1+len(channels))
|
||||
args = append(args, redisCmd)
|
||||
for _, channel := range channels {
|
||||
args = append(args, channel)
|
||||
}
|
||||
cmd := NewSliceCmd(args...)
|
||||
return c.writeCmd(context.TODO(), cn, cmd)
|
||||
cmd := NewSliceCmd(ctx, args...)
|
||||
return c.writeCmd(ctx, cn, cmd)
|
||||
}
|
||||
|
||||
func (c *PubSub) releaseConnWithLock(cn *pool.Conn, err error, allowTimeout bool) {
|
||||
func (c *PubSub) releaseConnWithLock(
|
||||
ctx context.Context,
|
||||
cn *pool.Conn,
|
||||
err error,
|
||||
allowTimeout bool,
|
||||
) {
|
||||
c.mu.Lock()
|
||||
c.releaseConn(cn, err, allowTimeout)
|
||||
c.releaseConn(ctx, cn, err, allowTimeout)
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
func (c *PubSub) releaseConn(cn *pool.Conn, err error, allowTimeout bool) {
|
||||
func (c *PubSub) releaseConn(ctx context.Context, cn *pool.Conn, err error, allowTimeout bool) {
|
||||
if c.cn != cn {
|
||||
return
|
||||
}
|
||||
if isBadConn(err, allowTimeout) {
|
||||
c.reconnect(err)
|
||||
c.reconnect(ctx, err)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *PubSub) reconnect(reason error) {
|
||||
func (c *PubSub) reconnect(ctx context.Context, reason error) {
|
||||
_ = c.closeTheCn(reason)
|
||||
_, _ = c.conn(nil)
|
||||
_, _ = c.conn(ctx, nil)
|
||||
}
|
||||
|
||||
func (c *PubSub) closeTheCn(reason error) error {
|
||||
|
@ -157,7 +165,7 @@ func (c *PubSub) closeTheCn(reason error) error {
|
|||
return nil
|
||||
}
|
||||
if !c.closed {
|
||||
internal.Logger.Printf("redis: discarding bad PubSub connection: %s", reason)
|
||||
internal.Logger.Printf(c.getContext(), "redis: discarding bad PubSub connection: %s", reason)
|
||||
}
|
||||
err := c.closeConn(c.cn)
|
||||
c.cn = nil
|
||||
|
@ -179,11 +187,11 @@ func (c *PubSub) Close() error {
|
|||
|
||||
// Subscribe the client to the specified channels. It returns
|
||||
// empty subscription if there are no channels.
|
||||
func (c *PubSub) Subscribe(channels ...string) error {
|
||||
func (c *PubSub) Subscribe(ctx context.Context, channels ...string) error {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
err := c.subscribe("subscribe", channels...)
|
||||
err := c.subscribe(ctx, "subscribe", channels...)
|
||||
if c.channels == nil {
|
||||
c.channels = make(map[string]struct{})
|
||||
}
|
||||
|
@ -195,11 +203,11 @@ func (c *PubSub) Subscribe(channels ...string) error {
|
|||
|
||||
// PSubscribe the client to the given patterns. It returns
|
||||
// empty subscription if there are no patterns.
|
||||
func (c *PubSub) PSubscribe(patterns ...string) error {
|
||||
func (c *PubSub) PSubscribe(ctx context.Context, patterns ...string) error {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
err := c.subscribe("psubscribe", patterns...)
|
||||
err := c.subscribe(ctx, "psubscribe", patterns...)
|
||||
if c.patterns == nil {
|
||||
c.patterns = make(map[string]struct{})
|
||||
}
|
||||
|
@ -211,55 +219,55 @@ func (c *PubSub) PSubscribe(patterns ...string) error {
|
|||
|
||||
// Unsubscribe the client from the given channels, or from all of
|
||||
// them if none is given.
|
||||
func (c *PubSub) Unsubscribe(channels ...string) error {
|
||||
func (c *PubSub) Unsubscribe(ctx context.Context, channels ...string) error {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
for _, channel := range channels {
|
||||
delete(c.channels, channel)
|
||||
}
|
||||
err := c.subscribe("unsubscribe", channels...)
|
||||
err := c.subscribe(ctx, "unsubscribe", channels...)
|
||||
return err
|
||||
}
|
||||
|
||||
// PUnsubscribe the client from the given patterns, or from all of
|
||||
// them if none is given.
|
||||
func (c *PubSub) PUnsubscribe(patterns ...string) error {
|
||||
func (c *PubSub) PUnsubscribe(ctx context.Context, patterns ...string) error {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
for _, pattern := range patterns {
|
||||
delete(c.patterns, pattern)
|
||||
}
|
||||
err := c.subscribe("punsubscribe", patterns...)
|
||||
err := c.subscribe(ctx, "punsubscribe", patterns...)
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *PubSub) subscribe(redisCmd string, channels ...string) error {
|
||||
cn, err := c.conn(channels)
|
||||
func (c *PubSub) subscribe(ctx context.Context, redisCmd string, channels ...string) error {
|
||||
cn, err := c.conn(ctx, channels)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = c._subscribe(cn, redisCmd, channels)
|
||||
c.releaseConn(cn, err, false)
|
||||
err = c._subscribe(ctx, cn, redisCmd, channels)
|
||||
c.releaseConn(ctx, cn, err, false)
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *PubSub) Ping(payload ...string) error {
|
||||
func (c *PubSub) Ping(ctx context.Context, payload ...string) error {
|
||||
args := []interface{}{"ping"}
|
||||
if len(payload) == 1 {
|
||||
args = append(args, payload[0])
|
||||
}
|
||||
cmd := NewCmd(args...)
|
||||
cmd := NewCmd(ctx, args...)
|
||||
|
||||
cn, err := c.connWithLock()
|
||||
cn, err := c.connWithLock(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = c.writeCmd(context.TODO(), cn, cmd)
|
||||
c.releaseConnWithLock(cn, err, false)
|
||||
err = c.writeCmd(ctx, cn, cmd)
|
||||
c.releaseConnWithLock(ctx, cn, err, false)
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -279,9 +287,10 @@ func (m *Subscription) String() string {
 
 // Message received as result of a PUBLISH command issued by another client.
 type Message struct {
-	Channel string
-	Pattern string
-	Payload string
+	Channel      string
+	Pattern      string
+	Payload      string
+	PayloadSlice []string
 }
 
 func (m *Message) String() string {
|
||||
|
@ -317,10 +326,24 @@ func (c *PubSub) newMessage(reply interface{}) (interface{}, error) {
 			Count:   int(reply[2].(int64)),
 		}, nil
 	case "message":
-		return &Message{
-			Channel: reply[1].(string),
-			Payload: reply[2].(string),
-		}, nil
+		switch payload := reply[2].(type) {
+		case string:
+			return &Message{
+				Channel: reply[1].(string),
+				Payload: payload,
+			}, nil
+		case []interface{}:
+			ss := make([]string, len(payload))
+			for i, s := range payload {
+				ss[i] = s.(string)
+			}
+			return &Message{
+				Channel:      reply[1].(string),
+				PayloadSlice: ss,
+			}, nil
+		default:
+			return nil, fmt.Errorf("redis: unsupported pubsub message payload: %T", payload)
+		}
 	case "pmessage":
 		return &Message{
 			Pattern: reply[1].(string),
|
||||
|
@ -342,21 +365,21 @@ func (c *PubSub) newMessage(reply interface{}) (interface{}, error) {
|
|||
// ReceiveTimeout acts like Receive but returns an error if message
|
||||
// is not received in time. This is low-level API and in most cases
|
||||
// Channel should be used instead.
|
||||
func (c *PubSub) ReceiveTimeout(timeout time.Duration) (interface{}, error) {
|
||||
func (c *PubSub) ReceiveTimeout(ctx context.Context, timeout time.Duration) (interface{}, error) {
|
||||
if c.cmd == nil {
|
||||
c.cmd = NewCmd()
|
||||
c.cmd = NewCmd(ctx)
|
||||
}
|
||||
|
||||
cn, err := c.connWithLock()
|
||||
cn, err := c.connWithLock(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = cn.WithReader(context.TODO(), timeout, func(rd *proto.Reader) error {
|
||||
err = cn.WithReader(ctx, timeout, func(rd *proto.Reader) error {
|
||||
return c.cmd.readReply(rd)
|
||||
})
|
||||
|
||||
c.releaseConnWithLock(cn, err, timeout > 0)
|
||||
c.releaseConnWithLock(ctx, cn, err, timeout > 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -367,16 +390,16 @@ func (c *PubSub) ReceiveTimeout(timeout time.Duration) (interface{}, error) {
|
|||
// Receive returns a message as a Subscription, Message, Pong or error.
|
||||
// See PubSub example for details. This is low-level API and in most cases
|
||||
// Channel should be used instead.
|
||||
func (c *PubSub) Receive() (interface{}, error) {
|
||||
return c.ReceiveTimeout(0)
|
||||
func (c *PubSub) Receive(ctx context.Context) (interface{}, error) {
|
||||
return c.ReceiveTimeout(ctx, 0)
|
||||
}
|
||||
|
||||
// ReceiveMessage returns a Message or error ignoring Subscription and Pong
|
||||
// messages. This is low-level API and in most cases Channel should be used
|
||||
// instead.
|
||||
func (c *PubSub) ReceiveMessage() (*Message, error) {
|
||||
func (c *PubSub) ReceiveMessage(ctx context.Context) (*Message, error) {
|
||||
for {
|
||||
msg, err := c.Receive()
|
||||
msg, err := c.Receive(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -429,7 +452,7 @@ func (c *PubSub) ChannelSize(size int) <-chan *Message {
|
|||
// reconnections.
|
||||
//
|
||||
// ChannelWithSubscriptions can not be used together with Channel or ChannelSize.
|
||||
func (c *PubSub) ChannelWithSubscriptions(size int) <-chan interface{} {
|
||||
func (c *PubSub) ChannelWithSubscriptions(ctx context.Context, size int) <-chan interface{} {
|
||||
c.chOnce.Do(func() {
|
||||
c.initPing()
|
||||
c.initAllChan(size)
|
||||
|
@ -445,10 +468,18 @@ func (c *PubSub) ChannelWithSubscriptions(size int) <-chan interface{} {
|
|||
return c.allCh
|
||||
}
|
||||
|
||||
func (c *PubSub) getContext() context.Context {
|
||||
if c.cmd != nil {
|
||||
return c.cmd.ctx
|
||||
}
|
||||
return context.Background()
|
||||
}
|
||||
|
||||
func (c *PubSub) initPing() {
|
||||
ctx := context.TODO()
|
||||
c.ping = make(chan struct{}, 1)
|
||||
go func() {
|
||||
timer := time.NewTimer(pingTimeout)
|
||||
timer := time.NewTimer(time.Minute)
|
||||
timer.Stop()
|
||||
|
||||
healthy := true
|
||||
|
@ -461,7 +492,7 @@ func (c *PubSub) initPing() {
|
|||
<-timer.C
|
||||
}
|
||||
case <-timer.C:
|
||||
pingErr := c.Ping()
|
||||
pingErr := c.Ping(ctx)
|
||||
if healthy {
|
||||
healthy = false
|
||||
} else {
|
||||
|
@ -469,7 +500,7 @@ func (c *PubSub) initPing() {
|
|||
pingErr = errPingTimeout
|
||||
}
|
||||
c.mu.Lock()
|
||||
c.reconnect(pingErr)
|
||||
c.reconnect(ctx, pingErr)
|
||||
healthy = true
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
@ -482,21 +513,22 @@ func (c *PubSub) initPing() {
|
|||
|
||||
// initMsgChan must be in sync with initAllChan.
|
||||
func (c *PubSub) initMsgChan(size int) {
|
||||
ctx := context.TODO()
|
||||
c.msgCh = make(chan *Message, size)
|
||||
go func() {
|
||||
timer := time.NewTimer(pingTimeout)
|
||||
timer := time.NewTimer(time.Minute)
|
||||
timer.Stop()
|
||||
|
||||
var errCount int
|
||||
for {
|
||||
msg, err := c.Receive()
|
||||
msg, err := c.Receive(ctx)
|
||||
if err != nil {
|
||||
if err == pool.ErrClosed {
|
||||
close(c.msgCh)
|
||||
return
|
||||
}
|
||||
if errCount > 0 {
|
||||
time.Sleep(c.retryBackoff(errCount))
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
errCount++
|
||||
continue
|
||||
|
@ -516,7 +548,7 @@ func (c *PubSub) initMsgChan(size int) {
|
|||
case *Pong:
|
||||
// Ignore.
|
||||
case *Message:
|
||||
timer.Reset(pingTimeout)
|
||||
timer.Reset(chanSendTimeout)
|
||||
select {
|
||||
case c.msgCh <- msg:
|
||||
if !timer.Stop() {
|
||||
|
@ -524,10 +556,14 @@ func (c *PubSub) initMsgChan(size int) {
|
|||
}
|
||||
case <-timer.C:
|
||||
internal.Logger.Printf(
|
||||
"redis: %s channel is full for %s (message is dropped)", c, pingTimeout)
|
||||
c.getContext(),
|
||||
"redis: %s channel is full for %s (message is dropped)",
|
||||
c,
|
||||
chanSendTimeout,
|
||||
)
|
||||
}
|
||||
default:
|
||||
internal.Logger.Printf("redis: unknown message type: %T", msg)
|
||||
internal.Logger.Printf(c.getContext(), "redis: unknown message type: %T", msg)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
@ -535,6 +571,7 @@ func (c *PubSub) initMsgChan(size int) {
|
|||
|
||||
// initAllChan must be in sync with initMsgChan.
|
||||
func (c *PubSub) initAllChan(size int) {
|
||||
ctx := context.TODO()
|
||||
c.allCh = make(chan interface{}, size)
|
||||
go func() {
|
||||
timer := time.NewTimer(pingTimeout)
|
||||
|
@ -542,14 +579,14 @@ func (c *PubSub) initAllChan(size int) {
|
|||
|
||||
var errCount int
|
||||
for {
|
||||
msg, err := c.Receive()
|
||||
msg, err := c.Receive(ctx)
|
||||
if err != nil {
|
||||
if err == pool.ErrClosed {
|
||||
close(c.allCh)
|
||||
return
|
||||
}
|
||||
if errCount > 0 {
|
||||
time.Sleep(c.retryBackoff(errCount))
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
errCount++
|
||||
continue
|
||||
|
@ -571,7 +608,7 @@ func (c *PubSub) initAllChan(size int) {
|
|||
case *Message:
|
||||
c.sendMessage(msg, timer)
|
||||
default:
|
||||
internal.Logger.Printf("redis: unknown message type: %T", msg)
|
||||
internal.Logger.Printf(c.getContext(), "redis: unknown message type: %T", msg)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
@ -586,10 +623,7 @@ func (c *PubSub) sendMessage(msg interface{}, timer *time.Timer) {
 		}
 	case <-timer.C:
 		internal.Logger.Printf(
+			c.getContext(),
 			"redis: %s channel is full for %s (message is dropped)", c, pingTimeout)
 	}
 }
 
 func (c *PubSub) retryBackoff(attempt int) time.Duration {
 	return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
 }
|
336
vendor/github.com/go-redis/redis/v7/redis.go → vendor/github.com/go-redis/redis/v8/redis.go
generated
vendored
|
@ -2,19 +2,22 @@ package redis
 
 import (
 	"context"
+	"errors"
 	"fmt"
-	"log"
+	"sync/atomic"
 	"time"
 
-	"github.com/go-redis/redis/v7/internal"
-	"github.com/go-redis/redis/v7/internal/pool"
-	"github.com/go-redis/redis/v7/internal/proto"
+	"github.com/go-redis/redis/v8/internal"
+	"github.com/go-redis/redis/v8/internal/pool"
+	"github.com/go-redis/redis/v8/internal/proto"
+	"go.opentelemetry.io/otel/label"
+	"go.opentelemetry.io/otel/trace"
 )
 
 // Nil reply returned by Redis when key does not exist.
 const Nil = proto.Nil
 
-func SetLogger(logger *log.Logger) {
+func SetLogger(logger internal.Logging) {
 	internal.Logger = logger
 }
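SetLogger now accepts the internal.Logging interface rather than a *log.Logger; judging by the Printf call sites in this diff, its single method takes a leading context. Bridging back to the standard library logger is a few lines (a sketch under that assumption):

// stdLogger adapts *log.Logger to go-redis v8's context-aware logging interface.
type stdLogger struct{ l *log.Logger }

func (s stdLogger) Printf(ctx context.Context, format string, v ...interface{}) {
	s.l.Printf(format, v...) // ctx is available for request-scoped fields if wanted
}

func init() {
	redis.SetLogger(stdLogger{l: log.New(os.Stderr, "redis: ", log.LstdFlags)})
}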
|
||||
|
||||
|
@ -49,92 +52,88 @@ func (hs *hooks) AddHook(hook Hook) {
 func (hs hooks) process(
 	ctx context.Context, cmd Cmder, fn func(context.Context, Cmder) error,
 ) error {
-	ctx, err := hs.beforeProcess(ctx, cmd)
-	if err != nil {
+	if len(hs.hooks) == 0 {
+		err := hs.withContext(ctx, func() error {
+			return fn(ctx, cmd)
+		})
 		cmd.SetErr(err)
 		return err
 	}
 
-	cmdErr := fn(ctx, cmd)
+	var hookIndex int
+	var retErr error
 
-	if err := hs.afterProcess(ctx, cmd); err != nil {
-		cmd.SetErr(err)
-		return err
-	}
-
-	return cmdErr
-}
-
-func (hs hooks) beforeProcess(ctx context.Context, cmd Cmder) (context.Context, error) {
-	for _, h := range hs.hooks {
-		var err error
-		ctx, err = h.BeforeProcess(ctx, cmd)
-		if err != nil {
-			return nil, err
+	for ; hookIndex < len(hs.hooks) && retErr == nil; hookIndex++ {
+		ctx, retErr = hs.hooks[hookIndex].BeforeProcess(ctx, cmd)
+		if retErr != nil {
+			cmd.SetErr(retErr)
 		}
 	}
-	return ctx, nil
-}
 
-func (hs hooks) afterProcess(ctx context.Context, cmd Cmder) error {
-	var firstErr error
-	for _, h := range hs.hooks {
-		err := h.AfterProcess(ctx, cmd)
-		if err != nil && firstErr == nil {
-			firstErr = err
+	if retErr == nil {
+		retErr = hs.withContext(ctx, func() error {
+			return fn(ctx, cmd)
+		})
+		cmd.SetErr(retErr)
+	}
+
+	for hookIndex--; hookIndex >= 0; hookIndex-- {
+		if err := hs.hooks[hookIndex].AfterProcess(ctx, cmd); err != nil {
+			retErr = err
+			cmd.SetErr(retErr)
 		}
 	}
-	return firstErr
+
+	return retErr
 }
 
 func (hs hooks) processPipeline(
 	ctx context.Context, cmds []Cmder, fn func(context.Context, []Cmder) error,
 ) error {
-	ctx, err := hs.beforeProcessPipeline(ctx, cmds)
-	if err != nil {
-		setCmdsErr(cmds, err)
+	if len(hs.hooks) == 0 {
+		err := hs.withContext(ctx, func() error {
+			return fn(ctx, cmds)
+		})
 		return err
 	}
 
-	cmdsErr := fn(ctx, cmds)
+	var hookIndex int
+	var retErr error
 
-	if err := hs.afterProcessPipeline(ctx, cmds); err != nil {
-		setCmdsErr(cmds, err)
-		return err
-	}
-
-	return cmdsErr
-}
-
-func (hs hooks) beforeProcessPipeline(ctx context.Context, cmds []Cmder) (context.Context, error) {
-	for _, h := range hs.hooks {
-		var err error
-		ctx, err = h.BeforeProcessPipeline(ctx, cmds)
-		if err != nil {
-			return nil, err
+	for ; hookIndex < len(hs.hooks) && retErr == nil; hookIndex++ {
+		ctx, retErr = hs.hooks[hookIndex].BeforeProcessPipeline(ctx, cmds)
+		if retErr != nil {
+			setCmdsErr(cmds, retErr)
 		}
 	}
-	return ctx, nil
-}
 
-func (hs hooks) afterProcessPipeline(ctx context.Context, cmds []Cmder) error {
-	var firstErr error
-	for _, h := range hs.hooks {
-		err := h.AfterProcessPipeline(ctx, cmds)
-		if err != nil && firstErr == nil {
-			firstErr = err
+	if retErr == nil {
+		retErr = hs.withContext(ctx, func() error {
+			return fn(ctx, cmds)
+		})
+	}
+
+	for hookIndex--; hookIndex >= 0; hookIndex-- {
+		if err := hs.hooks[hookIndex].AfterProcessPipeline(ctx, cmds); err != nil {
+			retErr = err
+			setCmdsErr(cmds, retErr)
 		}
 	}
-	return firstErr
+
+	return retErr
 }
 
 func (hs hooks) processTxPipeline(
 	ctx context.Context, cmds []Cmder, fn func(context.Context, []Cmder) error,
 ) error {
-	cmds = wrapMultiExec(cmds)
+	cmds = wrapMultiExec(ctx, cmds)
 	return hs.processPipeline(ctx, cmds, fn)
 }
 
+func (hs hooks) withContext(ctx context.Context, fn func() error) error {
+	return fn()
+}
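The rewrite above runs BeforeProcess hooks forward, then the command, then AfterProcess hooks in reverse, continuing through errors. A sketch of a timing hook under that contract (the four-method Hook shape matches the calls in this file):

type timingHook struct{}

type startKey struct{}

func (timingHook) BeforeProcess(ctx context.Context, cmd redis.Cmder) (context.Context, error) {
	return context.WithValue(ctx, startKey{}, time.Now()), nil
}

func (timingHook) AfterProcess(ctx context.Context, cmd redis.Cmder) error {
	if start, ok := ctx.Value(startKey{}).(time.Time); ok {
		log.Printf("%s took %s", cmd.Name(), time.Since(start))
	}
	return nil
}

func (timingHook) BeforeProcessPipeline(ctx context.Context, cmds []redis.Cmder) (context.Context, error) {
	return ctx, nil
}

func (timingHook) AfterProcessPipeline(ctx context.Context, cmds []redis.Cmder) error {
	return nil
}

// rdb.AddHook(timingHook{}) registers it on a client.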
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
type baseClient struct {
|
||||
|
@ -201,6 +200,7 @@ func (c *baseClient) getConn(ctx context.Context) (*pool.Conn, error) {
|
|||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return cn, nil
|
||||
}
|
||||
|
||||
|
@ -210,10 +210,16 @@ func (c *baseClient) _getConn(ctx context.Context) (*pool.Conn, error) {
|
|||
return nil, err
|
||||
}
|
||||
|
||||
err = c.initConn(ctx, cn)
|
||||
if cn.Inited {
|
||||
return cn, nil
|
||||
}
|
||||
|
||||
err = internal.WithSpan(ctx, "redis.init_conn", func(ctx context.Context, span trace.Span) error {
|
||||
return c.initConn(ctx, cn)
|
||||
})
|
||||
if err != nil {
|
||||
c.connPool.Remove(cn, err)
|
||||
if err := internal.Unwrap(err); err != nil {
|
||||
c.connPool.Remove(ctx, cn, err)
|
||||
if err := errors.Unwrap(err); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return nil, err
|
||||
|
@ -235,25 +241,24 @@ func (c *baseClient) initConn(ctx context.Context, cn *pool.Conn) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
connPool := pool.NewSingleConnPool(nil)
|
||||
connPool.SetConn(cn)
|
||||
connPool := pool.NewSingleConnPool(c.connPool, cn)
|
||||
conn := newConn(ctx, c.opt, connPool)
|
||||
|
||||
_, err := conn.Pipelined(func(pipe Pipeliner) error {
|
||||
_, err := conn.Pipelined(ctx, func(pipe Pipeliner) error {
|
||||
if c.opt.Password != "" {
|
||||
if c.opt.Username != "" {
|
||||
pipe.AuthACL(c.opt.Username, c.opt.Password)
|
||||
pipe.AuthACL(ctx, c.opt.Username, c.opt.Password)
|
||||
} else {
|
||||
pipe.Auth(c.opt.Password)
|
||||
pipe.Auth(ctx, c.opt.Password)
|
||||
}
|
||||
}
|
||||
|
||||
if c.opt.DB > 0 {
|
||||
pipe.Select(c.opt.DB)
|
||||
pipe.Select(ctx, c.opt.DB)
|
||||
}
|
||||
|
||||
if c.opt.readOnly {
|
||||
pipe.ReadOnly()
|
||||
pipe.ReadOnly(ctx)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
@ -263,76 +268,107 @@ func (c *baseClient) initConn(ctx context.Context, cn *pool.Conn) error {
|
|||
}
|
||||
|
||||
if c.opt.OnConnect != nil {
|
||||
return c.opt.OnConnect(conn)
|
||||
return c.opt.OnConnect(ctx, conn)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
-func (c *baseClient) releaseConn(cn *pool.Conn, err error) {
+func (c *baseClient) releaseConn(ctx context.Context, cn *pool.Conn, err error) {
 	if c.opt.Limiter != nil {
 		c.opt.Limiter.ReportResult(err)
 	}
 
 	if isBadConn(err, false) {
-		c.connPool.Remove(cn, err)
+		c.connPool.Remove(ctx, cn, err)
 	} else {
-		c.connPool.Put(cn)
+		c.connPool.Put(ctx, cn)
 	}
 }
 
 func (c *baseClient) withConn(
 	ctx context.Context, fn func(context.Context, *pool.Conn) error,
 ) error {
-	cn, err := c.getConn(ctx)
-	if err != nil {
-		return err
-	}
-	defer func() {
-		c.releaseConn(cn, err)
-	}()
-
-	err = fn(ctx, cn)
-	return err
+	return internal.WithSpan(ctx, "redis.with_conn", func(ctx context.Context, span trace.Span) error {
+		cn, err := c.getConn(ctx)
+		if err != nil {
+			return err
+		}
+
+		if span.IsRecording() {
+			if remoteAddr := cn.RemoteAddr(); remoteAddr != nil {
+				span.SetAttributes(label.String("net.peer.ip", remoteAddr.String()))
+			}
+		}
+
+		defer func() {
+			c.releaseConn(ctx, cn, err)
+		}()
+
+		done := ctx.Done()
+		if done == nil {
+			err = fn(ctx, cn)
+			return err
+		}
+
+		errc := make(chan error, 1)
+		go func() { errc <- fn(ctx, cn) }()
+
+		select {
+		case <-done:
+			_ = cn.Close()
+			// Wait for the goroutine to finish and send something.
+			<-errc
+
+			err = ctx.Err()
+			return err
+		case err = <-errc:
+			return err
+		}
+	})
}
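The select above is a general pattern: when the context can be cancelled, run the blocking work in a goroutine and close the connection on cancellation so the work unblocks. Extracted as a standalone sketch (runWithCancel is a hypothetical name, not part of this patch):

func runWithCancel(ctx context.Context, conn net.Conn, fn func() error) error {
	if ctx.Done() == nil {
		return fn() // nothing can cancel us; skip the goroutine entirely
	}

	errc := make(chan error, 1)
	go func() { errc <- fn() }()

	select {
	case <-ctx.Done():
		_ = conn.Close() // unblocks any read/write inside fn
		<-errc           // wait for the goroutine so nothing leaks
		return ctx.Err()
	case err := <-errc:
		return err
	}
}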
|
||||
|
||||
 func (c *baseClient) process(ctx context.Context, cmd Cmder) error {
 	err := c._process(ctx, cmd)
 	if err != nil {
 		cmd.SetErr(err)
 		return err
 	}
 	return nil
 }
 
 func (c *baseClient) _process(ctx context.Context, cmd Cmder) error {
 	var lastErr error
 	for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
-		if attempt > 0 {
-			if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
-				return err
-			}
-		}
+		attempt := attempt
 
-		retryTimeout := true
-		lastErr = c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
-			err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
-				return writeCmd(wr, cmd)
+		var retry bool
+		err := internal.WithSpan(ctx, "redis.process", func(ctx context.Context, span trace.Span) error {
+			if attempt > 0 {
+				if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+					return err
+				}
+			}
+
+			retryTimeout := uint32(1)
+			err := c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
+				err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
+					return writeCmd(wr, cmd)
+				})
+				if err != nil {
+					return err
+				}
+
+				err = cn.WithReader(ctx, c.cmdTimeout(cmd), cmd.readReply)
+				if err != nil {
+					if cmd.readTimeout() == nil {
+						atomic.StoreUint32(&retryTimeout, 1)
+					}
+					return err
+				}
+
+				return nil
 			})
-			if err != nil {
-				return err
+			if err == nil {
+				return nil
 			}
-
-			err = cn.WithReader(ctx, c.cmdTimeout(cmd), cmd.readReply)
-			if err != nil {
-				retryTimeout = cmd.readTimeout() == nil
-				return err
-			}
-
-			return nil
+			retry = shouldRetry(err, atomic.LoadUint32(&retryTimeout) == 1)
+			return err
 		})
-		if lastErr == nil || !isRetryableError(lastErr, retryTimeout) {
-			return lastErr
+		if err == nil || !retry {
+			return err
 		}
+		lastErr = err
 	}
 	return lastErr
 }
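retryBackoff delegates to internal.RetryBackoff, which grows the delay exponentially between the configured min and max with jitter. The exact internal formula is not shown in this diff; an illustrative equivalent (names are hypothetical; needs "math/rand" and "time"):

// backoffWithJitter is a sketch of exponential backoff with full jitter;
// the vendored internal.RetryBackoff may differ in details.
func backoffWithJitter(attempt int, min, max time.Duration) time.Duration {
	d := min << uint(attempt)
	if d < min || d > max { // handle overflow and the cap
		d = max
	}
	if d <= 0 {
		return 0
	}
	return time.Duration(rand.Int63n(int64(d))) // jitter in [0, d)
}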
|
||||
|
@ -411,7 +447,7 @@ func (c *baseClient) _generalProcessPipeline(
|
|||
canRetry, err = p(ctx, cn, cmds)
|
||||
return err
|
||||
})
|
||||
if lastErr == nil || !canRetry || !isRetryableError(lastErr, true) {
|
||||
if lastErr == nil || !canRetry || !shouldRetry(lastErr, true) {
|
||||
return lastErr
|
||||
}
|
||||
}
|
||||
|
@ -437,6 +473,7 @@ func (c *baseClient) pipelineProcessCmds(
|
|||
func pipelineReadCmds(rd *proto.Reader, cmds []Cmder) error {
|
||||
for _, cmd := range cmds {
|
||||
err := cmd.readReply(rd)
|
||||
cmd.SetErr(err)
|
||||
if err != nil && !isRedisError(err) {
|
||||
return err
|
||||
}
|
||||
|
@ -469,15 +506,15 @@ func (c *baseClient) txPipelineProcessCmds(
|
|||
return false, err
|
||||
}
|
||||
|
||||
-func wrapMultiExec(cmds []Cmder) []Cmder {
+func wrapMultiExec(ctx context.Context, cmds []Cmder) []Cmder {
 	if len(cmds) == 0 {
 		panic("not reached")
 	}
-	cmds = append(cmds, make([]Cmder, 2)...)
-	copy(cmds[1:], cmds[:len(cmds)-2])
-	cmds[0] = NewStatusCmd("multi")
-	cmds[len(cmds)-1] = NewSliceCmd("exec")
-	return cmds
+	cmdCopy := make([]Cmder, len(cmds)+2)
+	cmdCopy[0] = NewStatusCmd(ctx, "multi")
+	copy(cmdCopy[1:], cmds)
+	cmdCopy[len(cmdCopy)-1] = NewSliceCmd(ctx, "exec")
+	return cmdCopy
 }
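With the copy-based wrapMultiExec, the caller's slice is no longer mutated in place. From the user's side this machinery is reached via TxPipelined (rdb and ctx assumed from earlier sketches):

cmds, err := rdb.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
	pipe.Incr(ctx, "a")
	pipe.Incr(ctx, "b")
	return nil
})
// On the wire: MULTI, INCR a, INCR b, EXEC — the MULTI/EXEC frame
// comes from wrapMultiExec above.
_, _ = cmds, err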
|
||||
|
||||
func txPipelineReadQueued(rd *proto.Reader, statusCmd *StatusCmd, cmds []Cmder) error {
|
||||
|
@ -565,26 +602,18 @@ func (c *Client) WithContext(ctx context.Context) *Client {
|
|||
return clone
|
||||
}
|
||||
|
||||
-func (c *Client) Conn() *Conn {
-	return newConn(c.ctx, c.opt, pool.NewSingleConnPool(c.connPool))
+func (c *Client) Conn(ctx context.Context) *Conn {
+	return newConn(ctx, c.opt, pool.NewStickyConnPool(c.connPool))
 }
 
 // Do creates a Cmd from the args and processes the cmd.
-func (c *Client) Do(args ...interface{}) *Cmd {
-	return c.DoContext(c.ctx, args...)
-}
-
-func (c *Client) DoContext(ctx context.Context, args ...interface{}) *Cmd {
-	cmd := NewCmd(args...)
-	_ = c.ProcessContext(ctx, cmd)
+func (c *Client) Do(ctx context.Context, args ...interface{}) *Cmd {
+	cmd := NewCmd(ctx, args...)
+	_ = c.Process(ctx, cmd)
 	return cmd
 }
 
-func (c *Client) Process(cmd Cmder) error {
-	return c.ProcessContext(c.ctx, cmd)
-}
-
-func (c *Client) ProcessContext(ctx context.Context, cmd Cmder) error {
+func (c *Client) Process(ctx context.Context, cmd Cmder) error {
 	return c.hooks.process(ctx, cmd, c.baseClient.process)
 }
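DoContext and ProcessContext are gone; Do and Process take the context directly. A typical v8 call site (rdb is a *redis.Client):

ctx := context.Background()
val, err := rdb.Do(ctx, "get", "key").Result()
if err == redis.Nil {
	// key does not exist
}
_ = val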
|
||||
|
||||
|
@ -609,8 +638,8 @@ func (c *Client) PoolStats() *PoolStats {
|
|||
return (*PoolStats)(stats)
|
||||
}
|
||||
|
||||
func (c *Client) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) {
|
||||
return c.Pipeline().Pipelined(fn)
|
||||
func (c *Client) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
|
||||
return c.Pipeline().Pipelined(ctx, fn)
|
||||
}
|
||||
|
||||
func (c *Client) Pipeline() Pipeliner {
|
||||
|
@ -622,8 +651,8 @@ func (c *Client) Pipeline() Pipeliner {
|
|||
return &pipe
|
||||
}
|
||||
|
||||
func (c *Client) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) {
|
||||
return c.TxPipeline().Pipelined(fn)
|
||||
func (c *Client) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
|
||||
return c.TxPipeline().Pipelined(ctx, fn)
|
||||
}
|
||||
|
||||
// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
|
||||
|
@ -640,8 +669,8 @@ func (c *Client) pubSub() *PubSub {
|
|||
pubsub := &PubSub{
|
||||
opt: c.opt,
|
||||
|
||||
newConn: func(channels []string) (*pool.Conn, error) {
|
||||
return c.newConn(context.TODO())
|
||||
newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) {
|
||||
return c.newConn(ctx)
|
||||
},
|
||||
closeConn: c.connPool.CloseConn,
|
||||
}
|
||||
|
@ -675,20 +704,20 @@ func (c *Client) pubSub() *PubSub {
|
|||
// }
|
||||
//
|
||||
// ch := sub.Channel()
|
||||
-func (c *Client) Subscribe(channels ...string) *PubSub {
+func (c *Client) Subscribe(ctx context.Context, channels ...string) *PubSub {
 	pubsub := c.pubSub()
 	if len(channels) > 0 {
-		_ = pubsub.Subscribe(channels...)
+		_ = pubsub.Subscribe(ctx, channels...)
 	}
 	return pubsub
 }
 
 // PSubscribe subscribes the client to the given patterns.
 // Patterns can be omitted to create empty subscription.
-func (c *Client) PSubscribe(channels ...string) *PubSub {
+func (c *Client) PSubscribe(ctx context.Context, channels ...string) *PubSub {
 	pubsub := c.pubSub()
 	if len(channels) > 0 {
-		_ = pubsub.PSubscribe(channels...)
+		_ = pubsub.PSubscribe(ctx, channels...)
 	}
 	return pubsub
 }
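Subscribe and PSubscribe now require a context as well. A short consumption sketch using the Channel API (rdb is a *redis.Client; channel name is a placeholder):

ctx := context.Background()
sub := rdb.Subscribe(ctx, "mychannel")
defer sub.Close()

for msg := range sub.Channel() {
	fmt.Println(msg.Channel, msg.Payload)
}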
|
||||
|
@ -699,6 +728,7 @@ type conn struct {
|
|||
baseClient
|
||||
cmdable
|
||||
statefulCmdable
|
||||
hooks // TODO: inherit hooks
|
||||
}
|
||||
|
||||
// Conn is like Client, but its pool contains single connection.
|
||||
|
@ -722,16 +752,20 @@ func newConn(ctx context.Context, opt *Options, connPool pool.Pooler) *Conn {
|
|||
return &c
|
||||
}
|
||||
|
||||
func (c *Conn) Process(cmd Cmder) error {
|
||||
return c.ProcessContext(c.ctx, cmd)
|
||||
func (c *Conn) Process(ctx context.Context, cmd Cmder) error {
|
||||
return c.hooks.process(ctx, cmd, c.baseClient.process)
|
||||
}
|
||||
|
||||
func (c *Conn) ProcessContext(ctx context.Context, cmd Cmder) error {
|
||||
return c.baseClient.process(ctx, cmd)
|
||||
func (c *Conn) processPipeline(ctx context.Context, cmds []Cmder) error {
|
||||
return c.hooks.processPipeline(ctx, cmds, c.baseClient.processPipeline)
|
||||
}
|
||||
|
||||
func (c *Conn) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) {
|
||||
return c.Pipeline().Pipelined(fn)
|
||||
func (c *Conn) processTxPipeline(ctx context.Context, cmds []Cmder) error {
|
||||
return c.hooks.processTxPipeline(ctx, cmds, c.baseClient.processTxPipeline)
|
||||
}
|
||||
|
||||
func (c *Conn) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
|
||||
return c.Pipeline().Pipelined(ctx, fn)
|
||||
}
|
||||
|
||||
func (c *Conn) Pipeline() Pipeliner {
|
||||
|
@ -743,8 +777,8 @@ func (c *Conn) Pipeline() Pipeliner {
|
|||
return &pipe
|
||||
}
|
||||
|
||||
func (c *Conn) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) {
|
||||
return c.TxPipeline().Pipelined(fn)
|
||||
func (c *Conn) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
|
||||
return c.TxPipeline().Pipelined(ctx, fn)
|
||||
}
|
||||
|
||||
// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
|
|
@ -2,7 +2,7 @@ package redis
 
 import "time"
 
-// NewCmdResult returns a Cmd initialised with val and err for testing
+// NewCmdResult returns a Cmd initialised with val and err for testing.
 func NewCmdResult(val interface{}, err error) *Cmd {
 	var cmd Cmd
 	cmd.val = val
@ -10,7 +10,7 @@ func NewCmdResult(val interface{}, err error) *Cmd {
 	return &cmd
 }
 
-// NewSliceResult returns a SliceCmd initialised with val and err for testing
+// NewSliceResult returns a SliceCmd initialised with val and err for testing.
 func NewSliceResult(val []interface{}, err error) *SliceCmd {
 	var cmd SliceCmd
 	cmd.val = val
@ -18,7 +18,7 @@ func NewSliceResult(val []interface{}, err error) *SliceCmd {
 	return &cmd
 }
 
-// NewStatusResult returns a StatusCmd initialised with val and err for testing
+// NewStatusResult returns a StatusCmd initialised with val and err for testing.
 func NewStatusResult(val string, err error) *StatusCmd {
 	var cmd StatusCmd
 	cmd.val = val
@ -26,7 +26,7 @@ func NewStatusResult(val string, err error) *StatusCmd {
 	return &cmd
 }
 
-// NewIntResult returns an IntCmd initialised with val and err for testing
+// NewIntResult returns an IntCmd initialised with val and err for testing.
 func NewIntResult(val int64, err error) *IntCmd {
 	var cmd IntCmd
 	cmd.val = val
@ -34,7 +34,7 @@ func NewIntResult(val int64, err error) *IntCmd {
 	return &cmd
 }
 
-// NewDurationResult returns a DurationCmd initialised with val and err for testing
+// NewDurationResult returns a DurationCmd initialised with val and err for testing.
 func NewDurationResult(val time.Duration, err error) *DurationCmd {
 	var cmd DurationCmd
 	cmd.val = val
@ -42,7 +42,7 @@ func NewDurationResult(val time.Duration, err error) *DurationCmd {
 	return &cmd
 }
 
-// NewBoolResult returns a BoolCmd initialised with val and err for testing
+// NewBoolResult returns a BoolCmd initialised with val and err for testing.
 func NewBoolResult(val bool, err error) *BoolCmd {
 	var cmd BoolCmd
 	cmd.val = val
@ -50,7 +50,7 @@ func NewBoolResult(val bool, err error) *BoolCmd {
 	return &cmd
 }
 
-// NewStringResult returns a StringCmd initialised with val and err for testing
+// NewStringResult returns a StringCmd initialised with val and err for testing.
 func NewStringResult(val string, err error) *StringCmd {
 	var cmd StringCmd
 	cmd.val = val
@ -58,7 +58,7 @@ func NewStringResult(val string, err error) *StringCmd {
 	return &cmd
 }
 
-// NewFloatResult returns a FloatCmd initialised with val and err for testing
+// NewFloatResult returns a FloatCmd initialised with val and err for testing.
 func NewFloatResult(val float64, err error) *FloatCmd {
 	var cmd FloatCmd
 	cmd.val = val
@ -66,7 +66,7 @@ func NewFloatResult(val float64, err error) *FloatCmd {
 	return &cmd
 }
 
-// NewStringSliceResult returns a StringSliceCmd initialised with val and err for testing
+// NewStringSliceResult returns a StringSliceCmd initialised with val and err for testing.
 func NewStringSliceResult(val []string, err error) *StringSliceCmd {
 	var cmd StringSliceCmd
 	cmd.val = val
@ -74,7 +74,7 @@ func NewStringSliceResult(val []string, err error) *StringSliceCmd {
 	return &cmd
 }
 
-// NewBoolSliceResult returns a BoolSliceCmd initialised with val and err for testing
+// NewBoolSliceResult returns a BoolSliceCmd initialised with val and err for testing.
 func NewBoolSliceResult(val []bool, err error) *BoolSliceCmd {
 	var cmd BoolSliceCmd
 	cmd.val = val
@ -82,7 +82,7 @@ func NewBoolSliceResult(val []bool, err error) *BoolSliceCmd {
 	return &cmd
 }
 
-// NewStringStringMapResult returns a StringStringMapCmd initialised with val and err for testing
+// NewStringStringMapResult returns a StringStringMapCmd initialised with val and err for testing.
 func NewStringStringMapResult(val map[string]string, err error) *StringStringMapCmd {
 	var cmd StringStringMapCmd
 	cmd.val = val
@ -90,7 +90,7 @@ func NewStringStringMapResult(val map[string]string, err error) *StringStringMap
 	return &cmd
 }
 
-// NewStringIntMapCmdResult returns a StringIntMapCmd initialised with val and err for testing
+// NewStringIntMapCmdResult returns a StringIntMapCmd initialised with val and err for testing.
 func NewStringIntMapCmdResult(val map[string]int64, err error) *StringIntMapCmd {
 	var cmd StringIntMapCmd
 	cmd.val = val
@ -98,7 +98,7 @@ func NewStringIntMapCmdResult(val map[string]int64, err error) *StringIntMapCmd
 	return &cmd
 }
 
-// NewTimeCmdResult returns a TimeCmd initialised with val and err for testing
+// NewTimeCmdResult returns a TimeCmd initialised with val and err for testing.
 func NewTimeCmdResult(val time.Time, err error) *TimeCmd {
 	var cmd TimeCmd
 	cmd.val = val
@ -106,7 +106,7 @@ func NewTimeCmdResult(val time.Time, err error) *TimeCmd {
 	return &cmd
 }
 
-// NewZSliceCmdResult returns a ZSliceCmd initialised with val and err for testing
+// NewZSliceCmdResult returns a ZSliceCmd initialised with val and err for testing.
 func NewZSliceCmdResult(val []Z, err error) *ZSliceCmd {
 	var cmd ZSliceCmd
 	cmd.val = val
@ -114,7 +114,7 @@ func NewZSliceCmdResult(val []Z, err error) *ZSliceCmd {
 	return &cmd
 }
 
-// NewZWithKeyCmdResult returns a NewZWithKeyCmd initialised with val and err for testing
+// NewZWithKeyCmdResult returns a NewZWithKeyCmd initialised with val and err for testing.
 func NewZWithKeyCmdResult(val *ZWithKey, err error) *ZWithKeyCmd {
 	var cmd ZWithKeyCmd
 	cmd.val = val
@ -122,7 +122,7 @@ func NewZWithKeyCmdResult(val *ZWithKey, err error) *ZWithKeyCmd {
 	return &cmd
 }
 
-// NewScanCmdResult returns a ScanCmd initialised with val and err for testing
+// NewScanCmdResult returns a ScanCmd initialised with val and err for testing.
 func NewScanCmdResult(keys []string, cursor uint64, err error) *ScanCmd {
 	var cmd ScanCmd
 	cmd.page = keys
@ -131,7 +131,7 @@ func NewScanCmdResult(keys []string, cursor uint64, err error) *ScanCmd {
 	return &cmd
 }
 
-// NewClusterSlotsCmdResult returns a ClusterSlotsCmd initialised with val and err for testing
+// NewClusterSlotsCmdResult returns a ClusterSlotsCmd initialised with val and err for testing.
 func NewClusterSlotsCmdResult(val []ClusterSlot, err error) *ClusterSlotsCmd {
 	var cmd ClusterSlotsCmd
 	cmd.val = val
@ -139,7 +139,7 @@ func NewClusterSlotsCmdResult(val []ClusterSlot, err error) *ClusterSlotsCmd {
 	return &cmd
 }
 
-// NewGeoLocationCmdResult returns a GeoLocationCmd initialised with val and err for testing
+// NewGeoLocationCmdResult returns a GeoLocationCmd initialised with val and err for testing.
 func NewGeoLocationCmdResult(val []GeoLocation, err error) *GeoLocationCmd {
 	var cmd GeoLocationCmd
 	cmd.locations = val
@ -147,7 +147,7 @@ func NewGeoLocationCmdResult(val []GeoLocation, err error) *GeoLocationCmd {
 	return &cmd
 }
 
-// NewGeoPosCmdResult returns a GeoPosCmd initialised with val and err for testing
+// NewGeoPosCmdResult returns a GeoPosCmd initialised with val and err for testing.
 func NewGeoPosCmdResult(val []*GeoPos, err error) *GeoPosCmd {
 	var cmd GeoPosCmd
 	cmd.val = val
@ -155,7 +155,7 @@ func NewGeoPosCmdResult(val []*GeoPos, err error) *GeoPosCmd {
 	return &cmd
 }
 
-// NewCommandsInfoCmdResult returns a CommandsInfoCmd initialised with val and err for testing
+// NewCommandsInfoCmdResult returns a CommandsInfoCmd initialised with val and err for testing.
 func NewCommandsInfoCmdResult(val map[string]*CommandInfo, err error) *CommandsInfoCmd {
 	var cmd CommandsInfoCmd
 	cmd.val = val
@ -163,7 +163,7 @@ func NewCommandsInfoCmdResult(val map[string]*CommandInfo, err error) *CommandsI
 	return &cmd
 }
 
-// NewXMessageSliceCmdResult returns a XMessageSliceCmd initialised with val and err for testing
+// NewXMessageSliceCmdResult returns a XMessageSliceCmd initialised with val and err for testing.
 func NewXMessageSliceCmdResult(val []XMessage, err error) *XMessageSliceCmd {
 	var cmd XMessageSliceCmd
 	cmd.val = val
@ -171,7 +171,7 @@ func NewXMessageSliceCmdResult(val []XMessage, err error) *XMessageSliceCmd {
 	return &cmd
 }
 
-// NewXStreamSliceCmdResult returns a XStreamSliceCmd initialised with val and err for testing
+// NewXStreamSliceCmdResult returns a XStreamSliceCmd initialised with val and err for testing.
 func NewXStreamSliceCmdResult(val []XStream, err error) *XStreamSliceCmd {
 	var cmd XStreamSliceCmd
 	cmd.val = val
349
vendor/github.com/go-redis/redis/v7/ring.go → vendor/github.com/go-redis/redis/v8/ring.go
generated
vendored
|
@ -2,72 +2,73 @@ package redis
 
 import (
 	"context"
 	"crypto/tls"
 	"errors"
 	"fmt"
-	"math/rand"
 	"net"
 	"strconv"
 	"sync"
 	"sync/atomic"
 	"time"
 
-	"github.com/go-redis/redis/v7/internal"
-	"github.com/go-redis/redis/v7/internal/consistenthash"
-	"github.com/go-redis/redis/v7/internal/hashtag"
-	"github.com/go-redis/redis/v7/internal/pool"
+	"github.com/cespare/xxhash/v2"
+	"github.com/dgryski/go-rendezvous"
+	"github.com/go-redis/redis/v8/internal"
+	"github.com/go-redis/redis/v8/internal/hashtag"
+	"github.com/go-redis/redis/v8/internal/pool"
+	"github.com/go-redis/redis/v8/internal/rand"
 )
 
-// Hash is type of hash function used in consistent hash.
-type Hash consistenthash.Hash
-
 var errRingShardsDown = errors.New("redis: all ring shards are down")
 
+//------------------------------------------------------------------------------
+
+type ConsistentHash interface {
+	Get(string) string
+}
+
+type rendezvousWrapper struct {
+	*rendezvous.Rendezvous
+}
+
+func (w rendezvousWrapper) Get(key string) string {
+	return w.Lookup(key)
+}
+
+func newRendezvous(shards []string) ConsistentHash {
+	return rendezvousWrapper{rendezvous.New(shards, xxhash.Sum64String)}
+}
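newRendezvous is the new default shard picker: rendezvous (highest-random-weight) hashing keyed with xxhash. Used directly, the vendored package looks like this (shard names are placeholders):

h := rendezvous.New([]string{"shard1", "shard2", "shard3"}, xxhash.Sum64String)
fmt.Println(h.Lookup("user:42")) // always the same shard for the same key
h.Remove("shard2")               // only keys that lived on shard2 move elsewhere
fmt.Println(h.Lookup("user:42"))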
|
||||
|
||||
 //------------------------------------------------------------------------------
 
 // RingOptions are used to configure a ring client and should be
 // passed to NewRing.
 type RingOptions struct {
 	// Map of name => host:port addresses of ring shards.
 	Addrs map[string]string
 
-	// Map of name => password of ring shards, to allow different shards to have
-	// different passwords. It will be ignored if the Password field is set.
-	Passwords map[string]string
+	// NewClient creates a shard client with provided name and options.
+	NewClient func(name string, opt *Options) *Client
 
 	// Frequency of PING commands sent to check shards availability.
 	// Shard is considered down after 3 subsequent failed checks.
 	HeartbeatFrequency time.Duration
 
-	// Hash function used in consistent hash.
-	// Default is crc32.ChecksumIEEE.
-	Hash Hash
-
-	// Number of replicas in consistent hash.
-	// Default is 100 replicas.
+	// NewConsistentHash returns a consistent hash that is used
+	// to distribute keys across the shards.
 	//
-	// Higher number of replicas will provide less deviation, that is keys will be
-	// distributed to nodes more evenly.
-	//
-	// Following is deviation for common nreplicas:
-	//  --------------------------------------------------------
-	//  | nreplicas | standard error | 99% confidence interval |
-	//  |    10     |     0.3152     |      (0.37, 1.98)       |
-	//  |    100    |     0.0997     |      (0.76, 1.28)       |
-	//  |   1000    |     0.0316     |      (0.92, 1.09)       |
-	//  --------------------------------------------------------
-	//
-	// See https://arxiv.org/abs/1406.2294 for reference
-	HashReplicas int
-
-	// NewClient creates a shard client with provided name and options.
-	NewClient func(name string, opt *Options) *Client
-
-	// Optional hook that is called when a new shard is created.
-	OnNewShard func(*Client)
+	// See https://medium.com/@dgryski/consistent-hashing-algorithmic-tradeoffs-ef6b8e2fcae8
+	// for consistent hashing algorithmic tradeoffs.
+	NewConsistentHash func(shards []string) ConsistentHash
 
 	// Following options are copied from Options struct.
 
-	OnConnect func(*Conn) error
+	Dialer    func(ctx context.Context, network, addr string) (net.Conn, error)
+	OnConnect func(ctx context.Context, cn *Conn) error
 
-	DB       int
-	Password string
+	Username string
+	Password string
+	DB       int
 
 	MaxRetries      int
 	MinRetryBackoff time.Duration
@ -83,17 +84,31 @@ type RingOptions struct {
 	PoolTimeout        time.Duration
 	IdleTimeout        time.Duration
 	IdleCheckFrequency time.Duration
+
+	TLSConfig *tls.Config
+	Limiter   Limiter
 }
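NewConsistentHash replaces the old Hash/HashReplicas pair: instead of tuning ring replicas, callers can swap in any ConsistentHash implementation. A deliberately trivial sketch (firstShard is a toy strategy for illustration only; addresses are placeholders):

type firstShard struct{ shards []string }

func (f firstShard) Get(key string) string {
	if len(f.shards) == 0 {
		return ""
	}
	return f.shards[0] // toy: route everything to the first live shard
}

ring := redis.NewRing(&redis.RingOptions{
	Addrs: map[string]string{
		"shard1": "localhost:7000",
		"shard2": "localhost:7001",
	},
	NewConsistentHash: func(shards []string) redis.ConsistentHash {
		return firstShard{shards: shards}
	},
})
_ = ring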
|
||||
|
||||
func (opt *RingOptions) init() {
|
||||
if opt.NewClient == nil {
|
||||
opt.NewClient = func(name string, opt *Options) *Client {
|
||||
return NewClient(opt)
|
||||
}
|
||||
}
|
||||
|
||||
if opt.HeartbeatFrequency == 0 {
|
||||
opt.HeartbeatFrequency = 500 * time.Millisecond
|
||||
}
|
||||
|
||||
if opt.HashReplicas == 0 {
|
||||
opt.HashReplicas = 100
|
||||
if opt.NewConsistentHash == nil {
|
||||
opt.NewConsistentHash = newRendezvous
|
||||
}
|
||||
|
||||
if opt.MaxRetries == -1 {
|
||||
opt.MaxRetries = 0
|
||||
} else if opt.MaxRetries == 0 {
|
||||
opt.MaxRetries = 3
|
||||
}
|
||||
switch opt.MinRetryBackoff {
|
||||
case -1:
|
||||
opt.MinRetryBackoff = 0
|
||||
|
@ -108,12 +123,16 @@ func (opt *RingOptions) init() {
|
|||
}
|
||||
}
|
||||
|
||||
func (opt *RingOptions) clientOptions(shard string) *Options {
|
||||
func (opt *RingOptions) clientOptions() *Options {
|
||||
return &Options{
|
||||
Dialer: opt.Dialer,
|
||||
OnConnect: opt.OnConnect,
|
||||
|
||||
Username: opt.Username,
|
||||
Password: opt.Password,
|
||||
DB: opt.DB,
|
||||
Password: opt.getPassword(shard),
|
||||
|
||||
MaxRetries: -1,
|
||||
|
||||
DialTimeout: opt.DialTimeout,
|
||||
ReadTimeout: opt.ReadTimeout,
|
||||
|
@ -125,14 +144,10 @@ func (opt *RingOptions) clientOptions(shard string) *Options {
|
|||
PoolTimeout: opt.PoolTimeout,
|
||||
IdleTimeout: opt.IdleTimeout,
|
||||
IdleCheckFrequency: opt.IdleCheckFrequency,
|
||||
}
|
||||
}
|
||||
|
||||
func (opt *RingOptions) getPassword(shard string) string {
|
||||
if opt.Password == "" {
|
||||
return opt.Passwords[shard]
|
||||
TLSConfig: opt.TLSConfig,
|
||||
Limiter: opt.Limiter,
|
||||
}
|
||||
return opt.Password
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
@ -142,6 +157,15 @@ type ringShard struct {
|
|||
down int32
|
||||
}
|
||||
|
||||
func newRingShard(opt *RingOptions, name, addr string) *ringShard {
|
||||
clopt := opt.clientOptions()
|
||||
clopt.Addr = addr
|
||||
|
||||
return &ringShard{
|
||||
Client: opt.NewClient(name, clopt),
|
||||
}
|
||||
}
|
||||
|
||||
func (shard *ringShard) String() string {
|
||||
var state string
|
||||
if shard.IsUp() {
|
||||
|
@ -182,41 +206,59 @@ func (shard *ringShard) Vote(up bool) bool {
|
|||
type ringShards struct {
|
||||
opt *RingOptions
|
||||
|
||||
mu sync.RWMutex
|
||||
hash *consistenthash.Map
|
||||
shards map[string]*ringShard // read only
|
||||
list []*ringShard // read only
|
||||
len int
|
	closed bool
	mu       sync.RWMutex
	hash     ConsistentHash
	shards   map[string]*ringShard // read only
	list     []*ringShard          // read only
	numShard int
	closed   bool
}

func newRingShards(opt *RingOptions) *ringShards {
	return &ringShards{
	shards := make(map[string]*ringShard, len(opt.Addrs))
	list := make([]*ringShard, 0, len(shards))

	for name, addr := range opt.Addrs {
		shard := newRingShard(opt, name, addr)
		shards[name] = shard

		list = append(list, shard)
	}

	c := &ringShards{
		opt: opt,

		hash:   newConsistentHash(opt),
		shards: make(map[string]*ringShard),
		shards: shards,
		list:   list,
	}
}
	c.rebalance()

func (c *ringShards) Add(name string, cl *Client) {
	shard := &ringShard{Client: cl}
	c.hash.Add(name)
	c.shards[name] = shard
	c.list = append(c.list, shard)
	return c
}

func (c *ringShards) List() []*ringShard {
	var list []*ringShard

	c.mu.RLock()
	list := c.list
	if !c.closed {
		list = c.list
	}
	c.mu.RUnlock()

	return list
}

func (c *ringShards) Hash(key string) string {
	key = hashtag.Key(key)

	var hash string

	c.mu.RLock()
	hash := c.hash.Get(key)
	if c.numShard > 0 {
		hash = c.hash.Get(key)
	}
	c.mu.RUnlock()

	return hash
}

@@ -230,6 +272,11 @@ func (c *ringShards) GetByKey(key string) (*ringShard, error) {
		return nil, pool.ErrClosed
	}

	if c.numShard == 0 {
		c.mu.RUnlock()
		return nil, errRingShardsDown
	}

	hash := c.hash.Get(key)
	if hash == "" {
		c.mu.RUnlock()

@@ -242,13 +289,13 @@ func (c *ringShards) GetByKey(key string) (*ringShard, error) {
	return shard, nil
}

func (c *ringShards) GetByHash(name string) (*ringShard, error) {
	if name == "" {
func (c *ringShards) GetByName(shardName string) (*ringShard, error) {
	if shardName == "" {
		return c.Random()
	}

	c.mu.RLock()
	shard := c.shards[name]
	shard := c.shards[shardName]
	c.mu.RUnlock()
	return shard, nil
}

@@ -261,23 +308,16 @@ func (c *ringShards) Random() (*ringShard, error) {
func (c *ringShards) Heartbeat(frequency time.Duration) {
	ticker := time.NewTicker(frequency)
	defer ticker.Stop()

	ctx := context.Background()
	for range ticker.C {
		var rebalance bool

		c.mu.RLock()

		if c.closed {
			c.mu.RUnlock()
			break
		}

		shards := c.list
		c.mu.RUnlock()

		for _, shard := range shards {
			err := shard.Client.Ping().Err()
			if shard.Vote(err == nil || err == pool.ErrPoolTimeout) {
				internal.Logger.Printf("ring shard state changed: %s", shard)
		for _, shard := range c.List() {
			err := shard.Client.Ping(ctx).Err()
			isUp := err == nil || err == pool.ErrPoolTimeout
			if shard.Vote(isUp) {
				internal.Logger.Printf(context.Background(), "ring shard state changed: %s", shard)
				rebalance = true
			}
		}

@@ -294,24 +334,25 @@ func (c *ringShards) rebalance() {
	shards := c.shards
	c.mu.RUnlock()

	hash := newConsistentHash(c.opt)
	var shardsNum int
	liveShards := make([]string, 0, len(shards))

	for name, shard := range shards {
		if shard.IsUp() {
			hash.Add(name)
			shardsNum++
			liveShards = append(liveShards, name)
		}
	}

	hash := c.opt.NewConsistentHash(liveShards)

	c.mu.Lock()
	c.hash = hash
	c.len = shardsNum
	c.numShard = len(liveShards)
	c.mu.Unlock()
}

func (c *ringShards) Len() int {
	c.mu.RLock()
	l := c.len
	l := c.numShard
	c.mu.RUnlock()
	return l
}

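The rebalance hunk above swaps the hard-coded consistenthash.Map for a hash built via c.opt.NewConsistentHash(liveShards), making the ring's hashing strategy pluggable through RingOptions. A minimal sketch of a custom implementation, assuming ConsistentHash is the one-method interface implied by the c.hash.Get(key) calls above; the FNV-modulo scheme is illustrative only and, unlike a real consistent hash, remaps most keys when the shard list changes:

package ringhash

import "hash/fnv"

// ConsistentHash as implied by the calls above: map a key to a shard name.
type ConsistentHash interface {
	Get(key string) string
}

// naiveHash picks a shard by FNV-1a hashing the key modulo the shard count.
type naiveHash struct{ shards []string }

func New(shards []string) ConsistentHash { return naiveHash{shards: shards} }

func (h naiveHash) Get(key string) string {
	f := fnv.New32a()
	_, _ = f.Write([]byte(key))
	return h.shards[int(f.Sum32())%len(h.shards)]
}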
@@ -377,34 +418,15 @@ func NewRing(opt *RingOptions) *Ring {
		},
		ctx: context.Background(),
	}

	ring.cmdsInfoCache = newCmdsInfoCache(ring.cmdsInfo)
	ring.cmdable = ring.Process

	for name, addr := range opt.Addrs {
		shard := newRingShard(opt, name, addr)
		ring.shards.Add(name, shard)
	}

	go ring.shards.Heartbeat(opt.HeartbeatFrequency)

	return &ring
}

func newRingShard(opt *RingOptions, name, addr string) *Client {
	clopt := opt.clientOptions(name)
	clopt.Addr = addr
	var shard *Client
	if opt.NewClient != nil {
		shard = opt.NewClient(name, clopt)
	} else {
		shard = NewClient(clopt)
	}
	if opt.OnNewShard != nil {
		opt.OnNewShard(shard)
	}
	return shard
}

func (c *Ring) Context() context.Context {
	return c.ctx
}

@@ -421,21 +443,13 @@ func (c *Ring) WithContext(ctx context.Context) *Ring {
}

// Do creates a Cmd from the args and processes the cmd.
func (c *Ring) Do(args ...interface{}) *Cmd {
	return c.DoContext(c.ctx, args...)
}

func (c *Ring) DoContext(ctx context.Context, args ...interface{}) *Cmd {
	cmd := NewCmd(args...)
	_ = c.ProcessContext(ctx, cmd)
func (c *Ring) Do(ctx context.Context, args ...interface{}) *Cmd {
	cmd := NewCmd(ctx, args...)
	_ = c.Process(ctx, cmd)
	return cmd
}

func (c *Ring) Process(cmd Cmder) error {
	return c.ProcessContext(c.ctx, cmd)
}

func (c *Ring) ProcessContext(ctx context.Context, cmd Cmder) error {
func (c *Ring) Process(ctx context.Context, cmd Cmder) error {
	return c.hooks.process(ctx, cmd, c.process)
}

@@ -469,36 +483,39 @@ func (c *Ring) Len() int {
}

// Subscribe subscribes the client to the specified channels.
func (c *Ring) Subscribe(channels ...string) *PubSub {
func (c *Ring) Subscribe(ctx context.Context, channels ...string) *PubSub {
	if len(channels) == 0 {
		panic("at least one channel is required")
	}

	shard, err := c.shards.GetByKey(channels[0])
	if err != nil {
		//TODO: return PubSub with sticky error
		// TODO: return PubSub with sticky error
		panic(err)
	}
	return shard.Client.Subscribe(channels...)
	return shard.Client.Subscribe(ctx, channels...)
}

// PSubscribe subscribes the client to the given patterns.
func (c *Ring) PSubscribe(channels ...string) *PubSub {
func (c *Ring) PSubscribe(ctx context.Context, channels ...string) *PubSub {
	if len(channels) == 0 {
		panic("at least one channel is required")
	}

	shard, err := c.shards.GetByKey(channels[0])
	if err != nil {
		//TODO: return PubSub with sticky error
		// TODO: return PubSub with sticky error
		panic(err)
	}
	return shard.Client.PSubscribe(channels...)
	return shard.Client.PSubscribe(ctx, channels...)
}

// ForEachShard concurrently calls the fn on each live shard in the ring.
// It returns the first error if any.
func (c *Ring) ForEachShard(fn func(client *Client) error) error {
func (c *Ring) ForEachShard(
	ctx context.Context,
	fn func(ctx context.Context, client *Client) error,
) error {
	shards := c.shards.List()
	var wg sync.WaitGroup
	errCh := make(chan error, 1)

@@ -510,7 +527,7 @@ func (c *Ring) ForEachShard(fn func(client *Client) error) error {
		wg.Add(1)
		go func(shard *ringShard) {
			defer wg.Done()
			err := fn(shard.Client)
			err := fn(ctx, shard.Client)
			if err != nil {
				select {
				case errCh <- err:

@@ -529,11 +546,11 @@ func (c *Ring) ForEachShard(fn func(client *Client) error) error {
	}
}

func (c *Ring) cmdsInfo() (map[string]*CommandInfo, error) {
func (c *Ring) cmdsInfo(ctx context.Context) (map[string]*CommandInfo, error) {
	shards := c.shards.List()
	firstErr := errRingShardsDown
	var firstErr error
	for _, shard := range shards {
		cmdsInfo, err := shard.Client.Command().Result()
		cmdsInfo, err := shard.Client.Command(ctx).Result()
		if err == nil {
			return cmdsInfo, nil
		}

@@ -541,23 +558,26 @@ func (c *Ring) cmdsInfo() (map[string]*CommandInfo, error) {
			firstErr = err
		}
	}
	if firstErr == nil {
		return nil, errRingShardsDown
	}
	return nil, firstErr
}

func (c *Ring) cmdInfo(name string) *CommandInfo {
	cmdsInfo, err := c.cmdsInfoCache.Get()
func (c *Ring) cmdInfo(ctx context.Context, name string) *CommandInfo {
	cmdsInfo, err := c.cmdsInfoCache.Get(ctx)
	if err != nil {
		return nil
	}
	info := cmdsInfo[name]
	if info == nil {
		internal.Logger.Printf("info for cmd=%s not found", name)
		internal.Logger.Printf(c.Context(), "info for cmd=%s not found", name)
	}
	return info
}

func (c *Ring) cmdShard(cmd Cmder) (*ringShard, error) {
	cmdInfo := c.cmdInfo(cmd.Name())
func (c *Ring) cmdShard(ctx context.Context, cmd Cmder) (*ringShard, error) {
	cmdInfo := c.cmdInfo(ctx, cmd.Name())
	pos := cmdFirstKeyPos(cmd, cmdInfo)
	if pos == 0 {
		return c.shards.Random()

@@ -567,15 +587,6 @@ func (c *Ring) cmdShard(cmd Cmder) (*ringShard, error) {
}

func (c *Ring) process(ctx context.Context, cmd Cmder) error {
	err := c._process(ctx, cmd)
	if err != nil {
		cmd.SetErr(err)
		return err
	}
	return nil
}

func (c *Ring) _process(ctx context.Context, cmd Cmder) error {
	var lastErr error
	for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
		if attempt > 0 {

@@ -584,21 +595,21 @@ func (c *Ring) _process(ctx context.Context, cmd Cmder) error {
			}
		}

		shard, err := c.cmdShard(cmd)
		shard, err := c.cmdShard(ctx, cmd)
		if err != nil {
			return err
		}

		lastErr = shard.Client.ProcessContext(ctx, cmd)
		if lastErr == nil || !isRetryableError(lastErr, cmd.readTimeout() == nil) {
		lastErr = shard.Client.Process(ctx, cmd)
		if lastErr == nil || !shouldRetry(lastErr, cmd.readTimeout() == nil) {
			return lastErr
		}
	}
	return lastErr
}

func (c *Ring) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) {
	return c.Pipeline().Pipelined(fn)
func (c *Ring) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
	return c.Pipeline().Pipelined(ctx, fn)
}

func (c *Ring) Pipeline() Pipeliner {

@@ -616,8 +627,8 @@ func (c *Ring) processPipeline(ctx context.Context, cmds []Cmder) error {
	})
}

func (c *Ring) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) {
	return c.TxPipeline().Pipelined(fn)
func (c *Ring) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
	return c.TxPipeline().Pipelined(ctx, fn)
}

func (c *Ring) TxPipeline() Pipeliner {

@@ -640,10 +651,10 @@ func (c *Ring) generalProcessPipeline(
) error {
	cmdsMap := make(map[string][]Cmder)
	for _, cmd := range cmds {
		cmdInfo := c.cmdInfo(cmd.Name())
		cmdInfo := c.cmdInfo(ctx, cmd.Name())
		hash := cmd.stringArg(cmdFirstKeyPos(cmd, cmdInfo))
		if hash != "" {
			hash = c.shards.Hash(hashtag.Key(hash))
			hash = c.shards.Hash(hash)
		}
		cmdsMap[hash] = append(cmdsMap[hash], cmd)
	}

@@ -665,30 +676,20 @@ func (c *Ring) generalProcessPipeline(
func (c *Ring) processShardPipeline(
	ctx context.Context, hash string, cmds []Cmder, tx bool,
) error {
	//TODO: retry?
	shard, err := c.shards.GetByHash(hash)
	// TODO: retry?
	shard, err := c.shards.GetByName(hash)
	if err != nil {
		setCmdsErr(cmds, err)
		return err
	}

	if tx {
		err = shard.Client.processTxPipeline(ctx, cmds)
	} else {
		err = shard.Client.processPipeline(ctx, cmds)
		return shard.Client.processTxPipeline(ctx, cmds)
	}
	return err
	return shard.Client.processPipeline(ctx, cmds)
}

// Close closes the ring client, releasing any open resources.
//
// It is rare to Close a Ring, as the Ring is meant to be long-lived
// and shared between many goroutines.
func (c *Ring) Close() error {
	return c.shards.Close()
}

func (c *Ring) Watch(fn func(*Tx) error, keys ...string) error {
func (c *Ring) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error {
	if len(keys) == 0 {
		return fmt.Errorf("redis: Watch requires at least one key")
	}

@@ -718,9 +719,13 @@ func (c *Ring) Watch(fn func(*Tx) error, keys ...string) error {
		}
	}

	return shards[0].Client.Watch(fn, keys...)
	return shards[0].Client.Watch(ctx, fn, keys...)
}

func newConsistentHash(opt *RingOptions) *consistenthash.Map {
	return consistenthash.New(opt.HashReplicas, consistenthash.Hash(opt.Hash))
// Close closes the ring client, releasing any open resources.
//
// It is rare to Close a Ring, as the Ring is meant to be long-lived
// and shared between many goroutines.
func (c *Ring) Close() error {
	return c.shards.Close()
}
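Taken together, the hunks above move every Ring entry point (Do, Process, Subscribe, PSubscribe, ForEachShard, Pipelined, Watch) to a context-first signature. A minimal caller-side sketch of the resulting API; the shard names and addresses are illustrative placeholders:

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	ring := redis.NewRing(&redis.RingOptions{
		Addrs: map[string]string{
			"shard1": ":6379", // illustrative addresses
			"shard2": ":6380",
		},
	})
	defer ring.Close()

	// Every command now takes a context as its first argument.
	if err := ring.Set(ctx, "key", "value", 0).Err(); err != nil {
		fmt.Println("set failed:", err)
	}

	// ForEachShard passes the context through to the callback as well.
	_ = ring.ForEachShard(ctx, func(ctx context.Context, shard *redis.Client) error {
		return shard.Ping(ctx).Err()
	})
}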
65
vendor/github.com/go-redis/redis/v8/script.go
generated
vendored
Normal file
@@ -0,0 +1,65 @@
package redis

import (
	"context"
	"crypto/sha1"
	"encoding/hex"
	"io"
	"strings"
)

type Scripter interface {
	Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd
	EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd
	ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd
	ScriptLoad(ctx context.Context, script string) *StringCmd
}

var (
	_ Scripter = (*Client)(nil)
	_ Scripter = (*Ring)(nil)
	_ Scripter = (*ClusterClient)(nil)
)

type Script struct {
	src, hash string
}

func NewScript(src string) *Script {
	h := sha1.New()
	_, _ = io.WriteString(h, src)
	return &Script{
		src:  src,
		hash: hex.EncodeToString(h.Sum(nil)),
	}
}

func (s *Script) Hash() string {
	return s.hash
}

func (s *Script) Load(ctx context.Context, c Scripter) *StringCmd {
	return c.ScriptLoad(ctx, s.src)
}

func (s *Script) Exists(ctx context.Context, c Scripter) *BoolSliceCmd {
	return c.ScriptExists(ctx, s.hash)
}

func (s *Script) Eval(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
	return c.Eval(ctx, s.src, keys, args...)
}

func (s *Script) EvalSha(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
	return c.EvalSha(ctx, s.hash, keys, args...)
}

// Run optimistically uses EVALSHA to run the script. If script does not exist
// it is retried using EVAL.
func (s *Script) Run(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
	r := s.EvalSha(ctx, c, keys, args...)
	if err := r.Err(); err != nil && strings.HasPrefix(err.Error(), "NOSCRIPT ") {
		return s.Eval(ctx, c, keys, args...)
	}
	return r
}
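A minimal usage sketch of the Script helper above; the Lua body and key name are illustrative. Run sends EVALSHA first and falls back to EVAL only on a NOSCRIPT reply, so the full script body crosses the wire at most once per server:

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

// incrBy is hashed once at construction; Run reuses the SHA1 afterwards.
var incrBy = redis.NewScript(`return redis.call("INCRBY", KEYS[1], ARGV[1])`)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: ":6379"}) // illustrative address

	n, err := incrBy.Run(ctx, rdb, []string{"counter"}, 7).Int()
	fmt.Println(n, err)
}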
738
vendor/github.com/go-redis/redis/v8/sentinel.go
generated
vendored
Normal file
@@ -0,0 +1,738 @@
package redis

import (
	"context"
	"crypto/tls"
	"errors"
	"net"
	"strings"
	"sync"
	"time"

	"github.com/go-redis/redis/v8/internal"
	"github.com/go-redis/redis/v8/internal/pool"
	"github.com/go-redis/redis/v8/internal/rand"
)

//------------------------------------------------------------------------------

// FailoverOptions are used to configure a failover client and should
// be passed to NewFailoverClient.
type FailoverOptions struct {
	// The master name.
	MasterName string
	// A seed list of host:port addresses of sentinel nodes.
	SentinelAddrs []string
	// Sentinel password from "requirepass <password>" (if enabled) in Sentinel configuration
	SentinelPassword string

	// Allows routing read-only commands to the closest master or slave node.
	// This option only works with NewFailoverClusterClient.
	RouteByLatency bool
	// Allows routing read-only commands to the random master or slave node.
	// This option only works with NewFailoverClusterClient.
	RouteRandomly bool

	// Route all commands to slave read-only nodes.
	SlaveOnly bool

	// Following options are copied from Options struct.

	Dialer    func(ctx context.Context, network, addr string) (net.Conn, error)
	OnConnect func(ctx context.Context, cn *Conn) error

	Username string
	Password string
	DB       int

	MaxRetries      int
	MinRetryBackoff time.Duration
	MaxRetryBackoff time.Duration

	DialTimeout  time.Duration
	ReadTimeout  time.Duration
	WriteTimeout time.Duration

	PoolSize           int
	MinIdleConns       int
	MaxConnAge         time.Duration
	PoolTimeout        time.Duration
	IdleTimeout        time.Duration
	IdleCheckFrequency time.Duration

	TLSConfig *tls.Config
}

func (opt *FailoverOptions) clientOptions() *Options {
	return &Options{
		Addr: "FailoverClient",

		Dialer:    opt.Dialer,
		OnConnect: opt.OnConnect,

		DB:       opt.DB,
		Username: opt.Username,
		Password: opt.Password,

		MaxRetries:      opt.MaxRetries,
		MinRetryBackoff: opt.MinRetryBackoff,
		MaxRetryBackoff: opt.MaxRetryBackoff,

		DialTimeout:  opt.DialTimeout,
		ReadTimeout:  opt.ReadTimeout,
		WriteTimeout: opt.WriteTimeout,

		PoolSize:           opt.PoolSize,
		PoolTimeout:        opt.PoolTimeout,
		IdleTimeout:        opt.IdleTimeout,
		IdleCheckFrequency: opt.IdleCheckFrequency,
		MinIdleConns:       opt.MinIdleConns,
		MaxConnAge:         opt.MaxConnAge,

		TLSConfig: opt.TLSConfig,
	}
}

func (opt *FailoverOptions) sentinelOptions(addr string) *Options {
	return &Options{
		Addr: addr,

		Dialer:    opt.Dialer,
		OnConnect: opt.OnConnect,

		DB:       0,
		Password: opt.SentinelPassword,

		MaxRetries:      opt.MaxRetries,
		MinRetryBackoff: opt.MinRetryBackoff,
		MaxRetryBackoff: opt.MaxRetryBackoff,

		DialTimeout:  opt.DialTimeout,
		ReadTimeout:  opt.ReadTimeout,
		WriteTimeout: opt.WriteTimeout,

		PoolSize:           opt.PoolSize,
		PoolTimeout:        opt.PoolTimeout,
		IdleTimeout:        opt.IdleTimeout,
		IdleCheckFrequency: opt.IdleCheckFrequency,
		MinIdleConns:       opt.MinIdleConns,
		MaxConnAge:         opt.MaxConnAge,

		TLSConfig: opt.TLSConfig,
	}
}

func (opt *FailoverOptions) clusterOptions() *ClusterOptions {
	return &ClusterOptions{
		Dialer:    opt.Dialer,
		OnConnect: opt.OnConnect,

		Username: opt.Username,
		Password: opt.Password,

		MaxRedirects: opt.MaxRetries,

		RouteByLatency: opt.RouteByLatency,
		RouteRandomly:  opt.RouteRandomly,

		MinRetryBackoff: opt.MinRetryBackoff,
		MaxRetryBackoff: opt.MaxRetryBackoff,

		DialTimeout:  opt.DialTimeout,
		ReadTimeout:  opt.ReadTimeout,
		WriteTimeout: opt.WriteTimeout,

		PoolSize:           opt.PoolSize,
		PoolTimeout:        opt.PoolTimeout,
		IdleTimeout:        opt.IdleTimeout,
		IdleCheckFrequency: opt.IdleCheckFrequency,
		MinIdleConns:       opt.MinIdleConns,
		MaxConnAge:         opt.MaxConnAge,

		TLSConfig: opt.TLSConfig,
	}
}

// NewFailoverClient returns a Redis client that uses Redis Sentinel
// for automatic failover. It's safe for concurrent use by multiple
// goroutines.
func NewFailoverClient(failoverOpt *FailoverOptions) *Client {
	if failoverOpt.RouteByLatency {
		panic("to route commands by latency, use NewFailoverClusterClient")
	}
	if failoverOpt.RouteRandomly {
		panic("to route commands randomly, use NewFailoverClusterClient")
	}

	sentinelAddrs := make([]string, len(failoverOpt.SentinelAddrs))
	copy(sentinelAddrs, failoverOpt.SentinelAddrs)

	failover := &sentinelFailover{
		opt:           failoverOpt,
		sentinelAddrs: sentinelAddrs,
	}

	opt := failoverOpt.clientOptions()
	opt.Dialer = masterSlaveDialer(failover)
	opt.init()

	connPool := newConnPool(opt)

	failover.mu.Lock()
	failover.onFailover = func(ctx context.Context, addr string) {
		_ = connPool.Filter(func(cn *pool.Conn) bool {
			return cn.RemoteAddr().String() != addr
		})
	}
	failover.mu.Unlock()

	c := Client{
		baseClient: newBaseClient(opt, connPool),
		ctx:        context.Background(),
	}
	c.cmdable = c.Process
	c.onClose = failover.Close

	return &c
}

func masterSlaveDialer(
	failover *sentinelFailover,
) func(ctx context.Context, network, addr string) (net.Conn, error) {
	return func(ctx context.Context, network, _ string) (net.Conn, error) {
		var addr string
		var err error

		if failover.opt.SlaveOnly {
			addr, err = failover.RandomSlaveAddr(ctx)
		} else {
			addr, err = failover.MasterAddr(ctx)
			if err == nil {
				failover.trySwitchMaster(ctx, addr)
			}
		}

		if err != nil {
			return nil, err
		}
		if failover.opt.Dialer != nil {
			return failover.opt.Dialer(ctx, network, addr)
		}
		return net.DialTimeout("tcp", addr, failover.opt.DialTimeout)
	}
}

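A minimal sketch wiring up the failover client defined above; the master name and sentinel addresses are illustrative. Commands are routed to the current master, and the onFailover hook registered above evicts pooled connections to the old master when Sentinel announces a switch:

package main

import (
	"context"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewFailoverClient(&redis.FailoverOptions{
		MasterName:    "mymaster",
		SentinelAddrs: []string{":26379", ":26380"},
	})
	defer rdb.Close()

	// Used like any other *redis.Client.
	_ = rdb.Set(ctx, "key", "value", 0).Err()
}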
//------------------------------------------------------------------------------

// SentinelClient is a client for a Redis Sentinel.
type SentinelClient struct {
	*baseClient
	hooks
	ctx context.Context
}

func NewSentinelClient(opt *Options) *SentinelClient {
	opt.init()
	c := &SentinelClient{
		baseClient: &baseClient{
			opt:      opt,
			connPool: newConnPool(opt),
		},
		ctx: context.Background(),
	}
	return c
}

func (c *SentinelClient) Context() context.Context {
	return c.ctx
}

func (c *SentinelClient) WithContext(ctx context.Context) *SentinelClient {
	if ctx == nil {
		panic("nil context")
	}
	clone := *c
	clone.ctx = ctx
	return &clone
}

func (c *SentinelClient) Process(ctx context.Context, cmd Cmder) error {
	return c.hooks.process(ctx, cmd, c.baseClient.process)
}

func (c *SentinelClient) pubSub() *PubSub {
	pubsub := &PubSub{
		opt: c.opt,

		newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) {
			return c.newConn(ctx)
		},
		closeConn: c.connPool.CloseConn,
	}
	pubsub.init()
	return pubsub
}

// Ping is used to test if a connection is still alive, or to
// measure latency.
func (c *SentinelClient) Ping(ctx context.Context) *StringCmd {
	cmd := NewStringCmd(ctx, "ping")
	_ = c.Process(ctx, cmd)
	return cmd
}

// Subscribe subscribes the client to the specified channels.
// Channels can be omitted to create empty subscription.
func (c *SentinelClient) Subscribe(ctx context.Context, channels ...string) *PubSub {
	pubsub := c.pubSub()
	if len(channels) > 0 {
		_ = pubsub.Subscribe(ctx, channels...)
	}
	return pubsub
}

// PSubscribe subscribes the client to the given patterns.
// Patterns can be omitted to create empty subscription.
func (c *SentinelClient) PSubscribe(ctx context.Context, channels ...string) *PubSub {
	pubsub := c.pubSub()
	if len(channels) > 0 {
		_ = pubsub.PSubscribe(ctx, channels...)
	}
	return pubsub
}

func (c *SentinelClient) GetMasterAddrByName(ctx context.Context, name string) *StringSliceCmd {
	cmd := NewStringSliceCmd(ctx, "sentinel", "get-master-addr-by-name", name)
	_ = c.Process(ctx, cmd)
	return cmd
}

func (c *SentinelClient) Sentinels(ctx context.Context, name string) *SliceCmd {
	cmd := NewSliceCmd(ctx, "sentinel", "sentinels", name)
	_ = c.Process(ctx, cmd)
	return cmd
}

// Failover forces a failover as if the master was not reachable, and without
// asking for agreement to other Sentinels.
func (c *SentinelClient) Failover(ctx context.Context, name string) *StatusCmd {
	cmd := NewStatusCmd(ctx, "sentinel", "failover", name)
	_ = c.Process(ctx, cmd)
	return cmd
}

// Reset resets all the masters with matching name. The pattern argument is a
// glob-style pattern. The reset process clears any previous state in a master
// (including a failover in progress), and removes every slave and sentinel
// already discovered and associated with the master.
func (c *SentinelClient) Reset(ctx context.Context, pattern string) *IntCmd {
	cmd := NewIntCmd(ctx, "sentinel", "reset", pattern)
	_ = c.Process(ctx, cmd)
	return cmd
}

// FlushConfig forces Sentinel to rewrite its configuration on disk, including
// the current Sentinel state.
func (c *SentinelClient) FlushConfig(ctx context.Context) *StatusCmd {
	cmd := NewStatusCmd(ctx, "sentinel", "flushconfig")
	_ = c.Process(ctx, cmd)
	return cmd
}

// Master shows the state and info of the specified master.
func (c *SentinelClient) Master(ctx context.Context, name string) *StringStringMapCmd {
	cmd := NewStringStringMapCmd(ctx, "sentinel", "master", name)
	_ = c.Process(ctx, cmd)
	return cmd
}

// Masters shows a list of monitored masters and their state.
func (c *SentinelClient) Masters(ctx context.Context) *SliceCmd {
	cmd := NewSliceCmd(ctx, "sentinel", "masters")
	_ = c.Process(ctx, cmd)
	return cmd
}

// Slaves shows a list of slaves for the specified master and their state.
func (c *SentinelClient) Slaves(ctx context.Context, name string) *SliceCmd {
	cmd := NewSliceCmd(ctx, "sentinel", "slaves", name)
	_ = c.Process(ctx, cmd)
	return cmd
}

// CkQuorum checks if the current Sentinel configuration is able to reach the
// quorum needed to failover a master, and the majority needed to authorize the
// failover. This command should be used in monitoring systems to check if a
// Sentinel deployment is ok.
func (c *SentinelClient) CkQuorum(ctx context.Context, name string) *StringCmd {
	cmd := NewStringCmd(ctx, "sentinel", "ckquorum", name)
	_ = c.Process(ctx, cmd)
	return cmd
}

// Monitor tells the Sentinel to start monitoring a new master with the specified
// name, ip, port, and quorum.
func (c *SentinelClient) Monitor(ctx context.Context, name, ip, port, quorum string) *StringCmd {
	cmd := NewStringCmd(ctx, "sentinel", "monitor", name, ip, port, quorum)
	_ = c.Process(ctx, cmd)
	return cmd
}

// Set is used in order to change configuration parameters of a specific master.
func (c *SentinelClient) Set(ctx context.Context, name, option, value string) *StringCmd {
	cmd := NewStringCmd(ctx, "sentinel", "set", name, option, value)
	_ = c.Process(ctx, cmd)
	return cmd
}

// Remove is used in order to remove the specified master: the master will no
// longer be monitored, and will totally be removed from the internal state of
// the Sentinel.
func (c *SentinelClient) Remove(ctx context.Context, name string) *StringCmd {
	cmd := NewStringCmd(ctx, "sentinel", "remove", name)
	_ = c.Process(ctx, cmd)
	return cmd
}

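A minimal sketch querying a Sentinel directly with the commands above; the address and master name are illustrative:

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	sentinel := redis.NewSentinelClient(&redis.Options{Addr: ":26379"})

	// [host port] of the master currently elected for "mymaster".
	addr, err := sentinel.GetMasterAddrByName(ctx, "mymaster").Result()
	fmt.Println(addr, err)

	// CkQuorum reports whether this Sentinel can reach failover quorum.
	fmt.Println(sentinel.CkQuorum(ctx, "mymaster").Result())
}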
//------------------------------------------------------------------------------

type sentinelFailover struct {
	opt *FailoverOptions

	sentinelAddrs []string

	onFailover func(ctx context.Context, addr string)
	onUpdate   func(ctx context.Context)

	mu          sync.RWMutex
	_masterAddr string
	sentinel    *SentinelClient
	pubsub      *PubSub
}

func (c *sentinelFailover) Close() error {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.sentinel != nil {
		return c.closeSentinel()
	}
	return nil
}

func (c *sentinelFailover) closeSentinel() error {
	firstErr := c.pubsub.Close()
	c.pubsub = nil

	err := c.sentinel.Close()
	if err != nil && firstErr == nil {
		firstErr = err
	}
	c.sentinel = nil

	return firstErr
}

func (c *sentinelFailover) RandomSlaveAddr(ctx context.Context) (string, error) {
	addresses, err := c.slaveAddrs(ctx)
	if err != nil {
		return "", err
	}
	if len(addresses) == 0 {
		return c.MasterAddr(ctx)
	}
	return addresses[rand.Intn(len(addresses))], nil
}

func (c *sentinelFailover) MasterAddr(ctx context.Context) (string, error) {
	c.mu.RLock()
	sentinel := c.sentinel
	c.mu.RUnlock()

	if sentinel != nil {
		addr := c.getMasterAddr(ctx, sentinel)
		if addr != "" {
			return addr, nil
		}
	}

	c.mu.Lock()
	defer c.mu.Unlock()

	if c.sentinel != nil {
		addr := c.getMasterAddr(ctx, c.sentinel)
		if addr != "" {
			return addr, nil
		}
		_ = c.closeSentinel()
	}

	for i, sentinelAddr := range c.sentinelAddrs {
		sentinel := NewSentinelClient(c.opt.sentinelOptions(sentinelAddr))

		masterAddr, err := sentinel.GetMasterAddrByName(ctx, c.opt.MasterName).Result()
		if err != nil {
			internal.Logger.Printf(ctx, "sentinel: GetMasterAddrByName master=%q failed: %s",
				c.opt.MasterName, err)
			_ = sentinel.Close()
			continue
		}

		// Push working sentinel to the top.
		c.sentinelAddrs[0], c.sentinelAddrs[i] = c.sentinelAddrs[i], c.sentinelAddrs[0]
		c.setSentinel(ctx, sentinel)

		addr := net.JoinHostPort(masterAddr[0], masterAddr[1])
		return addr, nil
	}

	return "", errors.New("redis: all sentinels specified in configuration are unreachable")
}

func (c *sentinelFailover) slaveAddrs(ctx context.Context) ([]string, error) {
	c.mu.RLock()
	sentinel := c.sentinel
	c.mu.RUnlock()

	if sentinel != nil {
		addrs := c.getSlaveAddrs(ctx, sentinel)
		if len(addrs) > 0 {
			return addrs, nil
		}
	}

	c.mu.Lock()
	defer c.mu.Unlock()

	if c.sentinel != nil {
		addrs := c.getSlaveAddrs(ctx, c.sentinel)
		if len(addrs) > 0 {
			return addrs, nil
		}
		_ = c.closeSentinel()
	}

	for i, sentinelAddr := range c.sentinelAddrs {
		sentinel := NewSentinelClient(c.opt.sentinelOptions(sentinelAddr))

		slaves, err := sentinel.Slaves(ctx, c.opt.MasterName).Result()
		if err != nil {
			internal.Logger.Printf(ctx, "sentinel: Slaves master=%q failed: %s",
				c.opt.MasterName, err)
			_ = sentinel.Close()
			continue
		}

		// Push working sentinel to the top.
		c.sentinelAddrs[0], c.sentinelAddrs[i] = c.sentinelAddrs[i], c.sentinelAddrs[0]
		c.setSentinel(ctx, sentinel)

		addrs := parseSlaveAddrs(slaves)
		return addrs, nil
	}

	return []string{}, errors.New("redis: all sentinels specified in configuration are unreachable")
}

func (c *sentinelFailover) getMasterAddr(ctx context.Context, sentinel *SentinelClient) string {
	addr, err := sentinel.GetMasterAddrByName(ctx, c.opt.MasterName).Result()
	if err != nil {
		internal.Logger.Printf(ctx, "sentinel: GetMasterAddrByName name=%q failed: %s",
			c.opt.MasterName, err)
		return ""
	}
	return net.JoinHostPort(addr[0], addr[1])
}

func (c *sentinelFailover) getSlaveAddrs(ctx context.Context, sentinel *SentinelClient) []string {
	addrs, err := sentinel.Slaves(ctx, c.opt.MasterName).Result()
	if err != nil {
		internal.Logger.Printf(ctx, "sentinel: Slaves name=%q failed: %s",
			c.opt.MasterName, err)
		return []string{}
	}
	return parseSlaveAddrs(addrs)
}

func parseSlaveAddrs(addrs []interface{}) []string {
	nodes := make([]string, 0, len(addrs))

	for _, node := range addrs {
		ip := ""
		port := ""
		flags := []string{}
		lastkey := ""
		isDown := false

		for _, key := range node.([]interface{}) {
			switch lastkey {
			case "ip":
				ip = key.(string)
			case "port":
				port = key.(string)
			case "flags":
				flags = strings.Split(key.(string), ",")
			}
			lastkey = key.(string)
		}

		for _, flag := range flags {
			switch flag {
			case "s_down", "o_down", "disconnected":
				isDown = true
			}
		}

		if !isDown {
			nodes = append(nodes, net.JoinHostPort(ip, port))
		}
	}

	return nodes
}

func (c *sentinelFailover) trySwitchMaster(ctx context.Context, addr string) {
	c.mu.RLock()
	currentAddr := c._masterAddr
	c.mu.RUnlock()

	if addr == currentAddr {
		return
	}

	c.mu.Lock()
	defer c.mu.Unlock()

	if addr == c._masterAddr {
		return
	}
	c._masterAddr = addr

	internal.Logger.Printf(ctx, "sentinel: new master=%q addr=%q",
		c.opt.MasterName, addr)
	if c.onFailover != nil {
		c.onFailover(ctx, addr)
	}
}

func (c *sentinelFailover) setSentinel(ctx context.Context, sentinel *SentinelClient) {
	if c.sentinel != nil {
		panic("not reached")
	}
	c.sentinel = sentinel
	c.discoverSentinels(ctx)

	c.pubsub = sentinel.Subscribe(ctx, "+switch-master", "+slave-reconf-done")
	go c.listen(c.pubsub)
}

func (c *sentinelFailover) discoverSentinels(ctx context.Context) {
	sentinels, err := c.sentinel.Sentinels(ctx, c.opt.MasterName).Result()
	if err != nil {
		internal.Logger.Printf(ctx, "sentinel: Sentinels master=%q failed: %s", c.opt.MasterName, err)
		return
	}
	for _, sentinel := range sentinels {
		vals := sentinel.([]interface{})
		for i := 0; i < len(vals); i += 2 {
			key := vals[i].(string)
			if key == "name" {
				sentinelAddr := vals[i+1].(string)
				if !contains(c.sentinelAddrs, sentinelAddr) {
					internal.Logger.Printf(ctx, "sentinel: discovered new sentinel=%q for master=%q",
						sentinelAddr, c.opt.MasterName)
					c.sentinelAddrs = append(c.sentinelAddrs, sentinelAddr)
				}
			}
		}
	}
}

func (c *sentinelFailover) listen(pubsub *PubSub) {
	ctx := context.TODO()

	if c.onUpdate != nil {
		c.onUpdate(ctx)
	}

	ch := pubsub.Channel()
	for msg := range ch {
		if msg.Channel == "+switch-master" {
			parts := strings.Split(msg.Payload, " ")
			if parts[0] != c.opt.MasterName {
				internal.Logger.Printf(pubsub.getContext(), "sentinel: ignore addr for master=%q", parts[0])
				continue
			}
			addr := net.JoinHostPort(parts[3], parts[4])
			c.trySwitchMaster(pubsub.getContext(), addr)
		}

		if c.onUpdate != nil {
			c.onUpdate(ctx)
		}
	}
}

func contains(slice []string, str string) bool {
	for _, s := range slice {
		if s == str {
			return true
		}
	}
	return false
}

//------------------------------------------------------------------------------

// NewFailoverClusterClient returns a client that supports routing read-only commands
// to a slave node.
func NewFailoverClusterClient(failoverOpt *FailoverOptions) *ClusterClient {
	sentinelAddrs := make([]string, len(failoverOpt.SentinelAddrs))
	copy(sentinelAddrs, failoverOpt.SentinelAddrs)

	failover := &sentinelFailover{
		opt:           failoverOpt,
		sentinelAddrs: sentinelAddrs,
	}

	opt := failoverOpt.clusterOptions()
	opt.ClusterSlots = func(ctx context.Context) ([]ClusterSlot, error) {
		masterAddr, err := failover.MasterAddr(ctx)
		if err != nil {
			return nil, err
		}

		nodes := []ClusterNode{{
			Addr: masterAddr,
		}}

		slaveAddrs, err := failover.slaveAddrs(ctx)
		if err != nil {
			return nil, err
		}

		for _, slaveAddr := range slaveAddrs {
			nodes = append(nodes, ClusterNode{
				Addr: slaveAddr,
			})
		}

		slots := []ClusterSlot{
			{
				Start: 0,
				End:   16383,
				Nodes: nodes,
			},
		}
		return slots, nil
	}

	c := NewClusterClient(opt)

	failover.mu.Lock()
	failover.onUpdate = func(ctx context.Context) {
		c.ReloadState(ctx)
	}
	failover.mu.Unlock()

	return c
}
51
vendor/github.com/go-redis/redis/v7/tx.go → vendor/github.com/go-redis/redis/v8/tx.go
generated
vendored
@@ -3,8 +3,8 @@ package redis
import (
	"context"

	"github.com/go-redis/redis/v7/internal/pool"
	"github.com/go-redis/redis/v7/internal/proto"
	"github.com/go-redis/redis/v8/internal/pool"
	"github.com/go-redis/redis/v8/internal/proto"
)

// TxFailedErr transaction redis failed.

@@ -26,7 +26,7 @@ func (c *Client) newTx(ctx context.Context) *Tx {
	tx := Tx{
		baseClient: baseClient{
			opt:      c.opt,
			connPool: pool.NewStickyConnPool(c.connPool.(*pool.ConnPool), true),
			connPool: pool.NewStickyConnPool(c.connPool),
		},
		hooks: c.hooks.clone(),
		ctx:   ctx,

@@ -55,11 +55,7 @@ func (c *Tx) WithContext(ctx context.Context) *Tx {
	return &clone
}

func (c *Tx) Process(cmd Cmder) error {
	return c.ProcessContext(c.ctx, cmd)
}

func (c *Tx) ProcessContext(ctx context.Context, cmd Cmder) error {
func (c *Tx) Process(ctx context.Context, cmd Cmder) error {
	return c.hooks.process(ctx, cmd, c.baseClient.process)
}

@@ -67,52 +63,45 @@ func (c *Tx) ProcessContext(ctx context.Context, cmd Cmder) error {
// for conditional execution if there are any keys.
//
// The transaction is automatically closed when fn exits.
func (c *Client) Watch(fn func(*Tx) error, keys ...string) error {
	return c.WatchContext(c.ctx, fn, keys...)
}

func (c *Client) WatchContext(ctx context.Context, fn func(*Tx) error, keys ...string) error {
func (c *Client) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error {
	tx := c.newTx(ctx)
	defer tx.Close(ctx)
	if len(keys) > 0 {
		if err := tx.Watch(keys...).Err(); err != nil {
			_ = tx.Close()
		if err := tx.Watch(ctx, keys...).Err(); err != nil {
			return err
		}
	}

	err := fn(tx)
	_ = tx.Close()
	return err
	return fn(tx)
}

// Close closes the transaction, releasing any open resources.
func (c *Tx) Close() error {
	_ = c.Unwatch().Err()
func (c *Tx) Close(ctx context.Context) error {
	_ = c.Unwatch(ctx).Err()
	return c.baseClient.Close()
}

// Watch marks the keys to be watched for conditional execution
// of a transaction.
func (c *Tx) Watch(keys ...string) *StatusCmd {
func (c *Tx) Watch(ctx context.Context, keys ...string) *StatusCmd {
	args := make([]interface{}, 1+len(keys))
	args[0] = "watch"
	for i, key := range keys {
		args[1+i] = key
	}
	cmd := NewStatusCmd(args...)
	_ = c.Process(cmd)
	cmd := NewStatusCmd(ctx, args...)
	_ = c.Process(ctx, cmd)
	return cmd
}

// Unwatch flushes all the previously watched keys for a transaction.
func (c *Tx) Unwatch(keys ...string) *StatusCmd {
func (c *Tx) Unwatch(ctx context.Context, keys ...string) *StatusCmd {
	args := make([]interface{}, 1+len(keys))
	args[0] = "unwatch"
	for i, key := range keys {
		args[1+i] = key
	}
	cmd := NewStatusCmd(args...)
	_ = c.Process(cmd)
	cmd := NewStatusCmd(ctx, args...)
	_ = c.Process(ctx, cmd)
	return cmd
}

@@ -130,8 +119,8 @@ func (c *Tx) Pipeline() Pipeliner {

// Pipelined executes commands queued in the fn outside of the transaction.
// Use TxPipelined if you need transactional behavior.
func (c *Tx) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) {
	return c.Pipeline().Pipelined(fn)
func (c *Tx) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
	return c.Pipeline().Pipelined(ctx, fn)
}

// TxPipelined executes commands queued in the fn in the transaction.

@@ -142,8 +131,8 @@ func (c *Tx) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) {
// Exec always returns list of commands. If transaction fails
// TxFailedErr is returned. Otherwise Exec returns an error of the first
// failed command or nil.
func (c *Tx) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) {
	return c.TxPipeline().Pipelined(fn)
func (c *Tx) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
	return c.TxPipeline().Pipelined(ctx, fn)
}

// TxPipeline creates a pipeline. Usually it is more convenient to use TxPipelined.
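A minimal sketch of the context-first Watch/TxPipelined flow above: an optimistic increment that retries while the watched key keeps changing underneath it. The key name and retry bound are illustrative:

package main

import (
	"context"

	"github.com/go-redis/redis/v8"
)

func incr(ctx context.Context, rdb *redis.Client, key string) error {
	txf := func(tx *redis.Tx) error {
		n, err := tx.Get(ctx, key).Int()
		if err != nil && err != redis.Nil {
			return err
		}
		// The queued MULTI/EXEC block commits only if key is unchanged.
		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, key, n+1, 0)
			return nil
		})
		return err
	}

	var err error
	for i := 0; i < 10; i++ { // bounded retries on write conflicts
		if err = rdb.Watch(ctx, txf, key); err != redis.TxFailedErr {
			return err
		}
	}
	return err
}

func main() {
	rdb := redis.NewClient(&redis.Options{Addr: ":6379"}) // illustrative address
	_ = incr(context.Background(), rdb, "counter")
}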
@@ -20,23 +20,29 @@ type UniversalOptions struct {

	// Common options.

	Dialer             func(ctx context.Context, network, addr string) (net.Conn, error)
	OnConnect          func(*Conn) error
	Username           string
	Password           string
	MaxRetries         int
	MinRetryBackoff    time.Duration
	MaxRetryBackoff    time.Duration
	DialTimeout        time.Duration
	ReadTimeout        time.Duration
	WriteTimeout       time.Duration
	Dialer    func(ctx context.Context, network, addr string) (net.Conn, error)
	OnConnect func(ctx context.Context, cn *Conn) error

	Username         string
	Password         string
	SentinelPassword string

	MaxRetries      int
	MinRetryBackoff time.Duration
	MaxRetryBackoff time.Duration

	DialTimeout  time.Duration
	ReadTimeout  time.Duration
	WriteTimeout time.Duration

	PoolSize           int
	MinIdleConns       int
	MaxConnAge         time.Duration
	PoolTimeout        time.Duration
	IdleTimeout        time.Duration
	IdleCheckFrequency time.Duration
	TLSConfig          *tls.Config

	TLSConfig *tls.Config

	// Only cluster clients.

@@ -100,9 +106,10 @@ func (o *UniversalOptions) Failover() *FailoverOptions {
		Dialer:    o.Dialer,
		OnConnect: o.OnConnect,

		DB:       o.DB,
		Username: o.Username,
		Password: o.Password,
		DB:               o.DB,
		Username:         o.Username,
		Password:         o.Password,
		SentinelPassword: o.SentinelPassword,

		MaxRetries:      o.MaxRetries,
		MinRetryBackoff: o.MinRetryBackoff,

@@ -168,19 +175,20 @@ type UniversalClient interface {
	Cmdable
	Context() context.Context
	AddHook(Hook)
	Watch(fn func(*Tx) error, keys ...string) error
	Do(args ...interface{}) *Cmd
	DoContext(ctx context.Context, args ...interface{}) *Cmd
	Process(cmd Cmder) error
	ProcessContext(ctx context.Context, cmd Cmder) error
	Subscribe(channels ...string) *PubSub
	PSubscribe(channels ...string) *PubSub
	Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error
	Do(ctx context.Context, args ...interface{}) *Cmd
	Process(ctx context.Context, cmd Cmder) error
	Subscribe(ctx context.Context, channels ...string) *PubSub
	PSubscribe(ctx context.Context, channels ...string) *PubSub
	Close() error
	PoolStats() *PoolStats
}

var _ UniversalClient = (*Client)(nil)
var _ UniversalClient = (*ClusterClient)(nil)
var _ UniversalClient = (*Ring)(nil)
var (
	_ UniversalClient = (*Client)(nil)
	_ UniversalClient = (*ClusterClient)(nil)
	_ UniversalClient = (*Ring)(nil)
)

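A minimal sketch against the revised UniversalClient interface above; a function written this way compiles unchanged against a Client, a Ring, or a ClusterClient, per the assertions just listed:

package main

import (
	"context"

	"github.com/go-redis/redis/v8"
)

// touch accepts any of the three client types asserted above.
func touch(ctx context.Context, rdb redis.UniversalClient, key string) error {
	// Do builds a Cmd from raw args; the context now always comes first.
	return rdb.Do(ctx, "expire", key, 60).Err()
}

func main() {
	rdb := redis.NewClient(&redis.Options{Addr: ":6379"}) // illustrative address
	_ = touch(context.Background(), rdb, "key")
}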
// NewUniversalClient returns a new multi client. The type of client returned depends
|
||||
// on the following three conditions:
|