diff --git a/.github/release-drafter-config.yml b/.github/release-drafter-config.yml new file mode 100644 index 00000000..9ccb28ac --- /dev/null +++ b/.github/release-drafter-config.yml @@ -0,0 +1,48 @@ +name-template: '$NEXT_MINOR_VERSION' +tag-template: 'v$NEXT_MINOR_VERSION' +autolabeler: + - label: 'maintenance' + files: + - '*.md' + - '.github/*' + - label: 'bug' + branch: + - '/bug-.+' + - label: 'maintenance' + branch: + - '/maintenance-.+' + - label: 'feature' + branch: + - '/feature-.+' +categories: + - title: 'Breaking Changes' + labels: + - 'breakingchange' + - title: '🧪 Experimental Features' + labels: + - 'experimental' + - title: '🚀 New Features' + labels: + - 'feature' + - 'enhancement' + - title: '🐛 Bug Fixes' + labels: + - 'fix' + - 'bugfix' + - 'bug' + - 'BUG' + - title: '🧰 Maintenance' + label: 'maintenance' +change-template: '- $TITLE (#$NUMBER)' +exclude-labels: + - 'skip-changelog' +template: | + # Changes + + $CHANGES + + ## Contributors + We'd like to thank all the contributors who worked on this release! + + $CONTRIBUTORS + diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 32dc6868..30fb9e85 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -6,6 +6,9 @@ on: pull_request: branches: [master, v9] +permissions: + contents: read + jobs: build: name: build @@ -13,11 +16,11 @@ jobs: strategy: fail-fast: false matrix: - go-version: [1.16.x, 1.17.x] + go-version: [1.19.x, 1.20.x] services: redis: - image: redis + image: redis:7.2-rc options: >- --health-cmd "redis-cli ping" --health-interval 10s --health-timeout 5s --health-retries 5 ports: @@ -25,7 +28,7 @@ jobs: steps: - name: Set up ${{ matrix.go-version }} - uses: actions/setup-go@v2 + uses: actions/setup-go@v4 with: go-version: ${{ matrix.go-version }} diff --git a/.github/workflows/commitlint.yml b/.github/workflows/commitlint.yml deleted file mode 100644 index 5fcfeaea..00000000 --- a/.github/workflows/commitlint.yml +++ /dev/null @@ -1,11 +0,0 @@ -name: Lint Commit Messages -on: [pull_request] - -jobs: - commitlint: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - with: - fetch-depth: 0 - - uses: wagoid/commitlint-github-action@v4 diff --git a/.github/workflows/doctests.yaml b/.github/workflows/doctests.yaml new file mode 100644 index 00000000..00b0063b --- /dev/null +++ b/.github/workflows/doctests.yaml @@ -0,0 +1,41 @@ +name: Documentation Tests + +on: + push: + branches: [master, examples] + pull_request: + branches: [master, examples] + +permissions: + contents: read + +jobs: + doctests: + name: doctests + runs-on: ubuntu-latest + + services: + redis-stack: + image: redis/redis-stack-server:latest + options: >- + --health-cmd "redis-cli ping" --health-interval 10s --health-timeout 5s --health-retries 5 + ports: + - 6379:6379 + + strategy: + fail-fast: false + matrix: + go-version: [ "1.18", "1.19", "1.20" ] + + steps: + - name: Set up ${{ matrix.go-version }} + uses: actions/setup-go@v4 + with: + go-version: ${{ matrix.go-version }} + + - name: Checkout code + uses: actions/checkout@v3 + + - name: Test doc examples + working-directory: ./doctests + run: go test \ No newline at end of file diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 91c4b53e..d3232ecb 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -10,11 +10,17 @@ on: - v9 pull_request: +permissions: + contents: read + jobs: golangci: + permissions: + contents: read # for actions/checkout to fetch 
code + pull-requests: read # for golangci/golangci-lint-action to fetch pull requests name: lint runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - name: golangci-lint - uses: golangci/golangci-lint-action@v2 + uses: golangci/golangci-lint-action@v3 diff --git a/.github/workflows/release-drafter.yml b/.github/workflows/release-drafter.yml new file mode 100644 index 00000000..eebb3e67 --- /dev/null +++ b/.github/workflows/release-drafter.yml @@ -0,0 +1,24 @@ +name: Release Drafter + +on: + push: + # branches to consider in the event; optional, defaults to all + branches: + - master + +permissions: {} +jobs: + update_release_draft: + permissions: + pull-requests: write # to add label to PR (release-drafter/release-drafter) + contents: write # to create a github release (release-drafter/release-drafter) + + runs-on: ubuntu-latest + steps: + # Drafts your next Release notes as Pull Requests are merged into "master" + - uses: release-drafter/release-drafter@v5 + with: + # (Optional) specify config name to use, relative to .github/. Default: release-drafter.yml + config-name: release-drafter-config.yml + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml deleted file mode 100644 index 685693ae..00000000 --- a/.github/workflows/release.yml +++ /dev/null @@ -1,17 +0,0 @@ -name: Releases - -on: - push: - tags: - - 'v*' - -jobs: - build: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - uses: ncipollo/release-action@v1 - with: - body: - Please refer to - [CHANGELOG.md](https://github.com/go-redis/redis/blob/master/CHANGELOG.md) for details diff --git a/.gitignore b/.gitignore index b975a7b4..64a7cb51 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,4 @@ *.rdb -testdata/*/ +testdata/* .idea/ +.DS_Store diff --git a/CHANGELOG.md b/CHANGELOG.md index 83fcbe2a..297438a9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,184 +1,124 @@ -## [8.11.5](https://github.com/go-redis/redis/compare/v8.11.4...v8.11.5) (2022-03-17) - - -### Bug Fixes - -* add missing Expire methods to Cmdable ([17e3b43](https://github.com/go-redis/redis/commit/17e3b43879d516437ada71cf9c0deac6a382ed9a)) -* add whitespace for avoid unlikely colisions ([7f7c181](https://github.com/go-redis/redis/commit/7f7c1817617cfec909efb13d14ad22ef05a6ad4c)) -* example/otel compile error ([#2028](https://github.com/go-redis/redis/issues/2028)) ([187c07c](https://github.com/go-redis/redis/commit/187c07c41bf68dc3ab280bc3a925e960bbef6475)) -* **extra/redisotel:** set span.kind attribute to client ([065b200](https://github.com/go-redis/redis/commit/065b200070b41e6e949710b4f9e01b50ccc60ab2)) -* format ([96f53a0](https://github.com/go-redis/redis/commit/96f53a0159a28affa94beec1543a62234e7f8b32)) -* invalid type assert in stringArg ([de6c131](https://github.com/go-redis/redis/commit/de6c131865b8263400c8491777b295035f2408e4)) -* rename Golang to Go ([#2030](https://github.com/go-redis/redis/issues/2030)) ([b82a2d9](https://github.com/go-redis/redis/commit/b82a2d9d4d2de7b7cbe8fcd4895be62dbcacacbc)) -* set timeout for WAIT command. 
Fixes [#1963](https://github.com/go-redis/redis/issues/1963) ([333fee1](https://github.com/go-redis/redis/commit/333fee1a8fd98a2fbff1ab187c1b03246a7eb01f)) -* update some argument counts in pre-allocs ([f6974eb](https://github.com/go-redis/redis/commit/f6974ebb5c40a8adf90d2cacab6dc297f4eba4c2)) +## [9.0.5](https://github.com/redis/go-redis/compare/v9.0.4...v9.0.5) (2023-05-29) ### Features -* Add redis v7's NX, XX, GT, LT expire variants ([e19bbb2](https://github.com/go-redis/redis/commit/e19bbb26e2e395c6e077b48d80d79e99f729a8b8)) -* add support for acl sentinel auth in universal client ([ab0ccc4](https://github.com/go-redis/redis/commit/ab0ccc47413f9b2a6eabc852fed5005a3ee1af6e)) -* add support for COPY command ([#2016](https://github.com/go-redis/redis/issues/2016)) ([730afbc](https://github.com/go-redis/redis/commit/730afbcffb93760e8a36cc06cfe55ab102b693a7)) -* add support for passing extra attributes added to spans ([39faaa1](https://github.com/go-redis/redis/commit/39faaa171523834ba527c9789710c4fde87f5a2e)) -* add support for time.Duration write and scan ([2f1b74e](https://github.com/go-redis/redis/commit/2f1b74e20cdd7719b2aecf0768d3e3ae7c3e781b)) -* **redisotel:** ability to override TracerProvider ([#1998](https://github.com/go-redis/redis/issues/1998)) ([bf8d4aa](https://github.com/go-redis/redis/commit/bf8d4aa60c00366cda2e98c3ddddc8cf68507417)) -* set net.peer.name and net.peer.port in otel example ([69bf454](https://github.com/go-redis/redis/commit/69bf454f706204211cd34835f76b2e8192d3766d)) - - - -## [8.11.4](https://github.com/go-redis/redis/compare/v8.11.3...v8.11.4) (2021-10-04) - - -### Features - -* add acl auth support for sentinels ([f66582f](https://github.com/go-redis/redis/commit/f66582f44f3dc3a4705a5260f982043fde4aa634)) -* add Cmd.{String,Int,Float,Bool}Slice helpers and an example ([5d3d293](https://github.com/go-redis/redis/commit/5d3d293cc9c60b90871e2420602001463708ce24)) -* add SetVal method for each command ([168981d](https://github.com/go-redis/redis/commit/168981da2d84ee9e07d15d3e74d738c162e264c4)) - - - -## v9 UNRELEASED - -- Added support for [RESP3](https://github.com/antirez/RESP3/blob/master/spec.md) protocol. -- Removed `Pipeline.Close` since there is no real need to explicitly manage pipeline resources. - `Pipeline.Discard` is still available if you want to reset commands for some reason. -- Replaced `*redis.Z` with `redis.Z` since it is small enough to be passed as value. - -## v8.11 - -- Remove OpenTelemetry metrics. -- Supports more redis commands and options. - -## v8.10 - -- Removed extra OpenTelemetry spans from go-redis core. Now go-redis instrumentation only adds a - single span with a Redis command (instead of 4 spans). There are multiple reasons behind this - decision: - - - Traces become smaller and less noisy. - - It may be costly to process those 3 extra spans for each query. - - go-redis no longer depends on OpenTelemetry. - - Eventually we hope to replace the information that we no longer collect with OpenTelemetry - Metrics. - -## v8.9 - -- Changed `PubSub.Channel` to only rely on `Ping` result. You can now use `WithChannelSize`, - `WithChannelHealthCheckInterval`, and `WithChannelSendTimeout` to override default settings. - -## v8.8 - -- To make updating easier, extra modules now have the same version as go-redis does. 
That means that - you need to update your imports: +* Add ACL LOG ([#2536](https://github.com/redis/go-redis/issues/2536)) ([31ba855](https://github.com/redis/go-redis/commit/31ba855ddebc38fbcc69a75d9d4fb769417cf602)) +* add field protocol to setupClusterQueryParams ([#2600](https://github.com/redis/go-redis/issues/2600)) ([840c25c](https://github.com/redis/go-redis/commit/840c25cb6f320501886a82a5e75f47b491e46fbe)) +* add protocol option ([#2598](https://github.com/redis/go-redis/issues/2598)) ([3917988](https://github.com/redis/go-redis/commit/391798880cfb915c4660f6c3ba63e0c1a459e2af)) -``` -github.com/go-redis/redis/extra/redisotel -> github.com/go-redis/redis/extra/redisotel/v8 -github.com/go-redis/redis/extra/rediscensus -> github.com/go-redis/redis/extra/rediscensus/v8 -``` -## v8.5 -- [knadh](https://github.com/knadh) contributed long-awaited ability to scan Redis Hash into a - struct: +## [9.0.4](https://github.com/redis/go-redis/compare/v9.0.3...v9.0.4) (2023-05-01) -```go -err := rdb.HGetAll(ctx, "hash").Scan(&data) -err := rdb.MGet(ctx, "key1", "key2").Scan(&data) -``` - -- Please check [redismock](https://github.com/go-redis/redismock) by - [monkey92t](https://github.com/monkey92t) if you are looking for mocking Redis Client. - -## v8 - -- All commands require `context.Context` as a first argument, e.g. `rdb.Ping(ctx)`. If you are not - using `context.Context` yet, the simplest option is to define global package variable - `var ctx = context.TODO()` and use it when `ctx` is required. - -- Full support for `context.Context` canceling. - -- Added `redis.NewFailoverClusterClient` that supports routing read-only commands to a slave node. - -- Added `redisext.OpenTemetryHook` that adds - [Redis OpenTelemetry instrumentation](https://redis.uptrace.dev/tracing/). - -- Redis slow log support. - -- Ring uses Rendezvous Hashing by default which provides better distribution. You need to move - existing keys to a new location or keys will be inaccessible / lost. To use old hashing scheme: - -```go -import "github.com/golang/groupcache/consistenthash" - -ring := redis.NewRing(&redis.RingOptions{ - NewConsistentHash: func() { - return consistenthash.New(100, crc32.ChecksumIEEE) - }, -}) -``` - -- `ClusterOptions.MaxRedirects` default value is changed from 8 to 3. -- `Options.MaxRetries` default value is changed from 0 to 3. - -- `Cluster.ForEachNode` is renamed to `ForEachShard` for consistency with `Ring`. - -## v7.3 - -- New option `Options.Username` which causes client to use `AuthACL`. Be aware if your connection - URL contains username. - -## v7.2 - -- Existing `HMSet` is renamed to `HSet` and old deprecated `HMSet` is restored for Redis 3 users. - -## v7.1 - -- Existing `Cmd.String` is renamed to `Cmd.Text`. New `Cmd.String` implements `fmt.Stringer` - interface. - -## v7 - -- _Important_. Tx.Pipeline now returns a non-transactional pipeline. Use Tx.TxPipeline for a - transactional pipeline. -- WrapProcess is replaced with more convenient AddHook that has access to context.Context. -- WithContext now can not be used to create a shallow copy of the client. -- New methods ProcessContext, DoContext, and ExecContext. -- Client respects Context.Deadline when setting net.Conn deadline. -- Client listens on Context.Done while waiting for a connection from the pool and returns an error - when context context is cancelled. -- Add PubSub.ChannelWithSubscriptions that sends `*Subscription` in addition to `*Message` to allow - detecting reconnections. 
-- `time.Time` is now marshalled in RFC3339 format. `rdb.Get("foo").Time()` helper is added to parse - the time. -- `SetLimiter` is removed and added `Options.Limiter` instead. -- `HMSet` is deprecated as of Redis v4. - -## v6.15 - -- Cluster and Ring pipelines process commands for each node in its own goroutine. +### Bug Fixes -## 6.14 +* reader float parser ([#2513](https://github.com/redis/go-redis/issues/2513)) ([46f2450](https://github.com/redis/go-redis/commit/46f245075e6e3a8bd8471f9ca67ea95fd675e241)) -- Added Options.MinIdleConns. -- Added Options.MaxConnAge. -- PoolStats.FreeConns is renamed to PoolStats.IdleConns. -- Add Client.Do to simplify creating custom commands. -- Add Cmd.String, Cmd.Int, Cmd.Int64, Cmd.Uint64, Cmd.Float64, and Cmd.Bool helpers. -- Lower memory usage. -## v6.13 +### Features -- Ring got new options called `HashReplicas` and `Hash`. It is recommended to set - `HashReplicas = 1000` for better keys distribution between shards. -- Cluster client was optimized to use much less memory when reloading cluster state. -- PubSub.ReceiveMessage is re-worked to not use ReceiveTimeout so it does not lose data when timeout - occurres. In most cases it is recommended to use PubSub.Channel instead. -- Dialer.KeepAlive is set to 5 minutes by default. +* add client info command ([#2483](https://github.com/redis/go-redis/issues/2483)) ([b8c7317](https://github.com/redis/go-redis/commit/b8c7317cc6af444603731f7017c602347c0ba61e)) +* no longer verify HELLO error messages ([#2515](https://github.com/redis/go-redis/issues/2515)) ([7b4f217](https://github.com/redis/go-redis/commit/7b4f2179cb5dba3d3c6b0c6f10db52b837c912c8)) +* read the structure to increase the judgment of the omitempty op… ([#2529](https://github.com/redis/go-redis/issues/2529)) ([37c057b](https://github.com/redis/go-redis/commit/37c057b8e597c5e8a0e372337f6a8ad27f6030af)) + + + +## [9.0.3](https://github.com/redis/go-redis/compare/v9.0.2...v9.0.3) (2023-04-02) + +### New Features + +- feat(scan): scan time.Time sets the default decoding (#2413) +- Add support for CLUSTER LINKS command (#2504) +- Add support for acl dryrun command (#2502) +- Add support for COMMAND GETKEYS & COMMAND GETKEYSANDFLAGS (#2500) +- Add support for LCS Command (#2480) +- Add support for BZMPOP (#2456) +- Adding support for ZMPOP command (#2408) +- Add support for LMPOP (#2440) +- feat: remove pool unused fields (#2438) +- Expiretime and PExpireTime (#2426) +- Implement `FUNCTION` group of commands (#2475) +- feat(zadd): add ZAddLT and ZAddGT (#2429) +- Add: Support for COMMAND LIST command (#2491) +- Add support for BLMPOP (#2442) +- feat: check pipeline.Do to prevent confusion with Exec (#2517) +- Function stats, function kill, fcall and fcall_ro (#2486) +- feat: Add support for CLUSTER SHARDS command (#2507) +- feat(cmd): support for adding byte,bit parameters to the bitpos command (#2498) + +### Fixed + +- fix: eval api cmd.SetFirstKeyPos (#2501) +- fix: limit the number of connections created (#2441) +- fixed #2462 v9 continue support dragonfly, it's Hello command return "NOAUTH Authentication required" error (#2479) +- Fix for internal/hscan/structmap.go:89:23: undefined: reflect.Pointer (#2458) +- fix: group lag can be null (#2448) + +### Maintenance + +- Updating to the latest version of redis (#2508) +- Allowing for running tests on a port other than the fixed 6380 (#2466) +- redis 7.0.8 in tests (#2450) +- docs: Update redisotel example for v9 (#2425) +- chore: update go mod, Upgrade golang.org/x/net version to 0.7.0 (#2476) +- 
chore: add Chinese translation (#2436)
- chore(deps): bump github.com/bsm/gomega from 1.20.0 to 1.26.0 (#2421)
- chore(deps): bump github.com/bsm/ginkgo/v2 from 2.5.0 to 2.7.0 (#2420)
- chore(deps): bump actions/setup-go from 3 to 4 (#2495)
- docs: add instructions for the HSet api (#2503)
- docs: add reading lag field comment (#2451)
- test: update go mod before testing(go mod tidy) (#2423)
- docs: fix comment typo (#2505)
- test: remove testify (#2463)
- refactor: change ListElementCmd to KeyValuesCmd. (#2443)
- fix(appendArg): appendArg case special type (#2489)

## [9.0.2](https://github.com/redis/go-redis/compare/v9.0.1...v9.0.2) (2023-02-01)

-## v6.12

### Features

-- ClusterClient got new option called `ClusterSlots` which allows to build cluster of normal Redis
-  Servers that don't have cluster mode enabled. See
-  https://godoc.org/github.com/go-redis/redis#example-NewClusterClient--ManualSetup
+* upgrade OpenTelemetry, use the new metrics API. ([#2410](https://github.com/redis/go-redis/issues/2410)) ([e29e42c](https://github.com/redis/go-redis/commit/e29e42cde2755ab910d04185025dc43ce6f59c65))
+
+## v9 2023-01-30
+
+### Breaking
+
+- Changed Pipelines to not be thread-safe any more.
+
+### Added
+
+- Added support for the [RESP3](https://github.com/antirez/RESP3/blob/master/spec.md) protocol. It was
+  contributed by @monkey92t, who has done the majority of the work in this release.
+- Added `ContextTimeoutEnabled` option that controls whether the client respects context timeouts
+  and deadlines. See
+  [Redis Timeouts](https://redis.uptrace.dev/guide/go-redis-debugging.html#timeouts) for details.
+- Added `ParseClusterURL` to parse URLs into `ClusterOptions`, for example,
+  `redis://user:password@localhost:6789?dial_timeout=3&read_timeout=6s&addr=localhost:6790&addr=localhost:6791`.
+- Added metrics instrumentation using `redisotel.InstrumentMetrics`. See the
+  [documentation](https://redis.uptrace.dev/guide/go-redis-monitoring.html).
+- Added `redis.HasErrorPrefix` to help when working with errors.
+
+### Changed
+
+- Removed asynchronous cancellation based on the context timeout. It was racy in v8 and is
+  completely gone in v9.
+- Reworked the hook interface and added `DialHook`.
+- Replaced `redisotel.NewTracingHook` with `redisotel.InstrumentTracing`. See the
+  [example](example/otel) and
+  [documentation](https://redis.uptrace.dev/guide/go-redis-monitoring.html).
+- Replaced `*redis.Z` with `redis.Z`, since it is small enough to be passed as a value without
+  making an allocation.
+- Renamed the option `MaxConnAge` to `ConnMaxLifetime`.
+- Renamed the option `IdleTimeout` to `ConnMaxIdleTime`.
+- Removed the connection reaper in favor of `MaxIdleConns`.
+- Removed `WithContext`, since `context.Context` can be passed directly as an arg.
+- Removed `Pipeline.Close`, since there is no real need to explicitly manage pipeline resources and
+  it can be safely reused via `sync.Pool` etc. `Pipeline.Discard` is still available if you want to
+  reset commands for some reason.
+
+### Fixed
+
+- Improved and fixed pipeline retries.
+- As usual, added support for more commands and fixed some bugs.
diff --git a/LICENSE b/LICENSE
index 298bed9b..f4967dbc 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2013 The github.com/go-redis/redis Authors.
+Copyright (c) 2013 The github.com/redis/go-redis Authors.
All rights reserved.
Redistribution and use in source and binary forms, with or without diff --git a/Makefile b/Makefile index a4cfe057..285f65dd 100644 --- a/Makefile +++ b/Makefile @@ -1,11 +1,18 @@ -PACKAGE_DIRS := $(shell find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | sort) +GO_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | sort) test: testdeps - go test ./... - go test ./... -short -race - go test ./... -run=NONE -bench=. -benchmem - env GOOS=linux GOARCH=386 go test ./... - go vet + set -e; for dir in $(GO_MOD_DIRS); do \ + echo "go test in $${dir}"; \ + (cd "$${dir}" && \ + go mod tidy -compat=1.18 && \ + go test && \ + go test ./... -short -race && \ + go test ./... -run=NONE -bench=. -benchmem && \ + env GOOS=linux GOARCH=386 go test && \ + go vet); \ + done + cd internal/customvet && go build . + go vet -vettool ./internal/customvet/customvet testdeps: testdata/redis/src/redis-server @@ -16,20 +23,19 @@ bench: testdeps testdata/redis: mkdir -p $@ - wget -qO- https://download.redis.io/releases/redis-6.2.5.tar.gz | tar xvz --strip-components=1 -C $@ + wget -qO- https://download.redis.io/releases/redis-7.2-rc1.tar.gz | tar xvz --strip-components=1 -C $@ testdata/redis/src/redis-server: testdata/redis cd $< && make all fmt: gofmt -w -s ./ - goimports -w -local github.com/go-redis/redis ./ + goimports -w -local github.com/redis/go-redis ./ go_mod_tidy: - go get -u && go mod tidy - set -e; for dir in $(PACKAGE_DIRS); do \ + set -e; for dir in $(GO_MOD_DIRS); do \ echo "go mod tidy in $${dir}"; \ (cd "$${dir}" && \ - go get -u && \ - go mod tidy); \ + go get -u ./... && \ + go mod tidy -compat=1.18); \ done diff --git a/README.md b/README.md index 2fffbc00..36d60fd4 100644 --- a/README.md +++ b/README.md @@ -1,26 +1,29 @@ # Redis client for Go -![build workflow](https://github.com/go-redis/redis/actions/workflows/build.yml/badge.svg) -[![PkgGoDev](https://pkg.go.dev/badge/github.com/go-redis/redis/v8)](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc) +[![build workflow](https://github.com/redis/go-redis/actions/workflows/build.yml/badge.svg)](https://github.com/redis/go-redis/actions) +[![PkgGoDev](https://pkg.go.dev/badge/github.com/redis/go-redis/v9)](https://pkg.go.dev/github.com/redis/go-redis/v9?tab=doc) [![Documentation](https://img.shields.io/badge/redis-documentation-informational)](https://redis.uptrace.dev/) [![Chat](https://discordapp.com/api/guilds/752070105847955518/widget.png)](https://discord.gg/rWtp5Aj) -go-redis is brought to you by :star: [**uptrace/uptrace**](https://github.com/uptrace/uptrace). -Uptrace is an open source and blazingly fast **distributed tracing** backend powered by -OpenTelemetry and ClickHouse. Give it a star as well! +> go-redis is brought to you by :star: [**uptrace/uptrace**](https://github.com/uptrace/uptrace). +> Uptrace is an open-source APM tool that supports distributed tracing, metrics, and logs. You can +> use it to monitor applications and set up automatic alerts to receive notifications via email, +> Slack, Telegram, and others. +> +> See [OpenTelemetry](example/otel) example which demonstrates how you can use Uptrace to monitor +> go-redis. 
-## Resources
+## Documentation

-- [Discussions](https://github.com/go-redis/redis/discussions)
-- [Documentation](https://redis.uptrace.dev)
-- [Reference](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc)
-- [Examples](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#pkg-examples)
-- [RealWorld example app](https://github.com/uptrace/go-treemux-realworld-example-app)
+- [English](https://redis.uptrace.dev)
+- [简体中文](https://redis.uptrace.dev/zh/)

-Other projects you may like:
+## Resources

-- [Bun](https://bun.uptrace.dev) - fast and simple SQL client for PostgreSQL, MySQL, and SQLite.
-- [BunRouter](https://bunrouter.uptrace.dev/) - fast and flexible HTTP router for Go.
+- [Discussions](https://github.com/redis/go-redis/discussions)
+- [Chat](https://discord.gg/rWtp5Aj)
+- [Reference](https://pkg.go.dev/github.com/redis/go-redis/v9)
+- [Examples](https://pkg.go.dev/github.com/redis/go-redis/v9#pkg-examples)

## Ecosystem

@@ -29,23 +32,20 @@ Other projects you may like:

- [Redis Cache](https://github.com/go-redis/cache)
- [Rate limiting](https://github.com/go-redis/redis_rate)

+This client also works with [Kvrocks](https://github.com/apache/incubator-kvrocks), a distributed
+key-value NoSQL database that uses RocksDB as its storage engine and is compatible with the Redis
+protocol.
+
## Features

- Redis 3 commands except QUIT, MONITOR, and SYNC.
- Automatic connection pooling with
  [circuit breaker](https://en.wikipedia.org/wiki/Circuit_breaker_design_pattern) support.
-- [Pub/Sub](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#PubSub).
-- [Transactions](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client-TxPipeline).
-- [Pipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client.Pipeline) and
-  [TxPipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client.TxPipeline).
-- [Scripting](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#Script).
-- [Timeouts](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#Options).
-- [Redis Sentinel](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewFailoverClient).
-- [Redis Cluster](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewClusterClient).
-- [Cluster of Redis Servers](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-NewClusterClient-ManualSetup)
-  without using cluster mode and Redis Sentinel.
-- [Ring](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewRing).
-- [Instrumentation](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-package-Instrumentation).
+- [Pub/Sub](https://redis.uptrace.dev/guide/go-redis-pubsub.html).
+- [Pipelines and transactions](https://redis.uptrace.dev/guide/go-redis-pipelines.html).
+- [Scripting](https://redis.uptrace.dev/guide/lua-scripting.html).
+- [Redis Sentinel](https://redis.uptrace.dev/guide/go-redis-sentinel.html).
+- [Redis Cluster](https://redis.uptrace.dev/guide/go-redis-cluster.html).
+- [Redis Ring](https://redis.uptrace.dev/guide/ring.html).
+- [Redis Performance Monitoring](https://redis.uptrace.dev/guide/redis-performance-monitoring.html).
## Installation

@@ -57,10 +57,10 @@ module:

go mod init github.com/my/repo
```

-And then install go-redis/v8 (note _v8_ in the import; omitting it is a popular mistake):
+Then install go-redis/**v9**:

```shell
-go get github.com/go-redis/redis/v8
+go get github.com/redis/go-redis/v9
```

## Quickstart

@@ -68,7 +68,7 @@ go get github.com/go-redis/redis/v8

```go
import (
    "context"
-    "github.com/go-redis/redis/v8"
+    "github.com/redis/go-redis/v9"
    "fmt"
)

@@ -146,7 +146,7 @@ go-redis will start a redis-server and run the test cases.

The paths of redis-server bin file and redis config file are defined in `main_test.go`:

-```
+```go
var (
	redisServerBin, _  = filepath.Abs(filepath.Join("testdata", "redis", "src", "redis-server"))
	redisServerConf, _ = filepath.Abs(filepath.Join("testdata", "redis", "redis.conf"))
@@ -156,21 +156,34 @@ var (

For local testing, you can change the variables to refer to your local files, or create a soft link
to the corresponding folder for redis-server and copy the config file to `testdata/redis/`:

-```
+```shell
ln -s /usr/bin/redis-server ./go-redis/testdata/redis/src
cp ./go-redis/testdata/redis.conf ./go-redis/testdata/redis/
```

Lastly, run:

-```
+```shell
go test
```

+Another option is to run your specific tests against an already running Redis. The example below tests against a Redis running on port 9999:
+
+```shell
+REDIS_PORT=9999 go test
+```
+
+## See also
+
+- [Golang ORM](https://bun.uptrace.dev) for PostgreSQL, MySQL, MSSQL, and SQLite
+- [Golang PostgreSQL](https://bun.uptrace.dev/postgres/)
+- [Golang HTTP router](https://bunrouter.uptrace.dev/)
+- [Golang ClickHouse ORM](https://github.com/uptrace/go-clickhouse)
+
## Contributors

Thanks to all the people who already contributed!

-<a href="https://github.com/go-redis/redis/graphs/contributors">
-  <img src="https://contributors-img.web.app/image?repo=go-redis/redis" />
+<a href="https://github.com/redis/go-redis/graphs/contributors">
+  <img src="https://contributors-img.web.app/image?repo=redis/go-redis" />
diff --git a/bench_decode_test.go b/bench_decode_test.go
index b07ad4ed..de53064f 100644
--- a/bench_decode_test.go
+++ b/bench_decode_test.go
@@ -8,7 +8,7 @@ import (
	"testing"
	"time"

-	"github.com/go-redis/redis/v8/internal/proto"
+	"github.com/redis/go-redis/v9/internal/proto"
)

var ctx = context.TODO()

@@ -41,7 +41,7 @@ func NewClusterClientStub(resp []byte) *ClientStub {
	client := NewClusterClient(&ClusterOptions{
		PoolSize: 128,
-		Addrs:    []string{"127.0.0.1:6379"},
+		Addrs:    []string{":6379"},
		Dialer: func(ctx context.Context, network, addr string) (net.Conn, error) {
			return stub.stubConn(initHello), nil
		},
@@ -118,7 +118,7 @@ func BenchmarkDecode(b *testing.B) {
	}

	benchmarks := []Benchmark{
-		{"single", NewClientStub},
+		{"server", NewClientStub},
		{"cluster", NewClusterClientStub},
	}

diff --git a/bench_test.go b/bench_test.go
index df43d890..8e23303f 100644
--- a/bench_test.go
+++ b/bench_test.go
@@ -10,7 +10,7 @@ import (
	"testing"
	"time"

-	"github.com/go-redis/redis/v8"
+	"github.com/redis/go-redis/v9"
)

func benchmarkRedisClient(ctx context.Context, poolSize int) *redis.Client {
@@ -273,36 +273,6 @@ func BenchmarkXRead(b *testing.B) {
	})
}

-var clientSink *redis.Client
-
-func BenchmarkWithContext(b *testing.B) {
-	ctx := context.Background()
-	rdb := benchmarkRedisClient(ctx, 10)
-	defer rdb.Close()
-
-	b.ResetTimer()
-	b.ReportAllocs()
-
-	for i := 0; i < b.N; i++ {
-		clientSink = rdb.WithContext(ctx)
-	}
-}
-
-var ringSink *redis.Ring
-
-func BenchmarkRingWithContext(b *testing.B) {
-	ctx := context.Background()
-	rdb := redis.NewRing(&redis.RingOptions{})
-	defer rdb.Close()
-
-	b.ResetTimer()
-	b.ReportAllocs()
-
-	for i := 0; i < b.N; i++ {
-		ringSink = rdb.WithContext(ctx)
-	}
-}
-
//------------------------------------------------------------------------------ func newClusterScenario() *clusterScenario { @@ -396,17 +366,77 @@ func BenchmarkClusterSetString(b *testing.B) { }) } -var clusterSink *redis.ClusterClient +func BenchmarkExecRingSetAddrsCmd(b *testing.B) { + const ( + ringShard1Name = "ringShardOne" + ringShard2Name = "ringShardTwo" + ) -func BenchmarkClusterWithContext(b *testing.B) { - ctx := context.Background() - rdb := redis.NewClusterClient(&redis.ClusterOptions{}) - defer rdb.Close() + for _, port := range []string{ringShard1Port, ringShard2Port} { + if _, err := startRedis(port); err != nil { + b.Fatal(err) + } + } - b.ResetTimer() - b.ReportAllocs() + b.Cleanup(func() { + for _, p := range processes { + if err := p.Close(); err != nil { + b.Errorf("Failed to stop redis process: %v", err) + } + } + processes = nil + }) + + ring := redis.NewRing(&redis.RingOptions{ + Addrs: map[string]string{ + "ringShardOne": ":" + ringShard1Port, + }, + NewClient: func(opt *redis.Options) *redis.Client { + // Simulate slow shard creation + time.Sleep(100 * time.Millisecond) + return redis.NewClient(opt) + }, + }) + defer ring.Close() + if _, err := ring.Ping(context.Background()).Result(); err != nil { + b.Fatal(err) + } + + // Continuously update addresses by adding and removing one address + updatesDone := make(chan struct{}) + defer func() { close(updatesDone) }() + go func() { + ticker := time.NewTicker(10 * time.Millisecond) + defer ticker.Stop() + for i := 0; ; i++ { + select { + case <-ticker.C: + if i%2 == 0 { + ring.SetAddrs(map[string]string{ + ringShard1Name: ":" + ringShard1Port, + }) + } else { + ring.SetAddrs(map[string]string{ + ringShard1Name: ":" + ringShard1Port, + ringShard2Name: ":" + ringShard2Port, + }) + } + case <-updatesDone: + return + } + } + }() + + b.ResetTimer() for i := 0; i < b.N; i++ { - clusterSink = rdb.WithContext(ctx) + if _, err := ring.Ping(context.Background()).Result(); err != nil { + if err == redis.ErrClosed { + // The shard client could be closed while ping command is in progress + continue + } else { + b.Fatal(err) + } + } } } diff --git a/cluster.go b/cluster.go index 4ba49cac..bf085d65 100644 --- a/cluster.go +++ b/cluster.go @@ -6,17 +6,19 @@ import ( "fmt" "math" "net" + "net/url" "runtime" "sort" + "strings" "sync" "sync/atomic" "time" - "github.com/go-redis/redis/v8/internal" - "github.com/go-redis/redis/v8/internal/hashtag" - "github.com/go-redis/redis/v8/internal/pool" - "github.com/go-redis/redis/v8/internal/proto" - "github.com/go-redis/redis/v8/internal/rand" + "github.com/redis/go-redis/v9/internal" + "github.com/redis/go-redis/v9/internal/hashtag" + "github.com/redis/go-redis/v9/internal/pool" + "github.com/redis/go-redis/v9/internal/proto" + "github.com/redis/go-redis/v9/internal/rand" ) var errClusterNoNodes = fmt.Errorf("redis: cluster has no nodes") @@ -27,6 +29,9 @@ type ClusterOptions struct { // A seed list of host:port addresses of cluster nodes. Addrs []string + // ClientName will execute the `CLIENT SETNAME ClientName` command for each conn. + ClientName string + // NewClient creates a cluster node client with provided name and options. 
NewClient func(opt *Options) *Client

@@ -57,6 +62,7 @@ type ClusterOptions struct {

	OnConnect func(ctx context.Context, cn *Conn) error

+	Protocol int
	Username string
	Password string

@@ -64,20 +70,18 @@ type ClusterOptions struct {
	MinRetryBackoff time.Duration
	MaxRetryBackoff time.Duration

-	DialTimeout  time.Duration
-	ReadTimeout  time.Duration
-	WriteTimeout time.Duration
-
-	// PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
-	PoolFIFO bool
+	DialTimeout           time.Duration
+	ReadTimeout           time.Duration
+	WriteTimeout          time.Duration
+	ContextTimeoutEnabled bool

-	// PoolSize applies per cluster node and not for the whole cluster.
-	PoolSize           int
-	MinIdleConns       int
-	MaxConnAge         time.Duration
-	PoolTimeout        time.Duration
-	IdleTimeout        time.Duration
-	IdleCheckFrequency time.Duration
+	PoolFIFO        bool
+	PoolSize        int // applies per cluster node and not for the whole cluster
+	PoolTimeout     time.Duration
+	MinIdleConns    int
+	MaxIdleConns    int
+	ConnMaxIdleTime time.Duration
+	ConnMaxLifetime time.Duration

	TLSConfig *tls.Config
}
@@ -131,13 +135,137 @@ func (opt *ClusterOptions) init() {
	}
}

-func (opt *ClusterOptions) clientOptions() *Options {
-	const disableIdleCheck = -1
+// ParseClusterURL parses a URL into ClusterOptions that can be used to connect to Redis.
+// The URL must be in the form:
+//
+//	redis://<user>:<password>@<host>:<port>
+//	or
+//	rediss://<user>:<password>@<host>:<port>
+//
+// To add additional addresses, specify the query parameter, "addr" one or more times. e.g:
+//
+//	redis://<user>:<password>@<host>:<port>?addr=<host2>:<port2>&addr=<host3>:<port3>
+//	or
+//	rediss://<user>:<password>@<host>:<port>?addr=<host2>:<port2>&addr=<host3>:<port3>
+//
+// Most Option fields can be set using query parameters, with the following restrictions:
+//   - field names are mapped using snake-case conversion: to set MaxRetries, use max_retries
+//   - only scalar type fields are supported (bool, int, time.Duration)
+//   - for time.Duration fields, values must be a valid input for time.ParseDuration();
+//     additionally a plain integer as value (i.e. without unit) is interpreted as seconds
+//   - to disable a duration field, use value less than or equal to 0; to use the default
+//     value, leave the value blank or remove the parameter
+//   - only the last value is interpreted if a parameter is given multiple times
+//   - fields "network", "addr", "username" and "password" can only be set using other
+//     URL attributes (scheme, host, userinfo, resp.); query parameters using these
+//     names will be treated as unknown parameters
+//   - unknown parameter names will result in an error
+//
+// Example:
+//
+//	redis://user:password@localhost:6789?dial_timeout=3&read_timeout=6s&addr=localhost:6790&addr=localhost:6791
+//	is equivalent to:
+//	&ClusterOptions{
+//		Addrs:       ["localhost:6789", "localhost:6790", "localhost:6791"]
+//		DialTimeout: 3 * time.Second, // no time unit = seconds
+//		ReadTimeout: 6 * time.Second,
+//	}
+func ParseClusterURL(redisURL string) (*ClusterOptions, error) {
+	o := &ClusterOptions{}
+
+	u, err := url.Parse(redisURL)
+	if err != nil {
+		return nil, err
+	}
+
+	// add base URL to the array of addresses
+	// more addresses may be added through the URL params
+	h, p := getHostPortWithDefaults(u)
+	o.Addrs = append(o.Addrs, net.JoinHostPort(h, p))
+
+	// setup username, password, and other configurations
+	o, err = setupClusterConn(u, h, o)
+	if err != nil {
+		return nil, err
+	}
+
+	return o, nil
+}
+
+// setupClusterConn gets the username and password from the URL and the query parameters.
+func setupClusterConn(u *url.URL, host string, o *ClusterOptions) (*ClusterOptions, error) { + switch u.Scheme { + case "rediss": + o.TLSConfig = &tls.Config{ServerName: host} + fallthrough + case "redis": + o.Username, o.Password = getUserPassword(u) + default: + return nil, fmt.Errorf("redis: invalid URL scheme: %s", u.Scheme) + } + + // retrieve the configuration from the query parameters + o, err := setupClusterQueryParams(u, o) + if err != nil { + return nil, err + } + + return o, nil +} + +// setupClusterQueryParams converts query parameters in u to option value in o. +func setupClusterQueryParams(u *url.URL, o *ClusterOptions) (*ClusterOptions, error) { + q := queryOptions{q: u.Query()} + + o.Protocol = q.int("protocol") + o.ClientName = q.string("client_name") + o.MaxRedirects = q.int("max_redirects") + o.ReadOnly = q.bool("read_only") + o.RouteByLatency = q.bool("route_by_latency") + o.RouteRandomly = q.bool("route_randomly") + o.MaxRetries = q.int("max_retries") + o.MinRetryBackoff = q.duration("min_retry_backoff") + o.MaxRetryBackoff = q.duration("max_retry_backoff") + o.DialTimeout = q.duration("dial_timeout") + o.ReadTimeout = q.duration("read_timeout") + o.WriteTimeout = q.duration("write_timeout") + o.PoolFIFO = q.bool("pool_fifo") + o.PoolSize = q.int("pool_size") + o.MinIdleConns = q.int("min_idle_conns") + o.PoolTimeout = q.duration("pool_timeout") + o.ConnMaxLifetime = q.duration("conn_max_lifetime") + o.ConnMaxIdleTime = q.duration("conn_max_idle_time") + + if q.err != nil { + return nil, q.err + } + + // addr can be specified as many times as needed + addrs := q.strings("addr") + for _, addr := range addrs { + h, p, err := net.SplitHostPort(addr) + if err != nil || h == "" || p == "" { + return nil, fmt.Errorf("redis: unable to parse addr param: %s", addr) + } + + o.Addrs = append(o.Addrs, net.JoinHostPort(h, p)) + } + + // any parameters left? 
+ if r := q.remaining(); len(r) > 0 { + return nil, fmt.Errorf("redis: unexpected option: %s", strings.Join(r, ", ")) + } + + return o, nil +} + +func (opt *ClusterOptions) clientOptions() *Options { return &Options{ - Dialer: opt.Dialer, - OnConnect: opt.OnConnect, + ClientName: opt.ClientName, + Dialer: opt.Dialer, + OnConnect: opt.OnConnect, + Protocol: opt.Protocol, Username: opt.Username, Password: opt.Password, @@ -149,13 +277,13 @@ func (opt *ClusterOptions) clientOptions() *Options { ReadTimeout: opt.ReadTimeout, WriteTimeout: opt.WriteTimeout, - PoolFIFO: opt.PoolFIFO, - PoolSize: opt.PoolSize, - MinIdleConns: opt.MinIdleConns, - MaxConnAge: opt.MaxConnAge, - PoolTimeout: opt.PoolTimeout, - IdleTimeout: opt.IdleTimeout, - IdleCheckFrequency: disableIdleCheck, + PoolFIFO: opt.PoolFIFO, + PoolSize: opt.PoolSize, + PoolTimeout: opt.PoolTimeout, + MinIdleConns: opt.MinIdleConns, + MaxIdleConns: opt.MaxIdleConns, + ConnMaxIdleTime: opt.ConnMaxIdleTime, + ConnMaxLifetime: opt.ConnMaxLifetime, TLSConfig: opt.TLSConfig, // If ClusterSlots is populated, then we probably have an artificial @@ -204,15 +332,26 @@ func (n *clusterNode) updateLatency() { const numProbe = 10 var dur uint64 + successes := 0 for i := 0; i < numProbe; i++ { time.Sleep(time.Duration(10+rand.Intn(10)) * time.Millisecond) start := time.Now() - n.Client.Ping(context.TODO()) - dur += uint64(time.Since(start) / time.Microsecond) + err := n.Client.Ping(context.TODO()).Err() + if err == nil { + dur += uint64(time.Since(start) / time.Microsecond) + successes++ + } } - latency := float64(dur) / float64(numProbe) + var latency float64 + if successes == 0 { + // If none of the pings worked, set latency to some arbitrarily high value so this node gets + // least priority. + latency = float64((1 * time.Minute) / time.Microsecond) + } else { + latency = float64(dur) / float64(successes) + } atomic.StoreUint32(&n.latency, uint32(latency+0.5)) } @@ -266,6 +405,7 @@ type clusterNodes struct { nodes map[string]*clusterNode activeAddrs []string closed bool + onNewNode []func(rdb *Client) _generation uint32 // atomic } @@ -301,6 +441,12 @@ func (c *clusterNodes) Close() error { return firstErr } +func (c *clusterNodes) OnNewNode(fn func(rdb *Client)) { + c.mu.Lock() + c.onNewNode = append(c.onNewNode, fn) + c.mu.Unlock() +} + func (c *clusterNodes) Addrs() ([]string, error) { var addrs []string @@ -378,6 +524,9 @@ func (c *clusterNodes) GetOrCreate(addr string) (*clusterNode, error) { } node = newClusterNode(c.opt, addr) + for _, fn := range c.onNewNode { + fn(node.Client) + } c.addrs = appendIfNotExists(c.addrs, addr) c.nodes[addr] = node @@ -687,21 +836,16 @@ func (c *clusterStateHolder) ReloadOrGet(ctx context.Context) (*clusterState, er //------------------------------------------------------------------------------ -type clusterClient struct { - opt *ClusterOptions - nodes *clusterNodes - state *clusterStateHolder //nolint:structcheck - cmdsInfoCache *cmdsInfoCache //nolint:structcheck -} - // ClusterClient is a Redis Cluster client representing a pool of zero // or more underlying connections. It's safe for concurrent use by // multiple goroutines. 
type ClusterClient struct { - *clusterClient + opt *ClusterOptions + nodes *clusterNodes + state *clusterStateHolder + cmdsInfoCache *cmdsInfoCache cmdable - hooks - ctx context.Context + hooksMixin } // NewClusterClient returns a Redis Cluster client as described in @@ -710,38 +854,24 @@ func NewClusterClient(opt *ClusterOptions) *ClusterClient { opt.init() c := &ClusterClient{ - clusterClient: &clusterClient{ - opt: opt, - nodes: newClusterNodes(opt), - }, - ctx: context.Background(), + opt: opt, + nodes: newClusterNodes(opt), } + c.state = newClusterStateHolder(c.loadState) c.cmdsInfoCache = newCmdsInfoCache(c.cmdsInfo) c.cmdable = c.Process - if opt.IdleCheckFrequency > 0 { - go c.reaper(opt.IdleCheckFrequency) - } + c.initHooks(hooks{ + dial: nil, + process: c.process, + pipeline: c.processPipeline, + txPipeline: c.processTxPipeline, + }) return c } -func (c *ClusterClient) Context() context.Context { - return c.ctx -} - -func (c *ClusterClient) WithContext(ctx context.Context) *ClusterClient { - if ctx == nil { - panic("nil context") - } - clone := *c - clone.cmdable = clone.Process - clone.hooks.lock() - clone.ctx = ctx - return &clone -} - // Options returns read-only Options that were used to create the client. func (c *ClusterClient) Options() *ClusterOptions { return c.opt @@ -761,7 +891,7 @@ func (c *ClusterClient) Close() error { return c.nodes.Close() } -// Do creates a Cmd from the args and processes the cmd. +// Do create a Cmd from the args and processes the cmd. func (c *ClusterClient) Do(ctx context.Context, args ...interface{}) *Cmd { cmd := NewCmd(ctx, args...) _ = c.Process(ctx, cmd) @@ -769,13 +899,14 @@ func (c *ClusterClient) Do(ctx context.Context, args ...interface{}) *Cmd { } func (c *ClusterClient) Process(ctx context.Context, cmd Cmder) error { - return c.hooks.process(ctx, cmd, c.process) + err := c.processHook(ctx, cmd) + cmd.SetErr(err) + return err } func (c *ClusterClient) process(ctx context.Context, cmd Cmder) error { - cmdInfo := c.cmdInfo(cmd.Name()) - slot := c.cmdSlot(cmd) - + cmdInfo := c.cmdInfo(ctx, cmd.Name()) + slot := c.cmdSlot(ctx, cmd) var node *clusterNode var ask bool var lastErr error @@ -795,11 +926,12 @@ func (c *ClusterClient) process(ctx context.Context, cmd Cmder) error { } if ask { + ask = false + pipe := node.Client.Pipeline() _ = pipe.Process(ctx, NewCmd(ctx, "asking")) _ = pipe.Process(ctx, cmd) _, lastErr = pipe.Exec(ctx) - ask = false } else { lastErr = node.Client.Process(ctx, cmd) } @@ -854,6 +986,10 @@ func (c *ClusterClient) process(ctx context.Context, cmd Cmder) error { return lastErr } +func (c *ClusterClient) OnNewNode(fn func(rdb *Client)) { + c.nodes.OnNewNode(fn) +} + // ForEachMaster concurrently calls the fn on each master node in the cluster. // It returns the first error if any. func (c *ClusterClient) ForEachMaster( @@ -1059,30 +1195,9 @@ func (c *ClusterClient) loadState(ctx context.Context) (*clusterState, error) { return nil, firstErr } -// reaper closes idle connections to the cluster. 
-func (c *ClusterClient) reaper(idleCheckFrequency time.Duration) { - ticker := time.NewTicker(idleCheckFrequency) - defer ticker.Stop() - - for range ticker.C { - nodes, err := c.nodes.All() - if err != nil { - break - } - - for _, node := range nodes { - _, err := node.Client.connPool.(*pool.ConnPool).ReapStaleConns() - if err != nil { - internal.Logger.Printf(c.Context(), "ReapStaleConns failed: %s", err) - } - } - } -} - func (c *ClusterClient) Pipeline() Pipeliner { pipe := Pipeline{ - ctx: c.ctx, - exec: c.processPipeline, + exec: pipelineExecer(c.processPipelineHook), } pipe.init() return &pipe @@ -1093,13 +1208,9 @@ func (c *ClusterClient) Pipelined(ctx context.Context, fn func(Pipeliner) error) } func (c *ClusterClient) processPipeline(ctx context.Context, cmds []Cmder) error { - return c.hooks.processPipeline(ctx, cmds, c._processPipeline) -} - -func (c *ClusterClient) _processPipeline(ctx context.Context, cmds []Cmder) error { cmdsMap := newCmdsMap() - err := c.mapCmdsByNode(ctx, cmdsMap, cmds) - if err != nil { + + if err := c.mapCmdsByNode(ctx, cmdsMap, cmds); err != nil { setCmdsErr(cmds, err) return err } @@ -1119,18 +1230,7 @@ func (c *ClusterClient) _processPipeline(ctx context.Context, cmds []Cmder) erro wg.Add(1) go func(node *clusterNode, cmds []Cmder) { defer wg.Done() - - err := c._processPipelineNode(ctx, node, cmds, failedCmds) - if err == nil { - return - } - if attempt < c.opt.MaxRedirects { - if err := c.mapCmdsByNode(ctx, failedCmds, cmds); err != nil { - setCmdsErr(cmds, err) - } - } else { - setCmdsErr(cmds, err) - } + c.processPipelineNode(ctx, node, cmds, failedCmds) }(node, cmds) } @@ -1150,9 +1250,9 @@ func (c *ClusterClient) mapCmdsByNode(ctx context.Context, cmdsMap *cmdsMap, cmd return err } - if c.opt.ReadOnly && c.cmdsAreReadOnly(cmds) { + if c.opt.ReadOnly && c.cmdsAreReadOnly(ctx, cmds) { for _, cmd := range cmds { - slot := c.cmdSlot(cmd) + slot := c.cmdSlot(ctx, cmd) node, err := c.slotReadOnlyNode(state, slot) if err != nil { return err @@ -1163,7 +1263,7 @@ func (c *ClusterClient) mapCmdsByNode(ctx context.Context, cmdsMap *cmdsMap, cmd } for _, cmd := range cmds { - slot := c.cmdSlot(cmd) + slot := c.cmdSlot(ctx, cmd) node, err := state.slotMasterNode(slot) if err != nil { return err @@ -1173,9 +1273,9 @@ func (c *ClusterClient) mapCmdsByNode(ctx context.Context, cmdsMap *cmdsMap, cmd return nil } -func (c *ClusterClient) cmdsAreReadOnly(cmds []Cmder) bool { +func (c *ClusterClient) cmdsAreReadOnly(ctx context.Context, cmds []Cmder) bool { for _, cmd := range cmds { - cmdInfo := c.cmdInfo(cmd.Name()) + cmdInfo := c.cmdInfo(ctx, cmd.Name()) if cmdInfo == nil || !cmdInfo.ReadOnly { return false } @@ -1183,22 +1283,42 @@ func (c *ClusterClient) cmdsAreReadOnly(cmds []Cmder) bool { return true } -func (c *ClusterClient) _processPipelineNode( +func (c *ClusterClient) processPipelineNode( ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap, +) { + _ = node.Client.withProcessPipelineHook(ctx, cmds, func(ctx context.Context, cmds []Cmder) error { + cn, err := node.Client.getConn(ctx) + if err != nil { + _ = c.mapCmdsByNode(ctx, failedCmds, cmds) + setCmdsErr(cmds, err) + return err + } + + var processErr error + defer func() { + node.Client.releaseConn(ctx, cn, processErr) + }() + processErr = c.processPipelineNodeConn(ctx, node, cn, cmds, failedCmds) + + return processErr + }) +} + +func (c *ClusterClient) processPipelineNodeConn( + ctx context.Context, node *clusterNode, cn *pool.Conn, cmds []Cmder, failedCmds *cmdsMap, ) error 
{ - return node.Client.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error { - return node.Client.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error { - err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error { - return writeCmds(wr, cmds) - }) - if err != nil { - return err - } + if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error { + return writeCmds(wr, cmds) + }); err != nil { + if shouldRetry(err, true) { + _ = c.mapCmdsByNode(ctx, failedCmds, cmds) + } + setCmdsErr(cmds, err) + return err + } - return cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error { - return c.pipelineReadCmds(ctx, node, rd, cmds, failedCmds) - }) - }) + return cn.WithReader(c.context(ctx), c.opt.ReadTimeout, func(rd *proto.Reader) error { + return c.pipelineReadCmds(ctx, node, rd, cmds, failedCmds) }) } @@ -1209,7 +1329,7 @@ func (c *ClusterClient) pipelineReadCmds( cmds []Cmder, failedCmds *cmdsMap, ) error { - for _, cmd := range cmds { + for i, cmd := range cmds { err := cmd.readReply(rd) cmd.SetErr(err) @@ -1221,15 +1341,24 @@ func (c *ClusterClient) pipelineReadCmds( continue } - if c.opt.ReadOnly && isLoadingError(err) { + if c.opt.ReadOnly { node.MarkAsFailing() - return err } - if isRedisError(err) { - continue + + if !isRedisError(err) { + if shouldRetry(err, true) { + _ = c.mapCmdsByNode(ctx, failedCmds, cmds) + } + setCmdsErr(cmds[i+1:], err) + return err } + } + + if err := cmds[0].Err(); err != nil && shouldRetry(err, true) { + _ = c.mapCmdsByNode(ctx, failedCmds, cmds) return err } + return nil } @@ -1263,8 +1392,10 @@ func (c *ClusterClient) checkMovedErr( // TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC. func (c *ClusterClient) TxPipeline() Pipeliner { pipe := Pipeline{ - ctx: c.ctx, - exec: c.processTxPipeline, + exec: func(ctx context.Context, cmds []Cmder) error { + cmds = wrapMultiExec(ctx, cmds) + return c.processTxPipelineHook(ctx, cmds) + }, } pipe.init() return &pipe @@ -1275,10 +1406,6 @@ func (c *ClusterClient) TxPipelined(ctx context.Context, fn func(Pipeliner) erro } func (c *ClusterClient) processTxPipeline(ctx context.Context, cmds []Cmder) error { - return c.hooks.processTxPipeline(ctx, cmds, c._processTxPipeline) -} - -func (c *ClusterClient) _processTxPipeline(ctx context.Context, cmds []Cmder) error { // Trim multi .. exec. 
cmds = cmds[1 : len(cmds)-1] @@ -1288,7 +1415,7 @@ func (c *ClusterClient) _processTxPipeline(ctx context.Context, cmds []Cmder) er return err } - cmdsMap := c.mapCmdsBySlot(cmds) + cmdsMap := c.mapCmdsBySlot(ctx, cmds) for slot, cmds := range cmdsMap { node, err := state.slotMasterNode(slot) if err != nil { @@ -1312,19 +1439,7 @@ func (c *ClusterClient) _processTxPipeline(ctx context.Context, cmds []Cmder) er wg.Add(1) go func(node *clusterNode, cmds []Cmder) { defer wg.Done() - - err := c._processTxPipelineNode(ctx, node, cmds, failedCmds) - if err == nil { - return - } - - if attempt < c.opt.MaxRedirects { - if err := c.mapCmdsByNode(ctx, failedCmds, cmds); err != nil { - setCmdsErr(cmds, err) - } - } else { - setCmdsErr(cmds, err) - } + c.processTxPipelineNode(ctx, node, cmds, failedCmds) }(node, cmds) } @@ -1339,44 +1454,69 @@ func (c *ClusterClient) _processTxPipeline(ctx context.Context, cmds []Cmder) er return cmdsFirstErr(cmds) } -func (c *ClusterClient) mapCmdsBySlot(cmds []Cmder) map[int][]Cmder { +func (c *ClusterClient) mapCmdsBySlot(ctx context.Context, cmds []Cmder) map[int][]Cmder { cmdsMap := make(map[int][]Cmder) for _, cmd := range cmds { - slot := c.cmdSlot(cmd) + slot := c.cmdSlot(ctx, cmd) cmdsMap[slot] = append(cmdsMap[slot], cmd) } return cmdsMap } -func (c *ClusterClient) _processTxPipelineNode( +func (c *ClusterClient) processTxPipelineNode( ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap, +) { + cmds = wrapMultiExec(ctx, cmds) + _ = node.Client.withProcessPipelineHook(ctx, cmds, func(ctx context.Context, cmds []Cmder) error { + cn, err := node.Client.getConn(ctx) + if err != nil { + _ = c.mapCmdsByNode(ctx, failedCmds, cmds) + setCmdsErr(cmds, err) + return err + } + + var processErr error + defer func() { + node.Client.releaseConn(ctx, cn, processErr) + }() + processErr = c.processTxPipelineNodeConn(ctx, node, cn, cmds, failedCmds) + + return processErr + }) +} + +func (c *ClusterClient) processTxPipelineNodeConn( + ctx context.Context, node *clusterNode, cn *pool.Conn, cmds []Cmder, failedCmds *cmdsMap, ) error { - return node.Client.hooks.processTxPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error { - return node.Client.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error { - err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error { - return writeCmds(wr, cmds) - }) - if err != nil { - return err + if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error { + return writeCmds(wr, cmds) + }); err != nil { + if shouldRetry(err, true) { + _ = c.mapCmdsByNode(ctx, failedCmds, cmds) + } + setCmdsErr(cmds, err) + return err + } + + return cn.WithReader(c.context(ctx), c.opt.ReadTimeout, func(rd *proto.Reader) error { + statusCmd := cmds[0].(*StatusCmd) + // Trim multi and exec. + trimmedCmds := cmds[1 : len(cmds)-1] + + if err := c.txPipelineReadQueued( + ctx, rd, statusCmd, trimmedCmds, failedCmds, + ); err != nil { + setCmdsErr(cmds, err) + + moved, ask, addr := isMovedError(err) + if moved || ask { + return c.cmdsMoved(ctx, trimmedCmds, moved, ask, addr, failedCmds) } - return cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error { - statusCmd := cmds[0].(*StatusCmd) - // Trim multi and exec. 
- cmds = cmds[1 : len(cmds)-1] - - err := c.txPipelineReadQueued(ctx, rd, statusCmd, cmds, failedCmds) - if err != nil { - moved, ask, addr := isMovedError(err) - if moved || ask { - return c.cmdsMoved(ctx, cmds, moved, ask, addr, failedCmds) - } - return err - } + return err + } - return pipelineReadCmds(rd, cmds) - }) - }) + return pipelineReadCmds(rd, trimmedCmds) }) } @@ -1566,6 +1706,15 @@ func (c *ClusterClient) PSubscribe(ctx context.Context, channels ...string) *Pub return pubsub } +// SSubscribe Subscribes the client to the specified shard channels. +func (c *ClusterClient) SSubscribe(ctx context.Context, channels ...string) *PubSub { + pubsub := c.pubSub() + if len(channels) > 0 { + _ = pubsub.SSubscribe(ctx, channels...) + } + return pubsub +} + func (c *ClusterClient) retryBackoff(attempt int) time.Duration { return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff) } @@ -1612,26 +1761,27 @@ func (c *ClusterClient) cmdsInfo(ctx context.Context) (map[string]*CommandInfo, return nil, firstErr } -func (c *ClusterClient) cmdInfo(name string) *CommandInfo { - cmdsInfo, err := c.cmdsInfoCache.Get(c.ctx) +func (c *ClusterClient) cmdInfo(ctx context.Context, name string) *CommandInfo { + cmdsInfo, err := c.cmdsInfoCache.Get(ctx) if err != nil { + internal.Logger.Printf(context.TODO(), "getting command info: %s", err) return nil } info := cmdsInfo[name] if info == nil { - internal.Logger.Printf(c.Context(), "info for cmd=%s not found", name) + internal.Logger.Printf(context.TODO(), "info for cmd=%s not found", name) } return info } -func (c *ClusterClient) cmdSlot(cmd Cmder) int { +func (c *ClusterClient) cmdSlot(ctx context.Context, cmd Cmder) int { args := cmd.Args() if args[0] == "cluster" && args[1] == "getkeysinslot" { return args[2].(int) } - cmdInfo := c.cmdInfo(cmd.Name()) + cmdInfo := c.cmdInfo(ctx, cmd.Name()) return cmdSlot(cmd, cmdFirstKeyPos(cmd, cmdInfo)) } @@ -1659,7 +1809,7 @@ func (c *ClusterClient) cmdNode( return state.slotMasterNode(slot) } -func (c *clusterClient) slotReadOnlyNode(state *clusterState, slot int) (*clusterNode, error) { +func (c *ClusterClient) slotReadOnlyNode(state *clusterState, slot int) (*clusterNode, error) { if c.opt.RouteByLatency { return state.slotClosestNode(slot) } @@ -1706,6 +1856,13 @@ func (c *ClusterClient) MasterForKey(ctx context.Context, key string) (*Client, return node.Client, err } +func (c *ClusterClient) context(ctx context.Context) context.Context { + if c.opt.ContextTimeoutEnabled { + return ctx + } + return context.Background() +} + func appendUniqueNode(nodes []*clusterNode, node *clusterNode) []*clusterNode { for _, n := range nodes { if n == node { diff --git a/cluster_commands.go b/cluster_commands.go index 085bce83..b13f8e7e 100644 --- a/cluster_commands.go +++ b/cluster_commands.go @@ -8,7 +8,7 @@ import ( func (c *ClusterClient) DBSize(ctx context.Context) *IntCmd { cmd := NewIntCmd(ctx, "dbsize") - _ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error { + _ = c.withProcessHook(ctx, cmd, func(ctx context.Context, _ Cmder) error { var size int64 err := c.ForEachMaster(ctx, func(ctx context.Context, master *Client) error { n, err := master.DBSize(ctx).Result() @@ -30,8 +30,8 @@ func (c *ClusterClient) DBSize(ctx context.Context) *IntCmd { func (c *ClusterClient) ScriptLoad(ctx context.Context, script string) *StringCmd { cmd := NewStringCmd(ctx, "script", "load", script) - _ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error { - mu := &sync.Mutex{} + _ = 
c.withProcessHook(ctx, cmd, func(ctx context.Context, _ Cmder) error { + var mu sync.Mutex err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error { val, err := shard.ScriptLoad(ctx, script).Result() if err != nil { @@ -56,7 +56,7 @@ func (c *ClusterClient) ScriptLoad(ctx context.Context, script string) *StringCm func (c *ClusterClient) ScriptFlush(ctx context.Context) *StatusCmd { cmd := NewStatusCmd(ctx, "script", "flush") - _ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error { + _ = c.withProcessHook(ctx, cmd, func(ctx context.Context, _ Cmder) error { err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error { return shard.ScriptFlush(ctx).Err() }) @@ -82,8 +82,8 @@ func (c *ClusterClient) ScriptExists(ctx context.Context, hashes ...string) *Boo result[i] = true } - _ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error { - mu := &sync.Mutex{} + _ = c.withProcessHook(ctx, cmd, func(ctx context.Context, _ Cmder) error { + var mu sync.Mutex err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error { val, err := shard.ScriptExists(ctx, hashes...).Result() if err != nil { diff --git a/cluster_test.go b/cluster_test.go index 2d2021cf..d3b4474a 100644 --- a/cluster_test.go +++ b/cluster_test.go @@ -2,6 +2,8 @@ package redis_test import ( "context" + "crypto/tls" + "errors" "fmt" "net" "strconv" @@ -9,11 +11,10 @@ import ( "sync" "time" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - "github.com/go-redis/redis/v8" - "github.com/go-redis/redis/v8/internal/hashtag" + . "github.com/bsm/ginkgo/v2" + . "github.com/bsm/gomega" + "github.com/redis/go-redis/v9" + "github.com/redis/go-redis/v9/internal/hashtag" ) type clusterScenario struct { @@ -82,8 +83,10 @@ func (s *clusterScenario) newClusterClient( func (s *clusterScenario) Close() error { for _, port := range s.ports { - processes[port].Close() - delete(processes, port) + if process, ok := processes[port]; ok { + process.Close() + delete(processes, port) + } } return nil } @@ -237,14 +240,6 @@ var _ = Describe("ClusterClient", func() { var client *redis.ClusterClient assertClusterClient := func() { - It("supports WithContext", func() { - ctx, cancel := context.WithCancel(ctx) - cancel() - - err := client.Ping(ctx).Err() - Expect(err).To(MatchError("context canceled")) - }) - It("should GET/SET/DEL", func() { err := client.Get(ctx, "A").Err() Expect(err).To(Equal(redis.Nil)) @@ -555,6 +550,30 @@ var _ = Describe("ClusterClient", func() { }, 30*time.Second).ShouldNot(HaveOccurred()) }) + It("supports sharded PubSub", func() { + pubsub := client.SSubscribe(ctx, "mychannel") + defer pubsub.Close() + + Eventually(func() error { + _, err := client.SPublish(ctx, "mychannel", "hello").Result() + if err != nil { + return err + } + + msg, err := pubsub.ReceiveTimeout(ctx, time.Second) + if err != nil { + return err + } + + _, ok := msg.(*redis.Message) + if !ok { + return fmt.Errorf("got %T, wanted *redis.Message", msg) + } + + return nil + }, 30*time.Second).ShouldNot(HaveOccurred()) + }) + It("supports PubSub.Ping without channels", func() { pubsub := client.Subscribe(ctx) defer pubsub.Close() @@ -564,9 +583,39 @@ var _ = Describe("ClusterClient", func() { }) } + Describe("ClusterClient PROTO 2", func() { + BeforeEach(func() { + opt = redisClusterOptions() + opt.Protocol = 2 + client = cluster.newClusterClient(ctx, opt) + + err := client.ForEachMaster(ctx, func(ctx context.Context, master *redis.Client) error { + return master.FlushDB(ctx).Err() + }) + 
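// Note: v9 negotiates RESP3 through HELLO by default; setting Protocol = 2
+ // above pins the connection to RESP2. A standalone sketch of the same
+ // opt-out (address illustrative):
+ //
+ //	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379", Protocol: 2})
+ //	fmt.Println(rdb.Do(ctx, "HELLO").Val()) // flat RESP2 array containing "proto", 2
+ 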
Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + _ = client.ForEachMaster(ctx, func(ctx context.Context, master *redis.Client) error { + return master.FlushDB(ctx).Err() + }) + Expect(client.Close()).NotTo(HaveOccurred()) + }) + + It("should CLUSTER PROTO 2", func() { + _ = client.ForEachShard(ctx, func(ctx context.Context, c *redis.Client) error { + val, err := c.Do(ctx, "HELLO").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(val).Should(ContainElements("proto", int64(2))) + return nil + }) + }) + }) + Describe("ClusterClient", func() { BeforeEach(func() { opt = redisClusterOptions() + opt.ClientName = "cluster_hi" client = cluster.newClusterClient(ctx, opt) err := client.ForEachMaster(ctx, func(ctx context.Context, master *redis.Client) error { @@ -657,6 +706,90 @@ var _ = Describe("ClusterClient", func() { Expect(assertSlotsEqual(res, wanted)).NotTo(HaveOccurred()) }) + It("should CLUSTER SHARDS", func() { + res, err := client.ClusterShards(ctx).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res).NotTo(BeEmpty()) + + // Iterate over the ClusterShard results and validate the fields. + for _, shard := range res { + Expect(shard.Slots).NotTo(BeEmpty()) + for _, slotRange := range shard.Slots { + Expect(slotRange.Start).To(BeNumerically(">=", 0)) + Expect(slotRange.End).To(BeNumerically(">=", slotRange.Start)) + } + + Expect(shard.Nodes).NotTo(BeEmpty()) + for _, node := range shard.Nodes { + Expect(node.ID).NotTo(BeEmpty()) + Expect(node.Endpoint).NotTo(BeEmpty()) + Expect(node.IP).NotTo(BeEmpty()) + Expect(node.Port).To(BeNumerically(">", 0)) + + validRoles := []string{"master", "slave", "replica"} + Expect(validRoles).To(ContainElement(node.Role)) + + Expect(node.ReplicationOffset).To(BeNumerically(">=", 0)) + + validHealthStatuses := []string{"online", "failed", "loading"} + Expect(validHealthStatuses).To(ContainElement(node.Health)) + } + } + }) + + It("should CLUSTER LINKS", func() { + res, err := client.ClusterLinks(ctx).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res).NotTo(BeEmpty()) + + // Iterate over the ClusterLink results and validate the map keys. 
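+ // A single link is a map; shape for reference (all values illustrative,
+ // not from a live server):
+ //
+ //	direction: "to", node: "9fd8800b31d569538a2dfcad7baa18349cf01b38",
+ //	create-time: 1639442739375, events: "rw",
+ //	send-buffer-allocated: 4096, send-buffer-used: 0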
+ for _, link := range res { + + Expect(link.Direction).NotTo(BeEmpty()) + Expect([]string{"from", "to"}).To(ContainElement(link.Direction)) + Expect(link.Node).NotTo(BeEmpty()) + Expect(link.CreateTime).To(BeNumerically(">", 0)) + + Expect(link.Events).NotTo(BeEmpty()) + validEventChars := []rune{'r', 'w'} + for _, eventChar := range link.Events { + Expect(validEventChars).To(ContainElement(eventChar)) + } + + Expect(link.SendBufferAllocated).To(BeNumerically(">=", 0)) + Expect(link.SendBufferUsed).To(BeNumerically(">=", 0)) + } + }) + + It("should cluster client setname", func() { + err := client.ForEachShard(ctx, func(ctx context.Context, c *redis.Client) error { + return c.Ping(ctx).Err() + }) + Expect(err).NotTo(HaveOccurred()) + + _ = client.ForEachShard(ctx, func(ctx context.Context, c *redis.Client) error { + val, err := c.ClientList(ctx).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(val).Should(ContainSubstring("name=cluster_hi")) + return nil + }) + }) + + It("should CLUSTER PROTO 3", func() { + _ = client.ForEachShard(ctx, func(ctx context.Context, c *redis.Client) error { + val, err := c.Do(ctx, "HELLO").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(val).Should(HaveKeyWithValue("proto", int64(3))) + return nil + }) + }) + + It("should CLUSTER MYSHARDID", func() { + shardID, err := client.ClusterMyShardID(ctx).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(shardID).ToNot(BeEmpty()) + }) + It("should CLUSTER NODES", func() { res, err := client.ClusterNodes(ctx).Result() Expect(err).NotTo(HaveOccurred()) @@ -733,6 +866,9 @@ var _ = Describe("ClusterClient", func() { }) It("supports Process hook", func() { + testCtx, cancel := context.WithCancel(ctx) + defer cancel() + err := client.Ping(ctx).Err() Expect(err).NotTo(HaveOccurred()) @@ -744,29 +880,47 @@ var _ = Describe("ClusterClient", func() { var stack []string clusterHook := &hook{ - beforeProcess: func(ctx context.Context, cmd redis.Cmder) (context.Context, error) { - Expect(cmd.String()).To(Equal("ping: ")) - stack = append(stack, "cluster.BeforeProcess") - return ctx, nil - }, - afterProcess: func(ctx context.Context, cmd redis.Cmder) error { - Expect(cmd.String()).To(Equal("ping: PONG")) - stack = append(stack, "cluster.AfterProcess") - return nil + processHook: func(hook redis.ProcessHook) redis.ProcessHook { + return func(ctx context.Context, cmd redis.Cmder) error { + select { + case <-testCtx.Done(): + return hook(ctx, cmd) + default: + } + + Expect(cmd.String()).To(Equal("ping: ")) + stack = append(stack, "cluster.BeforeProcess") + + err := hook(ctx, cmd) + + Expect(cmd.String()).To(Equal("ping: PONG")) + stack = append(stack, "cluster.AfterProcess") + + return err + } }, } client.AddHook(clusterHook) nodeHook := &hook{ - beforeProcess: func(ctx context.Context, cmd redis.Cmder) (context.Context, error) { - Expect(cmd.String()).To(Equal("ping: ")) - stack = append(stack, "shard.BeforeProcess") - return ctx, nil - }, - afterProcess: func(ctx context.Context, cmd redis.Cmder) error { - Expect(cmd.String()).To(Equal("ping: PONG")) - stack = append(stack, "shard.AfterProcess") - return nil + processHook: func(hook redis.ProcessHook) redis.ProcessHook { + return func(ctx context.Context, cmd redis.Cmder) error { + select { + case <-testCtx.Done(): + return hook(ctx, cmd) + default: + } + + Expect(cmd.String()).To(Equal("ping: ")) + stack = append(stack, "shard.BeforeProcess") + + err := hook(ctx, cmd) + + Expect(cmd.String()).To(Equal("ping: PONG")) + stack = append(stack, "shard.AfterProcess") + + 
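// v9 replaces the BeforeProcess/AfterProcess pairs with wrapping hooks:
+ // a hook receives the next ProcessHook in the chain and returns a new one,
+ // so the before/after bookkeeping here simply brackets the single
+ // hook(ctx, cmd) call.
+ 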
return err + } }, } @@ -783,11 +937,6 @@ var _ = Describe("ClusterClient", func() { "shard.AfterProcess", "cluster.AfterProcess", })) - - clusterHook.beforeProcess = nil - clusterHook.afterProcess = nil - nodeHook.beforeProcess = nil - nodeHook.afterProcess = nil }) It("supports Pipeline hook", func() { @@ -802,33 +951,39 @@ var _ = Describe("ClusterClient", func() { var stack []string client.AddHook(&hook{ - beforeProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) (context.Context, error) { - Expect(cmds).To(HaveLen(1)) - Expect(cmds[0].String()).To(Equal("ping: ")) - stack = append(stack, "cluster.BeforeProcessPipeline") - return ctx, nil - }, - afterProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) error { - Expect(cmds).To(HaveLen(1)) - Expect(cmds[0].String()).To(Equal("ping: PONG")) - stack = append(stack, "cluster.AfterProcessPipeline") - return nil + processPipelineHook: func(hook redis.ProcessPipelineHook) redis.ProcessPipelineHook { + return func(ctx context.Context, cmds []redis.Cmder) error { + Expect(cmds).To(HaveLen(1)) + Expect(cmds[0].String()).To(Equal("ping: ")) + stack = append(stack, "cluster.BeforeProcessPipeline") + + err := hook(ctx, cmds) + + Expect(cmds).To(HaveLen(1)) + Expect(cmds[0].String()).To(Equal("ping: PONG")) + stack = append(stack, "cluster.AfterProcessPipeline") + + return err + } }, }) _ = client.ForEachShard(ctx, func(ctx context.Context, node *redis.Client) error { node.AddHook(&hook{ - beforeProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) (context.Context, error) { - Expect(cmds).To(HaveLen(1)) - Expect(cmds[0].String()).To(Equal("ping: ")) - stack = append(stack, "shard.BeforeProcessPipeline") - return ctx, nil - }, - afterProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) error { - Expect(cmds).To(HaveLen(1)) - Expect(cmds[0].String()).To(Equal("ping: PONG")) - stack = append(stack, "shard.AfterProcessPipeline") - return nil + processPipelineHook: func(hook redis.ProcessPipelineHook) redis.ProcessPipelineHook { + return func(ctx context.Context, cmds []redis.Cmder) error { + Expect(cmds).To(HaveLen(1)) + Expect(cmds[0].String()).To(Equal("ping: ")) + stack = append(stack, "shard.BeforeProcessPipeline") + + err := hook(ctx, cmds) + + Expect(cmds).To(HaveLen(1)) + Expect(cmds[0].String()).To(Equal("ping: PONG")) + stack = append(stack, "shard.AfterProcessPipeline") + + return err + } }, }) return nil @@ -859,33 +1014,39 @@ var _ = Describe("ClusterClient", func() { var stack []string client.AddHook(&hook{ - beforeProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) (context.Context, error) { - Expect(cmds).To(HaveLen(3)) - Expect(cmds[1].String()).To(Equal("ping: ")) - stack = append(stack, "cluster.BeforeProcessPipeline") - return ctx, nil - }, - afterProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) error { - Expect(cmds).To(HaveLen(3)) - Expect(cmds[1].String()).To(Equal("ping: PONG")) - stack = append(stack, "cluster.AfterProcessPipeline") - return nil + processPipelineHook: func(hook redis.ProcessPipelineHook) redis.ProcessPipelineHook { + return func(ctx context.Context, cmds []redis.Cmder) error { + Expect(cmds).To(HaveLen(3)) + Expect(cmds[1].String()).To(Equal("ping: ")) + stack = append(stack, "cluster.BeforeProcessPipeline") + + err := hook(ctx, cmds) + + Expect(cmds).To(HaveLen(3)) + Expect(cmds[1].String()).To(Equal("ping: PONG")) + stack = append(stack, "cluster.AfterProcessPipeline") + + return err + } }, }) _ = client.ForEachShard(ctx, func(ctx 
context.Context, node *redis.Client) error { node.AddHook(&hook{ - beforeProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) (context.Context, error) { - Expect(cmds).To(HaveLen(3)) - Expect(cmds[1].String()).To(Equal("ping: ")) - stack = append(stack, "shard.BeforeProcessPipeline") - return ctx, nil - }, - afterProcessPipeline: func(ctx context.Context, cmds []redis.Cmder) error { - Expect(cmds).To(HaveLen(3)) - Expect(cmds[1].String()).To(Equal("ping: PONG")) - stack = append(stack, "shard.AfterProcessPipeline") - return nil + processPipelineHook: func(hook redis.ProcessPipelineHook) redis.ProcessPipelineHook { + return func(ctx context.Context, cmds []redis.Cmder) error { + Expect(cmds).To(HaveLen(3)) + Expect(cmds[1].String()).To(Equal("ping: ")) + stack = append(stack, "shard.BeforeProcessPipeline") + + err := hook(ctx, cmds) + + Expect(cmds).To(HaveLen(3)) + Expect(cmds[1].String()).To(Equal("ping: PONG")) + stack = append(stack, "shard.AfterProcessPipeline") + + return err + } }, }) return nil @@ -1254,27 +1415,175 @@ var _ = Describe("ClusterClient timeout", func() { Context("read/write timeout", func() { BeforeEach(func() { opt := redisClusterOptions() - opt.ReadTimeout = 250 * time.Millisecond - opt.WriteTimeout = 250 * time.Millisecond - opt.MaxRedirects = 1 client = cluster.newClusterClient(ctx, opt) err := client.ForEachShard(ctx, func(ctx context.Context, client *redis.Client) error { - return client.ClientPause(ctx, pause).Err() + err := client.ClientPause(ctx, pause).Err() + + opt := client.Options() + opt.ReadTimeout = time.Nanosecond + opt.WriteTimeout = time.Nanosecond + + return err }) Expect(err).NotTo(HaveOccurred()) + + // Overwrite timeouts after the client is initialized. + opt.ReadTimeout = time.Nanosecond + opt.WriteTimeout = time.Nanosecond + opt.MaxRedirects = 0 }) AfterEach(func() { _ = client.ForEachShard(ctx, func(ctx context.Context, client *redis.Client) error { defer GinkgoRecover() + + opt := client.Options() + opt.ReadTimeout = time.Second + opt.WriteTimeout = time.Second + Eventually(func() error { return client.Ping(ctx).Err() }, 2*pause).ShouldNot(HaveOccurred()) return nil }) + + err := client.Close() + Expect(err).NotTo(HaveOccurred()) }) testTimeout() }) }) + +var _ = Describe("ClusterClient ParseURL", func() { + var cases = []struct { + test string + url string + o *redis.ClusterOptions // expected value + err error + }{ + { + test: "ParseRedisURL", + url: "redis://localhost:123", + o: &redis.ClusterOptions{Addrs: []string{"localhost:123"}}, + }, { + test: "ParseRedissURL", + url: "rediss://localhost:123", + o: &redis.ClusterOptions{Addrs: []string{"localhost:123"}, TLSConfig: &tls.Config{ServerName: "localhost"}}, + }, { + test: "MissingRedisPort", + url: "redis://localhost", + o: &redis.ClusterOptions{Addrs: []string{"localhost:6379"}}, + }, { + test: "MissingRedissPort", + url: "rediss://localhost", + o: &redis.ClusterOptions{Addrs: []string{"localhost:6379"}, TLSConfig: &tls.Config{ServerName: "localhost"}}, + }, { + test: "MultipleRedisURLs", + url: "redis://localhost:123?addr=localhost:1234&addr=localhost:12345", + o: &redis.ClusterOptions{Addrs: []string{"localhost:123", "localhost:1234", "localhost:12345"}}, + }, { + test: "MultipleRedissURLs", + url: "rediss://localhost:123?addr=localhost:1234&addr=localhost:12345", + o: &redis.ClusterOptions{Addrs: []string{"localhost:123", "localhost:1234", "localhost:12345"}, TLSConfig: &tls.Config{ServerName: "localhost"}}, + }, { + test: "OnlyPassword", + url: 
"redis://:bar@localhost:123", + o: &redis.ClusterOptions{Addrs: []string{"localhost:123"}, Password: "bar"}, + }, { + test: "OnlyUser", + url: "redis://foo@localhost:123", + o: &redis.ClusterOptions{Addrs: []string{"localhost:123"}, Username: "foo"}, + }, { + test: "RedisUsernamePassword", + url: "redis://foo:bar@localhost:123", + o: &redis.ClusterOptions{Addrs: []string{"localhost:123"}, Username: "foo", Password: "bar"}, + }, { + test: "RedissUsernamePassword", + url: "rediss://foo:bar@localhost:123?addr=localhost:1234", + o: &redis.ClusterOptions{Addrs: []string{"localhost:123", "localhost:1234"}, Username: "foo", Password: "bar", TLSConfig: &tls.Config{ServerName: "localhost"}}, + }, { + test: "QueryParameters", + url: "redis://localhost:123?read_timeout=2&pool_fifo=true&addr=localhost:1234", + o: &redis.ClusterOptions{Addrs: []string{"localhost:123", "localhost:1234"}, ReadTimeout: 2 * time.Second, PoolFIFO: true}, + }, { + test: "DisabledTimeout", + url: "redis://localhost:123?conn_max_idle_time=0", + o: &redis.ClusterOptions{Addrs: []string{"localhost:123"}, ConnMaxIdleTime: -1}, + }, { + test: "DisabledTimeoutNeg", + url: "redis://localhost:123?conn_max_idle_time=-1", + o: &redis.ClusterOptions{Addrs: []string{"localhost:123"}, ConnMaxIdleTime: -1}, + }, { + test: "UseDefault", + url: "redis://localhost:123?conn_max_idle_time=", + o: &redis.ClusterOptions{Addrs: []string{"localhost:123"}, ConnMaxIdleTime: 0}, + }, { + test: "Protocol", + url: "redis://localhost:123?protocol=2", + o: &redis.ClusterOptions{Addrs: []string{"localhost:123"}, Protocol: 2}, + }, { + test: "ClientName", + url: "redis://localhost:123?client_name=cluster_hi", + o: &redis.ClusterOptions{Addrs: []string{"localhost:123"}, ClientName: "cluster_hi"}, + }, { + test: "UseDefaultMissing=", + url: "redis://localhost:123?conn_max_idle_time", + o: &redis.ClusterOptions{Addrs: []string{"localhost:123"}, ConnMaxIdleTime: 0}, + }, { + test: "InvalidQueryAddr", + url: "rediss://foo:bar@localhost:123?addr=rediss://foo:barr@localhost:1234", + err: errors.New(`redis: unable to parse addr param: rediss://foo:barr@localhost:1234`), + }, { + test: "InvalidInt", + url: "redis://localhost?pool_size=five", + err: errors.New(`redis: invalid pool_size number: strconv.Atoi: parsing "five": invalid syntax`), + }, { + test: "InvalidBool", + url: "redis://localhost?pool_fifo=yes", + err: errors.New(`redis: invalid pool_fifo boolean: expected true/false/1/0 or an empty string, got "yes"`), + }, { + test: "UnknownParam", + url: "redis://localhost?abc=123", + err: errors.New("redis: unexpected option: abc"), + }, { + test: "InvalidScheme", + url: "https://google.com", + err: errors.New("redis: invalid URL scheme: https"), + }, + } + + It("match ParseClusterURL", func() { + for i := range cases { + tc := cases[i] + actual, err := redis.ParseClusterURL(tc.url) + if tc.err != nil { + Expect(err).Should(MatchError(tc.err)) + } else { + Expect(err).NotTo(HaveOccurred()) + } + + if err == nil { + Expect(tc.o).NotTo(BeNil()) + + Expect(tc.o.Addrs).To(Equal(actual.Addrs)) + Expect(tc.o.TLSConfig).To(Equal(actual.TLSConfig)) + Expect(tc.o.Username).To(Equal(actual.Username)) + Expect(tc.o.Password).To(Equal(actual.Password)) + Expect(tc.o.MaxRetries).To(Equal(actual.MaxRetries)) + Expect(tc.o.MinRetryBackoff).To(Equal(actual.MinRetryBackoff)) + Expect(tc.o.MaxRetryBackoff).To(Equal(actual.MaxRetryBackoff)) + Expect(tc.o.DialTimeout).To(Equal(actual.DialTimeout)) + Expect(tc.o.ReadTimeout).To(Equal(actual.ReadTimeout)) + 
Expect(tc.o.WriteTimeout).To(Equal(actual.WriteTimeout)) + Expect(tc.o.PoolFIFO).To(Equal(actual.PoolFIFO)) + Expect(tc.o.PoolSize).To(Equal(actual.PoolSize)) + Expect(tc.o.MinIdleConns).To(Equal(actual.MinIdleConns)) + Expect(tc.o.ConnMaxLifetime).To(Equal(actual.ConnMaxLifetime)) + Expect(tc.o.ConnMaxIdleTime).To(Equal(actual.ConnMaxIdleTime)) + Expect(tc.o.PoolTimeout).To(Equal(actual.PoolTimeout)) + } + } + }) +}) diff --git a/command.go b/command.go index f9d8af36..b6df28fb 100644 --- a/command.go +++ b/command.go @@ -5,12 +5,13 @@ import ( "fmt" "net" "strconv" + "strings" "time" - "github.com/go-redis/redis/v8/internal" - "github.com/go-redis/redis/v8/internal/hscan" - "github.com/go-redis/redis/v8/internal/proto" - "github.com/go-redis/redis/v8/internal/util" + "github.com/redis/go-redis/v9/internal" + "github.com/redis/go-redis/v9/internal/hscan" + "github.com/redis/go-redis/v9/internal/proto" + "github.com/redis/go-redis/v9/internal/util" ) type Cmder interface { @@ -65,7 +66,7 @@ func cmdFirstKeyPos(cmd Cmder, info *CommandInfo) int { } switch cmd.Name() { - case "eval", "evalsha": + case "eval", "evalsha", "eval_ro", "evalsha_ro": if cmd.stringArg(2) != "0" { return 3 } @@ -83,7 +84,7 @@ func cmdFirstKeyPos(cmd Cmder, info *CommandInfo) int { if info != nil { return int(info.FirstKeyPos) } - return 0 + return 1 } func cmdString(cmd Cmder, val interface{}) string { @@ -104,7 +105,7 @@ func cmdString(cmd Cmder, val interface{}) string { b = internal.AppendArg(b, val) } - return internal.String(b) + return util.BytesToString(b) } //------------------------------------------------------------------------------ @@ -339,6 +340,8 @@ func (cmd *Cmd) Bool() (bool, error) { func toBool(val interface{}) (bool, error) { switch val := val.(type) { + case bool: + return val, nil case int64: return val != 0, nil case string: @@ -1093,6 +1096,10 @@ func NewKeyValueSliceCmd(ctx context.Context, args ...interface{}) *KeyValueSlic } } +func (cmd *KeyValueSliceCmd) SetVal(val []KeyValue) { + cmd.val = val +} + func (cmd *KeyValueSliceCmd) Val() []KeyValue { return cmd.val } @@ -1106,15 +1113,16 @@ func (cmd *KeyValueSliceCmd) String() string { } // Many commands will respond to two formats: -// 1) 1) "one" -// 2) (double) 1 -// 2) 1) "two" -// 2) (double) 2 +// 1. 1) "one" +// 2. (double) 1 +// 2. 1) "two" +// 2. (double) 2 +// // OR: -// 1) "two" -// 2) (double) 2 -// 3) "one" -// 4) (double) 1 +// 1. "two" +// 2. (double) 2 +// 3. "one" +// 4. 
(double) 1 func (cmd *KeyValueSliceCmd) readReply(rd *proto.Reader) error { // nolint:dupl n, err := rd.ReadArrayLen() if err != nil { @@ -1288,16 +1296,16 @@ func (cmd *MapStringStringCmd) readReply(rd *proto.Reader) error { //------------------------------------------------------------------------------ -type StringIntMapCmd struct { +type MapStringIntCmd struct { baseCmd val map[string]int64 } -var _ Cmder = (*StringIntMapCmd)(nil) +var _ Cmder = (*MapStringIntCmd)(nil) -func NewStringIntMapCmd(ctx context.Context, args ...interface{}) *StringIntMapCmd { - return &StringIntMapCmd{ +func NewMapStringIntCmd(ctx context.Context, args ...interface{}) *MapStringIntCmd { + return &MapStringIntCmd{ baseCmd: baseCmd{ ctx: ctx, args: args, @@ -1305,23 +1313,23 @@ func NewStringIntMapCmd(ctx context.Context, args ...interface{}) *StringIntMapC } } -func (cmd *StringIntMapCmd) SetVal(val map[string]int64) { +func (cmd *MapStringIntCmd) SetVal(val map[string]int64) { cmd.val = val } -func (cmd *StringIntMapCmd) Val() map[string]int64 { +func (cmd *MapStringIntCmd) Val() map[string]int64 { return cmd.val } -func (cmd *StringIntMapCmd) Result() (map[string]int64, error) { +func (cmd *MapStringIntCmd) Result() (map[string]int64, error) { return cmd.val, cmd.err } -func (cmd *StringIntMapCmd) String() string { +func (cmd *MapStringIntCmd) String() string { return cmdString(cmd, cmd.val) } -func (cmd *StringIntMapCmd) readReply(rd *proto.Reader) error { +func (cmd *MapStringIntCmd) readReply(rd *proto.Reader) error { n, err := rd.ReadMapLen() if err != nil { return err @@ -1769,19 +1777,35 @@ func (cmd *XAutoClaimCmd) String() string { } func (cmd *XAutoClaimCmd) readReply(rd *proto.Reader) error { - var err error - if err = rd.ReadFixedArrayLen(2); err != nil { + n, err := rd.ReadArrayLen() + if err != nil { return err } + switch n { + case 2, // Redis 6 + 3: // Redis 7: + // ok + default: + return fmt.Errorf("redis: got %d elements in XAutoClaim reply, wanted 2/3", n) + } + cmd.start, err = rd.ReadString() if err != nil { return err } + cmd.val, err = readXMessageSlice(rd) if err != nil { return err } + + if n >= 3 { + if err := rd.DiscardNext(); err != nil { + return err + } + } + return nil } @@ -1823,27 +1847,43 @@ func (cmd *XAutoClaimJustIDCmd) String() string { } func (cmd *XAutoClaimJustIDCmd) readReply(rd *proto.Reader) error { - var err error - if err = rd.ReadFixedArrayLen(2); err != nil { + n, err := rd.ReadArrayLen() + if err != nil { return err } + switch n { + case 2, // Redis 6 + 3: // Redis 7: + // ok + default: + return fmt.Errorf("redis: got %d elements in XAutoClaimJustID reply, wanted 2/3", n) + } + cmd.start, err = rd.ReadString() if err != nil { return err } - n, err := rd.ReadArrayLen() + + nn, err := rd.ReadArrayLen() if err != nil { return err } - cmd.val = make([]string, n) - for i := 0; i < n; i++ { + cmd.val = make([]string, nn) + for i := 0; i < nn; i++ { cmd.val[i], err = rd.ReadString() if err != nil { return err } } + + if n >= 3 { + if err := rd.DiscardNext(); err != nil { + return err + } + } + return nil } @@ -1855,9 +1895,10 @@ type XInfoConsumersCmd struct { } type XInfoConsumer struct { - Name string - Pending int64 - Idle time.Duration + Name string + Pending int64 + Idle time.Duration + Inactive time.Duration } var _ Cmder = (*XInfoConsumersCmd)(nil) @@ -1895,12 +1936,13 @@ func (cmd *XInfoConsumersCmd) readReply(rd *proto.Reader) error { cmd.val = make([]XInfoConsumer, n) for i := 0; i < len(cmd.val); i++ { - if err = rd.ReadFixedMapLen(3); err != nil { + nn, 
err := rd.ReadMapLen() + if err != nil { return err } var key string - for f := 0; f < 3; f++ { + for f := 0; f < nn; f++ { key, err = rd.ReadString() if err != nil { return err @@ -1915,6 +1957,10 @@ func (cmd *XInfoConsumersCmd) readReply(rd *proto.Reader) error { var idle int64 idle, err = rd.ReadInt() cmd.val[i].Idle = time.Duration(idle) * time.Millisecond + case "inactive": + var inactive int64 + inactive, err = rd.ReadInt() + cmd.val[i].Inactive = time.Duration(inactive) * time.Millisecond default: return fmt.Errorf("redis: unexpected content %s in XINFO CONSUMERS reply", key) } @@ -1939,6 +1985,8 @@ type XInfoGroup struct { Consumers int64 Pending int64 LastDeliveredID string + EntriesRead int64 + Lag int64 } var _ Cmder = (*XInfoGroupsCmd)(nil) @@ -1976,12 +2024,15 @@ func (cmd *XInfoGroupsCmd) readReply(rd *proto.Reader) error { cmd.val = make([]XInfoGroup, n) for i := 0; i < len(cmd.val); i++ { - if err = rd.ReadFixedMapLen(4); err != nil { + group := &cmd.val[i] + + nn, err := rd.ReadMapLen() + if err != nil { return err } var key string - for f := 0; f < 4; f++ { + for j := 0; j < nn; j++ { key, err = rd.ReadString() if err != nil { return err @@ -1989,18 +2040,40 @@ func (cmd *XInfoGroupsCmd) readReply(rd *proto.Reader) error { switch key { case "name": - cmd.val[i].Name, err = rd.ReadString() + group.Name, err = rd.ReadString() + if err != nil { + return err + } case "consumers": - cmd.val[i].Consumers, err = rd.ReadInt() + group.Consumers, err = rd.ReadInt() + if err != nil { + return err + } case "pending": - cmd.val[i].Pending, err = rd.ReadInt() + group.Pending, err = rd.ReadInt() + if err != nil { + return err + } case "last-delivered-id": - cmd.val[i].LastDeliveredID, err = rd.ReadString() + group.LastDeliveredID, err = rd.ReadString() + if err != nil { + return err + } + case "entries-read": + group.EntriesRead, err = rd.ReadInt() + if err != nil && err != Nil { + return err + } + case "lag": + group.Lag, err = rd.ReadInt() + + // lag: the number of entries in the stream that are still waiting to be delivered + // to the group's consumers, or a NULL(Nil) when that number can't be determined. 
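+ // If entries were deleted or the stream trimmed, the server may be unable
+ // to compute the lag and replies with NULL; the Nil error is deliberately
+ // ignored below, so Lag then keeps its zero value.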
+ if err != nil && err != Nil { + return err + } default: - return fmt.Errorf("redis: unexpected content %s in XINFO GROUPS reply", key) - } - if err != nil { - return err + return fmt.Errorf("redis: unexpected key %q in XINFO GROUPS reply", key) } } } @@ -2016,13 +2089,16 @@ type XInfoStreamCmd struct { } type XInfoStream struct { - Length int64 - RadixTreeKeys int64 - RadixTreeNodes int64 - Groups int64 - LastGeneratedID string - FirstEntry XMessage - LastEntry XMessage + Length int64 + RadixTreeKeys int64 + RadixTreeNodes int64 + Groups int64 + LastGeneratedID string + MaxDeletedEntryID string + EntriesAdded int64 + FirstEntry XMessage + LastEntry XMessage + RecordedFirstEntryID string } var _ Cmder = (*XInfoStreamCmd)(nil) @@ -2053,12 +2129,13 @@ func (cmd *XInfoStreamCmd) String() string { } func (cmd *XInfoStreamCmd) readReply(rd *proto.Reader) error { - if err := rd.ReadFixedMapLen(7); err != nil { + n, err := rd.ReadMapLen() + if err != nil { return err } cmd.val = &XInfoStream{} - for i := 0; i < 7; i++ { + for i := 0; i < n; i++ { key, err := rd.ReadString() if err != nil { return err @@ -2066,30 +2143,56 @@ func (cmd *XInfoStreamCmd) readReply(rd *proto.Reader) error { switch key { case "length": cmd.val.Length, err = rd.ReadInt() + if err != nil { + return err + } case "radix-tree-keys": cmd.val.RadixTreeKeys, err = rd.ReadInt() + if err != nil { + return err + } case "radix-tree-nodes": cmd.val.RadixTreeNodes, err = rd.ReadInt() + if err != nil { + return err + } case "groups": cmd.val.Groups, err = rd.ReadInt() + if err != nil { + return err + } case "last-generated-id": cmd.val.LastGeneratedID, err = rd.ReadString() + if err != nil { + return err + } + case "max-deleted-entry-id": + cmd.val.MaxDeletedEntryID, err = rd.ReadString() + if err != nil { + return err + } + case "entries-added": + cmd.val.EntriesAdded, err = rd.ReadInt() + if err != nil { + return err + } case "first-entry": cmd.val.FirstEntry, err = readXMessage(rd) - if err == Nil { - err = nil + if err != nil && err != Nil { + return err } case "last-entry": cmd.val.LastEntry, err = readXMessage(rd) - if err == Nil { - err = nil + if err != nil && err != Nil { + return err + } + case "recorded-first-entry-id": + cmd.val.RecordedFirstEntryID, err = rd.ReadString() + if err != nil { + return err } default: - return fmt.Errorf("redis: unexpected content %s "+ - "in XINFO STREAM reply", key) - } - if err != nil { - return err + return fmt.Errorf("redis: unexpected key %q in XINFO STREAM reply", key) } } return nil @@ -2103,17 +2206,22 @@ type XInfoStreamFullCmd struct { } type XInfoStreamFull struct { - Length int64 - RadixTreeKeys int64 - RadixTreeNodes int64 - LastGeneratedID string - Entries []XMessage - Groups []XInfoStreamGroup + Length int64 + RadixTreeKeys int64 + RadixTreeNodes int64 + LastGeneratedID string + MaxDeletedEntryID string + EntriesAdded int64 + Entries []XMessage + Groups []XInfoStreamGroup + RecordedFirstEntryID string } type XInfoStreamGroup struct { Name string LastDeliveredID string + EntriesRead int64 + Lag int64 PelCount int64 Pending []XInfoStreamGroupPending Consumers []XInfoStreamConsumer @@ -2127,10 +2235,11 @@ type XInfoStreamGroupPending struct { } type XInfoStreamConsumer struct { - Name string - SeenTime time.Time - PelCount int64 - Pending []XInfoStreamConsumerPending + Name string + SeenTime time.Time + ActiveTime time.Time + PelCount int64 + Pending []XInfoStreamConsumerPending } type XInfoStreamConsumerPending struct { @@ -2167,13 +2276,14 @@ func (cmd *XInfoStreamFullCmd) 
String() string { } func (cmd *XInfoStreamFullCmd) readReply(rd *proto.Reader) error { - if err := rd.ReadFixedMapLen(6); err != nil { + n, err := rd.ReadMapLen() + if err != nil { return err } cmd.val = &XInfoStreamFull{} - for i := 0; i < 6; i++ { + for i := 0; i < n; i++ { key, err := rd.ReadString() if err != nil { return err @@ -2182,22 +2292,51 @@ func (cmd *XInfoStreamFullCmd) readReply(rd *proto.Reader) error { switch key { case "length": cmd.val.Length, err = rd.ReadInt() + if err != nil { + return err + } case "radix-tree-keys": cmd.val.RadixTreeKeys, err = rd.ReadInt() + if err != nil { + return err + } case "radix-tree-nodes": cmd.val.RadixTreeNodes, err = rd.ReadInt() + if err != nil { + return err + } case "last-generated-id": cmd.val.LastGeneratedID, err = rd.ReadString() + if err != nil { + return err + } + case "entries-added": + cmd.val.EntriesAdded, err = rd.ReadInt() + if err != nil { + return err + } case "entries": cmd.val.Entries, err = readXMessageSlice(rd) + if err != nil { + return err + } case "groups": cmd.val.Groups, err = readStreamGroups(rd) + if err != nil { + return err + } + case "max-deleted-entry-id": + cmd.val.MaxDeletedEntryID, err = rd.ReadString() + if err != nil { + return err + } + case "recorded-first-entry-id": + cmd.val.RecordedFirstEntryID, err = rd.ReadString() + if err != nil { + return err + } default: - return fmt.Errorf("redis: unexpected content %s "+ - "in XINFO STREAM FULL reply", key) - } - if err != nil { - return err + return fmt.Errorf("redis: unexpected key %q in XINFO STREAM FULL reply", key) } } return nil @@ -2210,13 +2349,14 @@ func readStreamGroups(rd *proto.Reader) ([]XInfoStreamGroup, error) { } groups := make([]XInfoStreamGroup, 0, n) for i := 0; i < n; i++ { - if err = rd.ReadFixedMapLen(5); err != nil { + nn, err := rd.ReadMapLen() + if err != nil { return nil, err } group := XInfoStreamGroup{} - for f := 0; f < 5; f++ { + for j := 0; j < nn; j++ { key, err := rd.ReadString() if err != nil { return nil, err @@ -2225,21 +2365,43 @@ func readStreamGroups(rd *proto.Reader) ([]XInfoStreamGroup, error) { switch key { case "name": group.Name, err = rd.ReadString() + if err != nil { + return nil, err + } case "last-delivered-id": group.LastDeliveredID, err = rd.ReadString() + if err != nil { + return nil, err + } + case "entries-read": + group.EntriesRead, err = rd.ReadInt() + if err != nil && err != Nil { + return nil, err + } + case "lag": + // lag: the number of entries in the stream that are still waiting to be delivered + // to the group's consumers, or a NULL(Nil) when that number can't be determined. 
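+ // As in XINFO GROUPS above: a NULL lag surfaces as Nil and is ignored,
+ // leaving Lag at 0, so zero can mean either "caught up" or "indeterminable".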
+ group.Lag, err = rd.ReadInt() + if err != nil && err != Nil { + return nil, err + } case "pel-count": group.PelCount, err = rd.ReadInt() + if err != nil { + return nil, err + } case "pending": group.Pending, err = readXInfoStreamGroupPending(rd) + if err != nil { + return nil, err + } case "consumers": group.Consumers, err = readXInfoStreamConsumers(rd) + if err != nil { + return nil, err + } default: - return nil, fmt.Errorf("redis: unexpected content %s "+ - "in XINFO STREAM FULL reply", key) - } - - if err != nil { - return nil, err + return nil, fmt.Errorf("redis: unexpected key %q in XINFO STREAM FULL reply", key) } } @@ -2300,13 +2462,14 @@ func readXInfoStreamConsumers(rd *proto.Reader) ([]XInfoStreamConsumer, error) { consumers := make([]XInfoStreamConsumer, 0, n) for i := 0; i < n; i++ { - if err = rd.ReadFixedMapLen(4); err != nil { + nn, err := rd.ReadMapLen() + if err != nil { return nil, err } c := XInfoStreamConsumer{} - for f := 0; f < 4; f++ { + for f := 0; f < nn; f++ { cKey, err := rd.ReadString() if err != nil { return nil, err @@ -2320,7 +2483,13 @@ func readXInfoStreamConsumers(rd *proto.Reader) ([]XInfoStreamConsumer, error) { if err != nil { return nil, err } - c.SeenTime = time.Unix(seen/1000, seen%1000*int64(time.Millisecond)) + c.SeenTime = time.UnixMilli(seen) + case "active-time": + active, err := rd.ReadInt() + if err != nil { + return nil, err + } + c.ActiveTime = time.UnixMilli(active) case "pel-count": c.PelCount, err = rd.ReadInt() case "pending": @@ -2547,11 +2716,11 @@ func (cmd *ScanCmd) readReply(rd *proto.Reader) error { return err } - cursor, err := rd.ReadInt() + cursor, err := rd.ReadUint() if err != nil { return err } - cmd.cursor = uint64(cursor) + cmd.cursor = cursor n, err := rd.ReadArrayLen() if err != nil { @@ -2577,8 +2746,9 @@ func (cmd *ScanCmd) Iterator() *ScanIterator { //------------------------------------------------------------------------------ type ClusterNode struct { - ID string - Addr string + ID string + Addr string + NetworkingMetadata map[string]string } type ClusterSlot struct { @@ -2648,13 +2818,14 @@ func (cmd *ClusterSlotsCmd) readReply(rd *proto.Reader) error { // subtract start and end. 
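+ // Each node entry is [ip, port], optionally followed by the node ID
+ // (Redis >= 4.0) and a networking-metadata map (Redis >= 7.0); illustrative:
+ //
+ //	["127.0.0.1", 7000, "09dbe9720cda62f7865eabc5fd8857c5d2678366", {"hostname": "node-1.example"}]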
nodes := make([]ClusterNode, n-2) + for j := 0; j < len(nodes); j++ { nn, err := rd.ReadArrayLen() if err != nil { return err } - if nn != 2 && nn != 3 { - return fmt.Errorf("got %d elements in cluster info address, expected 2 or 3", nn) + if nn < 2 || nn > 4 { + return fmt.Errorf("got %d elements in cluster info address, expected 2, 3, or 4", nn) } ip, err := rd.ReadString() @@ -2669,14 +2840,38 @@ func (cmd *ClusterSlotsCmd) readReply(rd *proto.Reader) error { nodes[j].Addr = net.JoinHostPort(ip, port) - if nn == 3 { + if nn >= 3 { id, err := rd.ReadString() if err != nil { return err } nodes[j].ID = id } + + if nn >= 4 { + metadataLength, err := rd.ReadMapLen() + if err != nil { + return err + } + + networkingMetadata := make(map[string]string, metadataLength) + + for i := 0; i < metadataLength; i++ { + key, err := rd.ReadString() + if err != nil { + return err + } + value, err := rd.ReadString() + if err != nil { + return err + } + networkingMetadata[key] = value + } + + nodes[j].NetworkingMetadata = networkingMetadata + } } + cmd.val[i] = ClusterSlot{ Start: int(start), End: int(end), @@ -2949,6 +3144,10 @@ func NewGeoSearchLocationCmd( } } +func (cmd *GeoSearchLocationCmd) SetVal(val []GeoLocation) { + cmd.val = val +} + func (cmd *GeoSearchLocationCmd) Val() []GeoLocation { return cmd.val } @@ -3135,6 +3334,7 @@ func (cmd *CommandsInfoCmd) String() string { func (cmd *CommandsInfoCmd) readReply(rd *proto.Reader) error { const numArgRedis5 = 6 const numArgRedis6 = 7 + const numArgRedis7 = 10 n, err := rd.ReadArrayLen() if err != nil { @@ -3147,8 +3347,12 @@ func (cmd *CommandsInfoCmd) readReply(rd *proto.Reader) error { if err != nil { return err } - if nn != numArgRedis5 && nn != numArgRedis6 { - return fmt.Errorf("redis: got %d elements in COMMAND reply, wanted 6/7", nn) + + switch nn { + case numArgRedis5, numArgRedis6, numArgRedis7: + // ok + default: + return fmt.Errorf("redis: got %d elements in COMMAND reply, wanted 6/7/10", nn) } cmdInfo := &CommandInfo{} @@ -3199,7 +3403,7 @@ func (cmd *CommandsInfoCmd) readReply(rd *proto.Reader) error { } cmdInfo.StepCount = int8(stepCount) - if nn == numArgRedis6 { + if nn >= numArgRedis6 { aclFlagLen, err := rd.ReadArrayLen() if err != nil { return err @@ -3217,6 +3421,18 @@ func (cmd *CommandsInfoCmd) readReply(rd *proto.Reader) error { } } + if nn >= numArgRedis7 { + if err := rd.DiscardNext(); err != nil { + return err + } + if err := rd.DiscardNext(); err != nil { + return err + } + if err := rd.DiscardNext(); err != nil { + return err + } + } + cmd.val[cmdInfo.Name] = cmdInfo } @@ -3388,6 +3604,10 @@ func NewMapStringInterfaceCmd(ctx context.Context, args ...interface{}) *MapStri } } +func (cmd *MapStringInterfaceCmd) SetVal(val map[string]interface{}) { + cmd.val = val +} + func (cmd *MapStringInterfaceCmd) Val() map[string]interface{} { return cmd.val } @@ -3448,6 +3668,10 @@ func NewMapStringStringSliceCmd(ctx context.Context, args ...interface{}) *MapSt } } +func (cmd *MapStringStringSliceCmd) SetVal(val []map[string]string) { + cmd.val = val +} + func (cmd *MapStringStringSliceCmd) Val() []map[string]string { return cmd.val } @@ -3488,3 +3712,1459 @@ func (cmd *MapStringStringSliceCmd) readReply(rd *proto.Reader) error { } return nil } + +//------------------------------------------------------------------------------ + +type KeyValuesCmd struct { + baseCmd + + key string + val []string +} + +var _ Cmder = (*KeyValuesCmd)(nil) + +func NewKeyValuesCmd(ctx context.Context, args ...interface{}) *KeyValuesCmd { + return 
&KeyValuesCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *KeyValuesCmd) SetVal(key string, val []string) { + cmd.key = key + cmd.val = val +} + +func (cmd *KeyValuesCmd) Val() (string, []string) { + return cmd.key, cmd.val +} + +func (cmd *KeyValuesCmd) Result() (string, []string, error) { + return cmd.key, cmd.val, cmd.err +} + +func (cmd *KeyValuesCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *KeyValuesCmd) readReply(rd *proto.Reader) (err error) { + if err = rd.ReadFixedArrayLen(2); err != nil { + return err + } + + cmd.key, err = rd.ReadString() + if err != nil { + return err + } + + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + cmd.val = make([]string, n) + for i := 0; i < n; i++ { + cmd.val[i], err = rd.ReadString() + if err != nil { + return err + } + } + + return nil +} + +//------------------------------------------------------------------------------ + +type ZSliceWithKeyCmd struct { + baseCmd + + key string + val []Z +} + +var _ Cmder = (*ZSliceWithKeyCmd)(nil) + +func NewZSliceWithKeyCmd(ctx context.Context, args ...interface{}) *ZSliceWithKeyCmd { + return &ZSliceWithKeyCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *ZSliceWithKeyCmd) SetVal(key string, val []Z) { + cmd.key = key + cmd.val = val +} + +func (cmd *ZSliceWithKeyCmd) Val() (string, []Z) { + return cmd.key, cmd.val +} + +func (cmd *ZSliceWithKeyCmd) Result() (string, []Z, error) { + return cmd.key, cmd.val, cmd.err +} + +func (cmd *ZSliceWithKeyCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *ZSliceWithKeyCmd) readReply(rd *proto.Reader) (err error) { + if err = rd.ReadFixedArrayLen(2); err != nil { + return err + } + + cmd.key, err = rd.ReadString() + if err != nil { + return err + } + + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + + typ, err := rd.PeekReplyType() + if err != nil { + return err + } + array := typ == proto.RespArray + + if array { + cmd.val = make([]Z, n) + } else { + cmd.val = make([]Z, n/2) + } + + for i := 0; i < len(cmd.val); i++ { + if array { + if err = rd.ReadFixedArrayLen(2); err != nil { + return err + } + } + + if cmd.val[i].Member, err = rd.ReadString(); err != nil { + return err + } + + if cmd.val[i].Score, err = rd.ReadFloat(); err != nil { + return err + } + } + + return nil +} + +type Function struct { + Name string + Description string + Flags []string +} + +type Library struct { + Name string + Engine string + Functions []Function + Code string +} + +type FunctionListCmd struct { + baseCmd + + val []Library +} + +var _ Cmder = (*FunctionListCmd)(nil) + +func NewFunctionListCmd(ctx context.Context, args ...interface{}) *FunctionListCmd { + return &FunctionListCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *FunctionListCmd) SetVal(val []Library) { + cmd.val = val +} + +func (cmd *FunctionListCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *FunctionListCmd) Val() []Library { + return cmd.val +} + +func (cmd *FunctionListCmd) Result() ([]Library, error) { + return cmd.val, cmd.err +} + +func (cmd *FunctionListCmd) First() (*Library, error) { + if cmd.err != nil { + return nil, cmd.err + } + if len(cmd.val) > 0 { + return &cmd.val[0], nil + } + return nil, Nil +} + +func (cmd *FunctionListCmd) readReply(rd *proto.Reader) (err error) { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + + libraries := make([]Library, n) + for i := 0; i < n; i++ { + nn, err := rd.ReadMapLen() + 
if err != nil { + return err + } + + library := Library{} + for f := 0; f < nn; f++ { + key, err := rd.ReadString() + if err != nil { + return err + } + + switch key { + case "library_name": + library.Name, err = rd.ReadString() + case "engine": + library.Engine, err = rd.ReadString() + case "functions": + library.Functions, err = cmd.readFunctions(rd) + case "library_code": + library.Code, err = rd.ReadString() + default: + return fmt.Errorf("redis: function list unexpected key %s", key) + } + + if err != nil { + return err + } + } + + libraries[i] = library + } + cmd.val = libraries + return nil +} + +func (cmd *FunctionListCmd) readFunctions(rd *proto.Reader) ([]Function, error) { + n, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + + functions := make([]Function, n) + for i := 0; i < n; i++ { + nn, err := rd.ReadMapLen() + if err != nil { + return nil, err + } + + function := Function{} + for f := 0; f < nn; f++ { + key, err := rd.ReadString() + if err != nil { + return nil, err + } + + switch key { + case "name": + if function.Name, err = rd.ReadString(); err != nil { + return nil, err + } + case "description": + if function.Description, err = rd.ReadString(); err != nil && err != Nil { + return nil, err + } + case "flags": + // resp set + nx, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + + function.Flags = make([]string, nx) + for j := 0; j < nx; j++ { + if function.Flags[j], err = rd.ReadString(); err != nil { + return nil, err + } + } + default: + return nil, fmt.Errorf("redis: function list unexpected key %s", key) + } + } + + functions[i] = function + } + return functions, nil +} + +// FunctionStats contains information about the scripts currently executing on the server, and the available engines +// - Engines: +// Statistics about the engine like number of functions and number of libraries +// - RunningScript: +// The script currently running on the shard we're connecting to. +// For Redis Enterprise and Redis Cloud, this represents the +// function with the longest running time, across all the running functions, on all shards +// - RunningScripts +// All scripts currently running in a Redis Enterprise clustered database. +// Only available on Redis Enterprise +type FunctionStats struct { + Engines []Engine + isRunning bool + rs RunningScript + allrs []RunningScript +} + +func (fs *FunctionStats) Running() bool { + return fs.isRunning +} + +func (fs *FunctionStats) RunningScript() (RunningScript, bool) { + return fs.rs, fs.isRunning +} + +// AllRunningScripts returns all scripts currently running in a Redis Enterprise clustered database. 
+// Only available on Redis Enterprise +func (fs *FunctionStats) AllRunningScripts() []RunningScript { + return fs.allrs +} + +type RunningScript struct { + Name string + Command []string + Duration time.Duration +} + +type Engine struct { + Language string + LibrariesCount int64 + FunctionsCount int64 +} + +type FunctionStatsCmd struct { + baseCmd + val FunctionStats +} + +var _ Cmder = (*FunctionStatsCmd)(nil) + +func NewFunctionStatsCmd(ctx context.Context, args ...interface{}) *FunctionStatsCmd { + return &FunctionStatsCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *FunctionStatsCmd) SetVal(val FunctionStats) { + cmd.val = val +} + +func (cmd *FunctionStatsCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *FunctionStatsCmd) Val() FunctionStats { + return cmd.val +} + +func (cmd *FunctionStatsCmd) Result() (FunctionStats, error) { + return cmd.val, cmd.err +} + +func (cmd *FunctionStatsCmd) readReply(rd *proto.Reader) (err error) { + n, err := rd.ReadMapLen() + if err != nil { + return err + } + + var key string + var result FunctionStats + for f := 0; f < n; f++ { + key, err = rd.ReadString() + if err != nil { + return err + } + + switch key { + case "running_script": + result.rs, result.isRunning, err = cmd.readRunningScript(rd) + case "engines": + result.Engines, err = cmd.readEngines(rd) + case "all_running_scripts": // Redis Enterprise only + result.allrs, result.isRunning, err = cmd.readRunningScripts(rd) + default: + return fmt.Errorf("redis: function stats unexpected key %s", key) + } + + if err != nil { + return err + } + } + + cmd.val = result + return nil +} + +func (cmd *FunctionStatsCmd) readRunningScript(rd *proto.Reader) (RunningScript, bool, error) { + err := rd.ReadFixedMapLen(3) + if err != nil { + if err == Nil { + return RunningScript{}, false, nil + } + return RunningScript{}, false, err + } + + var runningScript RunningScript + for i := 0; i < 3; i++ { + key, err := rd.ReadString() + if err != nil { + return RunningScript{}, false, err + } + + switch key { + case "name": + runningScript.Name, err = rd.ReadString() + case "duration_ms": + runningScript.Duration, err = cmd.readDuration(rd) + case "command": + runningScript.Command, err = cmd.readCommand(rd) + default: + return RunningScript{}, false, fmt.Errorf("redis: function stats unexpected running_script key %s", key) + } + + if err != nil { + return RunningScript{}, false, err + } + } + + return runningScript, true, nil +} + +func (cmd *FunctionStatsCmd) readEngines(rd *proto.Reader) ([]Engine, error) { + n, err := rd.ReadMapLen() + if err != nil { + return nil, err + } + + engines := make([]Engine, 0, n) + for i := 0; i < n; i++ { + engine := Engine{} + engine.Language, err = rd.ReadString() + if err != nil { + return nil, err + } + + err = rd.ReadFixedMapLen(2) + if err != nil { + return nil, fmt.Errorf("redis: function stats unexpected %s engine map length", engine.Language) + } + + for i := 0; i < 2; i++ { + key, err := rd.ReadString() + switch key { + case "libraries_count": + engine.LibrariesCount, err = rd.ReadInt() + case "functions_count": + engine.FunctionsCount, err = rd.ReadInt() + } + if err != nil { + return nil, err + } + } + + engines = append(engines, engine) + } + return engines, nil +} + +func (cmd *FunctionStatsCmd) readDuration(rd *proto.Reader) (time.Duration, error) { + t, err := rd.ReadInt() + if err != nil { + return time.Duration(0), err + } + return time.Duration(t) * time.Millisecond, nil +} + +func (cmd *FunctionStatsCmd) 
readCommand(rd *proto.Reader) ([]string, error) { + + n, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + + command := make([]string, 0, n) + for i := 0; i < n; i++ { + x, err := rd.ReadString() + if err != nil { + return nil, err + } + command = append(command, x) + } + + return command, nil +} +func (cmd *FunctionStatsCmd) readRunningScripts(rd *proto.Reader) ([]RunningScript, bool, error) { + n, err := rd.ReadArrayLen() + if err != nil { + return nil, false, err + } + + runningScripts := make([]RunningScript, 0, n) + for i := 0; i < n; i++ { + rs, _, err := cmd.readRunningScript(rd) + if err != nil { + return nil, false, err + } + runningScripts = append(runningScripts, rs) + } + + return runningScripts, len(runningScripts) > 0, nil +} + +//------------------------------------------------------------------------------ + +// LCSQuery is a parameter used for the LCS command +type LCSQuery struct { + Key1 string + Key2 string + Len bool + Idx bool + MinMatchLen int + WithMatchLen bool +} + +// LCSMatch is the result set of the LCS command. +type LCSMatch struct { + MatchString string + Matches []LCSMatchedPosition + Len int64 +} + +type LCSMatchedPosition struct { + Key1 LCSPosition + Key2 LCSPosition + + // only for withMatchLen is true + MatchLen int64 +} + +type LCSPosition struct { + Start int64 + End int64 +} + +type LCSCmd struct { + baseCmd + + // 1: match string + // 2: match len + // 3: match idx LCSMatch + readType uint8 + val *LCSMatch +} + +func NewLCSCmd(ctx context.Context, q *LCSQuery) *LCSCmd { + args := make([]interface{}, 3, 7) + args[0] = "lcs" + args[1] = q.Key1 + args[2] = q.Key2 + + cmd := &LCSCmd{readType: 1} + if q.Len { + cmd.readType = 2 + args = append(args, "len") + } else if q.Idx { + cmd.readType = 3 + args = append(args, "idx") + if q.MinMatchLen != 0 { + args = append(args, "minmatchlen", q.MinMatchLen) + } + if q.WithMatchLen { + args = append(args, "withmatchlen") + } + } + cmd.baseCmd = baseCmd{ + ctx: ctx, + args: args, + } + + return cmd +} + +func (cmd *LCSCmd) SetVal(val *LCSMatch) { + cmd.val = val +} + +func (cmd *LCSCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *LCSCmd) Val() *LCSMatch { + return cmd.val +} + +func (cmd *LCSCmd) Result() (*LCSMatch, error) { + return cmd.val, cmd.err +} + +func (cmd *LCSCmd) readReply(rd *proto.Reader) (err error) { + lcs := &LCSMatch{} + switch cmd.readType { + case 1: + // match string + if lcs.MatchString, err = rd.ReadString(); err != nil { + return err + } + case 2: + // match len + if lcs.Len, err = rd.ReadInt(); err != nil { + return err + } + case 3: + // read LCSMatch + if err = rd.ReadFixedMapLen(2); err != nil { + return err + } + + // read matches or len field + for i := 0; i < 2; i++ { + key, err := rd.ReadString() + if err != nil { + return err + } + + switch key { + case "matches": + // read array of matched positions + if lcs.Matches, err = cmd.readMatchedPositions(rd); err != nil { + return err + } + case "len": + // read match length + if lcs.Len, err = rd.ReadInt(); err != nil { + return err + } + } + } + } + + cmd.val = lcs + return nil +} + +func (cmd *LCSCmd) readMatchedPositions(rd *proto.Reader) ([]LCSMatchedPosition, error) { + n, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + + positions := make([]LCSMatchedPosition, n) + for i := 0; i < n; i++ { + pn, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + + if positions[i].Key1, err = cmd.readPosition(rd); err != nil { + return nil, err + } + if positions[i].Key2, 
err = cmd.readPosition(rd); err != nil { + return nil, err + } + + // read match length if WithMatchLen is true + if pn > 2 { + if positions[i].MatchLen, err = rd.ReadInt(); err != nil { + return nil, err + } + } + } + + return positions, nil +} + +func (cmd *LCSCmd) readPosition(rd *proto.Reader) (pos LCSPosition, err error) { + if err = rd.ReadFixedArrayLen(2); err != nil { + return pos, err + } + if pos.Start, err = rd.ReadInt(); err != nil { + return pos, err + } + if pos.End, err = rd.ReadInt(); err != nil { + return pos, err + } + + return pos, nil +} + +// ------------------------------------------------------------------------ + +type KeyFlags struct { + Key string + Flags []string +} + +type KeyFlagsCmd struct { + baseCmd + + val []KeyFlags +} + +var _ Cmder = (*KeyFlagsCmd)(nil) + +func NewKeyFlagsCmd(ctx context.Context, args ...interface{}) *KeyFlagsCmd { + return &KeyFlagsCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *KeyFlagsCmd) SetVal(val []KeyFlags) { + cmd.val = val +} + +func (cmd *KeyFlagsCmd) Val() []KeyFlags { + return cmd.val +} + +func (cmd *KeyFlagsCmd) Result() ([]KeyFlags, error) { + return cmd.val, cmd.err +} + +func (cmd *KeyFlagsCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *KeyFlagsCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + + if n == 0 { + cmd.val = make([]KeyFlags, 0) + return nil + } + + cmd.val = make([]KeyFlags, n) + + for i := 0; i < len(cmd.val); i++ { + + if err = rd.ReadFixedArrayLen(2); err != nil { + return err + } + + if cmd.val[i].Key, err = rd.ReadString(); err != nil { + return err + } + flagsLen, err := rd.ReadArrayLen() + if err != nil { + return err + } + cmd.val[i].Flags = make([]string, flagsLen) + + for j := 0; j < flagsLen; j++ { + if cmd.val[i].Flags[j], err = rd.ReadString(); err != nil { + return err + } + } + } + + return nil +} + +// --------------------------------------------------------------------------------------------------- + +type ClusterLink struct { + Direction string + Node string + CreateTime int64 + Events string + SendBufferAllocated int64 + SendBufferUsed int64 +} + +type ClusterLinksCmd struct { + baseCmd + + val []ClusterLink +} + +var _ Cmder = (*ClusterLinksCmd)(nil) + +func NewClusterLinksCmd(ctx context.Context, args ...interface{}) *ClusterLinksCmd { + return &ClusterLinksCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *ClusterLinksCmd) SetVal(val []ClusterLink) { + cmd.val = val +} + +func (cmd *ClusterLinksCmd) Val() []ClusterLink { + return cmd.val +} + +func (cmd *ClusterLinksCmd) Result() ([]ClusterLink, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *ClusterLinksCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *ClusterLinksCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + cmd.val = make([]ClusterLink, n) + + for i := 0; i < len(cmd.val); i++ { + m, err := rd.ReadMapLen() + if err != nil { + return err + } + + for j := 0; j < m; j++ { + key, err := rd.ReadString() + if err != nil { + return err + } + + switch key { + case "direction": + cmd.val[i].Direction, err = rd.ReadString() + case "node": + cmd.val[i].Node, err = rd.ReadString() + case "create-time": + cmd.val[i].CreateTime, err = rd.ReadInt() + case "events": + cmd.val[i].Events, err = rd.ReadString() + case "send-buffer-allocated": + cmd.val[i].SendBufferAllocated, err = rd.ReadInt() + case "send-buffer-used": + 
cmd.val[i].SendBufferUsed, err = rd.ReadInt() + default: + return fmt.Errorf("redis: unexpected key %q in CLUSTER LINKS reply", key) + } + + if err != nil { + return err + } + } + } + + return nil +} + +// ------------------------------------------------------------------------------------------------------------------ + +type SlotRange struct { + Start int64 + End int64 +} + +type Node struct { + ID string + Endpoint string + IP string + Hostname string + Port int64 + TLSPort int64 + Role string + ReplicationOffset int64 + Health string +} + +type ClusterShard struct { + Slots []SlotRange + Nodes []Node +} + +type ClusterShardsCmd struct { + baseCmd + + val []ClusterShard +} + +var _ Cmder = (*ClusterShardsCmd)(nil) + +func NewClusterShardsCmd(ctx context.Context, args ...interface{}) *ClusterShardsCmd { + return &ClusterShardsCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *ClusterShardsCmd) SetVal(val []ClusterShard) { + cmd.val = val +} + +func (cmd *ClusterShardsCmd) Val() []ClusterShard { + return cmd.val +} + +func (cmd *ClusterShardsCmd) Result() ([]ClusterShard, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *ClusterShardsCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *ClusterShardsCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + cmd.val = make([]ClusterShard, n) + + for i := 0; i < n; i++ { + m, err := rd.ReadMapLen() + if err != nil { + return err + } + + for j := 0; j < m; j++ { + key, err := rd.ReadString() + if err != nil { + return err + } + + switch key { + case "slots": + l, err := rd.ReadArrayLen() + if err != nil { + return err + } + for k := 0; k < l; k += 2 { + start, err := rd.ReadInt() + if err != nil { + return err + } + + end, err := rd.ReadInt() + if err != nil { + return err + } + + cmd.val[i].Slots = append(cmd.val[i].Slots, SlotRange{Start: start, End: end}) + } + case "nodes": + nodesLen, err := rd.ReadArrayLen() + if err != nil { + return err + } + cmd.val[i].Nodes = make([]Node, nodesLen) + for k := 0; k < nodesLen; k++ { + nodeMapLen, err := rd.ReadMapLen() + if err != nil { + return err + } + + for l := 0; l < nodeMapLen; l++ { + nodeKey, err := rd.ReadString() + if err != nil { + return err + } + + switch nodeKey { + case "id": + cmd.val[i].Nodes[k].ID, err = rd.ReadString() + case "endpoint": + cmd.val[i].Nodes[k].Endpoint, err = rd.ReadString() + case "ip": + cmd.val[i].Nodes[k].IP, err = rd.ReadString() + case "hostname": + cmd.val[i].Nodes[k].Hostname, err = rd.ReadString() + case "port": + cmd.val[i].Nodes[k].Port, err = rd.ReadInt() + case "tls-port": + cmd.val[i].Nodes[k].TLSPort, err = rd.ReadInt() + case "role": + cmd.val[i].Nodes[k].Role, err = rd.ReadString() + case "replication-offset": + cmd.val[i].Nodes[k].ReplicationOffset, err = rd.ReadInt() + case "health": + cmd.val[i].Nodes[k].Health, err = rd.ReadString() + default: + return fmt.Errorf("redis: unexpected key %q in CLUSTER SHARDS node reply", nodeKey) + } + + if err != nil { + return err + } + } + } + default: + return fmt.Errorf("redis: unexpected key %q in CLUSTER SHARDS reply", key) + } + } + } + + return nil +} + +// ----------------------------------------- + +type RankScore struct { + Rank int64 + Score float64 +} + +type RankWithScoreCmd struct { + baseCmd + + val RankScore +} + +var _ Cmder = (*RankWithScoreCmd)(nil) + +func NewRankWithScoreCmd(ctx context.Context, args ...interface{}) *RankWithScoreCmd { + return &RankWithScoreCmd{ + baseCmd: baseCmd{ + ctx: 
ctx, + args: args, + }, + } +} + +func (cmd *RankWithScoreCmd) SetVal(val RankScore) { + cmd.val = val +} + +func (cmd *RankWithScoreCmd) Val() RankScore { + return cmd.val +} + +func (cmd *RankWithScoreCmd) Result() (RankScore, error) { + return cmd.val, cmd.err +} + +func (cmd *RankWithScoreCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *RankWithScoreCmd) readReply(rd *proto.Reader) error { + if err := rd.ReadFixedArrayLen(2); err != nil { + return err + } + + rank, err := rd.ReadInt() + if err != nil { + return err + } + + score, err := rd.ReadFloat() + if err != nil { + return err + } + + cmd.val = RankScore{Rank: rank, Score: score} + + return nil +} + +// -------------------------------------------------------------------------------------------------- + +// ClientFlags are the redis-server client flags, copied from redis/src/server.h (redis 7.0). +type ClientFlags uint64 + +const ( + ClientSlave ClientFlags = 1 << 0 /* This client is a replica */ + ClientMaster ClientFlags = 1 << 1 /* This client is a master */ + ClientMonitor ClientFlags = 1 << 2 /* This client is a slave monitor, see MONITOR */ + ClientMulti ClientFlags = 1 << 3 /* This client is in a MULTI context */ + ClientBlocked ClientFlags = 1 << 4 /* The client is waiting in a blocking operation */ + ClientDirtyCAS ClientFlags = 1 << 5 /* Watched keys modified. EXEC will fail. */ + ClientCloseAfterReply ClientFlags = 1 << 6 /* Close after writing entire reply. */ + ClientUnBlocked ClientFlags = 1 << 7 /* This client was unblocked and is stored in server.unblocked_clients */ + ClientScript ClientFlags = 1 << 8 /* This is a non-connected client used by Lua */ + ClientAsking ClientFlags = 1 << 9 /* Client issued the ASKING command */ + ClientCloseASAP ClientFlags = 1 << 10 /* Close this client ASAP */ + ClientUnixSocket ClientFlags = 1 << 11 /* Client connected via Unix domain socket */ + ClientDirtyExec ClientFlags = 1 << 12 /* EXEC will fail for errors while queueing */ + ClientMasterForceReply ClientFlags = 1 << 13 /* Queue replies even if it is a master */ + ClientForceAOF ClientFlags = 1 << 14 /* Force AOF propagation of current cmd. */ + ClientForceRepl ClientFlags = 1 << 15 /* Force replication of current cmd. */ + ClientPrePSync ClientFlags = 1 << 16 /* Instance doesn't understand PSYNC. */ + ClientReadOnly ClientFlags = 1 << 17 /* Cluster client is in read-only state. */ + ClientPubSub ClientFlags = 1 << 18 /* Client is in Pub/Sub mode. */ + ClientPreventAOFProp ClientFlags = 1 << 19 /* Don't propagate to AOF. */ + ClientPreventReplProp ClientFlags = 1 << 20 /* Don't propagate to slaves. */ + ClientPreventProp ClientFlags = ClientPreventAOFProp | ClientPreventReplProp + ClientPendingWrite ClientFlags = 1 << 21 /* Client has output to send but a write handler is not yet installed. */ + ClientReplyOff ClientFlags = 1 << 22 /* Don't send replies to client. */ + ClientReplySkipNext ClientFlags = 1 << 23 /* Set ClientReplySkip for the next cmd */ + ClientReplySkip ClientFlags = 1 << 24 /* Don't send just this reply. */ + ClientLuaDebug ClientFlags = 1 << 25 /* Run EVAL in debug mode. */ + ClientLuaDebugSync ClientFlags = 1 << 26 /* EVAL debugging without fork() */ + ClientModule ClientFlags = 1 << 27 /* Non-connected client used by some module. */ + ClientProtected ClientFlags = 1 << 28 /* Client should not be freed for now. */ + ClientExecutingCommand ClientFlags = 1 << 29 /* Indicates that the client is currently in the process of handling + a command.
Usually this will be marked only during call(); + however, blocked clients might keep this flag until they + try to reprocess the command. */ + ClientPendingCommand ClientFlags = 1 << 30 /* Indicates the client has a fully parsed command ready for execution. */ + ClientTracking ClientFlags = 1 << 31 /* Client enabled keys tracking in order to perform client side caching. */ + ClientTrackingBrokenRedir ClientFlags = 1 << 32 /* Target client is invalid. */ + ClientTrackingBCAST ClientFlags = 1 << 33 /* Tracking in BCAST mode. */ + ClientTrackingOptIn ClientFlags = 1 << 34 /* Tracking in opt-in mode. */ + ClientTrackingOptOut ClientFlags = 1 << 35 /* Tracking in opt-out mode. */ + ClientTrackingCaching ClientFlags = 1 << 36 /* CACHING yes/no was given, depending on optin/optout mode. */ + ClientTrackingNoLoop ClientFlags = 1 << 37 /* Don't send invalidation messages about writes performed by myself. */ + ClientInTimeoutTable ClientFlags = 1 << 38 /* This client is in the timeout table. */ + ClientProtocolError ClientFlags = 1 << 39 /* Protocol error chatting with it. */ + ClientCloseAfterCommand ClientFlags = 1 << 40 /* Close after executing commands and writing the entire reply. */ + ClientDenyBlocking ClientFlags = 1 << 41 /* Indicates that the client should not be blocked. Currently turned on inside MULTI, Lua, RM_Call, and the AOF client. */ + ClientReplRDBOnly ClientFlags = 1 << 42 /* This client is a replica that only wants RDB without replication buffer. */ + ClientNoEvict ClientFlags = 1 << 43 /* This client is protected against client memory eviction. */ + ClientAllowOOM ClientFlags = 1 << 44 /* Client used by RM_Call is allowed to fully execute scripts even when in OOM. */ + ClientNoTouch ClientFlags = 1 << 45 /* This client will not touch LFU/LRU stats. */ + ClientPushing ClientFlags = 1 << 46 /* This client is pushing notifications.
*/ +) + +// ClientInfo is redis-server ClientInfo, not go-redis *Client +type ClientInfo struct { + ID int64 // redis version 2.8.12, a unique 64-bit client ID + Addr string // address/port of the client + LAddr string // address/port of local address client connected to (bind address) + FD int64 // file descriptor corresponding to the socket + Name string // the name set by the client with CLIENT SETNAME + Age time.Duration // total duration of the connection in seconds + Idle time.Duration // idle time of the connection in seconds + Flags ClientFlags // client flags (see below) + DB int // current database ID + Sub int // number of channel subscriptions + PSub int // number of pattern matching subscriptions + SSub int // redis version 7.0.3, number of shard channel subscriptions + Multi int // number of commands in a MULTI/EXEC context + QueryBuf int // qbuf, query buffer length (0 means no query pending) + QueryBufFree int // qbuf-free, free space of the query buffer (0 means the buffer is full) + ArgvMem int // incomplete arguments for the next command (already extracted from query buffer) + MultiMem int // redis version 7.0, memory is used up by buffered multi commands + BufferSize int // rbs, usable size of buffer + BufferPeak int // rbp, peak used size of buffer in last 5 sec interval + OutputBufferLength int // obl, output buffer length + OutputListLength int // oll, output list length (replies are queued in this list when the buffer is full) + OutputMemory int // omem, output buffer memory usage + TotalMemory int // tot-mem, total memory consumed by this client in its various buffers + Events string // file descriptor events (see below) + LastCmd string // cmd, last command played + User string // the authenticated username of the client + Redir int64 // client id of current client tracking redirection + Resp int // redis version 7.0, client RESP protocol version + LibName string // redis version 7.2, client library name + LibVer string // redis version 7.2, client library version +} + +type ClientInfoCmd struct { + baseCmd + + val *ClientInfo +} + +var _ Cmder = (*ClientInfoCmd)(nil) + +func NewClientInfoCmd(ctx context.Context, args ...interface{}) *ClientInfoCmd { + return &ClientInfoCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *ClientInfoCmd) SetVal(val *ClientInfo) { + cmd.val = val +} + +func (cmd *ClientInfoCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *ClientInfoCmd) Val() *ClientInfo { + return cmd.val +} + +func (cmd *ClientInfoCmd) Result() (*ClientInfo, error) { + return cmd.val, cmd.err +} + +func (cmd *ClientInfoCmd) readReply(rd *proto.Reader) (err error) { + txt, err := rd.ReadString() + if err != nil { + return err + } + + // sds o = catClientInfoString(sdsempty(), c); + // o = sdscatlen(o,"\n",1); + // addReplyVerbatim(c,o,sdslen(o),"txt"); + // sdsfree(o); + cmd.val, err = parseClientInfo(strings.TrimSpace(txt)) + return err +} + +// fmt.Sscanf() cannot handle null values +func parseClientInfo(txt string) (info *ClientInfo, err error) { + info = &ClientInfo{} + for _, s := range strings.Split(txt, " ") { + kv := strings.Split(s, "=") + if len(kv) != 2 { + return nil, fmt.Errorf("redis: unexpected client info data (%s)", s) + } + key, val := kv[0], kv[1] + + switch key { + case "id": + info.ID, err = strconv.ParseInt(val, 10, 64) + case "addr": + info.Addr = val + case "laddr": + info.LAddr = val + case "fd": + info.FD, err = strconv.ParseInt(val, 10, 64) + case "name": + info.Name = val + case "age": + var 
age int + if age, err = strconv.Atoi(val); err == nil { + info.Age = time.Duration(age) * time.Second + } + case "idle": + var idle int + if idle, err = strconv.Atoi(val); err == nil { + info.Idle = time.Duration(idle) * time.Second + } + case "flags": + if val == "N" { + break + } + + for i := 0; i < len(val); i++ { + switch val[i] { + case 'S': + info.Flags |= ClientSlave + case 'O': + info.Flags |= ClientSlave | ClientMonitor + case 'M': + info.Flags |= ClientMaster + case 'P': + info.Flags |= ClientPubSub + case 'x': + info.Flags |= ClientMulti + case 'b': + info.Flags |= ClientBlocked + case 't': + info.Flags |= ClientTracking + case 'R': + info.Flags |= ClientTrackingBrokenRedir + case 'B': + info.Flags |= ClientTrackingBCAST + case 'd': + info.Flags |= ClientDirtyCAS + case 'c': + info.Flags |= ClientCloseAfterCommand + case 'u': + info.Flags |= ClientUnBlocked + case 'A': + info.Flags |= ClientCloseASAP + case 'U': + info.Flags |= ClientUnixSocket + case 'r': + info.Flags |= ClientReadOnly + case 'e': + info.Flags |= ClientNoEvict + case 'T': + info.Flags |= ClientNoTouch + default: + return nil, fmt.Errorf("redis: unexpected client info flags(%s)", string(val[i])) + } + } + case "db": + info.DB, err = strconv.Atoi(val) + case "sub": + info.Sub, err = strconv.Atoi(val) + case "psub": + info.PSub, err = strconv.Atoi(val) + case "ssub": + info.SSub, err = strconv.Atoi(val) + case "multi": + info.Multi, err = strconv.Atoi(val) + case "qbuf": + info.QueryBuf, err = strconv.Atoi(val) + case "qbuf-free": + info.QueryBufFree, err = strconv.Atoi(val) + case "argv-mem": + info.ArgvMem, err = strconv.Atoi(val) + case "multi-mem": + info.MultiMem, err = strconv.Atoi(val) + case "rbs": + info.BufferSize, err = strconv.Atoi(val) + case "rbp": + info.BufferPeak, err = strconv.Atoi(val) + case "obl": + info.OutputBufferLength, err = strconv.Atoi(val) + case "oll": + info.OutputListLength, err = strconv.Atoi(val) + case "omem": + info.OutputMemory, err = strconv.Atoi(val) + case "tot-mem": + info.TotalMemory, err = strconv.Atoi(val) + case "events": + info.Events = val + case "cmd": + info.LastCmd = val + case "user": + info.User = val + case "redir": + info.Redir, err = strconv.ParseInt(val, 10, 64) + case "resp": + info.Resp, err = strconv.Atoi(val) + case "lib-name": + info.LibName = val + case "lib-ver": + info.LibVer = val + default: + return nil, fmt.Errorf("redis: unexpected client info key(%s)", key) + } + + if err != nil { + return nil, err + } + } + + return info, nil +} + +// ------------------------------------------- + +type ACLLogEntry struct { + Count int64 + Reason string + Context string + Object string + Username string + AgeSeconds float64 + ClientInfo *ClientInfo + EntryID int64 + TimestampCreated int64 + TimestampLastUpdated int64 +} + +type ACLLogCmd struct { + baseCmd + + val []*ACLLogEntry +} + +var _ Cmder = (*ACLLogCmd)(nil) + +func NewACLLogCmd(ctx context.Context, args ...interface{}) *ACLLogCmd { + return &ACLLogCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *ACLLogCmd) SetVal(val []*ACLLogEntry) { + cmd.val = val +} + +func (cmd *ACLLogCmd) Val() []*ACLLogEntry { + return cmd.val +} + +func (cmd *ACLLogCmd) Result() ([]*ACLLogEntry, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *ACLLogCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *ACLLogCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + + cmd.val = make([]*ACLLogEntry, n) + for i := 0; i < n; i++ { 
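+ // each ACL LOG entry is a RESP map reply; decode the known fields below and reject unexpected keys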
+ cmd.val[i] = &ACLLogEntry{} + entry := cmd.val[i] + respLen, err := rd.ReadMapLen() + if err != nil { + return err + } + for j := 0; j < respLen; j++ { + key, err := rd.ReadString() + if err != nil { + return err + } + + switch key { + case "count": + entry.Count, err = rd.ReadInt() + case "reason": + entry.Reason, err = rd.ReadString() + case "context": + entry.Context, err = rd.ReadString() + case "object": + entry.Object, err = rd.ReadString() + case "username": + entry.Username, err = rd.ReadString() + case "age-seconds": + entry.AgeSeconds, err = rd.ReadFloat() + case "client-info": + txt, err := rd.ReadString() + if err != nil { + return err + } + entry.ClientInfo, err = parseClientInfo(strings.TrimSpace(txt)) + if err != nil { + return err + } + case "entry-id": + entry.EntryID, err = rd.ReadInt() + case "timestamp-created": + entry.TimestampCreated, err = rd.ReadInt() + case "timestamp-last-updated": + entry.TimestampLastUpdated, err = rd.ReadInt() + default: + return fmt.Errorf("redis: unexpected key %q in ACL LOG reply", key) + } + + if err != nil { + return err + } + } + } + + return nil +} diff --git a/command_test.go b/command_test.go index 775987fc..b9d558cf 100644 --- a/command_test.go +++ b/command_test.go @@ -4,10 +4,10 @@ import ( "errors" "time" - "github.com/go-redis/redis/v8" + "github.com/redis/go-redis/v9" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + . "github.com/bsm/ginkgo/v2" + . "github.com/bsm/gomega" ) var _ = Describe("Cmd", func() { diff --git a/commands.go b/commands.go index eb0757d1..34f4d2c2 100644 --- a/commands.go +++ b/commands.go @@ -2,18 +2,22 @@ package redis import ( "context" + "encoding" "errors" "io" + "net" + "reflect" + "strings" "time" - "github.com/go-redis/redis/v8/internal" + "github.com/redis/go-redis/v9/internal" ) // KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0, // otherwise you will receive an error: (error) ERR syntax error. // For example: // -// rdb.Set(ctx, key, value, redis.KeepTTL) +// rdb.Set(ctx, key, value, redis.KeepTTL) const KeepTTL = -1 func usePrecise(dur time.Duration) bool { @@ -73,11 +77,84 @@ func appendArg(dst []interface{}, arg interface{}) []interface{} { dst = append(dst, k, v) } return dst + case time.Time, time.Duration, encoding.BinaryMarshaler, net.IP: + return append(dst, arg) default: + // try to scan arg as a struct with redis-tagged fields + v := reflect.ValueOf(arg) + if v.Type().Kind() == reflect.Ptr { + if v.IsNil() { + // nil pointer: nothing to append + return dst + } + v = v.Elem() + } + + if v.Type().Kind() == reflect.Struct { + return appendStructField(dst, v) + } + return append(dst, arg) } } +// appendStructField appends the redis-tagged fields and values of the struct v to dst, and returns the appended dst.
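+// +// For example, given a hypothetical tagged struct: +// type MyType struct { Name string `redis:"name"`; Count int `redis:"count,omitempty"` } +// +// appendStructField(dst, reflect.ValueOf(MyType{Name: "x"})) appends "name", "x" to dst +// and skips Count, because the omitempty option drops empty values.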
+func appendStructField(dst []interface{}, v reflect.Value) []interface{} { + typ := v.Type() + for i := 0; i < typ.NumField(); i++ { + tag := typ.Field(i).Tag.Get("redis") + if tag == "" || tag == "-" { + continue + } + name, opt, _ := strings.Cut(tag, ",") + if name == "" { + continue + } + + field := v.Field(i) + + // skip fields tagged omitempty whose values are empty + if omitEmpty(opt) && isEmptyValue(field) { + continue + } + + if field.CanInterface() { + dst = append(dst, name, field.Interface()) + } + } + + return dst +} + +func omitEmpty(opt string) bool { + for opt != "" { + var name string + name, opt, _ = strings.Cut(opt, ",") + if name == "omitempty" { + return true + } + } + return false +} + +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Pointer: + return v.IsNil() + } + return false +} + type Cmdable interface { Pipeline() Pipeliner Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) @@ -86,6 +163,9 @@ type Cmdable interface { TxPipeline() Pipeliner Command(ctx context.Context) *CommandsInfoCmd + CommandList(ctx context.Context, filter *FilterBy) *StringSliceCmd + CommandGetKeys(ctx context.Context, commands ...interface{}) *StringSliceCmd + CommandGetKeysAndFlags(ctx context.Context, commands ...interface{}) *KeyFlagsCmd ClientGetName(ctx context.Context) *StringCmd Echo(ctx context.Context, message interface{}) *StringCmd Ping(ctx context.Context) *StatusCmd @@ -96,6 +176,7 @@ type Cmdable interface { Exists(ctx context.Context, keys ...string) *IntCmd Expire(ctx context.Context, key string, expiration time.Duration) *BoolCmd ExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd + ExpireTime(ctx context.Context, key string) *DurationCmd ExpireNX(ctx context.Context, key string, expiration time.Duration) *BoolCmd ExpireXX(ctx context.Context, key string, expiration time.Duration) *BoolCmd ExpireGT(ctx context.Context, key string, expiration time.Duration) *BoolCmd @@ -109,6 +190,7 @@ type Cmdable interface { Persist(ctx context.Context, key string) *BoolCmd PExpire(ctx context.Context, key string, expiration time.Duration) *BoolCmd PExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd + PExpireTime(ctx context.Context, key string) *DurationCmd PTTL(ctx context.Context, key string) *DurationCmd RandomKey(ctx context.Context) *StringCmd Rename(ctx context.Context, key, newkey string) *StatusCmd @@ -116,6 +198,7 @@ type Cmdable interface { Restore(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd RestoreReplace(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd Sort(ctx context.Context, key string, sort *Sort) *StringSliceCmd + SortRO(ctx context.Context, key string, sort *Sort) *StringSliceCmd SortStore(ctx context.Context, key, store string, sort *Sort) *IntCmd SortInterfaces(ctx context.Context, key string, sort *Sort) *SliceCmd Touch(ctx context.Context, keys ...string) *IntCmd @@ -152,6 +235,7 @@ type Cmdable interface { BitOpXor(ctx context.Context, destKey string, keys ...string) *IntCmd BitOpNot(ctx context.Context, destKey string, key string) *IntCmd
BitPos(ctx context.Context, key string, bit int64, pos ...int64) *IntCmd + BitPosSpan(ctx context.Context, key string, bit int8, start, end int64, span string) *IntCmd BitField(ctx context.Context, key string, args ...interface{}) *IntSliceCmd Scan(ctx context.Context, cursor uint64, match string, count int64) *ScanCmd @@ -177,13 +261,16 @@ type Cmdable interface { HRandFieldWithValues(ctx context.Context, key string, count int) *KeyValueSliceCmd BLPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd + BLMPop(ctx context.Context, timeout time.Duration, direction string, count int64, keys ...string) *KeyValuesCmd BRPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd BRPopLPush(ctx context.Context, source, destination string, timeout time.Duration) *StringCmd + LCS(ctx context.Context, q *LCSQuery) *LCSCmd LIndex(ctx context.Context, key string, index int64) *StringCmd LInsert(ctx context.Context, key, op string, pivot, value interface{}) *IntCmd LInsertBefore(ctx context.Context, key string, pivot, value interface{}) *IntCmd LInsertAfter(ctx context.Context, key string, pivot, value interface{}) *IntCmd LLen(ctx context.Context, key string) *IntCmd + LMPop(ctx context.Context, direction string, count int64, keys ...string) *KeyValuesCmd LPop(ctx context.Context, key string) *StringCmd LPopCount(ctx context.Context, key string, count int) *StringSliceCmd LPos(ctx context.Context, key string, value string, args LPosArgs) *IntCmd @@ -207,6 +294,7 @@ type Cmdable interface { SDiff(ctx context.Context, keys ...string) *StringSliceCmd SDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd SInter(ctx context.Context, keys ...string) *StringSliceCmd + SInterCard(ctx context.Context, limit int64, keys ...string) *IntCmd SInterStore(ctx context.Context, destination string, keys ...string) *IntCmd SIsMember(ctx context.Context, key string, member interface{}) *BoolCmd SMIsMember(ctx context.Context, key string, members ...interface{}) *BoolSliceCmd @@ -255,8 +343,11 @@ type Cmdable interface { BZPopMax(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd BZPopMin(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd + BZMPop(ctx context.Context, timeout time.Duration, order string, count int64, keys ...string) *ZSliceWithKeyCmd ZAdd(ctx context.Context, key string, members ...Z) *IntCmd + ZAddLT(ctx context.Context, key string, members ...Z) *IntCmd + ZAddGT(ctx context.Context, key string, members ...Z) *IntCmd ZAddNX(ctx context.Context, key string, members ...Z) *IntCmd ZAddXX(ctx context.Context, key string, members ...Z) *IntCmd ZAddArgs(ctx context.Context, key string, args ZAddArgs) *IntCmd @@ -267,7 +358,9 @@ type Cmdable interface { ZIncrBy(ctx context.Context, key string, increment float64, member string) *FloatCmd ZInter(ctx context.Context, store *ZStore) *StringSliceCmd ZInterWithScores(ctx context.Context, store *ZStore) *ZSliceCmd + ZInterCard(ctx context.Context, limit int64, keys ...string) *IntCmd ZInterStore(ctx context.Context, destination string, store *ZStore) *IntCmd + ZMPop(ctx context.Context, order string, count int64, keys ...string) *ZSliceWithKeyCmd ZMScore(ctx context.Context, key string, members ...string) *FloatSliceCmd ZPopMax(ctx context.Context, key string, count ...int64) *ZSliceCmd ZPopMin(ctx context.Context, key string, count ...int64) *ZSliceCmd @@ -280,6 +373,7 @@ type Cmdable interface { ZRangeArgsWithScores(ctx context.Context, z ZRangeArgs) 
*ZSliceCmd ZRangeStore(ctx context.Context, dst string, z ZRangeArgs) *IntCmd ZRank(ctx context.Context, key, member string) *IntCmd + ZRankWithScore(ctx context.Context, key, member string) *RankWithScoreCmd ZRem(ctx context.Context, key string, members ...interface{}) *IntCmd ZRemRangeByRank(ctx context.Context, key string, start, stop int64) *IntCmd ZRemRangeByScore(ctx context.Context, key, min, max string) *IntCmd @@ -290,6 +384,7 @@ type Cmdable interface { ZRevRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd ZRevRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd ZRevRank(ctx context.Context, key, member string) *IntCmd + ZRevRankWithScore(ctx context.Context, key, member string) *RankWithScoreCmd ZScore(ctx context.Context, key, member string) *FloatCmd ZUnionStore(ctx context.Context, dest string, store *ZStore) *IntCmd ZRandMember(ctx context.Context, key string, count int) *StringSliceCmd @@ -309,7 +404,9 @@ type Cmdable interface { ClientKill(ctx context.Context, ipPort string) *StatusCmd ClientKillByFilter(ctx context.Context, keys ...string) *IntCmd ClientList(ctx context.Context) *StringCmd + ClientInfo(ctx context.Context) *ClientInfoCmd ClientPause(ctx context.Context, dur time.Duration) *BoolCmd + ClientUnpause(ctx context.Context) *BoolCmd ClientID(ctx context.Context) *IntCmd ClientUnblock(ctx context.Context, id int64) *IntCmd ClientUnblockWithError(ctx context.Context, id int64) *IntCmd @@ -338,17 +435,39 @@ type Cmdable interface { Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd + EvalRO(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd + EvalShaRO(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd ScriptFlush(ctx context.Context) *StatusCmd ScriptKill(ctx context.Context) *StatusCmd ScriptLoad(ctx context.Context, script string) *StringCmd + FunctionLoad(ctx context.Context, code string) *StringCmd + FunctionLoadReplace(ctx context.Context, code string) *StringCmd + FunctionDelete(ctx context.Context, libName string) *StringCmd + FunctionFlush(ctx context.Context) *StringCmd + FunctionKill(ctx context.Context) *StringCmd + FunctionFlushAsync(ctx context.Context) *StringCmd + FunctionList(ctx context.Context, q FunctionListQuery) *FunctionListCmd + FunctionDump(ctx context.Context) *StringCmd + FunctionRestore(ctx context.Context, libDump string) *StringCmd + FunctionStats(ctx context.Context) *FunctionStatsCmd + FCall(ctx context.Context, function string, keys []string, args ...interface{}) *Cmd + FCallRo(ctx context.Context, function string, keys []string, args ...interface{}) *Cmd + FCallRO(ctx context.Context, function string, keys []string, args ...interface{}) *Cmd + Publish(ctx context.Context, channel string, message interface{}) *IntCmd + SPublish(ctx context.Context, channel string, message interface{}) *IntCmd PubSubChannels(ctx context.Context, pattern string) *StringSliceCmd - PubSubNumSub(ctx context.Context, channels ...string) *StringIntMapCmd + PubSubNumSub(ctx context.Context, channels ...string) *MapStringIntCmd PubSubNumPat(ctx context.Context) *IntCmd + PubSubShardChannels(ctx context.Context, pattern string) *StringSliceCmd + PubSubShardNumSub(ctx context.Context, channels ...string) *MapStringIntCmd + ClusterMyShardID(ctx context.Context) *StringCmd 
ClusterSlots(ctx context.Context) *ClusterSlotsCmd + ClusterShards(ctx context.Context) *ClusterShardsCmd + ClusterLinks(ctx context.Context) *ClusterLinksCmd ClusterNodes(ctx context.Context) *StringCmd ClusterMeet(ctx context.Context, host, port string) *StatusCmd ClusterForget(ctx context.Context, nodeID string) *StatusCmd @@ -379,6 +498,12 @@ type Cmdable interface { GeoSearchStore(ctx context.Context, key, store string, q *GeoSearchStoreQuery) *IntCmd GeoDist(ctx context.Context, key string, member1, member2, unit string) *FloatCmd GeoHash(ctx context.Context, key string, members ...string) *StringSliceCmd + + ACLDryRun(ctx context.Context, username string, command ...interface{}) *StringCmd + ACLLog(ctx context.Context, count int64) *ACLLogCmd + ACLLogReset(ctx context.Context) *StatusCmd + + ModuleLoadex(ctx context.Context, conf *ModuleLoadexConfig) *StringCmd } type StatefulCmdable interface { @@ -473,6 +598,50 @@ func (c cmdable) Command(ctx context.Context) *CommandsInfoCmd { return cmd } +// FilterBy is used for the `CommandList` command parameter. +type FilterBy struct { + Module string + ACLCat string + Pattern string +} + +func (c cmdable) CommandList(ctx context.Context, filter *FilterBy) *StringSliceCmd { + args := make([]interface{}, 0, 5) + args = append(args, "command", "list") + if filter != nil { + if filter.Module != "" { + args = append(args, "filterby", "module", filter.Module) + } else if filter.ACLCat != "" { + args = append(args, "filterby", "aclcat", filter.ACLCat) + } else if filter.Pattern != "" { + args = append(args, "filterby", "pattern", filter.Pattern) + } + } + cmd := NewStringSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) CommandGetKeys(ctx context.Context, commands ...interface{}) *StringSliceCmd { + args := make([]interface{}, 2+len(commands)) + args[0] = "command" + args[1] = "getkeys" + copy(args[2:], commands) + cmd := NewStringSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) CommandGetKeysAndFlags(ctx context.Context, commands ...interface{}) *KeyFlagsCmd { + args := make([]interface{}, 2+len(commands)) + args[0] = "command" + args[1] = "getkeysandflags" + copy(args[2:], commands) + cmd := NewKeyFlagsCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + // ClientGetName returns the name of the connection. 
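+// +// A minimal usage sketch (assuming rdb is an initialized *redis.Client): +// +// name, err := rdb.ClientGetName(ctx).Result()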
func (c cmdable) ClientGetName(ctx context.Context) *StringCmd { cmd := NewStringCmd(ctx, "client", "getname") @@ -577,6 +746,12 @@ func (c cmdable) ExpireAt(ctx context.Context, key string, tm time.Time) *BoolCm return cmd } +func (c cmdable) ExpireTime(ctx context.Context, key string) *DurationCmd { + cmd := NewDurationCmd(ctx, time.Second, "expiretime", key) + _ = c(ctx, cmd) + return cmd +} + func (c cmdable) Keys(ctx context.Context, pattern string) *StringSliceCmd { cmd := NewStringSliceCmd(ctx, "keys", pattern) _ = c(ctx, cmd) @@ -645,6 +820,12 @@ func (c cmdable) PExpireAt(ctx context.Context, key string, tm time.Time) *BoolC return cmd } +func (c cmdable) PExpireTime(ctx context.Context, key string) *DurationCmd { + cmd := NewDurationCmd(ctx, time.Millisecond, "pexpiretime", key) + _ = c(ctx, cmd) + return cmd +} + func (c cmdable) PTTL(ctx context.Context, key string) *DurationCmd { cmd := NewDurationCmd(ctx, time.Millisecond, "pttl", key) _ = c(ctx, cmd) @@ -702,8 +883,9 @@ type Sort struct { Alpha bool } -func (sort *Sort) args(key string) []interface{} { - args := []interface{}{"sort", key} +func (sort *Sort) args(command, key string) []interface{} { + args := []interface{}{command, key} + if sort.By != "" { args = append(args, "by", sort.By) } @@ -722,14 +904,20 @@ func (sort *Sort) args(key string) []interface{} { return args } +func (c cmdable) SortRO(ctx context.Context, key string, sort *Sort) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, sort.args("sort_ro", key)...) + _ = c(ctx, cmd) + return cmd +} + func (c cmdable) Sort(ctx context.Context, key string, sort *Sort) *StringSliceCmd { - cmd := NewStringSliceCmd(ctx, sort.args(key)...) + cmd := NewStringSliceCmd(ctx, sort.args("sort", key)...) _ = c(ctx, cmd) return cmd } func (c cmdable) SortStore(ctx context.Context, key, store string, sort *Sort) *IntCmd { - args := sort.args(key) + args := sort.args("sort", key) if store != "" { args = append(args, "store", store) } @@ -739,7 +927,7 @@ func (c cmdable) SortStore(ctx context.Context, key, store string, sort *Sort) * } func (c cmdable) SortInterfaces(ctx context.Context, key string, sort *Sort) *SliceCmd { - cmd := NewSliceCmd(ctx, sort.args(key)...) + cmd := NewSliceCmd(ctx, sort.args("sort", key)...) _ = c(ctx, cmd) return cmd } @@ -864,6 +1052,7 @@ func (c cmdable) MGet(ctx context.Context, keys ...string) *SliceCmd { // - MSet("key1", "value1", "key2", "value2") // - MSet([]string{"key1", "value1", "key2", "value2"}) // - MSet(map[string]interface{}{"key1": "value1", "key2": "value2"}) +// - MSet(struct); for struct types, see the HSet description. func (c cmdable) MSet(ctx context.Context, values ...interface{}) *StatusCmd { args := make([]interface{}, 1, 1+len(values)) args[0] = "mset" @@ -877,6 +1066,7 @@ func (c cmdable) MSet(ctx context.Context, values ...interface{}) *StatusCmd { // - MSetNX("key1", "value1", "key2", "value2") // - MSetNX([]string{"key1", "value1", "key2", "value2"}) // - MSetNX(map[string]interface{}{"key1": "value1", "key2": "value2"}) +// - MSetNX(struct); for struct types, see the HSet description. func (c cmdable) MSetNX(ctx context.Context, values ...interface{}) *BoolCmd { args := make([]interface{}, 1, 1+len(values)) args[0] = "msetnx" @@ -1108,6 +1298,8 @@ func (c cmdable) BitOpNot(ctx context.Context, destKey string, key string) *IntC return c.bitOp(ctx, "not", destKey, key) } +// BitPos is the API for Redis versions before 7.0, cmd: bitpos key bit start end. +// If you need the `byte | bit` parameter, use `BitPosSpan`.
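+// +// A usage sketch (assuming rdb is an initialized *redis.Client): +// +// pos, err := rdb.BitPos(ctx, "mykey", 1, 0, -1).Result() // first set bit, searching the whole string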
func (c cmdable) BitPos(ctx context.Context, key string, bit int64, pos ...int64) *IntCmd { args := make([]interface{}, 3+len(pos)) args[0] = "bitpos" @@ -1128,6 +1320,18 @@ func (c cmdable) BitPos(ctx context.Context, key string, bit int64, pos ...int64 return cmd } +// BitPosSpan supports the `byte | bit` parameter introduced in Redis 7.0; +// the bitpos command defaults to the byte type for the `start-end` range, +// which means it counts in bytes from start to end. Set the value +// of "span" to choose the type of `start-end`: +// span = "bit", cmd: bitpos key bit start end bit +// span = "byte", cmd: bitpos key bit start end byte +func (c cmdable) BitPosSpan(ctx context.Context, key string, bit int8, start, end int64, span string) *IntCmd { + cmd := NewIntCmd(ctx, "bitpos", key, bit, start, end, span) + _ = c(ctx, cmd) + return cmd +} + func (c cmdable) BitField(ctx context.Context, key string, args ...interface{}) *IntSliceCmd { a := make([]interface{}, 0, 2+len(args)) a = append(a, "bitfield") @@ -1279,11 +1483,29 @@ func (c cmdable) HMGet(ctx context.Context, key string, fields ...string) *Slice } // HSet accepts values in following formats: +// // - HSet("myhash", "key1", "value1", "key2", "value2") +// // - HSet("myhash", []string{"key1", "value1", "key2", "value2"}) +// // - HSet("myhash", map[string]interface{}{"key1": "value1", "key2": "value2"}) // -// Note that it requires Redis v4 for multiple field/value pairs support. +// Working with struct types via the "redis" tag: +// type MyHash struct { Key1 string `redis:"key1"`; Key2 int `redis:"key2"` } +// +// - HSet("myhash", MyHash{"value1", "value2"}) Warn: requires redis-server >= 4.0 +// +// The struct may also be passed as a pointer; only fields with a redis tag are parsed. +// If you don't want a field to be read, use the `redis:"-"` tag to ignore it, +// or simply leave the redis tag unset. +// Only simple field types are supported: +// string, int/uint(8,16,32,64), float(32,64), time.Time (as RFC3339Nano), time.Duration (as nanoseconds); +// for more complex or custom data types, implement the encoding.BinaryMarshaler interface. +// +// Note that in older versions of Redis server (redis-server < 4.0), HSet only supports a single key-value pair. +// redis-docs: https://redis.io/commands/hset (Starting with Redis version 4.0.0: Accepts multiple field and value arguments.) +// If you are using a struct type with more than one field, +// you will receive an error similar to "ERR wrong number of arguments"; use HMSet as a substitute. func (c cmdable) HSet(ctx context.Context, key string, values ...interface{}) *IntCmd { args := make([]interface{}, 2, 2+len(values)) args[0] = "hset" @@ -1346,6 +1568,21 @@ func (c cmdable) BLPop(ctx context.Context, timeout time.Duration, keys ...strin return cmd } +func (c cmdable) BLMPop(ctx context.Context, timeout time.Duration, direction string, count int64, keys ...string) *KeyValuesCmd { + args := make([]interface{}, 3+len(keys), 6+len(keys)) + args[0] = "blmpop" + args[1] = formatSec(ctx, timeout) + args[2] = len(keys) + for i, key := range keys { + args[3+i] = key + } + args = append(args, strings.ToLower(direction), "count", count) + cmd := NewKeyValuesCmd(ctx, args...)
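+ // blocking command: use the caller's timeout as this command's read deadline instead of the client's default read timeout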
+ cmd.setReadTimeout(timeout) + _ = c(ctx, cmd) + return cmd +} + func (c cmdable) BRPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd { args := make([]interface{}, 1+len(keys)+1) args[0] = "brpop" @@ -1372,12 +1609,34 @@ func (c cmdable) BRPopLPush(ctx context.Context, source, destination string, tim return cmd } +func (c cmdable) LCS(ctx context.Context, q *LCSQuery) *LCSCmd { + cmd := NewLCSCmd(ctx, q) + _ = c(ctx, cmd) + return cmd +} + func (c cmdable) LIndex(ctx context.Context, key string, index int64) *StringCmd { cmd := NewStringCmd(ctx, "lindex", key, index) _ = c(ctx, cmd) return cmd } +// LMPop pops one or more elements from the first non-empty list key from the list of provided key names. +// direction: left or right, count: > 0 +// example: client.LMPop(ctx, "left", 3, "key1", "key2") +func (c cmdable) LMPop(ctx context.Context, direction string, count int64, keys ...string) *KeyValuesCmd { + args := make([]interface{}, 2+len(keys), 5+len(keys)) + args[0] = "lmpop" + args[1] = len(keys) + for i, key := range keys { + args[2+i] = key + } + args = append(args, strings.ToLower(direction), "count", count) + cmd := NewKeyValuesCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + func (c cmdable) LInsert(ctx context.Context, key, op string, pivot, value interface{}) *IntCmd { cmd := NewIntCmd(ctx, "linsert", key, op, pivot, value) _ = c(ctx, cmd) @@ -1606,6 +1865,22 @@ func (c cmdable) SInter(ctx context.Context, keys ...string) *StringSliceCmd { return cmd } +func (c cmdable) SInterCard(ctx context.Context, limit int64, keys ...string) *IntCmd { + args := make([]interface{}, 4+len(keys)) + args[0] = "sintercard" + numkeys := int64(0) + for i, key := range keys { + args[2+i] = key + numkeys++ + } + args[1] = numkeys + args[2+numkeys] = "limit" + args[3+numkeys] = limit + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + func (c cmdable) SInterStore(ctx context.Context, destination string, keys ...string) *IntCmd { args := make([]interface{}, 2+len(keys)) args[0] = "sinterstore" @@ -1737,8 +2012,6 @@ type XAddArgs struct { Values interface{} } -// XAdd a.Limit has a bug, please confirm it and use it. -// issue: https://github.com/redis/redis/issues/9046 func (c cmdable) XAdd(ctx context.Context, a *XAddArgs) *StringCmd { args := make([]interface{}, 0, 11) args = append(args, "xadd", a.Stream) @@ -2046,8 +2319,10 @@ func xClaimArgs(a *XClaimArgs) []interface{} { // xTrim If approx is true, add the "~" parameter, otherwise it is the default "=" (redis default). // example: -// XTRIM key MAXLEN/MINID threshold LIMIT limit. -// XTRIM key MAXLEN/MINID ~ threshold LIMIT limit. +// +// XTRIM key MAXLEN/MINID threshold LIMIT limit. +// XTRIM key MAXLEN/MINID ~ threshold LIMIT limit. +// // The redis-server version is lower than 6.2, please set limit to 0. func (c cmdable) xTrim( ctx context.Context, key, strategy string, @@ -2073,22 +2348,14 @@ func (c cmdable) XTrimMaxLen(ctx context.Context, key string, maxLen int64) *Int return c.xTrim(ctx, key, "maxlen", false, maxLen, 0) } -// XTrimMaxLenApprox LIMIT has a bug, please confirm it and use it. -// issue: https://github.com/redis/redis/issues/9046 -// cmd: XTRIM key MAXLEN ~ maxLen LIMIT limit func (c cmdable) XTrimMaxLenApprox(ctx context.Context, key string, maxLen, limit int64) *IntCmd { return c.xTrim(ctx, key, "maxlen", true, maxLen, limit) } -// XTrimMinID No `~` rules are used, `limit` cannot be used.
-// cmd: XTRIM key MINID minID func (c cmdable) XTrimMinID(ctx context.Context, key string, minID string) *IntCmd { return c.xTrim(ctx, key, "minid", false, minID, 0) } -// XTrimMinIDApprox LIMIT has a bug, please confirm it and use it. -// issue: https://github.com/redis/redis/issues/9046 -// cmd: XTRIM key MINID ~ minID LIMIT limit func (c cmdable) XTrimMinIDApprox(ctx context.Context, key string, minID string, limit int64) *IntCmd { return c.xTrim(ctx, key, "minid", true, minID, limit) } @@ -2201,6 +2468,26 @@ func (c cmdable) BZPopMin(ctx context.Context, timeout time.Duration, keys ...st return cmd } +// BZMPop is the blocking variant of ZMPOP. +// When any of the sorted sets contains elements, this command behaves exactly like ZMPOP. +// When all sorted sets are empty, Redis will block the connection until another client adds members to one of the keys or until the timeout elapses. +// A timeout of zero can be used to block indefinitely. +// example: client.BZMPop(ctx, 0, "max", 1, "set") +func (c cmdable) BZMPop(ctx context.Context, timeout time.Duration, order string, count int64, keys ...string) *ZSliceWithKeyCmd { + args := make([]interface{}, 3+len(keys), 6+len(keys)) + args[0] = "bzmpop" + args[1] = formatSec(ctx, timeout) + args[2] = len(keys) + for i, key := range keys { + args[3+i] = key + } + args = append(args, strings.ToLower(order), "count", count) + cmd := NewZSliceWithKeyCmd(ctx, args...) + cmd.setReadTimeout(timeout) + _ = c(ctx, cmd) + return cmd +} + // ZAddArgs WARN: The GT, LT and NX options are mutually exclusive. type ZAddArgs struct { NX bool @@ -2260,6 +2547,22 @@ func (c cmdable) ZAdd(ctx context.Context, key string, members ...Z) *IntCmd { }) } +// ZAddLT Redis `ZADD key LT score member [score member ...]` command. +func (c cmdable) ZAddLT(ctx context.Context, key string, members ...Z) *IntCmd { + return c.ZAddArgs(ctx, key, ZAddArgs{ + LT: true, + Members: members, + }) +} + +// ZAddGT Redis `ZADD key GT score member [score member ...]` command. +func (c cmdable) ZAddGT(ctx context.Context, key string, members ...Z) *IntCmd { + return c.ZAddArgs(ctx, key, ZAddArgs{ + GT: true, + Members: members, + }) +} + // ZAddNX Redis `ZADD key NX score member [score member ...]` command. func (c cmdable) ZAddNX(ctx context.Context, key string, members ...Z) *IntCmd { return c.ZAddArgs(ctx, key, ZAddArgs{ @@ -2331,6 +2634,38 @@ func (c cmdable) ZInterWithScores(ctx context.Context, store *ZStore) *ZSliceCmd return cmd } +func (c cmdable) ZInterCard(ctx context.Context, limit int64, keys ...string) *IntCmd { + args := make([]interface{}, 4+len(keys)) + args[0] = "zintercard" + numkeys := int64(0) + for i, key := range keys { + args[2+i] = key + numkeys++ + } + args[1] = numkeys + args[2+numkeys] = "limit" + args[3+numkeys] = limit + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// ZMPop pops one or more elements with the highest or lowest score from the first non-empty sorted set key from the list of provided key names. +// direction: "max" (highest score) or "min" (lowest score), count: > 0 +// example: client.ZMPop(ctx, "max", 5, "set1", "set2") +func (c cmdable) ZMPop(ctx context.Context, order string, count int64, keys ...string) *ZSliceWithKeyCmd { + args := make([]interface{}, 2+len(keys), 5+len(keys)) + args[0] = "zmpop" + args[1] = len(keys) + for i, key := range keys { + args[2+i] = key + } + args = append(args, strings.ToLower(order), "count", count) + cmd := NewZSliceWithKeyCmd(ctx, args...)
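+ // unlike the blocking BZMPop above, ZMPop returns immediately, so no read-timeout override is needed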
+ _ = c(ctx, cmd) + return cmd +} + func (c cmdable) ZMScore(ctx context.Context, key string, members ...string) *FloatSliceCmd { args := make([]interface{}, 2+len(members)) args[0] = "zmscore" @@ -2385,11 +2720,13 @@ func (c cmdable) ZPopMin(ctx context.Context, key string, count ...int64) *ZSlic // ZRangeArgs is all the options of the ZRange command. // In version> 6.2.0, you can replace the(cmd): -// ZREVRANGE, -// ZRANGEBYSCORE, -// ZREVRANGEBYSCORE, -// ZRANGEBYLEX, -// ZREVRANGEBYLEX. +// +// ZREVRANGE, +// ZRANGEBYSCORE, +// ZREVRANGEBYSCORE, +// ZRANGEBYLEX, +// ZREVRANGEBYLEX. +// // Please pay attention to your redis-server version. // // Rev, ByScore, ByLex and Offset+Count options require redis-server 6.2.0 and higher. @@ -2552,6 +2889,14 @@ func (c cmdable) ZRank(ctx context.Context, key, member string) *IntCmd { return cmd } +// ZRankWithScore returns the rank of member in the sorted set, together with its score. +// According to the Redis documentation, if member or key does not exist, it returns a redis.Nil error. +func (c cmdable) ZRankWithScore(ctx context.Context, key, member string) *RankWithScoreCmd { + cmd := NewRankWithScoreCmd(ctx, "zrank", key, member, "withscore") + _ = c(ctx, cmd) + return cmd +} + func (c cmdable) ZRem(ctx context.Context, key string, members ...interface{}) *IntCmd { args := make([]interface{}, 2, 2+len(members)) args[0] = "zrem" @@ -2592,6 +2937,8 @@ func (c cmdable) ZRevRange(ctx context.Context, key string, start, stop int64) * return cmd } +// ZRevRangeWithScores returns the members of the sorted set at key in the range [start, stop], +// with their scores, ordered from highest to lowest score. func (c cmdable) ZRevRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd { cmd := NewZSliceCmd(ctx, "zrevrange", key, start, stop, "withscores") _ = c(ctx, cmd) @@ -2642,6 +2989,12 @@ func (c cmdable) ZRevRank(ctx context.Context, key, member string) *IntCmd { return cmd } +func (c cmdable) ZRevRankWithScore(ctx context.Context, key, member string) *RankWithScoreCmd { + cmd := NewRankWithScoreCmd(ctx, "zrevrank", key, member, "withscore") + _ = c(ctx, cmd) + return cmd +} + func (c cmdable) ZScore(ctx context.Context, key, member string) *FloatCmd { cmd := NewFloatCmd(ctx, "zscore", key, member) _ = c(ctx, cmd) @@ -2793,7 +3146,7 @@ func (c cmdable) ClientKill(ctx context.Context, ipPort string) *StatusCmd { // ClientKillByFilter is new style syntax, while the ClientKill is old // // CLIENT KILL