From 08a52131e5553eea4b617e2f8a6cad33d72f9d8e Mon Sep 17 00:00:00 2001 From: Matthieu MOREL Date: Fri, 1 Nov 2024 10:41:40 +0100 Subject: [PATCH] fix: use require instead of t.Fatal(err) in tests/integration package Signed-off-by: Matthieu MOREL --- tests/integration/clientv3/cluster_test.go | 6 +- .../clientv3/concurrency/mutex_test.go | 46 +-- .../clientv3/concurrency/session_test.go | 33 +- .../clientv3/connectivity/black_hole_test.go | 28 +- .../clientv3/connectivity/dial_test.go | 32 +- .../connectivity/network_partition_test.go | 25 +- .../connectivity/server_shutdown_test.go | 18 +- .../recipes/v3_double_barrier_test.go | 6 +- .../experimental/recipes/v3_lock_test.go | 22 +- tests/integration/clientv3/kv_test.go | 57 +-- .../integration/clientv3/lease/lease_test.go | 70 +--- .../clientv3/lease/leasing_test.go | 378 +++++++----------- .../integration/clientv3/maintenance_test.go | 40 +- .../integration/clientv3/mirror_auth_test.go | 13 +- tests/integration/clientv3/mirror_test.go | 10 +- tests/integration/clientv3/namespace_test.go | 20 +- .../integration/clientv3/ordering_kv_test.go | 46 +-- .../clientv3/ordering_util_test.go | 49 +-- tests/integration/clientv3/txn_test.go | 31 +- tests/integration/clientv3/user_test.go | 38 +- tests/integration/clientv3/watch_test.go | 114 ++---- tests/integration/cluster_test.go | 33 +- tests/integration/grpc_test.go | 13 +- tests/integration/hashkv_test.go | 6 +- tests/integration/member_test.go | 4 +- tests/integration/metrics_test.go | 56 +-- tests/integration/revision_test.go | 13 +- tests/integration/tracing_test.go | 8 +- tests/integration/utl_wal_version_test.go | 13 +- tests/integration/v3_alarm_test.go | 80 ++-- tests/integration/v3_auth_test.go | 47 +-- tests/integration/v3_election_test.go | 38 +- tests/integration/v3_failover_test.go | 5 +- tests/integration/v3_grpc_inflight_test.go | 11 +- tests/integration/v3_grpc_test.go | 62 +-- tests/integration/v3_kv_test.go | 34 +- tests/integration/v3_leadership_test.go | 9 +- tests/integration/v3_lease_test.go | 186 +++------ tests/integration/v3_stm_test.go | 6 +- tests/integration/v3_tls_test.go | 5 +- tests/integration/v3_watch_restore_test.go | 5 +- tests/integration/v3_watch_test.go | 43 +- tests/integration/v3election_grpc_test.go | 16 +- tests/integration/v3lock_grpc_test.go | 9 +- 44 files changed, 593 insertions(+), 1191 deletions(-) diff --git a/tests/integration/clientv3/cluster_test.go b/tests/integration/clientv3/cluster_test.go index 9ed510108b8..ae644afb4bf 100644 --- a/tests/integration/clientv3/cluster_test.go +++ b/tests/integration/clientv3/cluster_test.go @@ -23,6 +23,7 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" "go.etcd.io/etcd/client/pkg/v3/types" integration2 "go.etcd.io/etcd/tests/v3/framework/integration" ) @@ -266,9 +267,8 @@ func TestMemberPromote(t *testing.T) { // (the response has information on peer urls of the existing members in cluster) learnerMember := clus.MustNewMember(t, memberAddResp) - if err = learnerMember.Launch(); err != nil { - t.Fatal(err) - } + err = learnerMember.Launch() + require.NoError(t, err) // retry until promote succeed or timeout timeout := time.After(5 * time.Second) diff --git a/tests/integration/clientv3/concurrency/mutex_test.go b/tests/integration/clientv3/concurrency/mutex_test.go index bf5b187686f..f9e864fe531 100644 --- a/tests/integration/clientv3/concurrency/mutex_test.go +++ b/tests/integration/clientv3/concurrency/mutex_test.go @@ -19,6 +19,8 @@ import ( "errors" "testing" + 
"github.com/stretchr/testify/require" + clientv3 "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/client/v3/concurrency" integration2 "go.etcd.io/etcd/tests/v3/framework/integration" @@ -26,29 +28,22 @@ import ( func TestMutexLockSessionExpired(t *testing.T) { cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: exampleEndpoints()}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer cli.Close() // create two separate sessions for lock competition s1, err := concurrency.NewSession(cli) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer s1.Close() m1 := concurrency.NewMutex(s1, "/my-lock/") s2, err := concurrency.NewSession(cli) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) m2 := concurrency.NewMutex(s2, "/my-lock/") // acquire lock for s1 - if err = m1.Lock(context.TODO()); err != nil { - t.Fatal(err) - } + err = m1.Lock(context.TODO()) + require.NoError(t, err) m2Locked := make(chan struct{}) var err2 error @@ -62,27 +57,20 @@ func TestMutexLockSessionExpired(t *testing.T) { // revoke the session of m2 before unlock m1 err = s2.Close() - if err != nil { - t.Fatal(err) - } - if err := m1.Unlock(context.TODO()); err != nil { - t.Fatal(err) - } + require.NoError(t, err) + err = m1.Unlock(context.TODO()) + require.NoError(t, err) <-m2Locked } func TestMutexUnlock(t *testing.T) { cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: exampleEndpoints()}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer cli.Close() s1, err := concurrency.NewSession(cli) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer s1.Close() m1 := concurrency.NewMutex(s1, "/my-lock/") @@ -94,13 +82,11 @@ func TestMutexUnlock(t *testing.T) { t.Fatal(err) } - if err = m1.Lock(context.TODO()); err != nil { - t.Fatal(err) - } + err = m1.Lock(context.TODO()) + require.NoError(t, err) - if err = m1.Unlock(context.TODO()); err != nil { - t.Fatal(err) - } + err = m1.Unlock(context.TODO()) + require.NoError(t, err) err = m1.Unlock(context.TODO()) if err == nil { diff --git a/tests/integration/clientv3/concurrency/session_test.go b/tests/integration/clientv3/concurrency/session_test.go index 37fc1899b0f..e9bb5c5a328 100644 --- a/tests/integration/clientv3/concurrency/session_test.go +++ b/tests/integration/clientv3/concurrency/session_test.go @@ -20,6 +20,7 @@ import ( "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" clientv3 "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/client/v3/concurrency" @@ -28,18 +29,12 @@ import ( func TestSessionOptions(t *testing.T) { cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: exampleEndpoints()}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer cli.Close() lease, err := cli.Grant(context.Background(), 100) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) s, err := concurrency.NewSession(cli, concurrency.WithLease(lease.ID)) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer s.Close() assert.Equal(t, s.Lease(), lease.ID) @@ -52,16 +47,12 @@ func TestSessionOptions(t *testing.T) { } func TestSessionTTLOptions(t *testing.T) { cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: exampleEndpoints()}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer cli.Close() var setTTL = 90 s, err := concurrency.NewSession(cli, concurrency.WithTTL(setTTL)) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer s.Close() leaseID := s.Lease() @@ -83,18 +74,12 @@ func 
TestSessionTTLOptions(t *testing.T) { func TestSessionCtx(t *testing.T) { cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: exampleEndpoints()}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer cli.Close() lease, err := cli.Grant(context.Background(), 100) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) s, err := concurrency.NewSession(cli, concurrency.WithLease(lease.ID)) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer s.Close() assert.Equal(t, s.Lease(), lease.ID) diff --git a/tests/integration/clientv3/connectivity/black_hole_test.go b/tests/integration/clientv3/connectivity/black_hole_test.go index e1fc1c57b97..00b7849ea56 100644 --- a/tests/integration/clientv3/connectivity/black_hole_test.go +++ b/tests/integration/clientv3/connectivity/black_hole_test.go @@ -22,6 +22,7 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" "google.golang.org/grpc" "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" @@ -62,9 +63,7 @@ func TestBalancerUnderBlackholeKeepAliveWatch(t *testing.T) { timeout := pingInterval + integration2.RequestWaitTimeout cli, err := integration2.NewClient(t, ccfg) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer cli.Close() wch := cli.Watch(context.Background(), "foo", clientv3.WithCreatedNotify()) @@ -80,9 +79,8 @@ func TestBalancerUnderBlackholeKeepAliveWatch(t *testing.T) { clus.Members[0].Bridge().Blackhole() - if _, err = clus.Client(1).Put(context.TODO(), "foo", "bar"); err != nil { - t.Fatal(err) - } + _, err = clus.Client(1).Put(context.TODO(), "foo", "bar") + require.NoError(t, err) select { case <-wch: case <-time.After(timeout): @@ -97,12 +95,10 @@ func TestBalancerUnderBlackholeKeepAliveWatch(t *testing.T) { clus.Members[1].Bridge().Blackhole() // make sure client[0] can connect to eps[0] after remove the blackhole. 
- if _, err = clus.Client(0).Get(context.TODO(), "foo"); err != nil { - t.Fatal(err) - } - if _, err = clus.Client(0).Put(context.TODO(), "foo", "bar1"); err != nil { - t.Fatal(err) - } + _, err = clus.Client(0).Get(context.TODO(), "foo") + require.NoError(t, err) + _, err = clus.Client(0).Put(context.TODO(), "foo", "bar1") + require.NoError(t, err) select { case <-wch: @@ -183,9 +179,7 @@ func testBalancerUnderBlackholeNoKeepAlive(t *testing.T, op func(*clientv3.Clien DialOptions: []grpc.DialOption{grpc.WithBlock()}, } cli, err := integration2.NewClient(t, ccfg) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer cli.Close() // wait for eps[0] to be pinned @@ -214,7 +208,5 @@ func testBalancerUnderBlackholeNoKeepAlive(t *testing.T, op func(*clientv3.Clien t.Errorf("#%d: failed with error %v", i, err) } } - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) } diff --git a/tests/integration/clientv3/connectivity/dial_test.go b/tests/integration/clientv3/connectivity/dial_test.go index 769ce17f4df..54556d0f8dd 100644 --- a/tests/integration/clientv3/connectivity/dial_test.go +++ b/tests/integration/clientv3/connectivity/dial_test.go @@ -21,6 +21,7 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" "google.golang.org/grpc" pb "go.etcd.io/etcd/api/v3/etcdserverpb" @@ -54,9 +55,7 @@ func TestDialTLSExpired(t *testing.T) { defer clus.Terminate(t) tls, err := testTLSInfoExpired.ClientConfig() - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // expect remote errors "tls: bad certificate" _, err = integration2.NewClient(t, clientv3.Config{ Endpoints: []string{clus.Members[0].GRPCURL}, @@ -120,9 +119,7 @@ func testDialSetEndpoints(t *testing.T, setBefore bool) { DialOptions: []grpc.DialOption{grpc.WithBlock()}, } cli, err := integration2.NewClient(t, cfg) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer cli.Close() if setBefore { @@ -137,9 +134,8 @@ func testDialSetEndpoints(t *testing.T, setBefore bool) { } time.Sleep(time.Second * 2) ctx, cancel := context.WithTimeout(context.Background(), integration2.RequestWaitTimeout) - if _, err = cli.Get(ctx, "foo", clientv3.WithSerializable()); err != nil { - t.Fatal(err) - } + _, err = cli.Get(ctx, "foo", clientv3.WithSerializable()) + require.NoError(t, err) cancel() } @@ -160,9 +156,8 @@ func TestSwitchSetEndpoints(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() - if _, err := cli.Get(ctx, "foo"); err != nil { - t.Fatal(err) - } + _, err := cli.Get(ctx, "foo") + require.NoError(t, err) } func TestRejectOldCluster(t *testing.T) { @@ -178,9 +173,7 @@ func TestRejectOldCluster(t *testing.T) { RejectOldCluster: true, } cli, err := integration2.NewClient(t, cfg) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) cli.Close() } @@ -192,9 +185,7 @@ func TestDialForeignEndpoint(t *testing.T) { defer clus.Terminate(t) conn, err := clus.Client(0).Dial(clus.Client(1).Endpoints()[0]) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer conn.Close() // grpc can return a lazy connection that's not connected yet; confirm @@ -202,9 +193,8 @@ func TestDialForeignEndpoint(t *testing.T) { kvc := clientv3.NewKVFromKVClient(pb.NewKVClient(conn), clus.Client(0)) ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Second) defer cancel() - if _, gerr := kvc.Get(ctx, "abc"); gerr != nil { - t.Fatal(err) - } + _, gerr := kvc.Get(ctx, "abc") + require.NoError(t, gerr) } // TestSetEndpointAndPut checks that a Put 
following a SetEndpoints diff --git a/tests/integration/clientv3/connectivity/network_partition_test.go b/tests/integration/clientv3/connectivity/network_partition_test.go index 6c99c32d04c..557cdb0b31e 100644 --- a/tests/integration/clientv3/connectivity/network_partition_test.go +++ b/tests/integration/clientv3/connectivity/network_partition_test.go @@ -22,6 +22,7 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" "google.golang.org/grpc" pb "go.etcd.io/etcd/api/v3/etcdserverpb" @@ -124,9 +125,7 @@ func testBalancerUnderNetworkPartition(t *testing.T, op func(*clientv3.Client, c DialOptions: []grpc.DialOption{grpc.WithBlock()}, } cli, err := integration2.NewClient(t, ccfg) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer cli.Close() // wait for eps[0] to be pinned clientv3test.MustWaitPinReady(t, cli) @@ -180,9 +179,7 @@ func TestBalancerUnderNetworkPartitionLinearizableGetLeaderElection(t *testing.T DialTimeout: 2 * time.Second, DialOptions: []grpc.DialOption{grpc.WithBlock()}, }) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer cli.Close() // add all eps to list, so that when the original pined one fails @@ -201,9 +198,7 @@ func TestBalancerUnderNetworkPartitionLinearizableGetLeaderElection(t *testing.T break } } - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) } func TestBalancerUnderNetworkPartitionWatchLeader(t *testing.T) { @@ -233,9 +228,7 @@ func testBalancerUnderNetworkPartitionWatch(t *testing.T, isolateLeader bool) { // pin eps[target] watchCli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{eps[target]}}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) t.Logf("watchCli created to: %v", target) defer watchCli.Close() @@ -291,9 +284,7 @@ func TestDropReadUnderNetworkPartition(t *testing.T) { DialOptions: []grpc.DialOption{grpc.WithBlock()}, } cli, err := integration2.NewClient(t, ccfg) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer cli.Close() // wait for eps[0] to be pinned @@ -303,9 +294,7 @@ func TestDropReadUnderNetworkPartition(t *testing.T) { cli.SetEndpoints(eps...) 
time.Sleep(time.Second * 2) conn, err := cli.Dial(clus.Members[(leaderIndex+1)%3].GRPCURL) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer conn.Close() clus.Members[leaderIndex].InjectPartition(t, clus.Members[(leaderIndex+1)%3], clus.Members[(leaderIndex+2)%3]) diff --git a/tests/integration/clientv3/connectivity/server_shutdown_test.go b/tests/integration/clientv3/connectivity/server_shutdown_test.go index 3afc8eb4c01..9e44ea9f906 100644 --- a/tests/integration/clientv3/connectivity/server_shutdown_test.go +++ b/tests/integration/clientv3/connectivity/server_shutdown_test.go @@ -22,6 +22,8 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" clientv3 "go.etcd.io/etcd/client/v3" integration2 "go.etcd.io/etcd/tests/v3/framework/integration" @@ -45,9 +47,7 @@ func TestBalancerUnderServerShutdownWatch(t *testing.T) { // pin eps[lead] watchCli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{eps[lead]}}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer watchCli.Close() // wait for eps[lead] to be pinned @@ -91,9 +91,7 @@ func TestBalancerUnderServerShutdownWatch(t *testing.T) { // writes to eps[lead+1] putCli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{eps[(lead+1)%3]}}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer putCli.Close() for { ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) @@ -154,9 +152,7 @@ func testBalancerUnderServerShutdownMutable(t *testing.T, op func(*clientv3.Clie // pin eps[0] cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{eps[0]}}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer cli.Close() // wait for eps[0] to be pinned @@ -177,9 +173,7 @@ func testBalancerUnderServerShutdownMutable(t *testing.T, op func(*clientv3.Clie cctx, ccancel := context.WithTimeout(context.Background(), time.Second) err = op(cli, cctx) ccancel() - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) } func TestBalancerUnderServerShutdownGetLinearizable(t *testing.T) { diff --git a/tests/integration/clientv3/experimental/recipes/v3_double_barrier_test.go b/tests/integration/clientv3/experimental/recipes/v3_double_barrier_test.go index 680476b48da..4ff1c049534 100644 --- a/tests/integration/clientv3/experimental/recipes/v3_double_barrier_test.go +++ b/tests/integration/clientv3/experimental/recipes/v3_double_barrier_test.go @@ -22,6 +22,7 @@ import ( "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" clientv3 "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/client/v3/concurrency" @@ -216,9 +217,8 @@ func TestDoubleBarrierFailover(t *testing.T) { } } - if err = s0.Close(); err != nil { - t.Fatal(err) - } + err = s0.Close() + require.NoError(t, err) // join on rest of waiters for i := 0; i < waiters-1; i++ { select { diff --git a/tests/integration/clientv3/experimental/recipes/v3_lock_test.go b/tests/integration/clientv3/experimental/recipes/v3_lock_test.go index 1fcbc46e144..02652c0bd5b 100644 --- a/tests/integration/clientv3/experimental/recipes/v3_lock_test.go +++ b/tests/integration/clientv3/experimental/recipes/v3_lock_test.go @@ -23,6 +23,8 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "go.etcd.io/etcd/api/v3/mvccpb" clientv3 "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/client/v3/concurrency" @@ -182,14 +184,12 @@ func TestMutexSessionRelock(t *testing.T) { } m := concurrency.NewMutex(session, 
"test-mutex") - if err := m.Lock(context.TODO()); err != nil { - t.Fatal(err) - } + err = m.Lock(context.TODO()) + require.NoError(t, err) m2 := concurrency.NewMutex(session, "test-mutex") - if err := m2.Lock(context.TODO()); err != nil { - t.Fatal(err) - } + err = m2.Lock(context.TODO()) + require.NoError(t, err) } // TestMutexWaitsOnCurrentHolder ensures a mutex is only acquired once all @@ -211,9 +211,8 @@ func TestMutexWaitsOnCurrentHolder(t *testing.T) { } defer firstOwnerSession.Close() firstOwnerMutex := concurrency.NewMutex(firstOwnerSession, "test-mutex") - if err = firstOwnerMutex.Lock(cctx); err != nil { - t.Fatal(err) - } + err = firstOwnerMutex.Lock(cctx) + require.NoError(t, err) victimSession, err := concurrency.NewSession(cli) if err != nil { @@ -286,9 +285,8 @@ func TestMutexWaitsOnCurrentHolder(t *testing.T) { default: } - if err := firstOwnerMutex.Unlock(cctx); err != nil { - t.Fatal(err) - } + err = firstOwnerMutex.Unlock(cctx) + require.NoError(t, err) select { case <-newOwnerDonec: diff --git a/tests/integration/clientv3/kv_test.go b/tests/integration/clientv3/kv_test.go index 5f66b4f9034..a9113dc0b28 100644 --- a/tests/integration/clientv3/kv_test.go +++ b/tests/integration/clientv3/kv_test.go @@ -26,6 +26,7 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -61,9 +62,7 @@ func TestKVPutError(t *testing.T) { } _, err = kv.Put(ctx, "foo1", strings.Repeat("a", int(maxReqBytes-50))) - if err != nil { // below quota - t.Fatal(err) - } + require.NoError(t, err) // below quota time.Sleep(1 * time.Second) // give enough time for commit @@ -123,13 +122,11 @@ func TestKVPutWithIgnoreValue(t *testing.T) { t.Fatalf("err expected %v, got %v", rpctypes.ErrKeyNotFound, err) } - if _, err := kv.Put(context.TODO(), "foo", "bar"); err != nil { - t.Fatal(err) - } + _, err = kv.Put(context.TODO(), "foo", "bar") + require.NoError(t, err) - if _, err := kv.Put(context.TODO(), "foo", "", clientv3.WithIgnoreValue()); err != nil { - t.Fatal(err) - } + _, err = kv.Put(context.TODO(), "foo", "", clientv3.WithIgnoreValue()) + require.NoError(t, err) rr, rerr := kv.Get(context.TODO(), "foo") if rerr != nil { t.Fatal(rerr) @@ -162,13 +159,11 @@ func TestKVPutWithIgnoreLease(t *testing.T) { t.Fatalf("err expected %v, got %v", rpctypes.ErrKeyNotFound, err) } - if _, err := kv.Put(context.TODO(), "zoo", "bar", clientv3.WithLease(resp.ID)); err != nil { - t.Fatal(err) - } + _, err = kv.Put(context.TODO(), "zoo", "bar", clientv3.WithLease(resp.ID)) + require.NoError(t, err) - if _, err := kv.Put(context.TODO(), "zoo", "bar1", clientv3.WithIgnoreLease()); err != nil { - t.Fatal(err) - } + _, err = kv.Put(context.TODO(), "zoo", "bar1", clientv3.WithIgnoreLease()) + require.NoError(t, err) rr, rerr := kv.Get(context.TODO(), "zoo") if rerr != nil { @@ -209,13 +204,9 @@ func TestKVPutWithRequireLeader(t *testing.T) { `type="unary"`, fmt.Sprintf(`client_api_version="%v"`, version.APIVersion), ) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) cv, err := strconv.ParseInt(cnt, 10, 32) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if cv < 1 { // >1 when retried t.Fatalf("expected at least 1, got %q", cnt) } @@ -298,9 +289,8 @@ func TestKVGetErrConnClosed(t *testing.T) { cli := clus.Client(0) donec := make(chan struct{}) - if err := cli.Close(); err != nil { - t.Fatal(err) - } + err := cli.Close() + require.NoError(t, err) clus.TakeClient(0) go func() { @@ -326,9 +316,8 @@ 
func TestKVNewAfterClose(t *testing.T) { cli := clus.Client(0) clus.TakeClient(0) - if err := cli.Close(); err != nil { - t.Fatal(err) - } + err := cli.Close() + require.NoError(t, err) donec := make(chan struct{}) go func() { @@ -493,9 +482,8 @@ func TestKVGetRetry(t *testing.T) { kv := clus.Client(fIdx) ctx := context.TODO() - if _, err := kv.Put(ctx, "foo", "bar"); err != nil { - t.Fatal(err) - } + _, err := kv.Put(ctx, "foo", "bar") + require.NoError(t, err) clus.Members[fIdx].Stop(t) @@ -650,9 +638,8 @@ func TestKVPutAtMostOnce(t *testing.T) { clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) defer clus.Terminate(t) - if _, err := clus.Client(0).Put(context.TODO(), "k", "1"); err != nil { - t.Fatal(err) - } + _, err := clus.Client(0).Put(context.TODO(), "k", "1") + require.NoError(t, err) for i := 0; i < 10; i++ { clus.Members[0].Bridge().DropConnections() @@ -672,9 +659,7 @@ func TestKVPutAtMostOnce(t *testing.T) { } resp, err := clus.Client(0).Get(context.TODO(), "k") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if resp.Kvs[0].Version > 11 { t.Fatalf("expected version <= 10, got %+v", resp.Kvs[0]) } diff --git a/tests/integration/clientv3/lease/lease_test.go b/tests/integration/clientv3/lease/lease_test.go index 3f09ffbcf39..df3c6f38dc5 100644 --- a/tests/integration/clientv3/lease/lease_test.go +++ b/tests/integration/clientv3/lease/lease_test.go @@ -266,9 +266,8 @@ func TestLeaseKeepAliveNotFound(t *testing.T) { lchs = append(lchs, leaseCh{resp.ID, kach}) } - if _, err := cli.Revoke(context.TODO(), lchs[1].lid); err != nil { - t.Fatal(err) - } + _, err := cli.Revoke(context.TODO(), lchs[1].lid) + require.NoError(t, err) <-lchs[0].ch if _, ok := <-lchs[0].ch; !ok { @@ -287,9 +286,8 @@ func TestLeaseGrantErrConnClosed(t *testing.T) { cli := clus.Client(0) clus.TakeClient(0) - if err := cli.Close(); err != nil { - t.Fatal(err) - } + err := cli.Close() + require.NoError(t, err) donec := make(chan struct{}) go func() { @@ -359,9 +357,8 @@ func TestLeaseGrantNewAfterClose(t *testing.T) { cli := clus.Client(0) clus.TakeClient(0) - if err := cli.Close(); err != nil { - t.Fatal(err) - } + err := cli.Close() + require.NoError(t, err) donec := make(chan struct{}) go func() { @@ -386,15 +383,12 @@ func TestLeaseRevokeNewAfterClose(t *testing.T) { cli := clus.Client(0) resp, err := cli.Grant(context.TODO(), 5) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) leaseID := resp.ID clus.TakeClient(0) - if err := cli.Close(); err != nil { - t.Fatal(err) - } + err = cli.Close() + require.NoError(t, err) errMsgCh := make(chan string, 1) go func() { @@ -427,9 +421,7 @@ func TestLeaseKeepAliveCloseAfterDisconnectRevoke(t *testing.T) { // setup lease and do a keepalive resp, err := cli.Grant(context.Background(), 10) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) rc, kerr := cli.KeepAlive(context.Background(), resp.ID) if kerr != nil { t.Fatal(kerr) @@ -444,9 +436,8 @@ func TestLeaseKeepAliveCloseAfterDisconnectRevoke(t *testing.T) { time.Sleep(time.Second) clus.WaitLeader(t) - if _, err := clus.Client(1).Revoke(context.TODO(), resp.ID); err != nil { - t.Fatal(err) - } + _, err = clus.Client(1).Revoke(context.TODO(), resp.ID) + require.NoError(t, err) clus.Members[0].Restart(t) @@ -473,9 +464,7 @@ func TestLeaseKeepAliveInitTimeout(t *testing.T) { // setup lease and do a keepalive resp, err := cli.Grant(context.Background(), 5) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // keep client disconnected 
clus.Members[0].Stop(t) rc, kerr := cli.KeepAlive(context.Background(), resp.ID) @@ -506,9 +495,7 @@ func TestLeaseKeepAliveTTLTimeout(t *testing.T) { // setup lease and do a keepalive resp, err := cli.Grant(context.Background(), 5) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) rc, kerr := cli.KeepAlive(context.Background(), resp.ID) if kerr != nil { t.Fatal(kerr) @@ -553,9 +540,8 @@ func TestLeaseTimeToLive(t *testing.T) { } // linearized read to ensure Puts propagated to server backing lapi - if _, err := c.Get(context.TODO(), "abc"); err != nil { - t.Fatal(err) - } + _, err = c.Get(context.TODO(), "abc") + require.NoError(t, err) lresp, lerr := lapi.TimeToLive(context.Background(), resp.ID, clientv3.WithAttachedKeys()) if lerr != nil { @@ -641,9 +627,7 @@ func TestLeaseLeases(t *testing.T) { } resp, err := cli.Leases(context.Background()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if len(resp.Leases) != 5 { t.Fatalf("len(resp.Leases) expected 5, got %d", len(resp.Leases)) } @@ -664,16 +648,12 @@ func TestLeaseRenewLostQuorum(t *testing.T) { cli := clus.Client(0) r, err := cli.Grant(context.TODO(), 4) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) kctx, kcancel := context.WithCancel(context.Background()) defer kcancel() ka, err := cli.KeepAlive(kctx, r.ID) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // consume first keepalive so next message sends when cluster is down <-ka lastKa := time.Now() @@ -715,9 +695,7 @@ func TestLeaseKeepAliveLoopExit(t *testing.T) { clus.TakeClient(0) resp, err := cli.Grant(ctx, 5) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) cli.Close() _, err = cli.KeepAlive(ctx, resp.ID) @@ -790,13 +768,9 @@ func TestLeaseWithRequireLeader(t *testing.T) { c := clus.Client(0) lid1, err1 := c.Grant(context.TODO(), 60) - if err1 != nil { - t.Fatal(err1) - } + require.NoError(t, err1) lid2, err2 := c.Grant(context.TODO(), 60) - if err2 != nil { - t.Fatal(err2) - } + require.NoError(t, err2) // kaReqLeader close if the leader is lost kaReqLeader, kerr1 := c.KeepAlive(clientv3.WithRequireLeader(context.TODO()), lid1.ID) if kerr1 != nil { diff --git a/tests/integration/clientv3/lease/leasing_test.go b/tests/integration/clientv3/lease/leasing_test.go index c148b5d650f..d2726296607 100644 --- a/tests/integration/clientv3/lease/leasing_test.go +++ b/tests/integration/clientv3/lease/leasing_test.go @@ -52,19 +52,15 @@ func TestLeasingPutGet(t *testing.T) { defer closeLKV3() resp, err := lKV1.Get(context.TODO(), "abc") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if len(resp.Kvs) != 0 { t.Errorf("expected nil, got %q", resp.Kvs[0].Key) } - if _, err = lKV1.Put(context.TODO(), "abc", "def"); err != nil { - t.Fatal(err) - } - if resp, err = lKV2.Get(context.TODO(), "abc"); err != nil { - t.Fatal(err) - } + _, err = lKV1.Put(context.TODO(), "abc", "def") + require.NoError(t, err) + resp, err = lKV2.Get(context.TODO(), "abc") + require.NoError(t, err) if string(resp.Kvs[0].Key) != "abc" { t.Errorf("expected key=%q, got key=%q", "abc", resp.Kvs[0].Key) } @@ -72,16 +68,13 @@ func TestLeasingPutGet(t *testing.T) { t.Errorf("expected value=%q, got value=%q", "bar", resp.Kvs[0].Value) } - if _, err = lKV3.Get(context.TODO(), "abc"); err != nil { - t.Fatal(err) - } - if _, err = lKV2.Put(context.TODO(), "abc", "ghi"); err != nil { - t.Fatal(err) - } + _, err = lKV3.Get(context.TODO(), "abc") + require.NoError(t, err) + _, err = lKV2.Put(context.TODO(), "abc", "ghi") + require.NoError(t, 
err) - if resp, err = lKV3.Get(context.TODO(), "abc"); err != nil { - t.Fatal(err) - } + resp, err = lKV3.Get(context.TODO(), "abc") + require.NoError(t, err) if string(resp.Kvs[0].Key) != "abc" { t.Errorf("expected key=%q, got key=%q", "abc", resp.Kvs[0].Key) } @@ -108,22 +101,18 @@ func TestLeasingInterval(t *testing.T) { } resp, err := lkv.Get(context.TODO(), "abc/", clientv3.WithPrefix()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if len(resp.Kvs) != 3 { t.Fatalf("expected keys %+v, got response keys %+v", keys, resp.Kvs) } // load into cache - if _, err = lkv.Get(context.TODO(), "abc/a"); err != nil { - t.Fatal(err) - } + _, err = lkv.Get(context.TODO(), "abc/a") + require.NoError(t, err) // get when prefix is also a cached key - if resp, err = lkv.Get(context.TODO(), "abc/a", clientv3.WithPrefix()); err != nil { - t.Fatal(err) - } + resp, err = lkv.Get(context.TODO(), "abc/a", clientv3.WithPrefix()) + require.NoError(t, err) if len(resp.Kvs) != 2 { t.Fatalf("expected keys %+v, got response keys %+v", keys, resp.Kvs) } @@ -139,17 +128,13 @@ func TestLeasingPutInvalidateNew(t *testing.T) { assert.NoError(t, err) defer closeLKV() - if _, err = lkv.Get(context.TODO(), "k"); err != nil { - t.Fatal(err) - } - if _, err = lkv.Put(context.TODO(), "k", "v"); err != nil { - t.Fatal(err) - } + _, err = lkv.Get(context.TODO(), "k") + require.NoError(t, err) + _, err = lkv.Put(context.TODO(), "k", "v") + require.NoError(t, err) lkvResp, err := lkv.Get(context.TODO(), "k") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) cResp, cerr := clus.Client(0).Get(context.TODO(), "k") if cerr != nil { t.Fatal(cerr) @@ -165,25 +150,20 @@ func TestLeasingPutInvalidateExisting(t *testing.T) { clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) defer clus.Terminate(t) - if _, err := clus.Client(0).Put(context.TODO(), "k", "abc"); err != nil { - t.Fatal(err) - } + _, err := clus.Client(0).Put(context.TODO(), "k", "abc") + require.NoError(t, err) lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") assert.NoError(t, err) defer closeLKV() - if _, err = lkv.Get(context.TODO(), "k"); err != nil { - t.Fatal(err) - } - if _, err = lkv.Put(context.TODO(), "k", "v"); err != nil { - t.Fatal(err) - } + _, err = lkv.Get(context.TODO(), "k") + require.NoError(t, err) + _, err = lkv.Put(context.TODO(), "k", "v") + require.NoError(t, err) lkvResp, err := lkv.Get(context.TODO(), "k") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) cResp, cerr := clus.Client(0).Get(context.TODO(), "k") if cerr != nil { t.Fatal(cerr) @@ -232,20 +212,16 @@ func TestLeasingGetSerializable(t *testing.T) { assert.NoError(t, err) defer closeLKV() - if _, err = clus.Client(0).Put(context.TODO(), "cached", "abc"); err != nil { - t.Fatal(err) - } - if _, err = lkv.Get(context.TODO(), "cached"); err != nil { - t.Fatal(err) - } + _, err = clus.Client(0).Put(context.TODO(), "cached", "abc") + require.NoError(t, err) + _, err = lkv.Get(context.TODO(), "cached") + require.NoError(t, err) clus.Members[1].Stop(t) // don't necessarily try to acquire leasing key ownership for new key resp, err := lkv.Get(context.TODO(), "uncached", clientv3.WithSerializable()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if len(resp.Kvs) != 0 { t.Fatalf(`expected no keys, got response %+v`, resp) } @@ -254,9 +230,7 @@ func TestLeasingGetSerializable(t *testing.T) { // leasing key ownership should have "cached" locally served cachedResp, err := lkv.Get(context.TODO(), "cached", 
clientv3.WithSerializable()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if len(cachedResp.Kvs) != 1 || string(cachedResp.Kvs[0].Value) != "abc" { t.Fatalf(`expected "cached"->"abc", got response %+v`, cachedResp) } @@ -272,17 +246,13 @@ func TestLeasingPrevKey(t *testing.T) { assert.NoError(t, err) defer closeLKV() - if _, err = clus.Client(0).Put(context.TODO(), "k", "abc"); err != nil { - t.Fatal(err) - } + _, err = clus.Client(0).Put(context.TODO(), "k", "abc") + require.NoError(t, err) // acquire leasing key - if _, err = lkv.Get(context.TODO(), "k"); err != nil { - t.Fatal(err) - } + _, err = lkv.Get(context.TODO(), "k") + require.NoError(t, err) resp, err := lkv.Put(context.TODO(), "k", "def", clientv3.WithPrevKV()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if resp.PrevKv == nil || string(resp.PrevKv.Value) != "abc" { t.Fatalf(`expected PrevKV.Value="abc", got response %+v`, resp) } @@ -299,12 +269,9 @@ func TestLeasingRevGet(t *testing.T) { defer closeLKV() putResp, err := clus.Client(0).Put(context.TODO(), "k", "abc") - if err != nil { - t.Fatal(err) - } - if _, err = clus.Client(0).Put(context.TODO(), "k", "def"); err != nil { - t.Fatal(err) - } + require.NoError(t, err) + _, err = clus.Client(0).Put(context.TODO(), "k", "def") + require.NoError(t, err) // check historic revision getResp, gerr := lkv.Get(context.TODO(), "k", clientv3.WithRev(putResp.Header.Revision)) @@ -334,13 +301,11 @@ func TestLeasingGetWithOpts(t *testing.T) { assert.NoError(t, err) defer closeLKV() - if _, err = clus.Client(0).Put(context.TODO(), "k", "abc"); err != nil { - t.Fatal(err) - } + _, err = clus.Client(0).Put(context.TODO(), "k", "abc") + require.NoError(t, err) // in cache - if _, err = lkv.Get(context.TODO(), "k", clientv3.WithKeysOnly()); err != nil { - t.Fatal(err) - } + _, err = lkv.Get(context.TODO(), "k", clientv3.WithKeysOnly()) + require.NoError(t, err) clus.Members[0].Stop(t) @@ -362,9 +327,8 @@ func TestLeasingGetWithOpts(t *testing.T) { getOpts = append(getOpts, opts[rand.Intn(len(opts))]) } getOpts = getOpts[:rand.Intn(len(opts))] - if _, err := lkv.Get(context.TODO(), "k", getOpts...); err != nil { - t.Fatal(err) - } + _, err = lkv.Get(context.TODO(), "k", getOpts...) 
+ require.NoError(t, err) } // TestLeasingConcurrentPut ensures that a get after concurrent puts returns @@ -379,9 +343,8 @@ func TestLeasingConcurrentPut(t *testing.T) { defer closeLKV() // force key into leasing key cache - if _, err = lkv.Get(context.TODO(), "k"); err != nil { - t.Fatal(err) - } + _, err = lkv.Get(context.TODO(), "k") + require.NoError(t, err) // concurrently put through leasing client numPuts := 16 @@ -405,9 +368,7 @@ func TestLeasingConcurrentPut(t *testing.T) { // confirm Get gives most recently put revisions getResp, gerr := lkv.Get(context.TODO(), "k") - if gerr != nil { - t.Fatal(err) - } + require.NoError(t, gerr) if mr := getResp.Kvs[0].ModRevision; mr != maxRev { t.Errorf("expected ModRevision %d, got %d", maxRev, mr) } @@ -425,21 +386,17 @@ func TestLeasingDisconnectedGet(t *testing.T) { assert.NoError(t, err) defer closeLKV() - if _, err = clus.Client(0).Put(context.TODO(), "cached", "abc"); err != nil { - t.Fatal(err) - } + _, err = clus.Client(0).Put(context.TODO(), "cached", "abc") + require.NoError(t, err) // get key so it's cached - if _, err = lkv.Get(context.TODO(), "cached"); err != nil { - t.Fatal(err) - } + _, err = lkv.Get(context.TODO(), "cached") + require.NoError(t, err) clus.Members[0].Stop(t) // leasing key ownership should have "cached" locally served cachedResp, err := lkv.Get(context.TODO(), "cached") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if len(cachedResp.Kvs) != 1 || string(cachedResp.Kvs[0].Value) != "abc" { t.Fatalf(`expected "cached"->"abc", got response %+v`, cachedResp) } @@ -454,29 +411,23 @@ func TestLeasingDeleteOwner(t *testing.T) { assert.NoError(t, err) defer closeLKV() - if _, err = clus.Client(0).Put(context.TODO(), "k", "abc"); err != nil { - t.Fatal(err) - } + _, err = clus.Client(0).Put(context.TODO(), "k", "abc") + require.NoError(t, err) // get+own / delete / get - if _, err = lkv.Get(context.TODO(), "k"); err != nil { - t.Fatal(err) - } - if _, err = lkv.Delete(context.TODO(), "k"); err != nil { - t.Fatal(err) - } + _, err = lkv.Get(context.TODO(), "k") + require.NoError(t, err) + _, err = lkv.Delete(context.TODO(), "k") + require.NoError(t, err) resp, err := lkv.Get(context.TODO(), "k") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if len(resp.Kvs) != 0 { t.Fatalf(`expected "k" to be deleted, got response %+v`, resp) } // try to double delete - if _, err = lkv.Delete(context.TODO(), "k"); err != nil { - t.Fatal(err) - } + _, err = lkv.Delete(context.TODO(), "k") + require.NoError(t, err) } func TestLeasingDeleteNonOwner(t *testing.T) { @@ -492,23 +443,18 @@ func TestLeasingDeleteNonOwner(t *testing.T) { assert.NoError(t, err) defer closeLKV2() - if _, err = clus.Client(0).Put(context.TODO(), "k", "abc"); err != nil { - t.Fatal(err) - } + _, err = clus.Client(0).Put(context.TODO(), "k", "abc") + require.NoError(t, err) // acquire ownership - if _, err = lkv1.Get(context.TODO(), "k"); err != nil { - t.Fatal(err) - } + _, err = lkv1.Get(context.TODO(), "k") + require.NoError(t, err) // delete via non-owner - if _, err = lkv2.Delete(context.TODO(), "k"); err != nil { - t.Fatal(err) - } + _, err = lkv2.Delete(context.TODO(), "k") + require.NoError(t, err) // key should be removed from lkv1 resp, err := lkv1.Get(context.TODO(), "k") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if len(resp.Kvs) != 0 { t.Fatalf(`expected "k" to be deleted, got response %+v`, resp) } @@ -523,22 +469,17 @@ func TestLeasingOverwriteResponse(t *testing.T) { assert.NoError(t, err) defer 
closeLKV() - if _, err = clus.Client(0).Put(context.TODO(), "k", "abc"); err != nil { - t.Fatal(err) - } + _, err = clus.Client(0).Put(context.TODO(), "k", "abc") + require.NoError(t, err) resp, err := lkv.Get(context.TODO(), "k") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) resp.Kvs[0].Key[0] = 'z' resp.Kvs[0].Value[0] = 'z' resp, err = lkv.Get(context.TODO(), "k") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if string(resp.Kvs[0].Key) != "k" { t.Errorf(`expected key "k", got %q`, string(resp.Kvs[0].Key)) @@ -557,17 +498,14 @@ func TestLeasingOwnerPutResponse(t *testing.T) { assert.NoError(t, err) defer closeLKV() - if _, err = clus.Client(0).Put(context.TODO(), "k", "abc"); err != nil { - t.Fatal(err) - } + _, err = clus.Client(0).Put(context.TODO(), "k", "abc") + require.NoError(t, err) _, gerr := lkv.Get(context.TODO(), "k") if gerr != nil { t.Fatal(gerr) } presp, err := lkv.Put(context.TODO(), "k", "def") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if presp == nil { t.Fatal("expected put response, got nil") } @@ -601,9 +539,8 @@ func TestLeasingTxnOwnerGetRange(t *testing.T) { _, err := clus.Client(0).Put(context.TODO(), k, k+k) require.NoError(t, err) } - if _, err := lkv.Get(context.TODO(), "k-"); err != nil { - t.Fatal(err) - } + _, err = lkv.Get(context.TODO(), "k-") + require.NoError(t, err) tresp, terr := lkv.Txn(context.TODO()).Then(clientv3.OpGet("k-", clientv3.WithPrefix())).Commit() if terr != nil { @@ -642,9 +579,7 @@ func TestLeasingTxnOwnerGet(t *testing.T) { for i := range presps { k := fmt.Sprintf("k-%d", i) presp, err := client.Put(context.TODO(), k, k+k) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) presps[i] = presp _, err = lkv.Get(context.TODO(), k) @@ -718,9 +653,7 @@ func TestLeasingTxnOwnerDeleteRange(t *testing.T) { // cache in lkv resp, err := lkv.Get(context.TODO(), "k-", clientv3.WithPrefix()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if len(resp.Kvs) != keyCount { t.Fatalf("expected %d keys, got %d", keyCount, len(resp.Kvs)) } @@ -730,9 +663,7 @@ func TestLeasingTxnOwnerDeleteRange(t *testing.T) { } resp, err = lkv.Get(context.TODO(), "k-", clientv3.WithPrefix()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if len(resp.Kvs) != 0 { t.Fatalf("expected no keys, got %d", len(resp.Kvs)) } @@ -747,9 +678,8 @@ func TestLeasingTxnOwnerDelete(t *testing.T) { assert.NoError(t, err) defer closeLKV() - if _, err = clus.Client(0).Put(context.TODO(), "k", "abc"); err != nil { - t.Fatal(err) - } + _, err = clus.Client(0).Put(context.TODO(), "k", "abc") + require.NoError(t, err) // cache in lkv if _, gerr := lkv.Get(context.TODO(), "k"); gerr != nil { @@ -761,9 +691,7 @@ func TestLeasingTxnOwnerDelete(t *testing.T) { } resp, err := lkv.Get(context.TODO(), "k") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if len(resp.Kvs) != 0 { t.Fatalf("expected no keys, got %d", len(resp.Kvs)) } @@ -778,12 +706,10 @@ func TestLeasingTxnOwnerIf(t *testing.T) { assert.NoError(t, err) defer closeLKV() - if _, err = clus.Client(0).Put(context.TODO(), "k", "abc"); err != nil { - t.Fatal(err) - } - if _, err = lkv.Get(context.TODO(), "k"); err != nil { - t.Fatal(err) - } + _, err = clus.Client(0).Put(context.TODO(), "k", "abc") + require.NoError(t, err) + _, err = lkv.Get(context.TODO(), "k") + require.NoError(t, err) // served through cache clus.Members[0].Stop(t) @@ -877,15 +803,13 @@ func TestLeasingTxnCancel(t *testing.T) { defer closeLKV2() // acquire lease but 
disconnect so no revoke in time - if _, err = lkv1.Get(context.TODO(), "k"); err != nil { - t.Fatal(err) - } + _, err = lkv1.Get(context.TODO(), "k") + require.NoError(t, err) clus.Members[0].Stop(t) // wait for leader election, if any - if _, err = clus.Client(1).Get(context.TODO(), "abc"); err != nil { - t.Fatal(err) - } + _, err = clus.Client(1).Get(context.TODO(), "abc") + require.NoError(t, err) ctx, cancel := context.WithCancel(context.TODO()) go func() { @@ -910,19 +834,15 @@ func TestLeasingTxnNonOwnerPut(t *testing.T) { assert.NoError(t, err) defer closeLKV2() - if _, err = clus.Client(0).Put(context.TODO(), "k", "abc"); err != nil { - t.Fatal(err) - } - if _, err = clus.Client(0).Put(context.TODO(), "k2", "123"); err != nil { - t.Fatal(err) - } + _, err = clus.Client(0).Put(context.TODO(), "k", "abc") + require.NoError(t, err) + _, err = clus.Client(0).Put(context.TODO(), "k2", "123") + require.NoError(t, err) // cache in lkv - if _, err = lkv.Get(context.TODO(), "k"); err != nil { - t.Fatal(err) - } - if _, err = lkv.Get(context.TODO(), "k2"); err != nil { - t.Fatal(err) - } + _, err = lkv.Get(context.TODO(), "k") + require.NoError(t, err) + _, err = lkv.Get(context.TODO(), "k2") + require.NoError(t, err) // invalidate via lkv2 txn opArray := make([]clientv3.Op, 0) opArray = append(opArray, clientv3.OpPut("k2", "456")) @@ -939,9 +859,7 @@ func TestLeasingTxnNonOwnerPut(t *testing.T) { } // check cache was invalidated gresp, gerr := lkv.Get(context.TODO(), "k") - if gerr != nil { - t.Fatal(err) - } + require.NoError(t, gerr) if len(gresp.Kvs) != 1 || string(gresp.Kvs[0].Value) != "def" { t.Errorf(`expected value "def", got %+v`, gresp) } @@ -993,9 +911,7 @@ func TestLeasingTxnRandIfThenOrElse(t *testing.T) { for i := 0; i < keyCount; i++ { k, v := fmt.Sprintf("k-%d", i), fmt.Sprintf("%d", i) dat[i], err1 = clus.Client(0).Put(context.TODO(), k, v) - if err1 != nil { - t.Fatal(err1) - } + require.NoError(t, err1) } // nondeterministically populate leasing caches @@ -1090,9 +1006,8 @@ func TestLeasingOwnerPutError(t *testing.T) { assert.NoError(t, err) defer closeLKV() - if _, err = lkv.Get(context.TODO(), "k"); err != nil { - t.Fatal(err) - } + _, err = lkv.Get(context.TODO(), "k") + require.NoError(t, err) clus.Members[0].Stop(t) ctx, cancel := context.WithTimeout(context.TODO(), 100*time.Millisecond) @@ -1111,9 +1026,8 @@ func TestLeasingOwnerDeleteError(t *testing.T) { assert.NoError(t, err) defer closeLKV() - if _, err = lkv.Get(context.TODO(), "k"); err != nil { - t.Fatal(err) - } + _, err = lkv.Get(context.TODO(), "k") + require.NoError(t, err) clus.Members[0].Stop(t) ctx, cancel := context.WithTimeout(context.TODO(), 100*time.Millisecond) @@ -1162,9 +1076,8 @@ func testLeasingOwnerDelete(t *testing.T, del clientv3.Op) { require.NoError(t, err) } - if _, err = lkv.Get(context.TODO(), "key/1"); err != nil { - t.Fatal(err) - } + _, err = lkv.Get(context.TODO(), "key/1") + require.NoError(t, err) opResp, delErr := lkv.Do(context.TODO(), del) if delErr != nil { @@ -1175,9 +1088,7 @@ func testLeasingOwnerDelete(t *testing.T, del clientv3.Op) { // confirm keys are invalidated from cache and deleted on etcd for i := 0; i < 8; i++ { resp, err := lkv.Get(context.TODO(), fmt.Sprintf("key/%d", i)) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if len(resp.Kvs) != 0 { t.Fatalf("expected no keys on key/%d, got %+v", i, resp) } @@ -1216,9 +1127,8 @@ func TestLeasingDeleteRangeBounds(t *testing.T) { require.NoError(t, err) } - if _, err = delkv.Delete(context.TODO(), "k", 
clientv3.WithPrefix()); err != nil { - t.Fatal(err) - } + _, err = delkv.Delete(context.TODO(), "k", clientv3.WithPrefix()) + require.NoError(t, err) // leases still on server? for _, k := range []string{"j", "m"} { @@ -1234,12 +1144,10 @@ func TestLeasingDeleteRangeBounds(t *testing.T) { // j and m should still have leases registered since not under k* clus.Members[0].Stop(t) - if _, err = getkv.Get(context.TODO(), "j"); err != nil { - t.Fatal(err) - } - if _, err = getkv.Get(context.TODO(), "m"); err != nil { - t.Fatal(err) - } + _, err = getkv.Get(context.TODO(), "j") + require.NoError(t, err) + _, err = getkv.Get(context.TODO(), "m") + require.NoError(t, err) } func TestLeasingDeleteRangeContendTxn(t *testing.T) { @@ -1300,13 +1208,9 @@ func testLeasingDeleteRangeContend(t *testing.T, op clientv3.Op) { for i := 0; i < maxKey; i++ { key := fmt.Sprintf("key/%d", i) resp, err := putkv.Get(context.TODO(), key) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) servResp, err := clus.Client(0).Get(context.TODO(), key) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if !reflect.DeepEqual(resp.Kvs, servResp.Kvs) { t.Errorf("#%d: expected %+v, got %+v", i, servResp.Kvs, resp.Kvs) } @@ -1350,18 +1254,14 @@ func TestLeasingPutGetDeleteConcurrent(t *testing.T) { wg.Wait() resp, err := lkvs[0].Get(context.TODO(), "k") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if len(resp.Kvs) > 0 { t.Fatalf("expected no kvs, got %+v", resp.Kvs) } resp, err = clus.Client(0).Get(context.TODO(), "k") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if len(resp.Kvs) > 0 { t.Fatalf("expected no kvs, got %+v", resp.Kvs) } @@ -1382,9 +1282,8 @@ func TestLeasingReconnectOwnerRevoke(t *testing.T) { assert.NoError(t, err2) defer closeLKV2() - if _, err := lkv1.Get(context.TODO(), "k"); err != nil { - t.Fatal(err) - } + _, err := lkv1.Get(context.TODO(), "k") + require.NoError(t, err) // force leader away from member 0 clus.Members[0].Stop(t) @@ -1443,39 +1342,30 @@ func TestLeasingReconnectOwnerRevokeCompact(t *testing.T) { assert.NoError(t, err2) defer closeLKV2() - if _, err := lkv1.Get(context.TODO(), "k"); err != nil { - t.Fatal(err) - } + _, err := lkv1.Get(context.TODO(), "k") + require.NoError(t, err) clus.Members[0].Stop(t) clus.WaitLeader(t) // put some more revisions for compaction - _, err := clus.Client(1).Put(context.TODO(), "a", "123") - if err != nil { - t.Fatal(err) - } + _, err = clus.Client(1).Put(context.TODO(), "a", "123") + require.NoError(t, err) presp, err := clus.Client(1).Put(context.TODO(), "a", "123") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // compact while lkv1 is disconnected rev := presp.Header.Revision - if _, err = clus.Client(1).Compact(context.TODO(), rev); err != nil { - t.Fatal(err) - } + _, err = clus.Client(1).Compact(context.TODO(), rev) + require.NoError(t, err) clus.Members[0].Restart(t) cctx, cancel := context.WithTimeout(context.TODO(), 5*time.Second) defer cancel() - if _, err = lkv2.Put(cctx, "k", "v"); err != nil { - t.Fatal(err) - } + _, err = lkv2.Put(cctx, "k", "v") + require.NoError(t, err) resp, err := lkv1.Get(cctx, "k") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if string(resp.Kvs[0].Value) != "v" { t.Fatalf(`expected "v" value, got %+v`, resp) } diff --git a/tests/integration/clientv3/maintenance_test.go b/tests/integration/clientv3/maintenance_test.go index a2394b8b719..0e9e56a1c7a 100644 --- a/tests/integration/clientv3/maintenance_test.go +++ 
b/tests/integration/clientv3/maintenance_test.go @@ -61,9 +61,7 @@ func TestMaintenanceHashKV(t *testing.T) { _, err := cli.Get(context.TODO(), "foo") require.NoError(t, err) hresp, err := cli.HashKV(context.Background(), clus.Members[i].GRPCURL, 0) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if hv == 0 { hv = hresp.Hash continue @@ -83,9 +81,7 @@ func TestCompactionHash(t *testing.T) { defer clus.Terminate(t) cc, err := clus.ClusterClient(t) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) testutil.TestCompactionHash(context.Background(), t, hashTestCase{cc, clus.Members[0].GRPCURL}, 1000) } @@ -140,9 +136,7 @@ func TestMaintenanceMoveLeader(t *testing.T) { cli = clus.Client(oldLeadIdx) _, err = cli.MoveLeader(context.Background(), target) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) leadIdx := clus.WaitLeader(t) lead := uint64(clus.Members[leadIdx].ID()) @@ -172,9 +166,7 @@ func TestMaintenanceSnapshotCancel(t *testing.T) { populateDataIntoCluster(t, clus, 3, 1024*1024) rc1, err := clus.RandClient().Snapshot(ctx) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer rc1.Close() // read 16 bytes to ensure that server opens snapshot @@ -232,9 +224,7 @@ func testMaintenanceSnapshotTimeout(t *testing.T, snapshot func(context.Context, populateDataIntoCluster(t, clus, 3, 1024*1024) rc2, err := snapshot(ctx, clus.RandClient()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer rc2.Close() time.Sleep(2 * time.Second) @@ -290,9 +280,7 @@ func testMaintenanceSnapshotErrorInflight(t *testing.T, snapshot func(context.Co // reading snapshot with canceled context should error out ctx, cancel := context.WithCancel(context.Background()) rc1, err := snapshot(ctx, clus.RandClient()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer rc1.Close() donec := make(chan struct{}) @@ -311,9 +299,7 @@ func testMaintenanceSnapshotErrorInflight(t *testing.T, snapshot func(context.Co ctx, cancel = context.WithTimeout(context.Background(), time.Second) defer cancel() rc2, err := snapshot(ctx, clus.RandClient()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer rc2.Close() // 300ms left and expect timeout while snapshot reading is in progress @@ -339,9 +325,7 @@ func TestMaintenanceSnapshotWithVersionVersion(t *testing.T) { // reading snapshot with canceled context should error out resp, err := clus.RandClient().SnapshotWithVersion(context.Background()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer resp.Snapshot.Close() if resp.Version != "3.6.0" { t.Errorf("unexpected version, expected %q, got %q", version.Version, resp.Version) @@ -411,18 +395,14 @@ func TestMaintenanceStatus(t *testing.T) { t.Logf("Creating client...") cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: eps, DialOptions: []grpc.DialOption{grpc.WithBlock()}}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer cli.Close() t.Logf("Creating client [DONE]") prevID, leaderFound := uint64(0), false for i := 0; i < 3; i++ { resp, err := cli.Status(context.TODO(), eps[i]) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) t.Logf("Response from %v: %v", i, resp) if resp.DbSizeQuota != storage.DefaultQuotaBytes { t.Errorf("unexpected backend default quota returned: %d, expected %d", resp.DbSizeQuota, storage.DefaultQuotaBytes) diff --git a/tests/integration/clientv3/mirror_auth_test.go b/tests/integration/clientv3/mirror_auth_test.go index 8dedd4e94d2..1738fea5a64 100644 --- 
a/tests/integration/clientv3/mirror_auth_test.go +++ b/tests/integration/clientv3/mirror_auth_test.go @@ -22,6 +22,7 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" "google.golang.org/grpc" "go.etcd.io/etcd/api/v3/mvccpb" @@ -45,9 +46,7 @@ func TestMirrorSync_Authenticated(t *testing.T) { // Seed /syncpath with some initial data _, err := initialClient.KV.Put(context.TODO(), "/syncpath/foo", "bar") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // Require authentication authSetupRoot(t, initialClient.Auth) @@ -61,9 +60,7 @@ func TestMirrorSync_Authenticated(t *testing.T) { Password: "syncfoo", } syncClient, err := integration2.NewClient(t, cfg) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer syncClient.Close() // Now run the sync process, create changes, and get the initial sync state @@ -86,9 +83,7 @@ func TestMirrorSync_Authenticated(t *testing.T) { // Update state _, err = syncClient.KV.Put(context.TODO(), "/syncpath/foo", "baz") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // Wait for the updated state to sync select { diff --git a/tests/integration/clientv3/mirror_test.go b/tests/integration/clientv3/mirror_test.go index f21551bbdf0..e3bc9a73009 100644 --- a/tests/integration/clientv3/mirror_test.go +++ b/tests/integration/clientv3/mirror_test.go @@ -22,6 +22,8 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "go.etcd.io/etcd/api/v3/mvccpb" "go.etcd.io/etcd/client/v3/mirror" integration2 "go.etcd.io/etcd/tests/v3/framework/integration" @@ -35,9 +37,7 @@ func TestMirrorSync(t *testing.T) { c := clus.Client(0) _, err := c.KV.Put(context.TODO(), "foo", "bar") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) syncer := mirror.NewSyncer(c, "", 0) gch, ech := syncer.SyncBase(context.TODO()) @@ -56,9 +56,7 @@ func TestMirrorSync(t *testing.T) { wch := syncer.SyncUpdates(context.TODO()) _, err = c.KV.Put(context.TODO(), "foo", "bar") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) select { case r := <-wch: diff --git a/tests/integration/clientv3/namespace_test.go b/tests/integration/clientv3/namespace_test.go index 2aad010f987..4ad7f60a28f 100644 --- a/tests/integration/clientv3/namespace_test.go +++ b/tests/integration/clientv3/namespace_test.go @@ -19,6 +19,8 @@ import ( "reflect" "testing" + "github.com/stretchr/testify/require" + "go.etcd.io/etcd/api/v3/mvccpb" clientv3 "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/client/v3/namespace" @@ -34,21 +36,16 @@ func TestNamespacePutGet(t *testing.T) { c := clus.Client(0) nsKV := namespace.NewKV(c.KV, "foo/") - if _, err := nsKV.Put(context.TODO(), "abc", "bar"); err != nil { - t.Fatal(err) - } + _, err := nsKV.Put(context.TODO(), "abc", "bar") + require.NoError(t, err) resp, err := nsKV.Get(context.TODO(), "abc") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if string(resp.Kvs[0].Key) != "abc" { t.Errorf("expected key=%q, got key=%q", "abc", resp.Kvs[0].Key) } resp, err = c.Get(context.TODO(), "foo/abc") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if string(resp.Kvs[0].Value) != "bar" { t.Errorf("expected value=%q, got value=%q", "bar", resp.Kvs[0].Value) } @@ -64,9 +61,8 @@ func TestNamespaceWatch(t *testing.T) { nsKV := namespace.NewKV(c.KV, "foo/") nsWatcher := namespace.NewWatcher(c.Watcher, "foo/") - if _, err := nsKV.Put(context.TODO(), "abc", "bar"); err != nil { - t.Fatal(err) - } + _, err := nsKV.Put(context.TODO(), "abc", "bar") + require.NoError(t, err) nsWch := 
nsWatcher.Watch(context.TODO(), "abc", clientv3.WithRev(1)) wkv := &mvccpb.KeyValue{Key: []byte("abc"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1} diff --git a/tests/integration/clientv3/ordering_kv_test.go b/tests/integration/clientv3/ordering_kv_test.go index 1d667b44dac..e01ae046f2b 100644 --- a/tests/integration/clientv3/ordering_kv_test.go +++ b/tests/integration/clientv3/ordering_kv_test.go @@ -21,6 +21,7 @@ import ( "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" clientv3 "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/client/v3/ordering" @@ -42,27 +43,21 @@ func TestDetectKvOrderViolation(t *testing.T) { }, } cli, err := integration2.NewClient(t, cfg) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer func() { assert.NoError(t, cli.Close()) }() ctx := context.TODO() - if _, err = clus.Client(0).Put(ctx, "foo", "bar"); err != nil { - t.Fatal(err) - } + _, err = clus.Client(0).Put(ctx, "foo", "bar") + require.NoError(t, err) // ensure that the second member has the current revision for the key foo - if _, err = clus.Client(1).Get(ctx, "foo"); err != nil { - t.Fatal(err) - } + _, err = clus.Client(1).Get(ctx, "foo") + require.NoError(t, err) // stop third member in order to force the member to have an outdated revision clus.Members[2].Stop(t) time.Sleep(1 * time.Second) // give enough time for operation _, err = cli.Put(ctx, "foo", "buzz") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // perform get request against the first member, in order to // set up kvOrdering to expect "foo" revisions greater than that of @@ -72,9 +67,7 @@ func TestDetectKvOrderViolation(t *testing.T) { return errOrderViolation }) v, err := orderingKv.Get(ctx, "foo") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) t.Logf("Read from the first member: v:%v err:%v", v, err) assert.Equal(t, []byte("buzz"), v.Kvs[0].Value) @@ -109,26 +102,21 @@ func TestDetectTxnOrderViolation(t *testing.T) { }, } cli, err := integration2.NewClient(t, cfg) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer func() { assert.NoError(t, cli.Close()) }() ctx := context.TODO() - if _, err = clus.Client(0).Put(ctx, "foo", "bar"); err != nil { - t.Fatal(err) - } + _, err = clus.Client(0).Put(ctx, "foo", "bar") + require.NoError(t, err) // ensure that the second member has the current revision for the key foo - if _, err = clus.Client(1).Get(ctx, "foo"); err != nil { - t.Fatal(err) - } + _, err = clus.Client(1).Get(ctx, "foo") + require.NoError(t, err) // stop third member in order to force the member to have an outdated revision clus.Members[2].Stop(t) time.Sleep(1 * time.Second) // give enough time for operation - if _, err = clus.Client(1).Put(ctx, "foo", "buzz"); err != nil { - t.Fatal(err) - } + _, err = clus.Client(1).Put(ctx, "foo", "buzz") + require.NoError(t, err) // perform get request against the first member, in order to // set up kvOrdering to expect "foo" revisions greater than that of @@ -143,9 +131,7 @@ func TestDetectTxnOrderViolation(t *testing.T) { ).Then( clientv3.OpGet("foo"), ).Commit() - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // ensure that only the third member is queried during requests clus.Members[0].Stop(t) diff --git a/tests/integration/clientv3/ordering_util_test.go b/tests/integration/clientv3/ordering_util_test.go index 6313957bf3f..4bd3d32bae3 100644 --- a/tests/integration/clientv3/ordering_util_test.go +++ b/tests/integration/clientv3/ordering_util_test.go @@ 
-20,6 +20,8 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + clientv3 "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/client/v3/ordering" integration2 "go.etcd.io/etcd/tests/v3/framework/integration" @@ -39,20 +41,16 @@ func TestEndpointSwitchResolvesViolation(t *testing.T) { } cfg := clientv3.Config{Endpoints: []string{clus.Members[0].GRPCURL}} cli, err := integration2.NewClient(t, cfg) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer cli.Close() ctx := context.TODO() - if _, err = clus.Client(0).Put(ctx, "foo", "bar"); err != nil { - t.Fatal(err) - } + _, err = clus.Client(0).Put(ctx, "foo", "bar") + require.NoError(t, err) // ensure that the second member has current revision for key "foo" - if _, err = clus.Client(1).Get(ctx, "foo"); err != nil { - t.Fatal(err) - } + _, err = clus.Client(1).Get(ctx, "foo") + require.NoError(t, err) // create partition between third members and the first two members // in order to guarantee that the third member's revision of "foo" @@ -61,9 +59,8 @@ func TestEndpointSwitchResolvesViolation(t *testing.T) { time.Sleep(1 * time.Second) // give enough time for the operation // update to "foo" will not be replicated to the third member due to the partition - if _, err = clus.Client(1).Put(ctx, "foo", "buzz"); err != nil { - t.Fatal(err) - } + _, err = clus.Client(1).Put(ctx, "foo", "buzz") + require.NoError(t, err) cli.SetEndpoints(eps...) time.Sleep(1 * time.Second) // give enough time for the operation @@ -71,9 +68,7 @@ func TestEndpointSwitchResolvesViolation(t *testing.T) { // set prevRev to the second member's revision of "foo" such that // the revision is higher than the third member's revision of "foo" _, err = orderingKv.Get(ctx, "foo") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) t.Logf("Reconfigure client to speak only to the 'partitioned' member") cli.SetEndpoints(clus.Members[2].GRPCURL) @@ -106,9 +101,7 @@ func TestUnresolvableOrderViolation(t *testing.T) { }, } cli, err := integration2.NewClient(t, cfg) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer cli.Close() eps := cli.Endpoints() ctx := context.TODO() @@ -116,9 +109,7 @@ func TestUnresolvableOrderViolation(t *testing.T) { cli.SetEndpoints(clus.Members[0].GRPCURL) time.Sleep(1 * time.Second) _, err = cli.Put(ctx, "foo", "bar") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // stop fourth member in order to force the member to have an outdated revision clus.Members[3].Stop(t) @@ -127,9 +118,7 @@ func TestUnresolvableOrderViolation(t *testing.T) { clus.Members[4].Stop(t) time.Sleep(1 * time.Second) // give enough time for operation _, err = cli.Put(ctx, "foo", "buzz") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) cli.SetEndpoints(eps...) 
time.Sleep(1 * time.Second) // give enough time for operation @@ -137,21 +126,15 @@ func TestUnresolvableOrderViolation(t *testing.T) { // set prevRev to the first member's revision of "foo" such that // the revision is higher than the fourth and fifth members' revision of "foo" _, err = OrderingKv.Get(ctx, "foo") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) clus.Members[0].Stop(t) clus.Members[1].Stop(t) clus.Members[2].Stop(t) err = clus.Members[3].Restart(t) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) err = clus.Members[4].Restart(t) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) clus.Members[3].WaitStarted(t) cli.SetEndpoints(clus.Members[3].GRPCURL) time.Sleep(1 * time.Second) // give enough time for operation diff --git a/tests/integration/clientv3/txn_test.go b/tests/integration/clientv3/txn_test.go index 221247d2f7a..1e0e247f23e 100644 --- a/tests/integration/clientv3/txn_test.go +++ b/tests/integration/clientv3/txn_test.go @@ -21,6 +21,8 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" clientv3 "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/server/v3/embed" @@ -150,14 +152,10 @@ func TestTxnSuccess(t *testing.T) { ctx := context.TODO() _, err := kv.Txn(ctx).Then(clientv3.OpPut("foo", "bar")).Commit() - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) resp, err := kv.Get(ctx, "foo") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if len(resp.Kvs) != 1 || string(resp.Kvs[0].Key) != "foo" { t.Fatalf("unexpected Get response %v", resp) } @@ -171,12 +169,9 @@ func TestTxnCompareRange(t *testing.T) { kv := clus.Client(0) fooResp, err := kv.Put(context.TODO(), "foo/", "bar") - if err != nil { - t.Fatal(err) - } - if _, err = kv.Put(context.TODO(), "foo/a", "baz"); err != nil { - t.Fatal(err) - } + require.NoError(t, err) + _, err = kv.Put(context.TODO(), "foo/a", "baz") + require.NoError(t, err) tresp, terr := kv.Txn(context.TODO()).If( clientv3.Compare( clientv3.CreateRevision("foo/"), "=", fooResp.Header.Revision). @@ -204,25 +199,19 @@ func TestTxnNested(t *testing.T) { clientv3.OpPut("foo", "bar"), clientv3.OpTxn(nil, []clientv3.Op{clientv3.OpPut("abc", "123")}, nil)). 
Else(clientv3.OpPut("foo", "baz")).Commit() - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if len(tresp.Responses) != 2 { t.Errorf("expected 2 top-level txn responses, got %+v", tresp.Responses) } // check txn writes were applied resp, err := kv.Get(context.TODO(), "foo") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if len(resp.Kvs) != 1 || string(resp.Kvs[0].Value) != "bar" { t.Errorf("unexpected Get response %+v", resp) } resp, err = kv.Get(context.TODO(), "abc") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if len(resp.Kvs) != 1 || string(resp.Kvs[0].Value) != "123" { t.Errorf("unexpected Get response %+v", resp) } diff --git a/tests/integration/clientv3/user_test.go b/tests/integration/clientv3/user_test.go index 7ed13e7ed44..f748ab0c8d6 100644 --- a/tests/integration/clientv3/user_test.go +++ b/tests/integration/clientv3/user_test.go @@ -37,9 +37,7 @@ func TestUserError(t *testing.T) { authapi := clus.RandClient() _, err := authapi.UserAdd(context.TODO(), "foo", "bar") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) _, err = authapi.UserAdd(context.TODO(), "foo", "bar") if !errors.Is(err, rpctypes.ErrUserAlreadyExist) { @@ -138,29 +136,22 @@ func TestUserErrorAuth(t *testing.T) { cfg.Username, cfg.Password = "root", "123" authed, err := integration2.NewClient(t, cfg) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer authed.Close() - if _, err := authed.UserList(context.TODO()); err != nil { - t.Fatal(err) - } + _, err = authed.UserList(context.TODO()) + require.NoError(t, err) } func authSetupRoot(t *testing.T, auth clientv3.Auth) { - if _, err := auth.UserAdd(context.TODO(), "root", "123"); err != nil { - t.Fatal(err) - } - if _, err := auth.RoleAdd(context.TODO(), "root"); err != nil { - t.Fatal(err) - } - if _, err := auth.UserGrantRole(context.TODO(), "root", "root"); err != nil { - t.Fatal(err) - } - if _, err := auth.AuthEnable(context.TODO()); err != nil { - t.Fatal(err) - } + _, err := auth.UserAdd(context.TODO(), "root", "123") + require.NoError(t, err) + _, err = auth.RoleAdd(context.TODO(), "root") + require.NoError(t, err) + _, err = auth.UserGrantRole(context.TODO(), "root", "root") + require.NoError(t, err) + _, err = auth.AuthEnable(context.TODO()) + require.NoError(t, err) } // TestGetTokenWithoutAuth is when Client can connect to etcd even if they @@ -177,9 +168,8 @@ func TestGetTokenWithoutAuth(t *testing.T) { var client *clientv3.Client // make sure "auth" was disabled - if _, err = authapi.AuthDisable(context.TODO()); err != nil { - t.Fatal(err) - } + _, err = authapi.AuthDisable(context.TODO()) + require.NoError(t, err) // "Username" and "Password" must be used cfg := clientv3.Config{ diff --git a/tests/integration/clientv3/watch_test.go b/tests/integration/clientv3/watch_test.go index 090e473d065..5133911c8dc 100644 --- a/tests/integration/clientv3/watch_test.go +++ b/tests/integration/clientv3/watch_test.go @@ -160,9 +160,8 @@ func testWatchMultiWatcher(t *testing.T, wctx *watchctx) { for i := 0; i < numKeyUpdates; i++ { for _, k := range keys { v := fmt.Sprintf("%s-%d", k, i) - if _, err := wctx.kv.Put(ctx, k, v); err != nil { - t.Fatal(err) - } + _, err := wctx.kv.Put(ctx, k, v) + require.NoError(t, err) } } } @@ -216,9 +215,8 @@ func testWatchReconnRequest(t *testing.T, wctx *watchctx) { // spinning on dropping connections may trigger a leader election // due to resource starvation; l-read to ensure the cluster is stable ctx, cancel := context.WithTimeout(context.TODO(), 
30*time.Second) - if _, err := wctx.kv.Get(ctx, "_"); err != nil { - t.Fatal(err) - } + _, err := wctx.kv.Get(ctx, "_") + require.NoError(t, err) cancel() // ensure watcher works @@ -308,9 +306,8 @@ func testWatchCancelRunning(t *testing.T, wctx *watchctx) { if wctx.ch = wctx.w.Watch(ctx, "a"); wctx.ch == nil { t.Fatalf("expected non-nil watcher channel") } - if _, err := wctx.kv.Put(ctx, "a", "a"); err != nil { - t.Fatal(err) - } + _, err := wctx.kv.Put(ctx, "a", "a") + require.NoError(t, err) cancel() select { case <-time.After(time.Second): @@ -333,9 +330,8 @@ func testWatchCancelRunning(t *testing.T, wctx *watchctx) { } func putAndWatch(t *testing.T, wctx *watchctx, key, val string) { - if _, err := wctx.kv.Put(context.TODO(), key, val); err != nil { - t.Fatal(err) - } + _, err := wctx.kv.Put(context.TODO(), key, val) + require.NoError(t, err) select { case <-time.After(5 * time.Second): t.Fatalf("watch timed out") @@ -360,16 +356,13 @@ func TestWatchResumeAfterDisconnect(t *testing.T) { defer clus.Terminate(t) cli := clus.Client(0) - if _, err := cli.Put(context.TODO(), "b", "2"); err != nil { - t.Fatal(err) - } - if _, err := cli.Put(context.TODO(), "a", "3"); err != nil { - t.Fatal(err) - } + _, err := cli.Put(context.TODO(), "b", "2") + require.NoError(t, err) + _, err = cli.Put(context.TODO(), "a", "3") + require.NoError(t, err) // if resume is broken, it'll pick up this key first instead of a=3 - if _, err := cli.Put(context.TODO(), "a", "4"); err != nil { - t.Fatal(err) - } + _, err = cli.Put(context.TODO(), "a", "4") + require.NoError(t, err) // watch from revision 1 wch := clus.Client(0).Watch(context.Background(), "a", clientv3.WithRev(1), clientv3.WithCreatedNotify()) @@ -441,9 +434,8 @@ func TestWatchResumeCompacted(t *testing.T) { _, err := kv.Put(context.TODO(), "foo", "bar") require.NoError(t, err) } - if _, err := kv.Compact(context.TODO(), 3); err != nil { - t.Fatal(err) - } + _, err := kv.Compact(context.TODO(), 3) + require.NoError(t, err) clus.Members[0].Restart(t) @@ -509,9 +501,8 @@ func TestWatchCompactRevision(t *testing.T) { w := clus.RandClient() - if _, err := kv.Compact(context.TODO(), 4); err != nil { - t.Fatal(err) - } + _, err := kv.Compact(context.TODO(), 4) + require.NoError(t, err) wch := w.Watch(context.Background(), "foo", clientv3.WithRev(2)) // get compacted error message @@ -566,9 +557,8 @@ func testWatchWithProgressNotify(t *testing.T, watchOnPut bool) { } kvc := clus.RandClient() - if _, err := kvc.Put(context.TODO(), "foox", "bar"); err != nil { - t.Fatal(err) - } + _, err := kvc.Put(context.TODO(), "foox", "bar") + require.NoError(t, err) select { case resp := <-rch: @@ -646,9 +636,7 @@ func TestWatchRequestProgress(t *testing.T) { } _, err := wc.Put(context.Background(), "/a", "1") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) for _, rch := range watchChans { select { @@ -663,14 +651,10 @@ func TestWatchRequestProgress(t *testing.T) { // put a value not being watched to increment revision _, err = wc.Put(context.Background(), "x", "1") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) err = wc.RequestProgress(context.Background()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // verify all watch channels receive a progress notify for _, rch := range watchChans { @@ -782,9 +766,8 @@ func TestWatchErrConnClosed(t *testing.T) { } }() - if err := cli.ActiveConnection().Close(); err != nil { - t.Fatal(err) - } + err := cli.ActiveConnection().Close() + require.NoError(t, err) clus.TakeClient(0) select 
{ @@ -802,9 +785,8 @@ func TestWatchAfterClose(t *testing.T) { cli := clus.Client(0) clus.TakeClient(0) - if err := cli.Close(); err != nil { - t.Fatal(err) - } + err := cli.Close() + require.NoError(t, err) donec := make(chan struct{}) go func() { @@ -833,9 +815,8 @@ func TestWatchWithRequireLeader(t *testing.T) { // ensure that it receives the update so watching after killing quorum // is guaranteed to have the key. liveClient := clus.Client(0) - if _, err := liveClient.Put(context.TODO(), "foo", "bar"); err != nil { - t.Fatal(err) - } + _, err := liveClient.Put(context.TODO(), "foo", "bar") + require.NoError(t, err) clus.Members[1].Stop(t) clus.Members[2].Stop(t) @@ -883,13 +864,9 @@ func TestWatchWithRequireLeader(t *testing.T) { `type="stream"`, fmt.Sprintf(`client_api_version="%v"`, version.APIVersion), ) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) cv, err := strconv.ParseInt(cnt, 10, 32) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if cv < 2 { // >2 when retried t.Fatalf("expected at least 2, got %q", cnt) } @@ -908,12 +885,10 @@ func TestWatchWithFilter(t *testing.T) { wcNoPut := client.Watch(ctx, "a", clientv3.WithFilterPut()) wcNoDel := client.Watch(ctx, "a", clientv3.WithFilterDelete()) - if _, err := client.Put(ctx, "a", "abc"); err != nil { - t.Fatal(err) - } - if _, err := client.Delete(ctx, "a"); err != nil { - t.Fatal(err) - } + _, err := client.Put(ctx, "a", "abc") + require.NoError(t, err) + _, err = client.Delete(ctx, "a") + require.NoError(t, err) npResp := <-wcNoPut if len(npResp.Events) != 1 || npResp.Events[0].Type != clientv3.EventTypeDelete { @@ -1030,9 +1005,7 @@ func TestWatchCancelOnServer(t *testing.T) { time.Sleep(time.Second) minWatches, err := cluster.Members[0].Metric("etcd_debugging_mvcc_watcher_total") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) maxWatchV, minWatchV := 0, 0 n, serr := fmt.Sscanf(maxWatches+" "+minWatches, "%d %d", &maxWatchV, &minWatchV) @@ -1083,9 +1056,8 @@ func testWatchOverlapContextCancel(t *testing.T, f func(*integration2.Cluster)) // issue concurrent watches on "abc" with cancel cli := clus.RandClient() - if _, err := cli.Put(context.TODO(), "abc", "def"); err != nil { - t.Fatal(err) - } + _, err := cli.Put(context.TODO(), "abc", "def") + require.NoError(t, err) ch := make(chan struct{}, n) tCtx, cancelFunc := context.WithCancel(context.Background()) defer cancelFunc() @@ -1155,9 +1127,8 @@ func TestWatchCancelAndCloseClient(t *testing.T) { } }() cancel() - if err := cli.Close(); err != nil { - t.Fatal(err) - } + err := cli.Close() + require.NoError(t, err) <-donec clus.TakeClient(0) } @@ -1179,9 +1150,8 @@ func TestWatchStressResumeClose(t *testing.T) { } clus.Members[0].Bridge().DropConnections() cancel() - if err := cli.Close(); err != nil { - t.Fatal(err) - } + err := cli.Close() + require.NoError(t, err) clus.TakeClient(0) } diff --git a/tests/integration/cluster_test.go b/tests/integration/cluster_test.go index 4aac7e2c824..064a0561144 100644 --- a/tests/integration/cluster_test.go +++ b/tests/integration/cluster_test.go @@ -208,9 +208,8 @@ func TestIssue2681(t *testing.T) { c := integration.NewCluster(t, &integration.ClusterConfig{Size: 5, DisableStrictReconfigCheck: true}) defer c.Terminate(t) - if err := c.RemoveMember(t, c.Members[0].Client, uint64(c.Members[4].Server.MemberID())); err != nil { - t.Fatal(err) - } + err := c.RemoveMember(t, c.Members[0].Client, uint64(c.Members[4].Server.MemberID())) + require.NoError(t, err) c.WaitMembersForLeader(t, c.Members) 
c.AddMember(t) @@ -234,9 +233,8 @@ func testIssue2746(t *testing.T, members int) { clusterMustProgress(t, c.Members) } - if err := c.RemoveMember(t, c.Members[0].Client, uint64(c.Members[members-1].Server.MemberID())); err != nil { - t.Fatal(err) - } + err := c.RemoveMember(t, c.Members[0].Client, uint64(c.Members[members-1].Server.MemberID())) + require.NoError(t, err) c.WaitMembersForLeader(t, c.Members) c.AddMember(t) @@ -312,9 +310,8 @@ func TestIssue3699(t *testing.T) { t.Logf("Restarting member '0'...") // bring back node a // node a will remain useless as long as d is the leader. - if err := c.Members[0].Restart(t); err != nil { - t.Fatal(err) - } + err := c.Members[0].Restart(t) + require.NoError(t, err) t.Logf("Restarted member '0'.") select { @@ -530,9 +527,7 @@ func TestConcurrentRemoveMember(t *testing.T) { defer c.Terminate(t) addResp, err := c.Members[0].Client.MemberAddAsLearner(context.Background(), []string{"http://localhost:123"}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) removeID := addResp.Member.ID done := make(chan struct{}) go func() { @@ -540,9 +535,8 @@ func TestConcurrentRemoveMember(t *testing.T) { c.Members[0].Client.MemberRemove(context.Background(), removeID) close(done) }() - if _, err := c.Members[0].Client.MemberRemove(context.Background(), removeID); err != nil { - t.Fatal(err) - } + _, err = c.Members[0].Client.MemberRemove(context.Background(), removeID) + require.NoError(t, err) <-done } @@ -552,9 +546,7 @@ func TestConcurrentMoveLeader(t *testing.T) { defer c.Terminate(t) addResp, err := c.Members[0].Client.MemberAddAsLearner(context.Background(), []string{"http://localhost:123"}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) removeID := addResp.Member.ID done := make(chan struct{}) go func() { @@ -562,8 +554,7 @@ func TestConcurrentMoveLeader(t *testing.T) { c.Members[0].Client.MoveLeader(context.Background(), removeID) close(done) }() - if _, err := c.Members[0].Client.MemberRemove(context.Background(), removeID); err != nil { - t.Fatal(err) - } + _, err = c.Members[0].Client.MemberRemove(context.Background(), removeID) + require.NoError(t, err) <-done } diff --git a/tests/integration/grpc_test.go b/tests/integration/grpc_test.go index 7061ed61e63..e3b762c31ef 100644 --- a/tests/integration/grpc_test.go +++ b/tests/integration/grpc_test.go @@ -22,6 +22,7 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" "google.golang.org/grpc" clientv3 "go.etcd.io/etcd/client/v3" @@ -105,9 +106,7 @@ func TestAuthority(t *testing.T) { putRequestMethod := "/etcdserverpb.KV/Put" for i := 0; i < 100; i++ { _, err := kv.Put(context.TODO(), "foo", "bar") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) } assertAuthority(t, tc.expectAuthorityPattern, clus, putRequestMethod) @@ -121,9 +120,7 @@ func setupTLS(t *testing.T, useTLS bool, cfg integration.ClusterConfig) (integra if useTLS { cfg.ClientTLS = &integration.TestTLSInfo tlsConfig, err := integration.TestTLSInfo.ClientConfig() - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) return cfg, tlsConfig } return cfg, nil @@ -138,9 +135,7 @@ func setupClient(t *testing.T, endpointPattern string, clus *integration.Cluster DialOptions: []grpc.DialOption{grpc.WithBlock()}, TLS: tlsConfig, }) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) return kv } diff --git a/tests/integration/hashkv_test.go b/tests/integration/hashkv_test.go index cbc83d0159e..26d5dfaf040 100644 --- a/tests/integration/hashkv_test.go +++ 
b/tests/integration/hashkv_test.go @@ -21,6 +21,8 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + clientv3 "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/server/v3/etcdserver" "go.etcd.io/etcd/server/v3/storage/mvcc/testutil" @@ -36,9 +38,7 @@ func TestCompactionHash(t *testing.T) { defer clus.Terminate(t) cc, err := clus.ClusterClient(t) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) client := &http.Client{ Transport: &http.Transport{ DialContext: func(_ context.Context, _, _ string) (net.Conn, error) { diff --git a/tests/integration/member_test.go b/tests/integration/member_test.go index efd6598f684..b012370ab2c 100644 --- a/tests/integration/member_test.go +++ b/tests/integration/member_test.go @@ -58,9 +58,7 @@ func TestRestartMember(t *testing.T) { c.WaitMembersForLeader(t, membs) clusterMustProgress(t, membs) err := c.Members[i].Restart(t) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) } c.WaitMembersForLeader(t, c.Members) clusterMustProgress(t, c.Members) diff --git a/tests/integration/metrics_test.go b/tests/integration/metrics_test.go index 047b57ef150..fad4ed08946 100644 --- a/tests/integration/metrics_test.go +++ b/tests/integration/metrics_test.go @@ -38,9 +38,7 @@ func TestMetricDbSizeBoot(t *testing.T) { defer clus.Terminate(t) v, err := clus.Members[0].Metric("etcd_debugging_mvcc_db_total_size_in_bytes") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if v == "0" { t.Fatalf("expected non-zero, got %q", v) @@ -74,24 +72,16 @@ func testMetricDbSizeDefrag(t *testing.T, name string) { expected := numPuts * len(putreq.Value) beforeDefrag, err := clus.Members[0].Metric(name + "_mvcc_db_total_size_in_bytes") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) bv, err := strconv.Atoi(beforeDefrag) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if bv < expected { t.Fatalf("expected db size greater than %d, got %d", expected, bv) } beforeDefragInUse, err := clus.Members[0].Metric("etcd_mvcc_db_total_size_in_use_in_bytes") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) biu, err := strconv.Atoi(beforeDefragInUse) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if biu < expected { t.Fatalf("expected db size in use is greater than %d, got %d", expected, biu) } @@ -142,25 +132,17 @@ func testMetricDbSizeDefrag(t *testing.T, name string) { mc.Defragment(context.TODO(), &pb.DefragmentRequest{}) afterDefrag, err := clus.Members[0].Metric(name + "_mvcc_db_total_size_in_bytes") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) av, err := strconv.Atoi(afterDefrag) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if bv <= av { t.Fatalf("expected less than %d, got %d after defrag", bv, av) } afterDefragInUse, err := clus.Members[0].Metric("etcd_mvcc_db_total_size_in_use_in_bytes") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) adiu, err := strconv.Atoi(afterDefragInUse) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if adiu > av { t.Fatalf("db size in use (%d) is expected less than db size (%d) after defrag", adiu, av) } @@ -172,13 +154,9 @@ func TestMetricQuotaBackendBytes(t *testing.T) { defer clus.Terminate(t) qs, err := clus.Members[0].Metric("etcd_server_quota_backend_bytes") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) qv, err := strconv.ParseFloat(qs, 64) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if int64(qv) != storage.DefaultQuotaBytes { t.Fatalf("expected %d, got 
%f", storage.DefaultQuotaBytes, qv) } @@ -190,9 +168,7 @@ func TestMetricsHealth(t *testing.T) { defer clus.Terminate(t) tr, err := transport.NewTransport(transport.TLSInfo{}, 5*time.Second) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) u := clus.Members[0].ClientURLs[0] u.Path = "/health" resp, err := tr.RoundTrip(&http.Request{ @@ -201,14 +177,10 @@ func TestMetricsHealth(t *testing.T) { URL: &u, }) resp.Body.Close() - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) hv, err := clus.Members[0].Metric("etcd_server_health_failures") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if hv != "0" { t.Fatalf("expected '0' from etcd_server_health_failures, got %q", hv) } diff --git a/tests/integration/revision_test.go b/tests/integration/revision_test.go index 74792546820..10866331cbf 100644 --- a/tests/integration/revision_test.go +++ b/tests/integration/revision_test.go @@ -23,6 +23,7 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" "google.golang.org/grpc/status" "go.etcd.io/etcd/tests/v3/framework/integration" @@ -103,9 +104,7 @@ func testRevisionMonotonicWithFailures(t *testing.T, testDuration time.Duration, wg.Wait() kv := clus.Client(0) resp, err := kv.Get(context.Background(), "foo") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) t.Logf("Revision %d", resp.Header.Revision) } @@ -116,9 +115,7 @@ func putWorker(ctx context.Context, t *testing.T, clus *integration.Cluster) { if errors.Is(err, context.DeadlineExceeded) { return } - if silenceConnectionErrors(err) != nil { - t.Fatal(err) - } + require.NoError(t, silenceConnectionErrors(err)) } } @@ -130,9 +127,7 @@ func getWorker(ctx context.Context, t *testing.T, clus *integration.Cluster) { if errors.Is(err, context.DeadlineExceeded) { return } - if silenceConnectionErrors(err) != nil { - t.Fatal(err) - } + require.NoError(t, silenceConnectionErrors(err)) if resp == nil { continue } diff --git a/tests/integration/tracing_test.go b/tests/integration/tracing_test.go index 744028322d3..92f96cd0bc8 100644 --- a/tests/integration/tracing_test.go +++ b/tests/integration/tracing_test.go @@ -40,9 +40,7 @@ func TestTracing(t *testing.T) { "Wal creation tests are depending on embedded etcd server so are integration-level tests.") // set up trace collector listener, err := net.Listen("tcp", "localhost:") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) traceFound := make(chan struct{}) defer close(traceFound) @@ -63,9 +61,7 @@ func TestTracing(t *testing.T) { // start an etcd instance with tracing enabled etcdSrv, err := embed.StartEtcd(cfg) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer etcdSrv.Close() select { diff --git a/tests/integration/utl_wal_version_test.go b/tests/integration/utl_wal_version_test.go index 33e1b0aecd4..d3646735d65 100644 --- a/tests/integration/utl_wal_version_test.go +++ b/tests/integration/utl_wal_version_test.go @@ -21,6 +21,7 @@ import ( "github.com/coreos/go-semver/semver" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "go.uber.org/zap" "go.etcd.io/etcd/client/pkg/v3/testutil" @@ -37,9 +38,7 @@ func TestEtcdVersionFromWAL(t *testing.T) { "Wal creation tests are depending on embedded etcd server so are integration-level tests.") cfg := integration.NewEmbedConfig(t, "default") srv, err := embed.StartEtcd(cfg) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) select { case <-srv.Server.ReadyNotify(): case <-time.After(3 * time.Second): @@ -76,15 +75,11 @@ func 
TestEtcdVersionFromWAL(t *testing.T) { srv.Close() w, err := wal.Open(zap.NewNop(), cfg.Dir+"/member/wal", walpb.Snapshot{}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer w.Close() walVersion, err := wal.ReadWALVersion(w) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) assert.Equal(t, &semver.Version{Major: 3, Minor: 6}, walVersion.MinimalEtcdVersion()) } diff --git a/tests/integration/v3_alarm_test.go b/tests/integration/v3_alarm_test.go index 50a701e6809..b4cbb728e4b 100644 --- a/tests/integration/v3_alarm_test.go +++ b/tests/integration/v3_alarm_test.go @@ -22,6 +22,7 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" "go.uber.org/zap/zaptest" pb "go.etcd.io/etcd/api/v3/etcdserverpb" @@ -63,15 +64,11 @@ func TestV3StorageQuotaApply(t *testing.T) { // test big put bigbuf := make([]byte, quotasize) _, err := kvc1.Put(context.TODO(), &pb.PutRequest{Key: key, Value: bigbuf}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // quorum get should work regardless of whether alarm is raised _, err = kvc0.Range(context.TODO(), &pb.RangeRequest{Key: []byte("foo")}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // wait until alarm is raised for sure-- poll the alarms stopc := time.After(5 * time.Second) @@ -111,9 +108,7 @@ func TestV3StorageQuotaApply(t *testing.T) { }, }, }) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) ctx, cancel := context.WithTimeout(context.TODO(), integration.RequestWaitTimeout) defer cancel() @@ -152,25 +147,22 @@ func TestV3AlarmDeactivate(t *testing.T) { Action: pb.AlarmRequest_ACTIVATE, Alarm: pb.AlarmType_NOSPACE, } - if _, err := mt.Alarm(context.TODO(), alarmReq); err != nil { - t.Fatal(err) - } + _, err := mt.Alarm(context.TODO(), alarmReq) + require.NoError(t, err) key := []byte("abc") smallbuf := make([]byte, 512) - _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: key, Value: smallbuf}) + _, err = kvc.Put(context.TODO(), &pb.PutRequest{Key: key, Value: smallbuf}) if err == nil && !eqErrGRPC(err, rpctypes.ErrGRPCNoSpace) { t.Fatalf("put got %v, expected %v", err, rpctypes.ErrGRPCNoSpace) } alarmReq.Action = pb.AlarmRequest_DEACTIVATE - if _, err = mt.Alarm(context.TODO(), alarmReq); err != nil { - t.Fatal(err) - } + _, err = mt.Alarm(context.TODO(), alarmReq) + require.NoError(t, err) - if _, err = kvc.Put(context.TODO(), &pb.PutRequest{Key: key, Value: smallbuf}); err != nil { - t.Fatal(err) - } + _, err = kvc.Put(context.TODO(), &pb.PutRequest{Key: key, Value: smallbuf}) + require.NoError(t, err) } func TestV3CorruptAlarm(t *testing.T) { @@ -209,15 +201,12 @@ func TestV3CorruptAlarm(t *testing.T) { time.Sleep(time.Second * 2) // Wait for cluster so Puts succeed in case member 0 was the leader. - if _, err := clus.Client(1).Get(context.TODO(), "k"); err != nil { - t.Fatal(err) - } - if _, err := clus.Client(1).Put(context.TODO(), "xyz", "321"); err != nil { - t.Fatal(err) - } - if _, err := clus.Client(1).Put(context.TODO(), "abc", "fed"); err != nil { - t.Fatal(err) - } + _, err := clus.Client(1).Get(context.TODO(), "k") + require.NoError(t, err) + _, err = clus.Client(1).Put(context.TODO(), "xyz", "321") + require.NoError(t, err) + _, err = clus.Client(1).Put(context.TODO(), "abc", "fed") + require.NoError(t, err) // Restart with corruption checking enabled. 
clus.Members[1].Stop(t) @@ -231,14 +220,10 @@ func TestV3CorruptAlarm(t *testing.T) { clus.Members[0].WaitStarted(t) resp0, err0 := clus.Client(0).Get(context.TODO(), "abc") - if err0 != nil { - t.Fatal(err0) - } + require.NoError(t, err0) clus.Members[1].WaitStarted(t) resp1, err1 := clus.Client(1).Get(context.TODO(), "abc") - if err1 != nil { - t.Fatal(err1) - } + require.NoError(t, err1) if resp0.Kvs[0].ModRevision == resp1.Kvs[0].ModRevision { t.Fatalf("matching ModRevision values") @@ -290,9 +275,8 @@ func TestV3CorruptAlarmWithLeaseCorrupted(t *testing.T) { } } - if err = clus.RemoveMember(t, clus.Client(1), uint64(clus.Members[2].ID())); err != nil { - t.Fatal(err) - } + err = clus.RemoveMember(t, clus.Client(1), uint64(clus.Members[2].ID())) + require.NoError(t, err) clus.WaitMembersForLeader(t, clus.Members) clus.AddMember(t) @@ -314,30 +298,22 @@ func TestV3CorruptAlarmWithLeaseCorrupted(t *testing.T) { schema.MustUnsafePutLease(tx, &lpb) tx.Commit() - if err = be.Close(); err != nil { - t.Fatal(err) - } + err = be.Close() + require.NoError(t, err) - if err = clus.Members[2].Restart(t); err != nil { - t.Fatal(err) - } + err = clus.Members[2].Restart(t) + require.NoError(t, err) clus.Members[1].WaitOK(t) clus.Members[2].WaitOK(t) // Revoke lease should remove key except the member with corruption _, err = integration.ToGRPC(clus.Members[0].Client).Lease.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: lresp.ID}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) resp0, err0 := clus.Members[1].Client.KV.Get(context.TODO(), "foo") - if err0 != nil { - t.Fatal(err0) - } + require.NoError(t, err0) resp1, err1 := clus.Members[2].Client.KV.Get(context.TODO(), "foo") - if err1 != nil { - t.Fatal(err1) - } + require.NoError(t, err1) if resp0.Header.Revision == resp1.Header.Revision { t.Fatalf("matching Revision values") diff --git a/tests/integration/v3_auth_test.go b/tests/integration/v3_auth_test.go index ad21aa0f29b..393eb04a25a 100644 --- a/tests/integration/v3_auth_test.go +++ b/tests/integration/v3_auth_test.go @@ -101,9 +101,8 @@ func TestV3AuthTokenWithDisable(t *testing.T) { }() time.Sleep(10 * time.Millisecond) - if _, err := c.AuthDisable(context.TODO()); err != nil { - t.Fatal(err) - } + _, err := c.AuthDisable(context.TODO()) + require.NoError(t, err) time.Sleep(10 * time.Millisecond) cancel() @@ -168,14 +167,11 @@ func testV3AuthWithLeaseRevokeWithRoot(t *testing.T, ccfg integration.ClusterCon defer rootc.Close() leaseResp, err := rootc.Grant(context.TODO(), 2) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) leaseID := leaseResp.ID - if _, err = rootc.Put(context.TODO(), "foo", "bar", clientv3.WithLease(leaseID)); err != nil { - t.Fatal(err) - } + _, err = rootc.Put(context.TODO(), "foo", "bar", clientv3.WithLease(leaseID)) + require.NoError(t, err) // wait for lease expire time.Sleep(3 * time.Second) @@ -229,15 +225,11 @@ func TestV3AuthWithLeaseRevoke(t *testing.T) { defer rootc.Close() leaseResp, err := rootc.Grant(context.TODO(), 90) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) leaseID := leaseResp.ID // permission of k3 isn't granted to user1 _, err = rootc.Put(context.TODO(), "k3", "val", clientv3.WithLease(leaseID)) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) userc, cerr := integration.NewClient(t, clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "user1", Password: "user1-123"}) if cerr != nil { @@ -288,31 +280,21 @@ func TestV3AuthWithLeaseAttach(t *testing.T) { defer user2c.Close() leaseResp, 
err := user1c.Grant(context.TODO(), 90) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) leaseID := leaseResp.ID // permission of k2 is also granted to user2 _, err = user1c.Put(context.TODO(), "k2", "val", clientv3.WithLease(leaseID)) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) _, err = user2c.Revoke(context.TODO(), leaseID) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) leaseResp, err = user1c.Grant(context.TODO(), 90) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) leaseID = leaseResp.ID // permission of k1 isn't granted to user2 _, err = user1c.Put(context.TODO(), "k1", "val", clientv3.WithLease(leaseID)) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) _, err = user2c.Revoke(context.TODO(), leaseID) if err == nil { @@ -353,9 +335,8 @@ func authSetupRoot(t *testing.T, auth pb.AuthClient) { }, } authSetupUsers(t, auth, root) - if _, err := auth.AuthEnable(context.TODO(), &pb.AuthEnableRequest{}); err != nil { - t.Fatal(err) - } + _, err := auth.AuthEnable(context.TODO(), &pb.AuthEnableRequest{}) + require.NoError(t, err) } func TestV3AuthNonAuthorizedRPCs(t *testing.T) { diff --git a/tests/integration/v3_election_test.go b/tests/integration/v3_election_test.go index 350bfb354dd..b49097ca861 100644 --- a/tests/integration/v3_election_test.go +++ b/tests/integration/v3_election_test.go @@ -155,9 +155,8 @@ func TestElectionFailover(t *testing.T) { }() // invoke leader failover - if err := ss[0].Close(); err != nil { - t.Fatal(err) - } + err := ss[0].Close() + require.NoError(t, err) // check new leader e = concurrency.NewElection(ss[2], "test-election") @@ -192,13 +191,11 @@ func TestElectionSessionRecampaign(t *testing.T) { defer session.Orphan() e := concurrency.NewElection(session, "test-elect") - if err := e.Campaign(context.TODO(), "abc"); err != nil { - t.Fatal(err) - } + err = e.Campaign(context.TODO(), "abc") + require.NoError(t, err) e2 := concurrency.NewElection(session, "test-elect") - if err := e2.Campaign(context.TODO(), "def"); err != nil { - t.Fatal(err) - } + err = e2.Campaign(context.TODO(), "def") + require.NoError(t, err) ctx, cancel := context.WithCancel(context.TODO()) defer cancel() @@ -217,22 +214,19 @@ func TestElectionOnPrefixOfExistingKey(t *testing.T) { defer clus.Terminate(t) cli := clus.RandClient() - if _, err := cli.Put(context.TODO(), "testa", "value"); err != nil { - t.Fatal(err) - } + _, err := cli.Put(context.TODO(), "testa", "value") + require.NoError(t, err) s, serr := concurrency.NewSession(cli) if serr != nil { t.Fatal(serr) } e := concurrency.NewElection(s, "test") ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Second) - err := e.Campaign(ctx, "abc") + err = e.Campaign(ctx, "abc") cancel() - if err != nil { - // after 5 seconds, deadlock results in - // 'context deadline exceeded' here. - t.Fatal(err) - } + // after 5 seconds, deadlock results in + // 'context deadline exceeded' here. 
+ require.NoError(t, err) } // TestElectionOnSessionRestart tests that a quick restart of leader (resulting @@ -245,9 +239,7 @@ func TestElectionOnSessionRestart(t *testing.T) { cli := clus.RandClient() session, err := concurrency.NewSession(cli) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) e := concurrency.NewElection(session, "test-elect") if cerr := e.Campaign(context.TODO(), "abc"); cerr != nil { @@ -293,9 +285,7 @@ func TestElectionObserveCompacted(t *testing.T) { cli := clus.Client(0) session, err := concurrency.NewSession(cli) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer session.Orphan() e := concurrency.NewElection(session, "test-elect") diff --git a/tests/integration/v3_failover_test.go b/tests/integration/v3_failover_test.go index d71dd267b85..49c029586c4 100644 --- a/tests/integration/v3_failover_test.go +++ b/tests/integration/v3_failover_test.go @@ -22,6 +22,7 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" "google.golang.org/grpc" "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" @@ -56,9 +57,7 @@ func TestFailover(t *testing.T) { defer clus.Terminate(t) cc, err := integration2.TestTLSInfo.ClientConfig() - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // Create an etcd client before or after first server down t.Logf("Creating an etcd client [%s]", tc.name) cli, err := tc.testFunc(t, cc, clus) diff --git a/tests/integration/v3_grpc_inflight_test.go b/tests/integration/v3_grpc_inflight_test.go index 7968e614edc..3d8b446e59b 100644 --- a/tests/integration/v3_grpc_inflight_test.go +++ b/tests/integration/v3_grpc_inflight_test.go @@ -20,6 +20,7 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -38,9 +39,8 @@ func TestV3MaintenanceDefragmentInflightRange(t *testing.T) { cli := clus.RandClient() kvc := integration.ToGRPC(cli).KV - if _, err := kvc.Put(context.Background(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil { - t.Fatal(err) - } + _, err := kvc.Put(context.Background(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}) + require.NoError(t, err) ctx, cancel := context.WithTimeout(context.Background(), time.Second) @@ -69,9 +69,8 @@ func TestV3KVInflightRangeRequests(t *testing.T) { cli := clus.RandClient() kvc := integration.ToGRPC(cli).KV - if _, err := kvc.Put(context.Background(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil { - t.Fatal(err) - } + _, err := kvc.Put(context.Background(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}) + require.NoError(t, err) ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) diff --git a/tests/integration/v3_grpc_test.go b/tests/integration/v3_grpc_test.go index 61fef15d8cd..1085ab87aa2 100644 --- a/tests/integration/v3_grpc_test.go +++ b/tests/integration/v3_grpc_test.go @@ -135,11 +135,10 @@ func TestV3CompactCurrentRev(t *testing.T) { } } // get key to add to proxy cache, if any - if _, err := kvc.Range(context.TODO(), &pb.RangeRequest{Key: []byte("foo")}); err != nil { - t.Fatal(err) - } + _, err := kvc.Range(context.TODO(), &pb.RangeRequest{Key: []byte("foo")}) + require.NoError(t, err) // compact on current revision - _, err := kvc.Compact(context.Background(), &pb.CompactionRequest{Revision: 4}) + _, err = kvc.Compact(context.Background(), &pb.CompactionRequest{Revision: 4}) if err != nil { t.Fatalf("couldn't compact kv space (%v)", err) } @@ -166,15 +165,11 @@ func 
TestV3HashKV(t *testing.T) { for i := 0; i < 10; i++ { resp, err := kvc.Put(context.Background(), &pb.PutRequest{Key: []byte("foo"), Value: []byte(fmt.Sprintf("bar%d", i))}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) rev := resp.Header.Revision hresp, err := mvc.HashKV(context.Background(), &pb.HashKVRequest{Revision: 0}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if rev != hresp.Header.Revision { t.Fatalf("Put rev %v != HashKV rev %v", rev, hresp.Header.Revision) } @@ -183,9 +178,7 @@ func TestV3HashKV(t *testing.T) { prevCompactRev := hresp.CompactRevision for i := 0; i < 10; i++ { hresp, err := mvc.HashKV(context.Background(), &pb.HashKVRequest{Revision: 0}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if rev != hresp.Header.Revision { t.Fatalf("Put rev %v != HashKV rev %v", rev, hresp.Header.Revision) } @@ -406,16 +399,12 @@ func TestV3TxnRevision(t *testing.T) { kvc := integration.ToGRPC(clus.RandClient()).KV pr := &pb.PutRequest{Key: []byte("abc"), Value: []byte("def")} presp, err := kvc.Put(context.TODO(), pr) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) txnget := &pb.RequestOp{Request: &pb.RequestOp_RequestRange{RequestRange: &pb.RangeRequest{Key: []byte("abc")}}} txn := &pb.TxnRequest{Success: []*pb.RequestOp{txnget}} tresp, err := kvc.Txn(context.TODO(), txn) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // did not update revision if presp.Header.Revision != tresp.Header.Revision { @@ -425,9 +414,7 @@ func TestV3TxnRevision(t *testing.T) { txndr := &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{RequestDeleteRange: &pb.DeleteRangeRequest{Key: []byte("def")}}} txn = &pb.TxnRequest{Success: []*pb.RequestOp{txndr}} tresp, err = kvc.Txn(context.TODO(), txn) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // did not update revision if presp.Header.Revision != tresp.Header.Revision { @@ -437,9 +424,7 @@ func TestV3TxnRevision(t *testing.T) { txnput := &pb.RequestOp{Request: &pb.RequestOp_RequestPut{RequestPut: &pb.PutRequest{Key: []byte("abc"), Value: []byte("123")}}} txn = &pb.TxnRequest{Success: []*pb.RequestOp{txnput}} tresp, err = kvc.Txn(context.TODO(), txn) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // updated revision if tresp.Header.Revision != presp.Header.Revision+1 { @@ -485,9 +470,7 @@ func TestV3TxnCmpHeaderRev(t *testing.T) { txn.Compare = append(txn.Compare, cmp) tresp, err := kvc.Txn(context.TODO(), txn) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) prev := <-revc err = <-errCh @@ -604,9 +587,7 @@ func TestV3TxnRangeCompare(t *testing.T) { txn := &pb.TxnRequest{} txn.Compare = append(txn.Compare, &tt.cmp) tresp, err := kvc.Txn(context.TODO(), txn) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if tt.wSuccess != tresp.Succeeded { t.Errorf("#%d: expected %v, got %v", i, tt.wSuccess, tresp.Succeeded) } @@ -653,9 +634,7 @@ func TestV3TxnNestedPath(t *testing.T) { } tresp, err := kvc.Txn(context.TODO(), topTxn) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) curTxnResp := tresp for i := range txnPath { @@ -680,9 +659,7 @@ func TestV3PutIgnoreValue(t *testing.T) { // create lease lc := integration.ToGRPC(clus.RandClient()).Lease lresp, err := lc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if lresp.Error != "" { t.Fatal(lresp.Error) } @@ -811,9 +788,7 @@ func TestV3PutIgnoreLease(t *testing.T) { // create lease lc := 
integration.ToGRPC(clus.RandClient()).Lease lresp, err := lc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if lresp.Error != "" { t.Fatal(lresp.Error) } @@ -1260,13 +1235,12 @@ func TestV3StorageQuotaAPI(t *testing.T) { // test small put that fits in quota smallbuf := make([]byte, 512) - if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: key, Value: smallbuf}); err != nil { - t.Fatal(err) - } + _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: key, Value: smallbuf}) + require.NoError(t, err) // test big put bigbuf := make([]byte, quotasize) - _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: key, Value: bigbuf}) + _, err = kvc.Put(context.TODO(), &pb.PutRequest{Key: key, Value: bigbuf}) if !eqErrGRPC(err, rpctypes.ErrGRPCNoSpace) { t.Fatalf("big put got %v, expected %v", err, rpctypes.ErrGRPCNoSpace) } diff --git a/tests/integration/v3_kv_test.go b/tests/integration/v3_kv_test.go index 2cf8acf7ab9..d180171cbf0 100644 --- a/tests/integration/v3_kv_test.go +++ b/tests/integration/v3_kv_test.go @@ -18,6 +18,8 @@ import ( "context" "testing" + "github.com/stretchr/testify/require" + clientv3 "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/client/v3/namespace" "go.etcd.io/etcd/tests/v3/framework/integration" @@ -33,23 +35,15 @@ func TestKVWithEmptyValue(t *testing.T) { client := clus.RandClient() _, err := client.Put(context.Background(), "my-namespace/foobar", "data") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) _, err = client.Put(context.Background(), "my-namespace/foobar1", "data") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) _, err = client.Put(context.Background(), "namespace/foobar1", "data") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // Range over all keys. resp, err := client.Get(context.Background(), "", clientv3.WithFromKey()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) for _, kv := range resp.Kvs { t.Log(string(kv.Key), "=", string(kv.Value)) } @@ -57,24 +51,18 @@ func TestKVWithEmptyValue(t *testing.T) { // Range over all keys in a namespace. client.KV = namespace.NewKV(client.KV, "my-namespace/") resp, err = client.Get(context.Background(), "", clientv3.WithFromKey()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) for _, kv := range resp.Kvs { t.Log(string(kv.Key), "=", string(kv.Value)) } //Remove all keys without WithFromKey/WithPrefix func _, err = client.Delete(context.Background(), "") - if err == nil { - // fatal error duo to without WithFromKey/WithPrefix func called. - t.Fatal(err) - } + // fatal error duo to without WithFromKey/WithPrefix func called. + require.Error(t, err) respDel, err := client.Delete(context.Background(), "", clientv3.WithFromKey()) - if err != nil { - // fatal error duo to with WithFromKey/WithPrefix func called. - t.Fatal(err) - } + // fatal error duo to with WithFromKey/WithPrefix func called. 
+ require.NoError(t, err) t.Logf("delete keys:%d", respDel.Deleted) } diff --git a/tests/integration/v3_leadership_test.go b/tests/integration/v3_leadership_test.go index aa1cd8ddc9c..71640f825c6 100644 --- a/tests/integration/v3_leadership_test.go +++ b/tests/integration/v3_leadership_test.go @@ -21,6 +21,7 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" "golang.org/x/sync/errgroup" pb "go.etcd.io/etcd/api/v3/etcdserverpb" @@ -59,15 +60,11 @@ func testMoveLeader(t *testing.T, auto bool) { target := uint64(clus.Members[(oldLeadIdx+1)%3].Server.MemberID()) if auto { err := clus.Members[oldLeadIdx].Server.TryTransferLeadershipOnShutdown() - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) } else { mvc := integration.ToGRPC(clus.Client(oldLeadIdx)).Maintenance _, err := mvc.MoveLeader(context.TODO(), &pb.MoveLeaderRequest{TargetID: target}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) } // wait until leader transitions have happened diff --git a/tests/integration/v3_lease_test.go b/tests/integration/v3_lease_test.go index 2ec93a3ac84..a96cf49568b 100644 --- a/tests/integration/v3_lease_test.go +++ b/tests/integration/v3_lease_test.go @@ -51,9 +51,7 @@ func TestV3LeasePromote(t *testing.T) { lresp, err := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 3}) ttl := time.Duration(lresp.TTL) * time.Second afterGrant := time.Now() - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if lresp.Error != "" { t.Fatal(lresp.Error) } @@ -203,9 +201,8 @@ func TestV3LeaseNegativeID(t *testing.T) { time.Sleep(100 * time.Millisecond) // restore lessor from db file clus.Members[2].Stop(t) - if err = clus.Members[2].Restart(t); err != nil { - t.Fatal(err) - } + err = clus.Members[2].Restart(t) + require.NoError(t, err) // revoke lease should remove key integration.WaitClientV3(t, clus.Members[2].Client) @@ -217,9 +214,7 @@ func TestV3LeaseNegativeID(t *testing.T) { for _, m := range clus.Members { getr := &pb.RangeRequest{Key: tc.k} getresp, err := integration.ToGRPC(m.Client).KV.Range(ctx, getr) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if revision == 0 { revision = getresp.Header.Revision } @@ -384,9 +379,7 @@ func TestV3LeaseCheckpoint(t *testing.T) { defer cancel() c := integration.ToGRPC(clus.RandClient()) lresp, err := c.Lease.LeaseGrant(ctx, &pb.LeaseGrantRequest{TTL: int64(tc.ttl.Seconds())}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) for i := 0; i < tc.leaderChanges; i++ { // wait for a checkpoint to occur @@ -440,9 +433,7 @@ func TestV3LeaseExists(t *testing.T) { lresp, err := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant( ctx0, &pb.LeaseGrantRequest{TTL: 30}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if lresp.Error != "" { t.Fatal(lresp.Error) } @@ -467,9 +458,7 @@ func TestV3LeaseLeases(t *testing.T) { lresp, err := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant( ctx0, &pb.LeaseGrantRequest{TTL: 30}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if lresp.Error != "" { t.Fatal(lresp.Error) } @@ -479,9 +468,7 @@ func TestV3LeaseLeases(t *testing.T) { lresp, err := integration.ToGRPC(clus.RandClient()).Lease.LeaseLeases( context.Background(), &pb.LeaseLeasesRequest{}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) for i := range lresp.Leases { if lresp.Leases[i].ID != ids[i] { t.Fatalf("#%d: lease ID expected %d, got %d", i, ids[i], lresp.Leases[i].ID) @@ -528,9 +515,7 @@ func 
testLeaseStress(t *testing.T, stresser func(context.Context, pb.LeaseClient if useClusterClient { clusterClient, err := clus.ClusterClient(t) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) for i := 0; i < 300; i++ { go func() { errc <- stresser(ctx, integration.ToGRPC(clusterClient).Lease) }() } @@ -628,9 +613,7 @@ func TestV3GetNonExistLease(t *testing.T) { t.Errorf("failed to create lease %v", err) } _, err = lc.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: lresp.ID}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) leaseTTLr := &pb.LeaseTimeToLiveRequest{ ID: lresp.ID, @@ -663,49 +646,33 @@ func TestV3LeaseSwitch(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() lresp1, err1 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(ctx, &pb.LeaseGrantRequest{TTL: 30}) - if err1 != nil { - t.Fatal(err1) - } + require.NoError(t, err1) lresp2, err2 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(ctx, &pb.LeaseGrantRequest{TTL: 30}) - if err2 != nil { - t.Fatal(err2) - } + require.NoError(t, err2) // attach key on lease1 then switch it to lease2 put1 := &pb.PutRequest{Key: []byte(key), Lease: lresp1.ID} _, err := integration.ToGRPC(clus.RandClient()).KV.Put(ctx, put1) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) put2 := &pb.PutRequest{Key: []byte(key), Lease: lresp2.ID} _, err = integration.ToGRPC(clus.RandClient()).KV.Put(ctx, put2) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // revoke lease1 should not remove key _, err = integration.ToGRPC(clus.RandClient()).Lease.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: lresp1.ID}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) rreq := &pb.RangeRequest{Key: []byte("foo")} rresp, err := integration.ToGRPC(clus.RandClient()).KV.Range(context.TODO(), rreq) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if len(rresp.Kvs) != 1 { t.Fatalf("unexpect removal of key") } // revoke lease2 should remove key _, err = integration.ToGRPC(clus.RandClient()).Lease.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: lresp2.ID}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) rresp, err = integration.ToGRPC(clus.RandClient()).KV.Range(context.TODO(), rreq) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if len(rresp.Kvs) != 0 { t.Fatalf("lease removed but key remains") } @@ -726,9 +693,7 @@ func TestV3LeaseFailover(t *testing.T) { // create lease lresp, err := lc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 5}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if lresp.Error != "" { t.Fatal(lresp.Error) } @@ -743,9 +708,7 @@ func TestV3LeaseFailover(t *testing.T) { ctx, cancel := context.WithCancel(mctx) defer cancel() lac, err := lc.LeaseKeepAlive(ctx) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // send keep alive to old leader until the old leader starts // to drop lease request. 
@@ -790,9 +753,7 @@ func TestV3LeaseRequireLeader(t *testing.T) { ctx, cancel := context.WithCancel(mctx) defer cancel() lac, err := lc.LeaseKeepAlive(ctx) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) donec := make(chan struct{}) go func() { @@ -825,16 +786,12 @@ func TestV3LeaseRecoverAndRevoke(t *testing.T) { lsc := integration.ToGRPC(clus.Client(0)).Lease lresp, err := lsc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: fiveMinTTL}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if lresp.Error != "" { t.Fatal(lresp.Error) } _, err = kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar"), Lease: lresp.ID}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // restart server and ensure lease still exists clus.Members[0].Stop(t) @@ -844,22 +801,16 @@ func TestV3LeaseRecoverAndRevoke(t *testing.T) { // overwrite old client with newly dialed connection // otherwise, error with "grpc: RPC failed fast due to transport failure" nc, err := integration.NewClientV3(clus.Members[0]) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) kvc = integration.ToGRPC(nc).KV lsc = integration.ToGRPC(nc).Lease defer nc.Close() // revoke should delete the key _, err = lsc.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: lresp.ID}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) rresp, err := kvc.Range(context.TODO(), &pb.RangeRequest{Key: []byte("foo")}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if len(rresp.Kvs) != 0 { t.Fatalf("lease removed but key remains") } @@ -876,22 +827,16 @@ func TestV3LeaseRevokeAndRecover(t *testing.T) { lsc := integration.ToGRPC(clus.Client(0)).Lease lresp, err := lsc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: fiveMinTTL}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if lresp.Error != "" { t.Fatal(lresp.Error) } _, err = kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar"), Lease: lresp.ID}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // revoke should delete the key _, err = lsc.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: lresp.ID}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // restart server and ensure revoked key doesn't exist clus.Members[0].Stop(t) @@ -901,16 +846,12 @@ func TestV3LeaseRevokeAndRecover(t *testing.T) { // overwrite old client with newly dialed connection // otherwise, error with "grpc: RPC failed fast due to transport failure" nc, err := integration.NewClientV3(clus.Members[0]) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) kvc = integration.ToGRPC(nc).KV defer nc.Close() rresp, err := kvc.Range(context.TODO(), &pb.RangeRequest{Key: []byte("foo")}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if len(rresp.Kvs) != 0 { t.Fatalf("lease removed but key remains") } @@ -928,22 +869,16 @@ func TestV3LeaseRecoverKeyWithDetachedLease(t *testing.T) { lsc := integration.ToGRPC(clus.Client(0)).Lease lresp, err := lsc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: fiveMinTTL}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if lresp.Error != "" { t.Fatal(lresp.Error) } _, err = kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar"), Lease: lresp.ID}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // overwrite lease with none _, err = kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}) - if err != nil { - t.Fatal(err) 
- } + require.NoError(t, err) // restart server and ensure lease still exists clus.Members[0].Stop(t) @@ -953,22 +888,16 @@ func TestV3LeaseRecoverKeyWithDetachedLease(t *testing.T) { // overwrite old client with newly dialed connection // otherwise, error with "grpc: RPC failed fast due to transport failure" nc, err := integration.NewClientV3(clus.Members[0]) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) kvc = integration.ToGRPC(nc).KV lsc = integration.ToGRPC(nc).Lease defer nc.Close() // revoke the detached lease _, err = lsc.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: lresp.ID}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) rresp, err := kvc.Range(context.TODO(), &pb.RangeRequest{Key: []byte("foo")}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if len(rresp.Kvs) != 1 { t.Fatalf("only detached lease removed, key should remain") } @@ -986,18 +915,14 @@ func TestV3LeaseRecoverKeyWithMutipleLease(t *testing.T) { var leaseIDs []int64 for i := 0; i < 2; i++ { lresp, err := lsc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: fiveMinTTL}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if lresp.Error != "" { t.Fatal(lresp.Error) } leaseIDs = append(leaseIDs, lresp.ID) _, err = kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar"), Lease: lresp.ID}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) } // restart server and ensure lease still exists @@ -1013,36 +938,26 @@ func TestV3LeaseRecoverKeyWithMutipleLease(t *testing.T) { // overwrite old client with newly dialed connection // otherwise, error with "grpc: RPC failed fast due to transport failure" nc, err := integration.NewClientV3(clus.Members[0]) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) kvc = integration.ToGRPC(nc).KV lsc = integration.ToGRPC(nc).Lease defer nc.Close() // revoke the old lease _, err = lsc.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: leaseIDs[0]}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // key should still exist rresp, err := kvc.Range(context.TODO(), &pb.RangeRequest{Key: []byte("foo")}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if len(rresp.Kvs) != 1 { t.Fatalf("only detached lease removed, key should remain") } // revoke the latest lease _, err = lsc.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: leaseIDs[1]}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) rresp, err = kvc.Range(context.TODO(), &pb.RangeRequest{Key: []byte("foo")}) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if len(rresp.Kvs) != 0 { t.Fatalf("lease removed but key remains") } @@ -1147,20 +1062,15 @@ func testLeaseRemoveLeasedKey(t *testing.T, act func(*integration.Cluster, int64 defer clus.Terminate(t) leaseID, err := acquireLeaseAndKey(clus, "foo") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) - if err = act(clus, leaseID); err != nil { - t.Fatal(err) - } + err = act(clus, leaseID) + require.NoError(t, err) // confirm no key rreq := &pb.RangeRequest{Key: []byte("foo")} rresp, err := integration.ToGRPC(clus.RandClient()).KV.Range(context.TODO(), rreq) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if len(rresp.Kvs) != 0 { t.Fatalf("lease removed but key remains") } diff --git a/tests/integration/v3_stm_test.go b/tests/integration/v3_stm_test.go index 9fe76bd102e..16bb18774a0 100644 --- a/tests/integration/v3_stm_test.go +++ b/tests/integration/v3_stm_test.go @@ -22,6 +22,7 @@ import 
( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" v3 "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/client/v3/concurrency" @@ -230,9 +231,8 @@ func TestSTMApplyOnConcurrentDeletion(t *testing.T) { defer clus.Terminate(t) etcdc := clus.RandClient() - if _, err := etcdc.Put(context.TODO(), "foo", "bar"); err != nil { - t.Fatal(err) - } + _, err := etcdc.Put(context.TODO(), "foo", "bar") + require.NoError(t, err) donec, readyc := make(chan struct{}), make(chan struct{}) go func() { <-readyc diff --git a/tests/integration/v3_tls_test.go b/tests/integration/v3_tls_test.go index 4e706f6f2e1..551b756e784 100644 --- a/tests/integration/v3_tls_test.go +++ b/tests/integration/v3_tls_test.go @@ -22,6 +22,7 @@ import ( "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "google.golang.org/grpc" clientv3 "go.etcd.io/etcd/client/v3" @@ -61,9 +62,7 @@ func testTLSCipherSuites(t *testing.T, valid bool) { defer clus.Terminate(t) cc, err := cliTLS.ClientConfig() - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) cli, cerr := integration.NewClient(t, clientv3.Config{ Endpoints: []string{clus.Members[0].GRPCURL}, DialTimeout: time.Second, diff --git a/tests/integration/v3_watch_restore_test.go b/tests/integration/v3_watch_restore_test.go index f7e2e4b4730..81dc5f4c9d3 100644 --- a/tests/integration/v3_watch_restore_test.go +++ b/tests/integration/v3_watch_restore_test.go @@ -20,6 +20,7 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" pb "go.etcd.io/etcd/api/v3/etcdserverpb" "go.etcd.io/etcd/tests/v3/framework/config" "go.etcd.io/etcd/tests/v3/framework/integration" @@ -65,9 +66,7 @@ func TestV3WatchRestoreSnapshotUnsync(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() wStream, errW := integration.ToGRPC(clus.Client(0)).Watch.Watch(ctx) - if errW != nil { - t.Fatal(errW) - } + require.NoError(t, errW) if err := wStream.Send(&pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{ CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo"), StartRevision: 5}}}); err != nil { t.Fatalf("wStream.Send error: %v", err) diff --git a/tests/integration/v3_watch_test.go b/tests/integration/v3_watch_test.go index e60805b3761..64d3e739822 100644 --- a/tests/integration/v3_watch_test.go +++ b/tests/integration/v3_watch_test.go @@ -571,19 +571,16 @@ func TestV3WatchEmptyKey(t *testing.T) { req := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{ CreateRequest: &pb.WatchCreateRequest{ Key: []byte("foo")}}} - if err := ws.Send(req); err != nil { - t.Fatal(err) - } - if _, err := ws.Recv(); err != nil { - t.Fatal(err) - } + err := ws.Send(req) + require.NoError(t, err) + _, err = ws.Recv() + require.NoError(t, err) // put a key with empty value kvc := integration.ToGRPC(clus.RandClient()).KV preq := &pb.PutRequest{Key: []byte("foo")} - if _, err := kvc.Put(context.TODO(), preq); err != nil { - t.Fatal(err) - } + _, err = kvc.Put(context.TODO(), preq) + require.NoError(t, err) // check received PUT resp, rerr := ws.Recv() @@ -1200,12 +1197,10 @@ func TestV3WatchWithFilter(t *testing.T) { Key: []byte("foo"), Filters: []pb.WatchCreateRequest_FilterType{pb.WatchCreateRequest_NOPUT}, }}} - if err := ws.Send(req); err != nil { - t.Fatal(err) - } - if _, err := ws.Recv(); err != nil { - t.Fatal(err) - } + err := ws.Send(req) + require.NoError(t, err) + _, err = ws.Recv() + require.NoError(t, err) recv := make(chan *pb.WatchResponse, 1) go func() { @@ -1220,9 
+1215,8 @@ func TestV3WatchWithFilter(t *testing.T) { // put a key with empty value kvc := integration.ToGRPC(clus.RandClient()).KV preq := &pb.PutRequest{Key: []byte("foo")} - if _, err := kvc.Put(context.TODO(), preq); err != nil { - t.Fatal(err) - } + _, err = kvc.Put(context.TODO(), preq) + require.NoError(t, err) select { case <-recv: @@ -1231,9 +1225,8 @@ func TestV3WatchWithFilter(t *testing.T) { } dreq := &pb.DeleteRangeRequest{Key: []byte("foo")} - if _, err := kvc.DeleteRange(context.TODO(), dreq); err != nil { - t.Fatal(err) - } + _, err = kvc.DeleteRange(context.TODO(), dreq) + require.NoError(t, err) select { case resp := <-recv: @@ -1345,9 +1338,7 @@ func TestV3WatchCancellation(t *testing.T) { time.Sleep(3 * time.Second) minWatches, err := clus.Members[0].Metric("etcd_debugging_mvcc_watcher_total") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) var expected string if integration.ThroughProxy { @@ -1384,9 +1375,7 @@ func TestV3WatchCloseCancelRace(t *testing.T) { time.Sleep(3 * time.Second) minWatches, err := clus.Members[0].Metric("etcd_debugging_mvcc_watcher_total") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) var expected string if integration.ThroughProxy { diff --git a/tests/integration/v3election_grpc_test.go b/tests/integration/v3election_grpc_test.go index d0ca72b4255..cf5c7cb3427 100644 --- a/tests/integration/v3election_grpc_test.go +++ b/tests/integration/v3election_grpc_test.go @@ -35,13 +35,9 @@ func TestV3ElectionCampaign(t *testing.T) { defer clus.Terminate(t) lease1, err1 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30}) - if err1 != nil { - t.Fatal(err1) - } + require.NoError(t, err1) lease2, err2 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30}) - if err2 != nil { - t.Fatal(err2) - } + require.NoError(t, err2) lc := integration.ToGRPC(clus.Client(0)).Election req1 := &epb.CampaignRequest{Name: []byte("foo"), Lease: lease1.ID, Value: []byte("abc")} @@ -129,13 +125,9 @@ func TestV3ElectionObserve(t *testing.T) { } lease1, err1 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30}) - if err1 != nil { - t.Fatal(err1) - } + require.NoError(t, err1) c1, cerr1 := lc.Campaign(context.TODO(), &epb.CampaignRequest{Name: []byte("foo"), Lease: lease1.ID, Value: []byte("0")}) - if cerr1 != nil { - t.Fatal(cerr1) - } + require.NoError(t, cerr1) // overlap other leader so it waits on resign leader2c := make(chan struct{}) diff --git a/tests/integration/v3lock_grpc_test.go b/tests/integration/v3lock_grpc_test.go index f293bc1a556..9ba4b990538 100644 --- a/tests/integration/v3lock_grpc_test.go +++ b/tests/integration/v3lock_grpc_test.go @@ -19,6 +19,7 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" pb "go.etcd.io/etcd/api/v3/etcdserverpb" lockpb "go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb" "go.etcd.io/etcd/tests/v3/framework/integration" @@ -32,13 +33,9 @@ func TestV3LockLockWaiter(t *testing.T) { defer clus.Terminate(t) lease1, err1 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30}) - if err1 != nil { - t.Fatal(err1) - } + require.NoError(t, err1) lease2, err2 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30}) - if err2 != nil { - t.Fatal(err2) - } + require.NoError(t, err2) lc := integration.ToGRPC(clus.Client(0)).Lock l1, lerr1 := 
lc.Lock(context.TODO(), &lockpb.LockRequest{Name: []byte("foo"), Lease: lease1.ID})
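
For readers unfamiliar with the conversion applied throughout this patch: it replaces the manual "if err != nil { t.Fatal(err) }" pattern with testify's require.NoError(t, err), which likewise aborts the test immediately on a non-nil error. Below is a minimal, hypothetical example (not taken from the etcd tree; the file name and test are invented for illustration) showing the before and after shape of the change:

    package example_test

    import (
    	"os"
    	"testing"

    	"github.com/stretchr/testify/require"
    )

    func TestReadFile(t *testing.T) {
    	// Old style, as it appeared before this patch:
    	//	data, err := os.ReadFile("testdata/sample.txt")
    	//	if err != nil {
    	//		t.Fatal(err)
    	//	}
    	//
    	// New style: require.NoError fails the test and stops execution
    	// when err is non-nil, matching t.Fatal's behaviour.
    	data, err := os.ReadFile("testdata/sample.txt")
    	require.NoError(t, err)
    	require.NotEmpty(t, data)
    }
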