From ffb6902af7bb61b63beec9ad61450a9846e7cd96 Mon Sep 17 00:00:00 2001 From: Bryan White Date: Mon, 8 Jul 2024 10:51:44 +0200 Subject: [PATCH 01/29] refactor: difficulty in terms of target hash --- api/poktroll/proof/params.pulsar.go | 166 ++++++++++-------- e2e/tests/parse_params_test.go | 2 +- e2e/tests/update_params_test.go | 2 +- pkg/client/interface.go | 2 +- pkg/crypto/protocol/difficulty.go | 29 +-- pkg/crypto/protocol/difficulty_test.go | 51 ------ pkg/crypto/protocol/hash.go | 10 ++ pkg/crypto/rand/integer.go | 10 ++ pkg/relayer/miner/miner.go | 19 +- pkg/relayer/miner/miner_test.go | 9 +- pkg/relayer/miner/options.go | 7 +- proto/poktroll/proof/params.proto | 5 +- telemetry/event_counters.go | 4 +- .../relay_mining_difficulty_test.go | 4 +- testutil/testrelayer/relays.go | 5 +- x/proof/keeper/msg_server_submit_proof.go | 38 ++-- .../keeper/msg_server_submit_proof_test.go | 34 ++-- x/proof/keeper/msg_server_update_param.go | 10 +- .../keeper/msg_server_update_param_test.go | 15 +- x/proof/keeper/msg_update_params_test.go | 3 +- x/proof/keeper/params_test.go | 18 +- x/proof/types/message_update_param.go | 16 +- x/proof/types/message_update_param_test.go | 6 +- x/proof/types/params.go | 56 +++--- x/proof/types/params.pb.go | 103 ++++++----- x/service/types/relay.go | 16 +- .../keeper/update_relay_mining_difficulty.go | 77 +++----- .../update_relay_mining_difficulty_test.go | 19 +- x/tokenomics/module/abci.go | 9 +- 29 files changed, 393 insertions(+), 352 deletions(-) delete mode 100644 pkg/crypto/protocol/difficulty_test.go create mode 100644 pkg/crypto/protocol/hash.go diff --git a/api/poktroll/proof/params.pulsar.go b/api/poktroll/proof/params.pulsar.go index 5c642a5fb..015aafc49 100644 --- a/api/poktroll/proof/params.pulsar.go +++ b/api/poktroll/proof/params.pulsar.go @@ -18,17 +18,17 @@ import ( ) var ( - md_Params protoreflect.MessageDescriptor - fd_Params_min_relay_difficulty_bits protoreflect.FieldDescriptor - fd_Params_proof_request_probability 
protoreflect.FieldDescriptor - fd_Params_proof_requirement_threshold protoreflect.FieldDescriptor - fd_Params_proof_missing_penalty protoreflect.FieldDescriptor + md_Params protoreflect.MessageDescriptor + fd_Params_relay_difficulty_target_hash protoreflect.FieldDescriptor + fd_Params_proof_request_probability protoreflect.FieldDescriptor + fd_Params_proof_requirement_threshold protoreflect.FieldDescriptor + fd_Params_proof_missing_penalty protoreflect.FieldDescriptor ) func init() { file_poktroll_proof_params_proto_init() md_Params = File_poktroll_proof_params_proto.Messages().ByName("Params") - fd_Params_min_relay_difficulty_bits = md_Params.Fields().ByName("min_relay_difficulty_bits") + fd_Params_relay_difficulty_target_hash = md_Params.Fields().ByName("relay_difficulty_target_hash") fd_Params_proof_request_probability = md_Params.Fields().ByName("proof_request_probability") fd_Params_proof_requirement_threshold = md_Params.Fields().ByName("proof_requirement_threshold") fd_Params_proof_missing_penalty = md_Params.Fields().ByName("proof_missing_penalty") @@ -99,9 +99,9 @@ func (x *fastReflection_Params) Interface() protoreflect.ProtoMessage { // While iterating, mutating operations may only be performed // on the current field descriptor. func (x *fastReflection_Params) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { - if x.MinRelayDifficultyBits != uint64(0) { - value := protoreflect.ValueOfUint64(x.MinRelayDifficultyBits) - if !f(fd_Params_min_relay_difficulty_bits, value) { + if len(x.RelayDifficultyTargetHash) != 0 { + value := protoreflect.ValueOfBytes(x.RelayDifficultyTargetHash) + if !f(fd_Params_relay_difficulty_target_hash, value) { return } } @@ -138,8 +138,8 @@ func (x *fastReflection_Params) Range(f func(protoreflect.FieldDescriptor, proto // a repeated field is populated if it is non-empty. 
func (x *fastReflection_Params) Has(fd protoreflect.FieldDescriptor) bool { switch fd.FullName() { - case "poktroll.proof.Params.min_relay_difficulty_bits": - return x.MinRelayDifficultyBits != uint64(0) + case "poktroll.proof.Params.relay_difficulty_target_hash": + return len(x.RelayDifficultyTargetHash) != 0 case "poktroll.proof.Params.proof_request_probability": return x.ProofRequestProbability != float32(0) || math.Signbit(float64(x.ProofRequestProbability)) case "poktroll.proof.Params.proof_requirement_threshold": @@ -162,8 +162,8 @@ func (x *fastReflection_Params) Has(fd protoreflect.FieldDescriptor) bool { // Clear is a mutating operation and unsafe for concurrent use. func (x *fastReflection_Params) Clear(fd protoreflect.FieldDescriptor) { switch fd.FullName() { - case "poktroll.proof.Params.min_relay_difficulty_bits": - x.MinRelayDifficultyBits = uint64(0) + case "poktroll.proof.Params.relay_difficulty_target_hash": + x.RelayDifficultyTargetHash = nil case "poktroll.proof.Params.proof_request_probability": x.ProofRequestProbability = float32(0) case "poktroll.proof.Params.proof_requirement_threshold": @@ -186,9 +186,9 @@ func (x *fastReflection_Params) Clear(fd protoreflect.FieldDescriptor) { // of the value; to obtain a mutable reference, use Mutable. func (x *fastReflection_Params) Get(descriptor protoreflect.FieldDescriptor) protoreflect.Value { switch descriptor.FullName() { - case "poktroll.proof.Params.min_relay_difficulty_bits": - value := x.MinRelayDifficultyBits - return protoreflect.ValueOfUint64(value) + case "poktroll.proof.Params.relay_difficulty_target_hash": + value := x.RelayDifficultyTargetHash + return protoreflect.ValueOfBytes(value) case "poktroll.proof.Params.proof_request_probability": value := x.ProofRequestProbability return protoreflect.ValueOfFloat32(value) @@ -218,8 +218,8 @@ func (x *fastReflection_Params) Get(descriptor protoreflect.FieldDescriptor) pro // Set is a mutating operation and unsafe for concurrent use. 
func (x *fastReflection_Params) Set(fd protoreflect.FieldDescriptor, value protoreflect.Value) { switch fd.FullName() { - case "poktroll.proof.Params.min_relay_difficulty_bits": - x.MinRelayDifficultyBits = value.Uint() + case "poktroll.proof.Params.relay_difficulty_target_hash": + x.RelayDifficultyTargetHash = value.Bytes() case "poktroll.proof.Params.proof_request_probability": x.ProofRequestProbability = float32(value.Float()) case "poktroll.proof.Params.proof_requirement_threshold": @@ -251,8 +251,8 @@ func (x *fastReflection_Params) Mutable(fd protoreflect.FieldDescriptor) protore x.ProofMissingPenalty = new(v1beta1.Coin) } return protoreflect.ValueOfMessage(x.ProofMissingPenalty.ProtoReflect()) - case "poktroll.proof.Params.min_relay_difficulty_bits": - panic(fmt.Errorf("field min_relay_difficulty_bits of message poktroll.proof.Params is not mutable")) + case "poktroll.proof.Params.relay_difficulty_target_hash": + panic(fmt.Errorf("field relay_difficulty_target_hash of message poktroll.proof.Params is not mutable")) case "poktroll.proof.Params.proof_request_probability": panic(fmt.Errorf("field proof_request_probability of message poktroll.proof.Params is not mutable")) case "poktroll.proof.Params.proof_requirement_threshold": @@ -270,8 +270,8 @@ func (x *fastReflection_Params) Mutable(fd protoreflect.FieldDescriptor) protore // For lists, maps, and messages, this returns a new, empty, mutable value. 
func (x *fastReflection_Params) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { switch fd.FullName() { - case "poktroll.proof.Params.min_relay_difficulty_bits": - return protoreflect.ValueOfUint64(uint64(0)) + case "poktroll.proof.Params.relay_difficulty_target_hash": + return protoreflect.ValueOfBytes(nil) case "poktroll.proof.Params.proof_request_probability": return protoreflect.ValueOfFloat32(float32(0)) case "poktroll.proof.Params.proof_requirement_threshold": @@ -348,8 +348,9 @@ func (x *fastReflection_Params) ProtoMethods() *protoiface.Methods { var n int var l int _ = l - if x.MinRelayDifficultyBits != 0 { - n += 1 + runtime.Sov(uint64(x.MinRelayDifficultyBits)) + l = len(x.RelayDifficultyTargetHash) + if l > 0 { + n += 1 + l + runtime.Sov(uint64(l)) } if x.ProofRequestProbability != 0 || math.Signbit(float64(x.ProofRequestProbability)) { n += 5 @@ -415,10 +416,12 @@ func (x *fastReflection_Params) ProtoMethods() *protoiface.Methods { i-- dAtA[i] = 0x15 } - if x.MinRelayDifficultyBits != 0 { - i = runtime.EncodeVarint(dAtA, i, uint64(x.MinRelayDifficultyBits)) + if len(x.RelayDifficultyTargetHash) > 0 { + i -= len(x.RelayDifficultyTargetHash) + copy(dAtA[i:], x.RelayDifficultyTargetHash) + i = runtime.EncodeVarint(dAtA, i, uint64(len(x.RelayDifficultyTargetHash))) i-- - dAtA[i] = 0x8 + dAtA[i] = 0xa } if input.Buf != nil { input.Buf = append(input.Buf, dAtA...) 
@@ -470,10 +473,10 @@ func (x *fastReflection_Params) ProtoMethods() *protoiface.Methods { } switch fieldNum { case 1: - if wireType != 0 { - return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field MinRelayDifficultyBits", wireType) + if wireType != 2 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field RelayDifficultyTargetHash", wireType) } - x.MinRelayDifficultyBits = 0 + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow @@ -483,11 +486,26 @@ func (x *fastReflection_Params) ProtoMethods() *protoiface.Methods { } b := dAtA[iNdEx] iNdEx++ - x.MinRelayDifficultyBits |= uint64(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } + if byteLen < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if postIndex > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + x.RelayDifficultyTargetHash = append(x.RelayDifficultyTargetHash[:0], dAtA[iNdEx:postIndex]...) 
+ if x.RelayDifficultyTargetHash == nil { + x.RelayDifficultyTargetHash = []byte{} + } + iNdEx = postIndex case 2: if wireType != 5 { return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field ProofRequestProbability", wireType) @@ -608,9 +626,8 @@ type Params struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // min_relay_difficulty_bits is the minimum difficulty in bits for a relay to - // be included in a Merkle proof. - MinRelayDifficultyBits uint64 `protobuf:"varint,1,opt,name=min_relay_difficulty_bits,json=minRelayDifficultyBits,proto3" json:"min_relay_difficulty_bits,omitempty"` + // relay_difficulty is the target hash which a relay hash must be less than to be volume/reward applicable. + RelayDifficultyTargetHash []byte `protobuf:"bytes,1,opt,name=relay_difficulty_target_hash,json=relayDifficultyTargetHash,proto3" json:"relay_difficulty_target_hash,omitempty"` // proof_request_probability is the probability of a session requiring a proof // if it's cost (i.e. compute unit consumption) is below the ProofRequirementThreshold. 
ProofRequestProbability float32 `protobuf:"fixed32,2,opt,name=proof_request_probability,json=proofRequestProbability,proto3" json:"proof_request_probability,omitempty"` @@ -647,11 +664,11 @@ func (*Params) Descriptor() ([]byte, []int) { return file_poktroll_proof_params_proto_rawDescGZIP(), []int{0} } -func (x *Params) GetMinRelayDifficultyBits() uint64 { +func (x *Params) GetRelayDifficultyTargetHash() []byte { if x != nil { - return x.MinRelayDifficultyBits + return x.RelayDifficultyTargetHash } - return 0 + return nil } func (x *Params) GetProofRequestProbability() float32 { @@ -685,44 +702,45 @@ var file_poktroll_proof_params_proto_rawDesc = []byte{ 0x1a, 0x14, 0x67, 0x6f, 0x67, 0x6f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x67, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2f, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2f, 0x63, 0x6f, 0x69, 0x6e, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xaa, 0x03, 0x0a, 0x06, 0x50, 0x61, 0x72, 0x61, 0x6d, - 0x73, 0x12, 0x58, 0x0a, 0x19, 0x6d, 0x69, 0x6e, 0x5f, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x64, - 0x69, 0x66, 0x66, 0x69, 0x63, 0x75, 0x6c, 0x74, 0x79, 0x5f, 0x62, 0x69, 0x74, 0x73, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x04, 0x42, 0x1d, 0xea, 0xde, 0x1f, 0x19, 0x6d, 0x69, 0x6e, 0x5f, 0x72, 0x65, - 0x6c, 0x61, 0x79, 0x5f, 0x64, 0x69, 0x66, 0x66, 0x69, 0x63, 0x75, 0x6c, 0x74, 0x79, 0x5f, 0x62, - 0x69, 0x74, 0x73, 0x52, 0x16, 0x6d, 0x69, 0x6e, 0x52, 0x65, 0x6c, 0x61, 0x79, 0x44, 0x69, 0x66, - 0x66, 0x69, 0x63, 0x75, 0x6c, 0x74, 0x79, 0x42, 0x69, 0x74, 0x73, 0x12, 0x59, 0x0a, 0x19, 0x70, - 0x72, 0x6f, 0x6f, 0x66, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x72, 0x6f, - 0x62, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x42, 0x1d, - 0xea, 0xde, 0x1f, 0x19, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x5f, 0x70, 0x72, 0x6f, 0x62, 0x61, 0x62, 0x69, 0x6c, 0x69, 
0x74, 0x79, 0x52, 0x17, 0x70, - 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x72, 0x6f, 0x62, 0x61, - 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x5f, 0x0a, 0x1b, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x5f, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb3, 0x03, 0x0a, 0x06, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x73, 0x12, 0x61, 0x0a, 0x1c, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x64, 0x69, 0x66, 0x66, 0x69, + 0x63, 0x75, 0x6c, 0x74, 0x79, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x68, 0x61, 0x73, + 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x20, 0xea, 0xde, 0x1f, 0x1c, 0x72, 0x65, 0x6c, + 0x61, 0x79, 0x5f, 0x64, 0x69, 0x66, 0x66, 0x69, 0x63, 0x75, 0x6c, 0x74, 0x79, 0x5f, 0x74, 0x61, + 0x72, 0x67, 0x65, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x52, 0x19, 0x72, 0x65, 0x6c, 0x61, 0x79, + 0x44, 0x69, 0x66, 0x66, 0x69, 0x63, 0x75, 0x6c, 0x74, 0x79, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x48, 0x61, 0x73, 0x68, 0x12, 0x59, 0x0a, 0x19, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x5f, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x72, 0x6f, 0x62, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, + 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x42, 0x1d, 0xea, 0xde, 0x1f, 0x19, 0x70, 0x72, 0x6f, + 0x6f, 0x66, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x72, 0x6f, 0x62, 0x61, + 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x52, 0x17, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x50, 0x72, 0x6f, 0x62, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x12, + 0x5f, 0x0a, 0x1b, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x04, 0x42, 0x1f, 0xea, 0xde, 0x1f, 0x1b, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x68, 0x72, 0x65, - 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x42, 0x1f, 0xea, 0xde, 0x1f, - 0x1b, 
0x70, 0x72, 0x6f, 0x6f, 0x66, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x52, 0x19, 0x70, 0x72, - 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x54, 0x68, - 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x68, 0x0a, 0x15, 0x70, 0x72, 0x6f, 0x6f, 0x66, - 0x5f, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x65, 0x6e, 0x61, 0x6c, 0x74, 0x79, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2e, - 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x43, 0x6f, 0x69, - 0x6e, 0x42, 0x19, 0xea, 0xde, 0x1f, 0x15, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x5f, 0x6d, 0x69, 0x73, - 0x73, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x65, 0x6e, 0x61, 0x6c, 0x74, 0x79, 0x52, 0x13, 0x70, 0x72, - 0x6f, 0x6f, 0x66, 0x4d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x50, 0x65, 0x6e, 0x61, 0x6c, 0x74, - 0x79, 0x3a, 0x20, 0xe8, 0xa0, 0x1f, 0x01, 0x8a, 0xe7, 0xb0, 0x2a, 0x17, 0x70, 0x6f, 0x6b, 0x74, - 0x72, 0x6f, 0x6c, 0x6c, 0x2f, 0x78, 0x2f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x2f, 0x50, 0x61, 0x72, - 0x61, 0x6d, 0x73, 0x42, 0x9b, 0x01, 0x0a, 0x12, 0x63, 0x6f, 0x6d, 0x2e, 0x70, 0x6f, 0x6b, 0x74, - 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x42, 0x0b, 0x50, 0x61, 0x72, 0x61, - 0x6d, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x1f, 0x63, 0x6f, 0x73, 0x6d, 0x6f, - 0x73, 0x73, 0x64, 0x6b, 0x2e, 0x69, 0x6f, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x70, 0x6f, 0x6b, 0x74, - 0x72, 0x6f, 0x6c, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0xa2, 0x02, 0x03, 0x50, 0x50, 0x58, - 0xaa, 0x02, 0x0e, 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x50, 0x72, 0x6f, 0x6f, - 0x66, 0xca, 0x02, 0x0e, 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x5c, 0x50, 0x72, 0x6f, - 0x6f, 0x66, 0xe2, 0x02, 0x1a, 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x5c, 0x50, 0x72, - 0x6f, 0x6f, 0x66, 0x5c, 0x47, 0x50, 0x42, 
0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, - 0x02, 0x0f, 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x3a, 0x3a, 0x50, 0x72, 0x6f, 0x6f, - 0x66, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x52, 0x19, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, + 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, + 0x12, 0x68, 0x0a, 0x15, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x5f, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6e, + 0x67, 0x5f, 0x70, 0x65, 0x6e, 0x61, 0x6c, 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, + 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x43, 0x6f, 0x69, 0x6e, 0x42, 0x19, 0xea, 0xde, 0x1f, 0x15, + 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x5f, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x65, + 0x6e, 0x61, 0x6c, 0x74, 0x79, 0x52, 0x13, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x4d, 0x69, 0x73, 0x73, + 0x69, 0x6e, 0x67, 0x50, 0x65, 0x6e, 0x61, 0x6c, 0x74, 0x79, 0x3a, 0x20, 0xe8, 0xa0, 0x1f, 0x01, + 0x8a, 0xe7, 0xb0, 0x2a, 0x17, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2f, 0x78, 0x2f, + 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x2f, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x9b, 0x01, 0x0a, + 0x12, 0x63, 0x6f, 0x6d, 0x2e, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x70, 0x72, + 0x6f, 0x6f, 0x66, 0x42, 0x0b, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x50, 0x01, 0x5a, 0x1f, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x73, 0x64, 0x6b, 0x2e, 0x69, 0x6f, + 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2f, 0x70, 0x72, + 0x6f, 0x6f, 0x66, 0xa2, 0x02, 0x03, 0x50, 0x50, 0x58, 0xaa, 0x02, 0x0e, 0x50, 0x6f, 0x6b, 0x74, + 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0xca, 0x02, 0x0e, 0x50, 0x6f, 0x6b, + 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x5c, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0xe2, 0x02, 0x1a, 0x50, 0x6f, + 0x6b, 0x74, 0x72, 0x6f, 
0x6c, 0x6c, 0x5c, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x5c, 0x47, 0x50, 0x42, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0f, 0x50, 0x6f, 0x6b, 0x74, 0x72, + 0x6f, 0x6c, 0x6c, 0x3a, 0x3a, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( diff --git a/e2e/tests/parse_params_test.go b/e2e/tests/parse_params_test.go index 7e9a3a8c6..fd2b71902 100644 --- a/e2e/tests/parse_params_test.go +++ b/e2e/tests/parse_params_test.go @@ -132,7 +132,7 @@ func (s *suite) newProofMsgUpdateParams(params paramsMap) cosmostypes.Msg { for paramName, paramValue := range params { switch paramName { - case prooftypes.ParamMinRelayDifficultyBits: + case prooftypes.ParamRelayDifficultyTargetHash: msgUpdateParams.Params.MinRelayDifficultyBits = uint64(paramValue.value.(int64)) case prooftypes.ParamProofRequestProbability: msgUpdateParams.Params.ProofRequestProbability = paramValue.value.(float32) diff --git a/e2e/tests/update_params_test.go b/e2e/tests/update_params_test.go index 6e618997d..1fa575df1 100644 --- a/e2e/tests/update_params_test.go +++ b/e2e/tests/update_params_test.go @@ -370,7 +370,7 @@ func (s *suite) assertExpectedModuleParamsUpdated(moduleName string) { params := prooftypes.DefaultParams() paramsMap := s.expectedModuleParams[moduleName] - minRelayDifficultyBits, ok := paramsMap[prooftypes.ParamMinRelayDifficultyBits] + minRelayDifficultyBits, ok := paramsMap[prooftypes.ParamRelayDifficultyTargetHash] if ok { params.MinRelayDifficultyBits = uint64(minRelayDifficultyBits.value.(int64)) } diff --git a/pkg/client/interface.go b/pkg/client/interface.go index ff5375596..a5cec3ae8 100644 --- a/pkg/client/interface.go +++ b/pkg/client/interface.go @@ -335,7 +335,7 @@ type BlockQueryClient interface { // protobuf message. Since the generated go types don't include interface types, this // is necessary to prevent dependency cycles. 
type ProofParams interface { - GetMinRelayDifficultyBits() uint64 + GetRelayDifficultyTargetHash() []byte GetProofRequestProbability() float32 GetProofRequirementThreshold() uint64 GetProofMissingPenalty() *cosmostypes.Coin diff --git a/pkg/crypto/protocol/difficulty.go b/pkg/crypto/protocol/difficulty.go index 8901f0758..6cf5f8930 100644 --- a/pkg/crypto/protocol/difficulty.go +++ b/pkg/crypto/protocol/difficulty.go @@ -1,20 +1,25 @@ package protocol import ( - "encoding/binary" - "math/bits" + "crypto/sha256" + "encoding/hex" + "math/big" ) -// CountHashDifficultyBits returns the number of leading zero bits in the given byte slice. -// TODO_MAINNET: Consider generalizing difficulty to a target hash. See: +// Difficulty1Hash represents the "easiest" difficulty. +var ( + Difficulty1HashHex = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + Difficulty1HashBz, _ = hex.DecodeString(Difficulty1HashHex) + Difficulty1HashInt = new(big.Int).SetBytes(Difficulty1HashBz) +) + +// GetDifficultyFromHash returns the "difficulty" of the given hash, with respect +// to the "highest" target hash, Difficulty1Hash. // - https://bitcoin.stackexchange.com/questions/107976/bitcoin-difficulty-why-leading-0s // - https://bitcoin.stackexchange.com/questions/121920/is-it-always-possible-to-find-a-number-whose-hash-starts-with-a-certain-number-o -// - https://github.com/pokt-network/poktroll/pull/656/files#r1666712528 -func CountHashDifficultyBits(bz [32]byte) int { - // Using BigEndian for contiguous bit/byte ordering such leading zeros - // accumulate across adjacent bytes. - // E.g.: []byte{0, 0b00111111, 0x00, 0x00} has 10 leading zero bits. If - // LittleEndian were applied instead, it would have 18 leading zeros because it would - // look like []byte{0, 0, 0b00111111, 0}. 
- return bits.LeadingZeros64(binary.BigEndian.Uint64(bz[:])) +func GetDifficultyFromHash(hashBz [sha256.Size]byte) int64 { + hashInt := new(big.Int).SetBytes(hashBz[:]) + + // difficulty is the ratio of the highest target hash to the given hash. + return new(big.Int).Div(Difficulty1HashInt, hashInt).Int64() } diff --git a/pkg/crypto/protocol/difficulty_test.go b/pkg/crypto/protocol/difficulty_test.go deleted file mode 100644 index 90a9a2367..000000000 --- a/pkg/crypto/protocol/difficulty_test.go +++ /dev/null @@ -1,51 +0,0 @@ -package protocol_test - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/pokt-network/poktroll/pkg/crypto/protocol" -) - -func TestCountDifficultyBits(t *testing.T) { - tests := []struct { - bz []byte - difficulty int - }{ - { - bz: []byte{0b11111111}, - difficulty: 0, - }, - { - bz: []byte{0b01111111}, - difficulty: 1, - }, - { - bz: []byte{0, 255}, - difficulty: 8, - }, - { - bz: []byte{0, 0b01111111}, - difficulty: 9, - }, - { - bz: []byte{0, 0b00111111}, - difficulty: 10, - }, - { - bz: []byte{0, 0, 255}, - difficulty: 16, - }, - } - - for _, test := range tests { - t.Run(fmt.Sprintf("difficulty_%d_zero_bits", test.difficulty), func(t *testing.T) { - var bz [32]byte - copy(bz[:], test.bz) - actualDifficulty := protocol.CountHashDifficultyBits(bz) - require.Equal(t, test.difficulty, actualDifficulty) - }) - } -} diff --git a/pkg/crypto/protocol/hash.go b/pkg/crypto/protocol/hash.go new file mode 100644 index 000000000..bd3203437 --- /dev/null +++ b/pkg/crypto/protocol/hash.go @@ -0,0 +1,10 @@ +package protocol + +import "crypto/sha256" + +// GetHashFromBytes returns the hash of the relay (full, request or response) bytes. +// It is used as helper in the case that the relay is already marshaled and +// centralizes the hasher used. 
+func GetHashFromBytes(relayBz []byte) [sha256.Size]byte { + return sha256.Sum256(relayBz) +} diff --git a/pkg/crypto/rand/integer.go b/pkg/crypto/rand/integer.go index 683211ed0..9c72bc483 100644 --- a/pkg/crypto/rand/integer.go +++ b/pkg/crypto/rand/integer.go @@ -17,6 +17,16 @@ import ( func SeededInt63(seedParts ...[]byte) int64 { seedHashInputBz := bytes.Join(append([][]byte{}, seedParts...), nil) seedHash := crypto.Sha256(seedHashInputBz) + + // TODO_MAINNET: To support other language implementations of the protocol, the + // pseudo-random number generator used here should be language-agnostic (i.e. not + // golang specific). + // + // Additionally, there is a precision loss here when converting the hash to an int64. + // Since the math/rand.Source interface only supports int64 seeds, we are forced to + // truncate the hash to 64 bits. This is not ideal, as it reduces the entropy of the + // seed. We should consider using a different random number generator that supports + // byte array seeds. seed, _ := binary.Varint(seedHash) return rand.NewSource(seed).Int63() diff --git a/pkg/relayer/miner/miner.go b/pkg/relayer/miner/miner.go index c474d9c15..a21aee91e 100644 --- a/pkg/relayer/miner/miner.go +++ b/pkg/relayer/miner/miner.go @@ -1,6 +1,7 @@ package miner import ( + "bytes" "context" "cosmossdk.io/depinject" @@ -25,9 +26,11 @@ type miner struct { // proofQueryClient is used to query for the minimum relay difficulty. proofQueryClient client.ProofQueryClient - // relayDifficultyBits is the minimum difficulty that a relay must have to be - // volume / reward applicable. - relayDifficultyBits uint64 + // relay_difficulty is the target hash which a relay hash must be less than to be volume/reward applicable. + // + // TODO_MAINNET(#543): This is populated by querying the corresponding on-chain parameter during construction. + // If this parameter is updated on-chain the relayminer will need to be restarted to query the new value. 
+ relayDifficultyTargetHash []byte } // NewMiner creates a new miner from the given dependencies and options. It @@ -37,7 +40,7 @@ type miner struct { // - ProofQueryClient // // Available options: -// - WithDifficulty +// - WithRelayDifficultyTargetHash func NewMiner( deps depinject.Config, opts ...relayer.MinerOption, @@ -91,8 +94,8 @@ func (mnr *miner) setDefaults() error { return err } - if mnr.relayDifficultyBits == 0 { - mnr.relayDifficultyBits = params.GetMinRelayDifficultyBits() + if len(mnr.relayDifficultyTargetHash) == 0 { + mnr.relayDifficultyTargetHash = params.GetRelayDifficultyTargetHash() } return nil } @@ -112,10 +115,10 @@ func (mnr *miner) mapMineRelay( if err != nil { return either.Error[*relayer.MinedRelay](err), false } - relayHash := servicetypes.GetHashFromBytes(relayBz) + relayHash := protocol.GetHashFromBytes(relayBz) // The relay IS NOT volume / reward applicable - if uint64(protocol.CountHashDifficultyBits(relayHash)) < mnr.relayDifficultyBits { + if bytes.Compare(relayHash[:], mnr.relayDifficultyTargetHash) == 1 { return either.Success[*relayer.MinedRelay](nil), true } diff --git a/pkg/relayer/miner/miner_test.go b/pkg/relayer/miner/miner_test.go index 34fb99d43..8515d26c8 100644 --- a/pkg/relayer/miner/miner_test.go +++ b/pkg/relayer/miner/miner_test.go @@ -15,6 +15,7 @@ import ( "cosmossdk.io/depinject" "github.com/stretchr/testify/require" + "github.com/pokt-network/poktroll/pkg/crypto/protocol" "github.com/pokt-network/poktroll/pkg/observable/channel" "github.com/pokt-network/poktroll/pkg/relayer" "github.com/pokt-network/poktroll/pkg/relayer/miner" @@ -22,11 +23,11 @@ import ( servicetypes "github.com/pokt-network/poktroll/x/service/types" ) -const testDifficulty = uint64(16) +var testTargetHash, _ = hex.DecodeString("0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") // TestMiner_MinedRelays constructs an observable of mined relays, through which // it pipes pre-mined relay fixtures. 
It asserts that the observable only emits -// mined relays with difficulty equal to or greater than testDifficulty. +// mined relays with difficulty equal to or greater than testTargetHash. // // To regenerate all fixtures, use `make go_testgen_fixtures`; to regenerate only this // test's fixtures run `go generate ./pkg/relayer/miner/miner_test.go`. @@ -42,7 +43,7 @@ func TestMiner_MinedRelays(t *testing.T) { proofQueryClientMock := testqueryclients.NewTestProofQueryClient(t) deps := depinject.Supply(proofQueryClientMock) - mnr, err := miner.NewMiner(deps, miner.WithDifficulty(testDifficulty)) + mnr, err := miner.NewMiner(deps, miner.WithRelayDifficultyTargetHash(testTargetHash)) require.NoError(t, err) minedRelays := mnr.MinedRelays(ctx, mockRelaysObs) @@ -134,7 +135,7 @@ func unmarshalHexMinedRelay( require.NoError(t, err) // TODO_TECHDEBT(@red-0ne, #446): Centralize the configuration for the SMT spec. - relayHashArr := servicetypes.GetHashFromBytes(relayBz) + relayHashArr := protocol.GetHashFromBytes(relayBz) relayHash := relayHashArr[:] return &relayer.MinedRelay{ diff --git a/pkg/relayer/miner/options.go b/pkg/relayer/miner/options.go index da87dc27f..ebee97c40 100644 --- a/pkg/relayer/miner/options.go +++ b/pkg/relayer/miner/options.go @@ -2,10 +2,9 @@ package miner import "github.com/pokt-network/poktroll/pkg/relayer" -// WithDifficulty sets the difficulty of the miner, where difficultyBytes is the -// minimum number of leading zero bytes. -func WithDifficulty(difficultyBits uint64) relayer.MinerOption { +// WithRelayDifficultyTargetHash sets the relayDifficultyTargetHash of the miner. 
+func WithRelayDifficultyTargetHash(targetHash []byte) relayer.MinerOption { return func(mnr relayer.Miner) { - mnr.(*miner).relayDifficultyBits = difficultyBits + mnr.(*miner).relayDifficultyTargetHash = targetHash } } diff --git a/proto/poktroll/proof/params.proto b/proto/poktroll/proof/params.proto index 0183990c6..ab1d6ac97 100644 --- a/proto/poktroll/proof/params.proto +++ b/proto/poktroll/proof/params.proto @@ -12,9 +12,8 @@ message Params { option (amino.name) = "poktroll/x/proof/Params"; option (gogoproto.equal) = true; - // min_relay_difficulty_bits is the minimum difficulty in bits for a relay to - // be included in a Merkle proof. - uint64 min_relay_difficulty_bits = 1 [(gogoproto.jsontag) = "min_relay_difficulty_bits"]; + // relay_difficulty is the target hash which a relay hash must be less than to be volume/reward applicable. + bytes relay_difficulty_target_hash = 1 [(gogoproto.jsontag) = "relay_difficulty_target_hash"]; // proof_request_probability is the probability of a session requiring a proof // if it's cost (i.e. compute unit consumption) is below the ProofRequirementThreshold. diff --git a/telemetry/event_counters.go b/telemetry/event_counters.go index 9b95af5b5..ff23d4921 100644 --- a/telemetry/event_counters.go +++ b/telemetry/event_counters.go @@ -150,7 +150,7 @@ func ClaimCounter( // RelayMiningDifficultyGauge sets a gauge which tracks the relay mining difficulty, // which is represented by number of leading zero bits. // The serviceId is used as a label to be able to track the difficulty for each service. 
-func RelayMiningDifficultyGauge(numbLeadingZeroBits int, serviceId string) { +func RelayMiningDifficultyGauge(difficulty int64, serviceId string) { labels := []metrics.Label{ {Name: "type", Value: "relay_mining_difficulty"}, {Name: "service_id", Value: serviceId}, @@ -158,7 +158,7 @@ func RelayMiningDifficultyGauge(numbLeadingZeroBits int, serviceId string) { telemetry.SetGaugeWithLabels( []string{eventTypeMetricKeyGauge}, - float32(numbLeadingZeroBits), + float32(difficulty), labels, ) } diff --git a/tests/integration/tokenomics/relay_mining_difficulty_test.go b/tests/integration/tokenomics/relay_mining_difficulty_test.go index 1b543c00a..bf28e6c86 100644 --- a/tests/integration/tokenomics/relay_mining_difficulty_test.go +++ b/tests/integration/tokenomics/relay_mining_difficulty_test.go @@ -120,8 +120,8 @@ func TestUpdateRelayMiningDifficulty_NewServiceSeenForTheFirstTime(t *testing.T) relayMiningEvent := relayMiningEvents[0] require.Equal(t, "svc1", relayMiningEvent.ServiceId) // The default difficulty) - require.Equal(t, "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", relayMiningEvent.PrevTargetHashHexEncoded) - require.Equal(t, "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", relayMiningEvent.NewTargetHashHexEncoded) + require.Equal(t, prooftypes.DefaultRelayDifficultyTargetHashHex, relayMiningEvent.PrevTargetHashHexEncoded) + require.Equal(t, prooftypes.DefaultRelayDifficultyTargetHashHex, relayMiningEvent.NewTargetHashHexEncoded) // The previous EMA is the same as the current one if the service is new require.Equal(t, uint64(1), relayMiningEvent.PrevNumRelaysEma) require.Equal(t, uint64(1), relayMiningEvent.NewNumRelaysEma) diff --git a/testutil/testrelayer/relays.go b/testutil/testrelayer/relays.go index ff2dfb54f..8cc870b42 100644 --- a/testutil/testrelayer/relays.go +++ b/testutil/testrelayer/relays.go @@ -12,6 +12,7 @@ import ( "github.com/stretchr/testify/require" "github.com/pokt-network/poktroll/pkg/crypto" + 
"github.com/pokt-network/poktroll/pkg/crypto/protocol" "github.com/pokt-network/poktroll/pkg/relayer" testutilkeyring "github.com/pokt-network/poktroll/testutil/testkeyring" servicetypes "github.com/pokt-network/poktroll/x/service/types" @@ -56,7 +57,7 @@ func NewUnsignedMinedRelay( relayBz, err := relay.Marshal() require.NoError(t, err) - relayHashArr := servicetypes.GetHashFromBytes(relayBz) + relayHashArr := protocol.GetHashFromBytes(relayBz) relayHash := relayHashArr[:] return &relayer.MinedRelay{ @@ -110,7 +111,7 @@ func NewSignedMinedRelay( relayBz, err := relay.Marshal() require.NoError(t, err) - relayHashArr := servicetypes.GetHashFromBytes(relayBz) + relayHashArr := protocol.GetHashFromBytes(relayBz) relayHash := relayHashArr[:] return &relayer.MinedRelay{ diff --git a/x/proof/keeper/msg_server_submit_proof.go b/x/proof/keeper/msg_server_submit_proof.go index e462022b0..50c0a785d 100644 --- a/x/proof/keeper/msg_server_submit_proof.go +++ b/x/proof/keeper/msg_server_submit_proof.go @@ -7,6 +7,7 @@ package keeper import ( "bytes" "context" + "crypto/sha256" "fmt" cosmoscryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" @@ -207,7 +208,7 @@ func (k msgServer) SubmitProof( params := k.GetParams(ctx) // Verify the relay difficulty is above the minimum required to earn rewards. - if err = validateMiningDifficulty(relayBz, params.MinRelayDifficultyBits); err != nil { + if err = validateRelayDifficulty(relayBz, params.RelayDifficultyTargetHash); err != nil { return nil, status.Error(codes.FailedPrecondition, err.Error()) } logger.Debug("successfully validated relay mining difficulty") @@ -224,11 +225,6 @@ func (k msgServer) SubmitProof( } logger.Debug("successfully validated proof path") - // Verify the relay's difficulty. 
- if err = validateMiningDifficulty(relayBz, params.MinRelayDifficultyBits); err != nil { - return nil, status.Error(codes.FailedPrecondition, err.Error()) - } - // Retrieve the corresponding claim for the proof submitted so it can be // used in the proof validation below. claim, err = k.queryAndValidateClaimForProof(ctx, msg) @@ -447,21 +443,35 @@ func verifyClosestProof( return nil } -// validateMiningDifficulty ensures that the relay's mining difficulty meets the +// validateRelayDifficulty ensures that the relay's mining difficulty meets the // required minimum threshold. // TODO_TECHDEBT: Factor out the relay mining difficulty validation into a shared // function that can be used by both the proof and the miner packages. -func validateMiningDifficulty(relayBz []byte, minRelayDifficultyBits uint64) error { - relayHash := servicetypes.GetHashFromBytes(relayBz) - relayDifficultyBits := protocol.CountHashDifficultyBits(relayHash) +func validateRelayDifficulty(relayBz []byte, targetHash []byte) error { + relayHash := protocol.GetHashFromBytes(relayBz) + + if len(targetHash) != sha256.Size { + return types.ErrProofInvalidRelay.Wrapf( + "invalid RelayDifficultyTargetHash: (%x); length wanted: %d; got: %d", + targetHash, + 32, + len(targetHash), + ) + } + + var targetHashArr [sha256.Size]byte + copy(targetHashArr[:], targetHash) // TODO_MAINNET: Devise a test that tries to attack the network and ensure that there // is sufficient telemetry. - if uint64(relayDifficultyBits) < minRelayDifficultyBits { + // NB: If relayHash > targetHash, then the difficulty is less than the target difficulty. 
+ if bytes.Compare(relayHash[:], targetHash[:]) == 1 { + relayDifficulty := protocol.GetDifficultyFromHash(relayHash) + targetDifficulty := protocol.GetDifficultyFromHash(targetHashArr) return types.ErrProofInvalidRelay.Wrapf( - "relay difficulty %d is less than the minimum difficulty %d", - relayDifficultyBits, - minRelayDifficultyBits, + "relay difficulty %d is less than the target difficulty %d", + relayDifficulty, + targetDifficulty, ) } diff --git a/x/proof/keeper/msg_server_submit_proof_test.go b/x/proof/keeper/msg_server_submit_proof_test.go index c66c27d11..23cf008b5 100644 --- a/x/proof/keeper/msg_server_submit_proof_test.go +++ b/x/proof/keeper/msg_server_submit_proof_test.go @@ -2,6 +2,8 @@ package keeper_test import ( "context" + "crypto/sha256" + "encoding/hex" "os" "testing" @@ -44,11 +46,11 @@ var ( expectedMerkleProofPath []byte // testProofParams sets: - // - the minimum relay difficulty bits to zero so that these tests don't need to mine for valid relays. + // - the relay difficulty target hash to the easiest difficulty so that these tests don't need to mine for valid relays. // - the proof request probability to 1 so that all test sessions require a proof. testProofParams = prooftypes.Params{ - MinRelayDifficultyBits: 0, - ProofRequestProbability: 1, + RelayDifficultyTargetHash: protocol.Difficulty1HashBz, + ProofRequestProbability: 1, } ) @@ -528,7 +530,7 @@ func TestMsgServer_SubmitProof_Error(t *testing.T) { ) // Compute the difficulty in bits of the closest relay from the valid session tree. - validClosestRelayDifficultyBits := getClosestRelayDifficultyBits(t, validSessionTree, expectedMerkleProofPath) + validClosestRelayDifficultyBits := getClosestRelayDifficulty(t, validSessionTree, expectedMerkleProofPath) // Copy `emptyBlockHash` to `wrongClosestProofPath` to with a missing byte // so the closest proof is invalid (i.e. unmarshalable). 
@@ -565,6 +567,11 @@ func TestMsgServer_SubmitProof_Error(t *testing.T) { copy(wrongClosestProofPath, expectedMerkleProofPath) copy(wrongClosestProofPath, "wrong closest proof path") + lowTargetHash, _ := hex.DecodeString("00000000000000000000000000000000000000000000000000000000000000ff") + var lowTargetHashArr [sha256.Size]byte + copy(lowTargetHashArr[:], lowTargetHash) + highExpectedTargetDifficulty := protocol.GetDifficultyFromHash(lowTargetHashArr) + tests := []struct { desc string newProofMsg func(t *testing.T) *prooftypes.MsgSubmitProof @@ -1019,7 +1026,7 @@ func TestMsgServer_SubmitProof_Error(t *testing.T) { // Set the minimum relay difficulty to a non-zero value such that the relays // constructed by the test helpers have a negligable chance of being valid. err = keepers.Keeper.SetParams(ctx, prooftypes.Params{ - MinRelayDifficultyBits: 10, + RelayDifficultyTargetHash: lowTargetHash, }) require.NoError(t, err) @@ -1041,9 +1048,9 @@ func TestMsgServer_SubmitProof_Error(t *testing.T) { expectedErr: status.Error( codes.FailedPrecondition, prooftypes.ErrProofInvalidRelay.Wrapf( - "relay difficulty %d is less than the minimum difficulty %d", + "relay difficulty %d is less than the target difficulty %d", validClosestRelayDifficultyBits, - 10, + highExpectedTargetDifficulty, ).Error(), ), }, @@ -1400,14 +1407,14 @@ func createClaimAndStoreBlockHash( return claimRes.GetClaim() } -// getClosestRelayDifficultyBits returns the number of leading 0s (i.e. relay -// mining difficulty bits) in the relayHash stored in the sessionTree that is -// is closest to the merkle proof path provided. -func getClosestRelayDifficultyBits( +// getClosestRelayDifficulty returns the mining difficulty number which corresponds +// to the relayHash stored in the sessionTree that is closest to the merkle proof +// path provided. 
+func getClosestRelayDifficulty( t *testing.T, sessionTree relayer.SessionTree, closestMerkleProofPath []byte, -) uint64 { +) int64 { // Retrieve a merkle proof that is closest to the path provided closestMerkleProof, err := sessionTree.ProveClosest(closestMerkleProofPath) require.NoError(t, err) @@ -1422,6 +1429,5 @@ func getClosestRelayDifficultyBits( relayHash, err := relay.GetHash() require.NoError(t, err) - // Count the number of leading 0s in the relay hash to determine its difficulty. - return uint64(protocol.CountHashDifficultyBits(relayHash)) + return protocol.GetDifficultyFromHash(relayHash) } diff --git a/x/proof/keeper/msg_server_update_param.go b/x/proof/keeper/msg_server_update_param.go index 7a1faa50b..6b941068d 100644 --- a/x/proof/keeper/msg_server_update_param.go +++ b/x/proof/keeper/msg_server_update_param.go @@ -23,18 +23,18 @@ func (k msgServer) UpdateParam( params := k.GetParams(ctx) switch msg.Name { - case types.ParamMinRelayDifficultyBits: - value, ok := msg.AsType.(*types.MsgUpdateParam_AsInt64) + case types.ParamRelayDifficultyTargetHash: + value, ok := msg.AsType.(*types.MsgUpdateParam_AsBytes) if !ok { return nil, types.ErrProofParamInvalid.Wrapf("unsupported value type for %s param: %T", msg.Name, msg.AsType) } - minRelayDifficultyBits := uint64(value.AsInt64) + relayDifficultyTargetHash := value.AsBytes - if err := types.ValidateMinRelayDifficultyBits(minRelayDifficultyBits); err != nil { + if err := types.ValidateRelayDifficultyTargetHash(relayDifficultyTargetHash); err != nil { return nil, err } - params.MinRelayDifficultyBits = minRelayDifficultyBits + params.RelayDifficultyTargetHash = relayDifficultyTargetHash case types.ParamProofRequestProbability: value, ok := msg.AsType.(*types.MsgUpdateParam_AsFloat) if !ok { diff --git a/x/proof/keeper/msg_server_update_param_test.go b/x/proof/keeper/msg_server_update_param_test.go index 743e2ea8d..243de80ea 100644 --- a/x/proof/keeper/msg_server_update_param_test.go +++ 
b/x/proof/keeper/msg_server_update_param_test.go @@ -1,6 +1,7 @@ package keeper_test import ( + "encoding/hex" "testing" "cosmossdk.io/math" @@ -15,7 +16,7 @@ import ( ) func TestMsgUpdateParam_UpdateMinRelayDifficultyBitsOnly(t *testing.T) { - var expectedMinRelayDifficultyBits uint64 = 8 + expectedRelayDifficultyTargetHash, _ := hex.DecodeString("0000000000000000ffffffffffffffffffffffffffffffffffffffffffffffff") // Set the parameters to their default values k, msgSrv, ctx := setupMsgServer(t) @@ -23,22 +24,22 @@ func TestMsgUpdateParam_UpdateMinRelayDifficultyBitsOnly(t *testing.T) { require.NoError(t, k.SetParams(ctx, defaultParams)) // Ensure the default values are different from the new values we want to set - require.NotEqual(t, expectedMinRelayDifficultyBits, defaultParams.MinRelayDifficultyBits) + require.NotEqual(t, expectedRelayDifficultyTargetHash, defaultParams.RelayDifficultyTargetHash) // Update the min relay difficulty bits updateParamMsg := &prooftypes.MsgUpdateParam{ Authority: authtypes.NewModuleAddress(govtypes.ModuleName).String(), - Name: prooftypes.ParamMinRelayDifficultyBits, - AsType: &prooftypes.MsgUpdateParam_AsInt64{AsInt64: int64(expectedMinRelayDifficultyBits)}, + Name: prooftypes.ParamRelayDifficultyTargetHash, + AsType: &prooftypes.MsgUpdateParam_AsBytes{AsBytes: expectedRelayDifficultyTargetHash}, } res, err := msgSrv.UpdateParam(ctx, updateParamMsg) require.NoError(t, err) - require.NotEqual(t, defaultParams.MinRelayDifficultyBits, res.Params.MinRelayDifficultyBits) - require.Equal(t, expectedMinRelayDifficultyBits, res.Params.MinRelayDifficultyBits) + require.NotEqual(t, defaultParams.RelayDifficultyTargetHash, res.Params.RelayDifficultyTargetHash) + require.Equal(t, expectedRelayDifficultyTargetHash, res.Params.RelayDifficultyTargetHash) // Ensure the other parameters are unchanged - testkeeper.AssertDefaultParamsEqualExceptFields(t, &defaultParams, res.Params, "MinRelayDifficultyBits") + 
testkeeper.AssertDefaultParamsEqualExceptFields(t, &defaultParams, res.Params, "RelayDifficultyTargetHash") } func TestMsgUpdateParam_UpdateProofRequestProbabilityOnly(t *testing.T) { diff --git a/x/proof/keeper/msg_update_params_test.go b/x/proof/keeper/msg_update_params_test.go index 5e2a528a6..f5ce27e29 100644 --- a/x/proof/keeper/msg_update_params_test.go +++ b/x/proof/keeper/msg_update_params_test.go @@ -42,7 +42,8 @@ func TestMsgUpdateParams(t *testing.T) { params: &types.MsgUpdateParams{ Authority: k.GetAuthority(), Params: types.Params{ - ProofMissingPenalty: &types.DefaultProofMissingPenalty, + ProofMissingPenalty: &types.DefaultProofMissingPenalty, + RelayDifficultyTargetHash: types.DefaultRelayDifficultyTargetHash, }, }, shouldError: false, diff --git a/x/proof/keeper/params_test.go b/x/proof/keeper/params_test.go index 1d7c92945..6c281b639 100644 --- a/x/proof/keeper/params_test.go +++ b/x/proof/keeper/params_test.go @@ -20,24 +20,24 @@ func TestGetParams(t *testing.T) { } func TestParams_ValidateMinRelayDifficulty(t *testing.T) { tests := []struct { - desc string - minRelayDifficultyBits any - expectedErr error + desc string + relayDifficultyTargetHash any + expectedErr error }{ { - desc: "invalid type", - minRelayDifficultyBits: int64(-1), - expectedErr: prooftypes.ErrProofParamInvalid.Wrapf("invalid parameter type: int64"), + desc: "invalid type", + relayDifficultyTargetHash: int64(-1), + expectedErr: prooftypes.ErrProofParamInvalid.Wrapf("invalid parameter type: int64"), }, { - desc: "valid MinRelayDifficultyBits", - minRelayDifficultyBits: uint64(4), + desc: "valid RelayDifficultyTargetHash", + relayDifficultyTargetHash: prooftypes.DefaultRelayDifficultyTargetHash, }, } for _, tt := range tests { t.Run(tt.desc, func(t *testing.T) { - err := prooftypes.ValidateMinRelayDifficultyBits(tt.minRelayDifficultyBits) + err := prooftypes.ValidateRelayDifficultyTargetHash(tt.relayDifficultyTargetHash) if tt.expectedErr != nil { require.Error(t, err) 
require.Contains(t, err.Error(), tt.expectedErr.Error()) diff --git a/x/proof/types/message_update_param.go b/x/proof/types/message_update_param.go index c2271453b..b9a0b4621 100644 --- a/x/proof/types/message_update_param.go +++ b/x/proof/types/message_update_param.go @@ -47,8 +47,8 @@ func (msg *MsgUpdateParam) ValidateBasic() error { // Parameter name must be supported by this module. switch msg.Name { - case ParamMinRelayDifficultyBits: - return msg.paramTypeIsInt64() + case ParamRelayDifficultyTargetHash: + return msg.paramTypeIsBytes() case ParamProofRequestProbability: return msg.paramTypeIsFloat() case ParamProofRequirementThreshold: @@ -60,6 +60,18 @@ func (msg *MsgUpdateParam) ValidateBasic() error { } } +// paramTypeIsBytes checks if the parameter type is a byte slice, returning an error if not. +func (msg *MsgUpdateParam) paramTypeIsBytes() error { + if _, ok := msg.AsType.(*MsgUpdateParam_AsBytes); !ok { + return ErrProofParamInvalid.Wrapf( + "invalid type for param %q expected %T, got %T", + msg.Name, &MsgUpdateParam_AsBytes{}, + msg.AsType, + ) + } + return nil +} + // paramTypeIsInt64 checks if the parameter type is int64, returning an error if not. 
func (msg *MsgUpdateParam) paramTypeIsInt64() error { if _, ok := msg.AsType.(*MsgUpdateParam_AsInt64); !ok { diff --git a/x/proof/types/message_update_param_test.go b/x/proof/types/message_update_param_test.go index 1fd4793a8..42a2242df 100644 --- a/x/proof/types/message_update_param_test.go +++ b/x/proof/types/message_update_param_test.go @@ -37,7 +37,7 @@ func TestMsgUpdateParam_ValidateBasic(t *testing.T) { name: "invalid: incorrect param type", msg: MsgUpdateParam{ Authority: sample.AccAddress(), - Name: ParamMinRelayDifficultyBits, + Name: ParamRelayDifficultyTargetHash, AsType: &MsgUpdateParam_AsString{AsString: "invalid"}, }, expectedErr: ErrProofParamInvalid, @@ -45,8 +45,8 @@ func TestMsgUpdateParam_ValidateBasic(t *testing.T) { name: "valid: correct authority, param name, and type", msg: MsgUpdateParam{ Authority: sample.AccAddress(), - Name: ParamMinRelayDifficultyBits, - AsType: &MsgUpdateParam_AsInt64{AsInt64: 1}, + Name: ParamRelayDifficultyTargetHash, + AsType: &MsgUpdateParam_AsBytes{AsBytes: DefaultRelayDifficultyTargetHash}, }, expectedErr: nil, diff --git a/x/proof/types/params.go b/x/proof/types/params.go index baed0ad3b..dfabfec13 100644 --- a/x/proof/types/params.go +++ b/x/proof/types/params.go @@ -1,6 +1,8 @@ package types import ( + "encoding/hex" + "cosmossdk.io/math" cosmostypes "github.com/cosmos/cosmos-sdk/types" paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" @@ -13,18 +15,19 @@ var ( _ client.ProofParams = (*Params)(nil) _ paramtypes.ParamSet = (*Params)(nil) - KeyMinRelayDifficultyBits = []byte("MinRelayDifficultyBits") - ParamMinRelayDifficultyBits = "min_relay_difficulty_bits" - DefaultMinRelayDifficultyBits uint64 = 0 // TODO_MAINNET(#142, #401): Determine the default value. 
- KeyProofRequestProbability = []byte("ProofRequestProbability") - ParamProofRequestProbability = "proof_request_probability" - DefaultProofRequestProbability float32 = 0.25 // See: https://github.com/pokt-network/pocket-core/blob/staging/docs/proposals/probabilistic_proofs.md - KeyProofRequirementThreshold = []byte("ProofRequirementThreshold") - ParamProofRequirementThreshold = "proof_requirement_threshold" - DefaultProofRequirementThreshold uint64 = 20 // See: https://github.com/pokt-network/pocket-core/blob/staging/docs/proposals/probabilistic_proofs.md - KeyProofMissingPenalty = []byte("ProofMissingPenalty") - ParamProofMissingPenalty = "proof_missing_penalty" - DefaultProofMissingPenalty = cosmostypes.NewCoin(volatile.DenomuPOKT, math.NewInt(320)) // See: https://github.com/pokt-network/pocket-core/blob/staging/docs/proposals/probabilistic_proofs.md + KeyMinRelayDifficultyBits = []byte("MinRelayDifficultyBits") + ParamRelayDifficultyTargetHash = "relay_difficulty_target_hash" + DefaultRelayDifficultyTargetHashHex = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + DefaultRelayDifficultyTargetHash, _ = hex.DecodeString(DefaultRelayDifficultyTargetHashHex) // TODO_MAINNET(#142, #401): Determine the default value. 
+ KeyProofRequestProbability = []byte("ProofRequestProbability") + ParamProofRequestProbability = "proof_request_probability" + DefaultProofRequestProbability float32 = 0.25 // See: https://github.com/pokt-network/pocket-core/blob/staging/docs/proposals/probabilistic_proofs.md + KeyProofRequirementThreshold = []byte("ProofRequirementThreshold") + ParamProofRequirementThreshold = "proof_requirement_threshold" + DefaultProofRequirementThreshold uint64 = 20 // See: https://github.com/pokt-network/pocket-core/blob/staging/docs/proposals/probabilistic_proofs.md + KeyProofMissingPenalty = []byte("ProofMissingPenalty") + ParamProofMissingPenalty = "proof_missing_penalty" + DefaultProofMissingPenalty = cosmostypes.NewCoin(volatile.DenomuPOKT, math.NewInt(320)) // See: https://github.com/pokt-network/pocket-core/blob/staging/docs/proposals/probabilistic_proofs.md ) // ParamKeyTable the param key table for launch module @@ -34,13 +37,13 @@ func ParamKeyTable() paramtypes.KeyTable { // NewParams creates a new Params instance func NewParams( - minRelayDifficultyBits uint64, + relayDifficultyTargetHash []byte, proofRequestProbability float32, proofRequirementThreshold uint64, proofMissingPenalty *cosmostypes.Coin, ) Params { return Params{ - MinRelayDifficultyBits: minRelayDifficultyBits, + RelayDifficultyTargetHash: relayDifficultyTargetHash, ProofRequestProbability: proofRequestProbability, ProofRequirementThreshold: proofRequirementThreshold, ProofMissingPenalty: proofMissingPenalty, @@ -50,7 +53,7 @@ func NewParams( // DefaultParams returns a default set of parameters func DefaultParams() Params { return NewParams( - DefaultMinRelayDifficultyBits, + DefaultRelayDifficultyTargetHash, DefaultProofRequestProbability, DefaultProofRequirementThreshold, &DefaultProofMissingPenalty, @@ -62,8 +65,8 @@ func (p *Params) ParamSetPairs() paramtypes.ParamSetPairs { return paramtypes.ParamSetPairs{ paramtypes.NewParamSetPair( KeyMinRelayDifficultyBits, - &p.MinRelayDifficultyBits, - 
ValidateMinRelayDifficultyBits, + &p.RelayDifficultyTargetHash, + ValidateRelayDifficultyTargetHash, ), paramtypes.NewParamSetPair( KeyProofRequestProbability, @@ -86,7 +89,7 @@ func (p *Params) ParamSetPairs() paramtypes.ParamSetPairs { // ValidateBasic does a sanity check on the provided params. func (params *Params) ValidateBasic() error { // Validate the ComputeUnitsToTokensMultiplier - if err := ValidateMinRelayDifficultyBits(params.MinRelayDifficultyBits); err != nil { + if err := ValidateRelayDifficultyTargetHash(params.RelayDifficultyTargetHash); err != nil { return err } @@ -105,13 +108,24 @@ func (params *Params) ValidateBasic() error { return nil } -// ValidateMinRelayDifficultyBits validates the MinRelayDifficultyBits param. +// ValidateRelayDifficultyTargetHash validates the MinRelayDifficultyBits param. // NB: The argument is an interface type to satisfy the ParamSetPair function signature. -func ValidateMinRelayDifficultyBits(v interface{}) error { - if _, ok := v.(uint64); !ok { +func ValidateRelayDifficultyTargetHash(v interface{}) error { + targetHash, ok := v.([]byte) + if !ok { return ErrProofParamInvalid.Wrapf("invalid parameter type: %T", v) } + // TODO_TECHDEBT: reference some hasher output size. + if len(targetHash) != 32 { + return ErrProofParamInvalid.Wrapf( + "invalid RelayDifficultyTargetHash: (%x); length wanted: %d; got: %d", + targetHash, + 32, + len(targetHash), + ) + } + return nil } diff --git a/x/proof/types/params.pb.go b/x/proof/types/params.pb.go index 209a20f7a..5f3956b68 100644 --- a/x/proof/types/params.pb.go +++ b/x/proof/types/params.pb.go @@ -4,6 +4,7 @@ package types import ( + bytes "bytes" encoding_binary "encoding/binary" fmt "fmt" types "github.com/cosmos/cosmos-sdk/types" @@ -28,9 +29,8 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package // Params defines the parameters for the module. 
type Params struct { - // min_relay_difficulty_bits is the minimum difficulty in bits for a relay to - // be included in a Merkle proof. - MinRelayDifficultyBits uint64 `protobuf:"varint,1,opt,name=min_relay_difficulty_bits,json=minRelayDifficultyBits,proto3" json:"min_relay_difficulty_bits"` + // relay_difficulty is the target hash which a relay hash must be less than to be volume/reward applicable. + RelayDifficultyTargetHash []byte `protobuf:"bytes,1,opt,name=relay_difficulty_target_hash,json=relayDifficultyTargetHash,proto3" json:"relay_difficulty_target_hash"` // proof_request_probability is the probability of a session requiring a proof // if it's cost (i.e. compute unit consumption) is below the ProofRequirementThreshold. ProofRequestProbability float32 `protobuf:"fixed32,2,opt,name=proof_request_probability,json=proofRequestProbability,proto3" json:"proof_request_probability"` @@ -80,11 +80,11 @@ func (m *Params) XXX_DiscardUnknown() { var xxx_messageInfo_Params proto.InternalMessageInfo -func (m *Params) GetMinRelayDifficultyBits() uint64 { +func (m *Params) GetRelayDifficultyTargetHash() []byte { if m != nil { - return m.MinRelayDifficultyBits + return m.RelayDifficultyTargetHash } - return 0 + return nil } func (m *Params) GetProofRequestProbability() float32 { @@ -115,32 +115,33 @@ func init() { func init() { proto.RegisterFile("poktroll/proof/params.proto", fileDescriptor_2ad689ad5bf3a2d7) } var fileDescriptor_2ad689ad5bf3a2d7 = []byte{ - // 399 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xbf, 0x8e, 0xd4, 0x30, - 0x10, 0xc6, 0xd7, 0x77, 0xa7, 0x2b, 0x82, 0x84, 0x44, 0xf8, 0x73, 0xc9, 0x9d, 0x48, 0x22, 0xaa, - 0x15, 0x12, 0xb6, 0x0e, 0x3a, 0xca, 0x40, 0x41, 0x83, 0xb4, 0x8a, 0x28, 0x80, 0xc6, 0x72, 0x72, - 0xde, 0xc4, 0xba, 0xd8, 0x13, 0x6c, 0x2f, 0x90, 0x57, 0xa0, 0xe2, 0x11, 0xa8, 0xa9, 0x78, 0x0c, - 0xca, 0x2b, 0xa9, 0x56, 0x68, 0xb7, 0x00, 0xed, 0x53, 0xa0, 0xd8, 0xb9, 0xdd, 0x02, 
0x96, 0x26, - 0xb2, 0xbe, 0xdf, 0xf7, 0x4d, 0x46, 0x33, 0x13, 0x9c, 0x75, 0x70, 0x69, 0x35, 0xb4, 0x2d, 0xe9, - 0x34, 0xc0, 0x9c, 0x74, 0x4c, 0x33, 0x69, 0x70, 0xa7, 0xc1, 0x42, 0x78, 0xf3, 0x1a, 0x62, 0x07, - 0x4f, 0x6f, 0x31, 0x29, 0x14, 0x10, 0xf7, 0xf5, 0x96, 0xd3, 0x3b, 0x35, 0xd4, 0xe0, 0x9e, 0x64, - 0x78, 0x8d, 0x6a, 0x52, 0x81, 0x91, 0x60, 0x48, 0xc9, 0x0c, 0x27, 0xef, 0xcf, 0x4b, 0x6e, 0xd9, - 0x39, 0xa9, 0x40, 0x28, 0xcf, 0x1f, 0x7c, 0x3d, 0x0c, 0x8e, 0x67, 0xee, 0x4f, 0xe1, 0xeb, 0x20, - 0x96, 0x42, 0x51, 0xcd, 0x5b, 0xd6, 0xd3, 0x0b, 0x31, 0x9f, 0x8b, 0x6a, 0xd1, 0xda, 0x9e, 0x96, - 0xc2, 0x9a, 0x08, 0x65, 0x68, 0x7a, 0x94, 0xdf, 0xdf, 0x2c, 0xd3, 0xfd, 0xa6, 0xe2, 0x9e, 0x14, - 0xaa, 0x18, 0xc8, 0xf3, 0x2d, 0xc8, 0x85, 0x35, 0xe1, 0x9b, 0x20, 0x76, 0x6d, 0x53, 0xcd, 0xdf, - 0x2d, 0xb8, 0xb1, 0xb4, 0xd3, 0x50, 0xb2, 0x52, 0xb4, 0xc2, 0xf6, 0xd1, 0x41, 0x86, 0xa6, 0x07, - 0xbe, 0xf2, 0x5e, 0x53, 0x71, 0xe2, 0x50, 0xe1, 0xc9, 0x6c, 0x07, 0x42, 0x1a, 0x9c, 0xed, 0x52, - 0x42, 0x73, 0xc9, 0x95, 0xa5, 0xb6, 0xd1, 0xdc, 0x34, 0xd0, 0x5e, 0x44, 0x87, 0xae, 0xed, 0x74, - 0xb3, 0x4c, 0xff, 0x67, 0x2b, 0xe2, 0x6d, 0xf9, 0x91, 0xbd, 0xba, 0x46, 0x61, 0x13, 0xdc, 0xf5, - 0x49, 0x29, 0x8c, 0x11, 0xaa, 0xa6, 0x1d, 0x57, 0xac, 0xb5, 0x7d, 0x74, 0x94, 0xa1, 0xe9, 0x8d, - 0xc7, 0x31, 0xf6, 0x03, 0xc6, 0xc3, 0x80, 0xf1, 0x38, 0x60, 0xfc, 0x0c, 0x84, 0xca, 0xe3, 0xcd, - 0x32, 0xfd, 0x77, 0xb6, 0xb8, 0xed, 0xe4, 0x97, 0x5e, 0x9d, 0x79, 0xf1, 0x69, 0xf6, 0xfb, 0x4b, - 0x8a, 0x3e, 0xfd, 0xfa, 0xf6, 0xf0, 0x64, 0x7b, 0x09, 0x1f, 0xc7, 0x5b, 0xf0, 0x1b, 0xca, 0x5f, - 0x7c, 0x5f, 0x25, 0xe8, 0x6a, 0x95, 0xa0, 0x9f, 0xab, 0x04, 0x7d, 0x5e, 0x27, 0x93, 0xab, 0x75, - 0x32, 0xf9, 0xb1, 0x4e, 0x26, 0x6f, 0x71, 0x2d, 0x6c, 0xb3, 0x28, 0x71, 0x05, 0x92, 0x0c, 0xe9, - 0x47, 0x8a, 0xdb, 0x0f, 0xa0, 0x2f, 0xc9, 0x5f, 0xa5, 0x6c, 0xdf, 0x71, 0x53, 0x1e, 0xbb, 0xed, - 0x3f, 0xf9, 0x13, 0x00, 0x00, 0xff, 0xff, 0x22, 0xfc, 0x96, 0xb6, 0x75, 0x02, 0x00, 0x00, + // 405 bytes of a gzipped 
FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0x31, 0x8e, 0xd4, 0x30, + 0x14, 0x86, 0xc7, 0xbb, 0xab, 0x2d, 0x02, 0x42, 0x22, 0x80, 0x36, 0xd9, 0x85, 0x24, 0xa2, 0x8a, + 0x90, 0xb0, 0xb5, 0xd0, 0x51, 0x06, 0x8a, 0x6d, 0x90, 0x46, 0xd1, 0x36, 0xd0, 0x58, 0x4e, 0xd6, + 0x93, 0x58, 0x93, 0xe4, 0x05, 0xdb, 0x03, 0xe4, 0x0a, 0x54, 0x1c, 0x81, 0x23, 0x20, 0x71, 0x09, + 0xca, 0x29, 0xa9, 0x22, 0x34, 0x53, 0x80, 0x72, 0x0a, 0x34, 0x76, 0x66, 0xa6, 0x00, 0xa6, 0xb1, + 0x9e, 0xfe, 0xef, 0xff, 0x9f, 0xad, 0xe7, 0xe7, 0x5c, 0xb4, 0x30, 0xd7, 0x12, 0xaa, 0x8a, 0xb4, + 0x12, 0x60, 0x46, 0x5a, 0x26, 0x59, 0xad, 0x70, 0x2b, 0x41, 0x83, 0x7b, 0x67, 0x0b, 0xb1, 0x81, + 0xe7, 0x77, 0x59, 0x2d, 0x1a, 0x20, 0xe6, 0xb4, 0x96, 0xf3, 0xfb, 0x05, 0x14, 0x60, 0x4a, 0xb2, + 0xa9, 0x46, 0x35, 0xc8, 0x41, 0xd5, 0xa0, 0x48, 0xc6, 0x14, 0x27, 0xef, 0x2f, 0x33, 0xae, 0xd9, + 0x25, 0xc9, 0x41, 0x34, 0x96, 0x3f, 0xfe, 0x76, 0xec, 0x9c, 0x4e, 0xcd, 0x4d, 0x2e, 0x73, 0x1e, + 0x4a, 0x5e, 0xb1, 0x8e, 0xde, 0x88, 0xd9, 0x4c, 0xe4, 0x8b, 0x4a, 0x77, 0x54, 0x33, 0x59, 0x70, + 0x4d, 0x4b, 0xa6, 0x4a, 0x0f, 0x45, 0x28, 0xbe, 0x9d, 0x44, 0x43, 0x1f, 0x1e, 0xf4, 0xa5, 0xbe, + 0xa1, 0xaf, 0x76, 0xf0, 0xda, 0xb0, 0x2b, 0xa6, 0x4a, 0xf7, 0x8d, 0xe3, 0x9b, 0xf7, 0x53, 0xc9, + 0xdf, 0x2d, 0xb8, 0xd2, 0xb4, 0x95, 0x90, 0xb1, 0x4c, 0x54, 0x42, 0x77, 0xde, 0x51, 0x84, 0xe2, + 0xa3, 0xe4, 0xd1, 0xd0, 0x87, 0xff, 0x37, 0xa5, 0x67, 0x06, 0xa5, 0x96, 0x4c, 0xf7, 0xc0, 0xa5, + 0xce, 0xc5, 0x3e, 0x25, 0x24, 0xaf, 0x79, 0xa3, 0xa9, 0x2e, 0x25, 0x57, 0x25, 0x54, 0x37, 0xde, + 0x71, 0x84, 0xe2, 0x93, 0x24, 0x1c, 0xfa, 0xf0, 0x90, 0x2d, 0xf5, 0x77, 0xed, 0x47, 0x76, 0xbd, + 0x45, 0x6e, 0xe9, 0x3c, 0xb0, 0xc9, 0x5a, 0x28, 0x25, 0x9a, 0x82, 0xb6, 0xbc, 0x61, 0x95, 0xee, + 0xbc, 0x93, 0x08, 0xc5, 0xb7, 0x9e, 0xf9, 0xd8, 0x4e, 0x1a, 0x6f, 0x26, 0x8d, 0xc7, 0x49, 0xe3, + 0x97, 0x20, 0x9a, 0xc4, 0x1f, 0xfa, 0xf0, 0xdf, 0xd9, 0xf4, 0x9e, 0x91, 0x5f, 0x5b, 0x75, 0x6a, + 0xc5, 0x17, 0xd1, 
0xef, 0x2f, 0x21, 0xfa, 0xf4, 0xeb, 0xeb, 0x93, 0xb3, 0xdd, 0x4a, 0x7c, 0x1c, + 0x97, 0xc2, 0x7e, 0x55, 0x72, 0xf5, 0x7d, 0x15, 0xa0, 0xe5, 0x2a, 0x40, 0x3f, 0x57, 0x01, 0xfa, + 0xbc, 0x0e, 0x26, 0xcb, 0x75, 0x30, 0xf9, 0xb1, 0x0e, 0x26, 0x6f, 0x71, 0x21, 0x74, 0xb9, 0xc8, + 0x70, 0x0e, 0x35, 0xd9, 0xa4, 0x9f, 0x36, 0x5c, 0x7f, 0x00, 0x39, 0x27, 0x7f, 0xb5, 0xd2, 0x5d, + 0xcb, 0x55, 0x76, 0x6a, 0xd6, 0xe0, 0xf9, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x4e, 0xe3, 0x17, + 0x08, 0x7e, 0x02, 0x00, 0x00, } func (this *Params) Equal(that interface{}) bool { @@ -162,7 +163,7 @@ func (this *Params) Equal(that interface{}) bool { } else if this == nil { return false } - if this.MinRelayDifficultyBits != that1.MinRelayDifficultyBits { + if !bytes.Equal(this.RelayDifficultyTargetHash, that1.RelayDifficultyTargetHash) { return false } if this.ProofRequestProbability != that1.ProofRequestProbability { @@ -219,10 +220,12 @@ func (m *Params) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x15 } - if m.MinRelayDifficultyBits != 0 { - i = encodeVarintParams(dAtA, i, uint64(m.MinRelayDifficultyBits)) + if len(m.RelayDifficultyTargetHash) > 0 { + i -= len(m.RelayDifficultyTargetHash) + copy(dAtA[i:], m.RelayDifficultyTargetHash) + i = encodeVarintParams(dAtA, i, uint64(len(m.RelayDifficultyTargetHash))) i-- - dAtA[i] = 0x8 + dAtA[i] = 0xa } return len(dAtA) - i, nil } @@ -244,8 +247,9 @@ func (m *Params) Size() (n int) { } var l int _ = l - if m.MinRelayDifficultyBits != 0 { - n += 1 + sovParams(uint64(m.MinRelayDifficultyBits)) + l = len(m.RelayDifficultyTargetHash) + if l > 0 { + n += 1 + l + sovParams(uint64(l)) } if m.ProofRequestProbability != 0 { n += 5 @@ -296,10 +300,10 @@ func (m *Params) Unmarshal(dAtA []byte) error { } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MinRelayDifficultyBits", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
RelayDifficultyTargetHash", wireType) } - m.MinRelayDifficultyBits = 0 + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowParams @@ -309,11 +313,26 @@ func (m *Params) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinRelayDifficultyBits |= uint64(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } + if byteLen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthParams + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RelayDifficultyTargetHash = append(m.RelayDifficultyTargetHash[:0], dAtA[iNdEx:postIndex]...) + if m.RelayDifficultyTargetHash == nil { + m.RelayDifficultyTargetHash = []byte{} + } + iNdEx = postIndex case 2: if wireType != 5 { return fmt.Errorf("proto: wrong wireType = %d for field ProofRequestProbability", wireType) diff --git a/x/service/types/relay.go b/x/service/types/relay.go index 3276e6441..f1a4ac271 100644 --- a/x/service/types/relay.go +++ b/x/service/types/relay.go @@ -1,21 +1,13 @@ package types import ( - "crypto/sha256" - cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" + "github.com/pokt-network/poktroll/pkg/crypto/protocol" sessiontypes "github.com/pokt-network/poktroll/x/session/types" sharedtypes "github.com/pokt-network/poktroll/x/shared/types" ) -// GetHashFromBytes returns the hash of the relay (full, request or response) bytes. -// It is used as helper in the case that the relay is already marshaled and -// centralizes the hasher used. -func GetHashFromBytes(relayBz []byte) [32]byte { - return sha256.Sum256(relayBz) -} - // GetHash returns the hash of the relay, which contains both the signed // relay request and the relay response. It is used as the key for insertion // into the SMT. 
@@ -25,7 +17,7 @@ func (relay *Relay) GetHash() ([32]byte, error) { return [32]byte{}, err } - return GetHashFromBytes(relayBz), nil + return protocol.GetHashFromBytes(relayBz), nil } // GetSignableBytesHash returns the hash of the signable bytes of the relay request @@ -42,7 +34,7 @@ func (req RelayRequest) GetSignableBytesHash() ([32]byte, error) { // return the marshaled request hash to guarantee that the signable bytes // are always of a constant and expected length - return GetHashFromBytes(requestBz), nil + return protocol.GetHashFromBytes(requestBz), nil } // ValidateBasic performs basic validation of the RelayResponse Meta, SessionHeader @@ -84,7 +76,7 @@ func (res RelayResponse) GetSignableBytesHash() ([32]byte, error) { // return the marshaled response hash to guarantee that the signable bytes // are always of a constant and expected length - return GetHashFromBytes(responseBz), nil + return protocol.GetHashFromBytes(responseBz), nil } // ValidateBasic performs basic validation of the RelayResponse Meta, SessionHeader diff --git a/x/tokenomics/keeper/update_relay_mining_difficulty.go b/x/tokenomics/keeper/update_relay_mining_difficulty.go index 340ac77e3..04268c342 100644 --- a/x/tokenomics/keeper/update_relay_mining_difficulty.go +++ b/x/tokenomics/keeper/update_relay_mining_difficulty.go @@ -5,12 +5,10 @@ import ( "context" "encoding/hex" "fmt" - "math" - "math/bits" + "math/big" sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/pokt-network/poktroll/pkg/crypto/protocol" prooftypes "github.com/pokt-network/poktroll/x/proof/types" "github.com/pokt-network/poktroll/x/tokenomics/types" ) @@ -56,7 +54,7 @@ func (k Keeper) UpdateRelayMiningDifficulty( ServiceId: serviceId, BlockHeight: sdkCtx.BlockHeight(), NumRelaysEma: numRelays, - TargetHash: defaultDifficultyTargetHash(), + TargetHash: prooftypes.DefaultRelayDifficultyTargetHash, } } @@ -123,37 +121,37 @@ func ComputeNewDifficultyTargetHash(targetNumRelays, newRelaysEma uint64) []byte // the actual 
on-chain relays, so we don't need to scale to anything above // the default. if targetNumRelays > newRelaysEma { - return defaultDifficultyTargetHash() + return prooftypes.DefaultRelayDifficultyTargetHash } - log2 := func(x float64) float64 { - return math.Log(x) / math.Ln2 - } + // Calculate the proportion of target relays to the new EMA + ratio := float64(targetNumRelays) / float64(newRelaysEma) + + // Compute the new target hash by scaling the default target hash based on the ratio + newTargetHash := scaleDifficultyTargetHash(prooftypes.DefaultRelayDifficultyTargetHash, ratio) - // We are dealing with a bitwise binary distribution, and are trying to convert - // the proportion of an off-chain relay (i.e. relayEMA) to an - // on-chain relay (i.e. target) based on the probability of x leading zeros - // in the target hash. - // - // In other words, the probability of an off-chain relay moving into the tree - // should equal (approximately) the probability of having x leading zeroes - // in the target hash. - // - // The construction is as follows: - // (0.5)^num_leading_zeroes = (num_target_relay / num_total_relays) - // (0.5)^x = (T/R) - // x = -ln2(T/R) - numLeadingZeroBits := int(-log2(float64(targetNumRelays) / float64(newRelaysEma))) - numBytes := protocol.SmtSpec.PathHasherSize() - return LeadingZeroBitsToTargetDifficultyHash(numLeadingZeroBits, numBytes) + return newTargetHash } -// defaultDifficultyTargetHash returns the default difficulty target hash with -// the default number of leading zero bits. 
-func defaultDifficultyTargetHash() []byte { - numBytes := protocol.SmtSpec.PathHasherSize() - numDefaultLeadingZeroBits := int(prooftypes.DefaultMinRelayDifficultyBits) - return LeadingZeroBitsToTargetDifficultyHash(numDefaultLeadingZeroBits, numBytes) +// scaleDifficultyTargetHash scales the default target hash based on the given ratio +func scaleDifficultyTargetHash(targetHash []byte, ratio float64) []byte { + // Convert targetHash to a big.Float to miminize precision loss. + targetInt := new(big.Int).SetBytes(targetHash) + targetFloat := new(big.Float).SetInt(targetInt) + + // Scale the target by multiplying it by the ratio. + scaledTargetFloat := new(big.Float).Mul(targetFloat, big.NewFloat(ratio)) + scaledTargetInt, _ := scaledTargetFloat.Int(nil) + scaledTargetHash := scaledTargetInt.Bytes() + + // Ensure the scaled target hash has the same length as the default target hash. + if len(scaledTargetHash) < len(targetHash) { + paddedTargetHash := make([]byte, len(targetHash)) + copy(paddedTargetHash[len(paddedTargetHash)-len(scaledTargetHash):], scaledTargetHash) + return paddedTargetHash + } + + return scaledTargetHash } // computeEma computes the EMA at time t, given the EMA at time t-1, the raw @@ -163,25 +161,6 @@ func computeEma(alpha float64, prevEma, currValue uint64) uint64 { return uint64(alpha*float64(currValue) + (1-alpha)*float64(prevEma)) } -// RelayMiningTargetHashToDifficulty returns the relay mining difficulty based on the hash. -// This currently implies the number of leading zero bits but may be changed in the future. -// TODO_MAINNET: Determine if we should launch with a more adaptive difficulty or stick -// to leading zeroes. 
-func RelayMiningTargetHashToDifficulty(targetHash []byte) int { - numLeadingZeroBits := 0 - for _, b := range targetHash { - if b == 0 { - numLeadingZeroBits += 8 - continue - } else { - numLeadingZeroBits += bits.LeadingZeros8(b) - break // Stop counting after the first non-zero byte - } - } - - return numLeadingZeroBits -} - // LeadingZeroBitsToTargetDifficultyHash generates a slice of bytes with the specified number of leading zero bits // NB: Exported for testing purposes only. func LeadingZeroBitsToTargetDifficultyHash(numLeadingZeroBits int, numBytes int) []byte { diff --git a/x/tokenomics/keeper/update_relay_mining_difficulty_test.go b/x/tokenomics/keeper/update_relay_mining_difficulty_test.go index c9cde4b13..6842b0f1c 100644 --- a/x/tokenomics/keeper/update_relay_mining_difficulty_test.go +++ b/x/tokenomics/keeper/update_relay_mining_difficulty_test.go @@ -127,8 +127,8 @@ func TestUpdateRelayMiningDifficulty_FirstDifficulty(t *testing.T) { BlockHeight: 1, NumRelaysEma: keeper.TargetNumRelays * 1e3, TargetHash: append( - []byte{0b00000000, 0b01111111}, // 9 leading 0 bits - makeBytesFullOfOnes(30)..., + []byte{0b00000000}, // at least 8 leading 0 bits + makeBytesFullOfOnes(31)..., ), }, }, @@ -145,10 +145,13 @@ func TestUpdateRelayMiningDifficulty_FirstDifficulty(t *testing.T) { difficulty, found := keeper.GetRelayMiningDifficulty(ctx, "svc1") require.True(t, found) - require.Equal(t, difficulty.NumRelaysEma, tt.numRelays) - require.Equal(t, difficulty.NumRelaysEma, tt.expectedRelayMiningDifficulty.NumRelaysEma) + require.Equal(t, tt.numRelays, difficulty.NumRelaysEma) + require.Equal(t, tt.expectedRelayMiningDifficulty.NumRelaysEma, difficulty.NumRelaysEma) - require.Equal(t, difficulty.TargetHash, tt.expectedRelayMiningDifficulty.TargetHash) + require.Lessf(t, bytes.Compare(difficulty.TargetHash, tt.expectedRelayMiningDifficulty.TargetHash), 1, + "expected difficulty.TargetHash (%x) to be less than or equal to expectedRelayMiningDifficulty.TargetHash 
(%x)", + difficulty.TargetHash, tt.expectedRelayMiningDifficulty.TargetHash, + ) }) } } @@ -213,7 +216,11 @@ func TestComputeNewDifficultyHash(t *testing.T) { for _, tt := range tests { t.Run(tt.desc, func(t *testing.T) { result := keeper.ComputeNewDifficultyTargetHash(tt.numRelaysTarget, tt.relaysEma) - require.Equal(t, result, tt.expectedDifficultyHash) + + require.Lessf(t, bytes.Compare(result, tt.expectedDifficultyHash), 1, + "expected difficulty.TargetHash (%x) to be less than or equal to expectedRelayMiningDifficulty.TargetHash (%x)", + result, tt.expectedDifficultyHash, + ) }) } } diff --git a/x/tokenomics/module/abci.go b/x/tokenomics/module/abci.go index b628bc553..264ce5ee7 100644 --- a/x/tokenomics/module/abci.go +++ b/x/tokenomics/module/abci.go @@ -1,10 +1,12 @@ package tokenomics import ( + "crypto/sha256" "fmt" sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/pokt-network/poktroll/pkg/crypto/protocol" "github.com/pokt-network/poktroll/telemetry" prooftypes "github.com/pokt-network/poktroll/x/proof/types" "github.com/pokt-network/poktroll/x/tokenomics/keeper" @@ -81,8 +83,11 @@ func EndBlocker(ctx sdk.Context, k keeper.Keeper) (err error) { // Emit telemetry for each service's relay mining difficulty. 
for serviceId, newDifficulty := range difficultyPerServiceMap { - miningDifficultyNumBits := keeper.RelayMiningTargetHashToDifficulty(newDifficulty.TargetHash) - telemetry.RelayMiningDifficultyGauge(miningDifficultyNumBits, serviceId) + var newTargetHash [sha256.Size]byte + copy(newTargetHash[:], newDifficulty.TargetHash) + + difficulty := protocol.GetDifficultyFromHash(newTargetHash) + telemetry.RelayMiningDifficultyGauge(difficulty, serviceId) telemetry.RelayEMAGauge(newDifficulty.NumRelaysEma, serviceId) } From ceb283d18e1268272cc22d9d13901aa58ede1b45 Mon Sep 17 00:00:00 2001 From: Bryan White Date: Tue, 16 Jul 2024 15:27:29 +0200 Subject: [PATCH 02/29] fix: e2e test --- e2e/tests/parse_params_test.go | 2 +- e2e/tests/update_params_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/e2e/tests/parse_params_test.go b/e2e/tests/parse_params_test.go index fd2b71902..e928e657a 100644 --- a/e2e/tests/parse_params_test.go +++ b/e2e/tests/parse_params_test.go @@ -133,7 +133,7 @@ func (s *suite) newProofMsgUpdateParams(params paramsMap) cosmostypes.Msg { for paramName, paramValue := range params { switch paramName { case prooftypes.ParamRelayDifficultyTargetHash: - msgUpdateParams.Params.MinRelayDifficultyBits = uint64(paramValue.value.(int64)) + msgUpdateParams.Params.RelayDifficultyTargetHash = paramValue.value.([]byte) case prooftypes.ParamProofRequestProbability: msgUpdateParams.Params.ProofRequestProbability = paramValue.value.(float32) case prooftypes.ParamProofRequirementThreshold: diff --git a/e2e/tests/update_params_test.go b/e2e/tests/update_params_test.go index 1fa575df1..517cd2849 100644 --- a/e2e/tests/update_params_test.go +++ b/e2e/tests/update_params_test.go @@ -372,7 +372,7 @@ func (s *suite) assertExpectedModuleParamsUpdated(moduleName string) { minRelayDifficultyBits, ok := paramsMap[prooftypes.ParamRelayDifficultyTargetHash] if ok { - params.MinRelayDifficultyBits = uint64(minRelayDifficultyBits.value.(int64)) + 
params.RelayDifficultyTargetHash = minRelayDifficultyBits.value.([]byte) } proofRequestProbability, ok := paramsMap[prooftypes.ParamProofRequestProbability] From 0ca9b9858be8a011783f90c23834bef552d88975 Mon Sep 17 00:00:00 2001 From: Bryan White Date: Tue, 16 Jul 2024 16:30:18 +0200 Subject: [PATCH 03/29] fix: e2e test --- e2e/tests/parse_params_test.go | 3 ++- e2e/tests/update_params.feature | 14 +++++++------- e2e/tests/update_params_test.go | 5 +++-- x/proof/types/message_update_param_test.go | 2 +- 4 files changed, 13 insertions(+), 11 deletions(-) diff --git a/e2e/tests/parse_params_test.go b/e2e/tests/parse_params_test.go index e928e657a..2f3d5e99c 100644 --- a/e2e/tests/parse_params_test.go +++ b/e2e/tests/parse_params_test.go @@ -3,6 +3,7 @@ package e2e import ( + "encoding/hex" "fmt" "strconv" @@ -133,7 +134,7 @@ func (s *suite) newProofMsgUpdateParams(params paramsMap) cosmostypes.Msg { for paramName, paramValue := range params { switch paramName { case prooftypes.ParamRelayDifficultyTargetHash: - msgUpdateParams.Params.RelayDifficultyTargetHash = paramValue.value.([]byte) + msgUpdateParams.Params.RelayDifficultyTargetHash, _ = hex.DecodeString(string(paramValue.value.([]byte))) case prooftypes.ParamProofRequestProbability: msgUpdateParams.Params.ProofRequestProbability = paramValue.value.(float32) case prooftypes.ParamProofRequirementThreshold: diff --git a/e2e/tests/update_params.feature b/e2e/tests/update_params.feature index 3430b6549..9804553da 100644 --- a/e2e/tests/update_params.feature +++ b/e2e/tests/update_params.feature @@ -37,11 +37,11 @@ Feature: Params Namespace And all "proof" module params are set to their default values And an authz grant from the "gov" "module" account to the "pnf" "user" account for the "/poktroll.proof.MsgUpdateParams" message exists When the "pnf" account sends an authz exec message to update all "proof" module params - | name | value | type | - | min_relay_difficulty_bits | 8 | int64 | - | proof_request_probability | 
0.1 | float | - | proof_requirement_threshold | 100 | int64 | - | proof_missing_penalty | 500 | coin | + | name | value | type | + | relay_difficulty_target_hash | 00000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffff | bytes | + | proof_request_probability | 0.1 | float | + | proof_requirement_threshold | 100 | int64 | + | proof_missing_penalty | 500 | coin | Then all "proof" module params should be updated # NB: If you are reading this and the proof module has parameters @@ -89,6 +89,6 @@ Feature: Params Namespace And all "proof" module params are set to their default values And an authz grant from the "gov" "module" account to the "pnf" "user" account for the "/poktroll.proof.MsgUpdateParams" message exists When the "unauthorized" account sends an authz exec message to update "proof" the module param - | name | value | type | - | "min_relay_difficulty_bits | 666 | int64 | + | name | value | type | + | proof_request_probability | 0.1 | float | Then the "proof" module param "min_relay_difficulty_bits" should be set to its default value diff --git a/e2e/tests/update_params_test.go b/e2e/tests/update_params_test.go index 517cd2849..c7d383479 100644 --- a/e2e/tests/update_params_test.go +++ b/e2e/tests/update_params_test.go @@ -3,6 +3,7 @@ package e2e import ( + "encoding/hex" "encoding/json" "fmt" "reflect" @@ -370,9 +371,9 @@ func (s *suite) assertExpectedModuleParamsUpdated(moduleName string) { params := prooftypes.DefaultParams() paramsMap := s.expectedModuleParams[moduleName] - minRelayDifficultyBits, ok := paramsMap[prooftypes.ParamRelayDifficultyTargetHash] + relayDifficultyTargetHash, ok := paramsMap[prooftypes.ParamRelayDifficultyTargetHash] if ok { - params.RelayDifficultyTargetHash = minRelayDifficultyBits.value.([]byte) + params.RelayDifficultyTargetHash, _ = hex.DecodeString(string(relayDifficultyTargetHash.value.([]byte))) } proofRequestProbability, ok := paramsMap[prooftypes.ParamProofRequestProbability] diff --git 
a/x/proof/types/message_update_param_test.go b/x/proof/types/message_update_param_test.go index 42a2242df..d89a323ad 100644 --- a/x/proof/types/message_update_param_test.go +++ b/x/proof/types/message_update_param_test.go @@ -28,7 +28,7 @@ func TestMsgUpdateParam_ValidateBasic(t *testing.T) { name: "invalid: param name incorrect (non-existent)", msg: MsgUpdateParam{ Authority: sample.AccAddress(), - Name: "WRONG_min_relay_difficulty_bits", + Name: "WRONG_relay_difficulty_target_hash", AsType: &MsgUpdateParam_AsInt64{AsInt64: 1}, }, From 38d1985cfe0f4031d503687836a238d8a203a3c8 Mon Sep 17 00:00:00 2001 From: Bryan White Date: Wed, 17 Jul 2024 13:12:46 +0200 Subject: [PATCH 04/29] chore: review feedback improvements --- api/poktroll/proof/params.pulsar.go | 2 +- pkg/crypto/protocol/difficulty.go | 18 ++-- pkg/crypto/protocol/difficulty_test.go | 55 +++++++++++ proto/poktroll/proof/params.proto | 2 +- telemetry/event_counters.go | 6 +- x/proof/keeper/msg_server_submit_proof.go | 2 +- x/proof/types/params.go | 4 +- x/proof/types/params.pb.go | 2 +- x/tokenomics/keeper/scale_difficulty_test.go | 97 +++++++++++++++++++ .../keeper/update_relay_mining_difficulty.go | 7 ++ 10 files changed, 178 insertions(+), 17 deletions(-) create mode 100644 pkg/crypto/protocol/difficulty_test.go create mode 100644 x/tokenomics/keeper/scale_difficulty_test.go diff --git a/api/poktroll/proof/params.pulsar.go b/api/poktroll/proof/params.pulsar.go index 015aafc49..e75f23eac 100644 --- a/api/poktroll/proof/params.pulsar.go +++ b/api/poktroll/proof/params.pulsar.go @@ -626,7 +626,7 @@ type Params struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // relay_difficulty is the target hash which a relay hash must be less than to be volume/reward applicable. + // relay_difficulty_target_hash is the maximum value a relay hash must be less than to be volume/reward applicable. 
RelayDifficultyTargetHash []byte `protobuf:"bytes,1,opt,name=relay_difficulty_target_hash,json=relayDifficultyTargetHash,proto3" json:"relay_difficulty_target_hash,omitempty"` // proof_request_probability is the probability of a session requiring a proof // if it's cost (i.e. compute unit consumption) is below the ProofRequirementThreshold. diff --git a/pkg/crypto/protocol/difficulty.go b/pkg/crypto/protocol/difficulty.go index 6cf5f8930..eea1b62c7 100644 --- a/pkg/crypto/protocol/difficulty.go +++ b/pkg/crypto/protocol/difficulty.go @@ -6,20 +6,22 @@ import ( "math/big" ) -// Difficulty1Hash represents the "easiest" difficulty. var ( - Difficulty1HashHex = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" - Difficulty1HashBz, _ = hex.DecodeString(Difficulty1HashHex) - Difficulty1HashInt = new(big.Int).SetBytes(Difficulty1HashBz) + // Difficulty1HashBz is the chosen "highest" (easiest) target hash, which + // corresponds to the lowest possible difficulty. It effectively calibrates + // the difficulty number (which is returned by GetDifficultyFromHash) by defining + // the hash which corresponds to difficulty 1. + // - https://bitcoin.stackexchange.com/questions/107976/bitcoin-difficulty-why-leading-0s + // - https://bitcoin.stackexchange.com/questions/121920/is-it-always-possible-to-find-a-number-whose-hash-starts-with-a-certain-number-o + Difficulty1HashBz, _ = hex.DecodeString("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") ) // GetDifficultyFromHash returns the "difficulty" of the given hash, with respect -// to the "highest" target hash, Difficulty1Hash. -// - https://bitcoin.stackexchange.com/questions/107976/bitcoin-difficulty-why-leading-0s -// - https://bitcoin.stackexchange.com/questions/121920/is-it-always-possible-to-find-a-number-whose-hash-starts-with-a-certain-number-o +// to the "highest" (easiest) target hash, Difficulty1Hash. 
func GetDifficultyFromHash(hashBz [sha256.Size]byte) int64 { + difficulty1HashInt := new(big.Int).SetBytes(Difficulty1HashBz) hashInt := new(big.Int).SetBytes(hashBz[:]) // difficulty is the ratio of the highest target hash to the given hash. - return new(big.Int).Div(Difficulty1HashInt, hashInt).Int64() + return new(big.Int).Div(difficulty1HashInt, hashInt).Int64() } diff --git a/pkg/crypto/protocol/difficulty_test.go b/pkg/crypto/protocol/difficulty_test.go new file mode 100644 index 000000000..7aa9647d9 --- /dev/null +++ b/pkg/crypto/protocol/difficulty_test.go @@ -0,0 +1,55 @@ +package protocol + +import ( + "crypto/sha256" + "encoding/hex" + "math/big" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestGetDifficultyFromHash(t *testing.T) { + tests := []struct { + desc string + hashHex string + expectedDifficulty int64 + }{ + { + desc: "Difficulty 1", + hashHex: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + expectedDifficulty: 1, + }, + { + desc: "Difficulty 2", + hashHex: "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + expectedDifficulty: 2, + }, + { + desc: "Difficulty 4", + hashHex: "3fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + expectedDifficulty: 4, + }, + { + desc: "Highest difficulty", + hashHex: "0000000000000000000000000000000000000000000000000000000000000001", + expectedDifficulty: new(big.Int).SetBytes(Difficulty1HashBz).Int64(), + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + hashBytes, err := hex.DecodeString(test.hashHex) + if err != nil { + t.Fatalf("failed to decode hash: %v", err) + } + + var hashBz [sha256.Size]byte + copy(hashBz[:], hashBytes) + + difficulty := GetDifficultyFromHash(hashBz) + t.Logf("test: %s, difficulty: %d", test.desc, difficulty) + require.Equal(t, test.expectedDifficulty, difficulty) + }) + } +} diff --git a/proto/poktroll/proof/params.proto b/proto/poktroll/proof/params.proto index 
ab1d6ac97..3e8f1bc17 100644 --- a/proto/poktroll/proof/params.proto +++ b/proto/poktroll/proof/params.proto @@ -12,7 +12,7 @@ message Params { option (amino.name) = "poktroll/x/proof/Params"; option (gogoproto.equal) = true; - // relay_difficulty is the target hash which a relay hash must be less than to be volume/reward applicable. + // relay_difficulty_target_hash is the maximum value a relay hash must be less than to be volume/reward applicable. bytes relay_difficulty_target_hash = 1 [(gogoproto.jsontag) = "relay_difficulty_target_hash"]; // proof_request_probability is the probability of a session requiring a proof diff --git a/telemetry/event_counters.go b/telemetry/event_counters.go index ff23d4921..c8fb2f456 100644 --- a/telemetry/event_counters.go +++ b/telemetry/event_counters.go @@ -147,9 +147,9 @@ func ClaimCounter( ) } -// RelayMiningDifficultyGauge sets a gauge which tracks the relay mining difficulty, -// which is represented by number of leading zero bits. -// The serviceId is used as a label to be able to track the difficulty for each service. +// RelayMiningDifficultyGauge sets a gauge which tracks the integer representation +// of the relay mining difficulty. The serviceId is used as a label to be able to +// track the difficulty for each service. 
func RelayMiningDifficultyGauge(difficulty int64, serviceId string) { labels := []metrics.Label{ {Name: "type", Value: "relay_mining_difficulty"}, diff --git a/x/proof/keeper/msg_server_submit_proof.go b/x/proof/keeper/msg_server_submit_proof.go index 50c0a785d..73921ba25 100644 --- a/x/proof/keeper/msg_server_submit_proof.go +++ b/x/proof/keeper/msg_server_submit_proof.go @@ -454,7 +454,7 @@ func validateRelayDifficulty(relayBz []byte, targetHash []byte) error { return types.ErrProofInvalidRelay.Wrapf( "invalid RelayDifficultyTargetHash: (%x); length wanted: %d; got: %d", targetHash, - 32, + sha256.Size, len(targetHash), ) } diff --git a/x/proof/types/params.go b/x/proof/types/params.go index dfabfec13..4aca6858f 100644 --- a/x/proof/types/params.go +++ b/x/proof/types/params.go @@ -1,6 +1,7 @@ package types import ( + "crypto/sha256" "encoding/hex" "cosmossdk.io/math" @@ -116,8 +117,7 @@ func ValidateRelayDifficultyTargetHash(v interface{}) error { return ErrProofParamInvalid.Wrapf("invalid parameter type: %T", v) } - // TODO_TECHDEBT: reference some hasher output size. - if len(targetHash) != 32 { + if len(targetHash) != sha256.Size { return ErrProofParamInvalid.Wrapf( "invalid RelayDifficultyTargetHash: (%x); length wanted: %d; got: %d", targetHash, diff --git a/x/proof/types/params.pb.go b/x/proof/types/params.pb.go index 5f3956b68..9718a5496 100644 --- a/x/proof/types/params.pb.go +++ b/x/proof/types/params.pb.go @@ -29,7 +29,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package // Params defines the parameters for the module. type Params struct { - // relay_difficulty is the target hash which a relay hash must be less than to be volume/reward applicable. + // relay_difficulty_target_hash is the maximum value a relay hash must be less than to be volume/reward applicable. 
RelayDifficultyTargetHash []byte `protobuf:"bytes,1,opt,name=relay_difficulty_target_hash,json=relayDifficultyTargetHash,proto3" json:"relay_difficulty_target_hash"` // proof_request_probability is the probability of a session requiring a proof // if it's cost (i.e. compute unit consumption) is below the ProofRequirementThreshold. diff --git a/x/tokenomics/keeper/scale_difficulty_test.go b/x/tokenomics/keeper/scale_difficulty_test.go new file mode 100644 index 000000000..898e33d35 --- /dev/null +++ b/x/tokenomics/keeper/scale_difficulty_test.go @@ -0,0 +1,97 @@ +package keeper + +import ( + "bytes" + "encoding/hex" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// TestScaleDifficultyTargetHash tests the scaling of a target hash by a given ratio. +// Some expectations are manually adjusted to account for some precision loss in the +// implementation. +func TestScaleDifficultyTargetHash(t *testing.T) { + tests := []struct { + desc string + targetHashHex string + ratio float64 + expectedHashHex string + }{ + { + desc: "Scale by 0.5", + targetHashHex: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + ratio: 0.5, + expectedHashHex: "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + }, + { + desc: "Scale by 2", + targetHashHex: "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + ratio: 2, + expectedHashHex: "fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe", + }, + { + desc: "Scale by 0.25", + targetHashHex: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + ratio: 0.25, + expectedHashHex: "3fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + }, + { + desc: "Scale by 4", + targetHashHex: "3fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + ratio: 4, + expectedHashHex: "fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc", + }, + { + desc: "Scale by 1 (no change)", + targetHashHex: 
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + ratio: 1, + expectedHashHex: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + }, + { + desc: "Scale by 0.1", + targetHashHex: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + ratio: 0.1, + expectedHashHex: "19999999999999ffffffffffffffffffffffffffffffffffffffffffffffffff", + }, + { + desc: "Scale by 10", + targetHashHex: "1999999999999999999999999999999999999999999999999999999999999999", + ratio: 10, + expectedHashHex: "fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8", + }, + { + desc: "Scale by 10e-12", + targetHashHex: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + ratio: 10e-12, + expectedHashHex: "000000000afebff0bcb24a7fffffffffffffffffffffffffffffffffffffffff", + }, + { + desc: "Scale by 10e12", + targetHashHex: "000000000afebff0bcb24a7fffffffffffffffffffffffffffffffffffffffff", + ratio: 10e12, + expectedHashHex: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + }, + { + desc: "Maxes out at Difficulty1", + targetHashHex: "3fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + ratio: 10, + expectedHashHex: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + targetHashBz, targetErr := hex.DecodeString(test.targetHashHex) + require.NoError(t, targetErr) + + expectedBytes, expectedErr := hex.DecodeString(test.expectedHashHex) + require.NoError(t, expectedErr) + + scaledHash := scaleDifficultyTargetHash(targetHashBz, test.ratio) + assert.Equal(t, len(scaledHash), len(targetHashBz)) + require.Equalf(t, 0, bytes.Compare(scaledHash, expectedBytes), "expected hash %x, got %x", expectedBytes, scaledHash) + }) + } +} diff --git a/x/tokenomics/keeper/update_relay_mining_difficulty.go b/x/tokenomics/keeper/update_relay_mining_difficulty.go index 04268c342..3ad14365d 100644 --- 
a/x/tokenomics/keeper/update_relay_mining_difficulty.go +++ b/x/tokenomics/keeper/update_relay_mining_difficulty.go @@ -9,6 +9,7 @@ import ( sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/pokt-network/poktroll/pkg/crypto/protocol" prooftypes "github.com/pokt-network/poktroll/x/proof/types" "github.com/pokt-network/poktroll/x/tokenomics/types" ) @@ -141,9 +142,15 @@ func scaleDifficultyTargetHash(targetHash []byte, ratio float64) []byte { // Scale the target by multiplying it by the ratio. scaledTargetFloat := new(big.Float).Mul(targetFloat, big.NewFloat(ratio)) + // NB: Some precision is lost when converting back to an integer. scaledTargetInt, _ := scaledTargetFloat.Int(nil) scaledTargetHash := scaledTargetInt.Bytes() + // Ensure the scaled target hash maxes out at Difficulty1. + if len(scaledTargetHash) > len(targetHash) { + return protocol.Difficulty1HashBz + } + // Ensure the scaled target hash has the same length as the default target hash. if len(scaledTargetHash) < len(targetHash) { paddedTargetHash := make([]byte, len(targetHash)) From cc6468df5d247b613d4dcf2aa8ddf51d8883b5e9 Mon Sep 17 00:00:00 2001 From: Bryan White Date: Thu, 18 Jul 2024 08:49:38 +0200 Subject: [PATCH 05/29] refactor: protocol.NewRelayHasher, .RelayHashSize --- pkg/crypto/protocol/difficulty.go | 3 +-- pkg/crypto/protocol/difficulty_test.go | 3 +-- pkg/crypto/protocol/hash.go | 12 ++++++++---- pkg/crypto/protocol/hasher.go | 11 +++++++++++ x/proof/keeper/msg_server_submit_proof.go | 7 +++---- x/proof/keeper/msg_server_submit_proof_test.go | 3 +-- x/proof/types/params.go | 4 ++-- x/tokenomics/module/abci.go | 3 +-- 8 files changed, 28 insertions(+), 18 deletions(-) create mode 100644 pkg/crypto/protocol/hasher.go diff --git a/pkg/crypto/protocol/difficulty.go b/pkg/crypto/protocol/difficulty.go index eea1b62c7..f0259da94 100644 --- a/pkg/crypto/protocol/difficulty.go +++ b/pkg/crypto/protocol/difficulty.go @@ -1,7 +1,6 @@ package protocol import ( - "crypto/sha256" "encoding/hex" 
"math/big" ) @@ -18,7 +17,7 @@ var ( // GetDifficultyFromHash returns the "difficulty" of the given hash, with respect // to the "highest" (easiest) target hash, Difficulty1Hash. -func GetDifficultyFromHash(hashBz [sha256.Size]byte) int64 { +func GetDifficultyFromHash(hashBz [RelayHasherSize]byte) int64 { difficulty1HashInt := new(big.Int).SetBytes(Difficulty1HashBz) hashInt := new(big.Int).SetBytes(hashBz[:]) diff --git a/pkg/crypto/protocol/difficulty_test.go b/pkg/crypto/protocol/difficulty_test.go index 7aa9647d9..a9aa89cdd 100644 --- a/pkg/crypto/protocol/difficulty_test.go +++ b/pkg/crypto/protocol/difficulty_test.go @@ -1,7 +1,6 @@ package protocol import ( - "crypto/sha256" "encoding/hex" "math/big" "testing" @@ -44,7 +43,7 @@ func TestGetDifficultyFromHash(t *testing.T) { t.Fatalf("failed to decode hash: %v", err) } - var hashBz [sha256.Size]byte + var hashBz [RelayHasherSize]byte copy(hashBz[:], hashBytes) difficulty := GetDifficultyFromHash(hashBz) diff --git a/pkg/crypto/protocol/hash.go b/pkg/crypto/protocol/hash.go index bd3203437..a88380b77 100644 --- a/pkg/crypto/protocol/hash.go +++ b/pkg/crypto/protocol/hash.go @@ -1,10 +1,14 @@ package protocol -import "crypto/sha256" - // GetHashFromBytes returns the hash of the relay (full, request or response) bytes. // It is used as helper in the case that the relay is already marshaled and // centralizes the hasher used. -func GetHashFromBytes(relayBz []byte) [sha256.Size]byte { - return sha256.Sum256(relayBz) +func GetHashFromBytes(relayBz []byte) (hash [RelayHasherSize]byte) { + hasher := NewRelayHasher() + // NB: Intentionally ignoring the error, following sha256.Sum256 implementation. 
+ _, _ = hasher.Write(relayBz) + hashBz := hasher.Sum(nil) + copy(hash[:], hashBz) + + return hash } diff --git a/pkg/crypto/protocol/hasher.go b/pkg/crypto/protocol/hasher.go new file mode 100644 index 000000000..5b330344b --- /dev/null +++ b/pkg/crypto/protocol/hasher.go @@ -0,0 +1,11 @@ +package protocol + +import "crypto/sha256" + +const ( + RelayHasherSize = sha256.Size +) + +var ( + NewRelayHasher = sha256.New +) diff --git a/x/proof/keeper/msg_server_submit_proof.go b/x/proof/keeper/msg_server_submit_proof.go index 73921ba25..af695b46a 100644 --- a/x/proof/keeper/msg_server_submit_proof.go +++ b/x/proof/keeper/msg_server_submit_proof.go @@ -7,7 +7,6 @@ package keeper import ( "bytes" "context" - "crypto/sha256" "fmt" cosmoscryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" @@ -450,16 +449,16 @@ func verifyClosestProof( func validateRelayDifficulty(relayBz []byte, targetHash []byte) error { relayHash := protocol.GetHashFromBytes(relayBz) - if len(targetHash) != sha256.Size { + if len(targetHash) != protocol.RelayHasherSize { return types.ErrProofInvalidRelay.Wrapf( "invalid RelayDifficultyTargetHash: (%x); length wanted: %d; got: %d", targetHash, - sha256.Size, + protocol.RelayHasherSize, len(targetHash), ) } - var targetHashArr [sha256.Size]byte + var targetHashArr [protocol.RelayHasherSize]byte copy(targetHashArr[:], targetHash) // TODO_MAINNET: Devise a test that tries to attack the network and ensure that there diff --git a/x/proof/keeper/msg_server_submit_proof_test.go b/x/proof/keeper/msg_server_submit_proof_test.go index 23cf008b5..ec65b92b2 100644 --- a/x/proof/keeper/msg_server_submit_proof_test.go +++ b/x/proof/keeper/msg_server_submit_proof_test.go @@ -2,7 +2,6 @@ package keeper_test import ( "context" - "crypto/sha256" "encoding/hex" "os" "testing" @@ -568,7 +567,7 @@ func TestMsgServer_SubmitProof_Error(t *testing.T) { copy(wrongClosestProofPath, "wrong closest proof path") lowTargetHash, _ := 
hex.DecodeString("00000000000000000000000000000000000000000000000000000000000000ff") - var lowTargetHashArr [sha256.Size]byte + var lowTargetHashArr [protocol.RelayHasherSize]byte copy(lowTargetHashArr[:], lowTargetHash) highExpectedTargetDifficulty := protocol.GetDifficultyFromHash(lowTargetHashArr) diff --git a/x/proof/types/params.go b/x/proof/types/params.go index 4aca6858f..acc9c9b83 100644 --- a/x/proof/types/params.go +++ b/x/proof/types/params.go @@ -1,7 +1,6 @@ package types import ( - "crypto/sha256" "encoding/hex" "cosmossdk.io/math" @@ -10,6 +9,7 @@ import ( "github.com/pokt-network/poktroll/app/volatile" "github.com/pokt-network/poktroll/pkg/client" + "github.com/pokt-network/poktroll/pkg/crypto/protocol" ) var ( @@ -117,7 +117,7 @@ func ValidateRelayDifficultyTargetHash(v interface{}) error { return ErrProofParamInvalid.Wrapf("invalid parameter type: %T", v) } - if len(targetHash) != sha256.Size { + if len(targetHash) != protocol.RelayHasherSize { return ErrProofParamInvalid.Wrapf( "invalid RelayDifficultyTargetHash: (%x); length wanted: %d; got: %d", targetHash, diff --git a/x/tokenomics/module/abci.go b/x/tokenomics/module/abci.go index 264ce5ee7..7bf320928 100644 --- a/x/tokenomics/module/abci.go +++ b/x/tokenomics/module/abci.go @@ -1,7 +1,6 @@ package tokenomics import ( - "crypto/sha256" "fmt" sdk "github.com/cosmos/cosmos-sdk/types" @@ -83,7 +82,7 @@ func EndBlocker(ctx sdk.Context, k keeper.Keeper) (err error) { // Emit telemetry for each service's relay mining difficulty. 
for serviceId, newDifficulty := range difficultyPerServiceMap { - var newTargetHash [sha256.Size]byte + var newTargetHash [protocol.RelayHasherSize]byte copy(newTargetHash[:], newDifficulty.TargetHash) difficulty := protocol.GetDifficultyFromHash(newTargetHash) From 5675f817477c278911a61d2281eac4debed03c0b Mon Sep 17 00:00:00 2001 From: Bryan White Date: Fri, 19 Jul 2024 14:19:18 +0200 Subject: [PATCH 06/29] fix: ComputeNewDifficultyTargetHash() --- x/tokenomics/keeper/update_relay_mining_difficulty.go | 6 +++--- x/tokenomics/keeper/update_relay_mining_difficulty_test.go | 3 ++- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/x/tokenomics/keeper/update_relay_mining_difficulty.go b/x/tokenomics/keeper/update_relay_mining_difficulty.go index 3ad14365d..7d45fc512 100644 --- a/x/tokenomics/keeper/update_relay_mining_difficulty.go +++ b/x/tokenomics/keeper/update_relay_mining_difficulty.go @@ -69,7 +69,7 @@ func (k Keeper) UpdateRelayMiningDifficulty( // Compute the updated EMA of the number of relays. prevRelaysEma := prevDifficulty.NumRelaysEma newRelaysEma := computeEma(alpha, prevRelaysEma, numRelays) - difficultyHash := ComputeNewDifficultyTargetHash(TargetNumRelays, newRelaysEma) + difficultyHash := ComputeNewDifficultyTargetHash(prevDifficulty.TargetHash, TargetNumRelays, newRelaysEma) newDifficulty := types.RelayMiningDifficulty{ ServiceId: serviceId, BlockHeight: sdkCtx.BlockHeight(), @@ -117,7 +117,7 @@ func (k Keeper) UpdateRelayMiningDifficulty( // on the target number of relays we want the network to mine and the new EMA of // the number of relays. // NB: Exported for testing purposes only. -func ComputeNewDifficultyTargetHash(targetNumRelays, newRelaysEma uint64) []byte { +func ComputeNewDifficultyTargetHash(prevTargetHash []byte, targetNumRelays, newRelaysEma uint64) []byte { // The target number of relays we want the network to mine is greater than // the actual on-chain relays, so we don't need to scale to anything above // the default. 
@@ -129,7 +129,7 @@ func ComputeNewDifficultyTargetHash(targetNumRelays, newRelaysEma uint64) []byte ratio := float64(targetNumRelays) / float64(newRelaysEma) // Compute the new target hash by scaling the default target hash based on the ratio - newTargetHash := scaleDifficultyTargetHash(prooftypes.DefaultRelayDifficultyTargetHash, ratio) + newTargetHash := scaleDifficultyTargetHash(prevTargetHash, ratio) return newTargetHash } diff --git a/x/tokenomics/keeper/update_relay_mining_difficulty_test.go b/x/tokenomics/keeper/update_relay_mining_difficulty_test.go index 6842b0f1c..e42784282 100644 --- a/x/tokenomics/keeper/update_relay_mining_difficulty_test.go +++ b/x/tokenomics/keeper/update_relay_mining_difficulty_test.go @@ -9,6 +9,7 @@ import ( testutilevents "github.com/pokt-network/poktroll/testutil/events" keepertest "github.com/pokt-network/poktroll/testutil/keeper" + prooftypes "github.com/pokt-network/poktroll/x/proof/types" "github.com/pokt-network/poktroll/x/tokenomics/keeper" tokenomicskeeper "github.com/pokt-network/poktroll/x/tokenomics/keeper" "github.com/pokt-network/poktroll/x/tokenomics/types" @@ -215,7 +216,7 @@ func TestComputeNewDifficultyHash(t *testing.T) { for _, tt := range tests { t.Run(tt.desc, func(t *testing.T) { - result := keeper.ComputeNewDifficultyTargetHash(tt.numRelaysTarget, tt.relaysEma) + result := keeper.ComputeNewDifficultyTargetHash(prooftypes.DefaultRelayDifficultyTargetHash, tt.numRelaysTarget, tt.relaysEma) require.Lessf(t, bytes.Compare(result, tt.expectedDifficultyHash), 1, "expected difficulty.TargetHash (%x) to be less than or equal to expectedRelayMiningDifficulty.TargetHash (%x)", From 440ecc667dc6419f64a3b6ea35f8bd111e265b11 Mon Sep 17 00:00:00 2001 From: Bryan White Date: Fri, 19 Jul 2024 14:21:16 +0200 Subject: [PATCH 07/29] chore: review fee Co-authored-by: Daniel Olshansky --- pkg/crypto/protocol/difficulty.go | 3 ++- x/proof/types/params.go | 2 +- x/tokenomics/keeper/update_relay_mining_difficulty.go | 2 +- 3 
files changed, 4 insertions(+), 3 deletions(-) diff --git a/pkg/crypto/protocol/difficulty.go b/pkg/crypto/protocol/difficulty.go index f0259da94..05e519ad1 100644 --- a/pkg/crypto/protocol/difficulty.go +++ b/pkg/crypto/protocol/difficulty.go @@ -7,7 +7,7 @@ import ( var ( // Difficulty1HashBz is the chosen "highest" (easiest) target hash, which - // corresponds to the lowest possible difficulty. It effectively calibrates + // corresponds to the lowest possible difficulty. It effectively normalizes // the difficulty number (which is returned by GetDifficultyFromHash) by defining // the hash which corresponds to difficulty 1. // - https://bitcoin.stackexchange.com/questions/107976/bitcoin-difficulty-why-leading-0s @@ -17,6 +17,7 @@ var ( // GetDifficultyFromHash returns the "difficulty" of the given hash, with respect // to the "highest" (easiest) target hash, Difficulty1Hash. +// The resultant value is not used for any business logic but is simplify there to have a human-readable version of the hash. func GetDifficultyFromHash(hashBz [RelayHasherSize]byte) int64 { difficulty1HashInt := new(big.Int).SetBytes(Difficulty1HashBz) hashInt := new(big.Int).SetBytes(hashBz[:]) diff --git a/x/proof/types/params.go b/x/proof/types/params.go index acc9c9b83..22c0c4cfa 100644 --- a/x/proof/types/params.go +++ b/x/proof/types/params.go @@ -18,7 +18,7 @@ var ( KeyMinRelayDifficultyBits = []byte("MinRelayDifficultyBits") ParamRelayDifficultyTargetHash = "relay_difficulty_target_hash" - DefaultRelayDifficultyTargetHashHex = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + DefaultRelayDifficultyTargetHashHex = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" // all relays are payable DefaultRelayDifficultyTargetHash, _ = hex.DecodeString(DefaultRelayDifficultyTargetHashHex) // TODO_MAINNET(#142, #401): Determine the default value. 
KeyProofRequestProbability = []byte("ProofRequestProbability") ParamProofRequestProbability = "proof_request_probability" diff --git a/x/tokenomics/keeper/update_relay_mining_difficulty.go b/x/tokenomics/keeper/update_relay_mining_difficulty.go index 7d45fc512..c97021cb3 100644 --- a/x/tokenomics/keeper/update_relay_mining_difficulty.go +++ b/x/tokenomics/keeper/update_relay_mining_difficulty.go @@ -134,7 +134,7 @@ func ComputeNewDifficultyTargetHash(prevTargetHash []byte, targetNumRelays, newR return newTargetHash } -// scaleDifficultyTargetHash scales the default target hash based on the given ratio +// scaleDifficultyTargetHash scales the target hash based on the given ratio func scaleDifficultyTargetHash(targetHash []byte, ratio float64) []byte { // Convert targetHash to a big.Float to miminize precision loss. targetInt := new(big.Int).SetBytes(targetHash) From ee59e0d21e36baf821b4944c46ee4802b5444cd4 Mon Sep 17 00:00:00 2001 From: Bryan White Date: Fri, 19 Jul 2024 14:41:48 +0200 Subject: [PATCH 08/29] refactor: use big.Floats on-chain & cleanup --- x/tokenomics/keeper/scale_difficulty_test.go | 3 +- .../keeper/update_relay_mining_difficulty.go | 89 +++++++++---------- .../update_relay_mining_difficulty_test.go | 70 --------------- 3 files changed, 45 insertions(+), 117 deletions(-) diff --git a/x/tokenomics/keeper/scale_difficulty_test.go b/x/tokenomics/keeper/scale_difficulty_test.go index 898e33d35..00f3ee164 100644 --- a/x/tokenomics/keeper/scale_difficulty_test.go +++ b/x/tokenomics/keeper/scale_difficulty_test.go @@ -3,6 +3,7 @@ package keeper import ( "bytes" "encoding/hex" + "math/big" "testing" "github.com/stretchr/testify/assert" @@ -89,7 +90,7 @@ func TestScaleDifficultyTargetHash(t *testing.T) { expectedBytes, expectedErr := hex.DecodeString(test.expectedHashHex) require.NoError(t, expectedErr) - scaledHash := scaleDifficultyTargetHash(targetHashBz, test.ratio) + scaledHash := scaleDifficultyTargetHash(targetHashBz, 
new(big.Float).SetFloat64(test.ratio)) assert.Equal(t, len(scaledHash), len(targetHashBz)) require.Equalf(t, 0, bytes.Compare(scaledHash, expectedBytes), "expected hash %x, got %x", expectedBytes, scaledHash) }) diff --git a/x/tokenomics/keeper/update_relay_mining_difficulty.go b/x/tokenomics/keeper/update_relay_mining_difficulty.go index c97021cb3..7c136c491 100644 --- a/x/tokenomics/keeper/update_relay_mining_difficulty.go +++ b/x/tokenomics/keeper/update_relay_mining_difficulty.go @@ -14,22 +14,24 @@ import ( "github.com/pokt-network/poktroll/x/tokenomics/types" ) -const ( - // Exponential moving average (ema) smoothing factor, commonly known as alpha. - // Usually, alpha = 2 / (N+1), where N is the number of periods. - // Large alpha -> more weight on recent data; less smoothing and fast response. - // Small alpha -> more weight on past data; more smoothing and slow response. - emaSmoothingFactor = float64(0.1) - - // The target number of relays we want the network to mine for a specific - // service across all applications & suppliers per session. - // This number determines the total number of leafs to be created across in - // the off-chain SMTs, across all suppliers, for each service. - // It indirectly drives the off-chain resource requirements of the network - // in additional to playing a critical role in Relay Mining. - // TODO_BLOCKER(@Olshansk, #542): Make this a governance parameter. - TargetNumRelays = uint64(10e4) -) +// TargetNumRelays is the target number of relays we want the network to mine for +// a specific service across all applications & suppliers per session. +// This number determines the total number of leafs to be created across in +// the off-chain SMTs, across all suppliers, for each service. +// It indirectly drives the off-chain resource requirements of the network +// in additional to playing a critical role in Relay Mining. +// TODO_BLOCKER(@Olshansk, #542): Make this a governance parameter. 
+const TargetNumRelays = uint64(10e4) + +// Exponential moving average (ema) smoothing factor, commonly known as alpha. +// Usually, alpha = 2 / (N+1), where N is the number of periods. +// Large alpha -> more weight on recent data; less smoothing and fast response. +// Small alpha -> more weight on past data; more smoothing and slow response. +// +// TODO_MAINNET: Use a language agnostic float implementation or arithmetic library +// to ensure deterministic results across different language implementations of the +// protocol. +var emaSmoothingFactor = new(big.Float).SetFloat64(0.1) // UpdateRelayMiningDifficulty updates the on-chain relay mining difficulty // based on the amount of on-chain relays for each service, given a map of serviceId->numRelays. @@ -126,7 +128,13 @@ func ComputeNewDifficultyTargetHash(prevTargetHash []byte, targetNumRelays, newR } // Calculate the proportion of target relays to the new EMA - ratio := float64(targetNumRelays) / float64(newRelaysEma) + // TODO_MAINNET: Use a language agnostic float implementation or arithmetic library + // to ensure deterministic results across different language implementations of the + // protocol. + ratio := new(big.Float).Quo( + new(big.Float).SetUint64(targetNumRelays), + new(big.Float).SetUint64(newRelaysEma), + ) // Compute the new target hash by scaling the default target hash based on the ratio newTargetHash := scaleDifficultyTargetHash(prevTargetHash, ratio) @@ -135,13 +143,17 @@ func ComputeNewDifficultyTargetHash(prevTargetHash []byte, targetNumRelays, newR } // scaleDifficultyTargetHash scales the target hash based on the given ratio -func scaleDifficultyTargetHash(targetHash []byte, ratio float64) []byte { +// +// TODO_MAINNET: Use a language agnostic float implementation or arithmetic library +// to ensure deterministic results across different language implementations of the +// protocol. 
+func scaleDifficultyTargetHash(targetHash []byte, ratio *big.Float) []byte { // Convert targetHash to a big.Float to miminize precision loss. targetInt := new(big.Int).SetBytes(targetHash) targetFloat := new(big.Float).SetInt(targetInt) // Scale the target by multiplying it by the ratio. - scaledTargetFloat := new(big.Float).Mul(targetFloat, big.NewFloat(ratio)) + scaledTargetFloat := new(big.Float).Mul(targetFloat, ratio) // NB: Some precision is lost when converting back to an integer. scaledTargetInt, _ := scaledTargetFloat.Int(nil) scaledTargetHash := scaledTargetInt.Bytes() @@ -164,31 +176,16 @@ func scaleDifficultyTargetHash(targetHash []byte, ratio float64) []byte { // computeEma computes the EMA at time t, given the EMA at time t-1, the raw // data revealed at time t, and the smoothing factor α. // Src: https://en.wikipedia.org/wiki/Exponential_smoothing -func computeEma(alpha float64, prevEma, currValue uint64) uint64 { - return uint64(alpha*float64(currValue) + (1-alpha)*float64(prevEma)) -} - -// LeadingZeroBitsToTargetDifficultyHash generates a slice of bytes with the specified number of leading zero bits -// NB: Exported for testing purposes only. -func LeadingZeroBitsToTargetDifficultyHash(numLeadingZeroBits int, numBytes int) []byte { - targetDifficultyHah := make([]byte, numBytes) - - // Set everything to 1s initially - for i := range targetDifficultyHah { - targetDifficultyHah[i] = 0xff - } - - // Set full zero bytes - fullZeroBytes := numLeadingZeroBits / 8 - for i := 0; i < fullZeroBytes; i++ { - targetDifficultyHah[i] = 0 - } - - // Set remaining bits in the next byte - remainingZeroBits := numLeadingZeroBits % 8 - if remainingZeroBits > 0 { - targetDifficultyHah[fullZeroBytes] = byte(0xff >> remainingZeroBits) - } - - return targetDifficultyHah +// +// TODO_MAINNET: Use a language agnostic float implementation or arithmetic library +// to ensure deterministic results across different language implementations of the +// protocol. 
+func computeEma(alpha *big.Float, prevEma, currValue uint64) uint64 { + oneMinusAlpha := new(big.Float).Sub(new(big.Float).SetInt64(1), alpha) + prevEmaFloat := new(big.Float).SetUint64(prevEma) + + weightedCurrentContribution := new(big.Float).Mul(alpha, new(big.Float).SetUint64(currValue)) + weightedPreviousContribution := new(big.Float).Mul(oneMinusAlpha, prevEmaFloat) + newEma, _ := new(big.Float).Add(weightedCurrentContribution, weightedPreviousContribution).Uint64() + return newEma } diff --git a/x/tokenomics/keeper/update_relay_mining_difficulty_test.go b/x/tokenomics/keeper/update_relay_mining_difficulty_test.go index e42784282..d752ae5c8 100644 --- a/x/tokenomics/keeper/update_relay_mining_difficulty_test.go +++ b/x/tokenomics/keeper/update_relay_mining_difficulty_test.go @@ -226,76 +226,6 @@ func TestComputeNewDifficultyHash(t *testing.T) { } } -func TestLeadingZeroBitsToTargetDifficultyHash(t *testing.T) { - tests := []struct { - desc string - numLeadingZeroBits int - numBytes int - expectedDifficultyHash []byte - }{ - { - desc: "0 leading 0 bits in 1 byte", - numLeadingZeroBits: 0, - numBytes: 1, - expectedDifficultyHash: []byte{0b11111111}, - }, - { - desc: "full zero bytes (16 leading 0 bits in 32 bytes)", - numLeadingZeroBits: 16, - numBytes: 32, - expectedDifficultyHash: append( - []byte{0b00000000, 0b00000000}, - makeBytesFullOfOnes(30)..., - ), - }, - { - desc: "partial byte (20 leading 0 bits in 32 bytes)", - numLeadingZeroBits: 20, - numBytes: 32, - expectedDifficultyHash: append( - []byte{0b00000000, 0b00000000, 0b00001111}, - makeBytesFullOfOnes(29)..., - ), - }, - { - desc: "another partial byte (10 leading 0 bits in 32 bytes)", - numLeadingZeroBits: 10, - numBytes: 32, - expectedDifficultyHash: append( - []byte{0b00000000, 0b00111111}, - makeBytesFullOfOnes(30)..., - ), - }, - { - desc: "edge case 1 bit (1 leading 0 bits in 32 bytes)", - numLeadingZeroBits: 1, - numBytes: 32, - expectedDifficultyHash: append( - []byte{0b01111111}, - 
makeBytesFullOfOnes(31)..., - ), - }, - { - desc: "exact byte boundary (24 leading 0 bits in 32 bytes)", - numLeadingZeroBits: 24, - numBytes: 32, - expectedDifficultyHash: append( - []byte{0b00000000, 0b00000000, 0b00000000}, - makeBytesFullOfOnes(29)..., - ), - }, - } - - for _, tt := range tests { - t.Run(tt.desc, func(t *testing.T) { - result := keeper.LeadingZeroBitsToTargetDifficultyHash(tt.numLeadingZeroBits, tt.numBytes) - if !bytes.Equal(result, tt.expectedDifficultyHash) { - t.Errorf("got %x, expected %x", result, tt.expectedDifficultyHash) - } - }) - } -} - func makeBytesFullOfOnes(length int) []byte { result := make([]byte, length) for i := range result { From 69d97d34690db277496f00fc712d06f934c9b292 Mon Sep 17 00:00:00 2001 From: Bryan White Date: Fri, 19 Jul 2024 14:43:15 +0200 Subject: [PATCH 09/29] chore: review improvements --- x/tokenomics/keeper/update_relay_mining_difficulty.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x/tokenomics/keeper/update_relay_mining_difficulty.go b/x/tokenomics/keeper/update_relay_mining_difficulty.go index 7c136c491..deb6e20f9 100644 --- a/x/tokenomics/keeper/update_relay_mining_difficulty.go +++ b/x/tokenomics/keeper/update_relay_mining_difficulty.go @@ -136,7 +136,7 @@ func ComputeNewDifficultyTargetHash(prevTargetHash []byte, targetNumRelays, newR new(big.Float).SetUint64(newRelaysEma), ) - // Compute the new target hash by scaling the default target hash based on the ratio + // Compute the new target hash by scaling the previous target hash based on the ratio newTargetHash := scaleDifficultyTargetHash(prevTargetHash, ratio) return newTargetHash From 5a3cfd8ac0eb8e830e20bcf649c3ab38b2706e66 Mon Sep 17 00:00:00 2001 From: Bryan White Date: Fri, 19 Jul 2024 14:48:08 +0200 Subject: [PATCH 10/29] [TODO] chore: update `smt.MerkleRoot#Sum()` error handling (#672) Co-authored-by: Redouane Lakrache --- go.mod | 2 +- go.sum | 4 +- pkg/client/supplier/client_test.go | 4 +- pkg/crypto/protocol/hasher.go 
| 5 ++ pkg/relayer/session/sessiontree.go | 3 +- .../relay_mining_difficulty_test.go | 3 +- testutil/proof/fixture_generators.go | 39 +++++++------ x/application/keeper/auto_undelegate.go | 1 + .../keeper/msg_server_create_claim_test.go | 5 +- x/proof/types/claim.go | 29 +--------- .../keeper/settle_session_accounting.go | 55 ++++++++++++++----- .../keeper/settle_session_accounting_test.go | 37 ++++--------- 12 files changed, 95 insertions(+), 92 deletions(-) diff --git a/go.mod b/go.mod index 3fbc306fb..33c469839 100644 --- a/go.mod +++ b/go.mod @@ -57,7 +57,7 @@ require ( // repo is the first obvious idea, but has to be carefully considered, automated, and is not // a hard blocker. github.com/pokt-network/shannon-sdk v0.0.0-20240628223057-7d2928722749 - github.com/pokt-network/smt v0.11.1 + github.com/pokt-network/smt v0.12.0 github.com/pokt-network/smt/kvstore/badger v0.0.0-20240109205447-868237978c0b github.com/prometheus/client_golang v1.19.0 github.com/regen-network/gocuke v1.1.0 diff --git a/go.sum b/go.sum index fcb07cd82..5c9f760df 100644 --- a/go.sum +++ b/go.sum @@ -996,8 +996,8 @@ github.com/pokt-network/ring-go v0.1.0 h1:hF7mDR4VVCIqqDAsrloP8azM9y1mprc99YgnTj github.com/pokt-network/ring-go v0.1.0/go.mod h1:8NHPH7H3EwrPX3XHfpyRI6bz4gApkE3+fd0XZRbMWP0= github.com/pokt-network/shannon-sdk v0.0.0-20240628223057-7d2928722749 h1:V/3xzmykSABhAxRZLawWUoIPVlnp7EGCnCxFpLXD7R0= github.com/pokt-network/shannon-sdk v0.0.0-20240628223057-7d2928722749/go.mod h1:MfoRhzPRlxiaY3xQyZo28B7ibDuhricA//TGGy48TwM= -github.com/pokt-network/smt v0.11.1 h1:ySN8PjrPDKyvzLcX0qTHR2s5ReaZnjq25z0B7p6AWl0= -github.com/pokt-network/smt v0.11.1/go.mod h1:S4Ho4OPkK2v2vUCHNtA49XDjqUC/OFYpBbynRVYmxvA= +github.com/pokt-network/smt v0.12.0 h1:uqru/0ykC4LnBoMacakobNOd1iRK69PlohqjMtLmYNA= +github.com/pokt-network/smt v0.12.0/go.mod h1:S4Ho4OPkK2v2vUCHNtA49XDjqUC/OFYpBbynRVYmxvA= github.com/pokt-network/smt/kvstore/badger v0.0.0-20240109205447-868237978c0b 
h1:TjfgV3vgW0zW47Br/OgUXD4M8iyR74EYanbFfN4ed8o= github.com/pokt-network/smt/kvstore/badger v0.0.0-20240109205447-868237978c0b/go.mod h1:GbzcG5ebj8twKmBL1VzdPM4NS44okwYXBfQaVXT+6yU= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= diff --git a/pkg/client/supplier/client_test.go b/pkg/client/supplier/client_test.go index 122318bb4..09aa0dffb 100644 --- a/pkg/client/supplier/client_test.go +++ b/pkg/client/supplier/client_test.go @@ -2,7 +2,6 @@ package supplier_test import ( "context" - "crypto/sha256" "testing" "time" @@ -14,6 +13,7 @@ import ( "github.com/pokt-network/poktroll/pkg/client/keyring" "github.com/pokt-network/poktroll/pkg/client/supplier" + "github.com/pokt-network/poktroll/pkg/crypto/protocol" "github.com/pokt-network/poktroll/testutil/mockclient" "github.com/pokt-network/poktroll/testutil/testclient/testkeyring" "github.com/pokt-network/poktroll/testutil/testclient/testtx" @@ -181,7 +181,7 @@ func TestSupplierClient_SubmitProof(t *testing.T) { // Generating an ephemeral tree & spec just so we can submit // a proof of the right size. // TODO_TECHDEBT(#446): Centralize the configuration for the SMT spec. - tree := smt.NewSparseMerkleSumTrie(kvStore, sha256.New()) + tree := smt.NewSparseMerkleSumTrie(kvStore, protocol.NewTrieHasher()) emptyPath := make([]byte, tree.PathHasherSize()) proof, err := tree.ProveClosest(emptyPath) require.NoError(t, err) diff --git a/pkg/crypto/protocol/hasher.go b/pkg/crypto/protocol/hasher.go index 5b330344b..ce9c175f9 100644 --- a/pkg/crypto/protocol/hasher.go +++ b/pkg/crypto/protocol/hasher.go @@ -4,8 +4,13 @@ import "crypto/sha256" const ( RelayHasherSize = sha256.Size + TrieHasherSize = sha256.Size + TrieRootSize = TrieHasherSize + trieRootMetadataSize + // TODO_CONSIDERATION: Export this from the SMT package. 
+ trieRootMetadataSize = 16 ) var ( NewRelayHasher = sha256.New + NewTrieHasher = sha256.New ) diff --git a/pkg/relayer/session/sessiontree.go b/pkg/relayer/session/sessiontree.go index 5236225cd..b9ec7151e 100644 --- a/pkg/relayer/session/sessiontree.go +++ b/pkg/relayer/session/sessiontree.go @@ -11,6 +11,7 @@ import ( "github.com/pokt-network/smt" "github.com/pokt-network/smt/kvstore/badger" + "github.com/pokt-network/poktroll/pkg/crypto/protocol" "github.com/pokt-network/poktroll/pkg/relayer" sessiontypes "github.com/pokt-network/poktroll/x/session/types" ) @@ -85,7 +86,7 @@ func NewSessionTree( // Create the SMST from the KVStore and a nil value hasher so the proof would // contain a non-hashed Relay that could be used to validate the proof on-chain. - trie := smt.NewSparseMerkleSumTrie(treeStore, sha256.New(), smt.WithValueHasher(nil)) + trie := smt.NewSparseMerkleSumTrie(treeStore, protocol.NewTrieHasher(), smt.WithValueHasher(nil)) sessionTree := &sessionTree{ sessionHeader: sessionHeader, diff --git a/tests/integration/tokenomics/relay_mining_difficulty_test.go b/tests/integration/tokenomics/relay_mining_difficulty_test.go index bf28e6c86..d97f38949 100644 --- a/tests/integration/tokenomics/relay_mining_difficulty_test.go +++ b/tests/integration/tokenomics/relay_mining_difficulty_test.go @@ -2,7 +2,6 @@ package integration_test import ( "context" - "crypto/sha256" "testing" "github.com/pokt-network/smt" @@ -193,7 +192,7 @@ func prepareSMST( integrationApp.GetRingClient(), ) - trie := smt.NewSparseMerkleSumTrie(kvStore, sha256.New(), smt.WithValueHasher(nil)) + trie := smt.NewSparseMerkleSumTrie(kvStore, protocol.NewTrieHasher(), smt.WithValueHasher(nil)) err = trie.Update(minedRelay.Hash, minedRelay.Bytes, 1) require.NoError(t, err) diff --git a/testutil/proof/fixture_generators.go b/testutil/proof/fixture_generators.go index 96d263ce0..19855b8af 100644 --- a/testutil/proof/fixture_generators.go +++ b/testutil/proof/fixture_generators.go @@ -5,9 +5,11 @@ 
import ( "math/rand" "testing" - "github.com/pokt-network/smt" "github.com/stretchr/testify/require" + "github.com/pokt-network/smt" + + "github.com/pokt-network/poktroll/pkg/crypto/protocol" testsession "github.com/pokt-network/poktroll/testutil/session" prooftypes "github.com/pokt-network/poktroll/x/proof/types" sessiontypes "github.com/pokt-network/poktroll/x/session/types" @@ -51,32 +53,37 @@ func ClaimWithRandomHash(t *testing.T, appAddr, supplierAddr string, sum uint64) // TODO_MAINNET: Revisit if the SMT should be big or little Endian. Refs: // https://github.com/pokt-network/smt/pull/46#discussion_r1636975124 // https://github.com/pokt-network/smt/blob/ea585c6c3bc31c804b6bafa83e985e473b275580/smst.go#L23C10-L23C76 -func SmstRootWithSum(sum uint64) smt.MerkleRoot { - root := [smt.SmstRootSizeBytes]byte{} - // Insert the sum into the root hash - binary.BigEndian.PutUint64(root[smt.SmtRootSizeBytes:], sum) - // Insert the count into the root hash - // TODO_TECHDEBT: This is a hard-coded count of 1, but could be a parameter. - // TODO_TECHDEBT: We are assuming the sum takes up 8 bytes. - binary.BigEndian.PutUint64(root[smt.SmtRootSizeBytes+8:], 1) - return smt.MerkleRoot(root[:]) +func SmstRootWithSum(sum uint64) smt.MerkleSumRoot { + root := [protocol.TrieRootSize]byte{} + return encodeSum(root, sum) } // RandSmstRootWithSum returns a randomized SMST root with the given sum that // can be used for testing. Randomizing the root is a simple way to randomize // test claim hashes for testing proof requirement cases. -func RandSmstRootWithSum(t *testing.T, sum uint64) smt.MerkleRoot { +func RandSmstRootWithSum(t *testing.T, sum uint64) smt.MerkleSumRoot { t.Helper() - root := [smt.SmstRootSizeBytes]byte{} + root := [protocol.TrieRootSize]byte{} // Only populate the first 32 bytes with random data, leave the last 8 bytes for the sum. - _, err := rand.Read(root[:smt.SmtRootSizeBytes]) //nolint:staticcheck // We need a deterministic pseudo-random source. 
+ _, err := rand.Read(root[:protocol.TrieHasherSize]) //nolint:staticcheck // We need a deterministic pseudo-random source. require.NoError(t, err) - binary.BigEndian.PutUint64(root[smt.SmtRootSizeBytes:], sum) + return encodeSum(root, sum) +} + +// encodeSum returns a copy of the given root, binary encodes the given sum, +// and stores the encoded sum in the root copy. +func encodeSum(r [protocol.TrieRootSize]byte, sum uint64) smt.MerkleSumRoot { + root := make([]byte, protocol.TrieRootSize) + copy(root, r[:]) + + // Insert the sum into the root hash + binary.BigEndian.PutUint64(root[protocol.TrieHasherSize:], sum) // Insert the count into the root hash // TODO_TECHDEBT: This is a hard-coded count of 1, but could be a parameter. // TODO_TECHDEBT: We are assuming the sum takes up 8 bytes. - binary.BigEndian.PutUint64(root[smt.SmtRootSizeBytes+8:], 1) - return smt.MerkleRoot(root[:]) + binary.BigEndian.PutUint64(root[protocol.TrieHasherSize+8:], 1) + + return root } diff --git a/x/application/keeper/auto_undelegate.go b/x/application/keeper/auto_undelegate.go index dfe2fb67c..9e88cbb29 100644 --- a/x/application/keeper/auto_undelegate.go +++ b/x/application/keeper/auto_undelegate.go @@ -7,6 +7,7 @@ import ( sdk "github.com/cosmos/cosmos-sdk/types" proto "github.com/cosmos/gogoproto/proto" + gatewaytypes "github.com/pokt-network/poktroll/x/gateway/types" ) diff --git a/x/proof/keeper/msg_server_create_claim_test.go b/x/proof/keeper/msg_server_create_claim_test.go index 763d5ff6d..4ac0c4c72 100644 --- a/x/proof/keeper/msg_server_create_claim_test.go +++ b/x/proof/keeper/msg_server_create_claim_test.go @@ -5,11 +5,12 @@ import ( abci "github.com/cometbft/cometbft/abci/types" cosmostypes "github.com/cosmos/cosmos-sdk/types" - "github.com/pokt-network/smt" "github.com/stretchr/testify/require" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + "github.com/pokt-network/smt" + keepertest "github.com/pokt-network/poktroll/testutil/keeper" testproof 
"github.com/pokt-network/poktroll/testutil/proof" "github.com/pokt-network/poktroll/testutil/sample" @@ -490,7 +491,7 @@ func newTestClaimMsg( supplierAddr string, appAddr string, service *sharedtypes.Service, - merkleRoot smt.MerkleRoot, + merkleRoot smt.MerkleSumRoot, ) *types.MsgCreateClaim { t.Helper() diff --git a/x/proof/types/claim.go b/x/proof/types/claim.go index 724dbc2d3..a9c9bad3a 100644 --- a/x/proof/types/claim.go +++ b/x/proof/types/claim.go @@ -1,8 +1,6 @@ package types import ( - "fmt" - "github.com/cometbft/cometbft/crypto" "github.com/pokt-network/smt" @@ -11,36 +9,13 @@ import ( // GetNumComputeUnits returns the number of compute units for a given claim // as determined by the sum of the root hash. func (claim *Claim) GetNumComputeUnits() (numComputeUnits uint64, err error) { - // NB: smt.MerkleRoot#Sum() will panic if the root hash is not valid. - // Convert this panic into an error return. - defer func() { - if r := recover(); r != nil { - numComputeUnits = 0 - err = fmt.Errorf( - "unable to get sum of invalid merkle root: %x; error: %v", - claim.GetRootHash(), r, - ) - } - }() - - return smt.MerkleRoot(claim.GetRootHash()).Sum(), nil + return smt.MerkleSumRoot(claim.GetRootHash()).Sum() } // GetNumRelays returns the number of relays for a given claim // as determined by the count of the root hash. func (claim *Claim) GetNumRelays() (numRelays uint64, err error) { - // Convert this panic into an error return. - defer func() { - if r := recover(); r != nil { - numRelays = 0 - err = fmt.Errorf( - "unable to get count of invalid merkle root: %x; error: %v", - claim.GetRootHash(), r, - ) - } - }() - - return smt.MerkleRoot(claim.GetRootHash()).Count(), nil + return smt.MerkleSumRoot(claim.GetRootHash()).Count() } // GetHash returns the SHA-256 hash of the serialized claim. 
diff --git a/x/tokenomics/keeper/settle_session_accounting.go b/x/tokenomics/keeper/settle_session_accounting.go index abefd8eed..61dfb6aa6 100644 --- a/x/tokenomics/keeper/settle_session_accounting.go +++ b/x/tokenomics/keeper/settle_session_accounting.go @@ -6,6 +6,10 @@ import ( "cosmossdk.io/math" cosmostypes "github.com/cosmos/cosmos-sdk/types" + + "github.com/pokt-network/poktroll/app/volatile" + "github.com/pokt-network/poktroll/pkg/crypto/protocol" + "github.com/pokt-network/smt" "github.com/pokt-network/poktroll/telemetry" @@ -26,7 +30,7 @@ import ( func (k Keeper) SettleSessionAccounting( ctx context.Context, claim *prooftypes.Claim, -) error { +) (err error) { logger := k.Logger().With("method", "SettleSessionAccounting") settlementCoin := cosmostypes.NewCoin("upokt", math.NewInt(0)) @@ -34,7 +38,12 @@ func (k Keeper) SettleSessionAccounting( // This is emitted only when the function returns. defer telemetry.EventSuccessCounter( "settle_session_accounting", - func() float32 { return float32(settlementCoin.Amount.Int64()) }, + func() float32 { + if settlementCoin.Amount.BigInt() == nil { + return 0 + } + return float32(settlementCoin.Amount.Int64()) + }, func() bool { return isSuccessful }, ) @@ -50,7 +59,7 @@ func (k Keeper) SettleSessionAccounting( logger.Error("received a nil session header") return tokenomicstypes.ErrTokenomicsSessionHeaderNil } - if err := sessionHeader.ValidateBasic(); err != nil { + if err = sessionHeader.ValidateBasic(); err != nil { logger.Error("received an invalid session header", "error", err) return tokenomicstypes.ErrTokenomicsSessionHeaderInvalid } @@ -66,15 +75,19 @@ func (k Keeper) SettleSessionAccounting( } // Retrieve the sum of the root as a proxy into the amount of work done - root := (smt.MerkleRoot)(claim.GetRootHash()) + root := (smt.MerkleSumRoot)(claim.GetRootHash()) + + if !root.HasDigestSize(protocol.TrieHasherSize) { + return tokenomicstypes.ErrTokenomicsRootHashInvalid.Wrapf( + "root hash has invalid digest 
size (%d), expected (%d)", + root.DigestSize(), protocol.TrieHasherSize, + ) + } - // TODO_BLOCKER(@Olshansk): This check should be the responsibility of the SMST package - // since it's used to get compute units from the root hash. - if root == nil || len(root) != smt.SmstRootSizeBytes { - logger.Error(fmt.Sprintf("received an invalid root hash of size: %d", len(root))) - return tokenomicstypes.ErrTokenomicsRootHashInvalid + claimComputeUnits, err := root.Sum() + if err != nil { + return tokenomicstypes.ErrTokenomicsRootHashInvalid.Wrapf("%v", err) } - claimComputeUnits := root.Sum() // Helpers for logging the same metadata throughout this function calls logger = logger.With( @@ -96,7 +109,11 @@ func (k Keeper) SettleSessionAccounting( logger.Info(fmt.Sprintf("About to start settling claim for %d compute units", claimComputeUnits)) // Calculate the amount of tokens to mint & burn - settlementCoin = k.getCoinFromComputeUnits(ctx, root) + settlementCoin, err = k.getCoinFromComputeUnits(ctx, root) + if err != nil { + return err + } + settlementCoins := cosmostypes.NewCoins(settlementCoin) logger.Info(fmt.Sprintf( @@ -194,10 +211,20 @@ func (k Keeper) SettleSessionAccounting( return nil } -func (k Keeper) getCoinFromComputeUnits(ctx context.Context, root smt.MerkleRoot) cosmostypes.Coin { +func (k Keeper) getCoinFromComputeUnits(ctx context.Context, root smt.MerkleSumRoot) (cosmostypes.Coin, error) { // Retrieve the existing tokenomics params params := k.GetParams(ctx) - upokt := math.NewInt(int64(root.Sum() * params.ComputeUnitsToTokensMultiplier)) - return cosmostypes.NewCoin("upokt", upokt) + sum, err := root.Sum() + if err != nil { + return cosmostypes.Coin{}, err + } + + upokt := math.NewInt(int64(sum * params.ComputeUnitsToTokensMultiplier)) + + if upokt.IsNegative() { + return cosmostypes.Coin{}, tokenomicstypes.ErrTokenomicsRootHashInvalid.Wrap("sum * compute_units_to_tokens_multiplier is negative") + } + + return cosmostypes.NewCoin(volatile.DenomuPOKT, 
upokt), nil } diff --git a/x/tokenomics/keeper/settle_session_accounting_test.go b/x/tokenomics/keeper/settle_session_accounting_test.go index 480761b66..86a74b04f 100644 --- a/x/tokenomics/keeper/settle_session_accounting_test.go +++ b/x/tokenomics/keeper/settle_session_accounting_test.go @@ -11,9 +11,11 @@ import ( cosmostypes "github.com/cosmos/cosmos-sdk/types" authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" - "github.com/pokt-network/smt" "github.com/stretchr/testify/require" + "github.com/pokt-network/smt" + + "github.com/pokt-network/poktroll/pkg/crypto/protocol" testkeeper "github.com/pokt-network/poktroll/testutil/keeper" testproof "github.com/pokt-network/poktroll/testutil/proof" "github.com/pokt-network/poktroll/testutil/sample" @@ -300,11 +302,10 @@ func TestSettleSessionAccounting_AppNotFound(t *testing.T) { func TestSettleSessionAccounting_InvalidRoot(t *testing.T) { keeper, ctx, appAddr, supplierAddr := testkeeper.TokenomicsKeeperWithActorAddrs(t) - rootHashSizeBytes := smt.SmstRootSizeBytes // Define test cases tests := []struct { desc string - root []byte // smst.MerkleRoot + root []byte // smst.MerkleSumRoot errExpected bool }{ { @@ -313,19 +314,19 @@ func TestSettleSessionAccounting_InvalidRoot(t *testing.T) { errExpected: true, }, { - desc: fmt.Sprintf("Less than %d bytes", rootHashSizeBytes), - root: make([]byte, rootHashSizeBytes-1), // Less than expected number of bytes + desc: fmt.Sprintf("Less than %d bytes", protocol.TrieRootSize), + root: make([]byte, protocol.TrieRootSize-1), // Less than expected number of bytes errExpected: true, }, { - desc: fmt.Sprintf("More than %d bytes", rootHashSizeBytes), - root: make([]byte, rootHashSizeBytes+1), // More than expected number of bytes + desc: fmt.Sprintf("More than %d bytes", protocol.TrieRootSize), + root: make([]byte, protocol.TrieRootSize+1), // More than expected number of bytes errExpected: true, }, { desc: "correct size but 
empty", root: func() []byte { - root := make([]byte, rootHashSizeBytes) // All 0s + root := make([]byte, protocol.TrieRootSize) // All 0s return root[:] }(), errExpected: false, @@ -333,7 +334,7 @@ func TestSettleSessionAccounting_InvalidRoot(t *testing.T) { { desc: "correct size but invalid value", root: func() []byte { - return bytes.Repeat([]byte("a"), rootHashSizeBytes) + return bytes.Repeat([]byte("a"), protocol.TrieRootSize) }(), errExpected: true, }, @@ -350,26 +351,12 @@ func TestSettleSessionAccounting_InvalidRoot(t *testing.T) { // Iterate over each test case for _, test := range tests { t.Run(test.desc, func(t *testing.T) { - // Use defer-recover to catch any panic - defer func() { - if r := recover(); r != nil { - t.Errorf("Test panicked: %s", r) - } - }() - // Setup claim by copying the testproof.BaseClaim and updating the root claim := testproof.BaseClaim(appAddr, supplierAddr, 0) - claim.RootHash = smt.MerkleRoot(test.root[:]) + claim.RootHash = smt.MerkleSumRoot(test.root[:]) // Execute test function - err := func() (err error) { - defer func() { - if r := recover(); r != nil { - err = fmt.Errorf("panic occurred: %v", r) - } - }() - return keeper.SettleSessionAccounting(ctx, &claim) - }() + err := keeper.SettleSessionAccounting(ctx, &claim) // Assert the error if test.errExpected { From a5f9b616586da9aa435cd4ef8d974a387a4f9781 Mon Sep 17 00:00:00 2001 From: Daniel Olshansky Date: Fri, 19 Jul 2024 16:58:01 -0700 Subject: [PATCH 11/29] Last review of #690 --- api/poktroll/proof/params.pulsar.go | 1 + pkg/crypto/protocol/difficulty.go | 33 +++++++--- pkg/crypto/protocol/difficulty_test.go | 54 ++++++++++++++++- pkg/crypto/protocol/hash.go | 5 +- pkg/relayer/miner/gen/gen_fixtures.go | 1 + pkg/relayer/miner/miner.go | 11 ++-- pkg/relayer/miner/miner_test.go | 7 +-- proto/poktroll/proof/params.proto | 1 + testutil/testrelayer/relays.go | 4 +- x/proof/keeper/msg_server_submit_proof.go | 18 +++--- .../keeper/msg_server_submit_proof_test.go | 2 +- 
x/proof/types/params.go | 31 +++++----- x/proof/types/params.pb.go | 1 + x/service/types/relay.go | 18 +++--- .../keeper/msg_server_update_param_test.go | 1 + x/tokenomics/keeper/scale_difficulty_test.go | 14 +++-- x/tokenomics/keeper/settle_pending_claims.go | 1 + .../keeper/update_relay_mining_difficulty.go | 6 +- .../update_relay_mining_difficulty_test.go | 60 ++++++++++--------- x/tokenomics/module/abci.go | 29 +++++---- 20 files changed, 193 insertions(+), 105 deletions(-) diff --git a/api/poktroll/proof/params.pulsar.go b/api/poktroll/proof/params.pulsar.go index e75f23eac..b8162ea2b 100644 --- a/api/poktroll/proof/params.pulsar.go +++ b/api/poktroll/proof/params.pulsar.go @@ -626,6 +626,7 @@ type Params struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + // TODO_FOLLOWUP(@olshansk, #690): Either delete this or change it to be named "minimum" // relay_difficulty_target_hash is the maximum value a relay hash must be less than to be volume/reward applicable. RelayDifficultyTargetHash []byte `protobuf:"bytes,1,opt,name=relay_difficulty_target_hash,json=relayDifficultyTargetHash,proto3" json:"relay_difficulty_target_hash,omitempty"` // proof_request_probability is the probability of a session requiring a proof diff --git a/pkg/crypto/protocol/difficulty.go b/pkg/crypto/protocol/difficulty.go index 05e519ad1..90c3ec1c5 100644 --- a/pkg/crypto/protocol/difficulty.go +++ b/pkg/crypto/protocol/difficulty.go @@ -1,27 +1,44 @@ package protocol import ( + "bytes" "encoding/hex" "math/big" ) var ( - // Difficulty1HashBz is the chosen "highest" (easiest) target hash, which - // corresponds to the lowest possible difficulty. It effectively normalizes - // the difficulty number (which is returned by GetDifficultyFromHash) by defining - // the hash which corresponds to difficulty 1. + // BaseRelayDifficultyHashBz is the chosen "highest" (easiest) target hash, which + // corresponds to the lowest possible difficulty. 
+	//
+	// It effectively normalizes the difficulty number (which is returned by GetDifficultyFromHash)
+	// by defining the hash which corresponds to the base difficulty.
+	//
+	// When this is the difficulty of a particular service, all relays are reward / volume applicable.
+	//
+	// Bitcoin uses a similar concept, where the target hash is defined as the hash:
 	// - https://bitcoin.stackexchange.com/questions/107976/bitcoin-difficulty-why-leading-0s
 	// - https://bitcoin.stackexchange.com/questions/121920/is-it-always-possible-to-find-a-number-whose-hash-starts-with-a-certain-number-o
-	Difficulty1HashBz, _ = hex.DecodeString("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
+	BaseRelayDifficultyHashBz, _ = hex.DecodeString("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
 )
 
 // GetDifficultyFromHash returns the "difficulty" of the given hash, with respect
-// to the "highest" (easiest) target hash, Difficulty1Hash.
+// to the "highest" (easiest) target hash, BaseRelayDifficultyHash.
 // The resultant value is not used for any business logic but is simply there to have a human-readable version of the hash.
 func GetDifficultyFromHash(hashBz [RelayHasherSize]byte) int64 {
-	difficulty1HashInt := new(big.Int).SetBytes(Difficulty1HashBz)
+	baseRelayDifficultyHashInt := new(big.Int).SetBytes(BaseRelayDifficultyHashBz)
 	hashInt := new(big.Int).SetBytes(hashBz[:])
 
 	// difficulty is the ratio of the highest target hash to the given hash.
-	return new(big.Int).Div(difficulty1HashInt, hashInt).Int64()
+	return new(big.Int).Div(baseRelayDifficultyHashInt, hashInt).Int64()
+}
+
+// IsRelayVolumeApplicable returns true if the relay IS reward / volume applicable.
+// A relay is reward / volume applicable IFF its hash is less than the target hash.
+// - relayHash is the hash of the relay to be checked.
+// - targetHash is the hash of the relay difficulty target for a particular service.
+// +// TODO_MAINNET: Devise a test that tries to attack the network and ensure that +// there is sufficient telemetry. +func IsRelayVolumeApplicable(relayHash, targetHash []byte) bool { + return bytes.Compare(relayHash, targetHash) == -1 // True if relayHash < targetHash } diff --git a/pkg/crypto/protocol/difficulty_test.go b/pkg/crypto/protocol/difficulty_test.go index a9aa89cdd..199862ccf 100644 --- a/pkg/crypto/protocol/difficulty_test.go +++ b/pkg/crypto/protocol/difficulty_test.go @@ -32,7 +32,7 @@ func TestGetDifficultyFromHash(t *testing.T) { { desc: "Highest difficulty", hashHex: "0000000000000000000000000000000000000000000000000000000000000001", - expectedDifficulty: new(big.Int).SetBytes(Difficulty1HashBz).Int64(), + expectedDifficulty: new(big.Int).SetBytes(BaseRelayDifficultyHashBz).Int64(), }, } @@ -52,3 +52,55 @@ func TestGetDifficultyFromHash(t *testing.T) { }) } } + +func TestIsRelayVolumeApplicable(t *testing.T) { + tests := []struct { + desc string + relayHashHex string + targetHashHex string + expectedVolumeApplicable bool + }{ + { + desc: "Applicable: relayHash << targetHash", + relayHashHex: "000fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + targetHashHex: "00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + expectedVolumeApplicable: true, + }, + { + desc: "Applicable: relayHash < targetHash", + relayHashHex: "00efffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + targetHashHex: "00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + expectedVolumeApplicable: true, + }, + { + desc: "Not Applicable: relayHash = targetHash", + relayHashHex: "00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + targetHashHex: "00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + expectedVolumeApplicable: false, + }, + { + desc: "Not applicable: relayHash > targetHash", + relayHashHex: "0effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + targetHashHex: 
"00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + expectedVolumeApplicable: false, + }, + { + desc: "Not applicable: relayHash >> targetHash", + relayHashHex: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + targetHashHex: "00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + expectedVolumeApplicable: false, + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + relayHash, err := hex.DecodeString(test.relayHashHex) + require.NoError(t, err) + + targetHash, err := hex.DecodeString(test.targetHashHex) + require.NoError(t, err) + + require.Equal(t, test.expectedVolumeApplicable, IsRelayVolumeApplicable(relayHash, targetHash)) + }) + } +} diff --git a/pkg/crypto/protocol/hash.go b/pkg/crypto/protocol/hash.go index a88380b77..4ef69514c 100644 --- a/pkg/crypto/protocol/hash.go +++ b/pkg/crypto/protocol/hash.go @@ -1,10 +1,11 @@ package protocol -// GetHashFromBytes returns the hash of the relay (full, request or response) bytes. +// GetRelayHashFromBytes returns the hash of the relay (full, request or response) bytes. // It is used as helper in the case that the relay is already marshaled and // centralizes the hasher used. -func GetHashFromBytes(relayBz []byte) (hash [RelayHasherSize]byte) { +func GetRelayHashFromBytes(relayBz []byte) (hash [RelayHasherSize]byte) { hasher := NewRelayHasher() + // NB: Intentionally ignoring the error, following sha256.Sum256 implementation. _, _ = hasher.Write(relayBz) hashBz := hasher.Sum(nil) diff --git a/pkg/relayer/miner/gen/gen_fixtures.go b/pkg/relayer/miner/gen/gen_fixtures.go index 0531e4266..585639a54 100644 --- a/pkg/relayer/miner/gen/gen_fixtures.go +++ b/pkg/relayer/miner/gen/gen_fixtures.go @@ -32,6 +32,7 @@ const ( defaultOutPath = "relay_fixtures_test.go" ) +// TODO_FOLLOWUP(@olshansk, #690): Do a global anycase grep for "DifficultyBits" and update/remove things appropriately. 
var ( // flagDifficultyBitsThreshold is the number of leading zero bits that a // randomized, serialized relay must have to be included in the diff --git a/pkg/relayer/miner/miner.go b/pkg/relayer/miner/miner.go index a21aee91e..c5f7a5605 100644 --- a/pkg/relayer/miner/miner.go +++ b/pkg/relayer/miner/miner.go @@ -1,7 +1,6 @@ package miner import ( - "bytes" "context" "cosmossdk.io/depinject" @@ -30,6 +29,7 @@ type miner struct { // // TODO_MAINNET(#543): This is populated by querying the corresponding on-chain parameter during construction. // If this parameter is updated on-chain the relayminer will need to be restarted to query the new value. + // TODO_FOLLOWUP(@olshansk, #690): This needs to be maintained (and updated) on a per service level. relayDifficultyTargetHash []byte } @@ -109,16 +109,15 @@ func (mnr *miner) mapMineRelay( _ context.Context, relay *servicetypes.Relay, ) (_ either.Either[*relayer.MinedRelay], skip bool) { - // TODO_TECHDEBT(@red-0ne, #446): Centralize the configuration for the SMT spec. - // TODO_TECHDEBT(@red-0ne): marshal using canonical codec. 
relayBz, err := relay.Marshal() if err != nil { return either.Error[*relayer.MinedRelay](err), false } - relayHash := protocol.GetHashFromBytes(relayBz) + relayHashArr := protocol.GetRelayHashFromBytes(relayBz) + relayHash := relayHashArr[:] // The relay IS NOT volume / reward applicable - if bytes.Compare(relayHash[:], mnr.relayDifficultyTargetHash) == 1 { + if !protocol.IsRelayVolumeApplicable(relayHash, mnr.relayDifficultyTargetHash) { return either.Success[*relayer.MinedRelay](nil), true } @@ -126,6 +125,6 @@ func (mnr *miner) mapMineRelay( return either.Success(&relayer.MinedRelay{ Relay: *relay, Bytes: relayBz, - Hash: relayHash[:], + Hash: relayHash, }), false } diff --git a/pkg/relayer/miner/miner_test.go b/pkg/relayer/miner/miner_test.go index 8515d26c8..3b817a075 100644 --- a/pkg/relayer/miner/miner_test.go +++ b/pkg/relayer/miner/miner_test.go @@ -23,7 +23,7 @@ import ( servicetypes "github.com/pokt-network/poktroll/x/service/types" ) -var testTargetHash, _ = hex.DecodeString("0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") +var testRelayMiningTargetHash, _ = hex.DecodeString("0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") // TestMiner_MinedRelays constructs an observable of mined relays, through which // it pipes pre-mined relay fixtures. It asserts that the observable only emits @@ -43,7 +43,7 @@ func TestMiner_MinedRelays(t *testing.T) { proofQueryClientMock := testqueryclients.NewTestProofQueryClient(t) deps := depinject.Supply(proofQueryClientMock) - mnr, err := miner.NewMiner(deps, miner.WithRelayDifficultyTargetHash(testTargetHash)) + mnr, err := miner.NewMiner(deps, miner.WithRelayDifficultyTargetHash(testRelayMiningTargetHash)) require.NoError(t, err) minedRelays := mnr.MinedRelays(ctx, mockRelaysObs) @@ -134,8 +134,7 @@ func unmarshalHexMinedRelay( err = relay.Unmarshal(relayBz) require.NoError(t, err) - // TODO_TECHDEBT(@red-0ne, #446): Centralize the configuration for the SMT spec. 
- relayHashArr := protocol.GetHashFromBytes(relayBz) + relayHashArr := protocol.GetRelayHashFromBytes(relayBz) relayHash := relayHashArr[:] return &relayer.MinedRelay{ diff --git a/proto/poktroll/proof/params.proto b/proto/poktroll/proof/params.proto index 3e8f1bc17..8f8042d95 100644 --- a/proto/poktroll/proof/params.proto +++ b/proto/poktroll/proof/params.proto @@ -12,6 +12,7 @@ message Params { option (amino.name) = "poktroll/x/proof/Params"; option (gogoproto.equal) = true; + // TODO_FOLLOWUP(@olshansk, #690): Either delete this or change it to be named "minimum" // relay_difficulty_target_hash is the maximum value a relay hash must be less than to be volume/reward applicable. bytes relay_difficulty_target_hash = 1 [(gogoproto.jsontag) = "relay_difficulty_target_hash"]; diff --git a/testutil/testrelayer/relays.go b/testutil/testrelayer/relays.go index 8cc870b42..60fe06f30 100644 --- a/testutil/testrelayer/relays.go +++ b/testutil/testrelayer/relays.go @@ -57,7 +57,7 @@ func NewUnsignedMinedRelay( relayBz, err := relay.Marshal() require.NoError(t, err) - relayHashArr := protocol.GetHashFromBytes(relayBz) + relayHashArr := protocol.GetRelayHashFromBytes(relayBz) relayHash := relayHashArr[:] return &relayer.MinedRelay{ @@ -111,7 +111,7 @@ func NewSignedMinedRelay( relayBz, err := relay.Marshal() require.NoError(t, err) - relayHashArr := protocol.GetHashFromBytes(relayBz) + relayHashArr := protocol.GetRelayHashFromBytes(relayBz) relayHash := relayHashArr[:] return &relayer.MinedRelay{ diff --git a/x/proof/keeper/msg_server_submit_proof.go b/x/proof/keeper/msg_server_submit_proof.go index af695b46a..2e2e21fda 100644 --- a/x/proof/keeper/msg_server_submit_proof.go +++ b/x/proof/keeper/msg_server_submit_proof.go @@ -204,6 +204,7 @@ func (k msgServer) SubmitProof( logger.Debug("successfully verified relay response signature") // Get the proof module's governance parameters. 
+ // TODO_FOLLOWUP(@olshansk, #690): Get the difficulty associated with the service params := k.GetParams(ctx) // Verify the relay difficulty is above the minimum required to earn rewards. @@ -447,7 +448,8 @@ func verifyClosestProof( // TODO_TECHDEBT: Factor out the relay mining difficulty validation into a shared // function that can be used by both the proof and the miner packages. func validateRelayDifficulty(relayBz []byte, targetHash []byte) error { - relayHash := protocol.GetHashFromBytes(relayBz) + relayHashArr := protocol.GetRelayHashFromBytes(relayBz) + relayHash := relayHashArr[:] if len(targetHash) != protocol.RelayHasherSize { return types.ErrProofInvalidRelay.Wrapf( @@ -458,17 +460,15 @@ func validateRelayDifficulty(relayBz []byte, targetHash []byte) error { ) } - var targetHashArr [protocol.RelayHasherSize]byte - copy(targetHashArr[:], targetHash) + if !protocol.IsRelayVolumeApplicable(relayHash, targetHash) { + var targetHashArr [protocol.RelayHasherSize]byte + copy(targetHashArr[:], targetHash) - // TODO_MAINNET: Devise a test that tries to attack the network and ensure that there - // is sufficient telemetry. - // NB: If relayHash > targetHash, then the difficulty is less than the target difficulty. 
-	if bytes.Compare(relayHash[:], targetHash[:]) == 1 {
-		relayDifficulty := protocol.GetDifficultyFromHash(relayHash)
+		relayDifficulty := protocol.GetDifficultyFromHash(relayHashArr)
 		targetDifficulty := protocol.GetDifficultyFromHash(targetHashArr)
+
 		return types.ErrProofInvalidRelay.Wrapf(
-			"relay difficulty %d is less than the target difficulty %d",
+			"the difficulty of the relay being proven is (%d), which is smaller than the target difficulty (%d)",
 			relayDifficulty,
 			targetDifficulty,
 		)
diff --git a/x/proof/keeper/msg_server_submit_proof_test.go b/x/proof/keeper/msg_server_submit_proof_test.go
index ec65b92b2..50c51885a 100644
--- a/x/proof/keeper/msg_server_submit_proof_test.go
+++ b/x/proof/keeper/msg_server_submit_proof_test.go
@@ -48,7 +48,7 @@ var (
 	// - the relay difficulty target hash to the easiest difficulty so that these tests don't need to mine for valid relays.
 	// - the proof request probability to 1 so that all test sessions require a proof.
 	testProofParams = prooftypes.Params{
-		RelayDifficultyTargetHash: protocol.Difficulty1HashBz,
+		RelayDifficultyTargetHash: protocol.BaseRelayDifficultyHashBz,
 		ProofRequestProbability:   1,
 	}
 )
diff --git a/x/proof/types/params.go b/x/proof/types/params.go
index 22c0c4cfa..4dd893cb8 100644
--- a/x/proof/types/params.go
+++ b/x/proof/types/params.go
@@ -16,19 +16,22 @@ var (
 	_ client.ProofParams  = (*Params)(nil)
 	_ paramtypes.ParamSet = (*Params)(nil)
 
-	KeyMinRelayDifficultyBits = []byte("MinRelayDifficultyBits")
-	ParamRelayDifficultyTargetHash = "relay_difficulty_target_hash"
-	DefaultRelayDifficultyTargetHashHex = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" // all relays are payable
-	DefaultRelayDifficultyTargetHash, _ = hex.DecodeString(DefaultRelayDifficultyTargetHashHex) // TODO_MAINNET(#142, #401): Determine the default value.
- KeyProofRequestProbability = []byte("ProofRequestProbability") - ParamProofRequestProbability = "proof_request_probability" - DefaultProofRequestProbability float32 = 0.25 // See: https://github.com/pokt-network/pocket-core/blob/staging/docs/proposals/probabilistic_proofs.md - KeyProofRequirementThreshold = []byte("ProofRequirementThreshold") - ParamProofRequirementThreshold = "proof_requirement_threshold" - DefaultProofRequirementThreshold uint64 = 20 // See: https://github.com/pokt-network/pocket-core/blob/staging/docs/proposals/probabilistic_proofs.md - KeyProofMissingPenalty = []byte("ProofMissingPenalty") - ParamProofMissingPenalty = "proof_missing_penalty" - DefaultProofMissingPenalty = cosmostypes.NewCoin(volatile.DenomuPOKT, math.NewInt(320)) // See: https://github.com/pokt-network/pocket-core/blob/staging/docs/proposals/probabilistic_proofs.md + // TODO_FOLLOWUP(@olshansk, #690): Delete this parameter. + KeyRelayDifficultyTargetHash = []byte("RelayDifficultyTargetHash") + ParamRelayDifficultyTargetHash = "relay_difficulty_target_hash" + DefaultRelayDifficultyTargetHash, _ = hex.DecodeString("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") // all relays are payable + + KeyProofRequestProbability = []byte("ProofRequestProbability") + ParamProofRequestProbability = "proof_request_probability" + DefaultProofRequestProbability float32 = 0.25 // See: https://github.com/pokt-network/pocket-core/blob/staging/docs/proposals/probabilistic_proofs.md + + KeyProofRequirementThreshold = []byte("ProofRequirementThreshold") + ParamProofRequirementThreshold = "proof_requirement_threshold" + DefaultProofRequirementThreshold uint64 = 20 // See: https://github.com/pokt-network/pocket-core/blob/staging/docs/proposals/probabilistic_proofs.md + + KeyProofMissingPenalty = []byte("ProofMissingPenalty") + ParamProofMissingPenalty = "proof_missing_penalty" + DefaultProofMissingPenalty = cosmostypes.NewCoin(volatile.DenomuPOKT, math.NewInt(320)) // See: 
https://github.com/pokt-network/pocket-core/blob/staging/docs/proposals/probabilistic_proofs.md ) // ParamKeyTable the param key table for launch module @@ -65,7 +68,7 @@ func DefaultParams() Params { func (p *Params) ParamSetPairs() paramtypes.ParamSetPairs { return paramtypes.ParamSetPairs{ paramtypes.NewParamSetPair( - KeyMinRelayDifficultyBits, + KeyRelayDifficultyTargetHash, &p.RelayDifficultyTargetHash, ValidateRelayDifficultyTargetHash, ), diff --git a/x/proof/types/params.pb.go b/x/proof/types/params.pb.go index 9718a5496..a82615004 100644 --- a/x/proof/types/params.pb.go +++ b/x/proof/types/params.pb.go @@ -29,6 +29,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package // Params defines the parameters for the module. type Params struct { + // TODO_FOLLOWUP(@olshansk, #690): Either delete this or change it to be named "minimum" // relay_difficulty_target_hash is the maximum value a relay hash must be less than to be volume/reward applicable. RelayDifficultyTargetHash []byte `protobuf:"bytes,1,opt,name=relay_difficulty_target_hash,json=relayDifficultyTargetHash,proto3" json:"relay_difficulty_target_hash"` // proof_request_probability is the probability of a session requiring a proof diff --git a/x/service/types/relay.go b/x/service/types/relay.go index f1a4ac271..b43ded9f7 100644 --- a/x/service/types/relay.go +++ b/x/service/types/relay.go @@ -11,30 +11,30 @@ import ( // GetHash returns the hash of the relay, which contains both the signed // relay request and the relay response. It is used as the key for insertion // into the SMT. 
-func (relay *Relay) GetHash() ([32]byte, error) { +func (relay *Relay) GetHash() ([protocol.RelayHasherSize]byte, error) { relayBz, err := relay.Marshal() if err != nil { - return [32]byte{}, err + return [protocol.RelayHasherSize]byte{}, err } - return protocol.GetHashFromBytes(relayBz), nil + return protocol.GetRelayHashFromBytes(relayBz), nil } // GetSignableBytesHash returns the hash of the signable bytes of the relay request // Hashing the marshaled request message guarantees that the signable bytes are // always of a constant and expected length. -func (req RelayRequest) GetSignableBytesHash() ([32]byte, error) { +func (req RelayRequest) GetSignableBytesHash() ([protocol.RelayHasherSize]byte, error) { // req and req.Meta are not pointers, so we can set the signature to nil // in order to generate the signable bytes hash without the need restore it. req.Meta.Signature = nil requestBz, err := req.Marshal() if err != nil { - return [32]byte{}, err + return [protocol.RelayHasherSize]byte{}, err } // return the marshaled request hash to guarantee that the signable bytes // are always of a constant and expected length - return protocol.GetHashFromBytes(requestBz), nil + return protocol.GetRelayHashFromBytes(requestBz), nil } // ValidateBasic performs basic validation of the RelayResponse Meta, SessionHeader @@ -65,18 +65,18 @@ func (req *RelayRequest) ValidateBasic() error { // GetSignableBytesHash returns the hash of the signable bytes of the relay response // Hashing the marshaled response message guarantees that the signable bytes are // always of a constant and expected length. -func (res RelayResponse) GetSignableBytesHash() ([32]byte, error) { +func (res RelayResponse) GetSignableBytesHash() ([protocol.RelayHasherSize]byte, error) { // res and res.Meta are not pointers, so we can set the signature to nil // in order to generate the signable bytes hash without the need restore it. 
 	res.Meta.SupplierSignature = nil
 
 	responseBz, err := res.Marshal()
 	if err != nil {
-		return [32]byte{}, err
+		return [protocol.RelayHasherSize]byte{}, err
 	}
 
 	// return the marshaled response hash to guarantee that the signable bytes
 	// are always of a constant and expected length
-	return protocol.GetHashFromBytes(responseBz), nil
+	return protocol.GetRelayHashFromBytes(responseBz), nil
 }
 
 // ValidateBasic performs basic validation of the RelayResponse Meta, SessionHeader
diff --git a/x/tokenomics/keeper/msg_server_update_param_test.go b/x/tokenomics/keeper/msg_server_update_param_test.go
index f2237bfc1..253ea20fa 100644
--- a/x/tokenomics/keeper/msg_server_update_param_test.go
+++ b/x/tokenomics/keeper/msg_server_update_param_test.go
@@ -11,6 +11,7 @@ import (
 	tokenomicstypes "github.com/pokt-network/poktroll/x/tokenomics/types"
 )
 
+// TODO_UPNEXT
 func TestMsgUpdateParam_UpdateMinRelayDifficultyBitsOnly(t *testing.T) {
 	var expectedComputeUnitsToTokensMultiplier int64 = 8
 
diff --git a/x/tokenomics/keeper/scale_difficulty_test.go b/x/tokenomics/keeper/scale_difficulty_test.go
index 00f3ee164..7b2cb4cf6 100644
--- a/x/tokenomics/keeper/scale_difficulty_test.go
+++ b/x/tokenomics/keeper/scale_difficulty_test.go
@@ -13,6 +13,7 @@ import (
 // TestScaleDifficultyTargetHash tests the scaling of a target hash by a given ratio.
 // Some expectations are manually adjusted to account for some precision loss in the
 // implementation.
+// TODO_FOLLOWUP(@olshansk, #690): Ensure that the ratio corresponds to the probability of a relay being accepted. If not, explain why.
func TestScaleDifficultyTargetHash(t *testing.T) { tests := []struct { desc string @@ -75,7 +76,7 @@ func TestScaleDifficultyTargetHash(t *testing.T) { expectedHashHex: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", }, { - desc: "Maxes out at Difficulty1", + desc: "Maxes out at BaseRelayDifficulty", targetHashHex: "3fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", ratio: 10, expectedHashHex: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", @@ -87,12 +88,15 @@ func TestScaleDifficultyTargetHash(t *testing.T) { targetHashBz, targetErr := hex.DecodeString(test.targetHashHex) require.NoError(t, targetErr) - expectedBytes, expectedErr := hex.DecodeString(test.expectedHashHex) + expectedHashBz, expectedErr := hex.DecodeString(test.expectedHashHex) require.NoError(t, expectedErr) - scaledHash := scaleDifficultyTargetHash(targetHashBz, new(big.Float).SetFloat64(test.ratio)) - assert.Equal(t, len(scaledHash), len(targetHashBz)) - require.Equalf(t, 0, bytes.Compare(scaledHash, expectedBytes), "expected hash %x, got %x", expectedBytes, scaledHash) + scaledDifficultyHash := scaleDifficultyTargetHash(targetHashBz, new(big.Float).SetFloat64(test.ratio)) + assert.Equal(t, len(scaledDifficultyHash), len(targetHashBz)) + + // Ensure the scaled difficulty hash equals the one provided + require.Zero(t, bytes.Compare(expectedHashBz, scaledDifficultyHash), + "expected difficulty hash %x, but got %x", expectedHashBz, scaledDifficultyHash) }) } } diff --git a/x/tokenomics/keeper/settle_pending_claims.go b/x/tokenomics/keeper/settle_pending_claims.go index 0ad62634c..7fe1a3c53 100644 --- a/x/tokenomics/keeper/settle_pending_claims.go +++ b/x/tokenomics/keeper/settle_pending_claims.go @@ -104,6 +104,7 @@ func (k Keeper) SettlePendingClaims(ctx sdk.Context) ( expiredResult.NumComputeUnits += numClaimComputeUnits continue } + // TODO_FOLLOWUP(@olshansk, #690): Document the potential changes needed here. 
// NB: If a proof is found, it is valid because verification is done // at the time of submission. } diff --git a/x/tokenomics/keeper/update_relay_mining_difficulty.go b/x/tokenomics/keeper/update_relay_mining_difficulty.go index deb6e20f9..b222b4f37 100644 --- a/x/tokenomics/keeper/update_relay_mining_difficulty.go +++ b/x/tokenomics/keeper/update_relay_mining_difficulty.go @@ -127,7 +127,7 @@ func ComputeNewDifficultyTargetHash(prevTargetHash []byte, targetNumRelays, newR return prooftypes.DefaultRelayDifficultyTargetHash } - // Calculate the proportion of target relays to the new EMA + // Calculate the proportion of target relays relative to the EMA of actual volume applicable relays // TODO_MAINNET: Use a language agnostic float implementation or arithmetic library // to ensure deterministic results across different language implementations of the // protocol. @@ -158,9 +158,9 @@ func scaleDifficultyTargetHash(targetHash []byte, ratio *big.Float) []byte { scaledTargetInt, _ := scaledTargetFloat.Int(nil) scaledTargetHash := scaledTargetInt.Bytes() - // Ensure the scaled target hash maxes out at Difficulty1. + // Ensure the scaled target hash maxes out at BaseRelayDifficulty if len(scaledTargetHash) > len(targetHash) { - return protocol.Difficulty1HashBz + return protocol.BaseRelayDifficultyHashBz } // Ensure the scaled target hash has the same length as the default target hash. 
diff --git a/x/tokenomics/keeper/update_relay_mining_difficulty_test.go b/x/tokenomics/keeper/update_relay_mining_difficulty_test.go index d752ae5c8..fef8272ec 100644 --- a/x/tokenomics/keeper/update_relay_mining_difficulty_test.go +++ b/x/tokenomics/keeper/update_relay_mining_difficulty_test.go @@ -143,15 +143,17 @@ func TestUpdateRelayMiningDifficulty_FirstDifficulty(t *testing.T) { _, err := keeper.UpdateRelayMiningDifficulty(ctx, relaysPerServiceMap) require.NoError(t, err) - difficulty, found := keeper.GetRelayMiningDifficulty(ctx, "svc1") + relayDifficulty, found := keeper.GetRelayMiningDifficulty(ctx, "svc1") require.True(t, found) - require.Equal(t, tt.numRelays, difficulty.NumRelaysEma) - require.Equal(t, tt.expectedRelayMiningDifficulty.NumRelaysEma, difficulty.NumRelaysEma) + require.Equal(t, tt.numRelays, relayDifficulty.NumRelaysEma) + require.Equal(t, tt.expectedRelayMiningDifficulty.NumRelaysEma, relayDifficulty.NumRelaysEma) - require.Lessf(t, bytes.Compare(difficulty.TargetHash, tt.expectedRelayMiningDifficulty.TargetHash), 1, + // NB: An increase in difficulty is indicated by a decrease in the target hash + didDifficultyIncrease := bytes.Compare(relayDifficulty.TargetHash, tt.expectedRelayMiningDifficulty.TargetHash) < 1 + require.True(t, didDifficultyIncrease, "expected difficulty.TargetHash (%x) to be less than or equal to expectedRelayMiningDifficulty.TargetHash (%x)", - difficulty.TargetHash, tt.expectedRelayMiningDifficulty.TargetHash, + relayDifficulty.TargetHash, tt.expectedRelayMiningDifficulty.TargetHash, ) }) } @@ -159,28 +161,28 @@ func TestUpdateRelayMiningDifficulty_FirstDifficulty(t *testing.T) { func TestComputeNewDifficultyHash(t *testing.T) { tests := []struct { - desc string - numRelaysTarget uint64 - relaysEma uint64 - expectedDifficultyHash []byte + desc string + numRelaysTarget uint64 + relaysEma uint64 + expectedRelayDifficultyHash []byte }{ { - desc: "Relays Target > Relays EMA", - numRelaysTarget: 100, - relaysEma: 50, - 
expectedDifficultyHash: defaultDifficulty(), + desc: "Relays Target > Relays EMA", + numRelaysTarget: 100, + relaysEma: 50, + expectedRelayDifficultyHash: defaultDifficulty(), }, { - desc: "Relays Target == Relays EMA", - numRelaysTarget: 100, - relaysEma: 100, - expectedDifficultyHash: defaultDifficulty(), + desc: "Relays Target == Relays EMA", + numRelaysTarget: 100, + relaysEma: 100, + expectedRelayDifficultyHash: defaultDifficulty(), }, { desc: "Relays Target < Relays EMA", numRelaysTarget: 50, relaysEma: 100, - expectedDifficultyHash: append( + expectedRelayDifficultyHash: append( []byte{0b01111111}, makeBytesFullOfOnes(31)..., ), @@ -189,7 +191,7 @@ func TestComputeNewDifficultyHash(t *testing.T) { desc: "Relays Target << Relays EMA", numRelaysTarget: 50, relaysEma: 200, - expectedDifficultyHash: append( + expectedRelayDifficultyHash: append( []byte{0b00111111}, makeBytesFullOfOnes(31)..., ), @@ -198,7 +200,7 @@ func TestComputeNewDifficultyHash(t *testing.T) { desc: "Relays Target << Relays EMA", numRelaysTarget: 50, relaysEma: 1000, - expectedDifficultyHash: append( + expectedRelayDifficultyHash: append( []byte{0b00001111}, makeBytesFullOfOnes(31)..., ), @@ -207,7 +209,7 @@ func TestComputeNewDifficultyHash(t *testing.T) { desc: "Relays Target << Relays EMA", numRelaysTarget: 50, relaysEma: 10000, - expectedDifficultyHash: append( + expectedRelayDifficultyHash: append( []byte{0b00000001}, makeBytesFullOfOnes(31)..., ), @@ -216,22 +218,24 @@ func TestComputeNewDifficultyHash(t *testing.T) { for _, tt := range tests { t.Run(tt.desc, func(t *testing.T) { - result := keeper.ComputeNewDifficultyTargetHash(prooftypes.DefaultRelayDifficultyTargetHash, tt.numRelaysTarget, tt.relaysEma) + newRelayDifficultyTargetHash := keeper.ComputeNewDifficultyTargetHash(prooftypes.DefaultRelayDifficultyTargetHash, tt.numRelaysTarget, tt.relaysEma) - require.Lessf(t, bytes.Compare(result, tt.expectedDifficultyHash), 1, + // NB: An increase in difficulty is indicated by a decrease 
in the target hash + didDifficultyIncrease := bytes.Compare(newRelayDifficultyTargetHash, tt.expectedRelayDifficultyHash) < 1 + require.True(t, didDifficultyIncrease, "expected difficulty.TargetHash (%x) to be less than or equal to expectedRelayMiningDifficulty.TargetHash (%x)", - result, tt.expectedDifficultyHash, + newRelayDifficultyTargetHash, tt.expectedRelayDifficultyHash, ) }) } } func makeBytesFullOfOnes(length int) []byte { - result := make([]byte, length) - for i := range result { - result[i] = 0b11111111 + output := make([]byte, length) + for i := range output { + output[i] = 0b11111111 } - return result + return output } func defaultDifficulty() []byte { diff --git a/x/tokenomics/module/abci.go b/x/tokenomics/module/abci.go index 7bf320928..1f40f2895 100644 --- a/x/tokenomics/module/abci.go +++ b/x/tokenomics/module/abci.go @@ -14,6 +14,7 @@ import ( // EndBlocker called at every block and settles all pending claims. func EndBlocker(ctx sdk.Context, k keeper.Keeper) (err error) { logger := k.Logger().With("method", "EndBlocker") + // NB: There are two main reasons why we settle expiring claims in the end // instead of when a proof is submitted: // 1. Logic - Probabilistic proof allows claims to be settled (i.e. rewarded) @@ -26,7 +27,13 @@ func EndBlocker(ctx sdk.Context, k keeper.Keeper) (err error) { return err } - // Defer telemetry calls so that they reference the final values the relevant variables. + logger.Info(fmt.Sprintf( + "settled %d claims and expired %d claims", + settledResult.NumClaims, + expiredResult.NumClaims, + )) + + // Telemetry - defer telemetry calls so that they reference the final values the relevant variables. defer func() { telemetry.ClaimCounter( prooftypes.ClaimProofStage_SETTLED, @@ -62,12 +69,6 @@ func EndBlocker(ctx sdk.Context, k keeper.Keeper) (err error) { // TODO_IMPROVE(#observability): Add a counter for expired compute units. 
}() - logger.Info(fmt.Sprintf( - "settled %d claims and expired %d claims", - settledResult.NumClaims, - expiredResult.NumClaims, - )) - // Update the relay mining difficulty for every service that settled pending // claims based on how many estimated relays were serviced for it. difficultyPerServiceMap, err := k.UpdateRelayMiningDifficulty(ctx, settledResult.RelaysPerServiceMap) @@ -80,14 +81,16 @@ func EndBlocker(ctx sdk.Context, k keeper.Keeper) (err error) { len(settledResult.RelaysPerServiceMap), )) - // Emit telemetry for each service's relay mining difficulty. - for serviceId, newDifficulty := range difficultyPerServiceMap { - var newTargetHash [protocol.RelayHasherSize]byte - copy(newTargetHash[:], newDifficulty.TargetHash) + // Telemetry - emit telemetry for each service's relay mining difficulty. + for serviceId, newRelayMiningDifficulty := range difficultyPerServiceMap { + var newRelayMiningTargetHash [protocol.RelayHasherSize]byte + copy(newRelayMiningTargetHash[:], newRelayMiningDifficulty.TargetHash) - difficulty := protocol.GetDifficultyFromHash(newTargetHash) + // NB: The difficulty integer is just a human readable interpretation of + // the target hash and is not actually used for business logic. 
+ difficulty := protocol.GetDifficultyFromHash(newRelayMiningTargetHash) telemetry.RelayMiningDifficultyGauge(difficulty, serviceId) - telemetry.RelayEMAGauge(newDifficulty.NumRelaysEma, serviceId) + telemetry.RelayEMAGauge(newRelayMiningDifficulty.NumRelaysEma, serviceId) } return nil From c365a4f04dc0814c80fbc5c9f82a6bf248f0f3ef Mon Sep 17 00:00:00 2001 From: Daniel Olshansky Date: Fri, 19 Jul 2024 17:27:35 -0700 Subject: [PATCH 12/29] Self review --- Makefile | 11 ++++++++--- pkg/client/events/query_client_test.go | 1 - pkg/relayer/miner/gen/gen_fixtures.go | 1 - .../tokenomics/relay_mining_difficulty_test.go | 2 -- testutil/testrelayer/relays.go | 1 - x/proof/types/params.go | 3 ++- x/tokenomics/keeper/msg_server_update_param_test.go | 2 +- 7 files changed, 11 insertions(+), 10 deletions(-) diff --git a/Makefile b/Makefile index 1a46832ec..0b46f84ef 100644 --- a/Makefile +++ b/Makefile @@ -503,17 +503,22 @@ go_develop_and_test: go_develop test_all ## Generate protos, mocks and run all t # TODO_DISCUSS_IN_THIS_COMMIT - SHOULD NEVER BE COMMITTED TO MASTER. It is a way for the reviewer of a PR to start / reply to a discussion. # TODO_IN_THIS_COMMIT - SHOULD NEVER BE COMMITTED TO MASTER. It is a way to start the review process while non-critical changes are still in progress + +# Define shared variable for the exclude parameters +EXCLUDE_GREP = --exclude-dir={.git,vendor,./docusaurus,.vscode,.idea} --exclude={Makefile,reviewdog.yml,*.pb.go,*.pulsar.go} + .PHONY: todo_list todo_list: ## List all the TODOs in the project (excludes vendor and prototype directories) - grep --exclude-dir={.git,vendor,./docusaurus} -r TODO . + grep -r $(EXCLUDE_GREP) TODO . | grep -v 'TODO()' .PHONY: todo_count todo_count: ## Print a count of all the TODOs in the project - grep --exclude-dir={.git,vendor,./docusaurus} -r TODO . | wc -l + grep -r $(EXCLUDE_GREP) TODO . 
| grep -v 'TODO()' | wc -l .PHONY: todo_this_commit todo_this_commit: ## List all the TODOs needed to be done in this commit - grep -n --exclude-dir={.git,vendor,.vscode,.idea} --exclude={Makefile,reviewdog.yml} -r -e "TODO_IN_THIS_" + grep -r $(EXCLUDE_GREP) TODO_IN_THIS .| grep -v 'TODO()' + #################### ### Gateways ### diff --git a/pkg/client/events/query_client_test.go b/pkg/client/events/query_client_test.go index 2c868ac42..2130d350b 100644 --- a/pkg/client/events/query_client_test.go +++ b/pkg/client/events/query_client_test.go @@ -366,7 +366,6 @@ func behavesLikeEitherObserver[V any]( require.NoError(t, err) require.Equal(t, notificationsLimit, int(atomic.LoadInt32(&eventsCounter))) - // TODO_THIS_COMMIT: is this necessary? time.Sleep(10 * time.Millisecond) if onLimit != nil { diff --git a/pkg/relayer/miner/gen/gen_fixtures.go b/pkg/relayer/miner/gen/gen_fixtures.go index 585639a54..9c6e7fbe5 100644 --- a/pkg/relayer/miner/gen/gen_fixtures.go +++ b/pkg/relayer/miner/gen/gen_fixtures.go @@ -153,7 +153,6 @@ func genRandomizedMinedRelayFixtures( Res: nil, } - // TODO_TECHDEBT(@red-0ne): use canonical codec. relayBz, err := relay.Marshal() if err != nil { errCh <- err diff --git a/tests/integration/tokenomics/relay_mining_difficulty_test.go b/tests/integration/tokenomics/relay_mining_difficulty_test.go index d97f38949..3bc9a82c5 100644 --- a/tests/integration/tokenomics/relay_mining_difficulty_test.go +++ b/tests/integration/tokenomics/relay_mining_difficulty_test.go @@ -21,8 +21,6 @@ import ( tokenomicstypes "github.com/pokt-network/poktroll/x/tokenomics/types" ) -// TODO_UPNEXT(@Olshansk, #571): Implement these tests - func init() { cmd.InitSDKConfig() } diff --git a/testutil/testrelayer/relays.go b/testutil/testrelayer/relays.go index 60fe06f30..43a4c5753 100644 --- a/testutil/testrelayer/relays.go +++ b/testutil/testrelayer/relays.go @@ -53,7 +53,6 @@ func NewUnsignedMinedRelay( }, } - // TODO_TECHDEBT(@red-0ne): marshal using canonical codec. 
relayBz, err := relay.Marshal() require.NoError(t, err) diff --git a/x/proof/types/params.go b/x/proof/types/params.go index 4dd893cb8..218831ba2 100644 --- a/x/proof/types/params.go +++ b/x/proof/types/params.go @@ -19,7 +19,8 @@ var ( // TODO_FOLLOWUP(@olshansk, #690): Delete this parameter. KeyRelayDifficultyTargetHash = []byte("RelayDifficultyTargetHash") ParamRelayDifficultyTargetHash = "relay_difficulty_target_hash" - DefaultRelayDifficultyTargetHash, _ = hex.DecodeString("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") // all relays are payable + DefaultRelayDifficultyTargetHashHex = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" // all relays are payable + DefaultRelayDifficultyTargetHash, _ = hex.DecodeString(DefaultRelayDifficultyTargetHashHex) KeyProofRequestProbability = []byte("ProofRequestProbability") ParamProofRequestProbability = "proof_request_probability" diff --git a/x/tokenomics/keeper/msg_server_update_param_test.go b/x/tokenomics/keeper/msg_server_update_param_test.go index 253ea20fa..e4a842ccb 100644 --- a/x/tokenomics/keeper/msg_server_update_param_test.go +++ b/x/tokenomics/keeper/msg_server_update_param_test.go @@ -11,7 +11,7 @@ import ( tokenomicstypes "github.com/pokt-network/poktroll/x/tokenomics/types" ) -// TODO_UPNEX +// TODO_FOLLOWUP(@olshansk func TestMsgUpdateParam_UpdateMinRelayDifficultyBitsOnly(t *testing.T) { var expectedComputeUnitsToTokensMultiplier int64 = 8 From 09d4cb048aa9e4cc5ebf9fbaf291afe75402ae45 Mon Sep 17 00:00:00 2001 From: Daniel Olshansky Date: Mon, 22 Jul 2024 13:18:08 -0700 Subject: [PATCH 13/29] Fix flaky tests --- Makefile | 13 +++++++++++++ testutil/network/network.go | 2 +- x/proof/keeper/msg_server_submit_proof.go | 9 +++++++-- x/proof/keeper/msg_server_submit_proof_test.go | 5 +++-- x/tokenomics/keeper/msg_server_update_param_test.go | 2 +- 5 files changed, 25 insertions(+), 6 deletions(-) diff --git a/Makefile b/Makefile index 0b46f84ef..571f52e7d 100644 --- 
a/Makefile +++ b/Makefile @@ -814,6 +814,19 @@ warn_message_local_stress_test: ## Print a warning message when kicking off a lo @echo "| |" @echo "+-----------------------------------------------------------------------------------------------+" +PHONY: warn_flaky_tests +warn_flaky_tests: ## Print a warning message that some unit tests may be flaky + @echo "+-----------------------------------------------------------------------------------------------+" + @echo "| |" + @echo "| IMPORTANT: READ ME IF YOUR TESTS FAIL!!! |" + @echo "| |" + @echo "| 1. Our unit / integration tests are far from perfect & some are flaky |" + @echo "| 2. If you ran 'make go_develop_and_test' and a failure occured, try to run: |" + @echo "| 'make test_all' once or twice more |" + @echo "| 3. If the same error persistes, isolate it with 'go test -v ./path/to/failing/module |" + @echo "| |" + @echo "+-----------------------------------------------------------------------------------------------+" + ############## ### Claims ### ############## diff --git a/testutil/network/network.go b/testutil/network/network.go index fdfb7f8a1..518a39800 100644 --- a/testutil/network/network.go +++ b/testutil/network/network.go @@ -50,7 +50,7 @@ func New(t *testing.T, configs ...Config) *Network { cfg = configs[0] } net, err := network.New(t, t.TempDir(), cfg) - require.NoError(t, err) + require.NoError(t, err, "TODO_FLAKY: This config setup is periodically flakyis a flaky is ") _, err = net.WaitForHeight(1) require.NoError(t, err) t.Cleanup(net.Cleanup) diff --git a/x/proof/keeper/msg_server_submit_proof.go b/x/proof/keeper/msg_server_submit_proof.go index 2e2e21fda..fcc271b03 100644 --- a/x/proof/keeper/msg_server_submit_proof.go +++ b/x/proof/keeper/msg_server_submit_proof.go @@ -208,7 +208,11 @@ func (k msgServer) SubmitProof( params := k.GetParams(ctx) // Verify the relay difficulty is above the minimum required to earn rewards. 
- if err = validateRelayDifficulty(relayBz, params.RelayDifficultyTargetHash); err != nil { + if err = validateRelayDifficulty( + relayBz, + params.RelayDifficultyTargetHash, + sessionHeader.Service.Id, + ); err != nil { return nil, status.Error(codes.FailedPrecondition, err.Error()) } logger.Debug("successfully validated relay mining difficulty") @@ -447,7 +451,7 @@ func verifyClosestProof( // required minimum threshold. // TODO_TECHDEBT: Factor out the relay mining difficulty validation into a shared // function that can be used by both the proof and the miner packages. -func validateRelayDifficulty(relayBz []byte, targetHash []byte) error { +func validateRelayDifficulty(relayBz, targetHash []byte, serviceId string) error { relayHashArr := protocol.GetRelayHashFromBytes(relayBz) relayHash := relayHashArr[:] @@ -471,6 +475,7 @@ func validateRelayDifficulty(relayBz []byte, targetHash []byte) error { "the difficulty relay being proven is (%d), and is smaller than the target difficulty (%d) for service %s", relayDifficulty, targetDifficulty, + serviceId, ) } diff --git a/x/proof/keeper/msg_server_submit_proof_test.go b/x/proof/keeper/msg_server_submit_proof_test.go index 50c51885a..99d890294 100644 --- a/x/proof/keeper/msg_server_submit_proof_test.go +++ b/x/proof/keeper/msg_server_submit_proof_test.go @@ -1023,7 +1023,7 @@ func TestMsgServer_SubmitProof_Error(t *testing.T) { desc: "relay difficulty must be greater than or equal to minimum (zero difficulty)", newProofMsg: func(t *testing.T) *prooftypes.MsgSubmitProof { // Set the minimum relay difficulty to a non-zero value such that the relays - // constructed by the test helpers have a negligable chance of being valid. + // constructed by the test helpers have a negligible chance of being valid. 
err = keepers.Keeper.SetParams(ctx, prooftypes.Params{ RelayDifficultyTargetHash: lowTargetHash, }) @@ -1047,9 +1047,10 @@ func TestMsgServer_SubmitProof_Error(t *testing.T) { expectedErr: status.Error( codes.FailedPrecondition, prooftypes.ErrProofInvalidRelay.Wrapf( - "relay difficulty %d is less than the target difficulty %d", + "the difficulty relay being proven is (%d), and is smaller than the target difficulty (%d) for service %s", validClosestRelayDifficultyBits, highExpectedTargetDifficulty, + validSessionHeader.Service.Id, ).Error(), ), }, diff --git a/x/tokenomics/keeper/msg_server_update_param_test.go b/x/tokenomics/keeper/msg_server_update_param_test.go index e4a842ccb..5f3375e16 100644 --- a/x/tokenomics/keeper/msg_server_update_param_test.go +++ b/x/tokenomics/keeper/msg_server_update_param_test.go @@ -11,7 +11,7 @@ import ( tokenomicstypes "github.com/pokt-network/poktroll/x/tokenomics/types" ) -// TODO_FOLLOWUP(@olshansk +// TODO_FOLLOWUP(@olshansk, #690): Rename this test. func TestMsgUpdateParam_UpdateMinRelayDifficultyBitsOnly(t *testing.T) { var expectedComputeUnitsToTokensMultiplier int64 = 8 From 36d55a3edb76e5dee7ba94a7cc69a08f5b5c6e0b Mon Sep 17 00:00:00 2001 From: Daniel Olshansky Date: Mon, 22 Jul 2024 13:25:00 -0700 Subject: [PATCH 14/29] Minor nits --- Makefile | 2 +- pkg/crypto/protocol/hasher.go | 11 +++++------ 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/Makefile b/Makefile index 571f52e7d..62c141898 100644 --- a/Makefile +++ b/Makefile @@ -420,7 +420,7 @@ test_verbose: check_go_version ## Run all go tests verbosely go test -count=1 -v -race -tags test ./... .PHONY: test_all -test_all: check_go_version ## Run all go tests showing detailed output only on failures +test_all: warn_flaky_tests check_go_version ## Run all go tests showing detailed output only on failures go test -count=1 -race -tags test ./... 
.PHONY: test_all_with_integration diff --git a/pkg/crypto/protocol/hasher.go b/pkg/crypto/protocol/hasher.go index ce9c175f9..e5f008c1a 100644 --- a/pkg/crypto/protocol/hasher.go +++ b/pkg/crypto/protocol/hasher.go @@ -3,14 +3,13 @@ package protocol import "crypto/sha256" const ( - RelayHasherSize = sha256.Size - TrieHasherSize = sha256.Size - TrieRootSize = TrieHasherSize + trieRootMetadataSize - // TODO_CONSIDERATION: Export this from the SMT package. - trieRootMetadataSize = 16 + RelayHasherSize = sha256.Size + TrieHasherSize = sha256.Size + TrieRootSize = TrieHasherSize + trieRootMetadataSize + trieRootMetadataSize = 16 // TODO_CONSIDERATION: Export this from the SMT package. ) var ( NewRelayHasher = sha256.New - NewTrieHasher = sha256.New + NewTrieHasher = sha256.New ) From 9cf20ebe5d179541f6c1bb18df986d6c7e2f37cc Mon Sep 17 00:00:00 2001 From: Daniel Olshansky Date: Mon, 22 Jul 2024 20:54:45 -0700 Subject: [PATCH 15/29] Move the verification layer --- api/poktroll/tokenomics/event.pulsar.go | 195 +++- proto/poktroll/tokenomics/event.proto | 8 + x/proof/keeper/msg_server_create_claim.go | 49 +- .../keeper/msg_server_create_claim_test.go | 5 +- x/proof/keeper/msg_server_submit_proof.go | 445 +-------- .../keeper/msg_server_submit_proof_test.go | 711 +------------ x/proof/keeper/proof_validation.go | 419 ++++++++ x/proof/keeper/proof_validation_test.go | 934 ++++++++++++++++++ x/proof/keeper/session.go | 37 +- x/proof/types/errors.go | 1 + x/tokenomics/keeper/settle_pending_claims.go | 41 +- x/tokenomics/types/event.pb.go | 157 ++- x/tokenomics/types/expected_keepers.go | 2 +- 13 files changed, 1735 insertions(+), 1269 deletions(-) create mode 100644 x/proof/keeper/proof_validation.go create mode 100644 x/proof/keeper/proof_validation_test.go diff --git a/api/poktroll/tokenomics/event.pulsar.go b/api/poktroll/tokenomics/event.pulsar.go index 8ea69eae8..300c5cb18 100644 --- a/api/poktroll/tokenomics/event.pulsar.go +++ b/api/poktroll/tokenomics/event.pulsar.go @@ 
-20,6 +20,7 @@ var ( fd_EventClaimExpired_claim protoreflect.FieldDescriptor fd_EventClaimExpired_num_relays protoreflect.FieldDescriptor fd_EventClaimExpired_num_compute_units protoreflect.FieldDescriptor + fd_EventClaimExpired_expiration_reason protoreflect.FieldDescriptor ) func init() { @@ -28,6 +29,7 @@ func init() { fd_EventClaimExpired_claim = md_EventClaimExpired.Fields().ByName("claim") fd_EventClaimExpired_num_relays = md_EventClaimExpired.Fields().ByName("num_relays") fd_EventClaimExpired_num_compute_units = md_EventClaimExpired.Fields().ByName("num_compute_units") + fd_EventClaimExpired_expiration_reason = md_EventClaimExpired.Fields().ByName("expiration_reason") } var _ protoreflect.Message = (*fastReflection_EventClaimExpired)(nil) @@ -113,6 +115,12 @@ func (x *fastReflection_EventClaimExpired) Range(f func(protoreflect.FieldDescri return } } + if x.ExpirationReason != 0 { + value := protoreflect.ValueOfEnum((protoreflect.EnumNumber)(x.ExpirationReason)) + if !f(fd_EventClaimExpired_expiration_reason, value) { + return + } + } } // Has reports whether a field is populated. 
@@ -134,6 +142,8 @@ func (x *fastReflection_EventClaimExpired) Has(fd protoreflect.FieldDescriptor) return x.NumRelays != uint64(0) case "poktroll.tokenomics.EventClaimExpired.num_compute_units": return x.NumComputeUnits != uint64(0) + case "poktroll.tokenomics.EventClaimExpired.expiration_reason": + return x.ExpirationReason != 0 default: if fd.IsExtension() { panic(fmt.Errorf("proto3 declared messages do not support extensions: poktroll.tokenomics.EventClaimExpired")) @@ -156,6 +166,8 @@ func (x *fastReflection_EventClaimExpired) Clear(fd protoreflect.FieldDescriptor x.NumRelays = uint64(0) case "poktroll.tokenomics.EventClaimExpired.num_compute_units": x.NumComputeUnits = uint64(0) + case "poktroll.tokenomics.EventClaimExpired.expiration_reason": + x.ExpirationReason = 0 default: if fd.IsExtension() { panic(fmt.Errorf("proto3 declared messages do not support extensions: poktroll.tokenomics.EventClaimExpired")) @@ -181,6 +193,9 @@ func (x *fastReflection_EventClaimExpired) Get(descriptor protoreflect.FieldDesc case "poktroll.tokenomics.EventClaimExpired.num_compute_units": value := x.NumComputeUnits return protoreflect.ValueOfUint64(value) + case "poktroll.tokenomics.EventClaimExpired.expiration_reason": + value := x.ExpirationReason + return protoreflect.ValueOfEnum((protoreflect.EnumNumber)(value)) default: if descriptor.IsExtension() { panic(fmt.Errorf("proto3 declared messages do not support extensions: poktroll.tokenomics.EventClaimExpired")) @@ -207,6 +222,8 @@ func (x *fastReflection_EventClaimExpired) Set(fd protoreflect.FieldDescriptor, x.NumRelays = value.Uint() case "poktroll.tokenomics.EventClaimExpired.num_compute_units": x.NumComputeUnits = value.Uint() + case "poktroll.tokenomics.EventClaimExpired.expiration_reason": + x.ExpirationReason = (ClaimExpirationReason)(value.Enum()) default: if fd.IsExtension() { panic(fmt.Errorf("proto3 declared messages do not support extensions: poktroll.tokenomics.EventClaimExpired")) @@ -236,6 +253,8 @@ func (x 
*fastReflection_EventClaimExpired) Mutable(fd protoreflect.FieldDescript panic(fmt.Errorf("field num_relays of message poktroll.tokenomics.EventClaimExpired is not mutable")) case "poktroll.tokenomics.EventClaimExpired.num_compute_units": panic(fmt.Errorf("field num_compute_units of message poktroll.tokenomics.EventClaimExpired is not mutable")) + case "poktroll.tokenomics.EventClaimExpired.expiration_reason": + panic(fmt.Errorf("field expiration_reason of message poktroll.tokenomics.EventClaimExpired is not mutable")) default: if fd.IsExtension() { panic(fmt.Errorf("proto3 declared messages do not support extensions: poktroll.tokenomics.EventClaimExpired")) @@ -256,6 +275,8 @@ func (x *fastReflection_EventClaimExpired) NewField(fd protoreflect.FieldDescrip return protoreflect.ValueOfUint64(uint64(0)) case "poktroll.tokenomics.EventClaimExpired.num_compute_units": return protoreflect.ValueOfUint64(uint64(0)) + case "poktroll.tokenomics.EventClaimExpired.expiration_reason": + return protoreflect.ValueOfEnum(0) default: if fd.IsExtension() { panic(fmt.Errorf("proto3 declared messages do not support extensions: poktroll.tokenomics.EventClaimExpired")) @@ -335,6 +356,9 @@ func (x *fastReflection_EventClaimExpired) ProtoMethods() *protoiface.Methods { if x.NumComputeUnits != 0 { n += 1 + runtime.Sov(uint64(x.NumComputeUnits)) } + if x.ExpirationReason != 0 { + n += 1 + runtime.Sov(uint64(x.ExpirationReason)) + } if x.unknownFields != nil { n += len(x.unknownFields) } @@ -364,6 +388,11 @@ func (x *fastReflection_EventClaimExpired) ProtoMethods() *protoiface.Methods { i -= len(x.unknownFields) copy(dAtA[i:], x.unknownFields) } + if x.ExpirationReason != 0 { + i = runtime.EncodeVarint(dAtA, i, uint64(x.ExpirationReason)) + i-- + dAtA[i] = 0x20 + } if x.NumComputeUnits != 0 { i = runtime.EncodeVarint(dAtA, i, uint64(x.NumComputeUnits)) i-- @@ -511,6 +540,25 @@ func (x *fastReflection_EventClaimExpired) ProtoMethods() *protoiface.Methods { break } } + case 4: + if wireType 
!= 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field ExpirationReason", wireType) + } + x.ExpirationReason = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + x.ExpirationReason |= ClaimExpirationReason(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := runtime.Skip(dAtA[iNdEx:]) @@ -2360,6 +2408,55 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +type ClaimExpirationReason int32 + +const ( + ClaimExpirationReason_EXPIRATION_REASON_UNSPECIFIED ClaimExpirationReason = 0 // Default value, means may be valid + ClaimExpirationReason_PROOF_MISSING ClaimExpirationReason = 1 + ClaimExpirationReason_PROOF_INVALID ClaimExpirationReason = 2 +) + +// Enum value maps for ClaimExpirationReason. 
+var ( + ClaimExpirationReason_name = map[int32]string{ + 0: "EXPIRATION_REASON_UNSPECIFIED", + 1: "PROOF_MISSING", + 2: "PROOF_INVALID", + } + ClaimExpirationReason_value = map[string]int32{ + "EXPIRATION_REASON_UNSPECIFIED": 0, + "PROOF_MISSING": 1, + "PROOF_INVALID": 2, + } +) + +func (x ClaimExpirationReason) Enum() *ClaimExpirationReason { + p := new(ClaimExpirationReason) + *p = x + return p +} + +func (x ClaimExpirationReason) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ClaimExpirationReason) Descriptor() protoreflect.EnumDescriptor { + return file_poktroll_tokenomics_event_proto_enumTypes[0].Descriptor() +} + +func (ClaimExpirationReason) Type() protoreflect.EnumType { + return &file_poktroll_tokenomics_event_proto_enumTypes[0] +} + +func (x ClaimExpirationReason) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ClaimExpirationReason.Descriptor instead. +func (ClaimExpirationReason) EnumDescriptor() ([]byte, []int) { + return file_poktroll_tokenomics_event_proto_rawDescGZIP(), []int{0} +} + // EventClaimExpired is an event emitted during settlement whenever a claim requiring // an on-chain proof doesn't have one. The claim cannot be settled, leading to that work // never being rewarded. @@ -2368,9 +2465,11 @@ type EventClaimExpired struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Claim *proof.Claim `protobuf:"bytes,1,opt,name=claim,proto3" json:"claim,omitempty"` - NumRelays uint64 `protobuf:"varint,2,opt,name=num_relays,json=numRelays,proto3" json:"num_relays,omitempty"` - NumComputeUnits uint64 `protobuf:"varint,3,opt,name=num_compute_units,json=numComputeUnits,proto3" json:"num_compute_units,omitempty"` + Claim *proof.Claim `protobuf:"bytes,1,opt,name=claim,proto3" json:"claim,omitempty"` + // TODO_MAINNET: Shold we include the proof here too? 
+ NumRelays uint64 `protobuf:"varint,2,opt,name=num_relays,json=numRelays,proto3" json:"num_relays,omitempty"` + NumComputeUnits uint64 `protobuf:"varint,3,opt,name=num_compute_units,json=numComputeUnits,proto3" json:"num_compute_units,omitempty"` + ExpirationReason ClaimExpirationReason `protobuf:"varint,4,opt,name=expiration_reason,json=expirationReason,proto3,enum=poktroll.tokenomics.ClaimExpirationReason" json:"expiration_reason,omitempty"` } func (x *EventClaimExpired) Reset() { @@ -2414,6 +2513,13 @@ func (x *EventClaimExpired) GetNumComputeUnits() uint64 { return 0 } +func (x *EventClaimExpired) GetExpirationReason() ClaimExpirationReason { + if x != nil { + return x.ExpirationReason + } + return ClaimExpirationReason_EXPIRATION_REASON_UNSPECIFIED +} + // EventClaimSettled is an event emitted whenever a claim is settled. // The proof_required determines whether the claim requires a proof that has been submitted or not type EventClaimSettled struct { @@ -2610,7 +2716,7 @@ var file_poktroll_tokenomics_event_proto_rawDesc = []byte{ 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x2f, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x2f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xbd, 0x01, 0x0a, 0x11, 0x45, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xad, 0x02, 0x0a, 0x11, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x64, 0x12, 0x36, 0x0a, 0x05, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x6f, 0x66, @@ -2622,7 +2728,14 @@ var file_poktroll_tokenomics_event_proto_rawDesc = []byte{ 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x18, 0x03, 0x20, 
0x01, 0x28, 0x04, 0x42, 0x15, 0xea, 0xde, 0x1f, 0x11, 0x6e, 0x75, 0x6d, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x52, 0x0f, 0x6e, 0x75, 0x6d, 0x43, 0x6f, - 0x6d, 0x70, 0x75, 0x74, 0x65, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x22, 0xa9, 0x02, 0x0a, 0x11, 0x45, + 0x6d, 0x70, 0x75, 0x74, 0x65, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x12, 0x6e, 0x0a, 0x11, 0x65, 0x78, + 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, + 0x2e, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x63, 0x73, 0x2e, 0x43, 0x6c, 0x61, 0x69, + 0x6d, 0x45, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x61, 0x73, 0x6f, + 0x6e, 0x42, 0x15, 0xea, 0xde, 0x1f, 0x11, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x52, 0x10, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x22, 0xa9, 0x02, 0x0a, 0x11, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x53, 0x65, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x12, 0x36, 0x0a, 0x05, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x6f, 0x66, @@ -2672,19 +2785,25 @@ var file_poktroll_tokenomics_event_proto_rawDesc = []byte{ 0x75, 0x72, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x43, 0x6f, 0x69, 0x6e, 0x52, 0x0d, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x42, - 0x75, 0x72, 0x6e, 0x42, 0xb8, 0x01, 0x0a, 0x17, 0x63, 0x6f, 0x6d, 0x2e, 0x70, 0x6f, 0x6b, 0x74, - 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x63, 0x73, 0x42, - 0x0a, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 
0x6f, 0x50, 0x01, 0x5a, 0x24, 0x63, - 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x73, 0x64, 0x6b, 0x2e, 0x69, 0x6f, 0x2f, 0x61, 0x70, 0x69, 0x2f, - 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x6f, 0x6d, - 0x69, 0x63, 0x73, 0xa2, 0x02, 0x03, 0x50, 0x54, 0x58, 0xaa, 0x02, 0x13, 0x50, 0x6f, 0x6b, 0x74, - 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x63, 0x73, 0xca, - 0x02, 0x13, 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x5c, 0x54, 0x6f, 0x6b, 0x65, 0x6e, - 0x6f, 0x6d, 0x69, 0x63, 0x73, 0xe2, 0x02, 0x1f, 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, - 0x5c, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x63, 0x73, 0x5c, 0x47, 0x50, 0x42, 0x4d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x14, 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f, - 0x6c, 0x6c, 0x3a, 0x3a, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x63, 0x73, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x75, 0x72, 0x6e, 0x2a, 0x60, 0x0a, 0x15, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x45, 0x78, 0x70, 0x69, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x1d, + 0x45, 0x58, 0x50, 0x49, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x45, 0x41, 0x53, 0x4f, + 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x11, 0x0a, 0x0d, 0x50, 0x52, 0x4f, 0x4f, 0x46, 0x5f, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4e, 0x47, + 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x50, 0x52, 0x4f, 0x4f, 0x46, 0x5f, 0x49, 0x4e, 0x56, 0x41, + 0x4c, 0x49, 0x44, 0x10, 0x02, 0x42, 0xb8, 0x01, 0x0a, 0x17, 0x63, 0x6f, 0x6d, 0x2e, 0x70, 0x6f, + 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x63, + 0x73, 0x42, 0x0a, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x24, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x73, 0x64, 0x6b, 0x2e, 0x69, 0x6f, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 
0x2f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x6f, 0x6d, 0x69, 0x63, 0x73, 0xa2, 0x02, 0x03, 0x50, 0x54, 0x58, 0xaa, 0x02, 0x13, 0x50, 0x6f, + 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x63, + 0x73, 0xca, 0x02, 0x13, 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x5c, 0x54, 0x6f, 0x6b, + 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x63, 0x73, 0xe2, 0x02, 0x1f, 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f, + 0x6c, 0x6c, 0x5c, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x63, 0x73, 0x5c, 0x47, 0x50, + 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x14, 0x50, 0x6f, 0x6b, 0x74, + 0x72, 0x6f, 0x6c, 0x6c, 0x3a, 0x3a, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x63, 0x73, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -2699,27 +2818,30 @@ func file_poktroll_tokenomics_event_proto_rawDescGZIP() []byte { return file_poktroll_tokenomics_event_proto_rawDescData } +var file_poktroll_tokenomics_event_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_poktroll_tokenomics_event_proto_msgTypes = make([]protoimpl.MessageInfo, 4) var file_poktroll_tokenomics_event_proto_goTypes = []interface{}{ - (*EventClaimExpired)(nil), // 0: poktroll.tokenomics.EventClaimExpired - (*EventClaimSettled)(nil), // 1: poktroll.tokenomics.EventClaimSettled - (*EventRelayMiningDifficultyUpdated)(nil), // 2: poktroll.tokenomics.EventRelayMiningDifficultyUpdated - (*EventApplicationOverserviced)(nil), // 3: poktroll.tokenomics.EventApplicationOverserviced - (*proof.Claim)(nil), // 4: poktroll.proof.Claim - (proof.ProofRequirementReason)(0), // 5: poktroll.proof.ProofRequirementReason - (*v1beta1.Coin)(nil), // 6: cosmos.base.v1beta1.Coin + (ClaimExpirationReason)(0), // 0: poktroll.tokenomics.ClaimExpirationReason + (*EventClaimExpired)(nil), // 1: poktroll.tokenomics.EventClaimExpired + (*EventClaimSettled)(nil), // 2: poktroll.tokenomics.EventClaimSettled + (*EventRelayMiningDifficultyUpdated)(nil), // 3: 
poktroll.tokenomics.EventRelayMiningDifficultyUpdated + (*EventApplicationOverserviced)(nil), // 4: poktroll.tokenomics.EventApplicationOverserviced + (*proof.Claim)(nil), // 5: poktroll.proof.Claim + (proof.ProofRequirementReason)(0), // 6: poktroll.proof.ProofRequirementReason + (*v1beta1.Coin)(nil), // 7: cosmos.base.v1beta1.Coin } var file_poktroll_tokenomics_event_proto_depIdxs = []int32{ - 4, // 0: poktroll.tokenomics.EventClaimExpired.claim:type_name -> poktroll.proof.Claim - 4, // 1: poktroll.tokenomics.EventClaimSettled.claim:type_name -> poktroll.proof.Claim - 5, // 2: poktroll.tokenomics.EventClaimSettled.proof_requirement:type_name -> poktroll.proof.ProofRequirementReason - 6, // 3: poktroll.tokenomics.EventApplicationOverserviced.expected_burn:type_name -> cosmos.base.v1beta1.Coin - 6, // 4: poktroll.tokenomics.EventApplicationOverserviced.effective_burn:type_name -> cosmos.base.v1beta1.Coin - 5, // [5:5] is the sub-list for method output_type - 5, // [5:5] is the sub-list for method input_type - 5, // [5:5] is the sub-list for extension type_name - 5, // [5:5] is the sub-list for extension extendee - 0, // [0:5] is the sub-list for field type_name + 5, // 0: poktroll.tokenomics.EventClaimExpired.claim:type_name -> poktroll.proof.Claim + 0, // 1: poktroll.tokenomics.EventClaimExpired.expiration_reason:type_name -> poktroll.tokenomics.ClaimExpirationReason + 5, // 2: poktroll.tokenomics.EventClaimSettled.claim:type_name -> poktroll.proof.Claim + 6, // 3: poktroll.tokenomics.EventClaimSettled.proof_requirement:type_name -> poktroll.proof.ProofRequirementReason + 7, // 4: poktroll.tokenomics.EventApplicationOverserviced.expected_burn:type_name -> cosmos.base.v1beta1.Coin + 7, // 5: poktroll.tokenomics.EventApplicationOverserviced.effective_burn:type_name -> cosmos.base.v1beta1.Coin + 6, // [6:6] is the sub-list for method output_type + 6, // [6:6] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is 
the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name } func init() { file_poktroll_tokenomics_event_proto_init() } @@ -2782,13 +2904,14 @@ func file_poktroll_tokenomics_event_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_poktroll_tokenomics_event_proto_rawDesc, - NumEnums: 0, + NumEnums: 1, NumMessages: 4, NumExtensions: 0, NumServices: 0, }, GoTypes: file_poktroll_tokenomics_event_proto_goTypes, DependencyIndexes: file_poktroll_tokenomics_event_proto_depIdxs, + EnumInfos: file_poktroll_tokenomics_event_proto_enumTypes, MessageInfos: file_poktroll_tokenomics_event_proto_msgTypes, }.Build() File_poktroll_tokenomics_event_proto = out.File diff --git a/proto/poktroll/tokenomics/event.proto b/proto/poktroll/tokenomics/event.proto index f45cd6389..780564496 100644 --- a/proto/poktroll/tokenomics/event.proto +++ b/proto/poktroll/tokenomics/event.proto @@ -8,13 +8,21 @@ import "cosmos/base/v1beta1/coin.proto"; import "poktroll/proof/claim.proto"; import "poktroll/proof/requirement.proto"; +enum ClaimExpirationReason { + EXPIRATION_REASON_UNSPECIFIED = 0; // Default value, means may be valid + PROOF_MISSING = 1; + PROOF_INVALID = 2; +} + // EventClaimExpired is an event emitted during settlement whenever a claim requiring // an on-chain proof doesn't have one. The claim cannot be settled, leading to that work // never being rewarded. message EventClaimExpired { poktroll.proof.Claim claim = 1 [(gogoproto.jsontag) = "claim"]; + // TODO_MAINNET: Shold we include the proof here too? uint64 num_relays = 2 [(gogoproto.jsontag) = "num_relays"]; uint64 num_compute_units = 3 [(gogoproto.jsontag) = "num_compute_units"]; + ClaimExpirationReason expiration_reason = 4 [(gogoproto.jsontag) = "expiration_reason"]; } // EventClaimSettled is an event emitted whenever a claim is settled. 
diff --git a/x/proof/keeper/msg_server_create_claim.go b/x/proof/keeper/msg_server_create_claim.go index 7d58f1d20..afe0b41db 100644 --- a/x/proof/keeper/msg_server_create_claim.go +++ b/x/proof/keeper/msg_server_create_claim.go @@ -37,53 +37,44 @@ func (k msgServer) CreateClaim( }() logger := k.Logger().With("method", "CreateClaim") + sdkCtx := cosmostypes.UnwrapSDKContext(ctx) logger.Info("creating claim") + // Basic validation of the CreateClaim message. if err = msg.ValidateBasic(); err != nil { return nil, err } + logger.Info("validated the createClaim message") // Compare msg session header w/ on-chain session header. var session *sessiontypes.Session - session, err = k.queryAndValidateSessionHeader(ctx, msg) + session, err = k.queryAndValidateSessionHeader(ctx, msg.GetSessionHeader(), msg.GetSupplierAddress()) if err != nil { return nil, status.Error(codes.InvalidArgument, err.Error()) } - // Use the session header from the on-chain hydrated session. - sessionHeader := session.GetHeader() - - // Set the session header to the on-chain hydrated session header. - msg.SessionHeader = sessionHeader - - // Validate claim message commit height is within the respective session's - // claim creation window using the on-chain session header. - if err = k.validateClaimWindow(ctx, msg); err != nil { - return nil, status.Error(codes.FailedPrecondition, err.Error()) + // Construct and insert claim + claim = types.Claim{ + SupplierAddress: msg.GetSupplierAddress(), + SessionHeader: session.GetHeader(), + RootHash: msg.GetRootHash(), } + // Helpers for logging the same metadata throughout this function calls logger = logger. With( "session_id", session.GetSessionId(), - "session_end_height", sessionHeader.GetSessionEndBlockHeight(), + "session_end_height", claim.SessionHeader.GetSessionEndBlockHeight(), "supplier", msg.GetSupplierAddress(), ) - logger.Info("validated claim") - - // Assign and upsert claim after all validation. 
- claim = types.Claim{ - SupplierAddress: msg.GetSupplierAddress(), - SessionHeader: sessionHeader, - RootHash: msg.GetRootHash(), + // Validate claim message commit height is within the respective session's + // claim creation window using the on-chain session header. + if err = k.validateClaimWindow(ctx, claim.SessionHeader, claim.SupplierAddress); err != nil { + return nil, status.Error(codes.FailedPrecondition, err.Error()) } - _, isExistingClaim = k.Keeper.GetClaim(ctx, claim.GetSessionHeader().GetSessionId(), claim.GetSupplierAddress()) - - k.Keeper.UpsertClaim(ctx, claim) - - logger.Info("created new claim") - + // Get metadata for the event we want to emit numRelays, err = claim.GetNumRelays() if err != nil { return nil, status.Error(codes.Internal, types.ErrProofInvalidClaimRootHash.Wrap(err.Error()).Error()) @@ -92,6 +83,11 @@ func (k msgServer) CreateClaim( if err != nil { return nil, status.Error(codes.Internal, types.ErrProofInvalidClaimRootHash.Wrap(err.Error()).Error()) } + _, isExistingClaim = k.Keeper.GetClaim(ctx, claim.GetSessionHeader().GetSessionId(), claim.GetSupplierAddress()) + + // Upsert the claim + k.Keeper.UpsertClaim(ctx, claim) + logger.Info("successfully upserted the claim") // Emit the appropriate event based on whether the claim was created or updated. var claimUpsertEvent proto.Message @@ -113,8 +109,6 @@ func (k msgServer) CreateClaim( }, ) } - - sdkCtx := cosmostypes.UnwrapSDKContext(ctx) if err = sdkCtx.EventManager().EmitTypedEvent(claimUpsertEvent); err != nil { return nil, status.Error( codes.Internal, @@ -126,7 +120,6 @@ func (k msgServer) CreateClaim( ) } - // TODO_BETA: return the claim in the response. 
return &types.MsgCreateClaimResponse{ Claim: &claim, }, nil diff --git a/x/proof/keeper/msg_server_create_claim_test.go b/x/proof/keeper/msg_server_create_claim_test.go index 4ac0c4c72..69515cc64 100644 --- a/x/proof/keeper/msg_server_create_claim_test.go +++ b/x/proof/keeper/msg_server_create_claim_test.go @@ -5,12 +5,11 @@ import ( abci "github.com/cometbft/cometbft/abci/types" cosmostypes "github.com/cosmos/cosmos-sdk/types" + "github.com/pokt-network/smt" "github.com/stretchr/testify/require" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - "github.com/pokt-network/smt" - keepertest "github.com/pokt-network/poktroll/testutil/keeper" testproof "github.com/pokt-network/poktroll/testutil/proof" "github.com/pokt-network/poktroll/testutil/sample" @@ -140,7 +139,7 @@ func TestMsgServer_CreateClaim_Success(t *testing.T) { events := sdkCtx.EventManager().Events() require.Equal(t, 1, len(events)) - require.Equal(t, events[0].Type, "poktroll.proof.EventClaimCreated") + require.Equal(t, "poktroll.proof.EventClaimCreated", events[0].Type) event, err := cosmostypes.ParseTypedEvent(abci.Event(events[0])) require.NoError(t, err) diff --git a/x/proof/keeper/msg_server_submit_proof.go b/x/proof/keeper/msg_server_submit_proof.go index fcc271b03..f63974b08 100644 --- a/x/proof/keeper/msg_server_submit_proof.go +++ b/x/proof/keeper/msg_server_submit_proof.go @@ -5,21 +5,15 @@ package keeper // Ref: https://github.com/pokt-network/poktroll/pull/448#discussion_r1549742985 import ( - "bytes" "context" - "fmt" - cosmoscryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" cosmostypes "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/gogoproto/proto" - "github.com/pokt-network/smt" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - "github.com/pokt-network/poktroll/pkg/crypto/protocol" "github.com/pokt-network/poktroll/telemetry" "github.com/pokt-network/poktroll/x/proof/types" - servicetypes "github.com/pokt-network/poktroll/x/service/types" 
sessiontypes "github.com/pokt-network/poktroll/x/session/types" sharedtypes "github.com/pokt-network/poktroll/x/shared/types" ) @@ -36,14 +30,6 @@ func (k msgServer) SubmitProof( ctx context.Context, msg *types.MsgSubmitProof, ) (_ *types.MsgSubmitProofResponse, err error) { - // TODO_MAINNET: A potential issue with doing proof validation inside - // `SubmitProof` is that we will not be storing false proofs on-chain (e.g. for slashing purposes). - // This could be considered a feature (e.g. less state bloat against sybil attacks) - // or a bug (i.e. no mechanisms for slashing suppliers who submit false proofs). - // Revisit this prior to mainnet launch as to whether the business logic for settling sessions should be in EndBlocker or here. - logger := k.Logger().With("method", "SubmitProof") - logger.Info("About to start submitting proof") - // Declare claim to reference in telemetry. var ( claim = new(types.Claim) @@ -62,201 +48,50 @@ func (k msgServer) SubmitProof( } }() - /* - TODO_BLOCKER(@bryanchriswhite): Document these steps in proof - verification, link to the doc for reference and delete the comments. - - ## Actions (error if anything fails) - 1. Retrieve a fully hydrated `session` from on-chain store using `msg` metadata - 2. Retrieve a fully hydrated `claim` from on-chain store using `msg` metadata - 3. Retrieve `relay.Req` and `relay.Res` from deserializing `proof.ClosestValueHash` - - ## Basic Validations (metadata only) - 1. proof.sessionId == claim.sessionId - 2. msg.supplier in session.suppliers - 3. relay.Req.signer == session.appAddr - 4. relay.Res.signer == msg.supplier - - ## Msg distribution validation (governance based params) - 1. Validate Proof submission is not too early; governance-based param + pseudo-random variation - 2. Validate Proof submission is not too late; governance-based param + pseudo-random variation - - ## Relay Signature validation - 1. verify(relay.Req.Signature, appRing) - 2. 
verify(relay.Res.Signature, supplier.pubKey) - - ## Relay Mining validation - 1. verify(proof.path) is the expected path; pseudo-random variation using on-chain data - 2. verify(proof.ValueHash, expectedDifficulty); governance based - 3. verify(claim.Root, proof.ClosestProof); verify the closest proof is correct - */ - - // Decomposing a few variables for easier access - sessionHeader := msg.GetSessionHeader() - supplierAddr := msg.GetSupplierAddress() - - // Helpers for logging the same metadata throughout this function calls - logger = logger.With( - "session_id", sessionHeader.GetSessionId(), - "session_end_height", sessionHeader.GetSessionEndBlockHeight(), - "supplier", supplierAddr) + logger := k.Logger().With("method", "SubmitProof") + sdkCtx := cosmostypes.UnwrapSDKContext(ctx) + logger.Info("About to start submitting proof") // Basic validation of the SubmitProof message. if err = msg.ValidateBasic(); err != nil { return nil, status.Error(codes.InvalidArgument, err.Error()) } - logger.Info("validated the submitProof message ") + logger.Info("validated the submitProof message") - // Retrieve the supplier's public key. - var supplierPubKey cosmoscryptotypes.PubKey - supplierPubKey, err = k.accountQuerier.GetPubKeyFromAddress(ctx, supplierAddr) + // Compare msg session header w/ on-chain session header. + var session *sessiontypes.Session + session, err = k.queryAndValidateSessionHeader(ctx, msg.GetSessionHeader(), msg.GetSupplierAddress()) if err != nil { - return nil, status.Error(codes.FailedPrecondition, err.Error()) + return nil, status.Error(codes.InvalidArgument, err.Error()) } - // Validate the session header. 
- var onChainSession *sessiontypes.Session - onChainSession, err = k.queryAndValidateSessionHeader(ctx, msg) - if err != nil { - return nil, status.Error(codes.InvalidArgument, err.Error()) + // Construct the proof + proof := types.Proof{ + SupplierAddress: msg.GetSupplierAddress(), + SessionHeader: session.GetHeader(), + ClosestMerkleProof: msg.GetProof(), } - logger.Info("queried and validated the session header") - // Re-hydrate message session header with the on-chain session header. - // This corrects for discrepancies between unvalidated fields in the session header - // which can be derived from known values (e.g. session end height). - msg.SessionHeader = onChainSession.GetHeader() + // Helpers for logging the same metadata throughout this function calls + logger = logger.With( + "session_id", proof.SessionHeader.SessionId, + "session_end_height", proof.SessionHeader.SessionEndBlockHeight, + "supplier", proof.SupplierAddress) // Validate proof message commit height is within the respective session's // proof submission window using the on-chain session header. - if err = k.validateProofWindow(ctx, msg); err != nil { + if err = k.validateProofWindow(ctx, proof.SessionHeader, proof.SupplierAddress); err != nil { return nil, status.Error(codes.FailedPrecondition, err.Error()) } - // Unmarshal the closest merkle proof from the message. - sparseMerkleClosestProof := &smt.SparseMerkleClosestProof{} - if err = sparseMerkleClosestProof.Unmarshal(msg.GetProof()); err != nil { - return nil, status.Error(codes.InvalidArgument, - types.ErrProofInvalidProof.Wrapf( - "failed to unmarshal closest merkle proof: %s", - err, - ).Error(), - ) - } - - // TODO_MAINNET(#427): Utilize smt.VerifyCompactClosestProof here to - // reduce on-chain storage requirements for proofs. - // Get the relay request and response from the proof.GetClosestMerkleProof. 
- relayBz := sparseMerkleClosestProof.GetValueHash(&protocol.SmtSpec) - relay := &servicetypes.Relay{} - if err = k.cdc.Unmarshal(relayBz, relay); err != nil { - return nil, status.Error( - codes.InvalidArgument, - types.ErrProofInvalidRelay.Wrapf( - "failed to unmarshal relay: %s", - err, - ).Error(), - ) - } - - // Basic validation of the relay request. - relayReq := relay.GetReq() - if err = relayReq.ValidateBasic(); err != nil { - return nil, status.Error(codes.FailedPrecondition, err.Error()) - } - logger.Debug("successfully validated relay request") - - // Make sure that the supplier address in the proof matches the one in the relay request. - if supplierAddr != relayReq.Meta.SupplierAddress { - return nil, status.Error(codes.FailedPrecondition, "supplier address mismatch") - } - logger.Debug("the proof supplier address matches the relay request supplier address") - - // Basic validation of the relay response. - relayRes := relay.GetRes() - if err = relayRes.ValidateBasic(); err != nil { - return nil, status.Error(codes.FailedPrecondition, err.Error()) - } - logger.Debug("successfully validated relay response") - - // Verify that the relay request session header matches the proof session header. - if err = compareSessionHeaders(msg.GetSessionHeader(), relayReq.Meta.GetSessionHeader()); err != nil { - return nil, status.Error(codes.FailedPrecondition, err.Error()) - } - logger.Debug("successfully compared relay request session header") - - // Verify that the relay response session header matches the proof session header. - if err = compareSessionHeaders(msg.GetSessionHeader(), relayRes.Meta.GetSessionHeader()); err != nil { - return nil, status.Error(codes.FailedPrecondition, err.Error()) - } - logger.Debug("successfully compared relay response session header") - - // Verify the relay request's signature. 
- if err = k.ringClient.VerifyRelayRequestSignature(ctx, relayReq); err != nil { - return nil, status.Error(codes.FailedPrecondition, err.Error()) - } - logger.Debug("successfully verified relay request signature") - - // Verify the relay response's signature. - if err = relayRes.VerifySupplierSignature(supplierPubKey); err != nil { - return nil, status.Error(codes.FailedPrecondition, err.Error()) - } - logger.Debug("successfully verified relay response signature") - - // Get the proof module's governance parameters. - // TODO_FOLLOWUP(@olshansk, #690): Get the difficulty associated with the service - params := k.GetParams(ctx) - - // Verify the relay difficulty is above the minimum required to earn rewards. - if err = validateRelayDifficulty( - relayBz, - params.RelayDifficultyTargetHash, - sessionHeader.Service.Id, - ); err != nil { - return nil, status.Error(codes.FailedPrecondition, err.Error()) - } - logger.Debug("successfully validated relay mining difficulty") - - // Validate that path the proof is submitted for matches the expected one - // based on the pseudo-random on-chain data associated with the header. - if err = k.validateClosestPath( - ctx, - sparseMerkleClosestProof, - msg.GetSessionHeader(), - msg.GetSupplierAddress(), - ); err != nil { - return nil, status.Error(codes.FailedPrecondition, err.Error()) - } - logger.Debug("successfully validated proof path") - // Retrieve the corresponding claim for the proof submitted so it can be // used in the proof validation below. - claim, err = k.queryAndValidateClaimForProof(ctx, msg) + claim, err = k.queryAndValidateClaimForProof(ctx, proof.SessionHeader, proof.SupplierAddress) if err != nil { - return nil, status.Error(codes.FailedPrecondition, err.Error()) - } - - logger.Debug("successfully retrieved and validated claim") - - // Verify the proof's closest merkle proof. 
- if err = verifyClosestProof(sparseMerkleClosestProof, claim.GetRootHash()); err != nil { - return nil, status.Error(codes.FailedPrecondition, err.Error()) + return nil, status.Error(codes.Internal, types.ErrProofClaimNotFound.Wrap(err.Error()).Error()) } - logger.Debug("successfully verified closest merkle proof") - - // Construct and insert proof after all validation. - proof := types.Proof{ - SupplierAddress: supplierAddr, - SessionHeader: msg.GetSessionHeader(), - ClosestMerkleProof: msg.GetProof(), - } - logger.Debug(fmt.Sprintf("queried and validated the claim for session ID %q", sessionHeader.SessionId)) - - _, isExistingProof = k.GetProof(ctx, proof.GetSessionHeader().GetSessionId(), proof.GetSupplierAddress()) - - k.UpsertProof(ctx, proof) - logger.Info("successfully upserted the proof") + // Get metadata for the event we want to emit numRelays, err = claim.GetNumRelays() if err != nil { return nil, status.Error(codes.Internal, types.ErrProofInvalidClaimRootHash.Wrap(err.Error()).Error()) @@ -265,6 +100,11 @@ func (k msgServer) SubmitProof( if err != nil { return nil, status.Error(codes.Internal, types.ErrProofInvalidClaimRootHash.Wrap(err.Error()).Error()) } + _, isExistingProof = k.GetProof(ctx, proof.SessionHeader.SessionId, proof.SupplierAddress) + + // Upsert the proof + k.UpsertProof(ctx, proof) + logger.Info("successfully upserted the proof") // Emit the appropriate event based on whether the claim was created or updated. 
var proofUpsertEvent proto.Message @@ -288,8 +128,6 @@ func (k msgServer) SubmitProof( }, ) } - - sdkCtx := cosmostypes.UnwrapSDKContext(ctx) if err = sdkCtx.EventManager().EmitTypedEvent(proofUpsertEvent); err != nil { return nil, status.Error( codes.Internal, @@ -305,232 +143,3 @@ func (k msgServer) SubmitProof( Proof: &proof, }, nil } - -// queryAndValidateClaimForProof ensures that a claim corresponding to the given -// proof's session exists & has a matching supplier address and session header, -// it then returns the corresponding claim if the validation is successful. -func (k msgServer) queryAndValidateClaimForProof( - ctx context.Context, - msg *types.MsgSubmitProof, -) (*types.Claim, error) { - sessionId := msg.GetSessionHeader().GetSessionId() - // NB: no need to assert the testSessionId or supplier address as it is retrieved - // by respective values of the given proof. I.e., if the claim exists, then these - // values are guaranteed to match. - foundClaim, found := k.GetClaim(ctx, sessionId, msg.GetSupplierAddress()) - if !found { - return nil, types.ErrProofClaimNotFound.Wrapf( - "no claim found for session ID %q and supplier %q", - sessionId, - msg.GetSupplierAddress(), - ) - } - - claimSessionHeader := foundClaim.GetSessionHeader() - proofSessionHeader := msg.GetSessionHeader() - - // Ensure session start heights match. - if claimSessionHeader.GetSessionStartBlockHeight() != proofSessionHeader.GetSessionStartBlockHeight() { - return nil, types.ErrProofInvalidSessionStartHeight.Wrapf( - "claim session start height %d does not match proof session start height %d", - claimSessionHeader.GetSessionStartBlockHeight(), - proofSessionHeader.GetSessionStartBlockHeight(), - ) - } - - // Ensure session end heights match. 
- if claimSessionHeader.GetSessionEndBlockHeight() != proofSessionHeader.GetSessionEndBlockHeight() { - return nil, types.ErrProofInvalidSessionEndHeight.Wrapf( - "claim session end height %d does not match proof session end height %d", - claimSessionHeader.GetSessionEndBlockHeight(), - proofSessionHeader.GetSessionEndBlockHeight(), - ) - } - - // Ensure application addresses match. - if claimSessionHeader.GetApplicationAddress() != proofSessionHeader.GetApplicationAddress() { - return nil, types.ErrProofInvalidAddress.Wrapf( - "claim application address %q does not match proof application address %q", - claimSessionHeader.GetApplicationAddress(), - proofSessionHeader.GetApplicationAddress(), - ) - } - - // Ensure service IDs match. - if claimSessionHeader.GetService().GetId() != proofSessionHeader.GetService().GetId() { - return nil, types.ErrProofInvalidService.Wrapf( - "claim service ID %q does not match proof service ID %q", - claimSessionHeader.GetService().GetId(), - proofSessionHeader.GetService().GetId(), - ) - } - - return &foundClaim, nil -} - -// compareSessionHeaders compares a session header against an expected session header. -// This is necessary to validate the proof's session header against both the relay -// request and response's session headers. -func compareSessionHeaders(expectedSessionHeader, sessionHeader *sessiontypes.SessionHeader) error { - // Compare the Application address. - if sessionHeader.GetApplicationAddress() != expectedSessionHeader.GetApplicationAddress() { - return types.ErrProofInvalidRelay.Wrapf( - "session headers application addresses mismatch; expect: %q, got: %q", - expectedSessionHeader.GetApplicationAddress(), - sessionHeader.GetApplicationAddress(), - ) - } - - // Compare the Service IDs. 
- if sessionHeader.GetService().GetId() != expectedSessionHeader.GetService().GetId() { - return types.ErrProofInvalidRelay.Wrapf( - "session headers service IDs mismatch; expected: %q, got: %q", - expectedSessionHeader.GetService().GetId(), - sessionHeader.GetService().GetId(), - ) - } - - // Compare the Service names. - if sessionHeader.GetService().GetName() != expectedSessionHeader.GetService().GetName() { - return types.ErrProofInvalidRelay.Wrapf( - "sessionHeaders service names mismatch expect: %q, got: %q", - expectedSessionHeader.GetService().GetName(), - sessionHeader.GetService().GetName(), - ) - } - - // Compare the Session start block heights. - if sessionHeader.GetSessionStartBlockHeight() != expectedSessionHeader.GetSessionStartBlockHeight() { - return types.ErrProofInvalidRelay.Wrapf( - "session headers session start heights mismatch; expected: %d, got: %d", - expectedSessionHeader.GetSessionStartBlockHeight(), - sessionHeader.GetSessionStartBlockHeight(), - ) - } - - // Compare the Session end block heights. - if sessionHeader.GetSessionEndBlockHeight() != expectedSessionHeader.GetSessionEndBlockHeight() { - return types.ErrProofInvalidRelay.Wrapf( - "session headers session end heights mismatch; expected: %d, got: %d", - expectedSessionHeader.GetSessionEndBlockHeight(), - sessionHeader.GetSessionEndBlockHeight(), - ) - } - - // Compare the Session IDs. - if sessionHeader.GetSessionId() != expectedSessionHeader.GetSessionId() { - return types.ErrProofInvalidRelay.Wrapf( - "session headers session IDs mismatch; expected: %q, got: %q", - expectedSessionHeader.GetSessionId(), - sessionHeader.GetSessionId(), - ) - } - - return nil -} - -// verifyClosestProof verifies the the correctness of the ClosestMerkleProof -// against the root hash committed to when creating the claim. 
-func verifyClosestProof( - proof *smt.SparseMerkleClosestProof, - claimRootHash []byte, -) error { - valid, err := smt.VerifyClosestProof(proof, claimRootHash, &protocol.SmtSpec) - if err != nil { - return err - } - - if !valid { - return types.ErrProofInvalidProof.Wrap("invalid closest merkle proof") - } - - return nil -} - -// validateRelayDifficulty ensures that the relay's mining difficulty meets the -// required minimum threshold. -// TODO_TECHDEBT: Factor out the relay mining difficulty validation into a shared -// function that can be used by both the proof and the miner packages. -func validateRelayDifficulty(relayBz, targetHash []byte, serviceId string) error { - relayHashArr := protocol.GetRelayHashFromBytes(relayBz) - relayHash := relayHashArr[:] - - if len(targetHash) != protocol.RelayHasherSize { - return types.ErrProofInvalidRelay.Wrapf( - "invalid RelayDifficultyTargetHash: (%x); length wanted: %d; got: %d", - targetHash, - protocol.RelayHasherSize, - len(targetHash), - ) - } - - if !protocol.IsRelayVolumeApplicable(relayHash, targetHash) { - var targetHashArr [protocol.RelayHasherSize]byte - copy(targetHashArr[:], targetHash) - - relayDifficulty := protocol.GetDifficultyFromHash(relayHashArr) - targetDifficulty := protocol.GetDifficultyFromHash(targetHashArr) - - return types.ErrProofInvalidRelay.Wrapf( - "the difficulty relay being proven is (%d), and is smaller than the target difficulty (%d) for service %s", - relayDifficulty, - targetDifficulty, - serviceId, - ) - } - - return nil -} - -// validateClosestPath ensures that the proof's path matches the expected path. -// Since the proof path needs to be pseudo-randomly selected AFTER the session -// ends, the seed for this is the block hash at the height when the proof window -// opens. 
-func (k msgServer) validateClosestPath( - ctx context.Context, - proof *smt.SparseMerkleClosestProof, - sessionHeader *sessiontypes.SessionHeader, - supplierAddr string, -) error { - // The RelayMiner has to wait until the submit claim and proof windows is are open - // in order to to create the claim and submit claims and proofs, respectively. - // These windows are calculated as specified in the docs; - // see: https://dev.poktroll.com/protocol/primitives/claim_and_proof_lifecycle. - // - // For reference, see relayerSessionsManager#waitForEarliest{CreateClaim,SubmitProof}Height(). - // - // The RelayMiner has to wait this long to ensure that late relays (i.e. - // submitted during SessionNumber=(N+1) but created during SessionNumber=N) are - // still included as part of SessionNumber=N. - // - // Since smt.ProveClosest is defined in terms of proof window open height, - // this block's hash needs to be used for validation too. - earliestSupplierProofCommitHeight, err := k.sharedQuerier.GetEarliestSupplierProofCommitHeight( - ctx, - sessionHeader.GetSessionEndBlockHeight(), - supplierAddr, - ) - if err != nil { - return err - } - - // earliestSupplierProofCommitHeight - 1 is the block that will have its hash used as the - // source of entropy for all the session trees in that batch, waiting for it to - // be received before proceeding. - proofPathSeedBlockHash := k.sessionKeeper.GetBlockHash(ctx, earliestSupplierProofCommitHeight-1) - - // TODO_BETA: Investigate "proof for the path provided does not match one expected by the on-chain protocol" - // error that may occur due to block height differing from the off-chain part. 
- k.logger.Info("E2E_DEBUG: height for block hash when verifying the proof", earliestSupplierProofCommitHeight, sessionHeader.GetSessionId()) - - expectedProofPath := protocol.GetPathForProof(proofPathSeedBlockHash, sessionHeader.GetSessionId()) - if !bytes.Equal(proof.Path, expectedProofPath) { - return types.ErrProofInvalidProof.Wrapf( - "the path of the proof provided (%x) does not match one expected by the on-chain protocol (%x)", - proof.Path, - expectedProofPath, - ) - } - - return nil -} diff --git a/x/proof/keeper/msg_server_submit_proof_test.go b/x/proof/keeper/msg_server_submit_proof_test.go index 99d890294..8d8c5a7a8 100644 --- a/x/proof/keeper/msg_server_submit_proof_test.go +++ b/x/proof/keeper/msg_server_submit_proof_test.go @@ -2,33 +2,24 @@ package keeper_test import ( "context" - "encoding/hex" - "os" "testing" "cosmossdk.io/depinject" - ring_secp256k1 "github.com/athanorlabs/go-dleq/secp256k1" "github.com/cosmos/cosmos-sdk/crypto/keyring" cosmostypes "github.com/cosmos/cosmos-sdk/types" - "github.com/pokt-network/ring-go" - "github.com/pokt-network/smt" "github.com/stretchr/testify/require" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - "github.com/pokt-network/poktroll/pkg/crypto" "github.com/pokt-network/poktroll/pkg/crypto/protocol" "github.com/pokt-network/poktroll/pkg/crypto/rings" "github.com/pokt-network/poktroll/pkg/polylog/polyzero" "github.com/pokt-network/poktroll/pkg/relayer" - "github.com/pokt-network/poktroll/pkg/relayer/session" testutilevents "github.com/pokt-network/poktroll/testutil/events" keepertest "github.com/pokt-network/poktroll/testutil/keeper" "github.com/pokt-network/poktroll/testutil/testkeyring" - "github.com/pokt-network/poktroll/testutil/testrelayer" "github.com/pokt-network/poktroll/x/proof/keeper" prooftypes "github.com/pokt-network/poktroll/x/proof/types" - servicetypes "github.com/pokt-network/poktroll/x/service/types" sessiontypes "github.com/pokt-network/poktroll/x/session/types" 
"github.com/pokt-network/poktroll/x/shared" sharedtypes "github.com/pokt-network/poktroll/x/shared/types" @@ -99,7 +90,7 @@ func TestMsgServer_SubmitProof_Success(t *testing.T) { sharedParams := keepers.SharedKeeper.GetParams(ctx) sdkCtx := cosmostypes.UnwrapSDKContext(ctx) - // Set proof keeper params to disable relaymining and always require a proof. + // Set proof keeper params to disable relay mining and always require a proof. err := keepers.Keeper.SetParams(ctx, testProofParams) require.NoError(t, err) @@ -395,7 +386,7 @@ func TestMsgServer_SubmitProof_Error_OutsideOfWindow(t *testing.T) { // Assert that only the create claim event was emitted. events := sdkCtx.EventManager().Events() require.Equal(t, 1, len(events)) - require.Equal(t, events[0].Type, "poktroll.proof.EventClaimCreated") + require.Equal(t, "poktroll.proof.EventClaimCreated", events[0].Type) }) } } @@ -418,9 +409,6 @@ func TestMsgServer_SubmitProof_Error(t *testing.T) { // Construct a keyring to hold the keypairs for the accounts used in the test. keyRing := keyring.NewInMemory(keepers.Codec) - // The base session start height used for testing - sessionStartHeight := int64(1) - // Create a pre-generated account iterator to create accounts for the test. preGeneratedAccts := testkeyring.PreGeneratedAccounts() @@ -468,17 +456,10 @@ func TestMsgServer_SubmitProof_Error(t *testing.T) { // to be claimed and for which a valid proof would be accepted. validSessionHeader := keepers.GetSessionHeader(ctx, t, appAddr, service, 1) - // Get the session for the application/supplier pair which is - // *not* expected to be claimed. - unclaimedSessionHeader := keepers.GetSessionHeader(ctx, t, wrongAppAddr, wrongService, 1) - // Construct a session header with session ID that doesn't match the expected session ID. 
wrongSessionIdHeader := *validSessionHeader wrongSessionIdHeader.SessionId = "wrong session ID" - // TODO_TECHDEBT: add a test case such that we can distinguish between early - // & late session end block heights. - // Construct a proof message server from the proof keeper. srv := keeper.NewMsgServerImpl(*keepers.Keeper) @@ -528,49 +509,6 @@ func TestMsgServer_SubmitProof_Error(t *testing.T) { keepers, ) - // Compute the difficulty in bits of the closest relay from the valid session tree. - validClosestRelayDifficultyBits := getClosestRelayDifficulty(t, validSessionTree, expectedMerkleProofPath) - - // Copy `emptyBlockHash` to `wrongClosestProofPath` to with a missing byte - // so the closest proof is invalid (i.e. unmarshalable). - invalidClosestProofBytes := make([]byte, len(expectedMerkleProofPath)-1) - - // Store the expected error returned during deserialization of the invalid - // closest Merkle proof bytes. - sparseMerkleClosestProof := &smt.SparseMerkleClosestProof{} - expectedInvalidProofUnmarshalErr := sparseMerkleClosestProof.Unmarshal(invalidClosestProofBytes) - - // Construct a relay to be mangled such that it fails to deserialize in order - // to set the error expectation for the relevant test case. - mangledRelay := testrelayer.NewEmptyRelay(validSessionHeader, validSessionHeader, supplierAddr) - - // Ensure valid relay request and response signatures. - testrelayer.SignRelayRequest(ctx, t, mangledRelay, appAddr, keyRing, ringClient) - testrelayer.SignRelayResponse(ctx, t, mangledRelay, supplierUid, supplierAddr, keyRing) - - // Serialize the relay so that it can be mangled. - mangledRelayBz, err := mangledRelay.Marshal() - require.NoError(t, err) - - // Mangle the serialized relay to cause an error during deserialization. - // Mangling could involve any byte randomly being swapped to any value - // so unmarshaling fails, but we are setting the first byte to 0 for simplicity. 
- mangledRelayBz[0] = 0x00 - - // Declare an invalid signature byte slice to construct expected relay request - // and response errors and use in corresponding test cases. - invalidSignatureBz := []byte("invalid signature bytes") - - // Prepare an invalid proof of the correct size. - wrongClosestProofPath := make([]byte, len(expectedMerkleProofPath)) - copy(wrongClosestProofPath, expectedMerkleProofPath) - copy(wrongClosestProofPath, "wrong closest proof path") - - lowTargetHash, _ := hex.DecodeString("00000000000000000000000000000000000000000000000000000000000000ff") - var lowTargetHashArr [protocol.RelayHasherSize]byte - copy(lowTargetHashArr[:], lowTargetHash) - highExpectedTargetDifficulty := protocol.GetDifficultyFromHash(lowTargetHashArr) - tests := []struct { desc string newProofMsg func(t *testing.T) *prooftypes.MsgSubmitProof @@ -662,531 +600,7 @@ func TestMsgServer_SubmitProof_Error(t *testing.T) { ).Error(), ), }, - { - desc: "merkle proof must be deserializable", - newProofMsg: func(t *testing.T) *prooftypes.MsgSubmitProof { - // Construct new proof message. - proof := newTestProofMsg(t, - supplierAddr, - validSessionHeader, - validSessionTree, - expectedMerkleProofPath, - ) - - // Set merkle proof to an incorrect byte slice. - proof.Proof = invalidClosestProofBytes - - return proof - }, - expectedErr: status.Error( - codes.InvalidArgument, - prooftypes.ErrProofInvalidProof.Wrapf( - "failed to unmarshal closest merkle proof: %s", - expectedInvalidProofUnmarshalErr, - ).Error(), - ), - }, - { - desc: "relay must be deserializable", - newProofMsg: func(t *testing.T) *prooftypes.MsgSubmitProof { - // Construct a session tree to which we'll add 1 unserializable relay. - mangledRelaySessionTree := newEmptySessionTree(t, validSessionHeader, supplierAddr) - - // Add the mangled relay to the session tree. 
- err = mangledRelaySessionTree.Update([]byte{1}, mangledRelayBz, 1) - require.NoError(t, err) - - // Get the Merkle root for the session tree in order to construct a claim. - mangledRelayMerkleRootBz, flushErr := mangledRelaySessionTree.Flush() - require.NoError(t, flushErr) - - // Re-set the block height to the earliest claim commit height to create a new claim. - claimCtx := cosmostypes.UnwrapSDKContext(ctx) - claimCtx = claimCtx.WithBlockHeight(claimMsgHeight) - - // Create a claim with a merkle root derived from a session tree - // with an unserializable relay. - claimMsg := newTestClaimMsg(t, - sessionStartHeight, - validSessionHeader.GetSessionId(), - supplierAddr, - appAddr, - service, - mangledRelayMerkleRootBz, - ) - _, err = srv.CreateClaim(claimCtx, claimMsg) - require.NoError(t, err) - - // Construct new proof message derived from a session tree - // with an unserializable relay. - return newTestProofMsg(t, - supplierAddr, - validSessionHeader, - mangledRelaySessionTree, - expectedMerkleProofPath, - ) - }, - expectedErr: status.Error( - codes.InvalidArgument, - prooftypes.ErrProofInvalidRelay.Wrapf( - "failed to unmarshal relay: %s", - keepers.Codec.Unmarshal(mangledRelayBz, &servicetypes.Relay{}), - ).Error(), - ), - }, - { - // TODO_TEST(community): expand: test case to cover each session header field. - desc: "relay request session header must match proof session header", - newProofMsg: func(t *testing.T) *prooftypes.MsgSubmitProof { - // Construct a session tree with 1 relay with a session header containing - // a session ID that doesn't match the proof session ID. - numRelays := uint(1) - wrongRequestSessionIdSessionTree := newFilledSessionTree( - ctx, t, - numRelays, - supplierUid, supplierAddr, - validSessionHeader, &wrongSessionIdHeader, validSessionHeader, - keyRing, - ringClient, - ) - - // Get the Merkle root for the session tree in order to construct a claim. 
- wrongRequestSessionIdMerkleRootBz, flushErr := wrongRequestSessionIdSessionTree.Flush() - require.NoError(t, flushErr) - - // Re-set the block height to the earliest claim commit height to create a new claim. - claimCtx := cosmostypes.UnwrapSDKContext(ctx) - claimCtx = claimCtx.WithBlockHeight(claimMsgHeight) - - // Create a claim with a merkle root derived from a relay - // request containing the wrong session ID. - claimMsg := newTestClaimMsg(t, - sessionStartHeight, - validSessionHeader.GetSessionId(), - supplierAddr, - appAddr, - service, - wrongRequestSessionIdMerkleRootBz, - ) - _, err = srv.CreateClaim(claimCtx, claimMsg) - require.NoError(t, err) - - // Construct new proof message using the valid session header, - // *not* the one used in the session tree's relay request. - return newTestProofMsg(t, - supplierAddr, - validSessionHeader, - wrongRequestSessionIdSessionTree, - expectedMerkleProofPath, - ) - }, - expectedErr: status.Error( - codes.FailedPrecondition, - prooftypes.ErrProofInvalidRelay.Wrapf( - "session headers session IDs mismatch; expected: %q, got: %q", - validSessionHeader.GetSessionId(), - wrongSessionIdHeader.GetSessionId(), - ).Error(), - ), - }, - { - // TODO_TEST: expand: test case to cover each session header field. - desc: "relay response session header must match proof session header", - newProofMsg: func(t *testing.T) *prooftypes.MsgSubmitProof { - // Construct a session tree with 1 relay with a session header containing - // a session ID that doesn't match the expected session ID. - numRelays := uint(1) - wrongResponseSessionIdSessionTree := newFilledSessionTree( - ctx, t, - numRelays, - supplierUid, supplierAddr, - validSessionHeader, validSessionHeader, &wrongSessionIdHeader, - keyRing, - ringClient, - ) - - // Get the Merkle root for the session tree in order to construct a claim. 
- wrongResponseSessionIdMerkleRootBz, flushErr := wrongResponseSessionIdSessionTree.Flush() - require.NoError(t, flushErr) - - // Re-set the block height to the earliest claim commit height to create a new claim. - claimCtx := cosmostypes.UnwrapSDKContext(ctx) - claimCtx = claimCtx.WithBlockHeight(claimMsgHeight) - - // Create a claim with a merkle root derived from a relay - // response containing the wrong session ID. - claimMsg := newTestClaimMsg(t, - sessionStartHeight, - validSessionHeader.GetSessionId(), - supplierAddr, - appAddr, - service, - wrongResponseSessionIdMerkleRootBz, - ) - _, err = srv.CreateClaim(claimCtx, claimMsg) - require.NoError(t, err) - - // Construct new proof message using the valid session header, - // *not* the one used in the session tree's relay response. - return newTestProofMsg(t, - supplierAddr, - validSessionHeader, - wrongResponseSessionIdSessionTree, - expectedMerkleProofPath, - ) - }, - expectedErr: status.Error( - codes.FailedPrecondition, - prooftypes.ErrProofInvalidRelay.Wrapf( - "session headers session IDs mismatch; expected: %q, got: %q", - validSessionHeader.GetSessionId(), - wrongSessionIdHeader.GetSessionId(), - ).Error(), - ), - }, - { - desc: "relay request signature must be valid", - newProofMsg: func(t *testing.T) *prooftypes.MsgSubmitProof { - // Set the relay request signature to an invalid byte slice. - invalidRequestSignatureRelay := testrelayer.NewEmptyRelay(validSessionHeader, validSessionHeader, supplierAddr) - invalidRequestSignatureRelay.Req.Meta.Signature = invalidSignatureBz - - // Ensure a valid relay response signature. - testrelayer.SignRelayResponse(ctx, t, invalidRequestSignatureRelay, supplierUid, supplierAddr, keyRing) - - invalidRequestSignatureRelayBz, marshalErr := invalidRequestSignatureRelay.Marshal() - require.NoError(t, marshalErr) - - // Construct a session tree with 1 relay with a session header containing - // a session ID that doesn't match the expected session ID. 
- invalidRequestSignatureSessionTree := newEmptySessionTree(t, validSessionHeader, supplierAddr) - - // Add the relay to the session tree. - err = invalidRequestSignatureSessionTree.Update([]byte{1}, invalidRequestSignatureRelayBz, 1) - require.NoError(t, err) - - // Get the Merkle root for the session tree in order to construct a claim. - invalidRequestSignatureMerkleRootBz, flushErr := invalidRequestSignatureSessionTree.Flush() - require.NoError(t, flushErr) - - // Re-set the block height to the earliest claim commit height to create a new claim. - claimCtx := cosmostypes.UnwrapSDKContext(ctx) - claimCtx = claimCtx.WithBlockHeight(claimMsgHeight) - - // Create a claim with a merkle root derived from a session tree - // with an invalid relay request signature. - claimMsg := newTestClaimMsg(t, - sessionStartHeight, - validSessionHeader.GetSessionId(), - supplierAddr, - appAddr, - service, - invalidRequestSignatureMerkleRootBz, - ) - _, err = srv.CreateClaim(claimCtx, claimMsg) - require.NoError(t, err) - - // Construct new proof message derived from a session tree - // with an invalid relay request signature. - return newTestProofMsg(t, - supplierAddr, - validSessionHeader, - invalidRequestSignatureSessionTree, - expectedMerkleProofPath, - ) - }, - expectedErr: status.Error( - codes.FailedPrecondition, - prooftypes.ErrProofInvalidRelayRequest.Wrapf( - "error deserializing ring signature: %s", - new(ring.RingSig).Deserialize(ring_secp256k1.NewCurve(), invalidSignatureBz), - ).Error(), - ), - }, - { - desc: "relay request signature is valid but signed by an incorrect application", - newProofMsg: func(t *testing.T) *prooftypes.MsgSubmitProof { - t.Skip("TODO_TECHDEBT(@bryanchriswhite): Implement this") - return nil - }, - }, - { - desc: "relay response signature must be valid", - newProofMsg: func(t *testing.T) *prooftypes.MsgSubmitProof { - // Set the relay response signature to an invalid byte slice. 
- relay := testrelayer.NewEmptyRelay(validSessionHeader, validSessionHeader, supplierAddr) - relay.Res.Meta.SupplierSignature = invalidSignatureBz - - // Ensure a valid relay request signature - testrelayer.SignRelayRequest(ctx, t, relay, appAddr, keyRing, ringClient) - - relayBz, marshalErr := relay.Marshal() - require.NoError(t, marshalErr) - - // Construct a session tree with 1 relay with a session header containing - // a session ID that doesn't match the expected session ID. - invalidResponseSignatureSessionTree := newEmptySessionTree(t, validSessionHeader, supplierAddr) - - // Add the relay to the session tree. - err = invalidResponseSignatureSessionTree.Update([]byte{1}, relayBz, 1) - require.NoError(t, err) - - // Get the Merkle root for the session tree in order to construct a claim. - invalidResponseSignatureMerkleRootBz, flushErr := invalidResponseSignatureSessionTree.Flush() - require.NoError(t, flushErr) - - // Re-set the block height to the earliest claim commit height to create a new claim. - claimCtx := cosmostypes.UnwrapSDKContext(ctx) - claimCtx = claimCtx.WithBlockHeight(claimMsgHeight) - - // Create a claim with a merkle root derived from a session tree - // with an invalid relay response signature. - claimMsg := newTestClaimMsg(t, - sessionStartHeight, - validSessionHeader.GetSessionId(), - supplierAddr, - appAddr, - service, - invalidResponseSignatureMerkleRootBz, - ) - _, err = srv.CreateClaim(claimCtx, claimMsg) - require.NoError(t, err) - - // Construct new proof message derived from a session tree - // with an invalid relay response signature. 
- return newTestProofMsg(t, - supplierAddr, - validSessionHeader, - invalidResponseSignatureSessionTree, - expectedMerkleProofPath, - ) - }, - expectedErr: status.Error( - codes.FailedPrecondition, - servicetypes.ErrServiceInvalidRelayResponse.Wrap("invalid signature").Error(), - ), - }, - { - desc: "relay response signature is valid but signed by an incorrect supplier", - newProofMsg: func(t *testing.T) *prooftypes.MsgSubmitProof { - t.Skip("TODO_TECHDEBT(@bryanchriswhite): Implement this") - return nil - }, - }, - { - desc: "the merkle proof path provided does not match the one expected/enforced by the protocol", - newProofMsg: func(t *testing.T) *prooftypes.MsgSubmitProof { - // Construct a new valid session tree for this test case because once the - // closest proof has already been generated, the path cannot be changed. - numRelays := uint(5) - wrongPathSessionTree := newFilledSessionTree( - ctx, t, - numRelays, - supplierUid, supplierAddr, - validSessionHeader, validSessionHeader, validSessionHeader, - keyRing, - ringClient, - ) - - wrongPathMerkleRootBz, flushErr := wrongPathSessionTree.Flush() - require.NoError(t, flushErr) - - // Re-set the block height to the earliest claim commit height to create a new claim. - claimCtx := keepertest.SetBlockHeight(ctx, claimMsgHeight) - - // Create a valid claim with the expected merkle root. - claimMsg := newTestClaimMsg(t, - sessionStartHeight, - validSessionHeader.GetSessionId(), - supplierAddr, - appAddr, - service, - wrongPathMerkleRootBz, - ) - _, err = srv.CreateClaim(claimCtx, claimMsg) - require.NoError(t, err) - - // Construct new proof message derived from a session tree - // with an invalid relay response signature. 
- return newTestProofMsg(t, supplierAddr, validSessionHeader, wrongPathSessionTree, wrongClosestProofPath) - }, - expectedErr: status.Error( - codes.FailedPrecondition, - prooftypes.ErrProofInvalidProof.Wrapf( - "the path of the proof provided (%x) does not match one expected by the on-chain protocol (%x)", - wrongClosestProofPath, - protocol.GetPathForProof(sdkCtx.HeaderHash(), validSessionHeader.GetSessionId()), - ).Error(), - ), - }, - { - desc: "relay difficulty must be greater than or equal to minimum (zero difficulty)", - newProofMsg: func(t *testing.T) *prooftypes.MsgSubmitProof { - // Set the minimum relay difficulty to a non-zero value such that the relays - // constructed by the test helpers have a negligible chance of being valid. - err = keepers.Keeper.SetParams(ctx, prooftypes.Params{ - RelayDifficultyTargetHash: lowTargetHash, - }) - require.NoError(t, err) - - // Reset the minimum relay difficulty to zero after this test case. - t.Cleanup(func() { - err = keepers.Keeper.SetParams(ctx, prooftypes.DefaultParams()) - require.NoError(t, err) - }) - - // Construct a proof message with a session tree containing - // a relay of insufficient difficulty. 
- return newTestProofMsg(t, - supplierAddr, - validSessionHeader, - validSessionTree, - expectedMerkleProofPath, - ) - }, - expectedErr: status.Error( - codes.FailedPrecondition, - prooftypes.ErrProofInvalidRelay.Wrapf( - "the difficulty relay being proven is (%d), and is smaller than the target difficulty (%d) for service %s", - validClosestRelayDifficultyBits, - highExpectedTargetDifficulty, - validSessionHeader.Service.Id, - ).Error(), - ), - }, - { - desc: "relay difficulty must be greater than or equal to minimum (non-zero difficulty)", - newProofMsg: func(t *testing.T) *prooftypes.MsgSubmitProof { - t.Skip("TODO_TECHDEBT(@bryanchriswhite): Implement this") - return nil - }, - }, - { // group: claim must exist for proof message - desc: "claim must exist for proof message", - newProofMsg: func(t *testing.T) *prooftypes.MsgSubmitProof { - // Construct a new session tree corresponding to the unclaimed session. - numRelays := uint(5) - unclaimedSessionTree := newFilledSessionTree( - ctx, t, - numRelays, - "wrong_supplier", wrongSupplierAddr, - unclaimedSessionHeader, unclaimedSessionHeader, unclaimedSessionHeader, - keyRing, - ringClient, - ) - - // Discard session tree Merkle root because no claim is being created. - // Session tree must be closed (flushed) to compute closest Merkle Proof. - _, err = unclaimedSessionTree.Flush() - require.NoError(t, err) - - // Compute expected proof path for the unclaimed session. - expectedMerkleProofPath := protocol.GetPathForProof( - blockHeaderHash, - unclaimedSessionHeader.GetSessionId(), - ) - - // Construct new proof message using the supplier & session header - // from the session which is *not* expected to be claimed. 
- return newTestProofMsg(t, - wrongSupplierAddr, - unclaimedSessionHeader, - unclaimedSessionTree, - expectedMerkleProofPath, - ) - }, - expectedErr: status.Error( - codes.FailedPrecondition, - prooftypes.ErrProofClaimNotFound.Wrapf( - "no claim found for session ID %q and supplier %q", - unclaimedSessionHeader.GetSessionId(), - wrongSupplierAddr, - ).Error(), - ), - }, - { - desc: "Valid proof cannot validate claim with an incorrect root", - newProofMsg: func(t *testing.T) *prooftypes.MsgSubmitProof { - numRelays := uint(10) - wrongMerkleRootSessionTree := newFilledSessionTree( - ctx, t, - numRelays, - supplierUid, supplierAddr, - validSessionHeader, validSessionHeader, validSessionHeader, - keyRing, - ringClient, - ) - - wrongMerkleRootBz, err := wrongMerkleRootSessionTree.Flush() - require.NoError(t, err) - - // Re-set the block height to the earliest claim commit height to create a new claim. - claimCtx := keepertest.SetBlockHeight(ctx, claimMsgHeight) - - // Create a claim with the incorrect Merkle root. - wrongMerkleRootClaimMsg := newTestClaimMsg(t, - sessionStartHeight, - validSessionHeader.GetSessionId(), - supplierAddr, - appAddr, - service, - wrongMerkleRootBz, - ) - _, err = srv.CreateClaim(claimCtx, wrongMerkleRootClaimMsg) - require.NoError(t, err) - - // Construct a valid session tree with 5 relays. - validSessionTree := newFilledSessionTree( - ctx, t, - uint(5), - supplierUid, supplierAddr, - validSessionHeader, validSessionHeader, validSessionHeader, - keyRing, - ringClient, - ) - - _, err = validSessionTree.Flush() - require.NoError(t, err) - - // Compute expected proof path for the session. 
- expectedMerkleProofPath := protocol.GetPathForProof( - blockHeaderHash, - validSessionHeader.GetSessionId(), - ) - - return newTestProofMsg(t, - supplierAddr, - validSessionHeader, - validSessionTree, - expectedMerkleProofPath, - ) - }, - expectedErr: status.Error( - codes.FailedPrecondition, - prooftypes.ErrProofInvalidProof.Wrap("invalid closest merkle proof").Error(), - ), - }, - { - desc: "claim and proof application addresses must match", - newProofMsg: func(t *testing.T) *prooftypes.MsgSubmitProof { - t.Skip("this test case reduces to either the 'claim must exist for proof message' or 'proof session ID must match on-chain session ID cases") - return nil - }, - }, - { - desc: "claim and proof service IDs must match", - newProofMsg: func(t *testing.T) *prooftypes.MsgSubmitProof { - t.Skip("this test case reduces to either the 'claim must exist for proof message' or 'proof session ID must match on-chain session ID cases") - return nil - }, - }, - { - desc: "claim and proof supplier addresses must match", - newProofMsg: func(t *testing.T) *prooftypes.MsgSubmitProof { - t.Skip("this test case reduces to either the 'claim must exist for proof message' or 'proof session ID must match on-chain session ID cases") - return nil - }, - }, + // TODO_TECHDEBT: add a test case such that we can distinguish between early & late session end block heights. } // Submit the corresponding proof. @@ -1230,100 +644,6 @@ func TestMsgServer_SubmitProof_Error(t *testing.T) { } } -// newFilledSessionTree creates a new session tree with numRelays of relays -// filled out using the request and response headers provided where every -// relay is signed by the supplier and application respectively. 
-func newFilledSessionTree( - ctx context.Context, t *testing.T, - numRelays uint, - supplierKeyUid, supplierAddr string, - sessionTreeHeader, reqHeader, resHeader *sessiontypes.SessionHeader, - keyRing keyring.Keyring, - ringClient crypto.RingClient, -) relayer.SessionTree { - t.Helper() - - // Initialize an empty session tree with the given session header. - sessionTree := newEmptySessionTree(t, sessionTreeHeader, supplierAddr) - - // Add numRelays of relays to the session tree. - fillSessionTree( - ctx, t, - sessionTree, numRelays, - supplierKeyUid, supplierAddr, - reqHeader, resHeader, - keyRing, - ringClient, - ) - - return sessionTree -} - -// newEmptySessionTree creates a new empty session tree with for given session. -func newEmptySessionTree( - t *testing.T, - sessionTreeHeader *sessiontypes.SessionHeader, - supplierAddr string, -) relayer.SessionTree { - t.Helper() - - // Create a temporary session tree store directory for persistence. - testSessionTreeStoreDir, err := os.MkdirTemp("", "session_tree_store_dir") - require.NoError(t, err) - - // Delete the temporary session tree store directory after the test completes. - t.Cleanup(func() { - _ = os.RemoveAll(testSessionTreeStoreDir) - }) - - accAddress := cosmostypes.MustAccAddressFromBech32(supplierAddr) - - // Construct a session tree to add relays to and generate a proof from. - sessionTree, err := session.NewSessionTree( - sessionTreeHeader, - &accAddress, - testSessionTreeStoreDir, - ) - require.NoError(t, err) - - return sessionTree -} - -// fillSessionTree fills the session tree with valid signed relays. -// A total of numRelays relays are added to the session tree with -// increasing weights (relay 1 has weight 1, relay 2 has weight 2, etc.). 
-func fillSessionTree( - ctx context.Context, t *testing.T, - sessionTree relayer.SessionTree, - numRelays uint, - supplierKeyUid, supplierAddr string, - reqHeader, resHeader *sessiontypes.SessionHeader, - keyRing keyring.Keyring, - ringClient crypto.RingClient, -) { - t.Helper() - - for i := 0; i < int(numRelays); i++ { - relay := testrelayer.NewSignedEmptyRelay( - ctx, t, - supplierKeyUid, supplierAddr, - reqHeader, resHeader, - keyRing, - ringClient, - ) - relayBz, err := relay.Marshal() - require.NoError(t, err) - - relayKey, err := relay.GetHash() - require.NoError(t, err) - - relayWeight := uint64(i) - - err = sessionTree.Update(relayKey[:], relayBz, relayWeight) - require.NoError(t, err) - } -} - // newTestProofMsg creates a new submit proof message that can be submitted // to be validated and stored on-chain. func newTestProofMsg( @@ -1406,28 +726,3 @@ func createClaimAndStoreBlockHash( return claimRes.GetClaim() } - -// getClosestRelayDifficulty returns the mining difficulty number which corresponds -// to the relayHash stored in the sessionTree that is closest to the merkle proof -// path provided. -func getClosestRelayDifficulty( - t *testing.T, - sessionTree relayer.SessionTree, - closestMerkleProofPath []byte, -) int64 { - // Retrieve a merkle proof that is closest to the path provided - closestMerkleProof, err := sessionTree.ProveClosest(closestMerkleProofPath) - require.NoError(t, err) - - // Extract the Relay (containing the RelayResponse & RelayRequest) from the merkle proof. - relay := new(servicetypes.Relay) - relayBz := closestMerkleProof.GetValueHash(&protocol.SmtSpec) - err = relay.Unmarshal(relayBz) - require.NoError(t, err) - - // Retrieve the hash of the relay. 
- relayHash, err := relay.GetHash() - require.NoError(t, err) - - return protocol.GetDifficultyFromHash(relayHash) -} diff --git a/x/proof/keeper/proof_validation.go b/x/proof/keeper/proof_validation.go new file mode 100644 index 000000000..8439da485 --- /dev/null +++ b/x/proof/keeper/proof_validation.go @@ -0,0 +1,419 @@ +package keeper + +import ( + "bytes" + "context" + + "github.com/pokt-network/smt" + + "github.com/pokt-network/poktroll/pkg/crypto/protocol" + "github.com/pokt-network/poktroll/x/proof/types" + servicetypes "github.com/pokt-network/poktroll/x/service/types" + sessiontypes "github.com/pokt-network/poktroll/x/session/types" +) + +/* + TODO_MAINNET: Document these steps in the docs and link here. + + ## Actions (error if anything fails) + 1. Retrieve a fully hydrated `session` from on-chain store using `msg` metadata + 2. Retrieve a fully hydrated `claim` from on-chain store using `msg` metadata + 3. Retrieve `relay.Req` and `relay.Res` from deserializing `proof.ClosestValueHash` + + ## Basic Validations (metadata only) + 1. proof.sessionId == claim.sessionId + 2. msg.supplier in session.suppliers + 3. relay.Req.signer == session.appAddr + 4. relay.Res.signer == msg.supplier + + ## Msg distribution validation (governance based params) + 1. Validate Proof submission is not too early; governance-based param + pseudo-random variation + 2. Validate Proof submission is not too late; governance-based param + pseudo-random variation + + ## Relay Signature validation + 1. verify(relay.Req.Signature, appRing) + 2. verify(relay.Res.Signature, supplier.pubKey) + + ## Relay Mining validation + 1. verify(proof.path) is the expected path; pseudo-random variation using on-chain data + 2. verify(proof.ValueHash, expectedDifficulty); governance based + 3. verify(claim.Root, proof.ClosestProof); verify the closest proof is correct +*/ + +// IsProofValid validates the proof submitted by the supplier is correct with +// respect to an on-chain claim. 
+func (k Keeper) IsProofValid( + ctx context.Context, + proof *types.Proof, +) (valid bool, err error) { + logger := k.Logger().With("method", "ValidateProof") + + // Retrieve the supplier's public key. + supplierAddr := proof.SupplierAddress + supplierPubKey, err := k.accountQuerier.GetPubKeyFromAddress(ctx, supplierAddr) + if err != nil { + return false, err + } + + // Validate the session header. + var onChainSession *sessiontypes.Session + onChainSession, err = k.queryAndValidateSessionHeader(ctx, proof.SessionHeader, supplierAddr) + if err != nil { + return false, err + } + logger.Info("queried and validated the session header") + + // Re-hydrate message session header with the on-chain session header. + // This corrects for discrepancies between unvalidated fields in the session + // header which can be derived from known values (e.g. session end height). + sessionHeader := onChainSession.GetHeader() + + // Validate proof message commit height is within the respective session's + // proof submission window using the on-chain session header. + if err = k.validateProofWindow(ctx, sessionHeader, supplierAddr); err != nil { + return false, err + } + + if proof.ClosestMerkleProof == nil || len(proof.ClosestMerkleProof) == 0 { + return false, types.ErrProofInvalidProof.Wrap("proof cannot be empty") + } + + // Unmarshal the closest merkle proof from the message. + sparseMerkleClosestProof := &smt.SparseMerkleClosestProof{} + if err = sparseMerkleClosestProof.Unmarshal(proof.ClosestMerkleProof); err != nil { + return false, types.ErrProofInvalidProof.Wrapf( + "failed to unmarshal closest merkle proof: %s", + err, + ) + } + + // TODO_MAINNET(#427): Utilize smt.VerifyCompactClosestProof here to + // reduce on-chain storage requirements for proofs. + // Get the relay request and response from the proof.GetClosestMerkleProof. 
+ relayBz := sparseMerkleClosestProof.GetValueHash(&protocol.SmtSpec) + relay := &servicetypes.Relay{} + if err = k.cdc.Unmarshal(relayBz, relay); err != nil { + return false, types.ErrProofInvalidRelay.Wrapf( + "failed to unmarshal relay: %s", + err, + ) + } + + // Basic validation of the relay request. + relayReq := relay.GetReq() + if err = relayReq.ValidateBasic(); err != nil { + return false, err + } + logger.Debug("successfully validated relay request") + + // Make sure that the supplier address in the proof matches the one in the relay request. + if supplierAddr != relayReq.Meta.SupplierAddress { + return false, types.ErrProofSupplierMismatch.Wrapf("supplier type mismatch") + } + logger.Debug("the proof supplier address matches the relay request supplier address") + + // Basic validation of the relay response. + relayRes := relay.GetRes() + if err = relayRes.ValidateBasic(); err != nil { + return false, err + } + logger.Debug("successfully validated relay response") + + // Verify that the relay request session header matches the proof session header. + if err = compareSessionHeaders(sessionHeader, relayReq.Meta.GetSessionHeader()); err != nil { + return false, err + } + logger.Debug("successfully compared relay request session header") + + // Verify that the relay response session header matches the proof session header. + if err = compareSessionHeaders(sessionHeader, relayRes.Meta.GetSessionHeader()); err != nil { + return false, err + } + logger.Debug("successfully compared relay response session header") + + // Verify the relay request's signature. + if err = k.ringClient.VerifyRelayRequestSignature(ctx, relayReq); err != nil { + return false, err + } + logger.Debug("successfully verified relay request signature") + + // Verify the relay response's signature. 
+ if err = relayRes.VerifySupplierSignature(supplierPubKey); err != nil { + return false, err + } + logger.Debug("successfully verified relay response signature") + + // Get the proof module's governance parameters. + // TODO_FOLLOWUP(@olshansk, #690): Get the difficulty associated with the service + params := k.GetParams(ctx) + + // Verify the relay difficulty is above the minimum required to earn rewards. + if err = validateRelayDifficulty( + relayBz, + params.RelayDifficultyTargetHash, + sessionHeader.Service.Id, + ); err != nil { + return false, err + } + logger.Debug("successfully validated relay mining difficulty") + + // Validate that path the proof is submitted for matches the expected one + // based on the pseudo-random on-chain data associated with the header. + if err = k.validateClosestPath( + ctx, + sparseMerkleClosestProof, + sessionHeader, + supplierAddr, + ); err != nil { + return false, err + } + logger.Debug("successfully validated proof path") + + // Retrieve the corresponding claim for the proof submitted so it can be + // used in the proof validation below. + claim, err := k.queryAndValidateClaimForProof(ctx, sessionHeader, supplierAddr) + if err != nil { + return false, err + } + + logger.Debug("successfully retrieved and validated claim") + + // Verify the proof's closest merkle proof. + if err = verifyClosestProof(sparseMerkleClosestProof, claim.GetRootHash()); err != nil { + return false, err + } + logger.Debug("successfully verified closest merkle proof") + + return true, nil + +} + +// validateClosestPath ensures that the proof's path matches the expected path. +// Since the proof path needs to be pseudo-randomly selected AFTER the session +// ends, the seed for this is the block hash at the height when the proof window +// opens. 
+func (k Keeper) validateClosestPath(
+	ctx context.Context,
+	proof *smt.SparseMerkleClosestProof,
+	sessionHeader *sessiontypes.SessionHeader,
+	supplierAddr string,
+) error {
+	// The RelayMiner has to wait until the claim and proof submission windows are open
+	// in order to create and submit claims and proofs, respectively.
+	// These windows are calculated as specified in the docs;
+	// see: https://dev.poktroll.com/protocol/primitives/claim_and_proof_lifecycle.
+	//
+	// For reference, see relayerSessionsManager#waitForEarliest{CreateClaim,SubmitProof}Height().
+	//
+	// The RelayMiner has to wait this long to ensure that late relays (i.e.
+	// submitted during SessionNumber=(N+1) but created during SessionNumber=N) are
+	// still included as part of SessionNumber=N.
+	//
+	// Since smt.ProveClosest is defined in terms of proof window open height,
+	// this block's hash needs to be used for validation too.
+	earliestSupplierProofCommitHeight, err := k.sharedQuerier.GetEarliestSupplierProofCommitHeight(
+		ctx,
+		sessionHeader.GetSessionEndBlockHeight(),
+		supplierAddr,
+	)
+	if err != nil {
+		return err
+	}
+
+	// earliestSupplierProofCommitHeight - 1 is the block that will have its hash used as the
+	// source of entropy for all the session trees in that batch, waiting for it to
+	// be received before proceeding.
+	proofPathSeedBlockHash := k.sessionKeeper.GetBlockHash(ctx, earliestSupplierProofCommitHeight-1)
+
+	// TODO_BETA: Investigate "proof for the path provided does not match one expected by the on-chain protocol"
+	// error that may occur due to block height differing from the off-chain part.
+ k.logger.Info("E2E_DEBUG: height for block hash when verifying the proof", earliestSupplierProofCommitHeight, sessionHeader.GetSessionId()) + + expectedProofPath := protocol.GetPathForProof(proofPathSeedBlockHash, sessionHeader.GetSessionId()) + if !bytes.Equal(proof.Path, expectedProofPath) { + return types.ErrProofInvalidProof.Wrapf( + "the path of the proof provided (%x) does not match one expected by the on-chain protocol (%x)", + proof.Path, + expectedProofPath, + ) + } + + return nil +} + +// queryAndValidateClaimForProof ensures that a claim corresponding to the given +// proof's session exists & has a matching supplier address and session header, +// it then returns the corresponding claim if the validation is successful. +func (k Keeper) queryAndValidateClaimForProof( + ctx context.Context, + sessionHeader *sessiontypes.SessionHeader, + supplierAddr string, +) (*types.Claim, error) { + sessionId := sessionHeader.SessionId + // NB: no need to assert the testSessionId or supplier address as it is retrieved + // by respective values of the given proof. I.e., if the claim exists, then these + // values are guaranteed to match. + foundClaim, found := k.GetClaim(ctx, sessionId, supplierAddr) + if !found { + return nil, types.ErrProofClaimNotFound.Wrapf( + "no claim found for session ID %q and supplier %q", + sessionId, + supplierAddr, + ) + } + + claimSessionHeader := foundClaim.GetSessionHeader() + proofSessionHeader := sessionHeader + + // Ensure session start heights match. + if claimSessionHeader.GetSessionStartBlockHeight() != proofSessionHeader.GetSessionStartBlockHeight() { + return nil, types.ErrProofInvalidSessionStartHeight.Wrapf( + "claim session start height %d does not match proof session start height %d", + claimSessionHeader.GetSessionStartBlockHeight(), + proofSessionHeader.GetSessionStartBlockHeight(), + ) + } + + // Ensure session end heights match. 
+ if claimSessionHeader.GetSessionEndBlockHeight() != proofSessionHeader.GetSessionEndBlockHeight() { + return nil, types.ErrProofInvalidSessionEndHeight.Wrapf( + "claim session end height %d does not match proof session end height %d", + claimSessionHeader.GetSessionEndBlockHeight(), + proofSessionHeader.GetSessionEndBlockHeight(), + ) + } + + // Ensure application addresses match. + if claimSessionHeader.GetApplicationAddress() != proofSessionHeader.GetApplicationAddress() { + return nil, types.ErrProofInvalidAddress.Wrapf( + "claim application address %q does not match proof application address %q", + claimSessionHeader.GetApplicationAddress(), + proofSessionHeader.GetApplicationAddress(), + ) + } + + // Ensure service IDs match. + if claimSessionHeader.GetService().GetId() != proofSessionHeader.GetService().GetId() { + return nil, types.ErrProofInvalidService.Wrapf( + "claim service ID %q does not match proof service ID %q", + claimSessionHeader.GetService().GetId(), + proofSessionHeader.GetService().GetId(), + ) + } + + return &foundClaim, nil +} + +// compareSessionHeaders compares a session header against an expected session header. +// This is necessary to validate the proof's session header against both the relay +// request and response's session headers. +func compareSessionHeaders(expectedSessionHeader, sessionHeader *sessiontypes.SessionHeader) error { + // Compare the Application address. + if sessionHeader.GetApplicationAddress() != expectedSessionHeader.GetApplicationAddress() { + return types.ErrProofInvalidRelay.Wrapf( + "session headers application addresses mismatch; expect: %q, got: %q", + expectedSessionHeader.GetApplicationAddress(), + sessionHeader.GetApplicationAddress(), + ) + } + + // Compare the Service IDs. 
+	if sessionHeader.GetService().GetId() != expectedSessionHeader.GetService().GetId() {
+		return types.ErrProofInvalidRelay.Wrapf(
+			"session headers service IDs mismatch; expected: %q, got: %q",
+			expectedSessionHeader.GetService().GetId(),
+			sessionHeader.GetService().GetId(),
+		)
+	}
+
+	// Compare the Service names.
+	if sessionHeader.GetService().GetName() != expectedSessionHeader.GetService().GetName() {
+		return types.ErrProofInvalidRelay.Wrapf(
+			"sessionHeaders service names mismatch expect: %q, got: %q",
+			expectedSessionHeader.GetService().GetName(),
+			sessionHeader.GetService().GetName(),
+		)
+	}
+
+	// Compare the Session start block heights.
+	if sessionHeader.GetSessionStartBlockHeight() != expectedSessionHeader.GetSessionStartBlockHeight() {
+		return types.ErrProofInvalidRelay.Wrapf(
+			"session headers session start heights mismatch; expected: %d, got: %d",
+			expectedSessionHeader.GetSessionStartBlockHeight(),
+			sessionHeader.GetSessionStartBlockHeight(),
+		)
+	}
+
+	// Compare the Session end block heights.
+	if sessionHeader.GetSessionEndBlockHeight() != expectedSessionHeader.GetSessionEndBlockHeight() {
+		return types.ErrProofInvalidRelay.Wrapf(
+			"session headers session end heights mismatch; expected: %d, got: %d",
+			expectedSessionHeader.GetSessionEndBlockHeight(),
+			sessionHeader.GetSessionEndBlockHeight(),
+		)
+	}
+
+	// Compare the Session IDs.
+	if sessionHeader.GetSessionId() != expectedSessionHeader.GetSessionId() {
+		return types.ErrProofInvalidRelay.Wrapf(
+			"session headers session IDs mismatch; expected: %q, got: %q",
+			expectedSessionHeader.GetSessionId(),
+			sessionHeader.GetSessionId(),
+		)
+	}
+
+	return nil
+}
+
+// verifyClosestProof verifies the correctness of the ClosestMerkleProof
+// against the root hash committed to when creating the claim.
+func verifyClosestProof( + proof *smt.SparseMerkleClosestProof, + claimRootHash []byte, +) error { + valid, err := smt.VerifyClosestProof(proof, claimRootHash, &protocol.SmtSpec) + if err != nil { + return err + } + + if !valid { + return types.ErrProofInvalidProof.Wrap("invalid closest merkle proof") + } + + return nil +} + +// validateRelayDifficulty ensures that the relay's mining difficulty meets the +// required minimum threshold. +// TODO_TECHDEBT: Factor out the relay mining difficulty validation into a shared +// function that can be used by both the proof and the miner packages. +func validateRelayDifficulty(relayBz, targetHash []byte, serviceId string) error { + relayHashArr := protocol.GetRelayHashFromBytes(relayBz) + relayHash := relayHashArr[:] + + if len(targetHash) != protocol.RelayHasherSize { + return types.ErrProofInvalidRelay.Wrapf( + "invalid RelayDifficultyTargetHash: (%x); length wanted: %d; got: %d", + targetHash, + protocol.RelayHasherSize, + len(targetHash), + ) + } + + if !protocol.IsRelayVolumeApplicable(relayHash, targetHash) { + var targetHashArr [protocol.RelayHasherSize]byte + copy(targetHashArr[:], targetHash) + + relayDifficulty := protocol.GetDifficultyFromHash(relayHashArr) + targetDifficulty := protocol.GetDifficultyFromHash(targetHashArr) + + return types.ErrProofInvalidRelay.Wrapf( + "the difficulty relay being proven is (%d), and is smaller than the target difficulty (%d) for service %s", + relayDifficulty, + targetDifficulty, + serviceId, + ) + } + + return nil +} diff --git a/x/proof/keeper/proof_validation_test.go b/x/proof/keeper/proof_validation_test.go new file mode 100644 index 000000000..4a1a33291 --- /dev/null +++ b/x/proof/keeper/proof_validation_test.go @@ -0,0 +1,934 @@ +package keeper_test + +import ( + "context" + "encoding/hex" + "os" + "testing" + + "cosmossdk.io/depinject" + ring_secp256k1 "github.com/athanorlabs/go-dleq/secp256k1" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + cosmostypes 
"github.com/cosmos/cosmos-sdk/types" + "github.com/pokt-network/ring-go" + "github.com/pokt-network/smt" + "github.com/stretchr/testify/require" + + "github.com/pokt-network/poktroll/pkg/crypto" + "github.com/pokt-network/poktroll/pkg/crypto/protocol" + "github.com/pokt-network/poktroll/pkg/crypto/rings" + "github.com/pokt-network/poktroll/pkg/polylog/polyzero" + "github.com/pokt-network/poktroll/pkg/relayer" + "github.com/pokt-network/poktroll/pkg/relayer/session" + keepertest "github.com/pokt-network/poktroll/testutil/keeper" + "github.com/pokt-network/poktroll/testutil/testkeyring" + "github.com/pokt-network/poktroll/testutil/testrelayer" + prooftypes "github.com/pokt-network/poktroll/x/proof/types" + servicetypes "github.com/pokt-network/poktroll/x/service/types" + sessiontypes "github.com/pokt-network/poktroll/x/session/types" + "github.com/pokt-network/poktroll/x/shared" + sharedtypes "github.com/pokt-network/poktroll/x/shared/types" +) + +func TestIsProofValid_Error(t *testing.T) { + opts := []keepertest.ProofKeepersOpt{ + // Set block hash such that on-chain closest merkle proof validation + // uses the expected path. + keepertest.WithBlockHash(blockHeaderHash), + // Set block height to 1 so there is a valid session on-chain. + keepertest.WithBlockHeight(1), + } + keepers, ctx := keepertest.NewProofModuleKeepers(t, opts...) + + // Ensure the minimum relay difficulty bits is set to zero so that test cases + // don't need to mine for valid relays. + err := keepers.Keeper.SetParams(ctx, testProofParams) + require.NoError(t, err) + + // Construct a keyring to hold the keypairs for the accounts used in the test. + keyRing := keyring.NewInMemory(keepers.Codec) + + // Create a pre-generated account iterator to create accounts for the test. + preGeneratedAccts := testkeyring.PreGeneratedAccounts() + + // Create accounts in the account keeper with corresponding keys in the keyring + // for the applications and suppliers used in the tests. 
+ supplierAddr := testkeyring.CreateOnChainAccount( + ctx, t, + supplierUid, + keyRing, + keepers, + preGeneratedAccts, + ).String() + wrongSupplierAddr := testkeyring.CreateOnChainAccount( + ctx, t, + "wrong_supplier", + keyRing, + keepers, + preGeneratedAccts, + ).String() + appAddr := testkeyring.CreateOnChainAccount( + ctx, t, + "app", + keyRing, + keepers, + preGeneratedAccts, + ).String() + wrongAppAddr := testkeyring.CreateOnChainAccount( + ctx, t, + "wrong_app", + keyRing, + keepers, + preGeneratedAccts, + ).String() + + service := &sharedtypes.Service{Id: testServiceId} + wrongService := &sharedtypes.Service{Id: "wrong_svc"} + + // Add a supplier and application pair that are expected to be in the session. + keepers.AddServiceActors(ctx, t, service, supplierAddr, appAddr) + + // Add a supplier and application pair that are *not* expected to be in the session. + keepers.AddServiceActors(ctx, t, wrongService, wrongSupplierAddr, wrongAppAddr) + + // Get the session for the application/supplier pair which is expected + // to be claimed and for which a valid proof would be accepted. + validSessionHeader := keepers.GetSessionHeader(ctx, t, appAddr, service, 1) + + // Get the session for the application/supplier pair which is + // *not* expected to be claimed. + unclaimedSessionHeader := keepers.GetSessionHeader(ctx, t, wrongAppAddr, wrongService, 1) + + // Construct a session header with session ID that doesn't match the expected session ID. + wrongSessionIdHeader := *validSessionHeader + wrongSessionIdHeader.SessionId = "wrong session ID" + + // TODO_TECHDEBT: add a test case such that we can distinguish between early + // & late session end block heights. + + // Construct a ringClient to get the application's ring & verify the relay + // request signature. 
+ ringClient, err := rings.NewRingClient(depinject.Supply( + polyzero.NewLogger(), + prooftypes.NewAppKeeperQueryClient(keepers.ApplicationKeeper), + prooftypes.NewAccountKeeperQueryClient(keepers.AccountKeeper), + prooftypes.NewSharedKeeperQueryClient(keepers.SharedKeeper, keepers.SessionKeeper), + )) + require.NoError(t, err) + + // Construct a valid session tree with 5 relays. + numRelays := uint(5) + validSessionTree := newFilledSessionTree( + ctx, t, + numRelays, + supplierUid, supplierAddr, + validSessionHeader, validSessionHeader, validSessionHeader, + keyRing, + ringClient, + ) + + // Advance the block height to the earliest claim commit height. + sharedParams := keepers.SharedKeeper.GetParams(ctx) + claimMsgHeight := shared.GetEarliestSupplierClaimCommitHeight( + &sharedParams, + validSessionHeader.GetSessionEndBlockHeight(), + blockHeaderHash, + supplierAddr, + ) + sdkCtx := cosmostypes.UnwrapSDKContext(ctx) + sdkCtx = sdkCtx.WithBlockHeight(claimMsgHeight) + ctx = sdkCtx + + merkleRootBz, err := validSessionTree.Flush() + require.NoError(t, err) + + claim := prooftypes.Claim{ + SessionHeader: validSessionHeader, + SupplierAddress: supplierAddr, + RootHash: merkleRootBz, + } + keepers.UpsertClaim(ctx, claim) + + // Compute the difficulty in bits of the closest relay from the valid session tree. + validClosestRelayDifficultyBits := getClosestRelayDifficulty(t, validSessionTree, expectedMerkleProofPath) + + // Copy `emptyBlockHash` to `wrongClosestProofPath` to with a missing byte + // so the closest proof is invalid (i.e. unmarshalable). + invalidClosestProofBytes := make([]byte, len(expectedMerkleProofPath)-1) + + // Store the expected error returned during deserialization of the invalid + // closest Merkle proof bytes. 
+ sparseMerkleClosestProof := &smt.SparseMerkleClosestProof{} + expectedInvalidProofUnmarshalErr := sparseMerkleClosestProof.Unmarshal(invalidClosestProofBytes) + + // Construct a relay to be mangled such that it fails to deserialize in order + // to set the error expectation for the relevant test case. + mangledRelay := testrelayer.NewEmptyRelay(validSessionHeader, validSessionHeader, supplierAddr) + + // Ensure valid relay request and response signatures. + testrelayer.SignRelayRequest(ctx, t, mangledRelay, appAddr, keyRing, ringClient) + testrelayer.SignRelayResponse(ctx, t, mangledRelay, supplierUid, supplierAddr, keyRing) + + // Serialize the relay so that it can be mangled. + mangledRelayBz, err := mangledRelay.Marshal() + require.NoError(t, err) + + // Mangle the serialized relay to cause an error during deserialization. + // Mangling could involve any byte randomly being swapped to any value + // so unmarshaling fails, but we are setting the first byte to 0 for simplicity. + mangledRelayBz[0] = 0x00 + + // Declare an invalid signature byte slice to construct expected relay request + // and response errors and use in corresponding test cases. + invalidSignatureBz := []byte("invalid signature bytes") + + // Prepare an invalid proof of the correct size. 
+ wrongClosestProofPath := make([]byte, len(expectedMerkleProofPath)) + copy(wrongClosestProofPath, expectedMerkleProofPath) + copy(wrongClosestProofPath, "wrong closest proof path") + + lowTargetHash, _ := hex.DecodeString("00000000000000000000000000000000000000000000000000000000000000ff") + var lowTargetHashArr [protocol.RelayHasherSize]byte + copy(lowTargetHashArr[:], lowTargetHash) + highExpectedTargetDifficulty := protocol.GetDifficultyFromHash(lowTargetHashArr) + + tests := []struct { + desc string + newProof func(t *testing.T) *prooftypes.Proof + expectedErr error + }{ + { + desc: "proof service ID cannot be empty", + newProof: func(t *testing.T) *prooftypes.Proof { + // Set proof session ID to empty string. + emptySessionIdHeader := *validSessionHeader + emptySessionIdHeader.SessionId = "" + + // Construct new proof message. + return newProof(t, + supplierAddr, + &emptySessionIdHeader, + validSessionTree, + expectedMerkleProofPath) + }, + expectedErr: prooftypes.ErrProofInvalidSessionId.Wrapf( + "session ID does not match on-chain session ID; expected %q, got %q", + validSessionHeader.GetSessionId(), + "", + ), + }, + { + desc: "merkle proof cannot be empty", + newProof: func(t *testing.T) *prooftypes.Proof { + // Construct new proof message. + proof := newProof(t, + supplierAddr, + validSessionHeader, + validSessionTree, + expectedMerkleProofPath, + ) + + // Set merkle proof to an empty byte slice. + proof.ClosestMerkleProof = []byte{} + return proof + }, + expectedErr: prooftypes.ErrProofInvalidProof.Wrap( + "proof cannot be empty", + ), + }, + { + desc: "proof session ID must match on-chain session ID", + newProof: func(t *testing.T) *prooftypes.Proof { + // Construct new proof message using the wrong session ID. 
+ return newProof(t, + supplierAddr, + &wrongSessionIdHeader, + validSessionTree, + expectedMerkleProofPath, + ) + }, + expectedErr: prooftypes.ErrProofInvalidSessionId.Wrapf( + "session ID does not match on-chain session ID; expected %q, got %q", + validSessionHeader.GetSessionId(), + wrongSessionIdHeader.GetSessionId(), + ), + }, + { + desc: "proof supplier must be in on-chain session", + newProof: func(t *testing.T) *prooftypes.Proof { + // Construct a proof message with a supplier that does not belong in the session. + return newProof(t, + wrongSupplierAddr, + validSessionHeader, + validSessionTree, + expectedMerkleProofPath, + ) + }, + expectedErr: prooftypes.ErrProofNotFound.Wrapf( + "supplier address %q not found in session ID %q", + wrongSupplierAddr, + validSessionHeader.GetSessionId(), + ), + }, + { + desc: "merkle proof must be deserializabled", + newProof: func(t *testing.T) *prooftypes.Proof { + // Construct new proof message. + proof := newProof(t, + supplierAddr, + validSessionHeader, + validSessionTree, + expectedMerkleProofPath, + ) + + // Set merkle proof to an incorrect byte slice. + proof.ClosestMerkleProof = invalidClosestProofBytes + + return proof + }, + expectedErr: prooftypes.ErrProofInvalidProof.Wrapf( + "failed to unmarshal closest merkle proof: %s", + expectedInvalidProofUnmarshalErr, + ), + }, + { + desc: "relay must be deserializable", + newProof: func(t *testing.T) *prooftypes.Proof { + // Construct a session tree to which we'll add 1 unserializable relay. + mangledRelaySessionTree := newEmptySessionTree(t, validSessionHeader, supplierAddr) + + // Add the mangled relay to the session tree. + err = mangledRelaySessionTree.Update([]byte{1}, mangledRelayBz, 1) + require.NoError(t, err) + + // Get the Merkle root for the session tree in order to construct a claim. 
+ mangledRelayMerkleRootBz, flushErr := mangledRelaySessionTree.Flush() + require.NoError(t, flushErr) + + // Re-set the block height to the earliest claim commit height to create a new claim. + claimCtx := cosmostypes.UnwrapSDKContext(ctx) + claimCtx = claimCtx.WithBlockHeight(claimMsgHeight) + + // Create a claim with a merkle root derived from a session tree + // with an unserializable relay. + claim := newClaim(t, + supplierAddr, + validSessionHeader, + mangledRelayMerkleRootBz, + ) + keepers.UpsertClaim(claimCtx, *claim) + require.NoError(t, err) + + // Construct new proof message derived from a session tree + // with an unserializable relay. + return newProof(t, + supplierAddr, + validSessionHeader, + mangledRelaySessionTree, + expectedMerkleProofPath, + ) + }, + expectedErr: prooftypes.ErrProofInvalidRelay.Wrapf( + "failed to unmarshal relay: %s", + keepers.Codec.Unmarshal(mangledRelayBz, &servicetypes.Relay{}), + ), + }, + { + // TODO_TEST(community): expand: test case to cover each session header field. + desc: "relay request session header must match proof session header", + newProof: func(t *testing.T) *prooftypes.Proof { + // Construct a session tree with 1 relay with a session header containing + // a session ID that doesn't match the proof session ID. + numRelays := uint(1) + wrongRequestSessionIdSessionTree := newFilledSessionTree( + ctx, t, + numRelays, + supplierUid, supplierAddr, + validSessionHeader, &wrongSessionIdHeader, validSessionHeader, + keyRing, + ringClient, + ) + + // Get the Merkle root for the session tree in order to construct a claim. + wrongRequestSessionIdMerkleRootBz, flushErr := wrongRequestSessionIdSessionTree.Flush() + require.NoError(t, flushErr) + + // Re-set the block height to the earliest claim commit height to create a new claim. 
+ claimCtx := cosmostypes.UnwrapSDKContext(ctx) + claimCtx = claimCtx.WithBlockHeight(claimMsgHeight) + + // Create a claim with a merkle root derived from a relay + // request containing the wrong session ID. + claim := newClaim(t, + supplierAddr, + validSessionHeader, + wrongRequestSessionIdMerkleRootBz, + ) + keepers.UpsertClaim(claimCtx, *claim) + require.NoError(t, err) + + // Construct new proof message using the valid session header, + // *not* the one used in the session tree's relay request. + return newProof(t, + supplierAddr, + validSessionHeader, + wrongRequestSessionIdSessionTree, + expectedMerkleProofPath, + ) + }, + expectedErr: prooftypes.ErrProofInvalidRelay.Wrapf( + "session headers session IDs mismatch; expected: %q, got: %q", + validSessionHeader.GetSessionId(), + wrongSessionIdHeader.GetSessionId(), + ), + }, + { + // TODO_TEST: expand: test case to cover each session header field. + desc: "relay response session header must match proof session header", + newProof: func(t *testing.T) *prooftypes.Proof { + // Construct a session tree with 1 relay with a session header containing + // a session ID that doesn't match the expected session ID. + numRelays := uint(1) + wrongResponseSessionIdSessionTree := newFilledSessionTree( + ctx, t, + numRelays, + supplierUid, supplierAddr, + validSessionHeader, validSessionHeader, &wrongSessionIdHeader, + keyRing, + ringClient, + ) + + // Get the Merkle root for the session tree in order to construct a claim. + wrongResponseSessionIdMerkleRootBz, flushErr := wrongResponseSessionIdSessionTree.Flush() + require.NoError(t, flushErr) + + // Re-set the block height to the earliest claim commit height to create a new claim. + claimCtx := cosmostypes.UnwrapSDKContext(ctx) + claimCtx = claimCtx.WithBlockHeight(claimMsgHeight) + + // Create a claim with a merkle root derived from a relay + // response containing the wrong session ID. 
+ claim := newClaim(t, + supplierAddr, + validSessionHeader, + wrongResponseSessionIdMerkleRootBz, + ) + keepers.UpsertClaim(claimCtx, *claim) + require.NoError(t, err) + + // Construct new proof message using the valid session header, + // *not* the one used in the session tree's relay response. + return newProof(t, + supplierAddr, + validSessionHeader, + wrongResponseSessionIdSessionTree, + expectedMerkleProofPath, + ) + }, + expectedErr: prooftypes.ErrProofInvalidRelay.Wrapf( + "session headers session IDs mismatch; expected: %q, got: %q", + validSessionHeader.GetSessionId(), + wrongSessionIdHeader.GetSessionId(), + ), + }, + { + desc: "relay request signature must be valid", + newProof: func(t *testing.T) *prooftypes.Proof { + // Set the relay request signature to an invalid byte slice. + invalidRequestSignatureRelay := testrelayer.NewEmptyRelay(validSessionHeader, validSessionHeader, supplierAddr) + invalidRequestSignatureRelay.Req.Meta.Signature = invalidSignatureBz + + // Ensure a valid relay response signature. + testrelayer.SignRelayResponse(ctx, t, invalidRequestSignatureRelay, supplierUid, supplierAddr, keyRing) + + invalidRequestSignatureRelayBz, marshalErr := invalidRequestSignatureRelay.Marshal() + require.NoError(t, marshalErr) + + // Construct a session tree with 1 relay with a session header containing + // a session ID that doesn't match the expected session ID. + invalidRequestSignatureSessionTree := newEmptySessionTree(t, validSessionHeader, supplierAddr) + + // Add the relay to the session tree. + err = invalidRequestSignatureSessionTree.Update([]byte{1}, invalidRequestSignatureRelayBz, 1) + require.NoError(t, err) + + // Get the Merkle root for the session tree in order to construct a claim. + invalidRequestSignatureMerkleRootBz, flushErr := invalidRequestSignatureSessionTree.Flush() + require.NoError(t, flushErr) + + // Re-set the block height to the earliest claim commit height to create a new claim. 
+ claimCtx := cosmostypes.UnwrapSDKContext(ctx) + claimCtx = claimCtx.WithBlockHeight(claimMsgHeight) + + // Create a claim with a merkle root derived from a session tree + // with an invalid relay request signature. + + claim := newClaim(t, + supplierAddr, + validSessionHeader, + invalidRequestSignatureMerkleRootBz, + ) + keepers.UpsertClaim(claimCtx, *claim) + require.NoError(t, err) + + // Construct new proof message derived from a session tree + // with an invalid relay request signature. + return newProof(t, + supplierAddr, + validSessionHeader, + invalidRequestSignatureSessionTree, + expectedMerkleProofPath, + ) + }, + expectedErr: prooftypes.ErrProofInvalidRelayRequest.Wrapf( + "error deserializing ring signature: %s", + new(ring.RingSig).Deserialize(ring_secp256k1.NewCurve(), invalidSignatureBz), + ), + }, + { + desc: "relay request signature is valid but signed by an incorrect application", + newProof: func(t *testing.T) *prooftypes.Proof { + t.Skip("TODO_TECHDEBT(@bryanchriswhite): Implement this") + return nil + }, + }, + { + desc: "relay response signature must be valid", + newProof: func(t *testing.T) *prooftypes.Proof { + // Set the relay response signature to an invalid byte slice. + relay := testrelayer.NewEmptyRelay(validSessionHeader, validSessionHeader, supplierAddr) + relay.Res.Meta.SupplierSignature = invalidSignatureBz + + // Ensure a valid relay request signature + testrelayer.SignRelayRequest(ctx, t, relay, appAddr, keyRing, ringClient) + + relayBz, marshalErr := relay.Marshal() + require.NoError(t, marshalErr) + + // Construct a session tree with 1 relay with a session header containing + // a session ID that doesn't match the expected session ID. + invalidResponseSignatureSessionTree := newEmptySessionTree(t, validSessionHeader, supplierAddr) + + // Add the relay to the session tree. 
+ err = invalidResponseSignatureSessionTree.Update([]byte{1}, relayBz, 1) + require.NoError(t, err) + + // Get the Merkle root for the session tree in order to construct a claim. + invalidResponseSignatureMerkleRootBz, flushErr := invalidResponseSignatureSessionTree.Flush() + require.NoError(t, flushErr) + + // Re-set the block height to the earliest claim commit height to create a new claim. + claimCtx := cosmostypes.UnwrapSDKContext(ctx) + claimCtx = claimCtx.WithBlockHeight(claimMsgHeight) + + // Create a claim with a merkle root derived from a session tree + // with an invalid relay response signature. + claim := newClaim(t, + supplierAddr, + validSessionHeader, + invalidResponseSignatureMerkleRootBz, + ) + keepers.UpsertClaim(claimCtx, *claim) + require.NoError(t, err) + + // Construct new proof message derived from a session tree + // with an invalid relay response signature. + return newProof(t, + supplierAddr, + validSessionHeader, + invalidResponseSignatureSessionTree, + expectedMerkleProofPath, + ) + }, + expectedErr: servicetypes.ErrServiceInvalidRelayResponse.Wrap("invalid signature"), + }, + { + desc: "relay response signature is valid but signed by an incorrect supplier", + newProof: func(t *testing.T) *prooftypes.Proof { + t.Skip("TODO_TECHDEBT(@bryanchriswhite): Implement this") + return nil + }, + }, + { + desc: "the merkle proof path provided does not match the one expected/enforced by the protocol", + newProof: func(t *testing.T) *prooftypes.Proof { + // Construct a new valid session tree for this test case because once the + // closest proof has already been generated, the path cannot be changed. 
+ numRelays := uint(5)
+ wrongPathSessionTree := newFilledSessionTree(
+ ctx, t,
+ numRelays,
+ supplierUid, supplierAddr,
+ validSessionHeader, validSessionHeader, validSessionHeader,
+ keyRing,
+ ringClient,
+ )
+
+ wrongPathMerkleRootBz, flushErr := wrongPathSessionTree.Flush()
+ require.NoError(t, flushErr)
+
+ // Re-set the block height to the earliest claim commit height to create a new claim.
+ claimCtx := keepertest.SetBlockHeight(ctx, claimMsgHeight)
+
+ // Create and upsert the claim
+ claim := newClaim(t,
+ supplierAddr,
+ validSessionHeader,
+ wrongPathMerkleRootBz,
+ )
+ keepers.UpsertClaim(claimCtx, *claim)
+ require.NoError(t, err)
+
+ // Construct new proof message derived from a session tree
+ // whose merkle proof path does not match the expected one.
+ return newProof(t, supplierAddr, validSessionHeader, wrongPathSessionTree, wrongClosestProofPath)
+ },
+ expectedErr: prooftypes.ErrProofInvalidProof.Wrapf(
+ "the path of the proof provided (%x) does not match one expected by the on-chain protocol (%x)",
+ wrongClosestProofPath,
+ protocol.GetPathForProof(sdkCtx.HeaderHash(), validSessionHeader.GetSessionId()),
+ ),
+ },
+ {
+ desc: "relay difficulty must be greater than or equal to minimum (zero difficulty)",
+ newProof: func(t *testing.T) *prooftypes.Proof {
+ // Set the minimum relay difficulty to a non-zero value such that the relays
+ // constructed by the test helpers have a negligible chance of being valid.
+ err = keepers.Keeper.SetParams(ctx, prooftypes.Params{
+ RelayDifficultyTargetHash: lowTargetHash,
+ })
+ require.NoError(t, err)
+
+ // Reset the minimum relay difficulty to zero after this test case.
+ t.Cleanup(func() {
+ err = keepers.Keeper.SetParams(ctx, prooftypes.DefaultParams())
+ require.NoError(t, err)
+ })
+
+ // Construct a proof message with a session tree containing
+ // a relay of insufficient difficulty.
+ return newProof(t, + supplierAddr, + validSessionHeader, + validSessionTree, + expectedMerkleProofPath, + ) + }, + expectedErr: prooftypes.ErrProofInvalidRelay.Wrapf( + "the difficulty relay being proven is (%d), and is smaller than the target difficulty (%d) for service %s", + validClosestRelayDifficultyBits, + highExpectedTargetDifficulty, + validSessionHeader.Service.Id, + ), + }, + { + desc: "relay difficulty must be greater than or equal to minimum (non-zero difficulty)", + newProof: func(t *testing.T) *prooftypes.Proof { + t.Skip("TODO_TECHDEBT(@bryanchriswhite): Implement this") + return nil + }, + }, + { + desc: "claim must exist for proof message", + newProof: func(t *testing.T) *prooftypes.Proof { + // Construct a new session tree corresponding to the unclaimed session. + numRelays := uint(5) + unclaimedSessionTree := newFilledSessionTree( + ctx, t, + numRelays, + "wrong_supplier", wrongSupplierAddr, + unclaimedSessionHeader, unclaimedSessionHeader, unclaimedSessionHeader, + keyRing, + ringClient, + ) + + // Discard session tree Merkle root because no claim is being created. + // Session tree must be closed (flushed) to compute closest Merkle Proof. + _, err = unclaimedSessionTree.Flush() + require.NoError(t, err) + + // Compute expected proof path for the unclaimed session. + expectedMerkleProofPath := protocol.GetPathForProof( + blockHeaderHash, + unclaimedSessionHeader.GetSessionId(), + ) + + // Construct new proof message using the supplier & session header + // from the session which is *not* expected to be claimed. 
+ return newProof(t, + wrongSupplierAddr, + unclaimedSessionHeader, + unclaimedSessionTree, + expectedMerkleProofPath, + ) + }, + expectedErr: prooftypes.ErrProofClaimNotFound.Wrapf( + "no claim found for session ID %q and supplier %q", + unclaimedSessionHeader.GetSessionId(), + wrongSupplierAddr, + ), + }, + { + desc: "Valid proof cannot validate claim with an incorrect root", + newProof: func(t *testing.T) *prooftypes.Proof { + numRelays := uint(10) + wrongMerkleRootSessionTree := newFilledSessionTree( + ctx, t, + numRelays, + supplierUid, supplierAddr, + validSessionHeader, validSessionHeader, validSessionHeader, + keyRing, + ringClient, + ) + + wrongMerkleRootBz, err := wrongMerkleRootSessionTree.Flush() + require.NoError(t, err) + + // Re-set the block height to the earliest claim commit height to create a new claim. + claimCtx := keepertest.SetBlockHeight(ctx, claimMsgHeight) + + // Create a claim with the incorrect Merkle root. + claim := newClaim(t, + supplierAddr, + validSessionHeader, + wrongMerkleRootBz, + ) + keepers.UpsertClaim(claimCtx, *claim) + require.NoError(t, err) + + // Construct a valid session tree with 5 relays. + validSessionTree := newFilledSessionTree( + ctx, t, + uint(5), + supplierUid, supplierAddr, + validSessionHeader, validSessionHeader, validSessionHeader, + keyRing, + ringClient, + ) + + _, err = validSessionTree.Flush() + require.NoError(t, err) + + // Compute expected proof path for the session. 
+ expectedMerkleProofPath := protocol.GetPathForProof( + blockHeaderHash, + validSessionHeader.GetSessionId(), + ) + + return newProof(t, + supplierAddr, + validSessionHeader, + validSessionTree, + expectedMerkleProofPath, + ) + }, + expectedErr: prooftypes.ErrProofInvalidProof.Wrap("invalid closest merkle proof"), + }, + { + desc: "claim and proof application addresses must match", + newProof: func(t *testing.T) *prooftypes.Proof { + t.Skip("this test case reduces to either the 'claim must exist for proof message' or 'proof session ID must match on-chain session ID cases") + return nil + }, + }, + { + desc: "claim and proof service IDs must match", + newProof: func(t *testing.T) *prooftypes.Proof { + t.Skip("this test case reduces to either the 'claim must exist for proof message' or 'proof session ID must match on-chain session ID cases") + return nil + }, + }, + { + desc: "claim and proof supplier addresses must match", + newProof: func(t *testing.T) *prooftypes.Proof { + t.Skip("this test case reduces to either the 'claim must exist for proof message' or 'proof session ID must match on-chain session ID cases") + return nil + }, + }, + } + + // Submit the corresponding proof. + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + proof := test.newProof(t) + + // Advance the block height to the proof path seed height. + earliestSupplierProofCommitHeight := shared.GetEarliestSupplierProofCommitHeight( + &sharedParams, + proof.GetSessionHeader().GetSessionEndBlockHeight(), + blockHeaderHash, + proof.GetSupplierAddress(), + ) + ctx = keepertest.SetBlockHeight(ctx, earliestSupplierProofCommitHeight-1) + + // Store proof path seed block hash in the session keeper so that it can + // look it up during proof validation. + keepers.StoreBlockHash(ctx) + + // Advance the block height to the earliest proof commit height. 
+ ctx = keepertest.SetBlockHeight(ctx, earliestSupplierProofCommitHeight) + isProofValid, err := keepers.IsProofValid(ctx, proof) + require.ErrorContains(t, err, test.expectedErr.Error()) + require.False(t, isProofValid) + }) + } +} + +// newFilledSessionTree creates a new session tree with numRelays of relays +// filled out using the request and response headers provided where every +// relay is signed by the supplier and application respectively. +func newFilledSessionTree( + ctx context.Context, t *testing.T, + numRelays uint, + supplierKeyUid, supplierAddr string, + sessionTreeHeader, reqHeader, resHeader *sessiontypes.SessionHeader, + keyRing keyring.Keyring, + ringClient crypto.RingClient, +) relayer.SessionTree { + t.Helper() + + // Initialize an empty session tree with the given session header. + sessionTree := newEmptySessionTree(t, sessionTreeHeader, supplierAddr) + + // Add numRelays of relays to the session tree. + fillSessionTree( + ctx, t, + sessionTree, numRelays, + supplierKeyUid, supplierAddr, + reqHeader, resHeader, + keyRing, + ringClient, + ) + + return sessionTree +} + +// newEmptySessionTree creates a new empty session tree with for given session. +func newEmptySessionTree( + t *testing.T, + sessionTreeHeader *sessiontypes.SessionHeader, + supplierAddr string, +) relayer.SessionTree { + t.Helper() + + // Create a temporary session tree store directory for persistence. + testSessionTreeStoreDir, err := os.MkdirTemp("", "session_tree_store_dir") + require.NoError(t, err) + + // Delete the temporary session tree store directory after the test completes. + t.Cleanup(func() { + _ = os.RemoveAll(testSessionTreeStoreDir) + }) + + accAddress := cosmostypes.MustAccAddressFromBech32(supplierAddr) + + // Construct a session tree to add relays to and generate a proof from. 
+ sessionTree, err := session.NewSessionTree( + sessionTreeHeader, + &accAddress, + testSessionTreeStoreDir, + ) + require.NoError(t, err) + + return sessionTree +} + +// fillSessionTree fills the session tree with valid signed relays. +// A total of numRelays relays are added to the session tree with +// increasing weights (relay 1 has weight 1, relay 2 has weight 2, etc.). +func fillSessionTree( + ctx context.Context, t *testing.T, + sessionTree relayer.SessionTree, + numRelays uint, + supplierKeyUid, supplierAddr string, + reqHeader, resHeader *sessiontypes.SessionHeader, + keyRing keyring.Keyring, + ringClient crypto.RingClient, +) { + t.Helper() + + for i := 0; i < int(numRelays); i++ { + relay := testrelayer.NewSignedEmptyRelay( + ctx, t, + supplierKeyUid, supplierAddr, + reqHeader, resHeader, + keyRing, + ringClient, + ) + relayBz, err := relay.Marshal() + require.NoError(t, err) + + relayKey, err := relay.GetHash() + require.NoError(t, err) + + relayWeight := uint64(i) + + err = sessionTree.Update(relayKey[:], relayBz, relayWeight) + require.NoError(t, err) + } +} + +// getClosestRelayDifficulty returns the mining difficulty number which corresponds +// to the relayHash stored in the sessionTree that is closest to the merkle proof +// path provided. +func getClosestRelayDifficulty( + t *testing.T, + sessionTree relayer.SessionTree, + closestMerkleProofPath []byte, +) int64 { + // Retrieve a merkle proof that is closest to the path provided + closestMerkleProof, err := sessionTree.ProveClosest(closestMerkleProofPath) + require.NoError(t, err) + + // Extract the Relay (containing the RelayResponse & RelayRequest) from the merkle proof. + relay := new(servicetypes.Relay) + relayBz := closestMerkleProof.GetValueHash(&protocol.SmtSpec) + err = relay.Unmarshal(relayBz) + require.NoError(t, err) + + // Retrieve the hash of the relay. 
+ relayHash, err := relay.GetHash() + require.NoError(t, err) + + return protocol.GetDifficultyFromHash(relayHash) +} + +// newProof creates a new proof structure. +func newProof( + t *testing.T, + supplierAddr string, + sessionHeader *sessiontypes.SessionHeader, + sessionTree relayer.SessionTree, + closestProofPath []byte, +) *prooftypes.Proof { + t.Helper() + + // Generate a closest proof from the session tree using closestProofPath. + merkleProof, err := sessionTree.ProveClosest(closestProofPath) + require.NoError(t, err) + require.NotNil(t, merkleProof) + + // Serialize the closest merkle proof. + merkleProofBz, err := merkleProof.Marshal() + require.NoError(t, err) + + return &prooftypes.Proof{ + SupplierAddress: supplierAddr, + SessionHeader: sessionHeader, + ClosestMerkleProof: merkleProofBz, + } +} + +func newClaim( + t *testing.T, + supplierAddr string, + sessionHeader *sessiontypes.SessionHeader, + rootHash []byte, +) *prooftypes.Claim { + // Create a new claim. + return &prooftypes.Claim{ + SupplierAddress: supplierAddr, + SessionHeader: sessionHeader, + RootHash: rootHash, + } +} diff --git a/x/proof/keeper/session.go b/x/proof/keeper/session.go index cd201f146..f2c3990a7 100644 --- a/x/proof/keeper/session.go +++ b/x/proof/keeper/session.go @@ -11,23 +11,16 @@ import ( sharedtypes "github.com/pokt-network/poktroll/x/shared/types" ) -type msgWithSessionAndSupplier interface { - GetSessionHeader() *sessiontypes.SessionHeader - GetSupplierAddress() string -} - // queryAndValidateSessionHeader ensures that a session with the sessionID of the given session // header exists and that this session includes the supplier with the given address. // It returns a session which is hydrated with the on-chain session data. 
-func (k msgServer) queryAndValidateSessionHeader( +func (k Keeper) queryAndValidateSessionHeader( ctx context.Context, - msg msgWithSessionAndSupplier, + sessionHeader *sessiontypes.SessionHeader, + supplierAddr string, ) (*sessiontypes.Session, error) { logger := k.Logger().With("method", "queryAndValidateSessionHeader") - sessionHeader := msg.GetSessionHeader() - supplierAddr := msg.GetSupplierAddress() - sessionReq := &sessiontypes.QueryGetSessionRequest{ ApplicationAddress: sessionHeader.GetApplicationAddress(), Service: sessionHeader.GetService(), @@ -36,7 +29,7 @@ func (k msgServer) queryAndValidateSessionHeader( // Get the on-chain session for the ground-truth against which the given // session header is to be validated. - sessionRes, err := k.Keeper.sessionKeeper.GetSession(ctx, sessionReq) + sessionRes, err := k.sessionKeeper.GetSession(ctx, sessionReq) if err != nil { return nil, err } @@ -84,12 +77,12 @@ func (k msgServer) queryAndValidateSessionHeader( // It *assumes* that the msg's session header is a valid on-chain session with correct // height fields. First call #queryAndValidateSessionHeader to ensure any user-provided // session header is valid and correctly hydrated. 
-func (k msgServer) validateClaimWindow( +func (k Keeper) validateClaimWindow( ctx context.Context, - msg *types.MsgCreateClaim, + sessionHeader *sessiontypes.SessionHeader, + supplierAddr string, ) error { logger := k.Logger().With("method", "validateClaimWindow") - sessionHeader := msg.GetSessionHeader() sharedParams := k.sharedKeeper.GetParams(ctx) sessionEndHeight := sessionHeader.GetSessionEndBlockHeight() @@ -102,7 +95,7 @@ func (k msgServer) validateClaimWindow( earliestClaimCommitHeight, err := k.sharedQuerier.GetEarliestSupplierClaimCommitHeight( ctx, sessionEndHeight, - msg.GetSupplierAddress(), + supplierAddr, ) if err != nil { return err @@ -141,7 +134,7 @@ func (k msgServer) validateClaimWindow( "claim_window_open_height", claimWindowOpenHeight, "earliest_claim_commit_height", earliestClaimCommitHeight, "claim_window_close_height", claimWindowCloseHeight, - "supplier_addr", msg.GetSupplierAddress(), + "supplier_addr", supplierAddr, ). Debug("validated claim window") @@ -152,14 +145,14 @@ func (k msgServer) validateClaimWindow( // It *assumes* that the msg's session header is a valid on-chain session with correct // height fields. First call #queryAndValidateSessionHeader to ensure any user-provided // session header is valid and correctly hydrated. -func (k msgServer) validateProofWindow( +func (k Keeper) validateProofWindow( ctx context.Context, - msg *types.MsgSubmitProof, + sessionHeader *sessiontypes.SessionHeader, + supplierAddr string, ) error { logger := k.Logger().With("method", "validateProofWindow") - sessionHeader := msg.GetSessionHeader() - sharedParams := k.sharedKeeper.GetParams(ctx) + sharedParams := k.sharedKeeper.GetParams(ctx) sessionEndHeight := sessionHeader.GetSessionEndBlockHeight() // Get the proof window open and close heights for the given session header. 
@@ -170,7 +163,7 @@ func (k msgServer) validateProofWindow( earliestProofCommitHeight, err := k.sharedQuerier.GetEarliestSupplierProofCommitHeight( ctx, sessionEndHeight, - msg.GetSupplierAddress(), + supplierAddr, ) if err != nil { return err @@ -205,7 +198,7 @@ func (k msgServer) validateProofWindow( "proof_window_open_height", proofWindowOpenHeight, "earliest_proof_commit_height", earliestProofCommitHeight, "proof_window_close_height", proofWindowCloseHeight, - "supplier_addr", msg.GetSupplierAddress(), + "supplier_addr", supplierAddr, ). Debug("validated proof window") diff --git a/x/proof/types/errors.go b/x/proof/types/errors.go index 5c3dab70f..bfc0f4b93 100644 --- a/x/proof/types/errors.go +++ b/x/proof/types/errors.go @@ -31,4 +31,5 @@ var ( ErrProofParamInvalid = sdkerrors.Register(ModuleName, 1120, "the provided param is invalid") ErrProofClaimOutsideOfWindow = sdkerrors.Register(ModuleName, 1121, "claim attempted outside of the session's claim window") ErrProofProofOutsideOfWindow = sdkerrors.Register(ModuleName, 1122, "proof attempted outside of the session's proof window") + ErrProofSupplierMismatch = sdkerrors.Register(ModuleName, 1123, "supplier address does not match the claim or proof") ) diff --git a/x/tokenomics/keeper/settle_pending_claims.go b/x/tokenomics/keeper/settle_pending_claims.go index 7fe1a3c53..1b395b21e 100644 --- a/x/tokenomics/keeper/settle_pending_claims.go +++ b/x/tokenomics/keeper/settle_pending_claims.go @@ -64,7 +64,7 @@ func (k Keeper) SettlePendingClaims(ctx sdk.Context) ( sessionId := claim.SessionHeader.SessionId - _, isProofFound := k.proofKeeper.GetProof(ctx, sessionId, claim.SupplierAddress) + proof, isProofFound := k.proofKeeper.GetProof(ctx, sessionId, claim.SupplierAddress) // Using the probabilistic proofs approach, determine if this expiring // claim required an on-chain proof proofRequirement, err = k.proofRequirementForClaim(ctx, &claim) @@ -80,14 +80,31 @@ func (k Keeper) SettlePendingClaims(ctx sdk.Context) ( 
"proof_requirement", proofRequirement, ) - if proofRequirement != prooftypes.ProofRequirementReason_NOT_REQUIRED { - // If a proof is not found, the claim will expire and never be settled. - if !isProofFound { + proofIsRequired := (proofRequirement != prooftypes.ProofRequirementReason_NOT_REQUIRED) + if proofIsRequired { + + // EXPIRATION_REASON_UNSPECIFIED is the default + var expirationReason types.ClaimExpirationReason = types.ClaimExpirationReason_EXPIRATION_REASON_UNSPECIFIED + if isProofFound { + // Should claim expire because proof is invalid? + isProofValid, err := k.proofKeeper.IsProofValid(ctx, &proof) + if !isProofValid || err != nil { + expirationReason = types.ClaimExpirationReason_PROOF_INVALID + } + } else { + // Should claim expire because proof is required but unavailable? + expirationReason = types.ClaimExpirationReason_PROOF_MISSING + } + + // If the proof is missing or invalid -> expire it + if expirationReason != types.ClaimExpirationReason_EXPIRATION_REASON_UNSPECIFIED { + // Proof was required but not found. // Emit an event that a claim has expired and being removed without being settled. claimExpiredEvent := types.EventClaimExpired{ - Claim: &claim, - NumComputeUnits: numClaimComputeUnits, - NumRelays: numRelaysInSessionTree, + Claim: &claim, + NumComputeUnits: numClaimComputeUnits, + NumRelays: numRelaysInSessionTree, + ExpirationReason: types.ClaimExpirationReason_PROOF_INVALID, } if err = ctx.EventManager().EmitTypedEvent(&claimExpiredEvent); err != nil { return settledResult, expiredResult, err @@ -104,11 +121,15 @@ func (k Keeper) SettlePendingClaims(ctx sdk.Context) ( expiredResult.NumComputeUnits += numClaimComputeUnits continue } - // TODO_FOLLOWUP(@olshansk, #690): Document the potential changes needed here. - // NB: If a proof is found, it is valid because verification is done - // at the time of submission. 
+ } + // TODO_MAINNET: A potential issue with doing proof validation inside + // `SubmitProof` is that we will not be storing false proofs on-chain (e.g. for slashing purposes). + // This could be considered a feature (e.g. less state bloat against sybil attacks) + // or a bug (i.e. no mechanisms for slashing suppliers who submit false proofs). + // Revisit this prior to mainnet launch as to whether the business logic for settling sessions should be in EndBlocker or here. + // Manage the mint & burn accounting for the claim. if err = k.SettleSessionAccounting(ctx, &claim); err != nil { logger.Error(fmt.Sprintf("error settling session accounting for claim %q: %v", claim.SessionHeader.SessionId, err)) diff --git a/x/tokenomics/types/event.pb.go b/x/tokenomics/types/event.pb.go index 8ac8e9321..5c55fbc6e 100644 --- a/x/tokenomics/types/event.pb.go +++ b/x/tokenomics/types/event.pb.go @@ -25,13 +25,43 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +type ClaimExpirationReason int32 + +const ( + ClaimExpirationReason_EXPIRATION_REASON_UNSPECIFIED ClaimExpirationReason = 0 + ClaimExpirationReason_PROOF_MISSING ClaimExpirationReason = 1 + ClaimExpirationReason_PROOF_INVALID ClaimExpirationReason = 2 +) + +var ClaimExpirationReason_name = map[int32]string{ + 0: "EXPIRATION_REASON_UNSPECIFIED", + 1: "PROOF_MISSING", + 2: "PROOF_INVALID", +} + +var ClaimExpirationReason_value = map[string]int32{ + "EXPIRATION_REASON_UNSPECIFIED": 0, + "PROOF_MISSING": 1, + "PROOF_INVALID": 2, +} + +func (x ClaimExpirationReason) String() string { + return proto.EnumName(ClaimExpirationReason_name, int32(x)) +} + +func (ClaimExpirationReason) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_a78874bbf91a58c7, []int{0} +} + // EventClaimExpired is an event emitted during settlement whenever a claim requiring // an on-chain proof doesn't have one. 
The claim cannot be settled, leading to that work
// never being rewarded.
type EventClaimExpired struct {
- Claim *types.Claim `protobuf:"bytes,1,opt,name=claim,proto3" json:"claim"`
- NumRelays uint64 `protobuf:"varint,2,opt,name=num_relays,json=numRelays,proto3" json:"num_relays"`
- NumComputeUnits uint64 `protobuf:"varint,3,opt,name=num_compute_units,json=numComputeUnits,proto3" json:"num_compute_units"`
+ Claim *types.Claim `protobuf:"bytes,1,opt,name=claim,proto3" json:"claim"`
+ // TODO_MAINNET: Should we include the proof here too?
+ NumRelays uint64 `protobuf:"varint,2,opt,name=num_relays,json=numRelays,proto3" json:"num_relays"`
+ NumComputeUnits uint64 `protobuf:"varint,3,opt,name=num_compute_units,json=numComputeUnits,proto3" json:"num_compute_units"`
+ ExpirationReason ClaimExpirationReason `protobuf:"varint,4,opt,name=expiration_reason,json=expirationReason,proto3,enum=poktroll.tokenomics.ClaimExpirationReason" json:"expiration_reason"`
}

func (m *EventClaimExpired) Reset() { *m = EventClaimExpired{} }
@@ -88,6 +118,13 @@ func (m *EventClaimExpired) GetNumComputeUnits() uint64 {
return 0
}

+func (m *EventClaimExpired) GetExpirationReason() ClaimExpirationReason {
+ if m != nil {
+ return m.ExpirationReason
+ }
+ return ClaimExpirationReason_EXPIRATION_REASON_UNSPECIFIED
+}
+
// EventClaimSettled is an event emitted whenever a claim is settled.
// The proof_required determines whether the claim requires a proof that has been submitted or not type EventClaimSettled struct { @@ -299,6 +336,7 @@ func (m *EventApplicationOverserviced) GetEffectiveBurn() *types1.Coin { } func init() { + proto.RegisterEnum("poktroll.tokenomics.ClaimExpirationReason", ClaimExpirationReason_name, ClaimExpirationReason_value) proto.RegisterType((*EventClaimExpired)(nil), "poktroll.tokenomics.EventClaimExpired") proto.RegisterType((*EventClaimSettled)(nil), "poktroll.tokenomics.EventClaimSettled") proto.RegisterType((*EventRelayMiningDifficultyUpdated)(nil), "poktroll.tokenomics.EventRelayMiningDifficultyUpdated") @@ -308,46 +346,52 @@ func init() { func init() { proto.RegisterFile("poktroll/tokenomics/event.proto", fileDescriptor_a78874bbf91a58c7) } var fileDescriptor_a78874bbf91a58c7 = []byte{ - // 621 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x94, 0xcb, 0x6e, 0x13, 0x3d, - 0x14, 0xc7, 0x3b, 0xbd, 0x7c, 0x52, 0xdc, 0xaf, 0x37, 0x97, 0x8a, 0x50, 0xca, 0x24, 0x64, 0x81, - 0x8a, 0x50, 0x67, 0xd4, 0x56, 0x62, 0x85, 0x2a, 0x9a, 0x12, 0xa9, 0x2c, 0x0a, 0x68, 0xa0, 0x1b, - 0x36, 0x23, 0x67, 0x7c, 0x92, 0x98, 0xce, 0xd8, 0xc6, 0xe3, 0x99, 0xa4, 0x6f, 0xc1, 0x03, 0xf0, - 0x02, 0x3c, 0x04, 0x7b, 0x24, 0x36, 0x5d, 0xb2, 0xaa, 0x50, 0xbb, 0xeb, 0x53, 0x20, 0x7b, 0x72, - 0x19, 0xa5, 0x20, 0xd6, 0x6c, 0x12, 0xeb, 0xfc, 0x7f, 0xe7, 0x9c, 0xbf, 0x8f, 0xc7, 0x46, 0x35, - 0x29, 0xce, 0xb4, 0x12, 0x71, 0xec, 0x6b, 0x71, 0x06, 0x5c, 0x24, 0x2c, 0x4a, 0x7d, 0xc8, 0x81, - 0x6b, 0x4f, 0x2a, 0xa1, 0x05, 0x5e, 0x1f, 0x01, 0xde, 0x04, 0xd8, 0xbc, 0xd3, 0x15, 0x5d, 0x61, - 0x75, 0xdf, 0xac, 0x0a, 0x74, 0xd3, 0x8d, 0x44, 0x9a, 0x88, 0xd4, 0x6f, 0x93, 0x14, 0xfc, 0x7c, - 0xb7, 0x0d, 0x9a, 0xec, 0xfa, 0x91, 0x60, 0x7c, 0xa8, 0x6f, 0x8e, 0x7b, 0x49, 0x25, 0x44, 0xc7, - 0x8f, 0x62, 0xc2, 0x92, 0xa1, 0x56, 0x9f, 0xd2, 0x14, 0x7c, 0xcc, 0x98, 0x82, 0x64, 0x6c, 0xa4, - 0xf1, 0xd5, 0x41, 0x6b, 0x2d, 
0x63, 0xec, 0xc8, 0xa4, 0xb5, 0x06, 0x92, 0x29, 0xa0, 0xf8, 0x29, - 0x5a, 0xb0, 0x65, 0xaa, 0x4e, 0xdd, 0xd9, 0x5e, 0xdc, 0xdb, 0xf0, 0xc6, 0x76, 0x6d, 0x1d, 0xcf, - 0xc2, 0xcd, 0xca, 0xcd, 0x65, 0xad, 0xe0, 0x82, 0xe2, 0x0f, 0xef, 0x20, 0xc4, 0xb3, 0x24, 0x54, - 0x10, 0x93, 0xf3, 0xb4, 0x3a, 0x5b, 0x77, 0xb6, 0xe7, 0x9b, 0xcb, 0x37, 0x97, 0xb5, 0x52, 0x34, - 0xa8, 0xf0, 0x2c, 0x09, 0xec, 0x12, 0x1f, 0xa2, 0x35, 0x23, 0x44, 0x22, 0x91, 0x99, 0x86, 0x30, - 0xe3, 0x4c, 0xa7, 0xd5, 0x39, 0x9b, 0xb5, 0x71, 0x73, 0x59, 0xbb, 0x2d, 0x06, 0x2b, 0x3c, 0x4b, - 0x8e, 0x8a, 0xc8, 0xa9, 0x09, 0x34, 0xbe, 0xcc, 0x96, 0xfd, 0xbf, 0x05, 0xad, 0xe3, 0x7f, 0xc9, - 0x3f, 0xfe, 0x80, 0xd6, 0xac, 0xa5, 0xb0, 0x74, 0x34, 0xd5, 0xf9, 0xba, 0xb3, 0xbd, 0xbc, 0xf7, - 0x68, 0xda, 0xf5, 0x1b, 0xf3, 0x1b, 0x4c, 0xb8, 0x00, 0x48, 0x2a, 0x78, 0xd1, 0xea, 0x56, 0x91, - 0x60, 0x55, 0x4e, 0xe1, 0x8d, 0xcf, 0xb3, 0xe8, 0xa1, 0x9d, 0x95, 0xb5, 0x7f, 0xc2, 0x38, 0xe3, - 0xdd, 0x17, 0xac, 0xd3, 0x61, 0x51, 0x16, 0xeb, 0xf3, 0x53, 0x49, 0x89, 0x06, 0x8a, 0x1f, 0x20, - 0x94, 0x82, 0xca, 0x59, 0x04, 0x21, 0xa3, 0x76, 0x80, 0x95, 0xa0, 0x32, 0x8c, 0xbc, 0xa4, 0xf8, - 0x00, 0x6d, 0x49, 0x05, 0x79, 0xa8, 0x89, 0xea, 0x82, 0x0e, 0x7b, 0x24, 0xed, 0x85, 0x3d, 0x18, - 0x84, 0xc0, 0x23, 0x41, 0x81, 0xda, 0xa1, 0x55, 0x82, 0xaa, 0x61, 0xde, 0x59, 0xe4, 0x98, 0xa4, - 0xbd, 0x63, 0x18, 0xb4, 0x0a, 0x1d, 0x3f, 0x43, 0xf7, 0x39, 0xf4, 0xff, 0x98, 0x3e, 0x67, 0xd3, - 0xef, 0x72, 0xe8, 0xff, 0x36, 0x7b, 0x07, 0xad, 0xdb, 0xee, 0x93, 0xf3, 0x08, 0x21, 0x21, 0x76, - 0x60, 0xf3, 0x66, 0xc7, 0x90, 0xbf, 0x1a, 0x9d, 0x4e, 0x2b, 0x21, 0xf8, 0x09, 0xc2, 0xa6, 0xd9, - 0x14, 0xbd, 0x60, 0xe9, 0x15, 0x0e, 0xfd, 0x32, 0xdc, 0xf8, 0xee, 0xa0, 0x2d, 0x3b, 0x9e, 0x43, - 0x29, 0x63, 0x16, 0x11, 0xcd, 0x04, 0x7f, 0x9d, 0x83, 0x1a, 0xee, 0x9d, 0xe2, 0xc7, 0x68, 0x95, - 0x4c, 0xa4, 0x90, 0x50, 0xaa, 0x86, 0xf3, 0x59, 0x29, 0xc5, 0x0f, 0x29, 0x55, 0xf8, 0x00, 0x2d, - 0xc1, 0x40, 0x42, 0xa4, 0x81, 0x86, 0xed, 0x4c, 0x71, 0x3b, 0x96, 
0xc5, 0xbd, 0x7b, 0x5e, 0x71, - 0x99, 0x3d, 0x73, 0x99, 0xbd, 0xe1, 0x65, 0xf6, 0x8e, 0x04, 0xe3, 0xc1, 0xff, 0x23, 0xbe, 0x99, - 0x29, 0x8e, 0x9f, 0xa3, 0x65, 0xe8, 0x74, 0x20, 0xd2, 0x2c, 0x87, 0xa2, 0xc0, 0xdc, 0xdf, 0x0a, - 0x2c, 0x8d, 0x13, 0x4c, 0x85, 0xe6, 0xc9, 0xb7, 0x2b, 0xd7, 0xb9, 0xb8, 0x72, 0x9d, 0x9f, 0x57, - 0xae, 0xf3, 0xe9, 0xda, 0x9d, 0xb9, 0xb8, 0x76, 0x67, 0x7e, 0x5c, 0xbb, 0x33, 0xef, 0xf7, 0xbb, - 0x4c, 0xf7, 0xb2, 0xb6, 0x17, 0x89, 0xc4, 0x37, 0x5f, 0xd8, 0x0e, 0x07, 0xdd, 0x17, 0xea, 0xcc, - 0x1f, 0x3f, 0x16, 0x83, 0xf2, 0xb3, 0xa5, 0xcf, 0x25, 0xa4, 0xed, 0xff, 0xec, 0x73, 0xb1, 0xff, - 0x2b, 0x00, 0x00, 0xff, 0xff, 0x67, 0x30, 0x58, 0x9b, 0xda, 0x04, 0x00, 0x00, + // 718 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x54, 0xc1, 0x4e, 0xdb, 0x4a, + 0x14, 0x8d, 0x03, 0x3c, 0x29, 0xc3, 0x03, 0x12, 0x53, 0xd4, 0x94, 0x42, 0x12, 0xb2, 0xa8, 0x28, + 0x15, 0xb6, 0x00, 0xa9, 0xab, 0x0a, 0x35, 0x09, 0xa6, 0x58, 0x2a, 0x49, 0xe4, 0x40, 0x55, 0x75, + 0x33, 0x75, 0xec, 0x9b, 0x64, 0x4a, 0x3c, 0xe3, 0x8e, 0xc7, 0x49, 0xf8, 0x8b, 0x7e, 0x40, 0x7f, + 0xa0, 0x8b, 0xfe, 0x47, 0xa5, 0x6e, 0x58, 0x76, 0x85, 0x2a, 0xd8, 0xf1, 0x15, 0x95, 0xc7, 0x21, + 0x49, 0x03, 0x55, 0xd7, 0xdd, 0x24, 0xd6, 0x3d, 0xe7, 0xdc, 0x7b, 0xe6, 0x78, 0x7c, 0x51, 0xde, + 0x67, 0x67, 0x82, 0xb3, 0x6e, 0x57, 0x17, 0xec, 0x0c, 0x28, 0xf3, 0x88, 0x13, 0xe8, 0xd0, 0x03, + 0x2a, 0x34, 0x9f, 0x33, 0xc1, 0xd4, 0xe5, 0x5b, 0x82, 0x36, 0x26, 0xac, 0x3e, 0x68, 0xb3, 0x36, + 0x93, 0xb8, 0x1e, 0x3d, 0xc5, 0xd4, 0xd5, 0x9c, 0xc3, 0x02, 0x8f, 0x05, 0x7a, 0xd3, 0x0e, 0x40, + 0xef, 0xed, 0x34, 0x41, 0xd8, 0x3b, 0xba, 0xc3, 0x08, 0x1d, 0xe2, 0xab, 0xa3, 0x59, 0x3e, 0x67, + 0xac, 0xa5, 0x3b, 0x5d, 0x9b, 0x78, 0x43, 0xac, 0x30, 0x85, 0x71, 0xf8, 0x18, 0x12, 0x0e, 0xde, + 0xc8, 0x48, 0xf1, 0x6b, 0x12, 0x65, 0x8c, 0xc8, 0x58, 0x25, 0x92, 0x19, 0x03, 0x9f, 0x70, 0x70, + 0xd5, 0xe7, 0x68, 0x4e, 0xb6, 0xc9, 0x2a, 0x05, 0x65, 0x73, 0x7e, 0x77, 
0x45, 0x1b, 0xd9, 0x95, + 0x7d, 0x34, 0x49, 0x2e, 0xa7, 0x6e, 0x2e, 0xf3, 0x31, 0xcf, 0x8a, 0xff, 0xd4, 0x6d, 0x84, 0x68, + 0xe8, 0x61, 0x0e, 0x5d, 0xfb, 0x3c, 0xc8, 0x26, 0x0b, 0xca, 0xe6, 0x6c, 0x79, 0xf1, 0xe6, 0x32, + 0x3f, 0x51, 0xb5, 0x52, 0x34, 0xf4, 0x2c, 0xf9, 0xa8, 0x96, 0x50, 0x26, 0x02, 0x1c, 0xe6, 0xf9, + 0xa1, 0x00, 0x1c, 0x52, 0x22, 0x82, 0xec, 0x8c, 0x54, 0xad, 0xdc, 0x5c, 0xe6, 0xef, 0x82, 0xd6, + 0x12, 0x0d, 0xbd, 0x4a, 0x5c, 0x39, 0x8d, 0x0a, 0x2a, 0x45, 0x19, 0x88, 0x4c, 0xdb, 0x82, 0x30, + 0x8a, 0x39, 0xd8, 0x01, 0xa3, 0xd9, 0xd9, 0x82, 0xb2, 0xb9, 0xb8, 0xbb, 0xa5, 0xdd, 0x13, 0xb2, + 0x36, 0x3e, 0xa7, 0x94, 0x58, 0x52, 0x11, 0x8f, 0xbb, 0xd3, 0xc8, 0x4a, 0xc3, 0x14, 0xb1, 0xf8, + 0xe5, 0xb7, 0xbc, 0x1a, 0x20, 0x44, 0xf7, 0x9f, 0xca, 0xeb, 0x03, 0xca, 0x48, 0x4b, 0x78, 0xe2, + 0x2a, 0x0c, 0xf3, 0x7a, 0x32, 0xed, 0xba, 0x1e, 0xfd, 0x5a, 0x63, 0xde, 0x64, 0x56, 0x77, 0x9a, + 0x58, 0x69, 0x7f, 0x8a, 0x5e, 0xfc, 0x9c, 0x44, 0x1b, 0x32, 0x2b, 0x69, 0xff, 0x98, 0x50, 0x42, + 0xdb, 0x07, 0xa4, 0xd5, 0x22, 0x4e, 0xd8, 0x15, 0xe7, 0xa7, 0xbe, 0x6b, 0x0b, 0x70, 0xd5, 0x75, + 0x84, 0x02, 0xe0, 0x3d, 0xe2, 0x00, 0x26, 0xae, 0x0c, 0x30, 0x65, 0xa5, 0x86, 0x15, 0xd3, 0x55, + 0xf7, 0xd1, 0x9a, 0xcf, 0xa1, 0x87, 0x85, 0xcd, 0xdb, 0x20, 0x70, 0xc7, 0x0e, 0x3a, 0xb8, 0x03, + 0x03, 0x0c, 0xd4, 0x61, 0x2e, 0xb8, 0x32, 0xb4, 0x94, 0x95, 0x8d, 0x38, 0x27, 0x92, 0x72, 0x64, + 0x07, 0x9d, 0x23, 0x18, 0x18, 0x31, 0xae, 0xbe, 0x40, 0x8f, 0x29, 0xf4, 0xff, 0x28, 0x9f, 0x91, + 0xf2, 0x87, 0x14, 0xfa, 0xf7, 0xaa, 0xb7, 0xd1, 0xb2, 0x9c, 0x3e, 0x7e, 0x1f, 0x18, 0x3c, 0x5b, + 0x06, 0x36, 0x1b, 0x9d, 0x18, 0x7a, 0xd5, 0xdb, 0xb7, 0x63, 0x78, 0xb6, 0xfa, 0x0c, 0xa9, 0xd1, + 0xb0, 0x29, 0xf6, 0x9c, 0x64, 0x2f, 0x51, 0xe8, 0x4f, 0x92, 0x8b, 0xdf, 0x15, 0xb4, 0x26, 0xe3, + 0x29, 0xf9, 0x7e, 0x97, 0x38, 0xf2, 0x96, 0xd5, 0x7a, 0xc0, 0x87, 0x67, 0x77, 0xd5, 0xa7, 0x28, + 0x6d, 0x8f, 0x21, 0x6c, 0xbb, 0x2e, 0x1f, 0xe6, 0xb3, 0x34, 0x51, 0x2f, 0xb9, 0x2e, 0x57, 0xf7, + 0xd1, 0x02, 
0x0c, 0x7c, 0x70, 0x04, 0xb8, 0xb8, 0x19, 0x72, 0x2a, 0x63, 0x99, 0xdf, 0x7d, 0xa4, + 0xc5, 0xcb, 0x43, 0x8b, 0x96, 0x87, 0x36, 0x5c, 0x1e, 0x5a, 0x85, 0x11, 0x6a, 0xfd, 0x7f, 0xcb, + 0x2f, 0x87, 0x9c, 0xaa, 0x2f, 0xd1, 0x22, 0xb4, 0x5a, 0xe0, 0x08, 0xd2, 0x83, 0xb8, 0xc1, 0xcc, + 0xdf, 0x1a, 0x2c, 0x8c, 0x04, 0x51, 0x87, 0xad, 0xf7, 0x68, 0xe5, 0xde, 0x4f, 0x4b, 0xdd, 0x40, + 0xeb, 0xc6, 0xdb, 0xba, 0x69, 0x95, 0x4e, 0xcc, 0x5a, 0x15, 0x5b, 0x46, 0xa9, 0x51, 0xab, 0xe2, + 0xd3, 0x6a, 0xa3, 0x6e, 0x54, 0xcc, 0x43, 0xd3, 0x38, 0x48, 0x27, 0xd4, 0x0c, 0x5a, 0xa8, 0x5b, + 0xb5, 0xda, 0x21, 0x3e, 0x36, 0x1b, 0x0d, 0xb3, 0xfa, 0x2a, 0xad, 0x8c, 0x4b, 0x66, 0xf5, 0x4d, + 0xe9, 0xb5, 0x79, 0x90, 0x4e, 0x96, 0x8f, 0xbf, 0x5d, 0xe5, 0x94, 0x8b, 0xab, 0x9c, 0xf2, 0xf3, + 0x2a, 0xa7, 0x7c, 0xba, 0xce, 0x25, 0x2e, 0xae, 0x73, 0x89, 0x1f, 0xd7, 0xb9, 0xc4, 0xbb, 0xbd, + 0x36, 0x11, 0x9d, 0xb0, 0xa9, 0x39, 0xcc, 0xd3, 0xa3, 0x3b, 0xbc, 0x4d, 0x41, 0xf4, 0x19, 0x3f, + 0xd3, 0x47, 0xeb, 0x6f, 0x30, 0xb9, 0x88, 0xc5, 0xb9, 0x0f, 0x41, 0xf3, 0x3f, 0xb9, 0x00, 0xf7, + 0x7e, 0x05, 0x00, 0x00, 0xff, 0xff, 0xac, 0x71, 0x68, 0xf9, 0xac, 0x05, 0x00, 0x00, } func (m *EventClaimExpired) Marshal() (dAtA []byte, err error) { @@ -370,6 +414,11 @@ func (m *EventClaimExpired) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.ExpirationReason != 0 { + i = encodeVarintEvent(dAtA, i, uint64(m.ExpirationReason)) + i-- + dAtA[i] = 0x20 + } if m.NumComputeUnits != 0 { i = encodeVarintEvent(dAtA, i, uint64(m.NumComputeUnits)) i-- @@ -580,6 +629,9 @@ func (m *EventClaimExpired) Size() (n int) { if m.NumComputeUnits != 0 { n += 1 + sovEvent(uint64(m.NumComputeUnits)) } + if m.ExpirationReason != 0 { + n += 1 + sovEvent(uint64(m.ExpirationReason)) + } return n } @@ -762,6 +814,25 @@ func (m *EventClaimExpired) Unmarshal(dAtA []byte) error { break } } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpirationReason", wireType) + } + 
m.ExpirationReason = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ExpirationReason |= ClaimExpirationReason(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipEvent(dAtA[iNdEx:]) diff --git a/x/tokenomics/types/expected_keepers.go b/x/tokenomics/types/expected_keepers.go index a5ada5bb3..5f56776fe 100644 --- a/x/tokenomics/types/expected_keepers.go +++ b/x/tokenomics/types/expected_keepers.go @@ -43,11 +43,11 @@ type ProofKeeper interface { RemoveProof(ctx context.Context, sessionId, supplierAddr string) AllClaims(ctx context.Context, req *prooftypes.QueryAllClaimsRequest) (*prooftypes.QueryAllClaimsResponse, error) + IsProofValid(ctx context.Context, proof *prooftypes.Proof) (valid bool, err error) // Only used for testing & simulation UpsertClaim(ctx context.Context, claim prooftypes.Claim) UpsertProof(ctx context.Context, claim prooftypes.Proof) - GetParams(ctx context.Context) prooftypes.Params SetParams(ctx context.Context, params prooftypes.Params) error } From cb166cdcd5134839bb8897cf80ba92658bd26db5 Mon Sep 17 00:00:00 2001 From: Daniel Olshansky Date: Mon, 22 Jul 2024 20:55:18 -0700 Subject: [PATCH 16/29] Update testutil/network/network.go Co-authored-by: Bryan White --- testutil/network/network.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testutil/network/network.go b/testutil/network/network.go index 518a39800..091f871b7 100644 --- a/testutil/network/network.go +++ b/testutil/network/network.go @@ -50,7 +50,7 @@ func New(t *testing.T, configs ...Config) *Network { cfg = configs[0] } net, err := network.New(t, t.TempDir(), cfg) - require.NoError(t, err, "TODO_FLAKY: This config setup is periodically flakyis a flaky is ") + require.NoError(t, err, "TODO_FLAKY: This config setup is periodically flaky") _, err = net.WaitForHeight(1) require.NoError(t, err) 
t.Cleanup(net.Cleanup) From a52a5e801eb9f2ad6aa4d96c27da0e824766ac38 Mon Sep 17 00:00:00 2001 From: Daniel Olshansky Date: Mon, 22 Jul 2024 22:01:45 -0700 Subject: [PATCH 17/29] Working on one last test --- x/proof/keeper/proof_validation.go | 32 ++++---- .../keeper_settle_pending_claims_test.go | 77 ++++++++++++++++++- x/tokenomics/keeper/settle_pending_claims.go | 18 ++--- .../keeper/settle_session_accounting_test.go | 3 +- x/tokenomics/types/expected_keepers.go | 1 + 5 files changed, 105 insertions(+), 26 deletions(-) diff --git a/x/proof/keeper/proof_validation.go b/x/proof/keeper/proof_validation.go index 8439da485..68f6df0e0 100644 --- a/x/proof/keeper/proof_validation.go +++ b/x/proof/keeper/proof_validation.go @@ -1,17 +1,5 @@ package keeper -import ( - "bytes" - "context" - - "github.com/pokt-network/smt" - - "github.com/pokt-network/poktroll/pkg/crypto/protocol" - "github.com/pokt-network/poktroll/x/proof/types" - servicetypes "github.com/pokt-network/poktroll/x/service/types" - sessiontypes "github.com/pokt-network/poktroll/x/session/types" -) - /* TODO_MAINNET: Document these steps in the docs and link here. @@ -40,8 +28,27 @@ import ( 3. verify(claim.Root, proof.ClosestProof); verify the closest proof is correct */ +import ( + "bytes" + "context" + + "github.com/pokt-network/smt" + + "github.com/pokt-network/poktroll/pkg/crypto/protocol" + "github.com/pokt-network/poktroll/x/proof/types" + servicetypes "github.com/pokt-network/poktroll/x/service/types" + sessiontypes "github.com/pokt-network/poktroll/x/session/types" +) + // IsProofValid validates the proof submitted by the supplier is correct with // respect to an on-chain claim. +// +// This function should be called during session settlement (i.e. EndBlocker) +// rather than during proof submission (i.e. SubmitProof) because: +// 1. RPC requests should be quick, lightweight and only do basic validation +// 2. 
Validators are the ones responsible for the heavy processing & validation during state transitions +// 3. This creates an opportunity to slash suppliers who submit false proofs, whereas +// they can keep retrying if it takes place in the SubmitProof handler. func (k Keeper) IsProofValid( ctx context.Context, proof *types.Proof, @@ -185,7 +192,6 @@ func (k Keeper) IsProofValid( logger.Debug("successfully verified closest merkle proof") return true, nil - } // validateClosestPath ensures that the proof's path matches the expected path. diff --git a/x/tokenomics/keeper/keeper_settle_pending_claims_test.go b/x/tokenomics/keeper/keeper_settle_pending_claims_test.go index 6c97a0e5a..4031e0d61 100644 --- a/x/tokenomics/keeper/keeper_settle_pending_claims_test.go +++ b/x/tokenomics/keeper/keeper_settle_pending_claims_test.go @@ -2,11 +2,13 @@ package keeper_test import ( "context" + "fmt" "testing" "cosmossdk.io/math" "github.com/cosmos/cosmos-sdk/types" cosmostypes "github.com/cosmos/cosmos-sdk/types" + sdk "github.com/cosmos/cosmos-sdk/types" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" @@ -21,6 +23,7 @@ import ( sessiontypes "github.com/pokt-network/poktroll/x/session/types" "github.com/pokt-network/poktroll/x/shared" sharedtypes "github.com/pokt-network/poktroll/x/shared/types" + suppliertypes "github.com/pokt-network/poktroll/x/supplier/types" tokenomicstypes "github.com/pokt-network/poktroll/x/tokenomics/types" ) @@ -75,12 +78,19 @@ func (s *TestSuite) SetupTest() { RootHash: testutilproof.SmstRootWithSum(s.expectedComputeUnits), } - // Prepare a claim that can be inserted + // Prepare a proof that can be inserted s.proof = prooftypes.Proof{ SupplierAddress: s.claim.SupplierAddress, SessionHeader: s.claim.SessionHeader, - // ClosestMerkleProof + // ClosestMerkleProof: + } + + supplierStake := types.NewCoin("upokt", math.NewInt(1000000)) + supplier := sharedtypes.Supplier{ + Address: supplierAddr, + Stake: &supplierStake, } + 
s.keepers.SetSupplier(s.ctx, supplier) appStake := types.NewCoin("upokt", math.NewInt(1000000)) app := apptypes.Application{ @@ -88,6 +98,15 @@ func (s *TestSuite) SetupTest() { Stake: &appStake, } s.keepers.SetApplication(s.ctx, app) + + // TODO_IN_THIS_PR: Finish this part + moduleBaseMint := types.NewCoins(sdk.NewCoin("upokt", math.NewInt(690000000000000042))) + err := s.keepers.MintCoins(s.sdkCtx, suppliertypes.ModuleName, moduleBaseMint) + require.NoError(s.T(), err) + s.keepers.SendCoinsFromModuleToAccount(s.sdkCtx, suppliertypes.ModuleName, sdk.AccAddress(supplier.Address), moduleBaseMint) + err = s.keepers.MintCoins(s.sdkCtx, apptypes.ModuleName, moduleBaseMint) + require.NoError(s.T(), err) + s.keepers.SendCoinsFromModuleToAccount(s.sdkCtx, suppliertypes.ModuleName, sdk.AccAddress(app.Address), moduleBaseMint) } // TestSettleExpiringClaimsSuite tests the claim settlement process. @@ -189,9 +208,63 @@ func (s *TestSuite) TestSettlePendingClaims_ClaimExpired_ProofRequiredAndNotProv events, "poktroll.tokenomics.EventClaimExpired") require.Len(t, expectedEvents, 1) + fmt.Println("expectedEvents", expectedEvents) + // Validate the event + expectedEvent := expectedEvents[0] + require.Equal(t, s.expectedComputeUnits, expectedEvent.GetNumComputeUnits()) + require.Equal(t, tokenomicstypes.ClaimExpirationReason_PROOF_MISSING, expectedEvent.GetExpirationReason()) +} + +func (s *TestSuite) TestSettlePendingClaims_ClaimExpired_ProofRequired_InvalidOneProvided() { + // Retrieve default values + t := s.T() + ctx := s.ctx + sdkCtx := cosmostypes.UnwrapSDKContext(ctx) + sharedParams := s.keepers.SharedKeeper.GetParams(ctx) + + // Create a claim that requires a proof and an invalid proof + claim := s.claim + proof := s.proof + proof.ClosestMerkleProof = []byte("invalid_proof") + + // Upsert the proof & claim + s.keepers.UpsertClaim(ctx, claim) + s.keepers.UpsertProof(ctx, proof) + + // Settle pending claims after proof window closes + // Expectation: All (1) claims should 
be expired. + // NB: proofs should be rejected when the current height equals the proof window close height. + sessionEndHeight := claim.SessionHeader.SessionEndBlockHeight + blockHeight := shared.GetProofWindowCloseHeight(&sharedParams, sessionEndHeight) + sdkCtx = sdkCtx.WithBlockHeight(blockHeight) + settledResult, expiredResult, err := s.keepers.SettlePendingClaims(sdkCtx) + require.NoError(t, err) + + // Check that no claims were settled. + require.Equal(t, uint64(0), settledResult.NumClaims) + // Validate that exactly one claims expired + require.Equal(t, uint64(1), expiredResult.NumClaims) + + // Validate that no claims remain. + claims := s.keepers.GetAllClaims(ctx) + require.Len(t, claims, 0) + + // Validate that no proofs remain. + proofs := s.keepers.GetAllProofs(ctx) + require.Len(t, proofs, 0) + + // Confirm an expiration event was emitted + events := sdkCtx.EventManager().Events() + require.Len(t, events, 5) // minting, burning, settling, etc.. + expectedEvents := testutilevents.FilterEvents[*tokenomicstypes.EventClaimExpired](t, + events, "poktroll.tokenomics.EventClaimExpired") + require.Len(t, expectedEvents, 1) + + fmt.Println("expectedEvents", expectedEvents) // Validate the event expectedEvent := expectedEvents[0] require.Equal(t, s.expectedComputeUnits, expectedEvent.GetNumComputeUnits()) + require.Equal(t, tokenomicstypes.ClaimExpirationReason_PROOF_INVALID, expectedEvent.GetExpirationReason()) } func (s *TestSuite) TestSettlePendingClaims_ClaimSettled_ProofRequiredAndProvided_ViaThreshold() { diff --git a/x/tokenomics/keeper/settle_pending_claims.go b/x/tokenomics/keeper/settle_pending_claims.go index 1b395b21e..11c6fa114 100644 --- a/x/tokenomics/keeper/settle_pending_claims.go +++ b/x/tokenomics/keeper/settle_pending_claims.go @@ -82,12 +82,11 @@ func (k Keeper) SettlePendingClaims(ctx sdk.Context) ( proofIsRequired := (proofRequirement != prooftypes.ProofRequirementReason_NOT_REQUIRED) if proofIsRequired { - // 
EXPIRATION_REASON_UNSPECIFIED is the default var expirationReason types.ClaimExpirationReason = types.ClaimExpirationReason_EXPIRATION_REASON_UNSPECIFIED if isProofFound { - // Should claim expire because proof is invalid? - isProofValid, err := k.proofKeeper.IsProofValid(ctx, &proof) + var isProofValid bool + isProofValid, err = k.proofKeeper.IsProofValid(ctx, &proof) if !isProofValid || err != nil { expirationReason = types.ClaimExpirationReason_PROOF_INVALID } @@ -104,7 +103,7 @@ func (k Keeper) SettlePendingClaims(ctx sdk.Context) ( Claim: &claim, NumComputeUnits: numClaimComputeUnits, NumRelays: numRelaysInSessionTree, - ExpirationReason: types.ClaimExpirationReason_PROOF_INVALID, + ExpirationReason: expirationReason, } if err = ctx.EventManager().EmitTypedEvent(&claimExpiredEvent); err != nil { return settledResult, expiredResult, err @@ -115,6 +114,9 @@ func (k Keeper) SettlePendingClaims(ctx sdk.Context) ( // The claim & proof are no longer necessary, so there's no need for them // to take up on-chain space. k.proofKeeper.RemoveClaim(ctx, sessionId, claim.SupplierAddress) + if isProofFound { + k.proofKeeper.RemoveProof(ctx, sessionId, claim.SupplierAddress) + } expiredResult.NumClaims++ expiredResult.NumRelays += numRelaysInSessionTree @@ -124,11 +126,9 @@ func (k Keeper) SettlePendingClaims(ctx sdk.Context) ( } - // TODO_MAINNET: A potential issue with doing proof validation inside - // `SubmitProof` is that we will not be storing false proofs on-chain (e.g. for slashing purposes). - // This could be considered a feature (e.g. less state bloat against sybil attacks) - // or a bug (i.e. no mechanisms for slashing suppliers who submit false proofs). - // Revisit this prior to mainnet launch as to whether the business logic for settling sessions should be in EndBlocker or here. + // If this code path is reached, then either: + // 1. The claim does not require a proof. + // 2. The claim requires a proof and a valid proof was found. 
// Manage the mint & burn accounting for the claim. if err = k.SettleSessionAccounting(ctx, &claim); err != nil { diff --git a/x/tokenomics/keeper/settle_session_accounting_test.go b/x/tokenomics/keeper/settle_session_accounting_test.go index 86a74b04f..0c4edb5e4 100644 --- a/x/tokenomics/keeper/settle_session_accounting_test.go +++ b/x/tokenomics/keeper/settle_session_accounting_test.go @@ -11,9 +11,8 @@ import ( cosmostypes "github.com/cosmos/cosmos-sdk/types" authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" - "github.com/stretchr/testify/require" - "github.com/pokt-network/smt" + "github.com/stretchr/testify/require" "github.com/pokt-network/poktroll/pkg/crypto/protocol" testkeeper "github.com/pokt-network/poktroll/testutil/keeper" diff --git a/x/tokenomics/types/expected_keepers.go b/x/tokenomics/types/expected_keepers.go index 5f56776fe..2d6ce532c 100644 --- a/x/tokenomics/types/expected_keepers.go +++ b/x/tokenomics/types/expected_keepers.go @@ -38,6 +38,7 @@ type ApplicationKeeper interface { type ProofKeeper interface { GetAllClaims(ctx context.Context) []prooftypes.Claim + GetAllProofs(ctx context.Context) []prooftypes.Proof RemoveClaim(ctx context.Context, sessionId, supplierAddr string) GetProof(ctx context.Context, sessionId, supplierAddr string) (proof prooftypes.Proof, isProofFound bool) RemoveProof(ctx context.Context, sessionId, supplierAddr string) From ac4a212935c144f16c5eae166ef5a0bfe747d225 Mon Sep 17 00:00:00 2001 From: Daniel Olshansky Date: Tue, 23 Jul 2024 09:50:14 -0700 Subject: [PATCH 18/29] Add a TODO_MAINNET --- pkg/crypto/protocol/difficulty.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/crypto/protocol/difficulty.go b/pkg/crypto/protocol/difficulty.go index 90c3ec1c5..ccc2e1ee1 100644 --- a/pkg/crypto/protocol/difficulty.go +++ b/pkg/crypto/protocol/difficulty.go @@ -24,6 +24,7 @@ var ( // GetDifficultyFromHash returns the "difficulty" of the given hash, with respect 
// to the "highest" (easiest) target hash, BaseRelayDifficultyHash. // The resultant value is not used for any business logic but is simplify there to have a human-readable version of the hash. +// TODO_MAINNET: Can this cause an integer overflow? func GetDifficultyFromHash(hashBz [RelayHasherSize]byte) int64 { baseRelayDifficultyHashInt := new(big.Int).SetBytes(BaseRelayDifficultyHashBz) hashInt := new(big.Int).SetBytes(hashBz[:]) From f16ba64a10f3de32804d895ab2d2cf017d5e470c Mon Sep 17 00:00:00 2001 From: Daniel Olshansky Date: Tue, 23 Jul 2024 14:38:43 -0700 Subject: [PATCH 19/29] WIP --- pkg/client/query/accquerier.go | 3 +++ x/proof/keeper/keeper.go | 2 ++ x/proof/keeper/proof_validation.go | 9 ++++--- x/proof/types/account_query_client.go | 3 +++ x/proof/types/errors.go | 1 + .../keeper_settle_pending_claims_test.go | 27 ++++++++++++++----- x/tokenomics/keeper/settle_pending_claims.go | 1 + 7 files changed, 36 insertions(+), 10 deletions(-) diff --git a/pkg/client/query/accquerier.go b/pkg/client/query/accquerier.go index 6b6e26778..2b4d9c2bc 100644 --- a/pkg/client/query/accquerier.go +++ b/pkg/client/query/accquerier.go @@ -93,6 +93,9 @@ func (aq *accQuerier) GetPubKeyFromAddress(ctx context.Context, address string) if err != nil { return nil, err } + if acc == nil { + return nil, ErrQueryAccountNotFound.Wrapf("address: %s", address) + } // If the account's public key is nil, then return an error. 
pubKey := acc.GetPubKey() diff --git a/x/proof/keeper/keeper.go b/x/proof/keeper/keeper.go index 0f35fe593..e7bbc9475 100644 --- a/x/proof/keeper/keeper.go +++ b/x/proof/keeper/keeper.go @@ -30,6 +30,7 @@ type ( sessionKeeper types.SessionKeeper applicationKeeper types.ApplicationKeeper + accountKeeper types.AccountKeeper sharedKeeper types.SharedKeeper ringClient crypto.RingClient @@ -88,6 +89,7 @@ func NewKeeper( sessionKeeper: sessionKeeper, applicationKeeper: applicationKeeper, + accountKeeper: accountKeeper, sharedKeeper: sharedKeeper, ringClient: ringKeeperClient, diff --git a/x/proof/keeper/proof_validation.go b/x/proof/keeper/proof_validation.go index 68f6df0e0..f303fe6ef 100644 --- a/x/proof/keeper/proof_validation.go +++ b/x/proof/keeper/proof_validation.go @@ -32,6 +32,7 @@ import ( "bytes" "context" + sdk "github.com/cosmos/cosmos-sdk/types" "github.com/pokt-network/smt" "github.com/pokt-network/poktroll/pkg/crypto/protocol" @@ -57,9 +58,9 @@ func (k Keeper) IsProofValid( // Retrieve the supplier's public key. supplierAddr := proof.SupplierAddress - supplierPubKey, err := k.accountQuerier.GetPubKeyFromAddress(ctx, supplierAddr) - if err != nil { - return false, err + supplierAccount := k.accountKeeper.GetAccount(ctx, sdk.AccAddress(supplierAddr)) + if supplierAccount == nil || supplierAccount.GetPubKey() == nil { + return false, types.ErrProofAccNotFound.Wrapf("account for supplier %q not found", supplierAddr) } // Validate the session header. @@ -145,7 +146,7 @@ func (k Keeper) IsProofValid( logger.Debug("successfully verified relay request signature") // Verify the relay response's signature. 
- if err = relayRes.VerifySupplierSignature(supplierPubKey); err != nil { + if err = relayRes.VerifySupplierSignature(supplierAccount.GetPubKey()); err != nil { return false, err } logger.Debug("successfully verified relay response signature") diff --git a/x/proof/types/account_query_client.go b/x/proof/types/account_query_client.go index 9d89b44d7..86ec77a6e 100644 --- a/x/proof/types/account_query_client.go +++ b/x/proof/types/account_query_client.go @@ -62,6 +62,9 @@ func (accountQueryClient *AccountKeeperQueryClient) GetPubKeyFromAddress( if err != nil { return nil, err } + if acc == nil { + return nil, ErrProofAccNotFound.Wrapf("account not found for address %s", address) + } // If the account's public key is nil, then return an error. pubKey := acc.GetPubKey() diff --git a/x/proof/types/errors.go b/x/proof/types/errors.go index bfc0f4b93..7e9c64867 100644 --- a/x/proof/types/errors.go +++ b/x/proof/types/errors.go @@ -32,4 +32,5 @@ var ( ErrProofClaimOutsideOfWindow = sdkerrors.Register(ModuleName, 1121, "claim attempted outside of the session's claim window") ErrProofProofOutsideOfWindow = sdkerrors.Register(ModuleName, 1122, "proof attempted outside of the session's proof window") ErrProofSupplierMismatch = sdkerrors.Register(ModuleName, 1123, "supplier address does not match the claim or proof") + ErrProofAccNotFound = sdkerrors.Register(ModuleName, 1124, "account not found") ) diff --git a/x/tokenomics/keeper/keeper_settle_pending_claims_test.go b/x/tokenomics/keeper/keeper_settle_pending_claims_test.go index 4031e0d61..09324a875 100644 --- a/x/tokenomics/keeper/keeper_settle_pending_claims_test.go +++ b/x/tokenomics/keeper/keeper_settle_pending_claims_test.go @@ -52,6 +52,8 @@ type TestSuite struct { // - A claim that will require a proof via threshold, given the default proof params. // - A proof which contains only the session header supplier address. 
func (s *TestSuite) SetupTest() { + t := s.T() + supplierAddr := sample.AccAddress() appAddr := sample.AccAddress() @@ -99,14 +101,27 @@ func (s *TestSuite) SetupTest() { } s.keepers.SetApplication(s.ctx, app) - // TODO_IN_THIS_PR: Finish this part + // Mint some coins to the supplier and application modules moduleBaseMint := types.NewCoins(sdk.NewCoin("upokt", math.NewInt(690000000000000042))) + err := s.keepers.MintCoins(s.sdkCtx, suppliertypes.ModuleName, moduleBaseMint) - require.NoError(s.T(), err) - s.keepers.SendCoinsFromModuleToAccount(s.sdkCtx, suppliertypes.ModuleName, sdk.AccAddress(supplier.Address), moduleBaseMint) + require.NoError(t, err) + err = s.keepers.MintCoins(s.sdkCtx, apptypes.ModuleName, moduleBaseMint) - require.NoError(s.T(), err) - s.keepers.SendCoinsFromModuleToAccount(s.sdkCtx, suppliertypes.ModuleName, sdk.AccAddress(app.Address), moduleBaseMint) + require.NoError(t, err) + + // Send some coins to the supplier and application accounts + sendAmount := types.NewCoins(sdk.NewCoin("upokt", math.NewInt(1000000))) + + err = s.keepers.SendCoinsFromModuleToAccount(s.sdkCtx, suppliertypes.ModuleName, sdk.AccAddress(supplier.Address), sendAmount) + require.NoError(t, err) + acc := s.keepers.GetAccount(s.ctx, sdk.AccAddress(supplierAddr)) + require.NotNil(t, acc) + + err = s.keepers.SendCoinsFromModuleToAccount(s.sdkCtx, apptypes.ModuleName, sdk.AccAddress(app.Address), sendAmount) + require.NoError(t, err) + acc = s.keepers.GetAccount(s.ctx, sdk.AccAddress(appAddr)) + require.NotNil(t, acc) } // TestSettleExpiringClaimsSuite tests the claim settlement process. @@ -255,7 +270,7 @@ func (s *TestSuite) TestSettlePendingClaims_ClaimExpired_ProofRequired_InvalidOn // Confirm an expiration event was emitted events := sdkCtx.EventManager().Events() - require.Len(t, events, 5) // minting, burning, settling, etc.. + require.Len(t, events, 17) // minting, burning, settling, etc.. 
expectedEvents := testutilevents.FilterEvents[*tokenomicstypes.EventClaimExpired](t, events, "poktroll.tokenomics.EventClaimExpired") require.Len(t, expectedEvents, 1) diff --git a/x/tokenomics/keeper/settle_pending_claims.go b/x/tokenomics/keeper/settle_pending_claims.go index 11c6fa114..562870b84 100644 --- a/x/tokenomics/keeper/settle_pending_claims.go +++ b/x/tokenomics/keeper/settle_pending_claims.go @@ -88,6 +88,7 @@ func (k Keeper) SettlePendingClaims(ctx sdk.Context) ( var isProofValid bool isProofValid, err = k.proofKeeper.IsProofValid(ctx, &proof) if !isProofValid || err != nil { + logger.Warn(fmt.Sprintf("Proof was found but is invalid due to %v", err)) expirationReason = types.ClaimExpirationReason_PROOF_INVALID } } else { From 6dbbcd58e0433db1e1fd3072b3a4b90bffb8b52b Mon Sep 17 00:00:00 2001 From: Daniel Olshansky Date: Tue, 23 Jul 2024 16:42:45 -0700 Subject: [PATCH 20/29] Not functioning but temp WIP --- proto/poktroll/shared/service.proto | 2 +- testutil/keeper/tokenomics.go | 2 + testutil/testtree/tree.go | 152 +++++++++++++ .../keeper/msg_server_submit_proof_test.go | 7 +- x/proof/keeper/proof_validation.go | 18 +- x/proof/keeper/proof_validation_test.go | 202 +++--------------- .../keeper_settle_pending_claims_test.go | 150 +++++++++---- x/tokenomics/keeper/settle_pending_claims.go | 3 +- x/tokenomics/types/expected_keepers.go | 12 +- 9 files changed, 322 insertions(+), 226 deletions(-) create mode 100644 testutil/testtree/tree.go diff --git a/proto/poktroll/shared/service.proto b/proto/poktroll/shared/service.proto index 5ba16f7ef..00975dcca 100644 --- a/proto/poktroll/shared/service.proto +++ b/proto/poktroll/shared/service.proto @@ -46,7 +46,7 @@ enum RPCType { WEBSOCKET = 2; // WebSocket JSON_RPC = 3; // JSON-RPC REST = 4; // REST - // Add new RPC types here as needed +// Add new RPC types here as needed } // Enum to define configuration options diff --git a/testutil/keeper/tokenomics.go b/testutil/keeper/tokenomics.go index 
a0b5ffc19..beaeda343 100644 --- a/testutil/keeper/tokenomics.go +++ b/testutil/keeper/tokenomics.go @@ -62,6 +62,7 @@ type TokenomicsModuleKeepers struct { tokenomicstypes.SupplierKeeper tokenomicstypes.ProofKeeper tokenomicstypes.SharedKeeper + tokenomicstypes.SessionKeeper Codec *codec.ProtoCodec } @@ -363,6 +364,7 @@ func NewTokenomicsModuleKeepers( SupplierKeeper: &supplierKeeper, ProofKeeper: &proofKeeper, SharedKeeper: &sharedKeeper, + SessionKeeper: &sessionKeeper, Codec: cdc, } diff --git a/testutil/testtree/tree.go b/testutil/testtree/tree.go new file mode 100644 index 000000000..691bba903 --- /dev/null +++ b/testutil/testtree/tree.go @@ -0,0 +1,152 @@ +package testtree + +import ( + "context" + "os" + "testing" + + "github.com/99designs/keyring" + cosmostypes "github.com/cosmos/cosmos-sdk/types" + "github.com/stretchr/testify/require" + + "github.com/pokt-network/poktroll/pkg/crypto" + "github.com/pokt-network/poktroll/pkg/relayer" + "github.com/pokt-network/poktroll/pkg/relayer/session" + "github.com/pokt-network/poktroll/testutil/testrelayer" + prooftypes "github.com/pokt-network/poktroll/x/proof/types" + sessiontypes "github.com/pokt-network/poktroll/x/session/types" +) + +// NewFilledSessionTree creates a new session tree with numRelays of relays +// filled out using the request and response headers provided where every +// relay is signed by the supplier and application respectively. +func NewFilledSessionTree( + ctx context.Context, t *testing.T, + numRelays uint, + supplierKeyUid, supplierAddr string, + sessionTreeHeader, reqHeader, resHeader *sessiontypes.SessionHeader, + keyRing keyring.Keyring, + ringClient crypto.RingClient, +) relayer.SessionTree { + t.Helper() + + // Initialize an empty session tree with the given session header. + sessionTree := NewEmptySessionTree(t, sessionTreeHeader, supplierAddr) + + // Add numRelays of relays to the session tree. 
+	FillSessionTree( +		ctx, t, +		sessionTree, numRelays, +		supplierKeyUid, supplierAddr, +		reqHeader, resHeader, +		keyRing, +		ringClient, +	) + +	return sessionTree +} + +// NewEmptySessionTree creates a new empty session tree for the given session. +func NewEmptySessionTree( +	t *testing.T, +	sessionTreeHeader *sessiontypes.SessionHeader, +	supplierAddr string, +) relayer.SessionTree { +	t.Helper() + +	// Create a temporary session tree store directory for persistence. +	testSessionTreeStoreDir, err := os.MkdirTemp("", "session_tree_store_dir") +	require.NoError(t, err) + +	// Delete the temporary session tree store directory after the test completes. +	t.Cleanup(func() { +		_ = os.RemoveAll(testSessionTreeStoreDir) +	}) + +	accAddress := cosmostypes.MustAccAddressFromBech32(supplierAddr) + +	// Construct a session tree to add relays to and generate a proof from. +	sessionTree, err := session.NewSessionTree( +		sessionTreeHeader, +		&accAddress, +		testSessionTreeStoreDir, +	) +	require.NoError(t, err) + +	return sessionTree +} + +// FillSessionTree fills the session tree with valid signed relays. +// A total of numRelays relays are added to the session tree with +// increasing weights (relay 1 has weight 1, relay 2 has weight 2, etc.).
+func FillSessionTree( + ctx context.Context, t *testing.T, + sessionTree relayer.SessionTree, + numRelays uint, + supplierKeyUid, supplierAddr string, + reqHeader, resHeader *sessiontypes.SessionHeader, + keyRing keyring.Keyring, + ringClient crypto.RingClient, +) { + t.Helper() + + for i := 0; i < int(numRelays); i++ { + relay := testrelayer.NewSignedEmptyRelay( + ctx, t, + supplierKeyUid, supplierAddr, + reqHeader, resHeader, + keyRing, + ringClient, + ) + relayBz, err := relay.Marshal() + require.NoError(t, err) + + relayKey, err := relay.GetHash() + require.NoError(t, err) + + relayWeight := uint64(i) + + err = sessionTree.Update(relayKey[:], relayBz, relayWeight) + require.NoError(t, err) + } +} + +// NewProof creates a new proof structure. +func NewProof( + t *testing.T, + supplierAddr string, + sessionHeader *sessiontypes.SessionHeader, + sessionTree relayer.SessionTree, + closestProofPath []byte, +) *prooftypes.Proof { + t.Helper() + + // Generate a closest proof from the session tree using closestProofPath. + merkleProof, err := sessionTree.ProveClosest(closestProofPath) + require.NoError(t, err) + require.NotNil(t, merkleProof) + + // Serialize the closest merkle proof. + merkleProofBz, err := merkleProof.Marshal() + require.NoError(t, err) + + return &prooftypes.Proof{ + SupplierAddress: supplierAddr, + SessionHeader: sessionHeader, + ClosestMerkleProof: merkleProofBz, + } +} + +func NewClaim( + t *testing.T, + supplierAddr string, + sessionHeader *sessiontypes.SessionHeader, + rootHash []byte, +) *prooftypes.Claim { + // Create a new claim. 
+	return &prooftypes.Claim{ +		SupplierAddress: supplierAddr, +		SessionHeader:   sessionHeader, +		RootHash:        rootHash, +	} +} diff --git a/x/proof/keeper/msg_server_submit_proof_test.go b/x/proof/keeper/msg_server_submit_proof_test.go index 8d8c5a7a8..4dfee7473 100644 --- a/x/proof/keeper/msg_server_submit_proof_test.go +++ b/x/proof/keeper/msg_server_submit_proof_test.go @@ -18,6 +18,7 @@ import ( testutilevents "github.com/pokt-network/poktroll/testutil/events" keepertest "github.com/pokt-network/poktroll/testutil/keeper" "github.com/pokt-network/poktroll/testutil/testkeyring" + "github.com/pokt-network/poktroll/testutil/testtree" "github.com/pokt-network/poktroll/x/proof/keeper" prooftypes "github.com/pokt-network/poktroll/x/proof/types" sessiontypes "github.com/pokt-network/poktroll/x/session/types" @@ -142,7 +143,7 @@ func TestMsgServer_SubmitProof_Success(t *testing.T) { // Submit the corresponding proof. expectedNumRelays := uint(5) - sessionTree := newFilledSessionTree( + sessionTree := testtree.NewFilledSessionTree( ctx, t, expectedNumRelays, supplierUid, supplierAddr, @@ -289,7 +290,7 @@ func TestMsgServer_SubmitProof_Error_OutsideOfWindow(t *testing.T) { // Submit the corresponding proof. numRelays := uint(5) - sessionTree := newFilledSessionTree( + sessionTree := testtree.NewFilledSessionTree( ctx, t, numRelays, supplierUid, supplierAddr, @@ -475,7 +476,7 @@ func TestMsgServer_SubmitProof_Error(t *testing.T) { // Construct a valid session tree with 5 relays.
numRelays := uint(5) - validSessionTree := newFilledSessionTree( + validSessionTree := testtree.NewFilledSessionTree( ctx, t, numRelays, supplierUid, supplierAddr, diff --git a/x/proof/keeper/proof_validation.go b/x/proof/keeper/proof_validation.go index f303fe6ef..4a2b40776 100644 --- a/x/proof/keeper/proof_validation.go +++ b/x/proof/keeper/proof_validation.go @@ -31,6 +31,7 @@ package keeper import ( "bytes" "context" + "fmt" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/pokt-network/smt" @@ -58,10 +59,19 @@ func (k Keeper) IsProofValid( // Retrieve the supplier's public key. supplierAddr := proof.SupplierAddress - supplierAccount := k.accountKeeper.GetAccount(ctx, sdk.AccAddress(supplierAddr)) - if supplierAccount == nil || supplierAccount.GetPubKey() == nil { - return false, types.ErrProofAccNotFound.Wrapf("account for supplier %q not found", supplierAddr) + supplierAccAddr, err := sdk.AccAddressFromBech32(supplierAddr) + if err != nil { + return false, err + } + supplierAccount := k.accountKeeper.GetAccount(ctx, supplierAccAddr) + fmt.Println("OLSH", supplierAccAddr, supplierAccount.GetPubKey()) + // require.NotNil(t, acc) + + supplierPubKey, err := k.accountQuerier.GetPubKeyFromAddress(ctx, supplierAddr) + if err != nil { + return false, err } + fmt.Println("OLSH3", supplierPubKey) // Validate the session header. var onChainSession *sessiontypes.Session @@ -146,7 +156,7 @@ func (k Keeper) IsProofValid( logger.Debug("successfully verified relay request signature") // Verify the relay response's signature. 
- if err = relayRes.VerifySupplierSignature(supplierAccount.GetPubKey()); err != nil { + if err = relayRes.VerifySupplierSignature(supplierPubKey); err != nil { return false, err } logger.Debug("successfully verified relay response signature") diff --git a/x/proof/keeper/proof_validation_test.go b/x/proof/keeper/proof_validation_test.go index 4a1a33291..3c8638a3f 100644 --- a/x/proof/keeper/proof_validation_test.go +++ b/x/proof/keeper/proof_validation_test.go @@ -1,9 +1,7 @@ package keeper_test import ( - "context" "encoding/hex" - "os" "testing" "cosmossdk.io/depinject" @@ -14,18 +12,16 @@ import ( "github.com/pokt-network/smt" "github.com/stretchr/testify/require" - "github.com/pokt-network/poktroll/pkg/crypto" "github.com/pokt-network/poktroll/pkg/crypto/protocol" "github.com/pokt-network/poktroll/pkg/crypto/rings" "github.com/pokt-network/poktroll/pkg/polylog/polyzero" "github.com/pokt-network/poktroll/pkg/relayer" - "github.com/pokt-network/poktroll/pkg/relayer/session" keepertest "github.com/pokt-network/poktroll/testutil/keeper" "github.com/pokt-network/poktroll/testutil/testkeyring" "github.com/pokt-network/poktroll/testutil/testrelayer" + "github.com/pokt-network/poktroll/testutil/testtree" prooftypes "github.com/pokt-network/poktroll/x/proof/types" servicetypes "github.com/pokt-network/poktroll/x/service/types" - sessiontypes "github.com/pokt-network/poktroll/x/session/types" "github.com/pokt-network/poktroll/x/shared" sharedtypes "github.com/pokt-network/poktroll/x/shared/types" ) @@ -118,7 +114,7 @@ func TestIsProofValid_Error(t *testing.T) { // Construct a valid session tree with 5 relays. numRelays := uint(5) - validSessionTree := newFilledSessionTree( + validSessionTree := testtree.NewFilledSessionTree( ctx, t, numRelays, supplierUid, supplierAddr, @@ -205,7 +201,7 @@ func TestIsProofValid_Error(t *testing.T) { emptySessionIdHeader.SessionId = "" // Construct new proof message. 
- return newProof(t, + return testtree.NewProof(t, supplierAddr, &emptySessionIdHeader, validSessionTree, @@ -221,7 +217,7 @@ func TestIsProofValid_Error(t *testing.T) { desc: "merkle proof cannot be empty", newProof: func(t *testing.T) *prooftypes.Proof { // Construct new proof message. - proof := newProof(t, + proof := testtree.NewProof(t, supplierAddr, validSessionHeader, validSessionTree, @@ -240,7 +236,7 @@ func TestIsProofValid_Error(t *testing.T) { desc: "proof session ID must match on-chain session ID", newProof: func(t *testing.T) *prooftypes.Proof { // Construct new proof message using the wrong session ID. - return newProof(t, + return testtree.NewProof(t, supplierAddr, &wrongSessionIdHeader, validSessionTree, @@ -257,7 +253,7 @@ func TestIsProofValid_Error(t *testing.T) { desc: "proof supplier must be in on-chain session", newProof: func(t *testing.T) *prooftypes.Proof { // Construct a proof message with a supplier that does not belong in the session. - return newProof(t, + return testtree.NewProof(t, wrongSupplierAddr, validSessionHeader, validSessionTree, @@ -274,7 +270,7 @@ func TestIsProofValid_Error(t *testing.T) { desc: "merkle proof must be deserializabled", newProof: func(t *testing.T) *prooftypes.Proof { // Construct new proof message. - proof := newProof(t, + proof := testtree.NewProof(t, supplierAddr, validSessionHeader, validSessionTree, @@ -295,7 +291,7 @@ func TestIsProofValid_Error(t *testing.T) { desc: "relay must be deserializable", newProof: func(t *testing.T) *prooftypes.Proof { // Construct a session tree to which we'll add 1 unserializable relay. - mangledRelaySessionTree := newEmptySessionTree(t, validSessionHeader, supplierAddr) + mangledRelaySessionTree := testtree.NewEmptySessionTree(t, validSessionHeader, supplierAddr) // Add the mangled relay to the session tree. 
err = mangledRelaySessionTree.Update([]byte{1}, mangledRelayBz, 1) @@ -311,7 +307,7 @@ func TestIsProofValid_Error(t *testing.T) { // Create a claim with a merkle root derived from a session tree // with an unserializable relay. - claim := newClaim(t, + claim := testtree.NewClaim(t, supplierAddr, validSessionHeader, mangledRelayMerkleRootBz, @@ -321,7 +317,7 @@ func TestIsProofValid_Error(t *testing.T) { // Construct new proof message derived from a session tree // with an unserializable relay. - return newProof(t, + return testtree.NewProof(t, supplierAddr, validSessionHeader, mangledRelaySessionTree, @@ -340,7 +336,7 @@ func TestIsProofValid_Error(t *testing.T) { // Construct a session tree with 1 relay with a session header containing // a session ID that doesn't match the proof session ID. numRelays := uint(1) - wrongRequestSessionIdSessionTree := newFilledSessionTree( + wrongRequestSessionIdSessionTree := testtree.NewFilledSessionTree( ctx, t, numRelays, supplierUid, supplierAddr, @@ -359,7 +355,7 @@ func TestIsProofValid_Error(t *testing.T) { // Create a claim with a merkle root derived from a relay // request containing the wrong session ID. - claim := newClaim(t, + claim := NewClaim(t, supplierAddr, validSessionHeader, wrongRequestSessionIdMerkleRootBz, @@ -369,7 +365,7 @@ func TestIsProofValid_Error(t *testing.T) { // Construct new proof message using the valid session header, // *not* the one used in the session tree's relay request. - return newProof(t, + return NewProof(t, supplierAddr, validSessionHeader, wrongRequestSessionIdSessionTree, @@ -389,7 +385,7 @@ func TestIsProofValid_Error(t *testing.T) { // Construct a session tree with 1 relay with a session header containing // a session ID that doesn't match the expected session ID. 
numRelays := uint(1) - wrongResponseSessionIdSessionTree := newFilledSessionTree( + wrongResponseSessionIdSessionTree := testtree.NewFilledSessionTree( ctx, t, numRelays, supplierUid, supplierAddr, @@ -408,7 +404,7 @@ func TestIsProofValid_Error(t *testing.T) { // Create a claim with a merkle root derived from a relay // response containing the wrong session ID. - claim := newClaim(t, + claim := testtree.NewClaim(t, supplierAddr, validSessionHeader, wrongResponseSessionIdMerkleRootBz, @@ -418,7 +414,7 @@ func TestIsProofValid_Error(t *testing.T) { // Construct new proof message using the valid session header, // *not* the one used in the session tree's relay response. - return newProof(t, + return testtree.NewProof(t, supplierAddr, validSessionHeader, wrongResponseSessionIdSessionTree, @@ -446,7 +442,7 @@ func TestIsProofValid_Error(t *testing.T) { // Construct a session tree with 1 relay with a session header containing // a session ID that doesn't match the expected session ID. - invalidRequestSignatureSessionTree := newEmptySessionTree(t, validSessionHeader, supplierAddr) + invalidRequestSignatureSessionTree := NewEmptySessionTree(t, validSessionHeader, supplierAddr) // Add the relay to the session tree. err = invalidRequestSignatureSessionTree.Update([]byte{1}, invalidRequestSignatureRelayBz, 1) @@ -463,7 +459,7 @@ func TestIsProofValid_Error(t *testing.T) { // Create a claim with a merkle root derived from a session tree // with an invalid relay request signature. - claim := newClaim(t, + claim := testtree.NewClaim(t, supplierAddr, validSessionHeader, invalidRequestSignatureMerkleRootBz, @@ -473,7 +469,7 @@ func TestIsProofValid_Error(t *testing.T) { // Construct new proof message derived from a session tree // with an invalid relay request signature. 
- return newProof(t, + return testtree.NewProof(t, supplierAddr, validSessionHeader, invalidRequestSignatureSessionTree, @@ -507,7 +503,7 @@ func TestIsProofValid_Error(t *testing.T) { // Construct a session tree with 1 relay with a session header containing // a session ID that doesn't match the expected session ID. - invalidResponseSignatureSessionTree := newEmptySessionTree(t, validSessionHeader, supplierAddr) + invalidResponseSignatureSessionTree := NewEmptySessionTree(t, validSessionHeader, supplierAddr) // Add the relay to the session tree. err = invalidResponseSignatureSessionTree.Update([]byte{1}, relayBz, 1) @@ -523,7 +519,7 @@ func TestIsProofValid_Error(t *testing.T) { // Create a claim with a merkle root derived from a session tree // with an invalid relay response signature. - claim := newClaim(t, + claim := NewClaim(t, supplierAddr, validSessionHeader, invalidResponseSignatureMerkleRootBz, @@ -533,7 +529,7 @@ func TestIsProofValid_Error(t *testing.T) { // Construct new proof message derived from a session tree // with an invalid relay response signature. - return newProof(t, + return NewProof(t, supplierAddr, validSessionHeader, invalidResponseSignatureSessionTree, @@ -555,7 +551,7 @@ func TestIsProofValid_Error(t *testing.T) { // Construct a new valid session tree for this test case because once the // closest proof has already been generated, the path cannot be changed. 
numRelays := uint(5) - wrongPathSessionTree := newFilledSessionTree( + wrongPathSessionTree := testtree.SessionTree( ctx, t, numRelays, supplierUid, supplierAddr, @@ -571,7 +567,7 @@ func TestIsProofValid_Error(t *testing.T) { claimCtx := keepertest.SetBlockHeight(ctx, claimMsgHeight) // Create an upsert the claim - claim := newClaim(t, + claim := testtree.NewClaim(t, supplierAddr, validSessionHeader, wrongPathMerkleRootBz, @@ -581,7 +577,7 @@ func TestIsProofValid_Error(t *testing.T) { // Construct new proof message derived from a session tree // with an invalid relay response signature. - return newProof(t, supplierAddr, validSessionHeader, wrongPathSessionTree, wrongClosestProofPath) + return testtree.NewProof(t, supplierAddr, validSessionHeader, wrongPathSessionTree, wrongClosestProofPath) }, expectedErr: prooftypes.ErrProofInvalidProof.Wrapf( "the path of the proof provided (%x) does not match one expected by the on-chain protocol (%x)", @@ -607,7 +603,7 @@ func TestIsProofValid_Error(t *testing.T) { // Construct a proof message with a session tree containing // a relay of insufficient difficulty. - return newProof(t, + return NewProof(t, supplierAddr, validSessionHeader, validSessionTree, @@ -633,7 +629,7 @@ func TestIsProofValid_Error(t *testing.T) { newProof: func(t *testing.T) *prooftypes.Proof { // Construct a new session tree corresponding to the unclaimed session. numRelays := uint(5) - unclaimedSessionTree := newFilledSessionTree( + unclaimedSessionTree := testtree.NewFilledSessionTree( ctx, t, numRelays, "wrong_supplier", wrongSupplierAddr, @@ -655,7 +651,7 @@ func TestIsProofValid_Error(t *testing.T) { // Construct new proof message using the supplier & session header // from the session which is *not* expected to be claimed. 
- return newProof(t, + return NewProof(t, wrongSupplierAddr, unclaimedSessionHeader, unclaimedSessionTree, @@ -672,7 +668,7 @@ func TestIsProofValid_Error(t *testing.T) { desc: "Valid proof cannot validate claim with an incorrect root", newProof: func(t *testing.T) *prooftypes.Proof { numRelays := uint(10) - wrongMerkleRootSessionTree := newFilledSessionTree( + wrongMerkleRootSessionTree := NewFilledSessionTree( ctx, t, numRelays, supplierUid, supplierAddr, @@ -688,7 +684,7 @@ func TestIsProofValid_Error(t *testing.T) { claimCtx := keepertest.SetBlockHeight(ctx, claimMsgHeight) // Create a claim with the incorrect Merkle root. - claim := newClaim(t, + claim := testtree.NewClaim(t, supplierAddr, validSessionHeader, wrongMerkleRootBz, @@ -697,7 +693,7 @@ func TestIsProofValid_Error(t *testing.T) { require.NoError(t, err) // Construct a valid session tree with 5 relays. - validSessionTree := newFilledSessionTree( + validSessionTree := testtree.NewFilledSessionTree( ctx, t, uint(5), supplierUid, supplierAddr, @@ -715,7 +711,7 @@ func TestIsProofValid_Error(t *testing.T) { validSessionHeader.GetSessionId(), ) - return newProof(t, + return testtree.NewProof(t, supplierAddr, validSessionHeader, validSessionTree, @@ -774,100 +770,6 @@ func TestIsProofValid_Error(t *testing.T) { } } -// newFilledSessionTree creates a new session tree with numRelays of relays -// filled out using the request and response headers provided where every -// relay is signed by the supplier and application respectively. -func newFilledSessionTree( - ctx context.Context, t *testing.T, - numRelays uint, - supplierKeyUid, supplierAddr string, - sessionTreeHeader, reqHeader, resHeader *sessiontypes.SessionHeader, - keyRing keyring.Keyring, - ringClient crypto.RingClient, -) relayer.SessionTree { - t.Helper() - - // Initialize an empty session tree with the given session header. - sessionTree := newEmptySessionTree(t, sessionTreeHeader, supplierAddr) - - // Add numRelays of relays to the session tree. 
- fillSessionTree( - ctx, t, - sessionTree, numRelays, - supplierKeyUid, supplierAddr, - reqHeader, resHeader, - keyRing, - ringClient, - ) - - return sessionTree -} - -// newEmptySessionTree creates a new empty session tree with for given session. -func newEmptySessionTree( - t *testing.T, - sessionTreeHeader *sessiontypes.SessionHeader, - supplierAddr string, -) relayer.SessionTree { - t.Helper() - - // Create a temporary session tree store directory for persistence. - testSessionTreeStoreDir, err := os.MkdirTemp("", "session_tree_store_dir") - require.NoError(t, err) - - // Delete the temporary session tree store directory after the test completes. - t.Cleanup(func() { - _ = os.RemoveAll(testSessionTreeStoreDir) - }) - - accAddress := cosmostypes.MustAccAddressFromBech32(supplierAddr) - - // Construct a session tree to add relays to and generate a proof from. - sessionTree, err := session.NewSessionTree( - sessionTreeHeader, - &accAddress, - testSessionTreeStoreDir, - ) - require.NoError(t, err) - - return sessionTree -} - -// fillSessionTree fills the session tree with valid signed relays. -// A total of numRelays relays are added to the session tree with -// increasing weights (relay 1 has weight 1, relay 2 has weight 2, etc.). 
-func fillSessionTree( - ctx context.Context, t *testing.T, - sessionTree relayer.SessionTree, - numRelays uint, - supplierKeyUid, supplierAddr string, - reqHeader, resHeader *sessiontypes.SessionHeader, - keyRing keyring.Keyring, - ringClient crypto.RingClient, -) { - t.Helper() - - for i := 0; i < int(numRelays); i++ { - relay := testrelayer.NewSignedEmptyRelay( - ctx, t, - supplierKeyUid, supplierAddr, - reqHeader, resHeader, - keyRing, - ringClient, - ) - relayBz, err := relay.Marshal() - require.NoError(t, err) - - relayKey, err := relay.GetHash() - require.NoError(t, err) - - relayWeight := uint64(i) - - err = sessionTree.Update(relayKey[:], relayBz, relayWeight) - require.NoError(t, err) - } -} - // getClosestRelayDifficulty returns the mining difficulty number which corresponds // to the relayHash stored in the sessionTree that is closest to the merkle proof // path provided. @@ -892,43 +794,3 @@ func getClosestRelayDifficulty( return protocol.GetDifficultyFromHash(relayHash) } - -// newProof creates a new proof structure. -func newProof( - t *testing.T, - supplierAddr string, - sessionHeader *sessiontypes.SessionHeader, - sessionTree relayer.SessionTree, - closestProofPath []byte, -) *prooftypes.Proof { - t.Helper() - - // Generate a closest proof from the session tree using closestProofPath. - merkleProof, err := sessionTree.ProveClosest(closestProofPath) - require.NoError(t, err) - require.NotNil(t, merkleProof) - - // Serialize the closest merkle proof. - merkleProofBz, err := merkleProof.Marshal() - require.NoError(t, err) - - return &prooftypes.Proof{ - SupplierAddress: supplierAddr, - SessionHeader: sessionHeader, - ClosestMerkleProof: merkleProofBz, - } -} - -func newClaim( - t *testing.T, - supplierAddr string, - sessionHeader *sessiontypes.SessionHeader, - rootHash []byte, -) *prooftypes.Claim { - // Create a new claim. 
- return &prooftypes.Claim{ - SupplierAddress: supplierAddr, - SessionHeader: sessionHeader, - RootHash: rootHash, - } -} diff --git a/x/tokenomics/keeper/keeper_settle_pending_claims_test.go b/x/tokenomics/keeper/keeper_settle_pending_claims_test.go index 09324a875..7091ce090 100644 --- a/x/tokenomics/keeper/keeper_settle_pending_claims_test.go +++ b/x/tokenomics/keeper/keeper_settle_pending_claims_test.go @@ -5,25 +5,27 @@ import ( "fmt" "testing" + "cosmossdk.io/depinject" "cosmossdk.io/math" + "github.com/cosmos/cosmos-sdk/crypto/keyring" "github.com/cosmos/cosmos-sdk/types" cosmostypes "github.com/cosmos/cosmos-sdk/types" - sdk "github.com/cosmos/cosmos-sdk/types" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "github.com/pokt-network/poktroll/cmd/poktrolld/cmd" + "github.com/pokt-network/poktroll/pkg/crypto/rings" + "github.com/pokt-network/poktroll/pkg/polylog/polyzero" testutilevents "github.com/pokt-network/poktroll/testutil/events" keepertest "github.com/pokt-network/poktroll/testutil/keeper" testutilproof "github.com/pokt-network/poktroll/testutil/proof" - "github.com/pokt-network/poktroll/testutil/sample" - testsession "github.com/pokt-network/poktroll/testutil/session" + "github.com/pokt-network/poktroll/testutil/testkeyring" + "github.com/pokt-network/poktroll/testutil/testtree" apptypes "github.com/pokt-network/poktroll/x/application/types" prooftypes "github.com/pokt-network/poktroll/x/proof/types" sessiontypes "github.com/pokt-network/poktroll/x/session/types" "github.com/pokt-network/poktroll/x/shared" sharedtypes "github.com/pokt-network/poktroll/x/shared/types" - suppliertypes "github.com/pokt-network/poktroll/x/supplier/types" tokenomicstypes "github.com/pokt-network/poktroll/x/tokenomics/types" ) @@ -54,12 +56,101 @@ type TestSuite struct { func (s *TestSuite) SetupTest() { t := s.T() - supplierAddr := sample.AccAddress() - appAddr := sample.AccAddress() - s.keepers, s.ctx = 
keepertest.NewTokenomicsModuleKeepers(s.T(), nil) s.sdkCtx = cosmostypes.UnwrapSDKContext(s.ctx) + // Construct a keyring to hold the keypairs for the accounts used in the test. + keyRing := keyring.NewInMemory(s.keepers.Codec) + + // Create a pre-generated account iterator to create accounts for the test. + preGeneratedAccts := testkeyring.PreGeneratedAccounts() + + // Create accounts in the account keeper with corresponding keys in the keyring + // // for the applications and suppliers used in the tests. + supplierAddr := testkeyring.CreateOnChainAccount( + s.ctx, t, + "supplier", + keyRing, + s.keepers.AccountKeeper, + preGeneratedAccts, + ).String() + appAddr := testkeyring.CreateOnChainAccount( + s.ctx, t, + "app", + keyRing, + s.keepers.AccountKeeper, + preGeneratedAccts, + ).String() + + service := &sharedtypes.Service{Id: testServiceId} + + // Get the session for the application/supplier pair which is expected + // to be claimed and for which a valid proof would be accepted. + sessionReq := &sessiontypes.QueryGetSessionRequest{ + ApplicationAddress: appAddr, + Service: service, + BlockHeight: 1, + } + sessionRes, err := s.keepers.GetSession(s.ctx, sessionReq) + require.NoError(t, err) + validSessionHeader := sessionRes.Session.Header + + // Construct a ringClient to get the application's ring & verify the relay + // request signature. + ringClient, err := rings.NewRingClient(depinject.Supply( + polyzero.NewLogger(), + prooftypes.NewAppKeeperQueryClient(s.keepers.ApplicationKeeper), + prooftypes.NewAccountKeeperQueryClient(s.keepers.AccountKeeper), + prooftypes.NewSharedKeeperQueryClient(s.keepers.SharedKeeper, s.keepers.SessionKeeper), + )) + require.NoError(t, err) + + // Construct a valid session tree with 5 relays. 
+ numRelays := uint(5) + validSessionTree := testtree.NewFilledSessionTree( + s.ctx, t, + numRelays, + "supplier", supplierAddr, + validSessionHeader, validSessionHeader, validSessionHeader, + keyRing, + ringClient, + ) + + // Advance the block height to the earliest claim commit height. + sharedParams := s.keepers.SharedKeeper.GetParams(s.ctx) + claimMsgHeight := shared.GetEarliestSupplierClaimCommitHeight( + &sharedParams, + validSessionHeader.GetSessionEndBlockHeight(), + blockHeaderHash, + supplierAddr, + ) + sdkCtx := cosmostypes.UnwrapSDKContext(s.ctx) + sdkCtx = sdkCtx.WithBlockHeight(claimMsgHeight) + + merkleRootBz, err := validSessionTree.Flush() + require.NoError(t, err) + + // Compute the difficulty in bits of the closest relay from the valid session tree. + // validClosestRelayDifficultyBits := getClosestRelayDifficulty(t, validSessionTree, expectedMerkleProofPath) + + /* + // Prepare supplier account + supplierAddr, supplierPubKey := sample.AccAddressAndPubKey() + supplierAccAddr, err := sdk.AccAddressFromBech32(supplierAddr) + require.NoError(t, err) + supplierAcc := s.keepers.NewAccountWithAddress(s.ctx, supplierAccAddr) + supplierAcc.SetPubKey(supplierPubKey) + s.keepers.SetAccount(s.ctx, supplierAcc) + + // Prepare application account + appAddr, appPubKey := sample.AccAddressAndPubKey() + appAccAddr, err := sdk.AccAddressFromBech32(appAddr) + require.NoError(t, err) + appAcc := s.keepers.NewAccountWithAddress(s.ctx, appAccAddr) + appAcc.SetPubKey(appPubKey) + s.keepers.SetAccount(s.ctx, appAcc) + */ + // Set the suite expectedComputeUnits to equal the default proof_requirement_threshold // such that by default, s.claim will require a proof 100% of the time. 
s.expectedComputeUnits = prooftypes.DefaultProofRequirementThreshold @@ -67,17 +158,8 @@ func (s *TestSuite) SetupTest() { // Prepare a claim that can be inserted s.claim = prooftypes.Claim{ SupplierAddress: supplierAddr, - SessionHeader: &sessiontypes.SessionHeader{ - ApplicationAddress: appAddr, - Service: &sharedtypes.Service{Id: testServiceId}, - SessionId: "session_id", - SessionStartBlockHeight: 1, - SessionEndBlockHeight: testsession.GetSessionEndHeightWithDefaultParams(1), - }, - - // Set the suite expectedComputeUnits to be equal to the default threshold. - // This SHOULD make the claim require a proof given the default proof parameters. - RootHash: testutilproof.SmstRootWithSum(s.expectedComputeUnits), + SessionHeader: validSessionHeader, + RootHash: merkleRootBz, } // Prepare a proof that can be inserted @@ -89,39 +171,19 @@ func (s *TestSuite) SetupTest() { supplierStake := types.NewCoin("upokt", math.NewInt(1000000)) supplier := sharedtypes.Supplier{ - Address: supplierAddr, - Stake: &supplierStake, + Address: supplierAddr, + Stake: &supplierStake, + Services: []*sharedtypes.SupplierServiceConfig{{Service: service}}, } s.keepers.SetSupplier(s.ctx, supplier) appStake := types.NewCoin("upokt", math.NewInt(1000000)) app := apptypes.Application{ - Address: appAddr, - Stake: &appStake, + Address: appAddr, + Stake: &appStake, + ServiceConfigs: []*sharedtypes.ApplicationServiceConfig{{Service: service}}, } s.keepers.SetApplication(s.ctx, app) - - // Mint some coins to the supplier and application modules - moduleBaseMint := types.NewCoins(sdk.NewCoin("upokt", math.NewInt(690000000000000042))) - - err := s.keepers.MintCoins(s.sdkCtx, suppliertypes.ModuleName, moduleBaseMint) - require.NoError(t, err) - - err = s.keepers.MintCoins(s.sdkCtx, apptypes.ModuleName, moduleBaseMint) - require.NoError(t, err) - - // Send some coins to the supplier and application accounts - sendAmount := types.NewCoins(sdk.NewCoin("upokt", math.NewInt(1000000))) - - err = 
s.keepers.SendCoinsFromModuleToAccount(s.sdkCtx, suppliertypes.ModuleName, sdk.AccAddress(supplier.Address), sendAmount) - require.NoError(t, err) - acc := s.keepers.GetAccount(s.ctx, sdk.AccAddress(supplierAddr)) - require.NotNil(t, acc) - - err = s.keepers.SendCoinsFromModuleToAccount(s.sdkCtx, apptypes.ModuleName, sdk.AccAddress(app.Address), sendAmount) - require.NoError(t, err) - acc = s.keepers.GetAccount(s.ctx, sdk.AccAddress(appAddr)) - require.NotNil(t, acc) } // TestSettleExpiringClaimsSuite tests the claim settlement process. diff --git a/x/tokenomics/keeper/settle_pending_claims.go b/x/tokenomics/keeper/settle_pending_claims.go index 562870b84..1eb535233 100644 --- a/x/tokenomics/keeper/settle_pending_claims.go +++ b/x/tokenomics/keeper/settle_pending_claims.go @@ -82,8 +82,7 @@ func (k Keeper) SettlePendingClaims(ctx sdk.Context) ( proofIsRequired := (proofRequirement != prooftypes.ProofRequirementReason_NOT_REQUIRED) if proofIsRequired { - // EXPIRATION_REASON_UNSPECIFIED is the default - var expirationReason types.ClaimExpirationReason = types.ClaimExpirationReason_EXPIRATION_REASON_UNSPECIFIED + var expirationReason types.ClaimExpirationReason = types.ClaimExpirationReason_EXPIRATION_REASON_UNSPECIFIED // EXPIRATION_REASON_UNSPECIFIED is the default if isProofFound { var isProofValid bool isProofValid, err = k.proofKeeper.IsProofValid(ctx, &proof) diff --git a/x/tokenomics/types/expected_keepers.go b/x/tokenomics/types/expected_keepers.go index 2d6ce532c..eb06daa7a 100644 --- a/x/tokenomics/types/expected_keepers.go +++ b/x/tokenomics/types/expected_keepers.go @@ -5,6 +5,7 @@ package types import ( "context" + "github.com/cosmos/cosmos-sdk/types" sdk "github.com/cosmos/cosmos-sdk/types" banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" @@ -16,7 +17,13 @@ import ( // AccountKeeper defines the expected interface for the Account module. 
type AccountKeeper interface { - GetAccount(ctx context.Context, addr sdk.AccAddress) sdk.AccountI // only used for simulation + // Only used for testing & simulation + GetAccount(ctx context.Context, addr sdk.AccAddress) sdk.AccountI + SetAccount(context.Context, types.AccountI) + // Return a new account with the next account number and the specified address. Does not save the new account to the store. + NewAccountWithAddress(context.Context, sdk.AccAddress) sdk.AccountI + // Fetch the next account number, and increment the internal counter. + NextAccountNumber(context.Context) uint64 } // BankKeeper defines the expected interface for the Bank module. @@ -34,11 +41,11 @@ type BankKeeper interface { type ApplicationKeeper interface { GetApplication(ctx context.Context, appAddr string) (app apptypes.Application, found bool) SetApplication(ctx context.Context, app apptypes.Application) + GetAllApplications(ctx context.Context) []apptypes.Application } type ProofKeeper interface { GetAllClaims(ctx context.Context) []prooftypes.Claim - GetAllProofs(ctx context.Context) []prooftypes.Proof RemoveClaim(ctx context.Context, sessionId, supplierAddr string) GetProof(ctx context.Context, sessionId, supplierAddr string) (proof prooftypes.Proof, isProofFound bool) RemoveProof(ctx context.Context, sessionId, supplierAddr string) @@ -47,6 +54,7 @@ type ProofKeeper interface { IsProofValid(ctx context.Context, proof *prooftypes.Proof) (valid bool, err error) // Only used for testing & simulation + GetAllProofs(ctx context.Context) []prooftypes.Proof UpsertClaim(ctx context.Context, claim prooftypes.Claim) UpsertProof(ctx context.Context, claim prooftypes.Proof) GetParams(ctx context.Context) prooftypes.Params From 58591c43951bb309495c43b8f2082ead011dad55 Mon Sep 17 00:00:00 2001 From: Daniel Olshansky Date: Tue, 23 Jul 2024 17:53:19 -0700 Subject: [PATCH 21/29] The test passes --- testutil/testtree/tree.go | 12 +- .../keeper/msg_server_submit_proof_test.go | 2 +- 
x/proof/keeper/proof_validation.go | 7 +- x/proof/keeper/proof_validation_test.go | 20 +-- .../keeper_settle_pending_claims_test.go | 120 +++++++++--------- 5 files changed, 85 insertions(+), 76 deletions(-) diff --git a/testutil/testtree/tree.go b/testutil/testtree/tree.go index 691bba903..e56be9a40 100644 --- a/testutil/testtree/tree.go +++ b/testutil/testtree/tree.go @@ -5,7 +5,7 @@ import ( "os" "testing" - "github.com/99designs/keyring" + "github.com/cosmos/cosmos-sdk/crypto/keyring" cosmostypes "github.com/cosmos/cosmos-sdk/types" "github.com/stretchr/testify/require" @@ -104,6 +104,7 @@ func FillSessionTree( relayKey, err := relay.GetHash() require.NoError(t, err) + // See FillSessionTreeExpectedComputeUnits below for explanation. relayWeight := uint64(i) err = sessionTree.Update(relayKey[:], relayBz, relayWeight) @@ -111,6 +112,15 @@ func FillSessionTree( } } +// FillSessionTreeExpectedComputeUnits returns the number of expected compute units +// to covert numRelays (in a test scenario) whereby every subsequent relay costs +// an addition compute unit. +// This is basic random approach selected for testing purposes. Don't think too +// deeply about it. +func FillSessionTreeExpectedComputeUnits(numRelays uint) uint64 { + return uint64(numRelays * (numRelays - 1) / 2) +} + // NewProof creates a new proof structure. 
func NewProof( t *testing.T, diff --git a/x/proof/keeper/msg_server_submit_proof_test.go b/x/proof/keeper/msg_server_submit_proof_test.go index 4dfee7473..b4a076fd6 100644 --- a/x/proof/keeper/msg_server_submit_proof_test.go +++ b/x/proof/keeper/msg_server_submit_proof_test.go @@ -48,7 +48,7 @@ var ( func init() { // The CometBFT header hash is 32 bytes: https://docs.cometbft.com/main/spec/core/data_structures blockHeaderHash = make([]byte, 32) - expectedMerkleProofPath = protocol.GetPathForProof(blockHeaderHash, "TODO_BLOCKER_session_id_currently_unused") + // expectedMerkleProofPath = protocol.GetPathForProof(blockHeaderHash, "TODO_BLOCKER_session_id_currently_unused") } func TestMsgServer_SubmitProof_Success(t *testing.T) { diff --git a/x/proof/keeper/proof_validation.go b/x/proof/keeper/proof_validation.go index 4a2b40776..fd46da9d7 100644 --- a/x/proof/keeper/proof_validation.go +++ b/x/proof/keeper/proof_validation.go @@ -164,11 +164,15 @@ func (k Keeper) IsProofValid( // Get the proof module's governance parameters. // TODO_FOLLOWUP(@olshansk, #690): Get the difficulty associated with the service params := k.GetParams(ctx) + relayDifficultyTargetHash := params.RelayDifficultyTargetHash + if relayDifficultyTargetHash == nil || len(relayDifficultyTargetHash) == 0 { + relayDifficultyTargetHash = types.DefaultRelayDifficultyTargetHash + } // Verify the relay difficulty is above the minimum required to earn rewards. 
if err = validateRelayDifficulty( relayBz, - params.RelayDifficultyTargetHash, + relayDifficultyTargetHash, sessionHeader.Service.Id, ); err != nil { return false, err @@ -247,6 +251,7 @@ func (k Keeper) validateClosestPath( k.logger.Info("E2E_DEBUG: height for block hash when verifying the proof", earliestSupplierProofCommitHeight, sessionHeader.GetSessionId()) expectedProofPath := protocol.GetPathForProof(proofPathSeedBlockHash, sessionHeader.GetSessionId()) + fmt.Println("OLSH OMG", proofPathSeedBlockHash, expectedProofPath) if !bytes.Equal(proof.Path, expectedProofPath) { return types.ErrProofInvalidProof.Wrapf( "the path of the proof provided (%x) does not match one expected by the on-chain protocol (%x)", diff --git a/x/proof/keeper/proof_validation_test.go b/x/proof/keeper/proof_validation_test.go index 3c8638a3f..70a0cc87c 100644 --- a/x/proof/keeper/proof_validation_test.go +++ b/x/proof/keeper/proof_validation_test.go @@ -355,7 +355,7 @@ func TestIsProofValid_Error(t *testing.T) { // Create a claim with a merkle root derived from a relay // request containing the wrong session ID. - claim := NewClaim(t, + claim := testtree.NewClaim(t, supplierAddr, validSessionHeader, wrongRequestSessionIdMerkleRootBz, @@ -365,7 +365,7 @@ func TestIsProofValid_Error(t *testing.T) { // Construct new proof message using the valid session header, // *not* the one used in the session tree's relay request. - return NewProof(t, + return testtree.NewProof(t, supplierAddr, validSessionHeader, wrongRequestSessionIdSessionTree, @@ -442,7 +442,7 @@ func TestIsProofValid_Error(t *testing.T) { // Construct a session tree with 1 relay with a session header containing // a session ID that doesn't match the expected session ID. - invalidRequestSignatureSessionTree := NewEmptySessionTree(t, validSessionHeader, supplierAddr) + invalidRequestSignatureSessionTree := testtree.NewEmptySessionTree(t, validSessionHeader, supplierAddr) // Add the relay to the session tree. 
err = invalidRequestSignatureSessionTree.Update([]byte{1}, invalidRequestSignatureRelayBz, 1) @@ -503,7 +503,7 @@ func TestIsProofValid_Error(t *testing.T) { // Construct a session tree with 1 relay with a session header containing // a session ID that doesn't match the expected session ID. - invalidResponseSignatureSessionTree := NewEmptySessionTree(t, validSessionHeader, supplierAddr) + invalidResponseSignatureSessionTree := testtree.NewEmptySessionTree(t, validSessionHeader, supplierAddr) // Add the relay to the session tree. err = invalidResponseSignatureSessionTree.Update([]byte{1}, relayBz, 1) @@ -519,7 +519,7 @@ func TestIsProofValid_Error(t *testing.T) { // Create a claim with a merkle root derived from a session tree // with an invalid relay response signature. - claim := NewClaim(t, + claim := testtree.NewClaim(t, supplierAddr, validSessionHeader, invalidResponseSignatureMerkleRootBz, @@ -529,7 +529,7 @@ func TestIsProofValid_Error(t *testing.T) { // Construct new proof message derived from a session tree // with an invalid relay response signature. - return NewProof(t, + return testtree.NewProof(t, supplierAddr, validSessionHeader, invalidResponseSignatureSessionTree, @@ -551,7 +551,7 @@ func TestIsProofValid_Error(t *testing.T) { // Construct a new valid session tree for this test case because once the // closest proof has already been generated, the path cannot be changed. numRelays := uint(5) - wrongPathSessionTree := testtree.SessionTree( + wrongPathSessionTree := testtree.NewFilledSessionTree( ctx, t, numRelays, supplierUid, supplierAddr, @@ -603,7 +603,7 @@ func TestIsProofValid_Error(t *testing.T) { // Construct a proof message with a session tree containing // a relay of insufficient difficulty. 
- return NewProof(t, + return testtree.NewProof(t, supplierAddr, validSessionHeader, validSessionTree, @@ -651,7 +651,7 @@ func TestIsProofValid_Error(t *testing.T) { // Construct new proof message using the supplier & session header // from the session which is *not* expected to be claimed. - return NewProof(t, + return testtree.NewProof(t, wrongSupplierAddr, unclaimedSessionHeader, unclaimedSessionTree, @@ -668,7 +668,7 @@ func TestIsProofValid_Error(t *testing.T) { desc: "Valid proof cannot validate claim with an incorrect root", newProof: func(t *testing.T) *prooftypes.Proof { numRelays := uint(10) - wrongMerkleRootSessionTree := NewFilledSessionTree( + wrongMerkleRootSessionTree := testtree.NewFilledSessionTree( ctx, t, numRelays, supplierUid, supplierAddr, diff --git a/x/tokenomics/keeper/keeper_settle_pending_claims_test.go b/x/tokenomics/keeper/keeper_settle_pending_claims_test.go index 7091ce090..60a466ca0 100644 --- a/x/tokenomics/keeper/keeper_settle_pending_claims_test.go +++ b/x/tokenomics/keeper/keeper_settle_pending_claims_test.go @@ -14,6 +14,7 @@ import ( "github.com/stretchr/testify/suite" "github.com/pokt-network/poktroll/cmd/poktrolld/cmd" + "github.com/pokt-network/poktroll/pkg/crypto/protocol" "github.com/pokt-network/poktroll/pkg/crypto/rings" "github.com/pokt-network/poktroll/pkg/polylog/polyzero" testutilevents "github.com/pokt-network/poktroll/testutil/events" @@ -57,7 +58,10 @@ func (s *TestSuite) SetupTest() { t := s.T() s.keepers, s.ctx = keepertest.NewTokenomicsModuleKeepers(s.T(), nil) - s.sdkCtx = cosmostypes.UnwrapSDKContext(s.ctx) + sdkCtx := cosmostypes.UnwrapSDKContext(s.ctx) + sdkCtx = sdkCtx.WithBlockHeight(1) + s.sdkCtx = sdkCtx + s.ctx = sdkCtx // Construct a keyring to hold the keypairs for the accounts used in the test. 
keyRing := keyring.NewInMemory(s.keepers.Codec) @@ -82,8 +86,42 @@ func (s *TestSuite) SetupTest() { preGeneratedAccts, ).String() + /* + // Prepare supplier account + supplierAddr, supplierPubKey := sample.AccAddressAndPubKey() + supplierAccAddr, err := sdk.AccAddressFromBech32(supplierAddr) + require.NoError(t, err) + supplierAcc := s.keepers.NewAccountWithAddress(s.ctx, supplierAccAddr) + supplierAcc.SetPubKey(supplierPubKey) + s.keepers.SetAccount(s.ctx, supplierAcc) + + // Prepare application account + appAddr, appPubKey := sample.AccAddressAndPubKey() + appAccAddr, err := sdk.AccAddressFromBech32(appAddr) + require.NoError(t, err) + appAcc := s.keepers.NewAccountWithAddress(s.ctx, appAccAddr) + appAcc.SetPubKey(appPubKey) + s.keepers.SetAccount(s.ctx, appAcc) + */ + service := &sharedtypes.Service{Id: testServiceId} + supplierStake := types.NewCoin("upokt", math.NewInt(1000000)) + supplier := sharedtypes.Supplier{ + Address: supplierAddr, + Stake: &supplierStake, + Services: []*sharedtypes.SupplierServiceConfig{{Service: service}}, + } + s.keepers.SetSupplier(s.ctx, supplier) + + appStake := types.NewCoin("upokt", math.NewInt(1000000)) + app := apptypes.Application{ + Address: appAddr, + Stake: &appStake, + ServiceConfigs: []*sharedtypes.ApplicationServiceConfig{{Service: service}}, + } + s.keepers.SetApplication(s.ctx, app) + // Get the session for the application/supplier pair which is expected // to be claimed and for which a valid proof would be accepted. sessionReq := &sessiontypes.QueryGetSessionRequest{ @@ -91,9 +129,9 @@ func (s *TestSuite) SetupTest() { Service: service, BlockHeight: 1, } - sessionRes, err := s.keepers.GetSession(s.ctx, sessionReq) + sessionRes, err := s.keepers.GetSession(s.sdkCtx, sessionReq) require.NoError(t, err) - validSessionHeader := sessionRes.Session.Header + sessionHeader := sessionRes.Session.Header // Construct a ringClient to get the application's ring & verify the relay // request signature. 
@@ -107,83 +145,39 @@ func (s *TestSuite) SetupTest() { // Construct a valid session tree with 5 relays. numRelays := uint(5) - validSessionTree := testtree.NewFilledSessionTree( + sessionTree := testtree.NewFilledSessionTree( s.ctx, t, numRelays, "supplier", supplierAddr, - validSessionHeader, validSessionHeader, validSessionHeader, + sessionHeader, sessionHeader, sessionHeader, keyRing, ringClient, ) + s.expectedComputeUnits = testtree.FillSessionTreeExpectedComputeUnits(numRelays) + + blockHeaderHash := make([]byte, 0) + expectedMerkleProofPath := protocol.GetPathForProof(blockHeaderHash, sessionHeader.SessionId) + fmt.Println("OLSH OMG", blockHeaderHash, expectedMerkleProofPath) // Advance the block height to the earliest claim commit height. sharedParams := s.keepers.SharedKeeper.GetParams(s.ctx) claimMsgHeight := shared.GetEarliestSupplierClaimCommitHeight( &sharedParams, - validSessionHeader.GetSessionEndBlockHeight(), + sessionHeader.GetSessionEndBlockHeight(), blockHeaderHash, supplierAddr, ) - sdkCtx := cosmostypes.UnwrapSDKContext(s.ctx) - sdkCtx = sdkCtx.WithBlockHeight(claimMsgHeight) + sdkCtx = cosmostypes.UnwrapSDKContext(s.ctx) + sdkCtx = sdkCtx.WithBlockHeight(claimMsgHeight).WithHeaderHash(blockHeaderHash) + s.ctx = sdkCtx + s.sdkCtx = sdkCtx - merkleRootBz, err := validSessionTree.Flush() + merkleRootBz, err := sessionTree.Flush() require.NoError(t, err) - // Compute the difficulty in bits of the closest relay from the valid session tree. 
- // validClosestRelayDifficultyBits := getClosestRelayDifficulty(t, validSessionTree, expectedMerkleProofPath) - - /* - // Prepare supplier account - supplierAddr, supplierPubKey := sample.AccAddressAndPubKey() - supplierAccAddr, err := sdk.AccAddressFromBech32(supplierAddr) - require.NoError(t, err) - supplierAcc := s.keepers.NewAccountWithAddress(s.ctx, supplierAccAddr) - supplierAcc.SetPubKey(supplierPubKey) - s.keepers.SetAccount(s.ctx, supplierAcc) - - // Prepare application account - appAddr, appPubKey := sample.AccAddressAndPubKey() - appAccAddr, err := sdk.AccAddressFromBech32(appAddr) - require.NoError(t, err) - appAcc := s.keepers.NewAccountWithAddress(s.ctx, appAccAddr) - appAcc.SetPubKey(appPubKey) - s.keepers.SetAccount(s.ctx, appAcc) - */ - - // Set the suite expectedComputeUnits to equal the default proof_requirement_threshold - // such that by default, s.claim will require a proof 100% of the time. - s.expectedComputeUnits = prooftypes.DefaultProofRequirementThreshold - // Prepare a claim that can be inserted - s.claim = prooftypes.Claim{ - SupplierAddress: supplierAddr, - SessionHeader: validSessionHeader, - RootHash: merkleRootBz, - } - - // Prepare a proof that can be inserted - s.proof = prooftypes.Proof{ - SupplierAddress: s.claim.SupplierAddress, - SessionHeader: s.claim.SessionHeader, - // ClosestMerkleProof: - } - - supplierStake := types.NewCoin("upokt", math.NewInt(1000000)) - supplier := sharedtypes.Supplier{ - Address: supplierAddr, - Stake: &supplierStake, - Services: []*sharedtypes.SupplierServiceConfig{{Service: service}}, - } - s.keepers.SetSupplier(s.ctx, supplier) - - appStake := types.NewCoin("upokt", math.NewInt(1000000)) - app := apptypes.Application{ - Address: appAddr, - Stake: &appStake, - ServiceConfigs: []*sharedtypes.ApplicationServiceConfig{{Service: service}}, - } - s.keepers.SetApplication(s.ctx, app) + s.claim = *testtree.NewClaim(t, supplierAddr, sessionHeader, merkleRootBz) + s.proof = *testtree.NewProof(t, 
supplierAddr, sessionHeader, sessionTree, expectedMerkleProofPath) } // TestSettleExpiringClaimsSuite tests the claim settlement process. @@ -340,8 +334,8 @@ func (s *TestSuite) TestSettlePendingClaims_ClaimExpired_ProofRequired_InvalidOn fmt.Println("expectedEvents", expectedEvents) // Validate the event expectedEvent := expectedEvents[0] - require.Equal(t, s.expectedComputeUnits, expectedEvent.GetNumComputeUnits()) require.Equal(t, tokenomicstypes.ClaimExpirationReason_PROOF_INVALID, expectedEvent.GetExpirationReason()) + require.Equal(t, s.expectedComputeUnits, expectedEvent.GetNumComputeUnits()) } func (s *TestSuite) TestSettlePendingClaims_ClaimSettled_ProofRequiredAndProvided_ViaThreshold() { From d21f9fc4c40ac0d3741aaa802a5b1a16ef8813af Mon Sep 17 00:00:00 2001 From: Daniel Olshansky Date: Tue, 23 Jul 2024 18:03:21 -0700 Subject: [PATCH 22/29] About to do a self review --- x/proof/keeper/proof_validation.go | 12 ------------ .../keeper/keeper_settle_pending_claims_test.go | 13 ++++++------- 2 files changed, 6 insertions(+), 19 deletions(-) diff --git a/x/proof/keeper/proof_validation.go b/x/proof/keeper/proof_validation.go index fd46da9d7..962495138 100644 --- a/x/proof/keeper/proof_validation.go +++ b/x/proof/keeper/proof_validation.go @@ -31,9 +31,7 @@ package keeper import ( "bytes" "context" - "fmt" - sdk "github.com/cosmos/cosmos-sdk/types" "github.com/pokt-network/smt" "github.com/pokt-network/poktroll/pkg/crypto/protocol" @@ -59,19 +57,10 @@ func (k Keeper) IsProofValid( // Retrieve the supplier's public key. 
supplierAddr := proof.SupplierAddress - supplierAccAddr, err := sdk.AccAddressFromBech32(supplierAddr) - if err != nil { - return false, err - } - supplierAccount := k.accountKeeper.GetAccount(ctx, supplierAccAddr) - fmt.Println("OLSH", supplierAccAddr, supplierAccount.GetPubKey()) - // require.NotNil(t, acc) - supplierPubKey, err := k.accountQuerier.GetPubKeyFromAddress(ctx, supplierAddr) if err != nil { return false, err } - fmt.Println("OLSH3", supplierPubKey) // Validate the session header. var onChainSession *sessiontypes.Session @@ -251,7 +240,6 @@ func (k Keeper) validateClosestPath( k.logger.Info("E2E_DEBUG: height for block hash when verifying the proof", earliestSupplierProofCommitHeight, sessionHeader.GetSessionId()) expectedProofPath := protocol.GetPathForProof(proofPathSeedBlockHash, sessionHeader.GetSessionId()) - fmt.Println("OLSH OMG", proofPathSeedBlockHash, expectedProofPath) if !bytes.Equal(proof.Path, expectedProofPath) { return types.ErrProofInvalidProof.Wrapf( "the path of the proof provided (%x) does not match one expected by the on-chain protocol (%x)", diff --git a/x/tokenomics/keeper/keeper_settle_pending_claims_test.go b/x/tokenomics/keeper/keeper_settle_pending_claims_test.go index 60a466ca0..dfac9acf6 100644 --- a/x/tokenomics/keeper/keeper_settle_pending_claims_test.go +++ b/x/tokenomics/keeper/keeper_settle_pending_claims_test.go @@ -144,7 +144,7 @@ func (s *TestSuite) SetupTest() { require.NoError(t, err) // Construct a valid session tree with 5 relays. - numRelays := uint(5) + numRelays := uint(10) sessionTree := testtree.NewFilledSessionTree( s.ctx, t, numRelays, @@ -157,7 +157,6 @@ func (s *TestSuite) SetupTest() { blockHeaderHash := make([]byte, 0) expectedMerkleProofPath := protocol.GetPathForProof(blockHeaderHash, sessionHeader.SessionId) - fmt.Println("OLSH OMG", blockHeaderHash, expectedMerkleProofPath) // Advance the block height to the earliest claim commit height. 
sharedParams := s.keepers.SharedKeeper.GetParams(s.ctx) @@ -282,8 +281,8 @@ func (s *TestSuite) TestSettlePendingClaims_ClaimExpired_ProofRequiredAndNotProv fmt.Println("expectedEvents", expectedEvents) // Validate the event expectedEvent := expectedEvents[0] - require.Equal(t, s.expectedComputeUnits, expectedEvent.GetNumComputeUnits()) require.Equal(t, tokenomicstypes.ClaimExpirationReason_PROOF_MISSING, expectedEvent.GetExpirationReason()) + require.Equal(t, s.expectedComputeUnits, expectedEvent.GetNumComputeUnits()) } func (s *TestSuite) TestSettlePendingClaims_ClaimExpired_ProofRequired_InvalidOneProvided() { @@ -326,7 +325,7 @@ func (s *TestSuite) TestSettlePendingClaims_ClaimExpired_ProofRequired_InvalidOn // Confirm an expiration event was emitted events := sdkCtx.EventManager().Events() - require.Len(t, events, 17) // minting, burning, settling, etc.. + require.Len(t, events, 5) // minting, burning, settling, etc.. expectedEvents := testutilevents.FilterEvents[*tokenomicstypes.EventClaimExpired](t, events, "poktroll.tokenomics.EventClaimExpired") require.Len(t, expectedEvents, 1) @@ -383,7 +382,7 @@ func (s *TestSuite) TestSettlePendingClaims_ClaimSettled_ProofRequiredAndProvide // Validate the event expectedEvent := expectedEvents[0] - require.NotEqual(t, prooftypes.ProofRequirementReason_NOT_REQUIRED, expectedEvent.GetProofRequirement()) + require.Equal(t, prooftypes.ProofRequirementReason_THRESHOLD, expectedEvent.GetProofRequirement()) require.Equal(t, s.expectedComputeUnits, expectedEvent.GetNumComputeUnits()) } @@ -400,7 +399,7 @@ func (s *TestSuite) TestClaimSettlement_ClaimSettled_ProofRequiredAndProvided_Vi // matches s.claim. 
err := s.keepers.ProofKeeper.SetParams(ctx, prooftypes.Params{ ProofRequestProbability: 1, - // +1 to push the threshold above s.claim's compute units + // +1 to push the requirement threshold ABOVE s.claim's compute units ProofRequirementThreshold: s.expectedComputeUnits + 1, }) require.NoError(t, err) @@ -440,7 +439,7 @@ func (s *TestSuite) TestClaimSettlement_ClaimSettled_ProofRequiredAndProvided_Vi events, "poktroll.tokenomics.EventClaimSettled") require.Len(t, expectedEvents, 1) expectedEvent := expectedEvents[0] - require.NotEqual(t, prooftypes.ProofRequirementReason_NOT_REQUIRED, expectedEvent.GetProofRequirement()) + require.Equal(t, prooftypes.ProofRequirementReason_PROBABILISTIC, expectedEvent.GetProofRequirement()) require.Equal(t, s.expectedComputeUnits, expectedEvent.GetNumComputeUnits()) } From 99783a865b1d263f1253f60dd629ba551ac3a5cb Mon Sep 17 00:00:00 2001 From: Daniel Olshansky Date: Tue, 23 Jul 2024 18:17:00 -0700 Subject: [PATCH 23/29] Review of #690 by @olshansk (#699) - This is a review of #690 - Creating a custom PR to make it easier to track the changes for future reference --- Co-authored-by: Bryan White --- Makefile | 26 ++++++-- api/poktroll/proof/params.pulsar.go | 1 + pkg/client/events/query_client_test.go | 1 - pkg/crypto/protocol/difficulty.go | 34 ++++++++--- pkg/crypto/protocol/difficulty_test.go | 54 ++++++++++++++++- pkg/crypto/protocol/hash.go | 5 +- pkg/crypto/protocol/hasher.go | 11 ++-- pkg/relayer/miner/gen/gen_fixtures.go | 2 +- pkg/relayer/miner/miner.go | 11 ++-- pkg/relayer/miner/miner_test.go | 7 +-- proto/poktroll/proof/params.proto | 1 + .../relay_mining_difficulty_test.go | 2 - testutil/network/network.go | 2 +- testutil/testrelayer/relays.go | 5 +- x/proof/keeper/msg_server_submit_proof.go | 27 +++++---- .../keeper/msg_server_submit_proof_test.go | 7 ++- x/proof/types/params.go | 32 +++++----- x/proof/types/params.pb.go | 1 + x/service/types/relay.go | 18 +++--- .../keeper/msg_server_update_param_test.go | 1 + 
x/tokenomics/keeper/scale_difficulty_test.go | 14 +++-- x/tokenomics/keeper/settle_pending_claims.go | 1 + .../keeper/update_relay_mining_difficulty.go | 6 +- .../update_relay_mining_difficulty_test.go | 60 ++++++++++--------- x/tokenomics/module/abci.go | 29 +++++---- 25 files changed, 233 insertions(+), 125 deletions(-) diff --git a/Makefile b/Makefile index 1a46832ec..62c141898 100644 --- a/Makefile +++ b/Makefile @@ -420,7 +420,7 @@ test_verbose: check_go_version ## Run all go tests verbosely go test -count=1 -v -race -tags test ./... .PHONY: test_all -test_all: check_go_version ## Run all go tests showing detailed output only on failures +test_all: warn_flaky_tests check_go_version ## Run all go tests showing detailed output only on failures go test -count=1 -race -tags test ./... .PHONY: test_all_with_integration @@ -503,17 +503,22 @@ go_develop_and_test: go_develop test_all ## Generate protos, mocks and run all t # TODO_DISCUSS_IN_THIS_COMMIT - SHOULD NEVER BE COMMITTED TO MASTER. It is a way for the reviewer of a PR to start / reply to a discussion. # TODO_IN_THIS_COMMIT - SHOULD NEVER BE COMMITTED TO MASTER. It is a way to start the review process while non-critical changes are still in progress + +# Define shared variable for the exclude parameters +EXCLUDE_GREP = --exclude-dir={.git,vendor,./docusaurus,.vscode,.idea} --exclude={Makefile,reviewdog.yml,*.pb.go,*.pulsar.go} + .PHONY: todo_list todo_list: ## List all the TODOs in the project (excludes vendor and prototype directories) - grep --exclude-dir={.git,vendor,./docusaurus} -r TODO . + grep -r $(EXCLUDE_GREP) TODO . | grep -v 'TODO()' .PHONY: todo_count todo_count: ## Print a count of all the TODOs in the project - grep --exclude-dir={.git,vendor,./docusaurus} -r TODO . | wc -l + grep -r $(EXCLUDE_GREP) TODO . 
| grep -v 'TODO()' | wc -l .PHONY: todo_this_commit todo_this_commit: ## List all the TODOs needed to be done in this commit - grep -n --exclude-dir={.git,vendor,.vscode,.idea} --exclude={Makefile,reviewdog.yml} -r -e "TODO_IN_THIS_" + grep -r $(EXCLUDE_GREP) TODO_IN_THIS . | grep -v 'TODO()' + #################### ### Gateways ### @@ -809,6 +814,19 @@ warn_message_local_stress_test: ## Print a warning message when kicking off a lo @echo "| |" @echo "+-----------------------------------------------------------------------------------------------+" +.PHONY: warn_flaky_tests +warn_flaky_tests: ## Print a warning message that some unit tests may be flaky + @echo "+-----------------------------------------------------------------------------------------------+" + @echo "| |" + @echo "| IMPORTANT: READ ME IF YOUR TESTS FAIL!!! |" + @echo "| |" + @echo "| 1. Our unit / integration tests are far from perfect & some are flaky |" + @echo "| 2. If you ran 'make go_develop_and_test' and a failure occurred, try to run: |" + @echo "| 'make test_all' once or twice more |" + @echo "| 3. If the same error persists, isolate it with 'go test -v ./path/to/failing/module' |" + @echo "| |" + @echo "+-----------------------------------------------------------------------------------------------+" + ############## ### Claims ### ############## diff --git a/api/poktroll/proof/params.pulsar.go b/api/poktroll/proof/params.pulsar.go index e75f23eac..b8162ea2b 100644 --- a/api/poktroll/proof/params.pulsar.go +++ b/api/poktroll/proof/params.pulsar.go @@ -626,6 +626,7 @@ type Params struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + // TODO_FOLLOWUP(@olshansk, #690): Either delete this or change it to be named "minimum" // relay_difficulty_target_hash is the maximum value a relay hash must be less than to be volume/reward applicable. 
RelayDifficultyTargetHash []byte `protobuf:"bytes,1,opt,name=relay_difficulty_target_hash,json=relayDifficultyTargetHash,proto3" json:"relay_difficulty_target_hash,omitempty"` // proof_request_probability is the probability of a session requiring a proof diff --git a/pkg/client/events/query_client_test.go b/pkg/client/events/query_client_test.go index 2c868ac42..2130d350b 100644 --- a/pkg/client/events/query_client_test.go +++ b/pkg/client/events/query_client_test.go @@ -366,7 +366,6 @@ func behavesLikeEitherObserver[V any]( require.NoError(t, err) require.Equal(t, notificationsLimit, int(atomic.LoadInt32(&eventsCounter))) - // TODO_THIS_COMMIT: is this necessary? time.Sleep(10 * time.Millisecond) if onLimit != nil { diff --git a/pkg/crypto/protocol/difficulty.go b/pkg/crypto/protocol/difficulty.go index 05e519ad1..8d7ccb835 100644 --- a/pkg/crypto/protocol/difficulty.go +++ b/pkg/crypto/protocol/difficulty.go @@ -1,27 +1,45 @@ package protocol import ( + "bytes" "encoding/hex" "math/big" ) var ( - // Difficulty1HashBz is the chosen "highest" (easiest) target hash, which - // corresponds to the lowest possible difficulty. It effectively normalizes - // the difficulty number (which is returned by GetDifficultyFromHash) by defining - // the hash which corresponds to difficulty 1. + // BaseRelayDifficultyHashBz is the chosen "highest" (easiest) target hash, which + // corresponds to the lowest possible difficulty. + // + // It effectively normalizes the difficulty number (which is returned by GetDifficultyFromHash) + // by defining the hash which corresponds to the base difficulty. + // + // When this is the difficulty of a particular service, all relays are reward / volume applicable. 
+ // + // Bitcoin uses a similar concept, where the target hash is defined as the hash: // - https://bitcoin.stackexchange.com/questions/107976/bitcoin-difficulty-why-leading-0s // - https://bitcoin.stackexchange.com/questions/121920/is-it-always-possible-to-find-a-number-whose-hash-starts-with-a-certain-number-o - Difficulty1HashBz, _ = hex.DecodeString("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + BaseRelayDifficultyHashBz, _ = hex.DecodeString("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") ) // GetDifficultyFromHash returns the "difficulty" of the given hash, with respect -// to the "highest" (easiest) target hash, Difficulty1Hash. +// to the "highest" (easiest) target hash, BaseRelayDifficultyHash. // The resultant value is not used for any business logic but is simplify there to have a human-readable version of the hash. func GetDifficultyFromHash(hashBz [RelayHasherSize]byte) int64 { - difficulty1HashInt := new(big.Int).SetBytes(Difficulty1HashBz) + baseRelayDifficultyHashInt := new(big.Int).SetBytes(BaseRelayDifficultyHashBz) hashInt := new(big.Int).SetBytes(hashBz[:]) // difficulty is the ratio of the highest target hash to the given hash. - return new(big.Int).Div(difficulty1HashInt, hashInt).Int64() + // TODO_MAINNET: Can this cause an integer overflow? + return new(big.Int).Div(baseRelayDifficultyHashInt, hashInt).Int64() +} + +// IsRelayVolumeApplicable returns true if the relay IS reward / volume applicable. +// A relay is reward / volume applicable IFF its hash is less than the target hash. +// - relayHash is the hash of the relay to be checked. +// - targetHash is the hash of the relay difficulty target for a particular service. +// +// TODO_MAINNET: Devise a test that tries to attack the network and ensure that +// there is sufficient telemetry. 
+func IsRelayVolumeApplicable(relayHash, targetHash []byte) bool { + return bytes.Compare(relayHash, targetHash) == -1 // True if relayHash < targetHash } diff --git a/pkg/crypto/protocol/difficulty_test.go b/pkg/crypto/protocol/difficulty_test.go index a9aa89cdd..199862ccf 100644 --- a/pkg/crypto/protocol/difficulty_test.go +++ b/pkg/crypto/protocol/difficulty_test.go @@ -32,7 +32,7 @@ func TestGetDifficultyFromHash(t *testing.T) { { desc: "Highest difficulty", hashHex: "0000000000000000000000000000000000000000000000000000000000000001", - expectedDifficulty: new(big.Int).SetBytes(Difficulty1HashBz).Int64(), + expectedDifficulty: new(big.Int).SetBytes(BaseRelayDifficultyHashBz).Int64(), }, } @@ -52,3 +52,55 @@ func TestGetDifficultyFromHash(t *testing.T) { }) } } + +func TestIsRelayVolumeApplicable(t *testing.T) { + tests := []struct { + desc string + relayHashHex string + targetHashHex string + expectedVolumeApplicable bool + }{ + { + desc: "Applicable: relayHash << targetHash", + relayHashHex: "000fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + targetHashHex: "00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + expectedVolumeApplicable: true, + }, + { + desc: "Applicable: relayHash < targetHash", + relayHashHex: "00efffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + targetHashHex: "00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + expectedVolumeApplicable: true, + }, + { + desc: "Not Applicable: relayHash = targetHash", + relayHashHex: "00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + targetHashHex: "00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + expectedVolumeApplicable: false, + }, + { + desc: "Not applicable: relayHash > targetHash", + relayHashHex: "0effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + targetHashHex: "00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + expectedVolumeApplicable: false, + }, + { + desc: 
"Not applicable: relayHash >> targetHash", + relayHashHex: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + targetHashHex: "00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + expectedVolumeApplicable: false, + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + relayHash, err := hex.DecodeString(test.relayHashHex) + require.NoError(t, err) + + targetHash, err := hex.DecodeString(test.targetHashHex) + require.NoError(t, err) + + require.Equal(t, test.expectedVolumeApplicable, IsRelayVolumeApplicable(relayHash, targetHash)) + }) + } +} diff --git a/pkg/crypto/protocol/hash.go b/pkg/crypto/protocol/hash.go index a88380b77..4ef69514c 100644 --- a/pkg/crypto/protocol/hash.go +++ b/pkg/crypto/protocol/hash.go @@ -1,10 +1,11 @@ package protocol -// GetHashFromBytes returns the hash of the relay (full, request or response) bytes. +// GetRelayHashFromBytes returns the hash of the relay (full, request or response) bytes. // It is used as helper in the case that the relay is already marshaled and // centralizes the hasher used. -func GetHashFromBytes(relayBz []byte) (hash [RelayHasherSize]byte) { +func GetRelayHashFromBytes(relayBz []byte) (hash [RelayHasherSize]byte) { hasher := NewRelayHasher() + // NB: Intentionally ignoring the error, following sha256.Sum256 implementation. _, _ = hasher.Write(relayBz) hashBz := hasher.Sum(nil) diff --git a/pkg/crypto/protocol/hasher.go b/pkg/crypto/protocol/hasher.go index ce9c175f9..e5f008c1a 100644 --- a/pkg/crypto/protocol/hasher.go +++ b/pkg/crypto/protocol/hasher.go @@ -3,14 +3,13 @@ package protocol import "crypto/sha256" const ( - RelayHasherSize = sha256.Size - TrieHasherSize = sha256.Size - TrieRootSize = TrieHasherSize + trieRootMetadataSize - // TODO_CONSIDERATION: Export this from the SMT package. 
- trieRootMetadataSize = 16 + RelayHasherSize = sha256.Size + TrieHasherSize = sha256.Size + TrieRootSize = TrieHasherSize + trieRootMetadataSize + trieRootMetadataSize = 16 // TODO_CONSIDERATION: Export this from the SMT package. ) var ( NewRelayHasher = sha256.New - NewTrieHasher = sha256.New + NewTrieHasher = sha256.New ) diff --git a/pkg/relayer/miner/gen/gen_fixtures.go b/pkg/relayer/miner/gen/gen_fixtures.go index 0531e4266..9c6e7fbe5 100644 --- a/pkg/relayer/miner/gen/gen_fixtures.go +++ b/pkg/relayer/miner/gen/gen_fixtures.go @@ -32,6 +32,7 @@ const ( defaultOutPath = "relay_fixtures_test.go" ) +// TODO_FOLLOWUP(@olshansk, #690): Do a global anycase grep for "DifficultyBits" and update/remove things appropriately. var ( // flagDifficultyBitsThreshold is the number of leading zero bits that a // randomized, serialized relay must have to be included in the @@ -152,7 +153,6 @@ func genRandomizedMinedRelayFixtures( Res: nil, } - // TODO_TECHDEBT(@red-0ne): use canonical codec. relayBz, err := relay.Marshal() if err != nil { errCh <- err diff --git a/pkg/relayer/miner/miner.go b/pkg/relayer/miner/miner.go index a21aee91e..c5f7a5605 100644 --- a/pkg/relayer/miner/miner.go +++ b/pkg/relayer/miner/miner.go @@ -1,7 +1,6 @@ package miner import ( - "bytes" "context" "cosmossdk.io/depinject" @@ -30,6 +29,7 @@ type miner struct { // // TODO_MAINNET(#543): This is populated by querying the corresponding on-chain parameter during construction. // If this parameter is updated on-chain the relayminer will need to be restarted to query the new value. + // TODO_FOLLOWUP(@olshansk, #690): This needs to be maintained (and updated) on a per service level. relayDifficultyTargetHash []byte } @@ -109,16 +109,15 @@ func (mnr *miner) mapMineRelay( _ context.Context, relay *servicetypes.Relay, ) (_ either.Either[*relayer.MinedRelay], skip bool) { - // TODO_TECHDEBT(@red-0ne, #446): Centralize the configuration for the SMT spec. 
- // TODO_TECHDEBT(@red-0ne): marshal using canonical codec. relayBz, err := relay.Marshal() if err != nil { return either.Error[*relayer.MinedRelay](err), false } - relayHash := protocol.GetHashFromBytes(relayBz) + relayHashArr := protocol.GetRelayHashFromBytes(relayBz) + relayHash := relayHashArr[:] // The relay IS NOT volume / reward applicable - if bytes.Compare(relayHash[:], mnr.relayDifficultyTargetHash) == 1 { + if !protocol.IsRelayVolumeApplicable(relayHash, mnr.relayDifficultyTargetHash) { return either.Success[*relayer.MinedRelay](nil), true } @@ -126,6 +125,6 @@ func (mnr *miner) mapMineRelay( return either.Success(&relayer.MinedRelay{ Relay: *relay, Bytes: relayBz, - Hash: relayHash[:], + Hash: relayHash, }), false } diff --git a/pkg/relayer/miner/miner_test.go b/pkg/relayer/miner/miner_test.go index 8515d26c8..3b817a075 100644 --- a/pkg/relayer/miner/miner_test.go +++ b/pkg/relayer/miner/miner_test.go @@ -23,7 +23,7 @@ import ( servicetypes "github.com/pokt-network/poktroll/x/service/types" ) -var testTargetHash, _ = hex.DecodeString("0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") +var testRelayMiningTargetHash, _ = hex.DecodeString("0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") // TestMiner_MinedRelays constructs an observable of mined relays, through which // it pipes pre-mined relay fixtures. 
It asserts that the observable only emits @@ -43,7 +43,7 @@ func TestMiner_MinedRelays(t *testing.T) { proofQueryClientMock := testqueryclients.NewTestProofQueryClient(t) deps := depinject.Supply(proofQueryClientMock) - mnr, err := miner.NewMiner(deps, miner.WithRelayDifficultyTargetHash(testTargetHash)) + mnr, err := miner.NewMiner(deps, miner.WithRelayDifficultyTargetHash(testRelayMiningTargetHash)) require.NoError(t, err) minedRelays := mnr.MinedRelays(ctx, mockRelaysObs) @@ -134,8 +134,7 @@ func unmarshalHexMinedRelay( err = relay.Unmarshal(relayBz) require.NoError(t, err) - // TODO_TECHDEBT(@red-0ne, #446): Centralize the configuration for the SMT spec. - relayHashArr := protocol.GetHashFromBytes(relayBz) + relayHashArr := protocol.GetRelayHashFromBytes(relayBz) relayHash := relayHashArr[:] return &relayer.MinedRelay{ diff --git a/proto/poktroll/proof/params.proto b/proto/poktroll/proof/params.proto index 3e8f1bc17..8f8042d95 100644 --- a/proto/poktroll/proof/params.proto +++ b/proto/poktroll/proof/params.proto @@ -12,6 +12,7 @@ message Params { option (amino.name) = "poktroll/x/proof/Params"; option (gogoproto.equal) = true; + // TODO_FOLLOWUP(@olshansk, #690): Either delete this or change it to be named "minimum" // relay_difficulty_target_hash is the maximum value a relay hash must be less than to be volume/reward applicable. 
bytes relay_difficulty_target_hash = 1 [(gogoproto.jsontag) = "relay_difficulty_target_hash"]; diff --git a/tests/integration/tokenomics/relay_mining_difficulty_test.go b/tests/integration/tokenomics/relay_mining_difficulty_test.go index d97f38949..3bc9a82c5 100644 --- a/tests/integration/tokenomics/relay_mining_difficulty_test.go +++ b/tests/integration/tokenomics/relay_mining_difficulty_test.go @@ -21,8 +21,6 @@ import ( tokenomicstypes "github.com/pokt-network/poktroll/x/tokenomics/types" ) -// TODO_UPNEXT(@Olshansk, #571): Implement these tests - func init() { cmd.InitSDKConfig() } diff --git a/testutil/network/network.go b/testutil/network/network.go index fdfb7f8a1..091f871b7 100644 --- a/testutil/network/network.go +++ b/testutil/network/network.go @@ -50,7 +50,7 @@ func New(t *testing.T, configs ...Config) *Network { cfg = configs[0] } net, err := network.New(t, t.TempDir(), cfg) - require.NoError(t, err) + require.NoError(t, err, "TODO_FLAKY: This config setup is periodically flaky") _, err = net.WaitForHeight(1) require.NoError(t, err) t.Cleanup(net.Cleanup) diff --git a/testutil/testrelayer/relays.go b/testutil/testrelayer/relays.go index 8cc870b42..43a4c5753 100644 --- a/testutil/testrelayer/relays.go +++ b/testutil/testrelayer/relays.go @@ -53,11 +53,10 @@ func NewUnsignedMinedRelay( }, } - // TODO_TECHDEBT(@red-0ne): marshal using canonical codec. 
relayBz, err := relay.Marshal() require.NoError(t, err) - relayHashArr := protocol.GetHashFromBytes(relayBz) + relayHashArr := protocol.GetRelayHashFromBytes(relayBz) relayHash := relayHashArr[:] return &relayer.MinedRelay{ @@ -111,7 +110,7 @@ func NewSignedMinedRelay( relayBz, err := relay.Marshal() require.NoError(t, err) - relayHashArr := protocol.GetHashFromBytes(relayBz) + relayHashArr := protocol.GetRelayHashFromBytes(relayBz) relayHash := relayHashArr[:] return &relayer.MinedRelay{ diff --git a/x/proof/keeper/msg_server_submit_proof.go b/x/proof/keeper/msg_server_submit_proof.go index af695b46a..fcc271b03 100644 --- a/x/proof/keeper/msg_server_submit_proof.go +++ b/x/proof/keeper/msg_server_submit_proof.go @@ -204,10 +204,15 @@ func (k msgServer) SubmitProof( logger.Debug("successfully verified relay response signature") // Get the proof module's governance parameters. + // TODO_FOLLOWUP(@olshansk, #690): Get the difficulty associated with the service params := k.GetParams(ctx) // Verify the relay difficulty is above the minimum required to earn rewards. - if err = validateRelayDifficulty(relayBz, params.RelayDifficultyTargetHash); err != nil { + if err = validateRelayDifficulty( + relayBz, + params.RelayDifficultyTargetHash, + sessionHeader.Service.Id, + ); err != nil { return nil, status.Error(codes.FailedPrecondition, err.Error()) } logger.Debug("successfully validated relay mining difficulty") @@ -446,8 +451,9 @@ func verifyClosestProof( // required minimum threshold. // TODO_TECHDEBT: Factor out the relay mining difficulty validation into a shared // function that can be used by both the proof and the miner packages. 
-func validateRelayDifficulty(relayBz []byte, targetHash []byte) error { - relayHash := protocol.GetHashFromBytes(relayBz) +func validateRelayDifficulty(relayBz, targetHash []byte, serviceId string) error { + relayHashArr := protocol.GetRelayHashFromBytes(relayBz) + relayHash := relayHashArr[:] if len(targetHash) != protocol.RelayHasherSize { return types.ErrProofInvalidRelay.Wrapf( @@ -458,19 +464,18 @@ func validateRelayDifficulty(relayBz []byte, targetHash []byte) error { ) } - var targetHashArr [protocol.RelayHasherSize]byte - copy(targetHashArr[:], targetHash) + if !protocol.IsRelayVolumeApplicable(relayHash, targetHash) { + var targetHashArr [protocol.RelayHasherSize]byte + copy(targetHashArr[:], targetHash) - // TODO_MAINNET: Devise a test that tries to attack the network and ensure that there - // is sufficient telemetry. - // NB: If relayHash > targetHash, then the difficulty is less than the target difficulty. - if bytes.Compare(relayHash[:], targetHash[:]) == 1 { - relayDifficulty := protocol.GetDifficultyFromHash(relayHash) + relayDifficulty := protocol.GetDifficultyFromHash(relayHashArr) targetDifficulty := protocol.GetDifficultyFromHash(targetHashArr) + return types.ErrProofInvalidRelay.Wrapf( - "relay difficulty %d is less than the target difficulty %d", + "the difficulty relay being proven is (%d), and is smaller than the target difficulty (%d) for service %s", relayDifficulty, targetDifficulty, + serviceId, ) } diff --git a/x/proof/keeper/msg_server_submit_proof_test.go b/x/proof/keeper/msg_server_submit_proof_test.go index ec65b92b2..99d890294 100644 --- a/x/proof/keeper/msg_server_submit_proof_test.go +++ b/x/proof/keeper/msg_server_submit_proof_test.go @@ -48,7 +48,7 @@ var ( // - the relay difficulty target hash to the easiest difficulty so that these tests don't need to mine for valid relays. // - the proof request probability to 1 so that all test sessions require a proof. 
testProofParams = prooftypes.Params{ - RelayDifficultyTargetHash: protocol.Difficulty1HashBz, + RelayDifficultyTargetHash: protocol.BaseRelayDifficultyHashBz, ProofRequestProbability: 1, } ) @@ -1023,7 +1023,7 @@ func TestMsgServer_SubmitProof_Error(t *testing.T) { desc: "relay difficulty must be greater than or equal to minimum (zero difficulty)", newProofMsg: func(t *testing.T) *prooftypes.MsgSubmitProof { // Set the minimum relay difficulty to a non-zero value such that the relays - // constructed by the test helpers have a negligable chance of being valid. + // constructed by the test helpers have a negligible chance of being valid. err = keepers.Keeper.SetParams(ctx, prooftypes.Params{ RelayDifficultyTargetHash: lowTargetHash, }) @@ -1047,9 +1047,10 @@ func TestMsgServer_SubmitProof_Error(t *testing.T) { expectedErr: status.Error( codes.FailedPrecondition, prooftypes.ErrProofInvalidRelay.Wrapf( - "relay difficulty %d is less than the target difficulty %d", + "the difficulty relay being proven is (%d), and is smaller than the target difficulty (%d) for service %s", validClosestRelayDifficultyBits, highExpectedTargetDifficulty, + validSessionHeader.Service.Id, ).Error(), ), }, diff --git a/x/proof/types/params.go b/x/proof/types/params.go index 22c0c4cfa..218831ba2 100644 --- a/x/proof/types/params.go +++ b/x/proof/types/params.go @@ -16,19 +16,23 @@ var ( _ client.ProofParams = (*Params)(nil) _ paramtypes.ParamSet = (*Params)(nil) - KeyMinRelayDifficultyBits = []byte("MinRelayDifficultyBits") - ParamRelayDifficultyTargetHash = "relay_difficulty_target_hash" - DefaultRelayDifficultyTargetHashHex = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" // all relays are payable - DefaultRelayDifficultyTargetHash, _ = hex.DecodeString(DefaultRelayDifficultyTargetHashHex) // TODO_MAINNET(#142, #401): Determine the default value. 
- KeyProofRequestProbability = []byte("ProofRequestProbability") - ParamProofRequestProbability = "proof_request_probability" - DefaultProofRequestProbability float32 = 0.25 // See: https://github.com/pokt-network/pocket-core/blob/staging/docs/proposals/probabilistic_proofs.md - KeyProofRequirementThreshold = []byte("ProofRequirementThreshold") - ParamProofRequirementThreshold = "proof_requirement_threshold" - DefaultProofRequirementThreshold uint64 = 20 // See: https://github.com/pokt-network/pocket-core/blob/staging/docs/proposals/probabilistic_proofs.md - KeyProofMissingPenalty = []byte("ProofMissingPenalty") - ParamProofMissingPenalty = "proof_missing_penalty" - DefaultProofMissingPenalty = cosmostypes.NewCoin(volatile.DenomuPOKT, math.NewInt(320)) // See: https://github.com/pokt-network/pocket-core/blob/staging/docs/proposals/probabilistic_proofs.md + // TODO_FOLLOWUP(@olshansk, #690): Delete this parameter. + KeyRelayDifficultyTargetHash = []byte("RelayDifficultyTargetHash") + ParamRelayDifficultyTargetHash = "relay_difficulty_target_hash" + DefaultRelayDifficultyTargetHashHex = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" // all relays are payable + DefaultRelayDifficultyTargetHash, _ = hex.DecodeString(DefaultRelayDifficultyTargetHashHex) + + KeyProofRequestProbability = []byte("ProofRequestProbability") + ParamProofRequestProbability = "proof_request_probability" + DefaultProofRequestProbability float32 = 0.25 // See: https://github.com/pokt-network/pocket-core/blob/staging/docs/proposals/probabilistic_proofs.md + + KeyProofRequirementThreshold = []byte("ProofRequirementThreshold") + ParamProofRequirementThreshold = "proof_requirement_threshold" + DefaultProofRequirementThreshold uint64 = 20 // See: https://github.com/pokt-network/pocket-core/blob/staging/docs/proposals/probabilistic_proofs.md + + KeyProofMissingPenalty = []byte("ProofMissingPenalty") + ParamProofMissingPenalty = "proof_missing_penalty" + DefaultProofMissingPenalty = 
cosmostypes.NewCoin(volatile.DenomuPOKT, math.NewInt(320)) // See: https://github.com/pokt-network/pocket-core/blob/staging/docs/proposals/probabilistic_proofs.md ) // ParamKeyTable the param key table for launch module @@ -65,7 +69,7 @@ func DefaultParams() Params { func (p *Params) ParamSetPairs() paramtypes.ParamSetPairs { return paramtypes.ParamSetPairs{ paramtypes.NewParamSetPair( - KeyMinRelayDifficultyBits, + KeyRelayDifficultyTargetHash, &p.RelayDifficultyTargetHash, ValidateRelayDifficultyTargetHash, ), diff --git a/x/proof/types/params.pb.go b/x/proof/types/params.pb.go index 9718a5496..a82615004 100644 --- a/x/proof/types/params.pb.go +++ b/x/proof/types/params.pb.go @@ -29,6 +29,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package // Params defines the parameters for the module. type Params struct { + // TODO_FOLLOWUP(@olshansk, #690): Either delete this or change it to be named "minimum" // relay_difficulty_target_hash is the maximum value a relay hash must be less than to be volume/reward applicable. RelayDifficultyTargetHash []byte `protobuf:"bytes,1,opt,name=relay_difficulty_target_hash,json=relayDifficultyTargetHash,proto3" json:"relay_difficulty_target_hash"` // proof_request_probability is the probability of a session requiring a proof diff --git a/x/service/types/relay.go b/x/service/types/relay.go index f1a4ac271..b43ded9f7 100644 --- a/x/service/types/relay.go +++ b/x/service/types/relay.go @@ -11,30 +11,30 @@ import ( // GetHash returns the hash of the relay, which contains both the signed // relay request and the relay response. It is used as the key for insertion // into the SMT. 
-func (relay *Relay) GetHash() ([32]byte, error) { +func (relay *Relay) GetHash() ([protocol.RelayHasherSize]byte, error) { relayBz, err := relay.Marshal() if err != nil { - return [32]byte{}, err + return [protocol.RelayHasherSize]byte{}, err } - return protocol.GetHashFromBytes(relayBz), nil + return protocol.GetRelayHashFromBytes(relayBz), nil } // GetSignableBytesHash returns the hash of the signable bytes of the relay request // Hashing the marshaled request message guarantees that the signable bytes are // always of a constant and expected length. -func (req RelayRequest) GetSignableBytesHash() ([32]byte, error) { +func (req RelayRequest) GetSignableBytesHash() ([protocol.RelayHasherSize]byte, error) { // req and req.Meta are not pointers, so we can set the signature to nil // in order to generate the signable bytes hash without the need restore it. req.Meta.Signature = nil requestBz, err := req.Marshal() if err != nil { - return [32]byte{}, err + return [protocol.RelayHasherSize]byte{}, err } // return the marshaled request hash to guarantee that the signable bytes // are always of a constant and expected length - return protocol.GetHashFromBytes(requestBz), nil + return protocol.GetRelayHashFromBytes(requestBz), nil } // ValidateBasic performs basic validation of the RelayResponse Meta, SessionHeader @@ -65,18 +65,18 @@ func (req *RelayRequest) ValidateBasic() error { // GetSignableBytesHash returns the hash of the signable bytes of the relay response // Hashing the marshaled response message guarantees that the signable bytes are // always of a constant and expected length. -func (res RelayResponse) GetSignableBytesHash() ([32]byte, error) { +func (res RelayResponse) GetSignableBytesHash() ([protocol.RelayHasherSize]byte, error) { // res and res.Meta are not pointers, so we can set the signature to nil // in order to generate the signable bytes hash without the need restore it. 
 	res.Meta.SupplierSignature = nil
 	responseBz, err := res.Marshal()
 	if err != nil {
-		return [32]byte{}, err
+		return [protocol.RelayHasherSize]byte{}, err
 	}
 
 	// return the marshaled response hash to guarantee that the signable bytes
 	// are always of a constant and expected length
-	return protocol.GetHashFromBytes(responseBz), nil
+	return protocol.GetRelayHashFromBytes(responseBz), nil
 }
 
 // ValidateBasic performs basic validation of the RelayResponse Meta, SessionHeader
diff --git a/x/tokenomics/keeper/msg_server_update_param_test.go b/x/tokenomics/keeper/msg_server_update_param_test.go
index f2237bfc1..5f3375e16 100644
--- a/x/tokenomics/keeper/msg_server_update_param_test.go
+++ b/x/tokenomics/keeper/msg_server_update_param_test.go
@@ -11,6 +11,7 @@ import (
 	tokenomicstypes "github.com/pokt-network/poktroll/x/tokenomics/types"
 )
 
+// TODO_FOLLOWUP(@olshansk, #690): Rename this test.
 func TestMsgUpdateParam_UpdateMinRelayDifficultyBitsOnly(t *testing.T) {
 	var expectedComputeUnitsToTokensMultiplier int64 = 8
 
diff --git a/x/tokenomics/keeper/scale_difficulty_test.go b/x/tokenomics/keeper/scale_difficulty_test.go
index 00f3ee164..7b2cb4cf6 100644
--- a/x/tokenomics/keeper/scale_difficulty_test.go
+++ b/x/tokenomics/keeper/scale_difficulty_test.go
@@ -13,6 +13,7 @@ import (
 // TestScaleDifficultyTargetHash tests the scaling of a target hash by a given ratio.
 // Some expectations are manually adjusted to account for some precision loss in the
 // implementation.
+// TODO_FOLLOWUP(@olshansk, #690): Ensure that the ratio corresponds to the probability of a relay being accepted. If not, explain why.
func TestScaleDifficultyTargetHash(t *testing.T) { tests := []struct { desc string @@ -75,7 +76,7 @@ func TestScaleDifficultyTargetHash(t *testing.T) { expectedHashHex: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", }, { - desc: "Maxes out at Difficulty1", + desc: "Maxes out at BaseRelayDifficulty", targetHashHex: "3fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", ratio: 10, expectedHashHex: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", @@ -87,12 +88,15 @@ func TestScaleDifficultyTargetHash(t *testing.T) { targetHashBz, targetErr := hex.DecodeString(test.targetHashHex) require.NoError(t, targetErr) - expectedBytes, expectedErr := hex.DecodeString(test.expectedHashHex) + expectedHashBz, expectedErr := hex.DecodeString(test.expectedHashHex) require.NoError(t, expectedErr) - scaledHash := scaleDifficultyTargetHash(targetHashBz, new(big.Float).SetFloat64(test.ratio)) - assert.Equal(t, len(scaledHash), len(targetHashBz)) - require.Equalf(t, 0, bytes.Compare(scaledHash, expectedBytes), "expected hash %x, got %x", expectedBytes, scaledHash) + scaledDifficultyHash := scaleDifficultyTargetHash(targetHashBz, new(big.Float).SetFloat64(test.ratio)) + assert.Equal(t, len(scaledDifficultyHash), len(targetHashBz)) + + // Ensure the scaled difficulty hash equals the one provided + require.Zero(t, bytes.Compare(expectedHashBz, scaledDifficultyHash), + "expected difficulty hash %x, but got %x", expectedHashBz, scaledDifficultyHash) }) } } diff --git a/x/tokenomics/keeper/settle_pending_claims.go b/x/tokenomics/keeper/settle_pending_claims.go index 0ad62634c..7fe1a3c53 100644 --- a/x/tokenomics/keeper/settle_pending_claims.go +++ b/x/tokenomics/keeper/settle_pending_claims.go @@ -104,6 +104,7 @@ func (k Keeper) SettlePendingClaims(ctx sdk.Context) ( expiredResult.NumComputeUnits += numClaimComputeUnits continue } + // TODO_FOLLOWUP(@olshansk, #690): Document the potential changes needed here. 
// NB: If a proof is found, it is valid because verification is done // at the time of submission. } diff --git a/x/tokenomics/keeper/update_relay_mining_difficulty.go b/x/tokenomics/keeper/update_relay_mining_difficulty.go index deb6e20f9..b222b4f37 100644 --- a/x/tokenomics/keeper/update_relay_mining_difficulty.go +++ b/x/tokenomics/keeper/update_relay_mining_difficulty.go @@ -127,7 +127,7 @@ func ComputeNewDifficultyTargetHash(prevTargetHash []byte, targetNumRelays, newR return prooftypes.DefaultRelayDifficultyTargetHash } - // Calculate the proportion of target relays to the new EMA + // Calculate the proportion of target relays relative to the EMA of actual volume applicable relays // TODO_MAINNET: Use a language agnostic float implementation or arithmetic library // to ensure deterministic results across different language implementations of the // protocol. @@ -158,9 +158,9 @@ func scaleDifficultyTargetHash(targetHash []byte, ratio *big.Float) []byte { scaledTargetInt, _ := scaledTargetFloat.Int(nil) scaledTargetHash := scaledTargetInt.Bytes() - // Ensure the scaled target hash maxes out at Difficulty1. + // Ensure the scaled target hash maxes out at BaseRelayDifficulty if len(scaledTargetHash) > len(targetHash) { - return protocol.Difficulty1HashBz + return protocol.BaseRelayDifficultyHashBz } // Ensure the scaled target hash has the same length as the default target hash. 
diff --git a/x/tokenomics/keeper/update_relay_mining_difficulty_test.go b/x/tokenomics/keeper/update_relay_mining_difficulty_test.go index d752ae5c8..fef8272ec 100644 --- a/x/tokenomics/keeper/update_relay_mining_difficulty_test.go +++ b/x/tokenomics/keeper/update_relay_mining_difficulty_test.go @@ -143,15 +143,17 @@ func TestUpdateRelayMiningDifficulty_FirstDifficulty(t *testing.T) { _, err := keeper.UpdateRelayMiningDifficulty(ctx, relaysPerServiceMap) require.NoError(t, err) - difficulty, found := keeper.GetRelayMiningDifficulty(ctx, "svc1") + relayDifficulty, found := keeper.GetRelayMiningDifficulty(ctx, "svc1") require.True(t, found) - require.Equal(t, tt.numRelays, difficulty.NumRelaysEma) - require.Equal(t, tt.expectedRelayMiningDifficulty.NumRelaysEma, difficulty.NumRelaysEma) + require.Equal(t, tt.numRelays, relayDifficulty.NumRelaysEma) + require.Equal(t, tt.expectedRelayMiningDifficulty.NumRelaysEma, relayDifficulty.NumRelaysEma) - require.Lessf(t, bytes.Compare(difficulty.TargetHash, tt.expectedRelayMiningDifficulty.TargetHash), 1, + // NB: An increase in difficulty is indicated by a decrease in the target hash + didDifficultyIncrease := bytes.Compare(relayDifficulty.TargetHash, tt.expectedRelayMiningDifficulty.TargetHash) < 1 + require.True(t, didDifficultyIncrease, "expected difficulty.TargetHash (%x) to be less than or equal to expectedRelayMiningDifficulty.TargetHash (%x)", - difficulty.TargetHash, tt.expectedRelayMiningDifficulty.TargetHash, + relayDifficulty.TargetHash, tt.expectedRelayMiningDifficulty.TargetHash, ) }) } @@ -159,28 +161,28 @@ func TestUpdateRelayMiningDifficulty_FirstDifficulty(t *testing.T) { func TestComputeNewDifficultyHash(t *testing.T) { tests := []struct { - desc string - numRelaysTarget uint64 - relaysEma uint64 - expectedDifficultyHash []byte + desc string + numRelaysTarget uint64 + relaysEma uint64 + expectedRelayDifficultyHash []byte }{ { - desc: "Relays Target > Relays EMA", - numRelaysTarget: 100, - relaysEma: 50, - 
expectedDifficultyHash: defaultDifficulty(), + desc: "Relays Target > Relays EMA", + numRelaysTarget: 100, + relaysEma: 50, + expectedRelayDifficultyHash: defaultDifficulty(), }, { - desc: "Relays Target == Relays EMA", - numRelaysTarget: 100, - relaysEma: 100, - expectedDifficultyHash: defaultDifficulty(), + desc: "Relays Target == Relays EMA", + numRelaysTarget: 100, + relaysEma: 100, + expectedRelayDifficultyHash: defaultDifficulty(), }, { desc: "Relays Target < Relays EMA", numRelaysTarget: 50, relaysEma: 100, - expectedDifficultyHash: append( + expectedRelayDifficultyHash: append( []byte{0b01111111}, makeBytesFullOfOnes(31)..., ), @@ -189,7 +191,7 @@ func TestComputeNewDifficultyHash(t *testing.T) { desc: "Relays Target << Relays EMA", numRelaysTarget: 50, relaysEma: 200, - expectedDifficultyHash: append( + expectedRelayDifficultyHash: append( []byte{0b00111111}, makeBytesFullOfOnes(31)..., ), @@ -198,7 +200,7 @@ func TestComputeNewDifficultyHash(t *testing.T) { desc: "Relays Target << Relays EMA", numRelaysTarget: 50, relaysEma: 1000, - expectedDifficultyHash: append( + expectedRelayDifficultyHash: append( []byte{0b00001111}, makeBytesFullOfOnes(31)..., ), @@ -207,7 +209,7 @@ func TestComputeNewDifficultyHash(t *testing.T) { desc: "Relays Target << Relays EMA", numRelaysTarget: 50, relaysEma: 10000, - expectedDifficultyHash: append( + expectedRelayDifficultyHash: append( []byte{0b00000001}, makeBytesFullOfOnes(31)..., ), @@ -216,22 +218,24 @@ func TestComputeNewDifficultyHash(t *testing.T) { for _, tt := range tests { t.Run(tt.desc, func(t *testing.T) { - result := keeper.ComputeNewDifficultyTargetHash(prooftypes.DefaultRelayDifficultyTargetHash, tt.numRelaysTarget, tt.relaysEma) + newRelayDifficultyTargetHash := keeper.ComputeNewDifficultyTargetHash(prooftypes.DefaultRelayDifficultyTargetHash, tt.numRelaysTarget, tt.relaysEma) - require.Lessf(t, bytes.Compare(result, tt.expectedDifficultyHash), 1, + // NB: An increase in difficulty is indicated by a decrease 
in the target hash + didDifficultyIncrease := bytes.Compare(newRelayDifficultyTargetHash, tt.expectedRelayDifficultyHash) < 1 + require.True(t, didDifficultyIncrease, "expected difficulty.TargetHash (%x) to be less than or equal to expectedRelayMiningDifficulty.TargetHash (%x)", - result, tt.expectedDifficultyHash, + newRelayDifficultyTargetHash, tt.expectedRelayDifficultyHash, ) }) } } func makeBytesFullOfOnes(length int) []byte { - result := make([]byte, length) - for i := range result { - result[i] = 0b11111111 + output := make([]byte, length) + for i := range output { + output[i] = 0b11111111 } - return result + return output } func defaultDifficulty() []byte { diff --git a/x/tokenomics/module/abci.go b/x/tokenomics/module/abci.go index 7bf320928..1f40f2895 100644 --- a/x/tokenomics/module/abci.go +++ b/x/tokenomics/module/abci.go @@ -14,6 +14,7 @@ import ( // EndBlocker called at every block and settles all pending claims. func EndBlocker(ctx sdk.Context, k keeper.Keeper) (err error) { logger := k.Logger().With("method", "EndBlocker") + // NB: There are two main reasons why we settle expiring claims in the end // instead of when a proof is submitted: // 1. Logic - Probabilistic proof allows claims to be settled (i.e. rewarded) @@ -26,7 +27,13 @@ func EndBlocker(ctx sdk.Context, k keeper.Keeper) (err error) { return err } - // Defer telemetry calls so that they reference the final values the relevant variables. + logger.Info(fmt.Sprintf( + "settled %d claims and expired %d claims", + settledResult.NumClaims, + expiredResult.NumClaims, + )) + + // Telemetry - defer telemetry calls so that they reference the final values the relevant variables. defer func() { telemetry.ClaimCounter( prooftypes.ClaimProofStage_SETTLED, @@ -62,12 +69,6 @@ func EndBlocker(ctx sdk.Context, k keeper.Keeper) (err error) { // TODO_IMPROVE(#observability): Add a counter for expired compute units. 
}() - logger.Info(fmt.Sprintf( - "settled %d claims and expired %d claims", - settledResult.NumClaims, - expiredResult.NumClaims, - )) - // Update the relay mining difficulty for every service that settled pending // claims based on how many estimated relays were serviced for it. difficultyPerServiceMap, err := k.UpdateRelayMiningDifficulty(ctx, settledResult.RelaysPerServiceMap) @@ -80,14 +81,16 @@ func EndBlocker(ctx sdk.Context, k keeper.Keeper) (err error) { len(settledResult.RelaysPerServiceMap), )) - // Emit telemetry for each service's relay mining difficulty. - for serviceId, newDifficulty := range difficultyPerServiceMap { - var newTargetHash [protocol.RelayHasherSize]byte - copy(newTargetHash[:], newDifficulty.TargetHash) + // Telemetry - emit telemetry for each service's relay mining difficulty. + for serviceId, newRelayMiningDifficulty := range difficultyPerServiceMap { + var newRelayMiningTargetHash [protocol.RelayHasherSize]byte + copy(newRelayMiningTargetHash[:], newRelayMiningDifficulty.TargetHash) - difficulty := protocol.GetDifficultyFromHash(newTargetHash) + // NB: The difficulty integer is just a human readable interpretation of + // the target hash and is not actually used for business logic. 
+ difficulty := protocol.GetDifficultyFromHash(newRelayMiningTargetHash) telemetry.RelayMiningDifficultyGauge(difficulty, serviceId) - telemetry.RelayEMAGauge(newDifficulty.NumRelaysEma, serviceId) + telemetry.RelayEMAGauge(newRelayMiningDifficulty.NumRelaysEma, serviceId) } return nil From 0147e9953bd54121e03f6160f72212cb98e80198 Mon Sep 17 00:00:00 2001 From: Daniel Olshansky Date: Tue, 23 Jul 2024 18:42:49 -0700 Subject: [PATCH 24/29] Finished self review --- .../keeper/msg_server_submit_proof_test.go | 3 +- .../keeper_settle_pending_claims_test.go | 36 ++++--------------- 2 files changed, 7 insertions(+), 32 deletions(-) diff --git a/x/proof/keeper/msg_server_submit_proof_test.go b/x/proof/keeper/msg_server_submit_proof_test.go index 991fa72ca..a9a8064f7 100644 --- a/x/proof/keeper/msg_server_submit_proof_test.go +++ b/x/proof/keeper/msg_server_submit_proof_test.go @@ -48,7 +48,6 @@ var ( func init() { // The CometBFT header hash is 32 bytes: https://docs.cometbft.com/main/spec/core/data_structures blockHeaderHash = make([]byte, 32) - // expectedMerkleProofPath = protocol.GetPathForProof(blockHeaderHash, "TODO_BLOCKER_session_id_currently_unused") } func TestMsgServer_SubmitProof_Success(t *testing.T) { @@ -290,7 +289,7 @@ func TestMsgServer_SubmitProof_Error_OutsideOfWindow(t *testing.T) { // Submit the corresponding proof. 
numRelays := uint(5) - sessionTree := NewFilledSessionTree( + sessionTree := testtree.NewFilledSessionTree( ctx, t, numRelays, supplierUid, supplierAddr, diff --git a/x/tokenomics/keeper/keeper_settle_pending_claims_test.go b/x/tokenomics/keeper/keeper_settle_pending_claims_test.go index dfac9acf6..af32465c5 100644 --- a/x/tokenomics/keeper/keeper_settle_pending_claims_test.go +++ b/x/tokenomics/keeper/keeper_settle_pending_claims_test.go @@ -2,7 +2,6 @@ package keeper_test import ( "context" - "fmt" "testing" "cosmossdk.io/depinject" @@ -58,10 +57,8 @@ func (s *TestSuite) SetupTest() { t := s.T() s.keepers, s.ctx = keepertest.NewTokenomicsModuleKeepers(s.T(), nil) - sdkCtx := cosmostypes.UnwrapSDKContext(s.ctx) - sdkCtx = sdkCtx.WithBlockHeight(1) - s.sdkCtx = sdkCtx - s.ctx = sdkCtx + s.sdkCtx = cosmostypes.UnwrapSDKContext(s.ctx).WithBlockHeight(1) + s.ctx = s.sdkCtx // Construct a keyring to hold the keypairs for the accounts used in the test. keyRing := keyring.NewInMemory(s.keepers.Codec) @@ -86,24 +83,6 @@ func (s *TestSuite) SetupTest() { preGeneratedAccts, ).String() - /* - // Prepare supplier account - supplierAddr, supplierPubKey := sample.AccAddressAndPubKey() - supplierAccAddr, err := sdk.AccAddressFromBech32(supplierAddr) - require.NoError(t, err) - supplierAcc := s.keepers.NewAccountWithAddress(s.ctx, supplierAccAddr) - supplierAcc.SetPubKey(supplierPubKey) - s.keepers.SetAccount(s.ctx, supplierAcc) - - // Prepare application account - appAddr, appPubKey := sample.AccAddressAndPubKey() - appAccAddr, err := sdk.AccAddressFromBech32(appAddr) - require.NoError(t, err) - appAcc := s.keepers.NewAccountWithAddress(s.ctx, appAccAddr) - appAcc.SetPubKey(appPubKey) - s.keepers.SetAccount(s.ctx, appAcc) - */ - service := &sharedtypes.Service{Id: testServiceId} supplierStake := types.NewCoin("upokt", math.NewInt(1000000)) @@ -143,7 +122,7 @@ func (s *TestSuite) SetupTest() { )) require.NoError(t, err) - // Construct a valid session tree with 5 relays. 
+ // Construct a valid session tree with 10 relays. numRelays := uint(10) sessionTree := testtree.NewFilledSessionTree( s.ctx, t, @@ -166,10 +145,9 @@ func (s *TestSuite) SetupTest() { blockHeaderHash, supplierAddr, ) - sdkCtx = cosmostypes.UnwrapSDKContext(s.ctx) - sdkCtx = sdkCtx.WithBlockHeight(claimMsgHeight).WithHeaderHash(blockHeaderHash) - s.ctx = sdkCtx - s.sdkCtx = sdkCtx + s.sdkCtx = cosmostypes.UnwrapSDKContext(s.ctx).WithBlockHeight(claimMsgHeight).WithHeaderHash(blockHeaderHash + s.ctx = s.sdkCtx + merkleRootBz, err := sessionTree.Flush() require.NoError(t, err) @@ -278,7 +256,6 @@ func (s *TestSuite) TestSettlePendingClaims_ClaimExpired_ProofRequiredAndNotProv events, "poktroll.tokenomics.EventClaimExpired") require.Len(t, expectedEvents, 1) - fmt.Println("expectedEvents", expectedEvents) // Validate the event expectedEvent := expectedEvents[0] require.Equal(t, tokenomicstypes.ClaimExpirationReason_PROOF_MISSING, expectedEvent.GetExpirationReason()) @@ -330,7 +307,6 @@ func (s *TestSuite) TestSettlePendingClaims_ClaimExpired_ProofRequired_InvalidOn events, "poktroll.tokenomics.EventClaimExpired") require.Len(t, expectedEvents, 1) - fmt.Println("expectedEvents", expectedEvents) // Validate the event expectedEvent := expectedEvents[0] require.Equal(t, tokenomicstypes.ClaimExpirationReason_PROOF_INVALID, expectedEvent.GetExpirationReason()) From f7b4e5cfe42b70d41da92b5ccefb0691607f4788 Mon Sep 17 00:00:00 2001 From: Daniel Olshansky Date: Tue, 23 Jul 2024 19:04:03 -0700 Subject: [PATCH 25/29] Missing ) --- x/tokenomics/keeper/keeper_settle_pending_claims_test.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/x/tokenomics/keeper/keeper_settle_pending_claims_test.go b/x/tokenomics/keeper/keeper_settle_pending_claims_test.go index af32465c5..da3f3d799 100644 --- a/x/tokenomics/keeper/keeper_settle_pending_claims_test.go +++ b/x/tokenomics/keeper/keeper_settle_pending_claims_test.go @@ -145,10 +145,9 @@ func (s *TestSuite) SetupTest() { 
blockHeaderHash, supplierAddr, ) - s.sdkCtx = cosmostypes.UnwrapSDKContext(s.ctx).WithBlockHeight(claimMsgHeight).WithHeaderHash(blockHeaderHash + s.sdkCtx = cosmostypes.UnwrapSDKContext(s.ctx).WithBlockHeight(claimMsgHeight).WithHeaderHash(blockHeaderHash) s.ctx = s.sdkCtx - merkleRootBz, err := sessionTree.Flush() require.NoError(t, err) From 2d60b8d64611369d2745cd68fd3cbafa0e6ab722 Mon Sep 17 00:00:00 2001 From: Daniel Olshansky Date: Wed, 24 Jul 2024 13:50:29 -0700 Subject: [PATCH 26/29] Replied to some PR comments --- x/proof/keeper/proof_validation.go | 42 ++++++++++---------- x/proof/keeper/proof_validation_test.go | 7 ++-- x/tokenomics/keeper/settle_pending_claims.go | 8 ++-- x/tokenomics/types/expected_keepers.go | 2 +- 4 files changed, 28 insertions(+), 31 deletions(-) diff --git a/x/proof/keeper/proof_validation.go b/x/proof/keeper/proof_validation.go index 962495138..8faed76e5 100644 --- a/x/proof/keeper/proof_validation.go +++ b/x/proof/keeper/proof_validation.go @@ -40,7 +40,7 @@ import ( sessiontypes "github.com/pokt-network/poktroll/x/session/types" ) -// IsProofValid validates the proof submitted by the supplier is correct with +// EnsureValidProof validates the proof submitted by the supplier is correct with // respect to an on-chain claim. // // This function should be called during session settlement (i.e. EndBlocker) @@ -49,24 +49,24 @@ import ( // 2. Validators are the ones responsible for the heavy processing & validation during state transitions // 3. This creates an opportunity to slash suppliers who submit false proofs, whereas // they can keep retrying if it takes place in the SubmitProof handler. -func (k Keeper) IsProofValid( +func (k Keeper) EnsureValidProof( ctx context.Context, proof *types.Proof, -) (valid bool, err error) { +) error { logger := k.Logger().With("method", "ValidateProof") // Retrieve the supplier's public key. 
supplierAddr := proof.SupplierAddress supplierPubKey, err := k.accountQuerier.GetPubKeyFromAddress(ctx, supplierAddr) if err != nil { - return false, err + return err } // Validate the session header. var onChainSession *sessiontypes.Session onChainSession, err = k.queryAndValidateSessionHeader(ctx, proof.SessionHeader, supplierAddr) if err != nil { - return false, err + return err } logger.Info("queried and validated the session header") @@ -78,17 +78,17 @@ func (k Keeper) IsProofValid( // Validate proof message commit height is within the respective session's // proof submission window using the on-chain session header. if err = k.validateProofWindow(ctx, sessionHeader, supplierAddr); err != nil { - return false, err + return err } if proof.ClosestMerkleProof == nil || len(proof.ClosestMerkleProof) == 0 { - return false, types.ErrProofInvalidProof.Wrap("proof cannot be empty") + return types.ErrProofInvalidProof.Wrap("proof cannot be empty") } // Unmarshal the closest merkle proof from the message. sparseMerkleClosestProof := &smt.SparseMerkleClosestProof{} if err = sparseMerkleClosestProof.Unmarshal(proof.ClosestMerkleProof); err != nil { - return false, types.ErrProofInvalidProof.Wrapf( + return types.ErrProofInvalidProof.Wrapf( "failed to unmarshal closest merkle proof: %s", err, ) @@ -100,7 +100,7 @@ func (k Keeper) IsProofValid( relayBz := sparseMerkleClosestProof.GetValueHash(&protocol.SmtSpec) relay := &servicetypes.Relay{} if err = k.cdc.Unmarshal(relayBz, relay); err != nil { - return false, types.ErrProofInvalidRelay.Wrapf( + return types.ErrProofInvalidRelay.Wrapf( "failed to unmarshal relay: %s", err, ) @@ -109,44 +109,44 @@ func (k Keeper) IsProofValid( // Basic validation of the relay request. relayReq := relay.GetReq() if err = relayReq.ValidateBasic(); err != nil { - return false, err + return err } logger.Debug("successfully validated relay request") // Make sure that the supplier address in the proof matches the one in the relay request. 
if supplierAddr != relayReq.Meta.SupplierAddress { - return false, types.ErrProofSupplierMismatch.Wrapf("supplier type mismatch") + return types.ErrProofSupplierMismatch.Wrapf("supplier type mismatch") } logger.Debug("the proof supplier address matches the relay request supplier address") // Basic validation of the relay response. relayRes := relay.GetRes() if err = relayRes.ValidateBasic(); err != nil { - return false, err + return err } logger.Debug("successfully validated relay response") // Verify that the relay request session header matches the proof session header. if err = compareSessionHeaders(sessionHeader, relayReq.Meta.GetSessionHeader()); err != nil { - return false, err + return err } logger.Debug("successfully compared relay request session header") // Verify that the relay response session header matches the proof session header. if err = compareSessionHeaders(sessionHeader, relayRes.Meta.GetSessionHeader()); err != nil { - return false, err + return err } logger.Debug("successfully compared relay response session header") // Verify the relay request's signature. if err = k.ringClient.VerifyRelayRequestSignature(ctx, relayReq); err != nil { - return false, err + return err } logger.Debug("successfully verified relay request signature") // Verify the relay response's signature. if err = relayRes.VerifySupplierSignature(supplierPubKey); err != nil { - return false, err + return err } logger.Debug("successfully verified relay response signature") @@ -164,7 +164,7 @@ func (k Keeper) IsProofValid( relayDifficultyTargetHash, sessionHeader.Service.Id, ); err != nil { - return false, err + return err } logger.Debug("successfully validated relay mining difficulty") @@ -176,7 +176,7 @@ func (k Keeper) IsProofValid( sessionHeader, supplierAddr, ); err != nil { - return false, err + return err } logger.Debug("successfully validated proof path") @@ -184,18 +184,18 @@ func (k Keeper) IsProofValid( // used in the proof validation below. 
claim, err := k.queryAndValidateClaimForProof(ctx, sessionHeader, supplierAddr) if err != nil { - return false, err + return err } logger.Debug("successfully retrieved and validated claim") // Verify the proof's closest merkle proof. if err = verifyClosestProof(sparseMerkleClosestProof, claim.GetRootHash()); err != nil { - return false, err + return err } logger.Debug("successfully verified closest merkle proof") - return true, nil + return nil } // validateClosestPath ensures that the proof's path matches the expected path. diff --git a/x/proof/keeper/proof_validation_test.go b/x/proof/keeper/proof_validation_test.go index 70a0cc87c..308a5ff23 100644 --- a/x/proof/keeper/proof_validation_test.go +++ b/x/proof/keeper/proof_validation_test.go @@ -26,7 +26,7 @@ import ( sharedtypes "github.com/pokt-network/poktroll/x/shared/types" ) -func TestIsProofValid_Error(t *testing.T) { +func TestEnsureValidProof_Error(t *testing.T) { opts := []keepertest.ProofKeepersOpt{ // Set block hash such that on-chain closest merkle proof validation // uses the expected path. @@ -267,7 +267,7 @@ func TestIsProofValid_Error(t *testing.T) { ), }, { - desc: "merkle proof must be deserializabled", + desc: "merkle proof must be deserializable", newProof: func(t *testing.T) *prooftypes.Proof { // Construct new proof message. proof := testtree.NewProof(t, @@ -763,9 +763,8 @@ func TestIsProofValid_Error(t *testing.T) { // Advance the block height to the earliest proof commit height. 
 			ctx = keepertest.SetBlockHeight(ctx, earliestSupplierProofCommitHeight)
 
-			isProofValid, err := keepers.IsProofValid(ctx, proof)
+			err := keepers.EnsureValidProof(ctx, proof)
 			require.ErrorContains(t, err, test.expectedErr.Error())
-			require.False(t, isProofValid)
 		})
 	}
 }
diff --git a/x/tokenomics/keeper/settle_pending_claims.go b/x/tokenomics/keeper/settle_pending_claims.go
index 037434797..044d3b6bf 100644
--- a/x/tokenomics/keeper/settle_pending_claims.go
+++ b/x/tokenomics/keeper/settle_pending_claims.go
@@ -82,16 +82,14 @@ func (k Keeper) SettlePendingClaims(ctx sdk.Context) (
 		proofIsRequired := (proofRequirement != prooftypes.ProofRequirementReason_NOT_REQUIRED)
 		if proofIsRequired {
-			var expirationReason types.ClaimExpirationReason = types.ClaimExpirationReason_EXPIRATION_REASON_UNSPECIFIED // EXPIRATION_REASON_UNSPECIFIED is the default
+			expirationReason := types.ClaimExpirationReason_EXPIRATION_REASON_UNSPECIFIED // EXPIRATION_REASON_UNSPECIFIED is the default
+
 			if isProofFound {
-				var isProofValid bool
-				isProofValid, err = k.proofKeeper.IsProofValid(ctx, &proof)
-				if !isProofValid || err != nil {
+				if err = k.proofKeeper.EnsureValidProof(ctx, &proof); err != nil {
 					logger.Warn(fmt.Sprintf("Proof was found but is invalid due to %v", err))
 					expirationReason = types.ClaimExpirationReason_PROOF_INVALID
 				}
 			} else {
-				// Should claim expire because proof is required but unavailable?
expirationReason = types.ClaimExpirationReason_PROOF_MISSING } diff --git a/x/tokenomics/types/expected_keepers.go b/x/tokenomics/types/expected_keepers.go index eb06daa7a..26babd982 100644 --- a/x/tokenomics/types/expected_keepers.go +++ b/x/tokenomics/types/expected_keepers.go @@ -51,7 +51,7 @@ type ProofKeeper interface { RemoveProof(ctx context.Context, sessionId, supplierAddr string) AllClaims(ctx context.Context, req *prooftypes.QueryAllClaimsRequest) (*prooftypes.QueryAllClaimsResponse, error) - IsProofValid(ctx context.Context, proof *prooftypes.Proof) (valid bool, err error) + EnsureValidProof(ctx context.Context, proof *prooftypes.Proof) error // Only used for testing & simulation GetAllProofs(ctx context.Context) []prooftypes.Proof From e2aa6fdd55aea61ad453971f4048172134d062d9 Mon Sep 17 00:00:00 2001 From: Daniel Olshansky Date: Wed, 24 Jul 2024 14:05:19 -0700 Subject: [PATCH 27/29] Reply to all review comments --- x/proof/keeper/msg_server_create_claim.go | 4 +--- x/proof/keeper/msg_server_submit_proof.go | 16 +++++++++++----- x/proof/keeper/proof_validation.go | 5 +++++ x/proof/keeper/proof_validation_test.go | 9 +-------- x/tokenomics/keeper/settle_pending_claims.go | 1 + 5 files changed, 19 insertions(+), 16 deletions(-) diff --git a/x/proof/keeper/msg_server_create_claim.go b/x/proof/keeper/msg_server_create_claim.go index afe0b41db..1e9dc8b52 100644 --- a/x/proof/keeper/msg_server_create_claim.go +++ b/x/proof/keeper/msg_server_create_claim.go @@ -10,7 +10,6 @@ import ( "github.com/pokt-network/poktroll/telemetry" "github.com/pokt-network/poktroll/x/proof/types" - sessiontypes "github.com/pokt-network/poktroll/x/session/types" sharedtypes "github.com/pokt-network/poktroll/x/shared/types" ) @@ -47,8 +46,7 @@ func (k msgServer) CreateClaim( logger.Info("validated the createClaim message") // Compare msg session header w/ on-chain session header. 
- var session *sessiontypes.Session - session, err = k.queryAndValidateSessionHeader(ctx, msg.GetSessionHeader(), msg.GetSupplierAddress()) + session, err := k.queryAndValidateSessionHeader(ctx, msg.GetSessionHeader(), msg.GetSupplierAddress()) if err != nil { return nil, status.Error(codes.InvalidArgument, err.Error()) } diff --git a/x/proof/keeper/msg_server_submit_proof.go b/x/proof/keeper/msg_server_submit_proof.go index f63974b08..2d95b1dff 100644 --- a/x/proof/keeper/msg_server_submit_proof.go +++ b/x/proof/keeper/msg_server_submit_proof.go @@ -14,15 +14,22 @@ import ( "github.com/pokt-network/poktroll/telemetry" "github.com/pokt-network/poktroll/x/proof/types" - sessiontypes "github.com/pokt-network/poktroll/x/session/types" sharedtypes "github.com/pokt-network/poktroll/x/shared/types" ) // SubmitProof is the server handler to submit and store a proof on-chain. // A proof that's stored on-chain is what leads to rewards (i.e. inflation) -// downstream, making the series of checks a critical part of the protocol. +// downstream, making this a critical part of the protocol. // -// Note: The entity sending the SubmitProof messages does not necessarily need +// Note that the validation of the proof is done in `EnsureValidProof`. However, +// preliminary checks are done in the handler to prevent sybil or DoS attacks on +// full nodes because storing and validating proofs is expensive. +// +// We are playing a balance of security and efficiency here, where enough validation +// is done on proof submission, and exhaustive validation is done during session +// settlement. +// +// The entity sending the SubmitProof messages does not necessarily need // to correspond to the supplier signing the proof. For example, a single entity // could (theoretically) batch multiple proofs (signed by the corresponding supplier) // into one transaction to save on transaction fees. 
@@ -59,8 +66,7 @@ func (k msgServer) SubmitProof(
 	logger.Info("validated the submitProof message")
 
 	// Compare msg session header w/ on-chain session header.
-	var session *sessiontypes.Session
-	session, err = k.queryAndValidateSessionHeader(ctx, msg.GetSessionHeader(), msg.GetSupplierAddress())
+	session, err := k.queryAndValidateSessionHeader(ctx, msg.GetSessionHeader(), msg.GetSupplierAddress())
 	if err != nil {
 		return nil, status.Error(codes.InvalidArgument, err.Error())
 	}
diff --git a/x/proof/keeper/proof_validation.go b/x/proof/keeper/proof_validation.go
index 8faed76e5..55d37fcac 100644
--- a/x/proof/keeper/proof_validation.go
+++ b/x/proof/keeper/proof_validation.go
@@ -49,6 +49,11 @@ import (
 // 2. Validators are the ones responsible for the heavy processing & validation during state transitions
 // 3. This creates an opportunity to slash suppliers who submit false proofs, whereas
 // they can keep retrying if it takes place in the SubmitProof handler.
+//
+// Note that some of the validation here is redundant with the validation done in
+// SubmitProof (in the handler). The reason for this is that we are trying
+// to find a balance between preventing sybil or DoS attacks on full nodes
+// during proof submission, but being completely exhaustive in all the checks done here.
func (k Keeper) EnsureValidProof( ctx context.Context, proof *types.Proof, diff --git a/x/proof/keeper/proof_validation_test.go b/x/proof/keeper/proof_validation_test.go index 308a5ff23..1a4707ec5 100644 --- a/x/proof/keeper/proof_validation_test.go +++ b/x/proof/keeper/proof_validation_test.go @@ -586,7 +586,7 @@ func TestEnsureValidProof_Error(t *testing.T) { ), }, { - desc: "relay difficulty must be greater than or equal to minimum (zero difficulty)", + desc: "relay difficulty must be greater than or equal to a high difficulty (low target hash)", newProof: func(t *testing.T) *prooftypes.Proof { // Set the minimum relay difficulty to a non-zero value such that the relays // constructed by the test helpers have a negligible chance of being valid. @@ -617,13 +617,6 @@ func TestEnsureValidProof_Error(t *testing.T) { validSessionHeader.Service.Id, ), }, - { - desc: "relay difficulty must be greater than or equal to minimum (non-zero difficulty)", - newProof: func(t *testing.T) *prooftypes.Proof { - t.Skip("TODO_TECHDEBT(@bryanchriswhite): Implement this") - return nil - }, - }, { desc: "claim must exist for proof message", newProof: func(t *testing.T) *prooftypes.Proof { diff --git a/x/tokenomics/keeper/settle_pending_claims.go b/x/tokenomics/keeper/settle_pending_claims.go index 044d3b6bf..65522a6b9 100644 --- a/x/tokenomics/keeper/settle_pending_claims.go +++ b/x/tokenomics/keeper/settle_pending_claims.go @@ -102,6 +102,7 @@ func (k Keeper) SettlePendingClaims(ctx sdk.Context) ( NumComputeUnits: numClaimComputeUnits, NumRelays: numRelaysInSessionTree, ExpirationReason: expirationReason, + // TODO_CONSIDERATION: Add the error to the event if the proof was invalid. 
} if err = ctx.EventManager().EmitTypedEvent(&claimExpiredEvent); err != nil { return settledResult, expiredResult, err From a4d28e36518bbcad0b651c15bedf1ac785eec144 Mon Sep 17 00:00:00 2001 From: Daniel Olshansky Date: Wed, 24 Jul 2024 14:12:13 -0700 Subject: [PATCH 28/29] Fix unit test --- .../keeper/keeper_settle_pending_claims_test.go | 15 ++++----------- x/tokenomics/keeper/settle_pending_claims.go | 2 +- 2 files changed, 5 insertions(+), 12 deletions(-) diff --git a/x/tokenomics/keeper/keeper_settle_pending_claims_test.go b/x/tokenomics/keeper/keeper_settle_pending_claims_test.go index 8dbe0bca6..95d12614e 100644 --- a/x/tokenomics/keeper/keeper_settle_pending_claims_test.go +++ b/x/tokenomics/keeper/keeper_settle_pending_claims_test.go @@ -379,21 +379,14 @@ func (s *TestSuite) TestClaimSettlement_ClaimSettled_ProofRequiredAndProvided_Vi }) require.NoError(t, err) - // Create a claim that requires a proof - claim := s.claim - - // 0. Add the claim & verify it exists - s.keepers.UpsertClaim(ctx, claim) - claims := s.keepers.GetAllClaims(ctx) - s.Require().Len(claims, 1) - - // Upsert the proof + // Upsert the claim & proof + s.keepers.UpsertClaim(ctx, s.claim) s.keepers.UpsertProof(ctx, s.proof) // Settle pending claims after proof window closes // Expectation: All (1) claims should be claimed. // NB: proof window has definitely closed at this point - sessionEndHeight := claim.SessionHeader.SessionEndBlockHeight + sessionEndHeight := s.claim.SessionHeader.SessionEndBlockHeight blockHeight := shared.GetProofWindowCloseHeight(&sharedParams, sessionEndHeight) sdkCtx = sdkCtx.WithBlockHeight(blockHeight) settledResult, expiredResult, err := s.keepers.SettlePendingClaims(sdkCtx) @@ -405,7 +398,7 @@ func (s *TestSuite) TestClaimSettlement_ClaimSettled_ProofRequiredAndProvided_Vi require.Equal(t, uint64(0), expiredResult.NumClaims) // Validate that no claims remain. 
- claims = s.keepers.GetAllClaims(ctx) + claims := s.keepers.GetAllClaims(ctx) require.Len(t, claims, 0) // Confirm an settlement event was emitted diff --git a/x/tokenomics/keeper/settle_pending_claims.go b/x/tokenomics/keeper/settle_pending_claims.go index 65522a6b9..e04dc65b5 100644 --- a/x/tokenomics/keeper/settle_pending_claims.go +++ b/x/tokenomics/keeper/settle_pending_claims.go @@ -85,7 +85,7 @@ func (k Keeper) SettlePendingClaims(ctx sdk.Context) ( expirationReason := types.ClaimExpirationReason_EXPIRATION_REASON_UNSPECIFIED // EXPIRATION_REASON_UNSPECIFIED is the default if isProofFound { - if err = k.proofKeeper.EnsureValidProof(ctx, &proof); err == nil { + if err = k.proofKeeper.EnsureValidProof(ctx, &proof); err != nil { logger.Warn(fmt.Sprintf("Proof was found but is invalid due to %v", err)) expirationReason = types.ClaimExpirationReason_PROOF_INVALID } From 9e8e8e8e26260a1268023a9401006d9fc6f9a5f0 Mon Sep 17 00:00:00 2001 From: Daniel Olshansky Date: Wed, 24 Jul 2024 20:50:56 -0700 Subject: [PATCH 29/29] Omit nil check --- x/proof/keeper/proof_validation.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x/proof/keeper/proof_validation.go b/x/proof/keeper/proof_validation.go index 55d37fcac..bd4afac74 100644 --- a/x/proof/keeper/proof_validation.go +++ b/x/proof/keeper/proof_validation.go @@ -159,7 +159,7 @@ func (k Keeper) EnsureValidProof( // TODO_FOLLOWUP(@olshansk, #690): Get the difficulty associated with the service params := k.GetParams(ctx) relayDifficultyTargetHash := params.RelayDifficultyTargetHash - if relayDifficultyTargetHash == nil || len(relayDifficultyTargetHash) == 0 { + if len(relayDifficultyTargetHash) == 0 { relayDifficultyTargetHash = types.DefaultRelayDifficultyTargetHash }