diff --git a/api/poktroll/tokenomics/event.pulsar.go b/api/poktroll/tokenomics/event.pulsar.go index 8ea69eae8..300c5cb18 100644 --- a/api/poktroll/tokenomics/event.pulsar.go +++ b/api/poktroll/tokenomics/event.pulsar.go @@ -20,6 +20,7 @@ var ( fd_EventClaimExpired_claim protoreflect.FieldDescriptor fd_EventClaimExpired_num_relays protoreflect.FieldDescriptor fd_EventClaimExpired_num_compute_units protoreflect.FieldDescriptor + fd_EventClaimExpired_expiration_reason protoreflect.FieldDescriptor ) func init() { @@ -28,6 +29,7 @@ func init() { fd_EventClaimExpired_claim = md_EventClaimExpired.Fields().ByName("claim") fd_EventClaimExpired_num_relays = md_EventClaimExpired.Fields().ByName("num_relays") fd_EventClaimExpired_num_compute_units = md_EventClaimExpired.Fields().ByName("num_compute_units") + fd_EventClaimExpired_expiration_reason = md_EventClaimExpired.Fields().ByName("expiration_reason") } var _ protoreflect.Message = (*fastReflection_EventClaimExpired)(nil) @@ -113,6 +115,12 @@ func (x *fastReflection_EventClaimExpired) Range(f func(protoreflect.FieldDescri return } } + if x.ExpirationReason != 0 { + value := protoreflect.ValueOfEnum((protoreflect.EnumNumber)(x.ExpirationReason)) + if !f(fd_EventClaimExpired_expiration_reason, value) { + return + } + } } // Has reports whether a field is populated. @@ -134,6 +142,8 @@ func (x *fastReflection_EventClaimExpired) Has(fd protoreflect.FieldDescriptor) return x.NumRelays != uint64(0) case "poktroll.tokenomics.EventClaimExpired.num_compute_units": return x.NumComputeUnits != uint64(0) + case "poktroll.tokenomics.EventClaimExpired.expiration_reason": + return x.ExpirationReason != 0 default: if fd.IsExtension() { panic(fmt.Errorf("proto3 declared messages do not support extensions: poktroll.tokenomics.EventClaimExpired")) @@ -156,6 +166,8 @@ func (x *fastReflection_EventClaimExpired) Clear(fd protoreflect.FieldDescriptor x.NumRelays = uint64(0) case "poktroll.tokenomics.EventClaimExpired.num_compute_units": x.NumComputeUnits = uint64(0) + case "poktroll.tokenomics.EventClaimExpired.expiration_reason": + x.ExpirationReason = 0 default: if fd.IsExtension() { panic(fmt.Errorf("proto3 declared messages do not support extensions: poktroll.tokenomics.EventClaimExpired")) @@ -181,6 +193,9 @@ func (x *fastReflection_EventClaimExpired) Get(descriptor protoreflect.FieldDesc case "poktroll.tokenomics.EventClaimExpired.num_compute_units": value := x.NumComputeUnits return protoreflect.ValueOfUint64(value) + case "poktroll.tokenomics.EventClaimExpired.expiration_reason": + value := x.ExpirationReason + return protoreflect.ValueOfEnum((protoreflect.EnumNumber)(value)) default: if descriptor.IsExtension() { panic(fmt.Errorf("proto3 declared messages do not support extensions: poktroll.tokenomics.EventClaimExpired")) @@ -207,6 +222,8 @@ func (x *fastReflection_EventClaimExpired) Set(fd protoreflect.FieldDescriptor, x.NumRelays = value.Uint() case "poktroll.tokenomics.EventClaimExpired.num_compute_units": x.NumComputeUnits = value.Uint() + case "poktroll.tokenomics.EventClaimExpired.expiration_reason": + x.ExpirationReason = (ClaimExpirationReason)(value.Enum()) default: if fd.IsExtension() { panic(fmt.Errorf("proto3 declared messages do not support extensions: poktroll.tokenomics.EventClaimExpired")) @@ -236,6 +253,8 @@ func (x *fastReflection_EventClaimExpired) Mutable(fd protoreflect.FieldDescript panic(fmt.Errorf("field num_relays of message poktroll.tokenomics.EventClaimExpired is not mutable")) case 
"poktroll.tokenomics.EventClaimExpired.num_compute_units": panic(fmt.Errorf("field num_compute_units of message poktroll.tokenomics.EventClaimExpired is not mutable")) + case "poktroll.tokenomics.EventClaimExpired.expiration_reason": + panic(fmt.Errorf("field expiration_reason of message poktroll.tokenomics.EventClaimExpired is not mutable")) default: if fd.IsExtension() { panic(fmt.Errorf("proto3 declared messages do not support extensions: poktroll.tokenomics.EventClaimExpired")) @@ -256,6 +275,8 @@ func (x *fastReflection_EventClaimExpired) NewField(fd protoreflect.FieldDescrip return protoreflect.ValueOfUint64(uint64(0)) case "poktroll.tokenomics.EventClaimExpired.num_compute_units": return protoreflect.ValueOfUint64(uint64(0)) + case "poktroll.tokenomics.EventClaimExpired.expiration_reason": + return protoreflect.ValueOfEnum(0) default: if fd.IsExtension() { panic(fmt.Errorf("proto3 declared messages do not support extensions: poktroll.tokenomics.EventClaimExpired")) @@ -335,6 +356,9 @@ func (x *fastReflection_EventClaimExpired) ProtoMethods() *protoiface.Methods { if x.NumComputeUnits != 0 { n += 1 + runtime.Sov(uint64(x.NumComputeUnits)) } + if x.ExpirationReason != 0 { + n += 1 + runtime.Sov(uint64(x.ExpirationReason)) + } if x.unknownFields != nil { n += len(x.unknownFields) } @@ -364,6 +388,11 @@ func (x *fastReflection_EventClaimExpired) ProtoMethods() *protoiface.Methods { i -= len(x.unknownFields) copy(dAtA[i:], x.unknownFields) } + if x.ExpirationReason != 0 { + i = runtime.EncodeVarint(dAtA, i, uint64(x.ExpirationReason)) + i-- + dAtA[i] = 0x20 + } if x.NumComputeUnits != 0 { i = runtime.EncodeVarint(dAtA, i, uint64(x.NumComputeUnits)) i-- @@ -511,6 +540,25 @@ func (x *fastReflection_EventClaimExpired) ProtoMethods() *protoiface.Methods { break } } + case 4: + if wireType != 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field ExpirationReason", wireType) + } + x.ExpirationReason = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + x.ExpirationReason |= ClaimExpirationReason(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := runtime.Skip(dAtA[iNdEx:]) @@ -2360,6 +2408,55 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +type ClaimExpirationReason int32 + +const ( + ClaimExpirationReason_EXPIRATION_REASON_UNSPECIFIED ClaimExpirationReason = 0 // Default value, means may be valid + ClaimExpirationReason_PROOF_MISSING ClaimExpirationReason = 1 + ClaimExpirationReason_PROOF_INVALID ClaimExpirationReason = 2 +) + +// Enum value maps for ClaimExpirationReason. 
+var ( + ClaimExpirationReason_name = map[int32]string{ + 0: "EXPIRATION_REASON_UNSPECIFIED", + 1: "PROOF_MISSING", + 2: "PROOF_INVALID", + } + ClaimExpirationReason_value = map[string]int32{ + "EXPIRATION_REASON_UNSPECIFIED": 0, + "PROOF_MISSING": 1, + "PROOF_INVALID": 2, + } +) + +func (x ClaimExpirationReason) Enum() *ClaimExpirationReason { + p := new(ClaimExpirationReason) + *p = x + return p +} + +func (x ClaimExpirationReason) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ClaimExpirationReason) Descriptor() protoreflect.EnumDescriptor { + return file_poktroll_tokenomics_event_proto_enumTypes[0].Descriptor() +} + +func (ClaimExpirationReason) Type() protoreflect.EnumType { + return &file_poktroll_tokenomics_event_proto_enumTypes[0] +} + +func (x ClaimExpirationReason) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ClaimExpirationReason.Descriptor instead. +func (ClaimExpirationReason) EnumDescriptor() ([]byte, []int) { + return file_poktroll_tokenomics_event_proto_rawDescGZIP(), []int{0} +} + // EventClaimExpired is an event emitted during settlement whenever a claim requiring // an on-chain proof doesn't have one. The claim cannot be settled, leading to that work // never being rewarded. @@ -2368,9 +2465,11 @@ type EventClaimExpired struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Claim *proof.Claim `protobuf:"bytes,1,opt,name=claim,proto3" json:"claim,omitempty"` - NumRelays uint64 `protobuf:"varint,2,opt,name=num_relays,json=numRelays,proto3" json:"num_relays,omitempty"` - NumComputeUnits uint64 `protobuf:"varint,3,opt,name=num_compute_units,json=numComputeUnits,proto3" json:"num_compute_units,omitempty"` + Claim *proof.Claim `protobuf:"bytes,1,opt,name=claim,proto3" json:"claim,omitempty"` + // TODO_MAINNET: Shold we include the proof here too? + NumRelays uint64 `protobuf:"varint,2,opt,name=num_relays,json=numRelays,proto3" json:"num_relays,omitempty"` + NumComputeUnits uint64 `protobuf:"varint,3,opt,name=num_compute_units,json=numComputeUnits,proto3" json:"num_compute_units,omitempty"` + ExpirationReason ClaimExpirationReason `protobuf:"varint,4,opt,name=expiration_reason,json=expirationReason,proto3,enum=poktroll.tokenomics.ClaimExpirationReason" json:"expiration_reason,omitempty"` } func (x *EventClaimExpired) Reset() { @@ -2414,6 +2513,13 @@ func (x *EventClaimExpired) GetNumComputeUnits() uint64 { return 0 } +func (x *EventClaimExpired) GetExpirationReason() ClaimExpirationReason { + if x != nil { + return x.ExpirationReason + } + return ClaimExpirationReason_EXPIRATION_REASON_UNSPECIFIED +} + // EventClaimSettled is an event emitted whenever a claim is settled. 
// The proof_required determines whether the claim requires a proof that has been submitted or not type EventClaimSettled struct { @@ -2610,7 +2716,7 @@ var file_poktroll_tokenomics_event_proto_rawDesc = []byte{ 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x2f, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x2f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xbd, 0x01, 0x0a, 0x11, 0x45, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xad, 0x02, 0x0a, 0x11, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x64, 0x12, 0x36, 0x0a, 0x05, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x6f, 0x66, @@ -2622,7 +2728,14 @@ var file_poktroll_tokenomics_event_proto_rawDesc = []byte{ 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x42, 0x15, 0xea, 0xde, 0x1f, 0x11, 0x6e, 0x75, 0x6d, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x52, 0x0f, 0x6e, 0x75, 0x6d, 0x43, 0x6f, - 0x6d, 0x70, 0x75, 0x74, 0x65, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x22, 0xa9, 0x02, 0x0a, 0x11, 0x45, + 0x6d, 0x70, 0x75, 0x74, 0x65, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x12, 0x6e, 0x0a, 0x11, 0x65, 0x78, + 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, + 0x2e, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x63, 0x73, 0x2e, 0x43, 0x6c, 0x61, 0x69, + 0x6d, 0x45, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x61, 0x73, 0x6f, + 0x6e, 0x42, 0x15, 0xea, 0xde, 0x1f, 0x11, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x52, 0x10, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x22, 0xa9, 0x02, 0x0a, 0x11, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x53, 0x65, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x12, 0x36, 0x0a, 0x05, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x6f, 0x66, @@ -2672,19 +2785,25 @@ var file_poktroll_tokenomics_event_proto_rawDesc = []byte{ 0x75, 0x72, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x43, 0x6f, 0x69, 0x6e, 0x52, 0x0d, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x42, - 0x75, 0x72, 0x6e, 0x42, 0xb8, 0x01, 0x0a, 0x17, 0x63, 0x6f, 0x6d, 0x2e, 0x70, 0x6f, 0x6b, 0x74, - 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x63, 0x73, 0x42, - 0x0a, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x24, 0x63, - 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x73, 0x64, 0x6b, 0x2e, 0x69, 0x6f, 0x2f, 0x61, 0x70, 0x69, 0x2f, - 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x6f, 0x6d, - 0x69, 0x63, 0x73, 0xa2, 0x02, 0x03, 0x50, 0x54, 0x58, 0xaa, 0x02, 0x13, 0x50, 0x6f, 0x6b, 0x74, - 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x6f, 
0x6d, 0x69, 0x63, 0x73, 0xca, - 0x02, 0x13, 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x5c, 0x54, 0x6f, 0x6b, 0x65, 0x6e, - 0x6f, 0x6d, 0x69, 0x63, 0x73, 0xe2, 0x02, 0x1f, 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, - 0x5c, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x63, 0x73, 0x5c, 0x47, 0x50, 0x42, 0x4d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x14, 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f, - 0x6c, 0x6c, 0x3a, 0x3a, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x63, 0x73, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x75, 0x72, 0x6e, 0x2a, 0x60, 0x0a, 0x15, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x45, 0x78, 0x70, 0x69, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x1d, + 0x45, 0x58, 0x50, 0x49, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x45, 0x41, 0x53, 0x4f, + 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x11, 0x0a, 0x0d, 0x50, 0x52, 0x4f, 0x4f, 0x46, 0x5f, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4e, 0x47, + 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x50, 0x52, 0x4f, 0x4f, 0x46, 0x5f, 0x49, 0x4e, 0x56, 0x41, + 0x4c, 0x49, 0x44, 0x10, 0x02, 0x42, 0xb8, 0x01, 0x0a, 0x17, 0x63, 0x6f, 0x6d, 0x2e, 0x70, 0x6f, + 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x63, + 0x73, 0x42, 0x0a, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x24, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x73, 0x64, 0x6b, 0x2e, 0x69, 0x6f, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x70, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x6f, 0x6d, 0x69, 0x63, 0x73, 0xa2, 0x02, 0x03, 0x50, 0x54, 0x58, 0xaa, 0x02, 0x13, 0x50, 0x6f, + 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x2e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x63, + 0x73, 0xca, 0x02, 0x13, 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x5c, 0x54, 0x6f, 0x6b, + 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x63, 0x73, 0xe2, 0x02, 0x1f, 0x50, 0x6f, 0x6b, 0x74, 0x72, 0x6f, + 0x6c, 0x6c, 0x5c, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x63, 0x73, 0x5c, 0x47, 0x50, + 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x14, 0x50, 0x6f, 0x6b, 0x74, + 0x72, 0x6f, 0x6c, 0x6c, 0x3a, 0x3a, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x63, 0x73, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -2699,27 +2818,30 @@ func file_poktroll_tokenomics_event_proto_rawDescGZIP() []byte { return file_poktroll_tokenomics_event_proto_rawDescData } +var file_poktroll_tokenomics_event_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_poktroll_tokenomics_event_proto_msgTypes = make([]protoimpl.MessageInfo, 4) var file_poktroll_tokenomics_event_proto_goTypes = []interface{}{ - (*EventClaimExpired)(nil), // 0: poktroll.tokenomics.EventClaimExpired - (*EventClaimSettled)(nil), // 1: poktroll.tokenomics.EventClaimSettled - (*EventRelayMiningDifficultyUpdated)(nil), // 2: poktroll.tokenomics.EventRelayMiningDifficultyUpdated - (*EventApplicationOverserviced)(nil), // 3: poktroll.tokenomics.EventApplicationOverserviced - (*proof.Claim)(nil), // 4: poktroll.proof.Claim - (proof.ProofRequirementReason)(0), // 5: poktroll.proof.ProofRequirementReason - (*v1beta1.Coin)(nil), // 6: cosmos.base.v1beta1.Coin + (ClaimExpirationReason)(0), // 0: poktroll.tokenomics.ClaimExpirationReason + (*EventClaimExpired)(nil), // 1: poktroll.tokenomics.EventClaimExpired + (*EventClaimSettled)(nil), // 2: poktroll.tokenomics.EventClaimSettled + 
(*EventRelayMiningDifficultyUpdated)(nil), // 3: poktroll.tokenomics.EventRelayMiningDifficultyUpdated + (*EventApplicationOverserviced)(nil), // 4: poktroll.tokenomics.EventApplicationOverserviced + (*proof.Claim)(nil), // 5: poktroll.proof.Claim + (proof.ProofRequirementReason)(0), // 6: poktroll.proof.ProofRequirementReason + (*v1beta1.Coin)(nil), // 7: cosmos.base.v1beta1.Coin } var file_poktroll_tokenomics_event_proto_depIdxs = []int32{ - 4, // 0: poktroll.tokenomics.EventClaimExpired.claim:type_name -> poktroll.proof.Claim - 4, // 1: poktroll.tokenomics.EventClaimSettled.claim:type_name -> poktroll.proof.Claim - 5, // 2: poktroll.tokenomics.EventClaimSettled.proof_requirement:type_name -> poktroll.proof.ProofRequirementReason - 6, // 3: poktroll.tokenomics.EventApplicationOverserviced.expected_burn:type_name -> cosmos.base.v1beta1.Coin - 6, // 4: poktroll.tokenomics.EventApplicationOverserviced.effective_burn:type_name -> cosmos.base.v1beta1.Coin - 5, // [5:5] is the sub-list for method output_type - 5, // [5:5] is the sub-list for method input_type - 5, // [5:5] is the sub-list for extension type_name - 5, // [5:5] is the sub-list for extension extendee - 0, // [0:5] is the sub-list for field type_name + 5, // 0: poktroll.tokenomics.EventClaimExpired.claim:type_name -> poktroll.proof.Claim + 0, // 1: poktroll.tokenomics.EventClaimExpired.expiration_reason:type_name -> poktroll.tokenomics.ClaimExpirationReason + 5, // 2: poktroll.tokenomics.EventClaimSettled.claim:type_name -> poktroll.proof.Claim + 6, // 3: poktroll.tokenomics.EventClaimSettled.proof_requirement:type_name -> poktroll.proof.ProofRequirementReason + 7, // 4: poktroll.tokenomics.EventApplicationOverserviced.expected_burn:type_name -> cosmos.base.v1beta1.Coin + 7, // 5: poktroll.tokenomics.EventApplicationOverserviced.effective_burn:type_name -> cosmos.base.v1beta1.Coin + 6, // [6:6] is the sub-list for method output_type + 6, // [6:6] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name } func init() { file_poktroll_tokenomics_event_proto_init() } @@ -2782,13 +2904,14 @@ func file_poktroll_tokenomics_event_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_poktroll_tokenomics_event_proto_rawDesc, - NumEnums: 0, + NumEnums: 1, NumMessages: 4, NumExtensions: 0, NumServices: 0, }, GoTypes: file_poktroll_tokenomics_event_proto_goTypes, DependencyIndexes: file_poktroll_tokenomics_event_proto_depIdxs, + EnumInfos: file_poktroll_tokenomics_event_proto_enumTypes, MessageInfos: file_poktroll_tokenomics_event_proto_msgTypes, }.Build() File_poktroll_tokenomics_event_proto = out.File diff --git a/pkg/client/query/accquerier.go b/pkg/client/query/accquerier.go index 6b6e26778..2b4d9c2bc 100644 --- a/pkg/client/query/accquerier.go +++ b/pkg/client/query/accquerier.go @@ -93,6 +93,9 @@ func (aq *accQuerier) GetPubKeyFromAddress(ctx context.Context, address string) if err != nil { return nil, err } + if acc == nil { + return nil, ErrQueryAccountNotFound.Wrapf("address: %s", address) + } // If the account's public key is nil, then return an error. 
pubKey := acc.GetPubKey() diff --git a/pkg/crypto/protocol/difficulty.go b/pkg/crypto/protocol/difficulty.go index 8d7ccb835..7615135ae 100644 --- a/pkg/crypto/protocol/difficulty.go +++ b/pkg/crypto/protocol/difficulty.go @@ -24,6 +24,7 @@ var ( // GetDifficultyFromHash returns the "difficulty" of the given hash, with respect // to the "highest" (easiest) target hash, BaseRelayDifficultyHash. // The resultant value is not used for any business logic but is simplify there to have a human-readable version of the hash. +// TODO_MAINNET: Can this cause an integer overflow? func GetDifficultyFromHash(hashBz [RelayHasherSize]byte) int64 { baseRelayDifficultyHashInt := new(big.Int).SetBytes(BaseRelayDifficultyHashBz) hashInt := new(big.Int).SetBytes(hashBz[:]) diff --git a/proto/poktroll/shared/service.proto b/proto/poktroll/shared/service.proto index 7f45534c3..9a80732dc 100644 --- a/proto/poktroll/shared/service.proto +++ b/proto/poktroll/shared/service.proto @@ -49,7 +49,7 @@ enum RPCType { WEBSOCKET = 2; // WebSocket JSON_RPC = 3; // JSON-RPC REST = 4; // REST - // Add new RPC types here as needed +// Add new RPC types here as needed } // Enum to define configuration options diff --git a/proto/poktroll/tokenomics/event.proto b/proto/poktroll/tokenomics/event.proto index f45cd6389..780564496 100644 --- a/proto/poktroll/tokenomics/event.proto +++ b/proto/poktroll/tokenomics/event.proto @@ -8,13 +8,21 @@ import "cosmos/base/v1beta1/coin.proto"; import "poktroll/proof/claim.proto"; import "poktroll/proof/requirement.proto"; +enum ClaimExpirationReason { + EXPIRATION_REASON_UNSPECIFIED = 0; // Default value, means may be valid + PROOF_MISSING = 1; + PROOF_INVALID = 2; +} + // EventClaimExpired is an event emitted during settlement whenever a claim requiring // an on-chain proof doesn't have one. The claim cannot be settled, leading to that work // never being rewarded. message EventClaimExpired { poktroll.proof.Claim claim = 1 [(gogoproto.jsontag) = "claim"]; + // TODO_MAINNET: Should we include the proof here too? uint64 num_relays = 2 [(gogoproto.jsontag) = "num_relays"]; uint64 num_compute_units = 3 [(gogoproto.jsontag) = "num_compute_units"]; + ClaimExpirationReason expiration_reason = 4 [(gogoproto.jsontag) = "expiration_reason"]; } // EventClaimSettled is an event emitted whenever a claim is settled.
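Editor's illustrative sketch (not part of the diff): the hunk above adds a ClaimExpirationReason enum and an expiration_reason field to EventClaimExpired so consumers can tell why a claim expired during settlement. A minimal Go sketch of how an event consumer might branch on the new field is shown below. The gogo-generated import path and the describeClaimExpiration helper are assumptions for illustration only; the enum values and getters follow the proto definition above.

package example

import (
	"fmt"

	tokenomicstypes "github.com/pokt-network/poktroll/x/tokenomics/types"
)

// describeClaimExpiration is a hypothetical helper that renders the new
// expiration_reason field into a human-readable explanation.
func describeClaimExpiration(event *tokenomicstypes.EventClaimExpired) string {
	sessionId := event.GetClaim().GetSessionHeader().GetSessionId()
	switch event.GetExpirationReason() {
	case tokenomicstypes.ClaimExpirationReason_PROOF_MISSING:
		// The claim required an on-chain proof and none was ever submitted.
		return fmt.Sprintf("claim for session %s expired: required proof was never submitted", sessionId)
	case tokenomicstypes.ClaimExpirationReason_PROOF_INVALID:
		// A proof was submitted but failed validation during settlement.
		return fmt.Sprintf("claim for session %s expired: submitted proof was invalid", sessionId)
	default:
		// EXPIRATION_REASON_UNSPECIFIED is the zero value and may still correspond to a valid claim.
		return fmt.Sprintf("claim for session %s expired for an unspecified reason", sessionId)
	}
}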
diff --git a/testutil/keeper/tokenomics.go b/testutil/keeper/tokenomics.go index 621c98858..5ad06f56b 100644 --- a/testutil/keeper/tokenomics.go +++ b/testutil/keeper/tokenomics.go @@ -62,6 +62,7 @@ type TokenomicsModuleKeepers struct { tokenomicstypes.SupplierKeeper tokenomicstypes.ProofKeeper tokenomicstypes.SharedKeeper + tokenomicstypes.SessionKeeper Codec *codec.ProtoCodec } @@ -373,6 +374,7 @@ func NewTokenomicsModuleKeepers( SupplierKeeper: &supplierKeeper, ProofKeeper: &proofKeeper, SharedKeeper: &sharedKeeper, + SessionKeeper: &sessionKeeper, Codec: cdc, } diff --git a/testutil/testtree/tree.go b/testutil/testtree/tree.go new file mode 100644 index 000000000..e56be9a40 --- /dev/null +++ b/testutil/testtree/tree.go @@ -0,0 +1,162 @@ +package testtree + +import ( + "context" + "os" + "testing" + + "github.com/cosmos/cosmos-sdk/crypto/keyring" + cosmostypes "github.com/cosmos/cosmos-sdk/types" + "github.com/stretchr/testify/require" + + "github.com/pokt-network/poktroll/pkg/crypto" + "github.com/pokt-network/poktroll/pkg/relayer" + "github.com/pokt-network/poktroll/pkg/relayer/session" + "github.com/pokt-network/poktroll/testutil/testrelayer" + prooftypes "github.com/pokt-network/poktroll/x/proof/types" + sessiontypes "github.com/pokt-network/poktroll/x/session/types" +) + +// NewFilledSessionTree creates a new session tree with numRelays relays +// filled out using the request and response headers provided, where every +// relay is signed by the supplier and application respectively. +func NewFilledSessionTree( + ctx context.Context, t *testing.T, + numRelays uint, + supplierKeyUid, supplierAddr string, + sessionTreeHeader, reqHeader, resHeader *sessiontypes.SessionHeader, + keyRing keyring.Keyring, + ringClient crypto.RingClient, +) relayer.SessionTree { + t.Helper() + + // Initialize an empty session tree with the given session header. + sessionTree := NewEmptySessionTree(t, sessionTreeHeader, supplierAddr) + + // Add numRelays of relays to the session tree. + FillSessionTree( + ctx, t, + sessionTree, numRelays, + supplierKeyUid, supplierAddr, + reqHeader, resHeader, + keyRing, + ringClient, + ) + + return sessionTree +} + +// NewEmptySessionTree creates a new empty session tree for the given session. +func NewEmptySessionTree( + t *testing.T, + sessionTreeHeader *sessiontypes.SessionHeader, + supplierAddr string, +) relayer.SessionTree { + t.Helper() + + // Create a temporary session tree store directory for persistence. + testSessionTreeStoreDir, err := os.MkdirTemp("", "session_tree_store_dir") + require.NoError(t, err) + + // Delete the temporary session tree store directory after the test completes. + t.Cleanup(func() { + _ = os.RemoveAll(testSessionTreeStoreDir) + }) + + accAddress := cosmostypes.MustAccAddressFromBech32(supplierAddr) + + // Construct a session tree to add relays to and generate a proof from. + sessionTree, err := session.NewSessionTree( + sessionTreeHeader, + &accAddress, + testSessionTreeStoreDir, + ) + require.NoError(t, err) + + return sessionTree +} + +// FillSessionTree fills the session tree with valid signed relays. +// A total of numRelays relays are added to the session tree with +// increasing weights (the first relay has weight 0, the second has weight 1, etc.).
+func FillSessionTree( + ctx context.Context, t *testing.T, + sessionTree relayer.SessionTree, + numRelays uint, + supplierKeyUid, supplierAddr string, + reqHeader, resHeader *sessiontypes.SessionHeader, + keyRing keyring.Keyring, + ringClient crypto.RingClient, +) { + t.Helper() + + for i := 0; i < int(numRelays); i++ { + relay := testrelayer.NewSignedEmptyRelay( + ctx, t, + supplierKeyUid, supplierAddr, + reqHeader, resHeader, + keyRing, + ringClient, + ) + relayBz, err := relay.Marshal() + require.NoError(t, err) + + relayKey, err := relay.GetHash() + require.NoError(t, err) + + // See FillSessionTreeExpectedComputeUnits below for explanation. + relayWeight := uint64(i) + + err = sessionTree.Update(relayKey[:], relayBz, relayWeight) + require.NoError(t, err) + } +} + +// FillSessionTreeExpectedComputeUnits returns the number of expected compute units +// to cover numRelays (in a test scenario) whereby every subsequent relay costs +// an additional compute unit. +// This is a basic, arbitrary approach selected for testing purposes. Don't think too +// deeply about it. +func FillSessionTreeExpectedComputeUnits(numRelays uint) uint64 { + return uint64(numRelays * (numRelays - 1) / 2) +} + +// NewProof creates a new proof structure. +func NewProof( + t *testing.T, + supplierAddr string, + sessionHeader *sessiontypes.SessionHeader, + sessionTree relayer.SessionTree, + closestProofPath []byte, +) *prooftypes.Proof { + t.Helper() + + // Generate a closest proof from the session tree using closestProofPath. + merkleProof, err := sessionTree.ProveClosest(closestProofPath) + require.NoError(t, err) + require.NotNil(t, merkleProof) + + // Serialize the closest merkle proof. + merkleProofBz, err := merkleProof.Marshal() + require.NoError(t, err) + + return &prooftypes.Proof{ + SupplierAddress: supplierAddr, + SessionHeader: sessionHeader, + ClosestMerkleProof: merkleProofBz, + } +} + +func NewClaim( + t *testing.T, + supplierAddr string, + sessionHeader *sessiontypes.SessionHeader, + rootHash []byte, +) *prooftypes.Claim { + // Create a new claim. + return &prooftypes.Claim{ + SupplierAddress: supplierAddr, + SessionHeader: sessionHeader, + RootHash: rootHash, + } +} diff --git a/x/proof/keeper/keeper.go b/x/proof/keeper/keeper.go index 0f35fe593..e7bbc9475 100644 --- a/x/proof/keeper/keeper.go +++ b/x/proof/keeper/keeper.go @@ -30,6 +30,7 @@ type ( sessionKeeper types.SessionKeeper applicationKeeper types.ApplicationKeeper + accountKeeper types.AccountKeeper sharedKeeper types.SharedKeeper ringClient crypto.RingClient @@ -88,6 +89,7 @@ func NewKeeper( sessionKeeper: sessionKeeper, applicationKeeper: applicationKeeper, + accountKeeper: accountKeeper, sharedKeeper: sharedKeeper, ringClient: ringKeeperClient, diff --git a/x/proof/keeper/msg_server_create_claim.go b/x/proof/keeper/msg_server_create_claim.go index 7d58f1d20..1e9dc8b52 100644 --- a/x/proof/keeper/msg_server_create_claim.go +++ b/x/proof/keeper/msg_server_create_claim.go @@ -10,7 +10,6 @@ import ( "github.com/pokt-network/poktroll/telemetry" "github.com/pokt-network/poktroll/x/proof/types" - sessiontypes "github.com/pokt-network/poktroll/x/session/types" sharedtypes "github.com/pokt-network/poktroll/x/shared/types" ) @@ -37,53 +36,43 @@ func (k msgServer) CreateClaim( }() logger := k.Logger().With("method", "CreateClaim") + sdkCtx := cosmostypes.UnwrapSDKContext(ctx) logger.Info("creating claim") + // Basic validation of the CreateClaim message.
if err = msg.ValidateBasic(); err != nil { return nil, err } + logger.Info("validated the createClaim message") // Compare msg session header w/ on-chain session header. - var session *sessiontypes.Session - session, err = k.queryAndValidateSessionHeader(ctx, msg) + session, err := k.queryAndValidateSessionHeader(ctx, msg.GetSessionHeader(), msg.GetSupplierAddress()) if err != nil { return nil, status.Error(codes.InvalidArgument, err.Error()) } - // Use the session header from the on-chain hydrated session. - sessionHeader := session.GetHeader() - - // Set the session header to the on-chain hydrated session header. - msg.SessionHeader = sessionHeader - - // Validate claim message commit height is within the respective session's - // claim creation window using the on-chain session header. - if err = k.validateClaimWindow(ctx, msg); err != nil { - return nil, status.Error(codes.FailedPrecondition, err.Error()) + // Construct and insert claim + claim = types.Claim{ + SupplierAddress: msg.GetSupplierAddress(), + SessionHeader: session.GetHeader(), + RootHash: msg.GetRootHash(), } + // Helpers for logging the same metadata throughout this function calls logger = logger. With( "session_id", session.GetSessionId(), - "session_end_height", sessionHeader.GetSessionEndBlockHeight(), + "session_end_height", claim.SessionHeader.GetSessionEndBlockHeight(), "supplier", msg.GetSupplierAddress(), ) - logger.Info("validated claim") - - // Assign and upsert claim after all validation. - claim = types.Claim{ - SupplierAddress: msg.GetSupplierAddress(), - SessionHeader: sessionHeader, - RootHash: msg.GetRootHash(), + // Validate claim message commit height is within the respective session's + // claim creation window using the on-chain session header. + if err = k.validateClaimWindow(ctx, claim.SessionHeader, claim.SupplierAddress); err != nil { + return nil, status.Error(codes.FailedPrecondition, err.Error()) } - _, isExistingClaim = k.Keeper.GetClaim(ctx, claim.GetSessionHeader().GetSessionId(), claim.GetSupplierAddress()) - - k.Keeper.UpsertClaim(ctx, claim) - - logger.Info("created new claim") - + // Get metadata for the event we want to emit numRelays, err = claim.GetNumRelays() if err != nil { return nil, status.Error(codes.Internal, types.ErrProofInvalidClaimRootHash.Wrap(err.Error()).Error()) @@ -92,6 +81,11 @@ func (k msgServer) CreateClaim( if err != nil { return nil, status.Error(codes.Internal, types.ErrProofInvalidClaimRootHash.Wrap(err.Error()).Error()) } + _, isExistingClaim = k.Keeper.GetClaim(ctx, claim.GetSessionHeader().GetSessionId(), claim.GetSupplierAddress()) + + // Upsert the claim + k.Keeper.UpsertClaim(ctx, claim) + logger.Info("successfully upserted the claim") // Emit the appropriate event based on whether the claim was created or updated. var claimUpsertEvent proto.Message @@ -113,8 +107,6 @@ func (k msgServer) CreateClaim( }, ) } - - sdkCtx := cosmostypes.UnwrapSDKContext(ctx) if err = sdkCtx.EventManager().EmitTypedEvent(claimUpsertEvent); err != nil { return nil, status.Error( codes.Internal, @@ -126,7 +118,6 @@ func (k msgServer) CreateClaim( ) } - // TODO_BETA: return the claim in the response. 
return &types.MsgCreateClaimResponse{ Claim: &claim, }, nil diff --git a/x/proof/keeper/msg_server_create_claim_test.go b/x/proof/keeper/msg_server_create_claim_test.go index 4ac0c4c72..69515cc64 100644 --- a/x/proof/keeper/msg_server_create_claim_test.go +++ b/x/proof/keeper/msg_server_create_claim_test.go @@ -5,12 +5,11 @@ import ( abci "github.com/cometbft/cometbft/abci/types" cosmostypes "github.com/cosmos/cosmos-sdk/types" + "github.com/pokt-network/smt" "github.com/stretchr/testify/require" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - "github.com/pokt-network/smt" - keepertest "github.com/pokt-network/poktroll/testutil/keeper" testproof "github.com/pokt-network/poktroll/testutil/proof" "github.com/pokt-network/poktroll/testutil/sample" @@ -140,7 +139,7 @@ func TestMsgServer_CreateClaim_Success(t *testing.T) { events := sdkCtx.EventManager().Events() require.Equal(t, 1, len(events)) - require.Equal(t, events[0].Type, "poktroll.proof.EventClaimCreated") + require.Equal(t, "poktroll.proof.EventClaimCreated", events[0].Type) event, err := cosmostypes.ParseTypedEvent(abci.Event(events[0])) require.NoError(t, err) diff --git a/x/proof/keeper/msg_server_submit_proof.go b/x/proof/keeper/msg_server_submit_proof.go index fcc271b03..2d95b1dff 100644 --- a/x/proof/keeper/msg_server_submit_proof.go +++ b/x/proof/keeper/msg_server_submit_proof.go @@ -5,30 +5,31 @@ package keeper // Ref: https://github.com/pokt-network/poktroll/pull/448#discussion_r1549742985 import ( - "bytes" "context" - "fmt" - cosmoscryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" cosmostypes "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/gogoproto/proto" - "github.com/pokt-network/smt" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - "github.com/pokt-network/poktroll/pkg/crypto/protocol" "github.com/pokt-network/poktroll/telemetry" "github.com/pokt-network/poktroll/x/proof/types" - servicetypes "github.com/pokt-network/poktroll/x/service/types" - sessiontypes "github.com/pokt-network/poktroll/x/session/types" sharedtypes "github.com/pokt-network/poktroll/x/shared/types" ) // SubmitProof is the server handler to submit and store a proof on-chain. // A proof that's stored on-chain is what leads to rewards (i.e. inflation) -// downstream, making the series of checks a critical part of the protocol. +// downstream, making this a critical part of the protocol. // -// Note: The entity sending the SubmitProof messages does not necessarily need +// Note that the validation of the proof is done in `EnsureValidProof`. However, +// preliminary checks are done in the handler to prevent sybil or DoS attacks on +// full nodes because storing and validating proofs is expensive. +// +// We are playing a balance of security and efficiency here, where enough validation +// is done on proof submission, and exhaustive validation is done during session +// settlement. +// +// The entity sending the SubmitProof messages does not necessarily need // to correspond to the supplier signing the proof. For example, a single entity // could (theoretically) batch multiple proofs (signed by the corresponding supplier) // into one transaction to save on transaction fees. @@ -36,14 +37,6 @@ func (k msgServer) SubmitProof( ctx context.Context, msg *types.MsgSubmitProof, ) (_ *types.MsgSubmitProofResponse, err error) { - // TODO_MAINNET: A potential issue with doing proof validation inside - // `SubmitProof` is that we will not be storing false proofs on-chain (e.g. for slashing purposes). 
- // This could be considered a feature (e.g. less state bloat against sybil attacks) - // or a bug (i.e. no mechanisms for slashing suppliers who submit false proofs). - // Revisit this prior to mainnet launch as to whether the business logic for settling sessions should be in EndBlocker or here. - logger := k.Logger().With("method", "SubmitProof") - logger.Info("About to start submitting proof") - // Declare claim to reference in telemetry. var ( claim = new(types.Claim) @@ -62,201 +55,49 @@ func (k msgServer) SubmitProof( } }() - /* - TODO_BLOCKER(@bryanchriswhite): Document these steps in proof - verification, link to the doc for reference and delete the comments. - - ## Actions (error if anything fails) - 1. Retrieve a fully hydrated `session` from on-chain store using `msg` metadata - 2. Retrieve a fully hydrated `claim` from on-chain store using `msg` metadata - 3. Retrieve `relay.Req` and `relay.Res` from deserializing `proof.ClosestValueHash` - - ## Basic Validations (metadata only) - 1. proof.sessionId == claim.sessionId - 2. msg.supplier in session.suppliers - 3. relay.Req.signer == session.appAddr - 4. relay.Res.signer == msg.supplier - - ## Msg distribution validation (governance based params) - 1. Validate Proof submission is not too early; governance-based param + pseudo-random variation - 2. Validate Proof submission is not too late; governance-based param + pseudo-random variation - - ## Relay Signature validation - 1. verify(relay.Req.Signature, appRing) - 2. verify(relay.Res.Signature, supplier.pubKey) - - ## Relay Mining validation - 1. verify(proof.path) is the expected path; pseudo-random variation using on-chain data - 2. verify(proof.ValueHash, expectedDifficulty); governance based - 3. verify(claim.Root, proof.ClosestProof); verify the closest proof is correct - */ - - // Decomposing a few variables for easier access - sessionHeader := msg.GetSessionHeader() - supplierAddr := msg.GetSupplierAddress() - - // Helpers for logging the same metadata throughout this function calls - logger = logger.With( - "session_id", sessionHeader.GetSessionId(), - "session_end_height", sessionHeader.GetSessionEndBlockHeight(), - "supplier", supplierAddr) + logger := k.Logger().With("method", "SubmitProof") + sdkCtx := cosmostypes.UnwrapSDKContext(ctx) + logger.Info("About to start submitting proof") // Basic validation of the SubmitProof message. if err = msg.ValidateBasic(); err != nil { return nil, status.Error(codes.InvalidArgument, err.Error()) } - logger.Info("validated the submitProof message ") + logger.Info("validated the submitProof message") - // Retrieve the supplier's public key. - var supplierPubKey cosmoscryptotypes.PubKey - supplierPubKey, err = k.accountQuerier.GetPubKeyFromAddress(ctx, supplierAddr) + // Compare msg session header w/ on-chain session header. + session, err := k.queryAndValidateSessionHeader(ctx, msg.GetSessionHeader(), msg.GetSupplierAddress()) if err != nil { - return nil, status.Error(codes.FailedPrecondition, err.Error()) + return nil, status.Error(codes.InvalidArgument, err.Error()) } - // Validate the session header. 
- var onChainSession *sessiontypes.Session - onChainSession, err = k.queryAndValidateSessionHeader(ctx, msg) - if err != nil { - return nil, status.Error(codes.InvalidArgument, err.Error()) + // Construct the proof + proof := types.Proof{ + SupplierAddress: msg.GetSupplierAddress(), + SessionHeader: session.GetHeader(), + ClosestMerkleProof: msg.GetProof(), } - logger.Info("queried and validated the session header") - // Re-hydrate message session header with the on-chain session header. - // This corrects for discrepancies between unvalidated fields in the session header - // which can be derived from known values (e.g. session end height). - msg.SessionHeader = onChainSession.GetHeader() + // Helpers for logging the same metadata throughout this function calls + logger = logger.With( + "session_id", proof.SessionHeader.SessionId, + "session_end_height", proof.SessionHeader.SessionEndBlockHeight, + "supplier", proof.SupplierAddress) // Validate proof message commit height is within the respective session's // proof submission window using the on-chain session header. - if err = k.validateProofWindow(ctx, msg); err != nil { - return nil, status.Error(codes.FailedPrecondition, err.Error()) - } - - // Unmarshal the closest merkle proof from the message. - sparseMerkleClosestProof := &smt.SparseMerkleClosestProof{} - if err = sparseMerkleClosestProof.Unmarshal(msg.GetProof()); err != nil { - return nil, status.Error(codes.InvalidArgument, - types.ErrProofInvalidProof.Wrapf( - "failed to unmarshal closest merkle proof: %s", - err, - ).Error(), - ) - } - - // TODO_MAINNET(#427): Utilize smt.VerifyCompactClosestProof here to - // reduce on-chain storage requirements for proofs. - // Get the relay request and response from the proof.GetClosestMerkleProof. - relayBz := sparseMerkleClosestProof.GetValueHash(&protocol.SmtSpec) - relay := &servicetypes.Relay{} - if err = k.cdc.Unmarshal(relayBz, relay); err != nil { - return nil, status.Error( - codes.InvalidArgument, - types.ErrProofInvalidRelay.Wrapf( - "failed to unmarshal relay: %s", - err, - ).Error(), - ) - } - - // Basic validation of the relay request. - relayReq := relay.GetReq() - if err = relayReq.ValidateBasic(); err != nil { - return nil, status.Error(codes.FailedPrecondition, err.Error()) - } - logger.Debug("successfully validated relay request") - - // Make sure that the supplier address in the proof matches the one in the relay request. - if supplierAddr != relayReq.Meta.SupplierAddress { - return nil, status.Error(codes.FailedPrecondition, "supplier address mismatch") - } - logger.Debug("the proof supplier address matches the relay request supplier address") - - // Basic validation of the relay response. - relayRes := relay.GetRes() - if err = relayRes.ValidateBasic(); err != nil { - return nil, status.Error(codes.FailedPrecondition, err.Error()) - } - logger.Debug("successfully validated relay response") - - // Verify that the relay request session header matches the proof session header. - if err = compareSessionHeaders(msg.GetSessionHeader(), relayReq.Meta.GetSessionHeader()); err != nil { - return nil, status.Error(codes.FailedPrecondition, err.Error()) - } - logger.Debug("successfully compared relay request session header") - - // Verify that the relay response session header matches the proof session header. 
- if err = compareSessionHeaders(msg.GetSessionHeader(), relayRes.Meta.GetSessionHeader()); err != nil { - return nil, status.Error(codes.FailedPrecondition, err.Error()) - } - logger.Debug("successfully compared relay response session header") - - // Verify the relay request's signature. - if err = k.ringClient.VerifyRelayRequestSignature(ctx, relayReq); err != nil { - return nil, status.Error(codes.FailedPrecondition, err.Error()) - } - logger.Debug("successfully verified relay request signature") - - // Verify the relay response's signature. - if err = relayRes.VerifySupplierSignature(supplierPubKey); err != nil { - return nil, status.Error(codes.FailedPrecondition, err.Error()) - } - logger.Debug("successfully verified relay response signature") - - // Get the proof module's governance parameters. - // TODO_FOLLOWUP(@olshansk, #690): Get the difficulty associated with the service - params := k.GetParams(ctx) - - // Verify the relay difficulty is above the minimum required to earn rewards. - if err = validateRelayDifficulty( - relayBz, - params.RelayDifficultyTargetHash, - sessionHeader.Service.Id, - ); err != nil { - return nil, status.Error(codes.FailedPrecondition, err.Error()) - } - logger.Debug("successfully validated relay mining difficulty") - - // Validate that path the proof is submitted for matches the expected one - // based on the pseudo-random on-chain data associated with the header. - if err = k.validateClosestPath( - ctx, - sparseMerkleClosestProof, - msg.GetSessionHeader(), - msg.GetSupplierAddress(), - ); err != nil { + if err = k.validateProofWindow(ctx, proof.SessionHeader, proof.SupplierAddress); err != nil { return nil, status.Error(codes.FailedPrecondition, err.Error()) } - logger.Debug("successfully validated proof path") // Retrieve the corresponding claim for the proof submitted so it can be // used in the proof validation below. - claim, err = k.queryAndValidateClaimForProof(ctx, msg) + claim, err = k.queryAndValidateClaimForProof(ctx, proof.SessionHeader, proof.SupplierAddress) if err != nil { - return nil, status.Error(codes.FailedPrecondition, err.Error()) + return nil, status.Error(codes.Internal, types.ErrProofClaimNotFound.Wrap(err.Error()).Error()) } - logger.Debug("successfully retrieved and validated claim") - - // Verify the proof's closest merkle proof. - if err = verifyClosestProof(sparseMerkleClosestProof, claim.GetRootHash()); err != nil { - return nil, status.Error(codes.FailedPrecondition, err.Error()) - } - logger.Debug("successfully verified closest merkle proof") - - // Construct and insert proof after all validation. 
- proof := types.Proof{ - SupplierAddress: supplierAddr, - SessionHeader: msg.GetSessionHeader(), - ClosestMerkleProof: msg.GetProof(), - } - logger.Debug(fmt.Sprintf("queried and validated the claim for session ID %q", sessionHeader.SessionId)) - - _, isExistingProof = k.GetProof(ctx, proof.GetSessionHeader().GetSessionId(), proof.GetSupplierAddress()) - - k.UpsertProof(ctx, proof) - logger.Info("successfully upserted the proof") - + // Get metadata for the event we want to emit numRelays, err = claim.GetNumRelays() if err != nil { return nil, status.Error(codes.Internal, types.ErrProofInvalidClaimRootHash.Wrap(err.Error()).Error()) @@ -265,6 +106,11 @@ func (k msgServer) SubmitProof( if err != nil { return nil, status.Error(codes.Internal, types.ErrProofInvalidClaimRootHash.Wrap(err.Error()).Error()) } + _, isExistingProof = k.GetProof(ctx, proof.SessionHeader.SessionId, proof.SupplierAddress) + + // Upsert the proof + k.UpsertProof(ctx, proof) + logger.Info("successfully upserted the proof") // Emit the appropriate event based on whether the claim was created or updated. var proofUpsertEvent proto.Message @@ -288,8 +134,6 @@ func (k msgServer) SubmitProof( }, ) } - - sdkCtx := cosmostypes.UnwrapSDKContext(ctx) if err = sdkCtx.EventManager().EmitTypedEvent(proofUpsertEvent); err != nil { return nil, status.Error( codes.Internal, @@ -305,232 +149,3 @@ func (k msgServer) SubmitProof( Proof: &proof, }, nil } - -// queryAndValidateClaimForProof ensures that a claim corresponding to the given -// proof's session exists & has a matching supplier address and session header, -// it then returns the corresponding claim if the validation is successful. -func (k msgServer) queryAndValidateClaimForProof( - ctx context.Context, - msg *types.MsgSubmitProof, -) (*types.Claim, error) { - sessionId := msg.GetSessionHeader().GetSessionId() - // NB: no need to assert the testSessionId or supplier address as it is retrieved - // by respective values of the given proof. I.e., if the claim exists, then these - // values are guaranteed to match. - foundClaim, found := k.GetClaim(ctx, sessionId, msg.GetSupplierAddress()) - if !found { - return nil, types.ErrProofClaimNotFound.Wrapf( - "no claim found for session ID %q and supplier %q", - sessionId, - msg.GetSupplierAddress(), - ) - } - - claimSessionHeader := foundClaim.GetSessionHeader() - proofSessionHeader := msg.GetSessionHeader() - - // Ensure session start heights match. - if claimSessionHeader.GetSessionStartBlockHeight() != proofSessionHeader.GetSessionStartBlockHeight() { - return nil, types.ErrProofInvalidSessionStartHeight.Wrapf( - "claim session start height %d does not match proof session start height %d", - claimSessionHeader.GetSessionStartBlockHeight(), - proofSessionHeader.GetSessionStartBlockHeight(), - ) - } - - // Ensure session end heights match. - if claimSessionHeader.GetSessionEndBlockHeight() != proofSessionHeader.GetSessionEndBlockHeight() { - return nil, types.ErrProofInvalidSessionEndHeight.Wrapf( - "claim session end height %d does not match proof session end height %d", - claimSessionHeader.GetSessionEndBlockHeight(), - proofSessionHeader.GetSessionEndBlockHeight(), - ) - } - - // Ensure application addresses match. 
- if claimSessionHeader.GetApplicationAddress() != proofSessionHeader.GetApplicationAddress() { - return nil, types.ErrProofInvalidAddress.Wrapf( - "claim application address %q does not match proof application address %q", - claimSessionHeader.GetApplicationAddress(), - proofSessionHeader.GetApplicationAddress(), - ) - } - - // Ensure service IDs match. - if claimSessionHeader.GetService().GetId() != proofSessionHeader.GetService().GetId() { - return nil, types.ErrProofInvalidService.Wrapf( - "claim service ID %q does not match proof service ID %q", - claimSessionHeader.GetService().GetId(), - proofSessionHeader.GetService().GetId(), - ) - } - - return &foundClaim, nil -} - -// compareSessionHeaders compares a session header against an expected session header. -// This is necessary to validate the proof's session header against both the relay -// request and response's session headers. -func compareSessionHeaders(expectedSessionHeader, sessionHeader *sessiontypes.SessionHeader) error { - // Compare the Application address. - if sessionHeader.GetApplicationAddress() != expectedSessionHeader.GetApplicationAddress() { - return types.ErrProofInvalidRelay.Wrapf( - "session headers application addresses mismatch; expect: %q, got: %q", - expectedSessionHeader.GetApplicationAddress(), - sessionHeader.GetApplicationAddress(), - ) - } - - // Compare the Service IDs. - if sessionHeader.GetService().GetId() != expectedSessionHeader.GetService().GetId() { - return types.ErrProofInvalidRelay.Wrapf( - "session headers service IDs mismatch; expected: %q, got: %q", - expectedSessionHeader.GetService().GetId(), - sessionHeader.GetService().GetId(), - ) - } - - // Compare the Service names. - if sessionHeader.GetService().GetName() != expectedSessionHeader.GetService().GetName() { - return types.ErrProofInvalidRelay.Wrapf( - "sessionHeaders service names mismatch expect: %q, got: %q", - expectedSessionHeader.GetService().GetName(), - sessionHeader.GetService().GetName(), - ) - } - - // Compare the Session start block heights. - if sessionHeader.GetSessionStartBlockHeight() != expectedSessionHeader.GetSessionStartBlockHeight() { - return types.ErrProofInvalidRelay.Wrapf( - "session headers session start heights mismatch; expected: %d, got: %d", - expectedSessionHeader.GetSessionStartBlockHeight(), - sessionHeader.GetSessionStartBlockHeight(), - ) - } - - // Compare the Session end block heights. - if sessionHeader.GetSessionEndBlockHeight() != expectedSessionHeader.GetSessionEndBlockHeight() { - return types.ErrProofInvalidRelay.Wrapf( - "session headers session end heights mismatch; expected: %d, got: %d", - expectedSessionHeader.GetSessionEndBlockHeight(), - sessionHeader.GetSessionEndBlockHeight(), - ) - } - - // Compare the Session IDs. - if sessionHeader.GetSessionId() != expectedSessionHeader.GetSessionId() { - return types.ErrProofInvalidRelay.Wrapf( - "session headers session IDs mismatch; expected: %q, got: %q", - expectedSessionHeader.GetSessionId(), - sessionHeader.GetSessionId(), - ) - } - - return nil -} - -// verifyClosestProof verifies the the correctness of the ClosestMerkleProof -// against the root hash committed to when creating the claim. 
-func verifyClosestProof( - proof *smt.SparseMerkleClosestProof, - claimRootHash []byte, -) error { - valid, err := smt.VerifyClosestProof(proof, claimRootHash, &protocol.SmtSpec) - if err != nil { - return err - } - - if !valid { - return types.ErrProofInvalidProof.Wrap("invalid closest merkle proof") - } - - return nil -} - -// validateRelayDifficulty ensures that the relay's mining difficulty meets the -// required minimum threshold. -// TODO_TECHDEBT: Factor out the relay mining difficulty validation into a shared -// function that can be used by both the proof and the miner packages. -func validateRelayDifficulty(relayBz, targetHash []byte, serviceId string) error { - relayHashArr := protocol.GetRelayHashFromBytes(relayBz) - relayHash := relayHashArr[:] - - if len(targetHash) != protocol.RelayHasherSize { - return types.ErrProofInvalidRelay.Wrapf( - "invalid RelayDifficultyTargetHash: (%x); length wanted: %d; got: %d", - targetHash, - protocol.RelayHasherSize, - len(targetHash), - ) - } - - if !protocol.IsRelayVolumeApplicable(relayHash, targetHash) { - var targetHashArr [protocol.RelayHasherSize]byte - copy(targetHashArr[:], targetHash) - - relayDifficulty := protocol.GetDifficultyFromHash(relayHashArr) - targetDifficulty := protocol.GetDifficultyFromHash(targetHashArr) - - return types.ErrProofInvalidRelay.Wrapf( - "the difficulty relay being proven is (%d), and is smaller than the target difficulty (%d) for service %s", - relayDifficulty, - targetDifficulty, - serviceId, - ) - } - - return nil -} - -// validateClosestPath ensures that the proof's path matches the expected path. -// Since the proof path needs to be pseudo-randomly selected AFTER the session -// ends, the seed for this is the block hash at the height when the proof window -// opens. -func (k msgServer) validateClosestPath( - ctx context.Context, - proof *smt.SparseMerkleClosestProof, - sessionHeader *sessiontypes.SessionHeader, - supplierAddr string, -) error { - // The RelayMiner has to wait until the submit claim and proof windows is are open - // in order to to create the claim and submit claims and proofs, respectively. - // These windows are calculated as specified in the docs; - // see: https://dev.poktroll.com/protocol/primitives/claim_and_proof_lifecycle. - // - // For reference, see relayerSessionsManager#waitForEarliest{CreateClaim,SubmitProof}Height(). - // - // The RelayMiner has to wait this long to ensure that late relays (i.e. - // submitted during SessionNumber=(N+1) but created during SessionNumber=N) are - // still included as part of SessionNumber=N. - // - // Since smt.ProveClosest is defined in terms of proof window open height, - // this block's hash needs to be used for validation too. - earliestSupplierProofCommitHeight, err := k.sharedQuerier.GetEarliestSupplierProofCommitHeight( - ctx, - sessionHeader.GetSessionEndBlockHeight(), - supplierAddr, - ) - if err != nil { - return err - } - - // earliestSupplierProofCommitHeight - 1 is the block that will have its hash used as the - // source of entropy for all the session trees in that batch, waiting for it to - // be received before proceeding. - proofPathSeedBlockHash := k.sessionKeeper.GetBlockHash(ctx, earliestSupplierProofCommitHeight-1) - - // TODO_BETA: Investigate "proof for the path provided does not match one expected by the on-chain protocol" - // error that may occur due to block height differing from the off-chain part. 
- k.logger.Info("E2E_DEBUG: height for block hash when verifying the proof", earliestSupplierProofCommitHeight, sessionHeader.GetSessionId()) - - expectedProofPath := protocol.GetPathForProof(proofPathSeedBlockHash, sessionHeader.GetSessionId()) - if !bytes.Equal(proof.Path, expectedProofPath) { - return types.ErrProofInvalidProof.Wrapf( - "the path of the proof provided (%x) does not match one expected by the on-chain protocol (%x)", - proof.Path, - expectedProofPath, - ) - } - - return nil -} diff --git a/x/proof/keeper/msg_server_submit_proof_test.go b/x/proof/keeper/msg_server_submit_proof_test.go index 99d890294..a9a8064f7 100644 --- a/x/proof/keeper/msg_server_submit_proof_test.go +++ b/x/proof/keeper/msg_server_submit_proof_test.go @@ -2,33 +2,25 @@ package keeper_test import ( "context" - "encoding/hex" - "os" "testing" "cosmossdk.io/depinject" - ring_secp256k1 "github.com/athanorlabs/go-dleq/secp256k1" "github.com/cosmos/cosmos-sdk/crypto/keyring" cosmostypes "github.com/cosmos/cosmos-sdk/types" - "github.com/pokt-network/ring-go" - "github.com/pokt-network/smt" "github.com/stretchr/testify/require" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - "github.com/pokt-network/poktroll/pkg/crypto" "github.com/pokt-network/poktroll/pkg/crypto/protocol" "github.com/pokt-network/poktroll/pkg/crypto/rings" "github.com/pokt-network/poktroll/pkg/polylog/polyzero" "github.com/pokt-network/poktroll/pkg/relayer" - "github.com/pokt-network/poktroll/pkg/relayer/session" testutilevents "github.com/pokt-network/poktroll/testutil/events" keepertest "github.com/pokt-network/poktroll/testutil/keeper" "github.com/pokt-network/poktroll/testutil/testkeyring" - "github.com/pokt-network/poktroll/testutil/testrelayer" + "github.com/pokt-network/poktroll/testutil/testtree" "github.com/pokt-network/poktroll/x/proof/keeper" prooftypes "github.com/pokt-network/poktroll/x/proof/types" - servicetypes "github.com/pokt-network/poktroll/x/service/types" sessiontypes "github.com/pokt-network/poktroll/x/session/types" "github.com/pokt-network/poktroll/x/shared" sharedtypes "github.com/pokt-network/poktroll/x/shared/types" @@ -56,7 +48,6 @@ var ( func init() { // The CometBFT header hash is 32 bytes: https://docs.cometbft.com/main/spec/core/data_structures blockHeaderHash = make([]byte, 32) - expectedMerkleProofPath = protocol.GetPathForProof(blockHeaderHash, "TODO_BLOCKER_session_id_currently_unused") } func TestMsgServer_SubmitProof_Success(t *testing.T) { @@ -99,7 +90,7 @@ func TestMsgServer_SubmitProof_Success(t *testing.T) { sharedParams := keepers.SharedKeeper.GetParams(ctx) sdkCtx := cosmostypes.UnwrapSDKContext(ctx) - // Set proof keeper params to disable relaymining and always require a proof. + // Set proof keeper params to disable relay mining and always require a proof. err := keepers.Keeper.SetParams(ctx, testProofParams) require.NoError(t, err) @@ -151,7 +142,7 @@ func TestMsgServer_SubmitProof_Success(t *testing.T) { // Submit the corresponding proof. expectedNumRelays := uint(5) - sessionTree := newFilledSessionTree( + sessionTree := testtree.NewFilledSessionTree( ctx, t, expectedNumRelays, supplierUid, supplierAddr, @@ -298,7 +289,7 @@ func TestMsgServer_SubmitProof_Error_OutsideOfWindow(t *testing.T) { // Submit the corresponding proof. 
numRelays := uint(5) - sessionTree := newFilledSessionTree( + sessionTree := testtree.NewFilledSessionTree( ctx, t, numRelays, supplierUid, supplierAddr, @@ -395,7 +386,7 @@ func TestMsgServer_SubmitProof_Error_OutsideOfWindow(t *testing.T) { // Assert that only the create claim event was emitted. events := sdkCtx.EventManager().Events() require.Equal(t, 1, len(events)) - require.Equal(t, events[0].Type, "poktroll.proof.EventClaimCreated") + require.Equal(t, "poktroll.proof.EventClaimCreated", events[0].Type) }) } } @@ -418,9 +409,6 @@ func TestMsgServer_SubmitProof_Error(t *testing.T) { // Construct a keyring to hold the keypairs for the accounts used in the test. keyRing := keyring.NewInMemory(keepers.Codec) - // The base session start height used for testing - sessionStartHeight := int64(1) - // Create a pre-generated account iterator to create accounts for the test. preGeneratedAccts := testkeyring.PreGeneratedAccounts() @@ -468,17 +456,10 @@ func TestMsgServer_SubmitProof_Error(t *testing.T) { // to be claimed and for which a valid proof would be accepted. validSessionHeader := keepers.GetSessionHeader(ctx, t, appAddr, service, 1) - // Get the session for the application/supplier pair which is - // *not* expected to be claimed. - unclaimedSessionHeader := keepers.GetSessionHeader(ctx, t, wrongAppAddr, wrongService, 1) - // Construct a session header with session ID that doesn't match the expected session ID. wrongSessionIdHeader := *validSessionHeader wrongSessionIdHeader.SessionId = "wrong session ID" - // TODO_TECHDEBT: add a test case such that we can distinguish between early - // & late session end block heights. - // Construct a proof message server from the proof keeper. srv := keeper.NewMsgServerImpl(*keepers.Keeper) @@ -494,7 +475,7 @@ func TestMsgServer_SubmitProof_Error(t *testing.T) { // Construct a valid session tree with 5 relays. numRelays := uint(5) - validSessionTree := newFilledSessionTree( + validSessionTree := testtree.NewFilledSessionTree( ctx, t, numRelays, supplierUid, supplierAddr, @@ -528,49 +509,6 @@ func TestMsgServer_SubmitProof_Error(t *testing.T) { keepers, ) - // Compute the difficulty in bits of the closest relay from the valid session tree. - validClosestRelayDifficultyBits := getClosestRelayDifficulty(t, validSessionTree, expectedMerkleProofPath) - - // Copy `emptyBlockHash` to `wrongClosestProofPath` to with a missing byte - // so the closest proof is invalid (i.e. unmarshalable). - invalidClosestProofBytes := make([]byte, len(expectedMerkleProofPath)-1) - - // Store the expected error returned during deserialization of the invalid - // closest Merkle proof bytes. - sparseMerkleClosestProof := &smt.SparseMerkleClosestProof{} - expectedInvalidProofUnmarshalErr := sparseMerkleClosestProof.Unmarshal(invalidClosestProofBytes) - - // Construct a relay to be mangled such that it fails to deserialize in order - // to set the error expectation for the relevant test case. - mangledRelay := testrelayer.NewEmptyRelay(validSessionHeader, validSessionHeader, supplierAddr) - - // Ensure valid relay request and response signatures. - testrelayer.SignRelayRequest(ctx, t, mangledRelay, appAddr, keyRing, ringClient) - testrelayer.SignRelayResponse(ctx, t, mangledRelay, supplierUid, supplierAddr, keyRing) - - // Serialize the relay so that it can be mangled. - mangledRelayBz, err := mangledRelay.Marshal() - require.NoError(t, err) - - // Mangle the serialized relay to cause an error during deserialization. 
- // Mangling could involve any byte randomly being swapped to any value - // so unmarshaling fails, but we are setting the first byte to 0 for simplicity. - mangledRelayBz[0] = 0x00 - - // Declare an invalid signature byte slice to construct expected relay request - // and response errors and use in corresponding test cases. - invalidSignatureBz := []byte("invalid signature bytes") - - // Prepare an invalid proof of the correct size. - wrongClosestProofPath := make([]byte, len(expectedMerkleProofPath)) - copy(wrongClosestProofPath, expectedMerkleProofPath) - copy(wrongClosestProofPath, "wrong closest proof path") - - lowTargetHash, _ := hex.DecodeString("00000000000000000000000000000000000000000000000000000000000000ff") - var lowTargetHashArr [protocol.RelayHasherSize]byte - copy(lowTargetHashArr[:], lowTargetHash) - highExpectedTargetDifficulty := protocol.GetDifficultyFromHash(lowTargetHashArr) - tests := []struct { desc string newProofMsg func(t *testing.T) *prooftypes.MsgSubmitProof @@ -662,531 +600,6 @@ func TestMsgServer_SubmitProof_Error(t *testing.T) { ).Error(), ), }, - { - desc: "merkle proof must be deserializable", - newProofMsg: func(t *testing.T) *prooftypes.MsgSubmitProof { - // Construct new proof message. - proof := newTestProofMsg(t, - supplierAddr, - validSessionHeader, - validSessionTree, - expectedMerkleProofPath, - ) - - // Set merkle proof to an incorrect byte slice. - proof.Proof = invalidClosestProofBytes - - return proof - }, - expectedErr: status.Error( - codes.InvalidArgument, - prooftypes.ErrProofInvalidProof.Wrapf( - "failed to unmarshal closest merkle proof: %s", - expectedInvalidProofUnmarshalErr, - ).Error(), - ), - }, - { - desc: "relay must be deserializable", - newProofMsg: func(t *testing.T) *prooftypes.MsgSubmitProof { - // Construct a session tree to which we'll add 1 unserializable relay. - mangledRelaySessionTree := newEmptySessionTree(t, validSessionHeader, supplierAddr) - - // Add the mangled relay to the session tree. - err = mangledRelaySessionTree.Update([]byte{1}, mangledRelayBz, 1) - require.NoError(t, err) - - // Get the Merkle root for the session tree in order to construct a claim. - mangledRelayMerkleRootBz, flushErr := mangledRelaySessionTree.Flush() - require.NoError(t, flushErr) - - // Re-set the block height to the earliest claim commit height to create a new claim. - claimCtx := cosmostypes.UnwrapSDKContext(ctx) - claimCtx = claimCtx.WithBlockHeight(claimMsgHeight) - - // Create a claim with a merkle root derived from a session tree - // with an unserializable relay. - claimMsg := newTestClaimMsg(t, - sessionStartHeight, - validSessionHeader.GetSessionId(), - supplierAddr, - appAddr, - service, - mangledRelayMerkleRootBz, - ) - _, err = srv.CreateClaim(claimCtx, claimMsg) - require.NoError(t, err) - - // Construct new proof message derived from a session tree - // with an unserializable relay. - return newTestProofMsg(t, - supplierAddr, - validSessionHeader, - mangledRelaySessionTree, - expectedMerkleProofPath, - ) - }, - expectedErr: status.Error( - codes.InvalidArgument, - prooftypes.ErrProofInvalidRelay.Wrapf( - "failed to unmarshal relay: %s", - keepers.Codec.Unmarshal(mangledRelayBz, &servicetypes.Relay{}), - ).Error(), - ), - }, - { - // TODO_TEST(community): expand: test case to cover each session header field. 
- desc: "relay request session header must match proof session header", - newProofMsg: func(t *testing.T) *prooftypes.MsgSubmitProof { - // Construct a session tree with 1 relay with a session header containing - // a session ID that doesn't match the proof session ID. - numRelays := uint(1) - wrongRequestSessionIdSessionTree := newFilledSessionTree( - ctx, t, - numRelays, - supplierUid, supplierAddr, - validSessionHeader, &wrongSessionIdHeader, validSessionHeader, - keyRing, - ringClient, - ) - - // Get the Merkle root for the session tree in order to construct a claim. - wrongRequestSessionIdMerkleRootBz, flushErr := wrongRequestSessionIdSessionTree.Flush() - require.NoError(t, flushErr) - - // Re-set the block height to the earliest claim commit height to create a new claim. - claimCtx := cosmostypes.UnwrapSDKContext(ctx) - claimCtx = claimCtx.WithBlockHeight(claimMsgHeight) - - // Create a claim with a merkle root derived from a relay - // request containing the wrong session ID. - claimMsg := newTestClaimMsg(t, - sessionStartHeight, - validSessionHeader.GetSessionId(), - supplierAddr, - appAddr, - service, - wrongRequestSessionIdMerkleRootBz, - ) - _, err = srv.CreateClaim(claimCtx, claimMsg) - require.NoError(t, err) - - // Construct new proof message using the valid session header, - // *not* the one used in the session tree's relay request. - return newTestProofMsg(t, - supplierAddr, - validSessionHeader, - wrongRequestSessionIdSessionTree, - expectedMerkleProofPath, - ) - }, - expectedErr: status.Error( - codes.FailedPrecondition, - prooftypes.ErrProofInvalidRelay.Wrapf( - "session headers session IDs mismatch; expected: %q, got: %q", - validSessionHeader.GetSessionId(), - wrongSessionIdHeader.GetSessionId(), - ).Error(), - ), - }, - { - // TODO_TEST: expand: test case to cover each session header field. - desc: "relay response session header must match proof session header", - newProofMsg: func(t *testing.T) *prooftypes.MsgSubmitProof { - // Construct a session tree with 1 relay with a session header containing - // a session ID that doesn't match the expected session ID. - numRelays := uint(1) - wrongResponseSessionIdSessionTree := newFilledSessionTree( - ctx, t, - numRelays, - supplierUid, supplierAddr, - validSessionHeader, validSessionHeader, &wrongSessionIdHeader, - keyRing, - ringClient, - ) - - // Get the Merkle root for the session tree in order to construct a claim. - wrongResponseSessionIdMerkleRootBz, flushErr := wrongResponseSessionIdSessionTree.Flush() - require.NoError(t, flushErr) - - // Re-set the block height to the earliest claim commit height to create a new claim. - claimCtx := cosmostypes.UnwrapSDKContext(ctx) - claimCtx = claimCtx.WithBlockHeight(claimMsgHeight) - - // Create a claim with a merkle root derived from a relay - // response containing the wrong session ID. - claimMsg := newTestClaimMsg(t, - sessionStartHeight, - validSessionHeader.GetSessionId(), - supplierAddr, - appAddr, - service, - wrongResponseSessionIdMerkleRootBz, - ) - _, err = srv.CreateClaim(claimCtx, claimMsg) - require.NoError(t, err) - - // Construct new proof message using the valid session header, - // *not* the one used in the session tree's relay response. 
- return newTestProofMsg(t, - supplierAddr, - validSessionHeader, - wrongResponseSessionIdSessionTree, - expectedMerkleProofPath, - ) - }, - expectedErr: status.Error( - codes.FailedPrecondition, - prooftypes.ErrProofInvalidRelay.Wrapf( - "session headers session IDs mismatch; expected: %q, got: %q", - validSessionHeader.GetSessionId(), - wrongSessionIdHeader.GetSessionId(), - ).Error(), - ), - }, - { - desc: "relay request signature must be valid", - newProofMsg: func(t *testing.T) *prooftypes.MsgSubmitProof { - // Set the relay request signature to an invalid byte slice. - invalidRequestSignatureRelay := testrelayer.NewEmptyRelay(validSessionHeader, validSessionHeader, supplierAddr) - invalidRequestSignatureRelay.Req.Meta.Signature = invalidSignatureBz - - // Ensure a valid relay response signature. - testrelayer.SignRelayResponse(ctx, t, invalidRequestSignatureRelay, supplierUid, supplierAddr, keyRing) - - invalidRequestSignatureRelayBz, marshalErr := invalidRequestSignatureRelay.Marshal() - require.NoError(t, marshalErr) - - // Construct a session tree with 1 relay with a session header containing - // a session ID that doesn't match the expected session ID. - invalidRequestSignatureSessionTree := newEmptySessionTree(t, validSessionHeader, supplierAddr) - - // Add the relay to the session tree. - err = invalidRequestSignatureSessionTree.Update([]byte{1}, invalidRequestSignatureRelayBz, 1) - require.NoError(t, err) - - // Get the Merkle root for the session tree in order to construct a claim. - invalidRequestSignatureMerkleRootBz, flushErr := invalidRequestSignatureSessionTree.Flush() - require.NoError(t, flushErr) - - // Re-set the block height to the earliest claim commit height to create a new claim. - claimCtx := cosmostypes.UnwrapSDKContext(ctx) - claimCtx = claimCtx.WithBlockHeight(claimMsgHeight) - - // Create a claim with a merkle root derived from a session tree - // with an invalid relay request signature. - claimMsg := newTestClaimMsg(t, - sessionStartHeight, - validSessionHeader.GetSessionId(), - supplierAddr, - appAddr, - service, - invalidRequestSignatureMerkleRootBz, - ) - _, err = srv.CreateClaim(claimCtx, claimMsg) - require.NoError(t, err) - - // Construct new proof message derived from a session tree - // with an invalid relay request signature. - return newTestProofMsg(t, - supplierAddr, - validSessionHeader, - invalidRequestSignatureSessionTree, - expectedMerkleProofPath, - ) - }, - expectedErr: status.Error( - codes.FailedPrecondition, - prooftypes.ErrProofInvalidRelayRequest.Wrapf( - "error deserializing ring signature: %s", - new(ring.RingSig).Deserialize(ring_secp256k1.NewCurve(), invalidSignatureBz), - ).Error(), - ), - }, - { - desc: "relay request signature is valid but signed by an incorrect application", - newProofMsg: func(t *testing.T) *prooftypes.MsgSubmitProof { - t.Skip("TODO_TECHDEBT(@bryanchriswhite): Implement this") - return nil - }, - }, - { - desc: "relay response signature must be valid", - newProofMsg: func(t *testing.T) *prooftypes.MsgSubmitProof { - // Set the relay response signature to an invalid byte slice. 
- relay := testrelayer.NewEmptyRelay(validSessionHeader, validSessionHeader, supplierAddr) - relay.Res.Meta.SupplierSignature = invalidSignatureBz - - // Ensure a valid relay request signature - testrelayer.SignRelayRequest(ctx, t, relay, appAddr, keyRing, ringClient) - - relayBz, marshalErr := relay.Marshal() - require.NoError(t, marshalErr) - - // Construct a session tree with 1 relay with a session header containing - // a session ID that doesn't match the expected session ID. - invalidResponseSignatureSessionTree := newEmptySessionTree(t, validSessionHeader, supplierAddr) - - // Add the relay to the session tree. - err = invalidResponseSignatureSessionTree.Update([]byte{1}, relayBz, 1) - require.NoError(t, err) - - // Get the Merkle root for the session tree in order to construct a claim. - invalidResponseSignatureMerkleRootBz, flushErr := invalidResponseSignatureSessionTree.Flush() - require.NoError(t, flushErr) - - // Re-set the block height to the earliest claim commit height to create a new claim. - claimCtx := cosmostypes.UnwrapSDKContext(ctx) - claimCtx = claimCtx.WithBlockHeight(claimMsgHeight) - - // Create a claim with a merkle root derived from a session tree - // with an invalid relay response signature. - claimMsg := newTestClaimMsg(t, - sessionStartHeight, - validSessionHeader.GetSessionId(), - supplierAddr, - appAddr, - service, - invalidResponseSignatureMerkleRootBz, - ) - _, err = srv.CreateClaim(claimCtx, claimMsg) - require.NoError(t, err) - - // Construct new proof message derived from a session tree - // with an invalid relay response signature. - return newTestProofMsg(t, - supplierAddr, - validSessionHeader, - invalidResponseSignatureSessionTree, - expectedMerkleProofPath, - ) - }, - expectedErr: status.Error( - codes.FailedPrecondition, - servicetypes.ErrServiceInvalidRelayResponse.Wrap("invalid signature").Error(), - ), - }, - { - desc: "relay response signature is valid but signed by an incorrect supplier", - newProofMsg: func(t *testing.T) *prooftypes.MsgSubmitProof { - t.Skip("TODO_TECHDEBT(@bryanchriswhite): Implement this") - return nil - }, - }, - { - desc: "the merkle proof path provided does not match the one expected/enforced by the protocol", - newProofMsg: func(t *testing.T) *prooftypes.MsgSubmitProof { - // Construct a new valid session tree for this test case because once the - // closest proof has already been generated, the path cannot be changed. - numRelays := uint(5) - wrongPathSessionTree := newFilledSessionTree( - ctx, t, - numRelays, - supplierUid, supplierAddr, - validSessionHeader, validSessionHeader, validSessionHeader, - keyRing, - ringClient, - ) - - wrongPathMerkleRootBz, flushErr := wrongPathSessionTree.Flush() - require.NoError(t, flushErr) - - // Re-set the block height to the earliest claim commit height to create a new claim. - claimCtx := keepertest.SetBlockHeight(ctx, claimMsgHeight) - - // Create a valid claim with the expected merkle root. - claimMsg := newTestClaimMsg(t, - sessionStartHeight, - validSessionHeader.GetSessionId(), - supplierAddr, - appAddr, - service, - wrongPathMerkleRootBz, - ) - _, err = srv.CreateClaim(claimCtx, claimMsg) - require.NoError(t, err) - - // Construct new proof message derived from a session tree - // with an invalid relay response signature. 
- return newTestProofMsg(t, supplierAddr, validSessionHeader, wrongPathSessionTree, wrongClosestProofPath) - }, - expectedErr: status.Error( - codes.FailedPrecondition, - prooftypes.ErrProofInvalidProof.Wrapf( - "the path of the proof provided (%x) does not match one expected by the on-chain protocol (%x)", - wrongClosestProofPath, - protocol.GetPathForProof(sdkCtx.HeaderHash(), validSessionHeader.GetSessionId()), - ).Error(), - ), - }, - { - desc: "relay difficulty must be greater than or equal to minimum (zero difficulty)", - newProofMsg: func(t *testing.T) *prooftypes.MsgSubmitProof { - // Set the minimum relay difficulty to a non-zero value such that the relays - // constructed by the test helpers have a negligible chance of being valid. - err = keepers.Keeper.SetParams(ctx, prooftypes.Params{ - RelayDifficultyTargetHash: lowTargetHash, - }) - require.NoError(t, err) - - // Reset the minimum relay difficulty to zero after this test case. - t.Cleanup(func() { - err = keepers.Keeper.SetParams(ctx, prooftypes.DefaultParams()) - require.NoError(t, err) - }) - - // Construct a proof message with a session tree containing - // a relay of insufficient difficulty. - return newTestProofMsg(t, - supplierAddr, - validSessionHeader, - validSessionTree, - expectedMerkleProofPath, - ) - }, - expectedErr: status.Error( - codes.FailedPrecondition, - prooftypes.ErrProofInvalidRelay.Wrapf( - "the difficulty relay being proven is (%d), and is smaller than the target difficulty (%d) for service %s", - validClosestRelayDifficultyBits, - highExpectedTargetDifficulty, - validSessionHeader.Service.Id, - ).Error(), - ), - }, - { - desc: "relay difficulty must be greater than or equal to minimum (non-zero difficulty)", - newProofMsg: func(t *testing.T) *prooftypes.MsgSubmitProof { - t.Skip("TODO_TECHDEBT(@bryanchriswhite): Implement this") - return nil - }, - }, - { // group: claim must exist for proof message - desc: "claim must exist for proof message", - newProofMsg: func(t *testing.T) *prooftypes.MsgSubmitProof { - // Construct a new session tree corresponding to the unclaimed session. - numRelays := uint(5) - unclaimedSessionTree := newFilledSessionTree( - ctx, t, - numRelays, - "wrong_supplier", wrongSupplierAddr, - unclaimedSessionHeader, unclaimedSessionHeader, unclaimedSessionHeader, - keyRing, - ringClient, - ) - - // Discard session tree Merkle root because no claim is being created. - // Session tree must be closed (flushed) to compute closest Merkle Proof. - _, err = unclaimedSessionTree.Flush() - require.NoError(t, err) - - // Compute expected proof path for the unclaimed session. - expectedMerkleProofPath := protocol.GetPathForProof( - blockHeaderHash, - unclaimedSessionHeader.GetSessionId(), - ) - - // Construct new proof message using the supplier & session header - // from the session which is *not* expected to be claimed. 
- return newTestProofMsg(t, - wrongSupplierAddr, - unclaimedSessionHeader, - unclaimedSessionTree, - expectedMerkleProofPath, - ) - }, - expectedErr: status.Error( - codes.FailedPrecondition, - prooftypes.ErrProofClaimNotFound.Wrapf( - "no claim found for session ID %q and supplier %q", - unclaimedSessionHeader.GetSessionId(), - wrongSupplierAddr, - ).Error(), - ), - }, - { - desc: "Valid proof cannot validate claim with an incorrect root", - newProofMsg: func(t *testing.T) *prooftypes.MsgSubmitProof { - numRelays := uint(10) - wrongMerkleRootSessionTree := newFilledSessionTree( - ctx, t, - numRelays, - supplierUid, supplierAddr, - validSessionHeader, validSessionHeader, validSessionHeader, - keyRing, - ringClient, - ) - - wrongMerkleRootBz, err := wrongMerkleRootSessionTree.Flush() - require.NoError(t, err) - - // Re-set the block height to the earliest claim commit height to create a new claim. - claimCtx := keepertest.SetBlockHeight(ctx, claimMsgHeight) - - // Create a claim with the incorrect Merkle root. - wrongMerkleRootClaimMsg := newTestClaimMsg(t, - sessionStartHeight, - validSessionHeader.GetSessionId(), - supplierAddr, - appAddr, - service, - wrongMerkleRootBz, - ) - _, err = srv.CreateClaim(claimCtx, wrongMerkleRootClaimMsg) - require.NoError(t, err) - - // Construct a valid session tree with 5 relays. - validSessionTree := newFilledSessionTree( - ctx, t, - uint(5), - supplierUid, supplierAddr, - validSessionHeader, validSessionHeader, validSessionHeader, - keyRing, - ringClient, - ) - - _, err = validSessionTree.Flush() - require.NoError(t, err) - - // Compute expected proof path for the session. - expectedMerkleProofPath := protocol.GetPathForProof( - blockHeaderHash, - validSessionHeader.GetSessionId(), - ) - - return newTestProofMsg(t, - supplierAddr, - validSessionHeader, - validSessionTree, - expectedMerkleProofPath, - ) - }, - expectedErr: status.Error( - codes.FailedPrecondition, - prooftypes.ErrProofInvalidProof.Wrap("invalid closest merkle proof").Error(), - ), - }, - { - desc: "claim and proof application addresses must match", - newProofMsg: func(t *testing.T) *prooftypes.MsgSubmitProof { - t.Skip("this test case reduces to either the 'claim must exist for proof message' or 'proof session ID must match on-chain session ID cases") - return nil - }, - }, - { - desc: "claim and proof service IDs must match", - newProofMsg: func(t *testing.T) *prooftypes.MsgSubmitProof { - t.Skip("this test case reduces to either the 'claim must exist for proof message' or 'proof session ID must match on-chain session ID cases") - return nil - }, - }, - { - desc: "claim and proof supplier addresses must match", - newProofMsg: func(t *testing.T) *prooftypes.MsgSubmitProof { - t.Skip("this test case reduces to either the 'claim must exist for proof message' or 'proof session ID must match on-chain session ID cases") - return nil - }, - }, } // Submit the corresponding proof. @@ -1230,100 +643,6 @@ func TestMsgServer_SubmitProof_Error(t *testing.T) { } } -// newFilledSessionTree creates a new session tree with numRelays of relays -// filled out using the request and response headers provided where every -// relay is signed by the supplier and application respectively. 
-func newFilledSessionTree( - ctx context.Context, t *testing.T, - numRelays uint, - supplierKeyUid, supplierAddr string, - sessionTreeHeader, reqHeader, resHeader *sessiontypes.SessionHeader, - keyRing keyring.Keyring, - ringClient crypto.RingClient, -) relayer.SessionTree { - t.Helper() - - // Initialize an empty session tree with the given session header. - sessionTree := newEmptySessionTree(t, sessionTreeHeader, supplierAddr) - - // Add numRelays of relays to the session tree. - fillSessionTree( - ctx, t, - sessionTree, numRelays, - supplierKeyUid, supplierAddr, - reqHeader, resHeader, - keyRing, - ringClient, - ) - - return sessionTree -} - -// newEmptySessionTree creates a new empty session tree with for given session. -func newEmptySessionTree( - t *testing.T, - sessionTreeHeader *sessiontypes.SessionHeader, - supplierAddr string, -) relayer.SessionTree { - t.Helper() - - // Create a temporary session tree store directory for persistence. - testSessionTreeStoreDir, err := os.MkdirTemp("", "session_tree_store_dir") - require.NoError(t, err) - - // Delete the temporary session tree store directory after the test completes. - t.Cleanup(func() { - _ = os.RemoveAll(testSessionTreeStoreDir) - }) - - accAddress := cosmostypes.MustAccAddressFromBech32(supplierAddr) - - // Construct a session tree to add relays to and generate a proof from. - sessionTree, err := session.NewSessionTree( - sessionTreeHeader, - &accAddress, - testSessionTreeStoreDir, - ) - require.NoError(t, err) - - return sessionTree -} - -// fillSessionTree fills the session tree with valid signed relays. -// A total of numRelays relays are added to the session tree with -// increasing weights (relay 1 has weight 1, relay 2 has weight 2, etc.). -func fillSessionTree( - ctx context.Context, t *testing.T, - sessionTree relayer.SessionTree, - numRelays uint, - supplierKeyUid, supplierAddr string, - reqHeader, resHeader *sessiontypes.SessionHeader, - keyRing keyring.Keyring, - ringClient crypto.RingClient, -) { - t.Helper() - - for i := 0; i < int(numRelays); i++ { - relay := testrelayer.NewSignedEmptyRelay( - ctx, t, - supplierKeyUid, supplierAddr, - reqHeader, resHeader, - keyRing, - ringClient, - ) - relayBz, err := relay.Marshal() - require.NoError(t, err) - - relayKey, err := relay.GetHash() - require.NoError(t, err) - - relayWeight := uint64(i) - - err = sessionTree.Update(relayKey[:], relayBz, relayWeight) - require.NoError(t, err) - } -} - // newTestProofMsg creates a new submit proof message that can be submitted // to be validated and stored on-chain. func newTestProofMsg( @@ -1406,28 +725,3 @@ func createClaimAndStoreBlockHash( return claimRes.GetClaim() } - -// getClosestRelayDifficulty returns the mining difficulty number which corresponds -// to the relayHash stored in the sessionTree that is closest to the merkle proof -// path provided. -func getClosestRelayDifficulty( - t *testing.T, - sessionTree relayer.SessionTree, - closestMerkleProofPath []byte, -) int64 { - // Retrieve a merkle proof that is closest to the path provided - closestMerkleProof, err := sessionTree.ProveClosest(closestMerkleProofPath) - require.NoError(t, err) - - // Extract the Relay (containing the RelayResponse & RelayRequest) from the merkle proof. - relay := new(servicetypes.Relay) - relayBz := closestMerkleProof.GetValueHash(&protocol.SmtSpec) - err = relay.Unmarshal(relayBz) - require.NoError(t, err) - - // Retrieve the hash of the relay. 
- relayHash, err := relay.GetHash()
- require.NoError(t, err)
-
- return protocol.GetDifficultyFromHash(relayHash)
-}
diff --git a/x/proof/keeper/proof_validation.go b/x/proof/keeper/proof_validation.go
new file mode 100644
index 000000000..bd4afac74
--- /dev/null
+++ b/x/proof/keeper/proof_validation.go
@@ -0,0 +1,434 @@
+package keeper
+
+/*
+ TODO_MAINNET: Document these steps in the docs and link here.
+
+ ## Actions (error if anything fails)
+ 1. Retrieve a fully hydrated `session` from on-chain store using `msg` metadata
+ 2. Retrieve a fully hydrated `claim` from on-chain store using `msg` metadata
+ 3. Retrieve `relay.Req` and `relay.Res` from deserializing `proof.ClosestValueHash`
+
+ ## Basic Validations (metadata only)
+ 1. proof.sessionId == claim.sessionId
+ 2. msg.supplier in session.suppliers
+ 3. relay.Req.signer == session.appAddr
+ 4. relay.Res.signer == msg.supplier
+
+ ## Msg distribution validation (governance based params)
+ 1. Validate Proof submission is not too early; governance-based param + pseudo-random variation
+ 2. Validate Proof submission is not too late; governance-based param + pseudo-random variation
+
+ ## Relay Signature validation
+ 1. verify(relay.Req.Signature, appRing)
+ 2. verify(relay.Res.Signature, supplier.pubKey)
+
+ ## Relay Mining validation
+ 1. verify(proof.path) is the expected path; pseudo-random variation using on-chain data
+ 2. verify(proof.ValueHash, expectedDifficulty); governance based
+ 3. verify(claim.Root, proof.ClosestProof); verify the closest proof is correct
+*/
+
+import (
+ "bytes"
+ "context"
+
+ "github.com/pokt-network/smt"
+
+ "github.com/pokt-network/poktroll/pkg/crypto/protocol"
+ "github.com/pokt-network/poktroll/x/proof/types"
+ servicetypes "github.com/pokt-network/poktroll/x/service/types"
+ sessiontypes "github.com/pokt-network/poktroll/x/session/types"
+)
+
+// EnsureValidProof validates that the proof submitted by the supplier is correct with
+// respect to an on-chain claim.
+//
+// This function should be called during session settlement (i.e. EndBlocker)
+// rather than during proof submission (i.e. SubmitProof) because:
+// 1. RPC requests should be quick, lightweight and only do basic validation
+// 2. Validators are the ones responsible for the heavy processing & validation during state transitions
+// 3. This creates an opportunity to slash suppliers who submit false proofs, whereas
+// they can keep retrying if it takes place in the SubmitProof handler.
+//
+// Note that some of the validation here is redundant with the validation done in
+// SubmitProof (in the handler). This is because we are trying to strike a balance
+// between preventing sybil or DoS attacks on full nodes during proof submission
+// and being completely exhaustive in all the checks done here.
+func (k Keeper) EnsureValidProof(
+ ctx context.Context,
+ proof *types.Proof,
+) error {
+ logger := k.Logger().With("method", "ValidateProof")
+
+ // Retrieve the supplier's public key.
+ supplierAddr := proof.SupplierAddress
+ supplierPubKey, err := k.accountQuerier.GetPubKeyFromAddress(ctx, supplierAddr)
+ if err != nil {
+ return err
+ }
+
+ // Validate the session header.
+ var onChainSession *sessiontypes.Session
+ onChainSession, err = k.queryAndValidateSessionHeader(ctx, proof.SessionHeader, supplierAddr)
+ if err != nil {
+ return err
+ }
+ logger.Info("queried and validated the session header")
+
+ // Re-hydrate message session header with the on-chain session header.
+ // This corrects for discrepancies between unvalidated fields in the session
+ // header which can be derived from known values (e.g. session end height).
+ sessionHeader := onChainSession.GetHeader()
+
+ // Validate proof message commit height is within the respective session's
+ // proof submission window using the on-chain session header.
+ if err = k.validateProofWindow(ctx, sessionHeader, supplierAddr); err != nil {
+ return err
+ }
+
+ if proof.ClosestMerkleProof == nil || len(proof.ClosestMerkleProof) == 0 {
+ return types.ErrProofInvalidProof.Wrap("proof cannot be empty")
+ }
+
+ // Unmarshal the closest merkle proof from the message.
+ sparseMerkleClosestProof := &smt.SparseMerkleClosestProof{}
+ if err = sparseMerkleClosestProof.Unmarshal(proof.ClosestMerkleProof); err != nil {
+ return types.ErrProofInvalidProof.Wrapf(
+ "failed to unmarshal closest merkle proof: %s",
+ err,
+ )
+ }
+
+ // TODO_MAINNET(#427): Utilize smt.VerifyCompactClosestProof here to
+ // reduce on-chain storage requirements for proofs.
+ // Get the relay request and response from the proof's closest merkle proof value.
+ relayBz := sparseMerkleClosestProof.GetValueHash(&protocol.SmtSpec)
+ relay := &servicetypes.Relay{}
+ if err = k.cdc.Unmarshal(relayBz, relay); err != nil {
+ return types.ErrProofInvalidRelay.Wrapf(
+ "failed to unmarshal relay: %s",
+ err,
+ )
+ }
+
+ // Basic validation of the relay request.
+ relayReq := relay.GetReq()
+ if err = relayReq.ValidateBasic(); err != nil {
+ return err
+ }
+ logger.Debug("successfully validated relay request")
+
+ // Make sure that the supplier address in the proof matches the one in the relay request.
+ if supplierAddr != relayReq.Meta.SupplierAddress {
+ return types.ErrProofSupplierMismatch.Wrapf("supplier address mismatch")
+ }
+ logger.Debug("the proof supplier address matches the relay request supplier address")
+
+ // Basic validation of the relay response.
+ relayRes := relay.GetRes()
+ if err = relayRes.ValidateBasic(); err != nil {
+ return err
+ }
+ logger.Debug("successfully validated relay response")
+
+ // Verify that the relay request session header matches the proof session header.
+ if err = compareSessionHeaders(sessionHeader, relayReq.Meta.GetSessionHeader()); err != nil {
+ return err
+ }
+ logger.Debug("successfully compared relay request session header")
+
+ // Verify that the relay response session header matches the proof session header.
+ if err = compareSessionHeaders(sessionHeader, relayRes.Meta.GetSessionHeader()); err != nil {
+ return err
+ }
+ logger.Debug("successfully compared relay response session header")
+
+ // Verify the relay request's signature.
+ if err = k.ringClient.VerifyRelayRequestSignature(ctx, relayReq); err != nil {
+ return err
+ }
+ logger.Debug("successfully verified relay request signature")
+
+ // Verify the relay response's signature.
+ if err = relayRes.VerifySupplierSignature(supplierPubKey); err != nil {
+ return err
+ }
+ logger.Debug("successfully verified relay response signature")
+
+ // Get the proof module's governance parameters.
+ // TODO_FOLLOWUP(@olshansk, #690): Get the difficulty associated with the service
+ params := k.GetParams(ctx)
+ relayDifficultyTargetHash := params.RelayDifficultyTargetHash
+ if len(relayDifficultyTargetHash) == 0 {
+ relayDifficultyTargetHash = types.DefaultRelayDifficultyTargetHash
+ }
+
+ // Verify the relay difficulty is above the minimum required to earn rewards.
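For intuition, the difficulty check invoked just below reduces to comparing the relay hash against the governance-set target hash as big-endian integers: a lower hash means a harder (more difficult) relay. The following is a minimal, self-contained sketch of that idea only; it assumes the convention that a relay qualifies when its hash is numerically no greater than the target, and the meetsTarget helper is a hypothetical stand-in (the authoritative checks are protocol.IsRelayVolumeApplicable and protocol.GetDifficultyFromHash, used in validateRelayDifficulty at the end of this file):

    package main

    import (
        "bytes"
        "crypto/sha256"
        "fmt"
    )

    // meetsTarget is a hypothetical stand-in for the protocol-level check:
    // a relay hash satisfies the difficulty requirement when it is no greater
    // than the target hash under big-endian byte comparison.
    func meetsTarget(relayHash, targetHash []byte) bool {
        return bytes.Compare(relayHash, targetHash) <= 0
    }

    func main() {
        relayHash := sha256.Sum256([]byte("example serialized relay"))

        // A target with many leading zero bytes implies a high difficulty.
        targetHash := make([]byte, sha256.Size)
        targetHash[0] = 0x0f

        fmt.Println("relay meets target:", meetsTarget(relayHash[:], targetHash))
    }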
+ if err = validateRelayDifficulty(
+ relayBz,
+ relayDifficultyTargetHash,
+ sessionHeader.Service.Id,
+ ); err != nil {
+ return err
+ }
+ logger.Debug("successfully validated relay mining difficulty")
+
+ // Validate that the path the proof is submitted for matches the expected one
+ // based on the pseudo-random on-chain data associated with the header.
+ if err = k.validateClosestPath(
+ ctx,
+ sparseMerkleClosestProof,
+ sessionHeader,
+ supplierAddr,
+ ); err != nil {
+ return err
+ }
+ logger.Debug("successfully validated proof path")
+
+ // Retrieve the corresponding claim for the proof submitted so it can be
+ // used in the proof validation below.
+ claim, err := k.queryAndValidateClaimForProof(ctx, sessionHeader, supplierAddr)
+ if err != nil {
+ return err
+ }
+
+ logger.Debug("successfully retrieved and validated claim")
+
+ // Verify the proof's closest merkle proof.
+ if err = verifyClosestProof(sparseMerkleClosestProof, claim.GetRootHash()); err != nil {
+ return err
+ }
+ logger.Debug("successfully verified closest merkle proof")
+
+ return nil
+}
+
+// validateClosestPath ensures that the proof's path matches the expected path.
+// Since the proof path needs to be pseudo-randomly selected AFTER the session
+// ends, the seed for this is the block hash at the height when the proof window
+// opens.
+func (k Keeper) validateClosestPath(
+ ctx context.Context,
+ proof *smt.SparseMerkleClosestProof,
+ sessionHeader *sessiontypes.SessionHeader,
+ supplierAddr string,
+) error {
+ // The RelayMiner has to wait until the claim and proof submission windows are open
+ // in order to create and submit claims and proofs, respectively.
+ // These windows are calculated as specified in the docs;
+ // see: https://dev.poktroll.com/protocol/primitives/claim_and_proof_lifecycle.
+ //
+ // For reference, see relayerSessionsManager#waitForEarliest{CreateClaim,SubmitProof}Height().
+ //
+ // The RelayMiner has to wait this long to ensure that late relays (i.e.
+ // submitted during SessionNumber=(N+1) but created during SessionNumber=N) are
+ // still included as part of SessionNumber=N.
+ //
+ // Since smt.ProveClosest is defined in terms of proof window open height,
+ // this block's hash needs to be used for validation too.
+ earliestSupplierProofCommitHeight, err := k.sharedQuerier.GetEarliestSupplierProofCommitHeight(
+ ctx,
+ sessionHeader.GetSessionEndBlockHeight(),
+ supplierAddr,
+ )
+ if err != nil {
+ return err
+ }
+
+ // earliestSupplierProofCommitHeight - 1 is the block that will have its hash used as the
+ // source of entropy for all the session trees in that batch, waiting for it to
+ // be received before proceeding.
+ proofPathSeedBlockHash := k.sessionKeeper.GetBlockHash(ctx, earliestSupplierProofCommitHeight-1)
+
+ // TODO_BETA: Investigate "proof for the path provided does not match one expected by the on-chain protocol"
+ // error that may occur due to block height differing from the off-chain part.
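The expected path computed just below is a pure function of on-chain data (the seed block hash fetched above) and the session ID, which is what lets the RelayMiner and validators derive the same path independently. A toy sketch of such a derivation follows; the derivePath helper is hypothetical and simply hashes the seed together with the session ID, whereas protocol.GetPathForProof is the authoritative implementation and may differ in detail:

    package main

    import (
        "crypto/sha256"
        "fmt"
    )

    // derivePath is a hypothetical illustration: hash the seed block hash together
    // with the session ID to obtain a deterministic, pseudo-random tree path.
    func derivePath(seedBlockHash []byte, sessionId string) []byte {
        hasher := sha256.New()
        hasher.Write(seedBlockHash)
        hasher.Write([]byte(sessionId))
        return hasher.Sum(nil)
    }

    func main() {
        // e.g. the hash of the block at (earliest proof commit height - 1).
        seedBlockHash := make([]byte, 32)

        path := derivePath(seedBlockHash, "example-session-id")
        fmt.Printf("derived path: %x\n", path)
    }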
+ k.logger.Info("E2E_DEBUG: height for block hash when verifying the proof", earliestSupplierProofCommitHeight, sessionHeader.GetSessionId()) + + expectedProofPath := protocol.GetPathForProof(proofPathSeedBlockHash, sessionHeader.GetSessionId()) + if !bytes.Equal(proof.Path, expectedProofPath) { + return types.ErrProofInvalidProof.Wrapf( + "the path of the proof provided (%x) does not match one expected by the on-chain protocol (%x)", + proof.Path, + expectedProofPath, + ) + } + + return nil +} + +// queryAndValidateClaimForProof ensures that a claim corresponding to the given +// proof's session exists & has a matching supplier address and session header, +// it then returns the corresponding claim if the validation is successful. +func (k Keeper) queryAndValidateClaimForProof( + ctx context.Context, + sessionHeader *sessiontypes.SessionHeader, + supplierAddr string, +) (*types.Claim, error) { + sessionId := sessionHeader.SessionId + // NB: no need to assert the testSessionId or supplier address as it is retrieved + // by respective values of the given proof. I.e., if the claim exists, then these + // values are guaranteed to match. + foundClaim, found := k.GetClaim(ctx, sessionId, supplierAddr) + if !found { + return nil, types.ErrProofClaimNotFound.Wrapf( + "no claim found for session ID %q and supplier %q", + sessionId, + supplierAddr, + ) + } + + claimSessionHeader := foundClaim.GetSessionHeader() + proofSessionHeader := sessionHeader + + // Ensure session start heights match. + if claimSessionHeader.GetSessionStartBlockHeight() != proofSessionHeader.GetSessionStartBlockHeight() { + return nil, types.ErrProofInvalidSessionStartHeight.Wrapf( + "claim session start height %d does not match proof session start height %d", + claimSessionHeader.GetSessionStartBlockHeight(), + proofSessionHeader.GetSessionStartBlockHeight(), + ) + } + + // Ensure session end heights match. + if claimSessionHeader.GetSessionEndBlockHeight() != proofSessionHeader.GetSessionEndBlockHeight() { + return nil, types.ErrProofInvalidSessionEndHeight.Wrapf( + "claim session end height %d does not match proof session end height %d", + claimSessionHeader.GetSessionEndBlockHeight(), + proofSessionHeader.GetSessionEndBlockHeight(), + ) + } + + // Ensure application addresses match. + if claimSessionHeader.GetApplicationAddress() != proofSessionHeader.GetApplicationAddress() { + return nil, types.ErrProofInvalidAddress.Wrapf( + "claim application address %q does not match proof application address %q", + claimSessionHeader.GetApplicationAddress(), + proofSessionHeader.GetApplicationAddress(), + ) + } + + // Ensure service IDs match. + if claimSessionHeader.GetService().GetId() != proofSessionHeader.GetService().GetId() { + return nil, types.ErrProofInvalidService.Wrapf( + "claim service ID %q does not match proof service ID %q", + claimSessionHeader.GetService().GetId(), + proofSessionHeader.GetService().GetId(), + ) + } + + return &foundClaim, nil +} + +// compareSessionHeaders compares a session header against an expected session header. +// This is necessary to validate the proof's session header against both the relay +// request and response's session headers. +func compareSessionHeaders(expectedSessionHeader, sessionHeader *sessiontypes.SessionHeader) error { + // Compare the Application address. 
+ if sessionHeader.GetApplicationAddress() != expectedSessionHeader.GetApplicationAddress() {
+ return types.ErrProofInvalidRelay.Wrapf(
+ "session headers application addresses mismatch; expected: %q, got: %q",
+ expectedSessionHeader.GetApplicationAddress(),
+ sessionHeader.GetApplicationAddress(),
+ )
+ }
+
+ // Compare the Service IDs.
+ if sessionHeader.GetService().GetId() != expectedSessionHeader.GetService().GetId() {
+ return types.ErrProofInvalidRelay.Wrapf(
+ "session headers service IDs mismatch; expected: %q, got: %q",
+ expectedSessionHeader.GetService().GetId(),
+ sessionHeader.GetService().GetId(),
+ )
+ }
+
+ // Compare the Service names.
+ if sessionHeader.GetService().GetName() != expectedSessionHeader.GetService().GetName() {
+ return types.ErrProofInvalidRelay.Wrapf(
+ "session headers service names mismatch; expected: %q, got: %q",
+ expectedSessionHeader.GetService().GetName(),
+ sessionHeader.GetService().GetName(),
+ )
+ }
+
+ // Compare the Session start block heights.
+ if sessionHeader.GetSessionStartBlockHeight() != expectedSessionHeader.GetSessionStartBlockHeight() {
+ return types.ErrProofInvalidRelay.Wrapf(
+ "session headers session start heights mismatch; expected: %d, got: %d",
+ expectedSessionHeader.GetSessionStartBlockHeight(),
+ sessionHeader.GetSessionStartBlockHeight(),
+ )
+ }
+
+ // Compare the Session end block heights.
+ if sessionHeader.GetSessionEndBlockHeight() != expectedSessionHeader.GetSessionEndBlockHeight() {
+ return types.ErrProofInvalidRelay.Wrapf(
+ "session headers session end heights mismatch; expected: %d, got: %d",
+ expectedSessionHeader.GetSessionEndBlockHeight(),
+ sessionHeader.GetSessionEndBlockHeight(),
+ )
+ }
+
+ // Compare the Session IDs.
+ if sessionHeader.GetSessionId() != expectedSessionHeader.GetSessionId() {
+ return types.ErrProofInvalidRelay.Wrapf(
+ "session headers session IDs mismatch; expected: %q, got: %q",
+ expectedSessionHeader.GetSessionId(),
+ sessionHeader.GetSessionId(),
+ )
+ }
+
+ return nil
+}
+
+// verifyClosestProof verifies the correctness of the ClosestMerkleProof
+// against the root hash committed to when creating the claim.
+func verifyClosestProof(
+ proof *smt.SparseMerkleClosestProof,
+ claimRootHash []byte,
+) error {
+ valid, err := smt.VerifyClosestProof(proof, claimRootHash, &protocol.SmtSpec)
+ if err != nil {
+ return err
+ }
+
+ if !valid {
+ return types.ErrProofInvalidProof.Wrap("invalid closest merkle proof")
+ }
+
+ return nil
+}
+
+// validateRelayDifficulty ensures that the relay's mining difficulty meets the
+// required minimum threshold.
+// TODO_TECHDEBT: Factor out the relay mining difficulty validation into a shared
+// function that can be used by both the proof and the miner packages.
+func validateRelayDifficulty(relayBz, targetHash []byte, serviceId string) error { + relayHashArr := protocol.GetRelayHashFromBytes(relayBz) + relayHash := relayHashArr[:] + + if len(targetHash) != protocol.RelayHasherSize { + return types.ErrProofInvalidRelay.Wrapf( + "invalid RelayDifficultyTargetHash: (%x); length wanted: %d; got: %d", + targetHash, + protocol.RelayHasherSize, + len(targetHash), + ) + } + + if !protocol.IsRelayVolumeApplicable(relayHash, targetHash) { + var targetHashArr [protocol.RelayHasherSize]byte + copy(targetHashArr[:], targetHash) + + relayDifficulty := protocol.GetDifficultyFromHash(relayHashArr) + targetDifficulty := protocol.GetDifficultyFromHash(targetHashArr) + + return types.ErrProofInvalidRelay.Wrapf( + "the difficulty relay being proven is (%d), and is smaller than the target difficulty (%d) for service %s", + relayDifficulty, + targetDifficulty, + serviceId, + ) + } + + return nil +} diff --git a/x/proof/keeper/proof_validation_test.go b/x/proof/keeper/proof_validation_test.go new file mode 100644 index 000000000..1a4707ec5 --- /dev/null +++ b/x/proof/keeper/proof_validation_test.go @@ -0,0 +1,788 @@ +package keeper_test + +import ( + "encoding/hex" + "testing" + + "cosmossdk.io/depinject" + ring_secp256k1 "github.com/athanorlabs/go-dleq/secp256k1" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + cosmostypes "github.com/cosmos/cosmos-sdk/types" + "github.com/pokt-network/ring-go" + "github.com/pokt-network/smt" + "github.com/stretchr/testify/require" + + "github.com/pokt-network/poktroll/pkg/crypto/protocol" + "github.com/pokt-network/poktroll/pkg/crypto/rings" + "github.com/pokt-network/poktroll/pkg/polylog/polyzero" + "github.com/pokt-network/poktroll/pkg/relayer" + keepertest "github.com/pokt-network/poktroll/testutil/keeper" + "github.com/pokt-network/poktroll/testutil/testkeyring" + "github.com/pokt-network/poktroll/testutil/testrelayer" + "github.com/pokt-network/poktroll/testutil/testtree" + prooftypes "github.com/pokt-network/poktroll/x/proof/types" + servicetypes "github.com/pokt-network/poktroll/x/service/types" + "github.com/pokt-network/poktroll/x/shared" + sharedtypes "github.com/pokt-network/poktroll/x/shared/types" +) + +func TestEnsureValidProof_Error(t *testing.T) { + opts := []keepertest.ProofKeepersOpt{ + // Set block hash such that on-chain closest merkle proof validation + // uses the expected path. + keepertest.WithBlockHash(blockHeaderHash), + // Set block height to 1 so there is a valid session on-chain. + keepertest.WithBlockHeight(1), + } + keepers, ctx := keepertest.NewProofModuleKeepers(t, opts...) + + // Ensure the minimum relay difficulty bits is set to zero so that test cases + // don't need to mine for valid relays. + err := keepers.Keeper.SetParams(ctx, testProofParams) + require.NoError(t, err) + + // Construct a keyring to hold the keypairs for the accounts used in the test. + keyRing := keyring.NewInMemory(keepers.Codec) + + // Create a pre-generated account iterator to create accounts for the test. + preGeneratedAccts := testkeyring.PreGeneratedAccounts() + + // Create accounts in the account keeper with corresponding keys in the keyring + // for the applications and suppliers used in the tests. 
+ supplierAddr := testkeyring.CreateOnChainAccount( + ctx, t, + supplierUid, + keyRing, + keepers, + preGeneratedAccts, + ).String() + wrongSupplierAddr := testkeyring.CreateOnChainAccount( + ctx, t, + "wrong_supplier", + keyRing, + keepers, + preGeneratedAccts, + ).String() + appAddr := testkeyring.CreateOnChainAccount( + ctx, t, + "app", + keyRing, + keepers, + preGeneratedAccts, + ).String() + wrongAppAddr := testkeyring.CreateOnChainAccount( + ctx, t, + "wrong_app", + keyRing, + keepers, + preGeneratedAccts, + ).String() + + service := &sharedtypes.Service{Id: testServiceId} + wrongService := &sharedtypes.Service{Id: "wrong_svc"} + + // Add a supplier and application pair that are expected to be in the session. + keepers.AddServiceActors(ctx, t, service, supplierAddr, appAddr) + + // Add a supplier and application pair that are *not* expected to be in the session. + keepers.AddServiceActors(ctx, t, wrongService, wrongSupplierAddr, wrongAppAddr) + + // Get the session for the application/supplier pair which is expected + // to be claimed and for which a valid proof would be accepted. + validSessionHeader := keepers.GetSessionHeader(ctx, t, appAddr, service, 1) + + // Get the session for the application/supplier pair which is + // *not* expected to be claimed. + unclaimedSessionHeader := keepers.GetSessionHeader(ctx, t, wrongAppAddr, wrongService, 1) + + // Construct a session header with session ID that doesn't match the expected session ID. + wrongSessionIdHeader := *validSessionHeader + wrongSessionIdHeader.SessionId = "wrong session ID" + + // TODO_TECHDEBT: add a test case such that we can distinguish between early + // & late session end block heights. + + // Construct a ringClient to get the application's ring & verify the relay + // request signature. + ringClient, err := rings.NewRingClient(depinject.Supply( + polyzero.NewLogger(), + prooftypes.NewAppKeeperQueryClient(keepers.ApplicationKeeper), + prooftypes.NewAccountKeeperQueryClient(keepers.AccountKeeper), + prooftypes.NewSharedKeeperQueryClient(keepers.SharedKeeper, keepers.SessionKeeper), + )) + require.NoError(t, err) + + // Construct a valid session tree with 5 relays. + numRelays := uint(5) + validSessionTree := testtree.NewFilledSessionTree( + ctx, t, + numRelays, + supplierUid, supplierAddr, + validSessionHeader, validSessionHeader, validSessionHeader, + keyRing, + ringClient, + ) + + // Advance the block height to the earliest claim commit height. + sharedParams := keepers.SharedKeeper.GetParams(ctx) + claimMsgHeight := shared.GetEarliestSupplierClaimCommitHeight( + &sharedParams, + validSessionHeader.GetSessionEndBlockHeight(), + blockHeaderHash, + supplierAddr, + ) + sdkCtx := cosmostypes.UnwrapSDKContext(ctx) + sdkCtx = sdkCtx.WithBlockHeight(claimMsgHeight) + ctx = sdkCtx + + merkleRootBz, err := validSessionTree.Flush() + require.NoError(t, err) + + claim := prooftypes.Claim{ + SessionHeader: validSessionHeader, + SupplierAddress: supplierAddr, + RootHash: merkleRootBz, + } + keepers.UpsertClaim(ctx, claim) + + // Compute the difficulty in bits of the closest relay from the valid session tree. + validClosestRelayDifficultyBits := getClosestRelayDifficulty(t, validSessionTree, expectedMerkleProofPath) + + // Copy `emptyBlockHash` to `wrongClosestProofPath` to with a missing byte + // so the closest proof is invalid (i.e. unmarshalable). 
+ invalidClosestProofBytes := make([]byte, len(expectedMerkleProofPath)-1) + + // Store the expected error returned during deserialization of the invalid + // closest Merkle proof bytes. + sparseMerkleClosestProof := &smt.SparseMerkleClosestProof{} + expectedInvalidProofUnmarshalErr := sparseMerkleClosestProof.Unmarshal(invalidClosestProofBytes) + + // Construct a relay to be mangled such that it fails to deserialize in order + // to set the error expectation for the relevant test case. + mangledRelay := testrelayer.NewEmptyRelay(validSessionHeader, validSessionHeader, supplierAddr) + + // Ensure valid relay request and response signatures. + testrelayer.SignRelayRequest(ctx, t, mangledRelay, appAddr, keyRing, ringClient) + testrelayer.SignRelayResponse(ctx, t, mangledRelay, supplierUid, supplierAddr, keyRing) + + // Serialize the relay so that it can be mangled. + mangledRelayBz, err := mangledRelay.Marshal() + require.NoError(t, err) + + // Mangle the serialized relay to cause an error during deserialization. + // Mangling could involve any byte randomly being swapped to any value + // so unmarshaling fails, but we are setting the first byte to 0 for simplicity. + mangledRelayBz[0] = 0x00 + + // Declare an invalid signature byte slice to construct expected relay request + // and response errors and use in corresponding test cases. + invalidSignatureBz := []byte("invalid signature bytes") + + // Prepare an invalid proof of the correct size. + wrongClosestProofPath := make([]byte, len(expectedMerkleProofPath)) + copy(wrongClosestProofPath, expectedMerkleProofPath) + copy(wrongClosestProofPath, "wrong closest proof path") + + lowTargetHash, _ := hex.DecodeString("00000000000000000000000000000000000000000000000000000000000000ff") + var lowTargetHashArr [protocol.RelayHasherSize]byte + copy(lowTargetHashArr[:], lowTargetHash) + highExpectedTargetDifficulty := protocol.GetDifficultyFromHash(lowTargetHashArr) + + tests := []struct { + desc string + newProof func(t *testing.T) *prooftypes.Proof + expectedErr error + }{ + { + desc: "proof service ID cannot be empty", + newProof: func(t *testing.T) *prooftypes.Proof { + // Set proof session ID to empty string. + emptySessionIdHeader := *validSessionHeader + emptySessionIdHeader.SessionId = "" + + // Construct new proof message. + return testtree.NewProof(t, + supplierAddr, + &emptySessionIdHeader, + validSessionTree, + expectedMerkleProofPath) + }, + expectedErr: prooftypes.ErrProofInvalidSessionId.Wrapf( + "session ID does not match on-chain session ID; expected %q, got %q", + validSessionHeader.GetSessionId(), + "", + ), + }, + { + desc: "merkle proof cannot be empty", + newProof: func(t *testing.T) *prooftypes.Proof { + // Construct new proof message. + proof := testtree.NewProof(t, + supplierAddr, + validSessionHeader, + validSessionTree, + expectedMerkleProofPath, + ) + + // Set merkle proof to an empty byte slice. + proof.ClosestMerkleProof = []byte{} + return proof + }, + expectedErr: prooftypes.ErrProofInvalidProof.Wrap( + "proof cannot be empty", + ), + }, + { + desc: "proof session ID must match on-chain session ID", + newProof: func(t *testing.T) *prooftypes.Proof { + // Construct new proof message using the wrong session ID. 
+ return testtree.NewProof(t, + supplierAddr, + &wrongSessionIdHeader, + validSessionTree, + expectedMerkleProofPath, + ) + }, + expectedErr: prooftypes.ErrProofInvalidSessionId.Wrapf( + "session ID does not match on-chain session ID; expected %q, got %q", + validSessionHeader.GetSessionId(), + wrongSessionIdHeader.GetSessionId(), + ), + }, + { + desc: "proof supplier must be in on-chain session", + newProof: func(t *testing.T) *prooftypes.Proof { + // Construct a proof message with a supplier that does not belong in the session. + return testtree.NewProof(t, + wrongSupplierAddr, + validSessionHeader, + validSessionTree, + expectedMerkleProofPath, + ) + }, + expectedErr: prooftypes.ErrProofNotFound.Wrapf( + "supplier address %q not found in session ID %q", + wrongSupplierAddr, + validSessionHeader.GetSessionId(), + ), + }, + { + desc: "merkle proof must be deserializable", + newProof: func(t *testing.T) *prooftypes.Proof { + // Construct new proof message. + proof := testtree.NewProof(t, + supplierAddr, + validSessionHeader, + validSessionTree, + expectedMerkleProofPath, + ) + + // Set merkle proof to an incorrect byte slice. + proof.ClosestMerkleProof = invalidClosestProofBytes + + return proof + }, + expectedErr: prooftypes.ErrProofInvalidProof.Wrapf( + "failed to unmarshal closest merkle proof: %s", + expectedInvalidProofUnmarshalErr, + ), + }, + { + desc: "relay must be deserializable", + newProof: func(t *testing.T) *prooftypes.Proof { + // Construct a session tree to which we'll add 1 unserializable relay. + mangledRelaySessionTree := testtree.NewEmptySessionTree(t, validSessionHeader, supplierAddr) + + // Add the mangled relay to the session tree. + err = mangledRelaySessionTree.Update([]byte{1}, mangledRelayBz, 1) + require.NoError(t, err) + + // Get the Merkle root for the session tree in order to construct a claim. + mangledRelayMerkleRootBz, flushErr := mangledRelaySessionTree.Flush() + require.NoError(t, flushErr) + + // Re-set the block height to the earliest claim commit height to create a new claim. + claimCtx := cosmostypes.UnwrapSDKContext(ctx) + claimCtx = claimCtx.WithBlockHeight(claimMsgHeight) + + // Create a claim with a merkle root derived from a session tree + // with an unserializable relay. + claim := testtree.NewClaim(t, + supplierAddr, + validSessionHeader, + mangledRelayMerkleRootBz, + ) + keepers.UpsertClaim(claimCtx, *claim) + require.NoError(t, err) + + // Construct new proof message derived from a session tree + // with an unserializable relay. + return testtree.NewProof(t, + supplierAddr, + validSessionHeader, + mangledRelaySessionTree, + expectedMerkleProofPath, + ) + }, + expectedErr: prooftypes.ErrProofInvalidRelay.Wrapf( + "failed to unmarshal relay: %s", + keepers.Codec.Unmarshal(mangledRelayBz, &servicetypes.Relay{}), + ), + }, + { + // TODO_TEST(community): expand: test case to cover each session header field. + desc: "relay request session header must match proof session header", + newProof: func(t *testing.T) *prooftypes.Proof { + // Construct a session tree with 1 relay with a session header containing + // a session ID that doesn't match the proof session ID. + numRelays := uint(1) + wrongRequestSessionIdSessionTree := testtree.NewFilledSessionTree( + ctx, t, + numRelays, + supplierUid, supplierAddr, + validSessionHeader, &wrongSessionIdHeader, validSessionHeader, + keyRing, + ringClient, + ) + + // Get the Merkle root for the session tree in order to construct a claim. 
+ wrongRequestSessionIdMerkleRootBz, flushErr := wrongRequestSessionIdSessionTree.Flush() + require.NoError(t, flushErr) + + // Re-set the block height to the earliest claim commit height to create a new claim. + claimCtx := cosmostypes.UnwrapSDKContext(ctx) + claimCtx = claimCtx.WithBlockHeight(claimMsgHeight) + + // Create a claim with a merkle root derived from a relay + // request containing the wrong session ID. + claim := testtree.NewClaim(t, + supplierAddr, + validSessionHeader, + wrongRequestSessionIdMerkleRootBz, + ) + keepers.UpsertClaim(claimCtx, *claim) + require.NoError(t, err) + + // Construct new proof message using the valid session header, + // *not* the one used in the session tree's relay request. + return testtree.NewProof(t, + supplierAddr, + validSessionHeader, + wrongRequestSessionIdSessionTree, + expectedMerkleProofPath, + ) + }, + expectedErr: prooftypes.ErrProofInvalidRelay.Wrapf( + "session headers session IDs mismatch; expected: %q, got: %q", + validSessionHeader.GetSessionId(), + wrongSessionIdHeader.GetSessionId(), + ), + }, + { + // TODO_TEST: expand: test case to cover each session header field. + desc: "relay response session header must match proof session header", + newProof: func(t *testing.T) *prooftypes.Proof { + // Construct a session tree with 1 relay with a session header containing + // a session ID that doesn't match the expected session ID. + numRelays := uint(1) + wrongResponseSessionIdSessionTree := testtree.NewFilledSessionTree( + ctx, t, + numRelays, + supplierUid, supplierAddr, + validSessionHeader, validSessionHeader, &wrongSessionIdHeader, + keyRing, + ringClient, + ) + + // Get the Merkle root for the session tree in order to construct a claim. + wrongResponseSessionIdMerkleRootBz, flushErr := wrongResponseSessionIdSessionTree.Flush() + require.NoError(t, flushErr) + + // Re-set the block height to the earliest claim commit height to create a new claim. + claimCtx := cosmostypes.UnwrapSDKContext(ctx) + claimCtx = claimCtx.WithBlockHeight(claimMsgHeight) + + // Create a claim with a merkle root derived from a relay + // response containing the wrong session ID. + claim := testtree.NewClaim(t, + supplierAddr, + validSessionHeader, + wrongResponseSessionIdMerkleRootBz, + ) + keepers.UpsertClaim(claimCtx, *claim) + require.NoError(t, err) + + // Construct new proof message using the valid session header, + // *not* the one used in the session tree's relay response. + return testtree.NewProof(t, + supplierAddr, + validSessionHeader, + wrongResponseSessionIdSessionTree, + expectedMerkleProofPath, + ) + }, + expectedErr: prooftypes.ErrProofInvalidRelay.Wrapf( + "session headers session IDs mismatch; expected: %q, got: %q", + validSessionHeader.GetSessionId(), + wrongSessionIdHeader.GetSessionId(), + ), + }, + { + desc: "relay request signature must be valid", + newProof: func(t *testing.T) *prooftypes.Proof { + // Set the relay request signature to an invalid byte slice. + invalidRequestSignatureRelay := testrelayer.NewEmptyRelay(validSessionHeader, validSessionHeader, supplierAddr) + invalidRequestSignatureRelay.Req.Meta.Signature = invalidSignatureBz + + // Ensure a valid relay response signature. 
+ testrelayer.SignRelayResponse(ctx, t, invalidRequestSignatureRelay, supplierUid, supplierAddr, keyRing) + + invalidRequestSignatureRelayBz, marshalErr := invalidRequestSignatureRelay.Marshal() + require.NoError(t, marshalErr) + + // Construct a session tree with 1 relay with a session header containing + // a session ID that doesn't match the expected session ID. + invalidRequestSignatureSessionTree := testtree.NewEmptySessionTree(t, validSessionHeader, supplierAddr) + + // Add the relay to the session tree. + err = invalidRequestSignatureSessionTree.Update([]byte{1}, invalidRequestSignatureRelayBz, 1) + require.NoError(t, err) + + // Get the Merkle root for the session tree in order to construct a claim. + invalidRequestSignatureMerkleRootBz, flushErr := invalidRequestSignatureSessionTree.Flush() + require.NoError(t, flushErr) + + // Re-set the block height to the earliest claim commit height to create a new claim. + claimCtx := cosmostypes.UnwrapSDKContext(ctx) + claimCtx = claimCtx.WithBlockHeight(claimMsgHeight) + + // Create a claim with a merkle root derived from a session tree + // with an invalid relay request signature. + + claim := testtree.NewClaim(t, + supplierAddr, + validSessionHeader, + invalidRequestSignatureMerkleRootBz, + ) + keepers.UpsertClaim(claimCtx, *claim) + require.NoError(t, err) + + // Construct new proof message derived from a session tree + // with an invalid relay request signature. + return testtree.NewProof(t, + supplierAddr, + validSessionHeader, + invalidRequestSignatureSessionTree, + expectedMerkleProofPath, + ) + }, + expectedErr: prooftypes.ErrProofInvalidRelayRequest.Wrapf( + "error deserializing ring signature: %s", + new(ring.RingSig).Deserialize(ring_secp256k1.NewCurve(), invalidSignatureBz), + ), + }, + { + desc: "relay request signature is valid but signed by an incorrect application", + newProof: func(t *testing.T) *prooftypes.Proof { + t.Skip("TODO_TECHDEBT(@bryanchriswhite): Implement this") + return nil + }, + }, + { + desc: "relay response signature must be valid", + newProof: func(t *testing.T) *prooftypes.Proof { + // Set the relay response signature to an invalid byte slice. + relay := testrelayer.NewEmptyRelay(validSessionHeader, validSessionHeader, supplierAddr) + relay.Res.Meta.SupplierSignature = invalidSignatureBz + + // Ensure a valid relay request signature + testrelayer.SignRelayRequest(ctx, t, relay, appAddr, keyRing, ringClient) + + relayBz, marshalErr := relay.Marshal() + require.NoError(t, marshalErr) + + // Construct a session tree with 1 relay with a session header containing + // a session ID that doesn't match the expected session ID. + invalidResponseSignatureSessionTree := testtree.NewEmptySessionTree(t, validSessionHeader, supplierAddr) + + // Add the relay to the session tree. + err = invalidResponseSignatureSessionTree.Update([]byte{1}, relayBz, 1) + require.NoError(t, err) + + // Get the Merkle root for the session tree in order to construct a claim. + invalidResponseSignatureMerkleRootBz, flushErr := invalidResponseSignatureSessionTree.Flush() + require.NoError(t, flushErr) + + // Re-set the block height to the earliest claim commit height to create a new claim. + claimCtx := cosmostypes.UnwrapSDKContext(ctx) + claimCtx = claimCtx.WithBlockHeight(claimMsgHeight) + + // Create a claim with a merkle root derived from a session tree + // with an invalid relay response signature. 
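+				// The claim must be committed on-chain before the proof is submitted;
+				// otherwise proof validation would have no claim to verify against.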
+ claim := testtree.NewClaim(t, + supplierAddr, + validSessionHeader, + invalidResponseSignatureMerkleRootBz, + ) + keepers.UpsertClaim(claimCtx, *claim) + require.NoError(t, err) + + // Construct new proof message derived from a session tree + // with an invalid relay response signature. + return testtree.NewProof(t, + supplierAddr, + validSessionHeader, + invalidResponseSignatureSessionTree, + expectedMerkleProofPath, + ) + }, + expectedErr: servicetypes.ErrServiceInvalidRelayResponse.Wrap("invalid signature"), + }, + { + desc: "relay response signature is valid but signed by an incorrect supplier", + newProof: func(t *testing.T) *prooftypes.Proof { + t.Skip("TODO_TECHDEBT(@bryanchriswhite): Implement this") + return nil + }, + }, + { + desc: "the merkle proof path provided does not match the one expected/enforced by the protocol", + newProof: func(t *testing.T) *prooftypes.Proof { + // Construct a new valid session tree for this test case because once the + // closest proof has already been generated, the path cannot be changed. + numRelays := uint(5) + wrongPathSessionTree := testtree.NewFilledSessionTree( + ctx, t, + numRelays, + supplierUid, supplierAddr, + validSessionHeader, validSessionHeader, validSessionHeader, + keyRing, + ringClient, + ) + + wrongPathMerkleRootBz, flushErr := wrongPathSessionTree.Flush() + require.NoError(t, flushErr) + + // Re-set the block height to the earliest claim commit height to create a new claim. + claimCtx := keepertest.SetBlockHeight(ctx, claimMsgHeight) + + // Create an upsert the claim + claim := testtree.NewClaim(t, + supplierAddr, + validSessionHeader, + wrongPathMerkleRootBz, + ) + keepers.UpsertClaim(claimCtx, *claim) + require.NoError(t, err) + + // Construct new proof message derived from a session tree + // with an invalid relay response signature. + return testtree.NewProof(t, supplierAddr, validSessionHeader, wrongPathSessionTree, wrongClosestProofPath) + }, + expectedErr: prooftypes.ErrProofInvalidProof.Wrapf( + "the path of the proof provided (%x) does not match one expected by the on-chain protocol (%x)", + wrongClosestProofPath, + protocol.GetPathForProof(sdkCtx.HeaderHash(), validSessionHeader.GetSessionId()), + ), + }, + { + desc: "relay difficulty must be greater than or equal to a high difficulty (low target hash)", + newProof: func(t *testing.T) *prooftypes.Proof { + // Set the minimum relay difficulty to a non-zero value such that the relays + // constructed by the test helpers have a negligible chance of being valid. + err = keepers.Keeper.SetParams(ctx, prooftypes.Params{ + RelayDifficultyTargetHash: lowTargetHash, + }) + require.NoError(t, err) + + // Reset the minimum relay difficulty to zero after this test case. + t.Cleanup(func() { + err = keepers.Keeper.SetParams(ctx, prooftypes.DefaultParams()) + require.NoError(t, err) + }) + + // Construct a proof message with a session tree containing + // a relay of insufficient difficulty. + return testtree.NewProof(t, + supplierAddr, + validSessionHeader, + validSessionTree, + expectedMerkleProofPath, + ) + }, + expectedErr: prooftypes.ErrProofInvalidRelay.Wrapf( + "the difficulty relay being proven is (%d), and is smaller than the target difficulty (%d) for service %s", + validClosestRelayDifficultyBits, + highExpectedTargetDifficulty, + validSessionHeader.Service.Id, + ), + }, + { + desc: "claim must exist for proof message", + newProof: func(t *testing.T) *prooftypes.Proof { + // Construct a new session tree corresponding to the unclaimed session. 
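+				// A different supplier and session header are used so that no claim
+				// exists on-chain for the proof submitted below.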
+ numRelays := uint(5) + unclaimedSessionTree := testtree.NewFilledSessionTree( + ctx, t, + numRelays, + "wrong_supplier", wrongSupplierAddr, + unclaimedSessionHeader, unclaimedSessionHeader, unclaimedSessionHeader, + keyRing, + ringClient, + ) + + // Discard session tree Merkle root because no claim is being created. + // Session tree must be closed (flushed) to compute closest Merkle Proof. + _, err = unclaimedSessionTree.Flush() + require.NoError(t, err) + + // Compute expected proof path for the unclaimed session. + expectedMerkleProofPath := protocol.GetPathForProof( + blockHeaderHash, + unclaimedSessionHeader.GetSessionId(), + ) + + // Construct new proof message using the supplier & session header + // from the session which is *not* expected to be claimed. + return testtree.NewProof(t, + wrongSupplierAddr, + unclaimedSessionHeader, + unclaimedSessionTree, + expectedMerkleProofPath, + ) + }, + expectedErr: prooftypes.ErrProofClaimNotFound.Wrapf( + "no claim found for session ID %q and supplier %q", + unclaimedSessionHeader.GetSessionId(), + wrongSupplierAddr, + ), + }, + { + desc: "Valid proof cannot validate claim with an incorrect root", + newProof: func(t *testing.T) *prooftypes.Proof { + numRelays := uint(10) + wrongMerkleRootSessionTree := testtree.NewFilledSessionTree( + ctx, t, + numRelays, + supplierUid, supplierAddr, + validSessionHeader, validSessionHeader, validSessionHeader, + keyRing, + ringClient, + ) + + wrongMerkleRootBz, err := wrongMerkleRootSessionTree.Flush() + require.NoError(t, err) + + // Re-set the block height to the earliest claim commit height to create a new claim. + claimCtx := keepertest.SetBlockHeight(ctx, claimMsgHeight) + + // Create a claim with the incorrect Merkle root. + claim := testtree.NewClaim(t, + supplierAddr, + validSessionHeader, + wrongMerkleRootBz, + ) + keepers.UpsertClaim(claimCtx, *claim) + require.NoError(t, err) + + // Construct a valid session tree with 5 relays. + validSessionTree := testtree.NewFilledSessionTree( + ctx, t, + uint(5), + supplierUid, supplierAddr, + validSessionHeader, validSessionHeader, validSessionHeader, + keyRing, + ringClient, + ) + + _, err = validSessionTree.Flush() + require.NoError(t, err) + + // Compute expected proof path for the session. + expectedMerkleProofPath := protocol.GetPathForProof( + blockHeaderHash, + validSessionHeader.GetSessionId(), + ) + + return testtree.NewProof(t, + supplierAddr, + validSessionHeader, + validSessionTree, + expectedMerkleProofPath, + ) + }, + expectedErr: prooftypes.ErrProofInvalidProof.Wrap("invalid closest merkle proof"), + }, + { + desc: "claim and proof application addresses must match", + newProof: func(t *testing.T) *prooftypes.Proof { + t.Skip("this test case reduces to either the 'claim must exist for proof message' or 'proof session ID must match on-chain session ID cases") + return nil + }, + }, + { + desc: "claim and proof service IDs must match", + newProof: func(t *testing.T) *prooftypes.Proof { + t.Skip("this test case reduces to either the 'claim must exist for proof message' or 'proof session ID must match on-chain session ID cases") + return nil + }, + }, + { + desc: "claim and proof supplier addresses must match", + newProof: func(t *testing.T) *prooftypes.Proof { + t.Skip("this test case reduces to either the 'claim must exist for proof message' or 'proof session ID must match on-chain session ID cases") + return nil + }, + }, + } + + // Submit the corresponding proof. 
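+	// For each test case: store the block hash at the proof path seed height,
+	// advance to the earliest proof commit height, then assert that
+	// EnsureValidProof fails with the expected error.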
+ for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + proof := test.newProof(t) + + // Advance the block height to the proof path seed height. + earliestSupplierProofCommitHeight := shared.GetEarliestSupplierProofCommitHeight( + &sharedParams, + proof.GetSessionHeader().GetSessionEndBlockHeight(), + blockHeaderHash, + proof.GetSupplierAddress(), + ) + ctx = keepertest.SetBlockHeight(ctx, earliestSupplierProofCommitHeight-1) + + // Store proof path seed block hash in the session keeper so that it can + // look it up during proof validation. + keepers.StoreBlockHash(ctx) + + // Advance the block height to the earliest proof commit height. + ctx = keepertest.SetBlockHeight(ctx, earliestSupplierProofCommitHeight) + err := keepers.EnsureValidProof(ctx, proof) + require.ErrorContains(t, err, test.expectedErr.Error()) + }) + } +} + +// getClosestRelayDifficulty returns the mining difficulty number which corresponds +// to the relayHash stored in the sessionTree that is closest to the merkle proof +// path provided. +func getClosestRelayDifficulty( + t *testing.T, + sessionTree relayer.SessionTree, + closestMerkleProofPath []byte, +) int64 { + // Retrieve a merkle proof that is closest to the path provided + closestMerkleProof, err := sessionTree.ProveClosest(closestMerkleProofPath) + require.NoError(t, err) + + // Extract the Relay (containing the RelayResponse & RelayRequest) from the merkle proof. + relay := new(servicetypes.Relay) + relayBz := closestMerkleProof.GetValueHash(&protocol.SmtSpec) + err = relay.Unmarshal(relayBz) + require.NoError(t, err) + + // Retrieve the hash of the relay. + relayHash, err := relay.GetHash() + require.NoError(t, err) + + return protocol.GetDifficultyFromHash(relayHash) +} diff --git a/x/proof/keeper/session.go b/x/proof/keeper/session.go index cd201f146..f2c3990a7 100644 --- a/x/proof/keeper/session.go +++ b/x/proof/keeper/session.go @@ -11,23 +11,16 @@ import ( sharedtypes "github.com/pokt-network/poktroll/x/shared/types" ) -type msgWithSessionAndSupplier interface { - GetSessionHeader() *sessiontypes.SessionHeader - GetSupplierAddress() string -} - // queryAndValidateSessionHeader ensures that a session with the sessionID of the given session // header exists and that this session includes the supplier with the given address. // It returns a session which is hydrated with the on-chain session data. -func (k msgServer) queryAndValidateSessionHeader( +func (k Keeper) queryAndValidateSessionHeader( ctx context.Context, - msg msgWithSessionAndSupplier, + sessionHeader *sessiontypes.SessionHeader, + supplierAddr string, ) (*sessiontypes.Session, error) { logger := k.Logger().With("method", "queryAndValidateSessionHeader") - sessionHeader := msg.GetSessionHeader() - supplierAddr := msg.GetSupplierAddress() - sessionReq := &sessiontypes.QueryGetSessionRequest{ ApplicationAddress: sessionHeader.GetApplicationAddress(), Service: sessionHeader.GetService(), @@ -36,7 +29,7 @@ func (k msgServer) queryAndValidateSessionHeader( // Get the on-chain session for the ground-truth against which the given // session header is to be validated. - sessionRes, err := k.Keeper.sessionKeeper.GetSession(ctx, sessionReq) + sessionRes, err := k.sessionKeeper.GetSession(ctx, sessionReq) if err != nil { return nil, err } @@ -84,12 +77,12 @@ func (k msgServer) queryAndValidateSessionHeader( // It *assumes* that the msg's session header is a valid on-chain session with correct // height fields. 
First call #queryAndValidateSessionHeader to ensure any user-provided // session header is valid and correctly hydrated. -func (k msgServer) validateClaimWindow( +func (k Keeper) validateClaimWindow( ctx context.Context, - msg *types.MsgCreateClaim, + sessionHeader *sessiontypes.SessionHeader, + supplierAddr string, ) error { logger := k.Logger().With("method", "validateClaimWindow") - sessionHeader := msg.GetSessionHeader() sharedParams := k.sharedKeeper.GetParams(ctx) sessionEndHeight := sessionHeader.GetSessionEndBlockHeight() @@ -102,7 +95,7 @@ func (k msgServer) validateClaimWindow( earliestClaimCommitHeight, err := k.sharedQuerier.GetEarliestSupplierClaimCommitHeight( ctx, sessionEndHeight, - msg.GetSupplierAddress(), + supplierAddr, ) if err != nil { return err @@ -141,7 +134,7 @@ func (k msgServer) validateClaimWindow( "claim_window_open_height", claimWindowOpenHeight, "earliest_claim_commit_height", earliestClaimCommitHeight, "claim_window_close_height", claimWindowCloseHeight, - "supplier_addr", msg.GetSupplierAddress(), + "supplier_addr", supplierAddr, ). Debug("validated claim window") @@ -152,14 +145,14 @@ func (k msgServer) validateClaimWindow( // It *assumes* that the msg's session header is a valid on-chain session with correct // height fields. First call #queryAndValidateSessionHeader to ensure any user-provided // session header is valid and correctly hydrated. -func (k msgServer) validateProofWindow( +func (k Keeper) validateProofWindow( ctx context.Context, - msg *types.MsgSubmitProof, + sessionHeader *sessiontypes.SessionHeader, + supplierAddr string, ) error { logger := k.Logger().With("method", "validateProofWindow") - sessionHeader := msg.GetSessionHeader() - sharedParams := k.sharedKeeper.GetParams(ctx) + sharedParams := k.sharedKeeper.GetParams(ctx) sessionEndHeight := sessionHeader.GetSessionEndBlockHeight() // Get the proof window open and close heights for the given session header. @@ -170,7 +163,7 @@ func (k msgServer) validateProofWindow( earliestProofCommitHeight, err := k.sharedQuerier.GetEarliestSupplierProofCommitHeight( ctx, sessionEndHeight, - msg.GetSupplierAddress(), + supplierAddr, ) if err != nil { return err @@ -205,7 +198,7 @@ func (k msgServer) validateProofWindow( "proof_window_open_height", proofWindowOpenHeight, "earliest_proof_commit_height", earliestProofCommitHeight, "proof_window_close_height", proofWindowCloseHeight, - "supplier_addr", msg.GetSupplierAddress(), + "supplier_addr", supplierAddr, ). Debug("validated proof window") diff --git a/x/proof/types/account_query_client.go b/x/proof/types/account_query_client.go index 9d89b44d7..86ec77a6e 100644 --- a/x/proof/types/account_query_client.go +++ b/x/proof/types/account_query_client.go @@ -62,6 +62,9 @@ func (accountQueryClient *AccountKeeperQueryClient) GetPubKeyFromAddress( if err != nil { return nil, err } + if acc == nil { + return nil, ErrProofAccNotFound.Wrapf("account not found for address %s", address) + } // If the account's public key is nil, then return an error. 
pubKey := acc.GetPubKey() diff --git a/x/proof/types/errors.go b/x/proof/types/errors.go index 5c3dab70f..7e9c64867 100644 --- a/x/proof/types/errors.go +++ b/x/proof/types/errors.go @@ -31,4 +31,6 @@ var ( ErrProofParamInvalid = sdkerrors.Register(ModuleName, 1120, "the provided param is invalid") ErrProofClaimOutsideOfWindow = sdkerrors.Register(ModuleName, 1121, "claim attempted outside of the session's claim window") ErrProofProofOutsideOfWindow = sdkerrors.Register(ModuleName, 1122, "proof attempted outside of the session's proof window") + ErrProofSupplierMismatch = sdkerrors.Register(ModuleName, 1123, "supplier address does not match the claim or proof") + ErrProofAccNotFound = sdkerrors.Register(ModuleName, 1124, "account not found") ) diff --git a/x/tokenomics/keeper/keeper_settle_pending_claims_test.go b/x/tokenomics/keeper/keeper_settle_pending_claims_test.go index 581bc8a0e..95d12614e 100644 --- a/x/tokenomics/keeper/keeper_settle_pending_claims_test.go +++ b/x/tokenomics/keeper/keeper_settle_pending_claims_test.go @@ -4,18 +4,23 @@ import ( "context" "testing" + "cosmossdk.io/depinject" "cosmossdk.io/math" + "github.com/cosmos/cosmos-sdk/crypto/keyring" "github.com/cosmos/cosmos-sdk/types" cosmostypes "github.com/cosmos/cosmos-sdk/types" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "github.com/pokt-network/poktroll/cmd/poktrolld/cmd" + "github.com/pokt-network/poktroll/pkg/crypto/protocol" + "github.com/pokt-network/poktroll/pkg/crypto/rings" + "github.com/pokt-network/poktroll/pkg/polylog/polyzero" testutilevents "github.com/pokt-network/poktroll/testutil/events" keepertest "github.com/pokt-network/poktroll/testutil/keeper" testutilproof "github.com/pokt-network/poktroll/testutil/proof" - "github.com/pokt-network/poktroll/testutil/sample" - testsession "github.com/pokt-network/poktroll/testutil/session" + "github.com/pokt-network/poktroll/testutil/testkeyring" + "github.com/pokt-network/poktroll/testutil/testtree" apptypes "github.com/pokt-network/poktroll/x/application/types" prooftypes "github.com/pokt-network/poktroll/x/proof/types" sessiontypes "github.com/pokt-network/poktroll/x/session/types" @@ -49,57 +54,106 @@ type TestSuite struct { // - A claim that will require a proof via threshold, given the default proof params. // - A proof which contains only the session header supplier address. func (s *TestSuite) SetupTest() { - supplierAddr := sample.AccAddress() - appAddr := sample.AccAddress() + t := s.T() s.keepers, s.ctx = keepertest.NewTokenomicsModuleKeepers(s.T(), nil) - s.sdkCtx = cosmostypes.UnwrapSDKContext(s.ctx) - - // Set the suite expectedComputeUnits to equal the default proof_requirement_threshold - // such that by default, s.claim will require a proof 100% of the time. - s.expectedComputeUnits = prooftypes.DefaultProofRequirementThreshold - - // Create a service that can be registered in the application and used in the claim - service := sharedtypes.NewService( - testServiceId, - "", - 1, - ) - - // Prepare a claim that can be inserted - s.claim = prooftypes.Claim{ - SupplierAddress: supplierAddr, - SessionHeader: &sessiontypes.SessionHeader{ - ApplicationAddress: appAddr, - Service: &sharedtypes.Service{Id: service.Id}, - SessionId: "session_id", - SessionStartBlockHeight: 1, - SessionEndBlockHeight: testsession.GetSessionEndHeightWithDefaultParams(1), - }, - - // Set the suite expectedComputeUnits to be equal to the default threshold. - // This SHOULD make the claim require a proof given the default proof parameters. 
- RootHash: testutilproof.SmstRootWithSum(s.expectedComputeUnits), - } - - // Prepare a claim that can be inserted - s.proof = prooftypes.Proof{ - SupplierAddress: s.claim.SupplierAddress, - SessionHeader: s.claim.SessionHeader, - // ClosestMerkleProof + s.sdkCtx = cosmostypes.UnwrapSDKContext(s.ctx).WithBlockHeight(1) + s.ctx = s.sdkCtx + + // Construct a keyring to hold the keypairs for the accounts used in the test. + keyRing := keyring.NewInMemory(s.keepers.Codec) + + // Create a pre-generated account iterator to create accounts for the test. + preGeneratedAccts := testkeyring.PreGeneratedAccounts() + + // Create accounts in the account keeper with corresponding keys in the keyring + // // for the applications and suppliers used in the tests. + supplierAddr := testkeyring.CreateOnChainAccount( + s.ctx, t, + "supplier", + keyRing, + s.keepers.AccountKeeper, + preGeneratedAccts, + ).String() + appAddr := testkeyring.CreateOnChainAccount( + s.ctx, t, + "app", + keyRing, + s.keepers.AccountKeeper, + preGeneratedAccts, + ).String() + + service := &sharedtypes.Service{Id: testServiceId} + + supplierStake := types.NewCoin("upokt", math.NewInt(1000000)) + supplier := sharedtypes.Supplier{ + Address: supplierAddr, + Stake: &supplierStake, + Services: []*sharedtypes.SupplierServiceConfig{{Service: service}}, } + s.keepers.SetSupplier(s.ctx, supplier) appStake := types.NewCoin("upokt", math.NewInt(1000000)) app := apptypes.Application{ - Address: appAddr, - Stake: &appStake, - ServiceConfigs: []*sharedtypes.ApplicationServiceConfig{ - { - Service: service, - }, - }, + Address: appAddr, + Stake: &appStake, + ServiceConfigs: []*sharedtypes.ApplicationServiceConfig{{Service: service}}, } s.keepers.SetApplication(s.ctx, app) + + // Get the session for the application/supplier pair which is expected + // to be claimed and for which a valid proof would be accepted. + sessionReq := &sessiontypes.QueryGetSessionRequest{ + ApplicationAddress: appAddr, + Service: service, + BlockHeight: 1, + } + sessionRes, err := s.keepers.GetSession(s.sdkCtx, sessionReq) + require.NoError(t, err) + sessionHeader := sessionRes.Session.Header + + // Construct a ringClient to get the application's ring & verify the relay + // request signature. + ringClient, err := rings.NewRingClient(depinject.Supply( + polyzero.NewLogger(), + prooftypes.NewAppKeeperQueryClient(s.keepers.ApplicationKeeper), + prooftypes.NewAccountKeeperQueryClient(s.keepers.AccountKeeper), + prooftypes.NewSharedKeeperQueryClient(s.keepers.SharedKeeper, s.keepers.SessionKeeper), + )) + require.NoError(t, err) + + // Construct a valid session tree with 10 relays. + numRelays := uint(10) + sessionTree := testtree.NewFilledSessionTree( + s.ctx, t, + numRelays, + "supplier", supplierAddr, + sessionHeader, sessionHeader, sessionHeader, + keyRing, + ringClient, + ) + s.expectedComputeUnits = testtree.FillSessionTreeExpectedComputeUnits(numRelays) + + blockHeaderHash := make([]byte, 0) + expectedMerkleProofPath := protocol.GetPathForProof(blockHeaderHash, sessionHeader.SessionId) + + // Advance the block height to the earliest claim commit height. 
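+	// Claims may only be committed within the claim window, so the block height
+	// is advanced to the earliest claim commit height, which is derived from the
+	// shared params, session end height, block header hash, and supplier address.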
+	sharedParams := s.keepers.SharedKeeper.GetParams(s.ctx)
+	claimMsgHeight := shared.GetEarliestSupplierClaimCommitHeight(
+		&sharedParams,
+		sessionHeader.GetSessionEndBlockHeight(),
+		blockHeaderHash,
+		supplierAddr,
+	)
+	s.sdkCtx = cosmostypes.UnwrapSDKContext(s.ctx).WithBlockHeight(claimMsgHeight).WithHeaderHash(blockHeaderHash)
+	s.ctx = s.sdkCtx
+
+	merkleRootBz, err := sessionTree.Flush()
+	require.NoError(t, err)
+
+	// Prepare a claim that can be inserted
+	s.claim = *testtree.NewClaim(t, supplierAddr, sessionHeader, merkleRootBz)
+	s.proof = *testtree.NewProof(t, supplierAddr, sessionHeader, sessionTree, expectedMerkleProofPath)
 }
 
 // TestSettleExpiringClaimsSuite tests the claim settlement process.
@@ -203,6 +257,58 @@ func (s *TestSuite) TestSettlePendingClaims_ClaimExpired_ProofRequiredAndNotProv
 	// Validate the event
 	expectedEvent := expectedEvents[0]
+	require.Equal(t, tokenomicstypes.ClaimExpirationReason_PROOF_MISSING, expectedEvent.GetExpirationReason())
+	require.Equal(t, s.expectedComputeUnits, expectedEvent.GetNumComputeUnits())
+}
+
+func (s *TestSuite) TestSettlePendingClaims_ClaimExpired_ProofRequired_InvalidOneProvided() {
+	// Retrieve default values
+	t := s.T()
+	ctx := s.ctx
+	sdkCtx := cosmostypes.UnwrapSDKContext(ctx)
+	sharedParams := s.keepers.SharedKeeper.GetParams(ctx)
+
+	// Create a claim that requires a proof and an invalid proof for it
+	claim := s.claim
+	proof := s.proof
+	proof.ClosestMerkleProof = []byte("invalid_proof")
+
+	// Upsert the proof & claim
+	s.keepers.UpsertClaim(ctx, claim)
+	s.keepers.UpsertProof(ctx, proof)
+
+	// Settle pending claims after proof window closes
+	// Expectation: All (1) claims should be expired.
+	// NB: proofs should be rejected when the current height equals the proof window close height.
+	sessionEndHeight := claim.SessionHeader.SessionEndBlockHeight
+	blockHeight := shared.GetProofWindowCloseHeight(&sharedParams, sessionEndHeight)
+	sdkCtx = sdkCtx.WithBlockHeight(blockHeight)
+	settledResult, expiredResult, err := s.keepers.SettlePendingClaims(sdkCtx)
+	require.NoError(t, err)
+
+	// Check that no claims were settled.
+	require.Equal(t, uint64(0), settledResult.NumClaims)
+	// Validate that exactly one claim expired
+	require.Equal(t, uint64(1), expiredResult.NumClaims)
+
+	// Validate that no claims remain.
+	claims := s.keepers.GetAllClaims(ctx)
+	require.Len(t, claims, 0)
+
+	// Validate that no proofs remain.
+	proofs := s.keepers.GetAllProofs(ctx)
+	require.Len(t, proofs, 0)
+
+	// Confirm an expiration event was emitted
+	events := sdkCtx.EventManager().Events()
+	require.Len(t, events, 5) // minting, burning, settling, etc.
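+	// The claim expired because the submitted proof failed validation, so the
+	// emitted event is expected to report PROOF_INVALID rather than PROOF_MISSING.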
+ expectedEvents := testutilevents.FilterEvents[*tokenomicstypes.EventClaimExpired](t, + events, "poktroll.tokenomics.EventClaimExpired") + require.Len(t, expectedEvents, 1) + + // Validate the event + expectedEvent := expectedEvents[0] + require.Equal(t, tokenomicstypes.ClaimExpirationReason_PROOF_INVALID, expectedEvent.GetExpirationReason()) require.Equal(t, s.expectedComputeUnits, expectedEvent.GetNumComputeUnits()) } @@ -251,7 +357,7 @@ func (s *TestSuite) TestSettlePendingClaims_ClaimSettled_ProofRequiredAndProvide // Validate the event expectedEvent := expectedEvents[0] - require.NotEqual(t, prooftypes.ProofRequirementReason_NOT_REQUIRED, expectedEvent.GetProofRequirement()) + require.Equal(t, prooftypes.ProofRequirementReason_THRESHOLD, expectedEvent.GetProofRequirement()) require.Equal(t, s.expectedComputeUnits, expectedEvent.GetNumComputeUnits()) } @@ -268,26 +374,19 @@ func (s *TestSuite) TestClaimSettlement_ClaimSettled_ProofRequiredAndProvided_Vi // matches s.claim. err := s.keepers.ProofKeeper.SetParams(ctx, prooftypes.Params{ ProofRequestProbability: 1, - // +1 to push the threshold above s.claim's compute units + // +1 to push the requirement threshold ABOVE s.claim's compute units ProofRequirementThreshold: s.expectedComputeUnits + 1, }) require.NoError(t, err) - // Create a claim that requires a proof - claim := s.claim - - // 0. Add the claim & verify it exists - s.keepers.UpsertClaim(ctx, claim) - claims := s.keepers.GetAllClaims(ctx) - s.Require().Len(claims, 1) - - // Upsert the proof + // Upsert the claim & proof + s.keepers.UpsertClaim(ctx, s.claim) s.keepers.UpsertProof(ctx, s.proof) // Settle pending claims after proof window closes // Expectation: All (1) claims should be claimed. // NB: proof window has definitely closed at this point - sessionEndHeight := claim.SessionHeader.SessionEndBlockHeight + sessionEndHeight := s.claim.SessionHeader.SessionEndBlockHeight blockHeight := shared.GetProofWindowCloseHeight(&sharedParams, sessionEndHeight) sdkCtx = sdkCtx.WithBlockHeight(blockHeight) settledResult, expiredResult, err := s.keepers.SettlePendingClaims(sdkCtx) @@ -299,7 +398,7 @@ func (s *TestSuite) TestClaimSettlement_ClaimSettled_ProofRequiredAndProvided_Vi require.Equal(t, uint64(0), expiredResult.NumClaims) // Validate that no claims remain. 
-	claims = s.keepers.GetAllClaims(ctx)
+	claims := s.keepers.GetAllClaims(ctx)
 	require.Len(t, claims, 0)
 
 	// Confirm an settlement event was emitted
@@ -308,7 +407,7 @@ func (s *TestSuite) TestClaimSettlement_ClaimSettled_ProofRequiredAndProvided_Vi
 		events, "poktroll.tokenomics.EventClaimSettled")
 	require.Len(t, expectedEvents, 1)
 	expectedEvent := expectedEvents[0]
-	require.NotEqual(t, prooftypes.ProofRequirementReason_NOT_REQUIRED, expectedEvent.GetProofRequirement())
+	require.Equal(t, prooftypes.ProofRequirementReason_PROBABILISTIC, expectedEvent.GetProofRequirement())
 	require.Equal(t, s.expectedComputeUnits, expectedEvent.GetNumComputeUnits())
 }
diff --git a/x/tokenomics/keeper/settle_pending_claims.go b/x/tokenomics/keeper/settle_pending_claims.go
index 7fe1a3c53..e04dc65b5 100644
--- a/x/tokenomics/keeper/settle_pending_claims.go
+++ b/x/tokenomics/keeper/settle_pending_claims.go
@@ -64,7 +64,7 @@ func (k Keeper) SettlePendingClaims(ctx sdk.Context) (
 
 		sessionId := claim.SessionHeader.SessionId
 
-		_, isProofFound := k.proofKeeper.GetProof(ctx, sessionId, claim.SupplierAddress)
+		proof, isProofFound := k.proofKeeper.GetProof(ctx, sessionId, claim.SupplierAddress)
 		// Using the probabilistic proofs approach, determine if this expiring
 		// claim required an on-chain proof
 		proofRequirement, err = k.proofRequirementForClaim(ctx, &claim)
@@ -80,14 +80,29 @@ func (k Keeper) SettlePendingClaims(ctx sdk.Context) (
 			"proof_requirement", proofRequirement,
 		)
 
-		if proofRequirement != prooftypes.ProofRequirementReason_NOT_REQUIRED {
-			// If a proof is not found, the claim will expire and never be settled.
-			if !isProofFound {
+		proofIsRequired := (proofRequirement != prooftypes.ProofRequirementReason_NOT_REQUIRED)
+		if proofIsRequired {
+			expirationReason := types.ClaimExpirationReason_EXPIRATION_REASON_UNSPECIFIED // EXPIRATION_REASON_UNSPECIFIED is the default
+
+			if isProofFound {
+				if err = k.proofKeeper.EnsureValidProof(ctx, &proof); err != nil {
+					logger.Warn(fmt.Sprintf("Proof was found but is invalid due to %v", err))
+					expirationReason = types.ClaimExpirationReason_PROOF_INVALID
+				}
+			} else {
+				expirationReason = types.ClaimExpirationReason_PROOF_MISSING
+			}
+
+			// If the proof is missing or invalid, the claim expires and is never settled.
+			if expirationReason != types.ClaimExpirationReason_EXPIRATION_REASON_UNSPECIFIED {
+				// The required proof was either missing or invalid.
 				// Emit an event that a claim has expired and being removed without being settled.
 				claimExpiredEvent := types.EventClaimExpired{
-					Claim:           &claim,
-					NumComputeUnits: numClaimComputeUnits,
-					NumRelays:       numRelaysInSessionTree,
+					Claim:            &claim,
+					NumComputeUnits:  numClaimComputeUnits,
+					NumRelays:        numRelaysInSessionTree,
+					ExpirationReason: expirationReason,
+					// TODO_CONSIDERATION: Add the error to the event if the proof was invalid.
 				}
 				if err = ctx.EventManager().EmitTypedEvent(&claimExpiredEvent); err != nil {
 					return settledResult, expiredResult, err
@@ -98,17 +113,21 @@ func (k Keeper) SettlePendingClaims(ctx sdk.Context) (
 				// The claim & proof are no longer necessary, so there's no need for them
 				// to take up on-chain space.
 				k.proofKeeper.RemoveClaim(ctx, sessionId, claim.SupplierAddress)
+				if isProofFound {
+					k.proofKeeper.RemoveProof(ctx, sessionId, claim.SupplierAddress)
+				}
 
 				expiredResult.NumClaims++
 				expiredResult.NumRelays += numRelaysInSessionTree
 				expiredResult.NumComputeUnits += numClaimComputeUnits
 				continue
 			}
-			// TODO_FOLLOWUP(@olshansk, #690): Document the potential changes needed here.
- // NB: If a proof is found, it is valid because verification is done - // at the time of submission. } + // If this code path is reached, then either: + // 1. The claim does not require a proof. + // 2. The claim requires a proof and a valid proof was found. + // Manage the mint & burn accounting for the claim. if err = k.SettleSessionAccounting(ctx, &claim); err != nil { logger.Error(fmt.Sprintf("error settling session accounting for claim %q: %v", claim.SessionHeader.SessionId, err)) diff --git a/x/tokenomics/keeper/settle_session_accounting_test.go b/x/tokenomics/keeper/settle_session_accounting_test.go index 3f4274bf8..6bcf240c0 100644 --- a/x/tokenomics/keeper/settle_session_accounting_test.go +++ b/x/tokenomics/keeper/settle_session_accounting_test.go @@ -11,9 +11,8 @@ import ( cosmostypes "github.com/cosmos/cosmos-sdk/types" authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" - "github.com/stretchr/testify/require" - "github.com/pokt-network/smt" + "github.com/stretchr/testify/require" "github.com/pokt-network/poktroll/pkg/crypto/protocol" testkeeper "github.com/pokt-network/poktroll/testutil/keeper" diff --git a/x/tokenomics/types/event.pb.go b/x/tokenomics/types/event.pb.go index 8ac8e9321..5c55fbc6e 100644 --- a/x/tokenomics/types/event.pb.go +++ b/x/tokenomics/types/event.pb.go @@ -25,13 +25,43 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +type ClaimExpirationReason int32 + +const ( + ClaimExpirationReason_EXPIRATION_REASON_UNSPECIFIED ClaimExpirationReason = 0 + ClaimExpirationReason_PROOF_MISSING ClaimExpirationReason = 1 + ClaimExpirationReason_PROOF_INVALID ClaimExpirationReason = 2 +) + +var ClaimExpirationReason_name = map[int32]string{ + 0: "EXPIRATION_REASON_UNSPECIFIED", + 1: "PROOF_MISSING", + 2: "PROOF_INVALID", +} + +var ClaimExpirationReason_value = map[string]int32{ + "EXPIRATION_REASON_UNSPECIFIED": 0, + "PROOF_MISSING": 1, + "PROOF_INVALID": 2, +} + +func (x ClaimExpirationReason) String() string { + return proto.EnumName(ClaimExpirationReason_name, int32(x)) +} + +func (ClaimExpirationReason) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_a78874bbf91a58c7, []int{0} +} + // EventClaimExpired is an event emitted during settlement whenever a claim requiring // an on-chain proof doesn't have one. The claim cannot be settled, leading to that work // never being rewarded. type EventClaimExpired struct { - Claim *types.Claim `protobuf:"bytes,1,opt,name=claim,proto3" json:"claim"` - NumRelays uint64 `protobuf:"varint,2,opt,name=num_relays,json=numRelays,proto3" json:"num_relays"` - NumComputeUnits uint64 `protobuf:"varint,3,opt,name=num_compute_units,json=numComputeUnits,proto3" json:"num_compute_units"` + Claim *types.Claim `protobuf:"bytes,1,opt,name=claim,proto3" json:"claim"` + // TODO_MAINNET: Shold we include the proof here too? 
+ NumRelays uint64 `protobuf:"varint,2,opt,name=num_relays,json=numRelays,proto3" json:"num_relays"` + NumComputeUnits uint64 `protobuf:"varint,3,opt,name=num_compute_units,json=numComputeUnits,proto3" json:"num_compute_units"` + ExpirationReason ClaimExpirationReason `protobuf:"varint,4,opt,name=expiration_reason,json=expirationReason,proto3,enum=poktroll.tokenomics.ClaimExpirationReason" json:"expiration_reason"` } func (m *EventClaimExpired) Reset() { *m = EventClaimExpired{} } @@ -88,6 +118,13 @@ func (m *EventClaimExpired) GetNumComputeUnits() uint64 { return 0 } +func (m *EventClaimExpired) GetExpirationReason() ClaimExpirationReason { + if m != nil { + return m.ExpirationReason + } + return ClaimExpirationReason_EXPIRATION_REASON_UNSPECIFIED +} + // EventClaimSettled is an event emitted whenever a claim is settled. // The proof_required determines whether the claim requires a proof that has been submitted or not type EventClaimSettled struct { @@ -299,6 +336,7 @@ func (m *EventApplicationOverserviced) GetEffectiveBurn() *types1.Coin { } func init() { + proto.RegisterEnum("poktroll.tokenomics.ClaimExpirationReason", ClaimExpirationReason_name, ClaimExpirationReason_value) proto.RegisterType((*EventClaimExpired)(nil), "poktroll.tokenomics.EventClaimExpired") proto.RegisterType((*EventClaimSettled)(nil), "poktroll.tokenomics.EventClaimSettled") proto.RegisterType((*EventRelayMiningDifficultyUpdated)(nil), "poktroll.tokenomics.EventRelayMiningDifficultyUpdated") @@ -308,46 +346,52 @@ func init() { func init() { proto.RegisterFile("poktroll/tokenomics/event.proto", fileDescriptor_a78874bbf91a58c7) } var fileDescriptor_a78874bbf91a58c7 = []byte{ - // 621 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x94, 0xcb, 0x6e, 0x13, 0x3d, - 0x14, 0xc7, 0x3b, 0xbd, 0x7c, 0x52, 0xdc, 0xaf, 0x37, 0x97, 0x8a, 0x50, 0xca, 0x24, 0x64, 0x81, - 0x8a, 0x50, 0x67, 0xd4, 0x56, 0x62, 0x85, 0x2a, 0x9a, 0x12, 0xa9, 0x2c, 0x0a, 0x68, 0xa0, 0x1b, - 0x36, 0x23, 0x67, 0x7c, 0x92, 0x98, 0xce, 0xd8, 0xc6, 0xe3, 0x99, 0xa4, 0x6f, 0xc1, 0x03, 0xf0, - 0x02, 0x3c, 0x04, 0x7b, 0x24, 0x36, 0x5d, 0xb2, 0xaa, 0x50, 0xbb, 0xeb, 0x53, 0x20, 0x7b, 0x72, - 0x19, 0xa5, 0x20, 0xd6, 0x6c, 0x12, 0xeb, 0xfc, 0x7f, 0xe7, 0x9c, 0xbf, 0x8f, 0xc7, 0x46, 0x35, - 0x29, 0xce, 0xb4, 0x12, 0x71, 0xec, 0x6b, 0x71, 0x06, 0x5c, 0x24, 0x2c, 0x4a, 0x7d, 0xc8, 0x81, - 0x6b, 0x4f, 0x2a, 0xa1, 0x05, 0x5e, 0x1f, 0x01, 0xde, 0x04, 0xd8, 0xbc, 0xd3, 0x15, 0x5d, 0x61, - 0x75, 0xdf, 0xac, 0x0a, 0x74, 0xd3, 0x8d, 0x44, 0x9a, 0x88, 0xd4, 0x6f, 0x93, 0x14, 0xfc, 0x7c, - 0xb7, 0x0d, 0x9a, 0xec, 0xfa, 0x91, 0x60, 0x7c, 0xa8, 0x6f, 0x8e, 0x7b, 0x49, 0x25, 0x44, 0xc7, - 0x8f, 0x62, 0xc2, 0x92, 0xa1, 0x56, 0x9f, 0xd2, 0x14, 0x7c, 0xcc, 0x98, 0x82, 0x64, 0x6c, 0xa4, - 0xf1, 0xd5, 0x41, 0x6b, 0x2d, 0x63, 0xec, 0xc8, 0xa4, 0xb5, 0x06, 0x92, 0x29, 0xa0, 0xf8, 0x29, - 0x5a, 0xb0, 0x65, 0xaa, 0x4e, 0xdd, 0xd9, 0x5e, 0xdc, 0xdb, 0xf0, 0xc6, 0x76, 0x6d, 0x1d, 0xcf, - 0xc2, 0xcd, 0xca, 0xcd, 0x65, 0xad, 0xe0, 0x82, 0xe2, 0x0f, 0xef, 0x20, 0xc4, 0xb3, 0x24, 0x54, - 0x10, 0x93, 0xf3, 0xb4, 0x3a, 0x5b, 0x77, 0xb6, 0xe7, 0x9b, 0xcb, 0x37, 0x97, 0xb5, 0x52, 0x34, - 0xa8, 0xf0, 0x2c, 0x09, 0xec, 0x12, 0x1f, 0xa2, 0x35, 0x23, 0x44, 0x22, 0x91, 0x99, 0x86, 0x30, - 0xe3, 0x4c, 0xa7, 0xd5, 0x39, 0x9b, 0xb5, 0x71, 0x73, 0x59, 0xbb, 0x2d, 0x06, 0x2b, 0x3c, 0x4b, - 0x8e, 0x8a, 0xc8, 0xa9, 0x09, 0x34, 0xbe, 0xcc, 0x96, 0xfd, 0xbf, 0x05, 0xad, 0xe3, 0x7f, 0xc9, - 0x3f, 0xfe, 0x80, 0xd6, 0xac, 0xa5, 0xb0, 0x74, 0x34, 0xd5, 0xf9, 
0xba, 0xb3, 0xbd, 0xbc, 0xf7, - 0x68, 0xda, 0xf5, 0x1b, 0xf3, 0x1b, 0x4c, 0xb8, 0x00, 0x48, 0x2a, 0x78, 0xd1, 0xea, 0x56, 0x91, - 0x60, 0x55, 0x4e, 0xe1, 0x8d, 0xcf, 0xb3, 0xe8, 0xa1, 0x9d, 0x95, 0xb5, 0x7f, 0xc2, 0x38, 0xe3, - 0xdd, 0x17, 0xac, 0xd3, 0x61, 0x51, 0x16, 0xeb, 0xf3, 0x53, 0x49, 0x89, 0x06, 0x8a, 0x1f, 0x20, - 0x94, 0x82, 0xca, 0x59, 0x04, 0x21, 0xa3, 0x76, 0x80, 0x95, 0xa0, 0x32, 0x8c, 0xbc, 0xa4, 0xf8, - 0x00, 0x6d, 0x49, 0x05, 0x79, 0xa8, 0x89, 0xea, 0x82, 0x0e, 0x7b, 0x24, 0xed, 0x85, 0x3d, 0x18, - 0x84, 0xc0, 0x23, 0x41, 0x81, 0xda, 0xa1, 0x55, 0x82, 0xaa, 0x61, 0xde, 0x59, 0xe4, 0x98, 0xa4, - 0xbd, 0x63, 0x18, 0xb4, 0x0a, 0x1d, 0x3f, 0x43, 0xf7, 0x39, 0xf4, 0xff, 0x98, 0x3e, 0x67, 0xd3, - 0xef, 0x72, 0xe8, 0xff, 0x36, 0x7b, 0x07, 0xad, 0xdb, 0xee, 0x93, 0xf3, 0x08, 0x21, 0x21, 0x76, - 0x60, 0xf3, 0x66, 0xc7, 0x90, 0xbf, 0x1a, 0x9d, 0x4e, 0x2b, 0x21, 0xf8, 0x09, 0xc2, 0xa6, 0xd9, - 0x14, 0xbd, 0x60, 0xe9, 0x15, 0x0e, 0xfd, 0x32, 0xdc, 0xf8, 0xee, 0xa0, 0x2d, 0x3b, 0x9e, 0x43, - 0x29, 0x63, 0x16, 0x11, 0xcd, 0x04, 0x7f, 0x9d, 0x83, 0x1a, 0xee, 0x9d, 0xe2, 0xc7, 0x68, 0x95, - 0x4c, 0xa4, 0x90, 0x50, 0xaa, 0x86, 0xf3, 0x59, 0x29, 0xc5, 0x0f, 0x29, 0x55, 0xf8, 0x00, 0x2d, - 0xc1, 0x40, 0x42, 0xa4, 0x81, 0x86, 0xed, 0x4c, 0x71, 0x3b, 0x96, 0xc5, 0xbd, 0x7b, 0x5e, 0x71, - 0x99, 0x3d, 0x73, 0x99, 0xbd, 0xe1, 0x65, 0xf6, 0x8e, 0x04, 0xe3, 0xc1, 0xff, 0x23, 0xbe, 0x99, - 0x29, 0x8e, 0x9f, 0xa3, 0x65, 0xe8, 0x74, 0x20, 0xd2, 0x2c, 0x87, 0xa2, 0xc0, 0xdc, 0xdf, 0x0a, - 0x2c, 0x8d, 0x13, 0x4c, 0x85, 0xe6, 0xc9, 0xb7, 0x2b, 0xd7, 0xb9, 0xb8, 0x72, 0x9d, 0x9f, 0x57, - 0xae, 0xf3, 0xe9, 0xda, 0x9d, 0xb9, 0xb8, 0x76, 0x67, 0x7e, 0x5c, 0xbb, 0x33, 0xef, 0xf7, 0xbb, - 0x4c, 0xf7, 0xb2, 0xb6, 0x17, 0x89, 0xc4, 0x37, 0x5f, 0xd8, 0x0e, 0x07, 0xdd, 0x17, 0xea, 0xcc, - 0x1f, 0x3f, 0x16, 0x83, 0xf2, 0xb3, 0xa5, 0xcf, 0x25, 0xa4, 0xed, 0xff, 0xec, 0x73, 0xb1, 0xff, - 0x2b, 0x00, 0x00, 0xff, 0xff, 0x67, 0x30, 0x58, 0x9b, 0xda, 0x04, 0x00, 0x00, + // 718 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x54, 0xc1, 0x4e, 0xdb, 0x4a, + 0x14, 0x8d, 0x03, 0x3c, 0x29, 0xc3, 0x03, 0x12, 0x53, 0xd4, 0x94, 0x42, 0x12, 0xb2, 0xa8, 0x28, + 0x15, 0xb6, 0x00, 0xa9, 0xab, 0x0a, 0x35, 0x09, 0xa6, 0x58, 0x2a, 0x49, 0xe4, 0x40, 0x55, 0x75, + 0x33, 0x75, 0xec, 0x9b, 0x64, 0x4a, 0x3c, 0xe3, 0x8e, 0xc7, 0x49, 0xf8, 0x8b, 0x7e, 0x40, 0x7f, + 0xa0, 0x8b, 0xfe, 0x47, 0xa5, 0x6e, 0x58, 0x76, 0x85, 0x2a, 0xd8, 0xf1, 0x15, 0x95, 0xc7, 0x21, + 0x49, 0x03, 0x55, 0xd7, 0xdd, 0x24, 0xd6, 0x3d, 0xe7, 0xdc, 0x7b, 0xe6, 0x78, 0x7c, 0x51, 0xde, + 0x67, 0x67, 0x82, 0xb3, 0x6e, 0x57, 0x17, 0xec, 0x0c, 0x28, 0xf3, 0x88, 0x13, 0xe8, 0xd0, 0x03, + 0x2a, 0x34, 0x9f, 0x33, 0xc1, 0xd4, 0xe5, 0x5b, 0x82, 0x36, 0x26, 0xac, 0x3e, 0x68, 0xb3, 0x36, + 0x93, 0xb8, 0x1e, 0x3d, 0xc5, 0xd4, 0xd5, 0x9c, 0xc3, 0x02, 0x8f, 0x05, 0x7a, 0xd3, 0x0e, 0x40, + 0xef, 0xed, 0x34, 0x41, 0xd8, 0x3b, 0xba, 0xc3, 0x08, 0x1d, 0xe2, 0xab, 0xa3, 0x59, 0x3e, 0x67, + 0xac, 0xa5, 0x3b, 0x5d, 0x9b, 0x78, 0x43, 0xac, 0x30, 0x85, 0x71, 0xf8, 0x18, 0x12, 0x0e, 0xde, + 0xc8, 0x48, 0xf1, 0x6b, 0x12, 0x65, 0x8c, 0xc8, 0x58, 0x25, 0x92, 0x19, 0x03, 0x9f, 0x70, 0x70, + 0xd5, 0xe7, 0x68, 0x4e, 0xb6, 0xc9, 0x2a, 0x05, 0x65, 0x73, 0x7e, 0x77, 0x45, 0x1b, 0xd9, 0x95, + 0x7d, 0x34, 0x49, 0x2e, 0xa7, 0x6e, 0x2e, 0xf3, 0x31, 0xcf, 0x8a, 0xff, 0xd4, 0x6d, 0x84, 0x68, + 0xe8, 0x61, 0x0e, 0x5d, 0xfb, 0x3c, 0xc8, 0x26, 0x0b, 0xca, 0xe6, 0x6c, 0x79, 0xf1, 0xe6, 0x32, + 0x3f, 0x51, 0xb5, 0x52, 0x34, 0xf4, 0x2c, 0xf9, 0xa8, 0x96, 
0x50, 0x26, 0x02, 0x1c, 0xe6, 0xf9, + 0xa1, 0x00, 0x1c, 0x52, 0x22, 0x82, 0xec, 0x8c, 0x54, 0xad, 0xdc, 0x5c, 0xe6, 0xef, 0x82, 0xd6, + 0x12, 0x0d, 0xbd, 0x4a, 0x5c, 0x39, 0x8d, 0x0a, 0x2a, 0x45, 0x19, 0x88, 0x4c, 0xdb, 0x82, 0x30, + 0x8a, 0x39, 0xd8, 0x01, 0xa3, 0xd9, 0xd9, 0x82, 0xb2, 0xb9, 0xb8, 0xbb, 0xa5, 0xdd, 0x13, 0xb2, + 0x36, 0x3e, 0xa7, 0x94, 0x58, 0x52, 0x11, 0x8f, 0xbb, 0xd3, 0xc8, 0x4a, 0xc3, 0x14, 0xb1, 0xf8, + 0xe5, 0xb7, 0xbc, 0x1a, 0x20, 0x44, 0xf7, 0x9f, 0xca, 0xeb, 0x03, 0xca, 0x48, 0x4b, 0x78, 0xe2, + 0x2a, 0x0c, 0xf3, 0x7a, 0x32, 0xed, 0xba, 0x1e, 0xfd, 0x5a, 0x63, 0xde, 0x64, 0x56, 0x77, 0x9a, + 0x58, 0x69, 0x7f, 0x8a, 0x5e, 0xfc, 0x9c, 0x44, 0x1b, 0x32, 0x2b, 0x69, 0xff, 0x98, 0x50, 0x42, + 0xdb, 0x07, 0xa4, 0xd5, 0x22, 0x4e, 0xd8, 0x15, 0xe7, 0xa7, 0xbe, 0x6b, 0x0b, 0x70, 0xd5, 0x75, + 0x84, 0x02, 0xe0, 0x3d, 0xe2, 0x00, 0x26, 0xae, 0x0c, 0x30, 0x65, 0xa5, 0x86, 0x15, 0xd3, 0x55, + 0xf7, 0xd1, 0x9a, 0xcf, 0xa1, 0x87, 0x85, 0xcd, 0xdb, 0x20, 0x70, 0xc7, 0x0e, 0x3a, 0xb8, 0x03, + 0x03, 0x0c, 0xd4, 0x61, 0x2e, 0xb8, 0x32, 0xb4, 0x94, 0x95, 0x8d, 0x38, 0x27, 0x92, 0x72, 0x64, + 0x07, 0x9d, 0x23, 0x18, 0x18, 0x31, 0xae, 0xbe, 0x40, 0x8f, 0x29, 0xf4, 0xff, 0x28, 0x9f, 0x91, + 0xf2, 0x87, 0x14, 0xfa, 0xf7, 0xaa, 0xb7, 0xd1, 0xb2, 0x9c, 0x3e, 0x7e, 0x1f, 0x18, 0x3c, 0x5b, + 0x06, 0x36, 0x1b, 0x9d, 0x18, 0x7a, 0xd5, 0xdb, 0xb7, 0x63, 0x78, 0xb6, 0xfa, 0x0c, 0xa9, 0xd1, + 0xb0, 0x29, 0xf6, 0x9c, 0x64, 0x2f, 0x51, 0xe8, 0x4f, 0x92, 0x8b, 0xdf, 0x15, 0xb4, 0x26, 0xe3, + 0x29, 0xf9, 0x7e, 0x97, 0x38, 0xf2, 0x96, 0xd5, 0x7a, 0xc0, 0x87, 0x67, 0x77, 0xd5, 0xa7, 0x28, + 0x6d, 0x8f, 0x21, 0x6c, 0xbb, 0x2e, 0x1f, 0xe6, 0xb3, 0x34, 0x51, 0x2f, 0xb9, 0x2e, 0x57, 0xf7, + 0xd1, 0x02, 0x0c, 0x7c, 0x70, 0x04, 0xb8, 0xb8, 0x19, 0x72, 0x2a, 0x63, 0x99, 0xdf, 0x7d, 0xa4, + 0xc5, 0xcb, 0x43, 0x8b, 0x96, 0x87, 0x36, 0x5c, 0x1e, 0x5a, 0x85, 0x11, 0x6a, 0xfd, 0x7f, 0xcb, + 0x2f, 0x87, 0x9c, 0xaa, 0x2f, 0xd1, 0x22, 0xb4, 0x5a, 0xe0, 0x08, 0xd2, 0x83, 0xb8, 0xc1, 0xcc, + 0xdf, 0x1a, 0x2c, 0x8c, 0x04, 0x51, 0x87, 0xad, 0xf7, 0x68, 0xe5, 0xde, 0x4f, 0x4b, 0xdd, 0x40, + 0xeb, 0xc6, 0xdb, 0xba, 0x69, 0x95, 0x4e, 0xcc, 0x5a, 0x15, 0x5b, 0x46, 0xa9, 0x51, 0xab, 0xe2, + 0xd3, 0x6a, 0xa3, 0x6e, 0x54, 0xcc, 0x43, 0xd3, 0x38, 0x48, 0x27, 0xd4, 0x0c, 0x5a, 0xa8, 0x5b, + 0xb5, 0xda, 0x21, 0x3e, 0x36, 0x1b, 0x0d, 0xb3, 0xfa, 0x2a, 0xad, 0x8c, 0x4b, 0x66, 0xf5, 0x4d, + 0xe9, 0xb5, 0x79, 0x90, 0x4e, 0x96, 0x8f, 0xbf, 0x5d, 0xe5, 0x94, 0x8b, 0xab, 0x9c, 0xf2, 0xf3, + 0x2a, 0xa7, 0x7c, 0xba, 0xce, 0x25, 0x2e, 0xae, 0x73, 0x89, 0x1f, 0xd7, 0xb9, 0xc4, 0xbb, 0xbd, + 0x36, 0x11, 0x9d, 0xb0, 0xa9, 0x39, 0xcc, 0xd3, 0xa3, 0x3b, 0xbc, 0x4d, 0x41, 0xf4, 0x19, 0x3f, + 0xd3, 0x47, 0xeb, 0x6f, 0x30, 0xb9, 0x88, 0xc5, 0xb9, 0x0f, 0x41, 0xf3, 0x3f, 0xb9, 0x00, 0xf7, + 0x7e, 0x05, 0x00, 0x00, 0xff, 0xff, 0xac, 0x71, 0x68, 0xf9, 0xac, 0x05, 0x00, 0x00, } func (m *EventClaimExpired) Marshal() (dAtA []byte, err error) { @@ -370,6 +414,11 @@ func (m *EventClaimExpired) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.ExpirationReason != 0 { + i = encodeVarintEvent(dAtA, i, uint64(m.ExpirationReason)) + i-- + dAtA[i] = 0x20 + } if m.NumComputeUnits != 0 { i = encodeVarintEvent(dAtA, i, uint64(m.NumComputeUnits)) i-- @@ -580,6 +629,9 @@ func (m *EventClaimExpired) Size() (n int) { if m.NumComputeUnits != 0 { n += 1 + sovEvent(uint64(m.NumComputeUnits)) } + if m.ExpirationReason != 0 { + n += 1 + sovEvent(uint64(m.ExpirationReason)) + } return n } @@ -762,6 +814,25 @@ func (m *EventClaimExpired) 
Unmarshal(dAtA []byte) error { break } } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpirationReason", wireType) + } + m.ExpirationReason = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ExpirationReason |= ClaimExpirationReason(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipEvent(dAtA[iNdEx:]) diff --git a/x/tokenomics/types/expected_keepers.go b/x/tokenomics/types/expected_keepers.go index a5ada5bb3..26babd982 100644 --- a/x/tokenomics/types/expected_keepers.go +++ b/x/tokenomics/types/expected_keepers.go @@ -5,6 +5,7 @@ package types import ( "context" + "github.com/cosmos/cosmos-sdk/types" sdk "github.com/cosmos/cosmos-sdk/types" banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" @@ -16,7 +17,13 @@ import ( // AccountKeeper defines the expected interface for the Account module. type AccountKeeper interface { - GetAccount(ctx context.Context, addr sdk.AccAddress) sdk.AccountI // only used for simulation + // Only used for testing & simulation + GetAccount(ctx context.Context, addr sdk.AccAddress) sdk.AccountI + SetAccount(context.Context, types.AccountI) + // Return a new account with the next account number and the specified address. Does not save the new account to the store. + NewAccountWithAddress(context.Context, sdk.AccAddress) sdk.AccountI + // Fetch the next account number, and increment the internal counter. + NextAccountNumber(context.Context) uint64 } // BankKeeper defines the expected interface for the Bank module. @@ -34,6 +41,7 @@ type BankKeeper interface { type ApplicationKeeper interface { GetApplication(ctx context.Context, appAddr string) (app apptypes.Application, found bool) SetApplication(ctx context.Context, app apptypes.Application) + GetAllApplications(ctx context.Context) []apptypes.Application } type ProofKeeper interface { @@ -43,11 +51,12 @@ type ProofKeeper interface { RemoveProof(ctx context.Context, sessionId, supplierAddr string) AllClaims(ctx context.Context, req *prooftypes.QueryAllClaimsRequest) (*prooftypes.QueryAllClaimsResponse, error) + EnsureValidProof(ctx context.Context, proof *prooftypes.Proof) error // Only used for testing & simulation + GetAllProofs(ctx context.Context) []prooftypes.Proof UpsertClaim(ctx context.Context, claim prooftypes.Claim) UpsertProof(ctx context.Context, claim prooftypes.Proof) - GetParams(ctx context.Context) prooftypes.Params SetParams(ctx context.Context, params prooftypes.Params) error }