diff --git a/.github/workflows/test-e2e.yml b/.github/workflows/test-e2e.yml index b20bb982..af54879b 100644 --- a/.github/workflows/test-e2e.yml +++ b/.github/workflows/test-e2e.yml @@ -48,6 +48,16 @@ jobs: pip3 install yq yq --version + - name: Install polycli + run: | + POLYCLI_VERSION="${{ vars.POLYCLI_VERSION }}" + tmp_dir=$(mktemp -d) + curl -L "https://github.com/0xPolygon/polygon-cli/releases/download/${POLYCLI_VERSION}/polycli_${POLYCLI_VERSION}_linux_amd64.tar.gz" | tar -xz -C "$tmp_dir" + mv "$tmp_dir"/* /usr/local/bin/polycli + rm -rf "$tmp_dir" + sudo chmod +x /usr/local/bin/polycli + /usr/local/bin/polycli version + - name: Install foundry uses: foundry-rs/foundry-toolchain@v1 diff --git a/.github/workflows/test-resequence.yml b/.github/workflows/test-resequence.yml index 6f970150..5b4d030f 100644 --- a/.github/workflows/test-resequence.yml +++ b/.github/workflows/test-resequence.yml @@ -51,7 +51,11 @@ jobs: - name: Install polycli run: | - tmp_dir=$(mktemp -d) && curl -L https://github.com/0xPolygon/polygon-cli/releases/download/v0.1.48/polycli_v0.1.48_linux_amd64.tar.gz | tar -xz -C "$tmp_dir" && mv "$tmp_dir"/* /usr/local/bin/polycli && rm -rf "$tmp_dir" + POLYCLI_VERSION="${{ vars.POLYCLI_VERSION }}" + tmp_dir=$(mktemp -d) + curl -L "https://github.com/0xPolygon/polygon-cli/releases/download/${POLYCLI_VERSION}/polycli_${POLYCLI_VERSION}_linux_amd64.tar.gz" | tar -xz -C "$tmp_dir" + mv "$tmp_dir"/* /usr/local/bin/polycli + rm -rf "$tmp_dir" sudo chmod +x /usr/local/bin/polycli /usr/local/bin/polycli version diff --git a/aggoracle/oracle.go b/aggoracle/oracle.go index 1ba94d7a..874f7ada 100644 --- a/aggoracle/oracle.go +++ b/aggoracle/oracle.go @@ -6,6 +6,7 @@ import ( "math/big" "time" + "github.com/0xPolygon/cdk/db" "github.com/0xPolygon/cdk/etherman" "github.com/0xPolygon/cdk/l1infotreesync" "github.com/0xPolygon/cdk/log" @@ -70,7 +71,7 @@ func (a *AggOracle) Start(ctx context.Context) { case errors.Is(err, l1infotreesync.ErrBlockNotProcessed): 
a.logger.Debugf("syncer is not ready for the block %d", blockNumToFetch) - case errors.Is(err, l1infotreesync.ErrNotFound): + case errors.Is(err, db.ErrNotFound): blockNumToFetch = 0 a.logger.Debugf("syncer has not found any GER until block %d", blockNumToFetch) diff --git a/bridgesync/bridgesync.go b/bridgesync/bridgesync.go index e79fba2e..e6a61c5e 100644 --- a/bridgesync/bridgesync.go +++ b/bridgesync/bridgesync.go @@ -160,7 +160,7 @@ func (s *BridgeSync) GetLastProcessedBlock(ctx context.Context) (uint64, error) return s.processor.GetLastProcessedBlock(ctx) } -func (s *BridgeSync) GetBridgeRootByHash(ctx context.Context, root common.Hash) (tree.Root, error) { +func (s *BridgeSync) GetBridgeRootByHash(ctx context.Context, root common.Hash) (*tree.Root, error) { return s.processor.exitTree.GetRootByHash(ctx, root) } @@ -172,10 +172,7 @@ func (s *BridgeSync) GetBridges(ctx context.Context, fromBlock, toBlock uint64) return s.processor.GetBridges(ctx, fromBlock, toBlock) } -// GetProof retrieves the Merkle proof for the given deposit count and exit root. 
-func (s *BridgeSync) GetProof( - ctx context.Context, depositCount uint32, localExitRoot common.Hash, -) ([32]common.Hash, error) { +func (s *BridgeSync) GetProof(ctx context.Context, depositCount uint32, localExitRoot common.Hash) (tree.Proof, error) { return s.processor.exitTree.GetProof(ctx, depositCount, localExitRoot) } @@ -186,3 +183,11 @@ func (p *processor) GetBlockByLER(ctx context.Context, ler common.Hash) (uint64, } return root.BlockNum, nil } + +func (s *BridgeSync) GetRootByLER(ctx context.Context, ler common.Hash) (*tree.Root, error) { + root, err := s.processor.exitTree.GetRootByHash(ctx, ler) + if err != nil { + return root, err + } + return root, nil +} diff --git a/bridgesync/claimcalldata_test.go b/bridgesync/claimcalldata_test.go index 1319835b..2e574a4e 100644 --- a/bridgesync/claimcalldata_test.go +++ b/bridgesync/claimcalldata_test.go @@ -10,6 +10,7 @@ import ( "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/test/contracts/claimmock" "github.com/0xPolygon/cdk/test/contracts/claimmockcaller" + tree "github.com/0xPolygon/cdk/tree/types" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" @@ -52,11 +53,11 @@ func TestClaimCalldata(t *testing.T) { require.NoError(t, err) proofLocal := [32][32]byte{} - proofLocalH := [32]common.Hash{} + proofLocalH := tree.Proof{} proofLocal[5] = common.HexToHash("beef") proofLocalH[5] = common.HexToHash("beef") proofRollup := [32][32]byte{} - proofRollupH := [32]common.Hash{} + proofRollupH := tree.Proof{} proofRollup[4] = common.HexToHash("a1fa") proofRollupH[4] = common.HexToHash("a1fa") expectedClaim := Claim{ diff --git a/bridgesync/downloader.go b/bridgesync/downloader.go index b34267ce..9be7a6bc 100644 --- a/bridgesync/downloader.go +++ b/bridgesync/downloader.go @@ -9,6 +9,7 @@ import ( "github.com/0xPolygon/cdk-contracts-tooling/contracts/etrog/polygonzkevmbridge" 
"github.com/0xPolygon/cdk-contracts-tooling/contracts/etrog/polygonzkevmbridgev2" rpcTypes "github.com/0xPolygon/cdk-rpc/types" + "github.com/0xPolygon/cdk/db" "github.com/0xPolygon/cdk/sync" tree "github.com/0xPolygon/cdk/tree/types" "github.com/ethereum/go-ethereum" @@ -181,7 +182,7 @@ func setClaimCalldata(client EthClienter, bridge common.Address, txHash common.H callStack.Push(c) } } - return ErrNotFound + return db.ErrNotFound } func setClaimIfFoundOnInput(input []byte, claim *Claim) (bool, error) { diff --git a/bridgesync/processor.go b/bridgesync/processor.go index 47b26595..e4ba5423 100644 --- a/bridgesync/processor.go +++ b/bridgesync/processor.go @@ -23,7 +23,6 @@ import ( var ( // ErrBlockNotProcessed indicates that the given block(s) have not been processed yet. ErrBlockNotProcessed = errors.New("given block(s) have not been processed yet") - ErrNotFound = errors.New("not found") ) // Bridge is the representation of a bridge event @@ -184,7 +183,7 @@ func (p *processor) queryBlockRange(tx db.Querier, fromBlock, toBlock uint64, ta `, table), fromBlock, toBlock) if err != nil { if errors.Is(err, sql.ErrNoRows) { - return nil, ErrNotFound + return nil, db.ErrNotFound } return nil, err } diff --git a/claimsponsor/claimsponsor.go b/claimsponsor/claimsponsor.go index fbcdca73..c9df6561 100644 --- a/claimsponsor/claimsponsor.go +++ b/claimsponsor/claimsponsor.go @@ -9,8 +9,10 @@ import ( "time" dbCommon "github.com/0xPolygon/cdk/common" + "github.com/0xPolygon/cdk/db" "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sync" + tree "github.com/0xPolygon/cdk/tree/types" "github.com/ethereum/go-ethereum/common" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/iter" @@ -31,14 +33,13 @@ const ( var ( ErrInvalidClaim = errors.New("invalid claim") - ErrNotFound = errors.New("not found") ) // Claim representation of a claim event type Claim struct { LeafType uint8 - ProofLocalExitRoot [32]common.Hash - ProofRollupExitRoot 
[32]common.Hash + ProofLocalExitRoot tree.Proof + ProofRollupExitRoot tree.Proof GlobalIndex *big.Int MainnetExitRoot common.Hash RollupExitRoot common.Hash @@ -131,7 +132,7 @@ func (c *ClaimSponsor) Start(ctx context.Context) { if err2 != nil { err = err2 tx.Rollback() - if errors.Is(err, ErrNotFound) { + if errors.Is(err, db.ErrNotFound) { c.logger.Debugf("queue is empty") err = nil time.Sleep(c.waitOnEmptyQueue) @@ -242,7 +243,7 @@ func (c *ClaimSponsor) AddClaimToQueue(ctx context.Context, claim *Claim) error } _, err = getClaim(tx, claim.GlobalIndex) - if !errors.Is(err, ErrNotFound) { + if !errors.Is(err, db.ErrNotFound) { if err != nil { tx.Rollback() @@ -264,7 +265,7 @@ func (c *ClaimSponsor) AddClaimToQueue(ctx context.Context, claim *Claim) error var queuePosition uint64 lastQueuePosition, _, err := getLastQueueIndex(tx) switch { - case errors.Is(err, ErrNotFound): + case errors.Is(err, db.ErrNotFound): queuePosition = 0 case err != nil: @@ -307,7 +308,7 @@ func (c *ClaimSponsor) getClaimByQueueIndex(ctx context.Context, queueIndex uint return nil, err } if globalIndexBytes == nil { - return nil, ErrNotFound + return nil, db.ErrNotFound } return getClaim(tx, new(big.Int).SetBytes(globalIndexBytes)) @@ -345,7 +346,7 @@ func getIndex(iter iter.KV) (uint64, *big.Int, error) { return 0, nil, err } if k == nil { - return 0, nil, ErrNotFound + return 0, nil, db.ErrNotFound } globalIndex := new(big.Int).SetBytes(v) @@ -368,7 +369,7 @@ func getClaim(tx kv.Tx, globalIndex *big.Int) (*Claim, error) { return nil, err } if claimBytes == nil { - return nil, ErrNotFound + return nil, db.ErrNotFound } claim := &Claim{} err = json.Unmarshal(claimBytes, claim) diff --git a/cmd/run.go b/cmd/run.go index 773c5e24..0b744243 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -25,7 +25,6 @@ import ( "github.com/0xPolygon/cdk/etherman" ethermanconfig "github.com/0xPolygon/cdk/etherman/config" "github.com/0xPolygon/cdk/etherman/contracts" - 
"github.com/0xPolygon/cdk/l1bridge2infoindexsync" "github.com/0xPolygon/cdk/l1infotreesync" "github.com/0xPolygon/cdk/lastgersync" "github.com/0xPolygon/cdk/log" @@ -81,10 +80,6 @@ func start(cliCtx *cli.Context) error { claimSponsor := runClaimSponsorIfNeeded(cliCtx.Context, components, l2Client, c.ClaimSponsor) l1BridgeSync := runBridgeSyncL1IfNeeded(cliCtx.Context, components, c.BridgeL1Sync, reorgDetectorL1, l1Client) l2BridgeSync := runBridgeSyncL2IfNeeded(cliCtx.Context, components, c.BridgeL2Sync, reorgDetectorL2, l2Client) - l1Bridge2InfoIndexSync := runL1Bridge2InfoIndexSyncIfNeeded( - cliCtx.Context, components, c.L1Bridge2InfoIndexSync, - l1BridgeSync, l1InfoTreeSync, l1Client, - ) lastGERSync := runLastGERSyncIfNeeded( cliCtx.Context, components, c.LastGERSync, reorgDetectorL2, l2Client, l1InfoTreeSync, ) @@ -115,7 +110,6 @@ func start(cliCtx *cli.Context) error { c.Common.NetworkID, claimSponsor, l1InfoTreeSync, - l1Bridge2InfoIndexSync, lastGERSync, l1BridgeSync, l2BridgeSync, @@ -623,34 +617,6 @@ func runClaimSponsorIfNeeded( return cs } -func runL1Bridge2InfoIndexSyncIfNeeded( - ctx context.Context, - components []string, - cfg l1bridge2infoindexsync.Config, - l1BridgeSync *bridgesync.BridgeSync, - l1InfoTreeSync *l1infotreesync.L1InfoTreeSync, - l1Client *ethclient.Client, -) *l1bridge2infoindexsync.L1Bridge2InfoIndexSync { - if !isNeeded([]string{cdkcommon.RPC}, components) { - return nil - } - l1Bridge2InfoIndexSync, err := l1bridge2infoindexsync.New( - cfg.DBPath, - l1BridgeSync, - l1InfoTreeSync, - l1Client, - cfg.RetryAfterErrorPeriod.Duration, - cfg.MaxRetryAttemptsAfterError, - cfg.WaitForSyncersPeriod.Duration, - ) - if err != nil { - log.Fatalf("error creating l1Bridge2InfoIndexSync: %s", err) - } - go l1Bridge2InfoIndexSync.Start(ctx) - - return l1Bridge2InfoIndexSync -} - func runLastGERSyncIfNeeded( ctx context.Context, components []string, @@ -751,7 +717,6 @@ func createRPC( cdkNetworkID uint32, sponsor *claimsponsor.ClaimSponsor, 
l1InfoTree *l1infotreesync.L1InfoTreeSync, - l1Bridge2Index *l1bridge2infoindexsync.L1Bridge2InfoIndexSync, injectedGERs *lastgersync.LastGERSync, bridgeL1 *bridgesync.BridgeSync, bridgeL2 *bridgesync.BridgeSync, @@ -767,7 +732,6 @@ func createRPC( cdkNetworkID, sponsor, l1InfoTree, - l1Bridge2Index, injectedGERs, bridgeL1, bridgeL2, diff --git a/config/config.go b/config/config.go index cb899df8..431d0175 100644 --- a/config/config.go +++ b/config/config.go @@ -3,7 +3,6 @@ package config import ( "bytes" "errors" - "fmt" "path/filepath" "strings" @@ -14,7 +13,6 @@ import ( "github.com/0xPolygon/cdk/claimsponsor" "github.com/0xPolygon/cdk/common" ethermanconfig "github.com/0xPolygon/cdk/etherman/config" - "github.com/0xPolygon/cdk/l1bridge2infoindexsync" "github.com/0xPolygon/cdk/l1infotreesync" "github.com/0xPolygon/cdk/lastgersync" "github.com/0xPolygon/cdk/log" @@ -52,6 +50,22 @@ const ( FlagOutputFile = "output" // FlagMaxAmount is the flag to avoid to use the flag FlagAmount FlagMaxAmount = "max-amount" + + deprecatedFieldSyncDB = "Aggregator.Synchronizer.DB is deprecated use Aggregator.Synchronizer.SQLDB instead" +) + +type ForbiddenField struct { + FieldName string + Reason string +} + +var ( + forbiddenFieldsOnConfig = []ForbiddenField{ + { + FieldName: "aggregator.synchronizer.db.", + Reason: deprecatedFieldSyncDB, + }, + } ) /* @@ -96,10 +110,6 @@ type Config struct { // ClaimSponsor is the config for the claim sponsor ClaimSponsor claimsponsor.EVMClaimSponsorConfig - // L1Bridge2InfoIndexSync is the config for the synchronizers that maintains the relation of - // bridge from L1 --> L1 Info tree index. 
Needed for the bridge service (RPC) - L1Bridge2InfoIndexSync l1bridge2infoindexsync.Config - // BridgeL1Sync is the configuration for the synchronizer of the bridge of the L1 BridgeL1Sync bridgesync.Config @@ -128,15 +138,18 @@ func Default() (*Config, error) { return &cfg, nil } +func Load(ctx *cli.Context) (*Config, error) { + configFilePath := ctx.String(FlagCfg) + return LoadFile(configFilePath) +} // Load loads the configuration -func Load(ctx *cli.Context) (*Config, error) { +func LoadFile(configFilePath string) (*Config, error) { cfg, err := Default() if err != nil { return nil, err } - - configFilePath := ctx.String(FlagCfg) + expectedKeys := viper.AllKeys() if configFilePath != "" { dirName, fileName := filepath.Split(configFilePath) @@ -160,7 +173,6 @@ func Load(ctx *cli.Context) (*Config, error) { log.Error("config file not found") } else { log.Errorf("error reading config file: ", err) - return nil, err } } @@ -179,8 +191,45 @@ func Load(ctx *cli.Context) (*Config, error) { if err != nil { return nil, err } + if expectedKeys != nil { + configKeys := viper.AllKeys() + unexpectedFields := getUnexpectedFields(configKeys, expectedKeys) + for _, field := range unexpectedFields { + forbbidenInfo := getForbiddenField(field) + if forbbidenInfo != nil { + log.Warnf("forbidden field %s in config file: %s", field, forbbidenInfo.Reason) + } else { + log.Debugf("field %s in config file doesnt have a default value", field) + } + } + } + return cfg, nil +} + +func getForbiddenField(fieldName string) *ForbiddenField { + for _, forbiddenField := range forbiddenFieldsOnConfig { + if forbiddenField.FieldName == fieldName || strings.HasPrefix(fieldName, forbiddenField.FieldName) { + return &forbiddenField + } + } + return nil +} - fmt.Println("cfg", cfg.NetworkConfig.L1Config) +func getUnexpectedFields(keysOnFile, expectedConfigKeys []string) []string { + wrongFields := make([]string, 0) + for _, key := range keysOnFile { + if !contains(expectedConfigKeys, key) { + 
wrongFields = append(wrongFields, key) + } + } + return wrongFields +} - return cfg, nil +func contains(keys []string, key string) bool { + for _, k := range keys { + if k == key { + return true + } + } + return false } diff --git a/config/config_test.go b/config/config_test.go new file mode 100644 index 00000000..1aaa24e0 --- /dev/null +++ b/config/config_test.go @@ -0,0 +1,51 @@ +package config + +import ( + "os" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestLoadDeafaultConfig(t *testing.T) { + tmpFile, err := os.CreateTemp("", "ut_config") + require.NoError(t, err) + defer os.Remove(tmpFile.Name()) + _, err = tmpFile.Write([]byte(DefaultValues)) + require.NoError(t, err) + cfg, err := LoadFile(tmpFile.Name()) + require.NoError(t, err) + require.NotNil(t, cfg) +} + +const configWithUnexpectedFields = ` +[UnknownField] +Field = "value" +` + +func TestLoadConfigWithUnexpectedFields(t *testing.T) { + tmpFile, err := os.CreateTemp("", "ut_config") + require.NoError(t, err) + defer os.Remove(tmpFile.Name()) + _, err = tmpFile.Write([]byte(configWithUnexpectedFields)) + require.NoError(t, err) + cfg, err := LoadFile(tmpFile.Name()) + require.NoError(t, err) + require.NotNil(t, cfg) +} + +const configWithForbiddenFields = ` +[aggregator.synchronizer.db] +name = "value" +` + +func TestLoadConfigWithForbiddenFields(t *testing.T) { + tmpFile, err := os.CreateTemp("", "ut_config") + require.NoError(t, err) + defer os.Remove(tmpFile.Name()) + _, err = tmpFile.Write([]byte(configWithForbiddenFields)) + require.NoError(t, err) + cfg, err := LoadFile(tmpFile.Name()) + require.NoError(t, err) + require.NotNil(t, cfg) +} diff --git a/config/default.go b/config/default.go index d9ff2158..bca18ac4 100644 --- a/config/default.go +++ b/config/default.go @@ -106,14 +106,13 @@ SequencerPrivateKey = {} L1ChainID = 11155111 HTTPHeaders = [] [Aggregator.Synchronizer] - [Aggregator.Synchronizer.DB] - Name = "sync_db" - User = "sync_user" - Password = "sync_password" 
- Host = "cdk-l1-sync-db" - Port = "5432" - EnableLog = false - MaxConns = 10 + [Aggregator.Synchronizer.Log] + Environment = "development" # "production" or "development" + Level = "info" + Outputs = ["stderr"] + [Aggregator.Synchronizer.SQLDB] + DriverName = "sqlite3" + DataSourceName = "file:/tmp/aggregator_sync_db.sqlite" [Aggregator.Synchronizer.Synchronizer] SyncInterval = "10s" SyncChunkSize = 1000 @@ -122,9 +121,19 @@ SequencerPrivateKey = {} BlockFinality = "finalized" OverrideStorageCheck = false [Aggregator.Synchronizer.Etherman] + L1URL = "http://localhost:8545" + ForkIDChunkSize = 100 + L1ChainID = 0 [Aggregator.Synchronizer.Etherman.Validium] Enabled = false - + TrustedSequencerURL = "" + RetryOnDACErrorInterval = "1m" + DataSourcePriority = ["trusted", "external"] + [Aggregator.Synchronizer.Etherman.Validium.Translator] + FullMatchRules = [] + [Aggregator.Synchronizer.Etherman.Validium.RateLimit] + NumRequests = 900 + Interval = "1s" [ReorgDetectorL1] DBPath = "/tmp/reorgdetectorl1" @@ -212,12 +221,6 @@ GasOffset = 0 L1ChainID = 1337 HTTPHeaders = [] -[L1Bridge2InfoIndexSync] -DBPath = "/tmp/l1bridge2infoindexsync" -RetryAfterErrorPeriod = "1s" -MaxRetryAttemptsAfterError = -1 -WaitForSyncersPeriod = "3s" - [BridgeL1Sync] DBPath = "/tmp/bridgel1sync" BlockFinality = "LatestBlock" diff --git a/db/meddler.go b/db/meddler.go index 90071916..e1f55086 100644 --- a/db/meddler.go +++ b/db/meddler.go @@ -19,6 +19,7 @@ func initMeddler() { meddler.Register("bigint", BigIntMeddler{}) meddler.Register("merkleproof", MerkleProofMeddler{}) meddler.Register("hash", HashMeddler{}) + meddler.Register("address", AddressMeddler{}) } func SQLiteErr(err error) (*sqlite.Error, bool) { @@ -176,3 +177,38 @@ func (b HashMeddler) PreWrite(fieldPtr interface{}) (saveValue interface{}, err } return field.Hex(), nil } + +// AddressMeddler encodes or decodes the field value to or from JSON +type AddressMeddler struct{} + +// PreRead is called before a Scan operation for fields 
that have the ProofMeddler +func (b AddressMeddler) PreRead(fieldAddr interface{}) (scanTarget interface{}, err error) { + // give a pointer to a byte buffer to grab the raw data + return new(string), nil +} + +// PostRead is called after a Scan operation for fields that have the ProofMeddler +func (b AddressMeddler) PostRead(fieldPtr, scanTarget interface{}) error { + ptr, ok := scanTarget.(*string) + if !ok { + return errors.New("scanTarget is not *string") + } + if ptr == nil { + return errors.New("AddressMeddler.PostRead: nil pointer") + } + field, ok := fieldPtr.(*common.Address) + if !ok { + return errors.New("fieldPtr is not common.Address") + } + *field = common.HexToAddress(*ptr) + return nil +} + +// PreWrite is called before an Insert or Update operation for fields that have the ProofMeddler +func (b AddressMeddler) PreWrite(fieldPtr interface{}) (saveValue interface{}, err error) { + field, ok := fieldPtr.(common.Address) + if !ok { + return nil, errors.New("fieldPtr is not common.Address") + } + return field.Hex(), nil +} diff --git a/db/sqlite.go b/db/sqlite.go index e30e9e26..ba8faefb 100644 --- a/db/sqlite.go +++ b/db/sqlite.go @@ -2,6 +2,7 @@ package db import ( "database/sql" + "errors" _ "github.com/mattn/go-sqlite3" ) @@ -10,6 +11,10 @@ const ( UniqueConstrain = 1555 ) +var ( + ErrNotFound = errors.New("not found") +) + // NewSQLiteDB creates a new SQLite DB func NewSQLiteDB(dbPath string) (*sql.DB, error) { initMeddler() @@ -25,3 +30,10 @@ func NewSQLiteDB(dbPath string) (*sql.DB, error) { `) return db, err } + +func ReturnErrNotFound(err error) error { + if errors.Is(err, sql.ErrNoRows) { + return ErrNotFound + } + return err +} diff --git a/go.mod b/go.mod index a2ca38f4..aeaae312 100644 --- a/go.mod +++ b/go.mod @@ -8,8 +8,8 @@ require ( github.com/0xPolygon/cdk-rpc v0.0.0-20240905074455-431d3c271fe8 github.com/0xPolygonHermez/zkevm-data-streamer v0.2.6 github.com/0xPolygonHermez/zkevm-ethtx-manager v0.1.10-0.20240716105056-c051c96d0234 - 
github.com/0xPolygonHermez/zkevm-synchronizer-l1 v0.7.0 - github.com/ethereum/go-ethereum v1.14.5 + github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.1 + github.com/ethereum/go-ethereum v1.14.8 github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3 github.com/hermeznetwork/tracerr v0.3.2 github.com/iden3/go-iden3-crypto v0.0.16 @@ -19,7 +19,7 @@ require ( github.com/ledgerwatch/erigon-lib v1.0.0 github.com/mattn/go-sqlite3 v1.14.23 github.com/mitchellh/mapstructure v1.5.0 - github.com/rubenv/sql-migrate v1.6.1 + github.com/rubenv/sql-migrate v1.7.0 github.com/russross/meddler v1.0.1 github.com/spf13/viper v1.19.0 github.com/stretchr/testify v1.9.0 @@ -27,9 +27,9 @@ require ( go.opentelemetry.io/otel v1.24.0 go.opentelemetry.io/otel/metric v1.24.0 go.uber.org/zap v1.27.0 - golang.org/x/crypto v0.24.0 - golang.org/x/net v0.26.0 - golang.org/x/sync v0.7.0 + golang.org/x/crypto v0.27.0 + golang.org/x/net v0.29.0 + golang.org/x/sync v0.8.0 google.golang.org/grpc v1.64.0 google.golang.org/protobuf v1.34.2 modernc.org/sqlite v1.32.0 @@ -44,13 +44,14 @@ require ( github.com/bahlo/generic-list-go v0.2.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bits-and-blooms/bitset v1.10.0 // indirect - github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect + github.com/btcsuite/btcd/btcec/v2 v2.3.4 // indirect github.com/buger/jsonparser v1.1.1 // indirect github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/cockroachdb/errors v1.11.1 // indirect + github.com/cockroachdb/errors v1.11.3 // indirect + github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce // indirect github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect - github.com/cockroachdb/pebble v1.1.0 // indirect + github.com/cockroachdb/pebble v1.1.1 // indirect github.com/cockroachdb/redact v1.1.5 // indirect github.com/cockroachdb/tokenbucket 
v0.0.0-20230807174530-cc333fc44b06 // indirect github.com/consensys/bavard v0.1.13 // indirect @@ -66,10 +67,9 @@ require ( github.com/erigontech/mdbx-go v0.27.14 // indirect github.com/ethereum/c-kzg-4844 v1.0.0 // indirect github.com/ethereum/go-verkle v0.1.1-0.20240306133620-7d920df305f0 // indirect - github.com/fjl/memsize v0.0.2 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 // indirect - github.com/getsentry/sentry-go v0.18.0 // indirect + github.com/getsentry/sentry-go v0.27.0 // indirect github.com/go-gorp/gorp/v3 v3.1.0 // indirect github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect @@ -87,7 +87,7 @@ require ( github.com/hashicorp/hcl v1.0.1-0.20180906183839-65a6292f0157 // indirect github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4 // indirect github.com/holiman/bloomfilter/v2 v2.0.3 // indirect - github.com/holiman/uint256 v1.2.4 // indirect + github.com/holiman/uint256 v1.3.1 // indirect github.com/huin/goupnp v1.3.0 // indirect github.com/jackc/chunkreader/v2 v2.0.1 // indirect github.com/jackc/pgio v1.0.0 // indirect @@ -149,8 +149,8 @@ require ( go.opentelemetry.io/otel/trace v1.24.0 // indirect go.uber.org/multierr v1.10.0 // indirect golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa // indirect - golang.org/x/sys v0.22.0 // indirect - golang.org/x/text v0.16.0 // indirect + golang.org/x/sys v0.25.0 // indirect + golang.org/x/text v0.18.0 // indirect golang.org/x/time v0.5.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect gopkg.in/ini.v1 v1.67.0 // indirect diff --git a/go.sum b/go.sum index 818a9b5d..e3544380 100644 --- a/go.sum +++ b/go.sum @@ -8,8 +8,8 @@ github.com/0xPolygonHermez/zkevm-data-streamer v0.2.6 h1:BSO1uu6dmLQ5kKb3uyDvsUx github.com/0xPolygonHermez/zkevm-data-streamer v0.2.6/go.mod h1:RC6ouyNsUtJrv5aGPcM6Dm5xhXN209tRSzcsJsaOtZI= 
github.com/0xPolygonHermez/zkevm-ethtx-manager v0.1.10-0.20240716105056-c051c96d0234 h1:QElCysO7f2xaknY/RDjxcs7IVmcgORfsCX2g+YD0Ko4= github.com/0xPolygonHermez/zkevm-ethtx-manager v0.1.10-0.20240716105056-c051c96d0234/go.mod h1:zBZWxwOHKlw+ghd9roQLgIkDZWA7e7qO3EsfQQT/+oQ= -github.com/0xPolygonHermez/zkevm-synchronizer-l1 v0.7.0 h1:h/B5AzWSZTxb1HouulXeE9nbHD1d4/nc67ZQc0khAQA= -github.com/0xPolygonHermez/zkevm-synchronizer-l1 v0.7.0/go.mod h1:+tQwkDf+5AL3dgL6G1t0qmwct0NJDlGlzqycOM5jn5g= +github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.1 h1:8GbJBNsYO4zrqiBX++et8eQrJDEWEZuo3Ch3M416YnI= +github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.1/go.mod h1:96i+QSANfbikwlUY3U9MLNtg3656W3dWfbGqH+Od1/k= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8= github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= @@ -33,8 +33,8 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bits-and-blooms/bitset v1.10.0 h1:ePXTeiPEazB5+opbv5fr8umg2R/1NlzgDsyepwsSr88= github.com/bits-and-blooms/bitset v1.10.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= -github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U= -github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= +github.com/btcsuite/btcd/btcec/v2 v2.3.4 h1:3EJjcN70HCu/mwqlUsGK8GcNVyLVxFDlWurTXGPFfiQ= +github.com/btcsuite/btcd/btcec/v2 v2.3.4/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= @@ 
-53,12 +53,14 @@ github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4= github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= -github.com/cockroachdb/errors v1.11.1 h1:xSEW75zKaKCWzR3OfxXUxgrk/NtT4G1MiOv5lWZazG8= -github.com/cockroachdb/errors v1.11.1/go.mod h1:8MUxA3Gi6b25tYlFEBGLf+D8aISL+M4MIpiWMSNRfxw= +github.com/cockroachdb/errors v1.11.3 h1:5bA+k2Y6r+oz/6Z/RFlNeVCesGARKuC6YymtcDrbC/I= +github.com/cockroachdb/errors v1.11.3/go.mod h1:m4UIW4CDjx+R5cybPsNrRbreomiFqt8o1h1wUVazSd8= +github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce h1:giXvy4KSc/6g/esnpM7Geqxka4WSqI1SZc7sMJFd3y4= +github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce/go.mod h1:9/y3cnZ5GKakj/H4y9r9GTjCvAFta7KLgSHPJJYc52M= github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= -github.com/cockroachdb/pebble v1.1.0 h1:pcFh8CdCIt2kmEpK0OIatq67Ln9uGDYY3d5XnE0LJG4= -github.com/cockroachdb/pebble v1.1.0/go.mod h1:sEHm5NOXxyiAoKWhoFxT8xMgd/f3RA6qUqQ1BXKrh2E= +github.com/cockroachdb/pebble v1.1.1 h1:XnKU22oiCLy2Xn8vp1re67cXg4SAasg/WDt1NtcRFaw= +github.com/cockroachdb/pebble v1.1.1/go.mod h1:4exszw1r40423ZsmkG/09AFEG83I0uDgfujJdbL6kYU= github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30= github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= @@ -95,12 +97,10 @@ github.com/erigontech/mdbx-go v0.27.14 
h1:IVVeQVCAjZRpAR8bThlP2ISxrOwdV35NZdGwAg github.com/erigontech/mdbx-go v0.27.14/go.mod h1:FAMxbOgqOnRDx51j8HjuJZIgznbDwjX7LItd+/UWyA4= github.com/ethereum/c-kzg-4844 v1.0.0 h1:0X1LBXxaEtYD9xsyj9B9ctQEZIpnvVDeoBx8aHEwTNA= github.com/ethereum/c-kzg-4844 v1.0.0/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= -github.com/ethereum/go-ethereum v1.14.5 h1:szuFzO1MhJmweXjoM5nSAeDvjNUH3vIQoMzzQnfvjpw= -github.com/ethereum/go-ethereum v1.14.5/go.mod h1:VEDGGhSxY7IEjn98hJRFXl/uFvpRgbIIf2PpXiyGGgc= +github.com/ethereum/go-ethereum v1.14.8 h1:NgOWvXS+lauK+zFukEvi85UmmsS/OkV0N23UZ1VTIig= +github.com/ethereum/go-ethereum v1.14.8/go.mod h1:TJhyuDq0JDppAkFXgqjwpdlQApywnu/m10kFPxh8vvs= github.com/ethereum/go-verkle v0.1.1-0.20240306133620-7d920df305f0 h1:KrE8I4reeVvf7C1tm8elRjj4BdscTYzz/WAbYyf/JI4= github.com/ethereum/go-verkle v0.1.1-0.20240306133620-7d920df305f0/go.mod h1:D9AJLVXSyZQXJQVk8oh1EwjISE+sJTn2duYIZC0dy3w= -github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA= -github.com/fjl/memsize v0.0.2/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= @@ -110,8 +110,8 @@ github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nos github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 h1:f6D9Hr8xV8uYKlyuj8XIruxlh9WjVjdh1gIicAS7ays= github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= -github.com/getsentry/sentry-go v0.18.0 h1:MtBW5H9QgdcJabtZcuJG80BMOwaBpkRDZkxRkNC1sN0= -github.com/getsentry/sentry-go v0.18.0/go.mod h1:Kgon4Mby+FJ7ZWHFUAZgVaIa8sxHtnRJRLTXZr51aKQ= 
+github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= +github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs= @@ -188,8 +188,8 @@ github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4 h1:X4egAf/gcS1zATw6w github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc= github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= -github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU= -github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= +github.com/holiman/uint256 v1.3.1 h1:JfTzmih28bittyHM8z360dCjIA9dbPIBlcTI6lmctQs= +github.com/holiman/uint256 v1.3.1/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= @@ -370,8 +370,8 @@ github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= -github.com/rubenv/sql-migrate v1.6.1 h1:bo6/sjsan9HaXAsNxYP/jCEDUGibHp8JmOBw7NTGRos= -github.com/rubenv/sql-migrate v1.6.1/go.mod h1:tPzespupJS0jacLfhbwto/UjSX+8h2FdWB7ar+QlHa0= +github.com/rubenv/sql-migrate 
v1.7.0 h1:HtQq1xyTN2ISmQDggnh0c9U3JlP8apWh8YO2jzlXpTI= +github.com/rubenv/sql-migrate v1.7.0/go.mod h1:S4wtDEG1CKn+0ShpTtzWhFpHHI5PvCUtiGI+C+Z2THE= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/meddler v1.0.1 h1:JLR7Z4M4iGm1nr7DIURBq18UW8cTrm+qArUFgOhELo8= @@ -476,8 +476,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= -golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= +golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= +golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa h1:FRnLl4eNAQl8hwxVVC17teOw8kdjVDVAiFMtgUdTSRQ= golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa/go.mod h1:zk2irFbV9DP96SEBUUAy67IdHUaZuSnrz1n472HUCLE= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -499,15 +499,15 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= 
-golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -539,8 +539,8 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= -golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys 
v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -550,8 +550,8 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= diff --git a/l1bridge2infoindexsync/config.go b/l1bridge2infoindexsync/config.go deleted file mode 100644 index ef37f738..00000000 --- a/l1bridge2infoindexsync/config.go +++ /dev/null @@ -1,15 +0,0 @@ -package l1bridge2infoindexsync - -import "github.com/0xPolygon/cdk/config/types" - -type Config struct { - // DBPath path of the DB - DBPath string `mapstructure:"DBPath"` - // RetryAfterErrorPeriod is the time that will be waited when an unexpected error happens before retry - RetryAfterErrorPeriod types.Duration `mapstructure:"RetryAfterErrorPeriod"` - // MaxRetryAttemptsAfterError is the maximum number of consecutive attempts that will happen before panicing. 
- // Any number smaller than zero will be considered as unlimited retries - MaxRetryAttemptsAfterError int `mapstructure:"MaxRetryAttemptsAfterError"` - // WaitForSyncersPeriod time that will be waited when the synchronizer has reached the latest state - WaitForSyncersPeriod types.Duration `mapstructure:"WaitForSyncersPeriod"` -} diff --git a/l1bridge2infoindexsync/downloader.go b/l1bridge2infoindexsync/downloader.go deleted file mode 100644 index f4db8422..00000000 --- a/l1bridge2infoindexsync/downloader.go +++ /dev/null @@ -1,70 +0,0 @@ -package l1bridge2infoindexsync - -import ( - "context" - "math/big" - - "github.com/0xPolygon/cdk/bridgesync" - "github.com/0xPolygon/cdk/l1infotreesync" - "github.com/0xPolygon/cdk/tree/types" - "github.com/ethereum/go-ethereum" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/rpc" -) - -type downloader struct { - l1Bridge *bridgesync.BridgeSync - l1Info *l1infotreesync.L1InfoTreeSync - l1Client ethereum.ChainReader -} - -func newDownloader( - l1Bridge *bridgesync.BridgeSync, - l1Info *l1infotreesync.L1InfoTreeSync, - l1Client ethereum.ChainReader, -) *downloader { - return &downloader{ - l1Bridge: l1Bridge, - l1Info: l1Info, - l1Client: l1Client, - } -} - -func (d *downloader) getLastFinalizedL1Block(ctx context.Context) (uint64, error) { - b, err := d.l1Client.BlockByNumber(ctx, big.NewInt(int64(rpc.FinalizedBlockNumber))) - if err != nil { - return 0, err - } - - return b.NumberU64(), nil -} - -func (d *downloader) getLastProcessedBlockBridge(ctx context.Context) (uint64, error) { - return d.l1Bridge.GetLastProcessedBlock(ctx) -} - -func (d *downloader) getLastProcessedBlockL1InfoTree(ctx context.Context) (uint64, error) { - return d.l1Info.GetLastProcessedBlock(ctx) -} - -func (d *downloader) getLastL1InfoIndexUntilBlock(ctx context.Context, blockNum uint64) (uint32, error) { - info, err := d.l1Info.GetLatestInfoUntilBlock(ctx, blockNum) - if err != nil { - return 0, err - } - - return 
info.L1InfoTreeIndex, nil -} - -func (d *downloader) getMainnetExitRootAtL1InfoTreeIndex(ctx context.Context, index uint32) (common.Hash, error) { - leaf, err := d.l1Info.GetInfoByIndex(ctx, index) - if err != nil { - return common.Hash{}, err - } - - return leaf.MainnetExitRoot, nil -} - -func (d *downloader) getBridgeIndex(ctx context.Context, mainnetExitRoot common.Hash) (types.Root, error) { - return d.l1Bridge.GetBridgeRootByHash(ctx, mainnetExitRoot) -} diff --git a/l1bridge2infoindexsync/driver.go b/l1bridge2infoindexsync/driver.go deleted file mode 100644 index 921a0c41..00000000 --- a/l1bridge2infoindexsync/driver.go +++ /dev/null @@ -1,221 +0,0 @@ -package l1bridge2infoindexsync - -import ( - "context" - "errors" - "time" - - "github.com/0xPolygon/cdk/l1infotreesync" - "github.com/0xPolygon/cdk/log" - "github.com/0xPolygon/cdk/sync" -) - -type driver struct { - downloader *downloader - processor *processor - rh *sync.RetryHandler - waitForSyncersPeriod time.Duration -} - -func newDriver( - downloader *downloader, - processor *processor, - rh *sync.RetryHandler, - waitForSyncersPeriod time.Duration, -) *driver { - return &driver{ - downloader: downloader, - processor: processor, - rh: rh, - waitForSyncersPeriod: waitForSyncersPeriod, - } -} - -func (d *driver) sync(ctx context.Context) { - var ( - attempts int - lpbProcessor uint64 - lastProcessedL1InfoIndex uint32 - err error - ) - for { - lpbProcessor, lastProcessedL1InfoIndex, err = d.processor.GetLastProcessedBlockAndL1InfoTreeIndex(ctx) - if err != nil { - attempts++ - log.Errorf("error getting last processed block and index: %v", err) - d.rh.Handle("GetLastProcessedBlockAndL1InfoTreeIndex", attempts) - - continue - } - - break - } - for { - attempts = 0 - var ( - syncUntilBlock uint64 - shouldWait bool - ) - for { - syncUntilBlock, shouldWait, err = d.getTargetSynchronizationBlock(ctx, lpbProcessor) - if err != nil { - attempts++ - log.Errorf("error getting target sync block: %v", err) - 
d.rh.Handle("getTargetSynchronizationBlock", attempts) - - continue - } - - break - } - if shouldWait { - log.Debugf("waiting for syncers to catch up") - time.Sleep(d.waitForSyncersPeriod) - - continue - } - - attempts = 0 - var lastL1InfoTreeIndex uint32 - found := false - for { - lastL1InfoTreeIndex, err = d.downloader.getLastL1InfoIndexUntilBlock(ctx, syncUntilBlock) - if err != nil { - if errors.Is(err, l1infotreesync.ErrNotFound) || errors.Is(err, l1infotreesync.ErrBlockNotProcessed) { - log.Debugf("l1 info tree index not ready, querying until block %d: %s", syncUntilBlock, err) - - break - } - attempts++ - log.Errorf("error getting last l1 info tree index: %v", err) - d.rh.Handle("getLastL1InfoIndexUntilBlock", attempts) - - continue - } - found = true - - break - } - if !found { - time.Sleep(d.waitForSyncersPeriod) - - continue - } - - relations := []bridge2L1InfoRelation{} - var init uint32 - if lastProcessedL1InfoIndex > 0 { - init = lastProcessedL1InfoIndex + 1 - } - if init <= lastL1InfoTreeIndex { - log.Debugf("getting relations from index %d to %d", init, lastL1InfoTreeIndex) - } - for i := init; i <= lastL1InfoTreeIndex; i++ { - attempts = 0 - for { - relation, err := d.getRelation(ctx, i) - if err != nil { - attempts++ - log.Errorf("error getting relation: %v", err) - d.rh.Handle("getRelation", attempts) - - continue - } - relations = append(relations, relation) - - break - } - } - - attempts = 0 - log.Debugf("processing until block %d: %+v", syncUntilBlock, relations) - for { - if err := d.processor.processUntilBlock(ctx, syncUntilBlock, relations); err != nil { - attempts++ - log.Errorf("error processing block: %v", err) - d.rh.Handle("processUntilBlock", attempts) - - continue - } - - break - } - - lpbProcessor = syncUntilBlock - if len(relations) > 0 { - lastProcessedL1InfoIndex = relations[len(relations)-1].l1InfoTreeIndex - log.Debugf("last processed index %d", lastProcessedL1InfoIndex) - } - } -} - -func (d *driver) 
getTargetSynchronizationBlock( - ctx context.Context, lpbProcessor uint64, -) (syncUntilBlock uint64, shouldWait bool, err error) { - // NOTE: if this had configurable finality, it would be needed to deal with reorgs - lastFinalised, err := d.downloader.getLastFinalizedL1Block(ctx) - if err != nil { - return - } - checkProcessedBlockFn := func(blockToCheck, lastProcessed uint64, blockType string) bool { - if blockToCheck >= lastProcessed { - log.Debugf( - "should wait because the last processed block (%d) is greater or equal than the %s (%d)", - blockToCheck, blockType, lastProcessed) - shouldWait = true - - return true - } - - return false - } - if checkProcessedBlockFn(lpbProcessor, lastFinalised, "last finalised") { - return - } - lpbInfo, err := d.downloader.getLastProcessedBlockL1InfoTree(ctx) - if err != nil { - return - } - if checkProcessedBlockFn(lpbProcessor, lastFinalised, "last block from L1 Info tree sync") { - return - } - lpbBridge, err := d.downloader.getLastProcessedBlockBridge(ctx) - if err != nil { - return - } - if checkProcessedBlockFn(lpbProcessor, lastFinalised, "last block from l1 bridge sync") { - return - } - - // Bridge, L1Info and L1 ahead of procesor. 
Pick the smallest block num as target - if lastFinalised <= lpbInfo { - log.Debugf("target sync block is the last finalised block (%d)", lastFinalised) - syncUntilBlock = lastFinalised - } else { - log.Debugf("target sync block is the last processed block from L1 info tree (%d)", lpbInfo) - syncUntilBlock = lpbInfo - } - if lpbBridge < syncUntilBlock { - log.Debugf("target sync block is the last processed block from bridge (%d)", lpbBridge) - syncUntilBlock = lpbBridge - } - - return -} - -func (d *driver) getRelation(ctx context.Context, l1InfoIndex uint32) (bridge2L1InfoRelation, error) { - mer, err := d.downloader.getMainnetExitRootAtL1InfoTreeIndex(ctx, l1InfoIndex) - if err != nil { - return bridge2L1InfoRelation{}, err - } - - bridgeRoot, err := d.downloader.getBridgeIndex(ctx, mer) - if err != nil { - return bridge2L1InfoRelation{}, err - } - - return bridge2L1InfoRelation{ - bridgeIndex: bridgeRoot.Index, - l1InfoTreeIndex: l1InfoIndex, - }, nil -} diff --git a/l1bridge2infoindexsync/e2e_test.go b/l1bridge2infoindexsync/e2e_test.go deleted file mode 100644 index e134c1ab..00000000 --- a/l1bridge2infoindexsync/e2e_test.go +++ /dev/null @@ -1,232 +0,0 @@ -package l1bridge2infoindexsync_test - -import ( - "context" - "errors" - "fmt" - "math/big" - "path" - "strconv" - "testing" - "time" - - "github.com/0xPolygon/cdk-contracts-tooling/contracts/elderberry-paris/polygonzkevmbridgev2" - "github.com/0xPolygon/cdk-contracts-tooling/contracts/elderberry-paris/polygonzkevmglobalexitrootv2" - "github.com/0xPolygon/cdk/bridgesync" - cdktypes "github.com/0xPolygon/cdk/config/types" - "github.com/0xPolygon/cdk/etherman" - "github.com/0xPolygon/cdk/l1bridge2infoindexsync" - "github.com/0xPolygon/cdk/l1infotreesync" - "github.com/0xPolygon/cdk/reorgdetector" - "github.com/0xPolygon/cdk/test/contracts/transparentupgradableproxy" - "github.com/0xPolygon/cdk/test/helpers" - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" - 
"github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/ethclient/simulated" - "github.com/ethereum/go-ethereum/rpc" - "github.com/stretchr/testify/require" -) - -func newSimulatedClient(authDeployer, authCaller *bind.TransactOpts) ( - client *simulated.Backend, - gerAddr common.Address, - bridgeAddr common.Address, - gerContract *polygonzkevmglobalexitrootv2.Polygonzkevmglobalexitrootv2, - bridgeContract *polygonzkevmbridgev2.Polygonzkevmbridgev2, - err error, -) { - ctx := context.Background() - balance, _ := new(big.Int).SetString("10000000000000000000000000", 10) - genesisAlloc := map[common.Address]types.Account{ - authDeployer.From: { - Balance: balance, - }, - authCaller.From: { - Balance: balance, - }, - } - blockGasLimit := uint64(999999999999999999) - client = simulated.NewBackend(genesisAlloc, simulated.WithBlockGasLimit(blockGasLimit)) - - bridgeImplementationAddr, _, _, err := polygonzkevmbridgev2.DeployPolygonzkevmbridgev2(authDeployer, client.Client()) - if err != nil { - return nil, common.Address{}, common.Address{}, nil, nil, err - } - client.Commit() - - nonce, err := client.Client().PendingNonceAt(ctx, authDeployer.From) - if err != nil { - return nil, common.Address{}, common.Address{}, nil, nil, err - } - precalculatedAddr := crypto.CreateAddress(authDeployer.From, nonce+1) - bridgeABI, err := polygonzkevmbridgev2.Polygonzkevmbridgev2MetaData.GetAbi() - if err != nil { - return nil, common.Address{}, common.Address{}, nil, nil, err - } - if bridgeABI == nil { - return nil, common.Address{}, common.Address{}, nil, nil, errors.New("GetABI returned nil") - } - dataCallProxy, err := bridgeABI.Pack("initialize", - uint32(0), // networkIDMainnet - common.Address{}, // gasTokenAddressMainnet" - uint32(0), // gasTokenNetworkMainnet - precalculatedAddr, - common.Address{}, - []byte{}, // gasTokenMetadata - ) - if err != nil { - return nil, common.Address{}, common.Address{}, nil, nil, 
err - } - bridgeAddr, _, _, err = transparentupgradableproxy.DeployTransparentupgradableproxy( - authDeployer, - client.Client(), - bridgeImplementationAddr, - authDeployer.From, - dataCallProxy, - ) - if err != nil { - return nil, common.Address{}, common.Address{}, nil, nil, err - } - client.Commit() - bridgeContract, err = polygonzkevmbridgev2.NewPolygonzkevmbridgev2(bridgeAddr, client.Client()) - if err != nil { - return nil, common.Address{}, common.Address{}, nil, nil, err - } - checkGERAddr, err := bridgeContract.GlobalExitRootManager(&bind.CallOpts{}) - if err != nil { - return nil, common.Address{}, common.Address{}, nil, nil, err - } - if precalculatedAddr != checkGERAddr { - err = errors.New("error deploying bridge") - return - } - - gerAddr, _, gerContract, err = polygonzkevmglobalexitrootv2.DeployPolygonzkevmglobalexitrootv2( - authDeployer, client.Client(), authCaller.From, bridgeAddr, - ) - if err != nil { - return nil, common.Address{}, common.Address{}, nil, nil, err - } - client.Commit() - - if precalculatedAddr != gerAddr { - return nil, common.Address{}, common.Address{}, nil, nil, errors.New("error calculating addr") - } - - return client, gerAddr, bridgeAddr, gerContract, bridgeContract, nil -} - -func TestE2E(t *testing.T) { - ctx := context.Background() - dbPathBridgeSync := path.Join(t.TempDir(), "file::memory:?cache=shared") - dbPathL1Sync := path.Join(t.TempDir(), "file::memory:?cache=shared") - dbPathReorg := t.TempDir() - dbPathL12InfoSync := t.TempDir() - - privateKey, err := crypto.GenerateKey() - require.NoError(t, err) - authDeployer, err := bind.NewKeyedTransactorWithChainID(privateKey, big.NewInt(1337)) - require.NoError(t, err) - privateKey, err = crypto.GenerateKey() - require.NoError(t, err) - auth, err := bind.NewKeyedTransactorWithChainID(privateKey, big.NewInt(1337)) - require.NoError(t, err) - require.NotEqual(t, authDeployer.From, auth.From) - client, gerAddr, bridgeAddr, gerSc, bridgeSc, err := 
newSimulatedClient(authDeployer, auth) - require.NoError(t, err) - rd, err := reorgdetector.New(client.Client(), reorgdetector.Config{DBPath: dbPathReorg, CheckReorgsInterval: cdktypes.NewDuration(time.Second)}) - require.NoError(t, err) - require.NoError(t, rd.Start(ctx)) - - testClient := helpers.TestClient{ClientRenamed: client.Client()} - bridgeSync, err := bridgesync.NewL1(ctx, dbPathBridgeSync, bridgeAddr, 10, etherman.LatestBlock, rd, testClient, 0, time.Millisecond*10, 0, 0) - require.NoError(t, err) - go bridgeSync.Start(ctx) - - l1Sync, err := l1infotreesync.New( - ctx, - dbPathL1Sync, - gerAddr, - common.Address{}, - 10, - etherman.SafeBlock, - rd, - client.Client(), - time.Millisecond, - 0, - time.Millisecond, - 3, - ) - require.NoError(t, err) - go l1Sync.Start(ctx) - - bridge2InfoSync, err := l1bridge2infoindexsync.New(dbPathL12InfoSync, bridgeSync, l1Sync, client.Client(), 0, 0, time.Millisecond) - require.NoError(t, err) - go bridge2InfoSync.Start(ctx) - - // Send bridge txs - expectedIndex := -1 - for i := 0; i < 10; i++ { - bridge := bridgesync.Bridge{ - Amount: big.NewInt(0), - DestinationNetwork: 3, - DestinationAddress: common.HexToAddress("f00"), - } - _, err := bridgeSc.BridgeAsset( - auth, - bridge.DestinationNetwork, - bridge.DestinationAddress, - bridge.Amount, - bridge.OriginAddress, - true, nil, - ) - require.NoError(t, err) - expectedIndex++ - client.Commit() - - // Wait for block to be finalised - updateAtBlock, err := client.Client().BlockNumber(ctx) - require.NoError(t, err) - for { - lastFinalisedBlock, err := client.Client().BlockByNumber(ctx, big.NewInt(int64(rpc.FinalizedBlockNumber))) - require.NoError(t, err) - if lastFinalisedBlock.NumberU64() >= updateAtBlock { - break - } - client.Commit() - time.Sleep(time.Microsecond) - } - - // Wait for syncer to catch up - syncerUpToDate := false - var errMsg string - lb, err := client.Client().BlockByNumber(ctx, big.NewInt(int64(rpc.FinalizedBlockNumber))) - require.NoError(t, err) - 
for i := 0; i < 10; i++ { - lpb, err := bridge2InfoSync.GetLastProcessedBlock(ctx) - require.NoError(t, err) - if lpb == lb.NumberU64() { - syncerUpToDate = true - - break - } - time.Sleep(time.Millisecond * 100) - errMsg = fmt.Sprintf("last block from client: %d, last block from syncer: %d", lb.NumberU64(), lpb) - } - require.True(t, syncerUpToDate, errMsg) - - actualIndex, err := bridge2InfoSync.GetL1InfoTreeIndexByDepositCount(ctx, uint32(i)) - require.NoError(t, err) - require.Equal(t, uint32(expectedIndex), actualIndex) - - if i%2 == 1 { - // Update L1 info tree without a bridge on L1 - _, err = gerSc.UpdateExitRoot(auth, common.HexToHash(strconv.Itoa(i))) - require.NoError(t, err) - expectedIndex++ - client.Commit() - } - } -} diff --git a/l1bridge2infoindexsync/l1bridge2infoindexsync.go b/l1bridge2infoindexsync/l1bridge2infoindexsync.go deleted file mode 100644 index c24bebba..00000000 --- a/l1bridge2infoindexsync/l1bridge2infoindexsync.go +++ /dev/null @@ -1,62 +0,0 @@ -package l1bridge2infoindexsync - -import ( - "context" - "time" - - "github.com/0xPolygon/cdk/bridgesync" - "github.com/0xPolygon/cdk/l1infotreesync" - "github.com/0xPolygon/cdk/sync" - "github.com/ethereum/go-ethereum" -) - -type L1Bridge2InfoIndexSync struct { - processor *processor - driver *driver -} - -func New( - dbPath string, - l1Bridge *bridgesync.BridgeSync, - l1Info *l1infotreesync.L1InfoTreeSync, - l1Client ethereum.ChainReader, - retryAfterErrorPeriod time.Duration, - maxRetryAttemptsAfterError int, - waitForSyncersPeriod time.Duration, -) (*L1Bridge2InfoIndexSync, error) { - dwn := newDownloader(l1Bridge, l1Info, l1Client) - - prc, err := newProcessor(dbPath) - if err != nil { - return nil, err - } - - rh := &sync.RetryHandler{ - RetryAfterErrorPeriod: retryAfterErrorPeriod, - MaxRetryAttemptsAfterError: maxRetryAttemptsAfterError, - } - drv := newDriver(dwn, prc, rh, waitForSyncersPeriod) - - return &L1Bridge2InfoIndexSync{ - driver: drv, - processor: prc, - }, nil -} - -func 
(s *L1Bridge2InfoIndexSync) Start(ctx context.Context) { - s.driver.sync(ctx) -} - -// GetLastProcessedBlock retrieves the last processed block number by the processor. -func (s *L1Bridge2InfoIndexSync) GetLastProcessedBlock(ctx context.Context) (uint64, error) { - lpb, _, err := s.processor.GetLastProcessedBlockAndL1InfoTreeIndex(ctx) - - return lpb, err -} - -// GetL1InfoTreeIndexByDepositCount retrieves the L1 Info Tree index for a given deposit count. -func (s *L1Bridge2InfoIndexSync) GetL1InfoTreeIndexByDepositCount( - ctx context.Context, depositCount uint32, -) (uint32, error) { - return s.processor.getL1InfoTreeIndexByBridgeIndex(ctx, depositCount) -} diff --git a/l1bridge2infoindexsync/processor.go b/l1bridge2infoindexsync/processor.go deleted file mode 100644 index bfe9f3a6..00000000 --- a/l1bridge2infoindexsync/processor.go +++ /dev/null @@ -1,206 +0,0 @@ -package l1bridge2infoindexsync - -import ( - "context" - "errors" - "fmt" - - "github.com/0xPolygon/cdk/common" - "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon-lib/kv/mdbx" -) - -const ( - lastProcessedTable = "l1bridge2infoindexsync-lastProcessed" - relationTable = "l1bridge2infoindexsync-relation" -) - -var ( - lastProcessedKey = []byte("lp") - ErrNotFound = errors.New("not found") -) - -type processor struct { - db kv.RwDB -} - -type bridge2L1InfoRelation struct { - bridgeIndex uint32 - l1InfoTreeIndex uint32 -} - -type lastProcessed struct { - block uint64 - index uint32 -} - -func (lp *lastProcessed) MarshalBinary() ([]byte, error) { - return append(common.Uint64ToBytes(lp.block), common.Uint32ToBytes(lp.index)...), nil -} - -func (lp *lastProcessed) UnmarshalBinary(data []byte) error { - const expectedDataLength = 12 - if len(data) != expectedDataLength { - return fmt.Errorf("expected len %d, actual len %d", expectedDataLength, len(data)) - } - lp.block = common.BytesToUint64(data[:8]) - lp.index = common.BytesToUint32(data[8:]) - - return nil -} - -func 
newProcessor(dbPath string) (*processor, error) { - tableCfgFunc := func(defaultBuckets kv.TableCfg) kv.TableCfg { - return kv.TableCfg{ - lastProcessedTable: {}, - relationTable: {}, - } - } - db, err := mdbx.NewMDBX(nil). - Path(dbPath). - WithTableCfg(tableCfgFunc). - Open() - if err != nil { - return nil, err - } - - return &processor{ - db: db, - }, nil -} - -// GetLastProcessedBlockAndL1InfoTreeIndex returns the last processed block oby the processor, including blocks -// that don't have events -func (p *processor) GetLastProcessedBlockAndL1InfoTreeIndex(ctx context.Context) (uint64, uint32, error) { - tx, err := p.db.BeginRo(ctx) - if err != nil { - return 0, 0, err - } - defer tx.Rollback() - - return p.getLastProcessedBlockAndL1InfoTreeIndexWithTx(tx) -} - -func (p *processor) getLastProcessedBlockAndL1InfoTreeIndexWithTx(tx kv.Tx) (uint64, uint32, error) { - if lastProcessedBytes, err := tx.GetOne(lastProcessedTable, lastProcessedKey); err != nil { - return 0, 0, err - } else if lastProcessedBytes == nil { - return 0, 0, nil - } else { - lp := &lastProcessed{} - if err := lp.UnmarshalBinary(lastProcessedBytes); err != nil { - return 0, 0, err - } - - return lp.block, lp.index, nil - } -} - -func (p *processor) updateLastProcessedBlockAndL1InfoTreeIndex( - ctx context.Context, blockNum uint64, index uint32, -) error { - tx, err := p.db.BeginRw(ctx) - if err != nil { - return err - } - if err := p.updateLastProcessedBlockAndL1InfoTreeIndexWithTx(tx, blockNum, index); err != nil { - tx.Rollback() - - return err - } - - return tx.Commit() -} - -func (p *processor) updateLastProcessedBlockAndL1InfoTreeIndexWithTx(tx kv.RwTx, blockNum uint64, index uint32) error { - lp := &lastProcessed{ - block: blockNum, - index: index, - } - value, err := lp.MarshalBinary() - if err != nil { - return err - } - - return tx.Put(lastProcessedTable, lastProcessedKey, value) -} - -func (p *processor) processUntilBlock( - ctx context.Context, lastProcessedBlock uint64, relations 
[]bridge2L1InfoRelation, -) error { - tx, err := p.db.BeginRw(ctx) - if err != nil { - return err - } - - if len(relations) == 0 { - _, lastIndex, err := p.getLastProcessedBlockAndL1InfoTreeIndexWithTx(tx) - if err != nil { - tx.Rollback() - - return err - } - if err := p.updateLastProcessedBlockAndL1InfoTreeIndexWithTx( - tx, - lastProcessedBlock, - lastIndex, - ); err != nil { - tx.Rollback() - - return err - } - - return tx.Commit() - } - - for _, relation := range relations { - if _, err := p.getL1InfoTreeIndexByBridgeIndexWithTx(tx, relation.bridgeIndex); !errors.Is(err, ErrNotFound) { - // Note that indexes could be repeated as the L1 Info tree update can be produced by a rollup and not mainnet. - // Hence if the index already exist, do not update as it's better to have the lowest index possible for the relation - continue - } - if err := tx.Put( - relationTable, - common.Uint32ToBytes(relation.bridgeIndex), - common.Uint32ToBytes(relation.l1InfoTreeIndex), - ); err != nil { - tx.Rollback() - - return err - } - } - - if err := p.updateLastProcessedBlockAndL1InfoTreeIndexWithTx( - tx, - lastProcessedBlock, - relations[len(relations)-1].l1InfoTreeIndex, - ); err != nil { - tx.Rollback() - - return err - } - - return tx.Commit() -} - -func (p *processor) getL1InfoTreeIndexByBridgeIndex(ctx context.Context, depositCount uint32) (uint32, error) { - tx, err := p.db.BeginRo(ctx) - if err != nil { - return 0, err - } - defer tx.Rollback() - - return p.getL1InfoTreeIndexByBridgeIndexWithTx(tx, depositCount) -} - -func (p *processor) getL1InfoTreeIndexByBridgeIndexWithTx(tx kv.Tx, depositCount uint32) (uint32, error) { - indexBytes, err := tx.GetOne(relationTable, common.Uint32ToBytes(depositCount)) - if err != nil { - return 0, err - } - if indexBytes == nil { - return 0, ErrNotFound - } - - return common.BytesToUint32(indexBytes), nil -} diff --git a/l1bridge2infoindexsync/processor_test.go b/l1bridge2infoindexsync/processor_test.go deleted file mode 100644 index 
9305dd9b..00000000 --- a/l1bridge2infoindexsync/processor_test.go +++ /dev/null @@ -1,22 +0,0 @@ -package l1bridge2infoindexsync - -import ( - "context" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestDuplicatedKey(t *testing.T) { - dbPath := t.TempDir() - p, err := newProcessor(dbPath) - require.NoError(t, err) - ctx := context.Background() - err = p.processUntilBlock(ctx, 5, []bridge2L1InfoRelation{{bridgeIndex: 2, l1InfoTreeIndex: 2}}) - require.NoError(t, err) - err = p.processUntilBlock(ctx, 7, []bridge2L1InfoRelation{{bridgeIndex: 2, l1InfoTreeIndex: 3}}) - require.NoError(t, err) - l1InfoTreeIndex, err := p.getL1InfoTreeIndexByBridgeIndex(ctx, 2) - require.NoError(t, err) - require.Equal(t, uint32(2), l1InfoTreeIndex) -} diff --git a/l1infotreesync/downloader.go b/l1infotreesync/downloader.go index 2051f7b5..16ccb37a 100644 --- a/l1infotreesync/downloader.go +++ b/l1infotreesync/downloader.go @@ -86,7 +86,8 @@ func buildAppender(client EthClienter, globalExitRoot, rollupManager common.Addr l, err, ) } - log.Infof("updateL1InfoTreeSignatureV2: expected root: %s", common.Bytes2Hex(l1InfoTreeUpdate.CurrentL1InfoRoot[:])) + log.Infof("updateL1InfoTreeSignatureV2: expected root: %s", + common.BytesToHash(l1InfoTreeUpdate.CurrentL1InfoRoot[:])) return nil } diff --git a/l1infotreesync/e2e_test.go b/l1infotreesync/e2e_test.go index 90f7f091..21820059 100644 --- a/l1infotreesync/e2e_test.go +++ b/l1infotreesync/e2e_test.go @@ -119,30 +119,165 @@ func TestE2E(t *testing.T) { tx, err := verifySC.VerifyBatches(auth, rollupID, 0, newLocalExitRoot, common.Hash{}, i%2 != 0) require.NoError(t, err) client.Commit() - // Let the processor catch up - time.Sleep(time.Millisecond * 100) receipt, err := client.Client().TransactionReceipt(ctx, tx.Hash()) require.NoError(t, err) require.Equal(t, receipt.Status, types.ReceiptStatusSuccessful) require.True(t, len(receipt.Logs) == 1+i%2+i%2) + // Let the processor catch + processorUpdated := false + for i := 0; i 
< 30; i++ { + lpb, err := syncer.GetLastProcessedBlock(ctx) + require.NoError(t, err) + if receipt.BlockNumber.Uint64() == lpb { + processorUpdated = true + break + } + time.Sleep(time.Millisecond * 10) + } + require.True(t, processorUpdated) + + // Assert rollup exit root expectedRollupExitRoot, err := verifySC.GetRollupExitRoot(&bind.CallOpts{Pending: false}) require.NoError(t, err) actualRollupExitRoot, err := syncer.GetLastRollupExitRoot(ctx) require.NoError(t, err) require.Equal(t, common.Hash(expectedRollupExitRoot), actualRollupExitRoot.Hash, fmt.Sprintf("rollupID: %d, i: %d", rollupID, i)) + + // Assert verify batches + expectedVerify := l1infotreesync.VerifyBatches{ + BlockNumber: receipt.BlockNumber.Uint64(), + BlockPosition: uint64(i%2 + i%2), + RollupID: rollupID, + ExitRoot: newLocalExitRoot, + Aggregator: auth.From, + RollupExitRoot: expectedRollupExitRoot, + } + actualVerify, err := syncer.GetLastVerifiedBatches(rollupID) + require.NoError(t, err) + require.Equal(t, expectedVerify, *actualVerify) } } } +func TestWithReorgs(t *testing.T) { + ctx := context.Background() + dbPathSyncer := path.Join(t.TempDir(), "file::memory:?cache=shared") + dbPathReorg := t.TempDir() + privateKey, err := crypto.GenerateKey() + require.NoError(t, err) + auth, err := bind.NewKeyedTransactorWithChainID(privateKey, big.NewInt(1337)) + require.NoError(t, err) + client, gerAddr, verifyAddr, gerSc, verifySC, err := newSimulatedClient(auth) + require.NoError(t, err) + rd, err := reorgdetector.New(client.Client(), reorgdetector.Config{DBPath: dbPathReorg, CheckReorgsInterval: cdktypes.NewDuration(time.Millisecond * 30)}) + require.NoError(t, err) + require.NoError(t, rd.Start(ctx)) + syncer, err := l1infotreesync.New(ctx, dbPathSyncer, gerAddr, verifyAddr, 10, etherman.LatestBlock, rd, client.Client(), time.Millisecond, 0, time.Second, 25) + require.NoError(t, err) + go syncer.Start(ctx) + + // Commit block + header, err := client.Client().HeaderByHash(ctx, client.Commit()) // 
Block 3 + require.NoError(t, err) + reorgFrom := header.Hash() + fmt.Println("start from header:", header.Number) + + updateL1InfoTreeAndRollupExitTree := func(i int, rollupID uint32) { + // Update L1 Info Tree + _, err := gerSc.UpdateExitRoot(auth, common.HexToHash(strconv.Itoa(i))) + require.NoError(t, err) + + // Update L1 Info Tree + Rollup Exit Tree + newLocalExitRoot := common.HexToHash(strconv.Itoa(i) + "ffff" + strconv.Itoa(1)) + _, err = verifySC.VerifyBatches(auth, rollupID, 0, newLocalExitRoot, common.Hash{}, true) + require.NoError(t, err) + + // Update Rollup Exit Tree + newLocalExitRoot = common.HexToHash(strconv.Itoa(i) + "ffff" + strconv.Itoa(2)) + _, err = verifySC.VerifyBatches(auth, rollupID, 0, newLocalExitRoot, common.Hash{}, false) + require.NoError(t, err) + } + + // create some events and update the trees + updateL1InfoTreeAndRollupExitTree(1, 1) + + // Block 4 + commitBlocks(t, client, 1, time.Second*5) + + // Make sure syncer is up to date + waitForSyncerToCatchUp(ctx, t, syncer, client) + + // Assert rollup exit root + expectedRollupExitRoot, err := verifySC.GetRollupExitRoot(&bind.CallOpts{Pending: false}) + require.NoError(t, err) + actualRollupExitRoot, err := syncer.GetLastRollupExitRoot(ctx) + require.NoError(t, err) + t.Log("exit roots", common.Hash(expectedRollupExitRoot), actualRollupExitRoot.Hash) + require.Equal(t, common.Hash(expectedRollupExitRoot), actualRollupExitRoot.Hash) + + // Assert L1 Info tree root + expectedL1InfoRoot, err := gerSc.GetRoot(&bind.CallOpts{Pending: false}) + require.NoError(t, err) + expectedGER, err := gerSc.GetLastGlobalExitRoot(&bind.CallOpts{Pending: false}) + require.NoError(t, err) + actualL1InfoRoot, err := syncer.GetLastL1InfoTreeRoot(ctx) + require.NoError(t, err) + info, err := syncer.GetInfoByIndex(ctx, actualL1InfoRoot.Index) + require.NoError(t, err) + + require.Equal(t, common.Hash(expectedL1InfoRoot), actualL1InfoRoot.Hash) + require.Equal(t, common.Hash(expectedGER), 
info.GlobalExitRoot, fmt.Sprintf("%+v", info)) + + // Forking from block 3 + err = client.Fork(reorgFrom) + require.NoError(t, err) + + // Block 4, 5, 6 after the fork + commitBlocks(t, client, 3, time.Millisecond*500) + + // Make sure syncer is up to date + waitForSyncerToCatchUp(ctx, t, syncer, client) + + // Assert rollup exit root after the fork - should be zero since there are no events in the block after the fork + expectedRollupExitRoot, err = verifySC.GetRollupExitRoot(&bind.CallOpts{Pending: false}) + require.NoError(t, err) + actualRollupExitRoot, err = syncer.GetLastRollupExitRoot(ctx) + require.ErrorContains(t, err, "not found") // rollup exit tree reorged, it does not have any exits in it + t.Log("exit roots", common.Hash(expectedRollupExitRoot), actualRollupExitRoot.Hash) + require.Equal(t, common.Hash(expectedRollupExitRoot), actualRollupExitRoot.Hash) + + // Forking from block 3 again + err = client.Fork(reorgFrom) + require.NoError(t, err) + time.Sleep(time.Millisecond * 500) + + // create some events and update the trees + updateL1InfoTreeAndRollupExitTree(2, 1) + + // Block 4, 5, 6, 7 after the fork + commitBlocks(t, client, 4, time.Millisecond*100) + + // Make sure syncer is up to date + waitForSyncerToCatchUp(ctx, t, syncer, client) + + // Assert rollup exit root after the fork - should match the contract since new events were emitted after the fork + expectedRollupExitRoot, err = verifySC.GetRollupExitRoot(&bind.CallOpts{Pending: false}) + require.NoError(t, err) + actualRollupExitRoot, err = syncer.GetLastRollupExitRoot(ctx) + require.NoError(t, err) + t.Log("exit roots", common.Hash(expectedRollupExitRoot), actualRollupExitRoot.Hash) + require.Equal(t, common.Hash(expectedRollupExitRoot), actualRollupExitRoot.Hash) +} + func TestStressAndReorgs(t *testing.T) { const ( - totalIterations = 200 // Have tested with much larger number (+10k) - enableReorgs = false // test fails when set to true - reorgEveryXIterations = 53 - maxReorgDepth = 5 - 
maxEventsPerBlock = 7 - maxRollups = 31 + totalIterations = 3 + blocksInIteration = 140 + reorgEveryXIterations = 70 + reorgSizeInBlocks = 2 + maxRollupID = 31 + extraBlocksToMine = 10 ) ctx := context.Background() @@ -157,58 +292,48 @@ func TestStressAndReorgs(t *testing.T) { rd, err := reorgdetector.New(client.Client(), reorgdetector.Config{DBPath: dbPathReorg, CheckReorgsInterval: cdktypes.NewDuration(time.Millisecond * 100)}) require.NoError(t, err) require.NoError(t, rd.Start(ctx)) - syncer, err := l1infotreesync.New(ctx, dbPathSyncer, gerAddr, verifyAddr, 10, etherman.LatestBlock, rd, client.Client(), time.Millisecond, 0, 100*time.Millisecond, 3) + syncer, err := l1infotreesync.New(ctx, dbPathSyncer, gerAddr, verifyAddr, 10, etherman.LatestBlock, rd, client.Client(), time.Millisecond, 0, time.Second, 100) require.NoError(t, err) go syncer.Start(ctx) - for i := 0; i < totalIterations; i++ { - for j := 0; j < i%maxEventsPerBlock; j++ { - switch j % 3 { - case 0: // Update L1 Info Tree - _, err := gerSc.UpdateExitRoot(auth, common.HexToHash(strconv.Itoa(i))) - require.NoError(t, err) - case 1: // Update L1 Info Tree + Rollup Exit Tree - newLocalExitRoot := common.HexToHash(strconv.Itoa(i) + "ffff" + strconv.Itoa(j)) - _, err := verifySC.VerifyBatches(auth, 1+uint32(i%maxRollups), 0, newLocalExitRoot, common.Hash{}, true) - require.NoError(t, err) - case 2: // Update Rollup Exit Tree - newLocalExitRoot := common.HexToHash(strconv.Itoa(i) + "ffff" + strconv.Itoa(j)) - _, err := verifySC.VerifyBatches(auth, 1+uint32(i%maxRollups), 0, newLocalExitRoot, common.Hash{}, false) + updateL1InfoTreeAndRollupExitTree := func(i, j int, rollupID uint32) { + // Update L1 Info Tree + _, err := gerSc.UpdateExitRoot(auth, common.HexToHash(strconv.Itoa(i))) + require.NoError(t, err) + + // Update L1 Info Tree + Rollup Exit Tree + newLocalExitRoot := common.HexToHash(strconv.Itoa(i) + "ffff" + strconv.Itoa(j)) + _, err = verifySC.VerifyBatches(auth, rollupID, 0, newLocalExitRoot, 
common.Hash{}, true) + require.NoError(t, err) + + // Update Rollup Exit Tree + newLocalExitRoot = common.HexToHash(strconv.Itoa(i) + "fffa" + strconv.Itoa(j)) + _, err = verifySC.VerifyBatches(auth, rollupID, 0, newLocalExitRoot, common.Hash{}, false) + require.NoError(t, err) + } + + for i := 1; i <= totalIterations; i++ { + for j := 1; j <= blocksInIteration; j++ { + commitBlocks(t, client, 1, time.Millisecond*10) + + if j%reorgEveryXIterations == 0 { + currentBlockNum, err := client.Client().BlockNumber(ctx) require.NoError(t, err) - } - } - client.Commit() - time.Sleep(time.Microsecond * 30) // Sleep just enough for goroutine to switch - if enableReorgs && i%reorgEveryXIterations == 0 { - reorgDepth := i%maxReorgDepth + 1 - currentBlockNum, err := client.Client().BlockNumber(ctx) - require.NoError(t, err) - targetReorgBlockNum := currentBlockNum - uint64(reorgDepth) - if targetReorgBlockNum < currentBlockNum { // we are dealing with uints... - reorgBlock, err := client.Client().BlockByNumber(ctx, big.NewInt(int64(targetReorgBlockNum))) + + block, err := client.Client().BlockByNumber(ctx, big.NewInt(int64(currentBlockNum-reorgSizeInBlocks))) require.NoError(t, err) - err = client.Fork(reorgBlock.Hash()) + reorgFrom := block.Hash() + err = client.Fork(reorgFrom) require.NoError(t, err) + } else { + updateL1InfoTreeAndRollupExitTree(i, j, uint32(j%maxRollupID)+1) } } } - syncerUpToDate := false - var errMsg string - lb, err := client.Client().BlockNumber(ctx) - require.NoError(t, err) - for i := 0; i < 50; i++ { - lpb, err := syncer.GetLastProcessedBlock(ctx) - require.NoError(t, err) - if lpb == lb { - syncerUpToDate = true + commitBlocks(t, client, 1, time.Millisecond*10) - break - } - time.Sleep(time.Millisecond * 100) - errMsg = fmt.Sprintf("last block from client: %d, last block from syncer: %d", lb, lpb) - } - require.True(t, syncerUpToDate, errMsg) + waitForSyncerToCatchUp(ctx, t, syncer, client) // Assert rollup exit root expectedRollupExitRoot, err := 
verifySC.GetRollupExitRoot(&bind.CallOpts{Pending: false}) @@ -227,6 +352,39 @@ func TestStressAndReorgs(t *testing.T) { info, err := syncer.GetInfoByIndex(ctx, lastRoot.Index) require.NoError(t, err, fmt.Sprintf("index: %d", lastRoot.Index)) - require.Equal(t, common.Hash(expectedL1InfoRoot), lastRoot.Hash) + t.Logf("expectedL1InfoRoot: %s", common.Hash(expectedL1InfoRoot).String()) require.Equal(t, common.Hash(expectedGER), info.GlobalExitRoot, fmt.Sprintf("%+v", info)) + require.Equal(t, common.Hash(expectedL1InfoRoot), lastRoot.Hash) +} + +func waitForSyncerToCatchUp(ctx context.Context, t *testing.T, syncer *l1infotreesync.L1InfoTreeSync, client *simulated.Backend) { + t.Helper() + + syncerUpToDate := false + var errMsg string + + for i := 0; i < 200; i++ { + lpb, err := syncer.GetLastProcessedBlock(ctx) + require.NoError(t, err) + lb, err := client.Client().BlockNumber(ctx) + require.NoError(t, err) + if lpb == lb { + syncerUpToDate = true + break + } + time.Sleep(time.Second / 2) + errMsg = fmt.Sprintf("last block from client: %d, last block from syncer: %d", lb, lpb) + } + + require.True(t, syncerUpToDate, errMsg) +} + +// commitBlocks commits the specified number of blocks with the given client and waits for the specified duration after each block +func commitBlocks(t *testing.T, client *simulated.Backend, numBlocks int, waitDuration time.Duration) { + t.Helper() + + for i := 0; i < numBlocks; i++ { + client.Commit() + time.Sleep(waitDuration) + } } diff --git a/l1infotreesync/l1infotreesync.go b/l1infotreesync/l1infotreesync.go index 546a8ead..4c4b796e 100644 --- a/l1infotreesync/l1infotreesync.go +++ b/l1infotreesync/l1infotreesync.go @@ -151,3 +151,42 @@ func (s *L1InfoTreeSync) GetLocalExitRoot( return s.processor.rollupExitTree.GetLeaf(ctx, networkID-1, rollupExitRoot) } + +func (s *L1InfoTreeSync) GetLastVerifiedBatches(rollupID uint32) (*VerifyBatches, error) { + return s.processor.GetLastVerifiedBatches(rollupID) +} + +func (s *L1InfoTreeSync) 
GetFirstVerifiedBatches(rollupID uint32) (*VerifyBatches, error) { + return s.processor.GetFirstVerifiedBatches(rollupID) +} + +func (s *L1InfoTreeSync) GetFirstVerifiedBatchesAfterBlock(rollupID uint32, blockNum uint64) (*VerifyBatches, error) { + return s.processor.GetFirstVerifiedBatchesAfterBlock(rollupID, blockNum) +} + +func (s *L1InfoTreeSync) GetFirstL1InfoWithRollupExitRoot(rollupExitRoot common.Hash) (*L1InfoTreeLeaf, error) { + return s.processor.GetFirstL1InfoWithRollupExitRoot(rollupExitRoot) +} + +func (s *L1InfoTreeSync) GetLastInfo() (*L1InfoTreeLeaf, error) { + return s.processor.GetLastInfo() +} + +func (s *L1InfoTreeSync) GetFirstInfo() (*L1InfoTreeLeaf, error) { + return s.processor.GetFirstInfo() +} + +func (s *L1InfoTreeSync) GetFirstInfoAfterBlock(blockNum uint64) (*L1InfoTreeLeaf, error) { + return s.processor.GetFirstInfoAfterBlock(blockNum) +} + +func (s *L1InfoTreeSync) GetInfoByGlobalExitRoot(ger common.Hash) (*L1InfoTreeLeaf, error) { + return s.processor.GetInfoByGlobalExitRoot(ger) +} + +// GetL1InfoTreeMerkleProofFromIndexToRoot creates a merkle proof for the L1 Info tree +func (s *L1InfoTreeSync) GetL1InfoTreeMerkleProofFromIndexToRoot( + ctx context.Context, index uint32, root common.Hash, +) (types.Proof, error) { + return s.processor.l1InfoTree.GetProof(ctx, index, root) +} diff --git a/l1infotreesync/migrations/l1infotreesync0001.sql b/l1infotreesync/migrations/l1infotreesync0001.sql index 39a45dd4..7a689281 100644 --- a/l1infotreesync/migrations/l1infotreesync0001.sql +++ b/l1infotreesync/migrations/l1infotreesync0001.sql @@ -16,7 +16,19 @@ CREATE TABLE l1info_leaf ( timestamp INTEGER NOT NULL, mainnet_exit_root VARCHAR NOT NULL, rollup_exit_root VARCHAR NOT NULL, - global_exit_root VARCHAR NOT NULL, + global_exit_root VARCHAR NOT NULL UNIQUE, hash VARCHAR NOT NULL, PRIMARY KEY (block_num, block_pos) ); + +CREATE TABLE verify_batches ( + block_num INTEGER NOT NULL REFERENCES block(num) ON DELETE CASCADE, + block_pos INTEGER NOT 
NULL, + rollup_id INTEGER NOT NULL, + batch_num INTEGER NOT NULL, + state_root VARCHAR NOT NULL, + exit_root VARCHAR NOT NULL, + aggregator VARCHAR NOT NULL, + rollup_exit_root VARCHAR NOT NULL, + PRIMARY KEY (block_num, block_pos) +); diff --git a/l1infotreesync/processor.go b/l1infotreesync/processor.go index c76d7aac..0bb31cc3 100644 --- a/l1infotreesync/processor.go +++ b/l1infotreesync/processor.go @@ -13,7 +13,7 @@ import ( "github.com/0xPolygon/cdk/sync" "github.com/0xPolygon/cdk/tree" treeTypes "github.com/0xPolygon/cdk/tree/types" - ethCommon "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common" "github.com/iden3/go-iden3-crypto/keccak256" "github.com/russross/meddler" "golang.org/x/crypto/sha3" @@ -21,7 +21,6 @@ import ( var ( ErrBlockNotProcessed = errors.New("given block(s) have not been processed yet") - ErrNotFound = errors.New("not found") ErrNoBlock0 = errors.New("blockNum must be greater than 0") ) @@ -34,25 +33,29 @@ type processor struct { // UpdateL1InfoTree representation of the UpdateL1InfoTree event type UpdateL1InfoTree struct { BlockPosition uint64 - MainnetExitRoot ethCommon.Hash - RollupExitRoot ethCommon.Hash - ParentHash ethCommon.Hash + MainnetExitRoot common.Hash + RollupExitRoot common.Hash + ParentHash common.Hash Timestamp uint64 } // VerifyBatches representation of the VerifyBatches and VerifyBatchesTrustedAggregator events type VerifyBatches struct { - BlockPosition uint64 - RollupID uint32 - NumBatch uint64 - StateRoot ethCommon.Hash - ExitRoot ethCommon.Hash - Aggregator ethCommon.Address + BlockNumber uint64 `meddler:"block_num"` + BlockPosition uint64 `meddler:"block_pos"` + RollupID uint32 `meddler:"rollup_id"` + NumBatch uint64 `meddler:"batch_num"` + StateRoot common.Hash `meddler:"state_root,hash"` + ExitRoot common.Hash `meddler:"exit_root,hash"` + Aggregator common.Address `meddler:"aggregator,address"` + + // Not provided by downloader + RollupExitRoot common.Hash 
`meddler:"rollup_exit_root,hash"` } type InitL1InfoRootMap struct { LeafCount uint32 - CurrentL1InfoRoot ethCommon.Hash + CurrentL1InfoRoot common.Hash } type Event struct { @@ -63,20 +66,20 @@ type Event struct { // L1InfoTreeLeaf representation of a leaf of the L1 Info tree type L1InfoTreeLeaf struct { - BlockNumber uint64 `meddler:"block_num"` - BlockPosition uint64 `meddler:"block_pos"` - L1InfoTreeIndex uint32 `meddler:"position"` - PreviousBlockHash ethCommon.Hash `meddler:"previous_block_hash,hash"` - Timestamp uint64 `meddler:"timestamp"` - MainnetExitRoot ethCommon.Hash `meddler:"mainnet_exit_root,hash"` - RollupExitRoot ethCommon.Hash `meddler:"rollup_exit_root,hash"` - GlobalExitRoot ethCommon.Hash `meddler:"global_exit_root,hash"` - Hash ethCommon.Hash `meddler:"hash,hash"` + BlockNumber uint64 `meddler:"block_num"` + BlockPosition uint64 `meddler:"block_pos"` + L1InfoTreeIndex uint32 `meddler:"position"` + PreviousBlockHash common.Hash `meddler:"previous_block_hash,hash"` + Timestamp uint64 `meddler:"timestamp"` + MainnetExitRoot common.Hash `meddler:"mainnet_exit_root,hash"` + RollupExitRoot common.Hash `meddler:"rollup_exit_root,hash"` + GlobalExitRoot common.Hash `meddler:"global_exit_root,hash"` + Hash common.Hash `meddler:"hash,hash"` } // Hash as expected by the tree -func (l *L1InfoTreeLeaf) hash() ethCommon.Hash { - var res [32]byte +func (l *L1InfoTreeLeaf) hash() common.Hash { + var res [treeTypes.DefaultHeight]byte t := make([]byte, 8) //nolint:mnd binary.BigEndian.PutUint64(t, l.Timestamp) copy(res[:], keccak256.Hash(l.globalExitRoot().Bytes(), l.PreviousBlockHash.Bytes(), t)) @@ -84,8 +87,8 @@ func (l *L1InfoTreeLeaf) hash() ethCommon.Hash { } // GlobalExitRoot returns the GER -func (l *L1InfoTreeLeaf) globalExitRoot() ethCommon.Hash { - var gerBytes [32]byte +func (l *L1InfoTreeLeaf) globalExitRoot() common.Hash { + var gerBytes [treeTypes.DefaultHeight]byte hasher := sha3.NewLegacyKeccak256() hasher.Write(l.MainnetExitRoot[:]) 
hasher.Write(l.RollupExitRoot[:]) @@ -153,7 +156,7 @@ func (p *processor) GetLatestInfoUntilBlock(ctx context.Context, blockNum uint64 ) if err != nil { if errors.Is(err, sql.ErrNoRows) { - return nil, ErrNotFound + return nil, db.ErrNotFound } return nil, err } @@ -219,7 +222,6 @@ func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error { if err := tx.Commit(); err != nil { return err } - return nil } @@ -238,7 +240,7 @@ func (p *processor) ProcessBlock(ctx context.Context, b sync.Block) error { } }() - if _, err := tx.Exec(`INSERT INTO block (num) VALUES ($1)`, b.Num); err != nil { + if _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, b.Num); err != nil { return fmt.Errorf("err: %w", err) } @@ -247,7 +249,7 @@ func (p *processor) ProcessBlock(ctx context.Context, b sync.Block) error { lastIndex, err := p.getLastIndex(tx) switch { - case errors.Is(err, ErrNotFound): + case errors.Is(err, db.ErrNotFound): initialL1InfoIndex = 0 err = nil case err != nil: @@ -274,8 +276,7 @@ func (p *processor) ProcessBlock(ctx context.Context, b sync.Block) error { } info.GlobalExitRoot = info.globalExitRoot() info.Hash = info.hash() - err = meddler.Insert(tx, "l1info_leaf", info) - if err != nil { + if err = meddler.Insert(tx, "l1info_leaf", info); err != nil { return fmt.Errorf("err: %w", err) } err = p.l1InfoTree.AddLeaf(tx, info.BlockNumber, info.BlockPosition, treeTypes.Leaf{ @@ -289,13 +290,19 @@ func (p *processor) ProcessBlock(ctx context.Context, b sync.Block) error { } if event.VerifyBatches != nil { - err = p.rollupExitTree.UpsertLeaf(tx, b.Num, event.VerifyBatches.BlockPosition, treeTypes.Leaf{ + newRoot, err := p.rollupExitTree.UpsertLeaf(tx, b.Num, event.VerifyBatches.BlockPosition, treeTypes.Leaf{ Index: event.VerifyBatches.RollupID - 1, Hash: event.VerifyBatches.ExitRoot, }) if err != nil { return fmt.Errorf("err: %w", err) } + verifyBatches := event.VerifyBatches + verifyBatches.BlockNumber = b.Num + verifyBatches.RollupExitRoot = 
newRoot + if err = meddler.Insert(tx, "verify_batches", verifyBatches); err != nil { + return fmt.Errorf("err: %w", err) + } } if event.InitL1InfoRootMap != nil { @@ -317,7 +324,92 @@ func (p *processor) getLastIndex(tx db.Querier) (uint32, error) { row := tx.QueryRow("SELECT position FROM l1info_leaf ORDER BY block_num DESC, block_pos DESC LIMIT 1;") err := row.Scan(&lastProcessedIndex) if errors.Is(err, sql.ErrNoRows) { - return 0, ErrNotFound + return 0, db.ErrNotFound } return lastProcessedIndex, err } + +func (p *processor) GetLastVerifiedBatches(rollupID uint32) (*VerifyBatches, error) { + verified := &VerifyBatches{} + err := meddler.QueryRow(p.db, verified, ` + SELECT * FROM verify_batches + WHERE rollup_id = $1 + ORDER BY block_num DESC, block_pos DESC + LIMIT 1; + `, rollupID) + return verified, db.ReturnErrNotFound(err) +} + +func (p *processor) GetFirstVerifiedBatches(rollupID uint32) (*VerifyBatches, error) { + verified := &VerifyBatches{} + err := meddler.QueryRow(p.db, verified, ` + SELECT * FROM verify_batches + WHERE rollup_id = $1 + ORDER BY block_num ASC, block_pos ASC + LIMIT 1; + `, rollupID) + return verified, db.ReturnErrNotFound(err) +} + +func (p *processor) GetFirstVerifiedBatchesAfterBlock(rollupID uint32, blockNum uint64) (*VerifyBatches, error) { + verified := &VerifyBatches{} + err := meddler.QueryRow(p.db, verified, ` + SELECT * FROM verify_batches + WHERE rollup_id = $1 AND block_num >= $2 + ORDER BY block_num ASC, block_pos ASC + LIMIT 1; + `, rollupID, blockNum) + return verified, db.ReturnErrNotFound(err) +} + +func (p *processor) GetFirstL1InfoWithRollupExitRoot(rollupExitRoot common.Hash) (*L1InfoTreeLeaf, error) { + info := &L1InfoTreeLeaf{} + err := meddler.QueryRow(p.db, info, ` + SELECT * FROM l1info_leaf + WHERE rollup_exit_root = $1 + ORDER BY block_num ASC, block_pos ASC + LIMIT 1; + `, rollupExitRoot.Hex()) + return info, db.ReturnErrNotFound(err) +} + +func (p *processor) GetLastInfo() (*L1InfoTreeLeaf, error) { + info 
:= &L1InfoTreeLeaf{} + err := meddler.QueryRow(p.db, info, ` + SELECT * FROM l1info_leaf + ORDER BY block_num DESC, block_pos DESC + LIMIT 1; + `) + return info, db.ReturnErrNotFound(err) +} + +func (p *processor) GetFirstInfo() (*L1InfoTreeLeaf, error) { + info := &L1InfoTreeLeaf{} + err := meddler.QueryRow(p.db, info, ` + SELECT * FROM l1info_leaf + ORDER BY block_num ASC, block_pos ASC + LIMIT 1; + `) + return info, db.ReturnErrNotFound(err) +} + +func (p *processor) GetFirstInfoAfterBlock(blockNum uint64) (*L1InfoTreeLeaf, error) { + info := &L1InfoTreeLeaf{} + err := meddler.QueryRow(p.db, info, ` + SELECT * FROM l1info_leaf + WHERE block_num >= $1 + ORDER BY block_num ASC, block_pos ASC + LIMIT 1; + `, blockNum) + return info, db.ReturnErrNotFound(err) +} + +func (p *processor) GetInfoByGlobalExitRoot(ger common.Hash) (*L1InfoTreeLeaf, error) { + info := &L1InfoTreeLeaf{} + err := meddler.QueryRow(p.db, info, ` + SELECT * FROM l1info_leaf + WHERE global_exit_root = $1 + LIMIT 1; + `, ger.Hex()) + return info, db.ReturnErrNotFound(err) +} diff --git a/l1infotreesync/processor_test.go b/l1infotreesync/processor_test.go index 01550f31..3da02998 100644 --- a/l1infotreesync/processor_test.go +++ b/l1infotreesync/processor_test.go @@ -1,3 +1,176 @@ package l1infotreesync -// TODO: add unit test +import ( + "testing" + + "github.com/0xPolygon/cdk/db" + "github.com/0xPolygon/cdk/sync" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + "golang.org/x/net/context" +) + +func TestGetVerifiedBatches(t *testing.T) { + dbPath := "file:TestGetVerifiedBatches?mode=memory&cache=shared" + p, err := newProcessor(dbPath) + require.NoError(t, err) + ctx := context.Background() + + // Test ErrNotFound returned correctly on all methods + _, err = p.GetLastVerifiedBatches(0) + require.Equal(t, db.ErrNotFound, err) + _, err = p.GetFirstVerifiedBatches(0) + require.Equal(t, db.ErrNotFound, err) + _, err = p.GetFirstVerifiedBatchesAfterBlock(0, 0) + 
require.Equal(t, db.ErrNotFound, err) + + // First insert + expected1 := &VerifyBatches{ + RollupID: 420, + NumBatch: 69, + StateRoot: common.HexToHash("5ca1e"), + ExitRoot: common.HexToHash("b455"), + Aggregator: common.HexToAddress("beef"), + } + err = p.ProcessBlock(ctx, sync.Block{ + Num: 1, + Events: []interface{}{ + Event{VerifyBatches: expected1}, + }, + }) + require.NoError(t, err) + _, err = p.GetLastVerifiedBatches(0) + require.Equal(t, db.ErrNotFound, err) + actual, err := p.GetLastVerifiedBatches(420) + require.NoError(t, err) + require.Equal(t, expected1, actual) + actual, err = p.GetFirstVerifiedBatches(420) + require.NoError(t, err) + require.Equal(t, expected1, actual) + + // Second insert + expected2 := &VerifyBatches{ + RollupID: 420, + NumBatch: 690, + StateRoot: common.HexToHash("5ca1e3"), + ExitRoot: common.HexToHash("ba55"), + Aggregator: common.HexToAddress("beef3"), + } + err = p.ProcessBlock(ctx, sync.Block{ + Num: 2, + Events: []interface{}{ + Event{VerifyBatches: expected2}, + }, + }) + require.NoError(t, err) + _, err = p.GetLastVerifiedBatches(0) + require.Equal(t, db.ErrNotFound, err) + actual, err = p.GetLastVerifiedBatches(420) + require.NoError(t, err) + require.Equal(t, expected2, actual) + actual, err = p.GetFirstVerifiedBatches(420) + require.NoError(t, err) + require.Equal(t, expected1, actual) + actual, err = p.GetFirstVerifiedBatchesAfterBlock(420, 2) + require.NoError(t, err) + require.Equal(t, expected2, actual) +} + +func TestGetInfo(t *testing.T) { + dbPath := "file:TestGetInfo?mode=memory&cache=shared" + p, err := newProcessor(dbPath) + require.NoError(t, err) + ctx := context.Background() + + // Test ErrNotFound returned correctly on all methods + _, err = p.GetFirstL1InfoWithRollupExitRoot(common.Hash{}) + require.Equal(t, db.ErrNotFound, err) + _, err = p.GetLastInfo() + require.Equal(t, db.ErrNotFound, err) + _, err = p.GetFirstInfo() + require.Equal(t, db.ErrNotFound, err) + _, err = p.GetFirstInfoAfterBlock(0) + 
require.Equal(t, db.ErrNotFound, err) + _, err = p.GetInfoByGlobalExitRoot(common.Hash{}) + require.Equal(t, db.ErrNotFound, err) + + // First insert + info1 := &UpdateL1InfoTree{ + MainnetExitRoot: common.HexToHash("beef"), + RollupExitRoot: common.HexToHash("5ca1e"), + ParentHash: common.HexToHash("1010101"), + Timestamp: 420, + } + expected1 := L1InfoTreeLeaf{ + BlockNumber: 1, + L1InfoTreeIndex: 0, + PreviousBlockHash: info1.ParentHash, + Timestamp: info1.Timestamp, + MainnetExitRoot: info1.MainnetExitRoot, + RollupExitRoot: info1.RollupExitRoot, + } + expected1.GlobalExitRoot = expected1.globalExitRoot() + expected1.Hash = expected1.hash() + err = p.ProcessBlock(ctx, sync.Block{ + Num: 1, + Events: []interface{}{ + Event{UpdateL1InfoTree: info1}, + }, + }) + require.NoError(t, err) + actual, err := p.GetFirstL1InfoWithRollupExitRoot(info1.RollupExitRoot) + require.NoError(t, err) + require.Equal(t, expected1, *actual) + actual, err = p.GetLastInfo() + require.NoError(t, err) + require.Equal(t, expected1, *actual) + actual, err = p.GetFirstInfo() + require.NoError(t, err) + require.Equal(t, expected1, *actual) + actual, err = p.GetFirstInfoAfterBlock(0) + require.NoError(t, err) + require.Equal(t, expected1, *actual) + actual, err = p.GetInfoByGlobalExitRoot(expected1.GlobalExitRoot) + require.NoError(t, err) + require.Equal(t, expected1, *actual) + + // Second insert + info2 := &UpdateL1InfoTree{ + MainnetExitRoot: common.HexToHash("b055"), + RollupExitRoot: common.HexToHash("5ca1e"), + ParentHash: common.HexToHash("1010101"), + Timestamp: 420, + } + expected2 := L1InfoTreeLeaf{ + BlockNumber: 2, + L1InfoTreeIndex: 1, + PreviousBlockHash: info2.ParentHash, + Timestamp: info2.Timestamp, + MainnetExitRoot: info2.MainnetExitRoot, + RollupExitRoot: info2.RollupExitRoot, + } + expected2.GlobalExitRoot = expected2.globalExitRoot() + expected2.Hash = expected2.hash() + err = p.ProcessBlock(ctx, sync.Block{ + Num: 2, + Events: []interface{}{ + Event{UpdateL1InfoTree: 
info2}, + }, + }) + require.NoError(t, err) + actual, err = p.GetFirstL1InfoWithRollupExitRoot(info2.RollupExitRoot) + require.NoError(t, err) + require.Equal(t, expected1, *actual) + actual, err = p.GetLastInfo() + require.NoError(t, err) + require.Equal(t, expected2, *actual) + actual, err = p.GetFirstInfo() + require.NoError(t, err) + require.Equal(t, expected1, *actual) + actual, err = p.GetFirstInfoAfterBlock(2) + require.NoError(t, err) + require.Equal(t, expected2, *actual) + actual, err = p.GetInfoByGlobalExitRoot(expected2.GlobalExitRoot) + require.NoError(t, err) + require.Equal(t, expected2, *actual) +} diff --git a/lastgersync/evmdownloader.go b/lastgersync/evmdownloader.go index 91e05c7a..e76bb578 100644 --- a/lastgersync/evmdownloader.go +++ b/lastgersync/evmdownloader.go @@ -8,10 +8,10 @@ import ( "time" "github.com/0xPolygon/cdk-contracts-tooling/contracts/manual/pessimisticglobalexitroot" + "github.com/0xPolygon/cdk/db" "github.com/0xPolygon/cdk/l1infotreesync" "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sync" - "github.com/0xPolygon/cdk/tree" "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" @@ -67,7 +67,7 @@ func (d *downloader) Download(ctx context.Context, fromBlock uint64, downloadedC ) for { lastIndex, err = d.processor.getLastIndex(ctx) - if errors.Is(err, ErrNotFound) { + if errors.Is(err, db.ErrNotFound) { lastIndex = 0 } else if err != nil { log.Errorf("error getting last indes: %v", err) @@ -105,7 +105,11 @@ func (d *downloader) Download(ctx context.Context, fromBlock uint64, downloadedC break } - blockHeader := d.GetBlockHeader(ctx, lastBlock) + blockHeader, isCanceled := d.GetBlockHeader(ctx, lastBlock) + if isCanceled { + return + } + block := &sync.EVMBlock{ EVMBlockHeader: sync.EVMBlockHeader{ Num: blockHeader.Num, @@ -129,7 +133,7 @@ func (d *downloader) Download(ctx context.Context, fromBlock uint64, downloadedC func (d *downloader) 
getGERsFromIndex(ctx context.Context, fromL1InfoTreeIndex uint32) ([]Event, error) { lastRoot, err := d.l1InfoTreesync.GetLastL1InfoTreeRoot(ctx) - if errors.Is(err, tree.ErrNotFound) { + if errors.Is(err, db.ErrNotFound) { return nil, nil } if err != nil { diff --git a/lastgersync/processor.go b/lastgersync/processor.go index 628ea04a..45104f09 100644 --- a/lastgersync/processor.go +++ b/lastgersync/processor.go @@ -7,6 +7,7 @@ import ( "math" "github.com/0xPolygon/cdk/common" + "github.com/0xPolygon/cdk/db" "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sync" ethCommon "github.com/ethereum/go-ethereum/common" @@ -22,7 +23,6 @@ const ( var ( lastProcessedKey = []byte("lp") - ErrNotFound = errors.New("not found") ) type Event struct { @@ -111,7 +111,7 @@ func (p *processor) getLastIndexWithTx(tx kv.Tx) (uint32, error) { return 0, err } if k == nil { - return 0, ErrNotFound + return 0, db.ErrNotFound } return common.BytesToUint32(k), nil @@ -142,7 +142,7 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { if lenEvents > 0 { li, err := p.getLastIndexWithTx(tx) switch { - case errors.Is(err, ErrNotFound): + case errors.Is(err, db.ErrNotFound): lastIndex = -1 case err != nil: @@ -286,7 +286,7 @@ func (p *processor) GetFirstGERAfterL1InfoTreeIndex( return 0, ethCommon.Hash{}, err } if l1InfoIndexBytes == nil { - return 0, ethCommon.Hash{}, ErrNotFound + return 0, ethCommon.Hash{}, db.ErrNotFound } return common.BytesToUint32(l1InfoIndexBytes), ethCommon.BytesToHash(ger), nil diff --git a/reorgdetector/reorgdetector.go b/reorgdetector/reorgdetector.go index 7a995bac..496a844c 100644 --- a/reorgdetector/reorgdetector.go +++ b/reorgdetector/reorgdetector.go @@ -120,12 +120,20 @@ func (rd *ReorgDetector) detectReorgInTrackedList(ctx context.Context) error { errGroup errgroup.Group ) - rd.trackedBlocksLock.Lock() - defer rd.trackedBlocksLock.Unlock() + subscriberIDs := rd.getSubscriberIDs() - for id, hdrs := range rd.trackedBlocks { + 
for _, id := range subscriberIDs { id := id - hdrs := hdrs + + // This is done like this because of a possible deadlock + // between AddBlocksToTrack and detectReorgInTrackedList + rd.trackedBlocksLock.RLock() + hdrs, ok := rd.trackedBlocks[id] + rd.trackedBlocksLock.RUnlock() + + if !ok { + continue + } errGroup.Go(func() error { headers := hdrs.getSorted() @@ -136,7 +144,7 @@ func (rd *ReorgDetector) detectReorgInTrackedList(ctx context.Context) error { if !ok || currentHeader == nil { if currentHeader, err = rd.client.HeaderByNumber(ctx, new(big.Int).SetUint64(hdr.Num)); err != nil { headersCacheLock.Unlock() - return fmt.Errorf("failed to get the header: %w", err) + return fmt.Errorf("failed to get the header %d: %w", hdr.Num, err) } headersCache[hdr.Num] = currentHeader } diff --git a/reorgdetector/reorgdetector_db.go b/reorgdetector/reorgdetector_db.go index 3174cbc0..79bd6cd4 100644 --- a/reorgdetector/reorgdetector_db.go +++ b/reorgdetector/reorgdetector_db.go @@ -53,6 +53,10 @@ func (rd *ReorgDetector) getTrackedBlocks(ctx context.Context) (map[string]*head // saveTrackedBlock saves the tracked block for a subscriber in db and in memory func (rd *ReorgDetector) saveTrackedBlock(ctx context.Context, id string, b header) error { + rd.trackedBlocksLock.Lock() + + // this has to go after the lock, because of a possible deadlock + // between AddBlocksToTrack and detectReorgInTrackedList tx, err := rd.db.BeginRw(ctx) if err != nil { return err @@ -60,7 +64,6 @@ func (rd *ReorgDetector) saveTrackedBlock(ctx context.Context, id string, b head defer tx.Rollback() - rd.trackedBlocksLock.Lock() hdrs, ok := rd.trackedBlocks[id] if !ok || hdrs.isEmpty() { hdrs = newHeadersList(b) diff --git a/reorgdetector/reorgdetector_sub.go b/reorgdetector/reorgdetector_sub.go index 675a81c5..c5002a2b 100644 --- a/reorgdetector/reorgdetector_sub.go +++ b/reorgdetector/reorgdetector_sub.go @@ -34,9 +34,24 @@ func (rd *ReorgDetector) Subscribe(id string) (*Subscription, error) { func 
(rd *ReorgDetector) notifySubscriber(id string, startingBlock header) { // Notify subscriber about this particular reorg rd.subscriptionsLock.RLock() - if sub, ok := rd.subscriptions[id]; ok { + sub, ok := rd.subscriptions[id] + rd.subscriptionsLock.RUnlock() + + if ok { sub.ReorgedBlock <- startingBlock.Num <-sub.ReorgProcessed } - rd.subscriptionsLock.RUnlock() +} + +// getSubscriberIDs returns a list of subscriber IDs +func (rd *ReorgDetector) getSubscriberIDs() []string { + rd.subscriptionsLock.RLock() + defer rd.subscriptionsLock.RUnlock() + + ids := make([]string, 0, len(rd.subscriptions)) + for id := range rd.subscriptions { + ids = append(ids, id) + } + + return ids } diff --git a/rpc/bridge.go b/rpc/bridge.go index 23c67409..c769158e 100644 --- a/rpc/bridge.go +++ b/rpc/bridge.go @@ -2,18 +2,16 @@ package rpc import ( "context" + "errors" "fmt" "math/big" "time" "github.com/0xPolygon/cdk-rpc/rpc" - "github.com/0xPolygon/cdk/bridgesync" "github.com/0xPolygon/cdk/claimsponsor" - "github.com/0xPolygon/cdk/l1bridge2infoindexsync" - "github.com/0xPolygon/cdk/l1infotreesync" - "github.com/0xPolygon/cdk/lastgersync" "github.com/0xPolygon/cdk/log" - "github.com/ethereum/go-ethereum/common" + "github.com/0xPolygon/cdk/rpc/types" + tree "github.com/0xPolygon/cdk/tree/types" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/metric" ) @@ -23,22 +21,26 @@ const ( BRIDGE = "bridge" meterName = "github.com/0xPolygon/cdk/rpc" - zeroHex = "0x0" + zeroHex = "0x0" + binnarySearchDivider = 2 +) + +var ( + ErrNotOnL1Info = errors.New("this bridge has not been included on the L1 Info Tree yet") ) // BridgeEndpoints contains implementations for the "bridge" RPC endpoints type BridgeEndpoints struct { - logger *log.Logger - meter metric.Meter - readTimeout time.Duration - writeTimeout time.Duration - networkID uint32 - sponsor *claimsponsor.ClaimSponsor - l1InfoTree *l1infotreesync.L1InfoTreeSync - l1Bridge2Index *l1bridge2infoindexsync.L1Bridge2InfoIndexSync - injectedGERs 
*lastgersync.LastGERSync - bridgeL1 *bridgesync.BridgeSync - bridgeL2 *bridgesync.BridgeSync + logger *log.Logger + meter metric.Meter + readTimeout time.Duration + writeTimeout time.Duration + networkID uint32 + sponsor ClaimSponsorer + l1InfoTree L1InfoTreer + injectedGERs LastGERer + bridgeL1 Bridger + bridgeL2 Bridger } // NewBridgeEndpoints returns InteropEndpoints @@ -47,26 +49,24 @@ func NewBridgeEndpoints( writeTimeout time.Duration, readTimeout time.Duration, networkID uint32, - sponsor *claimsponsor.ClaimSponsor, - l1InfoTree *l1infotreesync.L1InfoTreeSync, - l1Bridge2Index *l1bridge2infoindexsync.L1Bridge2InfoIndexSync, - injectedGERs *lastgersync.LastGERSync, - bridgeL1 *bridgesync.BridgeSync, - bridgeL2 *bridgesync.BridgeSync, + sponsor ClaimSponsorer, + l1InfoTree L1InfoTreer, + injectedGERs LastGERer, + bridgeL1 Bridger, + bridgeL2 Bridger, ) *BridgeEndpoints { meter := otel.Meter(meterName) return &BridgeEndpoints{ - logger: logger, - meter: meter, - readTimeout: readTimeout, - writeTimeout: writeTimeout, - networkID: networkID, - sponsor: sponsor, - l1InfoTree: l1InfoTree, - l1Bridge2Index: l1Bridge2Index, - injectedGERs: injectedGERs, - bridgeL1: bridgeL1, - bridgeL2: bridgeL2, + logger: logger, + meter: meter, + readTimeout: readTimeout, + writeTimeout: writeTimeout, + networkID: networkID, + sponsor: sponsor, + l1InfoTree: l1InfoTree, + injectedGERs: injectedGERs, + bridgeL1: bridgeL1, + bridgeL2: bridgeL2, } } @@ -84,21 +84,26 @@ func (b *BridgeEndpoints) L1InfoTreeIndexForBridge(networkID uint32, depositCoun c.Add(ctx, 1) if networkID == 0 { - l1InfoTreeIndex, err := b.l1Bridge2Index.GetL1InfoTreeIndexByDepositCount(ctx, depositCount) + l1InfoTreeIndex, err := b.getFirstL1InfoTreeIndexForL1Bridge(ctx, depositCount) // TODO: special treatment of the error when not found, // as it's expected that it will take some time for the L1 Info tree to be updated if err != nil { - return zeroHex, rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed 
to get l1InfoTreeIndex, error: %s", err)) + return zeroHex, rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf( + "failed to get l1InfoTreeIndex for networkID %d and deposit count %d, error: %s", networkID, depositCount, err), + ) } return l1InfoTreeIndex, nil } if networkID == b.networkID { + l1InfoTreeIndex, err := b.getFirstL1InfoTreeIndexForL2Bridge(ctx, depositCount) // TODO: special treatment of the error when not found, // as it's expected that it will take some time for the L1 Info tree to be updated - return zeroHex, rpc.NewRPCError( - rpc.DefaultErrorCode, - "TODO: batchsync / certificatesync missing implementation", - ) + if err != nil { + return zeroHex, rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf( + "failed to get l1InfoTreeIndex for networkID %d and deposit count %d, error: %s", networkID, depositCount, err), + ) + } + return l1InfoTreeIndex, nil } return zeroHex, rpc.NewRPCError( rpc.DefaultErrorCode, @@ -143,12 +148,6 @@ func (b *BridgeEndpoints) InjectedInfoAfterIndex(networkID uint32, l1InfoTreeInd ) } -type ClaimProof struct { - ProofLocalExitRoot [32]common.Hash - ProofRollupExitRoot [32]common.Hash - L1InfoTreeLeaf l1infotreesync.L1InfoTreeLeaf -} - // ClaimProof returns the proofs needed to claim a bridge. NetworkID and depositCount refere to the bridge origin // while globalExitRoot should be already injected on the destination network. 
// This call needs to be done to a client of the same network were the bridge tx was sent @@ -172,7 +171,7 @@ func (b *BridgeEndpoints) ClaimProof( if err != nil { return zeroHex, rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get rollup exit proof, error: %s", err)) } - var proofLocalExitRoot [32]common.Hash + var proofLocalExitRoot tree.Proof switch { case networkID == 0: proofLocalExitRoot, err = b.bridgeL1.GetProof(ctx, depositCount, info.MainnetExitRoot) @@ -202,8 +201,7 @@ func (b *BridgeEndpoints) ClaimProof( fmt.Sprintf("this client does not support network %d", networkID), ) } - - return ClaimProof{ + return types.ClaimProof{ ProofLocalExitRoot: proofLocalExitRoot, ProofRollupExitRoot: proofRollupExitRoot, L1InfoTreeLeaf: *info, @@ -258,3 +256,111 @@ func (b *BridgeEndpoints) GetSponsoredClaimStatus(globalIndex *big.Int) (interfa } return claim.Status, nil } + +func (b *BridgeEndpoints) getFirstL1InfoTreeIndexForL1Bridge(ctx context.Context, depositCount uint32) (uint32, error) { + lastInfo, err := b.l1InfoTree.GetLastInfo() + if err != nil { + return 0, err + } + + root, err := b.bridgeL1.GetRootByLER(ctx, lastInfo.MainnetExitRoot) + if err != nil { + return 0, err + } + if root.Index < depositCount { + return 0, ErrNotOnL1Info + } + + firstInfo, err := b.l1InfoTree.GetFirstInfo() + if err != nil { + return 0, err + } + + // Binary search between the first and last blcoks where L1 info tree was updated. 
+ // Find the smallest l1 info tree index that is greater than depositCount and matches with + // a MER that is included on the l1 info tree + bestResult := lastInfo + lowerLimit := firstInfo.BlockNumber + upperLimit := lastInfo.BlockNumber + for lowerLimit <= upperLimit { + targetBlock := lowerLimit + ((upperLimit - lowerLimit) / binnarySearchDivider) + targetInfo, err := b.l1InfoTree.GetFirstInfoAfterBlock(targetBlock) + if err != nil { + return 0, err + } + root, err := b.bridgeL1.GetRootByLER(ctx, targetInfo.MainnetExitRoot) + if err != nil { + return 0, err + } + //nolint:gocritic // switch statement doesn't make sense here, I couldn't break + if root.Index < depositCount { + lowerLimit = targetBlock + 1 + } else if root.Index == depositCount { + bestResult = targetInfo + break + } else { + bestResult = targetInfo + upperLimit = targetBlock - 1 + } + } + + return bestResult.L1InfoTreeIndex, nil +} + +func (b *BridgeEndpoints) getFirstL1InfoTreeIndexForL2Bridge(ctx context.Context, depositCount uint32) (uint32, error) { + // NOTE: this code assumes that all the rollup exit roots + // (produced by the smart contract call verifyBatches / verifyBatchesTrustedAggregator) + // are included in the L1 info tree. As per the current implementation (smart contracts) of the protocol + // this is true. This could change in the future + lastVerified, err := b.l1InfoTree.GetLastVerifiedBatches(b.networkID - 1) + if err != nil { + return 0, err + } + + root, err := b.bridgeL2.GetRootByLER(ctx, lastVerified.ExitRoot) + if err != nil { + return 0, err + } + if root.Index < depositCount { + return 0, ErrNotOnL1Info + } + + firstVerified, err := b.l1InfoTree.GetFirstVerifiedBatches(b.networkID - 1) + if err != nil { + return 0, err + } + + // Binary search between the first and last blcoks where batches were verified. 
+ // Find the smallest deposit count that is greater than depositCount and matches with + // a LER that is verified + bestResult := lastVerified + lowerLimit := firstVerified.BlockNumber + upperLimit := lastVerified.BlockNumber + for lowerLimit <= upperLimit { + targetBlock := lowerLimit + ((upperLimit - lowerLimit) / binnarySearchDivider) + targetVerified, err := b.l1InfoTree.GetFirstVerifiedBatchesAfterBlock(b.networkID-1, targetBlock) + if err != nil { + return 0, err + } + root, err = b.bridgeL2.GetRootByLER(ctx, targetVerified.ExitRoot) + if err != nil { + return 0, err + } + //nolint:gocritic // switch statement doesn't make sense here, I couldn't break + if root.Index < depositCount { + lowerLimit = targetBlock + 1 + } else if root.Index == depositCount { + bestResult = targetVerified + break + } else { + bestResult = targetVerified + upperLimit = targetBlock - 1 + } + } + + info, err := b.l1InfoTree.GetFirstL1InfoWithRollupExitRoot(bestResult.RollupExitRoot) + if err != nil { + return 0, err + } + return info.L1InfoTreeIndex, nil +} diff --git a/rpc/bridge_interfaces.go b/rpc/bridge_interfaces.go new file mode 100644 index 00000000..84292e22 --- /dev/null +++ b/rpc/bridge_interfaces.go @@ -0,0 +1,40 @@ +package rpc + +import ( + "context" + "math/big" + + "github.com/0xPolygon/cdk/claimsponsor" + "github.com/0xPolygon/cdk/l1infotreesync" + tree "github.com/0xPolygon/cdk/tree/types" + "github.com/ethereum/go-ethereum/common" +) + +type Bridger interface { + GetProof(ctx context.Context, depositCount uint32, localExitRoot common.Hash) (tree.Proof, error) + GetRootByLER(ctx context.Context, ler common.Hash) (*tree.Root, error) +} + +type LastGERer interface { + GetFirstGERAfterL1InfoTreeIndex( + ctx context.Context, atOrAfterL1InfoTreeIndex uint32, + ) (injectedL1InfoTreeIndex uint32, ger common.Hash, err error) +} + +type L1InfoTreer interface { + GetInfoByIndex(ctx context.Context, index uint32) (*l1infotreesync.L1InfoTreeLeaf, error) + 
GetRollupExitTreeMerkleProof(ctx context.Context, networkID uint32, root common.Hash) (tree.Proof, error) + GetLocalExitRoot(ctx context.Context, networkID uint32, rollupExitRoot common.Hash) (common.Hash, error) + GetLastInfo() (*l1infotreesync.L1InfoTreeLeaf, error) + GetFirstInfo() (*l1infotreesync.L1InfoTreeLeaf, error) + GetFirstInfoAfterBlock(blockNum uint64) (*l1infotreesync.L1InfoTreeLeaf, error) + GetLastVerifiedBatches(rollupID uint32) (*l1infotreesync.VerifyBatches, error) + GetFirstVerifiedBatches(rollupID uint32) (*l1infotreesync.VerifyBatches, error) + GetFirstVerifiedBatchesAfterBlock(rollupID uint32, blockNum uint64) (*l1infotreesync.VerifyBatches, error) + GetFirstL1InfoWithRollupExitRoot(rollupExitRoot common.Hash) (*l1infotreesync.L1InfoTreeLeaf, error) +} + +type ClaimSponsorer interface { + AddClaimToQueue(ctx context.Context, claim *claimsponsor.Claim) error + GetClaim(ctx context.Context, globalIndex *big.Int) (*claimsponsor.Claim, error) +} diff --git a/rpc/bridge_test.go b/rpc/bridge_test.go new file mode 100644 index 00000000..9d461a50 --- /dev/null +++ b/rpc/bridge_test.go @@ -0,0 +1,443 @@ +package rpc + +import ( + "context" + "errors" + "testing" + + cdkCommon "github.com/0xPolygon/cdk/common" + "github.com/0xPolygon/cdk/l1infotreesync" + "github.com/0xPolygon/cdk/log" + mocks "github.com/0xPolygon/cdk/rpc/mocks" + tree "github.com/0xPolygon/cdk/tree/types" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestGetFirstL1InfoTreeIndexForL1Bridge(t *testing.T) { + type testCase struct { + description string + setupMocks func() + depositCount uint32 + expectedIndex uint32 + expectedErr error + } + ctx := context.Background() + b := newBridgeWithMocks(t) + fooErr := errors.New("foo") + firstL1Info := &l1infotreesync.L1InfoTreeLeaf{ + BlockNumber: 10, + MainnetExitRoot: common.HexToHash("alfa"), + } + lastL1Info := &l1infotreesync.L1InfoTreeLeaf{ + BlockNumber: 
1000, + MainnetExitRoot: common.HexToHash("alfa"), + } + mockHappyPath := func() { + // to make this work, assume that block number == l1 info tree index == deposit count + b.l1InfoTree.On("GetLastInfo"). + Return(lastL1Info, nil). + Once() + b.l1InfoTree.On("GetFirstInfo"). + Return(firstL1Info, nil). + Once() + infoAfterBlock := &l1infotreesync.L1InfoTreeLeaf{} + b.l1InfoTree.On("GetFirstInfoAfterBlock", mock.Anything). + Run(func(args mock.Arguments) { + blockNum, ok := args.Get(0).(uint64) + require.True(t, ok) + infoAfterBlock.L1InfoTreeIndex = uint32(blockNum) + infoAfterBlock.BlockNumber = blockNum + infoAfterBlock.MainnetExitRoot = common.BytesToHash(cdkCommon.Uint32ToBytes(uint32(blockNum))) + }). + Return(infoAfterBlock, nil) + rootByLER := &tree.Root{} + b.bridgeL1.On("GetRootByLER", ctx, mock.Anything). + Run(func(args mock.Arguments) { + ler, ok := args.Get(1).(common.Hash) + require.True(t, ok) + index := cdkCommon.BytesToUint32(ler.Bytes()[28:]) // hash is 32 bytes, uint32 is just 4 + if ler == common.HexToHash("alfa") { + index = uint32(lastL1Info.BlockNumber) + } + rootByLER.Index = index + }). + Return(rootByLER, nil) + } + testCases := []testCase{ + { + description: "error on GetLastInfo", + setupMocks: func() { + b.l1InfoTree.On("GetLastInfo"). + Return(nil, fooErr). + Once() + }, + depositCount: 11, + expectedIndex: 0, + expectedErr: fooErr, + }, + { + description: "error on first GetRootByLER", + setupMocks: func() { + b.l1InfoTree.On("GetLastInfo"). + Return(lastL1Info, nil). + Once() + b.bridgeL1.On("GetRootByLER", ctx, lastL1Info.MainnetExitRoot). + Return(&tree.Root{}, fooErr). + Once() + }, + depositCount: 11, + expectedIndex: 0, + expectedErr: fooErr, + }, + { + description: "not included yet", + setupMocks: func() { + b.l1InfoTree.On("GetLastInfo"). + Return(lastL1Info, nil). + Once() + b.bridgeL1.On("GetRootByLER", ctx, lastL1Info.MainnetExitRoot). + Return(&tree.Root{Index: 10}, nil). 
+ Once() + }, + depositCount: 11, + expectedIndex: 0, + expectedErr: ErrNotOnL1Info, + }, + { + description: "error on GetFirstInfo", + setupMocks: func() { + b.l1InfoTree.On("GetLastInfo"). + Return(lastL1Info, nil). + Once() + b.bridgeL1.On("GetRootByLER", ctx, lastL1Info.MainnetExitRoot). + Return(&tree.Root{Index: 13}, nil). + Once() + b.l1InfoTree.On("GetFirstInfo"). + Return(nil, fooErr). + Once() + }, + depositCount: 11, + expectedIndex: 0, + expectedErr: fooErr, + }, + { + description: "error on GetFirstInfoAfterBlock", + setupMocks: func() { + b.l1InfoTree.On("GetLastInfo"). + Return(lastL1Info, nil). + Once() + b.bridgeL1.On("GetRootByLER", ctx, lastL1Info.MainnetExitRoot). + Return(&tree.Root{Index: 13}, nil). + Once() + b.l1InfoTree.On("GetFirstInfo"). + Return(firstL1Info, nil). + Once() + b.l1InfoTree.On("GetFirstInfoAfterBlock", mock.Anything). + Return(nil, fooErr). + Once() + }, + depositCount: 11, + expectedIndex: 0, + expectedErr: fooErr, + }, + { + description: "error on GetRootByLER (inside binnary search)", + setupMocks: func() { + b.l1InfoTree.On("GetLastInfo"). + Return(lastL1Info, nil). + Once() + b.bridgeL1.On("GetRootByLER", ctx, lastL1Info.MainnetExitRoot). + Return(&tree.Root{Index: 13}, nil). + Once() + b.l1InfoTree.On("GetFirstInfo"). + Return(firstL1Info, nil). + Once() + b.l1InfoTree.On("GetFirstInfoAfterBlock", mock.Anything). + Return(firstL1Info, nil). + Once() + b.bridgeL1.On("GetRootByLER", ctx, mock.Anything). + Return(&tree.Root{}, fooErr). 
+ Once() + }, + depositCount: 11, + expectedIndex: 0, + expectedErr: fooErr, + }, + { + description: "happy path 1", + setupMocks: mockHappyPath, + depositCount: 10, + expectedIndex: 10, + expectedErr: nil, + }, + { + description: "happy path 2", + setupMocks: mockHappyPath, + depositCount: 11, + expectedIndex: 11, + expectedErr: nil, + }, + { + description: "happy path 3", + setupMocks: mockHappyPath, + depositCount: 333, + expectedIndex: 333, + expectedErr: nil, + }, + { + description: "happy path 4", + setupMocks: mockHappyPath, + depositCount: 420, + expectedIndex: 420, + expectedErr: nil, + }, + { + description: "happy path 5", + setupMocks: mockHappyPath, + depositCount: 69, + expectedIndex: 69, + expectedErr: nil, + }, + } + + for _, tc := range testCases { + log.Debugf("running test case: %s", tc.description) + tc.setupMocks() + actualIndex, err := b.bridge.getFirstL1InfoTreeIndexForL1Bridge(ctx, tc.depositCount) + require.Equal(t, tc.expectedErr, err) + require.Equal(t, tc.expectedIndex, actualIndex) + } +} + +func TestGetFirstL1InfoTreeIndexForL2Bridge(t *testing.T) { + type testCase struct { + description string + setupMocks func() + depositCount uint32 + expectedIndex uint32 + expectedErr error + } + ctx := context.Background() + b := newBridgeWithMocks(t) + fooErr := errors.New("foo") + firstVerified := &l1infotreesync.VerifyBatches{ + BlockNumber: 10, + ExitRoot: common.HexToHash("alfa"), + } + lastVerified := &l1infotreesync.VerifyBatches{ + BlockNumber: 1000, + ExitRoot: common.HexToHash("alfa"), + } + mockHappyPath := func() { + // to make this work, assume that block number == l1 info tree index == deposit count + b.l1InfoTree.On("GetLastVerifiedBatches", uint32(1)). + Return(lastVerified, nil). + Once() + b.l1InfoTree.On("GetFirstVerifiedBatches", uint32(1)). + Return(firstVerified, nil). + Once() + verifiedAfterBlock := &l1infotreesync.VerifyBatches{} + b.l1InfoTree.On("GetFirstVerifiedBatchesAfterBlock", uint32(1), mock.Anything). 
+ Run(func(args mock.Arguments) { + blockNum, ok := args.Get(1).(uint64) + require.True(t, ok) + verifiedAfterBlock.BlockNumber = blockNum + verifiedAfterBlock.ExitRoot = common.BytesToHash(cdkCommon.Uint32ToBytes(uint32(blockNum))) + verifiedAfterBlock.RollupExitRoot = common.BytesToHash(cdkCommon.Uint32ToBytes(uint32(blockNum))) + }). + Return(verifiedAfterBlock, nil) + rootByLER := &tree.Root{} + b.bridgeL2.On("GetRootByLER", ctx, mock.Anything). + Run(func(args mock.Arguments) { + ler, ok := args.Get(1).(common.Hash) + require.True(t, ok) + index := cdkCommon.BytesToUint32(ler.Bytes()[28:]) // hash is 32 bytes, uint32 is just 4 + if ler == common.HexToHash("alfa") { + index = uint32(lastVerified.BlockNumber) + } + rootByLER.Index = index + }). + Return(rootByLER, nil) + info := &l1infotreesync.L1InfoTreeLeaf{} + b.l1InfoTree.On("GetFirstL1InfoWithRollupExitRoot", mock.Anything). + Run(func(args mock.Arguments) { + exitRoot, ok := args.Get(0).(common.Hash) + require.True(t, ok) + index := cdkCommon.BytesToUint32(exitRoot.Bytes()[28:]) // hash is 32 bytes, uint32 is just 4 + info.L1InfoTreeIndex = index + }). + Return(info, nil). + Once() + } + testCases := []testCase{ + { + description: "error on GetLastVerified", + setupMocks: func() { + b.l1InfoTree.On("GetLastVerifiedBatches", uint32(1)). + Return(nil, fooErr). + Once() + }, + depositCount: 11, + expectedIndex: 0, + expectedErr: fooErr, + }, + { + description: "error on first GetRootByLER", + setupMocks: func() { + b.l1InfoTree.On("GetLastVerifiedBatches", uint32(1)). + Return(lastVerified, nil). + Once() + b.bridgeL2.On("GetRootByLER", ctx, lastVerified.ExitRoot). + Return(&tree.Root{}, fooErr). + Once() + }, + depositCount: 11, + expectedIndex: 0, + expectedErr: fooErr, + }, + { + description: "not included yet", + setupMocks: func() { + b.l1InfoTree.On("GetLastVerifiedBatches", uint32(1)). + Return(lastVerified, nil). + Once() + b.bridgeL2.On("GetRootByLER", ctx, lastVerified.ExitRoot). 
+ Return(&tree.Root{Index: 10}, nil). + Once() + }, + depositCount: 11, + expectedIndex: 0, + expectedErr: ErrNotOnL1Info, + }, + { + description: "error on GetFirstVerified", + setupMocks: func() { + b.l1InfoTree.On("GetLastVerifiedBatches", uint32(1)). + Return(lastVerified, nil). + Once() + b.bridgeL2.On("GetRootByLER", ctx, lastVerified.ExitRoot). + Return(&tree.Root{Index: 13}, nil). + Once() + b.l1InfoTree.On("GetFirstVerifiedBatches", uint32(1)). + Return(nil, fooErr). + Once() + }, + depositCount: 11, + expectedIndex: 0, + expectedErr: fooErr, + }, + { + description: "error on GetFirstVerifiedBatchesAfterBlock", + setupMocks: func() { + b.l1InfoTree.On("GetLastVerifiedBatches", uint32(1)). + Return(lastVerified, nil). + Once() + b.bridgeL2.On("GetRootByLER", ctx, lastVerified.ExitRoot). + Return(&tree.Root{Index: 13}, nil). + Once() + b.l1InfoTree.On("GetFirstVerifiedBatches", uint32(1)). + Return(firstVerified, nil). + Once() + b.l1InfoTree.On("GetFirstVerifiedBatchesAfterBlock", uint32(1), mock.Anything). + Return(nil, fooErr). + Once() + }, + depositCount: 11, + expectedIndex: 0, + expectedErr: fooErr, + }, + { + description: "error on GetRootByLER (inside binnary search)", + setupMocks: func() { + b.l1InfoTree.On("GetLastVerifiedBatches", uint32(1)). + Return(lastVerified, nil). + Once() + b.bridgeL2.On("GetRootByLER", ctx, lastVerified.ExitRoot). + Return(&tree.Root{Index: 13}, nil). + Once() + b.l1InfoTree.On("GetFirstVerifiedBatches", uint32(1)). + Return(firstVerified, nil). + Once() + b.l1InfoTree.On("GetFirstVerifiedBatchesAfterBlock", uint32(1), mock.Anything). + Return(firstVerified, nil). + Once() + b.bridgeL2.On("GetRootByLER", ctx, mock.Anything). + Return(&tree.Root{}, fooErr). 
+ Once() + }, + depositCount: 11, + expectedIndex: 0, + expectedErr: fooErr, + }, + { + description: "happy path 1", + setupMocks: mockHappyPath, + depositCount: 10, + expectedIndex: 10, + expectedErr: nil, + }, + { + description: "happy path 2", + setupMocks: mockHappyPath, + depositCount: 11, + expectedIndex: 11, + expectedErr: nil, + }, + { + description: "happy path 3", + setupMocks: mockHappyPath, + depositCount: 333, + expectedIndex: 333, + expectedErr: nil, + }, + { + description: "happy path 4", + setupMocks: mockHappyPath, + depositCount: 420, + expectedIndex: 420, + expectedErr: nil, + }, + { + description: "happy path 5", + setupMocks: mockHappyPath, + depositCount: 69, + expectedIndex: 69, + expectedErr: nil, + }, + } + + for _, tc := range testCases { + log.Debugf("running test case: %s", tc.description) + tc.setupMocks() + actualIndex, err := b.bridge.getFirstL1InfoTreeIndexForL2Bridge(ctx, tc.depositCount) + require.Equal(t, tc.expectedErr, err) + require.Equal(t, tc.expectedIndex, actualIndex) + } +} + +type bridgeWithMocks struct { + bridge *BridgeEndpoints + sponsor *mocks.ClaimSponsorer + l1InfoTree *mocks.L1InfoTreer + injectedGERs *mocks.LastGERer + bridgeL1 *mocks.Bridger + bridgeL2 *mocks.Bridger +} + +func newBridgeWithMocks(t *testing.T) bridgeWithMocks { + t.Helper() + b := bridgeWithMocks{ + sponsor: mocks.NewClaimSponsorer(t), + l1InfoTree: mocks.NewL1InfoTreer(t), + injectedGERs: mocks.NewLastGERer(t), + bridgeL1: mocks.NewBridger(t), + bridgeL2: mocks.NewBridger(t), + } + logger := log.WithFields("module", "bridgerpc") + b.bridge = NewBridgeEndpoints( + logger, 0, 0, 2, b.sponsor, b.l1InfoTree, b.injectedGERs, b.bridgeL1, b.bridgeL2, + ) + return b +} diff --git a/rpc/bridge_client.go b/rpc/client/bridge.go similarity index 95% rename from rpc/bridge_client.go rename to rpc/client/bridge.go index 04d57700..f67907f2 100644 --- a/rpc/bridge_client.go +++ b/rpc/client/bridge.go @@ -8,12 +8,13 @@ import ( "github.com/0xPolygon/cdk-rpc/rpc" 
"github.com/0xPolygon/cdk/claimsponsor" "github.com/0xPolygon/cdk/l1infotreesync" + "github.com/0xPolygon/cdk/rpc/types" ) type BridgeClientInterface interface { L1InfoTreeIndexForBridge(networkID uint32, depositCount uint32) (uint32, error) InjectedInfoAfterIndex(networkID uint32, l1InfoTreeIndex uint32) (*l1infotreesync.L1InfoTreeLeaf, error) - ClaimProof(networkID uint32, depositCount uint32, l1InfoTreeIndex uint32) (*ClaimProof, error) + ClaimProof(networkID uint32, depositCount uint32, l1InfoTreeIndex uint32) (*types.ClaimProof, error) SponsorClaim(claim claimsponsor.Claim) error GetSponsoredClaimStatus(globalIndex *big.Int) (claimsponsor.ClaimStatus, error) } @@ -53,7 +54,7 @@ func (c *Client) InjectedInfoAfterIndex( // ClaimProof returns the proofs needed to claim a bridge. NetworkID and depositCount refere to the bridge origin // while globalExitRoot should be already injected on the destination network. // This call needs to be done to a client of the same network were the bridge tx was sent -func (c *Client) ClaimProof(networkID uint32, depositCount uint32, l1InfoTreeIndex uint32) (*ClaimProof, error) { +func (c *Client) ClaimProof(networkID uint32, depositCount uint32, l1InfoTreeIndex uint32) (*types.ClaimProof, error) { response, err := rpc.JSONRPCCall(c.url, "bridge_claimProof", networkID, depositCount, l1InfoTreeIndex) if err != nil { return nil, err @@ -61,7 +62,7 @@ func (c *Client) ClaimProof(networkID uint32, depositCount uint32, l1InfoTreeInd if response.Error != nil { return nil, fmt.Errorf("%v %v", response.Error.Code, response.Error.Message) } - var result ClaimProof + var result types.ClaimProof return &result, json.Unmarshal(response.Result, &result) } diff --git a/rpc/client.go b/rpc/client/client.go similarity index 100% rename from rpc/client.go rename to rpc/client/client.go diff --git a/rpc/mocks/bridge_client_interface.go b/rpc/mocks/bridge_client_interface.go new file mode 100644 index 00000000..4c5200e4 --- /dev/null +++ 
b/rpc/mocks/bridge_client_interface.go @@ -0,0 +1,319 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + big "math/big" + + claimsponsor "github.com/0xPolygon/cdk/claimsponsor" + l1infotreesync "github.com/0xPolygon/cdk/l1infotreesync" + + mock "github.com/stretchr/testify/mock" + + types "github.com/0xPolygon/cdk/rpc/types" +) + +// BridgeClientInterface is an autogenerated mock type for the BridgeClientInterface type +type BridgeClientInterface struct { + mock.Mock +} + +type BridgeClientInterface_Expecter struct { + mock *mock.Mock +} + +func (_m *BridgeClientInterface) EXPECT() *BridgeClientInterface_Expecter { + return &BridgeClientInterface_Expecter{mock: &_m.Mock} +} + +// ClaimProof provides a mock function with given fields: networkID, depositCount, l1InfoTreeIndex +func (_m *BridgeClientInterface) ClaimProof(networkID uint32, depositCount uint32, l1InfoTreeIndex uint32) (*types.ClaimProof, error) { + ret := _m.Called(networkID, depositCount, l1InfoTreeIndex) + + if len(ret) == 0 { + panic("no return value specified for ClaimProof") + } + + var r0 *types.ClaimProof + var r1 error + if rf, ok := ret.Get(0).(func(uint32, uint32, uint32) (*types.ClaimProof, error)); ok { + return rf(networkID, depositCount, l1InfoTreeIndex) + } + if rf, ok := ret.Get(0).(func(uint32, uint32, uint32) *types.ClaimProof); ok { + r0 = rf(networkID, depositCount, l1InfoTreeIndex) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ClaimProof) + } + } + + if rf, ok := ret.Get(1).(func(uint32, uint32, uint32) error); ok { + r1 = rf(networkID, depositCount, l1InfoTreeIndex) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// BridgeClientInterface_ClaimProof_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ClaimProof' +type BridgeClientInterface_ClaimProof_Call struct { + *mock.Call +} + +// ClaimProof is a helper method to define mock.On call +// - networkID uint32 +// - depositCount uint32 +// 
- l1InfoTreeIndex uint32 +func (_e *BridgeClientInterface_Expecter) ClaimProof(networkID interface{}, depositCount interface{}, l1InfoTreeIndex interface{}) *BridgeClientInterface_ClaimProof_Call { + return &BridgeClientInterface_ClaimProof_Call{Call: _e.mock.On("ClaimProof", networkID, depositCount, l1InfoTreeIndex)} +} + +func (_c *BridgeClientInterface_ClaimProof_Call) Run(run func(networkID uint32, depositCount uint32, l1InfoTreeIndex uint32)) *BridgeClientInterface_ClaimProof_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(uint32), args[1].(uint32), args[2].(uint32)) + }) + return _c +} + +func (_c *BridgeClientInterface_ClaimProof_Call) Return(_a0 *types.ClaimProof, _a1 error) *BridgeClientInterface_ClaimProof_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *BridgeClientInterface_ClaimProof_Call) RunAndReturn(run func(uint32, uint32, uint32) (*types.ClaimProof, error)) *BridgeClientInterface_ClaimProof_Call { + _c.Call.Return(run) + return _c +} + +// GetSponsoredClaimStatus provides a mock function with given fields: globalIndex +func (_m *BridgeClientInterface) GetSponsoredClaimStatus(globalIndex *big.Int) (claimsponsor.ClaimStatus, error) { + ret := _m.Called(globalIndex) + + if len(ret) == 0 { + panic("no return value specified for GetSponsoredClaimStatus") + } + + var r0 claimsponsor.ClaimStatus + var r1 error + if rf, ok := ret.Get(0).(func(*big.Int) (claimsponsor.ClaimStatus, error)); ok { + return rf(globalIndex) + } + if rf, ok := ret.Get(0).(func(*big.Int) claimsponsor.ClaimStatus); ok { + r0 = rf(globalIndex) + } else { + r0 = ret.Get(0).(claimsponsor.ClaimStatus) + } + + if rf, ok := ret.Get(1).(func(*big.Int) error); ok { + r1 = rf(globalIndex) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// BridgeClientInterface_GetSponsoredClaimStatus_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetSponsoredClaimStatus' +type 
BridgeClientInterface_GetSponsoredClaimStatus_Call struct { + *mock.Call +} + +// GetSponsoredClaimStatus is a helper method to define mock.On call +// - globalIndex *big.Int +func (_e *BridgeClientInterface_Expecter) GetSponsoredClaimStatus(globalIndex interface{}) *BridgeClientInterface_GetSponsoredClaimStatus_Call { + return &BridgeClientInterface_GetSponsoredClaimStatus_Call{Call: _e.mock.On("GetSponsoredClaimStatus", globalIndex)} +} + +func (_c *BridgeClientInterface_GetSponsoredClaimStatus_Call) Run(run func(globalIndex *big.Int)) *BridgeClientInterface_GetSponsoredClaimStatus_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(*big.Int)) + }) + return _c +} + +func (_c *BridgeClientInterface_GetSponsoredClaimStatus_Call) Return(_a0 claimsponsor.ClaimStatus, _a1 error) *BridgeClientInterface_GetSponsoredClaimStatus_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *BridgeClientInterface_GetSponsoredClaimStatus_Call) RunAndReturn(run func(*big.Int) (claimsponsor.ClaimStatus, error)) *BridgeClientInterface_GetSponsoredClaimStatus_Call { + _c.Call.Return(run) + return _c +} + +// InjectedInfoAfterIndex provides a mock function with given fields: networkID, l1InfoTreeIndex +func (_m *BridgeClientInterface) InjectedInfoAfterIndex(networkID uint32, l1InfoTreeIndex uint32) (*l1infotreesync.L1InfoTreeLeaf, error) { + ret := _m.Called(networkID, l1InfoTreeIndex) + + if len(ret) == 0 { + panic("no return value specified for InjectedInfoAfterIndex") + } + + var r0 *l1infotreesync.L1InfoTreeLeaf + var r1 error + if rf, ok := ret.Get(0).(func(uint32, uint32) (*l1infotreesync.L1InfoTreeLeaf, error)); ok { + return rf(networkID, l1InfoTreeIndex) + } + if rf, ok := ret.Get(0).(func(uint32, uint32) *l1infotreesync.L1InfoTreeLeaf); ok { + r0 = rf(networkID, l1InfoTreeIndex) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*l1infotreesync.L1InfoTreeLeaf) + } + } + + if rf, ok := ret.Get(1).(func(uint32, uint32) error); ok { + r1 = rf(networkID, 
l1InfoTreeIndex) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// BridgeClientInterface_InjectedInfoAfterIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'InjectedInfoAfterIndex' +type BridgeClientInterface_InjectedInfoAfterIndex_Call struct { + *mock.Call +} + +// InjectedInfoAfterIndex is a helper method to define mock.On call +// - networkID uint32 +// - l1InfoTreeIndex uint32 +func (_e *BridgeClientInterface_Expecter) InjectedInfoAfterIndex(networkID interface{}, l1InfoTreeIndex interface{}) *BridgeClientInterface_InjectedInfoAfterIndex_Call { + return &BridgeClientInterface_InjectedInfoAfterIndex_Call{Call: _e.mock.On("InjectedInfoAfterIndex", networkID, l1InfoTreeIndex)} +} + +func (_c *BridgeClientInterface_InjectedInfoAfterIndex_Call) Run(run func(networkID uint32, l1InfoTreeIndex uint32)) *BridgeClientInterface_InjectedInfoAfterIndex_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(uint32), args[1].(uint32)) + }) + return _c +} + +func (_c *BridgeClientInterface_InjectedInfoAfterIndex_Call) Return(_a0 *l1infotreesync.L1InfoTreeLeaf, _a1 error) *BridgeClientInterface_InjectedInfoAfterIndex_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *BridgeClientInterface_InjectedInfoAfterIndex_Call) RunAndReturn(run func(uint32, uint32) (*l1infotreesync.L1InfoTreeLeaf, error)) *BridgeClientInterface_InjectedInfoAfterIndex_Call { + _c.Call.Return(run) + return _c +} + +// L1InfoTreeIndexForBridge provides a mock function with given fields: networkID, depositCount +func (_m *BridgeClientInterface) L1InfoTreeIndexForBridge(networkID uint32, depositCount uint32) (uint32, error) { + ret := _m.Called(networkID, depositCount) + + if len(ret) == 0 { + panic("no return value specified for L1InfoTreeIndexForBridge") + } + + var r0 uint32 + var r1 error + if rf, ok := ret.Get(0).(func(uint32, uint32) (uint32, error)); ok { + return rf(networkID, depositCount) + } + if rf, ok := 
ret.Get(0).(func(uint32, uint32) uint32); ok { + r0 = rf(networkID, depositCount) + } else { + r0 = ret.Get(0).(uint32) + } + + if rf, ok := ret.Get(1).(func(uint32, uint32) error); ok { + r1 = rf(networkID, depositCount) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// BridgeClientInterface_L1InfoTreeIndexForBridge_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'L1InfoTreeIndexForBridge' +type BridgeClientInterface_L1InfoTreeIndexForBridge_Call struct { + *mock.Call +} + +// L1InfoTreeIndexForBridge is a helper method to define mock.On call +// - networkID uint32 +// - depositCount uint32 +func (_e *BridgeClientInterface_Expecter) L1InfoTreeIndexForBridge(networkID interface{}, depositCount interface{}) *BridgeClientInterface_L1InfoTreeIndexForBridge_Call { + return &BridgeClientInterface_L1InfoTreeIndexForBridge_Call{Call: _e.mock.On("L1InfoTreeIndexForBridge", networkID, depositCount)} +} + +func (_c *BridgeClientInterface_L1InfoTreeIndexForBridge_Call) Run(run func(networkID uint32, depositCount uint32)) *BridgeClientInterface_L1InfoTreeIndexForBridge_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(uint32), args[1].(uint32)) + }) + return _c +} + +func (_c *BridgeClientInterface_L1InfoTreeIndexForBridge_Call) Return(_a0 uint32, _a1 error) *BridgeClientInterface_L1InfoTreeIndexForBridge_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *BridgeClientInterface_L1InfoTreeIndexForBridge_Call) RunAndReturn(run func(uint32, uint32) (uint32, error)) *BridgeClientInterface_L1InfoTreeIndexForBridge_Call { + _c.Call.Return(run) + return _c +} + +// SponsorClaim provides a mock function with given fields: claim +func (_m *BridgeClientInterface) SponsorClaim(claim claimsponsor.Claim) error { + ret := _m.Called(claim) + + if len(ret) == 0 { + panic("no return value specified for SponsorClaim") + } + + var r0 error + if rf, ok := ret.Get(0).(func(claimsponsor.Claim) error); ok { + r0 = 
rf(claim) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// BridgeClientInterface_SponsorClaim_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SponsorClaim' +type BridgeClientInterface_SponsorClaim_Call struct { + *mock.Call +} + +// SponsorClaim is a helper method to define mock.On call +// - claim claimsponsor.Claim +func (_e *BridgeClientInterface_Expecter) SponsorClaim(claim interface{}) *BridgeClientInterface_SponsorClaim_Call { + return &BridgeClientInterface_SponsorClaim_Call{Call: _e.mock.On("SponsorClaim", claim)} +} + +func (_c *BridgeClientInterface_SponsorClaim_Call) Run(run func(claim claimsponsor.Claim)) *BridgeClientInterface_SponsorClaim_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(claimsponsor.Claim)) + }) + return _c +} + +func (_c *BridgeClientInterface_SponsorClaim_Call) Return(_a0 error) *BridgeClientInterface_SponsorClaim_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *BridgeClientInterface_SponsorClaim_Call) RunAndReturn(run func(claimsponsor.Claim) error) *BridgeClientInterface_SponsorClaim_Call { + _c.Call.Return(run) + return _c +} + +// NewBridgeClientInterface creates a new instance of BridgeClientInterface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewBridgeClientInterface(t interface { + mock.TestingT + Cleanup(func()) +}) *BridgeClientInterface { + mock := &BridgeClientInterface{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/rpc/mocks/bridger.go b/rpc/mocks/bridger.go new file mode 100644 index 00000000..d0344c29 --- /dev/null +++ b/rpc/mocks/bridger.go @@ -0,0 +1,159 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + common "github.com/ethereum/go-ethereum/common" + + mock "github.com/stretchr/testify/mock" + + types "github.com/0xPolygon/cdk/tree/types" +) + +// Bridger is an autogenerated mock type for the Bridger type +type Bridger struct { + mock.Mock +} + +type Bridger_Expecter struct { + mock *mock.Mock +} + +func (_m *Bridger) EXPECT() *Bridger_Expecter { + return &Bridger_Expecter{mock: &_m.Mock} +} + +// GetProof provides a mock function with given fields: ctx, depositCount, localExitRoot +func (_m *Bridger) GetProof(ctx context.Context, depositCount uint32, localExitRoot common.Hash) (types.Proof, error) { + ret := _m.Called(ctx, depositCount, localExitRoot) + + if len(ret) == 0 { + panic("no return value specified for GetProof") + } + + var r0 types.Proof + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint32, common.Hash) (types.Proof, error)); ok { + return rf(ctx, depositCount, localExitRoot) + } + if rf, ok := ret.Get(0).(func(context.Context, uint32, common.Hash) types.Proof); ok { + r0 = rf(ctx, depositCount, localExitRoot) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(types.Proof) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint32, common.Hash) error); ok { + r1 = rf(ctx, depositCount, localExitRoot) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Bridger_GetProof_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetProof' +type Bridger_GetProof_Call struct { + *mock.Call +} + +// GetProof is a helper method to define mock.On call +// - ctx context.Context +// - depositCount uint32 +// - localExitRoot common.Hash +func (_e *Bridger_Expecter) GetProof(ctx interface{}, depositCount interface{}, localExitRoot interface{}) *Bridger_GetProof_Call { + return &Bridger_GetProof_Call{Call: _e.mock.On("GetProof", ctx, depositCount, localExitRoot)} +} + +func (_c *Bridger_GetProof_Call) Run(run func(ctx context.Context, 
depositCount uint32, localExitRoot common.Hash)) *Bridger_GetProof_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint32), args[2].(common.Hash)) + }) + return _c +} + +func (_c *Bridger_GetProof_Call) Return(_a0 types.Proof, _a1 error) *Bridger_GetProof_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Bridger_GetProof_Call) RunAndReturn(run func(context.Context, uint32, common.Hash) (types.Proof, error)) *Bridger_GetProof_Call { + _c.Call.Return(run) + return _c +} + +// GetRootByLER provides a mock function with given fields: ctx, ler +func (_m *Bridger) GetRootByLER(ctx context.Context, ler common.Hash) (*types.Root, error) { + ret := _m.Called(ctx, ler) + + if len(ret) == 0 { + panic("no return value specified for GetRootByLER") + } + + var r0 *types.Root + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (*types.Root, error)); ok { + return rf(ctx, ler) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) *types.Root); ok { + r0 = rf(ctx, ler) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Root) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { + r1 = rf(ctx, ler) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Bridger_GetRootByLER_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetRootByLER' +type Bridger_GetRootByLER_Call struct { + *mock.Call +} + +// GetRootByLER is a helper method to define mock.On call +// - ctx context.Context +// - ler common.Hash +func (_e *Bridger_Expecter) GetRootByLER(ctx interface{}, ler interface{}) *Bridger_GetRootByLER_Call { + return &Bridger_GetRootByLER_Call{Call: _e.mock.On("GetRootByLER", ctx, ler)} +} + +func (_c *Bridger_GetRootByLER_Call) Run(run func(ctx context.Context, ler common.Hash)) *Bridger_GetRootByLER_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(common.Hash)) + }) 
+ return _c +} + +func (_c *Bridger_GetRootByLER_Call) Return(_a0 *types.Root, _a1 error) *Bridger_GetRootByLER_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Bridger_GetRootByLER_Call) RunAndReturn(run func(context.Context, common.Hash) (*types.Root, error)) *Bridger_GetRootByLER_Call { + _c.Call.Return(run) + return _c +} + +// NewBridger creates a new instance of Bridger. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewBridger(t interface { + mock.TestingT + Cleanup(func()) +}) *Bridger { + mock := &Bridger{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/rpc/mocks/claim_sponsorer.go b/rpc/mocks/claim_sponsorer.go new file mode 100644 index 00000000..59530955 --- /dev/null +++ b/rpc/mocks/claim_sponsorer.go @@ -0,0 +1,145 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + context "context" + big "math/big" + + claimsponsor "github.com/0xPolygon/cdk/claimsponsor" + + mock "github.com/stretchr/testify/mock" +) + +// ClaimSponsorer is an autogenerated mock type for the ClaimSponsorer type +type ClaimSponsorer struct { + mock.Mock +} + +type ClaimSponsorer_Expecter struct { + mock *mock.Mock +} + +func (_m *ClaimSponsorer) EXPECT() *ClaimSponsorer_Expecter { + return &ClaimSponsorer_Expecter{mock: &_m.Mock} +} + +// AddClaimToQueue provides a mock function with given fields: ctx, claim +func (_m *ClaimSponsorer) AddClaimToQueue(ctx context.Context, claim *claimsponsor.Claim) error { + ret := _m.Called(ctx, claim) + + if len(ret) == 0 { + panic("no return value specified for AddClaimToQueue") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *claimsponsor.Claim) error); ok { + r0 = rf(ctx, claim) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ClaimSponsorer_AddClaimToQueue_Call is a *mock.Call that shadows Run/Return 
methods with type explicit version for method 'AddClaimToQueue' +type ClaimSponsorer_AddClaimToQueue_Call struct { + *mock.Call +} + +// AddClaimToQueue is a helper method to define mock.On call +// - ctx context.Context +// - claim *claimsponsor.Claim +func (_e *ClaimSponsorer_Expecter) AddClaimToQueue(ctx interface{}, claim interface{}) *ClaimSponsorer_AddClaimToQueue_Call { + return &ClaimSponsorer_AddClaimToQueue_Call{Call: _e.mock.On("AddClaimToQueue", ctx, claim)} +} + +func (_c *ClaimSponsorer_AddClaimToQueue_Call) Run(run func(ctx context.Context, claim *claimsponsor.Claim)) *ClaimSponsorer_AddClaimToQueue_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*claimsponsor.Claim)) + }) + return _c +} + +func (_c *ClaimSponsorer_AddClaimToQueue_Call) Return(_a0 error) *ClaimSponsorer_AddClaimToQueue_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *ClaimSponsorer_AddClaimToQueue_Call) RunAndReturn(run func(context.Context, *claimsponsor.Claim) error) *ClaimSponsorer_AddClaimToQueue_Call { + _c.Call.Return(run) + return _c +} + +// GetClaim provides a mock function with given fields: ctx, globalIndex +func (_m *ClaimSponsorer) GetClaim(ctx context.Context, globalIndex *big.Int) (*claimsponsor.Claim, error) { + ret := _m.Called(ctx, globalIndex) + + if len(ret) == 0 { + panic("no return value specified for GetClaim") + } + + var r0 *claimsponsor.Claim + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) (*claimsponsor.Claim, error)); ok { + return rf(ctx, globalIndex) + } + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) *claimsponsor.Claim); ok { + r0 = rf(ctx, globalIndex) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*claimsponsor.Claim) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *big.Int) error); ok { + r1 = rf(ctx, globalIndex) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ClaimSponsorer_GetClaim_Call is a *mock.Call that shadows Run/Return 
methods with type explicit version for method 'GetClaim' +type ClaimSponsorer_GetClaim_Call struct { + *mock.Call +} + +// GetClaim is a helper method to define mock.On call +// - ctx context.Context +// - globalIndex *big.Int +func (_e *ClaimSponsorer_Expecter) GetClaim(ctx interface{}, globalIndex interface{}) *ClaimSponsorer_GetClaim_Call { + return &ClaimSponsorer_GetClaim_Call{Call: _e.mock.On("GetClaim", ctx, globalIndex)} +} + +func (_c *ClaimSponsorer_GetClaim_Call) Run(run func(ctx context.Context, globalIndex *big.Int)) *ClaimSponsorer_GetClaim_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*big.Int)) + }) + return _c +} + +func (_c *ClaimSponsorer_GetClaim_Call) Return(_a0 *claimsponsor.Claim, _a1 error) *ClaimSponsorer_GetClaim_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ClaimSponsorer_GetClaim_Call) RunAndReturn(run func(context.Context, *big.Int) (*claimsponsor.Claim, error)) *ClaimSponsorer_GetClaim_Call { + _c.Call.Return(run) + return _c +} + +// NewClaimSponsorer creates a new instance of ClaimSponsorer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewClaimSponsorer(t interface { + mock.TestingT + Cleanup(func()) +}) *ClaimSponsorer { + mock := &ClaimSponsorer{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/rpc/mocks/client_factory_interface.go b/rpc/mocks/client_factory_interface.go new file mode 100644 index 00000000..aca7aed0 --- /dev/null +++ b/rpc/mocks/client_factory_interface.go @@ -0,0 +1,83 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mocks + +import ( + rpc "github.com/0xPolygon/cdk/rpc/client" + mock "github.com/stretchr/testify/mock" +) + +// ClientFactoryInterface is an autogenerated mock type for the ClientFactoryInterface type +type ClientFactoryInterface struct { + mock.Mock +} + +type ClientFactoryInterface_Expecter struct { + mock *mock.Mock +} + +func (_m *ClientFactoryInterface) EXPECT() *ClientFactoryInterface_Expecter { + return &ClientFactoryInterface_Expecter{mock: &_m.Mock} +} + +// NewClient provides a mock function with given fields: url +func (_m *ClientFactoryInterface) NewClient(url string) rpc.ClientInterface { + ret := _m.Called(url) + + if len(ret) == 0 { + panic("no return value specified for NewClient") + } + + var r0 rpc.ClientInterface + if rf, ok := ret.Get(0).(func(string) rpc.ClientInterface); ok { + r0 = rf(url) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(rpc.ClientInterface) + } + } + + return r0 +} + +// ClientFactoryInterface_NewClient_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NewClient' +type ClientFactoryInterface_NewClient_Call struct { + *mock.Call +} + +// NewClient is a helper method to define mock.On call +// - url string +func (_e *ClientFactoryInterface_Expecter) NewClient(url interface{}) *ClientFactoryInterface_NewClient_Call { + return &ClientFactoryInterface_NewClient_Call{Call: _e.mock.On("NewClient", url)} +} + +func (_c *ClientFactoryInterface_NewClient_Call) Run(run func(url string)) *ClientFactoryInterface_NewClient_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string)) + }) + return _c +} + +func (_c *ClientFactoryInterface_NewClient_Call) Return(_a0 rpc.ClientInterface) *ClientFactoryInterface_NewClient_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *ClientFactoryInterface_NewClient_Call) RunAndReturn(run func(string) rpc.ClientInterface) *ClientFactoryInterface_NewClient_Call { + _c.Call.Return(run) + return _c +} + +// 
NewClientFactoryInterface creates a new instance of ClientFactoryInterface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewClientFactoryInterface(t interface { + mock.TestingT + Cleanup(func()) +}) *ClientFactoryInterface { + mock := &ClientFactoryInterface{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/rpc/mocks/client_interface.go b/rpc/mocks/client_interface.go new file mode 100644 index 00000000..28b87775 --- /dev/null +++ b/rpc/mocks/client_interface.go @@ -0,0 +1,319 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + big "math/big" + + claimsponsor "github.com/0xPolygon/cdk/claimsponsor" + l1infotreesync "github.com/0xPolygon/cdk/l1infotreesync" + + mock "github.com/stretchr/testify/mock" + + types "github.com/0xPolygon/cdk/rpc/types" +) + +// ClientInterface is an autogenerated mock type for the ClientInterface type +type ClientInterface struct { + mock.Mock +} + +type ClientInterface_Expecter struct { + mock *mock.Mock +} + +func (_m *ClientInterface) EXPECT() *ClientInterface_Expecter { + return &ClientInterface_Expecter{mock: &_m.Mock} +} + +// ClaimProof provides a mock function with given fields: networkID, depositCount, l1InfoTreeIndex +func (_m *ClientInterface) ClaimProof(networkID uint32, depositCount uint32, l1InfoTreeIndex uint32) (*types.ClaimProof, error) { + ret := _m.Called(networkID, depositCount, l1InfoTreeIndex) + + if len(ret) == 0 { + panic("no return value specified for ClaimProof") + } + + var r0 *types.ClaimProof + var r1 error + if rf, ok := ret.Get(0).(func(uint32, uint32, uint32) (*types.ClaimProof, error)); ok { + return rf(networkID, depositCount, l1InfoTreeIndex) + } + if rf, ok := ret.Get(0).(func(uint32, uint32, uint32) *types.ClaimProof); ok { + r0 = rf(networkID, depositCount, l1InfoTreeIndex) + } else { + if 
ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ClaimProof) + } + } + + if rf, ok := ret.Get(1).(func(uint32, uint32, uint32) error); ok { + r1 = rf(networkID, depositCount, l1InfoTreeIndex) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ClientInterface_ClaimProof_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ClaimProof' +type ClientInterface_ClaimProof_Call struct { + *mock.Call +} + +// ClaimProof is a helper method to define mock.On call +// - networkID uint32 +// - depositCount uint32 +// - l1InfoTreeIndex uint32 +func (_e *ClientInterface_Expecter) ClaimProof(networkID interface{}, depositCount interface{}, l1InfoTreeIndex interface{}) *ClientInterface_ClaimProof_Call { + return &ClientInterface_ClaimProof_Call{Call: _e.mock.On("ClaimProof", networkID, depositCount, l1InfoTreeIndex)} +} + +func (_c *ClientInterface_ClaimProof_Call) Run(run func(networkID uint32, depositCount uint32, l1InfoTreeIndex uint32)) *ClientInterface_ClaimProof_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(uint32), args[1].(uint32), args[2].(uint32)) + }) + return _c +} + +func (_c *ClientInterface_ClaimProof_Call) Return(_a0 *types.ClaimProof, _a1 error) *ClientInterface_ClaimProof_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ClientInterface_ClaimProof_Call) RunAndReturn(run func(uint32, uint32, uint32) (*types.ClaimProof, error)) *ClientInterface_ClaimProof_Call { + _c.Call.Return(run) + return _c +} + +// GetSponsoredClaimStatus provides a mock function with given fields: globalIndex +func (_m *ClientInterface) GetSponsoredClaimStatus(globalIndex *big.Int) (claimsponsor.ClaimStatus, error) { + ret := _m.Called(globalIndex) + + if len(ret) == 0 { + panic("no return value specified for GetSponsoredClaimStatus") + } + + var r0 claimsponsor.ClaimStatus + var r1 error + if rf, ok := ret.Get(0).(func(*big.Int) (claimsponsor.ClaimStatus, error)); ok { + return rf(globalIndex) + } + if rf, ok := 
ret.Get(0).(func(*big.Int) claimsponsor.ClaimStatus); ok { + r0 = rf(globalIndex) + } else { + r0 = ret.Get(0).(claimsponsor.ClaimStatus) + } + + if rf, ok := ret.Get(1).(func(*big.Int) error); ok { + r1 = rf(globalIndex) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ClientInterface_GetSponsoredClaimStatus_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetSponsoredClaimStatus' +type ClientInterface_GetSponsoredClaimStatus_Call struct { + *mock.Call +} + +// GetSponsoredClaimStatus is a helper method to define mock.On call +// - globalIndex *big.Int +func (_e *ClientInterface_Expecter) GetSponsoredClaimStatus(globalIndex interface{}) *ClientInterface_GetSponsoredClaimStatus_Call { + return &ClientInterface_GetSponsoredClaimStatus_Call{Call: _e.mock.On("GetSponsoredClaimStatus", globalIndex)} +} + +func (_c *ClientInterface_GetSponsoredClaimStatus_Call) Run(run func(globalIndex *big.Int)) *ClientInterface_GetSponsoredClaimStatus_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(*big.Int)) + }) + return _c +} + +func (_c *ClientInterface_GetSponsoredClaimStatus_Call) Return(_a0 claimsponsor.ClaimStatus, _a1 error) *ClientInterface_GetSponsoredClaimStatus_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ClientInterface_GetSponsoredClaimStatus_Call) RunAndReturn(run func(*big.Int) (claimsponsor.ClaimStatus, error)) *ClientInterface_GetSponsoredClaimStatus_Call { + _c.Call.Return(run) + return _c +} + +// InjectedInfoAfterIndex provides a mock function with given fields: networkID, l1InfoTreeIndex +func (_m *ClientInterface) InjectedInfoAfterIndex(networkID uint32, l1InfoTreeIndex uint32) (*l1infotreesync.L1InfoTreeLeaf, error) { + ret := _m.Called(networkID, l1InfoTreeIndex) + + if len(ret) == 0 { + panic("no return value specified for InjectedInfoAfterIndex") + } + + var r0 *l1infotreesync.L1InfoTreeLeaf + var r1 error + if rf, ok := ret.Get(0).(func(uint32, uint32) 
(*l1infotreesync.L1InfoTreeLeaf, error)); ok { + return rf(networkID, l1InfoTreeIndex) + } + if rf, ok := ret.Get(0).(func(uint32, uint32) *l1infotreesync.L1InfoTreeLeaf); ok { + r0 = rf(networkID, l1InfoTreeIndex) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*l1infotreesync.L1InfoTreeLeaf) + } + } + + if rf, ok := ret.Get(1).(func(uint32, uint32) error); ok { + r1 = rf(networkID, l1InfoTreeIndex) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ClientInterface_InjectedInfoAfterIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'InjectedInfoAfterIndex' +type ClientInterface_InjectedInfoAfterIndex_Call struct { + *mock.Call +} + +// InjectedInfoAfterIndex is a helper method to define mock.On call +// - networkID uint32 +// - l1InfoTreeIndex uint32 +func (_e *ClientInterface_Expecter) InjectedInfoAfterIndex(networkID interface{}, l1InfoTreeIndex interface{}) *ClientInterface_InjectedInfoAfterIndex_Call { + return &ClientInterface_InjectedInfoAfterIndex_Call{Call: _e.mock.On("InjectedInfoAfterIndex", networkID, l1InfoTreeIndex)} +} + +func (_c *ClientInterface_InjectedInfoAfterIndex_Call) Run(run func(networkID uint32, l1InfoTreeIndex uint32)) *ClientInterface_InjectedInfoAfterIndex_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(uint32), args[1].(uint32)) + }) + return _c +} + +func (_c *ClientInterface_InjectedInfoAfterIndex_Call) Return(_a0 *l1infotreesync.L1InfoTreeLeaf, _a1 error) *ClientInterface_InjectedInfoAfterIndex_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ClientInterface_InjectedInfoAfterIndex_Call) RunAndReturn(run func(uint32, uint32) (*l1infotreesync.L1InfoTreeLeaf, error)) *ClientInterface_InjectedInfoAfterIndex_Call { + _c.Call.Return(run) + return _c +} + +// L1InfoTreeIndexForBridge provides a mock function with given fields: networkID, depositCount +func (_m *ClientInterface) L1InfoTreeIndexForBridge(networkID uint32, depositCount uint32) 
(uint32, error) { + ret := _m.Called(networkID, depositCount) + + if len(ret) == 0 { + panic("no return value specified for L1InfoTreeIndexForBridge") + } + + var r0 uint32 + var r1 error + if rf, ok := ret.Get(0).(func(uint32, uint32) (uint32, error)); ok { + return rf(networkID, depositCount) + } + if rf, ok := ret.Get(0).(func(uint32, uint32) uint32); ok { + r0 = rf(networkID, depositCount) + } else { + r0 = ret.Get(0).(uint32) + } + + if rf, ok := ret.Get(1).(func(uint32, uint32) error); ok { + r1 = rf(networkID, depositCount) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ClientInterface_L1InfoTreeIndexForBridge_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'L1InfoTreeIndexForBridge' +type ClientInterface_L1InfoTreeIndexForBridge_Call struct { + *mock.Call +} + +// L1InfoTreeIndexForBridge is a helper method to define mock.On call +// - networkID uint32 +// - depositCount uint32 +func (_e *ClientInterface_Expecter) L1InfoTreeIndexForBridge(networkID interface{}, depositCount interface{}) *ClientInterface_L1InfoTreeIndexForBridge_Call { + return &ClientInterface_L1InfoTreeIndexForBridge_Call{Call: _e.mock.On("L1InfoTreeIndexForBridge", networkID, depositCount)} +} + +func (_c *ClientInterface_L1InfoTreeIndexForBridge_Call) Run(run func(networkID uint32, depositCount uint32)) *ClientInterface_L1InfoTreeIndexForBridge_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(uint32), args[1].(uint32)) + }) + return _c +} + +func (_c *ClientInterface_L1InfoTreeIndexForBridge_Call) Return(_a0 uint32, _a1 error) *ClientInterface_L1InfoTreeIndexForBridge_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ClientInterface_L1InfoTreeIndexForBridge_Call) RunAndReturn(run func(uint32, uint32) (uint32, error)) *ClientInterface_L1InfoTreeIndexForBridge_Call { + _c.Call.Return(run) + return _c +} + +// SponsorClaim provides a mock function with given fields: claim +func (_m *ClientInterface) 
SponsorClaim(claim claimsponsor.Claim) error { + ret := _m.Called(claim) + + if len(ret) == 0 { + panic("no return value specified for SponsorClaim") + } + + var r0 error + if rf, ok := ret.Get(0).(func(claimsponsor.Claim) error); ok { + r0 = rf(claim) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ClientInterface_SponsorClaim_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SponsorClaim' +type ClientInterface_SponsorClaim_Call struct { + *mock.Call +} + +// SponsorClaim is a helper method to define mock.On call +// - claim claimsponsor.Claim +func (_e *ClientInterface_Expecter) SponsorClaim(claim interface{}) *ClientInterface_SponsorClaim_Call { + return &ClientInterface_SponsorClaim_Call{Call: _e.mock.On("SponsorClaim", claim)} +} + +func (_c *ClientInterface_SponsorClaim_Call) Run(run func(claim claimsponsor.Claim)) *ClientInterface_SponsorClaim_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(claimsponsor.Claim)) + }) + return _c +} + +func (_c *ClientInterface_SponsorClaim_Call) Return(_a0 error) *ClientInterface_SponsorClaim_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *ClientInterface_SponsorClaim_Call) RunAndReturn(run func(claimsponsor.Claim) error) *ClientInterface_SponsorClaim_Call { + _c.Call.Return(run) + return _c +} + +// NewClientInterface creates a new instance of ClientInterface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewClientInterface(t interface { + mock.TestingT + Cleanup(func()) +}) *ClientInterface { + mock := &ClientInterface{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/rpc/mocks/l1_info_treer.go b/rpc/mocks/l1_info_treer.go new file mode 100644 index 00000000..a4e0f66c --- /dev/null +++ b/rpc/mocks/l1_info_treer.go @@ -0,0 +1,626 @@ +// Code generated by mockery. 
DO NOT EDIT. + +package mocks + +import ( + context "context" + + common "github.com/ethereum/go-ethereum/common" + + l1infotreesync "github.com/0xPolygon/cdk/l1infotreesync" + + mock "github.com/stretchr/testify/mock" + + types "github.com/0xPolygon/cdk/tree/types" +) + +// L1InfoTreer is an autogenerated mock type for the L1InfoTreer type +type L1InfoTreer struct { + mock.Mock +} + +type L1InfoTreer_Expecter struct { + mock *mock.Mock +} + +func (_m *L1InfoTreer) EXPECT() *L1InfoTreer_Expecter { + return &L1InfoTreer_Expecter{mock: &_m.Mock} +} + +// GetFirstInfo provides a mock function with given fields: +func (_m *L1InfoTreer) GetFirstInfo() (*l1infotreesync.L1InfoTreeLeaf, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetFirstInfo") + } + + var r0 *l1infotreesync.L1InfoTreeLeaf + var r1 error + if rf, ok := ret.Get(0).(func() (*l1infotreesync.L1InfoTreeLeaf, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() *l1infotreesync.L1InfoTreeLeaf); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*l1infotreesync.L1InfoTreeLeaf) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// L1InfoTreer_GetFirstInfo_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetFirstInfo' +type L1InfoTreer_GetFirstInfo_Call struct { + *mock.Call +} + +// GetFirstInfo is a helper method to define mock.On call +func (_e *L1InfoTreer_Expecter) GetFirstInfo() *L1InfoTreer_GetFirstInfo_Call { + return &L1InfoTreer_GetFirstInfo_Call{Call: _e.mock.On("GetFirstInfo")} +} + +func (_c *L1InfoTreer_GetFirstInfo_Call) Run(run func()) *L1InfoTreer_GetFirstInfo_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *L1InfoTreer_GetFirstInfo_Call) Return(_a0 *l1infotreesync.L1InfoTreeLeaf, _a1 error) *L1InfoTreer_GetFirstInfo_Call { + _c.Call.Return(_a0, _a1) + return 
_c +} + +func (_c *L1InfoTreer_GetFirstInfo_Call) RunAndReturn(run func() (*l1infotreesync.L1InfoTreeLeaf, error)) *L1InfoTreer_GetFirstInfo_Call { + _c.Call.Return(run) + return _c +} + +// GetFirstInfoAfterBlock provides a mock function with given fields: blockNum +func (_m *L1InfoTreer) GetFirstInfoAfterBlock(blockNum uint64) (*l1infotreesync.L1InfoTreeLeaf, error) { + ret := _m.Called(blockNum) + + if len(ret) == 0 { + panic("no return value specified for GetFirstInfoAfterBlock") + } + + var r0 *l1infotreesync.L1InfoTreeLeaf + var r1 error + if rf, ok := ret.Get(0).(func(uint64) (*l1infotreesync.L1InfoTreeLeaf, error)); ok { + return rf(blockNum) + } + if rf, ok := ret.Get(0).(func(uint64) *l1infotreesync.L1InfoTreeLeaf); ok { + r0 = rf(blockNum) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*l1infotreesync.L1InfoTreeLeaf) + } + } + + if rf, ok := ret.Get(1).(func(uint64) error); ok { + r1 = rf(blockNum) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// L1InfoTreer_GetFirstInfoAfterBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetFirstInfoAfterBlock' +type L1InfoTreer_GetFirstInfoAfterBlock_Call struct { + *mock.Call +} + +// GetFirstInfoAfterBlock is a helper method to define mock.On call +// - blockNum uint64 +func (_e *L1InfoTreer_Expecter) GetFirstInfoAfterBlock(blockNum interface{}) *L1InfoTreer_GetFirstInfoAfterBlock_Call { + return &L1InfoTreer_GetFirstInfoAfterBlock_Call{Call: _e.mock.On("GetFirstInfoAfterBlock", blockNum)} +} + +func (_c *L1InfoTreer_GetFirstInfoAfterBlock_Call) Run(run func(blockNum uint64)) *L1InfoTreer_GetFirstInfoAfterBlock_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(uint64)) + }) + return _c +} + +func (_c *L1InfoTreer_GetFirstInfoAfterBlock_Call) Return(_a0 *l1infotreesync.L1InfoTreeLeaf, _a1 error) *L1InfoTreer_GetFirstInfoAfterBlock_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L1InfoTreer_GetFirstInfoAfterBlock_Call) 
RunAndReturn(run func(uint64) (*l1infotreesync.L1InfoTreeLeaf, error)) *L1InfoTreer_GetFirstInfoAfterBlock_Call { + _c.Call.Return(run) + return _c +} + +// GetFirstL1InfoWithRollupExitRoot provides a mock function with given fields: rollupExitRoot +func (_m *L1InfoTreer) GetFirstL1InfoWithRollupExitRoot(rollupExitRoot common.Hash) (*l1infotreesync.L1InfoTreeLeaf, error) { + ret := _m.Called(rollupExitRoot) + + if len(ret) == 0 { + panic("no return value specified for GetFirstL1InfoWithRollupExitRoot") + } + + var r0 *l1infotreesync.L1InfoTreeLeaf + var r1 error + if rf, ok := ret.Get(0).(func(common.Hash) (*l1infotreesync.L1InfoTreeLeaf, error)); ok { + return rf(rollupExitRoot) + } + if rf, ok := ret.Get(0).(func(common.Hash) *l1infotreesync.L1InfoTreeLeaf); ok { + r0 = rf(rollupExitRoot) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*l1infotreesync.L1InfoTreeLeaf) + } + } + + if rf, ok := ret.Get(1).(func(common.Hash) error); ok { + r1 = rf(rollupExitRoot) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// L1InfoTreer_GetFirstL1InfoWithRollupExitRoot_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetFirstL1InfoWithRollupExitRoot' +type L1InfoTreer_GetFirstL1InfoWithRollupExitRoot_Call struct { + *mock.Call +} + +// GetFirstL1InfoWithRollupExitRoot is a helper method to define mock.On call +// - rollupExitRoot common.Hash +func (_e *L1InfoTreer_Expecter) GetFirstL1InfoWithRollupExitRoot(rollupExitRoot interface{}) *L1InfoTreer_GetFirstL1InfoWithRollupExitRoot_Call { + return &L1InfoTreer_GetFirstL1InfoWithRollupExitRoot_Call{Call: _e.mock.On("GetFirstL1InfoWithRollupExitRoot", rollupExitRoot)} +} + +func (_c *L1InfoTreer_GetFirstL1InfoWithRollupExitRoot_Call) Run(run func(rollupExitRoot common.Hash)) *L1InfoTreer_GetFirstL1InfoWithRollupExitRoot_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(common.Hash)) + }) + return _c +} + +func (_c 
*L1InfoTreer_GetFirstL1InfoWithRollupExitRoot_Call) Return(_a0 *l1infotreesync.L1InfoTreeLeaf, _a1 error) *L1InfoTreer_GetFirstL1InfoWithRollupExitRoot_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L1InfoTreer_GetFirstL1InfoWithRollupExitRoot_Call) RunAndReturn(run func(common.Hash) (*l1infotreesync.L1InfoTreeLeaf, error)) *L1InfoTreer_GetFirstL1InfoWithRollupExitRoot_Call { + _c.Call.Return(run) + return _c +} + +// GetFirstVerifiedBatches provides a mock function with given fields: rollupID +func (_m *L1InfoTreer) GetFirstVerifiedBatches(rollupID uint32) (*l1infotreesync.VerifyBatches, error) { + ret := _m.Called(rollupID) + + if len(ret) == 0 { + panic("no return value specified for GetFirstVerifiedBatches") + } + + var r0 *l1infotreesync.VerifyBatches + var r1 error + if rf, ok := ret.Get(0).(func(uint32) (*l1infotreesync.VerifyBatches, error)); ok { + return rf(rollupID) + } + if rf, ok := ret.Get(0).(func(uint32) *l1infotreesync.VerifyBatches); ok { + r0 = rf(rollupID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*l1infotreesync.VerifyBatches) + } + } + + if rf, ok := ret.Get(1).(func(uint32) error); ok { + r1 = rf(rollupID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// L1InfoTreer_GetFirstVerifiedBatches_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetFirstVerifiedBatches' +type L1InfoTreer_GetFirstVerifiedBatches_Call struct { + *mock.Call +} + +// GetFirstVerifiedBatches is a helper method to define mock.On call +// - rollupID uint32 +func (_e *L1InfoTreer_Expecter) GetFirstVerifiedBatches(rollupID interface{}) *L1InfoTreer_GetFirstVerifiedBatches_Call { + return &L1InfoTreer_GetFirstVerifiedBatches_Call{Call: _e.mock.On("GetFirstVerifiedBatches", rollupID)} +} + +func (_c *L1InfoTreer_GetFirstVerifiedBatches_Call) Run(run func(rollupID uint32)) *L1InfoTreer_GetFirstVerifiedBatches_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(uint32)) + }) + return 
_c +} + +func (_c *L1InfoTreer_GetFirstVerifiedBatches_Call) Return(_a0 *l1infotreesync.VerifyBatches, _a1 error) *L1InfoTreer_GetFirstVerifiedBatches_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L1InfoTreer_GetFirstVerifiedBatches_Call) RunAndReturn(run func(uint32) (*l1infotreesync.VerifyBatches, error)) *L1InfoTreer_GetFirstVerifiedBatches_Call { + _c.Call.Return(run) + return _c +} + +// GetFirstVerifiedBatchesAfterBlock provides a mock function with given fields: rollupID, blockNum +func (_m *L1InfoTreer) GetFirstVerifiedBatchesAfterBlock(rollupID uint32, blockNum uint64) (*l1infotreesync.VerifyBatches, error) { + ret := _m.Called(rollupID, blockNum) + + if len(ret) == 0 { + panic("no return value specified for GetFirstVerifiedBatchesAfterBlock") + } + + var r0 *l1infotreesync.VerifyBatches + var r1 error + if rf, ok := ret.Get(0).(func(uint32, uint64) (*l1infotreesync.VerifyBatches, error)); ok { + return rf(rollupID, blockNum) + } + if rf, ok := ret.Get(0).(func(uint32, uint64) *l1infotreesync.VerifyBatches); ok { + r0 = rf(rollupID, blockNum) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*l1infotreesync.VerifyBatches) + } + } + + if rf, ok := ret.Get(1).(func(uint32, uint64) error); ok { + r1 = rf(rollupID, blockNum) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// L1InfoTreer_GetFirstVerifiedBatchesAfterBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetFirstVerifiedBatchesAfterBlock' +type L1InfoTreer_GetFirstVerifiedBatchesAfterBlock_Call struct { + *mock.Call +} + +// GetFirstVerifiedBatchesAfterBlock is a helper method to define mock.On call +// - rollupID uint32 +// - blockNum uint64 +func (_e *L1InfoTreer_Expecter) GetFirstVerifiedBatchesAfterBlock(rollupID interface{}, blockNum interface{}) *L1InfoTreer_GetFirstVerifiedBatchesAfterBlock_Call { + return &L1InfoTreer_GetFirstVerifiedBatchesAfterBlock_Call{Call: _e.mock.On("GetFirstVerifiedBatchesAfterBlock", 
rollupID, blockNum)} +} + +func (_c *L1InfoTreer_GetFirstVerifiedBatchesAfterBlock_Call) Run(run func(rollupID uint32, blockNum uint64)) *L1InfoTreer_GetFirstVerifiedBatchesAfterBlock_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(uint32), args[1].(uint64)) + }) + return _c +} + +func (_c *L1InfoTreer_GetFirstVerifiedBatchesAfterBlock_Call) Return(_a0 *l1infotreesync.VerifyBatches, _a1 error) *L1InfoTreer_GetFirstVerifiedBatchesAfterBlock_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L1InfoTreer_GetFirstVerifiedBatchesAfterBlock_Call) RunAndReturn(run func(uint32, uint64) (*l1infotreesync.VerifyBatches, error)) *L1InfoTreer_GetFirstVerifiedBatchesAfterBlock_Call { + _c.Call.Return(run) + return _c +} + +// GetInfoByIndex provides a mock function with given fields: ctx, index +func (_m *L1InfoTreer) GetInfoByIndex(ctx context.Context, index uint32) (*l1infotreesync.L1InfoTreeLeaf, error) { + ret := _m.Called(ctx, index) + + if len(ret) == 0 { + panic("no return value specified for GetInfoByIndex") + } + + var r0 *l1infotreesync.L1InfoTreeLeaf + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint32) (*l1infotreesync.L1InfoTreeLeaf, error)); ok { + return rf(ctx, index) + } + if rf, ok := ret.Get(0).(func(context.Context, uint32) *l1infotreesync.L1InfoTreeLeaf); ok { + r0 = rf(ctx, index) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*l1infotreesync.L1InfoTreeLeaf) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint32) error); ok { + r1 = rf(ctx, index) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// L1InfoTreer_GetInfoByIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetInfoByIndex' +type L1InfoTreer_GetInfoByIndex_Call struct { + *mock.Call +} + +// GetInfoByIndex is a helper method to define mock.On call +// - ctx context.Context +// - index uint32 +func (_e *L1InfoTreer_Expecter) GetInfoByIndex(ctx interface{}, index interface{}) 
*L1InfoTreer_GetInfoByIndex_Call { + return &L1InfoTreer_GetInfoByIndex_Call{Call: _e.mock.On("GetInfoByIndex", ctx, index)} +} + +func (_c *L1InfoTreer_GetInfoByIndex_Call) Run(run func(ctx context.Context, index uint32)) *L1InfoTreer_GetInfoByIndex_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint32)) + }) + return _c +} + +func (_c *L1InfoTreer_GetInfoByIndex_Call) Return(_a0 *l1infotreesync.L1InfoTreeLeaf, _a1 error) *L1InfoTreer_GetInfoByIndex_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L1InfoTreer_GetInfoByIndex_Call) RunAndReturn(run func(context.Context, uint32) (*l1infotreesync.L1InfoTreeLeaf, error)) *L1InfoTreer_GetInfoByIndex_Call { + _c.Call.Return(run) + return _c +} + +// GetLastInfo provides a mock function with given fields: +func (_m *L1InfoTreer) GetLastInfo() (*l1infotreesync.L1InfoTreeLeaf, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetLastInfo") + } + + var r0 *l1infotreesync.L1InfoTreeLeaf + var r1 error + if rf, ok := ret.Get(0).(func() (*l1infotreesync.L1InfoTreeLeaf, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() *l1infotreesync.L1InfoTreeLeaf); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*l1infotreesync.L1InfoTreeLeaf) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// L1InfoTreer_GetLastInfo_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLastInfo' +type L1InfoTreer_GetLastInfo_Call struct { + *mock.Call +} + +// GetLastInfo is a helper method to define mock.On call +func (_e *L1InfoTreer_Expecter) GetLastInfo() *L1InfoTreer_GetLastInfo_Call { + return &L1InfoTreer_GetLastInfo_Call{Call: _e.mock.On("GetLastInfo")} +} + +func (_c *L1InfoTreer_GetLastInfo_Call) Run(run func()) *L1InfoTreer_GetLastInfo_Call { + _c.Call.Run(func(args mock.Arguments) { + run() 
+ }) + return _c +} + +func (_c *L1InfoTreer_GetLastInfo_Call) Return(_a0 *l1infotreesync.L1InfoTreeLeaf, _a1 error) *L1InfoTreer_GetLastInfo_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L1InfoTreer_GetLastInfo_Call) RunAndReturn(run func() (*l1infotreesync.L1InfoTreeLeaf, error)) *L1InfoTreer_GetLastInfo_Call { + _c.Call.Return(run) + return _c +} + +// GetLastVerifiedBatches provides a mock function with given fields: rollupID +func (_m *L1InfoTreer) GetLastVerifiedBatches(rollupID uint32) (*l1infotreesync.VerifyBatches, error) { + ret := _m.Called(rollupID) + + if len(ret) == 0 { + panic("no return value specified for GetLastVerifiedBatches") + } + + var r0 *l1infotreesync.VerifyBatches + var r1 error + if rf, ok := ret.Get(0).(func(uint32) (*l1infotreesync.VerifyBatches, error)); ok { + return rf(rollupID) + } + if rf, ok := ret.Get(0).(func(uint32) *l1infotreesync.VerifyBatches); ok { + r0 = rf(rollupID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*l1infotreesync.VerifyBatches) + } + } + + if rf, ok := ret.Get(1).(func(uint32) error); ok { + r1 = rf(rollupID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// L1InfoTreer_GetLastVerifiedBatches_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLastVerifiedBatches' +type L1InfoTreer_GetLastVerifiedBatches_Call struct { + *mock.Call +} + +// GetLastVerifiedBatches is a helper method to define mock.On call +// - rollupID uint32 +func (_e *L1InfoTreer_Expecter) GetLastVerifiedBatches(rollupID interface{}) *L1InfoTreer_GetLastVerifiedBatches_Call { + return &L1InfoTreer_GetLastVerifiedBatches_Call{Call: _e.mock.On("GetLastVerifiedBatches", rollupID)} +} + +func (_c *L1InfoTreer_GetLastVerifiedBatches_Call) Run(run func(rollupID uint32)) *L1InfoTreer_GetLastVerifiedBatches_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(uint32)) + }) + return _c +} + +func (_c *L1InfoTreer_GetLastVerifiedBatches_Call) Return(_a0 
*l1infotreesync.VerifyBatches, _a1 error) *L1InfoTreer_GetLastVerifiedBatches_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L1InfoTreer_GetLastVerifiedBatches_Call) RunAndReturn(run func(uint32) (*l1infotreesync.VerifyBatches, error)) *L1InfoTreer_GetLastVerifiedBatches_Call { + _c.Call.Return(run) + return _c +} + +// GetLocalExitRoot provides a mock function with given fields: ctx, networkID, rollupExitRoot +func (_m *L1InfoTreer) GetLocalExitRoot(ctx context.Context, networkID uint32, rollupExitRoot common.Hash) (common.Hash, error) { + ret := _m.Called(ctx, networkID, rollupExitRoot) + + if len(ret) == 0 { + panic("no return value specified for GetLocalExitRoot") + } + + var r0 common.Hash + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint32, common.Hash) (common.Hash, error)); ok { + return rf(ctx, networkID, rollupExitRoot) + } + if rf, ok := ret.Get(0).(func(context.Context, uint32, common.Hash) common.Hash); ok { + r0 = rf(ctx, networkID, rollupExitRoot) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Hash) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint32, common.Hash) error); ok { + r1 = rf(ctx, networkID, rollupExitRoot) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// L1InfoTreer_GetLocalExitRoot_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLocalExitRoot' +type L1InfoTreer_GetLocalExitRoot_Call struct { + *mock.Call +} + +// GetLocalExitRoot is a helper method to define mock.On call +// - ctx context.Context +// - networkID uint32 +// - rollupExitRoot common.Hash +func (_e *L1InfoTreer_Expecter) GetLocalExitRoot(ctx interface{}, networkID interface{}, rollupExitRoot interface{}) *L1InfoTreer_GetLocalExitRoot_Call { + return &L1InfoTreer_GetLocalExitRoot_Call{Call: _e.mock.On("GetLocalExitRoot", ctx, networkID, rollupExitRoot)} +} + +func (_c *L1InfoTreer_GetLocalExitRoot_Call) Run(run func(ctx context.Context, networkID uint32, 
rollupExitRoot common.Hash)) *L1InfoTreer_GetLocalExitRoot_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint32), args[2].(common.Hash)) + }) + return _c +} + +func (_c *L1InfoTreer_GetLocalExitRoot_Call) Return(_a0 common.Hash, _a1 error) *L1InfoTreer_GetLocalExitRoot_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L1InfoTreer_GetLocalExitRoot_Call) RunAndReturn(run func(context.Context, uint32, common.Hash) (common.Hash, error)) *L1InfoTreer_GetLocalExitRoot_Call { + _c.Call.Return(run) + return _c +} + +// GetRollupExitTreeMerkleProof provides a mock function with given fields: ctx, networkID, root +func (_m *L1InfoTreer) GetRollupExitTreeMerkleProof(ctx context.Context, networkID uint32, root common.Hash) (types.Proof, error) { + ret := _m.Called(ctx, networkID, root) + + if len(ret) == 0 { + panic("no return value specified for GetRollupExitTreeMerkleProof") + } + + var r0 types.Proof + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint32, common.Hash) (types.Proof, error)); ok { + return rf(ctx, networkID, root) + } + if rf, ok := ret.Get(0).(func(context.Context, uint32, common.Hash) types.Proof); ok { + r0 = rf(ctx, networkID, root) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(types.Proof) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint32, common.Hash) error); ok { + r1 = rf(ctx, networkID, root) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// L1InfoTreer_GetRollupExitTreeMerkleProof_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetRollupExitTreeMerkleProof' +type L1InfoTreer_GetRollupExitTreeMerkleProof_Call struct { + *mock.Call +} + +// GetRollupExitTreeMerkleProof is a helper method to define mock.On call +// - ctx context.Context +// - networkID uint32 +// - root common.Hash +func (_e *L1InfoTreer_Expecter) GetRollupExitTreeMerkleProof(ctx interface{}, networkID interface{}, root interface{}) 
*L1InfoTreer_GetRollupExitTreeMerkleProof_Call { + return &L1InfoTreer_GetRollupExitTreeMerkleProof_Call{Call: _e.mock.On("GetRollupExitTreeMerkleProof", ctx, networkID, root)} +} + +func (_c *L1InfoTreer_GetRollupExitTreeMerkleProof_Call) Run(run func(ctx context.Context, networkID uint32, root common.Hash)) *L1InfoTreer_GetRollupExitTreeMerkleProof_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint32), args[2].(common.Hash)) + }) + return _c +} + +func (_c *L1InfoTreer_GetRollupExitTreeMerkleProof_Call) Return(_a0 types.Proof, _a1 error) *L1InfoTreer_GetRollupExitTreeMerkleProof_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L1InfoTreer_GetRollupExitTreeMerkleProof_Call) RunAndReturn(run func(context.Context, uint32, common.Hash) (types.Proof, error)) *L1InfoTreer_GetRollupExitTreeMerkleProof_Call { + _c.Call.Return(run) + return _c +} + +// NewL1InfoTreer creates a new instance of L1InfoTreer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewL1InfoTreer(t interface { + mock.TestingT + Cleanup(func()) +}) *L1InfoTreer { + mock := &L1InfoTreer{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/rpc/mocks/last_ge_rer.go b/rpc/mocks/last_ge_rer.go new file mode 100644 index 00000000..d2e3068a --- /dev/null +++ b/rpc/mocks/last_ge_rer.go @@ -0,0 +1,104 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + common "github.com/ethereum/go-ethereum/common" + + mock "github.com/stretchr/testify/mock" +) + +// LastGERer is an autogenerated mock type for the LastGERer type +type LastGERer struct { + mock.Mock +} + +type LastGERer_Expecter struct { + mock *mock.Mock +} + +func (_m *LastGERer) EXPECT() *LastGERer_Expecter { + return &LastGERer_Expecter{mock: &_m.Mock} +} + +// GetFirstGERAfterL1InfoTreeIndex provides a mock function with given fields: ctx, atOrAfterL1InfoTreeIndex +func (_m *LastGERer) GetFirstGERAfterL1InfoTreeIndex(ctx context.Context, atOrAfterL1InfoTreeIndex uint32) (uint32, common.Hash, error) { + ret := _m.Called(ctx, atOrAfterL1InfoTreeIndex) + + if len(ret) == 0 { + panic("no return value specified for GetFirstGERAfterL1InfoTreeIndex") + } + + var r0 uint32 + var r1 common.Hash + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, uint32) (uint32, common.Hash, error)); ok { + return rf(ctx, atOrAfterL1InfoTreeIndex) + } + if rf, ok := ret.Get(0).(func(context.Context, uint32) uint32); ok { + r0 = rf(ctx, atOrAfterL1InfoTreeIndex) + } else { + r0 = ret.Get(0).(uint32) + } + + if rf, ok := ret.Get(1).(func(context.Context, uint32) common.Hash); ok { + r1 = rf(ctx, atOrAfterL1InfoTreeIndex) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(common.Hash) + } + } + + if rf, ok := ret.Get(2).(func(context.Context, uint32) error); ok { + r2 = rf(ctx, atOrAfterL1InfoTreeIndex) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetFirstGERAfterL1InfoTreeIndex' +type LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call struct { + *mock.Call +} + +// GetFirstGERAfterL1InfoTreeIndex is a helper method to define mock.On call +// - ctx context.Context +// - atOrAfterL1InfoTreeIndex uint32 +func (_e *LastGERer_Expecter) GetFirstGERAfterL1InfoTreeIndex(ctx 
interface{}, atOrAfterL1InfoTreeIndex interface{}) *LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call { + return &LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call{Call: _e.mock.On("GetFirstGERAfterL1InfoTreeIndex", ctx, atOrAfterL1InfoTreeIndex)} +} + +func (_c *LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call) Run(run func(ctx context.Context, atOrAfterL1InfoTreeIndex uint32)) *LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint32)) + }) + return _c +} + +func (_c *LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call) Return(injectedL1InfoTreeIndex uint32, ger common.Hash, err error) *LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call { + _c.Call.Return(injectedL1InfoTreeIndex, ger, err) + return _c +} + +func (_c *LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call) RunAndReturn(run func(context.Context, uint32) (uint32, common.Hash, error)) *LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call { + _c.Call.Return(run) + return _c +} + +// NewLastGERer creates a new instance of LastGERer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewLastGERer(t interface { + mock.TestingT + Cleanup(func()) +}) *LastGERer { + mock := &LastGERer{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/rpc/types/bridge.go b/rpc/types/bridge.go new file mode 100644 index 00000000..eb8c6464 --- /dev/null +++ b/rpc/types/bridge.go @@ -0,0 +1,12 @@ +package types + +import ( + "github.com/0xPolygon/cdk/l1infotreesync" + tree "github.com/0xPolygon/cdk/tree/types" +) + +type ClaimProof struct { + ProofLocalExitRoot tree.Proof + ProofRollupExitRoot tree.Proof + L1InfoTreeLeaf l1infotreesync.L1InfoTreeLeaf +} diff --git a/sequencesender/sequencesender.go b/sequencesender/sequencesender.go index 4005c991..d3efc375 100644 --- a/sequencesender/sequencesender.go +++ b/sequencesender/sequencesender.go @@ -220,6 +220,7 @@ func (s *SequenceSender) purgeSequences() { // Purge the information of batches that are already virtualized s.mutexSequence.Lock() + defer s.mutexSequence.Unlock() truncateUntil := 0 toPurge := make([]uint64, 0) for i := 0; i < len(s.sequenceList); i++ { @@ -246,7 +247,6 @@ func (s *SequenceSender) purgeSequences() { } s.logger.Infof("batches purged count: %d, fromBatch: %d, toBatch: %d", len(toPurge), firstPurged, lastPurged) } - s.mutexSequence.Unlock() } // tryToSendSequence checks if there is a sequence and it's worth it to send to L1 diff --git a/sequencesender/txbuilder/banana_base.go b/sequencesender/txbuilder/banana_base.go index 7b451ed8..6d191c4a 100644 --- a/sequencesender/txbuilder/banana_base.go +++ b/sequencesender/txbuilder/banana_base.go @@ -134,10 +134,26 @@ func (t *TxBuilderBananaBase) NewSequence( sequence.OldAccInputHash = oldAccInputHash sequence.AccInputHash = accInputHash + + err = SequenceSanityCheck(sequence) + if err != nil { + return nil, fmt.Errorf("sequenceSanityCheck fails. 
Err: %w", err) + } res := NewBananaSequence(*sequence) return res, nil } +func SequenceSanityCheck(seq *etherman.SequenceBanana) error { + maxL1InfoIndex, err := calculateMaxL1InfoTreeIndexInsideSequence(seq) + if err != nil { + return err + } + if seq.CounterL1InfoRoot < maxL1InfoIndex+1 { + return fmt.Errorf("wrong CounterL1InfoRoot(%d): BatchL2Data (max=%d) ", seq.CounterL1InfoRoot, maxL1InfoIndex) + } + return nil +} + func (t *TxBuilderBananaBase) getL1InfoRoot(counterL1InfoRoot uint32) (common.Hash, error) { return t.globalExitRootContract.L1InfoRootMap(&bind.CallOpts{Pending: false}, counterL1InfoRoot) } diff --git a/sequencesender/txbuilder/banana_base_test.go b/sequencesender/txbuilder/banana_base_test.go index af4b05c0..3b449084 100644 --- a/sequencesender/txbuilder/banana_base_test.go +++ b/sequencesender/txbuilder/banana_base_test.go @@ -2,14 +2,17 @@ package txbuilder_test import ( "context" + "fmt" "math/big" "testing" + "github.com/0xPolygon/cdk/etherman" "github.com/0xPolygon/cdk/l1infotreesync" "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sequencesender/seqsendertypes" "github.com/0xPolygon/cdk/sequencesender/txbuilder" "github.com/0xPolygon/cdk/sequencesender/txbuilder/mocks_txbuilder" + "github.com/0xPolygon/cdk/state" "github.com/0xPolygon/cdk/state/datastream" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" @@ -31,8 +34,15 @@ func TestBananaBaseNewSequenceEmpty(t *testing.T) { seq, err := testData.sut.NewSequence(context.TODO(), nil, common.Address{}) require.NotNil(t, seq) require.NoError(t, err) - // TODO check values - // require.Equal(t, lastAcc, seq.LastAccInputHash()) +} + +func TestBananaBaseNewSequenceErrorHeaderByNumber(t *testing.T) { + testData := newBananaBaseTestData(t) + testData.l1Client.On("HeaderByNumber", mock.Anything, mock.Anything). 
+ Return(nil, fmt.Errorf("error")) + seq, err := testData.sut.NewSequence(context.TODO(), nil, common.Address{}) + require.Nil(t, seq) + require.Error(t, err) } func TestBananaBaseNewBatchFromL2Block(t *testing.T) { @@ -79,6 +89,41 @@ func TestBananaBaseNewSequenceBatch(t *testing.T) { // TODO: check that the seq have the right values } +func TestBananaSanityCheck(t *testing.T) { + batch := state.BatchRawV2{ + Blocks: []state.L2BlockRaw{ + { + BlockNumber: 1, + ChangeL2BlockHeader: state.ChangeL2BlockHeader{ + DeltaTimestamp: 1, + IndexL1InfoTree: 1, + }, + }, + }, + } + data, err := state.EncodeBatchV2(&batch) + require.NoError(t, err) + require.NotNil(t, data) + seq := etherman.SequenceBanana{ + CounterL1InfoRoot: 2, + Batches: []etherman.Batch{ + { + L2Data: data, + }, + }, + } + err = txbuilder.SequenceSanityCheck(&seq) + require.NoError(t, err, "inside batchl2data max is 1 and counter is 2 (2>=1+1)") + seq.CounterL1InfoRoot = 1 + err = txbuilder.SequenceSanityCheck(&seq) + require.Error(t, err, "inside batchl2data max is 1 and counter is 1. 
The batchl2data is not included in counter") +} + +func TestBananaSanityCheckNilSeq(t *testing.T) { + err := txbuilder.SequenceSanityCheck(nil) + require.Error(t, err, "nil sequence") +} + type testDataBananaBase struct { rollupContract *mocks_txbuilder.RollupBananaBaseContractor getContract *mocks_txbuilder.GlobalExitRootBananaContractor diff --git a/sequencesender/txbuilder/banana_types.go b/sequencesender/txbuilder/banana_types.go index c09095b6..c69d2876 100644 --- a/sequencesender/txbuilder/banana_types.go +++ b/sequencesender/txbuilder/banana_types.go @@ -5,6 +5,7 @@ import ( "github.com/0xPolygon/cdk/etherman" "github.com/0xPolygon/cdk/sequencesender/seqsendertypes" + "github.com/0xPolygon/cdk/state" "github.com/ethereum/go-ethereum/common" ) @@ -147,3 +148,37 @@ func (b *BananaSequence) LastVirtualBatchNumber() uint64 { func (b *BananaSequence) SetLastVirtualBatchNumber(batchNumber uint64) { b.SequenceBanana.LastVirtualBatchNumber = batchNumber } + +func calculateMaxL1InfoTreeIndexInsideL2Data(l2data []byte) (uint32, error) { + batchRawV2, err := state.DecodeBatchV2(l2data) + if err != nil { + return 0, fmt.Errorf("calculateMaxL1InfoTreeIndexInsideL2Data: error decoding batchL2Data, err:%w", err) + } + if batchRawV2 == nil { + return 0, fmt.Errorf("calculateMaxL1InfoTreeIndexInsideL2Data: batchRawV2 is nil") + } + maxIndex := uint32(0) + for _, block := range batchRawV2.Blocks { + if block.IndexL1InfoTree > maxIndex { + maxIndex = block.IndexL1InfoTree + } + } + return maxIndex, nil +} + +func calculateMaxL1InfoTreeIndexInsideSequence(seq *etherman.SequenceBanana) (uint32, error) { + if seq == nil { + return 0, fmt.Errorf("calculateMaxL1InfoTreeIndexInsideSequence: seq is nil") + } + maxIndex := uint32(0) + for _, batch := range seq.Batches { + index, err := calculateMaxL1InfoTreeIndexInsideL2Data(batch.L2Data) + if err != nil { + return 0, fmt.Errorf("calculateMaxL1InfoTreeIndexInsideBatches: error getting batch L1InfoTree , err:%w", err) + } + if index > 
maxIndex { + maxIndex = index + } + } + return maxIndex, nil +} diff --git a/sync/evmdownloader.go b/sync/evmdownloader.go index c9c4e661..13539f2f 100644 --- a/sync/evmdownloader.go +++ b/sync/evmdownloader.go @@ -2,6 +2,7 @@ package sync import ( "context" + "errors" "math/big" "time" @@ -24,7 +25,7 @@ type EVMDownloaderInterface interface { WaitForNewBlocks(ctx context.Context, lastBlockSeen uint64) (newLastBlock uint64) GetEventsByBlockRange(ctx context.Context, fromBlock, toBlock uint64) []EVMBlock GetLogs(ctx context.Context, fromBlock, toBlock uint64) []types.Log - GetBlockHeader(ctx context.Context, blockNum uint64) EVMBlockHeader + GetBlockHeader(ctx context.Context, blockNum uint64) (EVMBlockHeader, bool) } type LogAppenderMap map[common.Hash]func(b *EVMBlock, l types.Log) error @@ -101,8 +102,13 @@ func (d *EVMDownloader) Download(ctx context.Context, fromBlock uint64, download if len(blocks) == 0 || blocks[len(blocks)-1].Num < toBlock { // Indicate the last downloaded block if there are not events on it d.log.Debugf("sending block %d to the driver (without events)", toBlock) + header, isCanceled := d.GetBlockHeader(ctx, toBlock) + if isCanceled { + return + } + downloadedCh <- EVMBlock{ - EVMBlockHeader: d.GetBlockHeader(ctx, toBlock), + EVMBlockHeader: header, } } fromBlock = toBlock + 1 @@ -170,44 +176,53 @@ func (d *EVMDownloaderImplementation) WaitForNewBlocks( } func (d *EVMDownloaderImplementation) GetEventsByBlockRange(ctx context.Context, fromBlock, toBlock uint64) []EVMBlock { - blocks := []EVMBlock{} - logs := d.GetLogs(ctx, fromBlock, toBlock) - for _, l := range logs { - if len(blocks) == 0 || blocks[len(blocks)-1].Num < l.BlockNumber { - b := d.GetBlockHeader(ctx, l.BlockNumber) - if b.Hash != l.BlockHash { - d.log.Infof( - "there has been a block hash change between the event query and the block query "+ - "for block %d: %s vs %s. 
Retrying.", - l.BlockNumber, b.Hash, l.BlockHash, - ) - return d.GetEventsByBlockRange(ctx, fromBlock, toBlock) + select { + case <-ctx.Done(): + return nil + default: + blocks := []EVMBlock{} + logs := d.GetLogs(ctx, fromBlock, toBlock) + for _, l := range logs { + if len(blocks) == 0 || blocks[len(blocks)-1].Num < l.BlockNumber { + b, canceled := d.GetBlockHeader(ctx, l.BlockNumber) + if canceled { + return nil + } + + if b.Hash != l.BlockHash { + d.log.Infof( + "there has been a block hash change between the event query and the block query "+ + "for block %d: %s vs %s. Retrying.", + l.BlockNumber, b.Hash, l.BlockHash, + ) + return d.GetEventsByBlockRange(ctx, fromBlock, toBlock) + } + blocks = append(blocks, EVMBlock{ + EVMBlockHeader: EVMBlockHeader{ + Num: l.BlockNumber, + Hash: l.BlockHash, + Timestamp: b.Timestamp, + ParentHash: b.ParentHash, + }, + Events: []interface{}{}, + }) } - blocks = append(blocks, EVMBlock{ - EVMBlockHeader: EVMBlockHeader{ - Num: l.BlockNumber, - Hash: l.BlockHash, - Timestamp: b.Timestamp, - ParentHash: b.ParentHash, - }, - Events: []interface{}{}, - }) - } - for { - attempts := 0 - err := d.appender[l.Topics[0]](&blocks[len(blocks)-1], l) - if err != nil { - attempts++ - d.log.Error("error trying to append log: ", err) - d.rh.Handle("getLogs", attempts) - continue + for { + attempts := 0 + err := d.appender[l.Topics[0]](&blocks[len(blocks)-1], l) + if err != nil { + attempts++ + d.log.Error("error trying to append log: ", err) + d.rh.Handle("getLogs", attempts) + continue + } + break } - break } - } - return blocks + return blocks + } } func (d *EVMDownloaderImplementation) GetLogs(ctx context.Context, fromBlock, toBlock uint64) []types.Log { @@ -224,6 +239,11 @@ func (d *EVMDownloaderImplementation) GetLogs(ctx context.Context, fromBlock, to for { unfilteredLogs, err = d.ethClient.FilterLogs(ctx, query) if err != nil { + if errors.Is(err, context.Canceled) { + // context is canceled, we don't want to fatal on max attempts in 
this case + return nil + } + attempts++ d.log.Error("error calling FilterLogs to eth client: ", err) d.rh.Handle("getLogs", attempts) @@ -243,11 +263,16 @@ func (d *EVMDownloaderImplementation) GetLogs(ctx context.Context, fromBlock, to return logs } -func (d *EVMDownloaderImplementation) GetBlockHeader(ctx context.Context, blockNum uint64) EVMBlockHeader { +func (d *EVMDownloaderImplementation) GetBlockHeader(ctx context.Context, blockNum uint64) (EVMBlockHeader, bool) { attempts := 0 for { header, err := d.ethClient.HeaderByNumber(ctx, new(big.Int).SetUint64(blockNum)) if err != nil { + if errors.Is(err, context.Canceled) { + // context is canceled, we don't want to fatal on max attempts in this case + return EVMBlockHeader{}, true + } + attempts++ d.log.Errorf("error getting block header for block %d, err: %v", blockNum, err) d.rh.Handle("getBlockHeader", attempts) @@ -258,6 +283,6 @@ func (d *EVMDownloaderImplementation) GetBlockHeader(ctx context.Context, blockN Hash: header.Hash(), ParentHash: header.ParentHash, Timestamp: header.Time, - } + }, false } } diff --git a/sync/evmdownloader_test.go b/sync/evmdownloader_test.go index 59c43b8f..04c92e72 100644 --- a/sync/evmdownloader_test.go +++ b/sync/evmdownloader_test.go @@ -222,9 +222,9 @@ func TestDownload(t *testing.T) { } expectedBlocks = append(expectedBlocks, b1) d.On("GetEventsByBlockRange", mock.Anything, uint64(0), uint64(1)). - Return([]EVMBlock{}) + Return([]EVMBlock{}, false) d.On("GetBlockHeader", mock.Anything, uint64(1)). - Return(b1.EVMBlockHeader) + Return(b1.EVMBlockHeader, false) // iteration 1: wait for next block to be created d.On("WaitForNewBlocks", mock.Anything, uint64(1)). @@ -240,7 +240,7 @@ func TestDownload(t *testing.T) { } expectedBlocks = append(expectedBlocks, b2) d.On("GetEventsByBlockRange", mock.Anything, uint64(2), uint64(2)). 
- Return([]EVMBlock{b2}) + Return([]EVMBlock{b2}, false) // iteration 3: wait for next block to be created (jump to block 8) d.On("WaitForNewBlocks", mock.Anything, uint64(2)). @@ -270,9 +270,9 @@ func TestDownload(t *testing.T) { } expectedBlocks = append(expectedBlocks, b6, b7, b8) d.On("GetEventsByBlockRange", mock.Anything, uint64(3), uint64(8)). - Return([]EVMBlock{b6, b7}) + Return([]EVMBlock{b6, b7}, false) d.On("GetBlockHeader", mock.Anything, uint64(8)). - Return(b8.EVMBlockHeader) + Return(b8.EVMBlockHeader, false) // iteration 5: wait for next block to be created (jump to block 30) d.On("WaitForNewBlocks", mock.Anything, uint64(8)). @@ -288,9 +288,9 @@ func TestDownload(t *testing.T) { } expectedBlocks = append(expectedBlocks, b19) d.On("GetEventsByBlockRange", mock.Anything, uint64(9), uint64(19)). - Return([]EVMBlock{}) + Return([]EVMBlock{}, false) d.On("GetBlockHeader", mock.Anything, uint64(19)). - Return(b19.EVMBlockHeader) + Return(b19.EVMBlockHeader, false) // iteration 7: from block 20 to 30, events on last block b30 := EVMBlock{ @@ -302,7 +302,7 @@ func TestDownload(t *testing.T) { } expectedBlocks = append(expectedBlocks, b30) d.On("GetEventsByBlockRange", mock.Anything, uint64(20), uint64(30)). - Return([]EVMBlock{b30}) + Return([]EVMBlock{b30}, false) // iteration 8: wait for next block to be created (jump to block 35) d.On("WaitForNewBlocks", mock.Anything, uint64(30)). 
@@ -369,14 +369,16 @@ func TestGetBlockHeader(t *testing.T) { // at first attempt clientMock.On("HeaderByNumber", ctx, blockNumBig).Return(returnedBlock, nil).Once() - actualBlock := d.GetBlockHeader(ctx, blockNum) + actualBlock, isCanceled := d.GetBlockHeader(ctx, blockNum) assert.Equal(t, expectedBlock, actualBlock) + assert.False(t, isCanceled) // after error from client clientMock.On("HeaderByNumber", ctx, blockNumBig).Return(nil, errors.New("foo")).Once() clientMock.On("HeaderByNumber", ctx, blockNumBig).Return(returnedBlock, nil).Once() - actualBlock = d.GetBlockHeader(ctx, blockNum) + actualBlock, isCanceled = d.GetBlockHeader(ctx, blockNum) assert.Equal(t, expectedBlock, actualBlock) + assert.False(t, isCanceled) } func buildAppender() LogAppenderMap { diff --git a/sync/evmdriver.go b/sync/evmdriver.go index ae7388e0..7865f645 100644 --- a/sync/evmdriver.go +++ b/sync/evmdriver.go @@ -71,6 +71,7 @@ reset: attempts int err error ) + for { lastProcessedBlock, err = d.processor.GetLastProcessedBlock(ctx) if err != nil { @@ -84,18 +85,19 @@ reset: cancellableCtx, cancel := context.WithCancel(ctx) defer cancel() + log.Info("Starting sync...", " lastProcessedBlock", lastProcessedBlock) // start downloading downloadCh := make(chan EVMBlock, d.downloadBufferSize) - go d.downloader.Download(cancellableCtx, lastProcessedBlock, downloadCh) + go d.downloader.Download(cancellableCtx, lastProcessedBlock+1, downloadCh) for { select { case b := <-downloadCh: - d.log.Debug("handleNewBlock") + d.log.Debug("handleNewBlock", " blockNum: ", b.Num, " blockHash: ", b.Hash) d.handleNewBlock(ctx, b) case firstReorgedBlock := <-d.reorgSub.ReorgedBlock: - d.log.Debug("handleReorg") - d.handleReorg(ctx, cancel, downloadCh, firstReorgedBlock) + d.log.Debug("handleReorg from block: ", firstReorgedBlock) + d.handleReorg(ctx, cancel, firstReorgedBlock) goto reset } } @@ -130,15 +132,10 @@ func (d *EVMDriver) handleNewBlock(ctx context.Context, b EVMBlock) { } } -func (d *EVMDriver) 
handleReorg( - ctx context.Context, cancel context.CancelFunc, downloadCh chan EVMBlock, firstReorgedBlock uint64, -) { +func (d *EVMDriver) handleReorg(ctx context.Context, cancel context.CancelFunc, firstReorgedBlock uint64) { // stop downloader cancel() - _, ok := <-downloadCh - for ok { - _, ok = <-downloadCh - } + // handle reorg attempts := 0 for { diff --git a/sync/evmdriver_test.go b/sync/evmdriver_test.go index 907dac28..c17370e1 100644 --- a/sync/evmdriver_test.go +++ b/sync/evmdriver_test.go @@ -198,36 +198,19 @@ func TestHandleReorg(t *testing.T) { // happy path _, cancel := context.WithCancel(ctx) - downloadCh := make(chan EVMBlock) firstReorgedBlock := uint64(5) pm.On("Reorg", ctx, firstReorgedBlock).Return(nil) - go driver.handleReorg(ctx, cancel, downloadCh, firstReorgedBlock) - close(downloadCh) + go driver.handleReorg(ctx, cancel, firstReorgedBlock) done := <-reorgProcessed require.True(t, done) - // download ch sends some garbage - _, cancel = context.WithCancel(ctx) - downloadCh = make(chan EVMBlock) - firstReorgedBlock = uint64(6) - pm.On("Reorg", ctx, firstReorgedBlock).Return(nil) - go driver.handleReorg(ctx, cancel, downloadCh, firstReorgedBlock) - downloadCh <- EVMBlock{} - downloadCh <- EVMBlock{} - downloadCh <- EVMBlock{} - close(downloadCh) - done = <-reorgProcessed - require.True(t, done) - // processor fails 2 times _, cancel = context.WithCancel(ctx) - downloadCh = make(chan EVMBlock) firstReorgedBlock = uint64(7) pm.On("Reorg", ctx, firstReorgedBlock).Return(errors.New("foo")).Once() pm.On("Reorg", ctx, firstReorgedBlock).Return(errors.New("foo")).Once() pm.On("Reorg", ctx, firstReorgedBlock).Return(nil).Once() - go driver.handleReorg(ctx, cancel, downloadCh, firstReorgedBlock) - close(downloadCh) + go driver.handleReorg(ctx, cancel, firstReorgedBlock) done = <-reorgProcessed require.True(t, done) } diff --git a/sync/mock_downloader_test.go b/sync/mock_downloader_test.go index c965efb6..f28045b5 100644 --- 
a/sync/mock_downloader_test.go +++ b/sync/mock_downloader_test.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.40.1. DO NOT EDIT. +// Code generated by mockery v2.45.0. DO NOT EDIT. package sync @@ -20,7 +20,7 @@ func (_m *EVMDownloaderMock) Download(ctx context.Context, fromBlock uint64, dow } // GetBlockHeader provides a mock function with given fields: ctx, blockNum -func (_m *EVMDownloaderMock) GetBlockHeader(ctx context.Context, blockNum uint64) EVMBlockHeader { +func (_m *EVMDownloaderMock) GetBlockHeader(ctx context.Context, blockNum uint64) (EVMBlockHeader, bool) { ret := _m.Called(ctx, blockNum) if len(ret) == 0 { @@ -28,13 +28,23 @@ func (_m *EVMDownloaderMock) GetBlockHeader(ctx context.Context, blockNum uint64 } var r0 EVMBlockHeader + var r1 bool + if rf, ok := ret.Get(0).(func(context.Context, uint64) (EVMBlockHeader, bool)); ok { + return rf(ctx, blockNum) + } if rf, ok := ret.Get(0).(func(context.Context, uint64) EVMBlockHeader); ok { r0 = rf(ctx, blockNum) } else { r0 = ret.Get(0).(EVMBlockHeader) } - return r0 + if rf, ok := ret.Get(1).(func(context.Context, uint64) bool); ok { + r1 = rf(ctx, blockNum) + } else { + r1 = ret.Get(1).(bool) + } + + return r0, r1 } // GetEventsByBlockRange provides a mock function with given fields: ctx, fromBlock, toBlock diff --git a/test/Makefile b/test/Makefile index 0c50ec35..0864b8d2 100644 --- a/test/Makefile +++ b/test/Makefile @@ -21,6 +21,11 @@ generate-mocks-da: ## Generates mocks for dataavailability, using mockery tool rm -Rf ../dataavailability/mocks_da export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --all --case snake --dir ../dataavailability --output ../dataavailability/mocks_da --outpkg mocks_da ${COMMON_MOCKERY_PARAMS} +.PHONY: generate-mocks-rpc +generate-mocks-rpc: ## Generates mocks for rpc, using mockery tool + rm -Rf ../rpc/mocks + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --all --case snake --dir ../rpc --output ../rpc/mocks --outpkg mocks 
${COMMON_MOCKERY_PARAMS} + .PHONY: test-e2e-elderberry-validium test-e2e-elderberry-validium: stop ## Runs e2e tests checking elderberry/validium diff --git a/test/access-list-e2e.bats b/test/access-list-e2e.bats index c47b004a..83947c03 100644 --- a/test/access-list-e2e.bats +++ b/test/access-list-e2e.bats @@ -3,14 +3,11 @@ setup() { load 'helpers/common' _common_setup - readonly enclave=${ENCLAVE:-cdk-v1} - readonly sequencer=${KURTOSIS_NODE:-cdk-erigon-sequencer-001} - readonly node=${KURTOSIS_NODE:-cdk-erigon-node-001} - readonly rpc_url=${RPC_URL:-$(kurtosis port print "$enclave" "$node" http-rpc)} - readonly key=${SENDER_key:-"12d7de8621a77640c9241b2595ba78ce443d05e94090365ab3bb5e19df82c625"} + readonly erigon_sequencer_node=${KURTOSIS_ERIGON_SEQUENCER:-cdk-erigon-sequencer-001} + readonly kurtosis_sequencer_wrapper=${KURTOSIS_SEQUENCER_WRAPPER:-"kurtosis service exec $enclave $erigon_sequencer_node"} + readonly key=${SENDER_PRIVATE_KEY:-"12d7de8621a77640c9241b2595ba78ce443d05e94090365ab3bb5e19df82c625"} readonly receiver=${RECEIVER:-"0x85dA99c8a7C2C95964c8EfD687E95E632Fc533D6"} readonly data_dir=${ACL_DATA_DIR:-"/home/erigon/data/dynamic-kurtosis-sequencer/txpool/acls"} - readonly kurtosis_sequencer_wrapper=${KURTOSIS_WRAPPER:-"kurtosis service exec $enclave $sequencer"} } teardown() { @@ -36,7 +33,7 @@ set_acl_mode() { @test "Test Block List - Sending regular transaction when address not in block list" { local value="10ether" run set_acl_mode "blocklist" - run sendTx $key $receiver $value + run send_tx $l2_rpc_url $key $receiver $value assert_success assert_output --regexp "Transaction successful \(transaction hash: 0x[a-fA-F0-9]{64}\)" @@ -45,7 +42,7 @@ set_acl_mode() { @test "Test Block List - Sending contracts deploy transaction when address not in block list" { local contract_artifact="./contracts/erc20mock/ERC20Mock.json" run set_acl_mode "blocklist" - run deployContract $key $contract_artifact + run deploy_contract $l2_rpc_url $key $contract_artifact 
assert_success @@ -59,7 +56,7 @@ set_acl_mode() { run set_acl_mode "blocklist" run add_to_access_list "blocklist" "sendTx" - run sendTx $key $receiver $value + run send_tx $l2_rpc_url $key $receiver $value assert_failure assert_output --partial "sender disallowed to send tx by ACL policy" @@ -70,7 +67,7 @@ set_acl_mode() { run set_acl_mode "blocklist" run add_to_access_list "blocklist" "deploy" - run deployContract $key $contract_artifact + run deploy_contract $l2_rpc_url $key $contract_artifact assert_failure assert_output --partial "sender disallowed to deploy contract by ACL policy" @@ -80,7 +77,7 @@ set_acl_mode() { local value="10ether" run set_acl_mode "allowlist" - run sendTx $key $receiver $value + run send_tx $l2_rpc_url $key $receiver $value assert_failure assert_output --partial "sender disallowed to send tx by ACL policy" @@ -90,7 +87,7 @@ set_acl_mode() { local contract_artifact="./contracts/erc20mock/ERC20Mock.json" run set_acl_mode "allowlist" - run deployContract $key $contract_artifact + run deploy_contract $l2_rpc_url $key $contract_artifact assert_failure assert_output --partial "sender disallowed to deploy contract by ACL policy" @@ -99,10 +96,10 @@ set_acl_mode() { @test "Test Allow List - Sending regular transaction when address is in allow list" { local value="10ether" - run set_acl_mode "allowlist" - run add_to_access_list "allowlist" "sendTx" - run sendTx $key $receiver $value - + run set_acl_mode "allowlist" + run add_to_access_list "allowlist" "sendTx" + run send_tx $l2_rpc_url $key $receiver $value + assert_success assert_output --regexp "Transaction successful \(transaction hash: 0x[a-fA-F0-9]{64}\)" } @@ -110,9 +107,9 @@ set_acl_mode() { @test "Test Allow List - Sending contracts deploy transaction when address is in allow list" { local contract_artifact="./contracts/erc20mock/ERC20Mock.json" - run set_acl_mode "allowlist" - run add_to_access_list "allowlist" "deploy" - run deployContract $key $contract_artifact + run set_acl_mode 
"allowlist" + run add_to_access_list "allowlist" "deploy" + run deploy_contract $l2_rpc_url $key $contract_artifact assert_success diff --git a/test/basic-e2e.bats b/test/basic-e2e.bats index cbd845f5..1024ac4a 100644 --- a/test/basic-e2e.bats +++ b/test/basic-e2e.bats @@ -3,47 +3,193 @@ setup() { load 'helpers/common' _common_setup - readonly enclave=${ENCLAVE:-cdk-v1} - readonly node=${KURTOSIS_NODE:-cdk-erigon-node-001} - readonly rpc_url=${RPC_URL:-$(kurtosis port print "$enclave" "$node" http-rpc)} - readonly private_key=${SENDER_PRIVATE_KEY:-"12d7de8621a77640c9241b2595ba78ce443d05e94090365ab3bb5e19df82c625"} + readonly sender_private_key=${SENDER_PRIVATE_KEY:-"12d7de8621a77640c9241b2595ba78ce443d05e94090365ab3bb5e19df82c625"} readonly receiver=${RECEIVER:-"0x85dA99c8a7C2C95964c8EfD687E95E632Fc533D6"} } @test "Send EOA transaction" { + local sender_addr=$(cast wallet address --private-key "$sender_private_key") + local initial_nonce=$(cast nonce "$sender_addr" --rpc-url "$l2_rpc_url") || { + echo "Failed to retrieve nonce for sender: $sender_addr using RPC URL: $l2_rpc_url" + return 1 + } local value="10ether" - run sendTx "$private_key" "$receiver" "$value" + # case 1: Transaction successful sender has sufficient balance + run send_tx "$l2_rpc_url" "$sender_private_key" "$receiver" "$value" assert_success assert_output --regexp "Transaction successful \(transaction hash: 0x[a-fA-F0-9]{64}\)" + + # case 2: Transaction rejected as sender attempts to transfer more than it has in its wallet. 
+ # Transaction will fail pre-validation check on the node and will be dropped subsequently from the pool + # without recording it on the chain and hence nonce will not change + local sender_balance=$(cast balance "$sender_addr" --ether --rpc-url "$l2_rpc_url") || { + echo "Failed to retrieve balance for sender: $sender_addr using RPC URL: $l2_rpc_url" + return 1 + } + local excessive_value=$(echo "$sender_balance + 1" | bc)"ether" + run send_tx "$l2_rpc_url" "$sender_private_key" "$receiver" "$excessive_value" + assert_failure + + # Check whether the sender's nonce was updated correctly + local final_nonce=$(cast nonce "$sender_addr" --rpc-url "$l2_rpc_url") || { + echo "Failed to retrieve nonce for sender: $sender_addr using RPC URL: $l2_rpc_url" + return 1 + } + assert_equal "$final_nonce" "$(echo "$initial_nonce + 1" | bc)" } -@test "Deploy ERC20Mock contract" { +@test "Test ERC20Mock contract" { local contract_artifact="./contracts/erc20mock/ERC20Mock.json" + wallet_A_output=$(cast wallet new) + address_A=$(echo "$wallet_A_output" | grep "Address" | awk '{print $2}') + address_A_private_key=$(echo "$wallet_A_output" | grep "Private key" | awk '{print $3}') + address_B=$(cast wallet new | grep "Address" | awk '{print $2}') # Deploy ERC20Mock - run deployContract "$private_key" "$contract_artifact" + run deploy_contract "$l2_rpc_url" "$sender_private_key" "$contract_artifact" assert_success contract_addr=$(echo "$output" | tail -n 1) # Mint ERC20 tokens - local mintFnSig="function mint(address receiver, uint256 amount)" local amount="5" - run sendTx "$private_key" "$contract_addr" "$mintFnSig" "$receiver" "$amount" + run send_tx "$l2_rpc_url" "$sender_private_key" "$contract_addr" "$mint_fn_sig" "$address_A" "$amount" assert_success assert_output --regexp "Transaction successful \(transaction hash: 0x[a-fA-F0-9]{64}\)" - # Assert that balance is correct - local balanceOfFnSig="function balanceOf(address) (uint256)" - run queryContract "$contract_addr" 
"$balanceOfFnSig" "$receiver" + ## Case 2: Insufficient gas scenario => Transactions fails + # nonce would not increase since transaction fails at the node's pre-validation check + # Get bytecode from the contract artifact + local bytecode=$(jq -r .bytecode "$contract_artifact") + if [[ -z "$bytecode" || "$bytecode" == "null" ]]; then + echo "Error: Failed to read bytecode from $contract_artifact" + return 1 + fi + + # Estimate gas, gas price and gas cost + local gas_units=$(cast estimate --rpc-url "$l2_rpc_url" --create "$bytecode") + gas_units=$(echo "scale=0; $gas_units / 2" | bc) + local gas_price=$(cast gas-price --rpc-url "$l2_rpc_url") + local value=$(echo "$gas_units * $gas_price" | bc) + local value_ether=$(cast to-unit "$value" ether)"ether" + + # Transfer only half amount of tokens needed for contract deployment fees + cast_output=$(cast send --rpc-url "$l2_rpc_url" --private-key "$sender_private_key" "$address_A" --value "$value_ether" --legacy 2>&1) + if [[ $? -ne 0 ]]; then + echo "Error: Failed to send transaction. Output:" + echo "$cast_output" + return 1 + fi + + # Fetch initial nonce for address_A + local address_A_initial_nonce=$(cast nonce "$address_A" --rpc-url "$l2_rpc_url") || return 1 + # Attempt to deploy contract with insufficient gas + run deploy_contract "$l2_rpc_url" "$address_A_private_key" "$contract_artifact" + assert_failure + + ## Case 3: Transaction should fail as address_A tries to transfer more tokens than it has + # nonce would not increase + # Transfer funds for gas fees to address_A + value_ether="4ether" + cast_output=$(cast send --rpc-url "$l2_rpc_url" --private-key "$sender_private_key" "$address_A" --value "$value_ether" --legacy 2>&1) + if [[ $? -ne 0 ]]; then + echo "Error: Failed to send transaction. 
Output:" + echo "$cast_output" + return 1 + fi + + # Fetch balance of address_A to simulate excessive transfer + run query_contract "$l2_rpc_url" "$contract_addr" "$balance_of_fn_sig" "$address_A" + assert_success + local address_A_Balance=$(echo "$output" | tail -n 1) + address_A_Balance=$(echo "$address_A_Balance" | xargs) + + # Set excessive amount for transfer + local excessive_amount=$(echo "$address_A_Balance + 1" | bc) + + # Attempt transfer of excessive amount from address_A to address_B + local tranferFnSig="transfer(address,uint256)" + run send_tx "$l2_rpc_url" "$address_A_private_key" "$contract_addr" "$tranferFnSig" "$address_B" "$excessive_amount" + assert_failure + + # Verify balance of address_A after failed transaction + run query_contract "$l2_rpc_url" "$contract_addr" "$balance_of_fn_sig" "$address_A" + assert_success + address_A_BalanceAfterFailedTx=$(echo "$output" | tail -n 1) + address_A_BalanceAfterFailedTx=$(echo "$address_A_BalanceAfterFailedTx" | xargs) + + # Ensure balance is unchanged + assert_equal "$address_A_BalanceAfterFailedTx" "$address_A_Balance" + + # Verify balance of address_B is still zero + run query_contract "$l2_rpc_url" "$contract_addr" "$balance_of_fn_sig" "$address_B" assert_success - receiverBalance=$(echo "$output" | tail -n 1) + local address_B_Balance=$(echo "$output" | tail -n 1) + address_B_Balance=$(echo "$address_B_Balance" | xargs) - # Convert balance and amount to a standard format for comparison (e.g., remove any leading/trailing whitespace) - receiverBalance=$(echo "$receiverBalance" | xargs) - amount=$(echo "$amount" | xargs) + assert_equal "$address_B_Balance" "0" - # Check if the balance is equal to the amount - assert_equal "$receiverBalance" "$amount" + # Nonce should not increase + local address_A_final_nonce=$(cast nonce "$address_A" --rpc-url "$l2_rpc_url") || { + echo "Failed to retrieve nonce for sender: $address_A using RPC URL: $l2_rpc_url" + return 1 + } + assert_equal "$address_A_final_nonce" 
"$address_A_initial_nonce" } + + +@test "Deploy and test UniswapV3 contract" { + # Generate new key pair + wallet_A_output=$(cast wallet new) + address_A=$(echo "$wallet_A_output" | grep "Address" | awk '{print $2}') + address_A_private_key=$(echo "$wallet_A_output" | grep "Private key" | awk '{print $3}') + + # Transfer funds for gas + local value_ether="50ether" + cast_output=$(cast send --rpc-url "$l2_rpc_url" --private-key "$sender_private_key" "$address_A" --value "$value_ether" --legacy 2>&1) + if [[ $? -ne 0 ]]; then + echo "Error: Failed to send transaction. Output:" + echo "$cast_output" + return 1 + fi + + run polycli loadtest uniswapv3 --legacy -v 600 --rpc-url $l2_rpc_url --private-key $address_A_private_key + assert_success + + # Remove ANSI escape codes from the output + output=$(echo "$output" | sed -r "s/\x1B\[[0-9;]*[mGKH]//g") + + # Check if all required Uniswap contracts were deployed + assert_output --regexp "Contract deployed address=0x[a-fA-F0-9]{40} name=WETH9" + assert_output --regexp "Contract deployed address=0x[a-fA-F0-9]{40} name=UniswapV3Factory" + assert_output --regexp "Contract deployed address=0x[a-fA-F0-9]{40} name=UniswapInterfaceMulticall" + assert_output --regexp "Contract deployed address=0x[a-fA-F0-9]{40} name=ProxyAdmin" + assert_output --regexp "Contract deployed address=0x[a-fA-F0-9]{40} name=TickLens" + assert_output --regexp "Contract deployed address=0x[a-fA-F0-9]{40} name=NFTDescriptor" + assert_output --regexp "Contract deployed address=0x[a-fA-F0-9]{40} name=NonfungibleTokenPositionDescriptor" + assert_output --regexp "Contract deployed address=0x[a-fA-F0-9]{40} name=TransparentUpgradeableProxy" + assert_output --regexp "Contract deployed address=0x[a-fA-F0-9]{40} name=NonfungiblePositionManager" + assert_output --regexp "Contract deployed address=0x[a-fA-F0-9]{40} name=V3Migrator" + assert_output --regexp "Contract deployed address=0x[a-fA-F0-9]{40} name=UniswapV3Staker" + assert_output --regexp "Contract deployed 
address=0x[a-fA-F0-9]{40} name=QuoterV2" + assert_output --regexp "Contract deployed address=0x[a-fA-F0-9]{40} name=SwapRouter02" + + # Check if ERC20 tokens were minted + assert_output --regexp "Minted tokens amount=[0-9]+ recipient=0x[a-fA-F0-9]{40} token=SwapperA" + assert_output --regexp "Minted tokens amount=[0-9]+ recipient=0x[a-fA-F0-9]{40} token=SwapperB" + + # Check if liquidity pool was created and initialized + assert_output --regexp "Pool created and initialized fees=[0-9]+" + + # Check if liquidity was provided to the pool + assert_output --regexp "Liquidity provided to the pool liquidity=[0-9]+" + + # Check if transaction got executed successfully + assert_output --regexp "Starting main load test loop currentNonce=[0-9]+" + assert_output --regexp "Finished main load test loop lastNonce=[0-9]+ startNonce=[0-9]+" + assert_output --regexp "Got final block number currentNonce=[0-9]+ final block number=[0-9]+" + assert_output --regexp "Num errors numErrors=0" + assert_output --regexp "Finished" +} + diff --git a/test/bridge-e2e.bats b/test/bridge-e2e.bats index 98443b3b..842d87e9 100644 --- a/test/bridge-e2e.bats +++ b/test/bridge-e2e.bats @@ -1,43 +1,45 @@ setup() { load 'helpers/common-setup' _common_setup + load 'helpers/common' + load 'helpers/lxly-bridge-test' readonly data_availability_mode=${DATA_AVAILABILITY_MODE:-"cdk-validium"} $PROJECT_ROOT/test/scripts/kurtosis_prepare_params_yml.sh ../kurtosis-cdk $data_availability_mode [ $? -ne 0 ] && echo "Error preparing params.yml" && exit 1 - # Check if the genesis file is already downloaded - if [ ! -f "./tmp/cdk/genesis/genesis.json" ]; then - mkdir -p ./tmp/cdk - kurtosis files download cdk-v1 genesis ./tmp/cdk/genesis - [ $? 
-ne 0 ] && echo "Error downloading genesis file" && exit 1 + if [ -z "$BRIDGE_ADDRESS" ]; then + local combined_json_file="/opt/zkevm/combined.json" + echo "BRIDGE_ADDRESS env variable is not provided, resolving the bridge address from the Kurtosis CDK '$combined_json_file'" >&3 + + # Fetching the combined JSON output and filtering to get polygonZkEVMBridgeAddress + combined_json_output=$($contracts_service_wrapper "cat $combined_json_file" | tail -n +2) + bridge_default_address=$(echo "$combined_json_output" | jq -r .polygonZkEVMBridgeAddress) + BRIDGE_ADDRESS=$bridge_default_address fi - # Download the genesis file - readonly bridge_default_address=$(jq -r ".genesis[] | select(.contractName == \"PolygonZkEVMBridge proxy\") | .address" ./tmp/cdk/genesis/genesis.json) - - readonly skey=${SENDER_PRIVATE_KEY:-"12d7de8621a77640c9241b2595ba78ce443d05e94090365ab3bb5e19df82c625"} - readonly destination_net=${DESTINATION_NET:-"1"} - readonly destination_addr=${DESTINATION_ADDRESS:-"0x0bb7AA0b4FdC2D2862c088424260e99ed6299148"} - readonly ether_value=${ETHER_VALUE:-"0.0200000054"} - readonly token_addr=${TOKEN_ADDRESS:-"0x0000000000000000000000000000000000000000"} + + echo "Bridge address=$BRIDGE_ADDRESS" >&3 + + readonly sender_private_key=${SENDER_PRIVATE_KEY:-"12d7de8621a77640c9241b2595ba78ce443d05e94090365ab3bb5e19df82c625"} + destination_net=${DESTINATION_NET:-"1"} + destination_addr=${DESTINATION_ADDRESS:-"0x0bb7AA0b4FdC2D2862c088424260e99ed6299148"} + ether_value=${ETHER_VALUE:-"0.0200000054"} + amount=$(cast to-wei $ether_value ether) + token_addr=${TOKEN_ADDRESS:-"0x0000000000000000000000000000000000000000"} readonly is_forced=${IS_FORCED:-"true"} - readonly bridge_addr=${BRIDGE_ADDRESS:-$bridge_default_address} + readonly bridge_addr=$BRIDGE_ADDRESS readonly meta_bytes=${META_BYTES:-"0x"} - readonly l1_rpc_url=${L1_ETH_RPC_URL:-"$(kurtosis port print cdk-v1 el-1-geth-lighthouse rpc)"} - readonly l2_rpc_url=${L2_ETH_RPC_URL:-"$(kurtosis port print cdk-v1 
cdk-erigon-node-001 http-rpc)"} - readonly bridge_api_url=${BRIDGE_API_URL:-"$(kurtosis port print cdk-v1 zkevm-bridge-service-001 rpc)"} + readonly l1_rpc_url=${L1_ETH_RPC_URL:-"$(kurtosis port print $enclave el-1-geth-lighthouse rpc)"} + readonly bridge_api_url=${BRIDGE_API_URL:-"$(kurtosis port print $enclave zkevm-bridge-service-001 rpc)"} readonly dry_run=${DRY_RUN:-"false"} - - readonly amount=$(cast to-wei $ether_value ether) - readonly current_addr="$(cast wallet address --private-key $skey)" - readonly l1_rpc_network_id=$(cast call --rpc-url $l1_rpc_url $bridge_addr 'networkID()(uint32)') - readonly l2_rpc_network_id=$(cast call --rpc-url $l2_rpc_url $bridge_addr 'networkID()(uint32)') + readonly sender_addr="$(cast wallet address --private-key $sender_private_key)" + readonly l1_rpc_network_id=$(cast call --rpc-url $l1_rpc_url $bridge_addr 'networkID() (uint32)') + readonly l2_rpc_network_id=$(cast call --rpc-url $l2_rpc_url $bridge_addr 'networkID() (uint32)') } @test "Run deposit" { - load 'helpers/lxly-bridge-test' echo "Running LxLy deposit" >&3 run deposit assert_success @@ -45,27 +47,82 @@ setup() { } @test "Run claim" { - load 'helpers/lxly-bridge-test' - echo "Running LxLy claim" + echo "Running LxLy claim" >&3 - # The script timeout (in seconds). timeout="120" - start_time=$(date +%s) - end_time=$((start_time + timeout)) - - while true; do - current_time=$(date +%s) - if ((current_time > end_time)); then - echo "[$(date '+%Y-%m-%d %H:%M:%S')] ❌ Exiting... Timeout reached!" 
- exit 1 - fi - - run claim - if [ $status -eq 0 ]; then - break - fi - sleep 10 - done - + claim_frequency="10" + run wait_for_claim "$timeout" "$claim_frequency" + assert_success +} + +@test "Custom native token transfer" { + # Use GAS_TOKEN_ADDR if provided, otherwise retrieve from file + if [[ -n "$GAS_TOKEN_ADDR" ]]; then + echo "Using provided GAS_TOKEN_ADDR: $GAS_TOKEN_ADDR" >&3 + local gas_token_addr="$GAS_TOKEN_ADDR" + else + echo "GAS_TOKEN_ADDR not provided, retrieving from rollup parameters file." >&3 + readonly rollup_params_file=/opt/zkevm/create_rollup_parameters.json + run bash -c "$contracts_service_wrapper 'cat $rollup_params_file' | tail -n +2 | jq -r '.gasTokenAddress'" + assert_success + assert_output --regexp "0x[a-fA-F0-9]{40}" + local gas_token_addr=$output + fi + + echo "Gas token addr $gas_token_addr, L1 RPC: $l1_rpc_url" >&3 + + # Set receiver address and query for its initial native token balance on the L2 + receiver=${RECEIVER:-"0x85dA99c8a7C2C95964c8EfD687E95E632Fc533D6"} + local initial_receiver_balance=$(cast balance --ether "$receiver" --rpc-url "$l2_rpc_url") + echo "Initial receiver balance of native token on L2 $initial_receiver_balance" >&3 + + # Query for initial sender balance + run query_contract "$l1_rpc_url" "$gas_token_addr" "$balance_of_fn_sig" "$sender_addr" + assert_success + local gas_token_init_sender_balance=$(echo "$output" | tail -n 1 | awk '{print $1}') + echo "Initial sender balance $gas_token_init_sender_balance" of gas token on L1 >&3 + + # Mint gas token on L1 + local tokens_amount="0.1ether" + local wei_amount=$(cast --to-unit $tokens_amount wei) + local minter_key=${MINTER_KEY:-"42b6e34dc21598a807dc19d7784c71b2a7a01f6480dc6f58258f78e539f1a1fa"} + run mint_erc20_tokens "$l1_rpc_url" "$gas_token_addr" "$minter_key" "$sender_addr" "$tokens_amount" + assert_success + + # Assert that balance of gas token (on the L1) is correct + run query_contract "$l1_rpc_url" "$gas_token_addr" "$balance_of_fn_sig" 
"$sender_addr" + assert_success + local gas_token_final_sender_balance=$(echo "$output" | + tail -n 1 | + awk '{print $1}') + local expected_balance=$(echo "$gas_token_init_sender_balance + $wei_amount" | + bc | + awk '{print $1}') + + echo "Sender balance ($sender_addr) (gas token L1): $gas_token_final_sender_balance" >&3 + assert_equal "$gas_token_final_sender_balance" "$expected_balance" + + # Send approve transaction to the gas token on L1 + deposit_ether_value="0.1ether" + run send_tx "$l1_rpc_url" "$sender_private_key" "$gas_token_addr" "$approve_fn_sig" "$bridge_addr" "$deposit_ether_value" + assert_success + assert_output --regexp "Transaction successful \(transaction hash: 0x[a-fA-F0-9]{64}\)" + + # Deposit + token_addr=$gas_token_addr + destination_addr=$receiver + destination_net=$l2_rpc_network_id + amount=$wei_amount + run deposit + assert_success + + # Claim deposits (settle them on the L2) + timeout="120" + claim_frequency="10" + run wait_for_claim "$timeout" "$claim_frequency" + assert_success + + # Validate that the native token of receiver on L2 has increased by the bridge tokens amount + run verify_native_token_balance "$l2_rpc_url" "$receiver" "$initial_receiver_balance" "$tokens_amount" assert_success } diff --git a/test/helpers/common-setup.bash b/test/helpers/common-setup.bash index b7691366..415f211d 100644 --- a/test/helpers/common-setup.bash +++ b/test/helpers/common-setup.bash @@ -3,11 +3,24 @@ _common_setup() { bats_load_library 'bats-support' bats_load_library 'bats-assert' - + # get the containing directory of this file # use $BATS_TEST_FILENAME instead of ${BASH_SOURCE[0]} or $0, # as those will point to the bats executable's location or the preprocessed file respectively - PROJECT_ROOT="$( cd "$( dirname "$BATS_TEST_FILENAME" )/.." >/dev/null 2>&1 && pwd )" + PROJECT_ROOT="$(cd "$(dirname "$BATS_TEST_FILENAME")/.." 
>/dev/null 2>&1 && pwd)" # make executables in src/ visible to PATH PATH="$PROJECT_ROOT/src:$PATH" -} + + # ERC20 contracts function signatures + readonly mint_fn_sig="function mint(address,uint256)" + readonly balance_of_fn_sig="function balanceOf(address) (uint256)" + readonly approve_fn_sig="function approve(address,uint256)" + + + # Kurtosis enclave and service identifiers + readonly enclave=${KURTOSIS_ENCLAVE:-cdk-v1} + readonly contracts_container=${KURTOSIS_CONTRACTS:-contracts-001} + readonly contracts_service_wrapper=${KURTOSIS_CONTRACTS_WRAPPER:-"kurtosis service exec $enclave $contracts_container"} + readonly erigon_rpc_node=${KURTOSIS_ERIGON_RPC:-cdk-erigon-node-001} + readonly l2_rpc_url=${L2_ETH_RPC_URL:-"$(kurtosis port print $enclave $erigon_rpc_node http-rpc)"} +} \ No newline at end of file diff --git a/test/helpers/common.bash b/test/helpers/common.bash index 15057d17..821a1f59 100644 --- a/test/helpers/common.bash +++ b/test/helpers/common.bash @@ -1,12 +1,13 @@ #!/usr/bin/env bash -function deployContract() { - local private_key="$1" - local contract_artifact="$2" +function deploy_contract() { + local rpc_url="$1" + local private_key="$2" + local contract_artifact="$3" # Check if rpc_url is available if [[ -z "$rpc_url" ]]; then - echo "Error: rpc_url environment variable is not set." + echo "Error: rpc_url parameter is not set." return 1 fi @@ -16,13 +17,13 @@ function deployContract() { fi # Get the sender address - local senderAddr=$(cast wallet address "$private_key") + local sender=$(cast wallet address "$private_key") if [[ $? -ne 0 ]]; then echo "Error: Failed to retrieve sender address." 
return 1 fi - echo "Attempting to deploy contract artifact '$contract_artifact' to $rpc_url (sender: $senderAddr)" >&3 + echo "Attempting to deploy contract artifact '$contract_artifact' to $rpc_url (sender: $sender)" >&3 # Get bytecode from the contract artifact local bytecode=$(jq -r .bytecode "$contract_artifact") @@ -69,98 +70,128 @@ function deployContract() { return 0 } -function sendTx() { - # Check if at least 3 arguments are provided - if [[ $# -lt 3 ]]; then - echo "Usage: sendTx [ ...]" +function send_tx() { + # Check if at least 4 arguments are provided + if [[ $# -lt 4 ]]; then + echo "Usage: send_tx [ ...]" return 1 fi - local private_key="$1" # Sender private key - local account_addr="$2" # Receiver address - local value_or_function_sig="$3" # Value or function signature + local rpc_url="$1" # RPC URL + local private_key="$2" # Sender private key + local receiver_addr="$3" # Receiver address + local value_or_function_sig="$4" # Value or function signature # Error handling: Ensure the receiver is a valid Ethereum address - if [[ ! "$account_addr" =~ ^0x[a-fA-F0-9]{40}$ ]]; then - echo "Error: Invalid receiver address '$account_addr'." + if [[ ! "$receiver_addr" =~ ^0x[a-fA-F0-9]{40}$ ]]; then + echo "Error: Invalid receiver address '$receiver_addr'." return 1 fi - shift 3 # Shift the first 3 arguments (private_key, account_addr, value_or_function_sig) + shift 4 # Shift the first 4 arguments (rpc_url, private_key, receiver_addr, value_or_function_sig) + local params=("$@") # Collect all remaining arguments as function parameters - local senderAddr - senderAddr=$(cast wallet address "$private_key") - if [[ $? -ne 0 ]]; then - echo "Error: Failed to extract the sender address for $private_key" + # Get sender address from private key + local sender + sender=$(cast wallet address "$private_key") || { + echo "Error: Failed to extract the sender address." 
return 1 + } + + # Check if the value_or_function_sig is a numeric value (Ether to be transferred) + if [[ "$value_or_function_sig" =~ ^[0-9]+(\.[0-9]+)?(ether)?$ ]]; then + # Case: Ether transfer (EOA transaction) + # Get initial ether balances of sender and receiver + local sender_addr=$(cast wallet address --private-key "$private_key") + local sender_initial_balance receiver_initial_balance + sender_initial_balance=$(cast balance "$sender_addr" --ether --rpc-url "$rpc_url") || return 1 + receiver_initial_balance=$(cast balance "$receiver_addr" --ether --rpc-url "$rpc_url") || return 1 + + send_eoa_transaction "$private_key" "$receiver_addr" "$value_or_function_sig" "$sender_addr" "$sender_initial_balance" "$receiver_initial_balance" + else + # Case: Smart contract interaction (contract interaction with function signature and parameters) + send_smart_contract_transaction "$private_key" "$receiver_addr" "$value_or_function_sig" "${params[@]}" fi +} - # Check if the first remaining argument is a numeric value (Ether to be transferred) - if [[ "$value_or_function_sig" =~ ^[0-9]+(ether)?$ ]]; then - # Case: EOA transaction (Ether transfer) - echo "Sending EOA transaction (RPC URL: $rpc_url, sender: $senderAddr) to: $account_addr " \ - "with value: $value_or_function_sig" >&3 - - cast_output=$(cast send --rpc-url "$rpc_url" \ - --private-key "$private_key" \ - "$account_addr" --value "$value_or_function_sig" \ - --legacy \ - 2>&1) - else - # Case: Smart contract transaction (contract interaction with function signature and parameters) - local params=("$@") # Collect all remaining arguments as function parameters +function send_eoa_transaction() { + local private_key="$1" + local receiver_addr="$2" + local value="$3" + local sender="$4" + local sender_initial_balance="$5" + local receiver_initial_balance="$6" - echo "Function signature: '$value_or_function_sig'" >&3 + echo "Sending EOA transaction (from: $sender, rpc url: $rpc_url) to: $receiver_addr with value: 
$value" >&3 - # Verify if the function signature starts with "function" - if [[ ! "$value_or_function_sig" =~ ^function\ .+\(.+\)$ ]]; then - echo "Error: Invalid function signature format '$value_or_function_sig'." - return 1 - fi + # Send transaction via cast + local cast_output tx_hash + cast_output=$(cast send --rpc-url "$rpc_url" --private-key "$private_key" "$receiver_addr" --value "$value" --legacy 2>&1) + if [[ $? -ne 0 ]]; then + echo "Error: Failed to send transaction. Output:" + echo "$cast_output" + return 1 + fi - echo "Sending smart contract transaction (RPC URL: $rpc_url, sender: $senderAddr) to $account_addr" \ - "with function signature: '$value_or_function_sig' and params: ${params[*]}" >&3 + tx_hash=$(extract_tx_hash "$cast_output") + [[ -z "$tx_hash" ]] && { + echo "Error: Failed to extract transaction hash." + return 1 + } - # Send the smart contract interaction using cast - cast_output=$(cast send --rpc-url "$rpc_url" \ - --private-key "$private_key" \ - "$account_addr" "$value_or_function_sig" "${params[@]}" \ - --legacy \ - 2>&1) + check_balances "$sender" "$receiver_addr" "$value" "$tx_hash" "$sender_initial_balance" "$receiver_initial_balance" + if [[ $? -ne 0 ]]; then + echo "Error: Balance not updated correctly." + return 1 fi - # Check if the transaction was successful + echo "Transaction successful (transaction hash: $tx_hash)" +} + +function send_smart_contract_transaction() { + local private_key="$1" + local receiver_addr="$2" + local function_sig="$3" + shift 3 + local params=("$@") + + echo "Sending smart contract transaction to $receiver_addr with function signature: '$function_sig' and params: ${params[*]}" >&3 + + # Send the smart contract interaction using cast + local cast_output tx_hash + cast_output=$(cast send "$receiver_addr" --rpc-url "$rpc_url" --private-key "$private_key" --legacy "$function_sig" "${params[@]}" 2>&1) if [[ $? -ne 0 ]]; then - echo "Error: Failed to send transaction. 
The cast send output:" + echo "Error: Failed to send transaction. Output:" echo "$cast_output" return 1 fi - # Extract the transaction hash from the output - local tx_hash=$(echo "$cast_output" | grep 'transactionHash' | awk '{print $2}' | tail -n 1) - echo "Tx hash: $tx_hash" - - if [[ -z "$tx_hash" ]]; then + tx_hash=$(extract_tx_hash "$cast_output") + [[ -z "$tx_hash" ]] && { echo "Error: Failed to extract transaction hash." return 1 - fi + } echo "Transaction successful (transaction hash: $tx_hash)" +} - return 0 +function extract_tx_hash() { + local cast_output="$1" + echo "$cast_output" | grep 'transactionHash' | awk '{print $2}' | tail -n 1 } -function queryContract() { - local addr="$1" # Contract address - local funcSignature="$2" # Function signature - shift 2 # Shift past the first two arguments +function query_contract() { + local rpc_url="$1" # RPC URL + local addr="$2" # Contract address + local funcSignature="$3" # Function signature + shift 3 # Shift past the first 3 arguments local params=("$@") # Collect remaining arguments as parameters array echo "Querying state of $addr account (RPC URL: $rpc_url) with function signature: '$funcSignature' and params: ${params[*]}" >&3 - # Check if rpc_url is available + # Check if rpc url is available if [[ -z "$rpc_url" ]]; then - echo "Error: rpc_url environment variable is not set." + echo "Error: rpc_url parameter is not provided." return 1 fi @@ -186,3 +217,110 @@ function queryContract() { return 0 } + +function check_balances() { + local sender="$1" + local receiver="$2" + local amount="$3" + local tx_hash="$4" + local sender_initial_balance="$5" + local receiver_initial_balance="$6" + + # Ethereum address regex: 0x followed by 40 hexadecimal characters + if [[ ! "$sender" =~ ^0x[a-fA-F0-9]{40}$ ]]; then + echo "Error: Invalid sender address '$sender'." + return 1 + fi + + if [[ ! "$receiver" =~ ^0x[a-fA-F0-9]{40}$ ]]; then + echo "Error: Invalid receiver address '$receiver'." 
+        return 1
+    fi
+
+    # Transaction hash regex: 0x followed by 64 hexadecimal characters
+    if [[ ! "$tx_hash" =~ ^0x[a-fA-F0-9]{64}$ ]]; then
+        echo "Error: Invalid transaction hash: $tx_hash."
+        return 1
+    fi
+
+    local sender_final_balance=$(cast balance "$sender" --ether --rpc-url "$rpc_url") || return 1
+    local tx_output=$(cast tx "$tx_hash" --rpc-url "$rpc_url")
+    # tx_output is a variable holding the command output, not a command:
+    # it must be echoed into the pipeline or gas_used/gas_price stay empty.
+    local gas_used=$(echo "$tx_output" | grep '^gas ' | awk '{print $2}')
+    local gas_price=$(echo "$tx_output" | grep '^gasPrice' | awk '{print $2}')
+    local gas_fee=$(echo "$gas_used * $gas_price" | bc)
+    local gas_fee_in_ether=$(cast to-unit "$gas_fee" ether)
+
+    # Balances were fetched with --ether, so changes are denominated in ether.
+    local sender_balance_change=$(echo "$sender_initial_balance - $sender_final_balance" | bc)
+    echo "Sender balance changed by: '$sender_balance_change' ether"
+    echo "Gas fee paid: '$gas_fee_in_ether' ether"
+
+    local receiver_final_balance=$(cast balance "$receiver" --ether --rpc-url "$rpc_url") || return 1
+    local receiver_balance_change=$(echo "$receiver_final_balance - $receiver_initial_balance" | bc)
+    echo "Receiver balance changed by: '$receiver_balance_change' ether"
+
+    # Trim 'ether' suffix from amount to get the numeric part
+    local value_in_ether=$(echo "$amount" | sed 's/ether$//')
+
+    # bc prints 1/0 for relational expressions and always exits 0, so the
+    # printed result must be compared; `if ! … | bc` never detects a mismatch.
+    if [[ "$(echo "$receiver_balance_change == $value_in_ether" | bc -l)" -ne 1 ]]; then
+        echo "Error: receiver balance updated incorrectly. Expected: $value_in_ether, Actual: $receiver_balance_change"
+        return 1
+    fi
+
+    # Calculate expected sender balance change (transferred value plus gas fee)
+    local expected_sender_change=$(echo "$value_in_ether + $gas_fee_in_ether" | bc)
+    if [[ "$(echo "$sender_balance_change == $expected_sender_change" | bc -l)" -ne 1 ]]; then
+        echo "Error: sender balance updated incorrectly.
Expected: $expected_sender_change, Actual: $sender_balance_change" + return 1 + fi +} + +function verify_native_token_balance() { + local rpc_url="$1" # RPC URL + local account="$2" # account address + local initial_balance="$3" # initial balance in Ether (decimal) + local ether_amount="$4" # amount to be added (in Ether, decimal) + + # Convert initial balance and amount to wei (no decimals) + local initial_balance_wei=$(cast --to-wei "$initial_balance") + + # Trim 'ether' from ether_amount if it exists + ether_amount=$(echo "$ether_amount" | sed 's/ether//') + local amount_wei=$(cast --to-wei "$ether_amount") + + # Get final balance in wei (after the operation) + local final_balance_wei=$(cast balance "$account" --rpc-url "$rpc_url" | awk '{print $1}') + + # Calculate expected final balance (initial_balance + amount) + local expected_final_balance_wei=$(echo "$initial_balance_wei + $amount_wei" | bc) + + # Check if final_balance matches the expected final balance + if [ "$(echo "$final_balance_wei == $expected_final_balance_wei" | bc)" -eq 1 ]; then + echo "✅ Balance verification successful: final balance is correct." + else + echo "❌ Balance verification failed: expected $expected_final_balance_wei but got $final_balance_wei." 
+ exit 1 + fi +} + +function mint_erc20_tokens() { + local rpc_url="$1" # The L1 RPC URL + local erc20_token_addr="$2" # The gas token contract address + local minter_private_key="$3" # The minter private key + local receiver="$4" # The receiver address (for minted tokens) + local tokens_amount="$5" # The amount of tokens to transfer (e.g., "0.1ether") + + # Query the erc20 token balance of the sender + run query_contract "$rpc_url" "$erc20_token_addr" "$balance_of_fn_sig" "$sender_addr" + assert_success + local erc20_token_balance=$(echo "$output" | tail -n 1) + + # Log the account's current gas token balance + echo "Initial account balance: $erc20_token_balance wei" >&3 + + # Convert tokens_amount to Wei for comparison + local wei_amount=$(cast --to-unit "$tokens_amount" wei) + + # Mint the required tokens by sending a transaction + run send_tx "$rpc_url" "$minter_private_key" "$erc20_token_addr" "$mint_fn_sig" "$receiver" "$tokens_amount" + assert_success +} diff --git a/test/helpers/lxly-bridge-test.bash b/test/helpers/lxly-bridge-test.bash index bbaf45e1..c1b43533 100644 --- a/test/helpers/lxly-bridge-test.bash +++ b/test/helpers/lxly-bridge-test.bash @@ -1,25 +1,26 @@ #!/usr/bin/env bash # Error code reference https://hackmd.io/WwahVBZERJKdfK3BbKxzQQ -function deposit () { +function deposit() { readonly deposit_sig='bridgeAsset(uint32,address,uint256,address,bool,bytes)' if [[ $token_addr == "0x0000000000000000000000000000000000000000" ]]; then - echo "Checking the current ETH balance: " >&3 - cast balance -e --rpc-url $l1_rpc_url $current_addr >&3 + echo "The ETH balance for sender "$sender_addr":" >&3 + cast balance -e --rpc-url $l1_rpc_url $sender_addr >&3 else - echo "Checking the current token balance for token at $token_addr: " >&3 - cast call --rpc-url $l1_rpc_url $token_addr 'balanceOf(address)(uint256)' $current_addr >&3 + echo "The "$token_addr" token balance for sender "$sender_addr":" >&3 + balance_wei=$(cast call --rpc-url "$l1_rpc_url" 
"$token_addr" "$balance_of_fn_sig" "$sender_addr") + echo "$(cast --from-wei "$balance_wei")" >&3 fi - echo "Attempting to deposit $amount wei to net $destination_net for token $token_addr" >&3 + echo "Attempting to deposit $amount [wei] to $destination_addr, token $token_addr (sender=$sender_addr, network id=$destination_net, rpc url=$l1_rpc_url)" >&3 if [[ $dry_run == "true" ]]; then cast calldata $deposit_sig $destination_net $destination_addr $amount $token_addr $is_forced $meta_bytes else if [[ $token_addr == "0x0000000000000000000000000000000000000000" ]]; then - cast send --legacy --private-key $skey --value $amount --rpc-url $l1_rpc_url $bridge_addr $deposit_sig $destination_net $destination_addr $amount $token_addr $is_forced $meta_bytes + cast send --legacy --private-key $sender_private_key --value $amount --rpc-url $l1_rpc_url $bridge_addr $deposit_sig $destination_net $destination_addr $amount $token_addr $is_forced $meta_bytes else - cast send --legacy --private-key $skey --rpc-url $l1_rpc_url $bridge_addr $deposit_sig $destination_net $destination_addr $amount $token_addr $is_forced $meta_bytes + cast send --legacy --private-key $sender_private_key --rpc-url $l1_rpc_url $bridge_addr $deposit_sig $destination_net $destination_addr $amount $token_addr $is_forced $meta_bytes fi fi } @@ -30,7 +31,7 @@ function claim() { readonly claimable_deposit_file=$(mktemp) echo "Getting full list of deposits" >&3 curl -s "$bridge_api_url/bridges/$destination_addr?limit=100&offset=0" | jq '.' | tee $bridge_deposit_file - + echo "Looking for claimable deposits" >&3 jq '[.deposits[] | select(.ready_for_claim == true and .claim_tx_hash == "" and .dest_net == '$destination_net')]' $bridge_deposit_file | tee $claimable_deposit_file readonly claimable_count=$(jq '. 
| length' $claimable_deposit_file) @@ -40,7 +41,7 @@ function claim() { echo "We have no claimable deposits at this time" >&3 exit 1 fi - + echo "We have $claimable_count claimable deposits on network $destination_net. Let's get this party started." >&3 readonly current_deposit=$(mktemp) readonly current_proof=$(mktemp) @@ -69,9 +70,30 @@ function claim() { cast calldata $claim_sig "$in_merkle_proof" "$in_rollup_merkle_proof" $in_global_index $in_main_exit_root $in_rollup_exit_root $in_orig_net $in_orig_addr $in_dest_net $in_dest_addr $in_amount $in_metadata cast call --rpc-url $l2_rpc_url $bridge_addr $claim_sig "$in_merkle_proof" "$in_rollup_merkle_proof" $in_global_index $in_main_exit_root $in_rollup_exit_root $in_orig_net $in_orig_addr $in_dest_net $in_dest_addr $in_amount $in_metadata else - cast send --legacy --rpc-url $l2_rpc_url --private-key $skey $bridge_addr $claim_sig "$in_merkle_proof" "$in_rollup_merkle_proof" $in_global_index $in_main_exit_root $in_rollup_exit_root $in_orig_net $in_orig_addr $in_dest_net $in_dest_addr $in_amount $in_metadata + cast send --legacy --rpc-url $l2_rpc_url --private-key $sender_private_key $bridge_addr $claim_sig "$in_merkle_proof" "$in_rollup_merkle_proof" $in_global_index $in_main_exit_root $in_rollup_exit_root $in_orig_net $in_orig_addr $in_dest_net $in_dest_addr $in_amount $in_metadata + fi + + done < <(seq 0 $((claimable_count - 1))) +} + +function wait_for_claim() { + local timeout="$1" # timeout (in seconds) + local claim_frequency="$2" # claim frequency (in seconds) + local start_time=$(date +%s) + local end_time=$((start_time + timeout)) + + while true; do + local current_time=$(date +%s) + if ((current_time > end_time)); then + echo "[$(date '+%Y-%m-%d %H:%M:%S')] ❌ Exiting... Timeout reached!" 
+ exit 1 fi + run claim + if [ $status -eq 0 ]; then + break + fi - done < <(seq 0 $((claimable_count - 1)) ) + sleep "$claim_frequency" + done } diff --git a/test/scripts/env.sh b/test/scripts/env.sh index b81c18a4..2afb2af4 100644 --- a/test/scripts/env.sh +++ b/test/scripts/env.sh @@ -1,8 +1,7 @@ #!/bin/bash ### Common variables -ENCLAVE=cdk-v1 -CDK_ERIGON_NODE_NAME=cdk-erigon-node-001 +KURTOSIS_ENCLAVE=cdk-v1 TMP_CDK_FOLDER=tmp/cdk DEST_KURTOSIS_PARAMS_YML=../$TMP_CDK_FOLDER/e2e-params.yml -KURTOSIS_VERSION=develop KURTOSIS_FOLDER=../kurtosis-cdk +USE_L1_GAS_TOKEN_CONTRACT=true diff --git a/test/scripts/kurtosis_prepare_params_yml.sh b/test/scripts/kurtosis_prepare_params_yml.sh index aa57e272..38f44d51 100755 --- a/test/scripts/kurtosis_prepare_params_yml.sh +++ b/test/scripts/kurtosis_prepare_params_yml.sh @@ -28,3 +28,4 @@ mkdir -p $(dirname $DEST_KURTOSIS_PARAMS_YML) cp $KURTOSIS_FOLDER/params.yml $DEST_KURTOSIS_PARAMS_YML yq -Y --in-place ".args.cdk_node_image = \"cdk\"" $DEST_KURTOSIS_PARAMS_YML yq -Y --in-place ".args.data_availability_mode = \"$DATA_AVAILABILITY_MODE\"" $DEST_KURTOSIS_PARAMS_YML +yq -Y --in-place ".args.zkevm_use_gas_token_contract = $USE_L1_GAS_TOKEN_CONTRACT" $DEST_KURTOSIS_PARAMS_YML diff --git a/tree/appendonlytree.go b/tree/appendonlytree.go index 20d22ec1..5b14b962 100644 --- a/tree/appendonlytree.go +++ b/tree/appendonlytree.go @@ -82,7 +82,7 @@ func (t *AppendOnlyTree) initCache(tx db.Txer) error { siblings := [types.DefaultHeight]common.Hash{} lastRoot, err := t.getLastRootWithTx(tx) if err != nil { - if errors.Is(err, ErrNotFound) { + if errors.Is(err, db.ErrNotFound) { t.lastIndex = -1 t.lastLeftCache = siblings return nil @@ -102,7 +102,7 @@ func (t *AppendOnlyTree) initCache(tx db.Txer) error { ) } if currentNode == nil { - return ErrNotFound + return db.ErrNotFound } siblings[h] = currentNode.Left if index&(1< 0 { @@ -113,7 +113,7 @@ func (t *AppendOnlyTree) initCache(tx db.Txer) error { } // Reverse the siblings to go 
from leafs to root
-	for i, j := 0, len(siblings)-1; i < j; i, j = i+1, j-1 {
+	for i, j := 0, len(siblings)-1; i < j; i, j = i+1, j-1 {
 		siblings[i], siblings[j] = siblings[j], siblings[i]
 	}
 
diff --git a/tree/tree.go b/tree/tree.go
index 2107ba68..5d307e8a 100644
--- a/tree/tree.go
+++ b/tree/tree.go
@@ -14,8 +14,7 @@ import (
 )
 
 var (
-	EmptyProof  = types.Proof{}
-	ErrNotFound = errors.New("not found")
+	EmptyProof = types.Proof{}
 )
 
 type Tree struct {
@@ -50,7 +49,7 @@ func newTree(db *sql.DB, tablePrefix string) *Tree {
 }
 
 func (t *Tree) getSiblings(tx db.Querier, index uint32, root common.Hash) (
-	siblings [32]common.Hash,
+	siblings types.Proof,
 	hasUsedZeroHashes bool,
 	err error,
 ) {
@@ -60,7 +59,7 @@ func (t *Tree) getSiblings(tx db.Querier, index uint32, root common.Hash) (
 		var currentNode *types.TreeNode
 		currentNode, err = t.getRHTNode(tx, currentNodeHash)
 		if err != nil {
-			if errors.Is(err, ErrNotFound) {
+			if errors.Is(err, db.ErrNotFound) {
 				hasUsedZeroHashes = true
 				siblings[h] = t.zeroHashes[h]
 				err = nil
@@ -113,7 +112,7 @@ func (t *Tree) GetProof(ctx context.Context, index uint32, root common.Hash) (ty
 		return types.Proof{}, err
 	}
 	if isErrNotFound {
-		return types.Proof{}, ErrNotFound
+		return types.Proof{}, db.ErrNotFound
 	}
 	return siblings, nil
 }
@@ -127,7 +126,7 @@ func (t *Tree) getRHTNode(tx db.Querier, nodeHash common.Hash) (*types.TreeNode,
 	)
 	if err != nil {
 		if errors.Is(err, sql.ErrNoRows) {
-			return node, ErrNotFound
+			return node, db.ErrNotFound
 		}
 		return node, err
 	}
@@ -185,7 +184,7 @@ func (t *Tree) getLastRootWithTx(tx db.Querier) (types.Root, error) {
 	)
 	if err != nil {
 		if errors.Is(err, sql.ErrNoRows) {
-			return root, ErrNotFound
+			return root, db.ErrNotFound
 		}
 		return root, err
 	}
@@ -201,7 +200,7 @@ func (t *Tree) GetRootByIndex(ctx context.Context, index uint32) (types.Root, er
 		index,
 	); err != nil {
 		if errors.Is(err, sql.ErrNoRows) {
-			return root, ErrNotFound
+			return root, db.ErrNotFound
 		}
 		return root, err
 	}
@@ -209,17 +208,17 @@ func (t
*Tree) GetRootByIndex(ctx context.Context, index uint32) (types.Root, er
 }
 
 // GetRootByHash returns the root associated to the hash
-func (t *Tree) GetRootByHash(ctx context.Context, hash common.Hash) (types.Root, error) {
-	var root types.Root
+func (t *Tree) GetRootByHash(ctx context.Context, hash common.Hash) (*types.Root, error) {
+	root := &types.Root{}
 	if err := meddler.QueryRow(
-		t.db, &root,
+		t.db, root,
 		fmt.Sprintf(`SELECT * FROM %s WHERE hash = $1;`, t.rootTable),
 		hash.Hex(),
 	); err != nil {
 		if errors.Is(err, sql.ErrNoRows) {
-			return root, ErrNotFound
+			return nil, db.ErrNotFound
 		}
-		return root, err
+		return nil, err
 	}
 	return root, nil
 }
diff --git a/tree/tree_test.go b/tree/tree_test.go
index b5278723..dc2cfc9e 100644
--- a/tree/tree_test.go
+++ b/tree/tree_test.go
@@ -2,6 +2,7 @@ package tree_test
 
 import (
 	"context"
+	"database/sql"
 	"encoding/json"
 	"fmt"
 	"os"
@@ -18,6 +19,88 @@ import (
 	"github.com/stretchr/testify/require"
 )
 
+func TestCheckExpectedRoot(t *testing.T) {
+	createTreeDB := func() *sql.DB {
+		dbPath := path.Join(t.TempDir(), "file::memory:?cache=shared")
+		log.Debug("DB created at: ", dbPath)
+		require.NoError(t, migrations.RunMigrations(dbPath))
+		treeDB, err := db.NewSQLiteDB(dbPath)
+		require.NoError(t, err)
+
+		return treeDB
+	}
+
+	addLeaves := func(merkletree *tree.AppendOnlyTree,
+		treeDB *sql.DB,
+		numOfLeavesToAdd, from int) {
+		tx, err := db.NewTx(context.Background(), treeDB)
+		require.NoError(t, err)
+
+		for i := from; i < from+numOfLeavesToAdd; i++ {
+			require.NoError(t, merkletree.AddLeaf(tx, uint64(i), 0, types.Leaf{
+				Index: uint32(i),
+				Hash:  common.HexToHash(fmt.Sprintf("%x", i)),
+			}))
+		}
+
+		require.NoError(t, tx.Commit())
+	}
+
+	t.Run("Check when no reorg", func(t *testing.T) {
+		numOfLeavesToAdd := 10
+		indexToCheck := uint32(numOfLeavesToAdd - 1)
+
+		treeDB := createTreeDB()
+		merkleTree := tree.NewAppendOnlyTree(treeDB, "")
+
+		addLeaves(merkleTree, treeDB, numOfLeavesToAdd, 0)
+
+		expectedRoot, err :=
merkleTree.GetLastRoot(context.Background()) + require.NoError(t, err) + + addLeaves(merkleTree, treeDB, numOfLeavesToAdd, numOfLeavesToAdd) + + root2, err := merkleTree.GetRootByIndex(context.Background(), indexToCheck) + require.NoError(t, err) + require.Equal(t, expectedRoot.Hash, root2.Hash) + require.Equal(t, expectedRoot.Index, root2.Index) + }) + + t.Run("Check after rebuild tree when reorg", func(t *testing.T) { + numOfLeavesToAdd := 10 + indexToCheck := uint32(numOfLeavesToAdd - 1) + treeDB := createTreeDB() + merkleTree := tree.NewAppendOnlyTree(treeDB, "") + + addLeaves(merkleTree, treeDB, numOfLeavesToAdd, 0) + + expectedRoot, err := merkleTree.GetLastRoot(context.Background()) + require.NoError(t, err) + + addLeaves(merkleTree, treeDB, numOfLeavesToAdd, numOfLeavesToAdd) + + // reorg tree + tx, err := db.NewTx(context.Background(), treeDB) + require.NoError(t, err) + require.NoError(t, merkleTree.Reorg(tx, uint64(indexToCheck+1))) + require.NoError(t, tx.Commit()) + + // rebuild cache on adding new leaf + tx, err = db.NewTx(context.Background(), treeDB) + require.NoError(t, err) + require.NoError(t, merkleTree.AddLeaf(tx, uint64(indexToCheck+1), 0, types.Leaf{ + Index: indexToCheck + 1, + Hash: common.HexToHash(fmt.Sprintf("%x", indexToCheck+1)), + })) + require.NoError(t, tx.Commit()) + + root2, err := merkleTree.GetRootByIndex(context.Background(), indexToCheck) + require.NoError(t, err) + require.Equal(t, expectedRoot.Hash, root2.Hash) + require.Equal(t, expectedRoot.Index, root2.Index) + }) +} + func TestMTAddLeaf(t *testing.T) { data, err := os.ReadFile("testvectors/root-vectors.json") require.NoError(t, err) diff --git a/tree/updatabletree.go b/tree/updatabletree.go index 3ed8b881..be861b55 100644 --- a/tree/updatabletree.go +++ b/tree/updatabletree.go @@ -23,21 +23,21 @@ func NewUpdatableTree(db *sql.DB, dbPrefix string) *UpdatableTree { return ut } -func (t *UpdatableTree) UpsertLeaf(tx db.Txer, blockNum, blockPosition uint64, leaf types.Leaf) 
error { +func (t *UpdatableTree) UpsertLeaf(tx db.Txer, blockNum, blockPosition uint64, leaf types.Leaf) (common.Hash, error) { var rootHash common.Hash root, err := t.getLastRootWithTx(tx) if err != nil { - if errors.Is(err, ErrNotFound) { + if errors.Is(err, db.ErrNotFound) { rootHash = t.zeroHashes[types.DefaultHeight] } else { - return err + return common.Hash{}, err } } else { rootHash = root.Hash } siblings, _, err := t.getSiblings(tx, leaf.Index, rootHash) if err != nil { - return err + return common.Hash{}, err } currentChildHash := leaf.Hash newNodes := []types.TreeNode{} @@ -59,10 +59,10 @@ func (t *UpdatableTree) UpsertLeaf(tx db.Txer, blockNum, blockPosition uint64, l BlockNum: blockNum, BlockPosition: blockPosition, }); err != nil { - return err + return common.Hash{}, err } if err := t.storeNodes(tx, newNodes); err != nil { - return err + return common.Hash{}, err } - return nil + return currentChildHash, nil }