From 56de789e58ad5d2fb699a4e0dba9bf478c4e4d95 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?=
Date: Tue, 3 Sep 2024 13:59:37 +0200
Subject: [PATCH 01/17] feat: remove unused prover parameter

---
 aggregator/aggregator.go | 1 -
 1 file changed, 1 deletion(-)

diff --git a/aggregator/aggregator.go b/aggregator/aggregator.go
index 0ba92c25..594338a4 100644
--- a/aggregator/aggregator.go
+++ b/aggregator/aggregator.go
@@ -1079,7 +1079,6 @@ func (a *Aggregator) tryBuildFinalProof(ctx context.Context, prover proverInterf
 	if proof == nil {
 		// we don't have a proof generating at the moment, check if we
 		// have a proof ready to verify
-
 		proof, err = a.getAndLockProofReadyToVerify(ctx, lastVerifiedBatchNumber)
 		if errors.Is(err, state.ErrNotFound) {
 			// nothing to verify, swallow the error

From f3e111218f1db97db20222a6cbfc4a1d99d25e1c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?=
Date: Tue, 3 Sep 2024 16:10:54 +0200
Subject: [PATCH 02/17] feat: adapt the package name for version.go

---
 version.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/version.go b/version.go
index 2dbd8a0c..ba83eaf5 100644
--- a/version.go
+++ b/version.go
@@ -1,4 +1,4 @@
-package zkevm
+package cdk
 
 import (
 	"fmt"

From 742b0b4397a2e4238e7170ca19a0387afd748af7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?=
Date: Wed, 4 Sep 2024 08:36:07 +0200
Subject: [PATCH 03/17] feat: move the module names to a separate file and add
 module name to the sequence sender

---
 cmd/main.go                           |  18 +--
 cmd/run.go                            |  29 ++--
 common/components.go                  |  12 ++
 log/log.go                            |  11 +-
 sequencesender/sequencesender.go      | 203 +++++++++++++-------------
 sequencesender/txbuilder/interface.go |   7 +-
 6 files changed, 146 insertions(+), 134 deletions(-)
 create mode 100644 common/components.go

diff --git a/cmd/main.go b/cmd/main.go
index 050fad2b..e92f22b4 100644
--- a/cmd/main.go
+++ b/cmd/main.go
@@ -4,6 +4,7 @@ import (
 	"os"
 
 	zkevm "github.com/0xPolygon/cdk"
+	"github.com/0xPolygon/cdk/common"
 	"github.com/0xPolygon/cdk/config"
 	"github.com/0xPolygon/cdk/log"
 	"github.com/urfave/cli/v2"
@@ -12,19 +13,8 @@
 const appName = "cdk"
 
 const (
-	// SEQUENCE_SENDER name to identify the sequence-sender component
-	SEQUENCE_SENDER = "sequence-sender" //nolint:stylecheck
-	// AGGREGATOR name to identify the aggregator component
-	AGGREGATOR = "aggregator"
-	// AGGORACLE name to identify the aggoracle component
-	AGGORACLE = "aggoracle"
-	// RPC name to identify the rpc component
-	RPC = "rpc"
-)
-
-const (
-	// NetworkConfigFile name to identify the network_custom (genesis) config-file
-	NetworkConfigFile = "custom_network"
+	// NETWORK_CONFIGFILE name to identify the network_custom (genesis) config-file
+	NETWORK_CONFIGFILE = "custom_network"
 )
 
 var (
@@ -51,7 +41,7 @@ var (
 		Aliases: []string{"co"},
 		Usage: "List of components to run",
 		Required: false,
-		Value: cli.NewStringSlice(SEQUENCE_SENDER, AGGREGATOR, AGGORACLE, RPC),
+		Value: cli.NewStringSlice(common.SEQUENCE_SENDER, common.AGGREGATOR, common.AGGORACLE, common.RPC),
 	}
 )

diff --git a/cmd/run.go b/cmd/run.go
index af5ff7a4..386ba16d 100644
--- a/cmd/run.go
+++ b/cmd/run.go
@@ -18,6 +18,7 @@ import (
 	"github.com/0xPolygon/cdk/aggregator/db"
 	"github.com/0xPolygon/cdk/bridgesync"
 	"github.com/0xPolygon/cdk/claimsponsor"
+	"github.com/0xPolygon/cdk/common"
 	"github.com/0xPolygon/cdk/config"
 	"github.com/0xPolygon/cdk/dataavailability"
 	"github.com/0xPolygon/cdk/dataavailability/datacommittee"
@@ -90,13 +91,13 @@ func start(cliCtx *cli.Context) error {
for _, component := range components { switch component { - case SEQUENCE_SENDER: + case common.SEQUENCE_SENDER: c.SequenceSender.Log = c.Log seqSender := createSequenceSender(*c, l1Client, l1InfoTreeSync) // start sequence sender in a goroutine, checking for errors go seqSender.Start(cliCtx.Context) - case AGGREGATOR: + case common.AGGREGATOR: aggregator := createAggregator(cliCtx.Context, *c, !cliCtx.Bool(config.FlagMigrations)) // start aggregator in a goroutine, checking for errors go func() { @@ -104,10 +105,10 @@ func start(cliCtx *cli.Context) error { log.Fatal(err) } }() - case AGGORACLE: + case common.AGGORACLE: aggOracle := createAggoracle(*c, l1Client, l2Client, l1InfoTreeSync) go aggOracle.Start(cliCtx.Context) - case RPC: + case common.RPC: server := createRPC( c.RPC, c.Common.NetworkID, @@ -470,7 +471,7 @@ func runL1InfoTreeSyncerIfNeeded( l1Client *ethclient.Client, reorgDetector *reorgdetector.ReorgDetector, ) *l1infotreesync.L1InfoTreeSync { - if !isNeeded([]string{AGGORACLE, RPC, SEQUENCE_SENDER}, components) { + if !isNeeded([]string{common.AGGORACLE, common.RPC, common.SEQUENCE_SENDER}, components) { return nil } l1InfoTreeSync, err := l1infotreesync.New( @@ -496,7 +497,7 @@ func runL1InfoTreeSyncerIfNeeded( } func runL1ClientIfNeeded(components []string, urlRPCL1 string) *ethclient.Client { - if !isNeeded([]string{SEQUENCE_SENDER, AGGREGATOR, AGGORACLE, RPC}, components) { + if !isNeeded([]string{common.SEQUENCE_SENDER, common.AGGREGATOR, common.AGGORACLE, common.RPC}, components) { return nil } log.Debugf("dialing L1 client at: %s", urlRPCL1) @@ -509,7 +510,7 @@ func runL1ClientIfNeeded(components []string, urlRPCL1 string) *ethclient.Client } func runL2ClientIfNeeded(components []string, urlRPCL2 string) *ethclient.Client { - if !isNeeded([]string{AGGORACLE, RPC}, components) { + if !isNeeded([]string{common.AGGORACLE, common.RPC}, components) { return nil } log.Debugf("dialing L2 client at: %s", urlRPCL2) @@ -527,7 +528,7 @@ func runReorgDetectorL1IfNeeded( l1Client *ethclient.Client, cfg *reorgdetector.Config, ) (*reorgdetector.ReorgDetector, chan error) { - if !isNeeded([]string{SEQUENCE_SENDER, AGGREGATOR, AGGORACLE, RPC}, components) { + if !isNeeded([]string{common.SEQUENCE_SENDER, common.AGGREGATOR, common.AGGORACLE, common.RPC}, components) { return nil, nil } rd := newReorgDetector(cfg, l1Client) @@ -549,7 +550,7 @@ func runReorgDetectorL2IfNeeded( l2Client *ethclient.Client, cfg *reorgdetector.Config, ) (*reorgdetector.ReorgDetector, chan error) { - if !isNeeded([]string{AGGORACLE, RPC}, components) { + if !isNeeded([]string{common.AGGORACLE, common.RPC}, components) { return nil, nil } rd := newReorgDetector(cfg, l2Client) @@ -571,7 +572,7 @@ func runClaimSponsorIfNeeded( l2Client *ethclient.Client, cfg claimsponsor.EVMClaimSponsorConfig, ) *claimsponsor.ClaimSponsor { - if !isNeeded([]string{RPC}, components) || !cfg.Enabled { + if !isNeeded([]string{common.RPC}, components) || !cfg.Enabled { return nil } // In the future there may support different backends other than EVM, and this will require different config. 
@@ -610,7 +611,7 @@ func runL1Bridge2InfoIndexSyncIfNeeded( l1InfoTreeSync *l1infotreesync.L1InfoTreeSync, l1Client *ethclient.Client, ) *l1bridge2infoindexsync.L1Bridge2InfoIndexSync { - if !isNeeded([]string{RPC}, components) { + if !isNeeded([]string{common.RPC}, components) { return nil } l1Bridge2InfoIndexSync, err := l1bridge2infoindexsync.New( @@ -638,7 +639,7 @@ func runLastGERSyncIfNeeded( l2Client *ethclient.Client, l1InfoTreeSync *l1infotreesync.L1InfoTreeSync, ) *lastgersync.LastGERSync { - if !isNeeded([]string{RPC}, components) { + if !isNeeded([]string{common.RPC}, components) { return nil } lastGERSync, err := lastgersync.New( @@ -669,7 +670,7 @@ func runBridgeSyncL1IfNeeded( reorgDetectorL1 *reorgdetector.ReorgDetector, l1Client *ethclient.Client, ) *bridgesync.BridgeSync { - if !isNeeded([]string{RPC}, components) { + if !isNeeded([]string{common.RPC}, components) { return nil } bridgeSyncL1, err := bridgesync.NewL1( @@ -701,7 +702,7 @@ func runBridgeSyncL2IfNeeded( l2Client *ethclient.Client, ) *bridgesync.BridgeSync { // TODO: will be needed by AGGSENDER - if !isNeeded([]string{RPC}, components) { + if !isNeeded([]string{common.RPC}, components) { return nil } bridgeSyncL2, err := bridgesync.NewL2( diff --git a/common/components.go b/common/components.go new file mode 100644 index 00000000..7410400b --- /dev/null +++ b/common/components.go @@ -0,0 +1,12 @@ +package common + +const ( + // SEQUENCE_SENDER name to identify the sequence-sender component + SEQUENCE_SENDER = "sequence-sender" + // AGGREGATOR name to identify the aggregator component + AGGREGATOR = "aggregator" + // AGGORACLE name to identify the aggoracle component + AGGORACLE = "aggoracle" + // RPC name to identify the rpc component + RPC = "rpc" +) diff --git a/log/log.go b/log/log.go index eae9f543..ea6cefc0 100644 --- a/log/log.go +++ b/log/log.go @@ -36,11 +36,12 @@ func getDefaultLog() *Logger { return l } // default level: debug - zapLogger, _, err := NewLogger(Config{ - Environment: EnvironmentDevelopment, - Level: "debug", - Outputs: []string{"stderr"}, - }) + zapLogger, _, err := NewLogger( + Config{ + Environment: EnvironmentDevelopment, + Level: "debug", + Outputs: []string{"stderr"}, + }) if err != nil { panic(err) } diff --git a/sequencesender/sequencesender.go b/sequencesender/sequencesender.go index 8390c818..aeda393a 100644 --- a/sequencesender/sequencesender.go +++ b/sequencesender/sequencesender.go @@ -13,6 +13,7 @@ import ( "time" "github.com/0xPolygon/cdk-rpc/rpc" + cdkcommon "github.com/0xPolygon/cdk/common" "github.com/0xPolygon/cdk/etherman" "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sequencesender/seqsendertypes" @@ -29,6 +30,7 @@ import ( // SequenceSender represents a sequence sender type SequenceSender struct { cfg Config + logger *log.Logger ethTxManager *ethtxmanager.Client etherman *etherman.Client currentNonce uint64 @@ -83,9 +85,12 @@ type ethTxAdditionalData struct { // New inits sequence sender func New(cfg Config, etherman *etherman.Client, txBuilder txbuilder.TxBuilder) (*SequenceSender, error) { + logger := log.WithFields("module", cdkcommon.SEQUENCE_SENDER) + // Create sequencesender s := SequenceSender{ cfg: cfg, + logger: logger, etherman: etherman, ethTransactions: make(map[common.Hash]*ethTxData), ethTxData: make(map[common.Hash][]byte), @@ -95,12 +100,13 @@ func New(cfg Config, etherman *etherman.Client, txBuilder txbuilder.TxBuilder) ( seqSendingStopped: false, TxBuilder: txBuilder, } - log.Infof("Seq_sender: %s", txBuilder.String()) + + 
logger.Infof("TxBuilder configuration: %s", txBuilder.String()) // Restore pending sent sequences err := s.loadSentSequencesTransactions() if err != nil { - log.Fatalf("error restoring sent sequences from file", err) + s.logger.Fatalf("error restoring sent sequences from file", err) return nil, err } @@ -113,16 +119,16 @@ func New(cfg Config, etherman *etherman.Client, txBuilder txbuilder.TxBuilder) ( s.ethTxManager, err = ethtxmanager.New(cfg.EthTxManager) if err != nil { - log.Fatalf("error creating ethtxmanager client: %v", err) + s.logger.Fatalf("error creating ethtxmanager client: %v", err) return nil, err } // Create datastream client s.streamClient, err = datastreamer.NewClient(s.cfg.StreamClient.Server, 1) if err != nil { - log.Fatalf("failed to create stream client, error: %v", err) + s.logger.Fatalf("failed to create stream client, error: %v", err) } else { - log.Infof("new stream client") + s.logger.Infof("new stream client") } // Set func to handle the streaming s.streamClient.SetProcessEntryFunc(s.handleReceivedDataStream) @@ -140,28 +146,28 @@ func (s *SequenceSender) Start(ctx context.Context) { s.nonceMutex.Lock() s.currentNonce, err = s.etherman.CurrentNonce(ctx, s.cfg.L2Coinbase) if err != nil { - log.Fatalf("failed to get current nonce from %v, error: %v", s.cfg.L2Coinbase, err) + s.logger.Fatalf("failed to get current nonce from %v, error: %v", s.cfg.L2Coinbase, err) } else { - log.Infof("current nonce for %v is %d", s.cfg.L2Coinbase, s.currentNonce) + s.logger.Infof("current nonce for %v is %d", s.cfg.L2Coinbase, s.currentNonce) } s.nonceMutex.Unlock() // Get latest virtual state batch from L1 err = s.updateLatestVirtualBatch() if err != nil { - log.Fatalf("error getting latest sequenced batch, error: %v", err) + s.logger.Fatalf("error getting latest sequenced batch, error: %v", err) } // Sync all monitored sent L1 tx err = s.syncAllEthTxResults(ctx) if err != nil { - log.Fatalf("failed to sync monitored tx results, error: %v", err) + s.logger.Fatalf("failed to sync monitored tx results, error: %v", err) } // Start datastream client err = s.streamClient.Start() if err != nil { - log.Fatalf("failed to start stream client, error: %v", err) + s.logger.Fatalf("failed to start stream client, error: %v", err) } // Set starting point of the streaming @@ -174,10 +180,10 @@ func (s *SequenceSender) Start(ctx context.Context) { marshalledBookmark, err := proto.Marshal(bookmark) if err != nil { - log.Fatalf("failed to marshal bookmark, error: %v", err) + s.logger.Fatalf("failed to marshal bookmark, error: %v", err) } - log.Infof("stream client from bookmark %v", bookmark) + s.logger.Infof("stream client from bookmark %v", bookmark) // Current batch to sequence s.wipBatch = s.latestVirtualBatch + 1 @@ -189,7 +195,7 @@ func (s *SequenceSender) Start(ctx context.Context) { // Start receiving the streaming err = s.streamClient.ExecCommandStartBookmark(marshalledBookmark) if err != nil { - log.Fatalf("failed to connect to the streaming: %v", err) + s.logger.Fatalf("failed to connect to the streaming: %v", err) } } @@ -234,7 +240,7 @@ func (s *SequenceSender) purgeSequences() { } delete(s.sequenceData, toPurge[i]) } - log.Infof("batches purged count: %d, fromBatch: %d, toBatch: %d", len(toPurge), firstPurged, lastPurged) + s.logger.Infof("batches purged count: %d, fromBatch: %d, toBatch: %d", len(toPurge), firstPurged, lastPurged) } s.mutexSequence.Unlock() } @@ -262,9 +268,9 @@ func (s *SequenceSender) purgeEthTx(ctx context.Context) { if data.OnMonitor { err := 
s.ethTxManager.Remove(ctx, hash) if err != nil { - log.Warnf("error removing monitor tx %v from ethtxmanager: %v", hash, err) + s.logger.Warnf("error removing monitor tx %v from ethtxmanager: %v", hash, err) } else { - log.Infof("removed monitor tx %v from ethtxmanager", hash) + s.logger.Infof("removed monitor tx %v from ethtxmanager", hash) } } } @@ -283,7 +289,7 @@ func (s *SequenceSender) purgeEthTx(ctx context.Context) { delete(s.ethTransactions, toPurge[i]) delete(s.ethTxData, toPurge[i]) } - log.Infof("txs purged count: %d, fromNonce: %d, toNonce: %d", len(toPurge), firstPurged, lastPurged) + s.logger.Infof("txs purged count: %d, fromNonce: %d, toNonce: %d", len(toPurge), firstPurged, lastPurged) } s.mutexEthTx.Unlock() } @@ -314,10 +320,10 @@ func (s *SequenceSender) syncEthTxResults(ctx context.Context) (uint64, error) { // Save updated sequences transactions err := s.saveSentSequencesTransactions(ctx) if err != nil { - log.Errorf("error saving tx sequence, error: %v", err) + s.logger.Errorf("error saving tx sequence, error: %v", err) } - log.Infof("%d tx results synchronized (%d in pending state)", txSync, txPending) + s.logger.Infof("%d tx results synchronized (%d in pending state)", txSync, txPending) return txPending, nil } @@ -326,7 +332,7 @@ func (s *SequenceSender) syncAllEthTxResults(ctx context.Context) error { // Get all results results, err := s.ethTxManager.ResultsByStatus(ctx, nil) if err != nil { - log.Warnf("error getting results for all tx: %v", err) + s.logger.Warnf("error getting results for all tx: %v", err) return err } @@ -336,7 +342,7 @@ func (s *SequenceSender) syncAllEthTxResults(ctx context.Context) error { for _, result := range results { txSequence, exists := s.ethTransactions[result.ID] if !exists { - log.Infof("transaction %v missing in memory structure. Adding it", result.ID) + s.logger.Infof("transaction %v missing in memory structure. 
Adding it", result.ID) // No info: from/to batch and the sent timestamp s.ethTransactions[result.ID] = ðTxData{ SentL1Timestamp: time.Time{}, @@ -354,10 +360,10 @@ func (s *SequenceSender) syncAllEthTxResults(ctx context.Context) error { // Save updated sequences transactions err = s.saveSentSequencesTransactions(ctx) if err != nil { - log.Errorf("error saving tx sequence, error: %v", err) + s.logger.Errorf("error saving tx sequence, error: %v", err) } - log.Infof("%d tx results synchronized", numResults) + s.logger.Infof("%d tx results synchronized", numResults) return nil } @@ -386,7 +392,7 @@ func (s *SequenceSender) copyTxData( // updateEthTxResult handles updating transaction state func (s *SequenceSender) updateEthTxResult(txData *ethTxData, txResult ethtxmanager.MonitoredTxResult) { if txData.Status != txResult.Status.String() { - log.Infof("update transaction %v to state %s", txResult.ID, txResult.Status.String()) + s.logger.Infof("update transaction %v to state %s", txResult.ID, txResult.Status.String()) txData.StatusTimestamp = time.Now() stTrans := txData.StatusTimestamp.Format("2006-01-02T15:04:05.000-07:00") + ", " + txData.Status + ", " + txResult.Status.String() @@ -420,13 +426,13 @@ func (s *SequenceSender) updateEthTxResult(txData *ethTxData, txResult ethtxmana func (s *SequenceSender) getResultAndUpdateEthTx(ctx context.Context, txHash common.Hash) error { txData, exists := s.ethTransactions[txHash] if !exists { - log.Errorf("transaction %v not found in memory", txHash) + s.logger.Errorf("transaction %v not found in memory", txHash) return errors.New("transaction not found in memory structure") } txResult, err := s.ethTxManager.Result(ctx, txHash) if errors.Is(err, ethtxmanager.ErrNotFound) { - log.Infof("transaction %v does not exist in ethtxmanager. Marking it", txHash) + s.logger.Infof("transaction %v does not exist in ethtxmanager. Marking it", txHash) txData.OnMonitor = false // Resend tx errSend := s.sendTx(ctx, true, &txHash, nil, 0, 0, nil, txData.Gas) @@ -434,7 +440,7 @@ func (s *SequenceSender) getResultAndUpdateEthTx(ctx context.Context, txHash com txData.OnMonitor = false } } else if err != nil { - log.Errorf("error getting result for tx %v: %v", txHash, err) + s.logger.Errorf("error getting result for tx %v: %v", txHash, err) return err } else { s.updateEthTxResult(txData, txResult) @@ -446,14 +452,14 @@ func (s *SequenceSender) getResultAndUpdateEthTx(ctx context.Context, txHash com // tryToSendSequence checks if there is a sequence and it's worth it to send to L1 func (s *SequenceSender) tryToSendSequence(ctx context.Context) { // Update latest virtual batch - log.Infof("updating virtual batch") + s.logger.Infof("updating virtual batch") err := s.updateLatestVirtualBatch() if err != nil { return } // Update state of transactions - log.Infof("updating tx results") + s.logger.Infof("updating tx results") countPending, err := s.syncEthTxResults(ctx) if err != nil { return @@ -461,22 +467,22 @@ func (s *SequenceSender) tryToSendSequence(ctx context.Context) { // Check if the sequence sending is stopped if s.seqSendingStopped { - log.Warnf("sending is stopped!") + s.logger.Warnf("sending is stopped!") return } // Check if reached the maximum number of pending transactions if countPending >= s.cfg.MaxPendingTx { - log.Infof("max number of pending txs (%d) reached. Waiting for some to be completed", countPending) + s.logger.Infof("max number of pending txs (%d) reached. 
Waiting for some to be completed", countPending) return } // Check if should send sequence to L1 - log.Infof("getting sequences to send") + s.logger.Infof("getting sequences to send") sequence, err := s.getSequencesToSend(ctx) if err != nil || sequence == nil || sequence.Len() == 0 { if err != nil { - log.Errorf("error getting sequences: %v", err) + s.logger.Errorf("error getting sequences: %v", err) } return } @@ -486,10 +492,9 @@ func (s *SequenceSender) tryToSendSequence(ctx context.Context) { lastSequence := sequence.LastBatch() lastL2BlockTimestamp := lastSequence.LastL2BLockTimestamp() - log.Debugf(sequence.String()) - log.Infof("sending sequences to L1. From batch %d to batch %d", - firstSequence.BatchNumber(), lastSequence.BatchNumber(), - ) + s.logger.Debugf(sequence.String()) + s.logger.Infof("sending sequences to L1. From batch %d to batch %d", + firstSequence.BatchNumber(), lastSequence.BatchNumber()) // Wait until last L1 block timestamp is L1BlockTimestampMargin seconds above the timestamp // of the last L2 block in the sequence @@ -498,21 +503,21 @@ func (s *SequenceSender) tryToSendSequence(ctx context.Context) { // Get header of the last L1 block lastL1BlockHeader, err := s.etherman.GetLatestBlockHeader(ctx) if err != nil { - log.Errorf("failed to get last L1 block timestamp, err: %v", err) + s.logger.Errorf("failed to get last L1 block timestamp, err: %v", err) return } elapsed, waitTime := s.marginTimeElapsed(lastL2BlockTimestamp, lastL1BlockHeader.Time, timeMargin) if !elapsed { - log.Infof("waiting at least %d seconds to send sequences, time difference between last L1 block %d (ts: %d) "+ + s.logger.Infof("waiting at least %d seconds to send sequences, time difference between last L1 block %d (ts: %d) "+ "and last L2 block %d (ts: %d) in the sequence is lower than %d seconds", waitTime, lastL1BlockHeader.Number, lastL1BlockHeader.Time, lastSequence.BatchNumber(), lastL2BlockTimestamp, timeMargin, ) time.Sleep(time.Duration(waitTime) * time.Second) } else { - log.Infof("continuing, time difference between last L1 block %d (ts: %d) and last L2 block %d (ts: %d) "+ + s.logger.Infof("continuing, time difference between last L1 block %d (ts: %d) and last L2 block %d (ts: %d) "+ "in the sequence is greater than %d seconds", lastL1BlockHeader.Number, lastL1BlockHeader.Time, @@ -533,12 +538,12 @@ func (s *SequenceSender) tryToSendSequence(ctx context.Context) { // Wait if the time difference is less than L1BlockTimestampMargin if !elapsed { - log.Infof("waiting at least %d seconds to send sequences, time difference between now (ts: %d) "+ + s.logger.Infof("waiting at least %d seconds to send sequences, time difference between now (ts: %d) "+ "and last L2 block %d (ts: %d) in the sequence is lower than %d seconds", waitTime, currentTime, lastSequence.BatchNumber, lastL2BlockTimestamp, timeMargin) time.Sleep(time.Duration(waitTime) * time.Second) } else { - log.Infof("[SeqSender]sending sequences now, time difference between now (ts: %d) and last L2 block %d (ts: %d) "+ + s.logger.Infof("sending sequences now, time difference between now (ts: %d) and last L2 block %d (ts: %d) "+ "in the sequence is also greater than %d seconds", currentTime, lastSequence.BatchNumber, lastL2BlockTimestamp, timeMargin) break @@ -546,35 +551,35 @@ func (s *SequenceSender) tryToSendSequence(ctx context.Context) { } // Send sequences to L1 - log.Debugf(sequence.String()) - log.Infof( + s.logger.Debugf(sequence.String()) + s.logger.Infof( "sending sequences to L1. 
From batch %d to batch %d", firstSequence.BatchNumber(), lastSequence.BatchNumber(), ) tx, err := s.TxBuilder.BuildSequenceBatchesTx(ctx, sequence) if err != nil { - log.Errorf("error building sequenceBatches tx: %v", err) + s.logger.Errorf("error building sequenceBatches tx: %v", err) return } // Get latest virtual state batch from L1 err = s.updateLatestVirtualBatch() if err != nil { - log.Fatalf("error getting latest sequenced batch, error: %v", err) + s.logger.Fatalf("error getting latest sequenced batch, error: %v", err) } sequence.SetLastVirtualBatchNumber(s.latestVirtualBatch) txToEstimateGas, err := s.TxBuilder.BuildSequenceBatchesTx(ctx, sequence) if err != nil { - log.Errorf("error building sequenceBatches tx to estimate gas: %v", err) + s.logger.Errorf("error building sequenceBatches tx to estimate gas: %v", err) return } gas, err := s.etherman.EstimateGas(ctx, s.cfg.SenderAddress, tx.To(), nil, txToEstimateGas.Data()) if err != nil { - log.Errorf("error estimating gas: ", err) + s.logger.Errorf("error estimating gas: ", err) return } @@ -613,7 +618,7 @@ func (s *SequenceSender) sendTx( valueToBatch = toBatch } else { if txOldHash == nil { - log.Errorf("trying to resend a tx with nil hash") + s.logger.Errorf("trying to resend a tx with nil hash") return errors.New("resend tx with nil hash monitor id") } paramTo = &s.ethTransactions[*txOldHash].To @@ -629,7 +634,7 @@ func (s *SequenceSender) sendTx( // Add sequence tx txHash, err := s.ethTxManager.AddWithGas(ctx, paramTo, paramNonce, big.NewInt(0), paramData, s.cfg.GasOffset, nil, gas) if err != nil { - log.Errorf("error adding sequence to ethtxmanager: %v", err) + s.logger.Errorf("error adding sequence to ethtxmanager: %v", err) return err } @@ -661,7 +666,7 @@ func (s *SequenceSender) sendTx( // Save sent sequences err = s.saveSentSequencesTransactions(ctx) if err != nil { - log.Errorf("error saving tx sequence sent, error: %v", err) + s.logger.Errorf("error saving tx sequence sent, error: %v", err) } return nil } @@ -700,7 +705,7 @@ func (s *SequenceSender) getSequencesToSend(ctx context.Context) (seqsendertypes // If the coinbase changes, the sequence ends here if len(sequenceBatches) > 0 && batch.LastCoinbase() != prevCoinbase { - log.Infof( + s.logger.Infof( "batch with different coinbase (batch %v, sequence %v), sequence will be sent to this point", prevCoinbase, batch.LastCoinbase, ) @@ -722,7 +727,7 @@ func (s *SequenceSender) getSequencesToSend(ctx context.Context) (seqsendertypes // Check if the current batch is the last before a change to a new forkid // In this case we need to close and send the sequence to L1 if (s.cfg.ForkUpgradeBatchNumber != 0) && (batchNumber == (s.cfg.ForkUpgradeBatchNumber)) { - log.Infof("sequence should be sent to L1, as we have reached the batch %d "+ + s.logger.Infof("sequence should be sent to L1, as we have reached the batch %d "+ "from which a new forkid is applied (upgrade)", s.cfg.ForkUpgradeBatchNumber, ) @@ -732,16 +737,16 @@ func (s *SequenceSender) getSequencesToSend(ctx context.Context) (seqsendertypes // Reached the latest batch. 
Decide if it's worth to send the sequence, or wait for new batches if len(sequenceBatches) == 0 { - log.Infof("no batches to be sequenced") + s.logger.Infof("no batches to be sequenced") return nil, nil } if s.latestVirtualTime.Before(time.Now().Add(-s.cfg.LastBatchVirtualizationTimeMaxWaitPeriod.Duration)) { - log.Infof("sequence should be sent, too much time without sending anything to L1") + s.logger.Infof("sequence should be sent, too much time without sending anything to L1") return s.TxBuilder.NewSequence(ctx, sequenceBatches, s.cfg.L2Coinbase) } - log.Infof("not enough time has passed since last batch was virtualized and the sequence could be bigger") + s.logger.Infof("not enough time has passed since last batch was virtualized and the sequence could be bigger") return nil, nil } @@ -749,17 +754,17 @@ func (s *SequenceSender) getSequencesToSend(ctx context.Context) (seqsendertypes func (s *SequenceSender) loadSentSequencesTransactions() error { // Check if file exists if _, err := os.Stat(s.cfg.SequencesTxFileName); os.IsNotExist(err) { - log.Infof("file not found %s: %v", s.cfg.SequencesTxFileName, err) + s.logger.Infof("file not found %s: %v", s.cfg.SequencesTxFileName, err) return nil } else if err != nil { - log.Errorf("error opening file %s: %v", s.cfg.SequencesTxFileName, err) + s.logger.Errorf("error opening file %s: %v", s.cfg.SequencesTxFileName, err) return err } // Read file data, err := os.ReadFile(s.cfg.SequencesTxFileName) if err != nil { - log.Errorf("error reading file %s: %v", s.cfg.SequencesTxFileName, err) + s.logger.Errorf("error reading file %s: %v", s.cfg.SequencesTxFileName, err) return err } @@ -768,7 +773,7 @@ func (s *SequenceSender) loadSentSequencesTransactions() error { err = json.Unmarshal(data, &s.ethTransactions) s.mutexEthTx.Unlock() if err != nil { - log.Errorf("error decoding data from %s: %v", s.cfg.SequencesTxFileName, err) + s.logger.Errorf("error decoding data from %s: %v", s.cfg.SequencesTxFileName, err) return err } @@ -786,7 +791,7 @@ func (s *SequenceSender) saveSentSequencesTransactions(ctx context.Context) erro fileName := s.cfg.SequencesTxFileName[0:strings.IndexRune(s.cfg.SequencesTxFileName, '.')] + ".tmp" s.sequencesTxFile, err = os.Create(fileName) if err != nil { - log.Errorf("error creating file %s: %v", fileName, err) + s.logger.Errorf("error creating file %s: %v", fileName, err) return err } defer s.sequencesTxFile.Close() @@ -798,14 +803,14 @@ func (s *SequenceSender) saveSentSequencesTransactions(ctx context.Context) erro err = encoder.Encode(s.ethTransactions) s.mutexEthTx.Unlock() if err != nil { - log.Errorf("error writing file %s: %v", fileName, err) + s.logger.Errorf("error writing file %s: %v", fileName, err) return err } // Rename the new file err = os.Rename(fileName, s.cfg.SequencesTxFileName) if err != nil { - log.Errorf("error renaming file %s to %s: %v", fileName, s.cfg.SequencesTxFileName, err) + s.logger.Errorf("error renaming file %s to %s: %v", fileName, s.cfg.SequencesTxFileName, err) return err } @@ -845,11 +850,11 @@ func (s *SequenceSender) handleReceivedDataStream( err := proto.Unmarshal(entry.Data, l2Block) if err != nil { - log.Errorf("error unmarshalling L2Block: %v", err) + s.logger.Errorf("error unmarshalling L2Block: %v", err) return err } - log.Infof("received L2Block entry, l2Block.Number: %d, l2Block.BatchNumber: %d, entry.Number: %d", + s.logger.Infof("received L2Block entry, l2Block.Number: %d, l2Block.BatchNumber: %d, entry.Number: %d", l2Block.Number, l2Block.BatchNumber, entry.Number, ) @@ 
-858,7 +863,7 @@ func (s *SequenceSender) handleReceivedDataStream( !(prevEntryType == datastream.EntryType_ENTRY_TYPE_BATCH_START || prevEntryType == datastream.EntryType_ENTRY_TYPE_L2_BLOCK || prevEntryType == datastream.EntryType_ENTRY_TYPE_TRANSACTION) { - log.Fatalf("unexpected L2Block entry received, entry.Number: %d, l2Block.Number: %d, "+ + s.logger.Fatalf("unexpected L2Block entry received, entry.Number: %d, l2Block.Number: %d, "+ "prevEntry: %s, prevEntry.Number: %d", entry.Number, l2Block.Number, @@ -870,11 +875,11 @@ func (s *SequenceSender) handleReceivedDataStream( err := proto.Unmarshal(s.prevStreamEntry.Data, prevL2Block) if err != nil { - log.Errorf("error unmarshalling prevL2Block: %v", err) + s.logger.Errorf("error unmarshalling prevL2Block: %v", err) return err } if l2Block.Number != prevL2Block.Number+1 { - log.Fatalf("unexpected L2Block number %d received, it should be %d, entry.Number: %d, prevEntry.Number: %d", + s.logger.Fatalf("unexpected L2Block number %d received, it should be %d, entry.Number: %d, prevEntry.Number: %d", l2Block.Number, prevL2Block.Number+1, entry.Number, s.prevStreamEntry.Number) } } @@ -882,7 +887,7 @@ func (s *SequenceSender) handleReceivedDataStream( // Already virtualized if l2Block.BatchNumber <= s.fromStreamBatch { if l2Block.BatchNumber != s.latestStreamBatch { - log.Infof("skipped! batch already virtualized, number %d", l2Block.BatchNumber) + s.logger.Infof("skipped! batch already virtualized, number %d", l2Block.BatchNumber) } } else if !s.validStream && l2Block.BatchNumber == s.fromStreamBatch+1 { // Initial case after startup @@ -916,11 +921,11 @@ func (s *SequenceSender) handleReceivedDataStream( l2Tx := &datastream.Transaction{} err := proto.Unmarshal(entry.Data, l2Tx) if err != nil { - log.Errorf("error unmarshalling Transaction: %v", err) + s.logger.Errorf("error unmarshalling Transaction: %v", err) return err } - log.Debugf( + s.logger.Debugf( "received Transaction entry, tx.L2BlockNumber: %d, tx.Index: %d, entry.Number: %d", l2Tx.L2BlockNumber, l2Tx.Index, entry.Number, ) @@ -928,7 +933,7 @@ func (s *SequenceSender) handleReceivedDataStream( // Sanity checks if !(prevEntryType == datastream.EntryType_ENTRY_TYPE_L2_BLOCK || prevEntryType == datastream.EntryType_ENTRY_TYPE_TRANSACTION) { - log.Fatalf("unexpected Transaction entry received, entry.Number: %d, transaction.L2BlockNumber: %d, "+ + s.logger.Fatalf("unexpected Transaction entry received, entry.Number: %d, transaction.L2BlockNumber: %d, "+ "transaction.Index: %d, prevEntry: %s, prevEntry.Number: %d", entry.Number, l2Tx.L2BlockNumber, l2Tx.Index, s.entryTypeToString(prevEntryType), s.prevStreamEntry.Number) } @@ -936,7 +941,7 @@ func (s *SequenceSender) handleReceivedDataStream( // Sanity check: tx should be decodable _, err = state.DecodeTx(common.Bytes2Hex(l2Tx.Encoded)) if err != nil { - log.Fatalf("error decoding tx during sanity check: %v", err) + s.logger.Fatalf("error decoding tx during sanity check: %v", err) } // Add tx data @@ -953,11 +958,11 @@ func (s *SequenceSender) handleReceivedDataStream( batch := &datastream.BatchStart{} err := proto.Unmarshal(entry.Data, batch) if err != nil { - log.Errorf("error unmarshalling BatchStart: %v", err) + s.logger.Errorf("error unmarshalling BatchStart: %v", err) return err } - log.Infof("received BatchStart entry, batchStart.Number: %d, entry.Number: %d", batch.Number, entry.Number) + s.logger.Infof("received BatchStart entry, batchStart.Number: %d, entry.Number: %d", batch.Number, entry.Number) // Add batch start data 
s.addInfoSequenceBatchStart(batch) @@ -973,16 +978,16 @@ func (s *SequenceSender) handleReceivedDataStream( batch := &datastream.BatchEnd{} err := proto.Unmarshal(entry.Data, batch) if err != nil { - log.Errorf("error unmarshalling BatchEnd: %v", err) + s.logger.Errorf("error unmarshalling BatchEnd: %v", err) return err } - log.Infof("received BatchEnd entry, batchEnd.Number: %d, entry.Number: %d", batch.Number, entry.Number) + s.logger.Infof("received BatchEnd entry, batchEnd.Number: %d, entry.Number: %d", batch.Number, entry.Number) // Sanity checks if !(prevEntryType == datastream.EntryType_ENTRY_TYPE_L2_BLOCK || prevEntryType == datastream.EntryType_ENTRY_TYPE_TRANSACTION) { - log.Fatalf( + s.logger.Fatalf( "unexpected BatchEnd entry received, entry.Number: %d, batchEnd.Number: %d, "+ "prevEntry.Type: %s, prevEntry.Number: %d", entry.Number, batch.Number, s.entryTypeToString(prevEntryType), s.prevStreamEntry.Number) @@ -994,7 +999,7 @@ func (s *SequenceSender) handleReceivedDataStream( // Close current wip batch err = s.closeSequenceBatch() if err != nil { - log.Fatalf("error closing wip batch") + s.logger.Fatalf("error closing wip batch") return err } @@ -1009,7 +1014,7 @@ func (s *SequenceSender) closeSequenceBatch() error { s.mutexSequence.Lock() defer s.mutexSequence.Unlock() - log.Infof("closing batch %d", s.wipBatch) + s.logger.Infof("closing batch %d", s.wipBatch) data := s.sequenceData[s.wipBatch] if data != nil { @@ -1017,24 +1022,24 @@ func (s *SequenceSender) closeSequenceBatch() error { batchL2Data, err := state.EncodeBatchV2(data.batchRaw) if err != nil { - log.Errorf("error closing and encoding the batch %d: %v", s.wipBatch, err) + s.logger.Errorf("error closing and encoding the batch %d: %v", s.wipBatch, err) return err } data.batch.SetL2Data(batchL2Data) } else { - log.Fatalf("wipBatch %d not found in sequenceData slice", s.wipBatch) + s.logger.Fatalf("wipBatch %d not found in sequenceData slice", s.wipBatch) } // Sanity Check if s.cfg.SanityCheckRPCURL != "" { rpcNumberOfBlocks, batchL2Data, err := s.getBatchFromRPC(s.wipBatch) if err != nil { - log.Fatalf("error getting batch number from RPC while trying to perform sanity check: %v", err) + s.logger.Fatalf("error getting batch number from RPC while trying to perform sanity check: %v", err) } else { dsNumberOfBlocks := len(s.sequenceData[s.wipBatch].batchRaw.Blocks) if rpcNumberOfBlocks != dsNumberOfBlocks { - log.Fatalf( + s.logger.Fatalf( "number of blocks in batch %d (%d) does not match the number of blocks in the batch from the RPC (%d)", s.wipBatch, dsNumberOfBlocks, rpcNumberOfBlocks, ) @@ -1042,15 +1047,15 @@ func (s *SequenceSender) closeSequenceBatch() error { if data.batchType == datastream.BatchType_BATCH_TYPE_REGULAR && common.Bytes2Hex(data.batch.L2Data()) != batchL2Data { - log.Infof("datastream batchL2Data: %s", common.Bytes2Hex(data.batch.L2Data())) - log.Infof("RPC batchL2Data: %s", batchL2Data) - log.Fatalf("batchL2Data in batch %d does not match batchL2Data from the RPC (%d)", s.wipBatch) + s.logger.Infof("datastream batchL2Data: %s", common.Bytes2Hex(data.batch.L2Data())) + s.logger.Infof("RPC batchL2Data: %s", batchL2Data) + s.logger.Fatalf("batchL2Data in batch %d does not match batchL2Data from the RPC (%d)", s.wipBatch) } - log.Infof("sanity check of batch %d against RPC successful", s.wipBatch) + s.logger.Infof("sanity check of batch %d against RPC successful", s.wipBatch) } } else { - log.Warnf("config param SanityCheckRPCURL not set, sanity check with RPC can't be done") + 
s.logger.Warnf("config param SanityCheckRPCURL not set, sanity check with RPC can't be done") } return nil @@ -1089,7 +1094,7 @@ func (s *SequenceSender) getBatchFromRPC(batchNumber uint64) (int, string, error // addNewSequenceBatch adds a new batch to the sequence func (s *SequenceSender) addNewSequenceBatch(l2Block *datastream.L2Block) { s.mutexSequence.Lock() - log.Infof("...new batch, number %d", l2Block.BatchNumber) + s.logger.Infof("...new batch, number %d", l2Block.BatchNumber) if l2Block.BatchNumber > s.wipBatch+1 { s.logFatalf("new batch number (%d) is not consecutive to the current one (%d)", l2Block.BatchNumber, s.wipBatch) @@ -1119,7 +1124,7 @@ func (s *SequenceSender) addNewSequenceBatch(l2Block *datastream.L2Block) { // addInfoSequenceBatchStart adds info from the batch start func (s *SequenceSender) addInfoSequenceBatchStart(batch *datastream.BatchStart) { s.mutexSequence.Lock() - log.Infof( + s.logger.Infof( "batch %d (%s) Start: type %d forkId %d chainId %d", batch.Number, datastream.BatchType_name[int32(batch.Type)], batch.Type, batch.ForkId, batch.ChainId, ) @@ -1161,7 +1166,7 @@ func (s *SequenceSender) addInfoSequenceBatchEnd(batch *datastream.BatchEnd) { // addNewBatchL2Block adds a new L2 block to the work in progress batch func (s *SequenceSender) addNewBatchL2Block(l2Block *datastream.L2Block) { s.mutexSequence.Lock() - log.Infof(".....new L2 block, number %d (batch %d)", l2Block.Number, l2Block.BatchNumber) + s.logger.Infof(".....new L2 block, number %d (batch %d)", l2Block.Number, l2Block.BatchNumber) // Current batch data := s.sequenceData[s.wipBatch] @@ -1185,7 +1190,7 @@ func (s *SequenceSender) addNewBatchL2Block(l2Block *datastream.L2Block) { // Get current L2 block _, blockRaw := s.getWipL2Block() if blockRaw == nil { - log.Debugf("wip block %d not found!") + s.logger.Debugf("wip block %d not found!") return } @@ -1200,7 +1205,7 @@ func (s *SequenceSender) addNewBatchL2Block(l2Block *datastream.L2Block) { // addNewBlockTx adds a new Tx to the current L2 block func (s *SequenceSender) addNewBlockTx(l2Tx *datastream.Transaction) { s.mutexSequence.Lock() - log.Debugf("........new tx, length %d EGP %d SR %x..", + s.logger.Debugf("........new tx, length %d EGP %d SR %x..", len(l2Tx.Encoded), l2Tx.EffectiveGasPricePercentage, l2Tx.ImStateRoot[:8], ) @@ -1210,7 +1215,7 @@ func (s *SequenceSender) addNewBlockTx(l2Tx *datastream.Transaction) { // New Tx raw tx, err := state.DecodeTx(common.Bytes2Hex(l2Tx.Encoded)) if err != nil { - log.Fatalf("error decoding tx: %v", err) + s.logger.Fatalf("error decoding tx: %v", err) return } @@ -1252,10 +1257,10 @@ func (s *SequenceSender) updateLatestVirtualBatch() error { s.latestVirtualBatch, err = s.etherman.GetLatestBatchNumber() if err != nil { - log.Errorf("error getting latest virtual batch, error: %v", err) + s.logger.Errorf("error getting latest virtual batch, error: %v", err) return errors.New("fail to get latest virtual batch") } else { - log.Infof("latest virtual batch is %d", s.latestVirtualBatch) + s.logger.Infof("latest virtual batch is %d", s.latestVirtualBatch) } return nil } @@ -1291,8 +1296,8 @@ func (s *SequenceSender) marginTimeElapsed( // logFatalf logs error, activates flag to stop sequencing, and remains in an infinite loop func (s *SequenceSender) logFatalf(template string, args ...interface{}) { s.seqSendingStopped = true - log.Errorf(template, args...) - log.Errorf("sequence sending stopped.") + s.logger.Errorf(template, args...) 
+ s.logger.Errorf("sequence sending stopped.") for { time.Sleep(1 * time.Second) } diff --git a/sequencesender/txbuilder/interface.go b/sequencesender/txbuilder/interface.go index 1a16dbba..905aa855 100644 --- a/sequencesender/txbuilder/interface.go +++ b/sequencesender/txbuilder/interface.go @@ -2,6 +2,7 @@ package txbuilder import ( "context" + "fmt" "github.com/0xPolygon/cdk/sequencesender/seqsendertypes" "github.com/0xPolygon/cdk/state/datastream" @@ -10,6 +11,9 @@ import ( ) type TxBuilder interface { + // Stringer interface + fmt.Stringer + // BuildSequenceBatchesTx Builds a sequence of batches transaction BuildSequenceBatchesTx(ctx context.Context, sequences seqsendertypes.Sequence) (*ethtypes.Transaction, error) // NewSequence Creates a new sequence @@ -22,9 +26,8 @@ type TxBuilder interface { ) (seqsendertypes.Sequence, error) // NewBatchFromL2Block Creates a new batch from the L2 block from a datastream NewBatchFromL2Block(l2Block *datastream.L2Block) seqsendertypes.Batch - //SetCondNewSeq Allows to override the condition to send a new sequence, returns previous one + // SetCondNewSeq Allows to override the condition to send a new sequence, returns previous one SetCondNewSeq(cond CondNewSequence) CondNewSequence - String() string } type CondNewSequence interface { From c3526e2717884dd54fc08e09367fdfae35c3b468 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= Date: Wed, 4 Sep 2024 08:37:04 +0200 Subject: [PATCH 04/17] feat: add module name to the aggregator --- aggregator/aggregator.go | 420 ++++++++++++++++++--------------------- cmd/run.go | 14 +- 2 files changed, 198 insertions(+), 236 deletions(-) diff --git a/aggregator/aggregator.go b/aggregator/aggregator.go index 594338a4..0d95ff51 100644 --- a/aggregator/aggregator.go +++ b/aggregator/aggregator.go @@ -60,7 +60,8 @@ type finalProofMsg struct { type Aggregator struct { prover.UnimplementedAggregatorServiceServer - cfg Config + cfg Config + logger *log.Logger state stateInterface etherman etherman @@ -100,6 +101,7 @@ type Aggregator struct { func New( ctx context.Context, cfg Config, + logger *log.Logger, stateInterface stateInterface, etherman etherman) (*Aggregator, error) { var profitabilityChecker aggregatorTxProfitabilityChecker @@ -123,7 +125,7 @@ func New( } ethTxManager, err := ethtxmanager.New(cfg.EthTxManager) if err != nil { - log.Fatalf("error creating ethtxmanager client: %v", err) + logger.Fatalf("error creating ethtxmanager client: %v", err) } var streamClient *datastreamer.StreamClient @@ -136,14 +138,12 @@ func New( Outputs: cfg.Log.Outputs, } - log.Init(cfg.Log) - - log.Info("Creating data stream client....") + logger.Info("Creating data stream client....") streamClient, err = datastreamer.NewClientWithLogsConfig(cfg.StreamClient.Server, dataStreamType, streamLogConfig) if err != nil { - log.Fatalf("failed to create stream client, error: %v", err) + logger.Fatalf("failed to create stream client, error: %v", err) } - log.Info("Data stream client created.") + logger.Info("Data stream client created.") } // Synchonizer logs @@ -157,10 +157,10 @@ func New( // Create L1 synchronizer client cfg.Synchronizer.Etherman.L1URL = cfg.EthTxManager.Etherman.URL - log.Debugf("Creating synchronizer client with config: %+v", cfg.Synchronizer) + logger.Debugf("Creating synchronizer client with config: %+v", cfg.Synchronizer) l1Syncr, err := synchronizer.NewSynchronizer(ctx, cfg.Synchronizer) if err != nil { - log.Fatalf("failed to create synchronizer client, error: %v", err) + logger.Fatalf("failed to 
create synchronizer client, error: %v", err) } var ( @@ -180,6 +180,7 @@ func New( a := &Aggregator{ ctx: ctx, cfg: cfg, + logger: logger, state: stateInterface, etherman: etherman, ethTxManager: ethTxManager, @@ -227,15 +228,15 @@ func (a *Aggregator) retrieveWitness() { for !success { var err error // Get Witness - dbBatch.Witness, err = getWitness(dbBatch.Batch.BatchNumber, a.cfg.WitnessURL, a.cfg.UseFullWitness) + dbBatch.Witness, err = a.getWitness(dbBatch.Batch.BatchNumber, a.cfg.WitnessURL, a.cfg.UseFullWitness) if err != nil { if errors.Is(err, errBusy) { - log.Debugf( + a.logger.Debugf( "Witness server is busy, retrying get witness for batch %d in %v", dbBatch.Batch.BatchNumber, a.cfg.RetryTime.Duration, ) } else { - log.Errorf("Failed to get witness for batch %d, err: %v", dbBatch.Batch.BatchNumber, err) + a.logger.Errorf("Failed to get witness for batch %d, err: %v", dbBatch.Batch.BatchNumber, err) } time.Sleep(a.cfg.RetryTime.Duration) @@ -244,7 +245,7 @@ func (a *Aggregator) retrieveWitness() { err = a.state.AddBatch(a.ctx, &dbBatch, nil) if err != nil { - log.Errorf("Error adding batch: %v", err) + a.logger.Errorf("Error adding batch: %v", err) time.Sleep(a.cfg.RetryTime.Duration) continue inner @@ -257,23 +258,23 @@ func (a *Aggregator) retrieveWitness() { } func (a *Aggregator) handleReorg(reorgData synchronizer.ReorgExecutionResult) { - log.Warnf("Reorg detected, reorgData: %+v", reorgData) + a.logger.Warnf("Reorg detected, reorgData: %+v", reorgData) // Get new latest verified batch number lastVBatchNumber, err := a.l1Syncr.GetLastestVirtualBatchNumber(a.ctx) if err != nil { - log.Errorf("Error getting last virtual batch number: %v", err) + a.logger.Errorf("Error getting last virtual batch number: %v", err) } else { err = a.state.DeleteBatchesNewerThanBatchNumber(a.ctx, lastVBatchNumber, nil) if err != nil { - log.Errorf("Error deleting batches newer than batch number %d: %v", lastVBatchNumber, err) + a.logger.Errorf("Error deleting batches newer than batch number %d: %v", lastVBatchNumber, err) } } // Halt the aggregator a.halted.Store(true) for { - log.Warnf( + a.logger.Warnf( "Halting the aggregator due to a L1 reorg. 
" + "Reorged data has been deleted, so it is safe to manually restart the aggregator.", ) @@ -282,7 +283,7 @@ func (a *Aggregator) handleReorg(reorgData synchronizer.ReorgExecutionResult) { } func (a *Aggregator) handleRollbackBatches(rollbackData synchronizer.RollbackBatchesData) { - log.Warnf("Rollback batches event, rollbackBatchesData: %+v", rollbackData) + a.logger.Warnf("Rollback batches event, rollbackBatchesData: %+v", rollbackData) a.streamClientMutex.Lock() defer a.streamClientMutex.Unlock() @@ -298,9 +299,9 @@ func (a *Aggregator) handleRollbackBatches(rollbackData synchronizer.RollbackBat // Stop Reading the data stream err = a.streamClient.ExecCommandStop() if err != nil { - log.Errorf("failed to stop data stream: %v.", err) + a.logger.Errorf("failed to stop data stream: %v.", err) } else { - log.Info("Data stream client stopped") + a.logger.Info("Data stream client stopped") } } @@ -309,7 +310,7 @@ func (a *Aggregator) handleRollbackBatches(rollbackData synchronizer.RollbackBat if err == nil { lastVerifiedBatchNumber, err = a.etherman.GetLatestVerifiedBatchNum() if err != nil { - log.Errorf("Error getting latest verified batch number: %v", err) + a.logger.Errorf("Error getting latest verified batch number: %v", err) } } @@ -325,9 +326,9 @@ func (a *Aggregator) handleRollbackBatches(rollbackData synchronizer.RollbackBat if err == nil { err = a.state.DeleteBatchesNewerThanBatchNumber(a.ctx, rollbackData.LastBatchNumber, nil) if err != nil { - log.Errorf("Error deleting batches newer than batch number %d: %v", rollbackData.LastBatchNumber, err) + a.logger.Errorf("Error deleting batches newer than batch number %d: %v", rollbackData.LastBatchNumber, err) } else { - log.Infof("Deleted batches newer than batch number %d", rollbackData.LastBatchNumber) + a.logger.Infof("Deleted batches newer than batch number %d", rollbackData.LastBatchNumber) } } @@ -335,9 +336,9 @@ func (a *Aggregator) handleRollbackBatches(rollbackData synchronizer.RollbackBat if err == nil { err = a.state.DeleteBatchesOlderThanBatchNumber(a.ctx, rollbackData.LastBatchNumber, nil) if err != nil { - log.Errorf("Error deleting batches older than batch number %d: %v", rollbackData.LastBatchNumber, err) + a.logger.Errorf("Error deleting batches older than batch number %d: %v", rollbackData.LastBatchNumber, err) } else { - log.Infof("Deleted batches older than batch number %d", rollbackData.LastBatchNumber) + a.logger.Infof("Deleted batches older than batch number %d", rollbackData.LastBatchNumber) } } @@ -345,9 +346,9 @@ func (a *Aggregator) handleRollbackBatches(rollbackData synchronizer.RollbackBat if err == nil { err = a.state.DeleteUngeneratedProofs(a.ctx, nil) if err != nil { - log.Errorf("Error deleting ungenerated proofs: %v", err) + a.logger.Errorf("Error deleting ungenerated proofs: %v", err) } else { - log.Info("Deleted ungenerated proofs") + a.logger.Info("Deleted ungenerated proofs") } } @@ -355,9 +356,9 @@ func (a *Aggregator) handleRollbackBatches(rollbackData synchronizer.RollbackBat if err == nil { err = a.state.DeleteGeneratedProofs(a.ctx, rollbackData.LastBatchNumber+1, maxDBBigIntValue, nil) if err != nil { - log.Errorf("Error deleting generated proofs: %v", err) + a.logger.Errorf("Error deleting generated proofs: %v", err) } else { - log.Infof("Deleted generated proofs for batches newer than %d", rollbackData.LastBatchNumber) + a.logger.Infof("Deleted generated proofs for batches newer than %d", rollbackData.LastBatchNumber) } } @@ -365,7 +366,7 @@ func (a *Aggregator) 
handleRollbackBatches(rollbackData synchronizer.RollbackBat // Reset current batch data previously read from the data stream a.resetCurrentBatchData() a.currentStreamBatch = state.Batch{} - log.Info("Current batch data reset") + a.logger.Info("Current batch data reset") var marshalledBookMark []byte // Reset the data stream reading point @@ -376,33 +377,33 @@ func (a *Aggregator) handleRollbackBatches(rollbackData synchronizer.RollbackBat marshalledBookMark, err = proto.Marshal(bookMark) if err != nil { - log.Error("failed to marshal bookmark: %v", err) + a.logger.Error("failed to marshal bookmark: %v", err) } else { // Restart the stream client if needed if dsClientWasRunning { a.streamClient.SetProcessEntryFunc(a.handleReceivedDataStream) err = a.streamClient.Start() if err != nil { - log.Errorf("failed to start stream client, error: %v", err) + a.logger.Errorf("failed to start stream client, error: %v", err) } else { // Resume data stream reading err = a.streamClient.ExecCommandStartBookmark(marshalledBookMark) if err != nil { - log.Errorf("failed to connect to data stream: %v", err) + a.logger.Errorf("failed to connect to data stream: %v", err) } - log.Info("Data stream client resumed") + a.logger.Info("Data stream client resumed") } } } } if err == nil { - log.Info("Handling rollback batches event finished successfully") + a.logger.Info("Handling rollback batches event finished successfully") } else { // Halt the aggregator a.halted.Store(true) for { - log.Errorf("Halting the aggregator due to an error handling rollback batches event: %v", err) + a.logger.Errorf("Halting the aggregator due to an error handling rollback batches event: %v", err) time.Sleep(10 * time.Second) //nolint:gomnd } } @@ -422,7 +423,7 @@ func (a *Aggregator) handleReceivedDataStream( batch := &datastream.BatchStart{} err := proto.Unmarshal(entry.Data, batch) if err != nil { - log.Errorf("Error unmarshalling batch: %v", err) + a.logger.Errorf("Error unmarshalling batch: %v", err) return err } @@ -435,7 +436,7 @@ func (a *Aggregator) handleReceivedDataStream( batch := &datastream.BatchEnd{} err := proto.Unmarshal(entry.Data, batch) if err != nil { - log.Errorf("Error unmarshalling batch: %v", err) + a.logger.Errorf("Error unmarshalling batch: %v", err) return err } @@ -455,18 +456,18 @@ func (a *Aggregator) handleReceivedDataStream( // Get batchl2Data from L1 virtualBatch, err := a.l1Syncr.GetVirtualBatchByBatchNumber(a.ctx, a.currentStreamBatch.BatchNumber) if err != nil && !errors.Is(err, entities.ErrNotFound) { - log.Errorf("Error getting virtual batch: %v", err) + a.logger.Errorf("Error getting virtual batch: %v", err) return err } for errors.Is(err, entities.ErrNotFound) { - log.Debug("Waiting for virtual batch to be available") + a.logger.Debug("Waiting for virtual batch to be available") time.Sleep(a.cfg.RetryTime.Duration) virtualBatch, err = a.l1Syncr.GetVirtualBatchByBatchNumber(a.ctx, a.currentStreamBatch.BatchNumber) if err != nil && !errors.Is(err, entities.ErrNotFound) { - log.Errorf("Error getting virtual batch: %v", err) + a.logger.Errorf("Error getting virtual batch: %v", err) return err } @@ -477,7 +478,7 @@ func (a *Aggregator) handleReceivedDataStream( a.currentStreamBatch.Type != datastream.BatchType_BATCH_TYPE_INJECTED { batchl2Data, err = state.EncodeBatchV2(&a.currentStreamBatchRaw) if err != nil { - log.Errorf("Error encoding batch: %v", err) + a.logger.Errorf("Error encoding batch: %v", err) return err } @@ -495,30 +496,30 @@ func (a *Aggregator) handleReceivedDataStream( // Compare 
BatchL2Data from L1 and DataStream if common.Bytes2Hex(batchl2Data) != common.Bytes2Hex(virtualBatch.BatchL2Data) && a.currentStreamBatch.Type != datastream.BatchType_BATCH_TYPE_INJECTED { - log.Warnf("BatchL2Data from L1 and data stream are different for batch %d", a.currentStreamBatch.BatchNumber) + a.logger.Warnf("BatchL2Data from L1 and data stream are different for batch %d", a.currentStreamBatch.BatchNumber) if a.currentStreamBatch.Type == datastream.BatchType_BATCH_TYPE_INVALID { - log.Warnf("Batch is marked as invalid in data stream") + a.logger.Warnf("Batch is marked as invalid in data stream") } else { - log.Warnf("DataStream BatchL2Data:%v", common.Bytes2Hex(batchl2Data)) + a.logger.Warnf("DataStream BatchL2Data:%v", common.Bytes2Hex(batchl2Data)) } - log.Warnf("L1 BatchL2Data:%v", common.Bytes2Hex(virtualBatch.BatchL2Data)) + a.logger.Warnf("L1 BatchL2Data:%v", common.Bytes2Hex(virtualBatch.BatchL2Data)) } // Get L1InfoRoot sequence, err := a.l1Syncr.GetSequenceByBatchNumber(a.ctx, a.currentStreamBatch.BatchNumber) if err != nil { - log.Errorf("Error getting sequence: %v", err) + a.logger.Errorf("Error getting sequence: %v", err) return err } for sequence == nil { - log.Debug("Waiting for sequence to be available") + a.logger.Debug("Waiting for sequence to be available") time.Sleep(a.cfg.RetryTime.Duration) sequence, err = a.l1Syncr.GetSequenceByBatchNumber(a.ctx, a.currentStreamBatch.BatchNumber) if err != nil { - log.Errorf("Error getting sequence: %v", err) + a.logger.Errorf("Error getting sequence: %v", err) return err } @@ -530,7 +531,7 @@ func (a *Aggregator) handleReceivedDataStream( // Calculate Acc Input Hash oldDBBatch, err := a.state.GetBatch(a.ctx, a.currentStreamBatch.BatchNumber-1, nil) if err != nil { - log.Errorf("Error getting batch %d: %v", a.currentStreamBatch.BatchNumber-1, err) + a.logger.Errorf("Error getting batch %d: %v", a.currentStreamBatch.BatchNumber-1, err) return err } @@ -539,7 +540,7 @@ func (a *Aggregator) handleReceivedDataStream( if a.currentStreamBatch.BatchNumber == 1 { l1Block, err := a.l1Syncr.GetL1BlockByNumber(a.ctx, virtualBatch.BlockNumber) if err != nil { - log.Errorf("Error getting L1 block: %v", err) + a.logger.Errorf("Error getting L1 block: %v", err) return err } @@ -568,7 +569,7 @@ func (a *Aggregator) handleReceivedDataStream( wDBBatch, err := a.state.GetBatch(a.ctx, a.currentStreamBatch.BatchNumber, nil) if err != nil { if !errors.Is(err, state.ErrNotFound) { - log.Errorf("Error getting batch %d: %v", a.currentStreamBatch.BatchNumber, err) + a.logger.Errorf("Error getting batch %d: %v", a.currentStreamBatch.BatchNumber, err) return err } @@ -581,7 +582,7 @@ func (a *Aggregator) handleReceivedDataStream( // Store batch in the DB err = a.state.AddBatch(a.ctx, &dbBatch, nil) if err != nil { - log.Errorf("Error adding batch: %v", err) + a.logger.Errorf("Error adding batch: %v", err) return err } @@ -604,7 +605,7 @@ func (a *Aggregator) handleReceivedDataStream( l2Block := &datastream.L2Block{} err := proto.Unmarshal(entry.Data, l2Block) if err != nil { - log.Errorf("Error unmarshalling L2Block: %v", err) + a.logger.Errorf("Error unmarshalling L2Block: %v", err) return err } @@ -625,14 +626,14 @@ func (a *Aggregator) handleReceivedDataStream( l2Tx := &datastream.Transaction{} err := proto.Unmarshal(entry.Data, l2Tx) if err != nil { - log.Errorf("Error unmarshalling L2Tx: %v", err) + a.logger.Errorf("Error unmarshalling L2Tx: %v", err) return err } // New Tx raw tx, err := state.DecodeTx(common.Bytes2Hex(l2Tx.Encoded)) if err != nil 
{ - log.Errorf("Error decoding tx: %v", err) + a.logger.Errorf("Error decoding tx: %v", err) return err } @@ -655,7 +656,7 @@ func (a *Aggregator) Start() error { // Initial L1 Sync blocking err := a.l1Syncr.Sync(true) if err != nil { - log.Fatalf("Failed to synchronize from L1: %v", err) + a.logger.Fatalf("Failed to synchronize from L1: %v", err) return err } @@ -664,7 +665,7 @@ func (a *Aggregator) Start() error { go func() { err := a.l1Syncr.Sync(false) if err != nil { - log.Fatalf("Failed to synchronize from L1: %v", err) + a.logger.Fatalf("Failed to synchronize from L1: %v", err) } }() @@ -672,7 +673,7 @@ func (a *Aggregator) Start() error { address := fmt.Sprintf("%s:%d", a.cfg.Host, a.cfg.Port) lis, err := net.Listen("tcp", address) if err != nil { - log.Fatalf("Failed to listen: %v", err) + a.logger.Fatalf("Failed to listen: %v", err) } a.srv = grpc.NewServer() @@ -704,8 +705,8 @@ func (a *Aggregator) Start() error { return err } - log.Infof("Last Verified Batch Number:%v", lastVerifiedBatchNumber) - log.Infof("Starting AccInputHash:%v", accInputHash.String()) + a.logger.Infof("Last Verified Batch Number:%v", lastVerifiedBatchNumber) + a.logger.Infof("Starting AccInputHash:%v", accInputHash.String()) // Store Acc Input Hash of the latest verified batch dummyDBBatch := state.DBBatch{ @@ -738,7 +739,7 @@ func (a *Aggregator) Start() error { err = a.streamClient.Start() if err != nil { - log.Fatalf("failed to start stream client, error: %v", err) + a.logger.Fatalf("failed to start stream client, error: %v", err) } bookMark := &datastream.BookMark{ @@ -748,20 +749,20 @@ func (a *Aggregator) Start() error { marshalledBookMark, err := proto.Marshal(bookMark) if err != nil { - log.Fatalf("failed to marshal bookmark: %v", err) + a.logger.Fatalf("failed to marshal bookmark: %v", err) } err = a.streamClient.ExecCommandStartBookmark(marshalledBookMark) if err != nil { - log.Fatalf("failed to connect to data stream: %v", err) + a.logger.Fatalf("failed to connect to data stream: %v", err) } // A this point everything is ready, so start serving go func() { - log.Infof("Server listening on port %d", a.cfg.Port) + a.logger.Infof("Server listening on port %d", a.cfg.Port) if err := a.srv.Serve(lis); err != nil { a.exit() - log.Fatalf("Failed to serve: %v", err) + a.logger.Fatalf("Failed to serve: %v", err) } }() } @@ -791,17 +792,17 @@ func (a *Aggregator) Channel(stream prover.AggregatorService_ChannelServer) erro return err } - log := log.WithFields( + tmpLogger := a.logger.WithFields( "prover", prover.Name(), "proverId", prover.ID(), "proverAddr", prover.Addr(), ) - log.Info("Establishing stream connection with prover") + tmpLogger.Info("Establishing stream connection with prover") // Check if prover supports the required Fork ID if !prover.SupportsForkID(a.cfg.ForkId) { err := errors.New("prover does not support required fork ID") - log.Warn(FirstToUpper(err.Error())) + tmpLogger.Warn(FirstToUpper(err.Error())) return err } @@ -819,13 +820,13 @@ func (a *Aggregator) Channel(stream prover.AggregatorService_ChannelServer) erro if !a.halted.Load() { isIdle, err := prover.IsIdle() if err != nil { - log.Errorf("Failed to check if prover is idle: %v", err) + tmpLogger.Errorf("Failed to check if prover is idle: %v", err) time.Sleep(a.cfg.RetryTime.Duration) continue } if !isIdle { - log.Debug("Prover is not idle") + tmpLogger.Debug("Prover is not idle") time.Sleep(a.cfg.RetryTime.Duration) continue @@ -833,18 +834,18 @@ func (a *Aggregator) Channel(stream prover.AggregatorService_ChannelServer) erro 
_, err = a.tryBuildFinalProof(ctx, prover, nil) if err != nil { - log.Errorf("Error checking proofs to verify: %v", err) + tmpLogger.Errorf("Error checking proofs to verify: %v", err) } proofGenerated, err := a.tryAggregateProofs(ctx, prover) if err != nil { - log.Errorf("Error trying to aggregate proofs: %v", err) + tmpLogger.Errorf("Error trying to aggregate proofs: %v", err) } if !proofGenerated { proofGenerated, err = a.tryGenerateBatchProof(ctx, prover) if err != nil { - log.Errorf("Error trying to generate proof: %v", err) + tmpLogger.Errorf("Error trying to generate proof: %v", err) } } if !proofGenerated { @@ -870,14 +871,16 @@ func (a *Aggregator) sendFinalProof() { ctx := a.ctx proof := msg.recursiveProof - log.WithFields("proofId", proof.ProofID, "batches", fmt.Sprintf("%d-%d", proof.BatchNumber, proof.BatchNumberFinal)) - log.Info("Verifying final proof with ethereum smart contract") + tmpLogger := a.logger.WithFields( + "proofId", proof.ProofID, + "batches", fmt.Sprintf("%d-%d", proof.BatchNumber, proof.BatchNumberFinal)) + tmpLogger.Info("Verifying final proof with ethereum smart contract") a.startProofVerification() finalDBBatch, err := a.state.GetBatch(ctx, proof.BatchNumberFinal, nil) if err != nil { - log.Errorf("Failed to retrieve batch with number [%d]: %v", proof.BatchNumberFinal, err) + tmpLogger.Errorf("Failed to retrieve batch with number [%d]: %v", proof.BatchNumberFinal, err) a.endProofVerification() continue @@ -924,28 +927,28 @@ func (a *Aggregator) settleWithAggLayer( } signedTx, err := tx.Sign(a.sequencerPrivateKey) if err != nil { - log.Errorf("failed to sign tx: %v", err) + a.logger.Errorf("failed to sign tx: %v", err) a.handleFailureToAddVerifyBatchToBeMonitored(ctx, proof) return false } - log.Debug("final proof: %+v", tx) - log.Debug("final proof signedTx: ", signedTx.Tx.ZKP.Proof.Hex()) + a.logger.Debug("final proof: %+v", tx) + a.logger.Debug("final proof signedTx: ", signedTx.Tx.ZKP.Proof.Hex()) txHash, err := a.aggLayerClient.SendTx(*signedTx) if err != nil { - log.Errorf("failed to send tx to the agglayer: %v", err) + a.logger.Errorf("failed to send tx to the agglayer: %v", err) a.handleFailureToAddVerifyBatchToBeMonitored(ctx, proof) return false } - log.Infof("tx %s sent to agglayer, waiting to be mined", txHash.Hex()) - log.Debugf("Timeout set to %f seconds", a.cfg.AggLayerTxTimeout.Duration.Seconds()) + a.logger.Infof("tx %s sent to agglayer, waiting to be mined", txHash.Hex()) + a.logger.Debugf("Timeout set to %f seconds", a.cfg.AggLayerTxTimeout.Duration.Seconds()) waitCtx, cancelFunc := context.WithDeadline(ctx, time.Now().Add(a.cfg.AggLayerTxTimeout.Duration)) defer cancelFunc() if err := a.aggLayerClient.WaitTxToBeMined(txHash, waitCtx); err != nil { - log.Errorf("agglayer didn't mine the tx: %v", err) + a.logger.Errorf("agglayer didn't mine the tx: %v", err) a.handleFailureToAddVerifyBatchToBeMonitored(ctx, proof) return false @@ -965,7 +968,7 @@ func (a *Aggregator) settleDirect( proof.BatchNumber-1, proof.BatchNumberFinal, &inputs, sender, ) if err != nil { - log.Errorf("Error estimating batch verification to add to eth tx manager: %v", err) + a.logger.Errorf("Error estimating batch verification to add to eth tx manager: %v", err) a.handleFailureToAddVerifyBatchToBeMonitored(ctx, proof) return false @@ -973,7 +976,7 @@ func (a *Aggregator) settleDirect( monitoredTxID, err := a.ethTxManager.Add(ctx, to, nil, big.NewInt(0), data, a.cfg.GasOffset, nil) if err != nil { - log.Errorf("Error Adding TX to ethTxManager: %v", err) + 
a.logger.Errorf("Error Adding TX to ethTxManager: %v", err) mTxLogger := ethtxmanager.CreateLogger(monitoredTxID, sender, to) mTxLogger.Errorf("Error to add batch verification tx to eth tx manager: %v", err) a.handleFailureToAddVerifyBatchToBeMonitored(ctx, proof) @@ -990,23 +993,22 @@ func (a *Aggregator) settleDirect( } func (a *Aggregator) handleFailureToAddVerifyBatchToBeMonitored(ctx context.Context, proof *state.Proof) { - log := log.WithFields( + tmpLogger := a.logger.WithFields( "proofId", proof.ProofID, "batches", fmt.Sprintf("%d-%d", proof.BatchNumber, proof.BatchNumberFinal), ) proof.GeneratingSince = nil err := a.state.UpdateGeneratedProof(ctx, proof, nil) if err != nil { - log.Errorf("Failed updating proof state (false): %v", err) + tmpLogger.Errorf("Failed updating proof state (false): %v", err) } a.endProofVerification() } // buildFinalProof builds and return the final proof for an aggregated/batch proof. func (a *Aggregator) buildFinalProof( - ctx context.Context, prover proverInterface, proof *state.Proof, -) (*prover.FinalProof, error) { - log := log.WithFields( + ctx context.Context, prover proverInterface, proof *state.Proof) (*prover.FinalProof, error) { + tmpLogger := a.logger.WithFields( "prover", prover.Name(), "proverId", prover.ID(), "proverAddr", prover.Addr(), @@ -1020,8 +1022,8 @@ func (a *Aggregator) buildFinalProof( } proof.ProofID = finalProofID - log.Infof("Final proof ID for batches [%d-%d]: %s", proof.BatchNumber, proof.BatchNumberFinal, *proof.ProofID) - log = log.WithFields("finalProofId", finalProofID) + tmpLogger.Infof("Final proof ID for batches [%d-%d]: %s", proof.BatchNumber, proof.BatchNumberFinal, *proof.ProofID) + tmpLogger = tmpLogger.WithFields("finalProofId", finalProofID) finalProof, err := prover.WaitFinalProof(ctx, *proof.ProofID) if err != nil { @@ -1037,10 +1039,9 @@ func (a *Aggregator) buildFinalProof( if err != nil { return nil, fmt.Errorf("failed to retrieve batch with number [%d]", proof.BatchNumberFinal) } - log.Warnf( + tmpLogger.Warnf( "NewLocalExitRoot and NewStateRoot look like a mock values, using values from executor instead: LER: %v, SR: %v", - finalDBBatch.Batch.LocalExitRoot.TerminalString(), finalDBBatch.Batch.StateRoot.TerminalString(), - ) + finalDBBatch.Batch.LocalExitRoot.TerminalString(), finalDBBatch.Batch.StateRoot.TerminalString()) finalProof.Public.NewStateRoot = finalDBBatch.Batch.StateRoot.Bytes() finalProof.Public.NewLocalExitRoot = finalDBBatch.Batch.LocalExitRoot.Bytes() } @@ -1056,20 +1057,18 @@ func (a *Aggregator) tryBuildFinalProof(ctx context.Context, prover proverInterf proverName := prover.Name() proverID := prover.ID() - log := log.WithFields( + tmpLogger := a.logger.WithFields( "prover", proverName, "proverId", proverID, "proverAddr", prover.Addr(), ) - log.Debug("tryBuildFinalProof start") + tmpLogger.Debug("tryBuildFinalProof start") - var err error if !a.canVerifyProof() { - log.Debug("Time to verify proof not reached or proof verification in progress") - + tmpLogger.Debug("Time to verify proof not reached or proof verification in progress") return false, nil } - log.Debug("Send final proof time reached") + tmpLogger.Debug("Send final proof time reached") lastVerifiedBatchNumber, err := a.etherman.GetLatestVerifiedBatchNum() if err != nil { @@ -1082,8 +1081,7 @@ func (a *Aggregator) tryBuildFinalProof(ctx context.Context, prover proverInterf proof, err = a.getAndLockProofReadyToVerify(ctx, lastVerifiedBatchNumber) if errors.Is(err, state.ErrNotFound) { // nothing to verify, swallow the error - 
log.Debug("No proof ready to verify") - + tmpLogger.Debug("No proof ready to verify") return false, nil } if err != nil { @@ -1096,7 +1094,7 @@ func (a *Aggregator) tryBuildFinalProof(ctx context.Context, prover proverInterf proof.GeneratingSince = nil err2 := a.state.UpdateGeneratedProof(a.ctx, proof, nil) if err2 != nil { - log.Errorf("Failed to unlock proof: %v", err2) + tmpLogger.Errorf("Failed to unlock proof: %v", err2) } } }() @@ -1112,7 +1110,7 @@ func (a *Aggregator) tryBuildFinalProof(ctx context.Context, prover proverInterf } } - log = log.WithFields( + tmpLogger = tmpLogger.WithFields( "proofId", *proof.ProofID, "batches", fmt.Sprintf("%d-%d", proof.BatchNumber, proof.BatchNumberFinal), ) @@ -1121,8 +1119,7 @@ func (a *Aggregator) tryBuildFinalProof(ctx context.Context, prover proverInterf finalProof, err := a.buildFinalProof(ctx, prover, proof) if err != nil { err = fmt.Errorf("failed to build final proof, %w", err) - log.Error(FirstToUpper(err.Error())) - + tmpLogger.Error(FirstToUpper(err.Error())) return false, err } @@ -1139,8 +1136,7 @@ func (a *Aggregator) tryBuildFinalProof(ctx context.Context, prover proverInterf case a.finalProof <- msg: } - log.Debug("tryBuildFinalProof end") - + tmpLogger.Debug("tryBuildFinalProof end") return true, nil } @@ -1150,18 +1146,15 @@ func (a *Aggregator) validateEligibleFinalProof( batchNumberToVerify := lastVerifiedBatchNum + 1 if proof.BatchNumber != batchNumberToVerify { - if proof.BatchNumber < batchNumberToVerify && proof.BatchNumberFinal >= batchNumberToVerify { + if proof.BatchNumber < batchNumberToVerify && + proof.BatchNumberFinal >= batchNumberToVerify { // We have a proof that contains some batches below the last batch verified, anyway can be eligible as final proof - log.Warnf( - "Proof %d-%d contains some batches lower than last batch verified %d. Check anyway if it is eligible", - proof.BatchNumber, proof.BatchNumberFinal, lastVerifiedBatchNum, - ) + a.logger.Warnf("Proof %d-%d contains some batches lower than last batch verified %d. Check anyway if it is eligible", + proof.BatchNumber, proof.BatchNumberFinal, lastVerifiedBatchNum) } else if proof.BatchNumberFinal < batchNumberToVerify { // We have a proof that contains batches below that the last batch verified, we need to delete this proof - log.Warnf( - "Proof %d-%d lower than next batch to verify %d. Deleting it", - proof.BatchNumber, proof.BatchNumberFinal, batchNumberToVerify, - ) + a.logger.Warnf("Proof %d-%d lower than next batch to verify %d. 
Deleting it", + proof.BatchNumber, proof.BatchNumberFinal, batchNumberToVerify) err := a.state.DeleteGeneratedProofs(ctx, proof.BatchNumber, proof.BatchNumberFinal, nil) if err != nil { return false, fmt.Errorf("failed to delete discarded proof, err: %w", err) @@ -1169,11 +1162,8 @@ func (a *Aggregator) validateEligibleFinalProof( return false, nil } else { - log.Debugf( - "Proof batch number %d is not the following to last verfied batch number %d", - proof.BatchNumber, lastVerifiedBatchNum, - ) - + a.logger.Debugf("Proof batch number %d is not the following to last verfied batch number %d", + proof.BatchNumber, lastVerifiedBatchNum) return false, nil } } @@ -1183,11 +1173,8 @@ func (a *Aggregator) validateEligibleFinalProof( return false, fmt.Errorf("failed to check if proof contains complete sequences, %w", err) } if !bComplete { - log.Infof( - "Recursive proof %d-%d not eligible to be verified: not containing complete sequences", - proof.BatchNumber, proof.BatchNumberFinal, - ) - + a.logger.Infof("Recursive proof %d-%d not eligible to be verified: not containing complete sequences", + proof.BatchNumber, proof.BatchNumberFinal) return false, nil } @@ -1221,8 +1208,7 @@ func (a *Aggregator) unlockProofsToAggregate(ctx context.Context, proof1 *state. // Release proofs from generating state in a single transaction dbTx, err := a.state.BeginStateTransaction(ctx) if err != nil { - log.Warnf("Failed to begin transaction to release proof aggregation state, err: %v", err) - + a.logger.Warnf("Failed to begin transaction to release proof aggregation state, err: %v", err) return err } @@ -1236,8 +1222,7 @@ func (a *Aggregator) unlockProofsToAggregate(ctx context.Context, proof1 *state. if err != nil { if err := dbTx.Rollback(ctx); err != nil { err := fmt.Errorf("failed to rollback proof aggregation state: %w", err) - log.Error(FirstToUpper(err.Error())) - + a.logger.Error(FirstToUpper(err.Error())) return err } @@ -1253,9 +1238,8 @@ func (a *Aggregator) unlockProofsToAggregate(ctx context.Context, proof1 *state. 
} func (a *Aggregator) getAndLockProofsToAggregate( - ctx context.Context, prover proverInterface, -) (*state.Proof, *state.Proof, error) { - log := log.WithFields( + ctx context.Context, prover proverInterface) (*state.Proof, *state.Proof, error) { + tmpLogger := a.logger.WithFields( "prover", prover.Name(), "proverId", prover.ID(), "proverAddr", prover.Addr(), @@ -1272,8 +1256,7 @@ func (a *Aggregator) getAndLockProofsToAggregate( // Set proofs in generating state in a single transaction dbTx, err := a.state.BeginStateTransaction(ctx) if err != nil { - log.Errorf("Failed to begin transaction to set proof aggregation state, err: %v", err) - + tmpLogger.Errorf("Failed to begin transaction to set proof aggregation state, err: %v", err) return nil, nil, err } @@ -1288,8 +1271,7 @@ func (a *Aggregator) getAndLockProofsToAggregate( if err != nil { if err := dbTx.Rollback(ctx); err != nil { err := fmt.Errorf("failed to rollback proof aggregation state %w", err) - log.Error(FirstToUpper(err.Error())) - + tmpLogger.Error(FirstToUpper(err.Error())) return nil, nil, err } @@ -1308,18 +1290,17 @@ func (a *Aggregator) tryAggregateProofs(ctx context.Context, prover proverInterf proverName := prover.Name() proverID := prover.ID() - log := log.WithFields( + tmpLogger := a.logger.WithFields( "prover", proverName, "proverId", proverID, "proverAddr", prover.Addr(), ) - log.Debug("tryAggregateProofs start") + tmpLogger.Debug("tryAggregateProofs start") proof1, proof2, err0 := a.getAndLockProofsToAggregate(ctx, prover) if errors.Is(err0, state.ErrNotFound) { // nothing to aggregate, swallow the error - log.Debug("Nothing to aggregate") - + tmpLogger.Debug("Nothing to aggregate") return false, nil } if err0 != nil { @@ -1335,19 +1316,17 @@ func (a *Aggregator) tryAggregateProofs(ctx context.Context, prover proverInterf if err != nil { err2 := a.unlockProofsToAggregate(a.ctx, proof1, proof2) if err2 != nil { - log.Errorf("Failed to release aggregated proofs, err: %v", err2) + tmpLogger.Errorf("Failed to release aggregated proofs, err: %v", err2) } } - log.Debug("tryAggregateProofs end") + tmpLogger.Debug("tryAggregateProofs end") }() - log.Infof( - "Aggregating proofs: %d-%d and %d-%d", - proof1.BatchNumber, proof1.BatchNumberFinal, proof2.BatchNumber, proof2.BatchNumberFinal, - ) + tmpLogger.Infof("Aggregating proofs: %d-%d and %d-%d", + proof1.BatchNumber, proof1.BatchNumberFinal, proof2.BatchNumber, proof2.BatchNumberFinal) batches := fmt.Sprintf("%d-%d", proof1.BatchNumber, proof2.BatchNumberFinal) - log = log.WithFields("batches", batches) + tmpLogger = tmpLogger.WithFields("batches", batches) inputProver := map[string]interface{}{ "recursive_proof_1": proof1.Proof, @@ -1356,8 +1335,7 @@ func (a *Aggregator) tryAggregateProofs(ctx context.Context, prover proverInterf b, err := json.Marshal(inputProver) if err != nil { err = fmt.Errorf("failed to serialize input prover, %w", err) - log.Error(FirstToUpper(err.Error())) - + tmpLogger.Error(FirstToUpper(err.Error())) return false, err } @@ -1372,25 +1350,23 @@ func (a *Aggregator) tryAggregateProofs(ctx context.Context, prover proverInterf aggrProofID, err = prover.AggregatedProof(proof1.Proof, proof2.Proof) if err != nil { err = fmt.Errorf("failed to get aggregated proof id, %w", err) - log.Error(FirstToUpper(err.Error())) - + tmpLogger.Error(FirstToUpper(err.Error())) return false, err } proof.ProofID = aggrProofID - log.Infof("Proof ID for aggregated proof: %v", *proof.ProofID) - log = log.WithFields("proofId", *proof.ProofID) + tmpLogger.Infof("Proof ID 
for aggregated proof: %v", *proof.ProofID) + tmpLogger = tmpLogger.WithFields("proofId", *proof.ProofID) recursiveProof, _, err := prover.WaitRecursiveProof(ctx, *proof.ProofID) if err != nil { err = fmt.Errorf("failed to get aggregated proof from prover, %w", err) - log.Error(FirstToUpper(err.Error())) - + tmpLogger.Error(FirstToUpper(err.Error())) return false, err } - log.Info("Aggregated proof generated") + tmpLogger.Info("Aggregated proof generated") proof.Proof = recursiveProof @@ -1399,8 +1375,7 @@ func (a *Aggregator) tryAggregateProofs(ctx context.Context, prover proverInterf dbTx, err := a.state.BeginStateTransaction(ctx) if err != nil { err = fmt.Errorf("failed to begin transaction to update proof aggregation state, %w", err) - log.Error(FirstToUpper(err.Error())) - + tmpLogger.Error(FirstToUpper(err.Error())) return false, err } @@ -1408,13 +1383,11 @@ func (a *Aggregator) tryAggregateProofs(ctx context.Context, prover proverInterf if err != nil { if err := dbTx.Rollback(ctx); err != nil { err := fmt.Errorf("failed to rollback proof aggregation state, %w", err) - log.Error(FirstToUpper(err.Error())) - + tmpLogger.Error(FirstToUpper(err.Error())) return false, err } err = fmt.Errorf("failed to delete previously aggregated proofs, %w", err) - log.Error(FirstToUpper(err.Error())) - + tmpLogger.Error(FirstToUpper(err.Error())) return false, err } @@ -1425,21 +1398,18 @@ func (a *Aggregator) tryAggregateProofs(ctx context.Context, prover proverInterf if err != nil { if err := dbTx.Rollback(ctx); err != nil { err := fmt.Errorf("failed to rollback proof aggregation state, %w", err) - log.Error(FirstToUpper(err.Error())) - + tmpLogger.Error(FirstToUpper(err.Error())) return false, err } err = fmt.Errorf("failed to store the recursive proof, %w", err) - log.Error(FirstToUpper(err.Error())) - + tmpLogger.Error(FirstToUpper(err.Error())) return false, err } err = dbTx.Commit(ctx) if err != nil { err = fmt.Errorf("failed to store the recursive proof, %w", err) - log.Error(FirstToUpper(err.Error())) - + tmpLogger.Error(FirstToUpper(err.Error())) return false, err } @@ -1451,7 +1421,7 @@ func (a *Aggregator) tryAggregateProofs(ctx context.Context, prover proverInterf finalProofBuilt, finalProofErr := a.tryBuildFinalProof(ctx, prover, proof) if finalProofErr != nil { // just log the error and continue to handle the aggregated proof - log.Errorf("Failed trying to check if recursive proof can be verified: %v", finalProofErr) + tmpLogger.Errorf("Failed trying to check if recursive proof can be verified: %v", finalProofErr) } // NOTE(pg): prover is done, use a.ctx from now on @@ -1463,8 +1433,7 @@ func (a *Aggregator) tryAggregateProofs(ctx context.Context, prover proverInterf err := a.state.UpdateGeneratedProof(a.ctx, proof, nil) if err != nil { err = fmt.Errorf("failed to store batch proof result, %w", err) - log.Error(FirstToUpper(err.Error())) - + tmpLogger.Error(FirstToUpper(err.Error())) return false, err } } @@ -1487,7 +1456,7 @@ func (a *Aggregator) getAndLockBatchToProve( proverID := prover.ID() proverName := prover.Name() - log := log.WithFields( + tmpLogger := a.logger.WithFields( "prover", proverName, "proverId", proverID, "proverAddr", prover.Addr(), @@ -1510,7 +1479,7 @@ func (a *Aggregator) getAndLockBatchToProve( batchNumberToVerify++ proofExists, err = a.state.CheckProofExistsForBatch(ctx, batchNumberToVerify, nil) if err != nil { - log.Infof("Error checking proof exists for batch %d", batchNumberToVerify) + tmpLogger.Infof("Error checking proof exists for batch %d", 
batchNumberToVerify) return nil, nil, nil, err } @@ -1524,7 +1493,7 @@ func (a *Aggregator) getAndLockBatchToProve( // Not found, so it it not possible to verify the batch yet if sequence == nil || errors.Is(err, entities.ErrNotFound) { - log.Infof("No sequence found for batch %d", batchNumberToVerify) + tmpLogger.Infof("No sequence found for batch %d", batchNumberToVerify) return nil, nil, nil, state.ErrNotFound } @@ -1538,7 +1507,7 @@ func (a *Aggregator) getAndLockBatchToProve( dbBatch, err := a.state.GetBatch(ctx, batchNumberToVerify, nil) if err != nil { if errors.Is(err, state.ErrNotFound) { - log.Infof("Batch (%d) is not yet in DB", batchNumberToVerify) + tmpLogger.Infof("Batch (%d) is not yet in DB", batchNumberToVerify) } return nil, nil, nil, err @@ -1546,34 +1515,34 @@ func (a *Aggregator) getAndLockBatchToProve( // Check if the witness is already in the DB if len(dbBatch.Witness) == 0 { - log.Infof("Witness for batch %d is not yet in DB", batchNumberToVerify) + tmpLogger.Infof("Witness for batch %d is not yet in DB", batchNumberToVerify) return nil, nil, nil, state.ErrNotFound } err = a.state.AddSequence(ctx, stateSequence, nil) if err != nil { - log.Infof("Error storing sequence for batch %d", batchNumberToVerify) + tmpLogger.Infof("Error storing sequence for batch %d", batchNumberToVerify) return nil, nil, nil, err } // All the data required to generate a proof is ready - log.Infof("Found virtual batch %d pending to generate proof", dbBatch.Batch.BatchNumber) - log = log.WithFields("batch", dbBatch.Batch.BatchNumber) + tmpLogger.Infof("Found virtual batch %d pending to generate proof", dbBatch.Batch.BatchNumber) + tmpLogger = tmpLogger.WithFields("batch", dbBatch.Batch.BatchNumber) - log.Info("Checking profitability to aggregate batch") + tmpLogger.Info("Checking profitability to aggregate batch") // pass pol collateral as zero here, bcs in smart contract fee for aggregator is not defined yet isProfitable, err := a.profitabilityChecker.IsProfitable(ctx, big.NewInt(0)) if err != nil { - log.Errorf("Failed to check aggregator profitability, err: %v", err) + tmpLogger.Errorf("Failed to check aggregator profitability, err: %v", err) return nil, nil, nil, err } if !isProfitable { - log.Infof("Batch is not profitable, pol collateral %d", big.NewInt(0)) + tmpLogger.Infof("Batch is not profitable, pol collateral %d", big.NewInt(0)) return nil, nil, nil, err } @@ -1590,7 +1559,7 @@ func (a *Aggregator) getAndLockBatchToProve( // Avoid other prover to process the same batch err = a.state.AddGeneratedProof(ctx, proof, nil) if err != nil { - log.Errorf("Failed to add batch proof, err: %v", err) + tmpLogger.Errorf("Failed to add batch proof, err: %v", err) return nil, nil, nil, err } @@ -1599,25 +1568,24 @@ func (a *Aggregator) getAndLockBatchToProve( } func (a *Aggregator) tryGenerateBatchProof(ctx context.Context, prover proverInterface) (bool, error) { - log := log.WithFields( + tmpLogger := a.logger.WithFields( "prover", prover.Name(), "proverId", prover.ID(), "proverAddr", prover.Addr(), ) - log.Debug("tryGenerateBatchProof start") + tmpLogger.Debug("tryGenerateBatchProof start") batchToProve, witness, proof, err0 := a.getAndLockBatchToProve(ctx, prover) if errors.Is(err0, state.ErrNotFound) || errors.Is(err0, entities.ErrNotFound) { // nothing to proof, swallow the error - log.Debug("Nothing to generate proof") - + tmpLogger.Debug("Nothing to generate proof") return false, nil } if err0 != nil { return false, err0 } - log = log.WithFields("batch", batchToProve.BatchNumber) + 
tmpLogger = tmpLogger.WithFields("batch", batchToProve.BatchNumber) var ( genProofID *string @@ -1626,60 +1594,56 @@ func (a *Aggregator) tryGenerateBatchProof(ctx context.Context, prover proverInt defer func() { if err != nil { - log.Debug("Deleting proof in progress") + tmpLogger.Debug("Deleting proof in progress") err2 := a.state.DeleteGeneratedProofs(a.ctx, proof.BatchNumber, proof.BatchNumberFinal, nil) if err2 != nil { - log.Errorf("Failed to delete proof in progress, err: %v", err2) + tmpLogger.Errorf("Failed to delete proof in progress, err: %v", err2) } } - log.Debug("tryGenerateBatchProof end") + tmpLogger.Debug("tryGenerateBatchProof end") }() - log.Infof("Sending zki + batch to the prover, batchNumber [%d]", batchToProve.BatchNumber) + tmpLogger.Infof("Sending zki + batch to the prover, batchNumber [%d]", batchToProve.BatchNumber) inputProver, err := a.buildInputProver(ctx, batchToProve, witness) if err != nil { err = fmt.Errorf("failed to build input prover, %w", err) - log.Error(FirstToUpper(err.Error())) - + tmpLogger.Error(FirstToUpper(err.Error())) return false, err } - log.Infof("Sending a batch to the prover. OldAccInputHash [%#x], L1InfoRoot [%#x]", + tmpLogger.Infof("Sending a batch to the prover. OldAccInputHash [%#x], L1InfoRoot [%#x]", inputProver.PublicInputs.OldAccInputHash, inputProver.PublicInputs.L1InfoRoot) genProofID, err = prover.BatchProof(inputProver) if err != nil { err = fmt.Errorf("failed to get batch proof id, %w", err) - log.Error(FirstToUpper(err.Error())) - + tmpLogger.Error(FirstToUpper(err.Error())) return false, err } proof.ProofID = genProofID - log = log.WithFields("proofId", *proof.ProofID) + tmpLogger = tmpLogger.WithFields("proofId", *proof.ProofID) resGetProof, stateRoot, err := prover.WaitRecursiveProof(ctx, *proof.ProofID) if err != nil { err = fmt.Errorf("failed to get proof from prover, %w", err) - log.Error(FirstToUpper(err.Error())) - + tmpLogger.Error(FirstToUpper(err.Error())) return false, err } - log.Info("Batch proof generated") + tmpLogger.Info("Batch proof generated") // Sanity Check: state root from the proof must match the one from the batch if a.cfg.BatchProofSanityCheckEnabled && (stateRoot != common.Hash{}) && (stateRoot != batchToProve.StateRoot) { for { - log.Errorf( - "State root from the proof does not match the expected for batch %d: Proof = [%s] Expected = [%s]", + tmpLogger.Errorf("State root from the proof does not match the expected for batch %d: Proof = [%s] Expected = [%s]", batchToProve.BatchNumber, stateRoot.String(), batchToProve.StateRoot.String(), ) time.Sleep(a.cfg.RetryTime.Duration) } } else { - log.Infof("State root sanity check for batch %d passed", batchToProve.BatchNumber) + tmpLogger.Infof("State root sanity check for batch %d passed", batchToProve.BatchNumber) } proof.Proof = resGetProof @@ -1690,7 +1654,7 @@ func (a *Aggregator) tryGenerateBatchProof(ctx context.Context, prover proverInt finalProofBuilt, finalProofErr := a.tryBuildFinalProof(ctx, prover, proof) if finalProofErr != nil { // just log the error and continue to handle the generated proof - log.Errorf("Error trying to build final proof: %v", finalProofErr) + tmpLogger.Errorf("Error trying to build final proof: %v", finalProofErr) } // NOTE(pg): prover is done, use a.ctx from now on @@ -1702,8 +1666,7 @@ func (a *Aggregator) tryGenerateBatchProof(ctx context.Context, prover proverInt err := a.state.UpdateGeneratedProof(a.ctx, proof, nil) if err != nil { err = fmt.Errorf("failed to store batch proof result, %w", err) - 
log.Error(FirstToUpper(err.Error())) - + tmpLogger.Error(FirstToUpper(err.Error())) return false, err } } @@ -1754,8 +1717,7 @@ func (a *Aggregator) buildInputProver( } else { batchRawData, err = state.DecodeBatchV2(batchToVerify.BatchL2Data) if err != nil { - log.Errorf("Failed to decode batch data, err: %v", err) - + a.logger.Errorf("Failed to decode batch data, err: %v", err) return nil, err } } @@ -1784,19 +1746,17 @@ func (a *Aggregator) buildInputProver( if !contained && l2blockRaw.IndexL1InfoTree != 0 { leaves, err := a.l1Syncr.GetL1InfoTreeLeaves(ctx, []uint32{l2blockRaw.IndexL1InfoTree}) if err != nil { - log.Errorf("Error getting l1InfoTreeLeaf: %v", err) - + a.logger.Errorf("Error getting l1InfoTreeLeaf: %v", err) return nil, err } l1InfoTreeLeaf := leaves[l2blockRaw.IndexL1InfoTree] // Calculate smt proof - log.Infof("Calling tree.ComputeMerkleProof") + a.logger.Infof("Calling tree.ComputeMerkleProof") smtProof, calculatedL1InfoRoot, err := tree.ComputeMerkleProof(l2blockRaw.IndexL1InfoTree, aLeaves) if err != nil { - log.Errorf("Error computing merkle proof: %v", err) - + a.logger.Errorf("Error computing merkle proof: %v", err) return nil, err } @@ -1827,14 +1787,12 @@ func (a *Aggregator) buildInputProver( if batchToVerify.BatchNumber == 1 { virtualBatch, err := a.l1Syncr.GetVirtualBatchByBatchNumber(ctx, batchToVerify.BatchNumber) if err != nil { - log.Errorf("Error getting virtual batch: %v", err) - + a.logger.Errorf("Error getting virtual batch: %v", err) return nil, err } l1Block, err := a.l1Syncr.GetL1BlockByNumber(ctx, virtualBatch.BlockNumber) if err != nil { - log.Errorf("Error getting l1 block: %v", err) - + a.logger.Errorf("Error getting l1 block: %v", err) return nil, err } @@ -1876,17 +1834,19 @@ func (a *Aggregator) buildInputProver( return inputProver, nil } -func getWitness(batchNumber uint64, URL string, fullWitness bool) ([]byte, error) { - var witness string - var response rpc.Response - var err error +func (a *Aggregator) getWitness(batchNumber uint64, URL string, fullWitness bool) ([]byte, error) { + var ( + witness string + response rpc.Response + err error + ) witnessType := "trimmed" if fullWitness { witnessType = "full" } - log.Infof("Requesting witness for batch %d of type %s", batchNumber, witnessType) + a.logger.Infof("Requesting witness for batch %d of type %s", batchNumber, witnessType) response, err = rpc.JSONRPCCall(URL, "zkevm_getBatchWitness", batchNumber, witnessType) if err != nil { @@ -2022,12 +1982,12 @@ func (a *Aggregator) cleanupLockedProofs() { case <-time.After(a.timeCleanupLockedProofs.Duration): n, err := a.state.CleanupLockedProofs(a.ctx, a.cfg.GeneratingProofCleanupThreshold, nil) if err != nil { - log.Errorf("Failed to cleanup locked proofs: %v", err) + a.logger.Errorf("Failed to cleanup locked proofs: %v", err) } if n == 1 { - log.Warn("Found a stale proof and removed from cache") + a.logger.Warn("Found a stale proof and removed from cache") } else if n > 1 { - log.Warnf("Found %d stale proofs and removed from cache", n) + a.logger.Warnf("Found %d stale proofs and removed from cache", n) } } } diff --git a/cmd/run.go b/cmd/run.go index 386ba16d..868cfad5 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -19,6 +19,7 @@ import ( "github.com/0xPolygon/cdk/bridgesync" "github.com/0xPolygon/cdk/claimsponsor" "github.com/0xPolygon/cdk/common" + cdkcommon "github.com/0xPolygon/cdk/common" "github.com/0xPolygon/cdk/config" "github.com/0xPolygon/cdk/dataavailability" "github.com/0xPolygon/cdk/dataavailability/datacommittee" @@ -133,9 +134,10 
@@ func start(cliCtx *cli.Context) error { } func createAggregator(ctx context.Context, c config.Config, runMigrations bool) *aggregator.Aggregator { + logger := log.WithFields("module", cdkcommon.AGGREGATOR) // Migrations if runMigrations { - log.Infof( + logger.Infof( "Running DB migrations host: %s:%s db:%s user:%s", c.Aggregator.DB.Host, c.Aggregator.DB.Port, c.Aggregator.DB.Name, c.Aggregator.DB.User, ) @@ -145,18 +147,18 @@ func createAggregator(ctx context.Context, c config.Config, runMigrations bool) // DB stateSQLDB, err := db.NewSQLDB(c.Aggregator.DB) if err != nil { - log.Fatal(err) + logger.Fatal(err) } etherman, err := newEtherman(c) if err != nil { - log.Fatal(err) + logger.Fatal(err) } // READ CHAIN ID FROM POE SC l2ChainID, err := etherman.GetL2ChainID() if err != nil { - log.Fatal(err) + logger.Fatal(err) } st := newState(&c, l2ChainID, stateSQLDB) @@ -169,9 +171,9 @@ func createAggregator(ctx context.Context, c config.Config, runMigrations bool) c.Aggregator.Synchronizer.Etherman.Contracts.RollupManagerAddr = c.NetworkConfig.L1Config.RollupManagerAddr c.Aggregator.Synchronizer.Etherman.Contracts.ZkEVMAddr = c.NetworkConfig.L1Config.ZkEVMAddr - aggregator, err := aggregator.New(ctx, c.Aggregator, st, etherman) + aggregator, err := aggregator.New(ctx, c.Aggregator, logger, st, etherman) if err != nil { - log.Fatal(err) + logger.Fatal(err) } return aggregator From eb313230110a17099a01ebf2c56247080982d10f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= Date: Wed, 4 Sep 2024 15:02:18 +0200 Subject: [PATCH 05/17] feat: add module info to the aggoracle component --- aggoracle/chaingersender/evm.go | 9 ++++++--- aggoracle/oracle.go | 18 ++++++++--------- cmd/main.go | 4 ++-- cmd/run.go | 5 ++++- common/components.go | 2 +- log/log.go | 34 ++++++++++++++++----------------- test/helpers/aggoracle_e2e.go | 6 ++++-- 7 files changed, 43 insertions(+), 35 deletions(-) diff --git a/aggoracle/chaingersender/evm.go b/aggoracle/chaingersender/evm.go index 6bd2ea76..ee02771e 100644 --- a/aggoracle/chaingersender/evm.go +++ b/aggoracle/chaingersender/evm.go @@ -40,6 +40,7 @@ type EthTxManager interface { } type EVMChainGERSender struct { + logger *log.Logger gerContract *pessimisticglobalexitroot.Pessimisticglobalexitroot gerAddr common.Address sender common.Address @@ -60,6 +61,7 @@ type EVMConfig struct { } func NewEVMChainGERSender( + logger *log.Logger, l2GlobalExitRoot, sender common.Address, l2Client EthClienter, ethTxMan EthTxManager, @@ -72,6 +74,7 @@ func NewEVMChainGERSender( } return &EVMChainGERSender{ + logger: logger, gerContract: gerContract, gerAddr: l2GlobalExitRoot, sender: sender, @@ -106,10 +109,10 @@ func (c *EVMChainGERSender) UpdateGERWaitUntilMined(ctx context.Context, ger com } for { time.Sleep(c.waitPeriodMonitorTx) - log.Debugf("waiting for tx %s to be mined", id.Hex()) + c.logger.Debugf("waiting for tx %s to be mined", id.Hex()) res, err := c.ethTxMan.Result(ctx, id) if err != nil { - log.Error("error calling ethTxMan.Result: ", err) + c.logger.Error("error calling ethTxMan.Result: ", err) } switch res.Status { case ethtxmanager.MonitoredTxStatusCreated, @@ -122,7 +125,7 @@ func (c *EVMChainGERSender) UpdateGERWaitUntilMined(ctx context.Context, ger com ethtxmanager.MonitoredTxStatusFinalized: return nil default: - log.Error("unexpected tx status: ", res.Status) + c.logger.Error("unexpected tx status: ", res.Status) } } } diff --git a/aggoracle/oracle.go b/aggoracle/oracle.go index 27383f43..91987189 100644 --- a/aggoracle/oracle.go +++ 
b/aggoracle/oracle.go @@ -23,6 +23,7 @@ type ChainSender interface { } type AggOracle struct { + logger *log.Logger ticker *time.Ticker l1Client ethereum.ChainReader l1Info L1InfoTreer @@ -31,6 +32,7 @@ type AggOracle struct { } func New( + logger *log.Logger, chainSender ChainSender, l1Client ethereum.ChainReader, l1InfoTreeSyncer L1InfoTreer, @@ -44,6 +46,7 @@ func New( } return &AggOracle{ + logger: logger, ticker: ticker, l1Client: l1Client, l1Info: l1InfoTreeSyncer, @@ -64,29 +67,26 @@ func (a *AggOracle) Start(ctx context.Context) { blockNumToFetch, gerToInject, err = a.getLastFinalisedGER(ctx, blockNumToFetch) if err != nil { if errors.Is(err, l1infotreesync.ErrBlockNotProcessed) { - log.Debugf("syncer is not ready for the block %d", blockNumToFetch) + a.logger.Debugf("syncer is not ready for the block %d", blockNumToFetch) } else if errors.Is(err, l1infotreesync.ErrNotFound) { blockNumToFetch = 0 - log.Debugf("syncer has not found any GER until block %d", blockNumToFetch) + a.logger.Debugf("syncer has not found any GER until block %d", blockNumToFetch) } else { - log.Error("error calling getLastFinalisedGER: ", err) + a.logger.Error("error calling getLastFinalisedGER: ", err) } continue } if alreadyInjected, err := a.chainSender.IsGERAlreadyInjected(gerToInject); err != nil { - log.Error("error calling isGERAlreadyInjected: ", err) - + a.logger.Error("error calling isGERAlreadyInjected: ", err) continue } else if alreadyInjected { - log.Debugf("GER %s already injected", gerToInject.Hex()) - + a.logger.Debugf("GER %s already injected", gerToInject.Hex()) continue } log.Infof("injecting new GER: %s", gerToInject.Hex()) if err := a.chainSender.UpdateGERWaitUntilMined(ctx, gerToInject); err != nil { - log.Errorf("error calling updateGERWaitUntilMined, when trying to inject GER %s: %v", gerToInject.Hex(), err) - + a.logger.Errorf("error calling updateGERWaitUntilMined, when trying to inject GER %s: %v", gerToInject.Hex(), err) continue } log.Infof("GER %s injected", gerToInject.Hex()) diff --git a/cmd/main.go b/cmd/main.go index e92f22b4..300851e7 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -13,8 +13,8 @@ import ( const appName = "cdk" const ( - // NETWORK_CONFIGFILE name to identify the netowk_custom (genesis) config-file - NETWORK_CONFIGFILE = "custom_network" + // NETWORK_CONFIGFILE name to identify the network_custom (genesis) config-file + NETWORK_CONFIGFILE = "custom_network" //nolint:stylecheck ) var ( diff --git a/cmd/run.go b/cmd/run.go index 868cfad5..223baa71 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -287,6 +287,7 @@ func createAggoracle( l2Client *ethclient.Client, syncer *l1infotreesync.L1InfoTreeSync, ) *aggoracle.AggOracle { + logger := log.WithFields("module", cdkcommon.AGGORACLE) var sender aggoracle.ChainSender switch cfg.AggOracle.TargetChainType { case aggoracle.EVMChain: @@ -301,6 +302,7 @@ func createAggoracle( } go ethTxManager.Start() sender, err = chaingersender.NewEVMChainGERSender( + logger, cfg.AggOracle.EVMSender.GlobalExitRootL2Addr, cfg.AggOracle.EVMSender.SenderAddr, l2Client, @@ -318,6 +320,7 @@ func createAggoracle( ) } aggOracle, err := aggoracle.New( + logger, sender, l1Client, syncer, @@ -325,7 +328,7 @@ func createAggoracle( cfg.AggOracle.WaitPeriodNextGER.Duration, ) if err != nil { - log.Fatal(err) + logger.Fatal(err) } return aggOracle diff --git a/common/components.go b/common/components.go index 7410400b..adb6d441 100644 --- a/common/components.go +++ b/common/components.go @@ -2,7 +2,7 @@ package common const ( // SEQUENCE_SENDER name to 
identify the sequence-sender component - SEQUENCE_SENDER = "sequence-sender" + SEQUENCE_SENDER = "sequence-sender" //nolint:stylecheck // AGGREGATOR name to identify the aggregator component AGGREGATOR = "aggregator" // AGGORACLE name to identify the aggoracle component diff --git a/log/log.go b/log/log.go index ea6cefc0..fc308e2c 100644 --- a/log/log.go +++ b/log/log.go @@ -30,7 +30,7 @@ type Logger struct { // root logger var log atomic.Pointer[Logger] -func getDefaultLog() *Logger { +func GetDefaultLogger() *Logger { l := log.Load() if l != nil { return l @@ -104,7 +104,7 @@ func NewLogger(cfg Config) (*zap.SugaredLogger, *zap.AtomicLevel, error) { // WithFields returns a new Logger (derived from the root one) with additional // fields as per keyValuePairs. The root Logger instance is not affected. func WithFields(keyValuePairs ...interface{}) *Logger { - l := getDefaultLog().WithFields(keyValuePairs...) + l := GetDefaultLogger().WithFields(keyValuePairs...) // since we are returning a new instance, remove one caller from the // stack, because we'll be calling the retruned Logger methods @@ -200,57 +200,57 @@ func (l *Logger) Errorf(template string, args ...interface{}) { // Debug calls log.Debug on the root Logger. func Debug(args ...interface{}) { - getDefaultLog().Debug(args...) + GetDefaultLogger().Debug(args...) } // Info calls log.Info on the root Logger. func Info(args ...interface{}) { - getDefaultLog().Info(args...) + GetDefaultLogger().Info(args...) } // Warn calls log.Warn on the root Logger. func Warn(args ...interface{}) { - getDefaultLog().Warn(args...) + GetDefaultLogger().Warn(args...) } // Error calls log.Error on the root Logger. func Error(args ...interface{}) { args = appendStackTraceMaybeArgs(args) - getDefaultLog().Error(args...) + GetDefaultLogger().Error(args...) } // Fatal calls log.Fatal on the root Logger. func Fatal(args ...interface{}) { args = appendStackTraceMaybeArgs(args) - getDefaultLog().Fatal(args...) + GetDefaultLogger().Fatal(args...) } // Debugf calls log.Debugf on the root Logger. func Debugf(template string, args ...interface{}) { - getDefaultLog().Debugf(template, args...) + GetDefaultLogger().Debugf(template, args...) } // Infof calls log.Infof on the root Logger. func Infof(template string, args ...interface{}) { - getDefaultLog().Infof(template, args...) + GetDefaultLogger().Infof(template, args...) } // Warnf calls log.Warnf on the root Logger. func Warnf(template string, args ...interface{}) { - getDefaultLog().Warnf(template, args...) + GetDefaultLogger().Warnf(template, args...) } // Fatalf calls log.Fatalf on the root Logger. func Fatalf(template string, args ...interface{}) { args = appendStackTraceMaybeArgs(args) - getDefaultLog().Fatalf(template, args...) + GetDefaultLogger().Fatalf(template, args...) } // Errorf calls log.Errorf on the root logger and stores the error message into // the ErrorFile. func Errorf(template string, args ...interface{}) { args = appendStackTraceMaybeArgs(args) - getDefaultLog().Errorf(template, args...) + GetDefaultLogger().Errorf(template, args...) } // appendStackTraceMaybeKV will append the stacktrace to the KV @@ -295,27 +295,27 @@ func (l *Logger) Fatalw(msg string, kv ...interface{}) { // Debugw calls log.Debugw on the root Logger. func Debugw(msg string, kv ...interface{}) { - getDefaultLog().Debugw(msg, kv...) + GetDefaultLogger().Debugw(msg, kv...) } // Infow calls log.Infow on the root Logger. func Infow(msg string, kv ...interface{}) { - getDefaultLog().Infow(msg, kv...) 
+ GetDefaultLogger().Infow(msg, kv...) } // Warnw calls log.Warnw on the root Logger. func Warnw(msg string, kv ...interface{}) { - getDefaultLog().Warnw(msg, kv...) + GetDefaultLogger().Warnw(msg, kv...) } // Errorw calls log.Errorw on the root Logger. func Errorw(msg string, kv ...interface{}) { msg = appendStackTraceMaybeKV(msg, kv) - getDefaultLog().Errorw(msg, kv...) + GetDefaultLogger().Errorw(msg, kv...) } // Fatalw calls log.Fatalw on the root Logger. func Fatalw(msg string, kv ...interface{}) { msg = appendStackTraceMaybeKV(msg, kv) - getDefaultLog().Fatalw(msg, kv...) + GetDefaultLogger().Fatalw(msg, kv...) } diff --git a/test/helpers/aggoracle_e2e.go b/test/helpers/aggoracle_e2e.go index 77f5d99a..7823c597 100644 --- a/test/helpers/aggoracle_e2e.go +++ b/test/helpers/aggoracle_e2e.go @@ -16,6 +16,7 @@ import ( "github.com/0xPolygon/cdk/aggoracle/chaingersender" "github.com/0xPolygon/cdk/etherman" "github.com/0xPolygon/cdk/l1infotreesync" + "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/reorgdetector" "github.com/0xPolygon/cdk/test/contracts/transparentupgradableproxy" "github.com/ethereum/go-ethereum/accounts/abi/bind" @@ -63,7 +64,7 @@ func SetupAggoracleWithEVMChain(t *testing.T) *AggoracleWithEVMChainEnv { ctx := context.Background() l1Client, syncer, gerL1Contract, gerL1Addr, bridgeL1Contract, bridgeL1Addr, authL1, rd := CommonSetup(t) sender, l2Client, gerL2Contract, gerL2Addr, bridgeL2Contract, bridgeL2Addr, authL2, ethTxManMockL2 := EVMSetup(t) - oracle, err := aggoracle.New(sender, l1Client.Client(), syncer, etherman.LatestBlock, time.Millisecond*20) //nolint:gomnd + oracle, err := aggoracle.New(log.GetDefaultLogger(), sender, l1Client.Client(), syncer, etherman.LatestBlock, time.Millisecond*20) //nolint:gomnd require.NoError(t, err) go oracle.Start(ctx) @@ -142,7 +143,8 @@ func EVMSetup(t *testing.T) ( l2Client, gerL2Addr, gerL2Sc, bridgeL2Addr, bridgeL2Sc, err := newSimulatedEVMAggSovereignChain(authL2) require.NoError(t, err) ethTxManMock := NewEthTxManMock(t, l2Client, authL2) - sender, err := chaingersender.NewEVMChainGERSender(gerL2Addr, authL2.From, l2Client.Client(), ethTxManMock, 0, time.Millisecond*50) //nolint:gomnd + sender, err := chaingersender.NewEVMChainGERSender(log.GetDefaultLogger(), + gerL2Addr, authL2.From, l2Client.Client(), ethTxManMock, 0, time.Millisecond*50) //nolint:gomnd require.NoError(t, err) return sender, l2Client, gerL2Sc, gerL2Addr, bridgeL2Sc, bridgeL2Addr, authL2, ethTxManMock From 1d06661ee7769a473f3548342d53dbe2ecfeecc3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= Date: Wed, 4 Sep 2024 16:05:45 +0200 Subject: [PATCH 06/17] fix: use require instead of log.Fatal in the test --- aggregator/prover/prover_test.go | 17 ++++------------- 1 file changed, 4 insertions(+), 13 deletions(-) diff --git a/aggregator/prover/prover_test.go b/aggregator/prover/prover_test.go index c98a1572..837a86d6 100644 --- a/aggregator/prover/prover_test.go +++ b/aggregator/prover/prover_test.go @@ -2,7 +2,6 @@ package prover_test import ( "fmt" - "log" "os" "testing" @@ -28,9 +27,7 @@ func TestCalculateStateRoots(t *testing.T) { // Read all files in the directory files, err := os.ReadDir(dir) - if err != nil { - log.Fatal(err) - } + require.NoError(t, err) for _, file := range files { if file.IsDir() { @@ -39,21 +36,15 @@ func TestCalculateStateRoots(t *testing.T) { // Read the file data, err := os.ReadFile(fmt.Sprintf("%s/%s", dir, file.Name())) - if err != nil { - log.Fatal(err) - } + require.NoError(t, err) // Get the state 
root from the batch proof fileStateRoot, err := prover.GetStateRootFromProof(string(data)) - if err != nil { - log.Fatal(err) - } + require.NoError(t, err) // Get the expected state root expectedStateRoot, ok := expectedStateRoots[file.Name()] - if !ok { - log.Fatal("Expected state root not found") - } + require.True(t, ok, "Expected state root not found") // Compare the state roots require.Equal(t, expectedStateRoot, fileStateRoot.String(), "State roots do not match") From 0f3658d51c26e1f178f94c415f882a4a70d22299 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= Date: Wed, 4 Sep 2024 16:10:30 +0200 Subject: [PATCH 07/17] feat: add module name to aggoracle logs (2nd part) --- aggoracle/oracle.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/aggoracle/oracle.go b/aggoracle/oracle.go index 91987189..edbddd77 100644 --- a/aggoracle/oracle.go +++ b/aggoracle/oracle.go @@ -84,12 +84,12 @@ func (a *AggOracle) Start(ctx context.Context) { a.logger.Debugf("GER %s already injected", gerToInject.Hex()) continue } - log.Infof("injecting new GER: %s", gerToInject.Hex()) + a.logger.Infof("injecting new GER: %s", gerToInject.Hex()) if err := a.chainSender.UpdateGERWaitUntilMined(ctx, gerToInject); err != nil { a.logger.Errorf("error calling updateGERWaitUntilMined, when trying to inject GER %s: %v", gerToInject.Hex(), err) continue } - log.Infof("GER %s injected", gerToInject.Hex()) + a.logger.Infof("GER %s injected", gerToInject.Hex()) case <-ctx.Done(): return } From f66b2f9180e4e43b996a238d394e90b52bc1c43b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= Date: Wed, 4 Sep 2024 16:47:54 +0200 Subject: [PATCH 08/17] feat: data committee module info --- cmd/run.go | 6 ++- .../datacommittee/datacommittee.go | 45 +++++++++---------- translator/translator_impl.go | 8 ++-- 3 files changed, 31 insertions(+), 28 deletions(-) diff --git a/cmd/run.go b/cmd/run.go index 223baa71..34a01e85 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -338,8 +338,9 @@ func newDataAvailability(c config.Config, etherman *etherman.Client) (*dataavail if !c.Common.IsValidiumMode { return nil, nil } - translator := translator.NewTranslatorImpl() - log.Infof("Translator rules: %v", c.Common.Translator) + logger := log.WithFields("module", "da-committee") + translator := translator.NewTranslatorImpl(logger) + logger.Infof("Translator rules: %v", c.Common.Translator) translator.AddConfigRules(c.Common.Translator) // Backend specific config @@ -364,6 +365,7 @@ func newDataAvailability(c config.Config, etherman *etherman.Client) (*dataavail } daBackend, err = datacommittee.New( + logger, c.SequenceSender.EthTxManager.Etherman.URL, dacAddr, pk, diff --git a/dataavailability/datacommittee/datacommittee.go b/dataavailability/datacommittee/datacommittee.go index 2a3cca8e..bb74984c 100644 --- a/dataavailability/datacommittee/datacommittee.go +++ b/dataavailability/datacommittee/datacommittee.go @@ -42,6 +42,7 @@ type DataCommittee struct { // Backend implements the DAC integration type Backend struct { + logger *log.Logger dataCommitteeContract *polygondatacommittee.Polygondatacommittee privKey *ecdsa.PrivateKey dataCommitteeClientFactory client.Factory @@ -54,6 +55,7 @@ type Backend struct { // New creates an instance of Backend func New( + logger *log.Logger, l1RPCURL string, dataCommitteeAddr common.Address, privKey *ecdsa.PrivateKey, @@ -62,8 +64,7 @@ func New( ) (*Backend, error) { ethClient, err := ethclient.Dial(l1RPCURL) if err != nil { - log.Errorf("error connecting to %s: 
%+v", l1RPCURL, err) - + logger.Errorf("error connecting to %s: %+v", l1RPCURL, err) return nil, err } @@ -73,6 +74,7 @@ func New( } return &Backend{ + logger: logger, dataCommitteeContract: dataCommittee, privKey: privKey, dataCommitteeClientFactory: dataCommitteeClientFactory, @@ -120,11 +122,11 @@ func (d *Backend) GetBatchL2Data(hash common.Hash) ([]byte, error) { found := false for !found && intialMember != -1 { member := d.committeeMembers[d.selectedCommitteeMember] - log.Infof("trying to get data from %s at %s", member.Addr.Hex(), member.URL) + d.logger.Infof("trying to get data from %s at %s", member.Addr.Hex(), member.URL) c := d.dataCommitteeClientFactory.New(member.URL) data, err := c.GetOffChainData(d.ctx, hash) if err != nil { - log.Warnf( + d.logger.Warnf( "error getting data from DAC node %s at %s: %s", member.Addr.Hex(), member.URL, err, ) @@ -140,7 +142,7 @@ func (d *Backend) GetBatchL2Data(hash common.Hash) ([]byte, error) { unexpectedHash := fmt.Errorf( unexpectedHashTemplate, hash, actualTransactionsHash, ) - log.Warnf( + d.logger.Warnf( "error getting data from DAC node %s at %s: %s", member.Addr.Hex(), member.URL, unexpectedHash, ) @@ -193,11 +195,10 @@ func (d *Backend) PostSequenceElderberry(ctx context.Context, batchesData [][]by Sequence: sequence, Signature: signedSequence, } - go requestSignatureFromMember(signatureCtx, &signedSequenceElderberry, + go d.requestSignatureFromMember(signatureCtx, &signedSequenceElderberry, func(c client.Client) ([]byte, error) { return c.SignSequence(ctx, signedSequenceElderberry) }, member, ch) } - - return collectSignatures(committee, ch, cancelSignatureCollection) + return d.collectSignatures(committee, ch, cancelSignatureCollection) } // PostSequenceBanana submits a sequence to the data committee and collects the signed response from them. 
@@ -246,18 +247,17 @@ func (d *Backend) PostSequenceBanana(ctx context.Context, sequence etherman.Sequ Sequence: sequenceBanana, Signature: signature, } - go requestSignatureFromMember(signatureCtx, + go d.requestSignatureFromMember(signatureCtx, &signedSequenceBanana, func(c client.Client) ([]byte, error) { return c.SignSequenceBanana(ctx, signedSequenceBanana) }, member, ch) } - return collectSignatures(committee, ch, cancelSignatureCollection) + return d.collectSignatures(committee, ch, cancelSignatureCollection) } -func collectSignatures( - committee *DataCommittee, ch chan signatureMsg, cancelSignatureCollection context.CancelFunc, -) ([]byte, error) { +func (d *Backend) collectSignatures( + committee *DataCommittee, ch chan signatureMsg, cancelSignatureCollection context.CancelFunc) ([]byte, error) { // Collect signatures // Stop requesting as soon as we have N valid signatures var ( @@ -268,7 +268,7 @@ func collectSignatures( for collectedSignatures < committee.RequiredSignatures { msg := <-ch if msg.err != nil { - log.Errorf("error when trying to get signature from %s: %s", msg.addr, msg.err) + d.logger.Errorf("error when trying to get signature from %s: %s", msg.addr, msg.err) failedToCollect++ if len(committee.Members)-int(failedToCollect) < int(committee.RequiredSignatures) { cancelSignatureCollection() @@ -276,7 +276,7 @@ func collectSignatures( return nil, errors.New("too many members failed to send their signature") } } else { - log.Infof("received signature from %s", msg.addr) + d.logger.Infof("received signature from %s", msg.addr) collectedSignatures++ } msgs = append(msgs, msg) @@ -284,7 +284,7 @@ func collectSignatures( cancelSignatureCollection() - return buildSignaturesAndAddrs(msgs, committee.Members), nil + return d.buildSignaturesAndAddrs(msgs, committee.Members), nil } type funcSignType func(c client.Client) ([]byte, error) @@ -292,7 +292,7 @@ type funcSignType func(c client.Client) ([]byte, error) // funcSetSignatureType: is not possible to define a SetSignature function because // the type daTypes.SequenceBanana and daTypes.Sequence belong to different packages // So a future refactor is define a common interface for both -func requestSignatureFromMember(ctx context.Context, signedSequence daTypes.SignedSequenceInterface, +func (d *Backend) requestSignatureFromMember(ctx context.Context, signedSequence daTypes.SignedSequenceInterface, funcSign funcSignType, member DataCommitteeMember, ch chan signatureMsg) { select { @@ -303,7 +303,7 @@ func requestSignatureFromMember(ctx context.Context, signedSequence daTypes.Sign // request c := client.New(member.URL) - log.Infof("sending request to sign the sequence to %s at %s", member.Addr.Hex(), member.URL) + d.logger.Infof("sending request to sign the sequence to %s at %s", member.Addr.Hex(), member.URL) //funcSign must call something like that c.SignSequenceBanana(ctx, signedSequence) signature, err := funcSign(c) @@ -340,22 +340,21 @@ func requestSignatureFromMember(ctx context.Context, signedSequence daTypes.Sign } } -func buildSignaturesAndAddrs(sigs signatureMsgs, members []DataCommitteeMember) []byte { +func (d *Backend) buildSignaturesAndAddrs(sigs signatureMsgs, members []DataCommitteeMember) []byte { const ( sigLen = 65 ) res := make([]byte, 0, len(sigs)*sigLen+len(members)*common.AddressLength) sort.Sort(sigs) for _, msg := range sigs { - log.Debugf("adding signature %s from %s", common.Bytes2Hex(msg.signature), msg.addr.Hex()) + d.logger.Debugf("adding signature %s from %s", common.Bytes2Hex(msg.signature), 
msg.addr.Hex()) res = append(res, msg.signature...) } for _, member := range members { - log.Debugf("adding addr %s", common.Bytes2Hex(member.Addr.Bytes())) + d.logger.Debugf("adding addr %s", common.Bytes2Hex(member.Addr.Bytes())) res = append(res, member.Addr.Bytes()...) } - log.Debugf("full res %s", common.Bytes2Hex(res)) - + d.logger.Debugf("full res %s", common.Bytes2Hex(res)) return res } diff --git a/translator/translator_impl.go b/translator/translator_impl.go index 33e07eef..cd7fbc42 100644 --- a/translator/translator_impl.go +++ b/translator/translator_impl.go @@ -1,6 +1,6 @@ package translator -import "github.com/0xPolygonHermez/zkevm-synchronizer-l1/log" +import "github.com/0xPolygon/cdk/log" type TranslatorFullMatchRule struct { // If null match any context @@ -32,11 +32,13 @@ func NewTranslatorFullMatchRule( } type TranslatorImpl struct { + logger *log.Logger FullMatchRules []TranslatorFullMatchRule } -func NewTranslatorImpl() *TranslatorImpl { +func NewTranslatorImpl(logger *log.Logger) *TranslatorImpl { return &TranslatorImpl{ + logger: logger, FullMatchRules: []TranslatorFullMatchRule{}, } } @@ -45,7 +47,7 @@ func (t *TranslatorImpl) Translate(contextName string, data string) string { for _, rule := range t.FullMatchRules { if rule.Match(contextName, data) { translated := rule.Translate(contextName, data) - log.Debugf("Translated (ctxName=%s) %s to %s", contextName, data, translated) + t.logger.Debugf("Translated (ctxName=%s) %s to %s", contextName, data, translated) return translated } } From eaf2d741d80b539d527aec4e205f91da73525c2e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= Date: Wed, 4 Sep 2024 16:48:11 +0200 Subject: [PATCH 09/17] feat: aggregator db logger --- aggregator/db/db.go | 16 ++++++---------- aggregator/db/logger.go | 6 +++--- cmd/run.go | 2 +- 3 files changed, 10 insertions(+), 14 deletions(-) diff --git a/aggregator/db/db.go b/aggregator/db/db.go index 8f05e845..ecfffc11 100644 --- a/aggregator/db/db.go +++ b/aggregator/db/db.go @@ -9,25 +9,21 @@ import ( ) // NewSQLDB creates a new SQL DB -func NewSQLDB(cfg Config) (*pgxpool.Pool, error) { - config, err := pgxpool.ParseConfig(fmt.Sprintf( - "postgres://%s:%s@%s:%s/%s?pool_max_conns=%d", - cfg.User, cfg.Password, cfg.Host, cfg.Port, cfg.Name, cfg.MaxConns, - )) +func NewSQLDB(logger *log.Logger, cfg Config) (*pgxpool.Pool, error) { + config, err := pgxpool.ParseConfig(fmt.Sprintf("postgres://%s:%s@%s:%s/%s?pool_max_conns=%d", + cfg.User, cfg.Password, cfg.Host, cfg.Port, cfg.Name, cfg.MaxConns)) if err != nil { - log.Errorf("Unable to parse DB config: %v\n", err) - + logger.Errorf("Unable to parse DB config: %v\n", err) return nil, err } if cfg.EnableLog { - config.ConnConfig.Logger = logger{} + config.ConnConfig.Logger = dbLoggerImpl{} } conn, err := pgxpool.ConnectConfig(context.Background(), config) if err != nil { - log.Errorf("Unable to connect to database: %v\n", err) - + logger.Errorf("Unable to connect to database: %v\n", err) return nil, err } diff --git a/aggregator/db/logger.go b/aggregator/db/logger.go index 3b425b13..e60a7b01 100644 --- a/aggregator/db/logger.go +++ b/aggregator/db/logger.go @@ -8,9 +8,9 @@ import ( "github.com/jackc/pgx/v4" ) -type logger struct{} +type dbLoggerImpl struct{} -func (l logger) Log(ctx context.Context, level pgx.LogLevel, msg string, data map[string]interface{}) { +func (l dbLoggerImpl) Log(ctx context.Context, level pgx.LogLevel, msg string, data map[string]interface{}) { m := fmt.Sprintf("%s %v", msg, data) switch level { @@ -21,7 
+21,7 @@ func (l logger) Log(ctx context.Context, level pgx.LogLevel, msg string, data ma case pgx.LogLevelError: log.Error(m) default: - m = fmt.Sprintf("%s %s %v", level.String(), msg, data) + m = fmt.Sprintf("[%s] %s %v", level.String(), msg, data) log.Debug(m) } } diff --git a/cmd/run.go b/cmd/run.go index 34a01e85..b6c8c43a 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -145,7 +145,7 @@ func createAggregator(ctx context.Context, c config.Config, runMigrations bool) } // DB - stateSQLDB, err := db.NewSQLDB(c.Aggregator.DB) + stateSQLDB, err := db.NewSQLDB(logger, c.Aggregator.DB) if err != nil { logger.Fatal(err) } From 15182bc55618448fa634e0b382169905f5ee9618 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= Date: Wed, 4 Sep 2024 20:22:27 +0200 Subject: [PATCH 10/17] feat: module name for claim sponsor, cleanups --- aggregator/aggregator.go | 5 ++-- aggregator/prover/prover.go | 33 +++++++++++++------------ aggregator/prover/prover_test.go | 3 ++- claimsponsor/claimsponsor.go | 42 +++++++++++++------------------- claimsponsor/e2e_test.go | 2 ++ claimsponsor/evmclaimsponsor.go | 3 +++ cmd/run.go | 39 ++++++++++++++++------------- common/components.go | 4 +++ config/config.go | 4 +-- test/helpers/aggoracle_e2e.go | 4 +-- test/helpers/ethtxmanmock_e2e.go | 3 ++- 11 files changed, 76 insertions(+), 66 deletions(-) diff --git a/aggregator/aggregator.go b/aggregator/aggregator.go index 0d95ff51..634c456c 100644 --- a/aggregator/aggregator.go +++ b/aggregator/aggregator.go @@ -787,12 +787,13 @@ func (a *Aggregator) Channel(stream prover.AggregatorService_ChannelServer) erro if ok { proverAddr = p.Addr } - prover, err := prover.New(stream, proverAddr, a.cfg.ProofStatePollingInterval) + proverLogger := log.WithFields("module", cdkcommon.PROVER) + prover, err := prover.New(proverLogger, stream, proverAddr, a.cfg.ProofStatePollingInterval) if err != nil { return err } - tmpLogger := a.logger.WithFields( + tmpLogger := proverLogger.WithFields( "prover", prover.Name(), "proverId", prover.ID(), "proverAddr", prover.Addr(), diff --git a/aggregator/prover/prover.go b/aggregator/prover/prover.go index 285eadb2..cd6df050 100644 --- a/aggregator/prover/prover.go +++ b/aggregator/prover/prover.go @@ -34,6 +34,7 @@ var ( // Prover abstraction of the grpc prover client. type Prover struct { + logger *log.Logger name string id string address net.Addr @@ -42,10 +43,10 @@ type Prover struct { } // New returns a new Prover instance. 
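The signature change to New below is the same constructor-injection step applied throughout this series: the caller builds a module-scoped logger once with log.WithFields("module", ...) and passes it in, so every entry the component writes already carries the module field and the package-level log helpers are no longer needed inside it. A rough caller-side sketch of the pattern; the worker component here is hypothetical, and only the log and common package calls come from this repository.

package main

import (
	cdkcommon "github.com/0xPolygon/cdk/common"
	"github.com/0xPolygon/cdk/log"
)

// worker is a hypothetical component; the prover, aggregator and claim
// sponsor follow the same shape after these patches.
type worker struct {
	logger *log.Logger
}

// newWorker receives an already-scoped logger instead of using the root one.
func newWorker(logger *log.Logger) *worker {
	return &worker{logger: logger}
}

func (w *worker) run() {
	// Every entry written through this logger already carries the module field.
	w.logger.Infof("starting")
	w.logger.Debugf("ready to accept connections")
}

func main() {
	logger := log.WithFields("module", cdkcommon.PROVER)
	newWorker(logger).run()
}

Injecting the logger also keeps per-connection context cheap to add later: the component can derive further fields with logger.WithFields (as Channel does for the prover name, ID and address) without touching the root logger.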
-func New( - stream AggregatorService_ChannelServer, addr net.Addr, proofStatePollingInterval types.Duration, -) (*Prover, error) { +func New(logger *log.Logger, stream AggregatorService_ChannelServer, + addr net.Addr, proofStatePollingInterval types.Duration) (*Prover, error) { p := &Prover{ + logger: logger, stream: stream, address: addr, proofStatePollingInterval: proofStatePollingInterval, @@ -108,12 +109,11 @@ func (p *Prover) IsIdle() (bool, error) { func (p *Prover) SupportsForkID(forkID uint64) bool { status, err := p.Status() if err != nil { - log.Warnf("Error asking status for prover ID %s: %v", p.ID(), err) - + p.logger.Warnf("Error asking status for prover ID %s: %v", p.ID(), err) return false } - log.Debugf("Prover %s supports fork ID %d", p.ID(), status.ForkId) + p.logger.Debugf("Prover %s supports fork ID %d", p.ID(), status.ForkId) return status.ForkId == forkID } @@ -296,13 +296,13 @@ func (p *Prover) WaitRecursiveProof(ctx context.Context, proofID string) (string ) } - sr, err := GetStateRootFromProof(resProof.RecursiveProof) + sr, err := GetStateRootFromProof(p.logger, resProof.RecursiveProof) if err != nil && sr != (common.Hash{}) { - log.Errorf("Error getting state root from proof: %v", err) + p.logger.Errorf("Error getting state root from proof: %v", err) } if sr == (common.Hash{}) { - log.Info("Recursive proof does not contain state root. Possibly mock prover is in use.") + p.logger.Info("Recursive proof does not contain state root. Possibly mock prover is in use.") } return resProof.RecursiveProof, sr, nil @@ -396,9 +396,9 @@ func (p *Prover) call(req *AggregatorMessage) (*ProverMessage, error) { } // GetStateRootFromProof returns the state root from the proof. -func GetStateRootFromProof(proof string) (common.Hash, error) { +func GetStateRootFromProof(logger *log.Logger, proof string) (common.Hash, error) { // Log received proof - log.Debugf("Received proof to get SR from: %s", proof) + logger.Debugf("Received proof to get SR from: %s", proof) type Publics struct { Publics []string `mapstructure:"publics"` @@ -412,17 +412,18 @@ func GetStateRootFromProof(proof string) (common.Hash, error) { var publics Publics err := json.Unmarshal([]byte(proof), &publics) if err != nil { - log.Errorf("Error unmarshalling proof: %v", err) - + logger.Errorf("Error unmarshalling proof: %v", err) return common.Hash{}, err } - var v [8]uint64 - var j = 0 + var ( + v [8]uint64 + j = 0 + ) for i := stateRootStartIndex; i < stateRootFinalIndex; i++ { u64, err := strconv.ParseInt(publics.Publics[i], 10, 64) if err != nil { - log.Fatal(err) + logger.Fatal(err) } v[j] = uint64(u64) j++ diff --git a/aggregator/prover/prover_test.go b/aggregator/prover/prover_test.go index 837a86d6..737d5592 100644 --- a/aggregator/prover/prover_test.go +++ b/aggregator/prover/prover_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/0xPolygon/cdk/aggregator/prover" + "github.com/0xPolygon/cdk/log" "github.com/stretchr/testify/require" ) @@ -39,7 +40,7 @@ func TestCalculateStateRoots(t *testing.T) { require.NoError(t, err) // Get the state root from the batch proof - fileStateRoot, err := prover.GetStateRootFromProof(string(data)) + fileStateRoot, err := prover.GetStateRootFromProof(log.GetDefaultLogger(), string(data)) require.NoError(t, err) // Get the expected state root diff --git a/claimsponsor/claimsponsor.go b/claimsponsor/claimsponsor.go index bad29c86..30eb9123 100644 --- a/claimsponsor/claimsponsor.go +++ b/claimsponsor/claimsponsor.go @@ -64,6 +64,7 @@ type ClaimSender interface { } type 
ClaimSponsor struct { + logger *log.Logger db kv.RwDB sender ClaimSender rh *sync.RetryHandler @@ -72,6 +73,7 @@ type ClaimSponsor struct { } func newClaimSponsor( + logger *log.Logger, dbPath string, sender ClaimSender, retryAfterErrorPeriod time.Duration, @@ -100,6 +102,7 @@ func newClaimSponsor( } return &ClaimSponsor{ + logger: logger, db: db, sender: sender, rh: rh, @@ -121,8 +124,7 @@ func (c *ClaimSponsor) Start(ctx context.Context) { tx, err2 := c.db.BeginRw(ctx) if err2 != nil { err = err2 - log.Errorf("error calling BeginRw: %v", err) - + c.logger.Errorf("error calling BeginRw: %v", err) continue } queueIndex, globalIndex, err2 := getFirstQueueIndex(tx) @@ -130,22 +132,20 @@ func (c *ClaimSponsor) Start(ctx context.Context) { err = err2 tx.Rollback() if errors.Is(err, ErrNotFound) { - log.Debugf("queue is empty") + c.logger.Debugf("queue is empty") err = nil time.Sleep(c.waitOnEmptyQueue) continue } - log.Errorf("error calling getFirstQueueIndex: %v", err) - + c.logger.Errorf("error calling getFirstQueueIndex: %v", err) continue } claim, err2 := getClaim(tx, globalIndex) if err2 != nil { err = err2 tx.Rollback() - log.Errorf("error calling getClaim with globalIndex %s: %v", globalIndex.String(), err) - + c.logger.Errorf("error calling getClaim with globalIndex %s: %v", globalIndex.String(), err) continue } if claim.TxID == "" { @@ -153,8 +153,7 @@ func (c *ClaimSponsor) Start(ctx context.Context) { if err2 != nil { err = err2 tx.Rollback() - log.Errorf("error calling sendClaim with globalIndex %s: %v", globalIndex.String(), err) - + c.logger.Errorf("error calling sendClaim with globalIndex %s: %v", globalIndex.String(), err) continue } claim.TxID = txID @@ -163,33 +162,29 @@ func (c *ClaimSponsor) Start(ctx context.Context) { if err2 != nil { err = err2 tx.Rollback() - log.Errorf("error calling putClaim with globalIndex %s: %v", globalIndex.String(), err) - + c.logger.Errorf("error calling putClaim with globalIndex %s: %v", globalIndex.String(), err) continue } } err2 = tx.Commit() if err2 != nil { err = err2 - log.Errorf("error calling tx.Commit after putting claim: %v", err) - + c.logger.Errorf("error calling tx.Commit after putting claim: %v", err) continue } - log.Infof("waiting for tx %s with global index %s to succeed or fail", claim.TxID, globalIndex.String()) + c.logger.Infof("waiting for tx %s with global index %s to succeed or fail", claim.TxID, globalIndex.String()) status, err2 := c.waitTxToBeSuccessOrFail(ctx, claim.TxID) if err2 != nil { err = err2 - log.Errorf("error calling waitTxToBeSuccessOrFail for tx %s: %v", claim.TxID, err) - + c.logger.Errorf("error calling waitTxToBeSuccessOrFail for tx %s: %v", claim.TxID, err) continue } - log.Infof("tx %s with global index %s concluded with status: %s", claim.TxID, globalIndex.String(), status) + c.logger.Infof("tx %s with global index %s concluded with status: %s", claim.TxID, globalIndex.String(), status) tx, err2 = c.db.BeginRw(ctx) if err2 != nil { err = err2 - log.Errorf("error calling BeginRw: %v", err) - + c.logger.Errorf("error calling BeginRw: %v", err) continue } claim.Status = status @@ -197,23 +192,20 @@ func (c *ClaimSponsor) Start(ctx context.Context) { if err2 != nil { err = err2 tx.Rollback() - log.Errorf("error calling putClaim with globalIndex %s: %v", globalIndex.String(), err) - + c.logger.Errorf("error calling putClaim with globalIndex %s: %v", globalIndex.String(), err) continue } err2 = tx.Delete(queueTable, dbCommon.Uint64ToBytes(queueIndex)) if err2 != nil { err = err2 tx.Rollback() - 
log.Errorf("error calling delete on the queue table with index %d: %v", queueIndex, err) - + c.logger.Errorf("error calling delete on the queue table with index %d: %v", queueIndex, err) continue } err2 = tx.Commit() if err2 != nil { err = err2 - log.Errorf("error calling tx.Commit after putting claim: %v", err) - + c.logger.Errorf("error calling tx.Commit after putting claim: %v", err) continue } diff --git a/claimsponsor/e2e_test.go b/claimsponsor/e2e_test.go index 904df8a3..8a037a58 100644 --- a/claimsponsor/e2e_test.go +++ b/claimsponsor/e2e_test.go @@ -12,6 +12,7 @@ import ( "github.com/0xPolygon/cdk/bridgesync" "github.com/0xPolygon/cdk/claimsponsor" "github.com/0xPolygon/cdk/etherman" + "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/test/helpers" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" @@ -31,6 +32,7 @@ func TestE2EL1toEVML2(t *testing.T) { // start claim sponsor dbPathClaimSponsor := t.TempDir() claimer, err := claimsponsor.NewEVMClaimSponsor( + log.GetDefaultLogger(), dbPathClaimSponsor, env.L2Client.Client(), env.BridgeL2Addr, diff --git a/claimsponsor/evmclaimsponsor.go b/claimsponsor/evmclaimsponsor.go index 9f24c2ef..540f3203 100644 --- a/claimsponsor/evmclaimsponsor.go +++ b/claimsponsor/evmclaimsponsor.go @@ -8,6 +8,7 @@ import ( "github.com/0xPolygon/cdk-contracts-tooling/contracts/etrog/polygonzkevmbridgev2" configTypes "github.com/0xPolygon/cdk/config/types" + "github.com/0xPolygon/cdk/log" "github.com/0xPolygonHermez/zkevm-ethtx-manager/ethtxmanager" "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi" @@ -79,6 +80,7 @@ type EVMClaimSponsorConfig struct { } func NewEVMClaimSponsor( + logger *log.Logger, dbPath string, l2Client EthClienter, bridge common.Address, @@ -109,6 +111,7 @@ func NewEVMClaimSponsor( ethTxManager: ethTxManager, } baseSponsor, err := newClaimSponsor( + logger, dbPath, evmSponsor, retryAfterErrorPeriod, diff --git a/cmd/run.go b/cmd/run.go index b6c8c43a..8a0e0f1f 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -18,7 +18,6 @@ import ( "github.com/0xPolygon/cdk/aggregator/db" "github.com/0xPolygon/cdk/bridgesync" "github.com/0xPolygon/cdk/claimsponsor" - "github.com/0xPolygon/cdk/common" cdkcommon "github.com/0xPolygon/cdk/common" "github.com/0xPolygon/cdk/config" "github.com/0xPolygon/cdk/dataavailability" @@ -92,13 +91,13 @@ func start(cliCtx *cli.Context) error { for _, component := range components { switch component { - case common.SEQUENCE_SENDER: + case cdkcommon.SEQUENCE_SENDER: c.SequenceSender.Log = c.Log seqSender := createSequenceSender(*c, l1Client, l1InfoTreeSync) // start sequence sender in a goroutine, checking for errors go seqSender.Start(cliCtx.Context) - case common.AGGREGATOR: + case cdkcommon.AGGREGATOR: aggregator := createAggregator(cliCtx.Context, *c, !cliCtx.Bool(config.FlagMigrations)) // start aggregator in a goroutine, checking for errors go func() { @@ -106,10 +105,10 @@ func start(cliCtx *cli.Context) error { log.Fatal(err) } }() - case common.AGGORACLE: + case cdkcommon.AGGORACLE: aggOracle := createAggoracle(*c, l1Client, l2Client, l1InfoTreeSync) go aggOracle.Start(cliCtx.Context) - case common.RPC: + case cdkcommon.RPC: server := createRPC( c.RPC, c.Common.NetworkID, @@ -478,7 +477,7 @@ func runL1InfoTreeSyncerIfNeeded( l1Client *ethclient.Client, reorgDetector *reorgdetector.ReorgDetector, ) *l1infotreesync.L1InfoTreeSync { - if !isNeeded([]string{common.AGGORACLE, common.RPC, common.SEQUENCE_SENDER}, components) { + if 
!isNeeded([]string{cdkcommon.AGGORACLE, cdkcommon.RPC, cdkcommon.SEQUENCE_SENDER}, components) { return nil } l1InfoTreeSync, err := l1infotreesync.New( @@ -504,7 +503,10 @@ func runL1InfoTreeSyncerIfNeeded( } func runL1ClientIfNeeded(components []string, urlRPCL1 string) *ethclient.Client { - if !isNeeded([]string{common.SEQUENCE_SENDER, common.AGGREGATOR, common.AGGORACLE, common.RPC}, components) { + if !isNeeded([]string{ + cdkcommon.SEQUENCE_SENDER, cdkcommon.AGGREGATOR, + cdkcommon.AGGORACLE, cdkcommon.RPC, + }, components) { return nil } log.Debugf("dialing L1 client at: %s", urlRPCL1) @@ -517,7 +519,7 @@ func runL1ClientIfNeeded(components []string, urlRPCL1 string) *ethclient.Client } func runL2ClientIfNeeded(components []string, urlRPCL2 string) *ethclient.Client { - if !isNeeded([]string{common.AGGORACLE, common.RPC}, components) { + if !isNeeded([]string{cdkcommon.AGGORACLE, cdkcommon.RPC}, components) { return nil } log.Debugf("dialing L2 client at: %s", urlRPCL2) @@ -535,7 +537,7 @@ func runReorgDetectorL1IfNeeded( l1Client *ethclient.Client, cfg *reorgdetector.Config, ) (*reorgdetector.ReorgDetector, chan error) { - if !isNeeded([]string{common.SEQUENCE_SENDER, common.AGGREGATOR, common.AGGORACLE, common.RPC}, components) { + if !isNeeded([]string{cdkcommon.SEQUENCE_SENDER, cdkcommon.AGGREGATOR, cdkcommon.AGGORACLE, cdkcommon.RPC}, components) { return nil, nil } rd := newReorgDetector(cfg, l1Client) @@ -557,7 +559,7 @@ func runReorgDetectorL2IfNeeded( l2Client *ethclient.Client, cfg *reorgdetector.Config, ) (*reorgdetector.ReorgDetector, chan error) { - if !isNeeded([]string{common.AGGORACLE, common.RPC}, components) { + if !isNeeded([]string{cdkcommon.AGGORACLE, cdkcommon.RPC}, components) { return nil, nil } rd := newReorgDetector(cfg, l2Client) @@ -579,17 +581,20 @@ func runClaimSponsorIfNeeded( l2Client *ethclient.Client, cfg claimsponsor.EVMClaimSponsorConfig, ) *claimsponsor.ClaimSponsor { - if !isNeeded([]string{common.RPC}, components) || !cfg.Enabled { + if !isNeeded([]string{cdkcommon.RPC}, components) || !cfg.Enabled { return nil } + + logger := log.WithFields("module", cdkcommon.CLAIM_SPONSOR) // In the future there may support different backends other than EVM, and this will require different config. 
// But today only EVM is supported ethTxManagerL2, err := ethtxmanager.New(cfg.EthTxManager) if err != nil { - log.Fatal(err) + logger.Fatal(err) } go ethTxManagerL2.Start() cs, err := claimsponsor.NewEVMClaimSponsor( + logger, cfg.DBPath, l2Client, cfg.BridgeAddrL2, @@ -603,7 +608,7 @@ func runClaimSponsorIfNeeded( cfg.WaitTxToBeMinedPeriod.Duration, ) if err != nil { - log.Fatalf("error creating claim sponsor: %s", err) + logger.Fatalf("error creating claim sponsor: %s", err) } go cs.Start(ctx) @@ -618,7 +623,7 @@ func runL1Bridge2InfoIndexSyncIfNeeded( l1InfoTreeSync *l1infotreesync.L1InfoTreeSync, l1Client *ethclient.Client, ) *l1bridge2infoindexsync.L1Bridge2InfoIndexSync { - if !isNeeded([]string{common.RPC}, components) { + if !isNeeded([]string{cdkcommon.RPC}, components) { return nil } l1Bridge2InfoIndexSync, err := l1bridge2infoindexsync.New( @@ -646,7 +651,7 @@ func runLastGERSyncIfNeeded( l2Client *ethclient.Client, l1InfoTreeSync *l1infotreesync.L1InfoTreeSync, ) *lastgersync.LastGERSync { - if !isNeeded([]string{common.RPC}, components) { + if !isNeeded([]string{cdkcommon.RPC}, components) { return nil } lastGERSync, err := lastgersync.New( @@ -677,7 +682,7 @@ func runBridgeSyncL1IfNeeded( reorgDetectorL1 *reorgdetector.ReorgDetector, l1Client *ethclient.Client, ) *bridgesync.BridgeSync { - if !isNeeded([]string{common.RPC}, components) { + if !isNeeded([]string{cdkcommon.RPC}, components) { return nil } bridgeSyncL1, err := bridgesync.NewL1( @@ -709,7 +714,7 @@ func runBridgeSyncL2IfNeeded( l2Client *ethclient.Client, ) *bridgesync.BridgeSync { // TODO: will be needed by AGGSENDER - if !isNeeded([]string{common.RPC}, components) { + if !isNeeded([]string{cdkcommon.RPC}, components) { return nil } bridgeSyncL2, err := bridgesync.NewL2( diff --git a/common/components.go b/common/components.go index adb6d441..0c2df8d7 100644 --- a/common/components.go +++ b/common/components.go @@ -9,4 +9,8 @@ const ( AGGORACLE = "aggoracle" // RPC name to identify the rpc component RPC = "rpc" + // CLAIM_SPONSOR name to identify the claim sponsor component + CLAIM_SPONSOR = "claim-sponsor" //nolint:stylecheck + // PROVER name to identify the prover component + PROVER = "prover" ) diff --git a/config/config.go b/config/config.go index 9d00313d..cb899df8 100644 --- a/config/config.go +++ b/config/config.go @@ -157,9 +157,9 @@ func Load(ctx *cli.Context) (*Config, error) { if err != nil { var configNotFoundError viper.ConfigFileNotFoundError if errors.As(err, &configNotFoundError) { - log.Infof("config file not found") + log.Error("config file not found") } else { - log.Infof("error reading config file: ", err) + log.Errorf("error reading config file: ", err) return nil, err } diff --git a/test/helpers/aggoracle_e2e.go b/test/helpers/aggoracle_e2e.go index 7823c597..b7bb504c 100644 --- a/test/helpers/aggoracle_e2e.go +++ b/test/helpers/aggoracle_e2e.go @@ -359,8 +359,8 @@ func newSimulatedEVMAggSovereignChain(auth *bind.TransactOpts) ( return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to get Global Exit Root Manager: %w", err) } if precalculatedAddr != checkGERAddr { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf( - "error deploying bridge, unexpected Global Exit Root Manager address", + return nil, common.Address{}, nil, common.Address{}, nil, errors.New( + "error deploying bridge, unexpected GER Manager address", ) } diff --git a/test/helpers/ethtxmanmock_e2e.go b/test/helpers/ethtxmanmock_e2e.go index c7d7455d..b6753c22 100644 --- 
a/test/helpers/ethtxmanmock_e2e.go +++ b/test/helpers/ethtxmanmock_e2e.go @@ -29,7 +29,8 @@ func NewEthTxManMock( ) ethTxMock := NewEthTxManagerMock(t) - ethTxMock.On("Add", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + ethTxMock.On( + "Add", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). Run(func(args mock.Arguments) { ctx := context.Background() nonce, err := client.Client().PendingNonceAt(ctx, auth.From) From c734080f9338a54f9aed69e885b595080d79dd48 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= Date: Thu, 5 Sep 2024 10:53:15 +0200 Subject: [PATCH 11/17] feat: module name for rpc component --- cmd/run.go | 6 ++++-- log/log.go | 5 +++++ 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/cmd/run.go b/cmd/run.go index 8a0e0f1f..9171f3d4 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -748,7 +748,7 @@ func createRPC( bridgeL1 *bridgesync.BridgeSync, bridgeL2 *bridgesync.BridgeSync, ) *jRPC.Server { - return jRPC.NewServer(cfg, []jRPC.Service{ + services := []jRPC.Service{ { Name: rpc.BRIDGE, Service: rpc.NewBridgeEndpoints( @@ -763,5 +763,7 @@ func createRPC( bridgeL2, ), }, - }) + } + sugaredLogger := log.WithFields("module", cdkcommon.RPC).GetSugaredLogger() + return jRPC.NewServer(cfg, services, jRPC.WithLogger(sugaredLogger)) } diff --git a/log/log.go b/log/log.go index fc308e2c..63c9b346 100644 --- a/log/log.go +++ b/log/log.go @@ -122,6 +122,11 @@ func (l *Logger) WithFields(keyValuePairs ...interface{}) *Logger { } } +// GetSugaredLogger is a getter function that returns instance of already built zap.SugaredLogger. +func (l *Logger) GetSugaredLogger() *zap.SugaredLogger { + return l.x +} + func sprintStackTrace(st []tracerr.Frame) string { builder := strings.Builder{} // Skip deepest frame because it belongs to the go runtime and we don't From 8312a945ffde80479cc072aca2c1d4f5e87b5f07 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= Date: Thu, 5 Sep 2024 11:42:22 +0200 Subject: [PATCH 12/17] feat: provide module name to L1InfoTree logger --- aggregator/aggregator.go | 32 ++++++++++++++++++-------------- l1infotree/tree.go | 23 +++++++++++------------ log/log.go | 4 ++++ 3 files changed, 33 insertions(+), 26 deletions(-) diff --git a/aggregator/aggregator.go b/aggregator/aggregator.go index 634c456c..f33d278f 100644 --- a/aggregator/aggregator.go +++ b/aggregator/aggregator.go @@ -32,6 +32,7 @@ import ( "github.com/0xPolygonHermez/zkevm-synchronizer-l1/state/entities" "github.com/0xPolygonHermez/zkevm-synchronizer-l1/synchronizer" "github.com/ethereum/go-ethereum/common" + "go.uber.org/zap/zapcore" "google.golang.org/grpc" grpchealth "google.golang.org/grpc/health/grpc_health_v1" "google.golang.org/grpc/peer" @@ -1727,7 +1728,7 @@ func (a *Aggregator) buildInputProver( forcedBlockhashL1 := common.Hash{} l1InfoRoot := batchToVerify.L1InfoRoot.Bytes() if !isForcedBatch { - tree, err := l1infotree.NewL1InfoTree(32, [][32]byte{}) //nolint:gomnd + tree, err := l1infotree.NewL1InfoTree(a.logger, 32, [][32]byte{}) // nolint:gomnd if err != nil { return nil, err } @@ -1830,8 +1831,7 @@ func (a *Aggregator) buildInputProver( }, } - printInputProver(inputProver) - + printInputProver(a.logger, inputProver) return inputProver, nil } @@ -1877,17 +1877,21 @@ func (a *Aggregator) getWitness(batchNumber uint64, URL string, fullWitness bool return bytes, nil } -func printInputProver(inputProver *prover.StatelessInputProver) { - 
log.Debugf("Witness length: %v", len(inputProver.PublicInputs.Witness)) - log.Debugf("BatchL2Data length: %v", len(inputProver.PublicInputs.BatchL2Data)) - // log.Debugf("Full DataStream: %v", common.Bytes2Hex(inputProver.PublicInputs.DataStream)) - log.Debugf("OldAccInputHash: %v", common.BytesToHash(inputProver.PublicInputs.OldAccInputHash)) - log.Debugf("L1InfoRoot: %v", common.BytesToHash(inputProver.PublicInputs.L1InfoRoot)) - log.Debugf("TimestampLimit: %v", inputProver.PublicInputs.TimestampLimit) - log.Debugf("SequencerAddr: %v", inputProver.PublicInputs.SequencerAddr) - log.Debugf("AggregatorAddr: %v", inputProver.PublicInputs.AggregatorAddr) - log.Debugf("L1InfoTreeData: %+v", inputProver.PublicInputs.L1InfoTreeData) - log.Debugf("ForcedBlockhashL1: %v", common.BytesToHash(inputProver.PublicInputs.ForcedBlockhashL1)) +func printInputProver(logger *log.Logger, inputProver *prover.StatelessInputProver) { + if !logger.IsEnabledLogLevel(zapcore.DebugLevel) { + return + } + + logger.Debugf("Witness length: %v", len(inputProver.PublicInputs.Witness)) + logger.Debugf("BatchL2Data length: %v", len(inputProver.PublicInputs.BatchL2Data)) + // logger.Debugf("Full DataStream: %v", common.Bytes2Hex(inputProver.PublicInputs.DataStream)) + logger.Debugf("OldAccInputHash: %v", common.BytesToHash(inputProver.PublicInputs.OldAccInputHash)) + logger.Debugf("L1InfoRoot: %v", common.BytesToHash(inputProver.PublicInputs.L1InfoRoot)) + logger.Debugf("TimestampLimit: %v", inputProver.PublicInputs.TimestampLimit) + logger.Debugf("SequencerAddr: %v", inputProver.PublicInputs.SequencerAddr) + logger.Debugf("AggregatorAddr: %v", inputProver.PublicInputs.AggregatorAddr) + logger.Debugf("L1InfoTreeData: %+v", inputProver.PublicInputs.L1InfoTreeData) + logger.Debugf("ForcedBlockhashL1: %v", common.BytesToHash(inputProver.PublicInputs.ForcedBlockhashL1)) } // healthChecker will provide an implementation of the HealthCheck interface. diff --git a/l1infotree/tree.go b/l1infotree/tree.go index 6f6b7406..38d66381 100644 --- a/l1infotree/tree.go +++ b/l1infotree/tree.go @@ -9,6 +9,7 @@ import ( // L1InfoTree provides methods to compute L1InfoTree type L1InfoTree struct { + logger *log.Logger height uint8 zeroHashes [][32]byte count uint32 @@ -17,8 +18,9 @@ type L1InfoTree struct { } // NewL1InfoTree creates new L1InfoTree. -func NewL1InfoTree(height uint8, initialLeaves [][32]byte) (*L1InfoTree, error) { +func NewL1InfoTree(logger *log.Logger, height uint8, initialLeaves [][32]byte) (*L1InfoTree, error) { mt := &L1InfoTree{ + logger: logger, zeroHashes: generateZeroHashes(height), height: height, count: uint32(len(initialLeaves)), @@ -26,20 +28,19 @@ func NewL1InfoTree(height uint8, initialLeaves [][32]byte) (*L1InfoTree, error) var err error mt.siblings, mt.currentRoot, err = mt.initSiblings(initialLeaves) if err != nil { - log.Error("error initializing siblings. Error: ", err) + mt.logger.Error("error initializing siblings. Error: ", err) return nil, err } - log.Debug("Initial count: ", mt.count) - log.Debug("Initial root: ", mt.currentRoot) - + mt.logger.Debug("Initial count: ", mt.count) + mt.logger.Debug("Initial root: ", mt.currentRoot) return mt, nil } // ResetL1InfoTree resets the L1InfoTree. 
func (mt *L1InfoTree) ResetL1InfoTree(initialLeaves [][32]byte) (*L1InfoTree, error) { const defaultTreeHeight = 32 - log.Info("Resetting L1InfoTree...") + mt.logger.Info("Resetting L1InfoTree...") newMT := &L1InfoTree{ zeroHashes: generateZeroHashes(defaultTreeHeight), height: defaultTreeHeight, @@ -48,13 +49,12 @@ func (mt *L1InfoTree) ResetL1InfoTree(initialLeaves [][32]byte) (*L1InfoTree, er var err error newMT.siblings, newMT.currentRoot, err = newMT.initSiblings(initialLeaves) if err != nil { - log.Error("error initializing siblings. Error: ", err) + mt.logger.Error("error initializing siblings. Error: ", err) return nil, err } - log.Debug("Reset initial count: ", newMT.count) - log.Debug("Reset initial root: ", newMT.currentRoot) - + mt.logger.Debug("Reset initial count: ", newMT.count) + mt.logger.Debug("Reset initial root: ", newMT.currentRoot) return newMT, nil } @@ -190,8 +190,7 @@ func (mt *L1InfoTree) initSiblings(initialLeaves [][32]byte) ([][32]byte, common } root, err := mt.BuildL1InfoRoot(initialLeaves) if err != nil { - log.Error("error calculating initial root: ", err) - + mt.logger.Error("error calculating initial root: ", err) return nil, [32]byte{}, err } diff --git a/log/log.go b/log/log.go index 63c9b346..1ef556db 100644 --- a/log/log.go +++ b/log/log.go @@ -324,3 +324,7 @@ func Fatalw(msg string, kv ...interface{}) { msg = appendStackTraceMaybeKV(msg, kv) GetDefaultLogger().Fatalw(msg, kv...) } + +func (l *Logger) IsEnabledLogLevel(lvl zapcore.Level) bool { + return l.x.Level().Enabled(lvl) +} From 2b808273e0101e1096c098efb2c99a037cddfb65 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= Date: Thu, 5 Sep 2024 11:42:51 +0200 Subject: [PATCH 13/17] feat: expand sequence-sender module name to tx builders --- cmd/run.go | 22 ++++++++------ log/log.go | 30 +++++++++---------- sequencesender/sequencesender.go | 5 +--- sequencesender/txbuilder/banana_base.go | 3 ++ sequencesender/txbuilder/banana_base_test.go | 2 ++ sequencesender/txbuilder/banana_validium.go | 22 ++++++++------ .../txbuilder/banana_validium_test.go | 2 ++ sequencesender/txbuilder/banana_zkevm.go | 16 ++++++---- sequencesender/txbuilder/banana_zkevm_test.go | 2 ++ sequencesender/txbuilder/elderberry_base.go | 9 ++++-- .../txbuilder/elderberry_base_test.go | 5 ++-- .../txbuilder/elderberry_validium.go | 12 ++++---- .../txbuilder/elderberry_validium_test.go | 3 +- sequencesender/txbuilder/elderberry_zkevm.go | 10 +++---- .../txbuilder/elderberry_zkevm_test.go | 7 +++-- 15 files changed, 88 insertions(+), 62 deletions(-) diff --git a/cmd/run.go b/cmd/run.go index 9171f3d4..6a29cd4f 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -183,6 +183,7 @@ func createSequenceSender( l1Client *ethclient.Client, l1InfoTreeSync *l1infotreesync.L1InfoTreeSync, ) *sequencesender.SequenceSender { + logger := log.WithFields("module", cdkcommon.SEQUENCE_SENDER) ethman, err := etherman.NewClient(ethermanconfig.Config{ EthermanConfig: ethtxman.Config{ URL: cfg.SequenceSender.EthTxManager.Etherman.URL, @@ -196,27 +197,27 @@ func createSequenceSender( }, }, cfg.NetworkConfig.L1Config, cfg.Common) if err != nil { - log.Fatalf("Failed to create etherman. Err: %w, ", err) + logger.Fatalf("Failed to create etherman. 
Err: %w, ", err) } auth, _, err := ethman.LoadAuthFromKeyStore(cfg.SequenceSender.PrivateKey.Path, cfg.SequenceSender.PrivateKey.Password) if err != nil { - log.Fatal(err) + logger.Fatal(err) } cfg.SequenceSender.SenderAddress = auth.From blockFialityType := etherman.BlockNumberFinality(cfg.SequenceSender.BlockFinality) blockFinality, err := blockFialityType.ToBlockNum() if err != nil { - log.Fatalf("Failed to create block finality. Err: %w, ", err) + logger.Fatalf("Failed to create block finality. Err: %w, ", err) } - txBuilder, err := newTxBuilder(cfg, ethman, l1Client, l1InfoTreeSync, blockFinality) + txBuilder, err := newTxBuilder(cfg, logger, ethman, l1Client, l1InfoTreeSync, blockFinality) if err != nil { - log.Fatal(err) + logger.Fatal(err) } - seqSender, err := sequencesender.New(cfg.SequenceSender, ethman, txBuilder) + seqSender, err := sequencesender.New(cfg.SequenceSender, logger, ethman, txBuilder) if err != nil { - log.Fatal(err) + logger.Fatal(err) } return seqSender @@ -224,6 +225,7 @@ func createSequenceSender( func newTxBuilder( cfg config.Config, + logger *log.Logger, ethman *etherman.Client, l1Client *ethclient.Client, l1InfoTreeSync *l1infotreesync.L1InfoTreeSync, @@ -243,6 +245,7 @@ func newTxBuilder( case contracts.VersionBanana: if cfg.Common.IsValidiumMode { txBuilder = txbuilder.NewTxBuilderBananaValidium( + logger, ethman.Contracts.Banana.Rollup, ethman.Contracts.Banana.GlobalExitRoot, da, @@ -254,6 +257,7 @@ func newTxBuilder( ) } else { txBuilder = txbuilder.NewTxBuilderBananaZKEVM( + logger, ethman.Contracts.Banana.Rollup, ethman.Contracts.Banana.GlobalExitRoot, *auth, @@ -266,11 +270,11 @@ func newTxBuilder( case contracts.VersionElderberry: if cfg.Common.IsValidiumMode { txBuilder = txbuilder.NewTxBuilderElderberryValidium( - ethman.Contracts.Elderberry.Rollup, da, *auth, cfg.SequenceSender.MaxBatchesForL1, + logger, ethman.Contracts.Elderberry.Rollup, da, *auth, cfg.SequenceSender.MaxBatchesForL1, ) } else { txBuilder = txbuilder.NewTxBuilderElderberryZKEVM( - ethman.Contracts.Elderberry.Rollup, *auth, cfg.SequenceSender.MaxTxSizeForL1, + logger, ethman.Contracts.Elderberry.Rollup, *auth, cfg.SequenceSender.MaxTxSizeForL1, ) } default: diff --git a/log/log.go b/log/log.go index 1ef556db..201c8790 100644 --- a/log/log.go +++ b/log/log.go @@ -258,21 +258,6 @@ func Errorf(template string, args ...interface{}) { GetDefaultLogger().Errorf(template, args...) } -// appendStackTraceMaybeKV will append the stacktrace to the KV -func appendStackTraceMaybeKV(msg string, kv []interface{}) string { - for i := range kv { - if i%2 == 0 { - continue - } - if err, ok := kv[i].(error); ok { - err = tracerr.Wrap(err) - st := tracerr.StackTrace(err) - return fmt.Sprintf("%v: %v%v\n", msg, err, sprintStackTrace(st)) - } - } - return msg -} - // Debugw calls log.Debugw func (l *Logger) Debugw(msg string, kv ...interface{}) { l.x.Debugw(msg, kv...) @@ -325,6 +310,21 @@ func Fatalw(msg string, kv ...interface{}) { GetDefaultLogger().Fatalw(msg, kv...) 
} +// appendStackTraceMaybeKV will append the stacktrace to the KV +func appendStackTraceMaybeKV(msg string, kv []interface{}) string { + for i := range kv { + if i%2 == 0 { + continue + } + if err, ok := kv[i].(error); ok { + err = tracerr.Wrap(err) + st := tracerr.StackTrace(err) + return fmt.Sprintf("%v: %v%v\n", msg, err, sprintStackTrace(st)) + } + } + return msg +} + func (l *Logger) IsEnabledLogLevel(lvl zapcore.Level) bool { return l.x.Level().Enabled(lvl) } diff --git a/sequencesender/sequencesender.go b/sequencesender/sequencesender.go index aeda393a..8546d51d 100644 --- a/sequencesender/sequencesender.go +++ b/sequencesender/sequencesender.go @@ -13,7 +13,6 @@ import ( "time" "github.com/0xPolygon/cdk-rpc/rpc" - cdkcommon "github.com/0xPolygon/cdk/common" "github.com/0xPolygon/cdk/etherman" "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sequencesender/seqsendertypes" @@ -84,9 +83,7 @@ type ethTxAdditionalData struct { } // New inits sequence sender -func New(cfg Config, etherman *etherman.Client, txBuilder txbuilder.TxBuilder) (*SequenceSender, error) { - logger := log.WithFields("module", cdkcommon.SEQUENCE_SENDER) - +func New(cfg Config, logger *log.Logger, etherman *etherman.Client, txBuilder txbuilder.TxBuilder) (*SequenceSender, error) { // Create sequencesender s := SequenceSender{ cfg: cfg, diff --git a/sequencesender/txbuilder/banana_base.go b/sequencesender/txbuilder/banana_base.go index 871d02be..1d00dd6a 100644 --- a/sequencesender/txbuilder/banana_base.go +++ b/sequencesender/txbuilder/banana_base.go @@ -34,6 +34,7 @@ type l1Client interface { } type TxBuilderBananaBase struct { + logger *log.Logger rollupContract rollupBananaBaseContractor globalExitRootContract globalExitRootBananaContractor l1InfoTree l1InfoSyncer @@ -43,6 +44,7 @@ type TxBuilderBananaBase struct { } func NewTxBuilderBananaBase( + logger *log.Logger, rollupContract rollupBananaBaseContractor, gerContract globalExitRootBananaContractor, l1InfoTree l1InfoSyncer, @@ -51,6 +53,7 @@ func NewTxBuilderBananaBase( opts bind.TransactOpts, ) *TxBuilderBananaBase { return &TxBuilderBananaBase{ + logger: logger, rollupContract: rollupContract, globalExitRootContract: gerContract, l1InfoTree: l1InfoTree, diff --git a/sequencesender/txbuilder/banana_base_test.go b/sequencesender/txbuilder/banana_base_test.go index 71313191..5386e287 100644 --- a/sequencesender/txbuilder/banana_base_test.go +++ b/sequencesender/txbuilder/banana_base_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/0xPolygon/cdk/l1infotreesync" + "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sequencesender/seqsendertypes" "github.com/0xPolygon/cdk/sequencesender/txbuilder" "github.com/0xPolygon/cdk/sequencesender/txbuilder/mocks_txbuilder" @@ -96,6 +97,7 @@ func newBananaBaseTestData(t *testing.T) *testDataBananaBase { l1Client := mocks_txbuilder.NewL1Client(t) l1InfoSyncer := mocks_txbuilder.NewL1InfoSyncer(t) sut := txbuilder.NewTxBuilderBananaBase( + log.GetDefaultLogger(), zkevmContractMock, gerContractMock, l1InfoSyncer, l1Client, big.NewInt(0), opts, diff --git a/sequencesender/txbuilder/banana_validium.go b/sequencesender/txbuilder/banana_validium.go index 882b2540..68fa6762 100644 --- a/sequencesender/txbuilder/banana_validium.go +++ b/sequencesender/txbuilder/banana_validium.go @@ -37,6 +37,7 @@ type rollupBananaValidiumContractor interface { } func NewTxBuilderBananaValidium( + logger *log.Logger, rollupContract rollupBananaValidiumContractor, gerContract globalExitRootBananaContractor, da 
dataavailability.SequenceSenderBanana, opts bind.TransactOpts, maxBatchesForL1 uint64, @@ -44,8 +45,11 @@ func NewTxBuilderBananaValidium( ethClient l1Client, blockFinality *big.Int, ) *TxBuilderBananaValidium { + txBuilderBase := *NewTxBuilderBananaBase(logger, rollupContract, + gerContract, l1InfoTree, ethClient, blockFinality, opts) + return &TxBuilderBananaValidium{ - TxBuilderBananaBase: *NewTxBuilderBananaBase(rollupContract, gerContract, l1InfoTree, ethClient, blockFinality, opts), + TxBuilderBananaBase: txBuilderBase, da: da, condNewSeq: NewConditionalNewSequenceNumBatches(maxBatchesForL1), rollupContract: rollupContract, @@ -74,25 +78,25 @@ func (t *TxBuilderBananaValidium) BuildSequenceBatchesTx( var err error ethseq, err := convertToSequenceBanana(sequences) if err != nil { - log.Error("error converting sequences to etherman: ", err) + t.logger.Error("error converting sequences to etherman: ", err) return nil, err } dataAvailabilityMessage, err = t.da.PostSequenceBanana(ctx, ethseq) if err != nil { - log.Error("error posting sequences to the data availability protocol: ", err) + t.logger.Error("error posting sequences to the data availability protocol: ", err) return nil, err } if dataAvailabilityMessage == nil { err := fmt.Errorf("data availability message is nil") - log.Error("error posting sequences to the data availability protocol: ", err.Error()) + t.logger.Error("error posting sequences to the data availability protocol: ", err.Error()) return nil, err } // Build sequence data tx, err := t.internalBuildSequenceBatchesTx(ethseq, dataAvailabilityMessage) if err != nil { - log.Errorf("error estimating new sequenceBatches to add to ethtxmanager: ", err) + t.logger.Errorf("error estimating new sequenceBatches to add to ethtxmanager: ", err) return nil, err } return tx, nil @@ -130,15 +134,15 @@ func (t *TxBuilderBananaValidium) sequenceBatchesValidium( } } - log.Infof("building banana sequence tx. AccInputHash: %s", sequence.AccInputHash.Hex()) + t.logger.Infof("building banana sequence tx. 
AccInputHash: %s", sequence.AccInputHash.Hex()) tx, err := t.rollupContract.SequenceBatchesValidium( &opts, batches, sequence.CounterL1InfoRoot, sequence.MaxSequenceTimestamp, sequence.AccInputHash, sequence.L2Coinbase, dataAvailabilityMessage, ) if err != nil { - log.Debugf("Batches to send: %+v", batches) - log.Debug("l2CoinBase: ", sequence.L2Coinbase) - log.Debug("Sequencer address: ", opts.From) + t.logger.Debugf("Batches to send: %+v", batches) + t.logger.Debug("l2CoinBase: ", sequence.L2Coinbase) + t.logger.Debug("Sequencer address: ", opts.From) } return tx, err diff --git a/sequencesender/txbuilder/banana_validium_test.go b/sequencesender/txbuilder/banana_validium_test.go index 75a9bf46..8f764595 100644 --- a/sequencesender/txbuilder/banana_validium_test.go +++ b/sequencesender/txbuilder/banana_validium_test.go @@ -9,6 +9,7 @@ import ( "github.com/0xPolygon/cdk/dataavailability/mocks_da" "github.com/0xPolygon/cdk/l1infotreesync" + "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sequencesender/seqsendertypes" "github.com/0xPolygon/cdk/sequencesender/txbuilder" "github.com/0xPolygon/cdk/sequencesender/txbuilder/mocks_txbuilder" @@ -90,6 +91,7 @@ func newBananaValidiumTestData(t *testing.T, maxBatchesForL1 uint64) *testDataBa opts := bind.TransactOpts{} sut := txbuilder.NewTxBuilderBananaValidium( + log.GetDefaultLogger(), zkevmContractMock, gerContractMock, daMock, diff --git a/sequencesender/txbuilder/banana_zkevm.go b/sequencesender/txbuilder/banana_zkevm.go index 53856cd0..42668323 100644 --- a/sequencesender/txbuilder/banana_zkevm.go +++ b/sequencesender/txbuilder/banana_zkevm.go @@ -36,6 +36,7 @@ type globalExitRootBananaZKEVMContractor interface { } func NewTxBuilderBananaZKEVM( + logger *log.Logger, rollupContract rollupBananaZKEVMContractor, gerContract globalExitRootBananaZKEVMContractor, opts bind.TransactOpts, @@ -44,8 +45,11 @@ func NewTxBuilderBananaZKEVM( ethClient l1Client, blockFinality *big.Int, ) *TxBuilderBananaZKEVM { + txBuilderBase := *NewTxBuilderBananaBase(logger, rollupContract, + gerContract, l1InfoTree, ethClient, blockFinality, opts) + return &TxBuilderBananaZKEVM{ - TxBuilderBananaBase: *NewTxBuilderBananaBase(rollupContract, gerContract, l1InfoTree, ethClient, blockFinality, opts), + TxBuilderBananaBase: txBuilderBase, condNewSeq: NewConditionalNewSequenceMaxSize(maxTxSizeForL1), rollupContract: rollupContract, } @@ -70,7 +74,7 @@ func (t *TxBuilderBananaZKEVM) BuildSequenceBatchesTx( var err error ethseq, err := convertToSequenceBanana(sequences) if err != nil { - log.Error("error converting sequences to etherman: ", err) + t.logger.Error("error converting sequences to etherman: ", err) return nil, err } newopts := t.opts @@ -83,7 +87,7 @@ func (t *TxBuilderBananaZKEVM) BuildSequenceBatchesTx( // Build sequence data tx, err := t.sequenceBatchesRollup(newopts, ethseq) if err != nil { - log.Errorf("error estimating new sequenceBatches to add to ethtxmanager: ", err) + t.logger.Errorf("error estimating new sequenceBatches to add to ethtxmanager: ", err) return nil, err } return tx, nil @@ -111,9 +115,9 @@ func (t *TxBuilderBananaZKEVM) sequenceBatchesRollup( &opts, batches, sequence.CounterL1InfoRoot, sequence.MaxSequenceTimestamp, sequence.AccInputHash, sequence.L2Coinbase, ) if err != nil { - log.Debugf("Batches to send: %+v", batches) - log.Debug("l2CoinBase: ", sequence.L2Coinbase) - log.Debug("Sequencer address: ", opts.From) + t.logger.Debugf("Batches to send: %+v", batches) + t.logger.Debug("l2CoinBase: ", sequence.L2Coinbase) + 
t.logger.Debug("Sequencer address: ", opts.From) } return tx, err diff --git a/sequencesender/txbuilder/banana_zkevm_test.go b/sequencesender/txbuilder/banana_zkevm_test.go index 9252f91d..a4ff4bd7 100644 --- a/sequencesender/txbuilder/banana_zkevm_test.go +++ b/sequencesender/txbuilder/banana_zkevm_test.go @@ -8,6 +8,7 @@ import ( "testing" "github.com/0xPolygon/cdk/l1infotreesync" + "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sequencesender/seqsendertypes" "github.com/0xPolygon/cdk/sequencesender/txbuilder" "github.com/0xPolygon/cdk/sequencesender/txbuilder/mocks_txbuilder" @@ -90,6 +91,7 @@ func newBananaZKEVMTestData(t *testing.T, maxTxSizeForL1 uint64) *testDataBanana l1Client := mocks_txbuilder.NewL1Client(t) l1InfoSyncer := mocks_txbuilder.NewL1InfoSyncer(t) sut := txbuilder.NewTxBuilderBananaZKEVM( + log.GetDefaultLogger(), zkevmContractMock, gerContractMock, opts, diff --git a/sequencesender/txbuilder/elderberry_base.go b/sequencesender/txbuilder/elderberry_base.go index 9022eae3..8e61e174 100644 --- a/sequencesender/txbuilder/elderberry_base.go +++ b/sequencesender/txbuilder/elderberry_base.go @@ -4,6 +4,7 @@ import ( "context" "github.com/0xPolygon/cdk/etherman" + "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sequencesender/seqsendertypes" "github.com/0xPolygon/cdk/state/datastream" "github.com/ethereum/go-ethereum/accounts/abi/bind" @@ -11,12 +12,14 @@ import ( ) type TxBuilderElderberryBase struct { - opts bind.TransactOpts + logger *log.Logger + opts bind.TransactOpts } -func NewTxBuilderElderberryBase(opts bind.TransactOpts) *TxBuilderElderberryBase { +func NewTxBuilderElderberryBase(logger *log.Logger, opts bind.TransactOpts) *TxBuilderElderberryBase { return &TxBuilderElderberryBase{ - opts: opts, + logger: logger, + opts: opts, } } diff --git a/sequencesender/txbuilder/elderberry_base_test.go b/sequencesender/txbuilder/elderberry_base_test.go index 9483e2a8..806a47f8 100644 --- a/sequencesender/txbuilder/elderberry_base_test.go +++ b/sequencesender/txbuilder/elderberry_base_test.go @@ -4,6 +4,7 @@ import ( "context" "testing" + "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sequencesender/seqsendertypes" "github.com/0xPolygon/cdk/state/datastream" "github.com/ethereum/go-ethereum/accounts/abi/bind" @@ -13,7 +14,7 @@ import ( func TestElderberryBaseNewSequence(t *testing.T) { opts := bind.TransactOpts{} - sut := NewTxBuilderElderberryBase(opts) + sut := NewTxBuilderElderberryBase(log.GetDefaultLogger(), opts) require.NotNil(t, sut) seq, err := sut.NewSequence(context.TODO(), nil, common.Address{}) require.NotNil(t, seq) @@ -94,7 +95,7 @@ func newElderberryBaseSUT(t *testing.T) *TxBuilderElderberryBase { t.Helper() opts := bind.TransactOpts{} - sut := NewTxBuilderElderberryBase(opts) + sut := NewTxBuilderElderberryBase(log.GetDefaultLogger(), opts) require.NotNil(t, sut) return sut } diff --git a/sequencesender/txbuilder/elderberry_validium.go b/sequencesender/txbuilder/elderberry_validium.go index 23e1ba08..62973b02 100644 --- a/sequencesender/txbuilder/elderberry_validium.go +++ b/sequencesender/txbuilder/elderberry_validium.go @@ -36,12 +36,14 @@ type rollupElderberryValidiumContractor interface { ) (*types.Transaction, error) } -func NewTxBuilderElderberryValidium(zkevm contracts.RollupElderberryType, +func NewTxBuilderElderberryValidium( + logger *log.Logger, + zkevm contracts.RollupElderberryType, da dataavailability.SequenceSenderElderberry, opts bind.TransactOpts, maxBatchesForL1 uint64) *TxBuilderElderberryValidium { return 
&TxBuilderElderberryValidium{ da: da, - TxBuilderElderberryBase: *NewTxBuilderElderberryBase(opts), + TxBuilderElderberryBase: *NewTxBuilderElderberryBase(logger, opts), condNewSeq: NewConditionalNewSequenceNumBatches(maxBatchesForL1), rollupContract: zkevm, } @@ -68,12 +70,12 @@ func (t *TxBuilderElderberryValidium) BuildSequenceBatchesTx( batchesData := convertToBatchesData(sequences) dataAvailabilityMessage, err := t.da.PostSequenceElderberry(ctx, batchesData) if err != nil { - log.Error("error posting sequences to the data availability protocol: ", err) + t.logger.Error("error posting sequences to the data availability protocol: ", err) return nil, err } if dataAvailabilityMessage == nil { err := fmt.Errorf("data availability message is nil") - log.Error("error posting sequences to the data availability protocol: ", err.Error()) + t.logger.Error("error posting sequences to the data availability protocol: ", err.Error()) return nil, err } newopts := t.opts @@ -103,7 +105,7 @@ func (t *TxBuilderElderberryValidium) buildSequenceBatchesTxValidium(opts *bind. } } lastSequencedBatchNumber := getLastSequencedBatchNumber(sequences) - log.Infof("SequenceBatchesValidium(from=%s, len(batches)=%d, MaxSequenceTimestamp=%d, "+ + t.logger.Infof("SequenceBatchesValidium(from=%s, len(batches)=%d, MaxSequenceTimestamp=%d, "+ "lastSequencedBatchNumber=%d, L2Coinbase=%s, dataAvailabilityMessage=%s)", t.opts.From.String(), len(batches), sequences.MaxSequenceTimestamp(), lastSequencedBatchNumber, sequences.L2Coinbase().String(), hex.EncodeToString(dataAvailabilityMessage), diff --git a/sequencesender/txbuilder/elderberry_validium_test.go b/sequencesender/txbuilder/elderberry_validium_test.go index 7607576d..6ca80a58 100644 --- a/sequencesender/txbuilder/elderberry_validium_test.go +++ b/sequencesender/txbuilder/elderberry_validium_test.go @@ -10,6 +10,7 @@ import ( "github.com/0xPolygon/cdk-contracts-tooling/contracts/elderberry/polygonvalidiumetrog" "github.com/0xPolygon/cdk/dataavailability/mocks_da" "github.com/0xPolygon/cdk/etherman/contracts" + "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sequencesender/seqsendertypes" "github.com/0xPolygon/cdk/sequencesender/txbuilder" "github.com/0xPolygon/cdk/state/datastream" @@ -109,7 +110,7 @@ func newElderberryValidiumSUT(t *testing.T) *testDataElderberryValidium { da := mocks_da.NewSequenceSenderElderberry(t) - sut := txbuilder.NewTxBuilderElderberryValidium(*zkevmContract, da, *opts, uint64(100)) + sut := txbuilder.NewTxBuilderElderberryValidium(log.GetDefaultLogger(), *zkevmContract, da, *opts, uint64(100)) require.NotNil(t, sut) return &testDataElderberryValidium{ mockDA: da, diff --git a/sequencesender/txbuilder/elderberry_zkevm.go b/sequencesender/txbuilder/elderberry_zkevm.go index 3f446b7a..a4d3bb56 100644 --- a/sequencesender/txbuilder/elderberry_zkevm.go +++ b/sequencesender/txbuilder/elderberry_zkevm.go @@ -31,10 +31,11 @@ type rollupElderberryZKEVMContractor interface { } func NewTxBuilderElderberryZKEVM( - zkevm rollupElderberryZKEVMContractor, opts bind.TransactOpts, maxTxSizeForL1 uint64, + logger *log.Logger, zkevm rollupElderberryZKEVMContractor, + opts bind.TransactOpts, maxTxSizeForL1 uint64, ) *TxBuilderElderberryZKEVM { return &TxBuilderElderberryZKEVM{ - TxBuilderElderberryBase: *NewTxBuilderElderberryBase(opts), + TxBuilderElderberryBase: *NewTxBuilderElderberryBase(logger, opts), condNewSeq: NewConditionalNewSequenceMaxSize(maxTxSizeForL1), rollupContract: zkevm, } @@ -103,9 +104,8 @@ func (t *TxBuilderElderberryZKEVM) 
sequenceBatchesRollup( } func (t *TxBuilderElderberryZKEVM) warningMessage( - batches []polygonvalidiumetrog.PolygonRollupBaseEtrogBatchData, l2Coinbase common.Address, opts *bind.TransactOpts, -) { - log.Warnf("Sequencer address: ", opts.From, "l2CoinBase: ", l2Coinbase, " Batches to send: %+v", batches) + batches []polygonvalidiumetrog.PolygonRollupBaseEtrogBatchData, l2Coinbase common.Address, opts *bind.TransactOpts) { + t.logger.Warnf("Sequencer address: ", opts.From, "l2CoinBase: ", l2Coinbase, " Batches to send: %+v", batches) } func (t *TxBuilderElderberryZKEVM) String() string { diff --git a/sequencesender/txbuilder/elderberry_zkevm_test.go b/sequencesender/txbuilder/elderberry_zkevm_test.go index 27e54df8..3544a700 100644 --- a/sequencesender/txbuilder/elderberry_zkevm_test.go +++ b/sequencesender/txbuilder/elderberry_zkevm_test.go @@ -8,6 +8,7 @@ import ( "github.com/0xPolygon/cdk-contracts-tooling/contracts/elderberry/polygonvalidiumetrog" "github.com/0xPolygon/cdk/etherman/contracts" + "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sequencesender/seqsendertypes" "github.com/0xPolygon/cdk/sequencesender/txbuilder" "github.com/0xPolygon/cdk/state/datastream" @@ -20,7 +21,7 @@ import ( func TestElderberryZkevmName(t *testing.T) { zkevmContract := contracts.RollupElderberryType{} opts := bind.TransactOpts{} - sut := txbuilder.NewTxBuilderElderberryZKEVM(zkevmContract, opts, 100) + sut := txbuilder.NewTxBuilderElderberryZKEVM(log.GetDefaultLogger(), zkevmContract, opts, 100) require.NotNil(t, sut) require.True(t, strings.Contains(sut.String(), "Elderberry")) require.True(t, strings.Contains(sut.String(), "ZKEVM")) @@ -29,7 +30,7 @@ func TestElderberryZkevmName(t *testing.T) { func TestElderberryZkevmNewSequence(t *testing.T) { zkevmContract := contracts.RollupElderberryType{} opts := bind.TransactOpts{} - sut := txbuilder.NewTxBuilderElderberryZKEVM(zkevmContract, opts, 100) + sut := txbuilder.NewTxBuilderElderberryZKEVM(log.GetDefaultLogger(), zkevmContract, opts, 100) require.NotNil(t, sut) seq, err := sut.NewSequence(context.TODO(), nil, common.Address{}) require.NoError(t, err) @@ -106,7 +107,7 @@ func newElderberryZkevmSUT(t *testing.T) *txbuilder.TxBuilderElderberryZKEVM { require.NoError(t, err) opts, err := bind.NewKeyedTransactorWithChainID(privateKey, big.NewInt(1)) require.NoError(t, err) - sut := txbuilder.NewTxBuilderElderberryZKEVM(*zkevmContract, *opts, 100) + sut := txbuilder.NewTxBuilderElderberryZKEVM(log.GetDefaultLogger(), *zkevmContract, *opts, 100) require.NotNil(t, sut) return sut } From 093c689a6b48af8cba4c1a6ee9db742414db3aba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= Date: Thu, 5 Sep 2024 11:43:52 +0200 Subject: [PATCH 14/17] feat: upgrade cdk-rpc dependency --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index f64ec3bb..a2ca38f4 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.22.4 require ( github.com/0xPolygon/cdk-contracts-tooling v0.0.0-20240819092536-5a65d4761b2f github.com/0xPolygon/cdk-data-availability v0.0.9 - github.com/0xPolygon/cdk-rpc v0.0.0-20240419104226-c0a62ba0f49d + github.com/0xPolygon/cdk-rpc v0.0.0-20240905074455-431d3c271fe8 github.com/0xPolygonHermez/zkevm-data-streamer v0.2.6 github.com/0xPolygonHermez/zkevm-ethtx-manager v0.1.10-0.20240716105056-c051c96d0234 github.com/0xPolygonHermez/zkevm-synchronizer-l1 v0.7.0 diff --git a/go.sum b/go.sum index bc9eb188..818a9b5d 100644 --- a/go.sum +++ b/go.sum @@ -2,8 +2,8 @@ 
github.com/0xPolygon/cdk-contracts-tooling v0.0.0-20240819092536-5a65d4761b2f h1 github.com/0xPolygon/cdk-contracts-tooling v0.0.0-20240819092536-5a65d4761b2f/go.mod h1:mFlcEjsm2YBBsu8atHJ3zyVnwM+Z/fMXpVmIJge+WVU= github.com/0xPolygon/cdk-data-availability v0.0.9 h1:KkP+hJH9nY5fljpSNtW2pfP5YQCJOsSRzfnl0yT78rI= github.com/0xPolygon/cdk-data-availability v0.0.9/go.mod h1:5A+CU4FGeyG8zTDJc0khMzRxPzFzmpRydeEWmLztgh4= -github.com/0xPolygon/cdk-rpc v0.0.0-20240419104226-c0a62ba0f49d h1:sxh6hZ2jF/sxxj2jd5o1vuNNCZjYmn4aRG9SRlVaEFs= -github.com/0xPolygon/cdk-rpc v0.0.0-20240419104226-c0a62ba0f49d/go.mod h1:2scWqMMufrQXu7TikDgQ3BsyaKoX8qP26D6E262vSOg= +github.com/0xPolygon/cdk-rpc v0.0.0-20240905074455-431d3c271fe8 h1:Jri+ydl8PudddGacnVLatrCuAG9e1Ut8W4J0GoawssU= +github.com/0xPolygon/cdk-rpc v0.0.0-20240905074455-431d3c271fe8/go.mod h1:2scWqMMufrQXu7TikDgQ3BsyaKoX8qP26D6E262vSOg= github.com/0xPolygonHermez/zkevm-data-streamer v0.2.6 h1:BSO1uu6dmLQ5kKb3uyDvsUxbnIoyumKvlwr0OtpTYMo= github.com/0xPolygonHermez/zkevm-data-streamer v0.2.6/go.mod h1:RC6ouyNsUtJrv5aGPcM6Dm5xhXN209tRSzcsJsaOtZI= github.com/0xPolygonHermez/zkevm-ethtx-manager v0.1.10-0.20240716105056-c051c96d0234 h1:QElCysO7f2xaknY/RDjxcs7IVmcgORfsCX2g+YD0Ko4= From a886ec3e43da98f99b35d0fd7ddcdcad2aedc073 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= Date: Thu, 5 Sep 2024 14:21:24 +0200 Subject: [PATCH 15/17] feat: provide logger to CalculateAccInputHash function --- aggregator/aggregator.go | 1 + cmd/run.go | 6 ++++-- common/common.go | 13 +++++++------ rpc/bridge.go | 13 ++++++++----- sequencesender/txbuilder/banana_base.go | 2 +- 5 files changed, 21 insertions(+), 14 deletions(-) diff --git a/aggregator/aggregator.go b/aggregator/aggregator.go index f33d278f..eeecf561 100644 --- a/aggregator/aggregator.go +++ b/aggregator/aggregator.go @@ -551,6 +551,7 @@ func (a *Aggregator) handleReceivedDataStream( } accInputHash := cdkcommon.CalculateAccInputHash( + a.logger, oldDBBatch.Batch.AccInputHash, a.currentStreamBatch.BatchL2Data, a.currentStreamBatch.L1InfoRoot, diff --git a/cmd/run.go b/cmd/run.go index 6a29cd4f..de397ae7 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -752,10 +752,12 @@ func createRPC( bridgeL1 *bridgesync.BridgeSync, bridgeL2 *bridgesync.BridgeSync, ) *jRPC.Server { + logger := log.WithFields("module", cdkcommon.RPC) services := []jRPC.Service{ { Name: rpc.BRIDGE, Service: rpc.NewBridgeEndpoints( + logger, cfg.WriteTimeout.Duration, cfg.ReadTimeout.Duration, cdkNetworkID, @@ -768,6 +770,6 @@ func createRPC( ), }, } - sugaredLogger := log.WithFields("module", cdkcommon.RPC).GetSugaredLogger() - return jRPC.NewServer(cfg, services, jRPC.WithLogger(sugaredLogger)) + + return jRPC.NewServer(cfg, services, jRPC.WithLogger(logger.GetSugaredLogger())) } diff --git a/common/common.go b/common/common.go index c4dbdf18..cd5b5d70 100644 --- a/common/common.go +++ b/common/common.go @@ -41,6 +41,7 @@ func BytesToUint32(bytes []byte) uint32 { // CalculateAccInputHash computes the hash of accumulated input data for a given batch. 
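A minimal sketch of the new logger-first CalculateAccInputHash call shape, matching the call sites in aggregator.go and banana_base.go from this patch. Illustration only, not part of the patch; all inputs are placeholders, and the timestamp-limit argument is assumed to be a uint64 based on those call sites.

package main

import (
	cdkcommon "github.com/0xPolygon/cdk/common"
	"github.com/0xPolygon/cdk/log"
	"github.com/ethereum/go-ethereum/common"
)

func main() {
	logger := log.GetDefaultLogger()

	// All arguments below are placeholders; in the aggregator they come from
	// the previous batch, the stream data and the L1 info tree.
	accInputHash := cdkcommon.CalculateAccInputHash(
		logger,
		common.Hash{},      // old accumulated input hash
		[]byte{0x0b, 0x00}, // batch L2 data
		common.Hash{},      // L1 info root
		uint64(1700000000), // timestamp limit (assumed uint64)
		common.Address{},   // sequencer / coinbase address
		common.Hash{},      // forced blockhash L1
	)
	logger.Infof("acc input hash: %s", accInputHash.Hex())
}
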
func CalculateAccInputHash( + logger *log.Logger, oldAccInputHash common.Hash, batchData []byte, l1InfoRoot common.Hash, @@ -78,12 +79,12 @@ func CalculateAccInputHash( v2 = keccak256.Hash(v2) - log.Debugf("OldAccInputHash: %v", oldAccInputHash) - log.Debugf("BatchHashData: %v", common.Bytes2Hex(v2)) - log.Debugf("L1InfoRoot: %v", l1InfoRoot) - log.Debugf("TimeStampLimit: %v", timestampLimit) - log.Debugf("Sequencer Address: %v", sequencerAddr) - log.Debugf("Forced BlockHashL1: %v", forcedBlockhashL1) + logger.Debugf("OldAccInputHash: %v", oldAccInputHash) + logger.Debugf("BatchHashData: %v", common.Bytes2Hex(v2)) + logger.Debugf("L1InfoRoot: %v", l1InfoRoot) + logger.Debugf("TimeStampLimit: %v", timestampLimit) + logger.Debugf("Sequencer Address: %v", sequencerAddr) + logger.Debugf("Forced BlockHashL1: %v", forcedBlockhashL1) return common.BytesToHash(keccak256.Hash(v1, v2, v3, v4, v5, v6)) } diff --git a/rpc/bridge.go b/rpc/bridge.go index eb6da780..cd0d6855 100644 --- a/rpc/bridge.go +++ b/rpc/bridge.go @@ -28,6 +28,7 @@ const ( // BridgeEndpoints contains implementations for the "bridge" RPC endpoints type BridgeEndpoints struct { + logger *log.Logger meter metric.Meter readTimeout time.Duration writeTimeout time.Duration @@ -42,6 +43,7 @@ type BridgeEndpoints struct { // NewBridgeEndpoints returns InteropEndpoints func NewBridgeEndpoints( + logger *log.Logger, writeTimeout time.Duration, readTimeout time.Duration, networkID uint32, @@ -54,6 +56,7 @@ func NewBridgeEndpoints( ) *BridgeEndpoints { meter := otel.Meter(meterName) return &BridgeEndpoints{ + logger: logger, meter: meter, readTimeout: readTimeout, writeTimeout: writeTimeout, @@ -76,7 +79,7 @@ func (b *BridgeEndpoints) L1InfoTreeIndexForBridge(networkID uint32, depositCoun c, merr := b.meter.Int64Counter("l1_info_tree_index_for_bridge") if merr != nil { - log.Warnf("failed to create l1_info_tree_index_for_bridge counter: %s", merr) + b.logger.Warnf("failed to create l1_info_tree_index_for_bridge counter: %s", merr) } c.Add(ctx, 1) @@ -112,7 +115,7 @@ func (b *BridgeEndpoints) InjectedInfoAfterIndex(networkID uint32, l1InfoTreeInd c, merr := b.meter.Int64Counter("injected_info_after_index") if merr != nil { - log.Warnf("failed to create injected_info_after_index counter: %s", merr) + b.logger.Warnf("failed to create injected_info_after_index counter: %s", merr) } c.Add(ctx, 1) @@ -157,7 +160,7 @@ func (b *BridgeEndpoints) ClaimProof( c, merr := b.meter.Int64Counter("claim_proof") if merr != nil { - log.Warnf("failed to create claim_proof counter: %s", merr) + b.logger.Warnf("failed to create claim_proof counter: %s", merr) } c.Add(ctx, 1) @@ -211,7 +214,7 @@ func (b *BridgeEndpoints) SponsorClaim(claim claimsponsor.Claim) (interface{}, r c, merr := b.meter.Int64Counter("sponsor_claim") if merr != nil { - log.Warnf("failed to create sponsor_claim counter: %s", merr) + b.logger.Warnf("failed to create sponsor_claim counter: %s", merr) } c.Add(ctx, 1) @@ -238,7 +241,7 @@ func (b *BridgeEndpoints) GetSponsoredClaimStatus(globalIndex *big.Int) (interfa c, merr := b.meter.Int64Counter("get_sponsored_claim_status") if merr != nil { - log.Warnf("failed to create get_sponsored_claim_status counter: %s", merr) + b.logger.Warnf("failed to create get_sponsored_claim_status counter: %s", merr) } c.Add(ctx, 1) diff --git a/sequencesender/txbuilder/banana_base.go b/sequencesender/txbuilder/banana_base.go index 1d00dd6a..a48efd23 100644 --- a/sequencesender/txbuilder/banana_base.go +++ b/sequencesender/txbuilder/banana_base.go @@ -128,7 +128,7 
@@ func (t *TxBuilderBananaBase) NewSequence( } accInputHash = cdkcommon.CalculateAccInputHash( - accInputHash, batch.L2Data, infoRootHash, timestamp, batch.LastCoinbase, blockHash, + t.logger, accInputHash, batch.L2Data, infoRootHash, timestamp, batch.LastCoinbase, blockHash, ) } From 3c03e9182e7f0f982ebb6747ecd939596ac544b2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= Date: Mon, 16 Sep 2024 12:49:49 +0200 Subject: [PATCH 16/17] fix: remove unused field write --- aggregator/db/migrations_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/aggregator/db/migrations_test.go b/aggregator/db/migrations_test.go index b688f341..0a118c69 100644 --- a/aggregator/db/migrations_test.go +++ b/aggregator/db/migrations_test.go @@ -11,7 +11,6 @@ func Test_checkMigrations(t *testing.T) { embedMigration := embedMigrations[AggregatorMigrationName] migrationSource := &migrate.EmbedFileSystemMigrationSource{ FileSystem: embedMigration, - Root: "migrations", } _, err := migrationSource.FileSystem.ReadFile("migrations/0001.sql") From 4346dc7a3869dca9665e8b59c3b635cef792f3a3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= Date: Mon, 16 Sep 2024 12:59:47 +0200 Subject: [PATCH 17/17] fix: lint --- aggregator/aggregator.go | 2 +- cmd/run.go | 5 ++++- sequencesender/sequencesender.go | 3 ++- 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/aggregator/aggregator.go b/aggregator/aggregator.go index eeecf561..6c033f9f 100644 --- a/aggregator/aggregator.go +++ b/aggregator/aggregator.go @@ -1729,7 +1729,7 @@ func (a *Aggregator) buildInputProver( forcedBlockhashL1 := common.Hash{} l1InfoRoot := batchToVerify.L1InfoRoot.Bytes() if !isForcedBatch { - tree, err := l1infotree.NewL1InfoTree(a.logger, 32, [][32]byte{}) // nolint:gomnd + tree, err := l1infotree.NewL1InfoTree(a.logger, 32, [][32]byte{}) //nolint:gomnd if err != nil { return nil, err } diff --git a/cmd/run.go b/cmd/run.go index de397ae7..1eb86839 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -541,7 +541,10 @@ func runReorgDetectorL1IfNeeded( l1Client *ethclient.Client, cfg *reorgdetector.Config, ) (*reorgdetector.ReorgDetector, chan error) { - if !isNeeded([]string{cdkcommon.SEQUENCE_SENDER, cdkcommon.AGGREGATOR, cdkcommon.AGGORACLE, cdkcommon.RPC}, components) { + if !isNeeded([]string{ + cdkcommon.SEQUENCE_SENDER, cdkcommon.AGGREGATOR, + cdkcommon.AGGORACLE, cdkcommon.RPC}, + components) { return nil, nil } rd := newReorgDetector(cfg, l1Client) diff --git a/sequencesender/sequencesender.go b/sequencesender/sequencesender.go index 8546d51d..263e729b 100644 --- a/sequencesender/sequencesender.go +++ b/sequencesender/sequencesender.go @@ -83,7 +83,8 @@ type ethTxAdditionalData struct { } // New inits sequence sender -func New(cfg Config, logger *log.Logger, etherman *etherman.Client, txBuilder txbuilder.TxBuilder) (*SequenceSender, error) { +func New(cfg Config, logger *log.Logger, + etherman *etherman.Client, txBuilder txbuilder.TxBuilder) (*SequenceSender, error) { // Create sequencesender s := SequenceSender{ cfg: cfg,