etrog: synchronizer-permissionless: first version working of sync with trusted state (0xPolygonHermez#2949)

- state: changed `ProcessingContextV2` to support `L1InfoTreeData` (all the leaves of the L1InfoTree used in a batch); see the sketch after this list
- state: removed debug logs from `encoding_batch_v2.go`
- test: upgraded Mockery from 2.22.1 to 2.39.0 to support the `max` built-in of Go 1.21
- synchronizer: tidied up interfaces; repeated interfaces have been grouped
- synchronizer: simplified generation of mocks (they no longer use the `mock_` prefix and live under the `mocks/` folder)
- synchronizer: changed the cache system of syncTrusted
- synchronizer: to know whether a trusted batch is closed, stop relying on an empty StateRoot and use the `Closed` field instead
- synchronizer: don't check the batch timestamp because the RPC is not providing the `creational_stamp` (issue 0xPolygonHermez#2953)
- synchronizer: calls to the executor now pass the `L1InfoTreeData`
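
A minimal sketch (not part of the commit) of how the reworked `ProcessingContextV2` can be filled in with `L1InfoTreeData`. Only the field names come from `state/batchV2.go` in this change; the helper name and all values are hypothetical:

package example

import (
	"time"

	"github.com/0xPolygonHermez/zkevm-node/state"
	"github.com/ethereum/go-ethereum/common"
)

// buildProcessingContextV2 is a hypothetical helper showing how the new fields
// fit together. Timestamp is now a pointer: a nil value lets processBatchV2
// fall back to time.Now() as the TimestampLimit.
func buildProcessingContextV2(batchNumber uint64, coinbase common.Address, l1InfoRoot common.Hash,
	batchL2Data []byte, leaves map[uint32]state.L1DataV2) state.ProcessingContextV2 {
	ts := time.Now()
	return state.ProcessingContextV2{
		BatchNumber:          batchNumber,
		Coinbase:             coinbase,
		Timestamp:            &ts,
		L1InfoRoot:           l1InfoRoot,
		L1InfoTreeData:       leaves, // all the L1InfoTree leaves used by the batch
		BatchL2Data:          &batchL2Data,
		SkipVerifyL1InfoRoot: 0,
	}
}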
joanestebanr authored Dec 21, 2023
1 parent 91ceee5 commit 4290837
Showing 46 changed files with 5,667 additions and 305 deletions.
68 changes: 27 additions & 41 deletions state/batchV2.go
@@ -25,33 +25,15 @@ var (
type ProcessingContextV2 struct {
BatchNumber uint64
Coinbase common.Address
Timestamp time.Time // Batch timeStamp and also TimestampLimit
L1InfoRoot L1InfoTreeExitRootStorageEntry
Timestamp *time.Time // Batch timeStamp and also TimestampLimit
L1InfoRoot common.Hash
L1InfoTreeData map[uint32]L1DataV2
ForcedBatchNum *uint64
BatchL2Data *[]byte
ForcedBlockHashL1 *common.Hash
SkipVerifyL1InfoRoot uint32
}

// ProcessSequencerBatchV2 is used by the sequencers to process transactions into an open batch for forkID >= ETROG
func (s *State) ProcessSequencerBatchV2(ctx context.Context, batchNumber uint64, batchL2Data []byte, l1InfoTree L1InfoTreeExitRootStorageEntry, caller metrics.CallerLabel, dbTx pgx.Tx) (*ProcessBatchResponse, error) {
log.Debugf("*******************************************")
log.Debugf("ProcessSequencerBatchV2 start")

processBatchResponse, err := s.processBatchV2(ctx, batchNumber, batchL2Data, nil, l1InfoTree, cTrue, nil, caller, dbTx)
if err != nil {
return nil, err
}

result, err := s.convertToProcessBatchResponseV2(processBatchResponse)
if err != nil {
return nil, err
}
log.Debugf("ProcessSequencerBatchV2 end")
log.Debugf("*******************************************")
return result, nil
}

// ProcessBatchV2 processes a batch for forkID >= ETROG
func (s *State) ProcessBatchV2(ctx context.Context, request ProcessRequest, updateMerkleTree bool) (*ProcessBatchResponse, error) {
log.Debugf("*******************************************")
@@ -194,7 +176,7 @@ func (s *State) ExecuteBatchV2(ctx context.Context, batch Batch, l1InfoTree L1In
return processBatchResponse, err
}

func (s *State) processBatchV2(ctx context.Context, batchNumber uint64, batchL2Data []byte, timestampLimit *time.Time, l1InfoTree L1InfoTreeExitRootStorageEntry, skipVerifyL1InfoRoot uint32, forcedBlockHashL1 *common.Hash, caller metrics.CallerLabel, dbTx pgx.Tx) (*executor.ProcessBatchResponseV2, error) {
func (s *State) processBatchV2(ctx context.Context, processingCtx *ProcessingContextV2, caller metrics.CallerLabel, dbTx pgx.Tx) (*executor.ProcessBatchResponseV2, error) {
if dbTx == nil {
return nil, ErrDBTxNil
}
@@ -216,7 +198,7 @@ func (s *State) processBatchV2(ctx context.Context, batchNumber uint64, batchL2D
previousBatch = lastBatches[1]
}

isBatchClosed, err := s.IsBatchClosed(ctx, batchNumber, dbTx)
isBatchClosed, err := s.IsBatchClosed(ctx, processingCtx.BatchNumber, dbTx)
if err != nil {
return nil, err
}
@@ -225,46 +207,51 @@ func (s *State) processBatchV2(ctx context.Context, batchNumber uint64, batchL2D
}

// Check provided batch number is the latest in db
if lastBatch.BatchNumber != batchNumber {
if lastBatch.BatchNumber != processingCtx.BatchNumber {
return nil, ErrInvalidBatchNumber
}
forkID := s.GetForkIDByBatchNumber(lastBatch.BatchNumber)

var timestampLimitUnix uint64
if timestampLimit != nil {
timestampLimitUnix = uint64(timestampLimit.Unix())
if processingCtx.Timestamp != nil {
timestampLimitUnix = uint64(processingCtx.Timestamp.Unix())
} else {
timestampLimitUnix = uint64(time.Now().Unix())
}
// Create Batch
processBatchRequest := &executor.ProcessBatchRequestV2{
OldBatchNum: lastBatch.BatchNumber - 1,
Coinbase: lastBatch.Coinbase.String(),
BatchL2Data: batchL2Data,
BatchL2Data: *processingCtx.BatchL2Data,
OldStateRoot: previousBatch.StateRoot.Bytes(),
OldAccInputHash: previousBatch.AccInputHash.Bytes(),
TimestampLimit: timestampLimitUnix,
UpdateMerkleTree: cTrue,
ChainId: s.cfg.ChainID,
ForkId: forkID,
ContextId: uuid.NewString(),
SkipVerifyL1InfoRoot: skipVerifyL1InfoRoot,
L1InfoRoot: l1InfoTree.L1InfoTreeRoot.Bytes(),
SkipVerifyL1InfoRoot: processingCtx.SkipVerifyL1InfoRoot,
L1InfoRoot: processingCtx.L1InfoRoot.Bytes(),
}

if forcedBlockHashL1 != nil {
log.Debug("Setting ForcedBlockhashL1: ", forcedBlockHashL1)
processBatchRequest.ForcedBlockhashL1 = forcedBlockHashL1.Bytes()
if processingCtx.ForcedBlockHashL1 != nil {
log.Debug("Setting ForcedBlockhashL1: ", processingCtx.ForcedBlockHashL1)
processBatchRequest.ForcedBlockhashL1 = processingCtx.ForcedBlockHashL1.Bytes()
} else {
processBatchRequest.L1InfoTreeData = map[uint32]*executor.L1DataV2{l1InfoTree.L1InfoTreeIndex: {
GlobalExitRoot: l1InfoTree.L1InfoTreeLeaf.GlobalExitRoot.GlobalExitRoot.Bytes(),
BlockHashL1: l1InfoTree.L1InfoTreeLeaf.PreviousBlockHash.Bytes(),
MinTimestamp: uint64(l1InfoTree.L1InfoTreeLeaf.GlobalExitRoot.Timestamp.Unix()),
}}
l1InfoTreeData := make(map[uint32]*executor.L1DataV2)

for k, v := range processingCtx.L1InfoTreeData {
l1InfoTreeData[k] = &executor.L1DataV2{
GlobalExitRoot: v.GlobalExitRoot.Bytes(),
BlockHashL1: v.BlockHashL1.Bytes(),
MinTimestamp: v.MinTimestamp,
}
}
processBatchRequest.L1InfoTreeData = l1InfoTreeData
}

if l1InfoTree.L1InfoTreeRoot != (common.Hash{}) {
processBatchRequest.L1InfoRoot = l1InfoTree.L1InfoTreeRoot.Bytes()
if processingCtx.L1InfoRoot != (common.Hash{}) {
processBatchRequest.L1InfoRoot = processingCtx.L1InfoRoot.Bytes()
} else {
currentl1InfoRoot := s.GetCurrentL1InfoRoot()
processBatchRequest.L1InfoRoot = currentl1InfoRoot.Bytes()
@@ -401,8 +388,7 @@ func (s *State) ProcessAndStoreClosedBatchV2(ctx context.Context, processingCtx
log.Errorf("%s error OpenBatch: %v", debugPrefix, err)
return common.Hash{}, noFlushID, noProverID, err
}
processed, err := s.processBatchV2(ctx, processingCtx.BatchNumber, *BatchL2Data,
&processingCtx.Timestamp, processingCtx.L1InfoRoot, processingCtx.SkipVerifyL1InfoRoot, processingCtx.ForcedBlockHashL1, caller, dbTx)
processed, err := s.processBatchV2(ctx, &processingCtx, caller, dbTx)
if err != nil {
log.Errorf("%s error processBatchV2: %v", debugPrefix, err)
return common.Hash{}, noFlushID, noProverID, err
6 changes: 5 additions & 1 deletion state/convertersV2.go
@@ -348,12 +348,16 @@ func convertToKeys(keys [][]byte) []merkletree.Key {
}

func convertProcessingContext(p *ProcessingContextV2) (*ProcessingContext, error) {
tstamp := time.Time{}
if p.Timestamp != nil {
tstamp = *p.Timestamp
}
result := ProcessingContext{
BatchNumber: p.BatchNumber,
Coinbase: p.Coinbase,
ForcedBatchNum: p.ForcedBatchNum,
BatchL2Data: p.BatchL2Data,
Timestamp: p.Timestamp,
Timestamp: tstamp,
}
return &result, nil
}
4 changes: 0 additions & 4 deletions state/encoding_batch_v2.go
@@ -151,10 +151,8 @@ func DecodeBatchV2(txsData []byte) (*BatchRawV2, error) {
var currentBlock *L2BlockRaw
pos := int(0)
for pos < len(txsData) {
log.Debugf("pos: %d, data[]=%d pendingbytes:%d", pos, txsData[pos], len(txsData)-pos)
switch txsData[pos] {
case changeL2Block:
log.Debugf("pos: %d, changeL2Block", pos)
if currentBlock != nil {
blocks = append(blocks, *currentBlock)
}
@@ -165,7 +163,6 @@ func DecodeBatchV2(txsData []byte) (*BatchRawV2, error) {
// By RLP definition a tx never starts with 0x0b, so if it is not a changeL2Block
// it is a tx
default:
log.Debugf("pos: %d, Transaction", pos)
if currentBlock == nil {
_, _, err := decodeTxRLP(txsData, pos)
if err == nil {
@@ -236,7 +233,6 @@ func decodeTxRLP(txsData []byte, offset int) (int, *L2TxRaw, error) {
if err != nil {
return 0, nil, fmt.Errorf("can't get RLP length (offset=%d): %w", offset, err)
}
log.Debugf("length: %d (offset=%d)", length, offset)
endPos := uint64(offset) + length + rLength + sLength + vLength + EfficiencyPercentageByteLength
if endPos > uint64(len(txsData)) {
return 0, nil, fmt.Errorf("can't get tx because not enough data (endPos=%d lenData=%d): %w",
22 changes: 12 additions & 10 deletions synchronizer/actions/etrog/processor_l1_sequence_batches.go
@@ -17,6 +17,7 @@ import (
"github.com/0xPolygonHermez/zkevm-node/state/runtime/executor"
"github.com/0xPolygonHermez/zkevm-node/synchronizer/actions"
syncCommon "github.com/0xPolygonHermez/zkevm-node/synchronizer/common"
"github.com/0xPolygonHermez/zkevm-node/synchronizer/common/syncinterfaces"
"github.com/ethereum/go-ethereum/common"
ethTypes "github.com/ethereum/go-ethereum/core/types"
"github.com/jackc/pgx/v4"
@@ -33,8 +34,7 @@ type stateProcessSequenceBatches interface {
AddVirtualBatch(ctx context.Context, virtualBatch *state.VirtualBatch, dbTx pgx.Tx) error
AddTrustedReorg(ctx context.Context, trustedReorg *state.TrustedReorg, dbTx pgx.Tx) error
GetReorgedTransactions(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) ([]*ethTypes.Transaction, error)
GetCurrentL1InfoRoot() common.Hash
GetL1InfoRootLeafByL1InfoRoot(ctx context.Context, l1InfoRoot common.Hash, dbTx pgx.Tx) (state.L1InfoTreeExitRootStorageEntry, error)
GetL1InfoTreeDataFromBatchL2Data(ctx context.Context, batchL2Data []byte, dbTx pgx.Tx) (map[uint32]state.L1DataV2, common.Hash, error)
}

type ethermanProcessSequenceBatches interface {
@@ -58,7 +58,7 @@ type ProcessorL1SequenceBatchesEtrog struct {
state stateProcessSequenceBatches
etherMan ethermanProcessSequenceBatches
pool poolProcessSequenceBatchesInterface
eventLog *event.EventLog
eventLog syncinterfaces.EventLogInterface
sync syncProcessSequenceBatchesInterface
timeProvider syncCommon.TimeProvider
}
@@ -67,7 +67,7 @@ type ProcessorL1SequenceBatchesEtrog struct {
func NewProcessorL1SequenceBatches(state stateProcessSequenceBatches,
etherMan ethermanProcessSequenceBatches,
pool poolProcessSequenceBatchesInterface,
eventLog *event.EventLog,
eventLog syncinterfaces.EventLogInterface,
sync syncProcessSequenceBatchesInterface,
timeProvider syncCommon.TimeProvider) *ProcessorL1SequenceBatchesEtrog {
return &ProcessorL1SequenceBatchesEtrog{
@@ -186,17 +186,18 @@ func (g *ProcessorL1SequenceBatchesEtrog) processSequenceBatches(ctx context.Con
},
L1InfoTreeRoot: sbatch.PolygonRollupBaseEtrogBatchData.ForcedGlobalExitRoot,
}
tstampLimit := time.Unix(int64(sbatch.PolygonRollupBaseEtrogBatchData.ForcedTimestamp), 0)
processCtx = state.ProcessingContextV2{
BatchNumber: 1,
Coinbase: sbatch.SequencerAddr,
Timestamp: time.Unix(int64(sbatch.PolygonRollupBaseEtrogBatchData.ForcedTimestamp), 0),
L1InfoRoot: currentL1InfoTree,
Timestamp: &tstampLimit,
L1InfoRoot: sbatch.PolygonRollupBaseEtrogBatchData.ForcedGlobalExitRoot,
BatchL2Data: &txs,
ForcedBlockHashL1: forcedBlockHashL1,
SkipVerifyL1InfoRoot: 1,
}
} else {
currentL1InfoTree, err = g.state.GetL1InfoRootLeafByL1InfoRoot(ctx, *sbatch.L1InfoRoot, dbTx)
leafs, l1InfoRoot, err := g.state.GetL1InfoTreeDataFromBatchL2Data(ctx, batch.BatchL2Data, dbTx)
if err != nil {
log.Errorf("error getting L1InfoRootLeafByL1InfoRoot. sbatch.L1InfoRoot: %v", *sbatch.L1InfoRoot)
rollbackErr := dbTx.Rollback(ctx)
Expand All @@ -209,8 +210,9 @@ func (g *ProcessorL1SequenceBatchesEtrog) processSequenceBatches(ctx context.Con
processCtx = state.ProcessingContextV2{
BatchNumber: batch.BatchNumber,
Coinbase: batch.Coinbase,
Timestamp: batch.Timestamp,
L1InfoRoot: currentL1InfoTree,
Timestamp: &batch.Timestamp,
L1InfoRoot: l1InfoRoot,
L1InfoTreeData: leafs,
ForcedBatchNum: batch.ForcedBatchNum,
BatchL2Data: &batch.BatchL2Data,
SkipVerifyL1InfoRoot: 0,
@@ -254,7 +256,7 @@ func (g *ProcessorL1SequenceBatchesEtrog) processSequenceBatches(ctx context.Con
} else {
// Reprocess batch to compare the stateRoot with tBatch.StateRoot and get accInputHash
var skipVerifyL1InfoRoot uint32 = 0 // false
p, err := g.state.ExecuteBatchV2(ctx, batch, currentL1InfoTree, processCtx.Timestamp, false, skipVerifyL1InfoRoot, processCtx.ForcedBlockHashL1, dbTx)
p, err := g.state.ExecuteBatchV2(ctx, batch, currentL1InfoTree, *processCtx.Timestamp, false, skipVerifyL1InfoRoot, processCtx.ForcedBlockHashL1, dbTx)
if err != nil {
log.Errorf("error executing L1 batch: %+v, error: %v", batch, err)
rollbackErr := dbTx.Rollback(ctx)
@@ -16,6 +16,7 @@ import (
stateMetrics "github.com/0xPolygonHermez/zkevm-node/state/metrics"
"github.com/0xPolygonHermez/zkevm-node/state/runtime/executor"
"github.com/0xPolygonHermez/zkevm-node/synchronizer/actions"
"github.com/0xPolygonHermez/zkevm-node/synchronizer/common/syncinterfaces"
"github.com/ethereum/go-ethereum/common"
ethTypes "github.com/ethereum/go-ethereum/core/types"
"github.com/jackc/pgx/v4"
@@ -55,13 +56,13 @@ type ProcessorL1SequenceBatches struct {
state stateProcessSequenceBatches
etherMan ethermanProcessSequenceBatches
pool poolProcessSequenceBatchesInterface
eventLog *event.EventLog
eventLog syncinterfaces.EventLogInterface
sync syncProcessSequenceBatchesInterface
}

// NewProcessorL1SequenceBatches returns instance of a processor for SequenceBatchesOrder
func NewProcessorL1SequenceBatches(state stateProcessSequenceBatches,
etherMan ethermanProcessSequenceBatches, pool poolProcessSequenceBatchesInterface, eventLog *event.EventLog, sync syncProcessSequenceBatchesInterface) *ProcessorL1SequenceBatches {
etherMan ethermanProcessSequenceBatches, pool poolProcessSequenceBatchesInterface, eventLog syncinterfaces.EventLogInterface, sync syncProcessSequenceBatchesInterface) *ProcessorL1SequenceBatches {
return &ProcessorL1SequenceBatches{
ProcessorBase: actions.ProcessorBase[ProcessorL1SequenceBatches]{
SupportedEvent: []etherman.EventOrder{etherman.SequenceBatchesOrder},
9 changes: 9 additions & 0 deletions synchronizer/common/generic_cache.go
@@ -43,6 +43,15 @@ func (c *Cache[K, T]) Get(key K) (T, bool) {
return item.value, true
}

// GetOrDefault returns the value for the key, or defaultValue if the key does not exist or is outdated
func (c *Cache[K, T]) GetOrDefault(key K, defaultValue T) T {
item, ok := c.Get(key)
if !ok {
return defaultValue
}
return item
}

// Set sets the value of the key
func (c *Cache[K, T]) Set(key K, value T) {
c.data[key] = cacheItem[T]{value: value, validTime: c.timerProvider.Now().Add(c.timeOfLiveItems)}
22 changes: 22 additions & 0 deletions synchronizer/common/generic_cache_test.go
@@ -29,6 +29,28 @@ func TestCacheGet(t *testing.T) {
assert.False(t, ok)
}

func TestCacheGetOrDefault(t *testing.T) {
noExistsString := "no_exists"
timerProvider := &MockTimerProvider{}
cache := NewCache[string, string](timerProvider, time.Hour)

// Add an item to the cache
cache.Set("key1", "value1")

// Test that the item can be retrieved from the cache
value := cache.GetOrDefault("key1", noExistsString)
assert.Equal(t, "value1", value)

// Test that a key that doesn't exist in the cache returns the default value
value = cache.GetOrDefault("key2", noExistsString)
assert.Equal(t, noExistsString, value)

// Test that an expired item returns the default value
timerProvider.now = time.Now().Add(2 * time.Hour)
value = cache.GetOrDefault("key1", noExistsString)
assert.Equal(t, noExistsString, value)
}

func TestCacheSet(t *testing.T) {
timerProvider := &MockTimerProvider{}
cache := NewCache[string, string](timerProvider, time.Hour)
42 changes: 42 additions & 0 deletions synchronizer/common/log_helper.go
@@ -0,0 +1,42 @@
package common

import (
"fmt"

"github.com/ethereum/go-ethereum/common"
)

// LogComparedBytes returns a string showing the bytes of two []byte slices around the first byte that differs, with some context before and after
func LogComparedBytes(name1 string, name2 string, data1 []byte, data2 []byte, numBytesBefore int, numBytesAfter int) string {
firstDiff := findFirstByteDifferent(data1, data2)
if firstDiff == -1 {
return fmt.Sprintf("%s(%d) and %s(%d) are equal", name1, len(data1), name2, len(data2))
}
res := name1 + fmt.Sprintf("(%d)", len(data1)) + ": " + strSliceBytes(data1, firstDiff, numBytesBefore, numBytesAfter) + "\n"
res += name2 + fmt.Sprintf("(%d)", len(data2)) + ": " + strSliceBytes(data2, firstDiff, numBytesBefore, numBytesAfter)
return res
}

func strSliceBytes(data []byte, point int, before int, after int) string {
res := ""
startingPoint := max(0, point-before)
if startingPoint > 0 {
res += fmt.Sprintf("(%d)...", startingPoint)
}
endPoint := min(len(data), point+after)
res += fmt.Sprintf("%s*%s", common.Bytes2Hex(data[startingPoint:point]), common.Bytes2Hex(data[point:endPoint]))

if endPoint < len(data) {
res += fmt.Sprintf("...(%d)", len(data)-endPoint)
}
return res
}

func findFirstByteDifferent(data1 []byte, data2 []byte) int {
// Iterate only over the common prefix to avoid an out-of-range access
// when the slices have different lengths.
minLen := min(len(data1), len(data2))
for i := 0; i < minLen; i++ {
if data1[i] != data2[i] {
return i
}
}
if len(data1) != len(data2) {
// One slice is a prefix of the other: the first difference is just past the common prefix.
return minLen
}
return -1
}
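
A small usage sketch of the new helper; the wrapper program and the example byte slices below are illustrative only, not part of the commit:

package main

import (
	"fmt"

	syncCommon "github.com/0xPolygonHermez/zkevm-node/synchronizer/common"
)

func main() {
	trusted := []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06}
	local := []byte{0x01, 0x02, 0xff, 0x04, 0x05, 0x06}
	// Prints both slices around the first differing byte (index 2 here),
	// keeping two bytes of context before and after the difference.
	fmt.Println(syncCommon.LogComparedBytes("trusted", "local", trusted, local, 2, 2))
}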