add: witness stage
anshalshukla committed Mar 19, 2024
1 parent 649bce8 commit 7f0c9f2
Showing 13 changed files with 825 additions and 284 deletions.
25 changes: 14 additions & 11 deletions cmd/rpcdaemon/commands/zkevm_api.go
@@ -26,8 +26,8 @@ import (
"github.com/ledgerwatch/erigon/zk/legacy_executor_verifier"
types "github.com/ledgerwatch/erigon/zk/rpcdaemon"
"github.com/ledgerwatch/erigon/zk/sequencer"
zkstages "github.com/ledgerwatch/erigon/zk/stages"
"github.com/ledgerwatch/erigon/zk/syncer"
"github.com/ledgerwatch/erigon/zk/witness"
"github.com/ledgerwatch/erigon/zkevm/hex"
"github.com/ledgerwatch/erigon/zkevm/jsonrpc/client"
)
@@ -360,7 +360,7 @@ func (api *ZkEvmAPIImpl) getBlockRangeWitness(ctx context.Context, db kv.RoDB, s
return nil, err
}

generator := witness.NewGenerator(
generator := zkstages.NewWitnessGenerator(
api.ethApi.dirs,
api.ethApi.historyV3(tx),
api.ethApi._agg,
@@ -379,22 +379,25 @@ func (api *ZkEvmAPIImpl) GetBatchWitness(ctx context.Context, batchNumber uint64
return nil, err
}

blocks, err := getAllBlocksInBatchNumber(tx, batchNumber)

hermezDb := hermez_db.NewHermezDbReader(tx)
w, err := hermezDb.GetWitnessByBatchNo(batchNumber)
if err != nil {
tx.Rollback()
return nil, err
}

tx.Rollback()
if w == nil {
var blocks []uint64
blocks, err = getAllBlocksInBatchNumber(tx, batchNumber)
if err != nil {
return nil, err
}

if len(blocks) == 0 {
return nil, errors.New("batch not found")
endBlock := rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(blocks[0]))
startBlock := rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(blocks[len(blocks)-1]))
w, err = api.getBlockRangeWitness(ctx, api.db, startBlock, endBlock, false)
}

endBlock := rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(blocks[0]))
startBlock := rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(blocks[len(blocks)-1]))
return api.getBlockRangeWitness(ctx, api.db, startBlock, endBlock, false)
return w, err
}

func (api *ZkEvmAPIImpl) GetProverInput(ctx context.Context, batchNumber uint64, debug *bool) (*legacy_executor_verifier.RpcPayload, error) {
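For context, here is a minimal, self-contained Go sketch (not part of this commit) of how a client might call the batch-witness endpoint over JSON-RPC once a node with this change is running. The method name zkevm_getBatchWitness, the node URL, and the batch number are assumptions made for illustration; check the node's RPC documentation for the names it actually exposes.

package main

import (
    "bytes"
    "encoding/json"
    "fmt"
    "net/http"
)

// rpcRequest is a minimal JSON-RPC 2.0 request envelope.
type rpcRequest struct {
    JSONRPC string        `json:"jsonrpc"`
    Method  string        `json:"method"`
    Params  []interface{} `json:"params"`
    ID      int           `json:"id"`
}

func main() {
    // Assumptions: the handler is exposed as zkevm_getBatchWitness and the
    // node listens on localhost:8545; adjust both for a real deployment.
    req := rpcRequest{
        JSONRPC: "2.0",
        Method:  "zkevm_getBatchWitness",
        Params:  []interface{}{uint64(5000)}, // batch number (illustrative)
        ID:      1,
    }

    body, err := json.Marshal(req)
    if err != nil {
        panic(err)
    }

    resp, err := http.Post("http://localhost:8545", "application/json", bytes.NewReader(body))
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()

    var out struct {
        Result json.RawMessage `json:"result"`
        Error  *struct {
            Code    int    `json:"code"`
            Message string `json:"message"`
        } `json:"error"`
    }
    if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
        panic(err)
    }
    if out.Error != nil {
        fmt.Printf("rpc error %d: %s\n", out.Error.Code, out.Error.Message)
        return
    }
    fmt.Printf("witness: %s\n", out.Result)
}

With this commit, the handler behind that call first looks for a cached witness via GetWitnessByBatchNo and only regenerates it over the batch's block range when nothing is stored.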
16 changes: 16 additions & 0 deletions cmd/utils/flags.go
@@ -472,6 +472,16 @@ var (
Usage: "Allow the sequencer to proceed pre-EIP155 transactions",
Value: false,
}
WitnessStageFlag = cli.BoolFlag{
Name: "zkevm.witness-stage",
Usage: "Enable/Diable witness stage",
Value: true,
}
StoreNblocksWitness = cli.Uint64Flag{
Name: "zkevm.store-n-blocks-witness",
Usage: "Stores last N blocks witness in db",
Value: 1000,
}
RpcBatchConcurrencyFlag = cli.UintFlag{
Name: "rpc.batch.concurrency",
Usage: "Does limit amount of goroutines to process 1 batch request. Means 1 bach request can't overload server. 1 batch still can have unlimited amount of request",
@@ -1256,6 +1266,7 @@ func SetNodeConfig(ctx *cli.Context, cfg *nodecfg.Config) {
setDataDir(ctx, cfg)
setNodeUserIdent(ctx, cfg)
SetP2PConfig(ctx, &cfg.P2P, cfg.NodeName(), cfg.Dirs.DataDir)
setWitnessStageConfig(ctx, cfg)

cfg.SentryLogPeerInfo = ctx.IsSet(SentryLogPeerInfoFlag.Name)
}
@@ -1291,6 +1302,11 @@ func setDataDir(ctx *cli.Context, cfg *nodecfg.Config) {
}
}

func setWitnessStageConfig(ctx *cli.Context, cfg *nodecfg.Config) {
cfg.WitnessStage = ctx.Bool(WitnessStageFlag.Name)
cfg.StoreNblocksWitness = ctx.Uint64(StoreNblocksWitness.Name)
}

func isPowerOfTwo(n uint64) bool {
if n == 0 { //corner case: if n is zero it will also consider as power 2
return true
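Since setWitnessStageConfig reads the new flags back out of the cli context, here is a small stand-alone sketch of the flag-by-Name pattern it relies on: flags are registered once and later looked up via their Name field, not via GetValue(). The github.com/urfave/cli/v2 import path and the defaults shown are assumptions for illustration, not the project's wiring.

package main

import (
    "fmt"
    "log"
    "os"

    "github.com/urfave/cli/v2"
)

var (
    // Shaped like the flags added in this commit; names and defaults here are illustrative.
    witnessStageFlag = cli.BoolFlag{
        Name:  "zkevm.witness-stage",
        Usage: "Enable/Disable witness stage",
        Value: true,
    }
    storeNBlocksWitnessFlag = cli.Uint64Flag{
        Name:  "zkevm.store-n-blocks-witness",
        Usage: "Stores the witnesses of the last N blocks in the db",
        Value: 1000,
    }
)

func main() {
    app := &cli.App{
        Flags: []cli.Flag{&witnessStageFlag, &storeNBlocksWitnessFlag},
        Action: func(ctx *cli.Context) error {
            // Values are read back by the flag's Name, not via GetValue().
            enabled := ctx.Bool(witnessStageFlag.Name)
            keep := ctx.Uint64(storeNBlocksWitnessFlag.Name)
            fmt.Printf("witness stage enabled=%v, keeping witnesses for the last %d blocks\n", enabled, keep)
            return nil
        },
    }
    if err := app.Run(os.Args); err != nil {
        log.Fatal(err)
    }
}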
3 changes: 1 addition & 2 deletions eth/backend.go
@@ -117,7 +117,6 @@ import (
zkStages "github.com/ledgerwatch/erigon/zk/stages"
"github.com/ledgerwatch/erigon/zk/syncer"
txpool2 "github.com/ledgerwatch/erigon/zk/txpool"
"github.com/ledgerwatch/erigon/zk/witness"
"github.com/ledgerwatch/erigon/zkevm/etherman"
)

@@ -741,7 +740,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
cfg.L1QueryDelay,
)

witnessGenerator := witness.NewGenerator(
witnessGenerator := zkStages.NewWitnessGenerator(
config.Dirs,
config.HistoryV3,
backend.agg,
3 changes: 3 additions & 0 deletions eth/ethconfig/config_zkevm.go
@@ -27,6 +27,9 @@ type Zk struct {
AllowPreEIP155Transactions bool

RebuildTreeAfter uint64

WitnessStage bool
StoreNblocksWitness uint64
}

var DefaultZkConfig = &Zk{}
1 change: 1 addition & 0 deletions eth/stagedsync/stages/stages_zk.go
@@ -21,6 +21,7 @@ var (
L1Syncer SyncStage = "L1Syncer"
L1VerificationsBatchNo SyncStage = "L1VerificationsBatchNo"
Batches SyncStage = "Batches"
Witness SyncStage = "Witness"
HighestHashableL2BlockNo SyncStage = "HighestHashableL2BlockNo"
HighestSeenBatchNumber SyncStage = "HighestSeenBatchNumber"
VerificationsStateRootCheck SyncStage = "VerificationStateRootCheck"
4 changes: 4 additions & 0 deletions node/nodecfg/config.go
@@ -170,6 +170,10 @@ type Config struct {
HealthCheck bool

Http httpcfg.HttpCfg

// Witness stage
WitnessStage bool
StoreNblocksWitness uint64
}

// IPCEndpoint resolves an IPC endpoint based on a configured value, taking into
2 changes: 2 additions & 0 deletions turbo/cli/default_flags.go
@@ -189,4 +189,6 @@ var DefaultFlags = []cli.Flag{
&utils.AllowPreEIP155Transactions,
&utils.DataStreamHost,
&utils.DataStreamPort,
&utils.WitnessStageFlag,
&utils.StoreNblocksWitness,
}
2 changes: 2 additions & 0 deletions turbo/cli/flags_zkevm.go
@@ -50,6 +50,8 @@ func ApplyFlagsForZkConfig(ctx *cli.Context, cfg *ethconfig.Config) {
ExecutorStrictMode: ctx.Bool(utils.ExecutorStrictMode.Name),
AllowFreeTransactions: ctx.Bool(utils.AllowFreeTransactions.Name),
AllowPreEIP155Transactions: ctx.Bool(utils.AllowPreEIP155Transactions.Name),
WitnessStage: ctx.Bool(utils.WitnessStageFlag.Name),
StoreNblocksWitness: ctx.Uint64(utils.StoreNblocksWitness.Name),
}

checkFlag(utils.L2ChainIdFlag.Name, cfg.Zk.L2ChainId)
2 changes: 2 additions & 0 deletions turbo/stages/zk_stages.go
@@ -72,6 +72,7 @@ func NewDefaultZkStages(ctx context.Context,
agg,
cfg.Zk,
),
zkStages.GenerateWitnessCfg(db, dirs, blockReader, engine, controlServer.ChainConfig, agg, cfg.HistoryV3, cfg.Zk.WitnessStage, cfg.Zk.StoreNblocksWitness),
stagedsync.StageHashStateCfg(db, dirs, cfg.HistoryV3, agg),
zkStages.StageZkInterHashesCfg(db, true, true, false, dirs.Tmp, blockReader, controlServer.Hd, cfg.HistoryV3, agg, cfg.Zk),
stagedsync.StageHistoryCfg(db, cfg.Prune, dirs.Tmp),
@@ -132,6 +133,7 @@ func NewSequencerZkStages(ctx context.Context,
txPool,
txPoolDb,
),
zkStages.GenerateWitnessCfg(db, dirs, blockReader, engine, controlServer.ChainConfig, agg, cfg.HistoryV3, cfg.Zk.WitnessStage, cfg.Zk.StoreNblocksWitness),
stagedsync.StageHashStateCfg(db, dirs, cfg.HistoryV3, agg),
zkStages.StageZkInterHashesCfg(db, true, true, false, dirs.Tmp, blockReader, controlServer.Hd, cfg.HistoryV3, agg, cfg.Zk),
zkStages.StageSequencerExecutorVerifyCfg(db, verifier),
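The GenerateWitnessCfg calls above thread cfg.Zk.StoreNblocksWitness into the new stage. One plausible way such a retention setting could be applied is sketched below, purely for illustration, against the batch-witness helpers added to zk/hermez_db/db.go further down in this diff; the witnessStore interface, the pruneWitnesses function, the in-memory fake, and the blocks-to-batches simplification are all assumptions, not the stage's actual code.

package main

import "fmt"

// witnessStore lists just the helpers this sketch needs; the HermezDb methods
// added in this diff share these names and signatures, but the interface is
// only an illustration.
type witnessStore interface {
    GetLatestStoredWitnessBatchNo() (uint64, error)
    GetOldestStoredWitnessBatch() (uint64, error)
    DeleteWitnessByBatchRange(fromBatch, toBatch uint64) error
}

// pruneWitnesses keeps roughly the newest `keep` batch witnesses and deletes
// the rest. The real stage is configured in blocks (zkevm.store-n-blocks-witness);
// mapping blocks to batches is omitted here.
func pruneWitnesses(db witnessStore, keep uint64) error {
    latest, err := db.GetLatestStoredWitnessBatchNo()
    if err != nil {
        return err
    }
    oldest, err := db.GetOldestStoredWitnessBatch()
    if err != nil {
        return err
    }
    if latest <= keep {
        return nil // nothing old enough to delete
    }
    cutoff := latest - keep
    if oldest > cutoff {
        return nil // already within the retention window
    }
    return db.DeleteWitnessByBatchRange(oldest, cutoff)
}

// fakeStore is a tiny in-memory stand-in so the sketch runs on its own.
type fakeStore struct{ witnesses map[uint64][]byte }

func (f *fakeStore) GetLatestStoredWitnessBatchNo() (uint64, error) {
    var latest uint64
    for b := range f.witnesses {
        if b > latest {
            latest = b
        }
    }
    return latest, nil
}

func (f *fakeStore) GetOldestStoredWitnessBatch() (uint64, error) {
    first := true
    var oldest uint64
    for b := range f.witnesses {
        if first || b < oldest {
            oldest, first = b, false
        }
    }
    return oldest, nil
}

func (f *fakeStore) DeleteWitnessByBatchRange(from, to uint64) error {
    for b := from; b <= to; b++ {
        delete(f.witnesses, b)
    }
    return nil
}

func main() {
    store := &fakeStore{witnesses: map[uint64][]byte{}}
    for b := uint64(1); b <= 20; b++ {
        store.witnesses[b] = []byte("witness")
    }
    if err := pruneWitnesses(store, 5); err != nil {
        panic(err)
    }
    fmt.Printf("%d witnesses left\n", len(store.witnesses)) // expect 5 (batches 16-20)
}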
189 changes: 189 additions & 0 deletions zk/hermez_db/db.go
@@ -2,6 +2,7 @@ package hermez_db

import (
"fmt"
"strconv"

"github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/kv"
@@ -29,6 +30,9 @@ const L1_BLOCK_HASHES = "l1_block_hashes" // l1 blo
const BLOCK_L1_BLOCK_HASHES = "block_l1_block_hashes" // block number -> l1 block hash
const L1_BLOCK_HASH_GER = "l1_block_hash_ger" // l1 block hash -> GER
const INTERMEDIATE_TX_STATEROOTS = "hermez_intermediate_tx_stateRoots" // l2blockno -> stateRoot
const BATCH_WITNESS = "batch_witness" // batch witness -> witness

const chunkSize = 100000 // 100KB

type HermezDb struct {
tx kv.RwTx
@@ -72,6 +76,7 @@ func CreateHermezBuckets(tx kv.RwTx) error {
BLOCK_L1_BLOCK_HASHES,
L1_BLOCK_HASH_GER,
INTERMEDIATE_TX_STATEROOTS,
BATCH_WITNESS,
}
for _, t := range tables {
if err := tx.CreateBucket(t); err != nil {
@@ -158,6 +163,26 @@ func (db *HermezDbReader) GetHighestBlockInBatch(batchNo uint64) (uint64, error)
return max, nil
}

func (db *HermezDbReader) GetLowestBlockInBatch(batchNo uint64) (uint64, error) {
blocks, err := db.GetL2BlockNosByBatch(batchNo)
if err != nil {
return 0, err
}

min := uint64(0)
if len(blocks) > 0 {
min = blocks[0]
}

for _, block := range blocks {
if block < min {
min = block
}
}

return min, nil
}

func (db *HermezDbReader) GetHighestVerifiedBlockNo() (uint64, error) {
v, err := db.GetLatestVerification()
if err != nil {
@@ -849,3 +874,167 @@ func (db *HermezDbReader) GetBlockInfoRoot(blockNumber uint64) (common.Hash, err
res := common.BytesToHash(data)
return res, nil
}

func (db *HermezDbReader) GetWitnessByBatchNo(batchNo uint64) ([]byte, error) {
w, err := ReadChunks(db.tx, BATCH_WITNESS, Uint64ToBytes(batchNo))
if err != nil {
return nil, err
}

return w, nil
}

func (db *HermezDbReader) GetLatestStoredWitnessBatchNo() (uint64, error) {
cursor, err := db.tx.Cursor(BATCH_WITNESS)
if err != nil {
return 0, err
}

k, _, err := cursor.Last()
if err != nil {
return 0, err
}

return BytesToUint64(k), nil
}

func (db *HermezDbReader) GetOldestStoredWitnessBatch() (uint64, error) {
cursor, err := db.tx.Cursor(BATCH_WITNESS)
if err != nil {
return 0, err
}

key, _, err := cursor.First()
if err != nil {
return 0, err
}

return BytesToUint64(key), nil
}

func (db *HermezDb) WriteWitnessByBatchNo(batchNo uint64, w []byte) error {
return WriteChunks(db.tx, BATCH_WITNESS, Uint64ToBytes(batchNo), w)
}

func (db *HermezDb) DeleteWitnessByBatchNo(batchNo uint64) error {
highestBlock, err := db.GetHighestBlockInBatch(batchNo)
if err != nil {
return err
}
// If the highest block is 0, it implies the batch is not present
if highestBlock == 0 {
return nil
}

return DeleteChunks(db.tx, BATCH_WITNESS, Uint64ToBytes(batchNo))
}

func (db *HermezDb) DeleteWitnessByBatchRange(fromBatch uint64, toBatch uint64) error {
cursor, err := db.tx.Cursor(BATCH_WITNESS)
if err != nil {
return err
}

fromBatchByte := append(Uint64ToBytes(fromBatch), []byte("_chunk_0")...)
cursor.Seek(fromBatchByte)
for {
k, _, err := cursor.Current()
if err != nil {
return err
}

batchNo := BytesToUint64(k)
if batchNo > toBatch {
break
}

err = db.DeleteWitnessByBatchNo(batchNo)
if err != nil {
return err
}

cursor.Next()
}

return nil
}

func (db *HermezDb) DeleteWitnessTillBatchNo(toBatch uint64) error {
cursor, err := db.tx.Cursor(BATCH_WITNESS)
if err != nil {
return err
}

k, _, err := cursor.First()
if err != nil {
return err
}

fromBatch := BytesToUint64(k)
return db.DeleteWitnessByBatchRange(fromBatch, toBatch)
}

func WriteChunks(tx kv.RwTx, tableName string, key []byte, valueBytes []byte) error {
// Split the valueBytes into chunks and write each chunk
for i := 0; i < len(valueBytes); i += chunkSize {
end := i + chunkSize
if end > len(valueBytes) {
end = len(valueBytes)
}
chunk := valueBytes[i:end]
chunkKey := append(key, []byte("_chunk_"+strconv.Itoa(i/chunkSize))...)

// Write each chunk to the KV store
if err := tx.Put(tableName, chunkKey, chunk); err != nil {
return err
}
}

return nil
}

func ReadChunks(tx kv.Tx, tableName string, key []byte) ([]byte, error) {
// Initialize a buffer to store the concatenated chunks
var result []byte

// Retrieve and concatenate each chunk
for i := 0; ; i++ {
chunkKey := append(key, []byte("_chunk_"+strconv.Itoa(i))...)
chunk, err := tx.GetOne(tableName, chunkKey)
if err != nil {
return nil, err
}

// Check if this is the last chunk
if len(chunk) == 0 {
break
}

// Append the chunk to the result
result = append(result, chunk...)
}

return result, nil
}

func DeleteChunks(tx kv.RwTx, tableName string, key []byte) error {
for i := 0; ; i++ {
chunkKey := append(key, []byte("_chunk_"+strconv.Itoa(i))...)
chunk, err := tx.GetOne(tableName, chunkKey)
if err != nil {
return err
}

err = tx.Delete(tableName, chunkKey)
if err != nil {
return err
}

// Check if this is the last chunk
if len(chunk) == 0 {
break
}
}

return nil
}
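The chunk helpers above store each witness as a series of <key>_chunk_<i> entries of at most chunkSize bytes and stop reading at the first missing chunk. Below is a stand-alone sketch of the same split/join scheme over a plain map, using only the standard library; the 8-byte big-endian key encoding is an assumption about what Uint64ToBytes produces. It is useful for seeing how the key layout and the termination rule fit together.

package main

import (
    "bytes"
    "encoding/binary"
    "fmt"
    "strconv"
)

const chunkSize = 100000 // mirrors hermez_db's chunkSize (~100KB per chunk)

// uint64ToBytes mimics the 8-byte big-endian key encoding assumed for batch numbers.
func uint64ToBytes(n uint64) []byte {
    b := make([]byte, 8)
    binary.BigEndian.PutUint64(b, n)
    return b
}

// writeChunks splits value into chunkSize pieces stored under
// <key>_chunk_0, <key>_chunk_1, ...
func writeChunks(store map[string][]byte, key, value []byte) {
    for i := 0; i < len(value); i += chunkSize {
        end := i + chunkSize
        if end > len(value) {
            end = len(value)
        }
        chunkKey := string(key) + "_chunk_" + strconv.Itoa(i/chunkSize)
        store[chunkKey] = value[i:end]
    }
}

// readChunks concatenates chunks until the first missing (or empty) one,
// the same termination rule ReadChunks uses above.
func readChunks(store map[string][]byte, key []byte) []byte {
    var out []byte
    for i := 0; ; i++ {
        chunk, ok := store[string(key)+"_chunk_"+strconv.Itoa(i)]
        if !ok || len(chunk) == 0 {
            break
        }
        out = append(out, chunk...)
    }
    return out
}

func main() {
    store := map[string][]byte{}
    witness := bytes.Repeat([]byte{0xAB}, 250_000) // expect 3 chunks: 100k + 100k + 50k
    batchKey := uint64ToBytes(42)

    writeChunks(store, batchKey, witness)
    got := readChunks(store, batchKey)
    fmt.Printf("stored %d chunks, round-tripped %d bytes, equal=%v\n",
        len(store), len(got), bytes.Equal(witness, got))
}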