chore(blockifier_reexecution): offline reexecution from file #1764

Open · wants to merge 2 commits into base: aner/serialize_block_to_file_poc
132 changes: 68 additions & 64 deletions crates/blockifier_reexecution/src/main.rs
@@ -1,7 +1,9 @@
use blockifier_reexecution::state_reader::reexecution_state_reader::ReexecutionStateReader;
use blockifier::state::cached_state::CachedState;
use blockifier::state::state_api::StateReader;
use blockifier_reexecution::state_reader::test_state_reader::{
ConsecutiveStateReaders,
ConsecutiveTestStateReaders,
OfflineConsecutiveStateReaders,
SerializableOfflineReexecutionData,
};
use blockifier_reexecution::state_reader::utils::JSON_RPC_VERSION;
@@ -32,73 +34,83 @@ struct SharedArgs {
/// Block number.
#[clap(long, short = 'b')]
block_number: u64,

// Directory path to json files. Default:
// "./crates/blockifier_reexecution/resources/block_{block_number}".
#[clap(long, short = 'd', default_value = None)]
directory_path: Option<String>,
}

#[derive(Debug, Subcommand)]
enum Command {
/// Runs the RPC test.
RpcTest {
#[clap(flatten)]
url_and_block_number: SharedArgs,
shared_args: SharedArgs,
},

/// Writes the RPC queries to json files.
WriteRpcRepliesToJson {
#[clap(flatten)]
url_and_block_number: SharedArgs,
shared_args: SharedArgs,
},

/// Directory path to json files.
/// Default: "./crates/blockifier_reexecution/resources/block_{block_number}".
#[clap(long, default_value = None)]
directory_path: Option<String>,
/// Reexecutes the block from JSON files.
ReexecuteBlock {
#[clap(flatten)]
shared_args: SharedArgs,
},
}

#[derive(Debug, Args)]
struct GlobalOptions {}

pub fn reexecution_test<S: StateReader + Send + Sync, T: ConsecutiveStateReaders<S>>(
consecutive_state_readers: T,
) -> Option<CachedState<S>> {
let mut expected_state_diff = consecutive_state_readers.get_next_block_state_diff().unwrap();

let all_txs_in_next_block = consecutive_state_readers.get_next_block_txs().unwrap();

let mut transaction_executor =
consecutive_state_readers.get_transaction_executor(None).unwrap();

transaction_executor.execute_txs(&all_txs_in_next_block);
// Finalize block and read actual statediff.
let (actual_state_diff, _, _) =
transaction_executor.finalize().expect("Couldn't finalize block");

// TODO(Aner): compute correct block hash at storage slot 0x1 instead of removing it.
expected_state_diff.storage_updates.shift_remove(&ContractAddress(1_u128.into()));
assert_eq!(expected_state_diff, actual_state_diff);

transaction_executor.block_state
}

/// Main entry point of the blockifier reexecution CLI.
fn main() {
let args = BlockifierReexecutionCliArgs::parse();

match args.command {
Command::RpcTest { url_and_block_number: SharedArgs { node_url, block_number } } => {
Command::RpcTest { shared_args: SharedArgs { node_url, block_number, .. } } => {
println!("Running RPC test for block number {block_number} using node url {node_url}.",);

let config = RpcStateReaderConfig {
url: node_url,
json_rpc_version: JSON_RPC_VERSION.to_string(),
};

let test_state_readers_last_and_current_block = ConsecutiveTestStateReaders::new(
reexecution_test(ConsecutiveTestStateReaders::new(
BlockNumber(block_number - 1),
Some(config),
false,
);

let all_txs_in_next_block =
test_state_readers_last_and_current_block.get_next_block_txs().unwrap();

let mut expected_state_diff =
test_state_readers_last_and_current_block.get_next_block_state_diff().unwrap();

let mut transaction_executor =
test_state_readers_last_and_current_block.get_transaction_executor(None).unwrap();

transaction_executor.execute_txs(&all_txs_in_next_block);
// Finalize block and read actual statediff.
let (actual_state_diff, _, _) =
transaction_executor.finalize().expect("Couldn't finalize block");
// TODO(Aner): compute correct block hash at storage slot 0x1 instead of removing it.
expected_state_diff.storage_updates.shift_remove(&ContractAddress(1_u128.into()));
assert_eq!(expected_state_diff, actual_state_diff);
));

println!("RPC test passed successfully.");
}

Command::WriteRpcRepliesToJson {
url_and_block_number: SharedArgs { node_url, block_number },
directory_path,
shared_args: SharedArgs { node_url, block_number, directory_path },
} => {
let directory_path = directory_path.unwrap_or(format!(
"./crates/blockifier_reexecution/resources/block_{block_number}/"
@@ -110,52 +122,44 @@ fn main() {
json_rpc_version: JSON_RPC_VERSION.to_string(),
};

let ConsecutiveTestStateReaders { last_block_state_reader, next_block_state_reader } =
let consecutive_state_readers =
ConsecutiveTestStateReaders::new(BlockNumber(block_number - 1), Some(config), true);

let block_info_next_block = next_block_state_reader.get_block_info().unwrap();

let starknet_version = next_block_state_reader.get_starknet_version().unwrap();

let state_diff_next_block = next_block_state_reader.get_state_diff().unwrap();

let transactions_next_block = next_block_state_reader.get_all_txs_in_block().unwrap();

let blockifier_transactions_next_block = &last_block_state_reader
.api_txs_to_blockifier_txs_next_block(transactions_next_block.clone())
.unwrap();

let mut transaction_executor = last_block_state_reader
.get_transaction_executor(
next_block_state_reader.get_block_context().unwrap(),
None,
)
.unwrap();

transaction_executor.execute_txs(blockifier_transactions_next_block);

let block_state = transaction_executor.block_state.unwrap();
let initial_reads = block_state.get_initial_reads().unwrap();
let serializable_data_next_block =
consecutive_state_readers.get_serializable_data_next_block().unwrap();

// Run the reexecution test and get the state maps and contract class mapping.
let block_state = reexecution_test(consecutive_state_readers).unwrap();
let state_maps = block_state.get_initial_reads().unwrap().into();
let contract_class_mapping =
block_state.state.get_contract_class_mapping_dumper().unwrap();

let serializable_offline_reexecution_data = SerializableOfflineReexecutionData {
state_maps: initial_reads.into(),
block_info_next_block,
starknet_version,
transactions_next_block,
// Write the reexecution data to a json file.
SerializableOfflineReexecutionData {
state_maps,
contract_class_mapping,
state_diff_next_block,
};

serializable_offline_reexecution_data
.write_to_file(&directory_path, "reexecution_data.json")
.unwrap();
serializable_data_next_block,
}
.write_to_file(&directory_path, "reexecution_data.json")
.unwrap();

println!(
"RPC replies required for reexecuting block {block_number} written to json file."
);
}

Command::ReexecuteBlock {
shared_args: SharedArgs { block_number, directory_path, .. },
} => {
let full_file_path = directory_path
.unwrap_or(format!("./crates/blockifier_reexecution/tmp/block_{block_number}/"))
+ "reexecution_data.json";

reexecution_test(
OfflineConsecutiveStateReaders::new_from_file(&full_file_path).unwrap(),
);

println!("Reexecution test for block {block_number} passed successfully.");
}
}
}
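
Taken together, the two new subcommands form a write/replay pair: `WriteRpcRepliesToJson` runs the test online and dumps everything the offline run needs into `reexecution_data.json`, while `ReexecuteBlock` replays the block from that file without touching the RPC node. Note that their default directories differ (`resources/block_{block_number}/` for writing vs. `tmp/block_{block_number}/` for reexecution), so `-d`/`--directory-path` presumably has to be passed explicitly when replaying a file written to the default location. Below is a minimal sketch of the same round trip driven from code; it uses only items that appear in this diff, but the helper itself and its parameters are illustrative and assume `reexecution_test` from main.rs is in scope.

```rust
use blockifier_reexecution::state_reader::test_state_reader::{
    ConsecutiveTestStateReaders,
    OfflineConsecutiveStateReaders,
    SerializableOfflineReexecutionData,
};

// Hypothetical helper (not part of this PR) chaining the two new flows for one block.
// `readers` is assumed to be built as in the WriteRpcRepliesToJson arm above
// (i.e. with the third `ConsecutiveTestStateReaders::new` argument set to `true`).
fn write_then_reexecute(readers: ConsecutiveTestStateReaders, dir: String) {
    // Online phase: fetch the next block's data, run the test once, and collect the
    // initial reads plus the dumped contract classes from the returned block state.
    let serializable_data_next_block = readers.get_serializable_data_next_block().unwrap();
    let block_state = reexecution_test(readers).unwrap();

    SerializableOfflineReexecutionData {
        state_maps: block_state.get_initial_reads().unwrap().into(),
        contract_class_mapping: block_state.state.get_contract_class_mapping_dumper().unwrap(),
        serializable_data_next_block,
    }
    .write_to_file(&dir, "reexecution_data.json")
    .unwrap();

    // Offline phase: rebuild the readers from the file and replay the block without RPC.
    let full_file_path = dir + "reexecution_data.json";
    reexecution_test(OfflineConsecutiveStateReaders::new_from_file(&full_file_path).unwrap());
}
```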
crates/blockifier_reexecution/src/state_reader/test_state_reader.rs
@@ -53,13 +53,18 @@ pub struct OfflineReexecutionData {
}

#[derive(Serialize, Deserialize)]
pub struct SerializableOfflineReexecutionData {
pub state_maps: ReexecutionStateMaps,
pub struct SerializableDataNextBlock {
pub block_info_next_block: BlockInfo,
pub starknet_version: StarknetVersion,
pub transactions_next_block: Vec<(Transaction, TransactionHash)>,
pub state_diff_next_block: CommitmentStateDiff,
}

#[derive(Serialize, Deserialize)]
pub struct SerializableOfflineReexecutionData {
pub state_maps: ReexecutionStateMaps,
pub contract_class_mapping: StarknetContractClassMapping,
pub serializable_data_next_block: SerializableDataNextBlock,
}

impl SerializableOfflineReexecutionData {
@@ -82,30 +87,41 @@ impl SerializableOfflineReexecutionData {

impl From<SerializableOfflineReexecutionData> for OfflineReexecutionData {
fn from(value: SerializableOfflineReexecutionData) -> Self {
let SerializableOfflineReexecutionData {
state_maps,
contract_class_mapping,
serializable_data_next_block:
SerializableDataNextBlock {
block_info_next_block,
starknet_version,
transactions_next_block,
state_diff_next_block,
},
} = value;

let offline_state_reader_prev_block = OfflineStateReader {
state_maps: value.state_maps.try_into().expect("Failed to deserialize state maps."),
contract_class_mapping: value.contract_class_mapping,
state_maps: state_maps.try_into().expect("Failed to deserialize state maps."),
contract_class_mapping,
};
let transactions_next_block = offline_state_reader_prev_block
.api_txs_to_blockifier_txs_next_block(value.transactions_next_block)
.api_txs_to_blockifier_txs_next_block(transactions_next_block)
.expect("Failed to convert starknet-api transactions to blockifier transactions.");
Self {
offline_state_reader_prev_block,
block_context_next_block: BlockContext::new(
value.block_info_next_block,
block_info_next_block,
get_chain_info(),
VersionedConstants::get(&value.starknet_version).unwrap().clone(),
VersionedConstants::get(&starknet_version).unwrap().clone(),
BouncerConfig::max(),
),
transactions_next_block,
state_diff_next_block: value.state_diff_next_block,
state_diff_next_block,
}
}
}

pub struct TestStateReader {
rpc_state_reader: RpcStateReader,
#[allow(dead_code)]
contract_class_mapping_dumper: Arc<Mutex<Option<StarknetContractClassMapping>>>,
}

@@ -369,6 +385,15 @@ impl ConsecutiveTestStateReaders {
),
}
}

pub fn get_serializable_data_next_block(&self) -> ReexecutionResult<SerializableDataNextBlock> {
Ok(SerializableDataNextBlock {
block_info_next_block: self.next_block_state_reader.get_block_info()?,
starknet_version: self.next_block_state_reader.get_starknet_version()?,
transactions_next_block: self.next_block_state_reader.get_all_txs_in_block()?,
state_diff_next_block: self.next_block_state_reader.get_state_diff()?,
})
}
}

impl ConsecutiveStateReaders<TestStateReader> for ConsecutiveTestStateReaders {
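
As a reading aid for the new file format: assuming serde's default field naming for the two structs above (no rename attributes appear in this diff), `reexecution_data.json` should have the top-level skeleton sketched below. The nested value layouts (`ReexecutionStateMaps`, `BlockInfo`, and so on) are elided, and the whole shape is an assumption derived from the struct definitions, not verified output.

```rust
use serde_json::{json, Value};

/// Assumed top-level keys of reexecution_data.json, following serde's default
/// field naming for SerializableOfflineReexecutionData and SerializableDataNextBlock.
fn reexecution_data_skeleton() -> Value {
    json!({
        "state_maps": {},                  // ReexecutionStateMaps
        "contract_class_mapping": {},      // StarknetContractClassMapping
        "serializable_data_next_block": {
            "block_info_next_block": {},   // BlockInfo
            "starknet_version": "",        // StarknetVersion
            "transactions_next_block": [], // Vec<(Transaction, TransactionHash)>
            "state_diff_next_block": {},   // CommitmentStateDiff
        },
    })
}
```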