Skip to main content

anvil/eth/backend/mem/
storage.rs

1//! In-memory blockchain storage
2use crate::eth::{
3    backend::{
4        db::{
5            MaybeFullDatabase, SerializableBlock, SerializableHistoricalStates,
6            SerializableTransaction, StateDb,
7        },
8        env::Env,
9        mem::cache::DiskStateCache,
10    },
11    pool::transactions::PoolTransaction,
12};
13use alloy_consensus::{Header, constants::EMPTY_WITHDRAWALS};
14use alloy_eips::eip7685::EMPTY_REQUESTS_HASH;
15use alloy_primitives::{
16    B256, Bytes, U256,
17    map::{B256HashMap, HashMap},
18};
19use alloy_rpc_types::{
20    BlockId, BlockNumberOrTag, TransactionInfo as RethTransactionInfo,
21    trace::{
22        otterscan::{InternalOperation, OperationType},
23        parity::LocalizedTransactionTrace,
24    },
25};
26use anvil_core::eth::{
27    block::{Block, create_block},
28    transaction::{MaybeImpersonatedTransaction, TransactionInfo},
29};
30use foundry_evm::{
31    backend::MemDb,
32    traces::{CallKind, ParityTraceBuilder, TracingInspectorConfig},
33};
34use foundry_primitives::{FoundryReceiptEnvelope, FoundryTxReceipt};
35use parking_lot::RwLock;
36use revm::{context::Block as RevmBlock, primitives::hardfork::SpecId};
37use std::{collections::VecDeque, fmt, path::PathBuf, sync::Arc, time::Duration};
38// use yansi::Paint;
39
// === various limits in number of blocks ===

/// Default number of recent block states kept in memory.
pub const DEFAULT_HISTORY_LIMIT: usize = 500;
/// Floor the adaptive in-memory limit may gradually shrink to (see `InMemoryBlockStates`).
const MIN_HISTORY_LIMIT: usize = 10;
/// Maximum number of block states cached on disk:
/// 1hr of up-time at lowest 1s interval
const MAX_ON_DISK_HISTORY_LIMIT: usize = 3_600;
46
/// Represents the complete state of a single block.
///
/// Keeps a bounded number of recent block states in memory and spills older
/// ones to a disk cache, unless disk caching is disabled.
pub struct InMemoryBlockStates {
    /// The states at a certain block (block hash -> state)
    states: B256HashMap<StateDb>,
    /// States whose data has been moved to disk
    on_disk_states: B256HashMap<StateDb>,
    /// How many states to store at most in memory
    in_memory_limit: usize,
    /// Minimum amount of states we keep in memory
    min_in_memory_limit: usize,
    /// Maximum amount of states we keep on disk
    ///
    /// Limiting the states will prevent disk blow up, especially in interval mining mode
    max_on_disk_limit: usize,
    /// The oldest states written to disk, in eviction order
    oldest_on_disk: VecDeque<B256>,
    /// All states present in memory, used to enforce `in_memory_limit`
    present: VecDeque<B256>,
    /// Stores old states on disk
    disk_cache: DiskStateCache,
}
68
69impl InMemoryBlockStates {
70    /// Creates a new instance with limited slots
71    pub fn new(in_memory_limit: usize, on_disk_limit: usize) -> Self {
72        Self {
73            states: Default::default(),
74            on_disk_states: Default::default(),
75            in_memory_limit,
76            min_in_memory_limit: in_memory_limit.min(MIN_HISTORY_LIMIT),
77            max_on_disk_limit: on_disk_limit,
78            oldest_on_disk: Default::default(),
79            present: Default::default(),
80            disk_cache: Default::default(),
81        }
82    }
83
    /// Configures no disk caching; states are only ever kept in memory.
    pub fn memory_only(mut self) -> Self {
        // A zero on-disk limit is the "memory only" marker (see `is_memory_only`).
        self.max_on_disk_limit = 0;
        self
    }
89
    /// Configures the path on disk where the states will be cached.
    pub fn disk_path(mut self, path: PathBuf) -> Self {
        self.disk_cache = self.disk_cache.with_path(path);
        self
    }
95
96    /// This modifies the `limit` what to keep stored in memory.
97    ///
98    /// This will ensure the new limit adjusts based on the block time.
99    /// The lowest blocktime is 1s which should increase the limit slightly
100    pub fn update_interval_mine_block_time(&mut self, block_time: Duration) {
101        let block_time = block_time.as_secs();
102        // for block times lower than 2s we increase the mem limit since we're mining _small_ blocks
103        // very fast
104        // this will gradually be decreased once the max limit was reached
105        if block_time <= 2 {
106            self.in_memory_limit = DEFAULT_HISTORY_LIMIT * 3;
107            self.enforce_limits();
108        }
109    }
110
111    /// Returns true if only memory caching is supported.
112    fn is_memory_only(&self) -> bool {
113        self.max_on_disk_limit == 0
114    }
115
116    /// Inserts a new (hash -> state) pair
117    ///
118    /// When the configured limit for the number of states that can be stored in memory is reached,
119    /// the oldest state is removed.
120    ///
121    /// Since we keep a snapshot of the entire state as history, the size of the state will increase
122    /// with the transactions processed. To counter this, we gradually decrease the cache limit with
123    /// the number of states/blocks until we reached the `min_limit`.
124    ///
125    /// When a state that was previously written to disk is requested, it is simply read from disk.
126    pub fn insert(&mut self, hash: B256, state: StateDb) {
127        if !self.is_memory_only() && self.present.len() >= self.in_memory_limit {
128            // once we hit the max limit we gradually decrease it
129            self.in_memory_limit =
130                self.in_memory_limit.saturating_sub(1).max(self.min_in_memory_limit);
131        }
132
133        self.enforce_limits();
134
135        self.states.insert(hash, state);
136        self.present.push_back(hash);
137    }
138
139    /// Enforces configured limits
140    fn enforce_limits(&mut self) {
141        // enforce memory limits
142        while self.present.len() >= self.in_memory_limit {
143            // evict the oldest block
144            if let Some((hash, mut state)) = self
145                .present
146                .pop_front()
147                .and_then(|hash| self.states.remove(&hash).map(|state| (hash, state)))
148            {
149                // only write to disk if supported
150                if !self.is_memory_only() {
151                    let state_snapshot = state.0.clear_into_state_snapshot();
152                    if self.disk_cache.write(hash, &state_snapshot) {
153                        // Write succeeded, move state to on-disk tracking
154                        self.on_disk_states.insert(hash, state);
155                        self.oldest_on_disk.push_back(hash);
156                    } else {
157                        // Write failed, restore state to memory to avoid data loss
158                        state.init_from_state_snapshot(state_snapshot);
159                        self.states.insert(hash, state);
160                        self.present.push_front(hash);
161                        // Increase limit temporarily to prevent infinite retry loop
162                        self.in_memory_limit = self.in_memory_limit.saturating_add(1);
163                        break;
164                    }
165                }
166            }
167        }
168
169        // enforce on disk limit and purge the oldest state cached on disk
170        while !self.is_memory_only() && self.oldest_on_disk.len() >= self.max_on_disk_limit {
171            // evict the oldest block
172            if let Some(hash) = self.oldest_on_disk.pop_front() {
173                self.on_disk_states.remove(&hash);
174                self.disk_cache.remove(hash);
175            }
176        }
177    }
178
    /// Returns the in-memory state for the given `hash` if present.
    pub fn get_state(&self, hash: &B256) -> Option<&StateDb> {
        self.states.get(hash)
    }
183
184    /// Returns on-disk state for the given `hash` if present
185    pub fn get_on_disk_state(&mut self, hash: &B256) -> Option<&StateDb> {
186        if let Some(state) = self.on_disk_states.get_mut(hash)
187            && let Some(cached) = self.disk_cache.read(*hash)
188        {
189            state.init_from_state_snapshot(cached);
190            return Some(state);
191        }
192
193        None
194    }
195
    /// Sets the maximum number of states we keep in memory.
    pub fn set_cache_limit(&mut self, limit: usize) {
        self.in_memory_limit = limit;
    }
200
201    /// Clears all entries
202    pub fn clear(&mut self) {
203        self.states.clear();
204        self.on_disk_states.clear();
205        self.present.clear();
206        for on_disk in std::mem::take(&mut self.oldest_on_disk) {
207            self.disk_cache.remove(on_disk)
208        }
209    }
210
211    /// Serialize all states to a list of serializable historical states
212    pub fn serialized_states(&mut self) -> SerializableHistoricalStates {
213        // Get in-memory states
214        let mut states = self
215            .states
216            .iter_mut()
217            .map(|(hash, state)| (*hash, state.serialize_state()))
218            .collect::<Vec<_>>();
219
220        // Get on-disk state snapshots
221        self.on_disk_states.iter().for_each(|(hash, _)| {
222            if let Some(state_snapshot) = self.disk_cache.read(*hash) {
223                states.push((*hash, state_snapshot));
224            }
225        });
226
227        SerializableHistoricalStates::new(states)
228    }
229
230    /// Load states from serialized data
231    pub fn load_states(&mut self, states: SerializableHistoricalStates) {
232        for (hash, state_snapshot) in states {
233            let mut state_db = StateDb::new(MemDb::default());
234            state_db.init_from_state_snapshot(state_snapshot);
235            self.insert(hash, state_db);
236        }
237    }
238}
239
240impl fmt::Debug for InMemoryBlockStates {
241    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
242        f.debug_struct("InMemoryBlockStates")
243            .field("in_memory_limit", &self.in_memory_limit)
244            .field("min_in_memory_limit", &self.min_in_memory_limit)
245            .field("max_on_disk_limit", &self.max_on_disk_limit)
246            .field("oldest_on_disk", &self.oldest_on_disk)
247            .field("present", &self.present)
248            .finish_non_exhaustive()
249    }
250}
251
impl Default for InMemoryBlockStates {
    fn default() -> Self {
        // enough in memory to store `DEFAULT_HISTORY_LIMIT` blocks in memory,
        // with up to `MAX_ON_DISK_HISTORY_LIMIT` older states cached on disk
        Self::new(DEFAULT_HISTORY_LIMIT, MAX_ON_DISK_HISTORY_LIMIT)
    }
}
258
/// Stores the blockchain data (blocks, transactions)
#[derive(Clone, Debug)]
pub struct BlockchainStorage {
    /// All stored blocks (block hash -> block)
    pub blocks: B256HashMap<Block>,
    /// Mapping from block number -> block hash
    pub hashes: HashMap<u64, B256>,
    /// The current best hash
    pub best_hash: B256,
    /// The current best block number
    pub best_number: u64,
    /// Genesis hash of the chain
    pub genesis_hash: B256,
    /// Mapping from the transaction hash to a tuple containing the transaction as well as the
    /// transaction receipt
    pub transactions: B256HashMap<MinedTransaction>,
    /// The total difficulty of the chain until this block
    pub total_difficulty: U256,
}
278
279impl BlockchainStorage {
280    /// Creates a new storage with a genesis block
281    pub fn new(
282        env: &Env,
283        spec_id: SpecId,
284        base_fee: Option<u64>,
285        timestamp: u64,
286        genesis_number: u64,
287    ) -> Self {
288        let is_shanghai = spec_id >= SpecId::SHANGHAI;
289        let is_cancun = spec_id >= SpecId::CANCUN;
290        let is_prague = spec_id >= SpecId::PRAGUE;
291
292        // create a dummy genesis block
293        let header = Header {
294            timestamp,
295            base_fee_per_gas: base_fee,
296            gas_limit: env.evm_env.block_env.gas_limit,
297            beneficiary: env.evm_env.block_env.beneficiary,
298            difficulty: env.evm_env.block_env.difficulty,
299            blob_gas_used: env.evm_env.block_env.blob_excess_gas_and_price.as_ref().map(|_| 0),
300            excess_blob_gas: env.evm_env.block_env.blob_excess_gas(),
301            number: genesis_number,
302            parent_beacon_block_root: is_cancun.then_some(Default::default()),
303            withdrawals_root: is_shanghai.then_some(EMPTY_WITHDRAWALS),
304            requests_hash: is_prague.then_some(EMPTY_REQUESTS_HASH),
305            ..Default::default()
306        };
307        let block = create_block(header, Vec::<MaybeImpersonatedTransaction>::new());
308        let genesis_hash = block.header.hash_slow();
309        let best_hash = genesis_hash;
310        let best_number = genesis_number;
311
312        let mut blocks = B256HashMap::default();
313        blocks.insert(genesis_hash, block);
314
315        let mut hashes = HashMap::default();
316        hashes.insert(best_number, genesis_hash);
317        Self {
318            blocks,
319            hashes,
320            best_hash,
321            best_number,
322            genesis_hash,
323            transactions: Default::default(),
324            total_difficulty: Default::default(),
325        }
326    }
327
328    pub fn forked(block_number: u64, block_hash: B256, total_difficulty: U256) -> Self {
329        let mut hashes = HashMap::default();
330        hashes.insert(block_number, block_hash);
331
332        Self {
333            blocks: B256HashMap::default(),
334            hashes,
335            best_hash: block_hash,
336            best_number: block_number,
337            genesis_hash: Default::default(),
338            transactions: Default::default(),
339            total_difficulty,
340        }
341    }
342
343    /// Unwind the chain state back to the given block in storage.
344    ///
345    /// The block identified by `block_number` and `block_hash` is __non-inclusive__, i.e. it will
346    /// remain in the state.
347    pub fn unwind_to(&mut self, block_number: u64, block_hash: B256) -> Vec<Block> {
348        let mut removed = vec![];
349        let best_num: u64 = self.best_number;
350        for i in (block_number + 1)..=best_num {
351            if let Some(hash) = self.hashes.get(&i).copied() {
352                // First remove the block's transactions while the mappings still exist
353                self.remove_block_transactions_by_number(i);
354
355                // Now remove the block from storage (may already be empty of txs) and drop mapping
356                if let Some(block) = self.blocks.remove(&hash) {
357                    removed.push(block);
358                }
359                self.hashes.remove(&i);
360            }
361        }
362        self.best_hash = block_hash;
363        self.best_number = block_number;
364        removed
365    }
366
367    pub fn empty() -> Self {
368        Self {
369            blocks: Default::default(),
370            hashes: Default::default(),
371            best_hash: Default::default(),
372            best_number: Default::default(),
373            genesis_hash: Default::default(),
374            transactions: Default::default(),
375            total_difficulty: Default::default(),
376        }
377    }
378
379    /// Removes all stored transactions for the given block number
380    pub fn remove_block_transactions_by_number(&mut self, num: u64) {
381        if let Some(hash) = self.hashes.get(&num).copied() {
382            self.remove_block_transactions(hash);
383        }
384    }
385
386    /// Removes all stored transactions for the given block hash
387    pub fn remove_block_transactions(&mut self, block_hash: B256) {
388        if let Some(block) = self.blocks.get_mut(&block_hash) {
389            for tx in &block.body.transactions {
390                self.transactions.remove(&tx.hash());
391            }
392            block.body.transactions.clear();
393        }
394    }
395}
396
397impl BlockchainStorage {
398    /// Returns the hash for [BlockNumberOrTag]
399    pub fn hash(&self, number: BlockNumberOrTag) -> Option<B256> {
400        let slots_in_an_epoch = 32;
401        match number {
402            BlockNumberOrTag::Latest => Some(self.best_hash),
403            BlockNumberOrTag::Earliest => Some(self.genesis_hash),
404            BlockNumberOrTag::Pending => None,
405            BlockNumberOrTag::Number(num) => self.hashes.get(&num).copied(),
406            BlockNumberOrTag::Safe => {
407                if self.best_number > (slots_in_an_epoch) {
408                    self.hashes.get(&(self.best_number - (slots_in_an_epoch))).copied()
409                } else {
410                    Some(self.genesis_hash) // treat the genesis block as safe "by definition"
411                }
412            }
413            BlockNumberOrTag::Finalized => {
414                if self.best_number > (slots_in_an_epoch * 2) {
415                    self.hashes.get(&(self.best_number - (slots_in_an_epoch * 2))).copied()
416                } else {
417                    Some(self.genesis_hash)
418                }
419            }
420        }
421    }
422
423    pub fn serialized_blocks(&self) -> Vec<SerializableBlock> {
424        self.blocks.values().map(|block| block.clone().into()).collect()
425    }
426
427    pub fn serialized_transactions(&self) -> Vec<SerializableTransaction> {
428        self.transactions.values().map(|tx: &MinedTransaction| tx.clone().into()).collect()
429    }
430
431    /// Deserialize and add all blocks data to the backend storage
432    pub fn load_blocks(&mut self, serializable_blocks: Vec<SerializableBlock>) {
433        for serializable_block in &serializable_blocks {
434            let block: Block = serializable_block.clone().into();
435            let block_hash = block.header.hash_slow();
436            let block_number = block.header.number;
437            self.blocks.insert(block_hash, block);
438            self.hashes.insert(block_number, block_hash);
439
440            // Update genesis_hash if we are loading block 0, so that Finalized/Safe/Earliest
441            // block tag lookups return the correct hash.
442            // See: https://github.com/foundry-rs/foundry/issues/12645
443            if block_number == 0 {
444                self.genesis_hash = block_hash;
445            }
446        }
447    }
448
449    /// Deserialize and add all blocks data to the backend storage
450    pub fn load_transactions(&mut self, serializable_transactions: Vec<SerializableTransaction>) {
451        for serializable_transaction in &serializable_transactions {
452            let transaction: MinedTransaction = serializable_transaction.clone().into();
453            self.transactions.insert(transaction.info.transaction_hash, transaction);
454        }
455    }
456}
457
/// A simple in-memory blockchain
#[derive(Clone, Debug)]
pub struct Blockchain {
    /// Underlying storage that supports concurrent reads
    pub storage: Arc<RwLock<BlockchainStorage>>,
}
464
465impl Blockchain {
466    /// Creates a new storage with a genesis block
467    pub fn new(
468        env: &Env,
469        spec_id: SpecId,
470        base_fee: Option<u64>,
471        timestamp: u64,
472        genesis_number: u64,
473    ) -> Self {
474        Self {
475            storage: Arc::new(RwLock::new(BlockchainStorage::new(
476                env,
477                spec_id,
478                base_fee,
479                timestamp,
480                genesis_number,
481            ))),
482        }
483    }
484
485    pub fn forked(block_number: u64, block_hash: B256, total_difficulty: U256) -> Self {
486        Self {
487            storage: Arc::new(RwLock::new(BlockchainStorage::forked(
488                block_number,
489                block_hash,
490                total_difficulty,
491            ))),
492        }
493    }
494
495    /// returns the header hash of given block
496    pub fn hash(&self, id: BlockId) -> Option<B256> {
497        match id {
498            BlockId::Hash(h) => Some(h.block_hash),
499            BlockId::Number(num) => self.storage.read().hash(num),
500        }
501    }
502
503    pub fn get_block_by_hash(&self, hash: &B256) -> Option<Block> {
504        self.storage.read().blocks.get(hash).cloned()
505    }
506
507    pub fn get_transaction_by_hash(&self, hash: &B256) -> Option<MinedTransaction> {
508        self.storage.read().transactions.get(hash).cloned()
509    }
510
511    /// Returns the total number of blocks
512    pub fn blocks_count(&self) -> usize {
513        self.storage.read().blocks.len()
514    }
515}
516
/// Represents the outcome of mining a new block
#[derive(Clone, Debug)]
pub struct MinedBlockOutcome {
    /// The number of the block that was mined
    pub block_number: u64,
    /// All transactions included in the block
    pub included: Vec<Arc<PoolTransaction>>,
    /// All transactions that were attempted to be included but were invalid at the time of
    /// execution
    pub invalid: Vec<Arc<PoolTransaction>>,
}
528
/// Container type for a mined transaction
#[derive(Clone, Debug)]
pub struct MinedTransaction {
    /// Transaction details (hash, index, traces, ...)
    pub info: TransactionInfo,
    /// The receipt produced when the transaction was executed
    pub receipt: FoundryReceiptEnvelope,
    /// Hash of the block the transaction was included in
    pub block_hash: B256,
    /// Number of the block the transaction was included in
    pub block_number: u64,
}
537
538impl MinedTransaction {
539    /// Returns the traces of the transaction for `trace_transaction`
540    pub fn parity_traces(&self) -> Vec<LocalizedTransactionTrace> {
541        ParityTraceBuilder::new(
542            self.info.traces.clone(),
543            None,
544            TracingInspectorConfig::default_parity(),
545        )
546        .into_localized_transaction_traces(RethTransactionInfo {
547            hash: Some(self.info.transaction_hash),
548            index: Some(self.info.transaction_index),
549            block_hash: Some(self.block_hash),
550            block_number: Some(self.block_number),
551            base_fee: None,
552        })
553    }
554
555    pub fn ots_internal_operations(&self) -> Vec<InternalOperation> {
556        self.info
557            .traces
558            .iter()
559            .filter_map(|node| {
560                let r#type = match node.trace.kind {
561                    _ if node.is_selfdestruct() => OperationType::OpSelfDestruct,
562                    CallKind::Call if !node.trace.value.is_zero() => OperationType::OpTransfer,
563                    CallKind::Create => OperationType::OpCreate,
564                    CallKind::Create2 => OperationType::OpCreate2,
565                    _ => return None,
566                };
567                let mut from = node.trace.caller;
568                let mut to = node.trace.address;
569                let mut value = node.trace.value;
570                if node.is_selfdestruct() {
571                    from = node.trace.address;
572                    to = node.trace.selfdestruct_refund_target.unwrap_or_default();
573                    value = node.trace.selfdestruct_transferred_value.unwrap_or_default();
574                }
575                Some(InternalOperation { r#type, from, to, value })
576            })
577            .collect()
578    }
579}
580
/// Intermediary Anvil representation of a receipt
#[derive(Clone, Debug)]
pub struct MinedTransactionReceipt {
    /// The actual json rpc receipt object
    pub inner: FoundryTxReceipt,
    /// Output data (return value or deployed code) for the transaction
    pub out: Option<Bytes>,
}
589
#[cfg(test)]
mod tests {
    use super::*;
    use crate::eth::backend::db::Db;
    use alloy_primitives::{Address, hex};
    use alloy_rlp::Decodable;
    use foundry_primitives::FoundryTxEnvelope;
    use revm::{database::DatabaseRef, state::AccountInfo};

    #[test]
    fn test_interval_update() {
        let mut storage = InMemoryBlockStates::default();
        // A 1s block time is below the 2s threshold, so the limit should triple.
        storage.update_interval_mine_block_time(Duration::from_secs(1));
        assert_eq!(storage.in_memory_limit, DEFAULT_HISTORY_LIMIT * 3);
    }

    #[test]
    fn test_init_state_limits() {
        let mut storage = InMemoryBlockStates::default();
        assert_eq!(storage.in_memory_limit, DEFAULT_HISTORY_LIMIT);
        assert_eq!(storage.min_in_memory_limit, MIN_HISTORY_LIMIT);
        assert_eq!(storage.max_on_disk_limit, MAX_ON_DISK_HISTORY_LIMIT);

        storage = storage.memory_only();
        assert!(storage.is_memory_only());

        // The in-memory floor is capped by the configured limit itself.
        storage = InMemoryBlockStates::new(1, 0);
        assert!(storage.is_memory_only());
        assert_eq!(storage.in_memory_limit, 1);
        assert_eq!(storage.min_in_memory_limit, 1);
        assert_eq!(storage.max_on_disk_limit, 0);

        storage = InMemoryBlockStates::new(1, 2);
        assert!(!storage.is_memory_only());
        assert_eq!(storage.in_memory_limit, 1);
        assert_eq!(storage.min_in_memory_limit, 1);
        assert_eq!(storage.max_on_disk_limit, 2);
    }

    #[tokio::test(flavor = "multi_thread")]
    async fn can_read_write_cached_state() {
        // An in-memory limit of 1 forces the first insert to be evicted to disk
        // by the second one.
        let mut storage = InMemoryBlockStates::new(1, MAX_ON_DISK_HISTORY_LIMIT);
        let one = B256::from(U256::from(1));
        let two = B256::from(U256::from(2));

        let mut state = MemDb::default();
        let addr = Address::random();
        let info = AccountInfo::from_balance(U256::from(1337));
        state.insert_account(addr, info);
        storage.insert(one, StateDb::new(state));
        storage.insert(two, StateDb::new(MemDb::default()));

        // wait for files to be flushed
        tokio::time::sleep(std::time::Duration::from_secs(1)).await;

        assert_eq!(storage.on_disk_states.len(), 1);
        assert!(storage.on_disk_states.contains_key(&one));

        let loaded = storage.get_on_disk_state(&one).unwrap();

        let acc = loaded.basic_ref(addr).unwrap().unwrap();
        assert_eq!(acc.balance, U256::from(1337u64));
    }

    #[tokio::test(flavor = "multi_thread")]
    async fn can_decrease_state_cache_size() {
        let limit = 15;
        let mut storage = InMemoryBlockStates::new(limit, MAX_ON_DISK_HISTORY_LIMIT);

        let num_states = 30;
        for idx in 0..num_states {
            let mut state = MemDb::default();
            let hash = B256::from(U256::from(idx));
            let addr = Address::from_word(hash);
            let balance = (idx * 2) as u64;
            let info = AccountInfo::from_balance(U256::from(balance));
            state.insert_account(addr, info);
            storage.insert(hash, StateDb::new(state));
        }

        // wait for files to be flushed
        tokio::time::sleep(std::time::Duration::from_secs(1)).await;

        // The adaptive limit shrinks to the floor; everything older spills to disk.
        let on_disk_states_len = num_states - storage.min_in_memory_limit;

        assert_eq!(storage.on_disk_states.len(), on_disk_states_len);
        assert_eq!(storage.present.len(), storage.min_in_memory_limit);

        for idx in 0..num_states {
            let hash = B256::from(U256::from(idx));
            let addr = Address::from_word(hash);

            let loaded = if idx < on_disk_states_len {
                storage.get_on_disk_state(&hash).unwrap()
            } else {
                storage.get_state(&hash).unwrap()
            };

            let acc = loaded.basic_ref(addr).unwrap().unwrap();
            let balance = (idx * 2) as u64;
            assert_eq!(acc.balance, U256::from(balance));
        }
    }

    // verifies that blocks and transactions in BlockchainStorage remain the same when dumped and
    // reloaded
    #[test]
    fn test_storage_dump_reload_cycle() {
        let mut dump_storage = BlockchainStorage::empty();

        let header = Header { gas_limit: 123456, ..Default::default() };
        // RLP-encoded legacy transaction fixture.
        let bytes_first = &mut &hex::decode("f86b02843b9aca00830186a094d3e8763675e4c425df46cc3b5c0f6cbdac39604687038d7ea4c68000802ba00eb96ca19e8a77102767a41fc85a36afd5c61ccb09911cec5d3e86e193d9c5aea03a456401896b1b6055311536bf00a718568c744d8c1f9df59879e8350220ca18").unwrap()[..];
        let tx: MaybeImpersonatedTransaction =
            FoundryTxEnvelope::decode(&mut &bytes_first[..]).unwrap().into();
        let block = create_block(header.clone(), vec![tx.clone()]);
        let block_hash = block.header.hash_slow();
        dump_storage.blocks.insert(block_hash, block);

        let serialized_blocks = dump_storage.serialized_blocks();
        let serialized_transactions = dump_storage.serialized_transactions();

        let mut load_storage = BlockchainStorage::empty();

        load_storage.load_blocks(serialized_blocks);
        load_storage.load_transactions(serialized_transactions);

        let loaded_block = load_storage.blocks.get(&block_hash).unwrap();
        assert_eq!(loaded_block.header.gas_limit, { header.gas_limit });
        let loaded_tx = loaded_block.body.transactions.first().unwrap();
        assert_eq!(loaded_tx, &tx);
    }
}
721}