// anvil/eth/backend/mem/storage.rs
1//! In-memory blockchain storage
2use crate::eth::{
3    backend::{
4        db::{
5            MaybeFullDatabase, SerializableBlock, SerializableHistoricalStates,
6            SerializableTransaction, StateDb,
7        },
8        env::Env,
9        mem::cache::DiskStateCache,
10    },
11    pool::transactions::PoolTransaction,
12};
13use alloy_consensus::{BlockHeader, Header, constants::EMPTY_WITHDRAWALS};
14use alloy_eips::eip7685::EMPTY_REQUESTS_HASH;
15use alloy_network::Network;
16use alloy_primitives::{
17    B256, Bytes, U256,
18    map::{B256HashMap, HashMap},
19};
20use alloy_rpc_types::{
21    BlockId, BlockNumberOrTag, TransactionInfo as RethTransactionInfo,
22    trace::{
23        otterscan::{InternalOperation, OperationType},
24        parity::LocalizedTransactionTrace,
25    },
26};
27use anvil_core::eth::{
28    block::{Block, create_block},
29    transaction::{MaybeImpersonatedTransaction, TransactionInfo},
30};
31use foundry_evm::{
32    backend::MemDb,
33    traces::{CallKind, ParityTraceBuilder, TracingInspectorConfig},
34};
35use foundry_primitives::{FoundryNetwork, FoundryTxEnvelope};
36use parking_lot::RwLock;
37use revm::{context::Block as RevmBlock, primitives::hardfork::SpecId};
38use std::{collections::VecDeque, fmt, path::PathBuf, sync::Arc, time::Duration};
39// use yansi::Paint;
40
// === various limits in number of blocks ===

/// Default number of recent block states kept in memory (see `InMemoryBlockStates`).
pub const DEFAULT_HISTORY_LIMIT: usize = 500;
/// Floor for the gradually shrinking in-memory limit; we never keep fewer than this.
const MIN_HISTORY_LIMIT: usize = 10;
// 1hr of up-time at lowest 1s interval
const MAX_ON_DISK_HISTORY_LIMIT: usize = 3_600;
47
/// Keeps a limited number of historical block states in memory, evicting the
/// oldest ones to a disk cache once the in-memory limit is reached.
pub struct InMemoryBlockStates {
    /// The in-memory states, keyed by block hash
    states: B256HashMap<StateDb>,
    /// States whose data has been moved to disk; the `StateDb` here is a shell
    /// that is re-hydrated from the disk snapshot on access
    on_disk_states: B256HashMap<StateDb>,
    /// How many states to store at most
    in_memory_limit: usize,
    /// minimum amount of states we keep in memory
    min_in_memory_limit: usize,
    /// maximum amount of states we keep on disk
    ///
    /// Limiting the states will prevent disk blow up, especially in interval mining mode.
    /// A value of `0` disables disk caching entirely (memory-only mode).
    max_on_disk_limit: usize,
    /// the oldest states written to disk, in eviction order
    oldest_on_disk: VecDeque<B256>,
    /// all states present in memory, used to enforce `in_memory_limit`
    present: VecDeque<B256>,
    /// Stores old states on disk
    disk_cache: DiskStateCache,
}
69
impl InMemoryBlockStates {
    /// Creates a new instance with limited slots
    ///
    /// `in_memory_limit` caps how many block states stay in memory;
    /// `on_disk_limit` caps how many evicted states are cached on disk
    /// (`0` disables disk caching entirely).
    pub fn new(in_memory_limit: usize, on_disk_limit: usize) -> Self {
        Self {
            states: Default::default(),
            on_disk_states: Default::default(),
            in_memory_limit,
            // the floor for the shrinking limit can never exceed the limit itself
            min_in_memory_limit: in_memory_limit.min(MIN_HISTORY_LIMIT),
            max_on_disk_limit: on_disk_limit,
            oldest_on_disk: Default::default(),
            present: Default::default(),
            disk_cache: Default::default(),
        }
    }

    /// Configures no disk caching
    pub fn memory_only(mut self) -> Self {
        self.max_on_disk_limit = 0;
        self
    }

    /// Configures the path on disk where the states will be cached.
    pub fn disk_path(mut self, path: PathBuf) -> Self {
        self.disk_cache = self.disk_cache.with_path(path);
        self
    }

    /// Adjusts the in-memory `limit` based on the configured mining block time.
    ///
    /// The lowest block time is 1s, which increases the limit so that fast
    /// interval mining keeps more (small) block states around.
    pub fn update_interval_mine_block_time(&mut self, block_time: Duration) {
        let block_time = block_time.as_secs();
        // for block times lower than 2s we increase the mem limit since we're mining _small_ blocks
        // very fast
        // this will gradually be decreased once the max limit was reached
        if block_time <= 2 {
            self.in_memory_limit = DEFAULT_HISTORY_LIMIT * 3;
            self.enforce_limits();
        }
    }

    /// Returns true if only memory caching is supported.
    fn is_memory_only(&self) -> bool {
        self.max_on_disk_limit == 0
    }

    /// Inserts a new (hash -> state) pair
    ///
    /// When the configured limit for the number of states that can be stored in memory is reached,
    /// the oldest state is removed.
    ///
    /// Since we keep a snapshot of the entire state as history, the size of the state will increase
    /// with the transactions processed. To counter this, we gradually decrease the cache limit with
    /// the number of states/blocks until we reached the `min_limit`.
    ///
    /// When a state that was previously written to disk is requested, it is simply read from disk.
    pub fn insert(&mut self, hash: B256, state: StateDb) {
        if !self.is_memory_only() && self.present.len() >= self.in_memory_limit {
            // once we hit the max limit we gradually decrease it
            self.in_memory_limit =
                self.in_memory_limit.saturating_sub(1).max(self.min_in_memory_limit);
        }

        self.enforce_limits();

        self.states.insert(hash, state);
        self.present.push_back(hash);
    }

    /// Enforces configured limits
    fn enforce_limits(&mut self) {
        // enforce memory limits: evict the oldest states until we're back under the limit
        while self.present.len() >= self.in_memory_limit {
            // evict the oldest block
            if let Some((hash, mut state)) = self
                .present
                .pop_front()
                .and_then(|hash| self.states.remove(&hash).map(|state| (hash, state)))
            {
                // only write to disk if supported; in memory-only mode the evicted
                // state is simply dropped
                if !self.is_memory_only() {
                    let state_snapshot = state.0.clear_into_state_snapshot();
                    if self.disk_cache.write(hash, &state_snapshot) {
                        // Write succeeded, move state to on-disk tracking
                        self.on_disk_states.insert(hash, state);
                        self.oldest_on_disk.push_back(hash);
                    } else {
                        // Write failed, restore state to memory to avoid data loss
                        state.init_from_state_snapshot(state_snapshot);
                        self.states.insert(hash, state);
                        self.present.push_front(hash);
                        // Increase limit temporarily to prevent infinite retry loop
                        self.in_memory_limit = self.in_memory_limit.saturating_add(1);
                        break;
                    }
                }
            }
        }

        // enforce on disk limit and purge the oldest state cached on disk
        while !self.is_memory_only() && self.oldest_on_disk.len() >= self.max_on_disk_limit {
            // evict the oldest block
            if let Some(hash) = self.oldest_on_disk.pop_front() {
                self.on_disk_states.remove(&hash);
                self.disk_cache.remove(hash);
            }
        }
    }

    /// Returns the in-memory state for the given `hash` if present
    pub fn get_state(&self, hash: &B256) -> Option<&StateDb> {
        self.states.get(hash)
    }

    /// Returns on-disk state for the given `hash` if present
    ///
    /// The snapshot is re-read from disk and re-initialized into the tracked
    /// `StateDb` shell on every call.
    pub fn get_on_disk_state(&mut self, hash: &B256) -> Option<&StateDb> {
        if let Some(state) = self.on_disk_states.get_mut(hash)
            && let Some(cached) = self.disk_cache.read(*hash)
        {
            state.init_from_state_snapshot(cached);
            return Some(state);
        }

        None
    }

    /// Sets the maximum number of states we keep in memory
    pub fn set_cache_limit(&mut self, limit: usize) {
        self.in_memory_limit = limit;
    }

    /// Clears all entries, including the state snapshots cached on disk
    pub fn clear(&mut self) {
        self.states.clear();
        self.on_disk_states.clear();
        self.present.clear();
        for on_disk in std::mem::take(&mut self.oldest_on_disk) {
            self.disk_cache.remove(on_disk)
        }
    }

    /// Serialize all states to a list of serializable historical states
    ///
    /// Includes both the in-memory states and the snapshots cached on disk.
    pub fn serialized_states(&mut self) -> SerializableHistoricalStates {
        // Get in-memory states
        let mut states = self
            .states
            .iter_mut()
            .map(|(hash, state)| (*hash, state.serialize_state()))
            .collect::<Vec<_>>();

        // Get on-disk state snapshots
        self.on_disk_states.iter().for_each(|(hash, _)| {
            if let Some(state_snapshot) = self.disk_cache.read(*hash) {
                states.push((*hash, state_snapshot));
            }
        });

        SerializableHistoricalStates::new(states)
    }

    /// Load states from serialized data
    ///
    /// Note: `insert` may immediately evict older entries to disk if the
    /// configured limits are exceeded while loading.
    pub fn load_states(&mut self, states: SerializableHistoricalStates) {
        for (hash, state_snapshot) in states {
            let mut state_db = StateDb::new(MemDb::default());
            state_db.init_from_state_snapshot(state_snapshot);
            self.insert(hash, state_db);
        }
    }
}
240
241impl fmt::Debug for InMemoryBlockStates {
242    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
243        f.debug_struct("InMemoryBlockStates")
244            .field("in_memory_limit", &self.in_memory_limit)
245            .field("min_in_memory_limit", &self.min_in_memory_limit)
246            .field("max_on_disk_limit", &self.max_on_disk_limit)
247            .field("oldest_on_disk", &self.oldest_on_disk)
248            .field("present", &self.present)
249            .finish_non_exhaustive()
250    }
251}
252
253impl Default for InMemoryBlockStates {
254    fn default() -> Self {
255        // enough in memory to store `DEFAULT_HISTORY_LIMIT` blocks in memory
256        Self::new(DEFAULT_HISTORY_LIMIT, MAX_ON_DISK_HISTORY_LIMIT)
257    }
258}
259
/// Stores the blockchain data (blocks, transactions)
#[derive(Clone, Debug)]
pub struct BlockchainStorage<N: Network> {
    /// all stored blocks (block hash -> block)
    pub blocks: B256HashMap<Block>,
    /// mapping from block number -> block hash
    pub hashes: HashMap<u64, B256>,
    /// The current best hash
    pub best_hash: B256,
    /// The current best block number
    pub best_number: u64,
    /// genesis hash of the chain
    ///
    /// Stays at the zero default for forked storage until block 0 is loaded.
    pub genesis_hash: B256,
    /// Mapping from the transaction hash to a tuple containing the transaction as well as the
    /// transaction receipt
    pub transactions: B256HashMap<MinedTransaction<N>>,
    /// The total difficulty of the chain up to and including the best block
    pub total_difficulty: U256,
}
279
280impl<N: Network> BlockchainStorage<N> {
281    /// Creates a new storage with a genesis block
282    pub fn new(
283        env: &Env,
284        spec_id: SpecId,
285        base_fee: Option<u64>,
286        timestamp: u64,
287        genesis_number: u64,
288    ) -> Self {
289        let is_shanghai = spec_id >= SpecId::SHANGHAI;
290        let is_cancun = spec_id >= SpecId::CANCUN;
291        let is_prague = spec_id >= SpecId::PRAGUE;
292
293        // create a dummy genesis block
294        let header = Header {
295            timestamp,
296            base_fee_per_gas: base_fee,
297            gas_limit: env.evm_env.block_env.gas_limit,
298            beneficiary: env.evm_env.block_env.beneficiary,
299            difficulty: env.evm_env.block_env.difficulty,
300            blob_gas_used: env.evm_env.block_env.blob_excess_gas_and_price.as_ref().map(|_| 0),
301            excess_blob_gas: env.evm_env.block_env.blob_excess_gas(),
302            number: genesis_number,
303            parent_beacon_block_root: is_cancun.then_some(Default::default()),
304            withdrawals_root: is_shanghai.then_some(EMPTY_WITHDRAWALS),
305            requests_hash: is_prague.then_some(EMPTY_REQUESTS_HASH),
306            ..Default::default()
307        };
308        let block =
309            create_block(header, Vec::<MaybeImpersonatedTransaction<FoundryTxEnvelope>>::new());
310        let genesis_hash = block.header.hash_slow();
311        let best_hash = genesis_hash;
312        let best_number = genesis_number;
313
314        let mut blocks = B256HashMap::default();
315        blocks.insert(genesis_hash, block);
316
317        let mut hashes = HashMap::default();
318        hashes.insert(best_number, genesis_hash);
319        Self {
320            blocks,
321            hashes,
322            best_hash,
323            best_number,
324            genesis_hash,
325            transactions: Default::default(),
326            total_difficulty: Default::default(),
327        }
328    }
329
330    pub fn forked(block_number: u64, block_hash: B256, total_difficulty: U256) -> Self {
331        let mut hashes = HashMap::default();
332        hashes.insert(block_number, block_hash);
333
334        Self {
335            blocks: B256HashMap::default(),
336            hashes,
337            best_hash: block_hash,
338            best_number: block_number,
339            genesis_hash: Default::default(),
340            transactions: Default::default(),
341            total_difficulty,
342        }
343    }
344
345    /// Unwind the chain state back to the given block in storage.
346    ///
347    /// The block identified by `block_number` and `block_hash` is __non-inclusive__, i.e. it will
348    /// remain in the state.
349    pub fn unwind_to(&mut self, block_number: u64, block_hash: B256) -> Vec<Block> {
350        let mut removed = vec![];
351        let best_num: u64 = self.best_number;
352        for i in (block_number + 1)..=best_num {
353            if let Some(hash) = self.hashes.get(&i).copied() {
354                // First remove the block's transactions while the mappings still exist
355                self.remove_block_transactions_by_number(i);
356
357                // Now remove the block from storage (may already be empty of txs) and drop mapping
358                if let Some(block) = self.blocks.remove(&hash) {
359                    removed.push(block);
360                }
361                self.hashes.remove(&i);
362            }
363        }
364        self.best_hash = block_hash;
365        self.best_number = block_number;
366        removed
367    }
368
369    pub fn empty() -> Self {
370        Self {
371            blocks: Default::default(),
372            hashes: Default::default(),
373            best_hash: Default::default(),
374            best_number: Default::default(),
375            genesis_hash: Default::default(),
376            transactions: Default::default(),
377            total_difficulty: Default::default(),
378        }
379    }
380
381    /// Removes all stored transactions for the given block number
382    pub fn remove_block_transactions_by_number(&mut self, num: u64) {
383        if let Some(hash) = self.hashes.get(&num).copied() {
384            self.remove_block_transactions(hash);
385        }
386    }
387
388    /// Removes all stored transactions for the given block hash
389    pub fn remove_block_transactions(&mut self, block_hash: B256) {
390        if let Some(block) = self.blocks.get_mut(&block_hash) {
391            for tx in &block.body.transactions {
392                self.transactions.remove(&tx.hash());
393            }
394            block.body.transactions.clear();
395        }
396    }
397
398    /// Serialize all blocks in storage
399    pub fn serialized_blocks(&self) -> Vec<SerializableBlock> {
400        self.blocks.values().map(|block| block.clone().into()).collect()
401    }
402
403    /// Deserialize and add all blocks data to the backend storage
404    pub fn load_blocks(&mut self, serializable_blocks: Vec<SerializableBlock>) {
405        for serializable_block in &serializable_blocks {
406            let block: Block = serializable_block.clone().into();
407            let block_hash = block.header.hash_slow();
408            let block_number = block.header.number();
409            self.blocks.insert(block_hash, block);
410            self.hashes.insert(block_number, block_hash);
411
412            // Update genesis_hash if we are loading block 0, so that Finalized/Safe/Earliest
413            // block tag lookups return the correct hash.
414            // See: https://github.com/foundry-rs/foundry/issues/12645
415            if block_number == 0 {
416                self.genesis_hash = block_hash;
417            }
418        }
419    }
420
421    /// Returns the hash for [BlockNumberOrTag]
422    pub fn hash(&self, number: BlockNumberOrTag) -> Option<B256> {
423        let slots_in_an_epoch = 32;
424        match number {
425            BlockNumberOrTag::Latest => Some(self.best_hash),
426            BlockNumberOrTag::Earliest => Some(self.genesis_hash),
427            BlockNumberOrTag::Pending => None,
428            BlockNumberOrTag::Number(num) => self.hashes.get(&num).copied(),
429            BlockNumberOrTag::Safe => {
430                if self.best_number > (slots_in_an_epoch) {
431                    self.hashes.get(&(self.best_number - (slots_in_an_epoch))).copied()
432                } else {
433                    Some(self.genesis_hash) // treat the genesis block as safe "by definition"
434                }
435            }
436            BlockNumberOrTag::Finalized => {
437                if self.best_number > (slots_in_an_epoch * 2) {
438                    self.hashes.get(&(self.best_number - (slots_in_an_epoch * 2))).copied()
439                } else {
440                    Some(self.genesis_hash)
441                }
442            }
443        }
444    }
445}
446
447impl BlockchainStorage<FoundryNetwork> {
448    pub fn serialized_transactions(&self) -> Vec<SerializableTransaction> {
449        self.transactions
450            .values()
451            .map(|tx: &MinedTransaction<FoundryNetwork>| tx.clone().into())
452            .collect()
453    }
454
455    /// Deserialize and add all transactions data to the backend storage
456    pub fn load_transactions(&mut self, serializable_transactions: Vec<SerializableTransaction>) {
457        for serializable_transaction in &serializable_transactions {
458            let transaction: MinedTransaction<FoundryNetwork> =
459                serializable_transaction.clone().into();
460            self.transactions.insert(transaction.info.transaction_hash, transaction);
461        }
462    }
463}
464
/// A simple in-memory blockchain
#[derive(Clone, Debug)]
pub struct Blockchain<N: Network> {
    /// underlying storage that supports concurrent reads
    ///
    /// Cloning a `Blockchain` is cheap: clones share this same storage.
    pub storage: Arc<RwLock<BlockchainStorage<N>>>,
}
471
472impl<N: Network> Blockchain<N> {
473    /// Creates a new storage with a genesis block
474    pub fn new(
475        env: &Env,
476        spec_id: SpecId,
477        base_fee: Option<u64>,
478        timestamp: u64,
479        genesis_number: u64,
480    ) -> Self {
481        Self {
482            storage: Arc::new(RwLock::new(BlockchainStorage::new(
483                env,
484                spec_id,
485                base_fee,
486                timestamp,
487                genesis_number,
488            ))),
489        }
490    }
491
492    pub fn forked(block_number: u64, block_hash: B256, total_difficulty: U256) -> Self {
493        Self {
494            storage: Arc::new(RwLock::new(BlockchainStorage::forked(
495                block_number,
496                block_hash,
497                total_difficulty,
498            ))),
499        }
500    }
501
502    /// returns the header hash of given block
503    pub fn hash(&self, id: BlockId) -> Option<B256> {
504        match id {
505            BlockId::Hash(h) => Some(h.block_hash),
506            BlockId::Number(num) => self.storage.read().hash(num),
507        }
508    }
509
510    pub fn get_block_by_hash(&self, hash: &B256) -> Option<Block> {
511        self.storage.read().blocks.get(hash).cloned()
512    }
513
514    pub fn get_transaction_by_hash(&self, hash: &B256) -> Option<MinedTransaction<N>> {
515        self.storage.read().transactions.get(hash).cloned()
516    }
517
518    /// Returns the total number of blocks
519    pub fn blocks_count(&self) -> usize {
520        self.storage.read().blocks.len()
521    }
522}
523
/// Represents the outcome of mining a new block
pub struct MinedBlockOutcome<T> {
    /// The number of the block that was mined
    pub block_number: u64,
    /// All transactions included in the block
    pub included: Vec<Arc<PoolTransaction<T>>>,
    /// All transactions that were attempted to be included but were invalid at the time of
    /// execution
    pub invalid: Vec<Arc<PoolTransaction<T>>>,
}
534
535impl<T> Clone for MinedBlockOutcome<T> {
536    fn clone(&self) -> Self {
537        Self {
538            block_number: self.block_number,
539            included: self.included.clone(),
540            invalid: self.invalid.clone(),
541        }
542    }
543}
544
545impl<T> fmt::Debug for MinedBlockOutcome<T> {
546    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
547        f.debug_struct("MinedBlockOutcome")
548            .field("block_number", &self.block_number)
549            .field("included", &self.included.len())
550            .field("invalid", &self.invalid.len())
551            .finish()
552    }
553}
554
/// Container type for a mined transaction
#[derive(Clone, Debug)]
pub struct MinedTransaction<N: Network> {
    /// Transaction metadata (hash, index, recorded call traces, ...)
    pub info: TransactionInfo,
    /// The network-specific receipt produced when the transaction was executed
    pub receipt: N::ReceiptEnvelope,
    /// Hash of the block this transaction was included in
    pub block_hash: B256,
    /// Number of the block this transaction was included in
    pub block_number: u64,
}
563
564impl<N: Network> MinedTransaction<N> {
565    /// Returns the traces of the transaction for `trace_transaction`
566    pub fn parity_traces(&self) -> Vec<LocalizedTransactionTrace> {
567        ParityTraceBuilder::new(
568            self.info.traces.clone(),
569            None,
570            TracingInspectorConfig::default_parity(),
571        )
572        .into_localized_transaction_traces(RethTransactionInfo {
573            hash: Some(self.info.transaction_hash),
574            index: Some(self.info.transaction_index),
575            block_hash: Some(self.block_hash),
576            block_number: Some(self.block_number),
577            base_fee: None,
578            block_timestamp: None,
579        })
580    }
581
582    pub fn ots_internal_operations(&self) -> Vec<InternalOperation> {
583        self.info
584            .traces
585            .iter()
586            .filter_map(|node| {
587                let r#type = match node.trace.kind {
588                    _ if node.is_selfdestruct() => OperationType::OpSelfDestruct,
589                    CallKind::Call if !node.trace.value.is_zero() => OperationType::OpTransfer,
590                    CallKind::Create => OperationType::OpCreate,
591                    CallKind::Create2 => OperationType::OpCreate2,
592                    _ => return None,
593                };
594                let mut from = node.trace.caller;
595                let mut to = node.trace.address;
596                let mut value = node.trace.value;
597                if node.is_selfdestruct() {
598                    from = node.trace.address;
599                    to = node.trace.selfdestruct_refund_target.unwrap_or_default();
600                    value = node.trace.selfdestruct_transferred_value.unwrap_or_default();
601                }
602                Some(InternalOperation { r#type, from, to, value })
603            })
604            .collect()
605    }
606}
607
/// Intermediary Anvil representation of a receipt
#[derive(Clone, Debug)]
pub struct MinedTransactionReceipt<N: Network> {
    /// The actual json rpc receipt object
    pub inner: N::ReceiptResponse,
    /// Output data for the transaction
    // NOTE(review): presumably the call return data / deployed bytecode — confirm
    // at the call site that populates this.
    pub out: Option<Bytes>,
}
616
#[cfg(test)]
mod tests {
    use super::*;
    use crate::eth::backend::db::Db;
    use alloy_primitives::{Address, hex};
    use alloy_rlp::Decodable;
    use revm::{database::DatabaseRef, state::AccountInfo};

    // A block time of <= 2s should bump the in-memory limit to 3x the default.
    #[test]
    fn test_interval_update() {
        let mut storage = InMemoryBlockStates::default();
        storage.update_interval_mine_block_time(Duration::from_secs(1));
        assert_eq!(storage.in_memory_limit, DEFAULT_HISTORY_LIMIT * 3);
    }

    // Constructor invariants: `min_in_memory_limit` is clamped to the configured
    // limit, and `on_disk_limit == 0` means memory-only operation.
    #[test]
    fn test_init_state_limits() {
        let mut storage = InMemoryBlockStates::default();
        assert_eq!(storage.in_memory_limit, DEFAULT_HISTORY_LIMIT);
        assert_eq!(storage.min_in_memory_limit, MIN_HISTORY_LIMIT);
        assert_eq!(storage.max_on_disk_limit, MAX_ON_DISK_HISTORY_LIMIT);

        storage = storage.memory_only();
        assert!(storage.is_memory_only());

        storage = InMemoryBlockStates::new(1, 0);
        assert!(storage.is_memory_only());
        assert_eq!(storage.in_memory_limit, 1);
        assert_eq!(storage.min_in_memory_limit, 1);
        assert_eq!(storage.max_on_disk_limit, 0);

        storage = InMemoryBlockStates::new(1, 2);
        assert!(!storage.is_memory_only());
        assert_eq!(storage.in_memory_limit, 1);
        assert_eq!(storage.min_in_memory_limit, 1);
        assert_eq!(storage.max_on_disk_limit, 2);
    }

    // With an in-memory limit of 1, inserting a second state must evict the first
    // one to disk, from where it can still be read back.
    #[tokio::test(flavor = "multi_thread")]
    async fn can_read_write_cached_state() {
        let mut storage = InMemoryBlockStates::new(1, MAX_ON_DISK_HISTORY_LIMIT);
        let one = B256::from(U256::from(1));
        let two = B256::from(U256::from(2));

        let mut state = MemDb::default();
        let addr = Address::random();
        let info = AccountInfo::from_balance(U256::from(1337));
        state.insert_account(addr, info);
        storage.insert(one, StateDb::new(state));
        storage.insert(two, StateDb::new(MemDb::default()));

        // wait for files to be flushed
        tokio::time::sleep(std::time::Duration::from_secs(1)).await;

        assert_eq!(storage.on_disk_states.len(), 1);
        assert!(storage.on_disk_states.contains_key(&one));

        let loaded = storage.get_on_disk_state(&one).unwrap();

        let acc = loaded.basic_ref(addr).unwrap().unwrap();
        assert_eq!(acc.balance, U256::from(1337u64));
    }

    // Inserting more states than the limit should gradually shrink the cache down
    // to `min_in_memory_limit`, moving the overflow to disk, while every state
    // (in memory or on disk) remains retrievable.
    #[tokio::test(flavor = "multi_thread")]
    async fn can_decrease_state_cache_size() {
        let limit = 15;
        let mut storage = InMemoryBlockStates::new(limit, MAX_ON_DISK_HISTORY_LIMIT);

        let num_states = 30;
        for idx in 0..num_states {
            let mut state = MemDb::default();
            let hash = B256::from(U256::from(idx));
            let addr = Address::from_word(hash);
            let balance = (idx * 2) as u64;
            let info = AccountInfo::from_balance(U256::from(balance));
            state.insert_account(addr, info);
            storage.insert(hash, StateDb::new(state));
        }

        // wait for files to be flushed
        tokio::time::sleep(std::time::Duration::from_secs(1)).await;

        let on_disk_states_len = num_states - storage.min_in_memory_limit;

        assert_eq!(storage.on_disk_states.len(), on_disk_states_len);
        assert_eq!(storage.present.len(), storage.min_in_memory_limit);

        for idx in 0..num_states {
            let hash = B256::from(U256::from(idx));
            let addr = Address::from_word(hash);

            // the oldest states were evicted to disk; the newest stay in memory
            let loaded = if idx < on_disk_states_len {
                storage.get_on_disk_state(&hash).unwrap()
            } else {
                storage.get_state(&hash).unwrap()
            };

            let acc = loaded.basic_ref(addr).unwrap().unwrap();
            let balance = (idx * 2) as u64;
            assert_eq!(acc.balance, U256::from(balance));
        }
    }

    // verifies that blocks and transactions in BlockchainStorage remain the same when dumped and
    // reloaded
    #[test]
    fn test_storage_dump_reload_cycle() {
        let mut dump_storage = BlockchainStorage::<FoundryNetwork>::empty();

        let header = Header { gas_limit: 123456, ..Default::default() };
        // raw RLP-encoded transaction used as a fixture
        let bytes_first = &mut &hex::decode("f86b02843b9aca00830186a094d3e8763675e4c425df46cc3b5c0f6cbdac39604687038d7ea4c68000802ba00eb96ca19e8a77102767a41fc85a36afd5c61ccb09911cec5d3e86e193d9c5aea03a456401896b1b6055311536bf00a718568c744d8c1f9df59879e8350220ca18").unwrap()[..];
        let tx: MaybeImpersonatedTransaction<FoundryTxEnvelope> =
            FoundryTxEnvelope::decode(&mut &bytes_first[..]).unwrap().into();
        let block = create_block(header.clone(), vec![tx.clone()]);
        let block_hash = block.header.hash_slow();
        dump_storage.blocks.insert(block_hash, block);

        let serialized_blocks = dump_storage.serialized_blocks();
        let serialized_transactions = dump_storage.serialized_transactions();

        let mut load_storage = BlockchainStorage::<FoundryNetwork>::empty();

        load_storage.load_blocks(serialized_blocks);
        load_storage.load_transactions(serialized_transactions);

        let loaded_block = load_storage.blocks.get(&block_hash).unwrap();
        assert_eq!(loaded_block.header.gas_limit(), header.gas_limit());
        let loaded_tx = loaded_block.body.transactions.first().unwrap();
        assert_eq!(loaded_tx, &tx);
    }
}