// anvil/eth/backend/mem/storage.rs
1//! In-memory blockchain storage
2use crate::eth::{
3    backend::{
4        db::{
5            MaybeFullDatabase, SerializableBlock, SerializableHistoricalStates,
6            SerializableTransaction, StateDb,
7        },
8        mem::cache::DiskStateCache,
9    },
10    pool::transactions::PoolTransaction,
11};
12use alloy_consensus::{BlockHeader, Header, constants::EMPTY_WITHDRAWALS};
13use alloy_eips::eip7685::EMPTY_REQUESTS_HASH;
14use alloy_evm::EvmEnv;
15use alloy_network::Network;
16use alloy_primitives::{
17    B256, Bytes, U256,
18    map::{B256HashMap, HashMap},
19};
20use alloy_rpc_types::{
21    BlockId, BlockNumberOrTag, TransactionInfo as RethTransactionInfo,
22    trace::{
23        otterscan::{InternalOperation, OperationType},
24        parity::LocalizedTransactionTrace,
25    },
26};
27use anvil_core::eth::{
28    block::{Block, create_block},
29    transaction::{MaybeImpersonatedTransaction, TransactionInfo},
30};
31use foundry_evm::{
32    backend::MemDb,
33    traces::{CallKind, ParityTraceBuilder, TracingInspectorConfig},
34};
35#[cfg(test)]
36use foundry_primitives::FoundryNetwork;
37use foundry_primitives::{FoundryReceiptEnvelope, FoundryTxEnvelope};
38use parking_lot::RwLock;
39use revm::{context::Block as RevmBlock, primitives::hardfork::SpecId};
40use std::{collections::VecDeque, fmt, path::PathBuf, sync::Arc, time::Duration};
41// use yansi::Paint;
42
// === various limits in number of blocks ===

/// Default number of recent block states kept in memory.
pub const DEFAULT_HISTORY_LIMIT: usize = 500;
/// Floor the in-memory limit may gradually shrink down to (see `InMemoryBlockStates::insert`).
const MIN_HISTORY_LIMIT: usize = 10;
// 1hr of up-time at lowest 1s interval
const MAX_ON_DISK_HISTORY_LIMIT: usize = 3_600;
49
/// Represents the complete state of a single block
pub struct InMemoryBlockStates {
    /// The states at a certain block
    states: B256HashMap<StateDb>,
    /// states whose data has been moved to disk
    on_disk_states: B256HashMap<StateDb>,
    /// How many states to store at most
    in_memory_limit: usize,
    /// minimum amount of states we keep in memory
    min_in_memory_limit: usize,
    /// maximum amount of states we keep on disk
    ///
    /// Limiting the states will prevent disk blow up, especially in interval mining mode
    max_on_disk_limit: usize,
    /// the oldest states written to disk, in insertion (eviction) order
    oldest_on_disk: VecDeque<B256>,
    /// all states present in memory, used to enforce `in_memory_limit`
    present: VecDeque<B256>,
    /// Stores old states on disk
    disk_cache: DiskStateCache,
}
71
impl InMemoryBlockStates {
    /// Creates a new instance with limited slots
    ///
    /// `in_memory_limit` caps the number of states kept in memory, `on_disk_limit`
    /// caps the number of evicted states cached on disk.
    pub fn new(in_memory_limit: usize, on_disk_limit: usize) -> Self {
        Self {
            states: Default::default(),
            on_disk_states: Default::default(),
            in_memory_limit,
            // the shrink floor can never exceed the configured limit itself
            min_in_memory_limit: in_memory_limit.min(MIN_HISTORY_LIMIT),
            max_on_disk_limit: on_disk_limit,
            oldest_on_disk: Default::default(),
            present: Default::default(),
            disk_cache: Default::default(),
        }
    }

    /// Configures no disk caching: evicted states are dropped instead of written to disk.
    pub const fn memory_only(mut self) -> Self {
        self.max_on_disk_limit = 0;
        self
    }

    /// Configures the path on disk where the states will be cached.
    pub fn disk_path(mut self, path: PathBuf) -> Self {
        self.disk_cache = self.disk_cache.with_path(path);
        self
    }

    /// This modifies the `limit` of what to keep stored in memory.
    ///
    /// This will ensure the new limit adjusts based on the block time.
    /// The lowest blocktime is 1s which should increase the limit slightly
    pub fn update_interval_mine_block_time(&mut self, block_time: Duration) {
        let block_time = block_time.as_secs();
        // for block times lower than 2s we increase the mem limit since we're mining _small_ blocks
        // very fast
        // this will gradually be decreased once the max limit was reached
        if block_time <= 2 {
            self.in_memory_limit = DEFAULT_HISTORY_LIMIT * 3;
            self.enforce_limits();
        }
    }

    /// Returns true if only memory caching is supported.
    const fn is_memory_only(&self) -> bool {
        self.max_on_disk_limit == 0
    }

    /// Inserts a new (hash -> state) pair
    ///
    /// When the configured limit for the number of states that can be stored in memory is reached,
    /// the oldest state is removed.
    ///
    /// Since we keep a snapshot of the entire state as history, the size of the state will increase
    /// with the transactions processed. To counter this, we gradually decrease the cache limit with
    /// the number of states/blocks until we reached the `min_limit`.
    ///
    /// When a state that was previously written to disk is requested, it is simply read from disk.
    pub fn insert(&mut self, hash: B256, state: StateDb) {
        if !self.is_memory_only() && self.present.len() >= self.in_memory_limit {
            // once we hit the max limit we gradually decrease it
            self.in_memory_limit =
                self.in_memory_limit.saturating_sub(1).max(self.min_in_memory_limit);
        }

        // make room for the new entry before inserting it
        self.enforce_limits();

        self.states.insert(hash, state);
        self.present.push_back(hash);
    }

    /// Enforces configured limits
    ///
    /// Evicts the oldest in-memory states until `in_memory_limit` is respected, then
    /// purges the oldest on-disk states until `max_on_disk_limit` is respected.
    fn enforce_limits(&mut self) {
        // enforce memory limits
        while self.present.len() >= self.in_memory_limit {
            // evict the oldest block
            if let Some((hash, mut state)) = self
                .present
                .pop_front()
                .and_then(|hash| self.states.remove(&hash).map(|state| (hash, state)))
            {
                // only write to disk if supported
                if !self.is_memory_only() {
                    let state_snapshot = state.0.clear_into_state_snapshot();
                    if self.disk_cache.write(hash, &state_snapshot) {
                        // Write succeeded, move state to on-disk tracking
                        self.on_disk_states.insert(hash, state);
                        self.oldest_on_disk.push_back(hash);
                    } else {
                        // Write failed, restore state to memory to avoid data loss
                        state.init_from_state_snapshot(state_snapshot);
                        self.states.insert(hash, state);
                        self.present.push_front(hash);
                        // Increase limit temporarily to prevent infinite retry loop
                        self.in_memory_limit = self.in_memory_limit.saturating_add(1);
                        break;
                    }
                }
            }
        }

        // enforce on disk limit and purge the oldest state cached on disk
        while !self.is_memory_only() && self.oldest_on_disk.len() >= self.max_on_disk_limit {
            // evict the oldest block
            if let Some(hash) = self.oldest_on_disk.pop_front() {
                self.on_disk_states.remove(&hash);
                self.disk_cache.remove(hash);
            }
        }
    }

    /// Returns the in-memory state for the given `hash` if present
    pub fn get_state(&self, hash: &B256) -> Option<&StateDb> {
        self.states.get(hash)
    }

    /// Returns on-disk state for the given `hash` if present
    ///
    /// Re-populates the tracked `StateDb` from the snapshot stored on disk before
    /// returning it.
    pub fn get_on_disk_state(&mut self, hash: &B256) -> Option<&StateDb> {
        if let Some(state) = self.on_disk_states.get_mut(hash)
            && let Some(cached) = self.disk_cache.read(*hash)
        {
            state.init_from_state_snapshot(cached);
            return Some(state);
        }

        None
    }

    /// Sets the maximum number of states we keep in memory
    pub const fn set_cache_limit(&mut self, limit: usize) {
        self.in_memory_limit = limit;
    }

    /// Clears all entries, including the on-disk cache files
    pub fn clear(&mut self) {
        self.states.clear();
        self.on_disk_states.clear();
        self.present.clear();
        for on_disk in std::mem::take(&mut self.oldest_on_disk) {
            self.disk_cache.remove(on_disk)
        }
    }

    /// Removes states for the given block hashes.
    ///
    /// This is used during chain rollback to clean up states for blocks that are no longer part
    /// of the canonical chain.
    pub fn remove_block_states(&mut self, hashes: &[B256]) {
        for hash in hashes {
            self.states.remove(hash);
            self.on_disk_states.remove(hash);
            self.disk_cache.remove(*hash);
        }
        // drop the bookkeeping entries so the limits stay accurate
        self.present.retain(|h| !hashes.contains(h));
        self.oldest_on_disk.retain(|h| !hashes.contains(h));
    }

    /// Serialize all states to a list of serializable historical states
    pub fn serialized_states(&mut self) -> SerializableHistoricalStates {
        // Get in-memory states
        let mut states = self
            .states
            .iter_mut()
            .map(|(hash, state)| (*hash, state.serialize_state()))
            .collect::<Vec<_>>();

        // Get on-disk state snapshots
        self.on_disk_states.iter().for_each(|(hash, _)| {
            if let Some(state_snapshot) = self.disk_cache.read(*hash) {
                states.push((*hash, state_snapshot));
            }
        });

        SerializableHistoricalStates::new(states)
    }

    /// Load states from serialized data
    pub fn load_states(&mut self, states: SerializableHistoricalStates) {
        for (hash, state_snapshot) in states {
            let mut state_db = StateDb::new(MemDb::default());
            state_db.init_from_state_snapshot(state_snapshot);
            self.insert(hash, state_db);
        }
    }
}
256
257impl fmt::Debug for InMemoryBlockStates {
258    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
259        f.debug_struct("InMemoryBlockStates")
260            .field("in_memory_limit", &self.in_memory_limit)
261            .field("min_in_memory_limit", &self.min_in_memory_limit)
262            .field("max_on_disk_limit", &self.max_on_disk_limit)
263            .field("oldest_on_disk", &self.oldest_on_disk)
264            .field("present", &self.present)
265            .finish_non_exhaustive()
266    }
267}
268
impl Default for InMemoryBlockStates {
    fn default() -> Self {
        // enough in memory to store `DEFAULT_HISTORY_LIMIT` blocks in memory,
        // and up to `MAX_ON_DISK_HISTORY_LIMIT` evicted states on disk
        Self::new(DEFAULT_HISTORY_LIMIT, MAX_ON_DISK_HISTORY_LIMIT)
    }
}
275
/// Stores the blockchain data (blocks, transactions)
#[derive(Clone, Debug)]
pub struct BlockchainStorage<N: Network> {
    /// all stored blocks (block hash -> block)
    pub blocks: B256HashMap<Block>,
    /// mapping from block number -> block hash
    pub hashes: HashMap<u64, B256>,
    /// The current best hash
    pub best_hash: B256,
    /// The current best block number
    pub best_number: u64,
    /// genesis hash of the chain
    pub genesis_hash: B256,
    /// Mapping from the transaction hash to the mined transaction, which bundles the
    /// transaction info together with its receipt
    pub transactions: B256HashMap<MinedTransaction<N>>,
    /// The total difficulty of the chain until this block
    pub total_difficulty: U256,
}
295
296impl<N: Network> BlockchainStorage<N> {
297    /// Creates a new storage with a genesis block
298    pub fn new(
299        evm_env: &EvmEnv,
300        base_fee: Option<u64>,
301        timestamp: u64,
302        genesis_number: u64,
303    ) -> Self {
304        let is_shanghai = *evm_env.spec_id() >= SpecId::SHANGHAI;
305        let is_cancun = *evm_env.spec_id() >= SpecId::CANCUN;
306        let is_prague = *evm_env.spec_id() >= SpecId::PRAGUE;
307
308        // create a dummy genesis block
309        let header = Header {
310            timestamp,
311            base_fee_per_gas: base_fee,
312            gas_limit: evm_env.block_env.gas_limit,
313            beneficiary: evm_env.block_env.beneficiary,
314            difficulty: evm_env.block_env.difficulty,
315            blob_gas_used: evm_env.block_env.blob_excess_gas_and_price.as_ref().map(|_| 0),
316            excess_blob_gas: evm_env.block_env.blob_excess_gas(),
317            number: genesis_number,
318            parent_beacon_block_root: is_cancun.then_some(Default::default()),
319            withdrawals_root: is_shanghai.then_some(EMPTY_WITHDRAWALS),
320            requests_hash: is_prague.then_some(EMPTY_REQUESTS_HASH),
321            ..Default::default()
322        };
323        let block =
324            create_block(header, Vec::<MaybeImpersonatedTransaction<FoundryTxEnvelope>>::new());
325        let genesis_hash = block.header.hash_slow();
326        let best_hash = genesis_hash;
327        let best_number = genesis_number;
328
329        let mut blocks = B256HashMap::default();
330        blocks.insert(genesis_hash, block);
331
332        let mut hashes = HashMap::default();
333        hashes.insert(best_number, genesis_hash);
334        Self {
335            blocks,
336            hashes,
337            best_hash,
338            best_number,
339            genesis_hash,
340            transactions: Default::default(),
341            total_difficulty: Default::default(),
342        }
343    }
344
345    pub fn forked(block_number: u64, block_hash: B256, total_difficulty: U256) -> Self {
346        let mut hashes = HashMap::default();
347        hashes.insert(block_number, block_hash);
348
349        Self {
350            blocks: B256HashMap::default(),
351            hashes,
352            best_hash: block_hash,
353            best_number: block_number,
354            genesis_hash: Default::default(),
355            transactions: Default::default(),
356            total_difficulty,
357        }
358    }
359
360    /// Unwind the chain state back to the given block in storage.
361    ///
362    /// The block identified by `block_number` and `block_hash` is __non-inclusive__, i.e. it will
363    /// remain in the state.
364    pub fn unwind_to(&mut self, block_number: u64, block_hash: B256) -> Vec<Block> {
365        let mut removed = vec![];
366        let best_num: u64 = self.best_number;
367        for i in (block_number + 1)..=best_num {
368            if let Some(hash) = self.hashes.get(&i).copied() {
369                // First remove the block's transactions while the mappings still exist
370                self.remove_block_transactions_by_number(i);
371
372                // Now remove the block from storage (may already be empty of txs) and drop mapping
373                if let Some(block) = self.blocks.remove(&hash) {
374                    removed.push(block);
375                }
376                self.hashes.remove(&i);
377            }
378        }
379        self.best_hash = block_hash;
380        self.best_number = block_number;
381        removed
382    }
383
384    pub fn empty() -> Self {
385        Self {
386            blocks: Default::default(),
387            hashes: Default::default(),
388            best_hash: Default::default(),
389            best_number: Default::default(),
390            genesis_hash: Default::default(),
391            transactions: Default::default(),
392            total_difficulty: Default::default(),
393        }
394    }
395
396    /// Removes all stored transactions for the given block number
397    pub fn remove_block_transactions_by_number(&mut self, num: u64) {
398        if let Some(hash) = self.hashes.get(&num).copied() {
399            self.remove_block_transactions(hash);
400        }
401    }
402
403    /// Removes all stored transactions for the given block hash
404    pub fn remove_block_transactions(&mut self, block_hash: B256) {
405        if let Some(block) = self.blocks.get_mut(&block_hash) {
406            for tx in &block.body.transactions {
407                self.transactions.remove(&tx.hash());
408            }
409            block.body.transactions.clear();
410        }
411    }
412
413    /// Serialize all blocks in storage
414    pub fn serialized_blocks(&self) -> Vec<SerializableBlock> {
415        self.blocks.values().map(|block| block.clone().into()).collect()
416    }
417
418    /// Deserialize and add all blocks data to the backend storage
419    pub fn load_blocks(&mut self, serializable_blocks: Vec<SerializableBlock>) {
420        for serializable_block in &serializable_blocks {
421            let block: Block = serializable_block.clone().into();
422            let block_hash = block.header.hash_slow();
423            let block_number = block.header.number();
424            self.blocks.insert(block_hash, block);
425            self.hashes.insert(block_number, block_hash);
426
427            // Update genesis_hash if we are loading block 0, so that Finalized/Safe/Earliest
428            // block tag lookups return the correct hash.
429            // See: https://github.com/foundry-rs/foundry/issues/12645
430            if block_number == 0 {
431                self.genesis_hash = block_hash;
432            }
433        }
434    }
435
436    /// Returns the hash for [BlockNumberOrTag]
437    pub fn hash(&self, number: BlockNumberOrTag) -> Option<B256> {
438        let slots_in_an_epoch = 32;
439        match number {
440            BlockNumberOrTag::Latest => Some(self.best_hash),
441            BlockNumberOrTag::Earliest => Some(self.genesis_hash),
442            BlockNumberOrTag::Pending => None,
443            BlockNumberOrTag::Number(num) => self.hashes.get(&num).copied(),
444            BlockNumberOrTag::Safe => {
445                if self.best_number > (slots_in_an_epoch) {
446                    self.hashes.get(&(self.best_number - (slots_in_an_epoch))).copied()
447                } else {
448                    Some(self.genesis_hash) // treat the genesis block as safe "by definition"
449                }
450            }
451            BlockNumberOrTag::Finalized => {
452                if self.best_number > (slots_in_an_epoch * 2) {
453                    self.hashes.get(&(self.best_number - (slots_in_an_epoch * 2))).copied()
454                } else {
455                    Some(self.genesis_hash)
456                }
457            }
458        }
459    }
460}
461
462impl<N: Network<ReceiptEnvelope = FoundryReceiptEnvelope>> BlockchainStorage<N> {
463    pub fn serialized_transactions(&self) -> Vec<SerializableTransaction> {
464        self.transactions.values().map(|tx: &MinedTransaction<N>| tx.clone().into()).collect()
465    }
466
467    /// Deserialize and add all transactions data to the backend storage
468    pub fn load_transactions(&mut self, serializable_transactions: Vec<SerializableTransaction>) {
469        for serializable_transaction in &serializable_transactions {
470            let transaction: MinedTransaction<N> = serializable_transaction.clone().into();
471            self.transactions.insert(transaction.info.transaction_hash, transaction);
472        }
473    }
474}
475
/// A simple in-memory blockchain
#[derive(Clone, Debug)]
pub struct Blockchain<N: Network> {
    /// underlying storage that supports concurrent reads
    pub storage: Arc<RwLock<BlockchainStorage<N>>>,
}
482
483impl<N: Network> Blockchain<N> {
484    /// Creates a new storage with a genesis block
485    pub fn new(
486        evm_env: &EvmEnv,
487        base_fee: Option<u64>,
488        timestamp: u64,
489        genesis_number: u64,
490    ) -> Self {
491        Self {
492            storage: Arc::new(RwLock::new(BlockchainStorage::new(
493                evm_env,
494                base_fee,
495                timestamp,
496                genesis_number,
497            ))),
498        }
499    }
500
501    pub fn forked(block_number: u64, block_hash: B256, total_difficulty: U256) -> Self {
502        Self {
503            storage: Arc::new(RwLock::new(BlockchainStorage::forked(
504                block_number,
505                block_hash,
506                total_difficulty,
507            ))),
508        }
509    }
510
511    /// returns the header hash of given block
512    pub fn hash(&self, id: BlockId) -> Option<B256> {
513        match id {
514            BlockId::Hash(h) => Some(h.block_hash),
515            BlockId::Number(num) => self.storage.read().hash(num),
516        }
517    }
518
519    pub fn get_block_by_hash(&self, hash: &B256) -> Option<Block> {
520        self.storage.read().blocks.get(hash).cloned()
521    }
522
523    pub fn get_transaction_by_hash(&self, hash: &B256) -> Option<MinedTransaction<N>> {
524        self.storage.read().transactions.get(hash).cloned()
525    }
526
527    /// Returns the total number of blocks
528    pub fn blocks_count(&self) -> usize {
529        self.storage.read().blocks.len()
530    }
531}
532
/// Represents the outcome of mining a new block
pub struct MinedBlockOutcome<T> {
    /// The number of the block that was mined
    pub block_number: u64,
    /// All transactions included in the block
    pub included: Vec<Arc<PoolTransaction<T>>>,
    /// All transactions that were attempted to be included but were invalid at the time of
    /// execution
    pub invalid: Vec<Arc<PoolTransaction<T>>>,
    /// Transactions skipped because they're not yet valid (e.g., valid_after in the future).
    /// These remain in the pool and should be retried later.
    pub not_yet_valid: Vec<Arc<PoolTransaction<T>>>,
}
546
impl<T> Clone for MinedBlockOutcome<T> {
    // Manual impl: a derived `Clone` would add a `T: Clone` bound, but every field
    // is clonable for any `T` (the `Arc` clones are reference-count bumps).
    fn clone(&self) -> Self {
        Self {
            block_number: self.block_number,
            included: self.included.clone(),
            invalid: self.invalid.clone(),
            not_yet_valid: self.not_yet_valid.clone(),
        }
    }
}
557
558impl<T> fmt::Debug for MinedBlockOutcome<T> {
559    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
560        f.debug_struct("MinedBlockOutcome")
561            .field("block_number", &self.block_number)
562            .field("included", &self.included.len())
563            .field("invalid", &self.invalid.len())
564            .field("not_yet_valid", &self.not_yet_valid.len())
565            .finish()
566    }
567}
568
/// Container type for a mined transaction
#[derive(Clone, Debug)]
pub struct MinedTransaction<N: Network> {
    /// Transaction details including traces collected during execution
    pub info: TransactionInfo,
    /// The receipt produced for this transaction
    pub receipt: N::ReceiptEnvelope,
    /// Hash of the block this transaction was mined in
    pub block_hash: B256,
    /// Number of the block this transaction was mined in
    pub block_number: u64,
}
577
impl<N: Network> MinedTransaction<N> {
    /// Returns the traces of the transaction for `trace_transaction`
    pub fn parity_traces(&self) -> Vec<LocalizedTransactionTrace> {
        ParityTraceBuilder::new(
            self.info.traces.clone(),
            None,
            TracingInspectorConfig::default_parity(),
        )
        .into_localized_transaction_traces(RethTransactionInfo {
            hash: Some(self.info.transaction_hash),
            index: Some(self.info.transaction_index),
            block_hash: Some(self.block_hash),
            block_number: Some(self.block_number),
            base_fee: None,
            block_timestamp: None,
        })
    }

    /// Returns Otterscan-style internal operations (transfers, creates, selfdestructs)
    /// extracted from the transaction's trace nodes.
    pub fn ots_internal_operations(&self) -> Vec<InternalOperation> {
        self.info
            .traces
            .iter()
            .filter_map(|node| {
                let r#type = match node.trace.kind {
                    // NOTE: this guard arm must come first — a selfdestruct is reported as
                    // such regardless of the call kind of the trace node
                    _ if node.is_selfdestruct() => OperationType::OpSelfDestruct,
                    // plain calls only count when value was transferred
                    CallKind::Call if !node.trace.value.is_zero() => OperationType::OpTransfer,
                    CallKind::Create => OperationType::OpCreate,
                    CallKind::Create2 => OperationType::OpCreate2,
                    // everything else (static/delegate calls, zero-value calls) is skipped
                    _ => return None,
                };
                let (from, to, value) = if node.is_selfdestruct() {
                    (
                        node.trace.address,
                        node.trace.selfdestruct_refund_target.unwrap_or_default(),
                        node.trace.selfdestruct_transferred_value.unwrap_or_default(),
                    )
                } else {
                    (node.trace.caller, node.trace.address, node.trace.value)
                };
                Some(InternalOperation { r#type, from, to, value })
            })
            .collect()
    }
}
622
/// Intermediary Anvil representation of a receipt
#[derive(Clone, Debug)]
pub struct MinedTransactionReceipt<N: Network> {
    /// The actual json rpc receipt object
    pub inner: N::ReceiptResponse,
    /// Output data for the transaction (return data or deployed code)
    pub out: Option<Bytes>,
}
631
#[cfg(test)]
mod tests {
    use super::*;
    use crate::eth::backend::db::Db;
    use alloy_primitives::{Address, hex};
    use alloy_rlp::Decodable;
    use revm::{database::DatabaseRef, state::AccountInfo};

    // Block times <= 2s should bump the in-memory limit to 3x the default.
    #[test]
    fn test_interval_update() {
        let mut storage = InMemoryBlockStates::default();
        storage.update_interval_mine_block_time(Duration::from_secs(1));
        assert_eq!(storage.in_memory_limit, DEFAULT_HISTORY_LIMIT * 3);
    }

    // Constructor invariants: defaults, memory-only detection, and the floor
    // `min_in_memory_limit == min(limit, MIN_HISTORY_LIMIT)`.
    #[test]
    fn test_init_state_limits() {
        let mut storage = InMemoryBlockStates::default();
        assert_eq!(storage.in_memory_limit, DEFAULT_HISTORY_LIMIT);
        assert_eq!(storage.min_in_memory_limit, MIN_HISTORY_LIMIT);
        assert_eq!(storage.max_on_disk_limit, MAX_ON_DISK_HISTORY_LIMIT);

        storage = storage.memory_only();
        assert!(storage.is_memory_only());

        storage = InMemoryBlockStates::new(1, 0);
        assert!(storage.is_memory_only());
        assert_eq!(storage.in_memory_limit, 1);
        assert_eq!(storage.min_in_memory_limit, 1);
        assert_eq!(storage.max_on_disk_limit, 0);

        storage = InMemoryBlockStates::new(1, 2);
        assert!(!storage.is_memory_only());
        assert_eq!(storage.in_memory_limit, 1);
        assert_eq!(storage.min_in_memory_limit, 1);
        assert_eq!(storage.max_on_disk_limit, 2);
    }

    // A state evicted from memory (limit=1) must be written to disk and be
    // readable back with its account data intact.
    #[tokio::test(flavor = "multi_thread")]
    async fn can_read_write_cached_state() {
        let mut storage = InMemoryBlockStates::new(1, MAX_ON_DISK_HISTORY_LIMIT);
        let one = B256::from(U256::from(1));
        let two = B256::from(U256::from(2));

        let mut state = MemDb::default();
        let addr = Address::random();
        let info = AccountInfo::from_balance(U256::from(1337));
        state.insert_account(addr, info);
        storage.insert(one, StateDb::new(state));
        storage.insert(two, StateDb::new(MemDb::default()));

        // wait for files to be flushed
        tokio::time::sleep(std::time::Duration::from_secs(1)).await;

        assert_eq!(storage.on_disk_states.len(), 1);
        assert!(storage.on_disk_states.contains_key(&one));

        let loaded = storage.get_on_disk_state(&one).unwrap();

        let acc = loaded.basic_ref(addr).unwrap().unwrap();
        assert_eq!(acc.balance, U256::from(1337u64));
    }

    // Inserting past the limit should gradually shrink it down to the floor,
    // spilling older states to disk while keeping all data retrievable.
    #[tokio::test(flavor = "multi_thread")]
    async fn can_decrease_state_cache_size() {
        let limit = 15;
        let mut storage = InMemoryBlockStates::new(limit, MAX_ON_DISK_HISTORY_LIMIT);

        let num_states = 30;
        for idx in 0..num_states {
            let mut state = MemDb::default();
            let hash = B256::from(U256::from(idx));
            let addr = Address::from_word(hash);
            let balance = (idx * 2) as u64;
            let info = AccountInfo::from_balance(U256::from(balance));
            state.insert_account(addr, info);
            storage.insert(hash, StateDb::new(state));
        }

        // wait for files to be flushed
        tokio::time::sleep(std::time::Duration::from_secs(1)).await;

        let on_disk_states_len = num_states - storage.min_in_memory_limit;

        assert_eq!(storage.on_disk_states.len(), on_disk_states_len);
        assert_eq!(storage.present.len(), storage.min_in_memory_limit);

        for idx in 0..num_states {
            let hash = B256::from(U256::from(idx));
            let addr = Address::from_word(hash);

            // oldest states ended up on disk, newest stayed in memory
            let loaded = if idx < on_disk_states_len {
                storage.get_on_disk_state(&hash).unwrap()
            } else {
                storage.get_state(&hash).unwrap()
            };

            let acc = loaded.basic_ref(addr).unwrap().unwrap();
            let balance = (idx * 2) as u64;
            assert_eq!(acc.balance, U256::from(balance));
        }
    }

    // Rolling back must drop exactly the requested block states and keep the rest.
    #[test]
    fn test_remove_block_states_on_rollback() {
        let mut storage = InMemoryBlockStates::new(10, MAX_ON_DISK_HISTORY_LIMIT);

        // Insert 5 states
        let hashes: Vec<B256> = (0..5)
            .map(|i| {
                let hash = B256::from(U256::from(i));
                let mut state = MemDb::default();
                let addr = Address::from_word(hash);
                state.insert_account(addr, AccountInfo::from_balance(U256::from(i * 100)));
                storage.insert(hash, StateDb::new(state));
                hash
            })
            .collect();

        assert_eq!(storage.present.len(), 5);

        // Simulate rollback: remove the last 3 blocks
        let removed_hashes = &hashes[2..];
        storage.remove_block_states(removed_hashes);

        // Only the first 2 states should remain
        assert_eq!(storage.present.len(), 2);
        assert!(storage.get_state(&hashes[0]).is_some());
        assert!(storage.get_state(&hashes[1]).is_some());
        for h in removed_hashes {
            assert!(storage.get_state(h).is_none());
            assert!(!storage.present.contains(h));
        }
    }

    // Removing a state that was spilled to disk must also clean the disk cache
    // bookkeeping so the hash can't be loaded again.
    #[tokio::test(flavor = "multi_thread")]
    async fn test_remove_block_states_cleans_disk_cache() {
        // Use limit=1 to force states to disk
        let mut storage = InMemoryBlockStates::new(1, MAX_ON_DISK_HISTORY_LIMIT);

        let hash_a = B256::from(U256::from(1));
        let hash_b = B256::from(U256::from(2));

        storage.insert(hash_a, StateDb::new(MemDb::default()));
        storage.insert(hash_b, StateDb::new(MemDb::default()));

        // Wait for disk flush
        tokio::time::sleep(std::time::Duration::from_secs(1)).await;

        assert!(storage.on_disk_states.contains_key(&hash_a));

        // Remove hash_a (on disk)
        storage.remove_block_states(&[hash_a]);

        assert!(!storage.on_disk_states.contains_key(&hash_a));
        assert!(!storage.oldest_on_disk.contains(&hash_a));
        assert!(storage.get_on_disk_state(&hash_a).is_none());
    }

    // verifies that blocks and transactions in BlockchainStorage remain the same when dumped and
    // reloaded
    #[test]
    fn test_storage_dump_reload_cycle() {
        let mut dump_storage = BlockchainStorage::<FoundryNetwork>::empty();

        let header = Header { gas_limit: 123456, ..Default::default() };
        let bytes_first = &mut &hex::decode("f86b02843b9aca00830186a094d3e8763675e4c425df46cc3b5c0f6cbdac39604687038d7ea4c68000802ba00eb96ca19e8a77102767a41fc85a36afd5c61ccb09911cec5d3e86e193d9c5aea03a456401896b1b6055311536bf00a718568c744d8c1f9df59879e8350220ca18").unwrap()[..];
        let tx: MaybeImpersonatedTransaction<FoundryTxEnvelope> =
            FoundryTxEnvelope::decode(&mut &bytes_first[..]).unwrap().into();
        let block = create_block(header.clone(), vec![tx.clone()]);
        let block_hash = block.header.hash_slow();
        dump_storage.blocks.insert(block_hash, block);

        let serialized_blocks = dump_storage.serialized_blocks();
        let serialized_transactions = dump_storage.serialized_transactions();

        let mut load_storage = BlockchainStorage::<FoundryNetwork>::empty();

        load_storage.load_blocks(serialized_blocks);
        load_storage.load_transactions(serialized_transactions);

        let loaded_block = load_storage.blocks.get(&block_hash).unwrap();
        assert_eq!(loaded_block.header.gas_limit(), header.gas_limit());
        let loaded_tx = loaded_block.body.transactions.first().unwrap();
        assert_eq!(loaded_tx, &tx);
    }
}