anvil/eth/backend/mem/storage.rs

1//! In-memory blockchain storage
2use crate::eth::{
3    backend::{
4        db::{
5            MaybeFullDatabase, SerializableBlock, SerializableHistoricalStates,
6            SerializableTransaction, StateDb,
7        },
8        env::Env,
9        mem::cache::DiskStateCache,
10    },
11    pool::transactions::PoolTransaction,
12};
13use alloy_consensus::constants::EMPTY_WITHDRAWALS;
14use alloy_eips::eip7685::EMPTY_REQUESTS_HASH;
15use alloy_primitives::{
16    B256, Bytes, U256,
17    map::{B256HashMap, HashMap},
18};
19use alloy_rpc_types::{
20    BlockId, BlockNumberOrTag, TransactionInfo as RethTransactionInfo,
21    trace::{
22        otterscan::{InternalOperation, OperationType},
23        parity::LocalizedTransactionTrace,
24    },
25};
26use anvil_core::eth::{
27    block::{Block, PartialHeader},
28    transaction::{MaybeImpersonatedTransaction, ReceiptResponse, TransactionInfo, TypedReceipt},
29};
30use foundry_evm::{
31    backend::MemDb,
32    traces::{CallKind, ParityTraceBuilder, TracingInspectorConfig},
33};
34use parking_lot::RwLock;
35use revm::{context::Block as RevmBlock, primitives::hardfork::SpecId};
36use std::{collections::VecDeque, fmt, path::PathBuf, sync::Arc, time::Duration};
37// use yansi::Paint;
38
// === various limits in number of blocks ===

/// Default number of recent block states kept in memory.
pub const DEFAULT_HISTORY_LIMIT: usize = 500;
/// Lower bound the in-memory limit may gradually shrink to.
const MIN_HISTORY_LIMIT: usize = 10;
/// Maximum number of states cached on disk: 1hr of up-time at lowest 1s interval
const MAX_ON_DISK_HISTORY_LIMIT: usize = 3_600;
45
/// Cache of per-block states, keyed by block hash.
///
/// Keeps a bounded number of states in memory and optionally spills older ones to disk.
pub struct InMemoryBlockStates {
    /// The states at a certain block, kept fully in memory
    states: B256HashMap<StateDb>,
    /// states whose data has been moved to disk
    on_disk_states: B256HashMap<StateDb>,
    /// How many states to store in memory at most
    in_memory_limit: usize,
    /// minimum amount of states we keep in memory
    min_in_memory_limit: usize,
    /// maximum amount of states we keep on disk
    ///
    /// Limiting the states will prevent disk blow up, especially in interval mining mode
    max_on_disk_limit: usize,
    /// the oldest states written to disk, in insertion order (front = oldest)
    oldest_on_disk: VecDeque<B256>,
    /// all states present in memory, used to enforce `in_memory_limit`
    present: VecDeque<B256>,
    /// Stores old states on disk
    disk_cache: DiskStateCache,
}
67
impl InMemoryBlockStates {
    /// Creates a new instance with limited slots for in-memory and on-disk states.
    pub fn new(in_memory_limit: usize, on_disk_limit: usize) -> Self {
        Self {
            states: Default::default(),
            on_disk_states: Default::default(),
            in_memory_limit,
            // the floor for gradual shrinking; never above the configured limit itself
            min_in_memory_limit: in_memory_limit.min(MIN_HISTORY_LIMIT),
            max_on_disk_limit: on_disk_limit,
            oldest_on_disk: Default::default(),
            present: Default::default(),
            disk_cache: Default::default(),
        }
    }

    /// Configures no disk caching: evicted states are dropped instead of written to disk.
    pub fn memory_only(mut self) -> Self {
        self.max_on_disk_limit = 0;
        self
    }

    /// Configures the path on disk where the states will be cached.
    pub fn disk_path(mut self, path: PathBuf) -> Self {
        self.disk_cache = self.disk_cache.with_path(path);
        self
    }

    /// Adjusts the in-memory `limit` based on the interval-mining block time.
    ///
    /// This will ensure the new limit adjusts based on the block time.
    /// The lowest blocktime is 1s which should increase the limit slightly
    pub fn update_interval_mine_block_time(&mut self, block_time: Duration) {
        let block_time = block_time.as_secs();
        // for block times lower than 2s we increase the mem limit since we're mining _small_ blocks
        // very fast
        // this will gradually be decreased once the max limit was reached
        if block_time <= 2 {
            self.in_memory_limit = DEFAULT_HISTORY_LIMIT * 3;
            self.enforce_limits();
        }
    }

    /// Returns true if only memory caching is supported (on-disk limit is zero).
    fn is_memory_only(&self) -> bool {
        self.max_on_disk_limit == 0
    }

    /// Inserts a new (hash -> state) pair
    ///
    /// When the configured limit for the number of states that can be stored in memory is reached,
    /// the oldest state is removed.
    ///
    /// Since we keep a snapshot of the entire state as history, the size of the state will increase
    /// with the transactions processed. To counter this, we gradually decrease the cache limit with
    /// the number of states/blocks until we reached the `min_limit`.
    ///
    /// When a state that was previously written to disk is requested, it is simply read from disk.
    pub fn insert(&mut self, hash: B256, state: StateDb) {
        if !self.is_memory_only() && self.present.len() >= self.in_memory_limit {
            // once we hit the max limit we gradually decrease it
            self.in_memory_limit =
                self.in_memory_limit.saturating_sub(1).max(self.min_in_memory_limit);
        }

        self.enforce_limits();

        self.states.insert(hash, state);
        self.present.push_back(hash);
    }

    /// Enforces configured limits by evicting the oldest in-memory states (spilling them to disk
    /// when disk caching is enabled) and purging the oldest on-disk states beyond the disk limit.
    fn enforce_limits(&mut self) {
        // enforce memory limits
        while self.present.len() >= self.in_memory_limit {
            // evict the oldest block
            if let Some((hash, mut state)) = self
                .present
                .pop_front()
                .and_then(|hash| self.states.remove(&hash).map(|state| (hash, state)))
            {
                // only write to disk if supported; otherwise the state is simply dropped
                if !self.is_memory_only() {
                    let state_snapshot = state.0.clear_into_state_snapshot();
                    self.disk_cache.write(hash, state_snapshot);
                    self.on_disk_states.insert(hash, state);
                    self.oldest_on_disk.push_back(hash);
                }
            }
        }

        // enforce on disk limit and purge the oldest state cached on disk
        while !self.is_memory_only() && self.oldest_on_disk.len() >= self.max_on_disk_limit {
            // evict the oldest block
            if let Some(hash) = self.oldest_on_disk.pop_front() {
                self.on_disk_states.remove(&hash);
                self.disk_cache.remove(hash);
            }
        }
    }

    /// Returns the in-memory state for the given `hash` if present
    pub fn get_state(&self, hash: &B256) -> Option<&StateDb> {
        self.states.get(hash)
    }

    /// Returns the on-disk state for the given `hash` if present, re-hydrating the in-memory
    /// database from the cached snapshot.
    pub fn get_on_disk_state(&mut self, hash: &B256) -> Option<&StateDb> {
        if let Some(state) = self.on_disk_states.get_mut(hash)
            && let Some(cached) = self.disk_cache.read(*hash)
        {
            state.init_from_state_snapshot(cached);
            return Some(state);
        }

        None
    }

    /// Sets the maximum number of states we keep in memory
    pub fn set_cache_limit(&mut self, limit: usize) {
        self.in_memory_limit = limit;
    }

    /// Clears all entries, both in memory and on disk
    pub fn clear(&mut self) {
        self.states.clear();
        self.on_disk_states.clear();
        self.present.clear();
        // delete the cached files for states that were spilled to disk
        for on_disk in std::mem::take(&mut self.oldest_on_disk) {
            self.disk_cache.remove(on_disk)
        }
    }

    /// Serialize all states (in-memory and on-disk) to a list of serializable historical states
    pub fn serialized_states(&mut self) -> SerializableHistoricalStates {
        // Get in-memory states
        let mut states = self
            .states
            .iter_mut()
            .map(|(hash, state)| (*hash, state.serialize_state()))
            .collect::<Vec<_>>();

        // Get on-disk state snapshots
        self.on_disk_states.iter().for_each(|(hash, _)| {
            if let Some(state_snapshot) = self.disk_cache.read(*hash) {
                states.push((*hash, state_snapshot));
            }
        });

        SerializableHistoricalStates::new(states)
    }

    /// Load states from serialized data, inserting each as a fresh in-memory state
    pub fn load_states(&mut self, states: SerializableHistoricalStates) {
        for (hash, state_snapshot) in states {
            let mut state_db = StateDb::new(MemDb::default());
            state_db.init_from_state_snapshot(state_snapshot);
            self.insert(hash, state_db);
        }
    }
}
228
229impl fmt::Debug for InMemoryBlockStates {
230    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
231        f.debug_struct("InMemoryBlockStates")
232            .field("in_memory_limit", &self.in_memory_limit)
233            .field("min_in_memory_limit", &self.min_in_memory_limit)
234            .field("max_on_disk_limit", &self.max_on_disk_limit)
235            .field("oldest_on_disk", &self.oldest_on_disk)
236            .field("present", &self.present)
237            .finish_non_exhaustive()
238    }
239}
240
241impl Default for InMemoryBlockStates {
242    fn default() -> Self {
243        // enough in memory to store `DEFAULT_HISTORY_LIMIT` blocks in memory
244        Self::new(DEFAULT_HISTORY_LIMIT, MAX_ON_DISK_HISTORY_LIMIT)
245    }
246}
247
248/// Stores the blockchain data (blocks, transactions)
/// Stores the blockchain data (blocks, transactions)
#[derive(Clone, Debug)]
pub struct BlockchainStorage {
    /// all stored blocks (block hash -> block)
    pub blocks: B256HashMap<Block>,
    /// mapping from block number -> block hash
    pub hashes: HashMap<u64, B256>,
    /// The current best (tip) block hash
    pub best_hash: B256,
    /// The current best (tip) block number
    pub best_number: u64,
    /// genesis hash of the chain
    pub genesis_hash: B256,
    /// Mapping from the transaction hash to a tuple containing the transaction as well as the
    /// transaction receipt
    pub transactions: B256HashMap<MinedTransaction>,
    /// The total difficulty of the chain until this block
    pub total_difficulty: U256,
}
267
268impl BlockchainStorage {
269    /// Creates a new storage with a genesis block
270    pub fn new(
271        env: &Env,
272        spec_id: SpecId,
273        base_fee: Option<u64>,
274        timestamp: u64,
275        genesis_number: u64,
276    ) -> Self {
277        let is_shanghai = spec_id >= SpecId::SHANGHAI;
278        let is_cancun = spec_id >= SpecId::CANCUN;
279        let is_prague = spec_id >= SpecId::PRAGUE;
280
281        // create a dummy genesis block
282        let partial_header = PartialHeader {
283            timestamp,
284            base_fee,
285            gas_limit: env.evm_env.block_env.gas_limit,
286            beneficiary: env.evm_env.block_env.beneficiary,
287            difficulty: env.evm_env.block_env.difficulty,
288            blob_gas_used: env.evm_env.block_env.blob_excess_gas_and_price.as_ref().map(|_| 0),
289            excess_blob_gas: env.evm_env.block_env.blob_excess_gas(),
290            number: genesis_number,
291            parent_beacon_block_root: is_cancun.then_some(Default::default()),
292            withdrawals_root: is_shanghai.then_some(EMPTY_WITHDRAWALS),
293            requests_hash: is_prague.then_some(EMPTY_REQUESTS_HASH),
294            ..Default::default()
295        };
296        let block = Block::new::<MaybeImpersonatedTransaction>(partial_header, vec![]);
297        let genesis_hash = block.header.hash_slow();
298        let best_hash = genesis_hash;
299        let best_number = genesis_number;
300
301        let mut blocks = B256HashMap::default();
302        blocks.insert(genesis_hash, block);
303
304        let mut hashes = HashMap::default();
305        hashes.insert(best_number, genesis_hash);
306        Self {
307            blocks,
308            hashes,
309            best_hash,
310            best_number,
311            genesis_hash,
312            transactions: Default::default(),
313            total_difficulty: Default::default(),
314        }
315    }
316
317    pub fn forked(block_number: u64, block_hash: B256, total_difficulty: U256) -> Self {
318        let mut hashes = HashMap::default();
319        hashes.insert(block_number, block_hash);
320
321        Self {
322            blocks: B256HashMap::default(),
323            hashes,
324            best_hash: block_hash,
325            best_number: block_number,
326            genesis_hash: Default::default(),
327            transactions: Default::default(),
328            total_difficulty,
329        }
330    }
331
332    /// Unwind the chain state back to the given block in storage.
333    ///
334    /// The block identified by `block_number` and `block_hash` is __non-inclusive__, i.e. it will
335    /// remain in the state.
336    pub fn unwind_to(&mut self, block_number: u64, block_hash: B256) -> Vec<Block> {
337        let mut removed = vec![];
338        let best_num: u64 = self.best_number;
339        for i in (block_number + 1)..=best_num {
340            if let Some(hash) = self.hashes.remove(&i)
341                && let Some(block) = self.blocks.remove(&hash)
342            {
343                self.remove_block_transactions_by_number(block.header.number);
344                removed.push(block);
345            }
346        }
347        self.best_hash = block_hash;
348        self.best_number = block_number;
349        removed
350    }
351
352    pub fn empty() -> Self {
353        Self {
354            blocks: Default::default(),
355            hashes: Default::default(),
356            best_hash: Default::default(),
357            best_number: Default::default(),
358            genesis_hash: Default::default(),
359            transactions: Default::default(),
360            total_difficulty: Default::default(),
361        }
362    }
363
364    /// Removes all stored transactions for the given block number
365    pub fn remove_block_transactions_by_number(&mut self, num: u64) {
366        if let Some(hash) = self.hashes.get(&num).copied() {
367            self.remove_block_transactions(hash);
368        }
369    }
370
371    /// Removes all stored transactions for the given block hash
372    pub fn remove_block_transactions(&mut self, block_hash: B256) {
373        if let Some(block) = self.blocks.get_mut(&block_hash) {
374            for tx in &block.transactions {
375                self.transactions.remove(&tx.hash());
376            }
377            block.transactions.clear();
378        }
379    }
380}
381
382impl BlockchainStorage {
383    /// Returns the hash for [BlockNumberOrTag]
384    pub fn hash(&self, number: BlockNumberOrTag) -> Option<B256> {
385        let slots_in_an_epoch = 32;
386        match number {
387            BlockNumberOrTag::Latest => Some(self.best_hash),
388            BlockNumberOrTag::Earliest => Some(self.genesis_hash),
389            BlockNumberOrTag::Pending => None,
390            BlockNumberOrTag::Number(num) => self.hashes.get(&num).copied(),
391            BlockNumberOrTag::Safe => {
392                if self.best_number > (slots_in_an_epoch) {
393                    self.hashes.get(&(self.best_number - (slots_in_an_epoch))).copied()
394                } else {
395                    Some(self.genesis_hash) // treat the genesis block as safe "by definition"
396                }
397            }
398            BlockNumberOrTag::Finalized => {
399                if self.best_number > (slots_in_an_epoch * 2) {
400                    self.hashes.get(&(self.best_number - (slots_in_an_epoch * 2))).copied()
401                } else {
402                    Some(self.genesis_hash)
403                }
404            }
405        }
406    }
407
408    pub fn serialized_blocks(&self) -> Vec<SerializableBlock> {
409        self.blocks.values().map(|block| block.clone().into()).collect()
410    }
411
412    pub fn serialized_transactions(&self) -> Vec<SerializableTransaction> {
413        self.transactions.values().map(|tx: &MinedTransaction| tx.clone().into()).collect()
414    }
415
416    /// Deserialize and add all blocks data to the backend storage
417    pub fn load_blocks(&mut self, serializable_blocks: Vec<SerializableBlock>) {
418        for serializable_block in &serializable_blocks {
419            let block: Block = serializable_block.clone().into();
420            let block_hash = block.header.hash_slow();
421            let block_number = block.header.number;
422            self.blocks.insert(block_hash, block);
423            self.hashes.insert(block_number, block_hash);
424        }
425    }
426
427    /// Deserialize and add all blocks data to the backend storage
428    pub fn load_transactions(&mut self, serializable_transactions: Vec<SerializableTransaction>) {
429        for serializable_transaction in &serializable_transactions {
430            let transaction: MinedTransaction = serializable_transaction.clone().into();
431            self.transactions.insert(transaction.info.transaction_hash, transaction);
432        }
433    }
434}
435
436/// A simple in-memory blockchain
/// A simple in-memory blockchain
///
/// Cloning is cheap: clones share the same underlying storage via `Arc`.
#[derive(Clone, Debug)]
pub struct Blockchain {
    /// underlying storage that supports concurrent reads
    pub storage: Arc<RwLock<BlockchainStorage>>,
}
442
443impl Blockchain {
444    /// Creates a new storage with a genesis block
445    pub fn new(
446        env: &Env,
447        spec_id: SpecId,
448        base_fee: Option<u64>,
449        timestamp: u64,
450        genesis_number: u64,
451    ) -> Self {
452        Self {
453            storage: Arc::new(RwLock::new(BlockchainStorage::new(
454                env,
455                spec_id,
456                base_fee,
457                timestamp,
458                genesis_number,
459            ))),
460        }
461    }
462
463    pub fn forked(block_number: u64, block_hash: B256, total_difficulty: U256) -> Self {
464        Self {
465            storage: Arc::new(RwLock::new(BlockchainStorage::forked(
466                block_number,
467                block_hash,
468                total_difficulty,
469            ))),
470        }
471    }
472
473    /// returns the header hash of given block
474    pub fn hash(&self, id: BlockId) -> Option<B256> {
475        match id {
476            BlockId::Hash(h) => Some(h.block_hash),
477            BlockId::Number(num) => self.storage.read().hash(num),
478        }
479    }
480
481    pub fn get_block_by_hash(&self, hash: &B256) -> Option<Block> {
482        self.storage.read().blocks.get(hash).cloned()
483    }
484
485    pub fn get_transaction_by_hash(&self, hash: &B256) -> Option<MinedTransaction> {
486        self.storage.read().transactions.get(hash).cloned()
487    }
488
489    /// Returns the total number of blocks
490    pub fn blocks_count(&self) -> usize {
491        self.storage.read().blocks.len()
492    }
493}
494
495/// Represents the outcome of mining a new block
/// Represents the outcome of mining a new block
#[derive(Clone, Debug)]
pub struct MinedBlockOutcome {
    /// The number of the block that was mined
    pub block_number: u64,
    /// All transactions included in the block
    pub included: Vec<Arc<PoolTransaction>>,
    /// All transactions that were attempted to be included but were invalid at the time of
    /// execution
    pub invalid: Vec<Arc<PoolTransaction>>,
}
506
507/// Container type for a mined transaction
/// Container type for a mined transaction
#[derive(Clone, Debug)]
pub struct MinedTransaction {
    /// Transaction data, including traces recorded during execution
    pub info: TransactionInfo,
    /// The receipt produced for this transaction
    pub receipt: TypedReceipt,
    /// Hash of the block this transaction was mined in
    pub block_hash: B256,
    /// Number of the block this transaction was mined in
    pub block_number: u64,
}
515
516impl MinedTransaction {
517    /// Returns the traces of the transaction for `trace_transaction`
518    pub fn parity_traces(&self) -> Vec<LocalizedTransactionTrace> {
519        ParityTraceBuilder::new(
520            self.info.traces.clone(),
521            None,
522            TracingInspectorConfig::default_parity(),
523        )
524        .into_localized_transaction_traces(RethTransactionInfo {
525            hash: Some(self.info.transaction_hash),
526            index: Some(self.info.transaction_index),
527            block_hash: Some(self.block_hash),
528            block_number: Some(self.block_number),
529            base_fee: None,
530        })
531    }
532
533    pub fn ots_internal_operations(&self) -> Vec<InternalOperation> {
534        self.info
535            .traces
536            .iter()
537            .filter_map(|node| {
538                let r#type = match node.trace.kind {
539                    _ if node.is_selfdestruct() => OperationType::OpSelfDestruct,
540                    CallKind::Call if !node.trace.value.is_zero() => OperationType::OpTransfer,
541                    CallKind::Create => OperationType::OpCreate,
542                    CallKind::Create2 => OperationType::OpCreate2,
543                    _ => return None,
544                };
545                let mut from = node.trace.caller;
546                let mut to = node.trace.address;
547                let mut value = node.trace.value;
548                if node.is_selfdestruct() {
549                    from = node.trace.address;
550                    to = node.trace.selfdestruct_refund_target.unwrap_or_default();
551                    value = node.trace.selfdestruct_transferred_value.unwrap_or_default();
552                }
553                Some(InternalOperation { r#type, from, to, value })
554            })
555            .collect()
556    }
557}
558
559/// Intermediary Anvil representation of a receipt
560#[derive(Clone, Debug)]
561pub struct MinedTransactionReceipt {
562    /// The actual json rpc receipt object
563    pub inner: ReceiptResponse,
564    /// Output data for the transaction
565    pub out: Option<Bytes>,
566}
567
#[cfg(test)]
mod tests {
    use super::*;
    use crate::eth::backend::db::Db;
    use alloy_primitives::{Address, hex};
    use alloy_rlp::Decodable;
    use anvil_core::eth::transaction::TypedTransaction;
    use revm::{database::DatabaseRef, state::AccountInfo};

    // low block times bump the in-memory limit to 3x the default
    #[test]
    fn test_interval_update() {
        let mut storage = InMemoryBlockStates::default();
        storage.update_interval_mine_block_time(Duration::from_secs(1));
        assert_eq!(storage.in_memory_limit, DEFAULT_HISTORY_LIMIT * 3);
    }

    // verifies the constructor's limit bookkeeping, including the memory-only mode
    #[test]
    fn test_init_state_limits() {
        let mut storage = InMemoryBlockStates::default();
        assert_eq!(storage.in_memory_limit, DEFAULT_HISTORY_LIMIT);
        assert_eq!(storage.min_in_memory_limit, MIN_HISTORY_LIMIT);
        assert_eq!(storage.max_on_disk_limit, MAX_ON_DISK_HISTORY_LIMIT);

        storage = storage.memory_only();
        assert!(storage.is_memory_only());

        // an on-disk limit of 0 also implies memory-only
        storage = InMemoryBlockStates::new(1, 0);
        assert!(storage.is_memory_only());
        assert_eq!(storage.in_memory_limit, 1);
        assert_eq!(storage.min_in_memory_limit, 1);
        assert_eq!(storage.max_on_disk_limit, 0);

        storage = InMemoryBlockStates::new(1, 2);
        assert!(!storage.is_memory_only());
        assert_eq!(storage.in_memory_limit, 1);
        assert_eq!(storage.min_in_memory_limit, 1);
        assert_eq!(storage.max_on_disk_limit, 2);
    }

    // inserting past the in-memory limit should spill the oldest state to disk, and the spilled
    // state must round-trip back through `get_on_disk_state`
    #[tokio::test(flavor = "multi_thread")]
    async fn can_read_write_cached_state() {
        let mut storage = InMemoryBlockStates::new(1, MAX_ON_DISK_HISTORY_LIMIT);
        let one = B256::from(U256::from(1));
        let two = B256::from(U256::from(2));

        let mut state = MemDb::default();
        let addr = Address::random();
        let info = AccountInfo::from_balance(U256::from(1337));
        state.insert_account(addr, info);
        storage.insert(one, StateDb::new(state));
        storage.insert(two, StateDb::new(MemDb::default()));

        // wait for files to be flushed
        tokio::time::sleep(std::time::Duration::from_secs(1)).await;

        // the first state should have been evicted to disk
        assert_eq!(storage.on_disk_states.len(), 1);
        assert!(storage.on_disk_states.contains_key(&one));

        let loaded = storage.get_on_disk_state(&one).unwrap();

        let acc = loaded.basic_ref(addr).unwrap().unwrap();
        assert_eq!(acc.balance, U256::from(1337u64));
    }

    // inserting twice the limit should gradually shrink the in-memory cache down to the minimum
    // while every state (memory or disk) remains retrievable
    #[tokio::test(flavor = "multi_thread")]
    async fn can_decrease_state_cache_size() {
        let limit = 15;
        let mut storage = InMemoryBlockStates::new(limit, MAX_ON_DISK_HISTORY_LIMIT);

        let num_states = 30;
        for idx in 0..num_states {
            let mut state = MemDb::default();
            let hash = B256::from(U256::from(idx));
            let addr = Address::from_word(hash);
            let balance = (idx * 2) as u64;
            let info = AccountInfo::from_balance(U256::from(balance));
            state.insert_account(addr, info);
            storage.insert(hash, StateDb::new(state));
        }

        // wait for files to be flushed
        tokio::time::sleep(std::time::Duration::from_secs(1)).await;

        let on_disk_states_len = num_states - storage.min_in_memory_limit;

        assert_eq!(storage.on_disk_states.len(), on_disk_states_len);
        assert_eq!(storage.present.len(), storage.min_in_memory_limit);

        for idx in 0..num_states {
            let hash = B256::from(U256::from(idx));
            let addr = Address::from_word(hash);

            // older states live on disk, newer ones in memory
            let loaded = if idx < on_disk_states_len {
                storage.get_on_disk_state(&hash).unwrap()
            } else {
                storage.get_state(&hash).unwrap()
            };

            let acc = loaded.basic_ref(addr).unwrap().unwrap();
            let balance = (idx * 2) as u64;
            assert_eq!(acc.balance, U256::from(balance));
        }
    }

    // verifies that blocks and transactions in BlockchainStorage remain the same when dumped and
    // reloaded
    #[test]
    fn test_storage_dump_reload_cycle() {
        let mut dump_storage = BlockchainStorage::empty();

        let partial_header = PartialHeader { gas_limit: 123456, ..Default::default() };
        // RLP-encoded legacy transaction used as the single block payload
        let bytes_first = &mut &hex::decode("f86b02843b9aca00830186a094d3e8763675e4c425df46cc3b5c0f6cbdac39604687038d7ea4c68000802ba00eb96ca19e8a77102767a41fc85a36afd5c61ccb09911cec5d3e86e193d9c5aea03a456401896b1b6055311536bf00a718568c744d8c1f9df59879e8350220ca18").unwrap()[..];
        let tx: MaybeImpersonatedTransaction =
            TypedTransaction::decode(&mut &bytes_first[..]).unwrap().into();
        let block =
            Block::new::<MaybeImpersonatedTransaction>(partial_header.clone(), vec![tx.clone()]);
        let block_hash = block.header.hash_slow();
        dump_storage.blocks.insert(block_hash, block);

        let serialized_blocks = dump_storage.serialized_blocks();
        let serialized_transactions = dump_storage.serialized_transactions();

        let mut load_storage = BlockchainStorage::empty();

        load_storage.load_blocks(serialized_blocks);
        load_storage.load_transactions(serialized_transactions);

        let loaded_block = load_storage.blocks.get(&block_hash).unwrap();
        assert_eq!(loaded_block.header.gas_limit, { partial_header.gas_limit });
        let loaded_tx = loaded_block.transactions.first().unwrap();
        assert_eq!(loaded_tx, &tx);
    }
}