// foundry_evm_core/fork/database.rs
1//! A revm database that forks off a remote client
2
3use crate::{
4    backend::{RevertStateSnapshotAction, StateSnapshot},
5    state_snapshot::StateSnapshots,
6};
7use alloy_network::Network;
8use alloy_primitives::{Address, B256, U256};
9use alloy_rpc_types::BlockId;
10use foundry_fork_db::{BlockchainDb, DatabaseError, ForkBlockEnv, SharedBackend};
11use parking_lot::Mutex;
12use revm::{
13    Database, DatabaseCommit,
14    bytecode::Bytecode,
15    context::BlockEnv,
16    database::{CacheDB, DatabaseRef},
17    primitives::AddressMap,
18    state::{Account, AccountInfo},
19};
20use std::sync::Arc;
21
/// A [revm::Database] that's forked off another client.
///
/// The `backend` is used to retrieve (missing) data, which is then fetched from the remote
/// endpoint. The inner in-memory database holds this storage and will be used for write operations.
/// This database uses the `backend` for read and the `db` for write operations. But note the
/// `backend` will also write (missing) data to the `db` in the background.
#[derive(Clone, Debug)]
pub struct ForkedDatabase<N: Network, B: ForkBlockEnv = BlockEnv> {
    /// Responsible for fetching missing data from the remote endpoint.
    ///
    /// This is responsible for getting data.
    backend: SharedBackend<N, B>,
    /// Cached database layer; ensures that changes are not written to the database that
    /// exclusively stores the state of the remote client.
    ///
    /// This separates read/write operations:
    ///   - reads go through the `SharedBackend as DatabaseRef`, writes land in this
    ///     internal cache storage.
    cache_db: CacheDB<SharedBackend<N, B>>,
    /// Contains all the data already fetched.
    ///
    /// This exclusively stores the _unchanged_ remote client state.
    db: BlockchainDb<B>,
    /// Holds the state snapshots of a blockchain, shared across clones of this database.
    state_snapshots: Arc<Mutex<StateSnapshots<ForkDbStateSnapshot<N, B>>>>,
}
47
48impl<N: Network, B: ForkBlockEnv> ForkedDatabase<N, B> {
49    /// Creates a new instance of this DB
50    pub fn new(backend: SharedBackend<N, B>, db: BlockchainDb<B>) -> Self {
51        Self {
52            cache_db: CacheDB::new(backend.clone()),
53            backend,
54            db,
55            state_snapshots: Arc::new(Mutex::new(Default::default())),
56        }
57    }
58
59    pub fn database(&self) -> &CacheDB<SharedBackend<N, B>> {
60        &self.cache_db
61    }
62
63    pub fn database_mut(&mut self) -> &mut CacheDB<SharedBackend<N, B>> {
64        &mut self.cache_db
65    }
66
67    pub fn state_snapshots(&self) -> &Arc<Mutex<StateSnapshots<ForkDbStateSnapshot<N, B>>>> {
68        &self.state_snapshots
69    }
70
71    /// Reset the fork to a fresh forked state, and optionally update the fork config
72    pub fn reset(
73        &mut self,
74        _url: Option<String>,
75        block_number: impl Into<BlockId>,
76    ) -> Result<(), String> {
77        self.backend.set_pinned_block(block_number).map_err(|err| err.to_string())?;
78
79        // TODO need to find a way to update generic provider via url
80
81        // wipe the storage retrieved from remote
82        self.inner().db().clear();
83        // create a fresh `CacheDB`, effectively wiping modified state
84        self.cache_db = CacheDB::new(self.backend.clone());
85        trace!(target: "backend::forkdb", "Cleared database");
86        Ok(())
87    }
88
89    /// Flushes the cache to disk if configured
90    pub fn flush_cache(&self) {
91        self.db.cache().flush()
92    }
93
94    /// Returns the database that holds the remote state
95    pub fn inner(&self) -> &BlockchainDb<B> {
96        &self.db
97    }
98
99    pub fn create_state_snapshot(&self) -> ForkDbStateSnapshot<N, B> {
100        let db = self.db.db();
101        let state_snapshot = StateSnapshot {
102            accounts: db.accounts.read().clone(),
103            storage: db.storage.read().clone(),
104            block_hashes: db.block_hashes.read().clone(),
105        };
106        ForkDbStateSnapshot { local: self.cache_db.clone(), state_snapshot }
107    }
108
109    pub fn insert_state_snapshot(&self) -> U256 {
110        let state_snapshot = self.create_state_snapshot();
111        let mut state_snapshots = self.state_snapshots().lock();
112        let id = state_snapshots.insert(state_snapshot);
113        trace!(target: "backend::forkdb", "Created new snapshot {}", id);
114        id
115    }
116
117    /// Removes the snapshot from the tracked snapshot and sets it as the current state
118    pub fn revert_state_snapshot(&mut self, id: U256, action: RevertStateSnapshotAction) -> bool {
119        let state_snapshot = { self.state_snapshots().lock().remove_at(id) };
120        if let Some(state_snapshot) = state_snapshot {
121            if action.is_keep() {
122                self.state_snapshots().lock().insert_at(state_snapshot.clone(), id);
123            }
124            let ForkDbStateSnapshot {
125                local,
126                state_snapshot: StateSnapshot { accounts, storage, block_hashes },
127            } = state_snapshot;
128            let db = self.inner().db();
129            {
130                let mut accounts_lock = db.accounts.write();
131                accounts_lock.clear();
132                accounts_lock.extend(accounts);
133            }
134            {
135                let mut storage_lock = db.storage.write();
136                storage_lock.clear();
137                storage_lock.extend(storage);
138            }
139            {
140                let mut block_hashes_lock = db.block_hashes.write();
141                block_hashes_lock.clear();
142                block_hashes_lock.extend(block_hashes);
143            }
144
145            self.cache_db = local;
146
147            trace!(target: "backend::forkdb", "Reverted snapshot {}", id);
148            true
149        } else {
150            warn!(target: "backend::forkdb", "No snapshot to revert for {}", id);
151            false
152        }
153    }
154}
155
156impl<N: Network, B: ForkBlockEnv> Database for ForkedDatabase<N, B> {
157    type Error = DatabaseError;
158
159    fn basic(&mut self, address: Address) -> Result<Option<AccountInfo>, Self::Error> {
160        // Note: this will always return Some, since the `SharedBackend` will always load the
161        // account, this differs from `<CacheDB as Database>::basic`, See also
162        // [MemDb::ensure_loaded](crate::backend::MemDb::ensure_loaded)
163        Database::basic(&mut self.cache_db, address)
164    }
165
166    fn code_by_hash(&mut self, code_hash: B256) -> Result<Bytecode, Self::Error> {
167        Database::code_by_hash(&mut self.cache_db, code_hash)
168    }
169
170    fn storage(&mut self, address: Address, index: U256) -> Result<U256, Self::Error> {
171        Database::storage(&mut self.cache_db, address, index)
172    }
173
174    fn block_hash(&mut self, number: u64) -> Result<B256, Self::Error> {
175        Database::block_hash(&mut self.cache_db, number)
176    }
177}
178
179impl<N: Network, B: ForkBlockEnv> DatabaseRef for ForkedDatabase<N, B> {
180    type Error = DatabaseError;
181
182    fn basic_ref(&self, address: Address) -> Result<Option<AccountInfo>, Self::Error> {
183        self.cache_db.basic_ref(address)
184    }
185
186    fn code_by_hash_ref(&self, code_hash: B256) -> Result<Bytecode, Self::Error> {
187        self.cache_db.code_by_hash_ref(code_hash)
188    }
189
190    fn storage_ref(&self, address: Address, index: U256) -> Result<U256, Self::Error> {
191        DatabaseRef::storage_ref(&self.cache_db, address, index)
192    }
193
194    fn block_hash_ref(&self, number: u64) -> Result<B256, Self::Error> {
195        self.cache_db.block_hash_ref(number)
196    }
197}
198
199impl<N: Network, B: ForkBlockEnv> DatabaseCommit for ForkedDatabase<N, B> {
200    fn commit(&mut self, changes: AddressMap<Account>) {
201        self.database_mut().commit(changes)
202    }
203}
204
/// Represents a snapshot of the database.
///
/// This mimics `revm::CacheDB`.
#[derive(Clone, Debug)]
pub struct ForkDbStateSnapshot<N: Network, B: ForkBlockEnv = BlockEnv> {
    // The local write cache (modified state) at the time the snapshot was taken.
    pub local: CacheDB<SharedBackend<N, B>>,
    // Copy of the remote-state maps (accounts, storage, block hashes) at snapshot time.
    pub state_snapshot: StateSnapshot,
}
213
214impl<N: Network, B: ForkBlockEnv> ForkDbStateSnapshot<N, B> {
215    fn get_storage(&self, address: Address, index: U256) -> Option<U256> {
216        self.local
217            .cache
218            .accounts
219            .get(&address)
220            .and_then(|account| account.storage.get(&index))
221            .copied()
222    }
223}
224
225// This `DatabaseRef` implementation works similar to `CacheDB` which prioritizes modified elements,
226// and uses another db as fallback
227// We prioritize stored changed accounts/storage
228impl<N: Network, B: ForkBlockEnv> DatabaseRef for ForkDbStateSnapshot<N, B> {
229    type Error = DatabaseError;
230
231    fn basic_ref(&self, address: Address) -> Result<Option<AccountInfo>, Self::Error> {
232        match self.local.cache.accounts.get(&address) {
233            Some(account) => Ok(Some(account.info.clone())),
234            None => {
235                let mut acc = self.state_snapshot.accounts.get(&address).cloned();
236
237                if acc.is_none() {
238                    acc = self.local.basic_ref(address)?;
239                }
240                Ok(acc)
241            }
242        }
243    }
244
245    fn code_by_hash_ref(&self, code_hash: B256) -> Result<Bytecode, Self::Error> {
246        self.local.code_by_hash_ref(code_hash)
247    }
248
249    fn storage_ref(&self, address: Address, index: U256) -> Result<U256, Self::Error> {
250        match self.local.cache.accounts.get(&address) {
251            Some(account) => match account.storage.get(&index) {
252                Some(entry) => Ok(*entry),
253                None => match self.get_storage(address, index) {
254                    None => DatabaseRef::storage_ref(&self.local, address, index),
255                    Some(storage) => Ok(storage),
256                },
257            },
258            None => match self.get_storage(address, index) {
259                None => DatabaseRef::storage_ref(&self.local, address, index),
260                Some(storage) => Ok(storage),
261            },
262        }
263    }
264
265    fn block_hash_ref(&self, number: u64) -> Result<B256, Self::Error> {
266        match self.state_snapshot.block_hashes.get(&U256::from(number)).copied() {
267            None => self.local.block_hash_ref(number),
268            Some(block_hash) => Ok(block_hash),
269        }
270    }
271}
272
#[cfg(test)]
mod tests {
    use super::*;
    use crate::backend::BlockchainDbMeta;
    use foundry_common::provider::get_http_provider;

    /// Demonstrates that `Database::basic` for `ForkedDatabase` will always return the
    /// `AccountInfo`
    #[tokio::test(flavor = "multi_thread")]
    async fn fork_db_insert_basic_default() {
        // Spin up a forked database against a live RPC endpoint.
        let rpc = foundry_test_utils::rpc::next_http_rpc_endpoint();
        let provider = get_http_provider(rpc.clone());
        let meta = BlockchainDbMeta::new(BlockEnv::default(), rpc);

        let db = BlockchainDb::new(meta, None);

        let backend = SharedBackend::spawn_backend(Arc::new(provider), db.clone(), None).await;

        let mut db = ForkedDatabase::new(backend, db);
        // A random address that (almost certainly) does not exist on the remote chain;
        // `basic` should still return Some(_) because the backend always loads accounts.
        let address = Address::random();

        let info = Database::basic(&mut db, address).unwrap();
        assert!(info.is_some());
        let mut info = info.unwrap();
        info.balance = U256::from(500u64);

        // insert the modified account info into the write cache
        db.database_mut().insert_account_info(address, info.clone());

        // Subsequent reads must observe the locally modified account, not the remote state.
        let loaded = Database::basic(&mut db, address).unwrap();
        assert!(loaded.is_some());
        assert_eq!(loaded.unwrap(), info);
    }
}