// anvil/lib.rs
1//! Anvil is a fast local Ethereum development node.
2
3#![cfg_attr(not(test), warn(unused_crate_dependencies))]
4#![cfg_attr(docsrs, feature(doc_cfg))]
5
6use crate::{
7    error::{NodeError, NodeResult},
8    eth::{
9        EthApi,
10        backend::{info::StorageInfo, mem},
11        fees::{FeeHistoryService, FeeManager},
12        miner::{Miner, MiningMode},
13        pool::Pool,
14        sign::{DevSigner, Signer as EthSigner},
15    },
16    filter::Filters,
17    logging::{LoggingManager, NodeLogLayer},
18    service::NodeService,
19    shutdown::Signal,
20    tasks::TaskManager,
21};
22use alloy_eips::eip7840::BlobParams;
23use alloy_primitives::{Address, U256};
24use alloy_signer_local::PrivateKeySigner;
25use eth::backend::fork::ClientFork;
26use eyre::{Result, WrapErr};
27use foundry_common::provider::{ProviderBuilder, RetryProvider};
28pub use foundry_evm::hardfork::EthereumHardfork;
29use foundry_primitives::FoundryNetwork;
30use futures::{FutureExt, TryFutureExt};
31use parking_lot::Mutex;
32use revm::primitives::hardfork::SpecId;
33use server::try_spawn_ipc;
34use std::{
35    net::SocketAddr,
36    pin::Pin,
37    sync::Arc,
38    task::{Context, Poll},
39};
40use tokio::{
41    runtime::Handle,
42    task::{JoinError, JoinHandle},
43};
44use tracing_subscriber::EnvFilter;
45
46/// contains the background service that drives the node
47mod service;
48
49mod config;
50pub use config::{
51    AccountGenerator, CHAIN_ID, DEFAULT_GAS_LIMIT, ForkChoice, NodeConfig, VERSION_MESSAGE,
52};
53
54mod error;
55/// ethereum related implementations
56pub mod eth;
57/// Evm related abstractions
58mod evm;
59pub use evm::PrecompileFactory;
60
61/// support for polling filters
62pub mod filter;
63/// commandline output
64pub mod logging;
65/// types for subscriptions
66pub mod pubsub;
67/// axum RPC server implementations
68pub mod server;
69/// Futures for shutdown signal
70mod shutdown;
71/// additional task management
72mod tasks;
73
74/// contains cli command
75#[cfg(feature = "cmd")]
76pub mod cmd;
77
78#[cfg(feature = "cmd")]
79pub mod args;
80
81#[cfg(feature = "cmd")]
82pub mod opts;
83
84#[macro_use]
85extern crate foundry_common;
86
87#[macro_use]
88extern crate tracing;
89
90/// Creates the node and runs the server.
91///
92/// Returns the [EthApi] that can be used to interact with the node and the [JoinHandle] of the
93/// task.
94///
95/// # Panics
96///
97/// Panics if any error occurs. For a non-panicking version, use [`try_spawn`].
98///
99///
100/// # Examples
101///
102/// ```no_run
103/// # use anvil::NodeConfig;
104/// # async fn spawn() -> eyre::Result<()> {
105/// let config = NodeConfig::default();
106/// let (api, handle) = anvil::spawn(config).await;
107///
108/// // use api
109///
110/// // wait forever
111/// handle.await.unwrap().unwrap();
112/// # Ok(())
113/// # }
114/// ```
115pub async fn spawn(config: NodeConfig) -> (EthApi<FoundryNetwork>, NodeHandle) {
116    try_spawn(config).await.expect("failed to spawn node")
117}
118
/// Creates the node and runs the server
///
/// Returns the [EthApi] that can be used to interact with the node and the [JoinHandle] of the
/// task.
///
/// # Examples
///
/// ```no_run
/// # use anvil::NodeConfig;
/// # async fn spawn() -> eyre::Result<()> {
/// let config = NodeConfig::default();
/// let (api, handle) = anvil::try_spawn(config).await?;
///
/// // use api
///
/// // wait forever
/// handle.await??;
/// # Ok(())
/// # }
/// ```
pub async fn try_spawn(mut config: NodeConfig) -> Result<(EthApi<FoundryNetwork>, NodeHandle)> {
    // Install the tracing subscriber only when requested; otherwise fall back to a
    // default (inactive) logging manager. Silent mode disables node log output.
    let logger = if config.enable_tracing { init_tracing() } else { Default::default() };
    logger.set_enabled(!config.silent);

    let backend = config.setup::<FoundryNetwork>().await?;

    // Optionally seed the backend from a previously dumped state snapshot.
    if let Some(state) = config.init_state.clone() {
        backend.load_state(state).await.wrap_err("failed to load init state")?;
    }

    let backend = Arc::new(backend);

    if config.enable_auto_impersonate {
        backend.auto_impersonate_account(true);
    }

    let fork = backend.get_fork();

    let NodeConfig {
        signer_accounts,
        block_time,
        port,
        max_transactions,
        server_config,
        no_mining,
        transaction_order,
        genesis,
        mixed_mining,
        ..
    } = config.clone();

    let pool = Arc::new(Pool::default());

    // Select the mining mode:
    // - a configured block time yields interval mining (or mixed mode, which also
    //   reacts to ready transactions),
    // - `no_mining` disables automatic mining entirely,
    // - otherwise mine instantly whenever the pool reports ready transactions.
    let mode = if let Some(block_time) = block_time {
        if mixed_mining {
            let listener = pool.add_ready_listener();
            MiningMode::mixed(max_transactions, listener, block_time)
        } else {
            MiningMode::interval(block_time)
        }
    } else if no_mining {
        MiningMode::None
    } else {
        // get a listener for ready transactions
        let listener = pool.add_ready_listener();
        MiningMode::instant(max_transactions, listener)
    };

    // When forking, carry over any transactions the fork config forces into blocks.
    let miner = match &fork {
        Some(fork) => {
            Miner::new(mode).with_forced_transactions(fork.config.read().force_transactions.clone())
        }
        _ => Miner::new(mode),
    };

    // Collect signers: the configured dev accounts, plus a second signer for any
    // genesis-alloc accounts that ship a private key.
    let dev_signer: Box<dyn EthSigner<foundry_primitives::FoundryNetwork>> =
        Box::new(DevSigner::new(signer_accounts));
    let mut signers = vec![dev_signer];
    if let Some(genesis) = genesis {
        let genesis_signers = genesis
            .alloc
            .values()
            .filter_map(|acc| acc.private_key)
            .flat_map(|k| PrivateKeySigner::from_bytes(&k))
            .collect::<Vec<_>>();
        if !genesis_signers.is_empty() {
            signers.push(Box::new(DevSigner::new(genesis_signers)));
        }
    }

    // Fee history tracking, with blob parameters matching the active hardfork.
    let fee_history_cache = Arc::new(Mutex::new(Default::default()));
    let fee_history_service = FeeHistoryService::new(
        match backend.spec_id() {
            SpecId::OSAKA => BlobParams::osaka(),
            SpecId::PRAGUE => BlobParams::prague(),
            _ => BlobParams::cancun(),
        },
        backend.new_block_notifications(),
        Arc::clone(&fee_history_cache),
        StorageInfo::new(Arc::clone(&backend)),
    );
    // create an entry for the best block
    if let Some(header) = backend.get_block(backend.best_number()).map(|block| block.header) {
        fee_history_service.insert_cache_entry_for_block(header.hash_slow(), &header);
    }

    let filters = Filters::default();

    // create the cloneable api wrapper
    let api = EthApi::new(
        Arc::clone(&pool),
        Arc::clone(&backend),
        Arc::new(signers),
        fee_history_cache,
        fee_history_service.fee_history_limit(),
        miner.clone(),
        logger,
        filters.clone(),
        transaction_order,
    );

    // spawn the node service
    let node_service =
        tokio::task::spawn(NodeService::new(pool, backend, miner, fee_history_service, filters));

    // Bind one RPC server per configured host address. If `port` is 0, the OS
    // assigns a free port, so record the actual bound address from the listener.
    let mut servers = Vec::with_capacity(config.host.len());
    let mut addresses = Vec::with_capacity(config.host.len());

    for addr in &config.host {
        let sock_addr = SocketAddr::new(*addr, port);

        // Create a TCP listener.
        let tcp_listener = tokio::net::TcpListener::bind(sock_addr).await?;
        addresses.push(tcp_listener.local_addr()?);

        // Spawn the server future on a new task.
        let srv = server::serve_on(tcp_listener, api.clone(), server_config.clone());
        servers.push(tokio::task::spawn(srv.map_err(Into::into)));
    }

    let tokio_handle = Handle::current();
    let (signal, on_shutdown) = shutdown::signal();
    let task_manager = TaskManager::new(tokio_handle, on_shutdown);

    // Optionally also serve the API over IPC when an IPC path is configured.
    let ipc_task =
        config.get_ipc_path().map(|path| try_spawn_ipc(api.clone(), path)).transpose()?;

    let handle = NodeHandle {
        config,
        node_service,
        servers,
        ipc_task,
        addresses,
        _signal: Some(signal),
        task_manager,
    };

    // Print launch info (endpoints, IPC path) unless silent.
    handle.print(fork.as_ref())?;

    Ok((api, handle))
}
280
281type IpcTask = JoinHandle<()>;
282
/// A handle to the spawned node and server tasks.
///
/// This future will resolve if either the node or server task resolve/fail.
pub struct NodeHandle {
    /// The configuration the node was launched with.
    config: NodeConfig,
    /// The address of the running rpc server.
    addresses: Vec<SocketAddr>,
    /// Join handle for the Node Service.
    pub node_service: JoinHandle<Result<(), NodeError>>,
    /// Join handles (one per socket) for the Anvil server.
    pub servers: Vec<JoinHandle<Result<(), NodeError>>>,
    /// The future that joins the ipc server, if any.
    ipc_task: Option<IpcTask>,
    /// A signal that fires the shutdown, fired on drop.
    _signal: Option<Signal>,
    /// A task manager that can be used to spawn additional tasks.
    task_manager: TaskManager,
}
301
302impl Drop for NodeHandle {
303    fn drop(&mut self) {
304        // Fire shutdown signal to make sure anvil instance is terminated.
305        if let Some(signal) = self._signal.take() {
306            let _ = signal.fire();
307        }
308    }
309}
310
311impl NodeHandle {
312    /// The [NodeConfig] the node was launched with.
313    pub fn config(&self) -> &NodeConfig {
314        &self.config
315    }
316
317    /// Prints the launch info.
318    pub(crate) fn print(&self, fork: Option<&ClientFork>) -> Result<()> {
319        self.config.print(fork)?;
320        if !self.config.silent {
321            if let Some(ipc_path) = self.ipc_path() {
322                sh_println!("IPC path: {ipc_path}")?;
323            }
324            sh_println!(
325                "Listening on {}",
326                self.addresses
327                    .iter()
328                    .map(|addr| { addr.to_string() })
329                    .collect::<Vec<String>>()
330                    .join(", ")
331            )?;
332        }
333        Ok(())
334    }
335
336    /// The address of the launched server.
337    ///
338    /// **N.B.** this may not necessarily be the same `host + port` as configured in the
339    /// `NodeConfig`, if port was set to 0, then the OS auto picks an available port.
340    pub fn socket_address(&self) -> &SocketAddr {
341        &self.addresses[0]
342    }
343
344    /// Returns the http endpoint.
345    pub fn http_endpoint(&self) -> String {
346        format!("http://{}", self.socket_address())
347    }
348
349    /// Returns the websocket endpoint.
350    pub fn ws_endpoint(&self) -> String {
351        format!("ws://{}", self.socket_address())
352    }
353
354    /// Returns the path of the launched ipc server, if any.
355    pub fn ipc_path(&self) -> Option<String> {
356        self.config.get_ipc_path()
357    }
358
359    /// Constructs a [`RetryProvider`] for this handle's HTTP endpoint.
360    pub fn http_provider(&self) -> RetryProvider {
361        ProviderBuilder::new(&self.http_endpoint()).build().expect("failed to build HTTP provider")
362    }
363
364    /// Constructs a [`RetryProvider`] for this handle's WS endpoint.
365    pub fn ws_provider(&self) -> RetryProvider {
366        ProviderBuilder::new(&self.ws_endpoint()).build().expect("failed to build WS provider")
367    }
368
369    /// Constructs a [`RetryProvider`] for this handle's IPC endpoint, if any.
370    pub fn ipc_provider(&self) -> Option<RetryProvider> {
371        ProviderBuilder::new(&self.config.get_ipc_path()?).build().ok()
372    }
373
374    /// Signer accounts that can sign messages/transactions from the EVM node.
375    pub fn dev_accounts(&self) -> impl Iterator<Item = Address> + '_ {
376        self.config.signer_accounts.iter().map(|wallet| wallet.address())
377    }
378
379    /// Signer accounts that can sign messages/transactions from the EVM node.
380    pub fn dev_wallets(&self) -> impl Iterator<Item = PrivateKeySigner> + '_ {
381        self.config.signer_accounts.iter().cloned()
382    }
383
384    /// Accounts that will be initialised with `genesis_balance` in the genesis block.
385    pub fn genesis_accounts(&self) -> impl Iterator<Item = Address> + '_ {
386        self.config.genesis_accounts.iter().map(|w| w.address())
387    }
388
389    /// Native token balance of every genesis account in the genesis block.
390    pub fn genesis_balance(&self) -> U256 {
391        self.config.genesis_balance
392    }
393
394    /// Default gas price for all txs.
395    pub fn gas_price(&self) -> u128 {
396        self.config.get_gas_price()
397    }
398
399    /// Returns the shutdown signal.
400    pub fn shutdown_signal(&self) -> &Option<Signal> {
401        &self._signal
402    }
403
404    /// Returns mutable access to the shutdown signal.
405    ///
406    /// This can be used to extract the Signal.
407    pub fn shutdown_signal_mut(&mut self) -> &mut Option<Signal> {
408        &mut self._signal
409    }
410
411    /// Returns the task manager that can be used to spawn new tasks.
412    ///
413    /// ```
414    /// use anvil::NodeHandle;
415    /// # fn t(handle: NodeHandle) {
416    /// let task_manager = handle.task_manager();
417    /// let on_shutdown = task_manager.on_shutdown();
418    ///
419    /// task_manager.spawn(async move {
420    ///     on_shutdown.await;
421    ///     // do something
422    /// });
423    ///
424    /// # }
425    /// ```
426    pub fn task_manager(&self) -> &TaskManager {
427        &self.task_manager
428    }
429}
430
impl Future for NodeHandle {
    type Output = Result<NodeResult<()>, JoinError>;

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        // `get_mut` is available here because all fields are `Unpin` task handles.
        let pin = self.get_mut();

        // poll the ipc task
        // The handle is taken out and only restored while still pending, so a
        // completed `JoinHandle` is never polled again. The IPC task carries no
        // error payload, hence completion maps to `Ok(())`.
        if let Some(mut ipc) = pin.ipc_task.take() {
            if let Poll::Ready(res) = ipc.poll_unpin(cx) {
                return Poll::Ready(res.map(|()| Ok(())));
            }
            pin.ipc_task = Some(ipc);
        }

        // poll the node service task
        if let Poll::Ready(res) = pin.node_service.poll_unpin(cx) {
            return Poll::Ready(res);
        }

        // poll the axum server handles
        // The first server task to resolve (success or failure) resolves the handle.
        for server in &mut pin.servers {
            if let Poll::Ready(res) = server.poll_unpin(cx) {
                return Poll::Ready(res);
            }
        }

        Poll::Pending
    }
}
460
461#[doc(hidden)]
462pub fn init_tracing() -> LoggingManager {
463    use tracing_subscriber::prelude::*;
464
465    let manager = LoggingManager::default();
466
467    let _ = if let Ok(rust_log_val) = std::env::var("RUST_LOG")
468        && !rust_log_val.contains('=')
469    {
470        // Mutate the given filter to include `node` logs if it is not already present.
471        // This prevents the unexpected behaviour of not seeing any node logs if a RUST_LOG
472        // is already present that doesn't set it.
473        let rust_log_val = if rust_log_val.contains("node") {
474            rust_log_val
475        } else {
476            format!("{rust_log_val},node=info")
477        };
478
479        let env_filter: EnvFilter =
480            rust_log_val.parse().expect("failed to parse modified RUST_LOG");
481        tracing_subscriber::registry()
482            .with(env_filter)
483            .with(tracing_subscriber::fmt::layer())
484            .try_init()
485    } else {
486        tracing_subscriber::Registry::default()
487            .with(NodeLogLayer::new(manager.clone()))
488            .with(
489                tracing_subscriber::fmt::layer()
490                    .without_time()
491                    .with_target(false)
492                    .with_level(false),
493            )
494            .try_init()
495    };
496
497    manager
498}