use crate::{
config::{ForkChoice, DEFAULT_MNEMONIC},
eth::{backend::db::SerializableState, pool::transactions::TransactionOrder, EthApi},
hardfork::OptimismHardfork,
AccountGenerator, EthereumHardfork, NodeConfig, CHAIN_ID,
};
use alloy_genesis::Genesis;
use alloy_primitives::{utils::Unit, B256, U256};
use alloy_signer_local::coins_bip39::{English, Mnemonic};
use anvil_server::ServerConfig;
use clap::Parser;
use core::fmt;
use foundry_common::shell;
use foundry_config::{Chain, Config, FigmentProviders};
use futures::FutureExt;
use rand::{rngs::StdRng, SeedableRng};
use std::{
future::Future,
net::IpAddr,
path::{Path, PathBuf},
pin::Pin,
str::FromStr,
sync::{
atomic::{AtomicUsize, Ordering},
Arc,
},
task::{Context, Poll},
time::Duration,
};
use tokio::time::{Instant, Interval};
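/// CLI arguments for running a local Anvil node.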
#[derive(Clone, Debug, Parser)]
pub struct NodeArgs {
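    /// Port number to listen on.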
#[arg(long, short, default_value = "8545", value_name = "NUM")]
pub port: u16,
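    /// Number of dev accounts to generate and configure.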
#[arg(long, short, default_value = "10", value_name = "NUM")]
pub accounts: u64,
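    /// The balance of every dev account, in Ether.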
#[arg(long, default_value = "10000", value_name = "NUM")]
pub balance: u64,
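    /// The timestamp of the genesis block.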
#[arg(long, value_name = "NUM")]
pub timestamp: Option<u64>,
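    /// BIP39 mnemonic phrase used to generate the dev accounts.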
#[arg(long, short, conflicts_with_all = &["mnemonic_seed", "mnemonic_random"])]
pub mnemonic: Option<String>,
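    /// Automatically generate a random BIP39 mnemonic with the given number of words
    /// (default: 12) and derive the dev accounts from it.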
#[arg(long, conflicts_with_all = &["mnemonic", "mnemonic_seed"], default_missing_value = "12", num_args(0..=1))]
pub mnemonic_random: Option<usize>,
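    /// Generate a deterministic BIP39 mnemonic from the given seed.
    /// Unsafe: intended for testing only, hence the `--mnemonic-seed-unsafe` flag name.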
#[arg(long = "mnemonic-seed-unsafe", conflicts_with_all = &["mnemonic", "mnemonic_random"])]
pub mnemonic_seed: Option<u64>,
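    /// Derivation path of the child keys to derive.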
#[arg(long)]
pub derivation_path: Option<String>,
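    /// The hardfork to use; parsed as an Optimism hardfork when `--optimism` is set,
    /// otherwise as an Ethereum hardfork.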
#[arg(long)]
pub hardfork: Option<String>,
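    /// Block time in seconds for interval mining; fractional seconds are accepted.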
#[arg(short, long, visible_alias = "blockTime", value_name = "SECONDS", value_parser = duration_from_secs_f64)]
pub block_time: Option<Duration>,
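    /// Number of slots in an epoch.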
#[arg(long, value_name = "SLOTS_IN_AN_EPOCH", default_value_t = 32)]
pub slots_in_an_epoch: u64,
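    /// Write the node's configuration to the given file.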
#[arg(long, value_name = "OUT_FILE")]
pub config_out: Option<String>,
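    /// Disable auto and interval mining, and mine on demand instead.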
#[arg(long, visible_alias = "no-mine", conflicts_with = "block_time")]
pub no_mining: bool,
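    /// Enable mixed mining (interval mining combined with on-demand mining); requires `--block-time`.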
#[arg(long, visible_alias = "mixed-mining", requires = "block_time")]
pub mixed_mining: bool,
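    /// The hosts the server will listen on; comma-separated, also settable via `ANVIL_IP_ADDR`.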
#[arg(
long,
value_name = "IP_ADDR",
env = "ANVIL_IP_ADDR",
default_value = "127.0.0.1",
help_heading = "Server options",
value_delimiter = ','
)]
pub host: Vec<IpAddr>,
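    /// How transactions are ordered in the mempool.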
#[arg(long, default_value = "fees")]
pub order: TransactionOrder,
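    /// Initialize the genesis block from the given `genesis.json` file.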
#[arg(long, value_name = "PATH", value_parser= read_genesis_file)]
pub init: Option<Genesis>,
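    /// Alias for both `--load-state` and `--dump-state`: load the state from this file on
    /// startup if it exists, and dump the chain state to it on exit.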
#[arg(
long,
value_name = "PATH",
value_parser = StateFile::parse,
conflicts_with_all = &[
"init",
"dump_state",
"load_state"
]
)]
pub state: Option<StateFile>,
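    /// Interval in seconds at which the state is dumped to disk; see `--state` and `--dump-state`.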
#[arg(short, long, value_name = "SECONDS")]
pub state_interval: Option<u64>,
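    /// Dump the chain state to the given file on exit (and periodically, see `--state-interval`).
    /// If a directory is given, the state is written to `<PATH>/state.json`.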
#[arg(long, value_name = "PATH", conflicts_with = "init")]
pub dump_state: Option<PathBuf>,
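    /// Preserve historical state snapshots when dumping the state.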
#[arg(long, conflicts_with = "init", default_value = "false")]
pub preserve_historical_states: bool,
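    /// Initialize the chain from a previously dumped state file.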
#[arg(
long,
value_name = "PATH",
value_parser = SerializableState::parse,
conflicts_with = "init"
)]
pub load_state: Option<SerializableState>,
#[arg(long, help = IPC_HELP, value_name = "PATH", visible_alias = "ipcpath")]
pub ipc: Option<Option<String>>,
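    /// Don't keep full chain history.
    /// If a number is given, at most that many states are kept in memory.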
#[arg(long)]
pub prune_history: Option<Option<usize>>,
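    /// Maximum number of states to persist on disk.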
#[arg(long)]
pub max_persisted_states: Option<usize>,
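    /// Number of blocks with transactions to keep in memory.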
#[arg(long)]
pub transaction_block_keeper: Option<usize>,
#[command(flatten)]
pub evm_opts: AnvilEvmArgs,
#[command(flatten)]
pub server_config: ServerConfig,
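    /// Path to the cache directory where states are stored.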
#[arg(long, value_name = "PATH")]
pub cache_path: Option<PathBuf>,
}
#[cfg(windows)]
const IPC_HELP: &str =
"Launch an ipc server at the given path or default path = `\\.\\pipe\\anvil.ipc`";
#[cfg(not(windows))]
const IPC_HELP: &str = "Launch an ipc server at the given path or default path = `/tmp/anvil.ipc`";
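/// Default interval used by the periodic state dumper.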
const DEFAULT_DUMP_INTERVAL: Duration = Duration::from_secs(60);
impl NodeArgs {
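    /// Converts the parsed CLI arguments into a `NodeConfig`.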
pub fn into_node_config(self) -> eyre::Result<NodeConfig> {
let genesis_balance = Unit::ETHER.wei().saturating_mul(U256::from(self.balance));
let compute_units_per_second = if self.evm_opts.no_rate_limit {
Some(u64::MAX)
} else {
self.evm_opts.compute_units_per_second
};
let hardfork = match &self.hardfork {
Some(hf) => {
if self.evm_opts.optimism {
Some(OptimismHardfork::from_str(hf)?.into())
} else {
Some(EthereumHardfork::from_str(hf)?.into())
}
}
None => None,
};
Ok(NodeConfig::default()
.with_gas_limit(self.evm_opts.gas_limit)
.disable_block_gas_limit(self.evm_opts.disable_block_gas_limit)
.with_gas_price(self.evm_opts.gas_price)
.with_hardfork(hardfork)
.with_blocktime(self.block_time)
.with_no_mining(self.no_mining)
.with_mixed_mining(self.mixed_mining, self.block_time)
.with_account_generator(self.account_generator())
.with_genesis_balance(genesis_balance)
.with_genesis_timestamp(self.timestamp)
.with_port(self.port)
.with_fork_choice(
match (self.evm_opts.fork_block_number, self.evm_opts.fork_transaction_hash) {
(Some(block), None) => Some(ForkChoice::Block(block)),
(None, Some(hash)) => Some(ForkChoice::Transaction(hash)),
_ => {
self.evm_opts.fork_url.as_ref().and_then(|f| f.block).map(ForkChoice::Block)
}
},
)
.with_fork_headers(self.evm_opts.fork_headers)
.with_fork_chain_id(self.evm_opts.fork_chain_id.map(u64::from).map(U256::from))
.fork_request_timeout(self.evm_opts.fork_request_timeout.map(Duration::from_millis))
.fork_request_retries(self.evm_opts.fork_request_retries)
.fork_retry_backoff(self.evm_opts.fork_retry_backoff.map(Duration::from_millis))
.fork_compute_units_per_second(compute_units_per_second)
.with_eth_rpc_url(self.evm_opts.fork_url.map(|fork| fork.url))
.with_base_fee(self.evm_opts.block_base_fee_per_gas)
.disable_min_priority_fee(self.evm_opts.disable_min_priority_fee)
.with_storage_caching(self.evm_opts.no_storage_caching)
.with_server_config(self.server_config)
.with_host(self.host)
.set_silent(shell::is_quiet())
.set_config_out(self.config_out)
.with_chain_id(self.evm_opts.chain_id)
.with_transaction_order(self.order)
.with_genesis(self.init)
.with_steps_tracing(self.evm_opts.steps_tracing)
.with_print_logs(!self.evm_opts.disable_console_log)
.with_auto_impersonate(self.evm_opts.auto_impersonate)
.with_ipc(self.ipc)
.with_code_size_limit(self.evm_opts.code_size_limit)
.disable_code_size_limit(self.evm_opts.disable_code_size_limit)
.set_pruned_history(self.prune_history)
.with_init_state(self.load_state.or_else(|| self.state.and_then(|s| s.state)))
.with_transaction_block_keeper(self.transaction_block_keeper)
.with_max_persisted_states(self.max_persisted_states)
.with_optimism(self.evm_opts.optimism)
.with_alphanet(self.evm_opts.alphanet)
.with_disable_default_create2_deployer(self.evm_opts.disable_default_create2_deployer)
.with_slots_in_an_epoch(self.slots_in_an_epoch)
.with_memory_limit(self.evm_opts.memory_limit)
.with_cache_path(self.cache_path))
}
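    /// Builds the `AccountGenerator` from the configured mnemonic, derivation path, and chain id.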
fn account_generator(&self) -> AccountGenerator {
let mut gen = AccountGenerator::new(self.accounts as usize)
.phrase(DEFAULT_MNEMONIC)
.chain_id(self.evm_opts.chain_id.unwrap_or_else(|| CHAIN_ID.into()));
if let Some(ref mnemonic) = self.mnemonic {
gen = gen.phrase(mnemonic);
} else if let Some(count) = self.mnemonic_random {
let mut rng = rand::thread_rng();
let mnemonic = match Mnemonic::<English>::new_with_count(&mut rng, count) {
Ok(mnemonic) => mnemonic.to_phrase(),
Err(_) => DEFAULT_MNEMONIC.to_string(),
};
gen = gen.phrase(mnemonic);
} else if let Some(seed) = self.mnemonic_seed {
let mut seed = StdRng::seed_from_u64(seed);
let mnemonic = Mnemonic::<English>::new(&mut seed).to_phrase();
gen = gen.phrase(mnemonic);
}
if let Some(ref derivation) = self.derivation_path {
gen = gen.derivation_path(derivation);
}
gen
}
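    /// Returns the path the state should be dumped to, preferring `--dump-state` over `--state`.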
fn dump_state_path(&self) -> Option<PathBuf> {
self.dump_state.as_ref().or_else(|| self.state.as_ref().map(|s| &s.path)).cloned()
}
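    /// Spawns the node, installs Ctrl-C and SIGTERM handlers, periodically dumps the state if
    /// configured, and flushes the fork cache on shutdown.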
pub async fn run(self) -> eyre::Result<()> {
let dump_state = self.dump_state_path();
let dump_interval =
self.state_interval.map(Duration::from_secs).unwrap_or(DEFAULT_DUMP_INTERVAL);
let preserve_historical_states = self.preserve_historical_states;
let (api, mut handle) = crate::try_spawn(self.into_node_config()?).await?;
let mut fork = api.get_fork();
let running = Arc::new(AtomicUsize::new(0));
let mut signal = handle.shutdown_signal_mut().take();
let task_manager = handle.task_manager();
let mut on_shutdown = task_manager.on_shutdown();
let mut state_dumper =
PeriodicStateDumper::new(api, dump_state, dump_interval, preserve_historical_states);
task_manager.spawn(async move {
#[cfg(unix)]
let mut sigterm = Box::pin(async {
if let Ok(mut stream) =
tokio::signal::unix::signal(tokio::signal::unix::SignalKind::terminate())
{
stream.recv().await;
} else {
futures::future::pending::<()>().await;
}
});
#[cfg(not(unix))]
let mut sigterm = Box::pin(futures::future::pending::<()>());
tokio::select! {
_ = &mut sigterm => {
trace!("received sigterm signal, shutting down");
},
_ = &mut on_shutdown =>{
}
_ = &mut state_dumper =>{}
}
state_dumper.dump().await;
if let Some(fork) = fork.take() {
trace!("flushing cache on shutdown");
fork.database
.read()
.await
.maybe_flush_cache()
.expect("Could not flush cache on fork DB");
}
std::process::exit(0);
});
ctrlc::set_handler(move || {
let prev = running.fetch_add(1, Ordering::SeqCst);
if prev == 0 {
trace!("received shutdown signal, shutting down");
let _ = signal.take();
}
})
.expect("Error setting Ctrl-C handler");
Ok(handle.await??)
}
}
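/// EVM-related arguments, including fork and environment configuration.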
#[derive(Clone, Debug, Parser)]
#[command(next_help_heading = "EVM options")]
pub struct AnvilEvmArgs {
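    /// Fetch state over a remote endpoint instead of starting from an empty state.
    /// A block number can be appended as `<URL>@<BLOCK>`; see also `--fork-block-number`.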
#[arg(
long,
short,
visible_alias = "rpc-url",
value_name = "URL",
help_heading = "Fork config"
)]
pub fork_url: Option<ForkUrl>,
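    /// Additional headers to send with requests to the fork endpoint, e.g. "User-Agent: test-agent".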
#[arg(
long = "fork-header",
value_name = "HEADERS",
help_heading = "Fork config",
requires = "fork_url"
)]
pub fork_headers: Vec<String>,
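    /// Timeout in milliseconds for requests sent to the fork endpoint.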
#[arg(id = "timeout", long = "timeout", help_heading = "Fork config", requires = "fork_url")]
pub fork_request_timeout: Option<u64>,
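    /// Number of retries for failed requests to the fork endpoint.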
#[arg(id = "retries", long = "retries", help_heading = "Fork config", requires = "fork_url")]
pub fork_request_retries: Option<u32>,
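    /// Fetch state from this block number over the fork endpoint; see `--fork-url`.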
#[arg(long, requires = "fork_url", value_name = "BLOCK", help_heading = "Fork config")]
pub fork_block_number: Option<u64>,
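    /// Fetch state from the remote endpoint as of this transaction; conflicts with `--fork-block-number`.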
#[arg(
long,
requires = "fork_url",
value_name = "TRANSACTION",
help_heading = "Fork config",
conflicts_with = "fork_block_number"
)]
pub fork_transaction_hash: Option<B256>,
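    /// Initial retry backoff in milliseconds for failed fork requests.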
#[arg(long, requires = "fork_url", value_name = "BACKOFF", help_heading = "Fork config")]
pub fork_retry_backoff: Option<u64>,
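    /// The chain id of the fork, skipping the need to fetch it from the remote endpoint.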
#[arg(
long,
help_heading = "Fork config",
value_name = "CHAIN",
requires = "fork_block_number"
)]
pub fork_chain_id: Option<Chain>,
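    /// Number of assumed available compute units per second for the fork provider.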
#[arg(
long,
requires = "fork_url",
alias = "cups",
value_name = "CUPS",
help_heading = "Fork config"
)]
pub compute_units_per_second: Option<u64>,
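    /// Disable rate limiting for the fork provider (sets compute units per second to `u64::MAX`).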
#[arg(
long,
requires = "fork_url",
value_name = "NO_RATE_LIMITS",
help_heading = "Fork config",
visible_alias = "no-rpc-rate-limit"
)]
pub no_rate_limit: bool,
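    /// Disable on-disk caching of storage fetched from the fork endpoint.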
#[arg(long, requires = "fork_url", help_heading = "Fork config")]
pub no_storage_caching: bool,
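    /// The block gas limit.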
#[arg(long, alias = "block-gas-limit", help_heading = "Environment config")]
pub gas_limit: Option<u128>,
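    /// Disable enforcement of the block gas limit.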
#[arg(
long,
value_name = "DISABLE_GAS_LIMIT",
help_heading = "Environment config",
alias = "disable-gas-limit",
conflicts_with = "gas_limit"
)]
pub disable_block_gas_limit: bool,
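    /// EIP-170 contract code size limit, in bytes.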
#[arg(long, value_name = "CODE_SIZE", help_heading = "Environment config")]
pub code_size_limit: Option<usize>,
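    /// Disable the EIP-170 contract code size limit.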
#[arg(
long,
value_name = "DISABLE_CODE_SIZE_LIMIT",
conflicts_with = "code_size_limit",
help_heading = "Environment config"
)]
pub disable_code_size_limit: bool,
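    /// The gas price.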
#[arg(long, help_heading = "Environment config")]
pub gas_price: Option<u128>,
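    /// The base fee per gas of the block.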
#[arg(
long,
visible_alias = "base-fee",
value_name = "FEE",
help_heading = "Environment config"
)]
pub block_base_fee_per_gas: Option<u64>,
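    /// Disable enforcement of a minimum suggested priority fee.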
#[arg(long, visible_alias = "no-priority-fee", help_heading = "Environment config")]
pub disable_min_priority_fee: bool,
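    /// The chain id.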
#[arg(long, alias = "chain", help_heading = "Environment config")]
pub chain_id: Option<Chain>,
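    /// Enable step-by-step tracing for debug calls.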
#[arg(long, visible_alias = "tracing")]
pub steps_tracing: bool,
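    /// Disable printing of `console.log` invocations to stdout.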
#[arg(long, visible_alias = "no-console-log")]
pub disable_console_log: bool,
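    /// Enable automatic impersonation of transaction senders on startup.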
#[arg(long, visible_alias = "auto-unlock")]
pub auto_impersonate: bool,
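    /// Run in Optimism mode.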
#[arg(long, visible_alias = "optimism")]
pub optimism: bool,
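    /// Disable deploying the default CREATE2 deployer.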
#[arg(long, visible_alias = "no-create2")]
pub disable_default_create2_deployer: bool,
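    /// The memory limit of the EVM, in bytes.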
#[arg(long)]
pub memory_limit: Option<u64>,
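    /// Enable Alphanet features.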
#[arg(long, visible_alias = "odyssey")]
pub alphanet: bool,
}
impl AnvilEvmArgs {
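    /// Resolves the fork URL against RPC aliases defined in the Foundry config, replacing an
    /// alias (e.g. `mainnet`) with its configured endpoint URL if one exists.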
pub fn resolve_rpc_alias(&mut self) {
if let Some(fork_url) = &self.fork_url {
let config = Config::load_with_providers(FigmentProviders::Anvil);
if let Some(Ok(url)) = config.get_rpc_url_with_alias(&fork_url.url) {
self.fork_url = Some(ForkUrl { url: url.to_string(), block: fork_url.block });
}
}
}
}
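/// A future that periodically dumps the chain state to disk, and once more on shutdown via `dump`.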
struct PeriodicStateDumper {
in_progress_dump: Option<Pin<Box<dyn Future<Output = ()> + Send + Sync + 'static>>>,
api: EthApi,
dump_state: Option<PathBuf>,
preserve_historical_states: bool,
interval: Interval,
}
impl PeriodicStateDumper {
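    /// Creates a new dumper; a directory path resolves to `<dir>/state.json`.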
fn new(
api: EthApi,
dump_state: Option<PathBuf>,
interval: Duration,
preserve_historical_states: bool,
) -> Self {
let dump_state = dump_state.map(|mut dump_state| {
if dump_state.is_dir() {
dump_state = dump_state.join("state.json");
}
dump_state
});
let interval = tokio::time::interval_at(Instant::now() + interval, interval);
Self { in_progress_dump: None, api, dump_state, preserve_historical_states, interval }
}
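    /// Dumps the state to the configured path once, if any; used on shutdown.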
async fn dump(&self) {
if let Some(state) = self.dump_state.clone() {
Self::dump_state(self.api.clone(), state, self.preserve_historical_states).await
}
}
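    /// Serializes the chain state and writes it to `dump_state`, logging errors instead of returning them.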
async fn dump_state(api: EthApi, dump_state: PathBuf, preserve_historical_states: bool) {
trace!(path=?dump_state, "Dumping state on shutdown");
match api.serialized_state(preserve_historical_states).await {
Ok(state) => {
if let Err(err) = foundry_common::fs::write_json_file(&dump_state, &state) {
error!(?err, "Failed to dump state");
} else {
trace!(path=?dump_state, "Dumped state on shutdown");
}
}
Err(err) => {
error!(?err, "Failed to extract state");
}
}
}
}
impl Future for PeriodicStateDumper {
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.get_mut();
if this.dump_state.is_none() {
return Poll::Pending
}
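        // Drive any in-progress dump to completion before scheduling the next one on an interval tick.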
loop {
if let Some(mut flush) = this.in_progress_dump.take() {
match flush.poll_unpin(cx) {
Poll::Ready(_) => {
this.interval.reset();
}
Poll::Pending => {
this.in_progress_dump = Some(flush);
return Poll::Pending
}
}
}
if this.interval.poll_tick(cx).is_ready() {
let api = this.api.clone();
let path = this.dump_state.clone().expect("exists; see above");
this.in_progress_dump =
Some(Box::pin(Self::dump_state(api, path, this.preserve_historical_states)));
} else {
break
}
}
Poll::Pending
}
}
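/// Value of the `--state` argument: a path used to load an existing state on startup and to
/// dump the state on exit.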
#[derive(Clone, Debug)]
pub struct StateFile {
pub path: PathBuf,
pub state: Option<SerializableState>,
}
impl StateFile {
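    /// clap value parser for `--state`.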
fn parse(path: &str) -> Result<Self, String> {
Self::parse_path(path)
}
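    /// Resolves directories to `<dir>/state.json` and eagerly loads the state if the file exists.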
pub fn parse_path(path: impl AsRef<Path>) -> Result<Self, String> {
let mut path = path.as_ref().to_path_buf();
if path.is_dir() {
path = path.join("state.json");
}
let mut state = Self { path, state: None };
if !state.path.exists() {
return Ok(state)
}
state.state = Some(SerializableState::load(&state.path).map_err(|err| err.to_string())?);
Ok(state)
}
}
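/// A fork URL with an optional trailing block number, e.g. `http://localhost:8545@1000000`.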
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ForkUrl {
pub url: String,
pub block: Option<u64>,
}
impl fmt::Display for ForkUrl {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.url.fmt(f)?;
if let Some(block) = self.block {
write!(f, "@{block}")?;
}
Ok(())
}
}
impl FromStr for ForkUrl {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
if let Some((url, block)) = s.rsplit_once('@') {
if block == "latest" {
return Ok(Self { url: url.to_string(), block: None })
}
if !block.is_empty() && !block.contains(':') && !block.contains('.') {
let block: u64 = block
.parse()
.map_err(|_| format!("Failed to parse block number: `{block}`"))?;
return Ok(Self { url: url.to_string(), block: Some(block) })
}
}
Ok(Self { url: s.to_string(), block: None })
}
}
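/// clap value parser that reads and deserializes a genesis JSON file.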
fn read_genesis_file(path: &str) -> Result<Genesis, String> {
foundry_common::fs::read_json_file(path.as_ref()).map_err(|err| err.to_string())
}
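/// clap value parser that parses a strictly positive number of seconds (fractional values allowed) into a `Duration`.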
fn duration_from_secs_f64(s: &str) -> Result<Duration, String> {
let s = s.parse::<f64>().map_err(|e| e.to_string())?;
if s == 0.0 {
return Err("Duration must be greater than 0".to_string());
}
Duration::try_from_secs_f64(s).map_err(|e| e.to_string())
}
#[cfg(test)]
mod tests {
use crate::EthereumHardfork;
use super::*;
use std::{env, net::Ipv4Addr};
#[test]
fn test_parse_fork_url() {
let fork: ForkUrl = "http://localhost:8545@1000000".parse().unwrap();
assert_eq!(
fork,
ForkUrl { url: "http://localhost:8545".to_string(), block: Some(1000000) }
);
let fork: ForkUrl = "http://localhost:8545".parse().unwrap();
assert_eq!(fork, ForkUrl { url: "http://localhost:8545".to_string(), block: None });
let fork: ForkUrl = "wss://user:password@example.com/".parse().unwrap();
assert_eq!(
fork,
ForkUrl { url: "wss://user:password@example.com/".to_string(), block: None }
);
let fork: ForkUrl = "wss://user:password@example.com/@latest".parse().unwrap();
assert_eq!(
fork,
ForkUrl { url: "wss://user:password@example.com/".to_string(), block: None }
);
let fork: ForkUrl = "wss://user:password@example.com/@100000".parse().unwrap();
assert_eq!(
fork,
ForkUrl { url: "wss://user:password@example.com/".to_string(), block: Some(100000) }
);
}
#[test]
fn can_parse_ethereum_hardfork() {
let args: NodeArgs = NodeArgs::parse_from(["anvil", "--hardfork", "berlin"]);
let config = args.into_node_config().unwrap();
assert_eq!(config.hardfork, Some(EthereumHardfork::Berlin.into()));
}
#[test]
fn can_parse_optimism_hardfork() {
let args: NodeArgs =
NodeArgs::parse_from(["anvil", "--optimism", "--hardfork", "Regolith"]);
let config = args.into_node_config().unwrap();
assert_eq!(config.hardfork, Some(OptimismHardfork::Regolith.into()));
}
#[test]
fn cant_parse_invalid_hardfork() {
let args: NodeArgs = NodeArgs::parse_from(["anvil", "--hardfork", "Regolith"]);
let config = args.into_node_config();
assert!(config.is_err());
}
#[test]
fn can_parse_fork_headers() {
let args: NodeArgs = NodeArgs::parse_from([
"anvil",
"--fork-url",
"http,://localhost:8545",
"--fork-header",
"User-Agent: test-agent",
"--fork-header",
"Referrer: example.com",
]);
assert_eq!(
args.evm_opts.fork_headers,
vec!["User-Agent: test-agent", "Referrer: example.com"]
);
}
#[test]
fn can_parse_prune_config() {
let args: NodeArgs = NodeArgs::parse_from(["anvil", "--prune-history"]);
assert!(args.prune_history.is_some());
let args: NodeArgs = NodeArgs::parse_from(["anvil", "--prune-history", "100"]);
assert_eq!(args.prune_history, Some(Some(100)));
}
#[test]
fn can_parse_max_persisted_states_config() {
let args: NodeArgs = NodeArgs::parse_from(["anvil", "--max-persisted-states", "500"]);
        assert_eq!(args.max_persisted_states, Some(500));
}
#[test]
fn can_parse_disable_block_gas_limit() {
let args: NodeArgs = NodeArgs::parse_from(["anvil", "--disable-block-gas-limit"]);
assert!(args.evm_opts.disable_block_gas_limit);
let args =
NodeArgs::try_parse_from(["anvil", "--disable-block-gas-limit", "--gas-limit", "100"]);
assert!(args.is_err());
}
#[test]
fn can_parse_disable_code_size_limit() {
let args: NodeArgs = NodeArgs::parse_from(["anvil", "--disable-code-size-limit"]);
assert!(args.evm_opts.disable_code_size_limit);
let args = NodeArgs::try_parse_from([
"anvil",
"--disable-code-size-limit",
"--code-size-limit",
"100",
]);
assert!(args.is_err());
}
#[test]
fn can_parse_host() {
let args = NodeArgs::parse_from(["anvil"]);
assert_eq!(args.host, vec![IpAddr::V4(Ipv4Addr::LOCALHOST)]);
let args = NodeArgs::parse_from([
"anvil", "--host", "::1", "--host", "1.1.1.1", "--host", "2.2.2.2",
]);
assert_eq!(
args.host,
["::1", "1.1.1.1", "2.2.2.2"].map(|ip| ip.parse::<IpAddr>().unwrap()).to_vec()
);
let args = NodeArgs::parse_from(["anvil", "--host", "::1,1.1.1.1,2.2.2.2"]);
assert_eq!(
args.host,
["::1", "1.1.1.1", "2.2.2.2"].map(|ip| ip.parse::<IpAddr>().unwrap()).to_vec()
);
env::set_var("ANVIL_IP_ADDR", "1.1.1.1");
let args = NodeArgs::parse_from(["anvil"]);
assert_eq!(args.host, vec!["1.1.1.1".parse::<IpAddr>().unwrap()]);
env::set_var("ANVIL_IP_ADDR", "::1,1.1.1.1,2.2.2.2");
let args = NodeArgs::parse_from(["anvil"]);
assert_eq!(
args.host,
["::1", "1.1.1.1", "2.2.2.2"].map(|ip| ip.parse::<IpAddr>().unwrap()).to_vec()
);
}
}