forge/cmd/test/mod.rs

use super::{install, test::filter::ProjectPathsAwareFilter, watch::WatchArgs};
use crate::{
    MultiContractRunner, MultiContractRunnerBuilder, TestFilter,
    decode::decode_console_logs,
    gas_report::GasReport,
    multi_runner::matches_contract,
    result::{SuiteResult, TestOutcome, TestStatus},
    traces::{
        CallTraceDecoderBuilder, InternalTraceMode, TraceKind,
        debug::{ContractSources, DebugTraceIdentifier},
        decode_trace_arena, folded_stack_trace,
        identifier::SignaturesIdentifier,
    },
};
use alloy_primitives::U256;
use chrono::Utc;
use clap::{Parser, ValueHint};
use eyre::{Context, OptionExt, Result, bail};
use foundry_block_explorers::EtherscanApiVersion;
use foundry_cli::{
    opts::{BuildOpts, GlobalArgs},
    utils::{self, LoadConfig},
};
use foundry_common::{TestFunctionExt, compile::ProjectCompiler, evm::EvmArgs, fs, shell};
use foundry_compilers::{
    ProjectCompileOutput,
    artifacts::output_selection::OutputSelection,
    compilers::{
        Language,
        multi::{MultiCompiler, MultiCompilerLanguage},
    },
    utils::source_files_iter,
};
use foundry_config::{
    Config, figment,
    figment::{
        Metadata, Profile, Provider,
        value::{Dict, Map},
    },
    filter::GlobMatcher,
};
use foundry_debugger::Debugger;
use foundry_evm::traces::identifier::TraceIdentifiers;
use regex::Regex;
use std::{
    collections::{BTreeMap, BTreeSet},
    fmt::Write,
    path::PathBuf,
    sync::{Arc, mpsc::channel},
    time::{Duration, Instant},
};
use yansi::Paint;

mod filter;
mod summary;
use crate::{result::TestKind, traces::render_trace_arena_inner};
pub use filter::FilterArgs;
use quick_junit::{NonSuccessKind, Report, TestCase, TestCaseStatus, TestSuite};
use summary::{TestSummaryReport, format_invariant_metrics_table};

// Loads project's figment and merges the build cli arguments into it
foundry_config::merge_impl_figment_convert!(TestArgs, build, evm);

/// CLI arguments for `forge test`.
#[derive(Clone, Debug, Parser)]
#[command(next_help_heading = "Test options")]
pub struct TestArgs {
    // Include global options for users of this struct.
    #[command(flatten)]
    pub global: GlobalArgs,

    /// The contract file you want to test; it's a shortcut for --match-path.
    #[arg(value_hint = ValueHint::FilePath)]
    pub path: Option<GlobMatcher>,

    /// Run a single test in the debugger.
    ///
    /// The matching test will be opened in the debugger regardless of the outcome of the test.
    ///
    /// If the matching test is a fuzz test, then it will open the debugger on the first failure
    /// case. If the fuzz test does not fail, it will open the debugger on the last fuzz case.
    #[arg(long, conflicts_with_all = ["flamegraph", "flamechart", "decode_internal", "rerun"])]
    debug: bool,

    /// Generate a flamegraph for a single test. Implies `--decode-internal`.
    ///
    /// A flame graph is used to visualize which functions or operations within the smart contract
    /// are consuming the most gas overall in a sorted manner.
    #[arg(long)]
    flamegraph: bool,

    /// Generate a flamechart for a single test. Implies `--decode-internal`.
    ///
    /// A flame chart shows the gas usage over time, illustrating when each function is
    /// called (execution order) and how much gas it consumes at each point in the timeline.
    #[arg(long, conflicts_with = "flamegraph")]
    flamechart: bool,

    /// Identify internal functions in traces.
    ///
    /// This will trace internal functions and decode stack parameters.
    ///
    /// Parameters stored in memory (such as bytes or arrays) are currently decoded only when a
    /// single function is matched, similarly to `--debug`, for performance reasons.
    #[arg(long)]
    decode_internal: bool,

    /// Dumps all debugger steps to file.
    #[arg(
        long,
        requires = "debug",
        value_hint = ValueHint::FilePath,
        value_name = "PATH"
    )]
    dump: Option<PathBuf>,

    /// Print a gas report.
    #[arg(long, env = "FORGE_GAS_REPORT")]
    gas_report: bool,

    /// Check gas snapshots against previous runs.
    #[arg(long, env = "FORGE_SNAPSHOT_CHECK")]
    gas_snapshot_check: Option<bool>,

    /// Enable/disable recording of gas snapshot results.
    #[arg(long, env = "FORGE_SNAPSHOT_EMIT")]
    gas_snapshot_emit: Option<bool>,

    /// Exit with code 0 even if a test fails.
    #[arg(long, env = "FORGE_ALLOW_FAILURE")]
    allow_failure: bool,

    /// Suppress successful test traces and show only traces for failures.
    #[arg(long, short, env = "FORGE_SUPPRESS_SUCCESSFUL_TRACES", help_heading = "Display options")]
    suppress_successful_traces: bool,

    /// Output test results as JUnit XML report.
    #[arg(long, conflicts_with_all = ["quiet", "json", "gas_report", "summary", "list", "show_progress"], help_heading = "Display options")]
    pub junit: bool,

    /// Stop running tests after the first failure.
    #[arg(long)]
    pub fail_fast: bool,

    /// The Etherscan (or equivalent) API key.
    #[arg(long, env = "ETHERSCAN_API_KEY", value_name = "KEY")]
    etherscan_api_key: Option<String>,

    /// The Etherscan API version.
    #[arg(long, env = "ETHERSCAN_API_VERSION", value_name = "VERSION")]
    etherscan_api_version: Option<EtherscanApiVersion>,

    /// List tests instead of running them.
    #[arg(long, short, conflicts_with_all = ["show_progress", "decode_internal", "summary"], help_heading = "Display options")]
    list: bool,

    /// Set seed used to generate randomness during your fuzz runs.
    #[arg(long)]
    pub fuzz_seed: Option<U256>,

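    /// The number of fuzz runs.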
    #[arg(long, env = "FOUNDRY_FUZZ_RUNS", value_name = "RUNS")]
    pub fuzz_runs: Option<u64>,

    /// Timeout for each fuzz run in seconds.
    #[arg(long, env = "FOUNDRY_FUZZ_TIMEOUT", value_name = "TIMEOUT")]
    pub fuzz_timeout: Option<u64>,

    /// File to rerun fuzz failures from.
    #[arg(long)]
    pub fuzz_input_file: Option<String>,

    /// Show test execution progress.
    #[arg(long, conflicts_with_all = ["quiet", "json"], help_heading = "Display options")]
    pub show_progress: bool,

    /// Re-run recorded test failures from the last run.
    /// If no failures were recorded, a regular test run is performed.
    #[arg(long)]
    pub rerun: bool,

    /// Print test summary table.
    #[arg(long, help_heading = "Display options")]
    pub summary: bool,

    /// Print detailed test summary table.
    #[arg(long, help_heading = "Display options", requires = "summary")]
    pub detailed: bool,

    #[command(flatten)]
    filter: FilterArgs,

    #[command(flatten)]
    evm: EvmArgs,

    #[command(flatten)]
    pub build: BuildOpts,

    #[command(flatten)]
    pub watch: WatchArgs,
}

impl TestArgs {
    pub async fn run(self) -> Result<TestOutcome> {
        trace!(target: "forge::test", "executing test command");
        self.execute_tests().await
    }

    /// Returns the sources that include any tests to be executed.
    /// If no filters are provided, sources are selected by the presence of test/invariant methods
    /// in them. If filters are provided, sources are additionally filtered by them.
    pub fn get_sources_to_compile(
        &self,
        config: &Config,
        filter: &ProjectPathsAwareFilter,
    ) -> Result<BTreeSet<PathBuf>> {
        let mut project = config.create_project(true, true)?;
        project.update_output_selection(|selection| {
            *selection = OutputSelection::common_output_selection(["abi".to_string()]);
        });

        let output = project.compile()?;

        if output.has_compiler_errors() {
            sh_println!("{output}")?;
            eyre::bail!("Compilation failed");
        }

        // ABIs of all sources
        let abis = output
            .into_artifacts()
            .filter_map(|(id, artifact)| artifact.abi.map(|abi| (id, abi)))
            .collect::<BTreeMap<_, _>>();

        // Filter sources by their abis and contract names.
        let mut test_sources = abis
            .iter()
            .filter(|(id, abi)| matches_contract(id, abi, filter))
            .map(|(id, _)| id.source.clone())
            .collect::<BTreeSet<_>>();

        if test_sources.is_empty() {
            if filter.is_empty() {
                sh_println!(
                    "No tests found in project! \
                        Forge looks for functions that start with `test`."
                )?;
            } else {
                sh_println!("No tests match the provided pattern:")?;
                sh_print!("{filter}")?;

                // Try to suggest a test when there's no match
                if let Some(test_pattern) = &filter.args().test_pattern {
                    let test_name = test_pattern.as_str();
                    let candidates = abis
                        .into_iter()
                        .filter(|(id, _)| {
                            filter.matches_path(&id.source) && filter.matches_contract(&id.name)
                        })
                        .flat_map(|(_, abi)| abi.functions.into_keys())
                        .collect::<Vec<_>>();
                    if let Some(suggestion) = utils::did_you_mean(test_name, candidates).pop() {
                        sh_println!("\nDid you mean `{suggestion}`?")?;
                    }
                }
            }

            eyre::bail!("No tests to run");
        }

        // Always recompile all sources to ensure that `getCode` cheatcode can use any artifact.
        test_sources.extend(source_files_iter(
            &project.paths.sources,
            MultiCompilerLanguage::FILE_EXTENSIONS,
        ));

        Ok(test_sources)
    }

    /// Executes all the tests in the project.
    ///
    /// This will trigger the build process first. On success, all test contracts that match the
    /// configured filter will be executed.
    ///
    /// Returns the test results for all matching tests.
    pub async fn execute_tests(mut self) -> Result<TestOutcome> {
        // Merge all configs.
        let (mut config, mut evm_opts) = self.load_config_and_evm_opts()?;

        // Explicitly enable isolation for gas reports for more correct gas accounting.
        if self.gas_report {
            evm_opts.isolate = true;
        } else {
            // Do not collect gas report traces if gas report is not enabled.
            config.fuzz.gas_report_samples = 0;
            config.invariant.gas_report_samples = 0;
        }

        // Install missing dependencies.
        if install::install_missing_dependencies(&mut config) && config.auto_detect_remappings {
            // need to re-configure here to also catch additional remappings
            config = self.load_config()?;
        }

        // Set up the project.
        let project = config.project()?;

        let filter = self.filter(&config)?;
        trace!(target: "forge::test", ?filter, "using filter");

        let sources_to_compile = self.get_sources_to_compile(&config, &filter)?;

        let compiler = ProjectCompiler::new()
            .dynamic_test_linking(config.dynamic_test_linking)
            .quiet(shell::is_json() || self.junit)
            .files(sources_to_compile);

        let output = compiler.compile(&project)?;

        // Create test options from general project settings and compiler output.
        let project_root = &project.paths.root;

        let should_debug = self.debug;
        let should_draw = self.flamegraph || self.flamechart;

        // Determine print verbosity and executor verbosity.
        let verbosity = evm_opts.verbosity;
        if (self.gas_report && evm_opts.verbosity < 3) || self.flamegraph || self.flamechart {
            evm_opts.verbosity = 3;
        }

        let env = evm_opts.evm_env().await?;

        // Enable internal tracing for more informative flamegraph.
        if should_draw && !self.decode_internal {
            self.decode_internal = true;
        }

        // Choose the internal function tracing mode, if --decode-internal is provided.
        let decode_internal = if self.decode_internal {
            // If more than one function matched, we enable simple tracing.
            // If only one function matched, we enable full tracing. This is done in `run_tests`.
            InternalTraceMode::Simple
        } else {
            InternalTraceMode::None
        };

        // Prepare the test builder.
        let config = Arc::new(config);
        let runner = MultiContractRunnerBuilder::new(config.clone())
            .set_debug(should_debug)
            .set_decode_internal(decode_internal)
            .initial_balance(evm_opts.initial_balance)
            .evm_spec(config.evm_spec_id())
            .sender(evm_opts.sender)
            .with_fork(evm_opts.get_fork(&config, env.clone()))
            .enable_isolation(evm_opts.isolate)
            .odyssey(evm_opts.odyssey)
            .build::<MultiCompiler>(project_root, &output, env, evm_opts)?;

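        // Keep the linked libraries around; they are needed later to resolve contract sources
        // for the debugger and internal trace decoding.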
        let libraries = runner.libraries.clone();
        let mut outcome = self.run_tests(runner, config, verbosity, &filter, &output).await?;

        if should_draw {
            let (suite_name, test_name, mut test_result) =
                outcome.remove_first().ok_or_eyre("no tests were executed")?;

            let (_, arena) = test_result
                .traces
                .iter_mut()
                .find(|(kind, _)| *kind == TraceKind::Execution)
                .unwrap();

            // Decode traces.
            let decoder = outcome.last_run_decoder.as_ref().unwrap();
            decode_trace_arena(arena, decoder).await;
            let mut fst = folded_stack_trace::build(arena);

            let label = if self.flamegraph { "flamegraph" } else { "flamechart" };
            let contract = suite_name.split(':').next_back().unwrap();
            let test_name = test_name.trim_end_matches("()");
            let file_name = format!("cache/{label}_{contract}_{test_name}.svg");
            let file = std::fs::File::create(&file_name).wrap_err("failed to create file")?;
            let file = std::io::BufWriter::new(file);

            let mut options = inferno::flamegraph::Options::default();
            options.title = format!("{label} {contract}::{test_name}");
            options.count_name = "gas".to_string();
            if self.flamechart {
                options.flame_chart = true;
                fst.reverse();
            }

            // Generate SVG.
            inferno::flamegraph::from_lines(&mut options, fst.iter().map(String::as_str), file)
                .wrap_err("failed to write svg")?;
            sh_println!("Saved to {file_name}")?;

            // Open SVG in default program.
            if let Err(e) = opener::open(&file_name) {
                sh_err!("Failed to open {file_name}; please open it manually: {e}")?;
            }
        }

        if should_debug {
            // Get first non-empty suite result. We will have only one such entry.
            let (_, _, test_result) =
                outcome.remove_first().ok_or_eyre("no tests were executed")?;

            let sources =
                ContractSources::from_project_output(&output, project.root(), Some(&libraries))?;

            // Run the debugger.
            let mut builder = Debugger::builder()
                .traces(
                    test_result.traces.iter().filter(|(t, _)| t.is_execution()).cloned().collect(),
                )
                .sources(sources)
                .breakpoints(test_result.breakpoints.clone());

            if let Some(decoder) = &outcome.last_run_decoder {
                builder = builder.decoder(decoder);
            }

            let mut debugger = builder.build();
            if let Some(dump_path) = self.dump {
                debugger.dump_to_file(&dump_path)?;
            } else {
                debugger.try_run_tui()?;
            }
        }

        Ok(outcome)
    }

    /// Runs all tests that match the filter predicate using the given test runner.
    pub async fn run_tests(
        &self,
        mut runner: MultiContractRunner,
        config: Arc<Config>,
        verbosity: u8,
        filter: &ProjectPathsAwareFilter,
        output: &ProjectCompileOutput,
    ) -> eyre::Result<TestOutcome> {
        if self.list {
            return list(runner, filter);
        }

        trace!(target: "forge::test", "running all tests");

        // If we need to render to a serialized format, we should not print anything else to stdout.
        let silent = self.gas_report && shell::is_json() || self.summary && shell::is_json();

        let num_filtered = runner.matching_test_functions(filter).count();
        if num_filtered != 1 && (self.debug || self.flamegraph || self.flamechart) {
            let action = if self.flamegraph {
                "generate a flamegraph"
            } else if self.flamechart {
                "generate a flamechart"
            } else {
                "run the debugger"
            };
            let filter = if filter.is_empty() {
                String::new()
            } else {
                format!("\n\nFilter used:\n{filter}")
            };
            eyre::bail!(
                "{num_filtered} tests matched your criteria, but exactly 1 test must match in order to {action}.\n\n\
                 Use --match-contract and --match-path to further limit the search.{filter}",
            );
        }

        // If exactly one test matched, we enable full tracing.
        if num_filtered == 1 && self.decode_internal {
            runner.decode_internal = InternalTraceMode::Full;
        }

        // Run tests in a non-streaming fashion and collect results for serialization.
        if !self.gas_report && !self.summary && shell::is_json() {
            let mut results = runner.test_collect(filter)?;
            results.values_mut().for_each(|suite_result| {
                for test_result in suite_result.test_results.values_mut() {
                    if verbosity >= 2 {
                        // Decode logs at level 2 and above.
                        test_result.decoded_logs = decode_console_logs(&test_result.logs);
                    } else {
                        // Empty logs for non verbose runs.
                        test_result.logs = vec![];
                    }
                }
            });
            sh_println!("{}", serde_json::to_string(&results)?)?;
            return Ok(TestOutcome::new(results, self.allow_failure));
        }

        if self.junit {
            let results = runner.test_collect(filter)?;
            sh_println!("{}", junit_xml_report(&results, verbosity).to_string()?)?;
            return Ok(TestOutcome::new(results, self.allow_failure));
        }

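        // Capture everything the trace identifiers and debug sources need before the runner is
        // moved into the blocking task below.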
        let remote_chain_id = runner.evm_opts.get_remote_chain_id().await;
        let known_contracts = runner.known_contracts.clone();

        let libraries = runner.libraries.clone();

        // Run tests in a streaming fashion.
        let (tx, rx) = channel::<(String, SuiteResult)>();
        let timer = Instant::now();
        let show_progress = config.show_progress;
        let handle = tokio::task::spawn_blocking({
            let filter = filter.clone();
            move || runner.test(&filter, tx, show_progress)
        });

        // Set up trace identifiers.
        let mut identifier = TraceIdentifiers::new().with_local(&known_contracts);

        // Avoid using etherscan for gas report as we decode more traces and this will be
        // expensive.
        if !self.gas_report {
            identifier = identifier.with_etherscan(&config, remote_chain_id)?;
        }

        // Build the trace decoder.
        let mut builder = CallTraceDecoderBuilder::new()
            .with_known_contracts(&known_contracts)
            .with_verbosity(verbosity);
        // Signatures are of no value for gas reports.
        if !self.gas_report {
            builder =
                builder.with_signature_identifier(SignaturesIdentifier::from_config(&config)?);
        }

        if self.decode_internal {
            let sources =
                ContractSources::from_project_output(output, &config.root, Some(&libraries))?;
            builder = builder.with_debug_identifier(DebugTraceIdentifier::new(sources));
        }
        let mut decoder = builder.build();

        let mut gas_report = self.gas_report.then(|| {
            GasReport::new(
                config.gas_reports.clone(),
                config.gas_reports_ignore.clone(),
                config.gas_reports_include_tests,
            )
        });

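        // Gas snapshots collected from all suites, grouped by snapshot group name.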
        let mut gas_snapshots = BTreeMap::<String, BTreeMap<String, String>>::new();

        let mut outcome = TestOutcome::empty(self.allow_failure);

        let mut any_test_failed = false;
        for (contract_name, suite_result) in rx {
            let tests = &suite_result.test_results;

            // Clear the addresses and labels from previous test.
            decoder.clear_addresses();

            // We identify addresses if we're going to print *any* trace or gas report.
            let identify_addresses = verbosity >= 3
                || self.gas_report
                || self.debug
                || self.flamegraph
                || self.flamechart;

            // Print suite header.
            if !silent {
                sh_println!()?;
                for warning in &suite_result.warnings {
                    sh_warn!("{warning}")?;
                }
                if !tests.is_empty() {
                    let len = tests.len();
                    let tests = if len > 1 { "tests" } else { "test" };
                    sh_println!("Ran {len} {tests} for {contract_name}")?;
                }
            }

            // Process individual test results, printing logs and traces when necessary.
            for (name, result) in tests {
                let show_traces =
                    !self.suppress_successful_traces || result.status == TestStatus::Failure;
                if !silent {
                    sh_println!("{}", result.short_result(name))?;

                    // Display invariant metrics if invariant kind.
                    if let TestKind::Invariant { metrics, .. } = &result.kind
                        && !metrics.is_empty()
                    {
                        let _ = sh_println!("\n{}\n", format_invariant_metrics_table(metrics));
                    }

                    // We only display logs at level 2 and above
                    if verbosity >= 2 && show_traces {
                        // We only decode logs from Hardhat and DS-style console events
                        let console_logs = decode_console_logs(&result.logs);
                        if !console_logs.is_empty() {
                            sh_println!("Logs:")?;
                            for log in console_logs {
                                sh_println!("  {log}")?;
                            }
                            sh_println!()?;
                        }
                    }
                }

                // We shouldn't break out of the outer loop directly here so that we finish
                // processing the remaining tests and print the suite summary.
                any_test_failed |= result.status == TestStatus::Failure;

                // Clear the addresses and labels from previous runs.
                decoder.clear_addresses();
                decoder
                    .labels
                    .extend(result.labeled_addresses.iter().map(|(k, v)| (*k, v.clone())));

                // Identify addresses and decode traces.
                let mut decoded_traces = Vec::with_capacity(result.traces.len());
                for (kind, arena) in &mut result.traces.clone() {
                    if identify_addresses {
                        decoder.identify(arena, &mut identifier);
                    }

                    // verbosity:
                    // - 0..3: nothing
                    // - 3: only display traces for failed tests
                    // - 4: also display the setup trace for failed tests
                    // - 5..: display all traces for all tests, including storage changes
                    let should_include = match kind {
                        TraceKind::Execution => {
                            (verbosity == 3 && result.status.is_failure()) || verbosity >= 4
                        }
                        TraceKind::Setup => {
                            (verbosity == 4 && result.status.is_failure()) || verbosity >= 5
                        }
                        TraceKind::Deployment => false,
                    };

                    if should_include {
                        decode_trace_arena(arena, &decoder).await;
                        decoded_traces.push(render_trace_arena_inner(arena, false, verbosity > 4));
                    }
                }

                if !silent && show_traces && !decoded_traces.is_empty() {
                    sh_println!("Traces:")?;
                    for trace in &decoded_traces {
                        sh_println!("{trace}")?;
                    }
                }

                if let Some(gas_report) = &mut gas_report {
                    gas_report.analyze(result.traces.iter().map(|(_, a)| &a.arena), &decoder).await;

                    for trace in &result.gas_report_traces {
                        decoder.clear_addresses();

                        // Re-execute setup and deployment traces to collect identities created in
                        // setUp and constructor.
                        for (kind, arena) in &result.traces {
                            if !matches!(kind, TraceKind::Execution) {
                                decoder.identify(arena, &mut identifier);
                            }
                        }

                        for arena in trace {
                            decoder.identify(arena, &mut identifier);
                            gas_report.analyze([arena], &decoder).await;
                        }
                    }
                }

                // Collect and merge gas snapshots.
                for (group, new_snapshots) in &result.gas_snapshots {
                    gas_snapshots.entry(group.clone()).or_default().extend(new_snapshots.clone());
                }
            }

            // Write gas snapshots to disk if any were collected.
            if !gas_snapshots.is_empty() {
                // By default `gas_snapshot_check` is set to `false` in the config.
                //
                // The user can either:
                // - Set `FORGE_SNAPSHOT_CHECK=true` in the environment.
                // - Pass `--gas-snapshot-check=true` as a CLI argument.
                // - Set `gas_snapshot_check = true` in the config.
                //
                // If the user passes `--gas-snapshot-check=<bool>` then it will override the config
                // and the environment variable, disabling the check if `false` is passed.
                //
                // Exiting early with code 1 if differences are found.
                if self.gas_snapshot_check.unwrap_or(config.gas_snapshot_check) {
                    let differences_found = gas_snapshots.clone().into_iter().fold(
                        false,
                        |mut found, (group, snapshots)| {
                            // If the snapshot file doesn't exist, we can't compare, so skip this
                            // group while keeping any differences already found.
                            if !config.snapshots.join(format!("{group}.json")).exists() {
                                return found;
                            }

                            let previous_snapshots: BTreeMap<String, String> =
                                fs::read_json_file(&config.snapshots.join(format!("{group}.json")))
                                    .expect("Failed to read snapshots from disk");

                            let diff: BTreeMap<_, _> = snapshots
                                .iter()
                                .filter_map(|(k, v)| {
                                    previous_snapshots.get(k).and_then(|previous_snapshot| {
                                        if previous_snapshot != v {
                                            Some((
                                                k.clone(),
                                                (previous_snapshot.clone(), v.clone()),
                                            ))
                                        } else {
                                            None
                                        }
                                    })
                                })
                                .collect();

                            if !diff.is_empty() {
                                let _ = sh_eprintln!(
                                    "{}",
                                    format!("\n[{group}] Failed to match snapshots:").red().bold()
                                );

                                for (key, (previous_snapshot, snapshot)) in &diff {
                                    let _ = sh_eprintln!(
                                        "{}",
                                        format!("- [{key}] {previous_snapshot} → {snapshot}").red()
                                    );
                                }

                                found = true;
                            }

                            found
                        },
                    );

                    if differences_found {
                        sh_eprintln!()?;
                        eyre::bail!("Snapshots differ from previous run");
                    }
                }

                // By default `gas_snapshot_emit` is set to `true` in the config.
                //
                // The user can either:
                // - Set `FORGE_SNAPSHOT_EMIT=false` in the environment.
                // - Pass `--gas-snapshot-emit=false` as a CLI argument.
                // - Set `gas_snapshot_emit = false` in the config.
                //
                // If the user passes `--gas-snapshot-emit=<bool>` then it will override the config
                // and the environment variable, enabling the check if `true` is passed.
                if self.gas_snapshot_emit.unwrap_or(config.gas_snapshot_emit) {
                    // Create `snapshots` directory if it doesn't exist.
                    fs::create_dir_all(&config.snapshots)?;

                    // Write gas snapshots to disk per group.
                    gas_snapshots.clone().into_iter().for_each(|(group, snapshots)| {
                        fs::write_pretty_json_file(
                            &config.snapshots.join(format!("{group}.json")),
                            &snapshots,
                        )
                        .expect("Failed to write gas snapshots to disk");
                    });
                }
            }

            // Print suite summary.
            if !silent {
                sh_println!("{}", suite_result.summary())?;
            }

            // Add the suite result to the outcome.
            outcome.results.insert(contract_name, suite_result);

            // Stop processing the remaining suites if any test failed and `fail_fast` is set.
            if self.fail_fast && any_test_failed {
                break;
            }
        }
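        // Keep the decoder so later consumers (e.g. the debugger and flamegraph rendering in
        // `execute_tests`) can reuse it.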
        outcome.last_run_decoder = Some(decoder);
        let duration = timer.elapsed();

        trace!(target: "forge::test", len=outcome.results.len(), %any_test_failed, "done with results");

        if let Some(gas_report) = gas_report {
            let finalized = gas_report.finalize();
            sh_println!("{}", &finalized)?;
            outcome.gas_report = Some(finalized);
        }

        if !self.summary && !shell::is_json() {
            sh_println!("{}", outcome.summary(duration))?;
        }

        if self.summary && !outcome.results.is_empty() {
            let summary_report = TestSummaryReport::new(self.detailed, outcome.clone());
            sh_println!("{}", &summary_report)?;
        }

        // Reattach the task.
        if let Err(e) = handle.await {
            match e.try_into_panic() {
                Ok(payload) => std::panic::resume_unwind(payload),
                Err(e) => return Err(e.into()),
            }
        }

        // Persist test run failures to enable replaying.
        persist_run_failures(&config, &outcome);

        Ok(outcome)
    }

    /// Returns the flattened [`FilterArgs`] arguments merged with the [`Config`].
    /// If `--rerun` is set, loads and applies the filter persisted from the last run's failures.
    pub fn filter(&self, config: &Config) -> Result<ProjectPathsAwareFilter> {
        let mut filter = self.filter.clone();
        if self.rerun {
            filter.test_pattern = last_run_failures(config);
        }
        if filter.path_pattern.is_some() {
            if self.path.is_some() {
                bail!("Can not supply both --match-path and |path|");
            }
        } else {
            filter.path_pattern = self.path.clone();
        }
        Ok(filter.merge_with_config(config))
    }

    /// Returns whether `TestArgs` was configured with `--watch`.
    pub fn is_watch(&self) -> bool {
        self.watch.watch.is_some()
    }

    /// Returns the [`watchexec::Config`] necessary to bootstrap a new watch loop.
    pub(crate) fn watchexec_config(&self) -> Result<watchexec::Config> {
        self.watch.watchexec_config(|| {
            let config = self.load_config()?;
            Ok([config.src, config.test])
        })
    }
}

impl Provider for TestArgs {
    fn metadata(&self) -> Metadata {
        Metadata::named("Core Build Args Provider")
    }

    fn data(&self) -> Result<Map<Profile, Dict>, figment::Error> {
        let mut dict = Dict::default();

        let mut fuzz_dict = Dict::default();
        if let Some(fuzz_seed) = self.fuzz_seed {
            fuzz_dict.insert("seed".to_string(), fuzz_seed.to_string().into());
        }
        if let Some(fuzz_runs) = self.fuzz_runs {
            fuzz_dict.insert("runs".to_string(), fuzz_runs.into());
        }
        if let Some(fuzz_timeout) = self.fuzz_timeout {
            fuzz_dict.insert("timeout".to_string(), fuzz_timeout.into());
        }
        if let Some(fuzz_input_file) = self.fuzz_input_file.clone() {
            fuzz_dict.insert("failure_persist_file".to_string(), fuzz_input_file.into());
        }
        dict.insert("fuzz".to_string(), fuzz_dict.into());

        if let Some(etherscan_api_key) =
            self.etherscan_api_key.as_ref().filter(|s| !s.trim().is_empty())
        {
            dict.insert("etherscan_api_key".to_string(), etherscan_api_key.to_string().into());
        }

        if let Some(api_version) = &self.etherscan_api_version {
            dict.insert("etherscan_api_version".to_string(), api_version.to_string().into());
        }

        if self.show_progress {
            dict.insert("show_progress".to_string(), true.into());
        }

        Ok(Map::from([(Config::selected_profile(), dict)]))
    }
}

/// Lists all matching tests
fn list(runner: MultiContractRunner, filter: &ProjectPathsAwareFilter) -> Result<TestOutcome> {
    let results = runner.list(filter);

    if shell::is_json() {
        sh_println!("{}", serde_json::to_string(&results)?)?;
    } else {
        for (file, contracts) in &results {
            sh_println!("{file}")?;
            for (contract, tests) in contracts {
                sh_println!("  {contract}")?;
                sh_println!("    {}\n", tests.join("\n    "))?;
            }
        }
    }
    Ok(TestOutcome::empty(false))
}

/// Load persisted filter (with last test run failures) from file.
fn last_run_failures(config: &Config) -> Option<regex::Regex> {
    match fs::read_to_string(&config.test_failures_file) {
        Ok(filter) => Some(Regex::new(&filter).unwrap()),
        Err(_) => None,
    }
}

/// Persist filter with last test run failures (only if there's any failure).
fn persist_run_failures(config: &Config, outcome: &TestOutcome) {
    if outcome.failed() > 0 && fs::create_file(&config.test_failures_file).is_ok() {
        let mut filter = String::new();
        let mut failures = outcome.failures().peekable();
        while let Some((test_name, _)) = failures.next() {
            if test_name.is_any_test()
                && let Some(test_match) = test_name.split("(").next()
            {
                filter.push_str(test_match);
                if failures.peek().is_some() {
                    filter.push('|');
                }
            }
        }
        let _ = fs::write(&config.test_failures_file, filter);
    }
}

/// Generates a test report in JUnit XML format.
fn junit_xml_report(results: &BTreeMap<String, SuiteResult>, verbosity: u8) -> Report {
    let mut total_duration = Duration::default();
    let mut junit_report = Report::new("Test run");
    junit_report.set_timestamp(Utc::now());
    for (suite_name, suite_result) in results {
        let mut test_suite = TestSuite::new(suite_name);
        total_duration += suite_result.duration;
        test_suite.set_time(suite_result.duration);
        test_suite.set_system_out(suite_result.summary());
        for (test_name, test_result) in &suite_result.test_results {
            let mut test_status = match test_result.status {
                TestStatus::Success => TestCaseStatus::success(),
                TestStatus::Failure => TestCaseStatus::non_success(NonSuccessKind::Failure),
                TestStatus::Skipped => TestCaseStatus::skipped(),
            };
            if let Some(reason) = &test_result.reason {
                test_status.set_message(reason);
            }

            let mut test_case = TestCase::new(test_name, test_status);
            test_case.set_time(test_result.duration);

            let mut sys_out = String::new();
            let result_report = test_result.kind.report();
            write!(sys_out, "{test_result} {test_name} {result_report}").unwrap();
            if verbosity >= 2 && !test_result.logs.is_empty() {
                write!(sys_out, "\nLogs:\n").unwrap();
                let console_logs = decode_console_logs(&test_result.logs);
                for log in console_logs {
                    write!(sys_out, "  {log}\n").unwrap();
                }
            }

            test_case.set_system_out(sys_out);
            test_suite.add_test_case(test_case);
        }
        junit_report.add_test_suite(test_suite);
    }
    junit_report.set_time(total_duration);
    junit_report
}

#[cfg(test)]
mod tests {
    use super::*;
    use foundry_config::Chain;

    #[test]
    fn watch_parse() {
        let args: TestArgs = TestArgs::parse_from(["foundry-cli", "-vw"]);
        assert!(args.watch.watch.is_some());
    }

    #[test]
    fn fuzz_seed() {
        let args: TestArgs = TestArgs::parse_from(["foundry-cli", "--fuzz-seed", "0x10"]);
        assert!(args.fuzz_seed.is_some());
    }

    // <https://github.com/foundry-rs/foundry/issues/5913>
    #[test]
    fn fuzz_seed_exists() {
        let args: TestArgs =
            TestArgs::parse_from(["foundry-cli", "-vvv", "--gas-report", "--fuzz-seed", "0x10"]);
        assert!(args.fuzz_seed.is_some());
    }

    #[test]
    fn extract_chain() {
        let test = |arg: &str, expected: Chain| {
            let args = TestArgs::parse_from(["foundry-cli", arg]);
            assert_eq!(args.evm.env.chain, Some(expected));
            let (config, evm_opts) = args.load_config_and_evm_opts().unwrap();
            assert_eq!(config.chain, Some(expected));
            assert_eq!(evm_opts.env.chain_id, Some(expected.id()));
        };
        test("--chain-id=1", Chain::mainnet());
        test("--chain-id=42", Chain::from_id(42));
    }
}