forge/cmd/test/mod.rs

use super::{install, test::filter::ProjectPathsAwareFilter, watch::WatchArgs};
use crate::{
    decode::decode_console_logs,
    gas_report::GasReport,
    multi_runner::matches_contract,
    result::{SuiteResult, TestOutcome, TestStatus},
    traces::{
        debug::{ContractSources, DebugTraceIdentifier},
        decode_trace_arena, folded_stack_trace,
        identifier::SignaturesIdentifier,
        CallTraceDecoderBuilder, InternalTraceMode, TraceKind,
    },
    MultiContractRunner, MultiContractRunnerBuilder, TestFilter,
};
use alloy_primitives::U256;
use chrono::Utc;
use clap::{Parser, ValueHint};
use eyre::{bail, Context, OptionExt, Result};
use foundry_cli::{
    opts::{BuildOpts, GlobalArgs},
    utils::{self, LoadConfig},
};
use foundry_common::{compile::ProjectCompiler, evm::EvmArgs, fs, shell, TestFunctionExt};
use foundry_compilers::{
    artifacts::output_selection::OutputSelection,
    compilers::{
        multi::{MultiCompiler, MultiCompilerLanguage},
        Language,
    },
    utils::source_files_iter,
    ProjectCompileOutput,
};
use foundry_config::{
    figment,
    figment::{
        value::{Dict, Map},
        Metadata, Profile, Provider,
    },
    filter::GlobMatcher,
    Config,
};
use foundry_debugger::Debugger;
use foundry_evm::traces::identifier::TraceIdentifiers;
use regex::Regex;
use std::{
    collections::{BTreeMap, BTreeSet},
    fmt::Write,
    path::PathBuf,
    sync::{mpsc::channel, Arc},
    time::{Duration, Instant},
};
use yansi::Paint;

mod filter;
mod summary;
use crate::{result::TestKind, traces::render_trace_arena_inner};
pub use filter::FilterArgs;
use quick_junit::{NonSuccessKind, Report, TestCase, TestCaseStatus, TestSuite};
use summary::{format_invariant_metrics_table, TestSummaryReport};

// Loads the project's figment and merges the build CLI arguments into it.
foundry_config::merge_impl_figment_convert!(TestArgs, build, evm);

/// CLI arguments for `forge test`.
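///
/// # Examples
///
/// Illustrative invocations (the test and contract names are hypothetical):
///
/// ```text
/// forge test --match-test testTransfer -vvv
/// forge test --match-contract CounterTest --gas-report
/// forge test --rerun
/// ```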
#[derive(Clone, Debug, Parser)]
#[command(next_help_heading = "Test options")]
pub struct TestArgs {
    // Include global options for users of this struct.
    #[command(flatten)]
    pub global: GlobalArgs,

    /// The contract file you want to test. This is a shortcut for `--match-path`.
    #[arg(value_hint = ValueHint::FilePath)]
    pub path: Option<GlobMatcher>,

    /// Run a single test in the debugger.
    ///
    /// The matching test will be opened in the debugger regardless of the outcome of the test.
    ///
    /// If the matching test is a fuzz test, then it will open the debugger on the first failure
    /// case. If the fuzz test does not fail, it will open the debugger on the last fuzz case.
    #[arg(long, conflicts_with_all = ["flamegraph", "flamechart", "decode_internal", "rerun"])]
    debug: bool,

    /// Generate a flamegraph for a single test. Implies `--decode-internal`.
    ///
    /// A flame graph is used to visualize which functions or operations within the smart contract
    /// are consuming the most gas overall in a sorted manner.
    #[arg(long)]
    flamegraph: bool,

    /// Generate a flamechart for a single test. Implies `--decode-internal`.
    ///
    /// A flame chart shows the gas usage over time, illustrating when each function is
    /// called (execution order) and how much gas it consumes at each point in the timeline.
    #[arg(long, conflicts_with = "flamegraph")]
    flamechart: bool,

    /// Identify internal functions in traces.
    ///
    /// This will trace internal functions and decode stack parameters.
    ///
    /// Parameters stored in memory (such as bytes or arrays) are currently decoded only when a
    /// single function is matched, similarly to `--debug`, for performance reasons.
    #[arg(long)]
    decode_internal: bool,

    /// Dumps all debugger steps to file.
    #[arg(
        long,
        requires = "debug",
        value_hint = ValueHint::FilePath,
        value_name = "PATH"
    )]
    dump: Option<PathBuf>,

    /// Print a gas report.
    #[arg(long, env = "FORGE_GAS_REPORT")]
    gas_report: bool,

    /// Check gas snapshots against previous runs.
    #[arg(long, env = "FORGE_SNAPSHOT_CHECK")]
    gas_snapshot_check: Option<bool>,

    /// Enable/disable recording of gas snapshot results.
    #[arg(long, env = "FORGE_SNAPSHOT_EMIT")]
    gas_snapshot_emit: Option<bool>,

    /// Exit with code 0 even if a test fails.
    #[arg(long, env = "FORGE_ALLOW_FAILURE")]
    allow_failure: bool,

    /// Suppress successful test traces and show only traces for failures.
    #[arg(long, short, env = "FORGE_SUPPRESS_SUCCESSFUL_TRACES", help_heading = "Display options")]
    suppress_successful_traces: bool,

    /// Output test results as JUnit XML report.
    #[arg(long, conflicts_with_all = ["quiet", "json", "gas_report", "summary", "list", "show_progress"], help_heading = "Display options")]
    pub junit: bool,

    /// Stop running tests after the first failure.
    #[arg(long)]
    pub fail_fast: bool,

    /// The Etherscan (or equivalent) API key.
    #[arg(long, env = "ETHERSCAN_API_KEY", value_name = "KEY")]
    etherscan_api_key: Option<String>,

    /// List tests instead of running them.
    #[arg(long, short, conflicts_with_all = ["show_progress", "decode_internal", "summary"], help_heading = "Display options")]
    list: bool,

    /// Set seed used to generate randomness during your fuzz runs.
    #[arg(long)]
    pub fuzz_seed: Option<U256>,

    /// The number of fuzz runs.
    #[arg(long, env = "FOUNDRY_FUZZ_RUNS", value_name = "RUNS")]
    pub fuzz_runs: Option<u64>,

    /// Timeout for each fuzz run in seconds.
    #[arg(long, env = "FOUNDRY_FUZZ_TIMEOUT", value_name = "TIMEOUT")]
    pub fuzz_timeout: Option<u64>,

    /// File to rerun fuzz failures from.
    #[arg(long)]
    pub fuzz_input_file: Option<String>,

    /// Show test execution progress.
    #[arg(long, conflicts_with_all = ["quiet", "json"], help_heading = "Display options")]
    pub show_progress: bool,

    /// Re-run recorded test failures from the last run.
    /// If no failures were recorded, a regular test run is performed.
    #[arg(long)]
    pub rerun: bool,

    /// Print test summary table.
    #[arg(long, help_heading = "Display options")]
    pub summary: bool,

    /// Print detailed test summary table.
    #[arg(long, help_heading = "Display options", requires = "summary")]
    pub detailed: bool,

    #[command(flatten)]
    filter: FilterArgs,

    #[command(flatten)]
    evm: EvmArgs,

    #[command(flatten)]
    pub build: BuildOpts,

    #[command(flatten)]
    pub watch: WatchArgs,
}

impl TestArgs {
    pub async fn run(self) -> Result<TestOutcome> {
        trace!(target: "forge::test", "executing test command");
        self.execute_tests().await
    }

    /// Returns sources which include any tests to be executed.
    /// If no filters are provided, sources are filtered by the existence of test/invariant methods
    /// in them. If filters are provided, sources are additionally filtered by them.
    pub fn get_sources_to_compile(
        &self,
        config: &Config,
        filter: &ProjectPathsAwareFilter,
    ) -> Result<BTreeSet<PathBuf>> {
        let mut project = config.create_project(true, true)?;
        project.update_output_selection(|selection| {
            *selection = OutputSelection::common_output_selection(["abi".to_string()]);
        });

        let output = project.compile()?;

        if output.has_compiler_errors() {
            sh_println!("{output}")?;
            eyre::bail!("Compilation failed");
        }

        // ABIs of all sources
        let abis = output
            .into_artifacts()
            .filter_map(|(id, artifact)| artifact.abi.map(|abi| (id, abi)))
            .collect::<BTreeMap<_, _>>();

        // Filter sources by their abis and contract names.
        let mut test_sources = abis
            .iter()
            .filter(|(id, abi)| matches_contract(id, abi, filter))
            .map(|(id, _)| id.source.clone())
            .collect::<BTreeSet<_>>();

        if test_sources.is_empty() {
            if filter.is_empty() {
                sh_println!(
                    "No tests found in project! \
                        Forge looks for functions that start with `test`."
                )?;
            } else {
                sh_println!("No tests match the provided pattern:")?;
                sh_print!("{filter}")?;

                // Try to suggest a test when there's no match
                if let Some(test_pattern) = &filter.args().test_pattern {
                    let test_name = test_pattern.as_str();
                    let candidates = abis
                        .into_iter()
                        .filter(|(id, _)| {
                            filter.matches_path(&id.source) && filter.matches_contract(&id.name)
                        })
                        .flat_map(|(_, abi)| abi.functions.into_keys())
                        .collect::<Vec<_>>();
                    if let Some(suggestion) = utils::did_you_mean(test_name, candidates).pop() {
                        sh_println!("\nDid you mean `{suggestion}`?")?;
                    }
                }
            }

            eyre::bail!("No tests to run");
        }

        // Always recompile all sources to ensure that `getCode` cheatcode can use any artifact.
        test_sources.extend(source_files_iter(
            &project.paths.sources,
            MultiCompilerLanguage::FILE_EXTENSIONS,
        ));

        Ok(test_sources)
    }

    /// Executes all the tests in the project.
    ///
    /// This will trigger the build process first. On success, all test contracts that match the
    /// configured filter will be executed.
    ///
    /// Returns the test results for all matching tests.
    pub async fn execute_tests(mut self) -> Result<TestOutcome> {
        // Merge all configs.
        let (mut config, mut evm_opts) = self.load_config_and_evm_opts()?;

        // Explicitly enable isolation for gas reports for more correct gas accounting.
        if self.gas_report {
            evm_opts.isolate = true;
        } else {
            // Do not collect gas report traces if gas report is not enabled.
            config.fuzz.gas_report_samples = 0;
            config.invariant.gas_report_samples = 0;
        }

        // Install missing dependencies.
        if install::install_missing_dependencies(&mut config) && config.auto_detect_remappings {
            // need to re-configure here to also catch additional remappings
            config = self.load_config()?;
        }

        // Set up the project.
        let project = config.project()?;

        let filter = self.filter(&config)?;
        trace!(target: "forge::test", ?filter, "using filter");

        let sources_to_compile = self.get_sources_to_compile(&config, &filter)?;

        let compiler = ProjectCompiler::new()
            .dynamic_test_linking(config.dynamic_test_linking)
            .quiet(shell::is_json() || self.junit)
            .files(sources_to_compile);

        let output = compiler.compile(&project)?;

        // Create test options from general project settings and compiler output.
        let project_root = &project.paths.root;

        let should_debug = self.debug;
        let should_draw = self.flamegraph || self.flamechart;

        // Determine print verbosity and executor verbosity.
        let verbosity = evm_opts.verbosity;
        if (self.gas_report && evm_opts.verbosity < 3) || self.flamegraph || self.flamechart {
            evm_opts.verbosity = 3;
        }

        let env = evm_opts.evm_env().await?;

        // Enable internal tracing for more informative flamegraph.
        if should_draw && !self.decode_internal {
            self.decode_internal = true;
        }

        // Choose the internal function tracing mode, if --decode-internal is provided.
        let decode_internal = if self.decode_internal {
            // If more than one function matched, we enable simple tracing.
            // If only one function matched, we enable full tracing. This is done in `run_tests`.
            InternalTraceMode::Simple
        } else {
            InternalTraceMode::None
        };

        // Prepare the test builder.
        let config = Arc::new(config);
        let runner = MultiContractRunnerBuilder::new(config.clone())
            .set_debug(should_debug)
            .set_decode_internal(decode_internal)
            .initial_balance(evm_opts.initial_balance)
            .evm_spec(config.evm_spec_id())
            .sender(evm_opts.sender)
            .with_fork(evm_opts.get_fork(&config, env.clone()))
            .enable_isolation(evm_opts.isolate)
            .odyssey(evm_opts.odyssey)
            .build::<MultiCompiler>(project_root, &output, env, evm_opts)?;

        let libraries = runner.libraries.clone();
        let mut outcome = self.run_tests(runner, config, verbosity, &filter, &output).await?;

        if should_draw {
            let (suite_name, test_name, mut test_result) =
                outcome.remove_first().ok_or_eyre("no tests were executed")?;

            let (_, arena) = test_result
                .traces
                .iter_mut()
                .find(|(kind, _)| *kind == TraceKind::Execution)
                .unwrap();

            // Decode traces.
            let decoder = outcome.last_run_decoder.as_ref().unwrap();
            decode_trace_arena(arena, decoder).await?;
            let mut fst = folded_stack_trace::build(arena);

            let label = if self.flamegraph { "flamegraph" } else { "flamechart" };
            let contract = suite_name.split(':').next_back().unwrap();
            let test_name = test_name.trim_end_matches("()");
            let file_name = format!("cache/{label}_{contract}_{test_name}.svg");
            let file = std::fs::File::create(&file_name).wrap_err("failed to create file")?;
            let file = std::io::BufWriter::new(file);

            let mut options = inferno::flamegraph::Options::default();
            options.title = format!("{label} {contract}::{test_name}");
            options.count_name = "gas".to_string();
            if self.flamechart {
                options.flame_chart = true;
                fst.reverse();
            }

            // Generate SVG.
            inferno::flamegraph::from_lines(&mut options, fst.iter().map(String::as_str), file)
                .wrap_err("failed to write svg")?;
            sh_println!("Saved to {file_name}")?;

            // Open SVG in default program.
            if let Err(e) = opener::open(&file_name) {
                sh_err!("Failed to open {file_name}; please open it manually: {e}")?;
            }
        }

        if should_debug {
            // Get first non-empty suite result. We will have only one such entry.
            let (_, _, test_result) =
                outcome.remove_first().ok_or_eyre("no tests were executed")?;

            let sources =
                ContractSources::from_project_output(&output, project.root(), Some(&libraries))?;

            // Run the debugger.
            let mut builder = Debugger::builder()
                .traces(
                    test_result.traces.iter().filter(|(t, _)| t.is_execution()).cloned().collect(),
                )
                .sources(sources)
                .breakpoints(test_result.breakpoints.clone());

            if let Some(decoder) = &outcome.last_run_decoder {
                builder = builder.decoder(decoder);
            }

            let mut debugger = builder.build();
            if let Some(dump_path) = self.dump {
                debugger.dump_to_file(&dump_path)?;
            } else {
                debugger.try_run_tui()?;
            }
        }

        Ok(outcome)
    }

    /// Runs all tests that match the filter predicate from a test runner.
    pub async fn run_tests(
        &self,
        mut runner: MultiContractRunner,
        config: Arc<Config>,
        verbosity: u8,
        filter: &ProjectPathsAwareFilter,
        output: &ProjectCompileOutput,
    ) -> eyre::Result<TestOutcome> {
        if self.list {
            return list(runner, filter);
        }

        trace!(target: "forge::test", "running all tests");

        // If we need to render to a serialized format, we should not print anything else to stdout.
        let silent = self.gas_report && shell::is_json() || self.summary && shell::is_json();

        let num_filtered = runner.matching_test_functions(filter).count();
        if num_filtered != 1 && (self.debug || self.flamegraph || self.flamechart) {
            let action = if self.flamegraph {
                "generate a flamegraph"
            } else if self.flamechart {
                "generate a flamechart"
            } else {
                "run the debugger"
            };
            let filter = if filter.is_empty() {
                String::new()
            } else {
                format!("\n\nFilter used:\n{filter}")
            };
            eyre::bail!(
                "{num_filtered} tests matched your criteria, but exactly 1 test must match in order to {action}.\n\n\
                 Use --match-contract and --match-path to further limit the search.{filter}",
            );
        }

        // If exactly one test matched, we enable full tracing.
        if num_filtered == 1 && self.decode_internal {
            runner.decode_internal = InternalTraceMode::Full;
        }

        // Run tests in a non-streaming fashion and collect results for serialization.
        if !self.gas_report && !self.summary && shell::is_json() {
            let mut results = runner.test_collect(filter)?;
            results.values_mut().for_each(|suite_result| {
                for test_result in suite_result.test_results.values_mut() {
                    if verbosity >= 2 {
                        // Decode logs at level 2 and above.
                        test_result.decoded_logs = decode_console_logs(&test_result.logs);
                    } else {
                        // Empty logs for non verbose runs.
                        test_result.logs = vec![];
                    }
                }
            });
            sh_println!("{}", serde_json::to_string(&results)?)?;
            return Ok(TestOutcome::new(results, self.allow_failure));
        }

        if self.junit {
            let results = runner.test_collect(filter)?;
            sh_println!("{}", junit_xml_report(&results, verbosity).to_string()?)?;
            return Ok(TestOutcome::new(results, self.allow_failure));
        }

        let remote_chain_id = runner.evm_opts.get_remote_chain_id().await;
        let known_contracts = runner.known_contracts.clone();

        let libraries = runner.libraries.clone();

        // Run tests in a streaming fashion.
        let (tx, rx) = channel::<(String, SuiteResult)>();
        let timer = Instant::now();
        let show_progress = config.show_progress;
        let handle = tokio::task::spawn_blocking({
            let filter = filter.clone();
            move || runner.test(&filter, tx, show_progress)
        });

        // Set up trace identifiers.
        let mut identifier = TraceIdentifiers::new().with_local(&known_contracts);

        // Avoid using etherscan for gas report as we decode more traces and this will be
        // expensive.
        if !self.gas_report {
            identifier = identifier.with_etherscan(&config, remote_chain_id)?;
        }

        // Build the trace decoder.
        let mut builder = CallTraceDecoderBuilder::new()
            .with_known_contracts(&known_contracts)
            .with_verbosity(verbosity);
        // Signatures are of no value for gas reports.
        if !self.gas_report {
            builder = builder.with_signature_identifier(SignaturesIdentifier::new(
                Config::foundry_cache_dir(),
                config.offline,
            )?);
        }

        if self.decode_internal {
            let sources =
                ContractSources::from_project_output(output, &config.root, Some(&libraries))?;
            builder = builder.with_debug_identifier(DebugTraceIdentifier::new(sources));
        }
        let mut decoder = builder.build();

        let mut gas_report = self.gas_report.then(|| {
            GasReport::new(
                config.gas_reports.clone(),
                config.gas_reports_ignore.clone(),
                config.gas_reports_include_tests,
            )
        });

        let mut gas_snapshots = BTreeMap::<String, BTreeMap<String, String>>::new();

        let mut outcome = TestOutcome::empty(self.allow_failure);

        let mut any_test_failed = false;
        for (contract_name, suite_result) in rx {
            let tests = &suite_result.test_results;

            // Clear the addresses and labels from previous test.
            decoder.clear_addresses();

            // We identify addresses if we're going to print *any* trace or gas report.
            let identify_addresses = verbosity >= 3 ||
                self.gas_report ||
                self.debug ||
                self.flamegraph ||
                self.flamechart;

            // Print suite header.
            if !silent {
                sh_println!()?;
                for warning in &suite_result.warnings {
                    sh_warn!("{warning}")?;
                }
                if !tests.is_empty() {
                    let len = tests.len();
                    let tests = if len > 1 { "tests" } else { "test" };
                    sh_println!("Ran {len} {tests} for {contract_name}")?;
                }
            }

            // Process individual test results, printing logs and traces when necessary.
            for (name, result) in tests {
                let show_traces =
                    !self.suppress_successful_traces || result.status == TestStatus::Failure;
                if !silent {
                    sh_println!("{}", result.short_result(name))?;

                    // Display invariant metrics if invariant kind.
                    if let TestKind::Invariant { metrics, .. } = &result.kind {
                        if !metrics.is_empty() {
                            let _ = sh_println!("\n{}\n", format_invariant_metrics_table(metrics));
                        }
                    }

                    // We only display logs at level 2 and above
                    if verbosity >= 2 && show_traces {
                        // We only decode logs from Hardhat and DS-style console events
                        let console_logs = decode_console_logs(&result.logs);
                        if !console_logs.is_empty() {
                            sh_println!("Logs:")?;
                            for log in console_logs {
                                sh_println!("  {log}")?;
                            }
                            sh_println!()?;
                        }
                    }
                }

                // We shouldn't break out of the outer loop directly here so that we finish
                // processing the remaining tests and print the suite summary.
                any_test_failed |= result.status == TestStatus::Failure;

                // Clear the addresses and labels from previous runs.
                decoder.clear_addresses();
                decoder
                    .labels
                    .extend(result.labeled_addresses.iter().map(|(k, v)| (*k, v.clone())));

                // Identify addresses and decode traces.
                let mut decoded_traces = Vec::with_capacity(result.traces.len());
                for (kind, arena) in &mut result.traces.clone() {
                    if identify_addresses {
                        decoder.identify(arena, &mut identifier);
                    }

                    // verbosity:
                    // - 0..3: nothing
                    // - 3: only display traces for failed tests
                    // - 4: also display the setup trace for failed tests
                    // - 5..: display all traces for all tests, including storage changes
                    let should_include = match kind {
                        TraceKind::Execution => {
                            (verbosity == 3 && result.status.is_failure()) || verbosity >= 4
                        }
                        TraceKind::Setup => {
                            (verbosity == 4 && result.status.is_failure()) || verbosity >= 5
                        }
                        TraceKind::Deployment => false,
                    };

                    if should_include {
                        decode_trace_arena(arena, &decoder).await?;
                        decoded_traces.push(render_trace_arena_inner(arena, false, verbosity > 4));
                    }
                }

                if !silent && show_traces && !decoded_traces.is_empty() {
                    sh_println!("Traces:")?;
                    for trace in &decoded_traces {
                        sh_println!("{trace}")?;
                    }
                }

                if let Some(gas_report) = &mut gas_report {
                    gas_report.analyze(result.traces.iter().map(|(_, a)| &a.arena), &decoder).await;

                    for trace in &result.gas_report_traces {
                        decoder.clear_addresses();

                        // Re-execute setup and deployment traces to collect identities created in
                        // setUp and constructor.
                        for (kind, arena) in &result.traces {
                            if !matches!(kind, TraceKind::Execution) {
                                decoder.identify(arena, &mut identifier);
                            }
                        }

                        for arena in trace {
                            decoder.identify(arena, &mut identifier);
                            gas_report.analyze([arena], &decoder).await;
                        }
                    }
                }

                // Collect and merge gas snapshots.
                for (group, new_snapshots) in &result.gas_snapshots {
                    gas_snapshots.entry(group.clone()).or_default().extend(new_snapshots.clone());
                }
            }

            // Write gas snapshots to disk if any were collected.
            if !gas_snapshots.is_empty() {
                // By default `gas_snapshot_check` is set to `false` in the config.
                //
                // The user can either:
                // - Set `FORGE_SNAPSHOT_CHECK=true` in the environment.
                // - Pass `--gas-snapshot-check=true` as a CLI argument.
                // - Set `gas_snapshot_check = true` in the config.
                //
                // If the user passes `--gas-snapshot-check=<bool>` then it will override the config
                // and the environment variable, disabling the check if `false` is passed.
                //
                // Exiting early with code 1 if differences are found.
                if self.gas_snapshot_check.unwrap_or(config.gas_snapshot_check) {
                    let differences_found = gas_snapshots.clone().into_iter().fold(
                        false,
                        |mut found, (group, snapshots)| {
                            // If the snapshot file doesn't exist, we can't compare, so skip this
                            // group while keeping any differences already found.
                            if !config.snapshots.join(format!("{group}.json")).exists() {
                                return found;
                            }

                            let previous_snapshots: BTreeMap<String, String> =
                                fs::read_json_file(&config.snapshots.join(format!("{group}.json")))
                                    .expect("Failed to read snapshots from disk");

                            let diff: BTreeMap<_, _> = snapshots
                                .iter()
                                .filter_map(|(k, v)| {
                                    previous_snapshots.get(k).and_then(|previous_snapshot| {
                                        if previous_snapshot != v {
                                            Some((
                                                k.clone(),
                                                (previous_snapshot.clone(), v.clone()),
                                            ))
                                        } else {
                                            None
                                        }
                                    })
                                })
                                .collect();

                            if !diff.is_empty() {
                                let _ = sh_eprintln!(
                                    "{}",
                                    format!("\n[{group}] Failed to match snapshots:").red().bold()
                                );

                                for (key, (previous_snapshot, snapshot)) in &diff {
                                    let _ = sh_eprintln!(
                                        "{}",
                                        format!("- [{key}] {previous_snapshot} → {snapshot}").red()
                                    );
                                }

                                found = true;
                            }

                            found
                        },
                    );

                    if differences_found {
                        sh_eprintln!()?;
                        eyre::bail!("Snapshots differ from previous run");
                    }
                }

                // By default `gas_snapshot_emit` is set to `true` in the config.
                //
                // The user can either:
                // - Set `FORGE_SNAPSHOT_EMIT=false` in the environment.
                // - Pass `--gas-snapshot-emit=false` as a CLI argument.
                // - Set `gas_snapshot_emit = false` in the config.
                //
                // If the user passes `--gas-snapshot-emit=<bool>` then it will override the config
                // and the environment variable, enabling the check if `true` is passed.
                if self.gas_snapshot_emit.unwrap_or(config.gas_snapshot_emit) {
                    // Create `snapshots` directory if it doesn't exist.
                    fs::create_dir_all(&config.snapshots)?;

                    // Write gas snapshots to disk per group.
                    gas_snapshots.clone().into_iter().for_each(|(group, snapshots)| {
                        fs::write_pretty_json_file(
                            &config.snapshots.join(format!("{group}.json")),
                            &snapshots,
                        )
                        .expect("Failed to write gas snapshots to disk");
                    });
                }
            }

            // Print suite summary.
            if !silent {
                sh_println!("{}", suite_result.summary())?;
            }

            // Add the suite result to the outcome.
            outcome.results.insert(contract_name, suite_result);

            // Stop processing the remaining suites if any test failed and `fail_fast` is set.
            if self.fail_fast && any_test_failed {
                break;
            }
        }
        outcome.last_run_decoder = Some(decoder);
        let duration = timer.elapsed();

        trace!(target: "forge::test", len=outcome.results.len(), %any_test_failed, "done with results");

        if let Some(gas_report) = gas_report {
            let finalized = gas_report.finalize();
            sh_println!("{}", &finalized)?;
            outcome.gas_report = Some(finalized);
        }

        if !self.summary && !shell::is_json() {
            sh_println!("{}", outcome.summary(duration))?;
        }

        if self.summary && !outcome.results.is_empty() {
            let summary_report = TestSummaryReport::new(self.detailed, outcome.clone());
            sh_println!("{}", &summary_report)?;
        }

        // Reattach the task.
        if let Err(e) = handle.await {
            match e.try_into_panic() {
                Ok(payload) => std::panic::resume_unwind(payload),
                Err(e) => return Err(e.into()),
            }
        }

        // Persist test run failures to enable replaying.
        persist_run_failures(&config, &outcome);

        Ok(outcome)
    }

    /// Returns the flattened [`FilterArgs`] arguments merged with [`Config`].
    /// Loads and applies the filter from file if replaying only the last test run's failures.
    pub fn filter(&self, config: &Config) -> Result<ProjectPathsAwareFilter> {
        let mut filter = self.filter.clone();
        if self.rerun {
            filter.test_pattern = last_run_failures(config);
        }
        if filter.path_pattern.is_some() {
            if self.path.is_some() {
                bail!("Cannot supply both --match-path and |path|");
            }
        } else {
            filter.path_pattern = self.path.clone();
        }
        Ok(filter.merge_with_config(config))
    }

    /// Returns whether `--watch` was configured.
    pub fn is_watch(&self) -> bool {
        self.watch.watch.is_some()
    }

    /// Returns the [`watchexec::Config`] necessary to bootstrap a new watch loop.
    pub(crate) fn watchexec_config(&self) -> Result<watchexec::Config> {
        self.watch.watchexec_config(|| {
            let config = self.load_config()?;
            Ok([config.src, config.test])
        })
    }
}

impl Provider for TestArgs {
    fn metadata(&self) -> Metadata {
        Metadata::named("Core Build Args Provider")
    }

    fn data(&self) -> Result<Map<Profile, Dict>, figment::Error> {
        let mut dict = Dict::default();

        let mut fuzz_dict = Dict::default();
        if let Some(fuzz_seed) = self.fuzz_seed {
            fuzz_dict.insert("seed".to_string(), fuzz_seed.to_string().into());
        }
        if let Some(fuzz_runs) = self.fuzz_runs {
            fuzz_dict.insert("runs".to_string(), fuzz_runs.into());
        }
        if let Some(fuzz_timeout) = self.fuzz_timeout {
            fuzz_dict.insert("timeout".to_string(), fuzz_timeout.into());
        }
        if let Some(fuzz_input_file) = self.fuzz_input_file.clone() {
            fuzz_dict.insert("failure_persist_file".to_string(), fuzz_input_file.into());
        }
        dict.insert("fuzz".to_string(), fuzz_dict.into());

        if let Some(etherscan_api_key) =
            self.etherscan_api_key.as_ref().filter(|s| !s.trim().is_empty())
        {
            dict.insert("etherscan_api_key".to_string(), etherscan_api_key.to_string().into());
        }

        if self.show_progress {
            dict.insert("show_progress".to_string(), true.into());
        }

        Ok(Map::from([(Config::selected_profile(), dict)]))
    }
}

/// Lists all matching tests
fn list(runner: MultiContractRunner, filter: &ProjectPathsAwareFilter) -> Result<TestOutcome> {
    let results = runner.list(filter);

    if shell::is_json() {
        sh_println!("{}", serde_json::to_string(&results)?)?;
    } else {
        for (file, contracts) in &results {
            sh_println!("{file}")?;
            for (contract, tests) in contracts {
                sh_println!("  {contract}")?;
                sh_println!("    {}\n", tests.join("\n    "))?;
            }
        }
    }
    Ok(TestOutcome::empty(false))
}

/// Load persisted filter (with last test run failures) from file.
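/// The file is expected to contain a regex previously written by [`persist_run_failures`];
/// if it cannot be read, `None` is returned and a regular test run is performed instead.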
fn last_run_failures(config: &Config) -> Option<regex::Regex> {
    match fs::read_to_string(&config.test_failures_file) {
        Ok(filter) => Some(Regex::new(&filter).unwrap()),
        Err(_) => None,
    }
}

/// Persist filter with last test run failures (only if there's any failure).
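///
/// The persisted filter is a `|`-separated regex of the failed test names (for example
/// `testWithdraw|testDeposit`, hypothetical names here), which [`last_run_failures`] later
/// compiles when `--rerun` is passed.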
fn persist_run_failures(config: &Config, outcome: &TestOutcome) {
    if outcome.failed() > 0 && fs::create_file(&config.test_failures_file).is_ok() {
        let mut filter = String::new();
        let mut failures = outcome.failures().peekable();
        while let Some((test_name, _)) = failures.next() {
            if test_name.is_any_test() {
                if let Some(test_match) = test_name.split("(").next() {
                    filter.push_str(test_match);
                    if failures.peek().is_some() {
                        filter.push('|');
                    }
                }
            }
        }
        let _ = fs::write(&config.test_failures_file, filter);
    }
}

/// Generates a test report in JUnit XML format.
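///
/// Each suite maps to a `<testsuite>` element and each test to a `<testcase>` element in the
/// standard JUnit XML layout, carrying per-test durations, skip/failure status and, at verbosity
/// level 2 or higher, decoded console logs in the system output.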
fn junit_xml_report(results: &BTreeMap<String, SuiteResult>, verbosity: u8) -> Report {
    let mut total_duration = Duration::default();
    let mut junit_report = Report::new("Test run");
    junit_report.set_timestamp(Utc::now());
    for (suite_name, suite_result) in results {
        let mut test_suite = TestSuite::new(suite_name);
        total_duration += suite_result.duration;
        test_suite.set_time(suite_result.duration);
        test_suite.set_system_out(suite_result.summary());
        for (test_name, test_result) in &suite_result.test_results {
            let mut test_status = match test_result.status {
                TestStatus::Success => TestCaseStatus::success(),
                TestStatus::Failure => TestCaseStatus::non_success(NonSuccessKind::Failure),
                TestStatus::Skipped => TestCaseStatus::skipped(),
            };
            if let Some(reason) = &test_result.reason {
                test_status.set_message(reason);
            }

            let mut test_case = TestCase::new(test_name, test_status);
            test_case.set_time(test_result.duration);

            let mut sys_out = String::new();
            let result_report = test_result.kind.report();
            write!(sys_out, "{test_result} {test_name} {result_report}").unwrap();
            if verbosity >= 2 && !test_result.logs.is_empty() {
                write!(sys_out, "\\nLogs:\\n").unwrap();
                let console_logs = decode_console_logs(&test_result.logs);
                for log in console_logs {
                    write!(sys_out, "  {log}\\n").unwrap();
                }
            }

            test_case.set_system_out(sys_out);
            test_suite.add_test_case(test_case);
        }
        junit_report.add_test_suite(test_suite);
    }
    junit_report.set_time(total_duration);
    junit_report
}

#[cfg(test)]
mod tests {
    use super::*;
    use foundry_config::Chain;

    #[test]
    fn watch_parse() {
        let args: TestArgs = TestArgs::parse_from(["foundry-cli", "-vw"]);
        assert!(args.watch.watch.is_some());
    }

    #[test]
    fn fuzz_seed() {
        let args: TestArgs = TestArgs::parse_from(["foundry-cli", "--fuzz-seed", "0x10"]);
        assert!(args.fuzz_seed.is_some());
    }

    // <https://github.com/foundry-rs/foundry/issues/5913>
    #[test]
    fn fuzz_seed_exists() {
        let args: TestArgs =
            TestArgs::parse_from(["foundry-cli", "-vvv", "--gas-report", "--fuzz-seed", "0x10"]);
        assert!(args.fuzz_seed.is_some());
    }

    #[test]
    fn extract_chain() {
        let test = |arg: &str, expected: Chain| {
            let args = TestArgs::parse_from(["foundry-cli", arg]);
            assert_eq!(args.evm.env.chain, Some(expected));
            let (config, evm_opts) = args.load_config_and_evm_opts().unwrap();
            assert_eq!(config.chain, Some(expected));
            assert_eq!(evm_opts.env.chain_id, Some(expected.id()));
        };
        test("--chain-id=1", Chain::mainnet());
        test("--chain-id=42", Chain::from_id(42));
    }
}
1011}