// forge/cmd/test/mod.rs

1use super::{install, test::filter::ProjectPathsAwareFilter, watch::WatchArgs};
2use crate::{
3    MultiContractRunner, MultiContractRunnerBuilder,
4    decode::decode_console_logs,
5    gas_report::GasReport,
6    multi_runner::matches_artifact,
7    result::{SuiteResult, TestOutcome, TestStatus},
8    traces::{
9        CallTraceDecoderBuilder, InternalTraceMode, TraceKind,
10        debug::{ContractSources, DebugTraceIdentifier},
11        decode_trace_arena, folded_stack_trace,
12        identifier::SignaturesIdentifier,
13    },
14};
15use alloy_primitives::U256;
16use chrono::Utc;
17use clap::{Parser, ValueHint};
18use eyre::{Context, OptionExt, Result, bail};
19use foundry_cli::{
20    opts::{BuildOpts, EvmArgs, GlobalArgs},
21    utils::{self, LoadConfig},
22};
23use foundry_common::{EmptyTestFilter, TestFunctionExt, compile::ProjectCompiler, fs, shell};
24use foundry_compilers::{
25    ProjectCompileOutput,
26    artifacts::output_selection::OutputSelection,
27    compilers::{
28        Language,
29        multi::{MultiCompiler, MultiCompilerLanguage},
30    },
31    utils::source_files_iter,
32};
33use foundry_config::{
34    Config, figment,
35    figment::{
36        Metadata, Profile, Provider,
37        value::{Dict, Map},
38    },
39    filter::GlobMatcher,
40};
41use foundry_debugger::Debugger;
42use foundry_evm::{
43    opts::EvmOpts,
44    traces::{backtrace::BacktraceBuilder, identifier::TraceIdentifiers, prune_trace_depth},
45};
46use rand::Rng;
47use regex::Regex;
48use std::{
49    collections::{BTreeMap, BTreeSet},
50    fmt::Write,
51    path::{Path, PathBuf},
52    sync::{Arc, mpsc::channel},
53    time::{Duration, Instant},
54};
55use yansi::Paint;
56
57mod filter;
58mod summary;
59use crate::{result::TestKind, traces::render_trace_arena_inner};
60pub use filter::FilterArgs;
61use quick_junit::{NonSuccessKind, Report, TestCase, TestCaseStatus, TestSuite};
62use summary::{TestSummaryReport, format_invariant_metrics_table};
63
64// Loads project's figment and merges the build cli arguments into it
65foundry_config::merge_impl_figment_convert!(TestArgs, build, evm);
66
/// CLI arguments for `forge test`.
#[derive(Clone, Debug, Parser)]
#[command(next_help_heading = "Test options")]
pub struct TestArgs {
    // Include global options for users of this struct.
    #[command(flatten)]
    pub global: GlobalArgs,

    /// The contract file you want to test, it's a shortcut for --match-path.
    #[arg(value_hint = ValueHint::FilePath)]
    pub path: Option<GlobMatcher>,

    /// Run a single test in the debugger.
    ///
    /// The matching test will be opened in the debugger regardless of the outcome of the test.
    ///
    /// If the matching test is a fuzz test, then it will open the debugger on the first failure
    /// case. If the fuzz test does not fail, it will open the debugger on the last fuzz case.
    #[arg(long, conflicts_with_all = ["flamegraph", "flamechart", "decode_internal", "rerun"])]
    debug: bool,

    /// Generate a flamegraph for a single test. Implies `--decode-internal`.
    ///
    /// A flame graph is used to visualize which functions or operations within the smart contract
    /// are consuming the most gas overall in a sorted manner.
    #[arg(long)]
    flamegraph: bool,

    /// Generate a flamechart for a single test. Implies `--decode-internal`.
    ///
    /// A flame chart shows the gas usage over time, illustrating when each function is
    /// called (execution order) and how much gas it consumes at each point in the timeline.
    #[arg(long, conflicts_with = "flamegraph")]
    flamechart: bool,

    /// Identify internal functions in traces.
    ///
    /// This will trace internal functions and decode stack parameters.
    ///
    /// Parameters stored in memory (such as bytes or arrays) are currently decoded only when a
    /// single function is matched, similarly to `--debug`, for performance reasons.
    #[arg(long)]
    decode_internal: bool,

    /// Dumps all debugger steps to file.
    #[arg(
        long,
        requires = "debug",
        value_hint = ValueHint::FilePath,
        value_name = "PATH"
    )]
    dump: Option<PathBuf>,

    /// Print a gas report.
    #[arg(long, env = "FORGE_GAS_REPORT")]
    gas_report: bool,

    /// Check gas snapshots against previous runs.
    #[arg(long, env = "FORGE_SNAPSHOT_CHECK")]
    gas_snapshot_check: Option<bool>,

    /// Enable/disable recording of gas snapshot results.
    #[arg(long, env = "FORGE_SNAPSHOT_EMIT")]
    gas_snapshot_emit: Option<bool>,

    /// Exit with code 0 even if a test fails.
    #[arg(long, env = "FORGE_ALLOW_FAILURE")]
    allow_failure: bool,

    /// Suppress successful test traces and show only traces for failures.
    #[arg(long, short, env = "FORGE_SUPPRESS_SUCCESSFUL_TRACES", help_heading = "Display options")]
    suppress_successful_traces: bool,

    /// Defines the depth of a trace.
    ///
    /// Trace nodes deeper than this value are pruned from the displayed output.
    #[arg(long)]
    trace_depth: Option<usize>,

    /// Output test results as JUnit XML report.
    #[arg(long, conflicts_with_all = ["quiet", "json", "gas_report", "summary", "list", "show_progress"], help_heading = "Display options")]
    pub junit: bool,

    /// Stop running tests after the first failure.
    #[arg(long)]
    pub fail_fast: bool,

    /// The Etherscan (or equivalent) API key.
    #[arg(long, env = "ETHERSCAN_API_KEY", value_name = "KEY")]
    etherscan_api_key: Option<String>,

    /// List tests instead of running them.
    #[arg(long, short, conflicts_with_all = ["show_progress", "decode_internal", "summary"], help_heading = "Display options")]
    list: bool,

    /// Set seed used to generate randomness during your fuzz runs.
    #[arg(long)]
    pub fuzz_seed: Option<U256>,

    /// The number of fuzz runs.
    #[arg(long, env = "FOUNDRY_FUZZ_RUNS", value_name = "RUNS")]
    pub fuzz_runs: Option<u64>,

    /// Timeout for each fuzz run in seconds.
    #[arg(long, env = "FOUNDRY_FUZZ_TIMEOUT", value_name = "TIMEOUT")]
    pub fuzz_timeout: Option<u64>,

    /// File to rerun fuzz failures from.
    #[arg(long)]
    pub fuzz_input_file: Option<String>,

    /// Show test execution progress.
    #[arg(long, conflicts_with_all = ["quiet", "json"], help_heading = "Display options")]
    pub show_progress: bool,

    /// Re-run recorded test failures from last run.
    /// If no failure recorded then regular test run is performed.
    #[arg(long)]
    pub rerun: bool,

    /// Print test summary table.
    #[arg(long, help_heading = "Display options")]
    pub summary: bool,

    /// Print detailed test summary table.
    #[arg(long, help_heading = "Display options", requires = "summary")]
    pub detailed: bool,

    /// Disables the labels in the traces.
    #[arg(long, help_heading = "Display options")]
    pub disable_labels: bool,

    // Test filtering options (--match-test, --match-contract, --match-path, ...).
    #[command(flatten)]
    filter: FilterArgs,

    // EVM execution options (forks, sender, verbosity, ...).
    #[command(flatten)]
    evm: EvmArgs,

    // Build/compilation options shared with other commands.
    #[command(flatten)]
    pub build: BuildOpts,

    // Options for `forge test --watch`.
    #[command(flatten)]
    pub watch: WatchArgs,
}
208
209impl TestArgs {
    /// Entry point for the `forge test` command: compiles the project and then executes all
    /// tests matching the configured filter.
    pub async fn run(mut self) -> Result<TestOutcome> {
        trace!(target: "forge::test", "executing test command");
        self.compile_and_run().await
    }
214
215    /// Returns a list of files that need to be compiled in order to run all the tests that match
216    /// the given filter.
217    ///
218    /// This means that it will return all sources that are not test contracts or that match the
219    /// filter. We want to compile all non-test sources always because tests might depend on them
220    /// dynamically through cheatcodes.
221    #[instrument(target = "forge::test", skip_all)]
222    pub fn get_sources_to_compile(
223        &self,
224        config: &Config,
225        test_filter: &ProjectPathsAwareFilter,
226    ) -> Result<BTreeSet<PathBuf>> {
227        // An empty filter doesn't filter out anything.
228        // We can still optimize slightly by excluding scripts.
229        if test_filter.is_empty() {
230            return Ok(source_files_iter(&config.src, MultiCompilerLanguage::FILE_EXTENSIONS)
231                .chain(source_files_iter(&config.test, MultiCompilerLanguage::FILE_EXTENSIONS))
232                .collect());
233        }
234
235        let mut project = config.create_project(true, true)?;
236        project.update_output_selection(|selection| {
237            *selection = OutputSelection::common_output_selection(["abi".to_string()]);
238        });
239        let output = project.compile()?;
240        if output.has_compiler_errors() {
241            sh_println!("{output}")?;
242            eyre::bail!("Compilation failed");
243        }
244
245        Ok(output
246            .artifact_ids()
247            .filter_map(|(id, artifact)| artifact.abi.as_ref().map(|abi| (id, abi)))
248            .filter(|(id, abi)| {
249                id.source.starts_with(&config.src) || matches_artifact(test_filter, id, abi)
250            })
251            .map(|(id, _)| id.source)
252            .collect())
253    }
254
255    /// Executes all the tests in the project.
256    ///
257    /// This will trigger the build process first. On success all test contracts that match the
258    /// configured filter will be executed
259    ///
260    /// Returns the test results for all matching tests.
261    pub async fn compile_and_run(&mut self) -> Result<TestOutcome> {
262        // Merge all configs.
263        let (mut config, evm_opts) = self.load_config_and_evm_opts()?;
264
265        // Install missing dependencies.
266        if install::install_missing_dependencies(&mut config).await && config.auto_detect_remappings
267        {
268            // need to re-configure here to also catch additional remappings
269            config = self.load_config()?;
270        }
271
272        // Set up the project.
273        let project = config.project()?;
274
275        let filter = self.filter(&config)?;
276        trace!(target: "forge::test", ?filter, "using filter");
277
278        let compiler = ProjectCompiler::new()
279            .dynamic_test_linking(config.dynamic_test_linking)
280            .quiet(shell::is_json() || self.junit)
281            .files(self.get_sources_to_compile(&config, &filter)?);
282        let output = compiler.compile(&project)?;
283
284        self.run_tests(&project.paths.root, config, evm_opts, &output, &filter, false).await
285    }
286
    /// Executes all the tests in the project.
    ///
    /// See [`Self::compile_and_run`] for more details.
    pub async fn run_tests(
        &mut self,
        project_root: &Path,
        mut config: Config,
        mut evm_opts: EvmOpts,
        output: &ProjectCompileOutput,
        filter: &ProjectPathsAwareFilter,
        coverage: bool,
    ) -> Result<TestOutcome> {
        // Explicitly enable isolation for gas reports for more correct gas accounting.
        if self.gas_report {
            evm_opts.isolate = true;
        } else {
            // Do not collect gas report traces if gas report is not enabled.
            config.fuzz.gas_report_samples = 0;
            config.invariant.gas_report_samples = 0;
        }

        // Generate a random fuzz seed if none provided, for reproducibility.
        config.fuzz.seed = config
            .fuzz
            .seed
            .or_else(|| Some(U256::from_be_bytes(rand::rng().random::<[u8; 32]>())));

        // Create test options from general project settings and compiler output.
        let should_debug = self.debug;
        let should_draw = self.flamegraph || self.flamechart;

        // Determine print verbosity and executor verbosity.
        // Gas reports and flame drawings need execution traces, which require executor
        // verbosity >= 3; keep the original value for display purposes.
        let verbosity = evm_opts.verbosity;
        if (self.gas_report && evm_opts.verbosity < 3) || self.flamegraph || self.flamechart {
            evm_opts.verbosity = 3;
        }

        let env = evm_opts.evm_env().await?;

        // Enable internal tracing for more informative flamegraph.
        if should_draw && !self.decode_internal {
            self.decode_internal = true;
        }

        // Choose the internal function tracing mode, if --decode-internal is provided.
        let decode_internal = if self.decode_internal {
            // If more than one function matched, we enable simple tracing.
            // If only one function matched, we enable full tracing. This upgrade happens in
            // `run_tests_inner`.
            InternalTraceMode::Simple
        } else {
            InternalTraceMode::None
        };

        // Prepare the test builder.
        let config = Arc::new(config);
        let runner = MultiContractRunnerBuilder::new(config.clone())
            .set_debug(should_debug)
            .set_decode_internal(decode_internal)
            .initial_balance(evm_opts.initial_balance)
            .evm_spec(config.evm_spec_id())
            .sender(evm_opts.sender)
            .with_fork(evm_opts.get_fork(&config, env.clone()))
            .enable_isolation(evm_opts.isolate)
            .networks(evm_opts.networks)
            .fail_fast(self.fail_fast)
            .set_coverage(coverage)
            .build::<MultiCompiler>(output, env, evm_opts)?;

        // Keep the linked libraries around: the debugger needs them to map sources.
        let libraries = runner.libraries.clone();
        let mut outcome = self.run_tests_inner(runner, config, verbosity, filter, output).await?;

        // Render a flamegraph/flamechart SVG for the single matched test.
        if should_draw {
            let (suite_name, test_name, mut test_result) =
                outcome.remove_first().ok_or_eyre("no tests were executed")?;

            let (_, arena) = test_result
                .traces
                .iter_mut()
                .find(|(kind, _)| *kind == TraceKind::Execution)
                .unwrap();

            // Decode traces.
            let decoder = outcome.last_run_decoder.as_ref().unwrap();
            decode_trace_arena(arena, decoder).await;
            let mut fst = folded_stack_trace::build(arena, self.evm.isolate);

            let label = if self.flamegraph { "flamegraph" } else { "flamechart" };
            let contract = suite_name.split(':').next_back().unwrap();
            let test_name = test_name.trim_end_matches("()");
            let file_name = format!("cache/{label}_{contract}_{test_name}.svg");
            let file = std::fs::File::create(&file_name).wrap_err("failed to create file")?;
            let file = std::io::BufWriter::new(file);

            let mut options = inferno::flamegraph::Options::default();
            options.title = format!("{label} {contract}::{test_name}");
            options.count_name = "gas".to_string();
            if self.flamechart {
                // Flame charts preserve call order; the folded stack lines must be reversed.
                options.flame_chart = true;
                fst.reverse();
            }

            // Generate SVG.
            inferno::flamegraph::from_lines(&mut options, fst.iter().map(String::as_str), file)
                .wrap_err("failed to write svg")?;
            sh_println!("Saved to {file_name}")?;

            // Open SVG in default program.
            if let Err(e) = opener::open(&file_name) {
                sh_err!("Failed to open {file_name}; please open it manually: {e}")?;
            }
        }

        if should_debug {
            // Get first non-empty suite result. We will have only one such entry.
            let (_, _, test_result) =
                outcome.remove_first().ok_or_eyre("no tests were executed")?;

            let sources =
                ContractSources::from_project_output(output, project_root, Some(&libraries))?;

            // Run the debugger on the execution traces only.
            let mut builder = Debugger::builder()
                .traces(
                    test_result.traces.iter().filter(|(t, _)| t.is_execution()).cloned().collect(),
                )
                .sources(sources)
                .breakpoints(test_result.breakpoints.clone());

            if let Some(decoder) = &outcome.last_run_decoder {
                builder = builder.decoder(decoder);
            }

            // Either dump all debugger steps to a file (--dump) or open the interactive TUI.
            let mut debugger = builder.build();
            if let Some(dump_path) = &self.dump {
                debugger.dump_to_file(dump_path)?;
            } else {
                debugger.try_run_tui()?;
            }
        }

        Ok(outcome)
    }
429
430    /// Run all tests that matches the filter predicate from a test runner
431    async fn run_tests_inner(
432        &self,
433        mut runner: MultiContractRunner,
434        config: Arc<Config>,
435        verbosity: u8,
436        filter: &ProjectPathsAwareFilter,
437        output: &ProjectCompileOutput,
438    ) -> eyre::Result<TestOutcome> {
439        let fuzz_seed = config.fuzz.seed;
440        if self.list {
441            return list(runner, filter);
442        }
443
444        trace!(target: "forge::test", "running all tests");
445
446        // If we need to render to a serialized format, we should not print anything else to stdout.
447        let silent = self.gas_report && shell::is_json() || self.summary && shell::is_json();
448
449        let num_filtered = runner.matching_test_functions(filter).count();
450
451        if num_filtered == 0 {
452            let mut total_tests = num_filtered;
453            if !filter.is_empty() {
454                total_tests = runner.matching_test_functions(&EmptyTestFilter::default()).count();
455            }
456            if total_tests == 0 {
457                sh_println!(
458                    "No tests found in project! Forge looks for functions that start with `test`"
459                )?;
460            } else {
461                let mut msg = format!("no tests match the provided pattern:\n{filter}");
462                // Try to suggest a test when there's no match.
463                if let Some(test_pattern) = &filter.args().test_pattern {
464                    let test_name = test_pattern.as_str();
465                    // Filter contracts but not test functions.
466                    let candidates = runner.all_test_functions(filter).map(|f| &f.name);
467                    if let Some(suggestion) = utils::did_you_mean(test_name, candidates).pop() {
468                        write!(msg, "\nDid you mean `{suggestion}`?")?;
469                    }
470                }
471                sh_warn!("{msg}")?;
472            }
473            return Ok(TestOutcome::empty(Some(runner), false));
474        }
475
476        if num_filtered != 1 && (self.debug || self.flamegraph || self.flamechart) {
477            let action = if self.flamegraph {
478                "generate a flamegraph"
479            } else if self.flamechart {
480                "generate a flamechart"
481            } else {
482                "run the debugger"
483            };
484            let filter = if filter.is_empty() {
485                String::new()
486            } else {
487                format!("\n\nFilter used:\n{filter}")
488            };
489            eyre::bail!(
490                "{num_filtered} tests matched your criteria, but exactly 1 test must match in order to {action}.\n\n\
491                 Use --match-contract and --match-path to further limit the search.{filter}",
492            );
493        }
494
495        // If exactly one test matched, we enable full tracing.
496        if num_filtered == 1 && self.decode_internal {
497            runner.decode_internal = InternalTraceMode::Full;
498        }
499
500        // Run tests in a non-streaming fashion and collect results for serialization.
501        if !self.gas_report && !self.summary && shell::is_json() {
502            let mut results = runner.test_collect(filter)?;
503            results.values_mut().for_each(|suite_result| {
504                for test_result in suite_result.test_results.values_mut() {
505                    if verbosity >= 2 {
506                        // Decode logs at level 2 and above.
507                        test_result.decoded_logs = decode_console_logs(&test_result.logs);
508                    } else {
509                        // Empty logs for non verbose runs.
510                        test_result.logs = vec![];
511                    }
512                }
513            });
514            sh_println!("{}", serde_json::to_string(&results)?)?;
515            return Ok(TestOutcome::new(Some(runner), results, self.allow_failure, fuzz_seed));
516        }
517
518        if self.junit {
519            let results = runner.test_collect(filter)?;
520            sh_println!("{}", junit_xml_report(&results, verbosity).to_string()?)?;
521            return Ok(TestOutcome::new(Some(runner), results, self.allow_failure, fuzz_seed));
522        }
523
524        let remote_chain =
525            if runner.fork.is_some() { runner.env.tx.chain_id.map(Into::into) } else { None };
526        let known_contracts = runner.known_contracts.clone();
527
528        let libraries = runner.libraries.clone();
529
530        // Run tests in a streaming fashion.
531        let (tx, rx) = channel::<(String, SuiteResult)>();
532        let timer = Instant::now();
533        let show_progress = config.show_progress;
534        let handle = tokio::task::spawn_blocking({
535            let filter = filter.clone();
536            move || runner.test(&filter, tx, show_progress).map(|()| runner)
537        });
538
539        // Set up trace identifiers.
540        let mut identifier = TraceIdentifiers::new().with_local(&known_contracts);
541
542        // Avoid using external identifiers for gas report as we decode more traces and this will be
543        // expensive. Also skip external identifiers for local tests (no remote chain) to avoid
544        // unnecessary Etherscan API calls that significantly slow down test execution.
545        if !self.gas_report && remote_chain.is_some() {
546            identifier = identifier.with_external(&config, remote_chain)?;
547        }
548
549        // Build the trace decoder.
550        let mut builder = CallTraceDecoderBuilder::new()
551            .with_known_contracts(&known_contracts)
552            .with_label_disabled(self.disable_labels)
553            .with_verbosity(verbosity);
554        // Signatures are of no value for gas reports.
555        if !self.gas_report {
556            builder =
557                builder.with_signature_identifier(SignaturesIdentifier::from_config(&config)?);
558        }
559
560        if self.decode_internal {
561            let sources =
562                ContractSources::from_project_output(output, &config.root, Some(&libraries))?;
563            builder = builder.with_debug_identifier(DebugTraceIdentifier::new(sources));
564        }
565        let mut decoder = builder.build();
566
567        let mut gas_report = self.gas_report.then(|| {
568            GasReport::new(
569                config.gas_reports.clone(),
570                config.gas_reports_ignore.clone(),
571                config.gas_reports_include_tests,
572            )
573        });
574
575        let mut gas_snapshots = BTreeMap::<String, BTreeMap<String, String>>::new();
576
577        let mut outcome = TestOutcome::empty(None, self.allow_failure);
578        outcome.fuzz_seed = fuzz_seed;
579
580        let mut any_test_failed = false;
581        let mut backtrace_builder = None;
582        for (contract_name, mut suite_result) in rx {
583            let tests = &mut suite_result.test_results;
584            let has_tests = !tests.is_empty();
585
586            // Clear the addresses and labels from previous test.
587            decoder.clear_addresses();
588
589            // We identify addresses if we're going to print *any* trace or gas report.
590            let identify_addresses = verbosity >= 3
591                || self.gas_report
592                || self.debug
593                || self.flamegraph
594                || self.flamechart;
595
596            // Print suite header.
597            if !silent {
598                sh_println!()?;
599                for warning in &suite_result.warnings {
600                    sh_warn!("{warning}")?;
601                }
602                if has_tests {
603                    let len = tests.len();
604                    let tests = if len > 1 { "tests" } else { "test" };
605                    sh_println!("Ran {len} {tests} for {contract_name}")?;
606                }
607            }
608
609            // Process individual test results, printing logs and traces when necessary.
610            for (name, result) in tests {
611                let show_traces =
612                    !self.suppress_successful_traces || result.status == TestStatus::Failure;
613                if !silent {
614                    sh_println!("{}", result.short_result(name))?;
615
616                    // Display invariant metrics if invariant kind.
617                    if let TestKind::Invariant { metrics, .. } = &result.kind
618                        && !metrics.is_empty()
619                    {
620                        let _ = sh_println!("\n{}\n", format_invariant_metrics_table(metrics));
621                    }
622
623                    // We only display logs at level 2 and above
624                    if verbosity >= 2 && show_traces {
625                        // We only decode logs from Hardhat and DS-style console events
626                        let console_logs = decode_console_logs(&result.logs);
627                        if !console_logs.is_empty() {
628                            sh_println!("Logs:")?;
629                            for log in console_logs {
630                                sh_println!("  {log}")?;
631                            }
632                            sh_println!()?;
633                        }
634                    }
635                }
636
637                // We shouldn't break out of the outer loop directly here so that we finish
638                // processing the remaining tests and print the suite summary.
639                any_test_failed |= result.status == TestStatus::Failure;
640
641                // Clear the addresses and labels from previous runs.
642                decoder.clear_addresses();
643                decoder.labels.extend(result.labels.iter().map(|(k, v)| (*k, v.clone())));
644
645                // Identify addresses and decode traces.
646                let mut decoded_traces = Vec::with_capacity(result.traces.len());
647                for (kind, arena) in &mut result.traces {
648                    if identify_addresses {
649                        decoder.identify(arena, &mut identifier);
650                    }
651
652                    // verbosity:
653                    // - 0..3: nothing
654                    // - 3: only display traces for failed tests
655                    // - 4: also display the setup trace for failed tests
656                    // - 5..: display all traces for all tests, including storage changes
657                    let should_include = match kind {
658                        TraceKind::Execution => {
659                            (verbosity == 3 && result.status.is_failure()) || verbosity >= 4
660                        }
661                        TraceKind::Setup => {
662                            (verbosity == 4 && result.status.is_failure()) || verbosity >= 5
663                        }
664                        TraceKind::Deployment => false,
665                    };
666
667                    if should_include {
668                        decode_trace_arena(arena, &decoder).await;
669
670                        if let Some(trace_depth) = self.trace_depth {
671                            prune_trace_depth(arena, trace_depth);
672                        }
673
674                        decoded_traces.push(render_trace_arena_inner(arena, false, verbosity > 4));
675                    }
676                }
677
678                if !silent && show_traces && !decoded_traces.is_empty() {
679                    sh_println!("Traces:")?;
680                    for trace in &decoded_traces {
681                        sh_println!("{trace}")?;
682                    }
683                }
684
685                // Extract and display backtrace for failed tests when verbosity >= 3
686                if !silent
687                    && result.status.is_failure()
688                    && verbosity >= 3
689                    && !result.traces.is_empty()
690                    && let Some((_, arena)) =
691                        result.traces.iter().find(|(kind, _)| matches!(kind, TraceKind::Execution))
692                {
693                    // Lazily initialize the backtrace builder on first failure
694                    let builder = backtrace_builder.get_or_insert_with(|| {
695                        BacktraceBuilder::new(
696                            output,
697                            config.root.clone(),
698                            config.parsed_libraries().ok(),
699                            config.via_ir,
700                        )
701                    });
702
703                    let backtrace = builder.from_traces(arena);
704
705                    if !backtrace.is_empty() {
706                        sh_println!("{}", backtrace)?;
707                    }
708                }
709
710                if let Some(gas_report) = &mut gas_report {
711                    gas_report.analyze(result.traces.iter().map(|(_, a)| &a.arena), &decoder).await;
712
713                    for trace in &result.gas_report_traces {
714                        decoder.clear_addresses();
715
716                        // Re-execute setup and deployment traces to collect identities created in
717                        // setUp and constructor.
718                        for (kind, arena) in &result.traces {
719                            if !matches!(kind, TraceKind::Execution) {
720                                decoder.identify(arena, &mut identifier);
721                            }
722                        }
723
724                        for arena in trace {
725                            decoder.identify(arena, &mut identifier);
726                            gas_report.analyze([arena], &decoder).await;
727                        }
728                    }
729                }
730                // Clear memory.
731                result.gas_report_traces = Default::default();
732
733                // Collect and merge gas snapshots.
734                for (group, new_snapshots) in &result.gas_snapshots {
735                    gas_snapshots.entry(group.clone()).or_default().extend(new_snapshots.clone());
736                }
737            }
738
739            // Write gas snapshots to disk if any were collected.
740            if !gas_snapshots.is_empty() {
741                // By default `gas_snapshot_check` is set to `false` in the config.
742                //
743                // The user can either:
744                // - Set `FORGE_SNAPSHOT_CHECK=true` in the environment.
745                // - Pass `--gas-snapshot-check=true` as a CLI argument.
746                // - Set `gas_snapshot_check = true` in the config.
747                //
748                // If the user passes `--gas-snapshot-check=<bool>` then it will override the config
749                // and the environment variable, disabling the check if `false` is passed.
750                //
751                // Exiting early with code 1 if differences are found.
752                if self.gas_snapshot_check.unwrap_or(config.gas_snapshot_check) {
753                    let differences_found = gas_snapshots.clone().into_iter().fold(
754                        false,
755                        |mut found, (group, snapshots)| {
756                            // If the snapshot file doesn't exist, we can't compare so we skip.
757                            if !&config.snapshots.join(format!("{group}.json")).exists() {
758                                return false;
759                            }
760
761                            let previous_snapshots: BTreeMap<String, String> =
762                                fs::read_json_file(&config.snapshots.join(format!("{group}.json")))
763                                    .expect("Failed to read snapshots from disk");
764
765                            let diff: BTreeMap<_, _> = snapshots
766                                .iter()
767                                .filter_map(|(k, v)| {
768                                    previous_snapshots.get(k).and_then(|previous_snapshot| {
769                                        if previous_snapshot != v {
770                                            Some((
771                                                k.clone(),
772                                                (previous_snapshot.clone(), v.clone()),
773                                            ))
774                                        } else {
775                                            None
776                                        }
777                                    })
778                                })
779                                .collect();
780
781                            if !diff.is_empty() {
782                                let _ = sh_eprintln!(
783                                    "{}",
784                                    format!("\n[{group}] Failed to match snapshots:").red().bold()
785                                );
786
787                                for (key, (previous_snapshot, snapshot)) in &diff {
788                                    let _ = sh_eprintln!(
789                                        "{}",
790                                        format!("- [{key}] {previous_snapshot} → {snapshot}").red()
791                                    );
792                                }
793
794                                found = true;
795                            }
796
797                            found
798                        },
799                    );
800
801                    if differences_found {
802                        sh_eprintln!()?;
803                        eyre::bail!("Snapshots differ from previous run");
804                    }
805                }
806
807                // By default `gas_snapshot_emit` is set to `true` in the config.
808                //
809                // The user can either:
810                // - Set `FORGE_SNAPSHOT_EMIT=false` in the environment.
811                // - Pass `--gas-snapshot-emit=false` as a CLI argument.
812                // - Set `gas_snapshot_emit = false` in the config.
813                //
814                // If the user passes `--gas-snapshot-emit=<bool>` then it will override the config
815                // and the environment variable, enabling the check if `true` is passed.
816                if self.gas_snapshot_emit.unwrap_or(config.gas_snapshot_emit) {
817                    // Create `snapshots` directory if it doesn't exist.
818                    fs::create_dir_all(&config.snapshots)?;
819
820                    // Write gas snapshots to disk per group.
821                    gas_snapshots.clone().into_iter().for_each(|(group, snapshots)| {
822                        fs::write_pretty_json_file(
823                            &config.snapshots.join(format!("{group}.json")),
824                            &snapshots,
825                        )
826                        .expect("Failed to write gas snapshots to disk");
827                    });
828                }
829            }
830
831            // Print suite summary.
832            if !silent && has_tests {
833                sh_println!("{}", suite_result.summary())?;
834            }
835
836            // Add the suite result to the outcome.
837            outcome.results.insert(contract_name, suite_result);
838
839            // Stop processing the remaining suites if any test failed and `fail_fast` is set.
840            if self.fail_fast && any_test_failed {
841                break;
842            }
843        }
844        outcome.last_run_decoder = Some(decoder);
845        let duration = timer.elapsed();
846
847        trace!(target: "forge::test", len=outcome.results.len(), %any_test_failed, "done with results");
848
849        if let Some(gas_report) = gas_report {
850            let finalized = gas_report.finalize();
851            sh_println!("{}", &finalized)?;
852            outcome.gas_report = Some(finalized);
853        }
854
855        if !self.summary && !shell::is_json() {
856            sh_println!("{}", outcome.summary(duration))?;
857        }
858
859        if self.summary && !outcome.results.is_empty() {
860            let summary_report = TestSummaryReport::new(self.detailed, outcome.clone());
861            sh_println!("{}", &summary_report)?;
862        }
863
864        // Reattach the task.
865        match handle.await {
866            Ok(result) => outcome.runner = Some(result?),
867            Err(e) => match e.try_into_panic() {
868                Ok(payload) => std::panic::resume_unwind(payload),
869                Err(e) => return Err(e.into()),
870            },
871        }
872
873        // Persist test run failures to enable replaying.
874        persist_run_failures(&config, &outcome);
875
876        Ok(outcome)
877    }
878
879    /// Returns the flattened [`FilterArgs`] arguments merged with [`Config`].
880    /// Loads and applies filter from file if only last test run failures performed.
881    pub fn filter(&self, config: &Config) -> Result<ProjectPathsAwareFilter> {
882        let mut filter = self.filter.clone();
883        if self.rerun {
884            filter.test_pattern = last_run_failures(config);
885        }
886        if filter.path_pattern.is_some() {
887            if self.path.is_some() {
888                bail!("Can not supply both --match-path and |path|");
889            }
890        } else {
891            filter.path_pattern = self.path.clone();
892        }
893        Ok(filter.merge_with_config(config))
894    }
895
896    /// Returns whether `BuildArgs` was configured with `--watch`
897    pub fn is_watch(&self) -> bool {
898        self.watch.watch.is_some()
899    }
900
901    /// Returns the [`watchexec::Config`] necessary to bootstrap a new watch loop.
902    pub(crate) fn watchexec_config(&self) -> Result<watchexec::Config> {
903        self.watch.watchexec_config(|| {
904            let config = self.load_config()?;
905            Ok([config.src, config.test])
906        })
907    }
908}
909
910impl Provider for TestArgs {
911    fn metadata(&self) -> Metadata {
912        Metadata::named("Core Build Args Provider")
913    }
914
915    fn data(&self) -> Result<Map<Profile, Dict>, figment::Error> {
916        let mut dict = Dict::default();
917
918        let mut fuzz_dict = Dict::default();
919        if let Some(fuzz_seed) = self.fuzz_seed {
920            fuzz_dict.insert("seed".to_string(), fuzz_seed.to_string().into());
921        }
922        if let Some(fuzz_runs) = self.fuzz_runs {
923            fuzz_dict.insert("runs".to_string(), fuzz_runs.into());
924        }
925        if let Some(fuzz_timeout) = self.fuzz_timeout {
926            fuzz_dict.insert("timeout".to_string(), fuzz_timeout.into());
927        }
928        if let Some(fuzz_input_file) = self.fuzz_input_file.clone() {
929            fuzz_dict.insert("failure_persist_file".to_string(), fuzz_input_file.into());
930        }
931        dict.insert("fuzz".to_string(), fuzz_dict.into());
932
933        if let Some(etherscan_api_key) =
934            self.etherscan_api_key.as_ref().filter(|s| !s.trim().is_empty())
935        {
936            dict.insert("etherscan_api_key".to_string(), etherscan_api_key.to_string().into());
937        }
938
939        if self.show_progress {
940            dict.insert("show_progress".to_string(), true.into());
941        }
942
943        Ok(Map::from([(Config::selected_profile(), dict)]))
944    }
945}
946
947/// Lists all matching tests
948fn list(runner: MultiContractRunner, filter: &ProjectPathsAwareFilter) -> Result<TestOutcome> {
949    let results = runner.list(filter);
950
951    if shell::is_json() {
952        sh_println!("{}", serde_json::to_string(&results)?)?;
953    } else {
954        for (file, contracts) in &results {
955            sh_println!("{file}")?;
956            for (contract, tests) in contracts {
957                sh_println!("  {contract}")?;
958                sh_println!("    {}\n", tests.join("\n    "))?;
959            }
960        }
961    }
962    Ok(TestOutcome::empty(Some(runner), false))
963}
964
965/// Load persisted filter (with last test run failures) from file.
966fn last_run_failures(config: &Config) -> Option<regex::Regex> {
967    match fs::read_to_string(&config.test_failures_file) {
968        Ok(filter) => Regex::new(&filter)
969            .inspect_err(|e| {
970                _ = sh_warn!(
971                    "failed to parse test filter from {:?}: {e}",
972                    config.test_failures_file
973                )
974            })
975            .ok(),
976        Err(_) => None,
977    }
978}
979
980/// Persist filter with last test run failures (only if there's any failure).
981fn persist_run_failures(config: &Config, outcome: &TestOutcome) {
982    if outcome.failed() > 0 && fs::create_file(&config.test_failures_file).is_ok() {
983        let mut filter = String::new();
984        let mut failures = outcome.failures().peekable();
985        while let Some((test_name, _)) = failures.next() {
986            if test_name.is_any_test()
987                && let Some(test_match) = test_name.split("(").next()
988            {
989                filter.push_str(test_match);
990                if failures.peek().is_some() {
991                    filter.push('|');
992                }
993            }
994        }
995        let _ = fs::write(&config.test_failures_file, filter);
996    }
997}
998
999/// Generate test report in JUnit XML report format.
1000fn junit_xml_report(results: &BTreeMap<String, SuiteResult>, verbosity: u8) -> Report {
1001    let mut total_duration = Duration::default();
1002    let mut junit_report = Report::new("Test run");
1003    junit_report.set_timestamp(Utc::now());
1004    for (suite_name, suite_result) in results {
1005        let mut test_suite = TestSuite::new(suite_name);
1006        total_duration += suite_result.duration;
1007        test_suite.set_time(suite_result.duration);
1008        test_suite.set_system_out(suite_result.summary());
1009        for (test_name, test_result) in &suite_result.test_results {
1010            let mut test_status = match test_result.status {
1011                TestStatus::Success => TestCaseStatus::success(),
1012                TestStatus::Failure => TestCaseStatus::non_success(NonSuccessKind::Failure),
1013                TestStatus::Skipped => TestCaseStatus::skipped(),
1014            };
1015            if let Some(reason) = &test_result.reason {
1016                test_status.set_message(reason);
1017            }
1018
1019            let mut test_case = TestCase::new(test_name, test_status);
1020            test_case.set_time(test_result.duration);
1021
1022            let mut sys_out = String::new();
1023            let result_report = test_result.kind.report();
1024            write!(sys_out, "{test_result} {test_name} {result_report}").unwrap();
1025            if verbosity >= 2 && !test_result.logs.is_empty() {
1026                write!(sys_out, "\\nLogs:\\n").unwrap();
1027                let console_logs = decode_console_logs(&test_result.logs);
1028                for log in console_logs {
1029                    write!(sys_out, "  {log}\\n").unwrap();
1030                }
1031            }
1032
1033            test_case.set_system_out(sys_out);
1034            test_suite.add_test_case(test_case);
1035        }
1036        junit_report.add_test_suite(test_suite);
1037    }
1038    junit_report.set_time(total_duration);
1039    junit_report
1040}
1041
#[cfg(test)]
mod tests {
    use super::*;
    use foundry_config::Chain;

    #[test]
    fn watch_parse() {
        let args = TestArgs::parse_from(["foundry-cli", "-vw"]);
        assert!(args.watch.watch.is_some());
    }

    #[test]
    fn fuzz_seed() {
        let args = TestArgs::parse_from(["foundry-cli", "--fuzz-seed", "0x10"]);
        assert!(args.fuzz_seed.is_some());
    }

    #[test]
    fn depth_trace() {
        let args = TestArgs::parse_from(["foundry-cli", "--trace-depth", "2"]);
        assert!(args.trace_depth.is_some());
    }

    // <https://github.com/foundry-rs/foundry/issues/5913>
    #[test]
    fn fuzz_seed_exists() {
        let args =
            TestArgs::parse_from(["foundry-cli", "-vvv", "--gas-report", "--fuzz-seed", "0x10"]);
        assert!(args.fuzz_seed.is_some());
    }

    #[test]
    fn extract_chain() {
        // Parses a chain flag and checks it propagates into config and EVM opts.
        let assert_chain = |arg: &str, expected: Chain| {
            let args = TestArgs::parse_from(["foundry-cli", arg]);
            assert_eq!(args.evm.env.chain, Some(expected));
            let (config, evm_opts) = args.load_config_and_evm_opts().unwrap();
            assert_eq!(config.chain, Some(expected));
            assert_eq!(evm_opts.env.chain_id, Some(expected.id()));
        };
        assert_chain("--chain-id=1", Chain::mainnet());
        assert_chain("--chain-id=42", Chain::from_id(42));
    }
}