Skip to main content

foundry_evm/executors/fuzz/
mod.rs

1use crate::executors::{
2    DURATION_BETWEEN_METRICS_REPORT, EarlyExit, Executor, FuzzTestTimer, RawCallResult,
3    corpus::{GlobalCorpusMetrics, WorkerCorpus},
4};
5use alloy_dyn_abi::JsonAbiExt;
6use alloy_json_abi::Function;
7use alloy_primitives::{Address, Bytes, Log, U256, keccak256, map::HashMap};
8use eyre::Result;
9use foundry_common::sh_println;
10use foundry_config::FuzzConfig;
11use foundry_evm_core::{
12    Breakpoints,
13    constants::{CHEATCODE_ADDRESS, MAGIC_ASSUME},
14    decode::{RevertDecoder, SkipReason},
15    evm::FoundryEvmNetwork,
16};
17use foundry_evm_coverage::HitMaps;
18use foundry_evm_fuzz::{
19    BaseCounterExample, BasicTxDetails, CallDetails, CounterExample, FuzzCase, FuzzError,
20    FuzzFixtures, FuzzTestResult,
21    strategies::{EvmFuzzState, fuzz_calldata, fuzz_calldata_from_state},
22};
23use foundry_evm_traces::SparsedTraceArena;
24use indicatif::ProgressBar;
25use proptest::{
26    strategy::Strategy,
27    test_runner::{RngAlgorithm, TestCaseError, TestRng, TestRunner},
28};
29use rayon::iter::{IntoParallelIterator, ParallelIterator};
30use serde_json::json;
31use std::{
32    sync::{
33        Arc, OnceLock,
34        atomic::{AtomicU32, Ordering},
35    },
36    time::{Instant, SystemTime, UNIX_EPOCH},
37};
38
39mod types;
40pub use types::{CaseOutcome, CounterExampleOutcome, FuzzOutcome};
41
/// Corpus syncs across workers every `SYNC_INTERVAL` runs.
///
/// Each worker additionally staggers its sync point by a per-worker offset so that
/// workers do not all sync at the same time (see `run_worker`).
const SYNC_INTERVAL: u32 = 1000;

/// Minimum number of runs per worker.
/// This is mainly to reduce the overall number of rayon jobs.
/// Used to cap the worker count when `config.runs` is small (see `FuzzedExecutor::new`).
const MIN_RUNS_PER_WORKER: u32 = 64;
48
/// State accumulated by a single fuzz worker over its run loop.
///
/// One instance is produced per worker by `run_worker` and later merged into a single
/// `FuzzTestResult` by `aggregate_results`.
struct WorkerState<FEN: FoundryEvmNetwork> {
    /// Worker identifier
    id: usize,
    /// First fuzz case this worker encountered (with global run number)
    first_case: Option<(u32, FuzzCase)>,
    /// Gas usage for all cases this worker ran
    gas_by_case: Vec<(u64, u64)>,
    /// Counterexample if this worker found one
    ///
    /// Defaults to an empty calldata/result pair until a failure is recorded.
    counterexample: (Bytes, RawCallResult<FEN>),
    /// Traces collected by this worker
    ///
    /// Stores up to `max_traces_to_collect` which is `config.gas_report_samples / num_workers`
    traces: Vec<SparsedTraceArena>,
    /// Last breakpoints from this worker
    breakpoints: Option<Breakpoints>,
    /// Coverage collected by this worker
    coverage: Option<HitMaps>,
    /// Logs from all cases this worker ran
    logs: Vec<Log>,
    /// Deprecated cheatcodes seen by this worker
    deprecated_cheatcodes: HashMap<&'static str, Option<&'static str>>,
    /// Number of runs this worker completed
    runs: u32,
    /// Failure reason if this worker failed
    failure: Option<TestCaseError>,
    /// Last run timestamp in milliseconds
    ///
    /// Used to identify which worker ran last and collect its traces and call breakpoints
    last_run_timestamp: u128,
    /// Failed corpus replays
    ///
    /// Only populated for the master worker (id 0), which replays the persisted corpus.
    failed_corpus_replays: usize,
}
81
82impl<FEN: FoundryEvmNetwork> WorkerState<FEN> {
83    fn new(worker_id: usize) -> Self {
84        Self {
85            id: worker_id,
86            first_case: None,
87            gas_by_case: Vec::new(),
88            counterexample: (Bytes::new(), RawCallResult::default()),
89            traces: Vec::new(),
90            breakpoints: None,
91            coverage: None,
92            logs: Vec::new(),
93            deprecated_cheatcodes: HashMap::default(),
94            runs: 0,
95            failure: None,
96            last_run_timestamp: 0,
97            failed_corpus_replays: 0,
98        }
99    }
100}
101
/// Shared state for coordinating parallel fuzz workers
struct SharedFuzzState {
    /// Fuzz dictionary state, shared read-side by all workers' strategies.
    state: EvmFuzzState,
    /// Total runs across workers
    total_runs: Arc<AtomicU32>,
    /// Found failure
    ///
    /// The worker that found the failure sets its ID.
    ///
    /// This ID is then used to correctly extract the failure reason and counterexample.
    failed_worker_id: OnceLock<usize>,
    /// Total rejects across workers
    total_rejects: Arc<AtomicU32>,
    /// Fuzz timer
    timer: FuzzTestTimer,
    /// Global corpus metrics
    global_corpus_metrics: GlobalCorpusMetrics,

    /// Global test suite early exit.
    global_early_exit: EarlyExit,
    /// Local fuzz early exit.
    ///
    /// Tripped by `try_claim_failure` so sibling workers of this fuzz test stop promptly.
    local_early_exit: EarlyExit,
}
125
126impl SharedFuzzState {
127    fn new(state: EvmFuzzState, timeout: Option<u32>, early_exit: EarlyExit) -> Self {
128        Self {
129            state,
130            total_runs: Arc::new(AtomicU32::new(0)),
131            failed_worker_id: OnceLock::new(),
132            total_rejects: Arc::new(AtomicU32::new(0)),
133            timer: FuzzTestTimer::new(timeout),
134            global_corpus_metrics: GlobalCorpusMetrics::default(),
135            global_early_exit: early_exit,
136            local_early_exit: EarlyExit::new(true),
137        }
138    }
139
140    /// Increments the number of runs and returns the new value.
141    fn increment_runs(&self) -> u32 {
142        self.total_runs.fetch_add(1, Ordering::Relaxed) + 1
143    }
144
145    /// Increments and returns the new value of the number of rejected tests.
146    fn increment_rejects(&self) -> u32 {
147        self.total_rejects.fetch_add(1, Ordering::Relaxed) + 1
148    }
149
150    /// Returns `true` if the worker should continue running.
151    fn should_continue(&self) -> bool {
152        !(self.global_early_exit.should_stop()
153            || self.local_early_exit.should_stop()
154            || self.timer.is_timed_out())
155    }
156
157    /// Returns true if the worker was able to claim the failure, false if failure was set by
158    /// another worker
159    fn try_claim_failure(&self, worker_id: usize) -> bool {
160        let mut claimed = false;
161        let _ = self.failed_worker_id.get_or_init(|| {
162            claimed = true;
163            self.local_early_exit.record_failure();
164            worker_id
165        });
166        claimed
167    }
168}
169
/// Wrapper around an [`Executor`] which provides fuzzing support using [`proptest`].
///
/// After instantiation, calling `fuzz` will proceed to hammer the deployed smart contract with
/// inputs, until it finds a counterexample. The provided [`TestRunner`] contains all the
/// configuration which can be overridden via [environment variables](proptest::test_runner::Config)
pub struct FuzzedExecutor<FEN: FoundryEvmNetwork> {
    /// The EVM executor.
    ///
    /// Cloned per worker in `run_worker`; the original is used by the master worker
    /// for persisted-corpus replay.
    executor_f: Executor<FEN>,
    /// The fuzzer
    runner: TestRunner,
    /// The account that calls tests.
    sender: Address,
    /// The fuzz configuration.
    config: FuzzConfig,
    /// The persisted counterexample to be replayed, if any.
    persisted_failure: Option<BaseCounterExample>,
    /// The number of parallel workers.
    ///
    /// Derived in `new` from `config.runs` and the rayon thread-pool size; may be 0
    /// when `config.runs` is 0, in which case no fuzzing happens.
    num_workers: usize,
}
189
190impl<FEN: FoundryEvmNetwork> FuzzedExecutor<FEN> {
191    /// Instantiates a fuzzed executor given a testrunner
192    pub fn new(
193        executor: Executor<FEN>,
194        runner: TestRunner,
195        sender: Address,
196        config: FuzzConfig,
197        persisted_failure: Option<BaseCounterExample>,
198    ) -> Self {
199        let max_workers =
200            if config.runs == 0 { 0 } else { Ord::max(1, config.runs / MIN_RUNS_PER_WORKER) };
201        let num_workers = Ord::min(rayon::current_num_threads(), max_workers as usize);
202        Self { executor_f: executor, runner, sender, config, persisted_failure, num_workers }
203    }
204
    /// Fuzzes the provided function, assuming it is available at the contract at `address`,
    /// until a counterexample is found or the configured run/timeout budget is exhausted.
    ///
    /// Spawns `self.num_workers` parallel workers on the rayon pool and aggregates their
    /// results into a single [`FuzzTestResult`], which includes the consumed gas and
    /// calldata of every fuzz case.
    #[allow(clippy::too_many_arguments)]
    pub fn fuzz(
        &mut self,
        func: &Function,
        fuzz_fixtures: &FuzzFixtures,
        state: EvmFuzzState,
        address: Address,
        rd: &RevertDecoder,
        progress: Option<&ProgressBar>,
        early_exit: &EarlyExit,
        tokio_handle: &tokio::runtime::Handle,
    ) -> Result<FuzzTestResult> {
        // State shared by all workers: run/reject counters, timer, corpus metrics,
        // the failure claim slot, and the early-exit flags.
        let shared_state = SharedFuzzState::new(state, self.config.timeout, early_exit.clone());

        debug!(n = self.num_workers, "spawning workers");
        let workers = (0..self.num_workers)
            .into_par_iter()
            .map(|worker_id| {
                // Make the tokio runtime available inside the rayon worker thread.
                let _guard = tokio_handle.enter();
                let _guard = info_span!("fuzz_worker", id = worker_id).entered();
                let timer = Instant::now();
                let r = self.run_worker(
                    worker_id,
                    func,
                    fuzz_fixtures,
                    address,
                    rd,
                    &shared_state,
                    progress,
                );
                debug!("finished in {:?}", timer.elapsed());
                r
            })
            // The first worker error short-circuits the collect and is propagated.
            .collect::<Result<Vec<_>>>()?;

        Ok(self.aggregate_results(workers, func, &shared_state))
    }
247
    /// Granular and single-step function that runs only one fuzz and returns either a `CaseOutcome`
    /// or a `CounterExampleOutcome`.
    ///
    /// Also feeds the call's edge coverage and input into the worker corpus, and rejects
    /// (via `TestCaseError::reject`) inputs that tripped `vm.assume`.
    fn single_fuzz(
        &self,
        executor: &Executor<FEN>,
        address: Address,
        calldata: Bytes,
        coverage_metrics: &mut WorkerCorpus,
    ) -> Result<FuzzOutcome<FEN>, TestCaseError> {
        let mut call = executor
            .call_raw(self.sender, address, calldata.clone(), U256::ZERO)
            .map_err(|e| TestCaseError::fail(e.to_string()))?;
        // Record new edge coverage and let the corpus decide whether to keep this input.
        let new_coverage = coverage_metrics.merge_edge_coverage(&mut call);
        coverage_metrics.process_inputs(
            &[BasicTxDetails {
                warp: None,
                roll: None,
                sender: self.sender,
                call_details: CallDetails { target: address, calldata: calldata.clone() },
            }],
            new_coverage,
            None,
        );

        // Handle `vm.assume`.
        if call.result.as_ref() == MAGIC_ASSUME {
            return Err(TestCaseError::reject(FuzzError::AssumeReject));
        }

        let (breakpoints, deprecated_cheatcodes) =
            call.cheatcodes.as_ref().map_or_else(Default::default, |cheats| {
                (cheats.breakpoints.clone(), cheats.deprecated.clone())
            });

        // Consider call success if test should not fail on reverts and reverter is not the
        // cheatcode or test address.
        let success = if !self.config.fail_on_revert
            && call
                .reverter
                .is_some_and(|reverter| reverter != address && reverter != CHEATCODE_ADDRESS)
        {
            true
        } else {
            executor.is_raw_call_mut_success(address, &mut call, false)
        };

        if success {
            Ok(FuzzOutcome::Case(CaseOutcome {
                case: FuzzCase { gas: call.gas_used, stipend: call.stipend },
                traces: call.traces,
                coverage: call.line_coverage,
                breakpoints,
                logs: call.logs,
                deprecated_cheatcodes,
            }))
        } else {
            Ok(FuzzOutcome::CounterExample(CounterExampleOutcome {
                exit_reason: call.exit_reason,
                counterexample: (calldata, call),
                breakpoints,
            }))
        }
    }
311
    /// Aggregates the results from all workers into a single [`FuzzTestResult`].
    ///
    /// Picks the globally-first fuzz case, identifies the last-run worker (whose traces,
    /// breakpoints and logs are used for display), and — if a failure was claimed — decodes
    /// the counterexample from the failing worker.
    fn aggregate_results(
        &self,
        mut workers: Vec<WorkerState<FEN>>,
        func: &Function,
        shared_state: &SharedFuzzState,
    ) -> FuzzTestResult {
        let mut result = FuzzTestResult::default();
        // No workers means nothing ran (config.runs == 0): trivially successful.
        if workers.is_empty() {
            result.success = true;
            return result;
        }

        // Find first case and last run worker. Set `failed_corpus_replays`.
        let mut first_case_candidate = None;
        let mut last_run_worker = None;
        for (i, worker) in workers.iter().enumerate() {
            // Keep the case with the smallest global run number.
            if let Some((run, ref case)) = worker.first_case
                && first_case_candidate.as_ref().is_none_or(|&(r, _)| run < r)
            {
                first_case_candidate = Some((run, case.clone()));
            }

            // Keep the worker with the latest run timestamp.
            if last_run_worker.is_none_or(|(t, _)| worker.last_run_timestamp > t) {
                last_run_worker = Some((worker.last_run_timestamp, i));
            }

            // Only set replays from master which is responsible for replaying persisted corpus.
            if worker.id == 0 {
                result.failed_corpus_replays = worker.failed_corpus_replays;
            }
        }
        result.first_case = first_case_candidate.map(|(_, case)| case).unwrap_or_default();
        let (_, last_run_worker_idx) = last_run_worker.expect("at least one worker");

        if let Some(&failed_worker_id) = shared_state.failed_worker_id.get() {
            result.success = false;

            let failed_worker_idx = workers.iter().position(|w| w.id == failed_worker_id).unwrap();
            let failed_worker = &mut workers[failed_worker_idx];

            // Take the counterexample out of the worker, leaving a default in its place.
            let (calldata, call) = std::mem::take(&mut failed_worker.counterexample);
            result.labels = call.labels;
            // Traces are cloned here because `call.traces` is moved into the
            // counterexample below.
            result.traces = call.traces.clone();
            result.breakpoints = call.cheatcodes.map(|c| c.breakpoints);

            match &failed_worker.failure {
                Some(TestCaseError::Fail(reason)) => {
                    let reason = reason.to_string();
                    result.reason = (!reason.is_empty()).then_some(reason);
                    // Decode the function arguments from the calldata past the 4-byte selector.
                    let args = if let Some(data) = calldata.get(4..) {
                        func.abi_decode_input(data).unwrap_or_default()
                    } else {
                        vec![]
                    };
                    result.counterexample = Some(CounterExample::Single(
                        BaseCounterExample::from_fuzz_call(calldata, args, call.traces),
                    ));
                }
                Some(TestCaseError::Reject(reason)) => {
                    // Rejection failures (e.g. too many rejects) carry a reason but no
                    // counterexample.
                    let reason = reason.to_string();
                    result.reason = (!reason.is_empty()).then_some(reason);
                }
                None => {}
            }
        } else {
            // Success: display the last collected trace/breakpoints of the last-run worker.
            let last_run_worker = &workers[last_run_worker_idx];
            result.success = true;
            result.traces = last_run_worker.traces.last().cloned();
            result.breakpoints = last_run_worker.breakpoints.clone();
        }

        // When not showing all logs, only keep the last-run worker's (last-case) logs.
        if !self.config.show_logs {
            result.logs = workers[last_run_worker_idx].logs.clone();
        }

        // Merge per-worker accumulations into the final result.
        for mut worker in workers {
            result.gas_by_case.append(&mut worker.gas_by_case);
            if self.config.show_logs {
                result.logs.append(&mut worker.logs);
            }
            result.gas_report_traces.extend(worker.traces.into_iter().map(|t| t.arena));
            HitMaps::merge_opt(&mut result.line_coverage, worker.coverage);
            result.deprecated_cheatcodes.extend(worker.deprecated_cheatcodes);
        }

        // A failure reason that decodes as a skip marker turns the result into a skip.
        if let Some(reason) = &result.reason
            && let Some(reason) = SkipReason::decode_self(reason)
        {
            result.skipped = true;
            result.reason = reason.0;
        }

        result
    }
407
    /// Runs a single fuzz worker.
    ///
    /// Builds the calldata strategy (random vs. dictionary-weighted), sets up this worker's
    /// corpus and (optionally seeded) proptest runner, then loops: generate or replay an
    /// input, execute it via [`Self::single_fuzz`], and record the outcome. Stops when the
    /// shared state signals exit, the worker's run budget is exhausted, or a failure is
    /// found (claimed through `shared_state.try_claim_failure`).
    #[allow(clippy::too_many_arguments)]
    fn run_worker(
        &self,
        worker_id: usize,
        func: &Function,
        fuzz_fixtures: &FuzzFixtures,
        address: Address,
        rd: &RevertDecoder,
        shared_state: &SharedFuzzState,
        progress: Option<&ProgressBar>,
    ) -> Result<WorkerState<FEN>> {
        // Prepare
        // Weighted choice between fixture/random calldata and dictionary-driven calldata.
        let dictionary_weight = self.config.dictionary.dictionary_weight.min(100);
        let strategy = proptest::prop_oneof![
            100 - dictionary_weight => fuzz_calldata(func.clone(), fuzz_fixtures),
            dictionary_weight => fuzz_calldata_from_state(func.clone(), &shared_state.state),
        ]
        .prop_map(move |calldata| BasicTxDetails {
            warp: None,
            roll: None,
            sender: Default::default(),
            call_details: CallDetails { target: Default::default(), calldata },
        });

        let mut corpus = WorkerCorpus::new(
            worker_id,
            self.config.corpus.clone(),
            strategy.boxed(),
            // Master worker replays the persisted corpus using the executor
            (worker_id == 0).then_some(&self.executor_f),
            Some(func),
            None, // fuzzed_contracts for invariant tests
        )?;
        // Each worker mutates its own clone of the executor.
        let mut executor = self.executor_f.clone();

        let mut worker = WorkerState::new(worker_id);
        // We want to collect at least one trace which will be displayed to user.
        let max_traces_to_collect =
            std::cmp::max(1, self.config.gas_report_samples / self.num_workers as u32);

        let worker_runs = self.runs_per_worker(worker_id);
        debug!(worker_runs);

        let mut runner_config = self.runner.config().clone();
        runner_config.cases = worker_runs;

        let mut runner = if let Some(seed) = self.config.seed {
            // For deterministic parallel fuzzing, derive a unique seed for each worker
            let worker_seed = if worker_id == 0 {
                // Master worker uses the provided seed as is.
                seed
            } else {
                // Derive a worker-specific seed using keccak256(seed || worker_id)
                let seed_data =
                    [&seed.to_be_bytes::<32>()[..], &worker_id.to_be_bytes()[..]].concat();
                U256::from_be_bytes(keccak256(seed_data).0)
            };
            trace!(target: "forge::test", ?worker_seed, "deterministic seed for worker {worker_id}");
            let rng = TestRng::from_seed(RngAlgorithm::ChaCha, &worker_seed.to_be_bytes::<32>());
            TestRunner::new_with_rng(runner_config, rng)
        } else {
            TestRunner::new(runner_config)
        };

        // Only the master worker replays a persisted counterexample.
        let mut persisted_failure = self.persisted_failure.as_ref().filter(|_| worker_id == 0);

        // Offset to stagger corpus syncs across workers; so that workers don't sync at the same
        // time.
        let sync_offset = worker_id as u32 * 100;
        let sync_threshold = SYNC_INTERVAL + sync_offset;
        let mut runs_since_sync = sync_threshold; // Always sync at the start.
        let mut last_metrics_report = Instant::now();
        // Continue while:
        // 1. Global state allows (not timed out, not at global limit, no failure found)
        // 2. Worker hasn't reached its specific run limit
        'stop: while shared_state.should_continue() && worker.runs < worker_runs {
            // If counterexample recorded, replay it first, without incrementing runs.
            let input = if worker_id == 0
                && let Some(failure) = persisted_failure.take()
                // Only replay if the persisted calldata targets this function's selector.
                && failure.calldata.get(..4).is_some_and(|selector| func.selector() == selector)
            {
                failure.calldata.clone()
            } else {
                runs_since_sync += 1;
                if runs_since_sync >= sync_threshold {
                    let timer = Instant::now();
                    corpus.sync(
                        self.num_workers,
                        &executor,
                        Some(func),
                        None,
                        &shared_state.global_corpus_metrics,
                    )?;
                    trace!("finished corpus sync in {:?}", timer.elapsed());
                    runs_since_sync = 0;
                }

                // Re-seed cheatcode randomness per run so runs stay deterministic under a seed.
                if let Some(cheats) = executor.inspector_mut().cheatcodes.as_mut()
                    && let Some(seed) = self.config.seed
                {
                    cheats.set_seed(seed.wrapping_add(U256::from(worker.runs)));
                }

                match corpus.new_input(&mut runner, &shared_state.state, func) {
                    Ok(input) => input,
                    Err(err) => {
                        // Input generation failure is a test failure; claim it and stop.
                        worker.failure = Some(TestCaseError::fail(format!(
                            "failed to generate fuzzed input in worker {}: {err}",
                            worker.id
                        )));
                        shared_state.try_claim_failure(worker_id);
                        break 'stop;
                    }
                }
            };

            // Bump both the global and per-worker run counters (and the progress bar).
            let mut inc_runs = || {
                let total_runs = shared_state.increment_runs();
                debug_assert!(
                    shared_state.timer.is_enabled() || total_runs <= self.config.runs,
                    "worker runs were not distributed correctly"
                );
                worker.runs += 1;
                if let Some(progress) = progress {
                    progress.inc(1);
                }
                total_runs
            };

            worker.last_run_timestamp = SystemTime::now().duration_since(UNIX_EPOCH)?.as_millis();
            match self.single_fuzz(&executor, address, input, &mut corpus) {
                Ok(fuzz_outcome) => match fuzz_outcome {
                    FuzzOutcome::Case(case) => {
                        let total_runs = inc_runs();

                        // Master worker periodically reports corpus metrics when edge
                        // coverage is being collected.
                        if worker_id == 0 && self.config.corpus.collect_edge_coverage() {
                            if let Some(progress) = progress {
                                corpus.sync_metrics(&shared_state.global_corpus_metrics);
                                progress
                                    .set_message(format!("{}", shared_state.global_corpus_metrics));
                            } else if last_metrics_report.elapsed()
                                > DURATION_BETWEEN_METRICS_REPORT
                            {
                                corpus.sync_metrics(&shared_state.global_corpus_metrics);
                                // Display metrics inline.
                                let metrics = json!({
                                    "timestamp": SystemTime::now()
                                        .duration_since(UNIX_EPOCH)?
                                        .as_secs(),
                                    "test": func.name,
                                    "metrics": shared_state.global_corpus_metrics.load(),
                                });
                                let _ = sh_println!("{metrics}");
                                last_metrics_report = Instant::now();
                            }
                        }

                        worker.gas_by_case.push((case.case.gas, case.case.stipend));

                        if worker.first_case.is_none() {
                            worker.first_case = Some((total_runs, case.case));
                        }

                        // Cap collected traces at `max_traces_to_collect`, replacing the
                        // newest entry once full.
                        if let Some(call_traces) = case.traces {
                            if worker.traces.len() == max_traces_to_collect as usize {
                                worker.traces.pop();
                            }
                            worker.traces.push(call_traces);
                            worker.breakpoints = Some(case.breakpoints);
                        }

                        // Always store logs from the last run in test_data.logs for display at
                        // verbosity >= 2. When show_logs is true,
                        // accumulate all logs. When false, only keep the last run's logs.
                        if self.config.show_logs {
                            worker.logs.extend(case.logs);
                        } else {
                            worker.logs = case.logs;
                        }

                        HitMaps::merge_opt(&mut worker.coverage, case.coverage);
                        worker.deprecated_cheatcodes = case.deprecated_cheatcodes;
                    }
                    FuzzOutcome::CounterExample(CounterExampleOutcome {
                        exit_reason: status,
                        counterexample: outcome,
                        ..
                    }) => {
                        inc_runs();

                        // Only classify magic skip payloads when the revert originates from the
                        // cheatcode address.
                        let reason = if outcome.1.reverter == Some(CHEATCODE_ADDRESS) {
                            SkipReason::decode(&outcome.1.result)
                                .map(|reason| reason.to_string())
                                .or_else(|| rd.maybe_decode(&outcome.1.result, status))
                        } else {
                            rd.maybe_decode(&outcome.1.result, status)
                        };
                        worker.logs.extend(outcome.1.logs.clone());
                        worker.counterexample = outcome;
                        worker.failure = Some(TestCaseError::fail(reason.unwrap_or_default()));
                        shared_state.try_claim_failure(worker_id);
                        break 'stop;
                    }
                },
                Err(err) => match err {
                    TestCaseError::Fail(_) => {
                        worker.failure = Some(err);
                        shared_state.try_claim_failure(worker_id);
                        break 'stop;
                    }
                    TestCaseError::Reject(_) => {
                        let max = self.config.max_test_rejects;

                        let total = shared_state.increment_rejects();

                        // Update progress bar to reflect rejected runs.
                        // TODO(dani): (pre-existing) conflicts with corpus metrics `set_message`
                        if !self.config.corpus.collect_edge_coverage()
                            && let Some(progress) = progress
                        {
                            progress.set_message(format!("([{total}] rejected)"));
                        }

                        // `max == 0` disables the reject limit entirely.
                        if max > 0 && total > max {
                            worker.failure =
                                Some(TestCaseError::reject(FuzzError::TooManyRejects(max)));
                            shared_state.try_claim_failure(worker_id);
                            break 'stop;
                        }
                    }
                },
            }
        }

        // Only the master worker replayed the persisted corpus, so only it reports replays.
        if worker_id == 0 {
            worker.failed_corpus_replays = corpus.failed_replays;
        }

        // Logs stats
        trace!("worker {worker_id} fuzz stats");
        shared_state.state.log_stats();

        Ok(worker)
    }
655
656    /// Determines the number of runs per worker.
657    const fn runs_per_worker(&self, worker_id: usize) -> u32 {
658        let worker_id = worker_id as u32;
659        let total_runs = self.config.runs;
660        let n = self.num_workers as u32;
661        let runs = total_runs / n;
662        let remainder = total_runs % n;
663        // Distribute the remainder evenly among the first `remainder` workers,
664        // assuming `worker_id` is in `0..n`.
665        if worker_id < remainder { runs + 1 } else { runs }
666    }
667}