// foundry_evm/executors/fuzz/mod.rs

1use crate::executors::{
2    DURATION_BETWEEN_METRICS_REPORT, EarlyExit, Executor, FuzzTestTimer, RawCallResult,
3    corpus::{GlobalCorpusMetrics, WorkerCorpus},
4};
5use alloy_dyn_abi::JsonAbiExt;
6use alloy_json_abi::Function;
7use alloy_primitives::{Address, Bytes, Log, U256, keccak256, map::HashMap};
8use eyre::Result;
9use foundry_common::sh_println;
10use foundry_config::FuzzConfig;
11use foundry_evm_core::{
12    Breakpoints,
13    constants::{CHEATCODE_ADDRESS, MAGIC_ASSUME},
14    decode::{RevertDecoder, SkipReason},
15    evm::FoundryEvmNetwork,
16};
17use foundry_evm_coverage::HitMaps;
18use foundry_evm_fuzz::{
19    BaseCounterExample, BasicTxDetails, CallDetails, CounterExample, FuzzCase, FuzzError,
20    FuzzFixtures, FuzzTestResult,
21    strategies::{EvmFuzzState, fuzz_calldata, fuzz_calldata_from_state},
22};
23use foundry_evm_traces::SparsedTraceArena;
24use indicatif::ProgressBar;
25use proptest::{
26    strategy::Strategy,
27    test_runner::{RngAlgorithm, TestCaseError, TestRng, TestRunner},
28};
29use rayon::iter::{IntoParallelIterator, ParallelIterator};
30use serde_json::json;
31use std::{
32    sync::{
33        Arc, OnceLock,
34        atomic::{AtomicU32, Ordering},
35    },
36    time::{Instant, SystemTime, UNIX_EPOCH},
37};
38
39mod types;
40pub use types::{CaseOutcome, CounterExampleOutcome, FuzzOutcome};
41
/// Corpus syncs across workers every `SYNC_INTERVAL` runs.
///
/// Each worker adds a per-worker offset (`worker_id * 100`) to this interval so
/// workers don't all sync at the same time; see `run_worker`.
const SYNC_INTERVAL: u32 = 1000;

/// Minimum number of runs per worker.
/// This is mainly to reduce the overall number of rayon jobs.
const MIN_RUNS_PER_WORKER: u32 = 64;
48
/// Per-worker accumulation of fuzz results.
///
/// Each parallel worker owns one `WorkerState`; the individual states are merged
/// into a single `FuzzTestResult` by `FuzzedExecutor::aggregate_results` once all
/// workers finish.
struct WorkerState<FEN: FoundryEvmNetwork> {
    /// Worker identifier (0 is the "master" worker).
    id: usize,
    /// First fuzz case this worker encountered (with global run number)
    first_case: Option<(u32, FuzzCase)>,
    /// Gas usage for all cases this worker ran
    gas_by_case: Vec<(u64, u64)>,
    /// Counterexample if this worker found one
    counterexample: (Bytes, RawCallResult<FEN>),
    /// Traces collected by this worker
    ///
    /// Stores up to `max_traces_to_collect` which is `config.gas_report_samples / num_workers`
    traces: Vec<SparsedTraceArena>,
    /// Last breakpoints from this worker
    breakpoints: Option<Breakpoints>,
    /// Coverage collected by this worker
    coverage: Option<HitMaps>,
    /// Logs from all cases this worker ran
    logs: Vec<Log>,
    /// Deprecated cheatcodes seen by this worker
    deprecated_cheatcodes: HashMap<&'static str, Option<&'static str>>,
    /// Number of runs this worker completed
    runs: u32,
    /// Failure reason if this worker failed
    failure: Option<TestCaseError>,
    /// Last run timestamp in milliseconds
    ///
    /// Used to identify which worker ran last and collect its traces and call breakpoints
    last_run_timestamp: u128,
    /// Failed corpus replays (only populated for worker 0, which replays the
    /// persisted corpus)
    failed_corpus_replays: usize,
}
81
82impl<FEN: FoundryEvmNetwork> WorkerState<FEN> {
83    fn new(worker_id: usize) -> Self {
84        Self {
85            id: worker_id,
86            first_case: None,
87            gas_by_case: Vec::new(),
88            counterexample: (Bytes::new(), RawCallResult::default()),
89            traces: Vec::new(),
90            breakpoints: None,
91            coverage: None,
92            logs: Vec::new(),
93            deprecated_cheatcodes: HashMap::default(),
94            runs: 0,
95            failure: None,
96            last_run_timestamp: 0,
97            failed_corpus_replays: 0,
98        }
99    }
100}
101
/// Shared state for coordinating parallel fuzz workers
struct SharedFuzzState {
    /// Fuzz dictionary state shared by all workers.
    state: EvmFuzzState,
    /// Total runs across workers
    total_runs: Arc<AtomicU32>,
    /// Found failure
    ///
    /// The worker that found the failure sets its ID.
    ///
    /// This ID is then used to correctly extract the failure reason and counterexample.
    failed_worker_id: OnceLock<usize>,
    /// Total rejects across workers
    total_rejects: Arc<AtomicU32>,
    /// Fuzz timer
    timer: FuzzTestTimer,
    /// Global corpus metrics
    global_corpus_metrics: GlobalCorpusMetrics,

    /// Global test suite early exit.
    global_early_exit: EarlyExit,
    /// Local fuzz early exit.
    local_early_exit: EarlyExit,
}
125
126impl SharedFuzzState {
127    fn new(state: EvmFuzzState, timeout: Option<u32>, early_exit: EarlyExit) -> Self {
128        Self {
129            state,
130            total_runs: Arc::new(AtomicU32::new(0)),
131            failed_worker_id: OnceLock::new(),
132            total_rejects: Arc::new(AtomicU32::new(0)),
133            timer: FuzzTestTimer::new(timeout),
134            global_corpus_metrics: GlobalCorpusMetrics::default(),
135            global_early_exit: early_exit,
136            local_early_exit: EarlyExit::new(true),
137        }
138    }
139
140    /// Increments the number of runs and returns the new value.
141    fn increment_runs(&self) -> u32 {
142        self.total_runs.fetch_add(1, Ordering::Relaxed) + 1
143    }
144
145    /// Increments and returns the new value of the number of rejected tests.
146    fn increment_rejects(&self) -> u32 {
147        self.total_rejects.fetch_add(1, Ordering::Relaxed) + 1
148    }
149
150    /// Returns `true` if the worker should continue running.
151    fn should_continue(&self) -> bool {
152        !(self.global_early_exit.should_stop()
153            || self.local_early_exit.should_stop()
154            || self.timer.is_timed_out())
155    }
156
157    /// Returns true if the worker was able to claim the failure, false if failure was set by
158    /// another worker
159    fn try_claim_failure(&self, worker_id: usize) -> bool {
160        let mut claimed = false;
161        let _ = self.failed_worker_id.get_or_init(|| {
162            claimed = true;
163            self.local_early_exit.record_failure();
164            worker_id
165        });
166        claimed
167    }
168}
169
/// Wrapper around an [`Executor`] which provides fuzzing support using [`proptest`].
///
/// After instantiation, calling `fuzz` will proceed to hammer the deployed smart contract with
/// inputs, until it finds a counterexample. The provided [`TestRunner`] contains all the
/// configuration which can be overridden via [environment variables](proptest::test_runner::Config)
pub struct FuzzedExecutor<FEN: FoundryEvmNetwork> {
    /// The EVM executor. Cloned once per worker in `run_worker`.
    executor_f: Executor<FEN>,
    /// The fuzzer; its config is cloned and re-seeded per worker.
    runner: TestRunner,
    /// The account that calls tests.
    sender: Address,
    /// The fuzz configuration.
    config: FuzzConfig,
    /// The persisted counterexample to be replayed, if any.
    persisted_failure: Option<BaseCounterExample>,
    /// The number of parallel workers.
    num_workers: usize,
}
189
impl<FEN: FoundryEvmNetwork> FuzzedExecutor<FEN> {
    /// Instantiates a fuzzed executor given a testrunner
    pub fn new(
        executor: Executor<FEN>,
        runner: TestRunner,
        sender: Address,
        config: FuzzConfig,
        persisted_failure: Option<BaseCounterExample>,
    ) -> Self {
        // Cap workers so each gets at least `MIN_RUNS_PER_WORKER` runs (but always
        // allow at least one worker), then bound by available rayon threads.
        let mut max_workers = Ord::max(1, config.runs / MIN_RUNS_PER_WORKER);
        if config.runs == 0 {
            // With no runs configured, spawn no workers at all; `aggregate_results`
            // then short-circuits to a successful empty result.
            max_workers = 0;
        }
        let num_workers = Ord::min(rayon::current_num_threads(), max_workers as usize);
        Self { executor_f: executor, runner, sender, config, persisted_failure, num_workers }
    }

    /// Fuzzes the provided function, assuming it is available at the contract at `address`
    /// If `should_fail` is set to `true`, then it will stop only when there's a success
    /// test case.
    ///
    /// Spawns `num_workers` parallel workers via rayon, each running
    /// [`Self::run_worker`], then merges their states with
    /// [`Self::aggregate_results`].
    ///
    /// Returns a list of all the consumed gas and calldata of every fuzz case.
    #[allow(clippy::too_many_arguments)]
    pub fn fuzz(
        &mut self,
        func: &Function,
        fuzz_fixtures: &FuzzFixtures,
        state: EvmFuzzState,
        address: Address,
        rd: &RevertDecoder,
        progress: Option<&ProgressBar>,
        early_exit: &EarlyExit,
        tokio_handle: &tokio::runtime::Handle,
    ) -> Result<FuzzTestResult> {
        let shared_state = SharedFuzzState::new(state, self.config.timeout, early_exit.clone());

        debug!(n = self.num_workers, "spawning workers");
        let workers = (0..self.num_workers)
            .into_par_iter()
            .map(|worker_id| {
                // Make the tokio runtime context current on this rayon thread for
                // the duration of the worker.
                let _guard = tokio_handle.enter();
                let _guard = info_span!("fuzz_worker", id = worker_id).entered();
                let timer = Instant::now();
                let r = self.run_worker(
                    worker_id,
                    func,
                    fuzz_fixtures,
                    address,
                    rd,
                    &shared_state,
                    progress,
                );
                debug!("finished in {:?}", timer.elapsed());
                r
            })
            // Short-circuits on the first worker that returns an `Err`.
            .collect::<Result<Vec<_>>>()?;

        Ok(self.aggregate_results(workers, func, &shared_state))
    }

    /// Granular and single-step function that runs only one fuzz and returns either a `CaseOutcome`
    /// or a `CounterExampleOutcome`
    ///
    /// Returns `Err(TestCaseError::reject(..))` when the call hit `vm.assume`
    /// (detected via `MAGIC_ASSUME`), and `Err(TestCaseError::fail(..))` when the
    /// raw call itself errored.
    fn single_fuzz(
        &self,
        executor: &Executor<FEN>,
        address: Address,
        calldata: Bytes,
        coverage_metrics: &mut WorkerCorpus,
    ) -> Result<FuzzOutcome<FEN>, TestCaseError> {
        let mut call = executor
            .call_raw(self.sender, address, calldata.clone(), U256::ZERO)
            .map_err(|e| TestCaseError::fail(e.to_string()))?;
        // Feed the input back into the corpus, flagging whether it hit new edges.
        let new_coverage = coverage_metrics.merge_edge_coverage(&mut call);
        coverage_metrics.process_inputs(
            &[BasicTxDetails {
                warp: None,
                roll: None,
                sender: self.sender,
                call_details: CallDetails { target: address, calldata: calldata.clone() },
            }],
            new_coverage,
            None,
        );

        // Handle `vm.assume`.
        if call.result.as_ref() == MAGIC_ASSUME {
            return Err(TestCaseError::reject(FuzzError::AssumeReject));
        }

        let (breakpoints, deprecated_cheatcodes) =
            call.cheatcodes.as_ref().map_or_else(Default::default, |cheats| {
                (cheats.breakpoints.clone(), cheats.deprecated.clone())
            });

        // Consider call success if test should not fail on reverts and reverter is not the
        // cheatcode or test address.
        let success = if !self.config.fail_on_revert
            && call
                .reverter
                .is_some_and(|reverter| reverter != address && reverter != CHEATCODE_ADDRESS)
        {
            true
        } else {
            executor.is_raw_call_mut_success(address, &mut call, false)
        };

        if success {
            Ok(FuzzOutcome::Case(CaseOutcome {
                case: FuzzCase { gas: call.gas_used, stipend: call.stipend },
                traces: call.traces,
                coverage: call.line_coverage,
                breakpoints,
                logs: call.logs,
                deprecated_cheatcodes,
            }))
        } else {
            Ok(FuzzOutcome::CounterExample(CounterExampleOutcome {
                exit_reason: call.exit_reason,
                counterexample: (calldata, call),
                breakpoints,
            }))
        }
    }

    /// Aggregates the results from all workers
    ///
    /// Merges per-worker gas/logs/coverage/traces into one [`FuzzTestResult`].
    /// If any worker claimed a failure in `shared_state`, the counterexample and
    /// failure reason are extracted from that worker; otherwise the traces and
    /// breakpoints of the worker that ran last are reported.
    fn aggregate_results(
        &self,
        mut workers: Vec<WorkerState<FEN>>,
        func: &Function,
        shared_state: &SharedFuzzState,
    ) -> FuzzTestResult {
        let mut result = FuzzTestResult::default();
        if workers.is_empty() {
            // No workers were spawned (config.runs == 0): trivially successful.
            result.success = true;
            return result;
        }

        // Find first case and last run worker. Set `failed_corpus_replays`.
        let mut first_case_candidate = None;
        let mut last_run_worker = None;
        for (i, worker) in workers.iter().enumerate() {
            // "First" = the case with the lowest global run number across workers.
            if let Some((run, ref case)) = worker.first_case
                && first_case_candidate.as_ref().is_none_or(|&(r, _)| run < r)
            {
                first_case_candidate = Some((run, case.clone()));
            }

            // "Last" = the worker with the highest last-run timestamp.
            if last_run_worker.is_none_or(|(t, _)| worker.last_run_timestamp > t) {
                last_run_worker = Some((worker.last_run_timestamp, i));
            }

            // Only set replays from master which is responsible for replaying persisted corpus.
            if worker.id == 0 {
                result.failed_corpus_replays = worker.failed_corpus_replays;
            }
        }
        result.first_case = first_case_candidate.map(|(_, case)| case).unwrap_or_default();
        let (_, last_run_worker_idx) = last_run_worker.expect("at least one worker");

        if let Some(&failed_worker_id) = shared_state.failed_worker_id.get() {
            result.success = false;

            let failed_worker_idx = workers.iter().position(|w| w.id == failed_worker_id).unwrap();
            let failed_worker = &mut workers[failed_worker_idx];

            let (calldata, call) = std::mem::take(&mut failed_worker.counterexample);
            result.labels = call.labels;
            result.traces = call.traces.clone();
            result.breakpoints = call.cheatcodes.map(|c| c.breakpoints);

            match &failed_worker.failure {
                Some(TestCaseError::Fail(reason)) => {
                    let reason = reason.to_string();
                    result.reason = (!reason.is_empty()).then_some(reason);
                    // Decode the counterexample args (skipping the 4-byte selector).
                    let args = if let Some(data) = calldata.get(4..) {
                        func.abi_decode_input(data).unwrap_or_default()
                    } else {
                        vec![]
                    };
                    result.counterexample = Some(CounterExample::Single(
                        BaseCounterExample::from_fuzz_call(calldata, args, call.traces),
                    ));
                }
                Some(TestCaseError::Reject(reason)) => {
                    let reason = reason.to_string();
                    result.reason = (!reason.is_empty()).then_some(reason);
                }
                None => {}
            }
        } else {
            let last_run_worker = &workers[last_run_worker_idx];
            result.success = true;
            result.traces = last_run_worker.traces.last().cloned();
            result.breakpoints = last_run_worker.breakpoints.clone();
        }

        if !self.config.show_logs {
            // Without show_logs, only the last-running worker's logs are kept.
            result.logs = workers[last_run_worker_idx].logs.clone();
        }

        for mut worker in workers {
            result.gas_by_case.append(&mut worker.gas_by_case);
            if self.config.show_logs {
                result.logs.append(&mut worker.logs);
            }
            result.gas_report_traces.extend(worker.traces.into_iter().map(|t| t.arena));
            HitMaps::merge_opt(&mut result.line_coverage, worker.coverage);
            result.deprecated_cheatcodes.extend(worker.deprecated_cheatcodes);
        }

        // A failure reason that decodes as a skip marker means the test was
        // skipped, not failed.
        if let Some(reason) = &result.reason
            && let Some(reason) = SkipReason::decode_self(reason)
        {
            result.skipped = true;
            result.reason = reason.0;
        }

        result
    }

    /// Runs a single fuzz worker
    ///
    /// Worker 0 is the "master": it replays the persisted corpus and persisted
    /// counterexample, and is the only worker that reports corpus metrics.
    /// The worker loops until it reaches its run quota, a stop condition in
    /// `shared_state` triggers, or it records a failure.
    #[allow(clippy::too_many_arguments)]
    fn run_worker(
        &self,
        worker_id: usize,
        func: &Function,
        fuzz_fixtures: &FuzzFixtures,
        address: Address,
        rd: &RevertDecoder,
        shared_state: &SharedFuzzState,
        progress: Option<&ProgressBar>,
    ) -> Result<WorkerState<FEN>> {
        // Prepare: mix plain calldata generation with dictionary-driven generation
        // according to the configured dictionary weight (clamped to 100).
        let dictionary_weight = self.config.dictionary.dictionary_weight.min(100);
        let strategy = proptest::prop_oneof![
            100 - dictionary_weight => fuzz_calldata(func.clone(), fuzz_fixtures),
            dictionary_weight => fuzz_calldata_from_state(func.clone(), &shared_state.state),
        ]
        .prop_map(move |calldata| BasicTxDetails {
            warp: None,
            roll: None,
            sender: Default::default(),
            call_details: CallDetails { target: Default::default(), calldata },
        });

        let mut corpus = WorkerCorpus::new(
            worker_id,
            self.config.corpus.clone(),
            strategy.boxed(),
            // Master worker replays the persisted corpus using the executor
            (worker_id == 0).then_some(&self.executor_f),
            Some(func),
            None, // fuzzed_contracts for invariant tests
        )?;
        let mut executor = self.executor_f.clone();

        let mut worker = WorkerState::new(worker_id);
        // We want to collect at least one trace which will be displayed to user.
        let max_traces_to_collect =
            std::cmp::max(1, self.config.gas_report_samples / self.num_workers as u32);

        let worker_runs = self.runs_per_worker(worker_id);
        debug!(worker_runs);

        let mut runner_config = self.runner.config().clone();
        runner_config.cases = worker_runs;

        let mut runner = if let Some(seed) = self.config.seed {
            // For deterministic parallel fuzzing, derive a unique seed for each worker
            let worker_seed = if worker_id == 0 {
                // Master worker uses the provided seed as is.
                seed
            } else {
                // Derive a worker-specific seed using keccak256(seed || worker_id)
                let seed_data =
                    [&seed.to_be_bytes::<32>()[..], &worker_id.to_be_bytes()[..]].concat();
                U256::from_be_bytes(keccak256(seed_data).0)
            };
            trace!(target: "forge::test", ?worker_seed, "deterministic seed for worker {worker_id}");
            let rng = TestRng::from_seed(RngAlgorithm::ChaCha, &worker_seed.to_be_bytes::<32>());
            TestRunner::new_with_rng(runner_config, rng)
        } else {
            TestRunner::new(runner_config)
        };

        // Only the master worker replays a persisted counterexample.
        let mut persisted_failure = self.persisted_failure.as_ref().filter(|_| worker_id == 0);

        // Offset to stagger corpus syncs across workers; so that workers don't sync at the same
        // time.
        let sync_offset = worker_id as u32 * 100;
        let sync_threshold = SYNC_INTERVAL + sync_offset;
        let mut runs_since_sync = sync_threshold; // Always sync at the start.
        let mut last_metrics_report = Instant::now();
        // Continue while:
        // 1. Global state allows (not timed out, not at global limit, no failure found)
        // 2. Worker hasn't reached its specific run limit
        'stop: while shared_state.should_continue() && worker.runs < worker_runs {
            // If counterexample recorded, replay it first, without incrementing runs.
            // NOTE: the persisted failure is consumed (`take`) even when the
            // selector check below fails, in which case it is simply skipped.
            let input = if worker_id == 0
                && let Some(failure) = persisted_failure.take()
                && failure.calldata.get(..4).is_some_and(|selector| func.selector() == selector)
            {
                failure.calldata.clone()
            } else {
                runs_since_sync += 1;
                if runs_since_sync >= sync_threshold {
                    let timer = Instant::now();
                    corpus.sync(
                        self.num_workers,
                        &executor,
                        Some(func),
                        None,
                        &shared_state.global_corpus_metrics,
                    )?;
                    trace!("finished corpus sync in {:?}", timer.elapsed());
                    runs_since_sync = 0;
                }

                // Keep the cheatcode PRNG deterministic per run when a seed is set.
                if let Some(cheats) = executor.inspector_mut().cheatcodes.as_mut()
                    && let Some(seed) = self.config.seed
                {
                    cheats.set_seed(seed.wrapping_add(U256::from(worker.runs)));
                }

                match corpus.new_input(&mut runner, &shared_state.state, func) {
                    Ok(input) => input,
                    Err(err) => {
                        worker.failure = Some(TestCaseError::fail(format!(
                            "failed to generate fuzzed input in worker {}: {err}",
                            worker.id
                        )));
                        shared_state.try_claim_failure(worker_id);
                        break 'stop;
                    }
                }
            };

            // Bumps both the global and per-worker run counters (and the progress
            // bar), returning the new global total.
            let mut inc_runs = || {
                let total_runs = shared_state.increment_runs();
                debug_assert!(
                    shared_state.timer.is_enabled() || total_runs <= self.config.runs,
                    "worker runs were not distributed correctly"
                );
                worker.runs += 1;
                if let Some(progress) = progress {
                    progress.inc(1);
                }
                total_runs
            };

            worker.last_run_timestamp = SystemTime::now().duration_since(UNIX_EPOCH)?.as_millis();
            match self.single_fuzz(&executor, address, input, &mut corpus) {
                Ok(fuzz_outcome) => match fuzz_outcome {
                    FuzzOutcome::Case(case) => {
                        let total_runs = inc_runs();

                        // Only the master worker reports corpus metrics.
                        if worker_id == 0 && self.config.corpus.collect_edge_coverage() {
                            if let Some(progress) = progress {
                                corpus.sync_metrics(&shared_state.global_corpus_metrics);
                                progress
                                    .set_message(format!("{}", shared_state.global_corpus_metrics));
                            } else if last_metrics_report.elapsed()
                                > DURATION_BETWEEN_METRICS_REPORT
                            {
                                corpus.sync_metrics(&shared_state.global_corpus_metrics);
                                // Display metrics inline.
                                let metrics = json!({
                                    "timestamp": SystemTime::now()
                                        .duration_since(UNIX_EPOCH)?
                                        .as_secs(),
                                    "test": func.name,
                                    "metrics": shared_state.global_corpus_metrics.load(),
                                });
                                let _ = sh_println!("{metrics}");
                                last_metrics_report = Instant::now();
                            }
                        }

                        worker.gas_by_case.push((case.case.gas, case.case.stipend));

                        if worker.first_case.is_none() {
                            worker.first_case = Some((total_runs, case.case));
                        }

                        // Keep at most `max_traces_to_collect` traces, replacing
                        // the newest entry once the cap is reached.
                        if let Some(call_traces) = case.traces {
                            if worker.traces.len() == max_traces_to_collect as usize {
                                worker.traces.pop();
                            }
                            worker.traces.push(call_traces);
                            worker.breakpoints = Some(case.breakpoints);
                        }

                        // Always store logs from the last run in test_data.logs for display at
                        // verbosity >= 2. When show_logs is true,
                        // accumulate all logs. When false, only keep the last run's logs.
                        if self.config.show_logs {
                            worker.logs.extend(case.logs);
                        } else {
                            worker.logs = case.logs;
                        }

                        HitMaps::merge_opt(&mut worker.coverage, case.coverage);
                        worker.deprecated_cheatcodes = case.deprecated_cheatcodes;
                    }
                    FuzzOutcome::CounterExample(CounterExampleOutcome {
                        exit_reason: status,
                        counterexample: outcome,
                        ..
                    }) => {
                        inc_runs();

                        // Record the counterexample and stop this worker; the first
                        // worker to claim the failure wins in aggregation.
                        let reason = rd.maybe_decode(&outcome.1.result, status);
                        worker.logs.extend(outcome.1.logs.clone());
                        worker.counterexample = outcome;
                        worker.failure = Some(TestCaseError::fail(reason.unwrap_or_default()));
                        shared_state.try_claim_failure(worker_id);
                        break 'stop;
                    }
                },
                Err(err) => match err {
                    TestCaseError::Fail(_) => {
                        worker.failure = Some(err);
                        shared_state.try_claim_failure(worker_id);
                        break 'stop;
                    }
                    TestCaseError::Reject(_) => {
                        let max = self.config.max_test_rejects;

                        let total = shared_state.increment_rejects();

                        // Update progress bar to reflect rejected runs.
                        // TODO(dani): (pre-existing) conflicts with corpus metrics `set_message`
                        if !self.config.corpus.collect_edge_coverage()
                            && let Some(progress) = progress
                        {
                            progress.set_message(format!("([{total}] rejected)"));
                        }

                        // `max == 0` means unlimited rejects.
                        if max > 0 && total > max {
                            worker.failure =
                                Some(TestCaseError::reject(FuzzError::TooManyRejects(max)));
                            shared_state.try_claim_failure(worker_id);
                            break 'stop;
                        }
                    }
                },
            }
        }

        if worker_id == 0 {
            worker.failed_corpus_replays = corpus.failed_replays;
        }

        // Logs stats
        trace!("worker {worker_id} fuzz stats");
        shared_state.state.log_stats();

        Ok(worker)
    }

    /// Determines the number of runs per worker.
    ///
    /// Splits `config.runs` as evenly as possible across `num_workers`; the first
    /// `config.runs % num_workers` workers get one extra run.
    fn runs_per_worker(&self, worker_id: usize) -> u32 {
        let worker_id = worker_id as u32;
        let total_runs = self.config.runs;
        let n = self.num_workers as u32;
        let runs = total_runs / n;
        let remainder = total_runs % n;
        // Distribute the remainder evenly among the first `remainder` workers,
        // assuming `worker_id` is in `0..n`.
        if worker_id < remainder { runs + 1 } else { runs }
    }
}