// foundry_evm_sancov/lib.rs

//! SanitizerCoverage callbacks for coverage-guided fuzzing of native Rust code.
//!
//! Provides LLVM SanitizerCoverage callbacks and a coverage map that can be set
//! by the fuzzing executor to collect edge coverage from instrumented Rust
//! crates (e.g. precompile implementations compiled with `-Cpasses=sancov-module`).
//!
//! Additionally provides trace-cmp callbacks that capture comparison operands
//! and surface them to the fuzzer's dictionary, enabling it to solve comparison
//! guards (balance checks, overflow guards, etc.).
//!
//! Only crates compiled with sancov instrumentation (via a `RUSTC_WRAPPER`)
//! will trigger these callbacks — no runtime filtering needed.
use std::sync::atomic::{AtomicPtr, AtomicU32, AtomicUsize, Ordering};

/// Pointer to the active coverage map buffer; null when no map is installed.
static COVERAGE_MAP_PTR: AtomicPtr<u8> = AtomicPtr::new(std::ptr::null_mut());
/// Length in bytes of the active coverage map buffer.
static COVERAGE_MAP_LEN: AtomicUsize = AtomicUsize::new(0);

/// Point the coverage map at the given buffer. Subsequent `__sanitizer_cov_trace_pc_guard`
/// calls will record hits into this buffer.
///
/// The caller must keep `ptr..ptr + len` valid and writable until
/// `clear_coverage_map` is called.
pub fn set_coverage_map(ptr: *mut u8, len: usize) {
    COVERAGE_MAP_PTR.store(ptr, Ordering::Release);
    COVERAGE_MAP_LEN.store(len, Ordering::Release);
}

26/// Deactivate the coverage map.
27pub fn clear_coverage_map() {
28    COVERAGE_MAP_PTR.store(std::ptr::null_mut(), Ordering::Release);
29    COVERAGE_MAP_LEN.store(0, Ordering::Release);
30}
31
32/// Whether a coverage map is currently active.
33pub fn is_active() -> bool {
34    !COVERAGE_MAP_PTR.load(Ordering::Relaxed).is_null()
35}
36
/// Next compact coverage-map index to hand out to a newly seen guard ID.
static NEXT_SANCOV_IDX: AtomicUsize = AtomicUsize::new(0);

/// Maps guard ID -> compact coverage-map index (`UNASSIGNED` until first hit).
/// Indexed by guard ID; grown on demand in `record_hit`'s slow path.
static GUARD_LOOKUP: std::sync::RwLock<Vec<usize>> = std::sync::RwLock::new(Vec::new());

/// Sentinel marking a guard ID that has not been assigned a map index yet.
const UNASSIGNED: usize = usize::MAX;

43/// Record a hit for the given guard ID into the active coverage map.
44#[inline(always)]
45pub fn record_hit(guard_id: u32) {
46    let ptr = COVERAGE_MAP_PTR.load(Ordering::Relaxed);
47    if ptr.is_null() {
48        return;
49    }
50    let len = COVERAGE_MAP_LEN.load(Ordering::Relaxed);
51    if len == 0 {
52        return;
53    }
54
55    let gid = guard_id as usize;
56
57    // Fast path: read lock, check if already assigned.
58    let idx = {
59        let lookup = GUARD_LOOKUP.read().unwrap();
60        (gid < lookup.len() && lookup[gid] != UNASSIGNED).then(|| lookup[gid])
61    };
62
63    let idx = idx.unwrap_or_else(|| {
64        // Slow path: write lock, assign new index (double-check after acquiring).
65        let mut lookup = GUARD_LOOKUP.write().unwrap();
66        if gid >= lookup.len() {
67            lookup.resize(gid + 1, UNASSIGNED);
68        }
69        if lookup[gid] == UNASSIGNED {
70            lookup[gid] = NEXT_SANCOV_IDX.fetch_add(1, Ordering::Relaxed);
71        }
72        lookup[gid]
73    });
74
75    if idx >= len {
76        return;
77    }
78    unsafe {
79        let slot = ptr.add(idx);
80        *slot = (*slot).wrapping_add(1);
81    }
82}
83
84/// Number of unique sancov edges discovered so far.
85pub fn sancov_edge_count() -> usize {
86    NEXT_SANCOV_IDX.load(Ordering::Relaxed)
87}
88
/// Monotonic source of guard IDs across all instrumented DSOs. Starts at 1 so
/// that 0 can mean "uninitialized" in a guard slot.
static GUARD_COUNTER: AtomicU32 = AtomicU32::new(1);

/// # Safety
///
/// Called by the LLVM SanitizerCoverage runtime at startup. `[start, stop)` must be a valid
/// range of mutable `u32` guard slots allocated by the compiler for the current DSO.
#[unsafe(no_mangle)]
pub unsafe extern "C" fn __sanitizer_cov_trace_pc_guard_init(mut start: *mut u32, stop: *mut u32) {
    // The runtime may invoke this more than once for the same module; if the
    // first slot is already non-zero the range is initialized, and re-assigning
    // would waste guard IDs (and, downstream, coverage-map slots).
    // SAFETY: caller guarantees `[start, stop)` is a valid slot range.
    if start == stop || unsafe { *start } != 0 {
        return;
    }
    while start < stop {
        let id = GUARD_COUNTER.fetch_add(1, Ordering::Relaxed);
        // SAFETY: `start < stop`, so `start` is within the valid slot range.
        unsafe {
            *start = id;
            start = start.add(1);
        }
    }
}

106/// # Safety
107///
108/// Called by the LLVM SanitizerCoverage runtime at every instrumented CFG edge.
109/// `guard` must point to a valid `u32` guard slot initialized by
110/// `__sanitizer_cov_trace_pc_guard_init`.
111#[unsafe(no_mangle)]
112pub unsafe extern "C" fn __sanitizer_cov_trace_pc_guard(guard: *mut u32) {
113    let id = unsafe { *guard };
114    if id == 0 {
115        return;
116    }
117    record_hit(id);
118}
119
// ---------------------------------------------------------------------------
// Trace-cmp: capture comparison operands from instrumented code
// ---------------------------------------------------------------------------

/// Cap on buffered operands per thread between drains, bounding memory use if
/// the fuzzer never drains (e.g. comparisons hit outside a fuzz iteration).
const MAX_CMP_OPERANDS: usize = 512;

/// A single comparison operand captured by a trace-cmp callback.
#[derive(Clone, Copy, Debug)]
pub struct CmpSample {
    /// Bit-width of the original comparison (8, 16, 32, or 64).
    pub width: u8,
    /// The operand value, big-endian and right-aligned in a 32-byte buffer.
    pub value: [u8; 32],
}

thread_local! {
    // Per-thread buffer of captured operands; drained by `drain_cmp_operands`.
    static CMP_OPERANDS: std::cell::RefCell<Vec<CmpSample>> =
        const { std::cell::RefCell::new(Vec::new()) };
}

140#[inline(always)]
141fn record_cmp(width: u8, arg1: u64, arg2: u64) {
142    if !is_active() {
143        return;
144    }
145    if arg1 == 0 && arg2 == 0 {
146        return;
147    }
148    CMP_OPERANDS.with(|ops| {
149        let mut ops = ops.borrow_mut();
150        if ops.len() >= MAX_CMP_OPERANDS {
151            return;
152        }
153        if arg1 != 0 {
154            let mut buf = [0u8; 32];
155            buf[24..].copy_from_slice(&arg1.to_be_bytes());
156            ops.push(CmpSample { width, value: buf });
157        }
158        if arg2 != 0 && arg2 != arg1 {
159            let mut buf = [0u8; 32];
160            buf[24..].copy_from_slice(&arg2.to_be_bytes());
161            ops.push(CmpSample { width, value: buf });
162        }
163    });
164}
165
166/// Drain all captured comparison operands from the current thread.
167pub fn drain_cmp_operands() -> Vec<CmpSample> {
168    CMP_OPERANDS.with(|ops| {
169        let mut ops = ops.borrow_mut();
170        std::mem::take(&mut *ops)
171    })
172}
173
174/// Clear all captured comparison operands on the current thread.
175pub fn clear_cmp_operands() {
176    CMP_OPERANDS.with(|ops| ops.borrow_mut().clear());
177}
178
179/// # Safety
180///
181/// Called by LLVM SanitizerCoverage at 1-byte comparison instructions.
182#[unsafe(no_mangle)]
183pub unsafe extern "C" fn __sanitizer_cov_trace_cmp1(arg1: u8, arg2: u8) {
184    record_cmp(8, arg1 as u64, arg2 as u64);
185}
186
187/// # Safety
188///
189/// Called by LLVM SanitizerCoverage at 2-byte comparison instructions.
190#[unsafe(no_mangle)]
191pub unsafe extern "C" fn __sanitizer_cov_trace_cmp2(arg1: u16, arg2: u16) {
192    record_cmp(16, arg1 as u64, arg2 as u64);
193}
194
195/// # Safety
196///
197/// Called by LLVM SanitizerCoverage at 4-byte comparison instructions.
198#[unsafe(no_mangle)]
199pub unsafe extern "C" fn __sanitizer_cov_trace_cmp4(arg1: u32, arg2: u32) {
200    record_cmp(32, arg1 as u64, arg2 as u64);
201}
202
203/// # Safety
204///
205/// Called by LLVM SanitizerCoverage at 8-byte comparison instructions.
206#[unsafe(no_mangle)]
207pub unsafe extern "C" fn __sanitizer_cov_trace_cmp8(arg1: u64, arg2: u64) {
208    record_cmp(64, arg1, arg2);
209}
210
211/// # Safety
212///
213/// Called by LLVM SanitizerCoverage at 1-byte constant comparison instructions.
214#[unsafe(no_mangle)]
215pub unsafe extern "C" fn __sanitizer_cov_trace_const_cmp1(arg1: u8, arg2: u8) {
216    record_cmp(8, arg1 as u64, arg2 as u64);
217}
218
219/// # Safety
220///
221/// Called by LLVM SanitizerCoverage at 2-byte constant comparison instructions.
222#[unsafe(no_mangle)]
223pub unsafe extern "C" fn __sanitizer_cov_trace_const_cmp2(arg1: u16, arg2: u16) {
224    record_cmp(16, arg1 as u64, arg2 as u64);
225}
226
227/// # Safety
228///
229/// Called by LLVM SanitizerCoverage at 4-byte constant comparison instructions.
230#[unsafe(no_mangle)]
231pub unsafe extern "C" fn __sanitizer_cov_trace_const_cmp4(arg1: u32, arg2: u32) {
232    record_cmp(32, arg1 as u64, arg2 as u64);
233}
234
235/// # Safety
236///
237/// Called by LLVM SanitizerCoverage at 8-byte constant comparison instructions.
238#[unsafe(no_mangle)]
239pub unsafe extern "C" fn __sanitizer_cov_trace_const_cmp8(arg1: u64, arg2: u64) {
240    record_cmp(64, arg1, arg2);
241}
242
243/// # Safety
244///
245/// Called by LLVM SanitizerCoverage before switch statements.
246/// `cases[0]` is the number of cases, `cases[1]` is bit-width of `val`,
247/// `cases[2..]` are the case constants.
248#[unsafe(no_mangle)]
249pub unsafe extern "C" fn __sanitizer_cov_trace_switch(val: u64, cases: *const u64) {
250    if !is_active() || cases.is_null() {
251        return;
252    }
253    let n = unsafe { *cases } as usize;
254    for i in 0..n.min(16) {
255        let case_val = unsafe { *cases.add(2 + i) };
256        record_cmp(64, val, case_val);
257    }
258}