// foundry_evm_sancov/lib.rs

use std::sync::atomic::{AtomicPtr, AtomicU32, AtomicUsize, Ordering};

static COVERAGE_MAP_PTR: AtomicPtr<u8> = AtomicPtr::new(std::ptr::null_mut());
static COVERAGE_MAP_LEN: AtomicUsize = AtomicUsize::new(0);
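/// Point the SanitizerCoverage callbacks at a caller-owned coverage byte
/// map. Only the raw pointer is stored, so the buffer must stay alive and
/// writable until `clear_coverage_map` is called.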
pub fn set_coverage_map(ptr: *mut u8, len: usize) {
    COVERAGE_MAP_PTR.store(ptr, Ordering::Release);
    COVERAGE_MAP_LEN.store(len, Ordering::Release);
}
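/// Detach the coverage map so subsequent hits become no-ops. Call this
/// before the buffer passed to `set_coverage_map` is freed or moved.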
pub fn clear_coverage_map() {
    COVERAGE_MAP_PTR.store(std::ptr::null_mut(), Ordering::Release);
    COVERAGE_MAP_LEN.store(0, Ordering::Release);
}
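/// Whether a coverage map is currently installed, i.e. whether coverage
/// and comparison feedback should be collected at all.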
pub fn is_active() -> bool {
    !COVERAGE_MAP_PTR.load(Ordering::Relaxed).is_null()
}
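/// Next free slot in the coverage map; slots are handed out densely, in
/// first-hit order.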
static NEXT_SANCOV_IDX: AtomicUsize = AtomicUsize::new(0);
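/// Sparse table from compiler-assigned guard ID to coverage-map slot;
/// entries start out `UNASSIGNED` and are filled in lazily by `record_hit`.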
static GUARD_LOOKUP: std::sync::RwLock<Vec<usize>> = std::sync::RwLock::new(Vec::new());

const UNASSIGNED: usize = usize::MAX;
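/// Count one hit for `guard_id` in the shared coverage map, lazily
/// assigning the guard a map slot on first sight. No-op while no map is
/// installed.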
#[inline(always)]
pub fn record_hit(guard_id: u32) {
    let ptr = COVERAGE_MAP_PTR.load(Ordering::Relaxed);
    if ptr.is_null() {
        return;
    }
    let len = COVERAGE_MAP_LEN.load(Ordering::Relaxed);
    if len == 0 {
        return;
    }

    let gid = guard_id as usize;

    // Fast path: the guard already has a slot, so a shared read lock suffices.
    let idx = {
        let lookup = GUARD_LOOKUP.read().unwrap();
        (gid < lookup.len() && lookup[gid] != UNASSIGNED).then(|| lookup[gid])
    };

    // Slow path: take the write lock and assign the next free slot. The
    // entry is re-checked under the write lock because another thread may
    // have assigned it between the two lock acquisitions.
    let idx = idx.unwrap_or_else(|| {
        let mut lookup = GUARD_LOOKUP.write().unwrap();
        if gid >= lookup.len() {
            lookup.resize(gid + 1, UNASSIGNED);
        }
        if lookup[gid] == UNASSIGNED {
            lookup[gid] = NEXT_SANCOV_IDX.fetch_add(1, Ordering::Relaxed);
        }
        lookup[gid]
    });

    // Guards that fall beyond the map simply stop being counted rather
    // than writing out of bounds.
    if idx >= len {
        return;
    }
    unsafe {
        let slot = ptr.add(idx);
        *slot = (*slot).wrapping_add(1);
    }
}
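/// Number of distinct guards hit so far, i.e. how many coverage-map slots
/// are in use.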
pub fn sancov_edge_count() -> usize {
    NEXT_SANCOV_IDX.load(Ordering::Relaxed)
}
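/// Source of fresh guard IDs; starts at 1 because ID 0 is reserved for
/// "uninitialized".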
static GUARD_COUNTER: AtomicU32 = AtomicU32::new(1);
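/// SanitizerCoverage module-init hook: the instrumented binary calls this
/// at startup with the module's guard range `[start, stop)`, which we
/// number with unique nonzero IDs.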
#[unsafe(no_mangle)]
pub unsafe extern "C" fn __sanitizer_cov_trace_pc_guard_init(mut start: *mut u32, stop: *mut u32) {
    // The runtime may call this more than once for the same guard range; a
    // nonzero first guard means the range was already numbered, and
    // renumbering it would burn fresh IDs for guards that already had them.
    if start == stop || unsafe { *start } != 0 {
        return;
    }
    while start < stop {
        let id = GUARD_COUNTER.fetch_add(1, Ordering::Relaxed);
        unsafe {
            *start = id;
            start = start.add(1);
        }
    }
}
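/// SanitizerCoverage edge hook: called on every instrumented edge with a
/// pointer to that edge's guard.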
#[unsafe(no_mangle)]
pub unsafe extern "C" fn __sanitizer_cov_trace_pc_guard(guard: *mut u32) {
    let id = unsafe { *guard };
    // ID 0 means the guard was never initialized; ignore it.
    if id == 0 {
        return;
    }
    record_hit(id);
}
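/// Upper bound on comparison operands buffered per thread between drains,
/// so a hot comparison site cannot grow the buffer without limit.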
const MAX_CMP_OPERANDS: usize = 512;
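/// One operand captured from a comparison instruction, stored big-endian
/// and left-padded to 32 bytes (one EVM word).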
#[derive(Clone, Copy, Debug)]
pub struct CmpSample {
    /// Bit width of the original comparison (8, 16, 32, or 64).
    pub width: u8,
    /// Operand value, big-endian, left-padded to a 32-byte word.
    pub value: [u8; 32],
}
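// Per-thread buffer of captured comparison operands, retrieved via
// `drain_cmp_operands`.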
thread_local! {
    static CMP_OPERANDS: std::cell::RefCell<Vec<CmpSample>> =
        const { std::cell::RefCell::new(Vec::new()) };
}
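/// Buffer both operands of a comparison (skipping zeros and duplicates)
/// for later retrieval via `drain_cmp_operands`.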
#[inline(always)]
fn record_cmp(width: u8, arg1: u64, arg2: u64) {
    if !is_active() {
        return;
    }
    // Zero operands are overwhelmingly common and carry no signal.
    if arg1 == 0 && arg2 == 0 {
        return;
    }
    CMP_OPERANDS.with(|ops| {
        let mut ops = ops.borrow_mut();
        if ops.len() >= MAX_CMP_OPERANDS {
            return;
        }
        if arg1 != 0 {
            // Left-pad the 8-byte value into a 32-byte big-endian word.
            let mut buf = [0u8; 32];
            buf[24..].copy_from_slice(&arg1.to_be_bytes());
            ops.push(CmpSample { width, value: buf });
        }
        if arg2 != 0 && arg2 != arg1 {
            let mut buf = [0u8; 32];
            buf[24..].copy_from_slice(&arg2.to_be_bytes());
            ops.push(CmpSample { width, value: buf });
        }
    });
}
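/// Take all comparison operands captured on this thread since the last
/// drain, leaving the buffer empty.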
pub fn drain_cmp_operands() -> Vec<CmpSample> {
    CMP_OPERANDS.with(|ops| {
        let mut ops = ops.borrow_mut();
        std::mem::take(&mut *ops)
    })
}
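/// Discard this thread's captured comparison operands without reading them.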
pub fn clear_cmp_operands() {
    CMP_OPERANDS.with(|ops| ops.borrow_mut().clear());
}
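// SanitizerCoverage comparison hooks. The `const_` variants mean the first
// operand is a compile-time constant; both families are treated the same
// here.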
#[unsafe(no_mangle)]
pub unsafe extern "C" fn __sanitizer_cov_trace_cmp1(arg1: u8, arg2: u8) {
    record_cmp(8, arg1 as u64, arg2 as u64);
}

#[unsafe(no_mangle)]
pub unsafe extern "C" fn __sanitizer_cov_trace_cmp2(arg1: u16, arg2: u16) {
    record_cmp(16, arg1 as u64, arg2 as u64);
}

#[unsafe(no_mangle)]
pub unsafe extern "C" fn __sanitizer_cov_trace_cmp4(arg1: u32, arg2: u32) {
    record_cmp(32, arg1 as u64, arg2 as u64);
}

#[unsafe(no_mangle)]
pub unsafe extern "C" fn __sanitizer_cov_trace_cmp8(arg1: u64, arg2: u64) {
    record_cmp(64, arg1, arg2);
}

#[unsafe(no_mangle)]
pub unsafe extern "C" fn __sanitizer_cov_trace_const_cmp1(arg1: u8, arg2: u8) {
    record_cmp(8, arg1 as u64, arg2 as u64);
}

#[unsafe(no_mangle)]
pub unsafe extern "C" fn __sanitizer_cov_trace_const_cmp2(arg1: u16, arg2: u16) {
    record_cmp(16, arg1 as u64, arg2 as u64);
}

#[unsafe(no_mangle)]
pub unsafe extern "C" fn __sanitizer_cov_trace_const_cmp4(arg1: u32, arg2: u32) {
    record_cmp(32, arg1 as u64, arg2 as u64);
}

#[unsafe(no_mangle)]
pub unsafe extern "C" fn __sanitizer_cov_trace_const_cmp8(arg1: u64, arg2: u64) {
    record_cmp(64, arg1, arg2);
}
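/// SanitizerCoverage hook for `switch` statements: `val` is the switched
/// value and `cases` points at the case-value table (see the layout note
/// in the body).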
#[unsafe(no_mangle)]
pub unsafe extern "C" fn __sanitizer_cov_trace_switch(val: u64, cases: *const u64) {
    if !is_active() || cases.is_null() {
        return;
    }
    // SanitizerCoverage lays the table out as: cases[0] = number of case
    // values, cases[1] = bit width of the switched value, cases[2..] = the
    // case values themselves.
    let n = unsafe { *cases } as usize;
    for i in 0..n.min(16) {
        let case_val = unsafe { *cases.add(2 + i) };
        record_cmp(64, val, case_val);
    }
}
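
// --- Usage sketch (not part of the original file) ---
// A minimal smoke test of the two feedback paths. It drives the API
// directly rather than relying on compiler instrumentation, and it assumes
// nothing else in the test process is recording hits concurrently.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn hits_and_cmp_operands_are_captured() {
        let mut map = vec![0u8; 64];
        set_coverage_map(map.as_mut_ptr(), map.len());

        // Two hits on the same guard land in the same lazily assigned slot.
        record_hit(7);
        record_hit(7);

        // Comparison operands are buffered per thread until drained; the
        // zero second operand is filtered out.
        record_cmp(64, 0xdead_beef_u64, 0);
        let ops = drain_cmp_operands();
        assert_eq!(ops.len(), 1);
        assert_eq!(ops[0].width, 64);
        assert_eq!(&ops[0].value[28..], &0xdead_beef_u64.to_be_bytes()[4..]);

        clear_coverage_map();
        assert!(!is_active());
        // Some slot in the map now holds the hit count of 2.
        assert!(map.iter().any(|&b| b == 2));
    }
}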