/*
 * Copyright (c) 2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
28
#ifndef _KSANCOV_H_
#define _KSANCOV_H_

#include <stdint.h>
#include <stdatomic.h>
#include <sys/ioccom.h>

/* Device node name and full /dev path of the ksancov coverage device. */
#define KSANCOV_DEVNODE "ksancov"
#define KSANCOV_PATH "/dev/" KSANCOV_DEVNODE

/*
 * ioctl
 */
42
/*
 * Out-parameter block for the KSANCOV_IOC_MAP / KSANCOV_IOC_MAP_EDGEMAP
 * ioctls: the kernel fills in the address and size of the shared mapping.
 */
struct ksancov_buf_desc {
	uintptr_t ptr; /* ptr to shared buffer [out] */
	size_t sz;     /* size of shared buffer [out] */
};
47
/* Set mode */
#define KSANCOV_IOC_TRACE _IOW('K', 1, size_t) /* number of pcs */
#define KSANCOV_IOC_COUNTERS _IO('K', 2)

/* Establish a shared mapping of the coverage buffer. */
#define KSANCOV_IOC_MAP _IOWR('K', 8, struct ksancov_buf_desc)

/* Establish a shared mapping of the edge address buffer. */
#define KSANCOV_IOC_MAP_EDGEMAP _IOWR('K', 9, struct ksancov_buf_desc)

/* Log the current thread */
#define KSANCOV_IOC_START _IOW('K', 10, uintptr_t)

/* Query the total number of instrumented edges. */
#define KSANCOV_IOC_NEDGES _IOR('K', 50, size_t)

/* Deliberately panic the kernel (test support). */
#define KSANCOV_IOC_TESTPANIC _IOW('K', 20, uint64_t)
65
/*
 * shared kernel-user mapping
 */

/*
 * NOTE: every macro expansion is fully parenthesized so the constants are
 * safe inside arbitrary expressions (the previous `512UL*1024` form would
 * mis-associate under operators of higher precedence, e.g. `x % MAX`).
 */
#define KSANCOV_MAX_EDGES      (512UL * 1024)          /* cap on instrumented edges */
#define KSANCOV_MAX_HITS       UINT8_MAX               /* hit counters saturate here */
#define KSANCOV_TRACE_MAGIC    ((uint32_t)0x5AD17F5BU) /* ksancov_trace header magic */
#define KSANCOV_COUNTERS_MAGIC ((uint32_t)0x5AD27F6BU) /* ksancov_counters header magic */
#define KSANCOV_EDGEMAP_MAGIC  ((uint32_t)0x5AD37F7BU) /* ksancov_edgemap header magic */
75
/* Header common to every shared coverage buffer flavor. */
struct ksancov_header {
	uint32_t magic;           /* one of the KSANCOV_*_MAGIC values */
	_Atomic uint32_t enabled; /* nonzero while coverage collection is active */
};
80
/*
 * PC-trace flavor of the shared buffer: the kernel appends program-counter
 * entries at 'head'; userspace reads them through the shared mapping.
 */
struct ksancov_trace {
	/* userspace R/O fields */
	union {
		struct ksancov_header hdr;
		struct {
			uint32_t magic;
			_Atomic uint32_t enabled;
		};
	};

	uintptr_t offset;      /* pc entries relative to this */
	uint32_t maxpcs;       /* capacity of pcs[] */
	_Atomic uint32_t head; /* write cursor; readers clamp it to maxpcs */
	uint32_t pcs[];        /* recorded pcs, stored relative to 'offset' */
};
96
/* Edge-counter flavor of the shared buffer: one saturating byte per edge. */
struct ksancov_counters {
	union {
		struct ksancov_header hdr;
		struct {
			uint32_t magic;
			_Atomic uint32_t enabled;
		};
	};

	uint32_t nedges; /* total number of edges */
	uint8_t hits[];  /* hits on each edge (8bit saturating) */
};
109
/* Maps an edge index to the address of its instrumented instruction. */
struct ksancov_edgemap {
	uint32_t magic;   /* KSANCOV_EDGEMAP_MAGIC */
	uint32_t nedges;  /* number of entries in addrs[] */
	uintptr_t offset; /* edge addrs relative to this */
	uint32_t addrs[]; /* address of each edge relative to 'offset' */
};
116
#if XNU_KERNEL_PRIVATE
/*
 * On arm64 the VM_MIN_KERNEL_ADDRESS is too far from %pc to fit into 32-bit value. As a result
 * ksancov reports invalid %pcs. To make at least kernel %pc values correct a different base has
 * to be used for arm.
 */
#if defined(__x86_64__) || defined(__i386__)
#define KSANCOV_PC_OFFSET VM_MIN_KERNEL_ADDRESS
#elif defined(__arm__) || defined(__arm64__)
#define KSANCOV_PC_OFFSET VM_KERNEL_LINK_ADDRESS
#else
#error "Unsupported platform"
#endif

/* Register the ksancov character device (kernel init path). */
int ksancov_init_dev(void);
/* Per-thread sanitizer data slot for 'thread_t' — semantics defined in the
 * implementation file; NOTE(review): thread_t must already be in scope here. */
void **__sanitizer_get_thread_data(thread_t);

/*
 * SanitizerCoverage ABI
 * Entry points emitted by compiler instrumentation (-fsanitize-coverage).
 */
extern void __sanitizer_cov_trace_pc_guard(uint32_t *guard);
extern void __sanitizer_cov_trace_pc_guard_init(uint32_t *start, uint32_t *stop);
extern void __sanitizer_cov_pcs_init(uintptr_t *start, uintptr_t *stop);
extern void __sanitizer_cov_trace_pc(void);
extern void __sanitizer_cov_trace_pc_indirect(void *callee);
#endif /* XNU_KERNEL_PRIVATE */
143
#ifndef KERNEL

#include <strings.h>
#include <assert.h>
#include <unistd.h>

/*
 * ksancov userspace API
 *
 * Usage:
 * 1) open the ksancov device
 * 2) set the coverage mode (trace or edge counters)
 * 3) map the coverage buffer
 * 4) start the trace on a thread
 * 5) flip the enable bit
 */
160
161 static inline int
162 ksancov_open(void)
163 {
164 return open(KSANCOV_PATH, 0);
165 }
166
167 static inline int
168 ksancov_map(int fd, uintptr_t *buf, size_t *sz)
169 {
170 int ret;
171 struct ksancov_buf_desc mc = {0};
172
173 ret = ioctl(fd, KSANCOV_IOC_MAP, &mc);
174 if (ret == -1) {
175 return errno;
176 }
177
178 *buf = mc.ptr;
179 if (sz) {
180 *sz = mc.sz;
181 }
182
183 struct ksancov_trace *trace = (void *)mc.ptr;
184 assert(trace->magic == KSANCOV_TRACE_MAGIC ||
185 trace->magic == KSANCOV_COUNTERS_MAGIC);
186
187 return 0;
188 }
189
190 static inline int
191 ksancov_map_edgemap(int fd, uintptr_t *buf, size_t *sz)
192 {
193 int ret;
194 struct ksancov_buf_desc mc = {0};
195
196 ret = ioctl(fd, KSANCOV_IOC_MAP_EDGEMAP, &mc);
197 if (ret == -1) {
198 return errno;
199 }
200
201 *buf = mc.ptr;
202 if (sz) {
203 *sz = mc.sz;
204 }
205
206 struct ksancov_trace *trace = (void *)mc.ptr;
207 assert(trace->magic == KSANCOV_EDGEMAP_MAGIC);
208
209 return 0;
210 }
211
212 static inline size_t
213 ksancov_nedges(int fd)
214 {
215 size_t nedges;
216 int ret = ioctl(fd, KSANCOV_IOC_NEDGES, &nedges);
217 if (ret == -1) {
218 return SIZE_MAX;
219 }
220 return nedges;
221 }
222
223 static inline int
224 ksancov_mode_trace(int fd, size_t entries)
225 {
226 int ret;
227 ret = ioctl(fd, KSANCOV_IOC_TRACE, &entries);
228 if (ret == -1) {
229 return errno;
230 }
231 return 0;
232 }
233
234 static inline int
235 ksancov_mode_counters(int fd)
236 {
237 int ret;
238 ret = ioctl(fd, KSANCOV_IOC_COUNTERS);
239 if (ret == -1) {
240 return errno;
241 }
242 return 0;
243 }
244
245 static inline int
246 ksancov_thread_self(int fd)
247 {
248 int ret;
249 uintptr_t th = 0;
250 ret = ioctl(fd, KSANCOV_IOC_START, &th);
251 if (ret == -1) {
252 return errno;
253 }
254 return 0;
255 }
256
257 static inline int
258 ksancov_start(void *buf)
259 {
260 struct ksancov_header *hdr = (struct ksancov_header *)buf;
261 atomic_store_explicit(&hdr->enabled, 1, memory_order_relaxed);
262 return 0;
263 }
264
265 static inline int
266 ksancov_stop(void *buf)
267 {
268 struct ksancov_header *hdr = (struct ksancov_header *)buf;
269 atomic_store_explicit(&hdr->enabled, 0, memory_order_relaxed);
270 return 0;
271 }
272
273 static inline int
274 ksancov_reset(void *buf)
275 {
276 struct ksancov_header *hdr = (struct ksancov_header *)buf;
277 if (hdr->magic == KSANCOV_TRACE_MAGIC) {
278 struct ksancov_trace *trace = (struct ksancov_trace *)buf;
279 atomic_store_explicit(&trace->head, 0, memory_order_relaxed);
280 } else if (hdr->magic == KSANCOV_COUNTERS_MAGIC) {
281 struct ksancov_counters *counters = (struct ksancov_counters *)buf;
282 bzero(counters->hits, counters->nedges);
283 } else {
284 return EINVAL;
285 }
286 return 0;
287 }
288
289 static inline uintptr_t
290 ksancov_edge_addr(struct ksancov_edgemap *addrs, size_t idx)
291 {
292 assert(addrs);
293 if (idx >= addrs->nedges) {
294 return 0;
295 }
296 return addrs->addrs[idx] + addrs->offset;
297 }
298
299 static inline size_t
300 ksancov_trace_max_pcs(struct ksancov_trace *trace)
301 {
302 return trace->maxpcs;
303 }
304
305 static inline uintptr_t
306 ksancov_trace_offset(struct ksancov_trace *trace)
307 {
308 assert(trace);
309 return trace->offset;
310 }
311
312 static inline size_t
313 ksancov_trace_head(struct ksancov_trace *trace)
314 {
315 size_t maxlen = trace->maxpcs;
316 size_t head = atomic_load_explicit(&trace->head, memory_order_acquire);
317 return head < maxlen ? head : maxlen;
318 }
319
320 static inline uintptr_t
321 ksancov_trace_entry(struct ksancov_trace *trace, size_t i)
322 {
323 if (i >= trace->head) {
324 return 0;
325 }
326
327 return trace->pcs[i] + trace->offset;
328 }
329
330 #endif
331
332 #endif /* _KSANCOV_H_ */