/*
 * Copyright (c) 2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <string.h>
#include <stdbool.h>

#include <kern/assert.h>
#include <kern/cpu_data.h>
#include <kern/locks.h>
#include <kern/debug.h>
#include <kern/kalloc.h>
#include <kern/zalloc.h>
#include <kern/task.h>
#include <kern/thread.h>

#include <vm/vm_kern.h>
#include <vm/vm_protos.h>

#include <mach/mach_vm.h>
#include <mach/mach_types.h>
#include <mach/mach_port.h>
#include <mach/vm_map.h>
#include <mach/vm_param.h>
#include <mach/machine/vm_param.h>

#include <sys/stat.h> /* dev_t */
#include <miscfs/devfs/devfs.h> /* must come after sys/stat.h */
#include <sys/conf.h> /* must come after sys/stat.h */

#include <libkern/libkern.h>
#include <os/atomic_private.h>
#include <os/overflow.h>

#include <san/ksancov.h>

/* header mess... */
struct uthread;
typedef struct uthread * uthread_t;

#include <sys/sysproto.h>
#include <sys/queue.h>
#include <sys/sysctl.h>

#define USE_PC_TABLE 0
#define KSANCOV_MAX_DEV 64
#define KSANCOV_MAX_PCS (1024U * 64) /* default to 256k buffer => 64k pcs */

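/*
 * A device starts out unconfigured (KS_MODE_NONE) and is switched into
 * exactly one collection mode (PC trace or edge counters) via ioctl.
 */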
enum {
	KS_MODE_NONE,
	KS_MODE_TRACE,
	KS_MODE_COUNTERS,
	KS_MODE_MAX
};

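/*
 * Per-open-instance state for a /dev/ksancov device: the shared
 * trace/counters buffer, its allocated size, and the single thread
 * currently attached for tracing.
 */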
struct ksancov_dev {
	unsigned mode;

	union {
		struct ksancov_trace *trace;
		struct ksancov_counters *counters;
	};
	size_t sz; /* size of allocated trace/counters buffer */

	size_t maxpcs;

	thread_t thread;
	dev_t dev;
	lck_mtx_t lock;
};
typedef struct ksancov_dev * ksancov_dev_t;

extern boolean_t ml_at_interrupt_context(void);
extern boolean_t ml_get_interrupts_enabled(void);

static void ksancov_detach(ksancov_dev_t);

static int dev_major;
static size_t nedges = 0;
static uint32_t __unused npcs = 0;

static _Atomic unsigned active_devs;

static LCK_GRP_DECLARE(ksancov_lck_grp, "ksancov_lck_grp");
static lck_rw_t *ksancov_devs_lck;

/* array of devices indexed by devnode minor */
static ksancov_dev_t ksancov_devs[KSANCOV_MAX_DEV];
static struct ksancov_edgemap *ksancov_edgemap;

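/*
 * Allocate and initialize a device instance for the given dev_t. New
 * instances start in KS_MODE_NONE with no buffer and no attached thread.
 */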
static ksancov_dev_t
create_dev(dev_t dev)
{
	ksancov_dev_t d = kalloc_tag(sizeof(struct ksancov_dev), VM_KERN_MEMORY_DIAG);
	if (!d) {
		return NULL;
	}

	d->mode = KS_MODE_NONE;
	d->trace = NULL;
	d->sz = 0;
	d->maxpcs = KSANCOV_MAX_PCS;
	d->dev = dev;
	d->thread = THREAD_NULL;
	lck_mtx_init(&d->lock, &ksancov_lck_grp, LCK_ATTR_NULL);

	return d;
}

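/*
 * Free the shared buffer (if one was allocated), the mutex, and the
 * instance itself. Callers must have detached any traced thread first.
 */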
static void
free_dev(ksancov_dev_t d)
{
	if (d->mode == KS_MODE_TRACE && d->trace) {
		kmem_free(kernel_map, (uintptr_t)d->trace, d->sz);
	} else if (d->mode == KS_MODE_COUNTERS && d->counters) {
		kmem_free(kernel_map, (uintptr_t)d->counters, d->sz);
	}
	lck_mtx_destroy(&d->lock, &ksancov_lck_grp);
	kfree(d, sizeof(struct ksancov_dev));
}

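/* Indirect-call coverage is not collected; this hook is deliberately a no-op. */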
void
__sanitizer_cov_trace_pc_indirect(void * __unused callee)
{
	return;
}

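/*
 * Each guard word holds the edge index assigned by
 * __sanitizer_cov_trace_pc_guard_init() in its low bits; GUARD_SEEN marks
 * guards whose PC has already been recorded in the global edgemap.
 */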
#define GUARD_SEEN (uint32_t)0x80000000
#define GUARD_IDX_MASK (uint32_t)0x0fffffff

static inline void __attribute__((always_inline))
trace_pc_guard(uint32_t *guardp, void *caller)
{
	/* record the pc for this guard */
	if (guardp) {
		uint32_t gd = *guardp;
		if (__improbable(gd && !(gd & GUARD_SEEN) && ksancov_edgemap)) {
			size_t idx = gd & GUARD_IDX_MASK;
			if (idx < ksancov_edgemap->nedges) {
				ksancov_edgemap->addrs[idx] = (uint32_t)(VM_KERNEL_UNSLIDE(caller) - KSANCOV_PC_OFFSET - 1);
				*guardp |= GUARD_SEEN;
			}
		}
	}

	if (__probable(os_atomic_load(&active_devs, relaxed) == 0)) {
		/* early exit when nothing is active */
		return;
	}

	if (ml_at_interrupt_context()) {
		return;
	}

	uint32_t pc = (uint32_t)(VM_KERNEL_UNSLIDE(caller) - KSANCOV_PC_OFFSET - 1);

	thread_t th = current_thread();
	if (__improbable(th == THREAD_NULL)) {
		return;
	}

	ksancov_dev_t dev = *(ksancov_dev_t *)__sanitizer_get_thread_data(th);
	if (__probable(dev == NULL)) {
		return;
	}

	if (dev->mode == KS_MODE_TRACE) {
		struct ksancov_trace *trace = dev->trace;
		if (os_atomic_load(&trace->enabled, relaxed) == 0) {
			return;
		}

		if (os_atomic_load(&trace->head, relaxed) >= dev->maxpcs) {
			return; /* overflow */
		}

		uint32_t idx = os_atomic_inc_orig(&trace->head, relaxed);
		if (__improbable(idx >= dev->maxpcs)) {
			return;
		}

		trace->pcs[idx] = pc;
	} else {
		size_t idx = *guardp & GUARD_IDX_MASK;

		struct ksancov_counters *counters = dev->counters;
		if (os_atomic_load(&counters->enabled, relaxed) == 0) {
			return;
		}

		/* saturating 8bit add */
		if (counters->hits[idx] < KSANCOV_MAX_HITS) {
			counters->hits[idx]++;
		}
	}
}

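/*
 * Entry points called by the compiler's coverage instrumentation. The
 * return address identifies the instrumented basic block.
 */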
void __attribute__((noinline))
__sanitizer_cov_trace_pc(void)
{
	trace_pc_guard(NULL, __builtin_return_address(0));
}

void __attribute__((noinline))
__sanitizer_cov_trace_pc_guard(uint32_t *guardp)
{
	trace_pc_guard(guardp, __builtin_return_address(0));
}

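/*
 * Assign a unique nonzero index (capped at KSANCOV_MAX_EDGES) to every
 * guard in the given range. Already-numbered guards are left alone, so
 * repeated calls on the same range are harmless.
 */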
void
__sanitizer_cov_trace_pc_guard_init(uint32_t *start, uint32_t *stop)
{
	/* assign a unique number to each guard */
	for (; start != stop; start++) {
		if (*start == 0) {
			if (nedges < KSANCOV_MAX_EDGES) {
				*start = (uint32_t)++nedges;
			}
		}
	}
}

void
__sanitizer_cov_pcs_init(uintptr_t *start, uintptr_t *stop)
{
#if USE_PC_TABLE
	static const uintptr_t pc_table_seen_flag = 0x100;

	for (; start < stop; start += 2) {
		uintptr_t pc = start[0];
		uintptr_t flags = start[1];

		/*
		 * This function gets called multiple times on the same range, so mark the
		 * ones we've seen using unused bits in the flags field.
		 */
		if (flags & pc_table_seen_flag) {
			continue;
		}

		start[1] |= pc_table_seen_flag;
		assert(npcs < KSANCOV_MAX_EDGES - 1);
		edge_addrs[++npcs] = pc;
	}
#else
	(void)start;
	(void)stop;
#endif
}

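/*
 * Share a kernel buffer with the calling task: wrap [base, base + sz) in
 * a named memory entry and map it into the task's address space with the
 * given protection. Returns the user address, or NULL on failure.
 */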
static void *
ksancov_do_map(uintptr_t base, size_t sz, vm_prot_t prot)
{
	kern_return_t kr;
	mach_port_t mem_entry = MACH_PORT_NULL;
	mach_vm_address_t user_addr = 0;
	memory_object_size_t size = sz;

	kr = mach_make_memory_entry_64(kernel_map,
	    &size,
	    (mach_vm_offset_t)base,
	    MAP_MEM_VM_SHARE | prot,
	    &mem_entry,
	    MACH_PORT_NULL);
	if (kr != KERN_SUCCESS) {
		return NULL;
	}

	kr = mach_vm_map_kernel(get_task_map(current_task()),
	    &user_addr,
	    size,
	    0,
	    VM_FLAGS_ANYWHERE,
	    VM_MAP_KERNEL_FLAGS_NONE,
	    VM_KERN_MEMORY_NONE,
	    mem_entry,
	    0,
	    FALSE,
	    prot,
	    prot,
	    VM_INHERIT_SHARE);

	/*
	 * At this point, either vm_map() has taken a reference on the memory entry
	 * and we can release our local reference, or the map failed and the entry
	 * needs to be freed.
	 */
	mach_memory_entry_port_release(mem_entry);

	if (kr != KERN_SUCCESS) {
		return NULL;
	}

	return (void *)user_addr;
}

/*
 * map the sancov buffer into the current process
 */
static int
ksancov_map(ksancov_dev_t d, uintptr_t *bufp, size_t *sizep)
{
	uintptr_t addr;
	size_t size = d->sz;

	if (d->mode == KS_MODE_TRACE) {
		if (!d->trace) {
			return EINVAL;
		}
		addr = (uintptr_t)d->trace;
	} else if (d->mode == KS_MODE_COUNTERS) {
		if (!d->counters) {
			return EINVAL;
		}
		addr = (uintptr_t)d->counters;
	} else {
		return EINVAL; /* not configured */
	}

	void *buf = ksancov_do_map(addr, size, VM_PROT_READ | VM_PROT_WRITE);
	if (buf == NULL) {
		return ENOMEM;
	}

	*bufp = (uintptr_t)buf;
	*sizep = size;

	return 0;
}

/*
 * map the edge -> pc mapping as read-only
 */
static int
ksancov_map_edgemap(uintptr_t *bufp, size_t *sizep)
{
	uintptr_t addr = (uintptr_t)ksancov_edgemap;
	size_t size = sizeof(struct ksancov_edgemap) + ksancov_edgemap->nedges * sizeof(uint32_t);

	void *buf = ksancov_do_map(addr, size, VM_PROT_READ);
	if (buf == NULL) {
		return ENOMEM;
	}

	*bufp = (uintptr_t)buf;
	*sizep = size;
	return 0;
}

/*
 * Device node management
 */

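/*
 * Open one device instance per minor. A minor that is already open is
 * rejected; fresh minors are handed out by the clone routine below.
 */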
static int
ksancov_open(dev_t dev, int flags, int devtype, proc_t p)
{
#pragma unused(flags,devtype,p)
	const int minor_num = minor(dev);

	if (minor_num >= KSANCOV_MAX_DEV) {
		return EBUSY;
	}

	lck_rw_lock_exclusive(ksancov_devs_lck);

	if (ksancov_devs[minor_num]) {
		lck_rw_unlock_exclusive(ksancov_devs_lck);
		return EBUSY;
	}

	ksancov_dev_t d = create_dev(dev);
	if (!d) {
		lck_rw_unlock_exclusive(ksancov_devs_lck);
		return ENOMEM;
	}
	ksancov_devs[minor_num] = d;

	lck_rw_unlock_exclusive(ksancov_devs_lck);

	return 0;
}

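/*
 * Configure the device for PC tracing: allocate a zeroed shared buffer
 * sized for the trace header plus maxpcs 32-bit entries.
 */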
static int
ksancov_trace_alloc(ksancov_dev_t d, size_t maxpcs)
{
	if (d->mode != KS_MODE_NONE) {
		return EBUSY; /* trace/counters already created */
	}
	assert(d->trace == NULL);

	uintptr_t buf;
	size_t sz;
	if (os_mul_and_add_overflow(maxpcs, sizeof(uint32_t), sizeof(struct ksancov_trace), &sz)) {
		return EINVAL;
	}

	/* allocate the shared memory buffer */
	kern_return_t kr = kmem_alloc_flags(kernel_map, &buf, sz, VM_KERN_MEMORY_DIAG, KMA_ZERO);
	if (kr != KERN_SUCCESS) {
		return ENOMEM;
	}

	struct ksancov_trace *trace = (struct ksancov_trace *)buf;
	trace->magic = KSANCOV_TRACE_MAGIC;
	trace->offset = KSANCOV_PC_OFFSET;
	os_atomic_init(&trace->head, 0);
	os_atomic_init(&trace->enabled, 0);
	trace->maxpcs = (uint32_t)maxpcs;

	d->trace = trace;
	d->sz = sz;
	d->maxpcs = maxpcs;
	d->mode = KS_MODE_TRACE;

	return 0;
}

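/*
 * Configure the device for edge counters: one saturating 8-bit hit
 * counter per edge in the global edgemap.
 */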
static int
ksancov_counters_alloc(ksancov_dev_t d)
{
	if (d->mode != KS_MODE_NONE) {
		return EBUSY; /* trace/counters already created */
	}
	assert(d->counters == NULL);

	uintptr_t buf;
	size_t sz = sizeof(struct ksancov_counters) + ksancov_edgemap->nedges * sizeof(uint8_t);

	/* allocate the shared memory buffer */
	kern_return_t kr = kmem_alloc_flags(kernel_map, &buf, sz, VM_KERN_MEMORY_DIAG, KMA_ZERO);
	if (kr != KERN_SUCCESS) {
		return ENOMEM;
	}

	struct ksancov_counters *counters = (struct ksancov_counters *)buf;
	counters->magic = KSANCOV_COUNTERS_MAGIC;
	counters->nedges = ksancov_edgemap->nedges;
	os_atomic_init(&counters->enabled, 0);

	d->counters = counters;
	d->sz = sz;
	d->mode = KS_MODE_COUNTERS;

	return 0;
}

/*
 * attach a thread to a ksancov dev instance
 */
static int
ksancov_attach(ksancov_dev_t d, thread_t th)
{
	if (d->mode == KS_MODE_NONE) {
		return EINVAL; /* not configured */
	}

	if (th != current_thread()) {
		/* can only attach to self presently */
		return EINVAL;
	}

	ksancov_dev_t *devp = (void *)__sanitizer_get_thread_data(th);
	if (*devp) {
		return EBUSY; /* one dev per thread */
	}

	if (d->thread != THREAD_NULL) {
		ksancov_detach(d);
	}

	d->thread = th;
	thread_reference(d->thread);

	os_atomic_store(devp, d, relaxed);
	os_atomic_add(&active_devs, 1, relaxed);

	return 0;
}

extern void
thread_wait(
	thread_t thread,
	boolean_t until_not_runnable);


/*
 * disconnect thread from ksancov dev
 */
static void
ksancov_detach(ksancov_dev_t d)
{
	if (d->thread == THREAD_NULL) {
		/* no thread attached */
		return;
	}

	/* disconnect dev from thread */
	ksancov_dev_t *devp = (void *)__sanitizer_get_thread_data(d->thread);
	if (*devp != NULL) {
		assert(*devp == d);
		os_atomic_store(devp, NULL, relaxed);
	}

	if (d->thread != current_thread()) {
		/* wait until it's safe to yank */
		thread_wait(d->thread, TRUE);
	}

	/* drop our thread reference */
	thread_deallocate(d->thread);
	d->thread = THREAD_NULL;
}

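/*
 * Close unwinds open: unpublish the device so it can no longer be looked
 * up, stop any active collection, detach the traced thread, and free the
 * instance.
 */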
static int
ksancov_close(dev_t dev, int flags, int devtype, proc_t p)
{
#pragma unused(flags,devtype,p)
	const int minor_num = minor(dev);

	lck_rw_lock_exclusive(ksancov_devs_lck);
	ksancov_dev_t d = ksancov_devs[minor_num];
	ksancov_devs[minor_num] = NULL; /* dev no longer discoverable */
	lck_rw_unlock_exclusive(ksancov_devs_lck);

	/*
	 * No need to lock d here: nobody else can hold a reference to it
	 * except this thread and the thread that is detached below.
	 */

	if (!d) {
		return ENXIO;
	}

	if (d->mode == KS_MODE_TRACE && d->trace) {
		os_atomic_sub(&active_devs, 1, relaxed);
		os_atomic_store(&d->trace->enabled, 0, relaxed); /* stop tracing */
	} else if (d->mode == KS_MODE_COUNTERS && d->counters) {
		os_atomic_sub(&active_devs, 1, relaxed);
		os_atomic_store(&d->counters->enabled, 0, relaxed); /* stop counting */
	}

	ksancov_detach(d);
	free_dev(d);

	return 0;
}

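/*
 * Test helper for coverage-guided fuzzing: comparing the guess one nibble
 * at a time produces a chain of sixteen distinct edges, letting a fuzzer
 * that observes coverage discover the target value incrementally.
 */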
static void
ksancov_testpanic(volatile uint64_t guess)
{
	const uint64_t tgt = 0xf85de3b12891c817UL;

#define X(n) ((tgt & (0xfUL << (4*n))) == (guess & (0xfUL << (4*n))))

	if (X(0)) {
		if (X(1)) {
			if (X(2)) {
				if (X(3)) {
					if (X(4)) {
						if (X(5)) {
							if (X(6)) {
								if (X(7)) {
									if (X(8)) {
										if (X(9)) {
											if (X(10)) {
												if (X(11)) {
													if (X(12)) {
														if (X(13)) {
															if (X(14)) {
																if (X(15)) {
																	panic("ksancov: found test value\n");
																}
															}
														}
													}
												}
											}
										}
									}
								}
							}
						}
					}
				}
			}
		}
	}
}

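/*
 * ioctl interface. A typical client (a sketch of the expected flow, not
 * the only valid order): select a mode with KSANCOV_IOC_TRACE or
 * KSANCOV_IOC_COUNTERS, map the shared buffer with KSANCOV_IOC_MAP, then
 * attach the calling thread with KSANCOV_IOC_START and flip `enabled` in
 * the mapped header to begin collection.
 */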
static int
ksancov_ioctl(dev_t dev, unsigned long cmd, caddr_t _data, int fflag, proc_t p)
{
#pragma unused(fflag,p)
	struct ksancov_buf_desc *mcmd;
	void *data = (void *)_data;

	lck_rw_lock_shared(ksancov_devs_lck);
	ksancov_dev_t d = ksancov_devs[minor(dev)];
	if (!d) {
		lck_rw_unlock_shared(ksancov_devs_lck);
		return EINVAL; /* dev not open */
	}

	int ret = 0;

	switch (cmd) {
	case KSANCOV_IOC_TRACE:
		lck_mtx_lock(&d->lock);
		ret = ksancov_trace_alloc(d, *(size_t *)data);
		lck_mtx_unlock(&d->lock);
		break;
	case KSANCOV_IOC_COUNTERS:
		lck_mtx_lock(&d->lock);
		ret = ksancov_counters_alloc(d);
		lck_mtx_unlock(&d->lock);
		break;
	case KSANCOV_IOC_MAP:
		mcmd = (struct ksancov_buf_desc *)data;
		lck_mtx_lock(&d->lock);
		ret = ksancov_map(d, &mcmd->ptr, &mcmd->sz);
		lck_mtx_unlock(&d->lock);
		break;
	case KSANCOV_IOC_MAP_EDGEMAP:
		mcmd = (struct ksancov_buf_desc *)data;
		ret = ksancov_map_edgemap(&mcmd->ptr, &mcmd->sz);
		break;
	case KSANCOV_IOC_START:
		lck_mtx_lock(&d->lock);
		ret = ksancov_attach(d, current_thread());
		lck_mtx_unlock(&d->lock);
		break;
	case KSANCOV_IOC_NEDGES:
		*(size_t *)data = nedges;
		break;
	case KSANCOV_IOC_TESTPANIC:
		ksancov_testpanic(*(uint64_t *)data);
		break;
	default:
		ret = EINVAL;
		break;
	}

	lck_rw_unlock_shared(ksancov_devs_lck);

	return ret;
}

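/*
 * devfs clone callback: hand out the first unused minor so each client
 * that opens the clone node gets a private instance.
 */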
static int
ksancov_dev_clone(dev_t dev, int action)
{
#pragma unused(dev)
	if (action == DEVFS_CLONE_ALLOC) {
		for (int i = 0; i < KSANCOV_MAX_DEV; i++) {
			if (ksancov_devs[i] == NULL) {
				return i;
			}
		}
	} else if (action == DEVFS_CLONE_FREE) {
		return 0;
	}

	return -1;
}

static const struct cdevsw
ksancov_cdev = {
	.d_open = ksancov_open,
	.d_close = ksancov_close,
	.d_ioctl = ksancov_ioctl,

	.d_read = eno_rdwrt,
	.d_write = eno_rdwrt,
	.d_stop = eno_stop,
	.d_reset = eno_reset,
	.d_select = eno_select,
	.d_mmap = eno_mmap,
	.d_strategy = eno_strat,
	.d_type = 0
};

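/*
 * Register the character device, create the cloning devfs node, and
 * allocate the global edge -> PC map shared by all instances.
 */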
int
ksancov_init_dev(void)
{
	dev_major = cdevsw_add(-1, &ksancov_cdev);
	if (dev_major < 0) {
		printf("ksancov: failed to allocate major device node\n");
		return -1;
	}

	dev_t dev = makedev(dev_major, 0);
	void *node = devfs_make_node_clone(dev, DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666,
	    ksancov_dev_clone, KSANCOV_DEVNODE);
	if (!node) {
		printf("ksancov: failed to create device node\n");
		return -1;
	}

	/* This could be moved to the first use of /dev/ksancov to save memory */
	uintptr_t buf;
	size_t sz = sizeof(struct ksancov_edgemap) + KSANCOV_MAX_EDGES * sizeof(uint32_t);

	kern_return_t kr = kmem_alloc_flags(kernel_map, &buf, sz, VM_KERN_MEMORY_DIAG, KMA_ZERO);
	if (kr) {
		printf("ksancov: failed to allocate edge addr map\n");
		return -1;
	}

	ksancov_edgemap = (void *)buf;
	ksancov_edgemap->magic = KSANCOV_EDGEMAP_MAGIC;
	ksancov_edgemap->nedges = (uint32_t)nedges;
	ksancov_edgemap->offset = KSANCOV_PC_OFFSET;

	ksancov_devs_lck = lck_rw_alloc_init(&ksancov_lck_grp, LCK_ATTR_NULL);

	return 0;
}