/*
 * Copyright (c) 2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <string.h>
#include <stdbool.h>

#include <kern/assert.h>
#include <kern/cpu_data.h>
#include <kern/locks.h>
#include <kern/debug.h>
#include <kern/kalloc.h>
#include <kern/zalloc.h>
#include <kern/task.h>
#include <kern/thread.h>

#include <vm/vm_kern.h>
#include <vm/vm_protos.h>

#include <mach/mach_vm.h>
#include <mach/mach_types.h>
#include <mach/mach_port.h>
#include <mach/vm_map.h>
#include <mach/vm_param.h>
#include <mach/machine/vm_param.h>

#include <sys/stat.h> /* dev_t */
#include <miscfs/devfs/devfs.h> /* must come after sys/stat.h */
#include <sys/conf.h> /* must come after sys/stat.h */

#include <libkern/libkern.h>
#include <os/atomic_private.h>
#include <os/overflow.h>

#include <san/ksancov.h>

/* header mess... */
struct uthread;
typedef struct uthread * uthread_t;

#include <sys/sysproto.h>
#include <sys/queue.h>
#include <sys/sysctl.h>

#define USE_PC_TABLE 0
#define KSANCOV_MAX_DEV 64
#define KSANCOV_MAX_PCS (1024U * 64) /* default to 256k buffer => 64k pcs */

enum {
	KS_MODE_NONE,
	KS_MODE_TRACE,
	KS_MODE_COUNTERS,
	KS_MODE_MAX
};

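/* per-open device instance: one trace or counters buffer and at most one attached thread */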
struct ksancov_dev {
	unsigned mode;

	union {
		struct ksancov_trace *trace;
		struct ksancov_counters *counters;
	};
	size_t sz; /* size of allocated trace/counters buffer */

	size_t maxpcs;

	thread_t thread;
	dev_t dev;
	lck_mtx_t lock;
};
typedef struct ksancov_dev * ksancov_dev_t;

extern boolean_t ml_at_interrupt_context(void);
extern boolean_t ml_get_interrupts_enabled(void);

static void ksancov_detach(ksancov_dev_t);

static int dev_major;
static size_t nedges = 0;
static uint32_t __unused npcs = 0;

static _Atomic unsigned active_devs;

static LCK_GRP_DECLARE(ksancov_lck_grp, "ksancov_lck_grp");
static LCK_RW_DECLARE(ksancov_devs_lck, &ksancov_lck_grp);

/* array of devices indexed by devnode minor */
static ksancov_dev_t ksancov_devs[KSANCOV_MAX_DEV];
static struct ksancov_edgemap *ksancov_edgemap;


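/* allocate and initialize a ksancov_dev for the given devnode */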
static ksancov_dev_t
create_dev(dev_t dev)
{
	ksancov_dev_t d = kalloc_tag(sizeof(struct ksancov_dev), VM_KERN_MEMORY_DIAG);
	if (!d) {
		return NULL;
	}

	d->mode = KS_MODE_NONE;
	d->trace = NULL;
	d->sz = 0;
	d->maxpcs = KSANCOV_MAX_PCS;
	d->dev = dev;
	d->thread = THREAD_NULL;
	lck_mtx_init(&d->lock, &ksancov_lck_grp, LCK_ATTR_NULL);

	return d;
}

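/* release a dev's shared buffer (if any), its lock, and the dev itself */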
static void
free_dev(ksancov_dev_t d)
{
	if (d->mode == KS_MODE_TRACE && d->trace) {
		kmem_free(kernel_map, (uintptr_t)d->trace, d->sz);
	} else if (d->mode == KS_MODE_COUNTERS && d->counters) {
		kmem_free(kernel_map, (uintptr_t)d->counters, d->sz);
	}
	lck_mtx_destroy(&d->lock, &ksancov_lck_grp);
	kfree(d, sizeof(struct ksancov_dev));
}

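/* indirect-call coverage hook; intentionally a no-op here */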
void
__sanitizer_cov_trace_pc_indirect(void * __unused callee)
{
	return;
}

#define GUARD_SEEN (uint32_t)0x80000000
#define GUARD_IDX_MASK (uint32_t)0x0fffffff

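/*
 * Common fast path for the coverage hooks: record this guard's PC in the
 * edge map the first time it fires, then, if the current thread has a
 * ksancov dev attached, either append the PC to its trace buffer or bump
 * the per-edge hit counter, depending on the dev's mode.
 */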
static inline void __attribute__((always_inline))
trace_pc_guard(uint32_t *guardp, void *caller)
{
	/* record the pc for this guard */
	if (guardp) {
		uint32_t gd = *guardp;
		if (__improbable(gd && !(gd & GUARD_SEEN) && ksancov_edgemap)) {
			size_t idx = gd & GUARD_IDX_MASK;
			if (idx < ksancov_edgemap->nedges) {
				ksancov_edgemap->addrs[idx] = (uint32_t)(VM_KERNEL_UNSLIDE(caller) - KSANCOV_PC_OFFSET - 1);
				*guardp |= GUARD_SEEN;
			}
		}
	}

	if (__probable(os_atomic_load(&active_devs, relaxed) == 0)) {
		/* early exit when nothing is active */
		return;
	}

	if (ml_at_interrupt_context()) {
		return;
	}

	uint32_t pc = (uint32_t)(VM_KERNEL_UNSLIDE(caller) - KSANCOV_PC_OFFSET - 1);

	thread_t th = current_thread();
	if (__improbable(th == THREAD_NULL)) {
		return;
	}

	ksancov_dev_t dev = *(ksancov_dev_t *)__sanitizer_get_thread_data(th);
	if (__probable(dev == NULL)) {
		return;
	}

	if (dev->mode == KS_MODE_TRACE) {
		struct ksancov_trace *trace = dev->trace;
		if (os_atomic_load(&trace->enabled, relaxed) == 0) {
			return;
		}

		if (os_atomic_load(&trace->head, relaxed) >= dev->maxpcs) {
			return; /* overflow */
		}

		uint32_t idx = os_atomic_inc_orig(&trace->head, relaxed);
		if (__improbable(idx >= dev->maxpcs)) {
			return;
		}

		trace->pcs[idx] = pc;
	} else {
		size_t idx = *guardp & GUARD_IDX_MASK;

		struct ksancov_counters *counters = dev->counters;
		if (os_atomic_load(&counters->enabled, relaxed) == 0) {
			return;
		}

		/* saturating 8bit add */
		if (counters->hits[idx] < KSANCOV_MAX_HITS) {
			counters->hits[idx]++;
		}
	}
}

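/*
 * SanitizerCoverage entry points emitted by the compiler at instrumented
 * program points. The caller's return address identifies the edge.
 */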
void __attribute__((noinline))
__sanitizer_cov_trace_pc(void)
{
	trace_pc_guard(NULL, __builtin_return_address(0));
}

void __attribute__((noinline))
__sanitizer_cov_trace_pc_guard(uint32_t *guardp)
{
	trace_pc_guard(guardp, __builtin_return_address(0));
}

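/*
 * Guard-array initializer invoked by the SanitizerCoverage instrumentation
 * with the bounds of a module's guard array; each guard receives a unique,
 * stable edge index.
 */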
void
__sanitizer_cov_trace_pc_guard_init(uint32_t *start, uint32_t *stop)
{
	/* assign a unique number to each guard */
	for (; start != stop; start++) {
		if (*start == 0) {
			if (nedges < KSANCOV_MAX_EDGES) {
				*start = (uint32_t)++nedges;
			}
		}
	}
}

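/*
 * Receives the compiler-emitted table of (pc, flags) pairs for a module.
 * Only consumed when USE_PC_TABLE is enabled; otherwise a no-op.
 */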
void
__sanitizer_cov_pcs_init(uintptr_t *start, uintptr_t *stop)
{
#if USE_PC_TABLE
	static const uintptr_t pc_table_seen_flag = 0x100;

	for (; start < stop; start += 2) {
		uintptr_t pc = start[0];
		uintptr_t flags = start[1];

		/*
		 * This function gets called multiple times on the same range, so mark the
		 * ones we've seen using unused bits in the flags field.
		 */
		if (flags & pc_table_seen_flag) {
			continue;
		}

		start[1] |= pc_table_seen_flag;
		assert(npcs < KSANCOV_MAX_EDGES - 1);
		edge_addrs[++npcs] = pc;
	}
#else
	(void)start;
	(void)stop;
#endif
}

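/*
 * Wrap [base, base + sz) in a named memory entry and map it, shared, into
 * the calling task. Returns the user address, or NULL on failure.
 */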
static void *
ksancov_do_map(uintptr_t base, size_t sz, vm_prot_t prot)
{
	kern_return_t kr;
	mach_port_t mem_entry = MACH_PORT_NULL;
	mach_vm_address_t user_addr = 0;
	memory_object_size_t size = sz;

	kr = mach_make_memory_entry_64(kernel_map,
	    &size,
	    (mach_vm_offset_t)base,
	    MAP_MEM_VM_SHARE | prot,
	    &mem_entry,
	    MACH_PORT_NULL);
	if (kr != KERN_SUCCESS) {
		return NULL;
	}

	kr = mach_vm_map_kernel(get_task_map(current_task()),
	    &user_addr,
	    size,
	    0,
	    VM_FLAGS_ANYWHERE,
	    VM_MAP_KERNEL_FLAGS_NONE,
	    VM_KERN_MEMORY_NONE,
	    mem_entry,
	    0,
	    FALSE,
	    prot,
	    prot,
	    VM_INHERIT_SHARE);

	/*
	 * At this point, either vm_map() has taken a reference on the memory entry
	 * and we can release our local reference, or the map failed and the entry
	 * needs to be freed.
	 */
	mach_memory_entry_port_release(mem_entry);

	if (kr != KERN_SUCCESS) {
		return NULL;
	}

	return (void *)user_addr;
}

/*
 * map the sancov buffer into the current process
 */
static int
ksancov_map(ksancov_dev_t d, uintptr_t *bufp, size_t *sizep)
{
	uintptr_t addr;
	size_t size = d->sz;

	if (d->mode == KS_MODE_TRACE) {
		if (!d->trace) {
			return EINVAL;
		}
		addr = (uintptr_t)d->trace;
	} else if (d->mode == KS_MODE_COUNTERS) {
		if (!d->counters) {
			return EINVAL;
		}
		addr = (uintptr_t)d->counters;
	} else {
		return EINVAL; /* not configured */
	}

	void *buf = ksancov_do_map(addr, size, VM_PROT_READ | VM_PROT_WRITE);
	if (buf == NULL) {
		return ENOMEM;
	}

	*bufp = (uintptr_t)buf;
	*sizep = size;

	return 0;
}

/*
 * map the edge -> pc mapping as read-only
 */
static int
ksancov_map_edgemap(uintptr_t *bufp, size_t *sizep)
{
	uintptr_t addr = (uintptr_t)ksancov_edgemap;
	size_t size = sizeof(struct ksancov_edgemap) + ksancov_edgemap->nedges * sizeof(uint32_t);

	void *buf = ksancov_do_map(addr, size, VM_PROT_READ);
	if (buf == NULL) {
		return ENOMEM;
	}

	*bufp = (uintptr_t)buf;
	*sizep = size;
	return 0;
}

/*
 * Device node management
 */

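/* open one /dev/ksancov clone: allocate a fresh dev for this minor, if free */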
static int
ksancov_open(dev_t dev, int flags, int devtype, proc_t p)
{
#pragma unused(flags,devtype,p)
	const int minor_num = minor(dev);

	if (minor_num >= KSANCOV_MAX_DEV) {
		return EBUSY;
	}

	lck_rw_lock_exclusive(&ksancov_devs_lck);

	if (ksancov_devs[minor_num]) {
		lck_rw_unlock_exclusive(&ksancov_devs_lck);
		return EBUSY;
	}

	ksancov_dev_t d = create_dev(dev);
	if (!d) {
		lck_rw_unlock_exclusive(&ksancov_devs_lck);
		return ENOMEM;
	}
	ksancov_devs[minor_num] = d;

	lck_rw_unlock_exclusive(&ksancov_devs_lck);

	return 0;
}

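/*
 * Allocate the shared trace buffer (header plus maxpcs 32-bit PC slots) and
 * switch the dev into trace mode. Fails if a mode has already been chosen.
 */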
static int
ksancov_trace_alloc(ksancov_dev_t d, size_t maxpcs)
{
	if (d->mode != KS_MODE_NONE) {
		return EBUSY; /* trace/counters already created */
	}
	assert(d->trace == NULL);

	uintptr_t buf;
	size_t sz;
	if (os_mul_and_add_overflow(maxpcs, sizeof(uint32_t), sizeof(struct ksancov_trace), &sz)) {
		return EINVAL;
	}

	/* allocate the shared memory buffer */
	kern_return_t kr = kmem_alloc_flags(kernel_map, &buf, sz, VM_KERN_MEMORY_DIAG, KMA_ZERO);
	if (kr != KERN_SUCCESS) {
		return ENOMEM;
	}

	struct ksancov_trace *trace = (struct ksancov_trace *)buf;
	trace->magic = KSANCOV_TRACE_MAGIC;
	trace->offset = KSANCOV_PC_OFFSET;
	os_atomic_init(&trace->head, 0);
	os_atomic_init(&trace->enabled, 0);
	trace->maxpcs = (uint32_t)maxpcs;

	d->trace = trace;
	d->sz = sz;
	d->maxpcs = maxpcs;
	d->mode = KS_MODE_TRACE;

	return 0;
}

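/*
 * Allocate the shared counters buffer, one 8-bit hit counter per known edge,
 * and switch the dev into counters mode.
 */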
static int
ksancov_counters_alloc(ksancov_dev_t d)
{
	if (d->mode != KS_MODE_NONE) {
		return EBUSY; /* trace/counters already created */
	}
	assert(d->counters == NULL);

	uintptr_t buf;
	size_t sz = sizeof(struct ksancov_counters) + ksancov_edgemap->nedges * sizeof(uint8_t);

	/* allocate the shared memory buffer */
	kern_return_t kr = kmem_alloc_flags(kernel_map, &buf, sz, VM_KERN_MEMORY_DIAG, KMA_ZERO);
	if (kr != KERN_SUCCESS) {
		return ENOMEM;
	}

	struct ksancov_counters *counters = (struct ksancov_counters *)buf;
	counters->magic = KSANCOV_COUNTERS_MAGIC;
	counters->nedges = ksancov_edgemap->nedges;
	os_atomic_init(&counters->enabled, 0);

	d->counters = counters;
	d->sz = sz;
	d->mode = KS_MODE_COUNTERS;

	return 0;
}

/*
 * attach a thread to a ksancov dev instance
 */
static int
ksancov_attach(ksancov_dev_t d, thread_t th)
{
	if (d->mode == KS_MODE_NONE) {
		return EINVAL; /* not configured */
	}

	if (th != current_thread()) {
		/* can only attach to self presently */
		return EINVAL;
	}

	ksancov_dev_t *devp = (void *)__sanitizer_get_thread_data(th);
	if (*devp) {
		return EBUSY; /* one dev per thread */
	}

	if (d->thread != THREAD_NULL) {
		ksancov_detach(d);
	}

	d->thread = th;
	thread_reference(d->thread);

	os_atomic_store(devp, d, relaxed);
	os_atomic_add(&active_devs, 1, relaxed);

	return 0;
}

extern void
thread_wait(
	thread_t thread,
	boolean_t until_not_runnable);


/*
 * disconnect thread from ksancov dev
 */
static void
ksancov_detach(ksancov_dev_t d)
{
	if (d->thread == THREAD_NULL) {
		/* no thread attached */
		return;
	}

	/* disconnect dev from thread */
	ksancov_dev_t *devp = (void *)__sanitizer_get_thread_data(d->thread);
	if (*devp != NULL) {
		assert(*devp == d);
		os_atomic_store(devp, NULL, relaxed);
	}

	if (d->thread != current_thread()) {
		/* wait until it's safe to yank */
		thread_wait(d->thread, TRUE);
	}

	assert(active_devs >= 1);
	os_atomic_sub(&active_devs, 1, relaxed);

	/* drop our thread reference */
	thread_deallocate(d->thread);
	d->thread = THREAD_NULL;
}

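/*
 * Tear down one clone: unpublish the dev, stop collection, detach any
 * attached thread, and free the buffers.
 */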
static int
ksancov_close(dev_t dev, int flags, int devtype, proc_t p)
{
#pragma unused(flags,devtype,p)
	const int minor_num = minor(dev);

	lck_rw_lock_exclusive(&ksancov_devs_lck);
	ksancov_dev_t d = ksancov_devs[minor_num];
	ksancov_devs[minor_num] = NULL; /* dev no longer discoverable */
	lck_rw_unlock_exclusive(&ksancov_devs_lck);

	/*
	 * No need to lock d here, as no one else holds a reference to it
	 * except for this thread and the one which is going to be detached
	 * below.
	 */

	if (!d) {
		return ENXIO;
	}

	if (d->mode == KS_MODE_TRACE && d->trace) {
		os_atomic_store(&d->trace->enabled, 0, relaxed); /* stop tracing */
	} else if (d->mode == KS_MODE_COUNTERS && d->counters) {
		os_atomic_store(&d->counters->enabled, 0, relaxed); /* stop counting */
	}

	ksancov_detach(d);
	free_dev(d);

	return 0;
}

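/*
 * Guess the 64-bit target value one nibble at a time; each correct nibble
 * opens a new comparison edge, so coverage-guided fuzzing of this ioctl
 * should converge on the value and hit the panic. Used to validate tooling.
 */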
static void
ksancov_testpanic(volatile uint64_t guess)
{
	const uint64_t tgt = 0xf85de3b12891c817UL;

#define X(n) ((tgt & (0xfUL << (4*n))) == (guess & (0xfUL << (4*n))))

	if (X(0)) {
		if (X(1)) {
			if (X(2)) {
				if (X(3)) {
					if (X(4)) {
						if (X(5)) {
							if (X(6)) {
								if (X(7)) {
									if (X(8)) {
										if (X(9)) {
											if (X(10)) {
												if (X(11)) {
													if (X(12)) {
														if (X(13)) {
															if (X(14)) {
																if (X(15)) {
																	panic("ksancov: found test value\n");
																}
															}
														}
													}
												}
											}
										}
									}
								}
							}
						}
					}
				}
			}
		}
	}
}

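/*
 * ioctl interface: configure the buffer (TRACE/COUNTERS), map buffers into
 * userspace (MAP/MAP_EDGEMAP), attach the calling thread (START), query the
 * edge count (NEDGES), or exercise the test panic (TESTPANIC).
 */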
static int
ksancov_ioctl(dev_t dev, unsigned long cmd, caddr_t _data, int fflag, proc_t p)
{
#pragma unused(fflag,p)
	struct ksancov_buf_desc *mcmd;
	void *data = (void *)_data;

	lck_rw_lock_shared(&ksancov_devs_lck);
	ksancov_dev_t d = ksancov_devs[minor(dev)];
	if (!d) {
		lck_rw_unlock_shared(&ksancov_devs_lck);
		return EINVAL; /* dev not open */
	}

	int ret = 0;

	switch (cmd) {
	case KSANCOV_IOC_TRACE:
		lck_mtx_lock(&d->lock);
		ret = ksancov_trace_alloc(d, *(size_t *)data);
		lck_mtx_unlock(&d->lock);
		break;
	case KSANCOV_IOC_COUNTERS:
		lck_mtx_lock(&d->lock);
		ret = ksancov_counters_alloc(d);
		lck_mtx_unlock(&d->lock);
		break;
	case KSANCOV_IOC_MAP:
		mcmd = (struct ksancov_buf_desc *)data;
		lck_mtx_lock(&d->lock);
		ret = ksancov_map(d, &mcmd->ptr, &mcmd->sz);
		lck_mtx_unlock(&d->lock);
		break;
	case KSANCOV_IOC_MAP_EDGEMAP:
		mcmd = (struct ksancov_buf_desc *)data;
		ret = ksancov_map_edgemap(&mcmd->ptr, &mcmd->sz);
		break;
	case KSANCOV_IOC_START:
		lck_mtx_lock(&d->lock);
		ret = ksancov_attach(d, current_thread());
		lck_mtx_unlock(&d->lock);
		break;
	case KSANCOV_IOC_NEDGES:
		*(size_t *)data = nedges;
		break;
	case KSANCOV_IOC_TESTPANIC:
		ksancov_testpanic(*(uint64_t *)data);
		break;
	default:
		ret = EINVAL;
		break;
	}

	lck_rw_unlock_shared(&ksancov_devs_lck);

	return ret;
}

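/* devfs clone callback: hand out the lowest unused minor on open */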
static int
ksancov_dev_clone(dev_t dev, int action)
{
#pragma unused(dev)
	if (action == DEVFS_CLONE_ALLOC) {
		for (int i = 0; i < KSANCOV_MAX_DEV; i++) {
			if (ksancov_devs[i] == NULL) {
				return i;
			}
		}
	} else if (action == DEVFS_CLONE_FREE) {
		return 0;
	}

	return -1;
}

static const struct cdevsw
    ksancov_cdev = {
	.d_open = ksancov_open,
	.d_close = ksancov_close,
	.d_ioctl = ksancov_ioctl,

	.d_read = eno_rdwrt,
	.d_write = eno_rdwrt,
	.d_stop = eno_stop,
	.d_reset = eno_reset,
	.d_select = eno_select,
	.d_mmap = eno_mmap,
	.d_strategy = eno_strat,
	.d_type = 0
};

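/*
 * Register the ksancov character device, create the cloning /dev node, and
 * allocate the global edge-to-PC map.
 */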
int
ksancov_init_dev(void)
{
	dev_major = cdevsw_add(-1, &ksancov_cdev);
	if (dev_major < 0) {
		printf("ksancov: failed to allocate major device node\n");
		return -1;
	}

	dev_t dev = makedev(dev_major, 0);
	void *node = devfs_make_node_clone(dev, DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666,
	    ksancov_dev_clone, KSANCOV_DEVNODE);
	if (!node) {
		printf("ksancov: failed to create device node\n");
		return -1;
	}

	/* This could be moved to the first use of /dev/ksancov to save memory */
	uintptr_t buf;
	size_t sz = sizeof(struct ksancov_edgemap) + KSANCOV_MAX_EDGES * sizeof(uint32_t);

	kern_return_t kr = kmem_alloc_flags(kernel_map, &buf, sz, VM_KERN_MEMORY_DIAG, KMA_ZERO);
	if (kr) {
		printf("ksancov: failed to allocate edge addr map\n");
		return -1;
	}

	ksancov_edgemap = (void *)buf;
	ksancov_edgemap->magic = KSANCOV_EDGEMAP_MAGIC;
	ksancov_edgemap->nedges = (uint32_t)nedges;
	ksancov_edgemap->offset = KSANCOV_PC_OFFSET;

	return 0;
}