/* san/ksancov.c (xnu-6153.101.6) */
/*
 * Copyright (c) 2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <string.h>
#include <stdbool.h>
#include <stdatomic.h>

#include <kern/assert.h>
#include <kern/cpu_data.h>
#include <kern/locks.h>
#include <kern/debug.h>
#include <kern/kalloc.h>
#include <kern/zalloc.h>
#include <kern/task.h>
#include <kern/thread.h>

#include <vm/vm_kern.h>
#include <vm/vm_protos.h>

#include <mach/mach_vm.h>
#include <mach/mach_types.h>
#include <mach/mach_port.h>
#include <mach/vm_map.h>
#include <mach/vm_param.h>
#include <mach/machine/vm_param.h>
#include <machine/atomic.h>

#include <sys/stat.h>           /* dev_t */
#include <miscfs/devfs/devfs.h> /* must come after sys/stat.h */
#include <sys/conf.h>           /* must come after sys/stat.h */

#include <libkern/libkern.h>
#include <libkern/OSAtomic.h>
#include <os/overflow.h>

#include <san/ksancov.h>

/* header mess... */
struct uthread;
typedef struct uthread * uthread_t;

#include <sys/sysproto.h>
#include <sys/queue.h>
#include <sys/sysctl.h>

#define USE_PC_TABLE 0
#define KSANCOV_MAX_DEV 64

extern boolean_t ml_at_interrupt_context(void);
extern boolean_t ml_get_interrupts_enabled(void);

static int ksancov_detach(dev_t dev);

static int dev_major;
static size_t nedges = 0;
static uint32_t __unused npcs = 0;

static _Atomic unsigned active_devs;

enum {
	KS_MODE_NONE,
	KS_MODE_TRACE,
	KS_MODE_COUNTERS,
	KS_MODE_MAX
};

struct ksancov_dev {
	unsigned mode;

	union {
		struct ksancov_trace *trace;
		struct ksancov_counters *counters;
	};
	size_t sz; /* size of allocated trace/counters buffer */

	size_t maxpcs;

	thread_t thread;
	dev_t dev;
};

/* array of devices indexed by devnode minor */
static struct ksancov_dev *ksancov_devs[KSANCOV_MAX_DEV];

static struct ksancov_edgemap *ksancov_edgemap;

static inline struct ksancov_dev *
get_dev(dev_t dev)
{
	int mn = minor(dev);
	return ksancov_devs[mn];
}

void
__sanitizer_cov_trace_pc_indirect(void * __unused callee)
{
	return;
}

#define GUARD_SEEN     (uint32_t)0x80000000
#define GUARD_IDX_MASK (uint32_t)0x0fffffff

static inline void __attribute__((always_inline))
trace_pc_guard(uint32_t *guardp, void *caller)
{
	/* record the pc for this guard */
	if (guardp) {
		uint32_t gd = *guardp;
		if (__improbable(gd && !(gd & GUARD_SEEN) && ksancov_edgemap)) {
			size_t idx = gd & GUARD_IDX_MASK;
			if (idx < ksancov_edgemap->nedges) {
				ksancov_edgemap->addrs[idx] = (uint32_t)(VM_KERNEL_UNSLIDE(caller) - VM_MIN_KERNEL_ADDRESS - 1);
				*guardp |= GUARD_SEEN;
			}
		}
	}

	if (__probable(os_atomic_load(&active_devs, relaxed) == 0)) {
		/* early exit when nothing is active */
		return;
	}

	if (ml_at_interrupt_context()) {
		return;
	}

	/* -1 so the recorded pc falls within the call instruction, not past it */
	uint32_t pc = (uint32_t)(VM_KERNEL_UNSLIDE(caller) - VM_MIN_KERNEL_ADDRESS - 1);

	thread_t th = current_thread();
	if (__improbable(th == THREAD_NULL)) {
		return;
	}

	struct ksancov_dev *dev = *(struct ksancov_dev **)__sanitizer_get_thread_data(th);
	if (__probable(dev == NULL)) {
		return;
	}

	if (dev->mode == KS_MODE_TRACE) {
		struct ksancov_trace *trace = dev->trace;
		if (os_atomic_load(&trace->enabled, relaxed) == 0) {
			return;
		}

		if (os_atomic_load(&trace->head, relaxed) >= dev->maxpcs) {
			return; /* overflow */
		}

		uint32_t idx = os_atomic_inc_orig(&trace->head, relaxed);
		if (__improbable(idx >= dev->maxpcs)) {
			return;
		}

		trace->pcs[idx] = pc;
	} else {
		/* counters mode assumes guard instrumentation (guardp != NULL) */
		size_t idx = *guardp & GUARD_IDX_MASK;

		struct ksancov_counters *counters = dev->counters;
		if (os_atomic_load(&counters->enabled, relaxed) == 0) {
			return;
		}

		/* saturating 8-bit add */
		if (counters->hits[idx] < KSANCOV_MAX_HITS) {
			counters->hits[idx]++;
		}
	}
}

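/*
 * Note on the recorded pc encoding: both the edgemap entries above and the
 * trace entries store VM_KERNEL_UNSLIDE(caller) - VM_MIN_KERNEL_ADDRESS - 1,
 * a 32-bit offset from the kernel base. The trace and edgemap headers publish
 * VM_MIN_KERNEL_ADDRESS in their `offset` field, so a userspace consumer can
 * recover an unslid address roughly like this (illustrative sketch only):
 *
 *	uintptr_t
 *	ksancov_unpack_pc(const struct ksancov_trace *trace, uint32_t i)
 *	{
 *		return trace->offset + trace->pcs[i] + 1;
 *	}
 */
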
void __attribute__((noinline))
__sanitizer_cov_trace_pc(void)
{
	trace_pc_guard(NULL, __builtin_return_address(0));
}

void __attribute__((noinline))
__sanitizer_cov_trace_pc_guard(uint32_t *guardp)
{
	trace_pc_guard(guardp, __builtin_return_address(0));
}

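/*
 * With clang's -fsanitize-coverage=trace-pc-guard, each instrumented edge
 * gets a 32-bit guard word, and the runtime is handed every module's guard
 * range at startup, possibly more than once. The initializer below therefore
 * only numbers guards that are still zero; indices start at 1 so a guard
 * value of 0 always means "unassigned".
 */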
void
__sanitizer_cov_trace_pc_guard_init(uint32_t *start, uint32_t *stop)
{
	/* assign a unique number to each guard */
	for (; start != stop; start++) {
		if (*start == 0) {
			if (nedges < KSANCOV_MAX_EDGES) {
				*start = ++nedges;
			}
		}
	}
}

void
__sanitizer_cov_pcs_init(uintptr_t *start, uintptr_t *stop)
{
#if USE_PC_TABLE
	static const uintptr_t pc_table_seen_flag = 0x100;

	for (; start < stop; start += 2) {
		uintptr_t pc = start[0];
		uintptr_t flags = start[1];

		/*
		 * This function gets called multiple times on the same range, so mark the
		 * ones we've seen using unused bits in the flags field.
		 */
		if (flags & pc_table_seen_flag) {
			continue;
		}

		start[1] |= pc_table_seen_flag;
		assert(npcs < KSANCOV_MAX_EDGES - 1);
		edge_addrs[++npcs] = pc;
	}
#else
	(void)start;
	(void)stop;
#endif
}

static void *
ksancov_do_map(uintptr_t base, size_t sz, vm_prot_t prot)
{
	kern_return_t kr;
	mach_port_t mem_entry = MACH_PORT_NULL;
	mach_vm_address_t user_addr = 0;
	memory_object_size_t size = sz;

	kr = mach_make_memory_entry_64(kernel_map,
	    &size,
	    (mach_vm_offset_t)base,
	    MAP_MEM_VM_SHARE | prot,
	    &mem_entry,
	    MACH_PORT_NULL);
	if (kr != KERN_SUCCESS) {
		return NULL;
	}

	kr = mach_vm_map_kernel(get_task_map(current_task()),
	    &user_addr,
	    size,
	    0,
	    VM_FLAGS_ANYWHERE,
	    VM_MAP_KERNEL_FLAGS_NONE,
	    VM_KERN_MEMORY_NONE,
	    mem_entry,
	    0,
	    FALSE,
	    prot,
	    prot,
	    VM_INHERIT_SHARE);

	/*
	 * At this point, either vm_map() has taken a reference on the memory entry
	 * and we can release our local reference, or the map failed and the entry
	 * needs to be freed.
	 */
	mach_memory_entry_port_release(mem_entry);

	if (kr != KERN_SUCCESS) {
		return NULL;
	}

	return (void *)user_addr;
}

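/*
 * The sharing scheme in ksancov_do_map() is: wrap the kernel allocation in a
 * named memory entry (MAP_MEM_VM_SHARE, so the entry references the same
 * pages rather than a copy), then map that entry into the calling task's map
 * at an address of the kernel's choosing. VM_INHERIT_SHARE keeps the mapping
 * shared across fork(). Both the trace/counters buffer (read-write) and the
 * edgemap (read-only) below are exported this way.
 */
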
/*
 * map the sancov buffer into the current process
 */
static int
ksancov_map(dev_t dev, void **bufp, size_t *sizep)
{
	struct ksancov_dev *d = get_dev(dev);
	if (!d) {
		return EINVAL;
	}

	uintptr_t addr;
	size_t size = d->sz;

	if (d->mode == KS_MODE_TRACE) {
		if (!d->trace) {
			return EINVAL;
		}
		addr = (uintptr_t)d->trace;
	} else if (d->mode == KS_MODE_COUNTERS) {
		if (!d->counters) {
			return EINVAL;
		}
		addr = (uintptr_t)d->counters;
	} else {
		return EINVAL; /* not configured */
	}

	void *buf = ksancov_do_map(addr, size, VM_PROT_READ | VM_PROT_WRITE);
	if (buf == NULL) {
		return ENOMEM;
	}

	*bufp = buf;
	*sizep = size;
	return 0;
}

/*
 * map the edge -> pc mapping as read-only
 */
static int
ksancov_map_edgemap(dev_t dev, void **bufp, size_t *sizep)
{
	struct ksancov_dev *d = get_dev(dev);
	if (!d) {
		return EINVAL;
	}

	uintptr_t addr = (uintptr_t)ksancov_edgemap;
	size_t size = sizeof(struct ksancov_edgemap) + ksancov_edgemap->nedges * sizeof(uint32_t);

	void *buf = ksancov_do_map(addr, size, VM_PROT_READ);
	if (buf == NULL) {
		return ENOMEM;
	}

	*bufp = buf;
	*sizep = size;
	return 0;
}

/*
 * Device node management
 */

static int
ksancov_open(dev_t dev, int flags, int devtype, proc_t p)
{
#pragma unused(flags,devtype,p)
	if (minor(dev) >= KSANCOV_MAX_DEV) {
		return EBUSY;
	}

	/* allocate a device entry */
	struct ksancov_dev *d = kalloc_tag(sizeof(struct ksancov_dev), VM_KERN_MEMORY_DIAG);
	if (!d) {
		return ENOMEM;
	}

	d->mode = KS_MODE_NONE;
	d->trace = NULL;
	d->maxpcs = 1024U * 64; /* default to 256k buffer => 64k pcs */
	d->dev = dev;
	d->thread = THREAD_NULL;

	ksancov_devs[minor(dev)] = d;

	return 0;
}

static int
ksancov_trace_alloc(dev_t dev, size_t maxpcs)
{
	struct ksancov_dev *d = get_dev(dev);
	if (!d) {
		return EINVAL;
	}

	if (d->mode != KS_MODE_NONE) {
		return EBUSY; /* trace/counters already created */
	}
	assert(d->trace == NULL);

	uintptr_t buf;
	size_t sz;
	if (os_mul_and_add_overflow(maxpcs, sizeof(uint32_t), sizeof(struct ksancov_trace), &sz)) {
		return EINVAL;
	}

	/* allocate the shared memory buffer */
	kern_return_t kr = kmem_alloc_flags(kernel_map, &buf, sz, VM_KERN_MEMORY_DIAG, KMA_ZERO);
	if (kr != KERN_SUCCESS) {
		return ENOMEM;
	}

	struct ksancov_trace *trace = (struct ksancov_trace *)buf;
	trace->magic = KSANCOV_TRACE_MAGIC;
	trace->offset = VM_MIN_KERNEL_ADDRESS;
	trace->head = 0;
	trace->enabled = 0;
	trace->maxpcs = maxpcs;

	d->trace = trace;
	d->sz = sz;
	d->maxpcs = maxpcs;
	d->mode = KS_MODE_TRACE;

	return 0;
}

static int
ksancov_counters_alloc(dev_t dev)
{
	struct ksancov_dev *d = get_dev(dev);
	if (!d) {
		return EINVAL;
	}

	if (d->mode != KS_MODE_NONE) {
		return EBUSY; /* trace/counters already created */
	}
	assert(d->counters == NULL);

	uintptr_t buf;
	size_t sz = sizeof(struct ksancov_counters) + ksancov_edgemap->nedges * sizeof(uint8_t);

	/* allocate the shared memory buffer */
	kern_return_t kr = kmem_alloc_flags(kernel_map, &buf, sz, VM_KERN_MEMORY_DIAG, KMA_ZERO);
	if (kr != KERN_SUCCESS) {
		return ENOMEM;
	}

	struct ksancov_counters *counters = (struct ksancov_counters *)buf;
	counters->magic = KSANCOV_COUNTERS_MAGIC;
	counters->nedges = ksancov_edgemap->nedges;
	counters->enabled = 0;

	d->counters = counters;
	d->sz = sz;
	d->mode = KS_MODE_COUNTERS;

	return 0;
}

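/*
 * In counters mode the shared buffer is a struct ksancov_counters header
 * followed by one 8-bit hit count per instrumented edge, indexed by the
 * guard number assigned in __sanitizer_cov_trace_pc_guard_init() and
 * saturating at KSANCOV_MAX_HITS. A consumer might scan it like so
 * (illustrative sketch; report_edge() is a hypothetical callback):
 *
 *	for (uint32_t i = 0; i < counters->nedges; i++) {
 *		if (counters->hits[i]) {
 *			report_edge(i, counters->hits[i]);
 *		}
 *	}
 */
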
/*
 * attach a thread to a ksancov dev instance
 */
static int
ksancov_attach(dev_t dev, thread_t th)
{
	struct ksancov_dev *d = get_dev(dev);
	if (!d) {
		return EINVAL;
	}

	if (d->thread != THREAD_NULL) {
		int ret = ksancov_detach(dev);
		if (ret) {
			return ret;
		}
	}

	if (th != current_thread()) {
		/* can only attach to self presently */
		return EINVAL;
	}

	struct ksancov_dev **devp = (void *)__sanitizer_get_thread_data(th);
	if (*devp) {
		return EBUSY; /* one dev per thread */
	}

	d->thread = th;
	thread_reference(d->thread);

	os_atomic_store(devp, d, relaxed);
	os_atomic_add(&active_devs, 1, relaxed);

	return 0;
}

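/*
 * Collection is strictly per-thread: the attached dev pointer lives in the
 * thread's sanitizer slot (__sanitizer_get_thread_data()), which is exactly
 * what trace_pc_guard() consults on every edge. active_devs is the global
 * fast-path gate, so while nothing is attached anywhere, instrumented code
 * pays only a single relaxed atomic load per edge.
 */
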
extern void
thread_wait(
	thread_t thread,
	boolean_t until_not_runnable);


/*
 * disconnect thread from ksancov dev
 */
static int
ksancov_detach(dev_t dev)
{
	struct ksancov_dev *d = get_dev(dev);
	if (!d) {
		return EINVAL;
	}

	if (d->thread == THREAD_NULL) {
		/* no thread attached */
		return 0;
	}

	/* disconnect dev from thread */
	struct ksancov_dev **devp = (void *)__sanitizer_get_thread_data(d->thread);
	if (*devp != NULL) {
		assert(*devp == d);
		os_atomic_store(devp, NULL, relaxed);
	}

	if (d->thread != current_thread()) {
		/* wait until it's safe to yank */
		thread_wait(d->thread, TRUE);
	}

	/* drop our thread reference */
	thread_deallocate(d->thread);
	d->thread = THREAD_NULL;

	return 0;
}

static int
ksancov_close(dev_t dev, int flags, int devtype, proc_t p)
{
#pragma unused(flags,devtype,p)
	struct ksancov_dev *d = get_dev(dev);
	if (!d) {
		return EINVAL;
	}

	if (d->mode == KS_MODE_TRACE) {
		struct ksancov_trace *trace = d->trace;
		if (trace) {
			/* trace allocated - delete it */

			os_atomic_sub(&active_devs, 1, relaxed);
			os_atomic_store(&trace->enabled, 0, relaxed); /* stop tracing */

			ksancov_detach(dev);

			/* free trace */
			kmem_free(kernel_map, (uintptr_t)d->trace, d->sz);
			d->trace = NULL;
			d->sz = 0;
		}
	} else if (d->mode == KS_MODE_COUNTERS) {
		struct ksancov_counters *counters = d->counters;
		if (counters) {
			os_atomic_sub(&active_devs, 1, relaxed);
			os_atomic_store(&counters->enabled, 0, relaxed); /* stop counting */

			ksancov_detach(dev);

			/* free counters */
			kmem_free(kernel_map, (uintptr_t)d->counters, d->sz);
			d->counters = NULL;
			d->sz = 0;
		}
	}

	ksancov_devs[minor(dev)] = NULL; /* dev no longer discoverable */

	/* free the ksancov device instance */
	kfree(d, sizeof(struct ksancov_dev));

	return 0;
}

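/*
 * Self-test target: the constant below is compared one nibble at a time
 * through nested branches, so every correctly guessed nibble exposes a new
 * coverage edge. A coverage-guided fuzzer driving KSANCOV_IOC_TESTPANIC can
 * thus solve the 64-bit value incrementally and prove the feedback loop
 * works end to end by reaching the panic.
 */
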
static void
ksancov_testpanic(volatile uint64_t guess)
{
	const uint64_t tgt = 0xf85de3b12891c817UL;

#define X(n) ((tgt & (0xfUL << (4*n))) == (guess & (0xfUL << (4*n))))

	if (X(0)) {
		if (X(1)) {
			if (X(2)) {
				if (X(3)) {
					if (X(4)) {
						if (X(5)) {
							if (X(6)) {
								if (X(7)) {
									if (X(8)) {
										if (X(9)) {
											if (X(10)) {
												if (X(11)) {
													if (X(12)) {
														if (X(13)) {
															if (X(14)) {
																if (X(15)) {
																	panic("ksancov: found test value\n");
																}
															}
														}
													}
												}
											}
										}
									}
								}
							}
						}
					}
				}
			}
		}
	}
}

static int
ksancov_ioctl(dev_t dev, unsigned long cmd, caddr_t _data, int fflag, proc_t p)
{
#pragma unused(fflag,p)
	int ret = 0;
	void *data = (void *)_data;

	struct ksancov_dev *d = get_dev(dev);
	if (!d) {
		return EINVAL; /* dev not open */
	}

	if (cmd == KSANCOV_IOC_TRACE) {
		size_t maxpcs = *(size_t *)data;
		ret = ksancov_trace_alloc(dev, maxpcs);
		if (ret) {
			return ret;
		}
	} else if (cmd == KSANCOV_IOC_COUNTERS) {
		ret = ksancov_counters_alloc(dev);
		if (ret) {
			return ret;
		}
	} else if (cmd == KSANCOV_IOC_MAP) {
		struct ksancov_buf_desc *mcmd = (struct ksancov_buf_desc *)data;

		if (d->mode == KS_MODE_NONE) {
			return EINVAL; /* mode not configured */
		}

		/* map buffer into the userspace VA space */
		void *buf;
		size_t size;
		ret = ksancov_map(dev, &buf, &size);
		if (ret) {
			return ret;
		}

		mcmd->ptr = (uintptr_t)buf;
		mcmd->sz = size;
	} else if (cmd == KSANCOV_IOC_MAP_EDGEMAP) {
		struct ksancov_buf_desc *mcmd = (struct ksancov_buf_desc *)data;

		/* map buffer into the userspace VA space */
		void *buf;
		size_t size;
		ret = ksancov_map_edgemap(dev, &buf, &size);
		if (ret) {
			return ret;
		}

		mcmd->ptr = (uintptr_t)buf;
		mcmd->sz = size;
	} else if (cmd == KSANCOV_IOC_START) {
		if (d->mode == KS_MODE_NONE) {
			return EINVAL; /* not configured */
		}

		ret = ksancov_attach(dev, current_thread());
		if (ret) {
			return ret;
		}
	} else if (cmd == KSANCOV_IOC_NEDGES) {
		size_t *nptr = (size_t *)data;
		*nptr = nedges;
	} else if (cmd == KSANCOV_IOC_TESTPANIC) {
		uint64_t guess = *(uint64_t *)data;
		ksancov_testpanic(guess);
	} else {
		/* unknown ioctl */
		return ENODEV;
	}

	return ret;
}

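/*
 * A typical userspace session, sketched under the assumption that
 * KSANCOV_DEVNODE names the device under /dev and that san/ksancov.h
 * supplies the ioctl numbers and struct layouts used above (error handling
 * elided; process_pc() is a hypothetical consumer):
 *
 *	int fd = open("/dev/" KSANCOV_DEVNODE, O_RDWR);
 *
 *	size_t maxpcs = 64 * 1024;
 *	ioctl(fd, KSANCOV_IOC_TRACE, &maxpcs);   // or KSANCOV_IOC_COUNTERS
 *
 *	struct ksancov_buf_desc mc = { 0 };
 *	ioctl(fd, KSANCOV_IOC_MAP, &mc);         // shared buffer at mc.ptr
 *	struct ksancov_trace *trace = (struct ksancov_trace *)mc.ptr;
 *
 *	ioctl(fd, KSANCOV_IOC_START, 0);         // attach the current thread
 *	trace->enabled = 1;                      // begin recording
 *
 *	// ... exercise some kernel code ...
 *
 *	trace->enabled = 0;
 *	uint32_t n = trace->head < maxpcs ? trace->head : (uint32_t)maxpcs;
 *	for (uint32_t i = 0; i < n; i++) {
 *		process_pc(trace->offset + trace->pcs[i] + 1);
 *	}
 *
 *	close(fd);                               // detaches and frees buffers
 */
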
static int
ksancov_dev_clone(dev_t dev, int action)
{
#pragma unused(dev)
	if (action == DEVFS_CLONE_ALLOC) {
		for (size_t i = 0; i < KSANCOV_MAX_DEV; i++) {
			if (ksancov_devs[i] == NULL) {
				return i;
			}
		}
	} else if (action == DEVFS_CLONE_FREE) {
		return 0;
	}

	return -1;
}

static struct cdevsw ksancov_cdev = {
	.d_open = ksancov_open,
	.d_close = ksancov_close,
	.d_ioctl = ksancov_ioctl,

	.d_read = eno_rdwrt,
	.d_write = eno_rdwrt,
	.d_stop = eno_stop,
	.d_reset = eno_reset,
	.d_select = eno_select,
	.d_mmap = eno_mmap,
	.d_strategy = eno_strat,
	.d_type = 0
};

int
ksancov_init_dev(void)
{
	dev_major = cdevsw_add(-1, &ksancov_cdev);
	if (dev_major < 0) {
		printf("ksancov: failed to allocate major device node\n");
		return -1;
	}

	dev_t dev = makedev(dev_major, 0);
	void *node = devfs_make_node_clone(dev, DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666,
	    ksancov_dev_clone, KSANCOV_DEVNODE);
	if (!node) {
		printf("ksancov: failed to create device node\n");
		return -1;
	}

	/* This could be moved to the first use of /dev/ksancov to save memory */
	uintptr_t buf;
	size_t sz = sizeof(struct ksancov_edgemap) + KSANCOV_MAX_EDGES * sizeof(uint32_t);

	kern_return_t kr = kmem_alloc_flags(kernel_map, &buf, sz, VM_KERN_MEMORY_DIAG, KMA_ZERO);
	if (kr) {
		printf("ksancov: failed to allocate edge addr map\n");
		return -1;
	}

	ksancov_edgemap = (void *)buf;
	ksancov_edgemap->magic = KSANCOV_EDGEMAP_MAGIC;
	ksancov_edgemap->nedges = nedges;
	ksancov_edgemap->offset = VM_MIN_KERNEL_ADDRESS;

	return 0;
}