/*
 * Copyright (c) 2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <stdatomic.h>

#include <kern/assert.h>
#include <kern/cpu_data.h>
#include <kern/locks.h>
#include <kern/debug.h>
#include <kern/kalloc.h>
#include <kern/zalloc.h>
#include <kern/task.h>
#include <kern/thread.h>

#include <vm/vm_kern.h>
#include <vm/vm_protos.h>

#include <mach/mach_vm.h>
#include <mach/mach_types.h>
#include <mach/mach_port.h>
#include <mach/vm_map.h>
#include <mach/vm_param.h>
#include <mach/machine/vm_param.h>
#include <machine/atomic.h>

#include <sys/stat.h> /* dev_t */
#include <miscfs/devfs/devfs.h> /* must come after sys/stat.h */
#include <sys/conf.h> /* must come after sys/stat.h */

#include <libkern/libkern.h>
#include <libkern/OSAtomic.h>
#include <os/overflow.h>

#include <san/ksancov.h>
typedef struct uthread * uthread_t;

#include <sys/sysproto.h>
#include <sys/queue.h>
#include <sys/sysctl.h>

#define USE_PC_TABLE 0
#define KSANCOV_MAX_DEV 64
extern boolean_t ml_at_interrupt_context(void);
extern boolean_t ml_get_interrupts_enabled(void);

static int ksancov_detach(dev_t dev);
static size_t nedges = 0;
static uint32_t __unused npcs = 0;

static _Atomic unsigned active_devs;
/* coverage generation modes, selected via ioctl */
enum ksancov_mode {
	KS_MODE_NONE,
	KS_MODE_TRACE,
	KS_MODE_COUNTERS
};

/* per-open-device state */
struct ksancov_dev {
	enum ksancov_mode mode;

	struct ksancov_trace *trace;
	struct ksancov_counters *counters;

	size_t sz; /* size of allocated trace/counters buffer */

	size_t maxpcs;   /* trace mode: capacity of the pcs[] buffer */

	thread_t thread; /* thread currently attached to this device */
};

/* array of devices indexed by devnode minor */
static struct ksancov_dev *ksancov_devs[KSANCOV_MAX_DEV];

static struct ksancov_edgemap *ksancov_edgemap;
static inline struct ksancov_dev *
get_dev(dev_t dev)
{
	int mn = minor(dev);
	return ksancov_devs[mn];
}
void
__sanitizer_cov_trace_pc_indirect(void * __unused callee)
{
	return;
}
#define GUARD_SEEN (uint32_t)0x80000000
#define GUARD_IDX_MASK (uint32_t)0x0fffffff
static inline void __attribute__((always_inline))
trace_pc_guard(uint32_t *guardp, void *caller)
{
	/* record the pc for this guard */
	if (guardp) {
		uint32_t gd = *guardp;
		if (__improbable(gd && !(gd & GUARD_SEEN) && ksancov_edgemap)) {
			size_t idx = gd & GUARD_IDX_MASK;
			if (idx < ksancov_edgemap->nedges) {
				/* store the unslid caller pc as a 32-bit offset from VM_MIN_KERNEL_ADDRESS */
				ksancov_edgemap->addrs[idx] = (uint32_t)(VM_KERNEL_UNSLIDE(caller) - VM_MIN_KERNEL_ADDRESS - 1);
				*guardp |= GUARD_SEEN;
			}
		}
	}

	if (__probable(os_atomic_load(&active_devs, relaxed) == 0)) {
		/* early exit when nothing is active */
		return;
	}

	if (ml_at_interrupt_context()) {
		return;
	}

	uint32_t pc = (uint32_t)(VM_KERNEL_UNSLIDE(caller) - VM_MIN_KERNEL_ADDRESS - 1);

	thread_t th = current_thread();
	if (__improbable(th == THREAD_NULL)) {
		return;
	}

	struct ksancov_dev *dev = *(struct ksancov_dev **)__sanitizer_get_thread_data(th);
	if (__probable(dev == NULL)) {
		/* no device attached to this thread */
		return;
	}

	if (dev->mode == KS_MODE_TRACE) {
		struct ksancov_trace *trace = dev->trace;
		if (os_atomic_load(&trace->enabled, relaxed) == 0) {
			return;
		}

		if (os_atomic_load(&trace->head, relaxed) >= dev->maxpcs) {
			return; /* overflow */
		}

		uint32_t idx = os_atomic_inc_orig(&trace->head, relaxed);
		if (__improbable(idx >= dev->maxpcs)) {
			return;
		}

		trace->pcs[idx] = pc;
	} else {
		size_t idx = *guardp & GUARD_IDX_MASK;

		struct ksancov_counters *counters = dev->counters;
		if (os_atomic_load(&counters->enabled, relaxed) == 0) {
			return;
		}

		/* saturating 8bit add */
		if (counters->hits[idx] < KSANCOV_MAX_HITS) {
			counters->hits[idx]++;
		}
	}
}
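/*
 * Illustrative sketch (not compiled here): how a consumer of the shared
 * trace buffer could turn the 32-bit offsets stored above back into kernel
 * addresses. The helper name `trace_entry_addr' is hypothetical; the layout
 * of struct ksancov_trace comes from san/ksancov.h.
 *
 *	static inline uintptr_t
 *	trace_entry_addr(const struct ksancov_trace *trace, uint32_t i)
 *	{
 *		// invert pc = VM_KERNEL_UNSLIDE(caller) - VM_MIN_KERNEL_ADDRESS - 1,
 *		// where trace->offset was set to VM_MIN_KERNEL_ADDRESS at alloc time
 *		return (uintptr_t)trace->offset + trace->pcs[i] + 1;
 *	}
 */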
void __attribute__((noinline))
__sanitizer_cov_trace_pc(void)
{
	trace_pc_guard(NULL, __builtin_return_address(0));
}

void __attribute__((noinline))
__sanitizer_cov_trace_pc_guard(uint32_t *guardp)
{
	trace_pc_guard(guardp, __builtin_return_address(0));
}
void
__sanitizer_cov_trace_pc_guard_init(uint32_t *start, uint32_t *stop)
{
	/* assign a unique number to each guard */
	for (; start != stop; start++) {
		if (*start == 0) {
			if (nedges < KSANCOV_MAX_EDGES) {
				*start = (uint32_t)++nedges;
			}
		}
	}
}
void
__sanitizer_cov_pcs_init(uintptr_t *start, uintptr_t *stop)
{
#if USE_PC_TABLE
	static const uintptr_t pc_table_seen_flag = 0x100;

	for (; start < stop; start += 2) {
		uintptr_t pc = start[0];
		uintptr_t flags = start[1];

		/*
		 * This function gets called multiple times on the same range, so mark the
		 * ones we've seen using unused bits in the flags field.
		 */
		if (flags & pc_table_seen_flag) {
			continue;
		}

		start[1] |= pc_table_seen_flag;

		assert(npcs < KSANCOV_MAX_EDGES - 1);
		edge_addrs[++npcs] = pc;
	}
#else
	(void)start;
	(void)stop;
#endif
}
static void *
ksancov_do_map(uintptr_t base, size_t sz, vm_prot_t prot)
{
	kern_return_t kr;
	mach_port_t mem_entry = MACH_PORT_NULL;
	mach_vm_address_t user_addr = 0;
	memory_object_size_t size = sz;

	kr = mach_make_memory_entry_64(kernel_map,
	    &size,
	    (mach_vm_offset_t)base,
	    MAP_MEM_VM_SHARE | prot,
	    &mem_entry,
	    MACH_PORT_NULL);
	if (kr != KERN_SUCCESS) {
		return NULL;
	}

	kr = mach_vm_map_kernel(get_task_map(current_task()),
	    &user_addr,
	    size,
	    0,
	    VM_FLAGS_ANYWHERE,
	    VM_MAP_KERNEL_FLAGS_NONE,
	    VM_KERN_MEMORY_NONE,
	    mem_entry,
	    0,
	    FALSE,
	    prot,
	    prot,
	    VM_INHERIT_SHARE);

	/*
	 * At this point, either vm_map() has taken a reference on the memory entry
	 * and we can release our local reference, or the map failed and the entry
	 * was not consumed.
	 */
	mach_memory_entry_port_release(mem_entry);

	if (kr != KERN_SUCCESS) {
		return NULL;
	}

	return (void *)user_addr;
}
/*
 * map the sancov buffer into the current process
 */
static int
ksancov_map(dev_t dev, void **bufp, size_t *sizep)
{
	struct ksancov_dev *d = get_dev(dev);
	if (!d) {
		return EINVAL;
	}

	uintptr_t addr;
	size_t size = d->sz;

	if (d->mode == KS_MODE_TRACE) {
		if (!d->trace) {
			return EINVAL;
		}
		addr = (uintptr_t)d->trace;
	} else if (d->mode == KS_MODE_COUNTERS) {
		if (!d->counters) {
			return EINVAL;
		}
		addr = (uintptr_t)d->counters;
	} else {
		return EINVAL; /* not configured */
	}

	void *buf = ksancov_do_map(addr, size, VM_PROT_READ | VM_PROT_WRITE);
	if (buf == NULL) {
		return ENOMEM;
	}

	*bufp = buf;
	*sizep = size;
	return 0;
}
/*
 * map the edge -> pc mapping as read-only
 */
static int
ksancov_map_edgemap(dev_t dev, void **bufp, size_t *sizep)
{
	struct ksancov_dev *d = get_dev(dev);
	if (!d) {
		return EINVAL;
	}

	uintptr_t addr = (uintptr_t)ksancov_edgemap;
	size_t size = sizeof(struct ksancov_edgemap) + ksancov_edgemap->nedges * sizeof(uint32_t);

	void *buf = ksancov_do_map(addr, size, VM_PROT_READ);
	if (buf == NULL) {
		return ENOMEM;
	}

	*bufp = buf;
	*sizep = size;
	return 0;
}
/*
 * Device node management
 */

static int
ksancov_open(dev_t dev, int flags, int devtype, proc_t p)
{
#pragma unused(flags,devtype,p)
	if (minor(dev) >= KSANCOV_MAX_DEV) {
		return EBUSY;
	}

	/* allocate a device entry */
	struct ksancov_dev *d = kalloc_tag(sizeof(struct ksancov_dev), VM_KERN_MEMORY_DIAG);
	if (!d) {
		return ENOMEM;
	}

	d->mode = KS_MODE_NONE;
	d->trace = NULL;
	d->counters = NULL;
	d->sz = 0;
	d->maxpcs = 1024U * 64; /* default to 256k buffer => 64k pcs */
	d->thread = THREAD_NULL;

	ksancov_devs[minor(dev)] = d;

	return 0;
}
static int
ksancov_trace_alloc(dev_t dev, size_t maxpcs)
{
	struct ksancov_dev *d = get_dev(dev);
	if (!d) {
		return EINVAL;
	}

	if (d->mode != KS_MODE_NONE) {
		return EBUSY; /* trace/counters already created */
	}
	assert(d->trace == NULL);

	size_t sz;
	if (os_mul_and_add_overflow(maxpcs, sizeof(uint32_t), sizeof(struct ksancov_trace), &sz)) {
		return EINVAL;
	}

	/* allocate the shared memory buffer */
	vm_offset_t buf;
	kern_return_t kr = kmem_alloc_flags(kernel_map, &buf, sz, VM_KERN_MEMORY_DIAG, KMA_ZERO);
	if (kr != KERN_SUCCESS) {
		return ENOMEM;
	}

	struct ksancov_trace *trace = (struct ksancov_trace *)buf;
	trace->magic = KSANCOV_TRACE_MAGIC;
	trace->offset = VM_MIN_KERNEL_ADDRESS;
	trace->maxpcs = (uint32_t)maxpcs;

	d->trace = trace;
	d->sz = sz;
	d->maxpcs = maxpcs;
	d->mode = KS_MODE_TRACE;

	return 0;
}
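/*
 * Worked example of the size computation above (illustrative): with the
 * default maxpcs of 64 * 1024 set in ksancov_open(), the overflow-checked
 * multiply-add yields
 *
 *	sz = 64K * sizeof(uint32_t) + sizeof(struct ksancov_trace)
 *	   = 256 KiB of pc slots plus a small header
 *
 * which matches the "256k buffer => 64k pcs" note on the default.
 */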
static int
ksancov_counters_alloc(dev_t dev)
{
	struct ksancov_dev *d = get_dev(dev);
	if (!d) {
		return EINVAL;
	}

	if (d->mode != KS_MODE_NONE) {
		return EBUSY; /* trace/counters already created */
	}
	assert(d->counters == NULL);

	size_t sz = sizeof(struct ksancov_counters) + ksancov_edgemap->nedges * sizeof(uint8_t);

	/* allocate the shared memory buffer */
	vm_offset_t buf;
	kern_return_t kr = kmem_alloc_flags(kernel_map, &buf, sz, VM_KERN_MEMORY_DIAG, KMA_ZERO);
	if (kr != KERN_SUCCESS) {
		return ENOMEM;
	}

	struct ksancov_counters *counters = (struct ksancov_counters *)buf;
	counters->magic = KSANCOV_COUNTERS_MAGIC;
	counters->nedges = ksancov_edgemap->nedges;
	counters->enabled = 0;

	d->counters = counters;
	d->sz = sz;
	d->mode = KS_MODE_COUNTERS;

	return 0;
}
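/*
 * Illustrative sketch (not part of this file): how a userspace consumer
 * might interpret the counters buffer after mapping it with KSANCOV_IOC_MAP
 * and the edge map with KSANCOV_IOC_MAP_EDGEMAP. `counters' and `edgemap'
 * stand for those two mappings; the loop itself is hypothetical glue.
 *
 *	for (uint32_t i = 0; i < counters->nedges; i++) {
 *		if (counters->hits[i] == 0) {
 *			continue;               // edge never executed
 *		}
 *		uintptr_t addr = (uintptr_t)edgemap->offset + edgemap->addrs[i] + 1;
 *		printf("edge %u at %#lx: %u hits\n", i, addr,
 *		    (unsigned)counters->hits[i]);
 *	}
 */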
/*
 * attach a thread to a ksancov dev instance
 */
static int
ksancov_attach(dev_t dev, thread_t th)
{
	struct ksancov_dev *d = get_dev(dev);
	if (!d) {
		return EINVAL;
	}

	if (d->thread != THREAD_NULL) {
		int ret = ksancov_detach(dev);
		if (ret) {
			return ret;
		}
	}

	if (th != current_thread()) {
		/* can only attach to self presently */
		return EINVAL;
	}

	struct ksancov_dev **devp = (void *)__sanitizer_get_thread_data(th);
	if (*devp) {
		return EBUSY; /* one dev per thread */
	}

	d->thread = th;
	thread_reference(d->thread);

	os_atomic_store(devp, d, relaxed);
	os_atomic_add(&active_devs, 1, relaxed);

	return 0;
}
extern void thread_wait(thread_t thread, boolean_t until_not_runnable);
/*
 * disconnect thread from ksancov dev
 */
static int
ksancov_detach(dev_t dev)
{
	struct ksancov_dev *d = get_dev(dev);
	if (!d) {
		return EINVAL;
	}

	if (d->thread == THREAD_NULL) {
		/* no thread attached */
		return 0;
	}

	/* disconnect dev from thread */
	struct ksancov_dev **devp = (void *)__sanitizer_get_thread_data(d->thread);
	if (*devp != NULL) {
		assert(*devp == d);
		os_atomic_store(devp, NULL, relaxed);
	}

	if (d->thread != current_thread()) {
		/* wait until it's safe to yank */
		thread_wait(d->thread, TRUE);
	}

	/* drop our thread reference */
	thread_deallocate(d->thread);
	d->thread = THREAD_NULL;

	return 0;
}
static int
ksancov_close(dev_t dev, int flags, int devtype, proc_t p)
{
#pragma unused(flags,devtype,p)
	struct ksancov_dev *d = get_dev(dev);
	if (!d) {
		return EINVAL;
	}

	if (d->mode == KS_MODE_TRACE) {
		struct ksancov_trace *trace = d->trace;
		if (trace) {
			/* trace allocated - delete it */

			os_atomic_sub(&active_devs, 1, relaxed);
			os_atomic_store(&trace->enabled, 0, relaxed); /* stop tracing */

			ksancov_detach(dev);

			/* free the shared buffer */
			kmem_free(kernel_map, (uintptr_t)d->trace, d->sz);
			d->trace = NULL;
			d->sz = 0;
		}
	} else if (d->mode == KS_MODE_COUNTERS) {
		struct ksancov_counters *counters = d->counters;
		if (counters) {
			os_atomic_sub(&active_devs, 1, relaxed);
			os_atomic_store(&counters->enabled, 0, relaxed); /* stop tracing */

			ksancov_detach(dev);

			/* free the shared buffer */
			kmem_free(kernel_map, (uintptr_t)d->counters, d->sz);
			d->counters = NULL;
			d->sz = 0;
		}
	}

	ksancov_devs[minor(dev)] = NULL; /* dev no longer discoverable */

	/* free the ksancov device instance */
	kfree(d, sizeof(struct ksancov_dev));

	return 0;
}
static void
ksancov_testpanic(volatile uint64_t guess)
{
	const uint64_t tgt = 0xf85de3b12891c817UL;

#define X(n) ((tgt & (0xfUL << (4*n))) == (guess & (0xfUL << (4*n))))

	/* compare one nibble per branch so each match shows up as new coverage */
	if (X(0) && X(1) && X(2) && X(3) &&
	    X(4) && X(5) && X(6) && X(7) &&
	    X(8) && X(9) && X(10) && X(11) &&
	    X(12) && X(13) && X(14) && X(15)) {
		panic("ksancov: found test value\n");
	}
#undef X
}
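/*
 * Worked example of the X(n) macro above (illustrative): X(n) compares the
 * n-th nibble of `guess' against the n-th nibble of the target constant.
 * For the target 0x...c817, X(0) checks the low nibble, so any guess ending
 * in 0x7 makes X(0) true, a guess ending in 0x17 also satisfies X(1), and so
 * on. Because each nibble is tested in its own branch, a coverage-guided
 * fuzzer watching the trace/counters buffers can discover the 64-bit value
 * one nibble at a time instead of having to guess all 16 at once.
 */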
static int
ksancov_ioctl(dev_t dev, unsigned long cmd, caddr_t _data, int fflag, proc_t p)
{
#pragma unused(fflag,p)
	int ret = 0;
	void *data = (void *)_data;

	struct ksancov_dev *d = get_dev(dev);
	if (!d) {
		return EINVAL; /* dev not open */
	}

	if (cmd == KSANCOV_IOC_TRACE) {
		size_t maxpcs = *(size_t *)data;
		ret = ksancov_trace_alloc(dev, maxpcs);
		if (ret) {
			return ret;
		}
	} else if (cmd == KSANCOV_IOC_COUNTERS) {
		ret = ksancov_counters_alloc(dev);
		if (ret) {
			return ret;
		}
	} else if (cmd == KSANCOV_IOC_MAP) {
		struct ksancov_buf_desc *mcmd = (struct ksancov_buf_desc *)data;

		if (d->mode == KS_MODE_NONE) {
			return EINVAL; /* mode not configured */
		}

		/* map buffer into the userspace VA space */
		void *buf;
		size_t size;
		ret = ksancov_map(dev, &buf, &size);
		if (ret) {
			return ret;
		}

		mcmd->ptr = (uintptr_t)buf;
		mcmd->sz = size;
	} else if (cmd == KSANCOV_IOC_MAP_EDGEMAP) {
		struct ksancov_buf_desc *mcmd = (struct ksancov_buf_desc *)data;

		/* map buffer into the userspace VA space */
		void *buf;
		size_t size;
		ret = ksancov_map_edgemap(dev, &buf, &size);
		if (ret) {
			return ret;
		}

		mcmd->ptr = (uintptr_t)buf;
		mcmd->sz = size;
	} else if (cmd == KSANCOV_IOC_START) {
		if (d->mode == KS_MODE_NONE) {
			return EINVAL; /* not configured */
		}

		ret = ksancov_attach(dev, current_thread());
		if (ret) {
			return ret;
		}
	} else if (cmd == KSANCOV_IOC_NEDGES) {
		size_t *nptr = (size_t *)data;
		*nptr = nedges;
	} else if (cmd == KSANCOV_IOC_TESTPANIC) {
		uint64_t guess = *(uint64_t *)data;
		ksancov_testpanic(guess);
	} else {
		ret = EINVAL;
	}

	return ret;
}
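/*
 * Illustrative userspace flow for the ioctl interface above (a sketch, not
 * part of this file; error handling omitted). The KSANCOV_IOC_* names and
 * struct ksancov_buf_desc come from san/ksancov.h; the device path and the
 * rest of the glue are assumptions for the example.
 *
 *	int fd = open("/dev/ksancov", O_RDWR);
 *
 *	size_t maxpcs = 64 * 1024;
 *	ioctl(fd, KSANCOV_IOC_TRACE, &maxpcs);      // allocate a trace buffer
 *
 *	struct ksancov_buf_desc mc = { 0 };
 *	ioctl(fd, KSANCOV_IOC_MAP, &mc);            // map it into this process
 *	struct ksancov_trace *trace = (struct ksancov_trace *)mc.ptr;
 *
 *	ioctl(fd, KSANCOV_IOC_START, 0);            // attach the calling thread
 *	trace->enabled = 1;                         // start recording pcs
 *	// ... exercise some kernel code ...
 *	trace->enabled = 0;                         // stop; trace->head entries
 *	                                            // are now in trace->pcs[]
 */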
static int
ksancov_dev_clone(dev_t dev, int action)
{
#pragma unused(dev)
	if (action == DEVFS_CLONE_ALLOC) {
		/* hand out the first free minor */
		for (size_t i = 0; i < KSANCOV_MAX_DEV; i++) {
			if (ksancov_devs[i] == NULL) {
				return (int)i;
			}
		}
	} else if (action == DEVFS_CLONE_FREE) {
		return 0;
	}

	return -1;
}
static struct cdevsw ksancov_cdev = {
	.d_open = ksancov_open,
	.d_close = ksancov_close,
	.d_ioctl = ksancov_ioctl,

	.d_read = eno_rdwrt,
	.d_write = eno_rdwrt,
	.d_stop = eno_stop,
	.d_reset = eno_reset,
	.d_select = eno_select,
	.d_mmap = eno_mmap,
	.d_strategy = eno_strat,
};
static int dev_major;

void
ksancov_init_dev(void)
{
	dev_major = cdevsw_add(-1, &ksancov_cdev);
	if (dev_major < 0) {
		printf("ksancov: failed to allocate major device node\n");
		return;
	}

	dev_t dev = makedev(dev_major, 0);
	void *node = devfs_make_node_clone(dev, DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666,
	    ksancov_dev_clone, KSANCOV_DEVNODE);
	if (!node) {
		printf("ksancov: failed to create device node\n");
		return;
	}

	/* This could be moved to the first use of /dev/ksancov to save memory */
	size_t sz = sizeof(struct ksancov_edgemap) + KSANCOV_MAX_EDGES * sizeof(uint32_t);

	vm_offset_t buf;
	kern_return_t kr = kmem_alloc_flags(kernel_map, &buf, sz, VM_KERN_MEMORY_DIAG, KMA_ZERO);
	if (kr != KERN_SUCCESS) {
		printf("ksancov: failed to allocate edge addr map\n");
		return;
	}

	ksancov_edgemap = (void *)buf;
	ksancov_edgemap->magic = KSANCOV_EDGEMAP_MAGIC;
	ksancov_edgemap->nedges = (uint32_t)nedges;
	ksancov_edgemap->offset = VM_MIN_KERNEL_ADDRESS;
}