/*
 * Copyright (c) 2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <kern/assert.h>
#include <kern/cpu_data.h>
#include <kern/locks.h>
#include <kern/debug.h>
#include <kern/kalloc.h>
#include <kern/zalloc.h>
#include <kern/task.h>
#include <kern/thread.h>

#include <vm/vm_kern.h>
#include <vm/vm_protos.h>

#include <mach/mach_vm.h>
#include <mach/mach_types.h>
#include <mach/mach_port.h>
#include <mach/vm_map.h>
#include <mach/vm_param.h>
#include <mach/machine/vm_param.h>

#include <sys/stat.h>           /* dev_t */
#include <miscfs/devfs/devfs.h> /* must come after sys/stat.h */
#include <sys/conf.h>           /* must come after sys/stat.h */

#include <libkern/libkern.h>
#include <os/atomic_private.h>
#include <os/overflow.h>

#include <san/ksancov.h>
typedef struct uthread * uthread_t;

#include <sys/sysproto.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#define USE_PC_TABLE    0
#define KSANCOV_MAX_DEV 64
#define KSANCOV_MAX_PCS (1024U * 64) /* default to 256k buffer => 64k pcs */
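/*
 * Sizing note: at the default KSANCOV_MAX_PCS of 64K entries, the trace
 * buffer costs 64K * sizeof(uint32_t) = 256 KiB plus the ksancov_trace
 * header, which is the arithmetic behind the comment above.
 */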
enum {
	KS_MODE_NONE,
	KS_MODE_TRACE,
	KS_MODE_COUNTERS,
	KS_MODE_MAX
};

struct ksancov_dev {
	unsigned mode;

	union {
		struct ksancov_trace *trace;
		struct ksancov_counters *counters;
	};
	size_t sz; /* size of allocated trace/counters buffer */

	size_t maxpcs;

	dev_t dev;
	thread_t thread;
	lck_mtx_t lock;
};
typedef struct ksancov_dev * ksancov_dev_t;
extern boolean_t ml_at_interrupt_context(void);
extern boolean_t ml_get_interrupts_enabled(void);

static void ksancov_detach(ksancov_dev_t);
static int dev_major;
static size_t nedges = 0;
static uint32_t __unused npcs = 0;

static _Atomic unsigned active_devs;
static LCK_GRP_DECLARE(ksancov_lck_grp, "ksancov_lck_grp");
static LCK_RW_DECLARE(ksancov_devs_lck, &ksancov_lck_grp);
/* array of devices indexed by devnode minor */
static ksancov_dev_t ksancov_devs[KSANCOV_MAX_DEV];
static struct ksancov_edgemap *ksancov_edgemap;
static ksancov_dev_t
create_dev(dev_t dev)
{
	ksancov_dev_t d = kalloc_tag(sizeof(struct ksancov_dev), VM_KERN_MEMORY_DIAG);
	if (!d) {
		return NULL;
	}

	d->mode = KS_MODE_NONE;
	d->trace = NULL;
	d->maxpcs = KSANCOV_MAX_PCS;
	d->dev = dev;
	d->thread = THREAD_NULL;
	lck_mtx_init(&d->lock, &ksancov_lck_grp, LCK_ATTR_NULL);

	return d;
}
static void
free_dev(ksancov_dev_t d)
{
	if (d->mode == KS_MODE_TRACE && d->trace) {
		kmem_free(kernel_map, (uintptr_t)d->trace, d->sz);
	} else if (d->mode == KS_MODE_COUNTERS && d->counters) {
		kmem_free(kernel_map, (uintptr_t)d->counters, d->sz);
	}
	lck_mtx_destroy(&d->lock, &ksancov_lck_grp);
	kfree(d, sizeof(struct ksancov_dev));
}
void
__sanitizer_cov_trace_pc_indirect(void * __unused callee)
{
	return;
}
#define GUARD_SEEN     (uint32_t)0x80000000
#define GUARD_IDX_MASK (uint32_t)0x0fffffff
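/*
 * Guard word layout, derived from the masks above: the low bits hold the
 * 1-based edge index assigned by __sanitizer_cov_trace_pc_guard_init(), and
 * bit 31 is set once the edge's PC has been recorded in the edgemap. For
 * example, a guard value of 0x8000002a is edge index 42 whose address is
 * already known.
 */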
static inline void __attribute__((always_inline))
trace_pc_guard(uint32_t *guardp, void *caller)
{
	/* record the pc for this guard */
	if (guardp) {
		uint32_t gd = *guardp;
		if (__improbable(gd && !(gd & GUARD_SEEN) && ksancov_edgemap)) {
			size_t idx = gd & GUARD_IDX_MASK;
			if (idx < ksancov_edgemap->nedges) {
				ksancov_edgemap->addrs[idx] = (uint32_t)(VM_KERNEL_UNSLIDE(caller) - KSANCOV_PC_OFFSET - 1);
				*guardp |= GUARD_SEEN;
			}
		}
	}

	if (__probable(os_atomic_load(&active_devs, relaxed) == 0)) {
		/* early exit when nothing is active */
		return;
	}

	if (ml_at_interrupt_context()) {
		return;
	}

	uint32_t pc = (uint32_t)(VM_KERNEL_UNSLIDE(caller) - KSANCOV_PC_OFFSET - 1);

	thread_t th = current_thread();
	if (__improbable(th == THREAD_NULL)) {
		return;
	}

	ksancov_dev_t dev = *(ksancov_dev_t *)__sanitizer_get_thread_data(th);
	if (__probable(dev == NULL)) {
		return;
	}

	if (dev->mode == KS_MODE_TRACE) {
		struct ksancov_trace *trace = dev->trace;
		if (os_atomic_load(&trace->enabled, relaxed) == 0) {
			return;
		}

		if (os_atomic_load(&trace->head, relaxed) >= dev->maxpcs) {
			return; /* overflow */
		}

		uint32_t idx = os_atomic_inc_orig(&trace->head, relaxed);
		if (__improbable(idx >= dev->maxpcs)) {
			return;
		}

		trace->pcs[idx] = pc;
	} else {
		/* counters mode implies guard instrumentation, so guardp is valid here */
		size_t idx = *guardp & GUARD_IDX_MASK;

		struct ksancov_counters *counters = dev->counters;
		if (os_atomic_load(&counters->enabled, relaxed) == 0) {
			return;
		}

		/* saturating 8bit add */
		if (counters->hits[idx] < KSANCOV_MAX_HITS) {
			counters->hits[idx]++;
		}
	}
}
void __attribute__((noinline))
__sanitizer_cov_trace_pc(void)
{
	trace_pc_guard(NULL, __builtin_return_address(0));
}
void __attribute__((noinline))
__sanitizer_cov_trace_pc_guard(uint32_t *guardp)
{
	trace_pc_guard(guardp, __builtin_return_address(0));
}
void
__sanitizer_cov_trace_pc_guard_init(uint32_t *start, uint32_t *stop)
{
	/* assign a unique number to each guard */
	for (; start != stop; start++) {
		if (*start == 0) {
			if (nedges < KSANCOV_MAX_EDGES) {
				*start = (uint32_t)++nedges;
			}
		}
	}
}
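/*
 * Note: guards are zero-initialized at load, so the *start == 0 check above
 * makes repeated init calls over the same range idempotent. Guard values are
 * therefore the 1-based edge indices 1..nedges; once KSANCOV_MAX_EDGES is
 * exhausted, the remaining guards stay 0 and those edges go untracked.
 */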
void
__sanitizer_cov_pcs_init(uintptr_t *start, uintptr_t *stop)
{
#if USE_PC_TABLE
	static const uintptr_t pc_table_seen_flag = 0x100;

	for (; start < stop; start += 2) {
		uintptr_t pc = start[0];
		uintptr_t flags = start[1];

		/*
		 * This function gets called multiple times on the same range, so mark the
		 * ones we've seen using unused bits in the flags field.
		 */
		if (flags & pc_table_seen_flag) {
			continue;
		}

		start[1] |= pc_table_seen_flag;
		assert(npcs < KSANCOV_MAX_EDGES - 1);
		edge_addrs[++npcs] = pc;
	}
#else
	(void)start;
	(void)stop;
#endif
}
static void *
ksancov_do_map(uintptr_t base, size_t sz, vm_prot_t prot)
{
	kern_return_t kr;
	mach_port_t mem_entry = MACH_PORT_NULL;
	mach_vm_address_t user_addr = 0;
	memory_object_size_t size = sz;

	kr = mach_make_memory_entry_64(kernel_map,
	    &size,
	    (mach_vm_offset_t)base,
	    MAP_MEM_VM_SHARE | prot,
	    &mem_entry,
	    MACH_PORT_NULL);
	if (kr != KERN_SUCCESS) {
		return NULL;
	}

	kr = mach_vm_map_kernel(get_task_map(current_task()),
	    &user_addr,
	    size,
	    0,
	    VM_FLAGS_ANYWHERE,
	    VM_MAP_KERNEL_FLAGS_NONE,
	    VM_KERN_MEMORY_NONE,
	    mem_entry,
	    0,
	    FALSE,
	    prot,
	    prot,
	    VM_INHERIT_SHARE);

	/*
	 * At this point, either vm_map() has taken a reference on the memory entry
	 * and we can release our local reference, or the map failed and the entry
	 * was not consumed. Either way, drop ours.
	 */
	mach_memory_entry_port_release(mem_entry);

	if (kr != KERN_SUCCESS) {
		return NULL;
	}

	return (void *)user_addr;
}
/*
 * map the sancov buffer into the current process
 */
static int
ksancov_map(ksancov_dev_t d, uintptr_t *bufp, size_t *sizep)
{
	uintptr_t addr;
	size_t size = d->sz;

	if (d->mode == KS_MODE_TRACE) {
		if (!d->trace) {
			return EINVAL;
		}
		addr = (uintptr_t)d->trace;
	} else if (d->mode == KS_MODE_COUNTERS) {
		if (!d->counters) {
			return EINVAL;
		}
		addr = (uintptr_t)d->counters;
	} else {
		return EINVAL; /* not configured */
	}

	void *buf = ksancov_do_map(addr, size, VM_PROT_READ | VM_PROT_WRITE);
	if (buf == NULL) {
		return ENOMEM;
	}

	*bufp = (uintptr_t)buf;
	*sizep = size;

	return 0;
}
/*
 * map the edge -> pc mapping as read-only
 */
static int
ksancov_map_edgemap(uintptr_t *bufp, size_t *sizep)
{
	uintptr_t addr = (uintptr_t)ksancov_edgemap;
	size_t size = sizeof(struct ksancov_edgemap) + ksancov_edgemap->nedges * sizeof(uint32_t);

	void *buf = ksancov_do_map(addr, size, VM_PROT_READ);
	if (buf == NULL) {
		return ENOMEM;
	}

	*bufp = (uintptr_t)buf;
	*sizep = size;

	return 0;
}
/*
 * Device node management
 */

static int
ksancov_open(dev_t dev, int flags, int devtype, proc_t p)
{
#pragma unused(flags,devtype,p)
	const int minor_num = minor(dev);

	if (minor_num >= KSANCOV_MAX_DEV) {
		return EBUSY;
	}

	lck_rw_lock_exclusive(&ksancov_devs_lck);

	if (ksancov_devs[minor_num]) {
		lck_rw_unlock_exclusive(&ksancov_devs_lck);
		return EBUSY;
	}

	ksancov_dev_t d = create_dev(dev);
	if (!d) {
		lck_rw_unlock_exclusive(&ksancov_devs_lck);
		return ENOMEM;
	}
	ksancov_devs[minor_num] = d;

	lck_rw_unlock_exclusive(&ksancov_devs_lck);

	return 0;
}
static int
ksancov_trace_alloc(ksancov_dev_t d, size_t maxpcs)
{
	if (d->mode != KS_MODE_NONE) {
		return EBUSY; /* trace/counters already created */
	}
	assert(d->trace == NULL);

	vm_offset_t buf;
	size_t sz;
	if (os_mul_and_add_overflow(maxpcs, sizeof(uint32_t), sizeof(struct ksancov_trace), &sz)) {
		return EINVAL;
	}

	/* allocate the shared memory buffer */
	kern_return_t kr = kmem_alloc_flags(kernel_map, &buf, sz, VM_KERN_MEMORY_DIAG, KMA_ZERO);
	if (kr != KERN_SUCCESS) {
		return ENOMEM;
	}

	struct ksancov_trace *trace = (struct ksancov_trace *)buf;
	trace->magic = KSANCOV_TRACE_MAGIC;
	trace->offset = KSANCOV_PC_OFFSET;
	os_atomic_init(&trace->head, 0);
	os_atomic_init(&trace->enabled, 0);
	trace->maxpcs = (uint32_t)maxpcs;

	d->trace = trace;
	d->sz = sz;
	d->maxpcs = maxpcs;
	d->mode = KS_MODE_TRACE;

	return 0;
}
static int
ksancov_counters_alloc(ksancov_dev_t d)
{
	if (d->mode != KS_MODE_NONE) {
		return EBUSY; /* trace/counters already created */
	}
	assert(d->counters == NULL);

	vm_offset_t buf;
	size_t sz = sizeof(struct ksancov_counters) + ksancov_edgemap->nedges * sizeof(uint8_t);

	/* allocate the shared memory buffer */
	kern_return_t kr = kmem_alloc_flags(kernel_map, &buf, sz, VM_KERN_MEMORY_DIAG, KMA_ZERO);
	if (kr != KERN_SUCCESS) {
		return ENOMEM;
	}

	struct ksancov_counters *counters = (struct ksancov_counters *)buf;
	counters->magic = KSANCOV_COUNTERS_MAGIC;
	counters->nedges = ksancov_edgemap->nedges;
	os_atomic_init(&counters->enabled, 0);

	d->counters = counters;
	d->sz = sz;
	d->mode = KS_MODE_COUNTERS;

	return 0;
}
/*
 * attach a thread to a ksancov dev instance
 */
static int
ksancov_attach(ksancov_dev_t d, thread_t th)
{
	if (d->mode == KS_MODE_NONE) {
		return EINVAL; /* not configured */
	}

	if (th != current_thread()) {
		/* can only attach to self presently */
		return EINVAL;
	}

	ksancov_dev_t *devp = (void *)__sanitizer_get_thread_data(th);
	if (*devp) {
		return EBUSY; /* one dev per thread */
	}

	if (d->thread != THREAD_NULL) {
		ksancov_detach(d);
	}
	d->thread = th;
	thread_reference(d->thread);

	os_atomic_store(devp, d, relaxed);
	os_atomic_add(&active_devs, 1, relaxed);

	return 0;
}
extern void thread_wait(thread_t thread, boolean_t until_not_runnable);
/*
 * disconnect thread from ksancov dev
 */
static void
ksancov_detach(ksancov_dev_t d)
{
	if (d->thread == THREAD_NULL) {
		/* no thread attached */
		return;
	}

	/* disconnect dev from thread */
	ksancov_dev_t *devp = (void *)__sanitizer_get_thread_data(d->thread);
	if (*devp != NULL) {
		assert(*devp == d);
		os_atomic_store(devp, NULL, relaxed);
	}

	if (d->thread != current_thread()) {
		/* wait until it's safe to yank */
		thread_wait(d->thread, TRUE);
	}

	assert(active_devs >= 1);
	os_atomic_sub(&active_devs, 1, relaxed);

	/* drop our thread reference */
	thread_deallocate(d->thread);
	d->thread = THREAD_NULL;
}
static int
ksancov_close(dev_t dev, int flags, int devtype, proc_t p)
{
#pragma unused(flags,devtype,p)
	const int minor_num = minor(dev);

	lck_rw_lock_exclusive(&ksancov_devs_lck);
	ksancov_dev_t d = ksancov_devs[minor_num];
	ksancov_devs[minor_num] = NULL; /* dev no longer discoverable */
	lck_rw_unlock_exclusive(&ksancov_devs_lck);

	/*
	 * No need to lock d here as there is and will be no one holding its
	 * reference except for this thread and the one which is going to be
	 * detached below.
	 */

	if (d == NULL) {
		return ENXIO;
	}

	if (d->mode == KS_MODE_TRACE && d->trace) {
		os_atomic_store(&d->trace->enabled, 0, relaxed); /* stop tracing */
	} else if (d->mode == KS_MODE_COUNTERS && d->counters) {
		os_atomic_store(&d->counters->enabled, 0, relaxed); /* stop counting */
	}

	ksancov_detach(d);
	free_dev(d);

	return 0;
}
/*
 * Test the panic path: the caller only reaches the panic by guessing the
 * target value, which is practical only with coverage feedback.
 */
static void
ksancov_testpanic(volatile uint64_t guess)
{
	const uint64_t tgt = 0xf85de3b12891c817UL;

#define X(n) ((tgt & (0xfUL << (4*n))) == (guess & (0xfUL << (4*n))))

	/* one branch per nibble so coverage can observe partial matches */
	if (X(0)) { if (X(1)) { if (X(2)) { if (X(3)) {
	if (X(4)) { if (X(5)) { if (X(6)) { if (X(7)) {
	if (X(8)) { if (X(9)) { if (X(10)) { if (X(11)) {
	if (X(12)) { if (X(13)) { if (X(14)) { if (X(15)) {
		panic("ksancov: found test value\n");
	} } } } } } } } } } } } } } } }
}
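/*
 * Why the nibble-at-a-time comparison: each X(n) check is a separate branch,
 * so a coverage-guided fuzzer can lock in the 64-bit target one nibble at a
 * time (at most 16 guesses per nibble, roughly 256 tries overall) instead of
 * brute-forcing all 2^64 values blind. Reaching the panic demonstrates that
 * coverage feedback works end to end.
 */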
static int
ksancov_ioctl(dev_t dev, unsigned long cmd, caddr_t _data, int fflag, proc_t p)
{
#pragma unused(fflag,p)
	int ret = 0;
	struct ksancov_buf_desc *mcmd;
	void *data = (void *)_data;

	lck_rw_lock_shared(&ksancov_devs_lck);
	ksancov_dev_t d = ksancov_devs[minor(dev)];
	if (!d) {
		lck_rw_unlock_shared(&ksancov_devs_lck);
		return EINVAL; /* dev not open */
	}

	switch (cmd) {
	case KSANCOV_IOC_TRACE:
		lck_mtx_lock(&d->lock);
		ret = ksancov_trace_alloc(d, *(size_t *)data);
		lck_mtx_unlock(&d->lock);
		break;
	case KSANCOV_IOC_COUNTERS:
		lck_mtx_lock(&d->lock);
		ret = ksancov_counters_alloc(d);
		lck_mtx_unlock(&d->lock);
		break;
	case KSANCOV_IOC_MAP:
		mcmd = (struct ksancov_buf_desc *)data;
		lck_mtx_lock(&d->lock);
		ret = ksancov_map(d, &mcmd->ptr, &mcmd->sz);
		lck_mtx_unlock(&d->lock);
		break;
	case KSANCOV_IOC_MAP_EDGEMAP:
		mcmd = (struct ksancov_buf_desc *)data;
		ret = ksancov_map_edgemap(&mcmd->ptr, &mcmd->sz);
		break;
	case KSANCOV_IOC_START:
		lck_mtx_lock(&d->lock);
		ret = ksancov_attach(d, current_thread());
		lck_mtx_unlock(&d->lock);
		break;
	case KSANCOV_IOC_NEDGES:
		*(size_t *)data = nedges;
		break;
	case KSANCOV_IOC_TESTPANIC:
		ksancov_testpanic(*(uint64_t *)data);
		break;
	default:
		ret = EINVAL;
		break;
	}

	lck_rw_unlock_shared(&ksancov_devs_lck);

	return ret;
}
static int
ksancov_dev_clone(dev_t dev, int action)
{
#pragma unused(dev)
	if (action == DEVFS_CLONE_ALLOC) {
		/* hand out the first unused minor number */
		for (int i = 0; i < KSANCOV_MAX_DEV; i++) {
			if (ksancov_devs[i] == NULL) {
				return i;
			}
		}
	} else if (action == DEVFS_CLONE_FREE) {
		return 0;
	}

	return -1;
}
static const struct cdevsw ksancov_cdev = {
	.d_open = ksancov_open,
	.d_close = ksancov_close,
	.d_ioctl = ksancov_ioctl,

	.d_read = eno_rdwrt,
	.d_write = eno_rdwrt,
	.d_stop = eno_stop,
	.d_reset = eno_reset,
	.d_select = eno_select,
	.d_mmap = eno_mmap,
	.d_strategy = eno_strat,
	.d_type = 0
};
int
ksancov_init_dev(void)
{
	dev_major = cdevsw_add(-1, &ksancov_cdev);
	if (dev_major < 0) {
		printf("ksancov: failed to allocate major device node\n");
		return -1;
	}

	dev_t dev = makedev(dev_major, 0);
	void *node = devfs_make_node_clone(dev, DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666,
	    ksancov_dev_clone, KSANCOV_DEVNODE);
	if (!node) {
		printf("ksancov: failed to create device node\n");
		return -1;
	}

	/* This could be moved to the first use of /dev/ksancov to save memory */
	vm_offset_t buf;
	size_t sz = sizeof(struct ksancov_edgemap) + KSANCOV_MAX_EDGES * sizeof(uint32_t);

	kern_return_t kr = kmem_alloc_flags(kernel_map, &buf, sz, VM_KERN_MEMORY_DIAG, KMA_ZERO);
	if (kr != KERN_SUCCESS) {
		printf("ksancov: failed to allocate edge addr map\n");
		return -1;
	}

	ksancov_edgemap = (void *)buf;
	ksancov_edgemap->magic = KSANCOV_EDGEMAP_MAGIC;
	ksancov_edgemap->nedges = (uint32_t)nedges;
	ksancov_edgemap->offset = KSANCOV_PC_OFFSET;

	return 0;
}