/*
 * Copyright (c) 2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifndef _KSANCOV_H_
#define _KSANCOV_H_

#include <stdint.h>
#include <stdatomic.h>
#include <sys/ioccom.h>

#define KSANCOV_DEVNODE "ksancov"
#define KSANCOV_PATH    "/dev/" KSANCOV_DEVNODE

struct ksancov_buf_desc {
    uintptr_t ptr; /* ptr to shared buffer [out] */
    size_t sz;     /* size of shared buffer [out] */
};

/* Set the coverage mode (see the usage notes below). */
#define KSANCOV_IOC_TRACE        _IOW('K', 1, size_t) /* number of pcs */
#define KSANCOV_IOC_COUNTERS     _IO('K', 2)

/* Establish a shared mapping of the coverage buffer. */
#define KSANCOV_IOC_MAP          _IOWR('K', 8, struct ksancov_buf_desc)

/* Establish a shared mapping of the edge address buffer. */
#define KSANCOV_IOC_MAP_EDGEMAP  _IOWR('K', 9, struct ksancov_buf_desc)

/* Log the current thread */
#define KSANCOV_IOC_START        _IOW('K', 10, uintptr_t)

/* Query the number of instrumented edges. */
#define KSANCOV_IOC_NEDGES       _IOR('K', 50, size_t)

/* Trigger a test kernel panic. */
#define KSANCOV_IOC_TESTPANIC    _IOW('K', 20, uint64_t)

/*
 * shared kernel-user mapping
 */

#define KSANCOV_MAX_EDGES       512UL*1024
#define KSANCOV_MAX_HITS        UINT8_MAX
#define KSANCOV_TRACE_MAGIC     (uint32_t)0x5AD17F5BU
#define KSANCOV_COUNTERS_MAGIC  (uint32_t)0x5AD27F6BU
#define KSANCOV_EDGEMAP_MAGIC   (uint32_t)0x5AD37F7BU

struct ksancov_header {
    uint32_t magic;
    _Atomic uint32_t enabled;
};

struct ksancov_trace {
    /* userspace R/O fields */
    union {
        struct ksancov_header hdr;
        struct {
            uint32_t magic;
            _Atomic uint32_t enabled;
        };
    };

    uintptr_t offset; /* pc entries relative to this */
    uint32_t maxpcs;

    _Atomic uint32_t head;
    uint32_t pcs[];
};

struct ksancov_counters {
    union {
        struct ksancov_header hdr;
        struct {
            uint32_t magic;
            _Atomic uint32_t enabled;
        };
    };

    uint32_t nedges; /* total number of edges */
    uint8_t hits[];  /* hits on each edge (8-bit saturating) */
};

struct ksancov_edgemap {
    uint32_t magic;
    uint32_t nedges;
    uintptr_t offset; /* edge addrs relative to this */
    uint32_t addrs[]; /* address of each edge relative to 'offset' */
};

#if XNU_KERNEL_PRIVATE
/*
 * On arm64, VM_MIN_KERNEL_ADDRESS is too far from %pc to fit into a 32-bit value. As a result
 * ksancov reports invalid %pcs. To make at least kernel %pc values correct, a different base has
 * to be used for arm.
 */
#if defined(__x86_64__) || defined(__i386__)
#define KSANCOV_PC_OFFSET VM_MIN_KERNEL_ADDRESS
#elif defined(__arm__) || defined(__arm64__)
#define KSANCOV_PC_OFFSET VM_KERNEL_LINK_ADDRESS
#else
#error "Unsupported platform"
#endif
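
/*
 * Illustrative sketch only (not part of the interface): assuming the kernel
 * records each pc as a 32-bit delta from KSANCOV_PC_OFFSET and exports that
 * same base through the 'offset' field of the shared buffers, the round trip
 * looks roughly like this. The helper names are hypothetical.
 */
#if 0 /* example, not compiled */
static inline uint32_t
ksancov_example_encode_pc(uintptr_t pc)
{
    /* kernel side: keep only the 32-bit distance from the chosen base */
    return (uint32_t)(pc - KSANCOV_PC_OFFSET);
}

static inline uintptr_t
ksancov_example_decode_pc(uint32_t stored, uintptr_t offset)
{
    /* userspace side: add the exported base back (cf. ksancov_trace_entry) */
    return (uintptr_t)stored + offset;
}
#endif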

int ksancov_init_dev(void);
void **__sanitizer_get_thread_data(thread_t);

/*
 * SanitizerCoverage ABI
 */
extern void __sanitizer_cov_trace_pc_guard(uint32_t *guard);
extern void __sanitizer_cov_trace_pc_guard_init(uint32_t *start, uint32_t *stop);
extern void __sanitizer_cov_pcs_init(uintptr_t *start, uintptr_t *stop);
extern void __sanitizer_cov_trace_pc(void);
extern void __sanitizer_cov_trace_pc_indirect(void *callee);

#endif /* XNU_KERNEL_PRIVATE */

#ifndef KERNEL

/* Headers needed by the userspace helpers below. */
#include <unistd.h>
#include <sys/ioctl.h>
#include <assert.h>
#include <errno.h>
#include <strings.h>

/*
 * ksancov userspace API
 *
 * Usage:
 * 1) open the ksancov device
 * 2) set the coverage mode (trace or edge counters)
 * 3) map the coverage buffer
 * 4) start the trace on a thread
 * 5) flip the enable bit
 */
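
/*
 * A minimal end-to-end sketch of the steps above, assuming a trace-mode
 * session on the calling thread. The function name, buffer size, and reduced
 * error handling are illustrative only; this example is not part of the
 * header's API surface.
 */
#if 0 /* example, not compiled */
static int
ksancov_example_session(void)
{
    int fd = ksancov_open();                 /* 1) open the ksancov device */
    if (fd < 0) {
        return -1;
    }

    if (ksancov_mode_trace(fd, 64 * 1024)) { /* 2) trace mode, 64k pc slots */
        return -1;
    }

    uintptr_t addr = 0;
    size_t sz = 0;
    if (ksancov_map(fd, &addr, &sz)) {       /* 3) map the coverage buffer */
        return -1;
    }
    struct ksancov_trace *trace = (struct ksancov_trace *)addr;

    if (ksancov_thread_self(fd)) {           /* 4) attach tracing to this thread */
        return -1;
    }

    ksancov_start(trace);                    /* 5) flip the enable bit */
    /* ... exercise the kernel here (syscalls, ioctls, etc.) ... */
    ksancov_stop(trace);

    return 0;
}
#endif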

static inline int
ksancov_open(void)
{
    return open(KSANCOV_PATH, 0);
}

static inline int
ksancov_map(int fd, uintptr_t *buf, size_t *sz)
{
    int ret;
    struct ksancov_buf_desc mc = {0};

    ret = ioctl(fd, KSANCOV_IOC_MAP, &mc);
    if (ret == -1) {
        return errno;
    }

    *buf = mc.ptr;
    if (sz) {
        *sz = mc.sz;
    }

    struct ksancov_trace *trace = (void *)mc.ptr;
    assert(trace->magic == KSANCOV_TRACE_MAGIC ||
        trace->magic == KSANCOV_COUNTERS_MAGIC);

    return 0;
}

static inline int
ksancov_map_edgemap(int fd, uintptr_t *buf, size_t *sz)
{
    int ret;
    struct ksancov_buf_desc mc = {0};

    ret = ioctl(fd, KSANCOV_IOC_MAP_EDGEMAP, &mc);
    if (ret == -1) {
        return errno;
    }

    *buf = mc.ptr;
    if (sz) {
        *sz = mc.sz;
    }

    struct ksancov_trace *trace = (void *)mc.ptr;
    assert(trace->magic == KSANCOV_EDGEMAP_MAGIC);

    return 0;
}

static inline size_t
ksancov_nedges(int fd)
{
    size_t nedges;
    int ret = ioctl(fd, KSANCOV_IOC_NEDGES, &nedges);
    if (ret == -1) {
        return SIZE_MAX;
    }
    return nedges;
}

static inline int
ksancov_mode_trace(int fd, size_t entries)
{
    int ret;
    ret = ioctl(fd, KSANCOV_IOC_TRACE, &entries);
    if (ret == -1) {
        return errno;
    }
    return 0;
}

static inline int
ksancov_mode_counters(int fd)
{
    int ret;
    ret = ioctl(fd, KSANCOV_IOC_COUNTERS);
    if (ret == -1) {
        return errno;
    }
    return 0;
}

static inline int
ksancov_thread_self(int fd)
{
    int ret;
    uintptr_t th = 0;
    ret = ioctl(fd, KSANCOV_IOC_START, &th);
    if (ret == -1) {
        return errno;
    }
    return 0;
}

static inline void
ksancov_start(void *buf)
{
    struct ksancov_header *hdr = (struct ksancov_header *)buf;
    atomic_store_explicit(&hdr->enabled, 1, memory_order_relaxed);
}

static inline void
ksancov_stop(void *buf)
{
    struct ksancov_header *hdr = (struct ksancov_header *)buf;
    atomic_store_explicit(&hdr->enabled, 0, memory_order_relaxed);
}

static inline void
ksancov_reset(void *buf)
{
    struct ksancov_header *hdr = (struct ksancov_header *)buf;
    if (hdr->magic == KSANCOV_TRACE_MAGIC) {
        struct ksancov_trace *trace = (struct ksancov_trace *)buf;
        atomic_store_explicit(&trace->head, 0, memory_order_relaxed);
    } else if (hdr->magic == KSANCOV_COUNTERS_MAGIC) {
        struct ksancov_counters *counters = (struct ksancov_counters *)buf;
        bzero(counters->hits, counters->nedges);
    }
}

static inline uintptr_t
ksancov_edge_addr(struct ksancov_edgemap *addrs, size_t idx)
{
    if (idx >= addrs->nedges) {
        return 0;
    }
    return addrs->addrs[idx] + addrs->offset;
}

static inline size_t
ksancov_trace_max_pcs(struct ksancov_trace *trace)
{
    return trace->maxpcs;
}

static inline uintptr_t
ksancov_trace_offset(struct ksancov_trace *trace)
{
    return trace->offset;
}

static inline size_t
ksancov_trace_head(struct ksancov_trace *trace)
{
    size_t maxlen = trace->maxpcs;
    size_t head = atomic_load_explicit(&trace->head, memory_order_acquire);
    return head < maxlen ? head : maxlen;
}

static inline uintptr_t
ksancov_trace_entry(struct ksancov_trace *trace, size_t i)
{
    if (i >= trace->head) {
        return 0;
    }
    return trace->pcs[i] + trace->offset;
}
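
/*
 * A hedged sketch of draining a trace buffer with the accessors above:
 * stop collection, walk the recorded entries, then reset for the next run.
 * The function name and callback type are illustrative only.
 */
#if 0 /* example, not compiled */
static void
ksancov_example_drain(struct ksancov_trace *trace, void (*visit)(uintptr_t pc))
{
    ksancov_stop(trace);

    /* head is clamped to maxpcs, so this never walks past the buffer */
    size_t n = ksancov_trace_head(trace);
    for (size_t i = 0; i < n; i++) {
        visit(ksancov_trace_entry(trace, i));
    }

    ksancov_reset(trace);
}
#endif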

#endif /* !KERNEL */

#endif /* _KSANCOV_H_ */