/* apple/xnu: san/kasan-fakestack.c (xnu-7195.50.7.100.1) */
/*
 * Copyright (c) 2016-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <stdint.h>
#include <stdbool.h>
#include <kern/assert.h>
#include <kern/zalloc.h>
#include <mach/mach_vm.h>
#include <mach/vm_param.h>
#include <libkern/libkern.h>
#include <libkern/OSAtomic.h>
#include <sys/queue.h>
#include <kern/thread.h>
#include <kern/debug.h>

#include <kasan.h>
#include <kasan_internal.h>

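/*
 * Descriptive note: __asan_option_detect_stack_use_after_return is the global
 * flag that ASan-instrumented function prologues consult before calling
 * __asan_stack_malloc_<N>; clearing it makes instrumented code fall back to
 * the real stack. fakestack_enabled records whether fakestack was requested
 * at all (it is set elsewhere in the KASan code) and gates re-enabling the flag.
 */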
int __asan_option_detect_stack_use_after_return = 0;
int fakestack_enabled = 0;

#define FAKESTACK_HEADER_SZ 64
#define FAKESTACK_NUM_SZCLASS 7

#define FAKESTACK_UNUSED    0 /* waiting to be collected at next gc - forced by clang */
#define FAKESTACK_ALLOCATED 1
#define FAKESTACK_FREED     2

#if FAKESTACK

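/*
 * Each fake frame is preceded by this header: list linkage onto the owning
 * thread's fakestack list, the allocation site, and a packed flag/size/
 * size-class word. The header must fit in FAKESTACK_HEADER_SZ bytes, since
 * the frame proper always begins FAKESTACK_HEADER_SZ bytes past it.
 */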
struct fakestack_header {
	LIST_ENTRY(fakestack_header) list;
	void *site; /* allocation site */
	struct {
		uint8_t flag;
		vm_size_t realsz : 52;
		vm_size_t sz_class : 4;
	};
	uint64_t __pad0;
};
_Static_assert(sizeof(struct fakestack_header) <= FAKESTACK_HEADER_SZ, "fakestack_header size mismatch");

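/*
 * One zone per size class: class i serves fake frames of fakestack_min << i
 * bytes (64 bytes up to 4 KiB for the 7 classes actually backed by zones),
 * each carrying an extra FAKESTACK_HEADER_SZ bytes for the header.
 */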
static zone_t fakestack_zones[FAKESTACK_NUM_SZCLASS];
static char fakestack_names[FAKESTACK_NUM_SZCLASS][16];
static const unsigned long fakestack_min = 1 << 6;
static const unsigned long __unused fakestack_max = 1 << 16;

/*
 * Enter a fakestack critical section in a reentrant-safe fashion. Returns true on
 * success with the kasan lock held.
 */
static bool
thread_enter_fakestack(boolean_t *flags)
{
	thread_t cur = current_thread();
	if (cur && kasan_lock_held(cur)) {
		/* current thread is already in kasan - fail */
		return false;
	}
	kasan_lock(flags);
	return true;
}

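/*
 * Fakestack use can be suspended and resumed in a nested fashion. Once the
 * number of outstanding suspensions passes suspend_threshold, use-after-return
 * detection is switched off globally; it is turned back on (provided fakestack
 * is enabled at all) as resume calls drain the counter back down.
 */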
static volatile long suspend_count;
static const long suspend_threshold = 20;

void
kasan_fakestack_suspend(void)
{
	if (OSIncrementAtomicLong(&suspend_count) == suspend_threshold) {
		__asan_option_detect_stack_use_after_return = 0;
	}
}

void
kasan_fakestack_resume(void)
{
	long orig = OSDecrementAtomicLong(&suspend_count);
	assert(orig >= 0);

	if (fakestack_enabled && orig == suspend_threshold) {
		__asan_option_detect_stack_use_after_return = 1;
	}
}

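/* Does ptr fall within the current thread's real kernel stack? */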
static bool
ptr_is_on_stack(uptr ptr)
{
	vm_offset_t base = dtrace_get_kernel_stack(current_thread());

	if (ptr >= base && ptr < (base + kernel_stack_size)) {
		return true;
	} else {
		return false;
	}
}

/* free all unused fakestack entries */
void
kasan_fakestack_gc(thread_t thread)
{
	struct fakestack_header *cur, *tmp;
	LIST_HEAD(, fakestack_header) tofree = LIST_HEAD_INITIALIZER(tofree);

	boolean_t flags;
	if (!thread_enter_fakestack(&flags)) {
		panic("expected success entering fakestack\n");
	}

	/* move the unused objects off the per-thread list... */
	struct fakestack_header_list *head = &kasan_get_thread_data(thread)->fakestack_head;
	LIST_FOREACH_SAFE(cur, head, list, tmp) {
		if (cur->flag == FAKESTACK_UNUSED) {
			LIST_REMOVE(cur, list);
			LIST_INSERT_HEAD(&tofree, cur, list);
			cur->flag = FAKESTACK_FREED;
		}
	}

	kasan_unlock(flags);

	/* ... then actually free them */
	LIST_FOREACH_SAFE(cur, &tofree, list, tmp) {
		LIST_REMOVE(cur, list);

		zone_t zone = fakestack_zones[cur->sz_class];
		size_t sz = (fakestack_min << cur->sz_class) + FAKESTACK_HEADER_SZ;

		void *ptr = (void *)cur;
		kasan_free_internal(&ptr, &sz, KASAN_HEAP_FAKESTACK, &zone, cur->realsz, 0, FAKESTACK_QUARANTINE);
		if (ptr) {
			zfree(zone, ptr);
		}
	}
}

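/*
 * The last pointer-sized slot of a fake frame holds a back pointer to the
 * header's flag byte; the compiler-generated epilogue is expected to write 0
 * (FAKESTACK_UNUSED) through it (see the comment on FAKESTACK_UNUSED above),
 * after which the next gc pass reclaims the frame.
 */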
static uint8_t **
fakestack_flag_ptr(vm_offset_t ptr, vm_size_t sz)
{
	uint8_t **x = (uint8_t **)ptr;
	size_t idx = sz / 8;
	return &x[idx - 1];
}

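/*
 * Carve a fake frame of the requested size class out of the matching zone:
 * poison its redzones, fill in the header, link it onto the current thread's
 * list, and hand back the address just past the header. Returns 0 whenever a
 * fake frame cannot be produced, in which case the instrumented caller simply
 * uses its real stack frame.
 */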
static uptr ALWAYS_INLINE
kasan_fakestack_alloc(int sz_class, size_t realsz)
{
	if (!__asan_option_detect_stack_use_after_return) {
		return 0;
	}

	if (sz_class >= FAKESTACK_NUM_SZCLASS) {
		return 0;
	}

	uptr ret = 0;
	size_t sz = fakestack_min << sz_class;
	assert(realsz <= sz);
	assert(sz <= fakestack_max);
	zone_t zone = fakestack_zones[sz_class];

	boolean_t flags;
	if (!thread_enter_fakestack(&flags)) {
		return 0;
	}

	ret = (uptr)zalloc_noblock(zone);

	if (ret) {
		size_t leftrz = 32 + FAKESTACK_HEADER_SZ;
		size_t validsz = realsz - 32 - 16; /* remove redzones */
		size_t rightrz = sz - validsz - 32; /* 16 bytes, plus whatever is left over */
		struct fakestack_header *hdr = (struct fakestack_header *)ret;

		kasan_poison(ret, validsz, leftrz, rightrz, ASAN_STACK_RZ);

		hdr->site = __builtin_return_address(0);
		hdr->realsz = realsz;
		hdr->sz_class = sz_class;
		hdr->flag = FAKESTACK_ALLOCATED;
		ret += FAKESTACK_HEADER_SZ;

		*fakestack_flag_ptr(ret, sz) = &hdr->flag; /* back ptr to the slot */
		struct fakestack_header_list *head = &kasan_get_thread_data(current_thread())->fakestack_head;
		LIST_INSERT_HEAD(head, hdr, list);
	}

	kasan_unlock(flags);
	return ret;
}

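/*
 * Release a fake frame back to its zone (possibly via the quarantine). If dst
 * actually points into the real kernel stack (i.e. the allocation fell back),
 * there is nothing to free.
 */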
static void NOINLINE
kasan_fakestack_free(int sz_class, uptr dst, size_t realsz)
{
	if (ptr_is_on_stack(dst)) {
		return;
	}

	assert(realsz <= (fakestack_min << sz_class));

	vm_size_t sz = fakestack_min << sz_class;
	zone_t zone = fakestack_zones[sz_class];
	assert(zone);

	/* TODO: check the magic? */

	dst -= FAKESTACK_HEADER_SZ;
	sz += FAKESTACK_HEADER_SZ;

	struct fakestack_header *hdr = (struct fakestack_header *)dst;
	assert(hdr->sz_class == sz_class);

	boolean_t flags;
	kasan_lock(&flags);

	LIST_REMOVE(hdr, list);

	kasan_free_internal((void **)&dst, &sz, KASAN_HEAP_FAKESTACK, &zone, realsz, 1, FAKESTACK_QUARANTINE);
	if (dst) {
		zfree(zone, dst);
	}

	kasan_unlock(flags);
}

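/*
 * Mark every live fake frame on the given thread's list as FAKESTACK_UNUSED so
 * that a later kasan_fakestack_gc() pass can return it to its zone.
 */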
void NOINLINE
kasan_fakestack_drop(thread_t thread)
{
	boolean_t flags;
	if (!thread_enter_fakestack(&flags)) {
		panic("expected success entering fakestack\n");
	}

	struct fakestack_header_list *head = &kasan_get_thread_data(thread)->fakestack_head;
	struct fakestack_header *cur;
	LIST_FOREACH(cur, head, list) {
		if (cur->flag == FAKESTACK_ALLOCATED) {
			cur->flag = FAKESTACK_UNUSED;
		}
	}

	kasan_unlock(flags);
}

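/*
 * Create and pre-fill one zone per fakestack size class, then honor
 * fakestack_enabled by turning on the global use-after-return flag.
 */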
void NOINLINE
kasan_init_fakestack(void)
{
	/* allocate the fakestack zones */
	for (int i = 0; i < FAKESTACK_NUM_SZCLASS; i++) {
		unsigned long sz = (fakestack_min << i) + FAKESTACK_HEADER_SZ;
		size_t maxsz = 256UL * 1024;

		if (i <= 3) {
			/* size classes 0..3 are much more common */
			maxsz *= 4;
		}

		snprintf(fakestack_names[i], 16, "fakestack.%d", i);
		fakestack_zones[i] = zone_create_ext(fakestack_names[i], sz,
		    ZC_NOCALLOUT | ZC_NOGC | ZC_KASAN_NOREDZONE | ZC_KASAN_NOQUARANTINE,
		    ZONE_ID_ANY, ^(zone_t z) {
			zone_set_exhaustible(z, maxsz);
		});
		zfill(fakestack_zones[i], (int)maxsz / sz);
	}

	/* globally enable */
	if (fakestack_enabled) {
		__asan_option_detect_stack_use_after_return = 1;
	}
}

#else /* FAKESTACK */

void
kasan_init_fakestack(void)
{
	assert(__asan_option_detect_stack_use_after_return == 0);
}

void
kasan_unpoison_fakestack(thread_t __unused thread)
{
	assert(__asan_option_detect_stack_use_after_return == 0);
}

static uptr
kasan_fakestack_alloc(int __unused sz_class, size_t __unused realsz)
{
	assert(__asan_option_detect_stack_use_after_return == 0);
	return 0;
}

static void
kasan_fakestack_free(int __unused sz_class, uptr __unused dst, size_t __unused realsz)
{
	assert(__asan_option_detect_stack_use_after_return == 0);
	panic("fakestack_free called on non-FAKESTACK config\n");
}

#endif

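/* Per-thread setup: every thread starts with an empty fakestack list. */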
void
kasan_init_thread(struct kasan_thread_data *td)
{
	LIST_INIT(&td->fakestack_head);
}

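/*
 * Entry points called by ASan-instrumented code: one __asan_stack_malloc_<N> /
 * __asan_stack_free_<N> pair per size class the compiler may emit. Classes 7
 * and above have no backing zone here (FAKESTACK_NUM_SZCLASS is 7), so their
 * allocations always return 0 and the caller falls back to the real stack.
 */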
#define FAKESTACK_DECLARE(szclass) \
	uptr __asan_stack_malloc_##szclass(size_t sz) { return kasan_fakestack_alloc(szclass, sz); } \
	void __asan_stack_free_##szclass(uptr dst, size_t sz) { kasan_fakestack_free(szclass, dst, sz); }

FAKESTACK_DECLARE(0)
FAKESTACK_DECLARE(1)
FAKESTACK_DECLARE(2)
FAKESTACK_DECLARE(3)
FAKESTACK_DECLARE(4)
FAKESTACK_DECLARE(5)
FAKESTACK_DECLARE(6)
FAKESTACK_DECLARE(7)
FAKESTACK_DECLARE(8)
FAKESTACK_DECLARE(9)
FAKESTACK_DECLARE(10)