/*
 * Copyright (c) 2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <stdint.h>
#include <stdbool.h>
#include <kern/assert.h>
#include <kern/zalloc.h>
#include <mach/mach_vm.h>
#include <mach/vm_param.h>
#include <libkern/libkern.h>
#include <libkern/OSAtomic.h>
#include <sys/queue.h>
#include <kern/thread.h>
#include <kern/debug.h>

#include <kasan.h>
#include <kasan_internal.h>

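/*
 * Run-time switch for stack use-after-return detection. Compiler-instrumented
 * code tests this flag before calling the __asan_stack_malloc_* hooks declared
 * at the bottom of this file; it stays zero until kasan_init_fakestack() has
 * set up the backing zones.
 */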
int __asan_option_detect_stack_use_after_return = 0;

#define FAKESTACK_HEADER_SZ 64
#define FAKESTACK_NUM_SZCLASS 7

#define FAKESTACK_FREED 0 /* forced by clang */
#define FAKESTACK_ALLOCATED 1

#if FAKESTACK

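/*
 * Header stored at the base of every fake stack block: linkage for the owning
 * thread's fakestack list, the allocation site (caller return address), an
 * allocated/freed flag, the exact size the caller asked for, and the size
 * class the block came from.
 */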
struct fakestack_header {
	LIST_ENTRY(fakestack_header) list;
	void *site; /* allocation site */
	struct {
		uint8_t flag;
		vm_size_t realsz : 52;
		vm_size_t sz_class : 4;
	};
	uint64_t __pad0;
};
_Static_assert(sizeof(struct fakestack_header) <= FAKESTACK_HEADER_SZ, "fakestack_header size mismatch");

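/*
 * One zone per size class. Class i serves blocks of fakestack_min << i bytes
 * (64 bytes up to 4 KB across the seven classes), each preceded by a
 * FAKESTACK_HEADER_SZ header.
 */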
static zone_t fakestack_zones[FAKESTACK_NUM_SZCLASS];
static char fakestack_names[FAKESTACK_NUM_SZCLASS][16];
static const unsigned long fakestack_min = 1 << 6;
static const unsigned long __unused fakestack_max = 1 << 16;

/*
 * Mark the current thread as being inside a fakestack operation, to avoid
 * reentrancy. While the flag is set, fakestack allocation is disabled.
 * Returns the previous value, i.e. nonzero if the thread was already inside
 * a fakestack operation.
 */
static boolean_t
thread_enter_fakestack(void)
{
	thread_t thread = current_thread();
	if (thread) {
		return OSIncrementAtomic(&kasan_get_thread_data(thread)->in_fakestack);
	} else {
		return 0;
	}
}

static boolean_t
thread_exit_fakestack(void)
{
	thread_t thread = current_thread();
	if (thread) {
		return OSDecrementAtomic(&kasan_get_thread_data(thread)->in_fakestack);
	} else {
		return 0;
	}
}

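/*
 * Return true if 'ptr' lies within the current thread's real kernel stack,
 * in which case the object was never moved to a fake stack block.
 */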
static bool
ptr_is_on_stack(uptr ptr)
{
	vm_offset_t base = dtrace_get_kernel_stack(current_thread());

	if (ptr >= base && ptr < (base + kernel_stack_size)) {
		return true;
	} else {
		return false;
	}
}

/* free all unused fakestack entries */
static void NOINLINE
kasan_fakestack_gc(thread_t thread)
{
	struct fakestack_header *cur, *tmp;
	LIST_HEAD(, fakestack_header) tofree = LIST_HEAD_INITIALIZER(tofree);

	/* move all the freed elements off the main list */
	struct fakestack_header_list *head = &kasan_get_thread_data(thread)->fakestack_head;
	LIST_FOREACH_SAFE(cur, head, list, tmp) {
		if (cur->flag == FAKESTACK_FREED) {
			LIST_REMOVE(cur, list);
			LIST_INSERT_HEAD(&tofree, cur, list);
		}
	}

	/* ... then actually free them */
	LIST_FOREACH_SAFE(cur, &tofree, list, tmp) {
		zone_t zone = fakestack_zones[cur->sz_class];
		size_t sz = (fakestack_min << cur->sz_class) + FAKESTACK_HEADER_SZ;
		LIST_REMOVE(cur, list);

		void *ptr = (void *)cur;
		kasan_free_internal(&ptr, &sz, KASAN_HEAP_FAKESTACK, &zone, cur->realsz, 1, FAKESTACK_QUARANTINE);
		if (ptr) {
			zfree(zone, ptr);
		}
	}
}

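/*
 * The last pointer-sized slot of a fake stack block holds a back pointer to
 * the header's flag byte, so the block can be marked FAKESTACK_FREED (0, the
 * value the compiler-generated epilogue stores) without walking the lists.
 */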
static uint8_t **
fakestack_flag_ptr(vm_offset_t ptr, vm_size_t sz)
{
	uint8_t **x = (uint8_t **)ptr;
	size_t idx = sz / 8;
	return &x[idx - 1];
}

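/*
 * Carve a fake stack block out of the zone for 'sz_class'. Returns the
 * address just past the header, or 0 if the caller should fall back to the
 * real stack (feature disabled, size class too large, reentrant call, or
 * the zone is empty).
 */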
static uptr ALWAYS_INLINE
kasan_fakestack_alloc(int sz_class, size_t realsz)
{
	if (!__asan_option_detect_stack_use_after_return) {
		return 0;
	}

	if (sz_class >= FAKESTACK_NUM_SZCLASS) {
		return 0;
	}

	boolean_t flags;
	uptr ret = 0;
	size_t sz = fakestack_min << sz_class;
	assert(realsz <= sz);
	assert(sz <= fakestack_max);
	zone_t zone = fakestack_zones[sz_class];

	if (thread_enter_fakestack()) {
		/* already inside a fakestack operation on this thread; rebalance
		 * the counter and fall back to the real stack */
		thread_exit_fakestack();
		return 0;
	}

	kasan_lock(&flags);
	kasan_fakestack_gc(current_thread()); /* XXX: optimal? */

	ret = (uptr)zget(zone);

	thread_exit_fakestack();

	if (ret) {
		size_t leftrz = 32 + FAKESTACK_HEADER_SZ;
		size_t validsz = realsz - 32 - 16; /* remove redzones */
		size_t rightrz = sz - validsz - 32; /* 16 bytes, plus whatever is left over */
		struct fakestack_header *hdr = (struct fakestack_header *)ret;

		kasan_poison(ret, validsz, leftrz, rightrz, ASAN_STACK_RZ);

		hdr->site = __builtin_return_address(0);
		hdr->realsz = realsz;
		hdr->sz_class = sz_class;
		hdr->flag = FAKESTACK_ALLOCATED;
		ret += FAKESTACK_HEADER_SZ;

		*fakestack_flag_ptr(ret, sz) = &hdr->flag; /* back ptr to the flag */
		struct fakestack_header_list *head = &kasan_get_thread_data(current_thread())->fakestack_head;
		LIST_INSERT_HEAD(head, hdr, list);
	}

	kasan_unlock(flags);
	return ret;
}

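/*
 * Free path for the __asan_stack_free_* hooks. If 'dst' still points into
 * the real kernel stack the allocation never used a fake block, so there is
 * nothing to do; otherwise rewind to the header, unlink it and release the
 * element (possibly via the KASAN quarantine).
 */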
static void NOINLINE
kasan_fakestack_free(int sz_class, uptr dst, size_t realsz)
{
	if (ptr_is_on_stack(dst)) {
		return;
	}

	assert(realsz <= (fakestack_min << sz_class));
	assert(__asan_option_detect_stack_use_after_return);

	vm_size_t sz = fakestack_min << sz_class;
	zone_t zone = fakestack_zones[sz_class];
	assert(zone);

	/* TODO: check the magic? */

	dst -= FAKESTACK_HEADER_SZ;
	sz += FAKESTACK_HEADER_SZ;

	struct fakestack_header *hdr = (struct fakestack_header *)dst;
	assert(hdr->sz_class == sz_class);

	boolean_t flags;
	kasan_lock(&flags);

	LIST_REMOVE(hdr, list);

	kasan_free_internal((void **)&dst, &sz, KASAN_HEAP_FAKESTACK, &zone, realsz, 1, FAKESTACK_QUARANTINE);
	if (dst) {
		zfree(zone, (void *)dst);
	}

	kasan_unlock(flags);
}

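/*
 * Mark every fake stack block owned by 'thread' as freed, then garbage
 * collect them, releasing all of the thread's outstanding fake stack memory.
 */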
void NOINLINE
kasan_unpoison_fakestack(thread_t thread)
{
	if (!__asan_option_detect_stack_use_after_return) {
		return;
	}

	boolean_t flags;
	kasan_lock(&flags);

	thread_enter_fakestack();

	struct fakestack_header_list *head = &kasan_get_thread_data(thread)->fakestack_head;
	struct fakestack_header *cur;
	LIST_FOREACH(cur, head, list) {
		if (cur->flag == FAKESTACK_ALLOCATED) {
			cur->flag = FAKESTACK_FREED;
		}
	}

	kasan_fakestack_gc(thread);
	thread_exit_fakestack();
	kasan_unlock(flags);
}

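/*
 * Create and prefill one zone per size class, then enable use-after-return
 * detection globally. The zones are not expandable, so they must be filled
 * up front; once a zone runs dry, zget() returns NULL and allocations in
 * that class fall back to the real stack.
 */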
void NOINLINE
kasan_init_fakestack(void)
{
	/* allocate the fakestack zones */
	for (int i = 0; i < FAKESTACK_NUM_SZCLASS; i++) {
		zone_t z;
		unsigned long sz = (fakestack_min << i) + FAKESTACK_HEADER_SZ;
		size_t maxsz = 256UL * 1024;

		if (i <= 3) {
			/* size classes 0..3 are much more common */
			maxsz *= 4;
		}

		snprintf(fakestack_names[i], 16, "fakestack.%d", i);
		z = zinit(sz, maxsz, sz, fakestack_names[i]);
		assert(z);
		zone_change(z, Z_NOCALLOUT, TRUE);
		zone_change(z, Z_EXHAUST, TRUE);
		zone_change(z, Z_EXPAND, FALSE);
		zone_change(z, Z_COLLECT, FALSE);
		zone_change(z, Z_KASAN_QUARANTINE, FALSE);
		zfill(z, maxsz / sz);
		fakestack_zones[i] = z;
	}

	/* globally enable */
	__asan_option_detect_stack_use_after_return = 1;
}

#else /* FAKESTACK */

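/*
 * Stubs for configurations built without FAKESTACK: use-after-return
 * detection stays disabled, the alloc hook always falls back to the real
 * stack, and the free hook should never be reached.
 */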
void
kasan_init_fakestack(void)
{
	assert(__asan_option_detect_stack_use_after_return == 0);
}

void
kasan_unpoison_fakestack(thread_t __unused thread)
{
	assert(__asan_option_detect_stack_use_after_return == 0);
}

static uptr
kasan_fakestack_alloc(int __unused sz_class, size_t __unused realsz)
{
	assert(__asan_option_detect_stack_use_after_return == 0);
	return 0;
}

static void
kasan_fakestack_free(int __unused sz_class, uptr __unused dst, size_t __unused realsz)
{
	assert(__asan_option_detect_stack_use_after_return == 0);
	panic("fakestack_free called on non-FAKESTACK config\n");
}

#endif /* FAKESTACK */

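/*
 * Reset the per-thread KASAN state: no outstanding fake stack blocks and the
 * fakestack reentrancy counter cleared.
 */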
void
kasan_init_thread(struct kasan_thread_data *td)
{
	td->in_fakestack = 0;
	LIST_INIT(&td->fakestack_head);
}

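/*
 * Entry points emitted by the compiler: one __asan_stack_malloc_N /
 * __asan_stack_free_N pair per size class. Classes above
 * FAKESTACK_NUM_SZCLASS - 1 are still declared so the symbols resolve, but
 * kasan_fakestack_alloc() rejects them and those locals stay on the real
 * stack.
 *
 * Roughly (illustrative sketch, not the exact codegen), an instrumented
 * function asks for a fake frame first and falls back to the real stack:
 *
 *	uptr frame = 0;
 *	if (__asan_option_detect_stack_use_after_return) {
 *		frame = __asan_stack_malloc_0(frame_size);   // size class 0 here
 *	}
 *	if (!frame) {
 *		frame = (uptr)alloca(frame_size);            // real stack fallback
 *	}
 *	...
 *	// on return, the block is marked FAKESTACK_FREED through the saved
 *	// flag pointer, or __asan_stack_free_0(frame, frame_size) is called
 */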
#define FAKESTACK_DECLARE(szclass) \
	uptr __asan_stack_malloc_##szclass(size_t sz) { return kasan_fakestack_alloc(szclass, sz); } \
	void __asan_stack_free_##szclass(uptr dst, size_t sz) { kasan_fakestack_free(szclass, dst, sz); }

FAKESTACK_DECLARE(0)
FAKESTACK_DECLARE(1)
FAKESTACK_DECLARE(2)
FAKESTACK_DECLARE(3)
FAKESTACK_DECLARE(4)
FAKESTACK_DECLARE(5)
FAKESTACK_DECLARE(6)
FAKESTACK_DECLARE(7)
FAKESTACK_DECLARE(8)
FAKESTACK_DECLARE(9)
FAKESTACK_DECLARE(10)