/*
 * Copyright (c) 2003-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Kernel stack management routines.
 */

#include <mach/mach_host.h>
#include <mach/mach_types.h>
#include <mach/processor_set.h>

#include <kern/kern_types.h>
#include <kern/lock_group.h>
#include <kern/mach_param.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/ledger.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <mach_debug.h>
#include <san/kasan.h>

/*
 * We allocate stacks from generic kernel VM.
 *
 * The stack_free_list can only be accessed at splsched,
 * because stack_alloc_try/thread_invoke operate at splsched.
 */

decl_simple_lock_data(static, stack_lock_data);
#define stack_lock()    simple_lock(&stack_lock_data, LCK_GRP_NULL)
#define stack_unlock()  simple_unlock(&stack_lock_data)

#define STACK_CACHE_SIZE 2
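/*
 * In addition to the global free list, each processor keeps a small
 * private cache of free stacks (see stack_free_stack() and
 * stack_alloc_try() below); STACK_CACHE_SIZE above bounds the number of
 * stacks parked in each per-processor cache, letting the common
 * free/alloc path skip stack_lock entirely.
 */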

static vm_offset_t stack_free_list;

static unsigned int stack_free_count, stack_free_hiwat; /* free list count */
static unsigned int stack_hiwat;
unsigned int stack_total; /* current total count */
unsigned long long stack_allocs; /* total count of allocations */

static int stack_fake_zone_index = -1; /* index in zone_info array */

static unsigned int stack_free_target;
static int stack_free_delta;

static unsigned int stack_new_count; /* total new stack allocations */

static vm_offset_t stack_addr_mask;

unsigned int kernel_stack_pages;
vm_offset_t kernel_stack_size;
vm_offset_t kernel_stack_mask;
vm_offset_t kernel_stack_depth_max;

/*
 * The next field is at the base of the stack,
 * so the low end is left unsullied.
 */
#define stack_next(stack) \
    (*((vm_offset_t *)((stack) + kernel_stack_size) - 1))
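
/*
 * Rough picture of one stack allocation as set up by stack_alloc_internal()
 * below (higher addresses toward the top):
 *
 *    +---------------------+ <- stack + kernel_stack_size + PAGE_SIZE
 *    |      guard page     |
 *    +---------------------+ <- stack + kernel_stack_size
 *    |  stack_next() link  |    (only meaningful while the stack sits on the
 *    | . . . . . . . . . . |     free list or in a per-processor cache)
 *    |     usable stack    |    (kernel_stack_size bytes, grows downward)
 *    +---------------------+ <- stack, the value handed to machine_stack_attach()
 *    |      guard page     |
 *    +---------------------+ <- base of the kernel_memory_allocate() allocation
 */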

static inline int
log2(vm_offset_t size)
{
    int result;
    for (result = 0; size > 0; result++) {
        size >>= 1;
    }
    return result;
}

static inline vm_offset_t
roundup_pow2(vm_offset_t size)
{
    return 1UL << (log2(size - 1) + 1);
}
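
/*
 * Note that log2() above actually returns the number of significant bits
 * in its argument (one more than the integer log2).  roundup_pow2() is
 * used only by stack_init() below, to derive stack_addr_mask (the
 * power-of-two alignment mask later passed to kernel_memory_allocate())
 * and kernel_stack_mask, its complement.
 */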

static vm_offset_t stack_alloc_internal(void);
static void stack_free_stack(vm_offset_t);

void
stack_init(void)
{
    simple_lock_init(&stack_lock_data, 0);

    kernel_stack_pages = KERNEL_STACK_SIZE / PAGE_SIZE;
    kernel_stack_size = KERNEL_STACK_SIZE;
    kernel_stack_mask = -KERNEL_STACK_SIZE;
    kernel_stack_depth_max = 0;

    if (PE_parse_boot_argn("kernel_stack_pages",
        &kernel_stack_pages,
        sizeof(kernel_stack_pages))) {
        kernel_stack_size = kernel_stack_pages * PAGE_SIZE;
        printf("stack_init: kernel_stack_pages=%d kernel_stack_size=%p\n",
            kernel_stack_pages, (void *) kernel_stack_size);
    }

    if (kernel_stack_size < round_page(kernel_stack_size)) {
        panic("stack_init: stack size %p not a multiple of page size %d\n",
            (void *) kernel_stack_size, PAGE_SIZE);
    }

    stack_addr_mask = roundup_pow2(kernel_stack_size) - 1;
    kernel_stack_mask = ~stack_addr_mask;
}

/*
 * stack_alloc:
 *
 * Allocate a stack for a thread; may block.
 */

static vm_offset_t
stack_alloc_internal(void)
{
    vm_offset_t stack = 0;
    spl_t s;
    int flags = 0;
    kern_return_t kr = KERN_SUCCESS;

    s = splsched();
    stack_lock();
    stack_allocs++;
    stack = stack_free_list;
    if (stack != 0) {
        stack_free_list = stack_next(stack);
        stack_free_count--;
    } else {
        if (++stack_total > stack_hiwat) {
            stack_hiwat = stack_total;
        }
        stack_new_count++;
    }
    stack_free_delta--;
    stack_unlock();
    splx(s);

    if (stack == 0) {
        /*
         * Request guard pages on either side of the stack. Ask
         * kernel_memory_allocate() for two extra pages to account
         * for these.
         */

        flags = KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KSTACK | KMA_KOBJECT | KMA_ZERO;
        kr = kernel_memory_allocate(kernel_map, &stack,
            kernel_stack_size + (2 * PAGE_SIZE),
            stack_addr_mask,
            flags,
            VM_KERN_MEMORY_STACK);
        if (kr != KERN_SUCCESS) {
            panic("stack_alloc: kernel_memory_allocate(size:0x%llx, mask: 0x%llx, flags: 0x%x) failed with %d\n", (uint64_t)(kernel_stack_size + (2 * PAGE_SIZE)), (uint64_t)stack_addr_mask, flags, kr);
        }

        /*
         * The stack address that comes back is the address of the lower
         * guard page. Skip past it to get the actual stack base address.
         */

        stack += PAGE_SIZE;
    }
    return stack;
}

void
stack_alloc(
    thread_t thread)
{
    assert(thread->kernel_stack == 0);
    machine_stack_attach(thread, stack_alloc_internal());
}

void
stack_handoff(thread_t from, thread_t to)
{
    assert(from == current_thread());
    machine_stack_handoff(from, to);
}

/*
 * stack_free:
 *
 * Detach and free the stack for a thread.
 */
void
stack_free(
    thread_t thread)
{
    vm_offset_t stack = machine_stack_detach(thread);

    assert(stack);
    if (stack != thread->reserved_stack) {
        stack_free_stack(stack);
    }
}

void
stack_free_reserved(
    thread_t thread)
{
    if (thread->reserved_stack != thread->kernel_stack) {
        stack_free_stack(thread->reserved_stack);
    }
}

static void
stack_free_stack(
    vm_offset_t stack)
{
    struct stack_cache *cache;
    spl_t s;

#if KASAN_DEBUG
    /* Sanity check - stack should be unpoisoned by now */
    assert(kasan_check_shadow(stack, kernel_stack_size, 0));
#endif

    s = splsched();
    cache = &PROCESSOR_DATA(current_processor(), stack_cache);
    if (cache->count < STACK_CACHE_SIZE) {
        stack_next(stack) = cache->free;
        cache->free = stack;
        cache->count++;
    } else {
        stack_lock();
        stack_next(stack) = stack_free_list;
        stack_free_list = stack;
        if (++stack_free_count > stack_free_hiwat) {
            stack_free_hiwat = stack_free_count;
        }
        stack_free_delta++;
        stack_unlock();
    }
    splx(s);
}

/*
 * stack_alloc_try:
 *
 * Non-blocking attempt to allocate a stack for a thread.
 *
 * Returns TRUE on success.
 *
 * Called at splsched.
 */
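/*
 * Illustrative caller sketch (simplified; the real caller is the
 * thread_invoke() path referred to at the top of this file):
 *
 *    s = splsched();
 *    ...
 *    if (!stack_alloc_try(new_thread)) {
 *        (no stack was available without blocking; the caller must
 *         defer to a context that can use stack_alloc() instead)
 *    }
 *    ...
 *    splx(s);
 */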
boolean_t
stack_alloc_try(
    thread_t thread)
{
    struct stack_cache *cache;
    vm_offset_t stack;

    cache = &PROCESSOR_DATA(current_processor(), stack_cache);
    stack = cache->free;
    if (stack != 0) {
        cache->free = stack_next(stack);
        cache->count--;
    } else {
        if (stack_free_list != 0) {
            stack_lock();
            stack = stack_free_list;
            if (stack != 0) {
                stack_free_list = stack_next(stack);
                stack_free_count--;
                stack_free_delta--;
            }
            stack_unlock();
        }
    }

    if (stack != 0 || (stack = thread->reserved_stack) != 0) {
        machine_stack_attach(thread, stack);
        return TRUE;
    }

    return FALSE;
}

static unsigned int stack_collect_tick, last_stack_tick;
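
/*
 * compute_stack_target() below advances stack_collect_tick once per
 * computation period; stack_collect() compares it with last_stack_tick so
 * that at most one trimming pass over the free list runs per period.
 */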

/*
 * stack_collect:
 *
 * Free excess kernel stacks; may block.
 */
void
stack_collect(void)
{
    if (stack_collect_tick != last_stack_tick) {
        unsigned int target;
        vm_offset_t stack;
        spl_t s;

        s = splsched();
        stack_lock();

        target = stack_free_target + (STACK_CACHE_SIZE * processor_count);
        target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;

        while (stack_free_count > target) {
            stack = stack_free_list;
            stack_free_list = stack_next(stack);
            stack_free_count--; stack_total--;
            stack_unlock();
            splx(s);

            /*
             * Get the stack base address, then decrement by one page
             * to account for the lower guard page. Add two extra pages
             * to the size to account for the guard pages on both ends
             * that were originally requested when the stack was allocated
             * back in stack_alloc().
             */

            stack = (vm_offset_t)vm_map_trunc_page(
                stack,
                VM_MAP_PAGE_MASK(kernel_map));
            stack -= PAGE_SIZE;
            if (vm_map_remove(
                    kernel_map,
                    stack,
                    stack + kernel_stack_size + (2 * PAGE_SIZE),
                    VM_MAP_REMOVE_KUNWIRE)
                != KERN_SUCCESS) {
                panic("stack_collect: vm_map_remove");
            }
            stack = 0;

            s = splsched();
            stack_lock();

            target = stack_free_target + (STACK_CACHE_SIZE * processor_count);
            target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;
        }

        last_stack_tick = stack_collect_tick;

        stack_unlock();
        splx(s);
    }
}

/*
 * compute_stack_target:
 *
 * Computes a new target free list count
 * based on recent alloc / free activity.
 *
 * Limits stack collection to once per
 * computation period.
 */
void
compute_stack_target(
    __unused void *arg)
{
    spl_t s;

    s = splsched();
    stack_lock();

    if (stack_free_target > 5) {
        stack_free_target = (4 * stack_free_target) / 5;
    } else if (stack_free_target > 0) {
        stack_free_target--;
    }

    stack_free_target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;

    stack_free_delta = 0;
    stack_collect_tick++;

    stack_unlock();
    splx(s);
}

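/*
 * Kernel stacks are not carved from a real zalloc zone, so the counters
 * kept above are exported through the fake-zone interface; this is what
 * lets zone-statistics consumers (e.g. the mach_zone_info() path) report
 * kernel stack usage alongside ordinary zones.
 */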
void
stack_fake_zone_init(int zone_index)
{
    stack_fake_zone_index = zone_index;
}

void
stack_fake_zone_info(int *count,
    vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size, vm_size_t *alloc_size,
    uint64_t *sum_size, int *collectable, int *exhaustable, int *caller_acct)
{
    unsigned int total, hiwat, free;
    unsigned long long all;
    spl_t s;

    s = splsched();
    stack_lock();
    all = stack_allocs;
    total = stack_total;
    hiwat = stack_hiwat;
    free = stack_free_count;
    stack_unlock();
    splx(s);

    *count = total - free;
    *cur_size = kernel_stack_size * total;
    *max_size = kernel_stack_size * hiwat;
    *elem_size = kernel_stack_size;
    *alloc_size = kernel_stack_size;
    *sum_size = all * kernel_stack_size;

    *collectable = 1;
    *exhaustable = 0;
    *caller_acct = 1;
}

/* OBSOLETE */
void stack_privilege(
    thread_t thread);

void
stack_privilege(
    __unused thread_t thread)
{
    /* OBSOLETE */
}

/*
 * Return info on stack usage for threads in a specific processor set
 */
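/*
 * Note that per-stack high-water tracking is not maintained here, so
 * maxusage and maxstack are always reported as zero; only the number of
 * threads currently owning a kernel stack, and the space those stacks
 * occupy, are computed.
 */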
kern_return_t
processor_set_stack_usage(
    processor_set_t pset,
    unsigned int *totalp,
    vm_size_t *spacep,
    vm_size_t *residentp,
    vm_size_t *maxusagep,
    vm_offset_t *maxstackp)
{
#if !MACH_DEBUG
    return KERN_NOT_SUPPORTED;
#else
    unsigned int total;
    vm_size_t maxusage;
    vm_offset_t maxstack;

    thread_t *thread_list;
    thread_t thread;

    unsigned int actual; /* this many threads */
    unsigned int i;

    vm_size_t size, size_needed;
    void *addr;

    if (pset == PROCESSOR_SET_NULL || pset != &pset0) {
        return KERN_INVALID_ARGUMENT;
    }

    size = 0;
    addr = NULL;

    for (;;) {
        lck_mtx_lock(&tasks_threads_lock);

        actual = threads_count;

        /* do we have the memory we need? */

        size_needed = actual * sizeof(thread_t);
        if (size_needed <= size) {
            break;
        }

        lck_mtx_unlock(&tasks_threads_lock);

        if (size != 0) {
            kfree(addr, size);
        }

        assert(size_needed > 0);
        size = size_needed;

        addr = kalloc(size);
        if (addr == 0) {
            return KERN_RESOURCE_SHORTAGE;
        }
    }

    /* OK, have memory and list is locked */
    thread_list = (thread_t *) addr;
    for (i = 0, thread = (thread_t)(void *) queue_first(&threads);
        !queue_end(&threads, (queue_entry_t) thread);
        thread = (thread_t)(void *) queue_next(&thread->threads)) {
        thread_reference_internal(thread);
        thread_list[i++] = thread;
    }
    assert(i <= actual);

    lck_mtx_unlock(&tasks_threads_lock);

    /* calculate maxusage and free thread references */

    total = 0;
    maxusage = 0;
    maxstack = 0;
    while (i > 0) {
        thread_t threadref = thread_list[--i];

        if (threadref->kernel_stack != 0) {
            total++;
        }

        thread_deallocate(threadref);
    }

    if (size != 0) {
        kfree(addr, size);
    }

    *totalp = total;
    *residentp = *spacep = total * round_page(kernel_stack_size);
    *maxusagep = maxusage;
    *maxstackp = maxstack;
    return KERN_SUCCESS;

#endif /* MACH_DEBUG */
}

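/*
 * Bounds on addresses that could possibly belong to a kernel stack;
 * callers use these to cheaply reject pointers that cannot point into a
 * stack (e.g. while walking kernel stacks for a backtrace).
 */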
vm_offset_t
min_valid_stack_address(void)
{
    return (vm_offset_t)vm_map_min(kernel_map);
}

vm_offset_t
max_valid_stack_address(void)
{
    return (vm_offset_t)vm_map_max(kernel_map);
}