/*
 * Copyright (c) 2003-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Kernel stack management routines.
 */

#include <mach/mach_host.h>
#include <mach/mach_types.h>
#include <mach/processor_set.h>

#include <kern/kern_types.h>
#include <kern/lock_group.h>
#include <kern/mach_param.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/ledger.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <mach_debug.h>
#include <san/kasan.h>

/*
 * We allocate stacks from generic kernel VM.
 *
 * The stack_free_list can only be accessed at splsched,
 * because stack_alloc_try/thread_invoke operate at splsched.
 */

decl_simple_lock_data(static, stack_lock_data);
#define stack_lock()            simple_lock(&stack_lock_data, LCK_GRP_NULL)
#define stack_unlock()          simple_unlock(&stack_lock_data)

#define STACK_CACHE_SIZE        2

static vm_offset_t              stack_free_list;

static unsigned int             stack_free_count, stack_free_hiwat;    /* free list count */
static unsigned int             stack_hiwat;
unsigned int                    stack_total;            /* current total count */
unsigned long long              stack_allocs;           /* total count of allocations */

static int                      stack_fake_zone_index = -1;     /* index in zone_info array */

static unsigned int             stack_free_target;
static int                      stack_free_delta;

static unsigned int             stack_new_count;        /* total new stack allocations */

static vm_offset_t              stack_addr_mask;

unsigned int                    kernel_stack_pages;
vm_offset_t                     kernel_stack_size;
vm_offset_t                     kernel_stack_mask;
vm_offset_t                     kernel_stack_depth_max;

/*
 * The next field is at the base of the stack,
 * so the low end is left unsullied.
 */
#define stack_next(stack)       \
	(*((vm_offset_t *)((stack) + kernel_stack_size) - 1))
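
/*
 * Sketch of a stack allocation, inferred from stack_alloc_internal()
 * below:
 *
 *	+----------------------+ <- high addresses
 *	|   upper guard page   |
 *	+----------------------+ <- stack + kernel_stack_size
 *	|  [stack_next() link] |    top word, used only while the stack
 *	|          ...         |    is on a free list; the stack itself
 *	|     usable stack     |    grows toward low addresses
 *	+----------------------+ <- stack (the free-list handle)
 *	|   lower guard page   |
 *	+----------------------+ <- allocation base, low addresses
 */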

static inline int
log2(vm_offset_t size)
{
	int     result;
	for (result = 0; size > 0; result++) {
		size >>= 1;
	}
	return result;
}

static inline vm_offset_t
roundup_pow2(vm_offset_t size)
{
	return 1UL << (log2(size - 1) + 1);
}
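
/*
 * Note that log2() as written returns the bit width of its argument
 * (floor(log2(size)) + 1 for size > 0), so roundup_pow2() yields
 * twice the next power of two.  The extra alignment bit appears
 * deliberate: the whole allocation, guard pages included, then fits
 * inside a single naturally aligned window, so an address within the
 * stack masked with kernel_stack_mask locates that window.
 */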

static vm_offset_t stack_alloc_internal(void);
static void stack_free_stack(vm_offset_t);

void
stack_init(void)
{
	simple_lock_init(&stack_lock_data, 0);

	kernel_stack_pages = KERNEL_STACK_SIZE / PAGE_SIZE;
	kernel_stack_size = KERNEL_STACK_SIZE;
	kernel_stack_mask = -KERNEL_STACK_SIZE;
	kernel_stack_depth_max = 0;

	if (PE_parse_boot_argn("kernel_stack_pages",
	    &kernel_stack_pages,
	    sizeof(kernel_stack_pages))) {
		kernel_stack_size = kernel_stack_pages * PAGE_SIZE;
		printf("stack_init: kernel_stack_pages=%d kernel_stack_size=%p\n",
		    kernel_stack_pages, (void *) kernel_stack_size);
	}

	if (kernel_stack_size < round_page(kernel_stack_size)) {
		panic("stack_init: stack size %p not a multiple of page size %d\n",
		    (void *) kernel_stack_size, PAGE_SIZE);
	}

	stack_addr_mask = roundup_pow2(kernel_stack_size) - 1;
	kernel_stack_mask = ~stack_addr_mask;
}
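
/*
 * Worked example (hypothetical boot-arg value, 4K pages): booting with
 * kernel_stack_pages=6 gives kernel_stack_size = 6 * 4096 = 24576,
 * which passes the round_page() check; roundup_pow2(24576) is then
 * 1 << 16, so stack_addr_mask = 0xffff and stacks are carved out of
 * 64KB-aligned windows.
 */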

/*
 * stack_alloc:
 *
 * Allocate a stack for a thread, may
 * block.
 */

static vm_offset_t
stack_alloc_internal(void)
{
	vm_offset_t stack = 0;
	spl_t s;
	int flags = 0;
	kern_return_t kr = KERN_SUCCESS;

	s = splsched();
	stack_lock();
	stack_allocs++;
	stack = stack_free_list;
	if (stack != 0) {
		stack_free_list = stack_next(stack);
		stack_free_count--;
	} else {
		if (++stack_total > stack_hiwat) {
			stack_hiwat = stack_total;
		}
		stack_new_count++;
	}
	stack_free_delta--;
	stack_unlock();
	splx(s);

	if (stack == 0) {
		/*
		 * Request guard pages on either side of the stack. Ask
		 * kernel_memory_allocate() for two extra pages to account
		 * for these.
		 */

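		/*
		 * Of the remaining flags, KMA_KOBJECT allocates the pages
		 * in the kernel object and KMA_ZERO hands them back
		 * zero-filled; KMA_KSTACK appears to tag the range as a
		 * kernel stack for the lower VM layers' benefit.
		 */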
		flags = KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KSTACK | KMA_KOBJECT | KMA_ZERO;
		kr = kernel_memory_allocate(kernel_map, &stack,
		    kernel_stack_size + (2 * PAGE_SIZE),
		    stack_addr_mask,
		    flags,
		    VM_KERN_MEMORY_STACK);
		if (kr != KERN_SUCCESS) {
			panic("stack_alloc: kernel_memory_allocate(size:0x%llx, mask: 0x%llx, flags: 0x%x) failed with %d\n", (uint64_t)(kernel_stack_size + (2 * PAGE_SIZE)), (uint64_t)stack_addr_mask, flags, kr);
		}

		/*
		 * The stack address that comes back is the address of the lower
		 * guard page. Skip past it to get the actual stack base address.
		 */

		stack += PAGE_SIZE;
	}
	return stack;
}

void
stack_alloc(
	thread_t thread)
{
	assert(thread->kernel_stack == 0);
	machine_stack_attach(thread, stack_alloc_internal());
}

void
stack_handoff(thread_t from, thread_t to)
{
	assert(from == current_thread());
	machine_stack_handoff(from, to);
}

/*
 * stack_free:
 *
 * Detach and free the stack for a thread.
 */
void
stack_free(
	thread_t thread)
{
	vm_offset_t stack = machine_stack_detach(thread);

	assert(stack);
	if (stack != thread->reserved_stack) {
		stack_free_stack(stack);
	}
}

void
stack_free_reserved(
	thread_t thread)
{
	if (thread->reserved_stack != thread->kernel_stack) {
		stack_free_stack(thread->reserved_stack);
	}
}

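/*
 * Freed stacks go first to a small per-processor cache (at most
 * STACK_CACHE_SIZE entries), which can be filled and drained at
 * splsched without taking stack_lock; only the overflow is pushed
 * onto the global stack_free_list under the lock.
 */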
static void
stack_free_stack(
	vm_offset_t stack)
{
	struct stack_cache *cache;
	spl_t s;

#if KASAN_DEBUG
	/* Sanity check - stack should be unpoisoned by now */
	assert(kasan_check_shadow(stack, kernel_stack_size, 0));
#endif

	s = splsched();
	cache = &PROCESSOR_DATA(current_processor(), stack_cache);
	if (cache->count < STACK_CACHE_SIZE) {
		stack_next(stack) = cache->free;
		cache->free = stack;
		cache->count++;
	} else {
		stack_lock();
		stack_next(stack) = stack_free_list;
		stack_free_list = stack;
		if (++stack_free_count > stack_free_hiwat) {
			stack_free_hiwat = stack_free_count;
		}
		stack_free_delta++;
		stack_unlock();
	}
	splx(s);
}

/*
 * stack_alloc_try:
 *
 * Non-blocking attempt to allocate a
 * stack for a thread.
 *
 * Returns TRUE on success.
 *
 * Called at splsched.
 */
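/*
 * The unlocked peek at stack_free_list below is intentionally racy:
 * the list head is re-read under stack_lock() before it is popped,
 * so a stale peek costs at most a missed stack or a wasted lock
 * acquisition, never a corrupted list.
 */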
boolean_t
stack_alloc_try(
	thread_t thread)
{
	struct stack_cache *cache;
	vm_offset_t stack;

	cache = &PROCESSOR_DATA(current_processor(), stack_cache);
	stack = cache->free;
	if (stack != 0) {
		cache->free = stack_next(stack);
		cache->count--;
	} else {
		if (stack_free_list != 0) {
			stack_lock();
			stack = stack_free_list;
			if (stack != 0) {
				stack_free_list = stack_next(stack);
				stack_free_count--;
				stack_free_delta--;
			}
			stack_unlock();
		}
	}

	if (stack != 0 || (stack = thread->reserved_stack) != 0) {
		machine_stack_attach(thread, stack);
		return TRUE;
	}

	return FALSE;
}

static unsigned int stack_collect_tick, last_stack_tick;

/*
 * stack_collect:
 *
 * Free excess kernel stacks, may
 * block.
 */
void
stack_collect(void)
{
	if (stack_collect_tick != last_stack_tick) {
		unsigned int target;
		vm_offset_t stack;
		spl_t s;

		s = splsched();
		stack_lock();

		target = stack_free_target + (STACK_CACHE_SIZE * processor_count);
		target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;

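		/*
		 * stack_lock is dropped (and splsched exited) around each
		 * vm_map_remove() call below, since unmapping may block;
		 * the target is recomputed once the lock is retaken.
		 */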
		while (stack_free_count > target) {
			stack = stack_free_list;
			stack_free_list = stack_next(stack);
			stack_free_count--; stack_total--;
			stack_unlock();
			splx(s);

			/*
			 * Get the stack base address, then decrement by one page
			 * to account for the lower guard page. Add two extra pages
			 * to the size to account for the guard pages on both ends
			 * that were originally requested when the stack was allocated
			 * back in stack_alloc().
			 */

			stack = (vm_offset_t)vm_map_trunc_page(
				stack,
				VM_MAP_PAGE_MASK(kernel_map));
			stack -= PAGE_SIZE;
			if (vm_map_remove(
				    kernel_map,
				    stack,
				    stack + kernel_stack_size + (2 * PAGE_SIZE),
				    VM_MAP_REMOVE_KUNWIRE)
			    != KERN_SUCCESS) {
				panic("stack_collect: vm_map_remove");
			}
			stack = 0;

			s = splsched();
			stack_lock();

			target = stack_free_target + (STACK_CACHE_SIZE * processor_count);
			target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;
		}

		last_stack_tick = stack_collect_tick;

		stack_unlock();
		splx(s);
	}
}

/*
 * compute_stack_target:
 *
 * Computes a new target free list count
 * based on recent alloc / free activity.
 *
 * Limits stack collection to once per
 * computation period.
 */
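/*
 * The previous target decays by roughly 20% each period, and the
 * magnitude of the period's net alloc/free swing is then added back,
 * so the target tracks recent peak demand while old demand fades.
 */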
void
compute_stack_target(
	__unused void *arg)
{
	spl_t s;

	s = splsched();
	stack_lock();

	if (stack_free_target > 5) {
		stack_free_target = (4 * stack_free_target) / 5;
	} else if (stack_free_target > 0) {
		stack_free_target--;
	}

	stack_free_target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;

	stack_free_delta = 0;
	stack_collect_tick++;

	stack_unlock();
	splx(s);
}

void
stack_fake_zone_init(int zone_index)
{
	stack_fake_zone_index = zone_index;
}

void
stack_fake_zone_info(int *count,
    vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size, vm_size_t *alloc_size,
    uint64_t *sum_size, int *collectable, int *exhaustable, int *caller_acct)
{
	unsigned int total, hiwat, free;
	unsigned long long all;
	spl_t s;

	s = splsched();
	stack_lock();
	all = stack_allocs;
	total = stack_total;
	hiwat = stack_hiwat;
	free = stack_free_count;
	stack_unlock();
	splx(s);

	*count = total - free;
	*cur_size = kernel_stack_size * total;
	*max_size = kernel_stack_size * hiwat;
	*elem_size = kernel_stack_size;
	*alloc_size = kernel_stack_size;
	*sum_size = all * kernel_stack_size;

	*collectable = 1;
	*exhaustable = 0;
	*caller_acct = 1;
}

/* OBSOLETE */
void stack_privilege(
	thread_t thread);

void
stack_privilege(
	__unused thread_t thread)
{
	/* OBSOLETE */
}

/*
 * Return info on stack usage for threads in a specific processor set
 */
kern_return_t
processor_set_stack_usage(
	processor_set_t pset,
	unsigned int *totalp,
	vm_size_t *spacep,
	vm_size_t *residentp,
	vm_size_t *maxusagep,
	vm_offset_t *maxstackp)
{
#if !MACH_DEBUG
	return KERN_NOT_SUPPORTED;
#else
	unsigned int total;
	vm_size_t maxusage;
	vm_offset_t maxstack;

	thread_t *thread_list;
	thread_t thread;

	unsigned int actual;    /* this many things */
	unsigned int i;

	vm_size_t size, size_needed;
	void *addr;

	if (pset == PROCESSOR_SET_NULL || pset != &pset0) {
		return KERN_INVALID_ARGUMENT;
	}

	size = 0;
	addr = NULL;

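	/*
	 * Standard size-and-retry pattern: guess a buffer size, take the
	 * lock, and if the thread count has grown past the guess, drop
	 * the lock and reallocate before trying again.
	 */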
	for (;;) {
		lck_mtx_lock(&tasks_threads_lock);

		actual = threads_count;

		/* do we have the memory we need? */

		size_needed = actual * sizeof(thread_t);
		if (size_needed <= size) {
			break;
		}

		lck_mtx_unlock(&tasks_threads_lock);

		if (size != 0) {
			kfree(addr, size);
		}

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0) {
			return KERN_RESOURCE_SHORTAGE;
		}
	}

	/* OK, have memory and list is locked */
	thread_list = (thread_t *) addr;
	for (i = 0, thread = (thread_t)(void *) queue_first(&threads);
	    !queue_end(&threads, (queue_entry_t) thread);
	    thread = (thread_t)(void *) queue_next(&thread->threads)) {
		thread_reference_internal(thread);
		thread_list[i++] = thread;
	}
	assert(i <= actual);

	lck_mtx_unlock(&tasks_threads_lock);

	/* calculate maxusage and free thread references */

	total = 0;
	maxusage = 0;
	maxstack = 0;
	while (i > 0) {
		thread_t threadref = thread_list[--i];

		if (threadref->kernel_stack != 0) {
			total++;
		}

		thread_deallocate(threadref);
	}

	if (size != 0) {
		kfree(addr, size);
	}

	*totalp = total;
	*residentp = *spacep = total * round_page(kernel_stack_size);
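	/*
	 * maxusage and maxstack are left at zero: per-thread stack depth
	 * is not tracked here, so only the count of threads currently
	 * holding kernel stacks is reported.
	 */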
	*maxusagep = maxusage;
	*maxstackp = maxstack;
	return KERN_SUCCESS;

#endif  /* MACH_DEBUG */
}

vm_offset_t
min_valid_stack_address(void)
{
	return (vm_offset_t)vm_map_min(kernel_map);
}

vm_offset_t
max_valid_stack_address(void)
{
	return (vm_offset_t)vm_map_max(kernel_map);
}