/*
 * Copyright (c) 2003-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Kernel stack management routines.
 */

#include <mach/mach_host.h>
#include <mach/mach_types.h>
#include <mach/processor_set.h>

#include <kern/kern_types.h>
#include <kern/lock_group.h>
#include <kern/mach_param.h>
#include <kern/percpu.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/ledger.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <mach_debug.h>
#include <san/kasan.h>

/*
 * We allocate stacks from generic kernel VM.
 *
 * The stack_free_list can only be accessed at splsched,
 * because stack_alloc_try/thread_invoke operate at splsched.
 */

decl_simple_lock_data(static, stack_lock_data);
#define stack_lock()    simple_lock(&stack_lock_data, LCK_GRP_NULL)
#define stack_unlock()  simple_unlock(&stack_lock_data)

#define STACK_CACHE_SIZE        2

static vm_offset_t      stack_free_list;

static unsigned int     stack_free_count, stack_free_hiwat;     /* free list count */
static unsigned int     stack_hiwat;
unsigned int            stack_total;            /* current total count */
unsigned long long      stack_allocs;           /* total count of allocations */

static unsigned int     stack_free_target;
static int              stack_free_delta;

static unsigned int     stack_new_count;        /* total new stack allocations */

static vm_offset_t      stack_addr_mask;

unsigned int            kernel_stack_pages;
vm_offset_t             kernel_stack_size;
vm_offset_t             kernel_stack_mask;
vm_offset_t             kernel_stack_depth_max;

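/*
 * Small per-CPU cache of free kernel stacks.  stack_free_stack()
 * prefers the cache and spills to the global free list once the
 * cache holds STACK_CACHE_SIZE entries; stack_alloc_try() consumes
 * from it first.  Accessed only at splsched, so no lock is needed.
 */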
struct stack_cache {
        vm_offset_t     free;
        unsigned int    count;
};
static struct stack_cache PERCPU_DATA(stack_cache);

/*
 * The next field is at the base of the stack,
 * so the low end is left unsullied.
 */
#define stack_next(stack)       \
        (*((vm_offset_t *)((stack) + kernel_stack_size) - 1))

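/*
 * Power-of-two rounding helpers, used by stack_init() to derive
 * stack_addr_mask (and hence kernel_stack_mask) from the configured
 * kernel stack size.
 */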
static inline int
log2(vm_offset_t size)
{
        int result;
        for (result = 0; size > 0; result++) {
                size >>= 1;
        }
        return result;
}

static inline vm_offset_t
roundup_pow2(vm_offset_t size)
{
        return 1UL << (log2(size - 1) + 1);
}

static vm_offset_t stack_alloc_internal(void);
static void stack_free_stack(vm_offset_t);

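/*
 * stack_init:
 *
 * Initialize the stack free-list lock and the kernel stack
 * size/mask globals, honoring the "kernel_stack_pages" boot-arg
 * if present.
 */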
void
stack_init(void)
{
        simple_lock_init(&stack_lock_data, 0);

        kernel_stack_pages = KERNEL_STACK_SIZE / PAGE_SIZE;
        kernel_stack_size = KERNEL_STACK_SIZE;
        kernel_stack_mask = -KERNEL_STACK_SIZE;
        kernel_stack_depth_max = 0;

        if (PE_parse_boot_argn("kernel_stack_pages",
            &kernel_stack_pages,
            sizeof(kernel_stack_pages))) {
                kernel_stack_size = kernel_stack_pages * PAGE_SIZE;
                printf("stack_init: kernel_stack_pages=%d kernel_stack_size=%p\n",
                    kernel_stack_pages, (void *) kernel_stack_size);
        }

        if (kernel_stack_size < round_page(kernel_stack_size)) {
                panic("stack_init: stack size %p not a multiple of page size %d\n",
                    (void *) kernel_stack_size, PAGE_SIZE);
        }

        stack_addr_mask = roundup_pow2(kernel_stack_size) - 1;
        kernel_stack_mask = ~stack_addr_mask;
}

/*
 * stack_alloc:
 *
 * Allocate a stack for a thread, may
 * block.
 */

static vm_offset_t
stack_alloc_internal(void)
{
        vm_offset_t     stack = 0;
        spl_t           s;
        int             flags = 0;
        kern_return_t   kr = KERN_SUCCESS;

        s = splsched();
        stack_lock();
        stack_allocs++;
        stack = stack_free_list;
        if (stack != 0) {
                stack_free_list = stack_next(stack);
                stack_free_count--;
        } else {
                if (++stack_total > stack_hiwat) {
                        stack_hiwat = stack_total;
                }
                stack_new_count++;
        }
        stack_free_delta--;
        stack_unlock();
        splx(s);

        if (stack == 0) {
                /*
                 * Request guard pages on either side of the stack. Ask
                 * kernel_memory_allocate() for two extra pages to account
                 * for these.
                 */

                flags = KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KSTACK | KMA_KOBJECT | KMA_ZERO;
                kr = kernel_memory_allocate(kernel_map, &stack,
                    kernel_stack_size + (2 * PAGE_SIZE),
                    stack_addr_mask,
                    flags,
                    VM_KERN_MEMORY_STACK);
                if (kr != KERN_SUCCESS) {
                        panic("stack_alloc: kernel_memory_allocate(size:0x%llx, mask: 0x%llx, flags: 0x%x) failed with %d\n", (uint64_t)(kernel_stack_size + (2 * PAGE_SIZE)), (uint64_t)stack_addr_mask, flags, kr);
                }

                /*
                 * The stack address that comes back is the address of the lower
                 * guard page. Skip past it to get the actual stack base address.
                 */

                stack += PAGE_SIZE;
        }
        return stack;
}

void
stack_alloc(
        thread_t        thread)
{
        assert(thread->kernel_stack == 0);
        machine_stack_attach(thread, stack_alloc_internal());
}

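/*
 * stack_handoff:
 *
 * Pass the current thread's kernel stack directly to another
 * thread, bypassing the free list.
 */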
void
stack_handoff(thread_t from, thread_t to)
{
        assert(from == current_thread());
        machine_stack_handoff(from, to);
}

/*
 * stack_free:
 *
 * Detach and free the stack for a thread.
 */
void
stack_free(
        thread_t        thread)
{
        vm_offset_t     stack = machine_stack_detach(thread);

        assert(stack);
        if (stack != thread->reserved_stack) {
                stack_free_stack(stack);
        }
}

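/*
 * stack_free_reserved:
 *
 * Free a thread's reserved stack, unless it is also the
 * thread's current kernel stack.
 */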
void
stack_free_reserved(
        thread_t        thread)
{
        if (thread->reserved_stack != thread->kernel_stack) {
                stack_free_stack(thread->reserved_stack);
        }
}

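/*
 * stack_free_stack:
 *
 * Return a stack to the per-CPU cache if there is room,
 * otherwise place it on the global free list.
 */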
static void
stack_free_stack(
        vm_offset_t     stack)
{
        struct stack_cache      *cache;
        spl_t                   s;

#if KASAN_DEBUG
        /* Sanity check - stack should be unpoisoned by now */
        assert(kasan_check_shadow(stack, kernel_stack_size, 0));
#endif

        s = splsched();
        cache = PERCPU_GET(stack_cache);
        if (cache->count < STACK_CACHE_SIZE) {
                stack_next(stack) = cache->free;
                cache->free = stack;
                cache->count++;
        } else {
                stack_lock();
                stack_next(stack) = stack_free_list;
                stack_free_list = stack;
                if (++stack_free_count > stack_free_hiwat) {
                        stack_free_hiwat = stack_free_count;
                }
                stack_free_delta++;
                stack_unlock();
        }
        splx(s);
}

/*
 * stack_alloc_try:
 *
 * Non-blocking attempt to allocate a
 * stack for a thread.
 *
 * Returns TRUE on success.
 *
 * Called at splsched.
 */
boolean_t
stack_alloc_try(
        thread_t                thread)
{
        struct stack_cache      *cache;
        vm_offset_t             stack;

        cache = PERCPU_GET(stack_cache);
        stack = cache->free;
        if (stack != 0) {
                cache->free = stack_next(stack);
                cache->count--;
        } else {
                if (stack_free_list != 0) {
                        stack_lock();
                        stack = stack_free_list;
                        if (stack != 0) {
                                stack_free_list = stack_next(stack);
                                stack_free_count--;
                                stack_free_delta--;
                        }
                        stack_unlock();
                }
        }

        if (stack != 0 || (stack = thread->reserved_stack) != 0) {
                machine_stack_attach(thread, stack);
                return TRUE;
        }

        return FALSE;
}

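/*
 * stack_collect_tick is bumped by compute_stack_target();
 * stack_collect() compares it against last_stack_tick to limit
 * collection to one pass per computation period.
 */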
static unsigned int stack_collect_tick, last_stack_tick;

/*
 * stack_collect:
 *
 * Free excess kernel stacks, may
 * block.
 */
void
stack_collect(void)
{
        if (stack_collect_tick != last_stack_tick) {
                unsigned int    target;
                vm_offset_t     stack;
                spl_t           s;

                s = splsched();
                stack_lock();

                target = stack_free_target + (STACK_CACHE_SIZE * processor_count);
                target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;

                while (stack_free_count > target) {
                        stack = stack_free_list;
                        stack_free_list = stack_next(stack);
                        stack_free_count--; stack_total--;
                        stack_unlock();
                        splx(s);

                        /*
                         * Get the stack base address, then decrement by one page
                         * to account for the lower guard page. Add two extra pages
                         * to the size to account for the guard pages on both ends
                         * that were originally requested when the stack was allocated
                         * back in stack_alloc().
                         */

                        stack = (vm_offset_t)vm_map_trunc_page(
                                stack,
                                VM_MAP_PAGE_MASK(kernel_map));
                        stack -= PAGE_SIZE;
                        if (vm_map_remove(
                                    kernel_map,
                                    stack,
                                    stack + kernel_stack_size + (2 * PAGE_SIZE),
                                    VM_MAP_REMOVE_KUNWIRE)
                            != KERN_SUCCESS) {
                                panic("stack_collect: vm_map_remove");
                        }
                        stack = 0;

                        s = splsched();
                        stack_lock();

                        target = stack_free_target + (STACK_CACHE_SIZE * processor_count);
                        target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;
                }

                last_stack_tick = stack_collect_tick;

                stack_unlock();
                splx(s);
        }
}

/*
 * compute_stack_target:
 *
 * Computes a new target free list count
 * based on recent alloc / free activity.
 *
 * Limits stack collection to once per
 * computation period.
 */
void
compute_stack_target(
        __unused void           *arg)
{
        spl_t           s;

        s = splsched();
        stack_lock();

        if (stack_free_target > 5) {
                stack_free_target = (4 * stack_free_target) / 5;
        } else if (stack_free_target > 0) {
                stack_free_target--;
        }

        stack_free_target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;

        stack_free_delta = 0;
        stack_collect_tick++;

        stack_unlock();
        splx(s);
}

/* OBSOLETE */
void    stack_privilege(
        thread_t        thread);

void
stack_privilege(
        __unused thread_t       thread)
{
        /* OBSOLETE */
}

/*
 * Return info on stack usage for threads in a specific processor set
 */
kern_return_t
processor_set_stack_usage(
        processor_set_t pset,
        unsigned int    *totalp,
        vm_size_t       *spacep,
        vm_size_t       *residentp,
        vm_size_t       *maxusagep,
        vm_offset_t     *maxstackp)
{
#if !MACH_DEBUG
        return KERN_NOT_SUPPORTED;
#else
        unsigned int total;
        vm_size_t maxusage;
        vm_offset_t maxstack;

        thread_t *thread_list;
        thread_t thread;

        unsigned int actual;    /* this many things */
        unsigned int i;

        vm_size_t size, size_needed;
        void *addr;

        if (pset == PROCESSOR_SET_NULL || pset != &pset0) {
                return KERN_INVALID_ARGUMENT;
        }

        size = 0;
        addr = NULL;

        for (;;) {
                lck_mtx_lock(&tasks_threads_lock);

                actual = threads_count;

                /* do we have the memory we need? */

                size_needed = actual * sizeof(thread_t);
                if (size_needed <= size) {
                        break;
                }

                lck_mtx_unlock(&tasks_threads_lock);

                if (size != 0) {
                        kheap_free(KHEAP_TEMP, addr, size);
                }

                assert(size_needed > 0);
                size = size_needed;

                addr = kheap_alloc(KHEAP_TEMP, size, Z_WAITOK);
                if (addr == 0) {
                        return KERN_RESOURCE_SHORTAGE;
                }
        }

        /* OK, have memory and list is locked */
        thread_list = (thread_t *) addr;
        for (i = 0, thread = (thread_t)(void *) queue_first(&threads);
            !queue_end(&threads, (queue_entry_t) thread);
            thread = (thread_t)(void *) queue_next(&thread->threads)) {
                thread_reference_internal(thread);
                thread_list[i++] = thread;
        }
        assert(i <= actual);

        lck_mtx_unlock(&tasks_threads_lock);

        /* calculate maxusage and free thread references */

        total = 0;
        maxusage = 0;
        maxstack = 0;
        while (i > 0) {
                thread_t threadref = thread_list[--i];

                if (threadref->kernel_stack != 0) {
                        total++;
                }

                thread_deallocate(threadref);
        }

        if (size != 0) {
                kheap_free(KHEAP_TEMP, addr, size);
        }

        *totalp = total;
        *residentp = *spacep = total * round_page(kernel_stack_size);
        *maxusagep = maxusage;
        *maxstackp = maxstack;
        return KERN_SUCCESS;

#endif  /* MACH_DEBUG */
}

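/*
 * Bounds of the kernel map; any kernel stack address must lie
 * within [min_valid_stack_address(), max_valid_stack_address()).
 */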
vm_offset_t
min_valid_stack_address(void)
{
        return (vm_offset_t)vm_map_min(kernel_map);
}

vm_offset_t
max_valid_stack_address(void)
{
        return (vm_offset_t)vm_map_max(kernel_map);
}