/*
 * Copyright (c) 2003-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 *	Kernel stack management routines.
 */
#include <mach/mach_host.h>
#include <mach/mach_types.h>
#include <mach/processor_set.h>

#include <kern/kern_types.h>
#include <kern/lock_group.h>
#include <kern/mach_param.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/ledger.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <mach_debug.h>
#include <san/kasan.h>
/*
 *	We allocate stacks from generic kernel VM.
 *
 *	The stack_free_list can only be accessed at splsched,
 *	because stack_alloc_try/thread_invoke operate at splsched.
 */

decl_simple_lock_data(static, stack_lock_data);
#define stack_lock()    simple_lock(&stack_lock_data, LCK_GRP_NULL)
#define stack_unlock()  simple_unlock(&stack_lock_data)
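
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * every touch of the free list follows the same bracketing, raising to
 * splsched and then taking stack_lock(), per the comment above.
 */
#if 0
static void
example_locked_access(void)
{
	spl_t s = splsched();   /* the free list is only touched at splsched */
	stack_lock();
	/* ... read or modify stack_free_list here ... */
	stack_unlock();
	splx(s);
}
#endif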
#define STACK_CACHE_SIZE        2

static vm_offset_t      stack_free_list;

static unsigned int     stack_free_count, stack_free_hiwat;     /* free list count */
static unsigned int     stack_hiwat;
unsigned int            stack_total;            /* current total count */
unsigned long long      stack_allocs;           /* total count of allocations */

static int              stack_fake_zone_index = -1;     /* index in zone_info array */

static unsigned int     stack_free_target;
static int              stack_free_delta;

static unsigned int     stack_new_count;        /* total new stack allocations */

static vm_offset_t      stack_addr_mask;

unsigned int            kernel_stack_pages;
vm_offset_t             kernel_stack_size;
vm_offset_t             kernel_stack_mask;
vm_offset_t             kernel_stack_depth_max;
/*
 *	The next field is at the base of the stack,
 *	so the low end is left unsullied.
 */
#define stack_next(stack)       \
	(*((vm_offset_t *)((stack) + kernel_stack_size) - 1))
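
/*
 * Illustrative sketch (editor's addition): a free stack is threaded onto a
 * list through the last pointer-sized slot of the stack itself, which is
 * exactly what stack_next() above denotes.  A hypothetical push looks like:
 */
#if 0
static void
example_push_free_stack(vm_offset_t *head, vm_offset_t stack)
{
	stack_next(stack) = *head;      /* store the old head at the stack's top */
	*head = stack;                  /* the stack itself becomes the new head */
}
#endif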
static inline int
log2(vm_offset_t size)
{
	int     result;
	for (result = 0; size > 0; result++) {
		size >>= 1;
	}
	return result;
}

static inline vm_offset_t
roundup_pow2(vm_offset_t size)
{
	return 1UL << (log2(size - 1) + 1);
}
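
/*
 * Illustrative sketch (editor's addition): stack_addr_mask and
 * kernel_stack_mask, set up in stack_init() below, are bitwise complements,
 * so clearing the low bits of an in-stack address recovers the aligned base
 * of the underlying stack allocation.  The helper name is hypothetical.
 */
#if 0
static vm_offset_t
example_stack_allocation_base(vm_offset_t addr_on_stack)
{
	return addr_on_stack & kernel_stack_mask;   /* == addr & ~stack_addr_mask */
}
#endif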
static vm_offset_t stack_alloc_internal(void);
static void stack_free_stack(vm_offset_t);
void
stack_init(void)
{
	simple_lock_init(&stack_lock_data, 0);

	kernel_stack_pages = KERNEL_STACK_SIZE / PAGE_SIZE;
	kernel_stack_size = KERNEL_STACK_SIZE;
	kernel_stack_mask = -KERNEL_STACK_SIZE;
	kernel_stack_depth_max = 0;

	if (PE_parse_boot_argn("kernel_stack_pages",
	    &kernel_stack_pages,
	    sizeof(kernel_stack_pages))) {
		kernel_stack_size = kernel_stack_pages * PAGE_SIZE;
		printf("stack_init: kernel_stack_pages=%d kernel_stack_size=%p\n",
		    kernel_stack_pages, (void *) kernel_stack_size);
	}

	if (kernel_stack_size < round_page(kernel_stack_size)) {
		panic("stack_init: stack size %p not a multiple of page size %d\n",
		    (void *) kernel_stack_size, PAGE_SIZE);
	}

	stack_addr_mask = roundup_pow2(kernel_stack_size) - 1;
	kernel_stack_mask = ~stack_addr_mask;
}
/*
 *	stack_alloc:
 *
 *	Allocate a stack for a thread, may
 *	block.
 */

static vm_offset_t
stack_alloc_internal(void)
{
	vm_offset_t     stack = 0;
	spl_t           s;
	int             flags = 0;
	kern_return_t   kr = KERN_SUCCESS;

	s = splsched();
	stack_lock();
	stack_allocs++;
	stack = stack_free_list;
	if (stack != 0) {
		stack_free_list = stack_next(stack);
		stack_free_count--;
	} else {
		if (++stack_total > stack_hiwat) {
			stack_hiwat = stack_total;
		}
		stack_new_count++;
	}
	stack_free_delta--;
	stack_unlock();
	splx(s);

	if (stack == 0) {
		/*
		 * Request guard pages on either side of the stack.  Ask
		 * kernel_memory_allocate() for two extra pages to account
		 * for these.
		 */

		flags = KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KSTACK | KMA_KOBJECT | KMA_ZERO;
		kr = kernel_memory_allocate(kernel_map, &stack,
		    kernel_stack_size + (2 * PAGE_SIZE),
		    stack_addr_mask,
		    flags,
		    VM_KERN_MEMORY_STACK);
		if (kr != KERN_SUCCESS) {
			panic("stack_alloc: kernel_memory_allocate(size:0x%llx, mask: 0x%llx, flags: 0x%x) failed with %d\n", (uint64_t)(kernel_stack_size + (2 * PAGE_SIZE)), (uint64_t)stack_addr_mask, flags, kr);
		}

		/*
		 * The stack address that comes back is the address of the lower
		 * guard page.  Skip past it to get the actual stack base address.
		 */

		stack += PAGE_SIZE;
	}
	return stack;
}

void
stack_alloc(
	thread_t        thread)
{
	assert(thread->kernel_stack == 0);
	machine_stack_attach(thread, stack_alloc_internal());
}
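
/*
 * Illustrative sketch (editor's addition): the arithmetic above yields the
 * following layout, with one guard page below and one above the usable
 * stack.  The helper and its names are hypothetical.
 */
#if 0
static void
example_stack_layout(vm_offset_t alloc_base)
{
	vm_offset_t lower_guard = alloc_base;                       /* one guard page */
	vm_offset_t stack_base  = alloc_base + PAGE_SIZE;           /* kernel_stack_size usable bytes */
	vm_offset_t upper_guard = stack_base + kernel_stack_size;   /* one guard page */
	(void)lower_guard; (void)stack_base; (void)upper_guard;
}
#endif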
void
stack_handoff(thread_t from, thread_t to)
{
	assert(from == current_thread());
	machine_stack_handoff(from, to);
}
/*
 *	stack_free:
 *
 *	Detach and free the stack for a thread.
 */
void
stack_free(
	thread_t        thread)
{
	vm_offset_t     stack = machine_stack_detach(thread);

	assert(stack);
	if (stack != thread->reserved_stack) {
		stack_free_stack(stack);
	}
}

void
stack_free_reserved(
	thread_t        thread)
{
	if (thread->reserved_stack != thread->kernel_stack) {
		stack_free_stack(thread->reserved_stack);
	}
}
static void
stack_free_stack(
	vm_offset_t     stack)
{
	struct stack_cache      *cache;
	spl_t                   s;

#if KASAN_DEBUG
	/* Sanity check - stack should be unpoisoned by now */
	assert(kasan_check_shadow(stack, kernel_stack_size, 0));
#endif

	s = splsched();
	cache = &PROCESSOR_DATA(current_processor(), stack_cache);
	if (cache->count < STACK_CACHE_SIZE) {
		stack_next(stack) = cache->free;
		cache->free = stack;
		cache->count++;
	} else {
		stack_lock();
		stack_next(stack) = stack_free_list;
		stack_free_list = stack;
		if (++stack_free_count > stack_free_hiwat) {
			stack_free_hiwat = stack_free_count;
		}
		stack_free_delta++;
		stack_unlock();
	}
	splx(s);
}
/*
 *	stack_alloc_try:
 *
 *	Non-blocking attempt to allocate a
 *	stack for a thread.
 *
 *	Returns TRUE on success.
 *
 *	Called at splsched.
 */
boolean_t
stack_alloc_try(
	thread_t        thread)
{
	struct stack_cache      *cache;
	vm_offset_t             stack;

	cache = &PROCESSOR_DATA(current_processor(), stack_cache);
	stack = cache->free;
	if (stack != 0) {
		cache->free = stack_next(stack);
		cache->count--;
	} else {
		if (stack_free_list != 0) {
			stack_lock();
			stack = stack_free_list;
			if (stack != 0) {
				stack_free_list = stack_next(stack);
				stack_free_count--;
				stack_free_delta--;
			}
			stack_unlock();
		}
	}

	if (stack != 0 || (stack = thread->reserved_stack) != 0) {
		machine_stack_attach(thread, stack);

		return TRUE;
	}

	return FALSE;
}
static unsigned int     stack_collect_tick, last_stack_tick;

/*
 *	stack_collect:
 *
 *	Free excess kernel stacks, may
 *	block.
 */
void
stack_collect(void)
{
	if (stack_collect_tick != last_stack_tick) {
		unsigned int    target;
		vm_offset_t     stack;
		spl_t           s;

		s = splsched();
		stack_lock();

		target = stack_free_target + (STACK_CACHE_SIZE * processor_count);
		target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;

		while (stack_free_count > target) {
			stack = stack_free_list;
			stack_free_list = stack_next(stack);
			stack_free_count--; stack_total--;
			stack_unlock();
			splx(s);

			/*
			 * Get the stack base address, then decrement by one page
			 * to account for the lower guard page.  Add two extra pages
			 * to the size to account for the guard pages on both ends
			 * that were originally requested when the stack was allocated
			 * back in stack_alloc().
			 */

			stack = (vm_offset_t)vm_map_trunc_page(
				stack,
				VM_MAP_PAGE_MASK(kernel_map));
			stack -= PAGE_SIZE;
			if (vm_map_remove(
				    kernel_map,
				    stack,
				    stack + kernel_stack_size + (2 * PAGE_SIZE),
				    VM_MAP_REMOVE_KUNWIRE)
			    != KERN_SUCCESS) {
				panic("stack_collect: vm_map_remove");
			}
			stack = 0;

			s = splsched();
			stack_lock();

			target = stack_free_target + (STACK_CACHE_SIZE * processor_count);
			target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;
		}

		last_stack_tick = stack_collect_tick;

		stack_unlock();
		splx(s);
	}
}
/*
 *	compute_stack_target:
 *
 *	Computes a new target free list count
 *	based on recent alloc / free activity.
 *
 *	Limits stack collection to once per
 *	computation period.
 */
void
compute_stack_target(
	__unused void           *arg)
{
	spl_t           s;

	s = splsched();
	stack_lock();

	if (stack_free_target > 5) {
		stack_free_target = (4 * stack_free_target) / 5;
	} else if (stack_free_target > 0) {
		stack_free_target--;
	}

	stack_free_target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;

	stack_free_delta = 0;
	stack_collect_tick++;

	stack_unlock();
	splx(s);
}
void
stack_fake_zone_init(int zone_index)
{
	stack_fake_zone_index = zone_index;
}

void
stack_fake_zone_info(int *count,
    vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size, vm_size_t *alloc_size,
    uint64_t *sum_size, int *collectable, int *exhaustable, int *caller_acct)
{
	unsigned int    total, hiwat, free;
	unsigned long long all;
	spl_t           s;

	s = splsched();
	stack_lock();
	all = stack_allocs;
	total = stack_total;
	hiwat = stack_hiwat;
	free = stack_free_count;
	stack_unlock();
	splx(s);

	*count      = total - free;
	*cur_size   = kernel_stack_size * total;
	*max_size   = kernel_stack_size * hiwat;
	*elem_size  = kernel_stack_size;
	*alloc_size = kernel_stack_size;
	*sum_size   = all * kernel_stack_size;

	*collectable = 1;
	*exhaustable = 0;
	*caller_acct = 1;
}
/* OBSOLETE */
void    stack_privilege(
	thread_t        thread);

void
stack_privilege(
	__unused thread_t       thread)
{
	/* OBSOLETE */
}
/*
 *	Return info on stack usage for threads in a specific processor set
 */
kern_return_t
processor_set_stack_usage(
	processor_set_t pset,
	unsigned int    *totalp,
	vm_size_t       *spacep,
	vm_size_t       *residentp,
	vm_size_t       *maxusagep,
	vm_offset_t     *maxstackp)
{
#if !MACH_DEBUG
	return KERN_NOT_SUPPORTED;
#else
	unsigned int total;
	vm_size_t maxusage;
	vm_offset_t maxstack;

	thread_t *thread_list;
	thread_t thread;

	unsigned int actual;    /* this many things */
	unsigned int i;

	vm_size_t size, size_needed;
	void *addr;

	if (pset == PROCESSOR_SET_NULL || pset != &pset0) {
		return KERN_INVALID_ARGUMENT;
	}

	size = 0;
	addr = NULL;

	for (;;) {
		lck_mtx_lock(&tasks_threads_lock);

		actual = threads_count;

		/* do we have the memory we need? */

		size_needed = actual * sizeof(thread_t);
		if (size_needed <= size) {
			break;
		}

		lck_mtx_unlock(&tasks_threads_lock);

		if (size != 0) {
			kfree(addr, size);
		}

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0) {
			return KERN_RESOURCE_SHORTAGE;
		}
	}

	/* OK, have memory and list is locked */
	thread_list = (thread_t *) addr;
	for (i = 0, thread = (thread_t)(void *) queue_first(&threads);
	    !queue_end(&threads, (queue_entry_t) thread);
	    thread = (thread_t)(void *) queue_next(&thread->threads)) {
		thread_reference_internal(thread);
		thread_list[i++] = thread;
	}
	assert(i <= actual);

	lck_mtx_unlock(&tasks_threads_lock);

	/* calculate maxusage and free thread references */

	total = 0;
	maxusage = 0;
	maxstack = 0;
	while (i > 0) {
		thread_t threadref = thread_list[--i];

		if (threadref->kernel_stack != 0) {
			total++;
		}

		thread_deallocate(threadref);
	}

	if (size != 0) {
		kfree(addr, size);
	}

	*totalp = total;
	*residentp = *spacep = total * round_page(kernel_stack_size);
	*maxusagep = maxusage;
	*maxstackp = maxstack;
	return KERN_SUCCESS;

#endif  /* MACH_DEBUG */
}
vm_offset_t
min_valid_stack_address(void)
{
	return (vm_offset_t)vm_map_min(kernel_map);
}

vm_offset_t
max_valid_stack_address(void)
{
	return (vm_offset_t)vm_map_max(kernel_map);
}