/*
 * Copyright (c) 2003-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 *	Kernel stack management routines.
 */
#include <mach/mach_host.h>
#include <mach/mach_types.h>
#include <mach/processor_set.h>

#include <kern/kern_types.h>
#include <kern/mach_param.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <mach_debug.h>
/*
 *	We allocate stacks from generic kernel VM.
 *
 *	The stack_free_list can only be accessed at splsched,
 *	because stack_alloc_try/thread_invoke operate at splsched.
 */
decl_simple_lock_data(static,stack_lock_data)
#define stack_lock()		simple_lock(&stack_lock_data)
#define stack_unlock()		simple_unlock(&stack_lock_data)

#define STACK_CACHE_SIZE	2
static vm_offset_t	stack_free_list;

static unsigned int	stack_free_count, stack_free_hiwat;	/* free list count */
static unsigned int	stack_hiwat;
unsigned int		stack_total;				/* current total count */

static unsigned int	stack_free_target;
static int		stack_free_delta;

static unsigned int	stack_new_count;			/* total new stack allocations */

static vm_offset_t	stack_addr_mask;
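/*
 * A minimal sketch of the access discipline described above (illustrative
 * fragment, not part of the original logic): raise to splsched first, then
 * take stack_lock() before touching the free-list globals, and release in
 * the reverse order.
 */
#if 0
	spl_t	s;

	s = splsched();		/* interrupt level up first */
	stack_lock();		/* then the simple lock */
	/* ... examine or update stack_free_list / stack_free_count ... */
	stack_unlock();
	splx(s);		/* restore the previous interrupt level last */
#endif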
/*
 *	The next field is at the base of the stack,
 *	so the low end is left unsullied.
 */
#define stack_next(stack)	\
		(*((vm_offset_t *)((stack) + KERNEL_STACK_SIZE) - 1))
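/*
 * A minimal sketch of where stack_next() puts the free-list link
 * (illustrative fragment): the link occupies the last pointer-sized word
 * of the region, i.e. the stack base of a downward-growing kernel stack,
 * so the low (deep-usage) end of the region is never touched by list
 * linkage.
 */
#if 0
	vm_offset_t	stack;		/* base address of a free stack region */
	vm_offset_t	*link = (vm_offset_t *)(stack + KERNEL_STACK_SIZE) - 1;

	assert(link == &stack_next(stack));
#endif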
void
stack_init(void)
{
	simple_lock_init(&stack_lock_data, 0);

	if (KERNEL_STACK_SIZE < round_page(KERNEL_STACK_SIZE))
		panic("stack_init: stack size %d not a multiple of page size %d\n",
		      KERNEL_STACK_SIZE, PAGE_SIZE);

	stack_addr_mask = KERNEL_STACK_SIZE - 1;
}
/*
 *	stack_alloc:
 *
 *	Allocate a stack for a thread, may
 *	block.
 */
void
stack_alloc(
	thread_t	thread)
{
	vm_offset_t	stack;
	spl_t		s;
	int		guard_flags;

	assert(thread->kernel_stack == 0);

	s = splsched();
	stack_lock();
	stack = stack_free_list;
	if (stack != 0) {
		stack_free_list = stack_next(stack);
		stack_free_count--;
	}
	else {
		if (++stack_total > stack_hiwat)
			stack_hiwat = stack_total;
		stack_new_count++;
	}
	stack_free_delta--;
	stack_unlock();
	splx(s);

	if (stack == 0) {
		/*
		 * Request guard pages on either side of the stack. Ask
		 * kernel_memory_allocate() for two extra pages to account
		 * for these.
		 */
		guard_flags = KMA_GUARD_FIRST | KMA_GUARD_LAST;
		if (kernel_memory_allocate(kernel_map, &stack,
					   KERNEL_STACK_SIZE + (2*PAGE_SIZE),
					   stack_addr_mask,
					   KMA_KOBJECT | guard_flags)
		    != KERN_SUCCESS)
			panic("stack_alloc: kernel_memory_allocate");

		/*
		 * The stack address that comes back is the address of the lower
		 * guard page. Skip past it to get the actual stack base address.
		 */
		stack += PAGE_SIZE;
	}

	machine_stack_attach(thread, stack);
}
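/*
 * A minimal sketch of the guard-page layout stack_alloc() sets up
 * (illustrative helpers, not part of this file): the VM allocation is
 * KERNEL_STACK_SIZE plus two guard pages, the address handed back is the
 * lower guard page, and the caller-visible stack base sits one page above it.
 */
#if 0
static vm_size_t
stack_alloc_size_sketch(void)
{
	/* lower guard + usable stack + upper guard */
	return (KERNEL_STACK_SIZE + (2 * PAGE_SIZE));
}

static vm_offset_t
stack_base_from_alloc_sketch(vm_offset_t alloc_base)
{
	/* skip the lower guard page to reach the usable stack */
	return (alloc_base + PAGE_SIZE);
}
#endif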
/*
 *	stack_free:
 *
 *	Detach and free the stack for a thread.
 */
void
stack_free(
	thread_t	thread)
{
	vm_offset_t	stack = machine_stack_detach(thread);

	if (stack != thread->reserved_stack)
		stack_free_stack(stack);
}
void
stack_free_stack(
	vm_offset_t	stack)
{
	struct stack_cache	*cache;
	spl_t			s;

	s = splsched();
	cache = &PROCESSOR_DATA(current_processor(), stack_cache);
	if (cache->count < STACK_CACHE_SIZE) {
		stack_next(stack) = cache->free;
		cache->free = stack;
		cache->count++;
	}
	else {
		stack_lock();
		stack_next(stack) = stack_free_list;
		stack_free_list = stack;
		if (++stack_free_count > stack_free_hiwat)
			stack_free_hiwat = stack_free_count;
		stack_free_delta++;
		stack_unlock();
	}
	splx(s);
}
/*
 *	stack_alloc_try:
 *
 *	Non-blocking attempt to allocate a
 *	stack for a thread.
 *
 *	Returns TRUE on success.
 *
 *	Called at splsched.
 */
boolean_t
stack_alloc_try(
	thread_t	thread)
{
	struct stack_cache	*cache;
	vm_offset_t		stack;

	cache = &PROCESSOR_DATA(current_processor(), stack_cache);
	stack = cache->free;
	if (stack != 0) {
		cache->free = stack_next(stack);
		cache->count--;
	}
	else {
		if (stack_free_list != 0) {
			stack_lock();
			stack = stack_free_list;
			if (stack != 0) {
				stack_free_list = stack_next(stack);
				stack_free_count--;
				stack_free_delta--;
			}
			stack_unlock();
		}
	}

	if (stack != 0 || (stack = thread->reserved_stack) != 0) {
		machine_stack_attach(thread, stack);
		return (TRUE);
	}

	return (FALSE);
}
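/*
 * A minimal caller sketch for the contract above (illustrative only): the
 * caller is already at splsched, so on failure it must fall back to a
 * context where the blocking stack_alloc() path is allowed rather than
 * blocking here.
 */
#if 0
	if (!stack_alloc_try(thread)) {
		/* no per-processor cached stack, no free-list stack, and no
		 * reserved stack: defer the thread until a stack can be
		 * allocated on the blocking path. */
	}
#endif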
static unsigned int	stack_collect_tick, last_stack_tick;
/*
 *	stack_collect:
 *
 *	Free excess kernel stacks, may
 *	block.
 */
void
stack_collect(void)
{
	if (stack_collect_tick != last_stack_tick) {
		unsigned int	target;
		vm_offset_t	stack;
		spl_t		s;

		s = splsched();
		stack_lock();

		target = stack_free_target + (STACK_CACHE_SIZE * processor_count);
		target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;

		while (stack_free_count > target) {
			stack = stack_free_list;
			stack_free_list = stack_next(stack);
			stack_free_count--; stack_total--;
			stack_unlock();
			splx(s);

			/*
			 * Get the stack base address, then decrement by one page
			 * to account for the lower guard page. Add two extra pages
			 * to the size to account for the guard pages on both ends
			 * that were originally requested when the stack was allocated
			 * back in stack_alloc().
			 */
			stack = vm_map_trunc_page(stack);
			stack -= PAGE_SIZE;
			if (vm_map_remove(kernel_map,
					  stack,
					  stack + KERNEL_STACK_SIZE + (2*PAGE_SIZE),
					  VM_MAP_REMOVE_KUNWIRE)
			    != KERN_SUCCESS)
				panic("stack_collect: vm_map_remove");

			s = splsched();
			stack_lock();

			target = stack_free_target + (STACK_CACHE_SIZE * processor_count);
			target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;
		}

		last_stack_tick = stack_collect_tick;

		stack_unlock();
		splx(s);
	}
}
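/*
 * A worked sketch of the reclaim threshold computed above, with
 * illustrative numbers only: on a 2-processor system with
 * stack_free_target == 4 and stack_free_delta == -3,
 *
 *	target = 4 + (STACK_CACHE_SIZE * 2) + |-3| = 4 + 4 + 3 = 11
 *
 * so stacks are handed back to the VM system only while more than 11 of
 * them sit on the global free list.
 */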
/*
 *	compute_stack_target:
 *
 *	Computes a new target free list count
 *	based on recent alloc / free activity.
 *
 *	Limits stack collection to once per
 *	computation period.
 */
void
compute_stack_target(
__unused void		*arg)
{
	spl_t		s;

	s = splsched();
	stack_lock();

	if (stack_free_target > 5)
		stack_free_target = (4 * stack_free_target) / 5;
	else
	if (stack_free_target > 0)
		stack_free_target--;

	stack_free_target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;

	stack_free_delta = 0;
	stack_collect_tick++;

	stack_unlock();
	splx(s);
}
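/*
 * A worked sketch of one computation period above, with illustrative
 * numbers only: starting from stack_free_target == 10 with
 * |stack_free_delta| == 6 over the period,
 *
 *	target = (4 * 10) / 5 + 6 = 8 + 6 = 14
 *
 * so older history decays by roughly a fifth each period while recent
 * alloc/free churn is added back in full.
 */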
void
stack_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
		     vm_size_t *alloc_size, int *collectable, int *exhaustable)
{
	unsigned int	total, hiwat, free;
	spl_t		s;

	s = splsched();
	stack_lock();
	total = stack_total;
	hiwat = stack_hiwat;
	free = stack_free_count;
	stack_unlock();
	splx(s);

	*count      = total - free;
	*cur_size   = KERNEL_STACK_SIZE * total;
	*max_size   = KERNEL_STACK_SIZE * hiwat;
	*elem_size  = KERNEL_STACK_SIZE;
	*alloc_size = KERNEL_STACK_SIZE;
	*collectable = 1;
	*exhaustable = 0;
}
/* OBSOLETE */
void	stack_privilege(
			thread_t	thread);

void
stack_privilege(
	__unused thread_t	thread)
{
	/* OBSOLETE */
}
/*
 *	Return info on stack usage for threads in a specific processor set
 */
kern_return_t
processor_set_stack_usage(
	processor_set_t	pset,
	unsigned int	*totalp,
	vm_size_t	*spacep,
	vm_size_t	*residentp,
	vm_size_t	*maxusagep,
	vm_offset_t	*maxstackp)
{
#if !MACH_DEBUG
	return KERN_NOT_SUPPORTED;
#else
	unsigned int total;
	vm_size_t maxusage;
	vm_offset_t maxstack;

	register thread_t *thread_list;
	register thread_t thread;

	unsigned int actual;	/* this many things */
	unsigned int i;

	vm_size_t size, size_needed;
	void *addr;

	if (pset == PROCESSOR_SET_NULL || pset != &pset0)
		return KERN_INVALID_ARGUMENT;

	size = 0;
	addr = NULL;

	for (;;) {
		mutex_lock(&tasks_threads_lock);

		actual = threads_count;

		/* do we have the memory we need? */

		size_needed = actual * sizeof(thread_t);
		if (size_needed <= size)
			break;

		mutex_unlock(&tasks_threads_lock);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return KERN_RESOURCE_SHORTAGE;
	}

	/* OK, have memory and list is locked */
	thread_list = (thread_t *) addr;
	for (i = 0, thread = (thread_t) queue_first(&threads);
			!queue_end(&threads, (queue_entry_t) thread);
			thread = (thread_t) queue_next(&thread->threads)) {
		thread_reference_internal(thread);
		thread_list[i++] = thread;
	}
	assert(i <= actual);

	mutex_unlock(&tasks_threads_lock);

	/* calculate maxusage and free thread references */

	total = 0;
	maxusage = 0;
	maxstack = 0;
	while (i > 0) {
		thread_t threadref = thread_list[--i];

		if (threadref->kernel_stack != 0)
			total++;

		thread_deallocate(threadref);
	}

	if (size != 0)
		kfree(addr, size);

	*totalp = total;
	*residentp = *spacep = total * round_page(KERNEL_STACK_SIZE);
	*maxusagep = maxusage;
	*maxstackp = maxstack;
	return KERN_SUCCESS;

#endif	/* MACH_DEBUG */
}
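/*
 * A minimal sketch of the "do we have the memory we need?" loop used
 * above (hypothetical helper names, for illustration only): size the
 * buffer while holding the lock, and if the guess is too small, drop the
 * lock, grow the buffer, and retry, since the thread count can change
 * while the lock is not held.
 */
#if 0
	for (;;) {
		lock_list();
		size_needed = list_count() * sizeof(elem_t);
		if (size_needed <= size)
			break;			/* buffer large enough; keep the lock */
		unlock_list();
		size = size_needed;		/* may block while growing; list can change */
		buffer = reallocate(buffer, size);
	}
#endif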
vm_offset_t min_valid_stack_address(void)
{
	return vm_map_min(kernel_map);
}

vm_offset_t max_valid_stack_address(void)
{
	return vm_map_max(kernel_map);
}