/*
 * Copyright (c) 2003-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Kernel stack management routines.
 */
#include <mach/mach_host.h>
#include <mach/mach_types.h>
#include <mach/processor_set.h>

#include <kern/kern_types.h>
#include <kern/mach_param.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <mach_debug.h>
/*
 * We allocate stacks from generic kernel VM.
 *
 * The stack_free_list can only be accessed at splsched,
 * because stack_alloc_try/thread_invoke operate at splsched.
 */
decl_simple_lock_data(static,stack_lock_data)
#define stack_lock()		simple_lock(&stack_lock_data)
#define stack_unlock()		simple_unlock(&stack_lock_data)

#define STACK_CACHE_SIZE	2
static vm_offset_t	stack_free_list;

static unsigned int	stack_free_count, stack_free_hiwat;	/* free list count */
static unsigned int	stack_total, stack_hiwat;		/* current total count */

static unsigned int	stack_free_target;
static int		stack_free_delta;

static unsigned int	stack_new_count;			/* total new stack allocations */

static vm_offset_t	stack_addr_mask;
/*
 * The next field is at the base of the stack,
 * so the low end is left unsullied.
 */
#define stack_next(stack)	\
	(*((vm_offset_t *)((stack) + KERNEL_STACK_SIZE) - 1))
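/*
 * Illustration only (not part of the build): a minimal userland sketch
 * of the technique used by stack_next() above -- threading a LIFO free
 * list through the last pointer-sized word of each stack, leaving the
 * low end of the memory untouched.  DEMO_STACK_SIZE and the demo_*
 * names are hypothetical.
 */
#if 0
#include <stdint.h>

#define DEMO_STACK_SIZE		(16 * 1024)
#define demo_next(stack)	\
	(*((uintptr_t *)((stack) + DEMO_STACK_SIZE) - 1))

static uintptr_t	demo_free_list;

static void
demo_push(uintptr_t stack)
{
	demo_next(stack) = demo_free_list;	/* link through the top word */
	demo_free_list = stack;
}

static uintptr_t
demo_pop(void)
{
	uintptr_t	stack = demo_free_list;

	if (stack != 0)
		demo_free_list = demo_next(stack);	/* unlink head */
	return stack;
}
#endif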
void
stack_init(void)
{
	simple_lock_init(&stack_lock_data, 0);

	if (KERNEL_STACK_SIZE < round_page(KERNEL_STACK_SIZE))
		panic("stack_init: stack size %d not a multiple of page size %d\n",
		      KERNEL_STACK_SIZE, PAGE_SIZE);

	stack_addr_mask = KERNEL_STACK_SIZE - 1;
}
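/*
 * Illustration only: stack_addr_mask assumes KERNEL_STACK_SIZE is a
 * power of two, so (size - 1) forms a low-bit mask usable both for
 * alignment checks and for the aligned allocation request made in
 * stack_alloc() below.  A hypothetical userland check of the two
 * properties:
 */
#if 0
#include <assert.h>

static void
demo_mask_check(unsigned long size, unsigned long addr)
{
	/* a power of two has exactly one bit set */
	assert(size != 0 && (size & (size - 1)) == 0);

	/* an address is stack-aligned iff its low mask bits are clear */
	assert((addr & (size - 1)) == 0);
}
#endif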
/*
 * stack_alloc:
 *
 * Allocate a stack for a thread, may
 * block.
 */
void
stack_alloc(
	thread_t	thread)
{
	vm_offset_t	stack;
	spl_t		s;
	int		guard_flags;

	assert(thread->kernel_stack == 0);

	s = splsched();
	stack_lock();
	stack = stack_free_list;
	if (stack != 0) {
		stack_free_list = stack_next(stack);
		stack_free_count--;
	}
	else {
		if (++stack_total > stack_hiwat)
			stack_hiwat = stack_total;
		stack_new_count++;
	}
	stack_free_delta--;
	stack_unlock();
	splx(s);

	if (stack == 0) {
		/*
		 * Request guard pages on either side of the stack.  Ask
		 * kernel_memory_allocate() for two extra pages to account
		 * for these.
		 */
		guard_flags = KMA_GUARD_FIRST | KMA_GUARD_LAST;
		if (kernel_memory_allocate(kernel_map, &stack,
					   KERNEL_STACK_SIZE + (2 * PAGE_SIZE),
					   stack_addr_mask,
					   KMA_KOBJECT | guard_flags)
		    != KERN_SUCCESS)
			panic("stack_alloc: kernel_memory_allocate");

		/*
		 * The stack address that comes back is the address of the lower
		 * guard page.  Skip past it to get the actual stack base address.
		 */
		stack += PAGE_SIZE;
	}

	machine_stack_attach(thread, stack);
}
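/*
 * Illustration only (not part of the build): a hypothetical userland
 * analogue of the guard-page layout above, using POSIX mmap() and
 * mprotect().  One inaccessible page below and one above the usable
 * range turns a stack overrun into an immediate fault instead of
 * silent corruption of a neighboring allocation.
 */
#if 0
#include <sys/mman.h>
#include <stddef.h>

static void *
demo_alloc_guarded(size_t stack_size, size_t page_size)
{
	size_t	total = stack_size + 2 * page_size;
	char	*base = mmap(NULL, total, PROT_READ | PROT_WRITE,
			     MAP_PRIVATE | MAP_ANON, -1, 0);

	if (base == MAP_FAILED)
		return NULL;

	/* revoke access to the first and last pages: the guards */
	mprotect(base, page_size, PROT_NONE);
	mprotect(base + total - page_size, page_size, PROT_NONE);

	/* skip past the lower guard, as stack_alloc() does */
	return base + page_size;
}
#endif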
/*
 * stack_free:
 *
 * Detach and free the stack for a thread.
 */
void
stack_free(
	thread_t	thread)
{
	vm_offset_t	stack = machine_stack_detach(thread);

	assert(stack);
	if (stack != thread->reserved_stack)
		stack_free_stack(stack);
}
void
stack_free_stack(
	vm_offset_t	stack)
{
	struct stack_cache	*cache;
	spl_t			s;

	s = splsched();
	cache = &PROCESSOR_DATA(current_processor(), stack_cache);
	if (cache->count < STACK_CACHE_SIZE) {
		stack_next(stack) = cache->free;
		cache->free = stack;
		cache->count++;
	}
	else {
		stack_lock();
		stack_next(stack) = stack_free_list;
		stack_free_list = stack;
		if (++stack_free_count > stack_free_hiwat)
			stack_free_hiwat = stack_free_count;
		stack_free_delta++;
		stack_unlock();
	}
	splx(s);
}
/*
 * stack_alloc_try:
 *
 * Non-blocking attempt to allocate a
 * stack for a thread.
 *
 * Returns TRUE on success.
 *
 * Called at splsched.
 */
boolean_t
stack_alloc_try(
	thread_t	thread)
{
	struct stack_cache	*cache;
	vm_offset_t		stack;

	cache = &PROCESSOR_DATA(current_processor(), stack_cache);
	stack = cache->free;
	if (stack != 0) {
		cache->free = stack_next(stack);
		cache->count--;
	}
	else {
		if (stack_free_list != 0) {
			stack_lock();
			stack = stack_free_list;
			if (stack != 0) {
				stack_free_list = stack_next(stack);
				stack_free_count--;
				stack_free_delta--;
			}
			stack_unlock();
		}
	}

	if (stack != 0 || (stack = thread->reserved_stack) != 0) {
		machine_stack_attach(thread, stack);
		return (TRUE);
	}

	return (FALSE);
}
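/*
 * Illustration only: the non-blocking attempt above tries three
 * sources in order -- per-CPU cache, global free list, the thread's
 * reserved stack -- and reports failure rather than block.  A
 * hypothetical skeleton (demo_lock()/demo_unlock() stand in for
 * stack_lock()/stack_unlock()):
 */
#if 0
static int
demo_try_alloc(struct demo_cache *cache, uintptr_t reserved, uintptr_t *out)
{
	uintptr_t	stack = 0;

	if (cache->free != 0) {			/* 1: per-CPU cache */
		stack = cache->free;
		cache->free = demo_next(stack);
		cache->count--;
	}
	else if (demo_free_list != 0) {		/* 2: global free list */
		demo_lock();
		stack = demo_pop();
		demo_unlock();
	}
	if (stack == 0)
		stack = reserved;		/* 3: reserved fallback */

	*out = stack;
	return stack != 0;
}
#endif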
static unsigned int	stack_collect_tick, last_stack_tick;
/*
 * stack_collect:
 *
 * Free excess kernel stacks, may
 * block.
 */
void
stack_collect(void)
{
	if (stack_collect_tick != last_stack_tick) {
		unsigned int	target;
		vm_offset_t	stack;
		spl_t		s;

		s = splsched();
		stack_lock();

		target = stack_free_target + (STACK_CACHE_SIZE * processor_count);
		target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;

		while (stack_free_count > target) {
			stack = stack_free_list;
			stack_free_list = stack_next(stack);
			stack_free_count--; stack_total--;
			stack_unlock();
			splx(s);

			/*
			 * Get the stack base address, then decrement by one page
			 * to account for the lower guard page.  Add two extra pages
			 * to the size to account for the guard pages on both ends
			 * that were originally requested when the stack was allocated
			 * back in stack_alloc().
			 */
			stack = vm_map_trunc_page(stack);
			stack -= PAGE_SIZE;
			if (vm_map_remove(
				    kernel_map,
				    stack,
				    stack + KERNEL_STACK_SIZE + (2 * PAGE_SIZE),
				    VM_MAP_REMOVE_KUNWIRE)
			    != KERN_SUCCESS)
				panic("stack_collect: vm_map_remove");
			stack = 0;

			s = splsched();
			stack_lock();

			target = stack_free_target + (STACK_CACHE_SIZE * processor_count);
			target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;
		}

		last_stack_tick = stack_collect_tick;

		stack_unlock();
		splx(s);
	}
}
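/*
 * Illustration only: stack_collect() drops the lock around the actual
 * unmap because vm_map_remove() may block, then reacquires it and
 * recomputes the target, since stack_free_delta may have moved in the
 * meantime.  A hypothetical skeleton of that drop/reacquire pattern
 * (demo_release() stands in for the blocking unmap):
 */
#if 0
static void
demo_trim(unsigned int *count, unsigned int target)
{
	demo_lock();
	while (*count > target) {
		uintptr_t	stack = demo_pop();

		(*count)--;
		demo_unlock();		/* drop: releasing may block */
		demo_release(stack);	/* hypothetical unmap/free */
		demo_lock();		/* reacquire and re-test */
	}
	demo_unlock();
}
#endif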
/*
 * compute_stack_target:
 *
 * Computes a new target free list count
 * based on recent alloc / free activity.
 *
 * Limits stack collection to once per
 * computation period.
 */
void
compute_stack_target(
__unused void	*arg)
{
	spl_t	s;

	s = splsched();
	stack_lock();

	if (stack_free_target > 5)
		stack_free_target = (4 * stack_free_target) / 5;
	else
	if (stack_free_target > 0)
		stack_free_target--;

	stack_free_target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;

	stack_free_delta = 0;
	stack_collect_tick++;

	stack_unlock();
	splx(s);
}
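/*
 * Illustration only: the target decays geometrically (x -> 4x/5 each
 * period) toward zero when idle, then is bumped by the magnitude of
 * the period's alloc/free imbalance; e.g. an idle target of 20 shrinks
 * 20 -> 16 -> 12 -> 9 -> 7 over successive periods.  A hypothetical
 * standalone form of the computation:
 */
#if 0
static unsigned int
demo_decay_target(unsigned int target, int delta)
{
	if (target > 5)
		target = (4 * target) / 5;	/* ~20% decay per period */
	else if (target > 0)
		target--;

	return target + (unsigned int)(delta >= 0 ? delta : -delta);
}
#endif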
void
stack_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
		     vm_size_t *alloc_size, int *collectable, int *exhaustable)
{
	unsigned int	total, hiwat, free;
	spl_t		s;

	s = splsched();
	stack_lock();
	total = stack_total;
	hiwat = stack_hiwat;
	free = stack_free_count;
	stack_unlock();
	splx(s);

	*count       = total - free;
	*cur_size    = KERNEL_STACK_SIZE * total;
	*max_size    = KERNEL_STACK_SIZE * hiwat;
	*elem_size   = KERNEL_STACK_SIZE;
	*alloc_size  = KERNEL_STACK_SIZE;
	*collectable = 1;
	*exhaustable = 0;
}
/* OBSOLETE */
void	stack_privilege(
		thread_t	thread);

void
stack_privilege(
	__unused thread_t	thread)
{
	/* OBSOLETE */
}
/*
 * Return info on stack usage for threads in a specific processor set
 */
kern_return_t
processor_set_stack_usage(
	processor_set_t	pset,
	unsigned int	*totalp,
	vm_size_t	*spacep,
	vm_size_t	*residentp,
	vm_size_t	*maxusagep,
	vm_offset_t	*maxstackp)
{
#if !MACH_DEBUG
	return KERN_NOT_SUPPORTED;
#else
	unsigned int total;
	vm_size_t maxusage;
	vm_offset_t maxstack;

	register thread_t *thread_list;
	register thread_t thread;

	unsigned int actual;	/* this many things */
	unsigned int i;

	vm_size_t size, size_needed;
	void *addr;

	if (pset == PROCESSOR_SET_NULL || pset != &pset0)
		return KERN_INVALID_ARGUMENT;

	size = 0;
	addr = NULL;

	for (;;) {
		mutex_lock(&tasks_threads_lock);

		actual = threads_count;

		/* do we have the memory we need? */

		size_needed = actual * sizeof(thread_t);
		if (size_needed <= size)
			break;

		mutex_unlock(&tasks_threads_lock);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return KERN_RESOURCE_SHORTAGE;
	}

	/* OK, have memory and list is locked */
	thread_list = (thread_t *) addr;
	for (i = 0, thread = (thread_t) queue_first(&threads);
	     !queue_end(&threads, (queue_entry_t) thread);
	     thread = (thread_t) queue_next(&thread->threads)) {
		thread_reference_internal(thread);
		thread_list[i++] = thread;
	}
	assert(i <= actual);

	mutex_unlock(&tasks_threads_lock);

	/* calculate maxusage and free thread references */

	total = 0;
	maxusage = 0;
	maxstack = 0;
	while (i > 0) {
		thread_t threadref = thread_list[--i];

		if (threadref->kernel_stack != 0)
			total++;

		thread_deallocate(threadref);
	}

	if (size != 0)
		kfree(addr, size);

	*totalp = total;
	*residentp = *spacep = total * round_page(KERNEL_STACK_SIZE);
	*maxusagep = maxusage;
	*maxstackp = maxstack;
	return KERN_SUCCESS;

#endif	/* MACH_DEBUG */
}
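/*
 * Illustration only: the sizing loop above is the classic
 * allocate/lock/re-check pattern -- size a snapshot buffer with the
 * lock dropped, retake the lock, and retry with a bigger buffer if
 * the thread list grew in the meantime.  A hypothetical userland
 * skeleton (demo_count() stands in for threads_count):
 */
#if 0
#include <stdlib.h>

static void *
demo_snapshot(size_t elem_size)
{
	size_t	size = 0, size_needed;
	void	*addr = NULL;

	for (;;) {
		demo_lock();
		size_needed = demo_count() * elem_size;
		if (size_needed <= size)
			break;		/* buffer is big enough; stay locked */
		demo_unlock();

		free(addr);		/* too small: grow and retry */
		size = size_needed;
		addr = malloc(size);
		if (addr == NULL)
			return NULL;
	}
	/* ... copy entries under the lock, then demo_unlock() ... */
	return addr;
}
#endif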
vm_offset_t
min_valid_stack_address(void)
{
	return vm_map_min(kernel_map);
}

vm_offset_t
max_valid_stack_address(void)
{
	return vm_map_max(kernel_map);
}