/*
 * Copyright (c) 2003-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*
 *	Kernel stack management routines.
 */
#include <mach/mach_host.h>
#include <mach/mach_types.h>
#include <mach/processor_set.h>

#include <kern/kern_types.h>
#include <kern/mach_param.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <mach_debug.h>
/*
 *	We allocate stacks from generic kernel VM.
 *
 *	The stack_free_list can only be accessed at splsched,
 *	because stack_alloc_try/thread_invoke operate at splsched.
 */
decl_simple_lock_data(static,stack_lock_data)
#define stack_lock()		simple_lock(&stack_lock_data)
#define stack_unlock()		simple_unlock(&stack_lock_data)
#define STACK_CACHE_SIZE	2
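
/*
 *	Each processor keeps a small private cache of up to
 *	STACK_CACHE_SIZE detached stacks (see stack_free and
 *	stack_alloc_try), so the common detach/reattach path can
 *	avoid taking the global stack_lock.
 */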
static vm_map_t			stack_map;
static vm_offset_t		stack_free_list;

static unsigned int		stack_free_count, stack_free_hiwat;	/* free list count */
static unsigned int		stack_total, stack_hiwat;		/* current total count */

static unsigned int		stack_free_target;
static int			stack_free_delta;

static unsigned int		stack_new_count;			/* total new stack allocations */

static vm_offset_t		stack_addr_mask;
/*
 *	The next field is at the base of the stack,
 *	so the low end is left unsullied.
 */
#define stack_next(stack)	\
			(*((vm_offset_t *)((stack) + KERNEL_STACK_SIZE) - 1))
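
/*
 *	A stack is only linked through stack_next() while it sits on
 *	a per-processor cache or the global stack_free_list; the
 *	memory is not in use as a stack at that point, so the link
 *	word cannot be clobbered.
 */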
void
stack_init(void)
{
	vm_offset_t		stacks, boundary;
	vm_map_offset_t		map_addr;

	simple_lock_init(&stack_lock_data, 0);

	if (KERNEL_STACK_SIZE < round_page(KERNEL_STACK_SIZE))
		panic("stack_init: stack size %d not a multiple of page size %d\n", KERNEL_STACK_SIZE, PAGE_SIZE);

	for (boundary = PAGE_SIZE; boundary <= KERNEL_STACK_SIZE; )
		boundary <<= 1;

	stack_addr_mask = boundary - 1;

	if (kmem_suballoc(kernel_map, &stacks, (boundary * (2 * THREAD_MAX + 64)),
				FALSE, VM_FLAGS_ANYWHERE, &stack_map) != KERN_SUCCESS)
		panic("stack_init: kmem_suballoc");

	map_addr = vm_map_min(stack_map);
	if (vm_map_enter(stack_map, &map_addr, vm_map_round_page(PAGE_SIZE), 0,
				(VM_MAKE_TAG(VM_MEMORY_STACK) | VM_FLAGS_FIXED),
				VM_OBJECT_NULL, 0, FALSE, VM_PROT_NONE, VM_PROT_NONE,
				VM_INHERIT_DEFAULT) != KERN_SUCCESS)
		panic("stack_init: vm_map_enter");
}
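
/*
 *	Note: the loop in stack_init() leaves boundary as the smallest
 *	power of two greater than KERNEL_STACK_SIZE, so stack_addr_mask
 *	(boundary - 1) is the alignment mask later handed to
 *	kernel_memory_allocate() in stack_alloc().
 */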
/*
 *	stack_alloc:
 *
 *	Allocate a stack for a thread, may
 *	block.
 */
void
stack_alloc(
	thread_t	thread)
{
	vm_offset_t		stack;
	spl_t			s;

	assert(thread->kernel_stack == 0);

	s = splsched();
	stack_lock();
	stack = stack_free_list;
	if (stack != 0) {
		stack_free_list = stack_next(stack);
		stack_free_count--;
	}
	else {
		if (++stack_total > stack_hiwat)
			stack_hiwat = stack_total;
		stack_new_count++;
	}
	stack_free_delta--;
	stack_unlock();
	splx(s);

	if (stack == 0) {
		if (kernel_memory_allocate(stack_map, &stack, KERNEL_STACK_SIZE,
					stack_addr_mask, KMA_KOBJECT) != KERN_SUCCESS)
			panic("stack_alloc: kernel_memory_allocate");
	}

	machine_stack_attach(thread, stack);
}
/*
 *	stack_free:
 *
 *	Detach and free the stack for a thread.
 */
void
stack_free(
	thread_t	thread)
{
	vm_offset_t		stack = machine_stack_detach(thread);

	if (stack != thread->reserved_stack) {
		struct stack_cache	*cache;
		spl_t			s;

		s = splsched();
		cache = &PROCESSOR_DATA(current_processor(), stack_cache);
		if (cache->count < STACK_CACHE_SIZE) {
			stack_next(stack) = cache->free;
			cache->free = stack;
			cache->count++;
		}
		else {
			stack_lock();
			stack_next(stack) = stack_free_list;
			stack_free_list = stack;
			if (++stack_free_count > stack_free_hiwat)
				stack_free_hiwat = stack_free_count;
			stack_free_delta++;
			stack_unlock();
		}
		splx(s);
	}
}
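
/*
 *	stack_free_stack:
 *
 *	Return an unattached stack to the current processor's cache,
 *	or to the global free list if the cache is already full.
 */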
void
stack_free_stack(
	vm_offset_t		stack)
{
	struct stack_cache	*cache;
	spl_t			s;

	s = splsched();
	cache = &PROCESSOR_DATA(current_processor(), stack_cache);
	if (cache->count < STACK_CACHE_SIZE) {
		stack_next(stack) = cache->free;
		cache->free = stack;
		cache->count++;
	}
	else {
		stack_lock();
		stack_next(stack) = stack_free_list;
		stack_free_list = stack;
		if (++stack_free_count > stack_free_hiwat)
			stack_free_hiwat = stack_free_count;
		stack_free_delta++;
		stack_unlock();
	}
	splx(s);
}
/*
 *	stack_alloc_try:
 *
 *	Non-blocking attempt to allocate a
 *	stack for a thread.
 *
 *	Returns TRUE on success.
 *
 *	Called at splsched.
 */
boolean_t
stack_alloc_try(
	thread_t		thread)
{
	struct stack_cache	*cache;
	vm_offset_t		stack;

	cache = &PROCESSOR_DATA(current_processor(), stack_cache);
	stack = cache->free;
	if (stack != 0) {
		cache->free = stack_next(stack);
		cache->count--;
	}
	else {
		if (stack_free_list != 0) {
			stack_lock();
			stack = stack_free_list;
			if (stack != 0) {
				stack_free_list = stack_next(stack);
				stack_free_count--;
				stack_free_delta--;
			}
			stack_unlock();
		}
	}

	if (stack != 0 || (stack = thread->reserved_stack) != 0) {
		machine_stack_attach(thread, stack);
		return (TRUE);
	}

	return (FALSE);
}
static unsigned int		stack_collect_tick, last_stack_tick;
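
/*
 *	stack_collect_tick is advanced by compute_stack_target();
 *	stack_collect() only does work when it sees a tick it has not
 *	yet processed, which limits collection to once per computation
 *	period.
 */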
/*
 *	stack_collect:
 *
 *	Free excess kernel stacks, may
 *	block.
 */
void
stack_collect(void)
{
	if (stack_collect_tick != last_stack_tick) {
		unsigned int	target;
		vm_offset_t	stack;
		spl_t		s;

		s = splsched();
		stack_lock();

		target = stack_free_target + (STACK_CACHE_SIZE * processor_count);
		target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;

		while (stack_free_count > target) {
			stack = stack_free_list;
			stack_free_list = stack_next(stack);
			stack_free_count--; stack_total--;
			stack_unlock();
			splx(s);

			if (vm_map_remove(stack_map, vm_map_trunc_page(stack),
					vm_map_round_page(stack + KERNEL_STACK_SIZE),
					VM_MAP_REMOVE_KUNWIRE) != KERN_SUCCESS)
				panic("stack_collect: vm_map_remove");

			s = splsched();
			stack_lock();

			target = stack_free_target + (STACK_CACHE_SIZE * processor_count);
			target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;
		}

		last_stack_tick = stack_collect_tick;

		stack_unlock();
		splx(s);
	}
}
/*
 *	compute_stack_target:
 *
 *	Computes a new target free list count
 *	based on recent alloc / free activity.
 *
 *	Limits stack collection to once per
 *	computation period.
 */
void
compute_stack_target(
__unused void		*arg)
{
	spl_t		s;

	s = splsched();
	stack_lock();

	if (stack_free_target > 5)
		stack_free_target = (4 * stack_free_target) / 5;
	else
	if (stack_free_target > 0)
		stack_free_target--;

	stack_free_target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;

	stack_free_delta = 0;
	stack_collect_tick++;

	stack_unlock();
	splx(s);
}
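
/*
 *	stack_fake_zone_info:
 *
 *	Report kernel stack usage in zone-like terms so that the
 *	zone-statistics interfaces (e.g. zprint) can display it.
 */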
void
stack_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
			vm_size_t *alloc_size, int *collectable, int *exhaustable)
{
	unsigned int	total, hiwat, free;
	spl_t		s;

	s = splsched();
	stack_lock();
	total = stack_total;
	hiwat = stack_hiwat;
	free = stack_free_count;
	stack_unlock();
	splx(s);

	*count      = total - free;
	*cur_size   = KERNEL_STACK_SIZE * total;
	*max_size   = KERNEL_STACK_SIZE * hiwat;
	*elem_size  = KERNEL_STACK_SIZE;
	*alloc_size = KERNEL_STACK_SIZE;
	*collectable = 1;
	*exhaustable = 0;
}
/* OBSOLETE */
void	stack_privilege(
			thread_t	thread);

void
stack_privilege(
	__unused thread_t	thread)
{
	/* OBSOLETE */
}
/*
 *	Return info on stack usage for threads in a specific processor set
 */
kern_return_t
processor_set_stack_usage(
	processor_set_t	pset,
	unsigned int	*totalp,
	vm_size_t	*spacep,
	vm_size_t	*residentp,
	vm_size_t	*maxusagep,
	vm_offset_t	*maxstackp)
{
#if !MACH_DEBUG
	return KERN_NOT_SUPPORTED;
#else
	unsigned int total;
	vm_size_t maxusage;
	vm_offset_t maxstack;

	register thread_t *threads;
	register thread_t thread;

	unsigned int actual;	/* this many things */
	unsigned int i;

	vm_size_t size, size_needed;
	void *addr;

	if (pset == PROCESSOR_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	size = 0; addr = 0;

	for (;;) {
		pset_lock(pset);
		if (!pset->active) {
			pset_unlock(pset);
			return KERN_INVALID_ARGUMENT;
		}

		actual = pset->thread_count;

		/* do we have the memory we need? */

		size_needed = actual * sizeof(thread_t);
		if (size_needed <= size)
			break;

		/* unlock the pset and allocate more memory */
		pset_unlock(pset);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return KERN_RESOURCE_SHORTAGE;
	}

	/* OK, have memory and the processor_set is locked & active */
	threads = (thread_t *) addr;
	for (i = 0, thread = (thread_t) queue_first(&pset->threads);
			!queue_end(&pset->threads, (queue_entry_t) thread);
			thread = (thread_t) queue_next(&thread->pset_threads)) {
		thread_reference_internal(thread);
		threads[i++] = thread;
	}

	/* can unlock processor set now that we have the thread refs */
	pset_unlock(pset);

	/* calculate maxusage and free thread references */

	total = 0;
	maxusage = 0;
	maxstack = 0;
	while (i > 0) {
		thread_t threadref = threads[--i];

		if (threadref->kernel_stack != 0)
			total++;

		thread_deallocate(threadref);
	}

	if (size != 0)
		kfree(addr, size);

	*totalp = total;
	*residentp = *spacep = total * round_page(KERNEL_STACK_SIZE);
	*maxusagep = maxusage;
	*maxstackp = maxstack;
	return KERN_SUCCESS;

#endif	/* MACH_DEBUG */
}
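
/*
 *	Bounds of the stack submap; handy for sanity-checking whether
 *	an address could point into a kernel stack.
 */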
vm_offset_t min_valid_stack_address(void)
{
	return vm_map_min(stack_map);
}

vm_offset_t max_valid_stack_address(void)
{
	return vm_map_max(stack_map);
}