/*
 * Copyright (c) 2003-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Kernel stack management routines.
 */

#include <mach/mach_host.h>
#include <mach/mach_types.h>
#include <mach/processor_set.h>

#include <kern/kern_types.h>
#include <kern/mach_param.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <mach_debug.h>

/*
 * We allocate stacks from generic kernel VM.
 *
 * The stack_free_list can only be accessed at splsched,
 * because stack_alloc_try/thread_invoke operate at splsched.
 */

decl_simple_lock_data(static,stack_lock_data)
#define stack_lock()    simple_lock(&stack_lock_data)
#define stack_unlock()  simple_unlock(&stack_lock_data)

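/*
 * Each processor also keeps a small private cache of free stacks, so that
 * stack_alloc_try() and stack_free_stack() can usually avoid taking the
 * global stack lock.  STACK_CACHE_SIZE is the depth of that cache.
 */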
#define STACK_CACHE_SIZE  2

static vm_offset_t   stack_free_list;

static unsigned int  stack_free_count, stack_free_hiwat; /* free list count and its high-water mark */
static unsigned int  stack_hiwat;                        /* high-water mark of stack_total */
unsigned int         stack_total;                        /* current total count */

static unsigned int  stack_free_target; /* target free list size, recomputed from recent activity */
static int           stack_free_delta;  /* frees minus allocations since the last recomputation */

static unsigned int  stack_new_count;   /* total new stack allocations */

static vm_offset_t   stack_addr_mask;   /* stacks are aligned to KERNEL_STACK_SIZE */

/*
 * The next-stack link is stored in the last word of the stack area
 * (the base of a downward-growing kernel stack), so the low end of
 * the allocation is left unsullied.
 */
#define stack_next(stack) \
    (*((vm_offset_t *)((stack) + KERNEL_STACK_SIZE) - 1))

void
stack_init(void)
{
    simple_lock_init(&stack_lock_data, 0);

    if (KERNEL_STACK_SIZE < round_page(KERNEL_STACK_SIZE))
        panic("stack_init: stack size %d not a multiple of page size %d\n", KERNEL_STACK_SIZE, PAGE_SIZE);

    stack_addr_mask = KERNEL_STACK_SIZE - 1;
}

/*
 * stack_alloc:
 *
 * Allocate a stack for a thread, may
 * block.
 */
void
stack_alloc(
    thread_t    thread)
{
    vm_offset_t stack;
    spl_t       s;
    int         guard_flags;

    assert(thread->kernel_stack == 0);

    s = splsched();
    stack_lock();
    stack = stack_free_list;
    if (stack != 0) {
        stack_free_list = stack_next(stack);
        stack_free_count--;
    }
    else {
        if (++stack_total > stack_hiwat)
            stack_hiwat = stack_total;
        stack_new_count++;
    }
    stack_free_delta--;
    stack_unlock();
    splx(s);

    if (stack == 0) {

        /*
         * Request guard pages on either side of the stack. Ask
         * kernel_memory_allocate() for two extra pages to account
         * for these.
         */

        guard_flags = KMA_GUARD_FIRST | KMA_GUARD_LAST;
        if (kernel_memory_allocate(kernel_map, &stack,
                    KERNEL_STACK_SIZE + (2*PAGE_SIZE),
                    stack_addr_mask,
                    KMA_KOBJECT | guard_flags)
                != KERN_SUCCESS)
            panic("stack_alloc: kernel_memory_allocate");

        /*
         * The stack address that comes back is the address of the lower
         * guard page. Skip past it to get the actual stack base address.
         */

        stack += PAGE_SIZE;
    }

    machine_stack_attach(thread, stack);
}

/*
 * stack_free:
 *
 * Detach and free the stack for a thread.
 */
void
stack_free(
    thread_t    thread)
{
    vm_offset_t stack = machine_stack_detach(thread);

    assert(stack);
    if (stack != thread->reserved_stack)
        stack_free_stack(stack);
}

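/*
 * stack_free_stack:
 *
 * Return a stack to the per-processor cache, or to the
 * global free list once the cache is full.
 */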
void
stack_free_stack(
    vm_offset_t         stack)
{
    struct stack_cache  *cache;
    spl_t               s;

    s = splsched();
    cache = &PROCESSOR_DATA(current_processor(), stack_cache);
    if (cache->count < STACK_CACHE_SIZE) {
        stack_next(stack) = cache->free;
        cache->free = stack;
        cache->count++;
    }
    else {
        stack_lock();
        stack_next(stack) = stack_free_list;
        stack_free_list = stack;
        if (++stack_free_count > stack_free_hiwat)
            stack_free_hiwat = stack_free_count;
        stack_free_delta++;
        stack_unlock();
    }
    splx(s);
}

/*
 * stack_alloc_try:
 *
 * Non-blocking attempt to allocate a
 * stack for a thread.
 *
 * Returns TRUE on success.
 *
 * Called at splsched.
 */
boolean_t
stack_alloc_try(
    thread_t            thread)
{
    struct stack_cache  *cache;
    vm_offset_t         stack;

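    /*
     * The per-processor cache is only touched at splsched by its
     * owning processor, so it can be used without taking the
     * global stack lock.
     */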
    cache = &PROCESSOR_DATA(current_processor(), stack_cache);
    stack = cache->free;
    if (stack != 0) {
        cache->free = stack_next(stack);
        cache->count--;
    }
    else {
        if (stack_free_list != 0) {
            stack_lock();
            stack = stack_free_list;
            if (stack != 0) {
                stack_free_list = stack_next(stack);
                stack_free_count--;
                stack_free_delta--;
            }
            stack_unlock();
        }
    }

    if (stack != 0 || (stack = thread->reserved_stack) != 0) {
        machine_stack_attach(thread, stack);
        return (TRUE);
    }

    return (FALSE);
}

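/*
 * stack_collect() only does work when compute_stack_target() has
 * advanced stack_collect_tick since the last collection, limiting
 * collection to once per computation period.
 */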
static unsigned int stack_collect_tick, last_stack_tick;

/*
 * stack_collect:
 *
 * Free excess kernel stacks, may
 * block.
 */
void
stack_collect(void)
{
    if (stack_collect_tick != last_stack_tick) {
        unsigned int    target;
        vm_offset_t     stack;
        spl_t           s;

        s = splsched();
        stack_lock();

        target = stack_free_target + (STACK_CACHE_SIZE * processor_count);
        target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;

        while (stack_free_count > target) {
            stack = stack_free_list;
            stack_free_list = stack_next(stack);
            stack_free_count--; stack_total--;
            stack_unlock();
            splx(s);

            /*
             * Get the stack base address, then decrement by one page
             * to account for the lower guard page. Add two extra pages
             * to the size to account for the guard pages on both ends
             * that were originally requested when the stack was allocated
             * back in stack_alloc().
             */

            stack = vm_map_trunc_page(stack);
            stack -= PAGE_SIZE;
            if (vm_map_remove(
                        kernel_map,
                        stack,
                        stack + KERNEL_STACK_SIZE+(2*PAGE_SIZE),
                        VM_MAP_REMOVE_KUNWIRE)
                    != KERN_SUCCESS)
                panic("stack_collect: vm_map_remove");
            stack = 0;

            s = splsched();
            stack_lock();

            target = stack_free_target + (STACK_CACHE_SIZE * processor_count);
            target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;
        }

        last_stack_tick = stack_collect_tick;

        stack_unlock();
        splx(s);
    }
}

/*
 * compute_stack_target:
 *
 * Computes a new target free list count
 * based on recent alloc / free activity.
 *
 * Limits stack collection to once per
 * computation period.
 */
void
compute_stack_target(
    __unused void   *arg)
{
    spl_t   s;

    s = splsched();
    stack_lock();

    if (stack_free_target > 5)
        stack_free_target = (4 * stack_free_target) / 5;
    else
    if (stack_free_target > 0)
        stack_free_target--;

    stack_free_target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;

    stack_free_delta = 0;
    stack_collect_tick++;

    stack_unlock();
    splx(s);
}

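/*
 * stack_fake_zone_info:
 *
 * Report kernel stack usage in the same form as zone statistics,
 * so stacks can appear as a fake zone in zone debugging reports.
 */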
void
stack_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
        vm_size_t *alloc_size, int *collectable, int *exhaustable)
{
    unsigned int    total, hiwat, free;
    spl_t           s;

    s = splsched();
    stack_lock();
    total = stack_total;
    hiwat = stack_hiwat;
    free = stack_free_count;
    stack_unlock();
    splx(s);

    *count       = total - free;
    *cur_size    = KERNEL_STACK_SIZE * total;
    *max_size    = KERNEL_STACK_SIZE * hiwat;
    *elem_size   = KERNEL_STACK_SIZE;
    *alloc_size  = KERNEL_STACK_SIZE;
    *collectable = 1;
    *exhaustable = 0;
}

/* OBSOLETE */
void stack_privilege(
    thread_t    thread);

void
stack_privilege(
    __unused thread_t   thread)
{
    /* OBSOLETE */
}

/*
 * Return info on stack usage for threads in a specific processor set
 */
kern_return_t
processor_set_stack_usage(
    processor_set_t pset,
    unsigned int    *totalp,
    vm_size_t       *spacep,
    vm_size_t       *residentp,
    vm_size_t       *maxusagep,
    vm_offset_t     *maxstackp)
{
#if !MACH_DEBUG
    return KERN_NOT_SUPPORTED;
#else
    unsigned int total;
    vm_size_t maxusage;
    vm_offset_t maxstack;

    register thread_t *thread_list;
    register thread_t thread;

    unsigned int actual;    /* snapshot of threads_count */
    unsigned int i;

    vm_size_t size, size_needed;
    void *addr;

    if (pset == PROCESSOR_SET_NULL || pset != &pset0)
        return KERN_INVALID_ARGUMENT;

    size = 0;
    addr = NULL;

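    /*
     * Size the thread list under the lock; if the snapshot grew
     * past the current buffer, drop the lock, reallocate and retry.
     */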
    for (;;) {
        mutex_lock(&tasks_threads_lock);

        actual = threads_count;

        /* do we have the memory we need? */

        size_needed = actual * sizeof(thread_t);
        if (size_needed <= size)
            break;

        mutex_unlock(&tasks_threads_lock);

        if (size != 0)
            kfree(addr, size);

        assert(size_needed > 0);
        size = size_needed;

        addr = kalloc(size);
        if (addr == 0)
            return KERN_RESOURCE_SHORTAGE;
    }

    /* OK, have memory and list is locked */
    thread_list = (thread_t *) addr;
    for (i = 0, thread = (thread_t) queue_first(&threads);
            !queue_end(&threads, (queue_entry_t) thread);
            thread = (thread_t) queue_next(&thread->threads)) {
        thread_reference_internal(thread);
        thread_list[i++] = thread;
    }
    assert(i <= actual);

    mutex_unlock(&tasks_threads_lock);

    /* calculate maxusage and free thread references */

    total = 0;
    maxusage = 0;
    maxstack = 0;
    while (i > 0) {
        thread_t threadref = thread_list[--i];

        if (threadref->kernel_stack != 0)
            total++;

        thread_deallocate(threadref);
    }

    if (size != 0)
        kfree(addr, size);

    *totalp = total;
    *residentp = *spacep = total * round_page(KERNEL_STACK_SIZE);
    *maxusagep = maxusage;
    *maxstackp = maxstack;
    return KERN_SUCCESS;

#endif  /* MACH_DEBUG */
}

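/*
 * Since kernel stacks are allocated from generic kernel VM, any valid
 * stack address lies within the bounds of the kernel map.
 */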
vm_offset_t min_valid_stack_address(void)
{
    return vm_map_min(kernel_map);
}

vm_offset_t max_valid_stack_address(void)
{
    return vm_map_max(kernel_map);
}