/*
 * Copyright (c) 2003-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Kernel stack management routines.
 */

#include <mach/mach_host.h>
#include <mach/mach_types.h>
#include <mach/processor_set.h>

#include <kern/kern_types.h>
#include <kern/mach_param.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/ledger.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <mach_debug.h>

/*
 * We allocate stacks from generic kernel VM.
 *
 * The stack_free_list can only be accessed at splsched,
 * because stack_alloc_try/thread_invoke operate at splsched.
 */

decl_simple_lock_data(static, stack_lock_data)
#define stack_lock()    simple_lock(&stack_lock_data)
#define stack_unlock()  simple_unlock(&stack_lock_data)

#define STACK_CACHE_SIZE    2

static vm_offset_t stack_free_list;

static unsigned int stack_free_count, stack_free_hiwat;    /* free list count */
static unsigned int stack_hiwat;
unsigned int stack_total;               /* current total count */
unsigned long long stack_allocs;        /* total count of allocations */

static int stack_fake_zone_index = -1;  /* index in zone_info array */

static unsigned int stack_free_target;
static int stack_free_delta;

static unsigned int stack_new_count;    /* total new stack allocations */

static vm_offset_t stack_addr_mask;

unsigned int kernel_stack_pages;
vm_offset_t kernel_stack_size;
vm_offset_t kernel_stack_mask;
vm_offset_t kernel_stack_depth_max;

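/*
 * Ledger / fake-zone accounting helpers.
 *
 * Each kernel stack is charged to the owning thread's ledger
 * (task_ledgers.tkm_private) and, once a fake-zone index has been
 * registered via stack_fake_zone_init(), to the per-task zone-info
 * array so that stack memory is visible in zone statistics.
 */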
static inline void
STACK_ZINFO_PALLOC(thread_t thread)
{
    task_t task;
    zinfo_usage_t zinfo;

    ledger_credit(thread->t_ledger, task_ledgers.tkm_private, kernel_stack_size);

    if (stack_fake_zone_index != -1 &&
        (task = thread->task) != NULL && (zinfo = task->tkm_zinfo) != NULL)
        OSAddAtomic64(kernel_stack_size,
                      (int64_t *)&zinfo[stack_fake_zone_index].alloc);
}

static inline void
STACK_ZINFO_PFREE(thread_t thread)
{
    task_t task;
    zinfo_usage_t zinfo;

    ledger_debit(thread->t_ledger, task_ledgers.tkm_private, kernel_stack_size);

    if (stack_fake_zone_index != -1 &&
        (task = thread->task) != NULL && (zinfo = task->tkm_zinfo) != NULL)
        OSAddAtomic64(kernel_stack_size,
                      (int64_t *)&zinfo[stack_fake_zone_index].free);
}

static inline void
STACK_ZINFO_HANDOFF(thread_t from, thread_t to)
{
    ledger_debit(from->t_ledger, task_ledgers.tkm_private, kernel_stack_size);
    ledger_credit(to->t_ledger, task_ledgers.tkm_private, kernel_stack_size);

    if (stack_fake_zone_index != -1) {
        task_t task;
        zinfo_usage_t zinfo;

        if ((task = from->task) != NULL && (zinfo = task->tkm_zinfo) != NULL)
            OSAddAtomic64(kernel_stack_size,
                          (int64_t *)&zinfo[stack_fake_zone_index].free);

        if ((task = to->task) != NULL && (zinfo = task->tkm_zinfo) != NULL)
            OSAddAtomic64(kernel_stack_size,
                          (int64_t *)&zinfo[stack_fake_zone_index].alloc);
    }
}

/*
 * The next field is at the base of the stack,
 * so the low end is left unsullied.
 */
#define stack_next(stack) \
    (*((vm_offset_t *)((stack) + kernel_stack_size) - 1))

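/*
 * Helpers used to derive a power-of-two alignment mask for stack
 * addresses.  Note that log2() as written returns the number of
 * significant bits in its argument, and roundup_pow2() returns a
 * power of two at least as large as size.
 */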
static inline int
log2(vm_offset_t size)
{
    int result;
    for (result = 0; size > 0; result++)
        size >>= 1;
    return result;
}

static inline vm_offset_t
roundup_pow2(vm_offset_t size)
{
    return 1UL << (log2(size - 1) + 1);
}

static vm_offset_t stack_alloc_internal(void);
static void stack_free_stack(vm_offset_t);

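/*
 * stack_init:
 *
 * Initialize the stack subsystem: the free-list lock, the default
 * stack size (optionally overridden by the "kernel_stack_pages"
 * boot-arg), and the address masks used to align and locate stacks.
 */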
void
stack_init(void)
{
    simple_lock_init(&stack_lock_data, 0);

    kernel_stack_pages = KERNEL_STACK_SIZE / PAGE_SIZE;
    kernel_stack_size = KERNEL_STACK_SIZE;
    kernel_stack_mask = -KERNEL_STACK_SIZE;
    kernel_stack_depth_max = 0;

    if (PE_parse_boot_argn("kernel_stack_pages",
                           &kernel_stack_pages,
                           sizeof (kernel_stack_pages))) {
        kernel_stack_size = kernel_stack_pages * PAGE_SIZE;
        printf("stack_init: kernel_stack_pages=%d kernel_stack_size=%p\n",
               kernel_stack_pages, (void *) kernel_stack_size);
    }

    if (kernel_stack_size < round_page(kernel_stack_size))
        panic("stack_init: stack size %p not a multiple of page size %d\n",
              (void *) kernel_stack_size, PAGE_SIZE);

    stack_addr_mask = roundup_pow2(kernel_stack_size) - 1;
    kernel_stack_mask = ~stack_addr_mask;
}

/*
 * stack_alloc:
 *
 * Allocate a stack for a thread, may
 * block.
 */

static vm_offset_t
stack_alloc_internal(void)
{
    vm_offset_t stack;
    spl_t s;
    int guard_flags;

    s = splsched();
    stack_lock();
    stack_allocs++;
    stack = stack_free_list;
    if (stack != 0) {
        stack_free_list = stack_next(stack);
        stack_free_count--;
    }
    else {
        if (++stack_total > stack_hiwat)
            stack_hiwat = stack_total;
        stack_new_count++;
    }
    stack_free_delta--;
    stack_unlock();
    splx(s);

    if (stack == 0) {

        /*
         * Request guard pages on either side of the stack. Ask
         * kernel_memory_allocate() for two extra pages to account
         * for these.
         */

        guard_flags = KMA_GUARD_FIRST | KMA_GUARD_LAST;
        if (kernel_memory_allocate(kernel_map, &stack,
                                   kernel_stack_size + (2 * PAGE_SIZE),
                                   stack_addr_mask,
                                   KMA_KSTACK | KMA_KOBJECT | guard_flags,
                                   VM_KERN_MEMORY_STACK)
            != KERN_SUCCESS)
            panic("stack_alloc: kernel_memory_allocate");

        /*
         * The stack address that comes back is the address of the lower
         * guard page. Skip past it to get the actual stack base address.
         */

        stack += PAGE_SIZE;
    }
    return stack;
}

void
stack_alloc(
    thread_t thread)
{
    assert(thread->kernel_stack == 0);
    machine_stack_attach(thread, stack_alloc_internal());
    STACK_ZINFO_PALLOC(thread);
}

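/*
 * stack_handoff:
 *
 * Transfer the current thread's stack directly to another thread
 * during a handoff context switch, moving the ledger and fake-zone
 * accounting along with it.
 */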
void
stack_handoff(thread_t from, thread_t to)
{
    assert(from == current_thread());
    machine_stack_handoff(from, to);
    STACK_ZINFO_HANDOFF(from, to);
}

/*
 * stack_free:
 *
 * Detach and free the stack for a thread.
 */
void
stack_free(
    thread_t thread)
{
    vm_offset_t stack = machine_stack_detach(thread);

    assert(stack);
    if (stack != thread->reserved_stack) {
        STACK_ZINFO_PFREE(thread);
        stack_free_stack(stack);
    }
}

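/*
 * stack_free_reserved:
 *
 * Release a thread's reserved stack, provided it is not the stack
 * currently attached to the thread.
 */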
void
stack_free_reserved(
    thread_t thread)
{
    if (thread->reserved_stack != thread->kernel_stack) {
        stack_free_stack(thread->reserved_stack);
        STACK_ZINFO_PFREE(thread);
    }
}

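/*
 * stack_free_stack:
 *
 * Place a stack on the current processor's small free cache, or on
 * the global free list if the cache is already full.
 */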
static void
stack_free_stack(
    vm_offset_t stack)
{
    struct stack_cache *cache;
    spl_t s;

    s = splsched();
    cache = &PROCESSOR_DATA(current_processor(), stack_cache);
    if (cache->count < STACK_CACHE_SIZE) {
        stack_next(stack) = cache->free;
        cache->free = stack;
        cache->count++;
    }
    else {
        stack_lock();
        stack_next(stack) = stack_free_list;
        stack_free_list = stack;
        if (++stack_free_count > stack_free_hiwat)
            stack_free_hiwat = stack_free_count;
        stack_free_delta++;
        stack_unlock();
    }
    splx(s);
}

/*
 * stack_alloc_try:
 *
 * Non-blocking attempt to allocate a
 * stack for a thread.
 *
 * Returns TRUE on success.
 *
 * Called at splsched.
 */
boolean_t
stack_alloc_try(
    thread_t thread)
{
    struct stack_cache *cache;
    vm_offset_t stack;

    cache = &PROCESSOR_DATA(current_processor(), stack_cache);
    stack = cache->free;
    if (stack != 0) {
        STACK_ZINFO_PALLOC(thread);
        cache->free = stack_next(stack);
        cache->count--;
    }
    else {
        if (stack_free_list != 0) {
            stack_lock();
            stack = stack_free_list;
            if (stack != 0) {
                STACK_ZINFO_PALLOC(thread);
                stack_free_list = stack_next(stack);
                stack_free_count--;
                stack_free_delta--;
            }
            stack_unlock();
        }
    }

    if (stack != 0 || (stack = thread->reserved_stack) != 0) {
        machine_stack_attach(thread, stack);
        return (TRUE);
    }

    return (FALSE);
}

static unsigned int stack_collect_tick, last_stack_tick;

/*
 * stack_collect:
 *
 * Free excess kernel stacks, may
 * block.
 */
void
stack_collect(void)
{
    if (stack_collect_tick != last_stack_tick) {
        unsigned int target;
        vm_offset_t stack;
        spl_t s;

        s = splsched();
        stack_lock();

        target = stack_free_target + (STACK_CACHE_SIZE * processor_count);
        target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;

        while (stack_free_count > target) {
            stack = stack_free_list;
            stack_free_list = stack_next(stack);
            stack_free_count--; stack_total--;
            stack_unlock();
            splx(s);

            /*
             * Get the stack base address, then decrement by one page
             * to account for the lower guard page. Add two extra pages
             * to the size to account for the guard pages on both ends
             * that were originally requested when the stack was allocated
             * back in stack_alloc().
             */

            stack = (vm_offset_t)vm_map_trunc_page(
                stack,
                VM_MAP_PAGE_MASK(kernel_map));
            stack -= PAGE_SIZE;
            if (vm_map_remove(
                    kernel_map,
                    stack,
                    stack + kernel_stack_size + (2 * PAGE_SIZE),
                    VM_MAP_REMOVE_KUNWIRE)
                != KERN_SUCCESS)
                panic("stack_collect: vm_map_remove");
            stack = 0;

            s = splsched();
            stack_lock();

            target = stack_free_target + (STACK_CACHE_SIZE * processor_count);
            target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;
        }

        last_stack_tick = stack_collect_tick;

        stack_unlock();
        splx(s);
    }
}

/*
 * compute_stack_target:
 *
 * Computes a new target free list count
 * based on recent alloc / free activity.
 *
 * Limits stack collection to once per
 * computation period.
 */
void
compute_stack_target(
    __unused void *arg)
{
    spl_t s;

    s = splsched();
    stack_lock();

    if (stack_free_target > 5)
        stack_free_target = (4 * stack_free_target) / 5;
    else if (stack_free_target > 0)
        stack_free_target--;

    stack_free_target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;

    stack_free_delta = 0;
    stack_collect_tick++;

    stack_unlock();
    splx(s);
}

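/*
 * Fake-zone reporting: export kernel stack usage through the
 * zone-info interfaces as if stacks were allocated from a zone.
 */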
void
stack_fake_zone_init(int zone_index)
{
    stack_fake_zone_index = zone_index;
}

void
stack_fake_zone_info(int *count,
    vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size, vm_size_t *alloc_size,
    uint64_t *sum_size, int *collectable, int *exhaustable, int *caller_acct)
{
    unsigned int total, hiwat, free;
    unsigned long long all;
    spl_t s;

    s = splsched();
    stack_lock();
    all = stack_allocs;
    total = stack_total;
    hiwat = stack_hiwat;
    free = stack_free_count;
    stack_unlock();
    splx(s);

    *count = total - free;
    *cur_size = kernel_stack_size * total;
    *max_size = kernel_stack_size * hiwat;
    *elem_size = kernel_stack_size;
    *alloc_size = kernel_stack_size;
    *sum_size = all * kernel_stack_size;

    *collectable = 1;
    *exhaustable = 0;
    *caller_acct = 1;
}

/* OBSOLETE */
void stack_privilege(
    thread_t thread);

void
stack_privilege(
    __unused thread_t thread)
{
    /* OBSOLETE */
}

/*
 * Return info on stack usage for threads in a specific processor set
 */
kern_return_t
processor_set_stack_usage(
    processor_set_t pset,
    unsigned int *totalp,
    vm_size_t *spacep,
    vm_size_t *residentp,
    vm_size_t *maxusagep,
    vm_offset_t *maxstackp)
{
#if !MACH_DEBUG
    return KERN_NOT_SUPPORTED;
#else
    unsigned int total;
    vm_size_t maxusage;
    vm_offset_t maxstack;

    register thread_t *thread_list;
    register thread_t thread;

    unsigned int actual;    /* this many things */
    unsigned int i;

    vm_size_t size, size_needed;
    void *addr;

    if (pset == PROCESSOR_SET_NULL || pset != &pset0)
        return KERN_INVALID_ARGUMENT;

    size = 0;
    addr = NULL;

    for (;;) {
        lck_mtx_lock(&tasks_threads_lock);

        actual = threads_count;

        /* do we have the memory we need? */

        size_needed = actual * sizeof(thread_t);
        if (size_needed <= size)
            break;

        lck_mtx_unlock(&tasks_threads_lock);

        if (size != 0)
            kfree(addr, size);

        assert(size_needed > 0);
        size = size_needed;

        addr = kalloc(size);
        if (addr == 0)
            return KERN_RESOURCE_SHORTAGE;
    }

    /* OK, have memory and list is locked */
    thread_list = (thread_t *) addr;
    for (i = 0, thread = (thread_t)(void *) queue_first(&threads);
         !queue_end(&threads, (queue_entry_t) thread);
         thread = (thread_t)(void *) queue_next(&thread->threads)) {
        thread_reference_internal(thread);
        thread_list[i++] = thread;
    }
    assert(i <= actual);

    lck_mtx_unlock(&tasks_threads_lock);

    /* calculate maxusage and free thread references */

    total = 0;
    maxusage = 0;
    maxstack = 0;
    while (i > 0) {
        thread_t threadref = thread_list[--i];

        if (threadref->kernel_stack != 0)
            total++;

        thread_deallocate(threadref);
    }

    if (size != 0)
        kfree(addr, size);

    *totalp = total;
    *residentp = *spacep = total * round_page(kernel_stack_size);
    *maxusagep = maxusage;
    *maxstackp = maxstack;
    return KERN_SUCCESS;

#endif /* MACH_DEBUG */
}

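/*
 * Bounds on the kernel VA range within which a valid kernel stack
 * address can lie.
 */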
vm_offset_t min_valid_stack_address(void)
{
    return (vm_offset_t)vm_map_min(kernel_map);
}

vm_offset_t max_valid_stack_address(void)
{
    return (vm_offset_t)vm_map_max(kernel_map);
}