/*
 * Copyright (c) 2003-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Kernel stack management routines.
 */

#include <mach/mach_host.h>
#include <mach/mach_types.h>
#include <mach/processor_set.h>

#include <kern/kern_types.h>
#include <kern/mach_param.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <mach_debug.h>

/*
 * We allocate stacks from generic kernel VM.
 *
 * The stack_free_list can only be accessed at splsched,
 * because stack_alloc_try/thread_invoke operate at splsched.
 */
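
/*
 * For reference, every access to the global free list and its counters in
 * this file follows the same bracketing pattern, raising to splsched before
 * taking the lock and releasing in reverse order:
 *
 *     s = splsched();
 *     stack_lock();
 *     ... manipulate stack_free_list / counters ...
 *     stack_unlock();
 *     splx(s);
 */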

decl_simple_lock_data(static,stack_lock_data)
#define stack_lock()        simple_lock(&stack_lock_data)
#define stack_unlock()      simple_unlock(&stack_lock_data)

#define STACK_CACHE_SIZE    2

static vm_offset_t stack_free_list;

static unsigned int stack_free_count, stack_free_hiwat;    /* free list count */
static unsigned int stack_hiwat;
unsigned int stack_total;               /* current total count */

static unsigned int stack_free_target;
static int stack_free_delta;

static unsigned int stack_new_count;    /* total new stack allocations */

static vm_offset_t stack_addr_mask;

unsigned int kernel_stack_pages = KERNEL_STACK_SIZE / PAGE_SIZE;
vm_offset_t kernel_stack_size = KERNEL_STACK_SIZE;
vm_offset_t kernel_stack_mask = -KERNEL_STACK_SIZE;
vm_offset_t kernel_stack_depth_max = 0;

/*
 * The next field is at the base of the stack,
 * so the low end is left unsullied.
 */
#define stack_next(stack)   \
    (*((vm_offset_t *)((stack) + kernel_stack_size) - 1))

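/*
 * Illustration of stack_next() (values are examples only; they assume an
 * 8-byte vm_offset_t and a 16 KB kernel stack, both configuration-dependent):
 * for a stack whose low address is, say, 0xffffff8010000000, stack_next()
 * names the word at 0xffffff8010004000 - 8 = 0xffffff8010003ff8, i.e. the
 * highest word of the stack region.  Kernel stacks grow down from that high
 * end (the "base"), so the free-list link reuses storage at the base and the
 * low, deepest end of the stack is never written by this bookkeeping.
 */
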
static inline int
log2(vm_offset_t size)
{
    int result;
    for (result = 0; size > 0; result++)
        size >>= 1;
    return result;
}

static inline vm_offset_t
roundup_pow2(vm_offset_t size)
{
    return 1UL << (log2(size - 1) + 1);
}
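
/*
 * Worked example of the two helpers above (numbers assume the common 4 KB
 * page / 16 KB kernel stack configuration; both are machine-dependent):
 *
 *   log2() as written counts how many right shifts empty its argument, so
 *   for nonzero size it returns floor(log2(size)) + 1, the bit width.
 *   Thus log2(0x3fff) == 14 and roundup_pow2(0x4000) == 1 << 15 == 0x8000.
 *
 *   stack_init() below therefore sets stack_addr_mask to 0x7fff, so each
 *   allocation handed back by kernel_memory_allocate() in stack_alloc()
 *   starts on a 32 KB boundary, a window large enough to hold the 16 KB
 *   stack plus the two guard pages requested with it.
 */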

void
stack_init(void)
{
    simple_lock_init(&stack_lock_data, 0);

    if (PE_parse_boot_argn("kernel_stack_pages",
                           &kernel_stack_pages,
                           sizeof (kernel_stack_pages))) {
        kernel_stack_size = kernel_stack_pages * PAGE_SIZE;
        printf("stack_init: kernel_stack_pages=%d kernel_stack_size=%p\n",
               kernel_stack_pages, (void *) kernel_stack_size);
    }

    if (kernel_stack_size < round_page(kernel_stack_size))
        panic("stack_init: stack size %p not a multiple of page size %d\n",
              (void *) kernel_stack_size, PAGE_SIZE);

    stack_addr_mask = roundup_pow2(kernel_stack_size) - 1;
    kernel_stack_mask = ~stack_addr_mask;
}
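
/*
 * Usage note: the stack size can be tuned at boot through the
 * kernel_stack_pages boot-arg parsed above.  For example (assuming 4 KB
 * pages; PAGE_SIZE is machine-dependent), booting with kernel_stack_pages=6
 * yields a 24 KB kernel stack, and the chosen size is echoed by the printf
 * above.
 */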

/*
 * stack_alloc:
 *
 * Allocate a stack for a thread, may
 * block.
 */
void
stack_alloc(
    thread_t    thread)
{
    vm_offset_t stack;
    spl_t       s;
    int         guard_flags;

    assert(thread->kernel_stack == 0);

    s = splsched();
    stack_lock();
    stack = stack_free_list;
    if (stack != 0) {
        stack_free_list = stack_next(stack);
        stack_free_count--;
    }
    else {
        if (++stack_total > stack_hiwat)
            stack_hiwat = stack_total;
        stack_new_count++;
    }
    stack_free_delta--;
    stack_unlock();
    splx(s);

    if (stack == 0) {

        /*
         * Request guard pages on either side of the stack. Ask
         * kernel_memory_allocate() for two extra pages to account
         * for these.
         */

        guard_flags = KMA_GUARD_FIRST | KMA_GUARD_LAST;
        if (kernel_memory_allocate(kernel_map, &stack,
                                   kernel_stack_size + (2*PAGE_SIZE),
                                   stack_addr_mask,
                                   KMA_KOBJECT | guard_flags)
            != KERN_SUCCESS)
            panic("stack_alloc: kernel_memory_allocate");

        /*
         * The stack address that comes back is the address of the lower
         * guard page. Skip past it to get the actual stack base address.
         */

        stack += PAGE_SIZE;
    }

    machine_stack_attach(thread, stack);
}
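
/*
 * Resulting layout of a freshly allocated stack (illustrative; assumes
 * 4 KB pages and a 16 KB stack, both configuration-dependent):
 *
 *     allocation start (returned by kernel_memory_allocate())
 *     +--------------+------------------------------+--------------+
 *     |  guard page  |     16 KB usable stack       |  guard page  |
 *     |    (4 KB)    |  grows down from the high end|    (4 KB)    |
 *     +--------------+------------------------------+--------------+
 *                    ^
 *                    value passed to machine_stack_attach()
 *
 * Stacks recycled from the free list keep this layout, so only brand-new
 * allocations go through kernel_memory_allocate().
 */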

/*
 * stack_free:
 *
 * Detach and free the stack for a thread.
 */
void
stack_free(
    thread_t    thread)
{
    vm_offset_t stack = machine_stack_detach(thread);

    assert(stack);
    if (stack != thread->reserved_stack)
        stack_free_stack(stack);
}

void
stack_free_stack(
    vm_offset_t     stack)
{
    struct stack_cache  *cache;
    spl_t               s;

    s = splsched();
    cache = &PROCESSOR_DATA(current_processor(), stack_cache);
    if (cache->count < STACK_CACHE_SIZE) {
        stack_next(stack) = cache->free;
        cache->free = stack;
        cache->count++;
    }
    else {
        stack_lock();
        stack_next(stack) = stack_free_list;
        stack_free_list = stack;
        if (++stack_free_count > stack_free_hiwat)
            stack_free_hiwat = stack_free_count;
        stack_free_delta++;
        stack_unlock();
    }
    splx(s);
}
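
/*
 * Design note: freed stacks go first into a small per-processor cache
 * (up to STACK_CACHE_SIZE entries) that is only ever touched at splsched
 * on the owning processor, so it needs no lock of its own and the common
 * free/reuse path avoids the global stack_lock.  Only overflow from the
 * cache reaches the shared free list and its counters.
 */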

/*
 * stack_alloc_try:
 *
 * Non-blocking attempt to allocate a
 * stack for a thread.
 *
 * Returns TRUE on success.
 *
 * Called at splsched.
 */
boolean_t
stack_alloc_try(
    thread_t    thread)
{
    struct stack_cache  *cache;
    vm_offset_t         stack;

    cache = &PROCESSOR_DATA(current_processor(), stack_cache);
    stack = cache->free;
    if (stack != 0) {
        cache->free = stack_next(stack);
        cache->count--;
    }
    else {
        if (stack_free_list != 0) {
            stack_lock();
            stack = stack_free_list;
            if (stack != 0) {
                stack_free_list = stack_next(stack);
                stack_free_count--;
                stack_free_delta--;
            }
            stack_unlock();
        }
    }

    if (stack != 0 || (stack = thread->reserved_stack) != 0) {
        machine_stack_attach(thread, stack);
        return (TRUE);
    }

    return (FALSE);
}
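
/*
 * Note on the fallbacks above: the unlocked peek at stack_free_list is only
 * a hint and is re-checked under stack_lock before the list is modified.
 * If both the per-processor cache and the global free list come up empty,
 * the thread's reserved_stack (when present) is pressed into service;
 * otherwise the attempt fails and returns FALSE.
 */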

static unsigned int stack_collect_tick, last_stack_tick;

/*
 * stack_collect:
 *
 * Free excess kernel stacks, may
 * block.
 */
void
stack_collect(void)
{
    if (stack_collect_tick != last_stack_tick) {
        unsigned int    target;
        vm_offset_t     stack;
        spl_t           s;

        s = splsched();
        stack_lock();

        target = stack_free_target + (STACK_CACHE_SIZE * processor_count);
        target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;

        while (stack_free_count > target) {
            stack = stack_free_list;
            stack_free_list = stack_next(stack);
            stack_free_count--; stack_total--;
            stack_unlock();
            splx(s);

            /*
             * Get the stack base address, then decrement by one page
             * to account for the lower guard page. Add two extra pages
             * to the size to account for the guard pages on both ends
             * that were originally requested when the stack was allocated
             * back in stack_alloc().
             */

            stack = (vm_offset_t)vm_map_trunc_page(stack);
            stack -= PAGE_SIZE;
            if (vm_map_remove(
                    kernel_map,
                    stack,
                    stack + kernel_stack_size + (2*PAGE_SIZE),
                    VM_MAP_REMOVE_KUNWIRE)
                != KERN_SUCCESS)
                panic("stack_collect: vm_map_remove");
            stack = 0;

            s = splsched();
            stack_lock();

            target = stack_free_target + (STACK_CACHE_SIZE * processor_count);
            target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;
        }

        last_stack_tick = stack_collect_tick;

        stack_unlock();
        splx(s);
    }
}
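
/*
 * Example of the trimming threshold above (illustrative numbers): on a
 * 2-processor machine with stack_free_target == 10 and stack_free_delta
 * == -3, target becomes 10 + (2 * 2) + 3 == 17, so stacks are released
 * back to the VM system only while more than 17 of them sit on the global
 * free list.  The target is recomputed after every removal because the
 * lock is dropped while vm_map_remove() runs.
 */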

/*
 * compute_stack_target:
 *
 * Computes a new target free list count
 * based on recent alloc / free activity.
 *
 * Limits stack collection to once per
 * computation period.
 */
void
compute_stack_target(
    __unused void   *arg)
{
    spl_t   s;

    s = splsched();
    stack_lock();

    if (stack_free_target > 5)
        stack_free_target = (4 * stack_free_target) / 5;
    else
    if (stack_free_target > 0)
        stack_free_target--;

    stack_free_target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;

    stack_free_delta = 0;
    stack_collect_tick++;

    stack_unlock();
    splx(s);
}
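
/*
 * Example of the decay above: with no alloc/free activity (stack_free_delta
 * stays 0), successive computation periods shrink stack_free_target
 * geometrically while it exceeds 5 (e.g. 20 -> 16 -> 12 -> 9 -> 7 -> 5) and
 * then step it down by one per period until it reaches zero.  Activity in
 * either direction during the period raises the target by the magnitude of
 * stack_free_delta before the delta is reset.
 */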

void
stack_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
                     vm_size_t *alloc_size, int *collectable, int *exhaustable)
{
    unsigned int    total, hiwat, free;
    spl_t           s;

    s = splsched();
    stack_lock();
    total = stack_total;
    hiwat = stack_hiwat;
    free = stack_free_count;
    stack_unlock();
    splx(s);

    *count = total - free;
    *cur_size = kernel_stack_size * total;
    *max_size = kernel_stack_size * hiwat;
    *elem_size = kernel_stack_size;
    *alloc_size = kernel_stack_size;
    *collectable = 1;
    *exhaustable = 0;
}

/* OBSOLETE */
void stack_privilege(
        thread_t    thread);

void
stack_privilege(
    __unused thread_t   thread)
{
    /* OBSOLETE */
}

/*
 * Return info on stack usage for threads in a specific processor set
 */
kern_return_t
processor_set_stack_usage(
    processor_set_t pset,
    unsigned int    *totalp,
    vm_size_t       *spacep,
    vm_size_t       *residentp,
    vm_size_t       *maxusagep,
    vm_offset_t     *maxstackp)
{
#if !MACH_DEBUG
    return KERN_NOT_SUPPORTED;
#else
    unsigned int total;
    vm_size_t maxusage;
    vm_offset_t maxstack;

    register thread_t *thread_list;
    register thread_t thread;

    unsigned int actual;    /* this many things */
    unsigned int i;

    vm_size_t size, size_needed;
    void *addr;

    if (pset == PROCESSOR_SET_NULL || pset != &pset0)
        return KERN_INVALID_ARGUMENT;

    size = 0;
    addr = NULL;

    for (;;) {
        lck_mtx_lock(&tasks_threads_lock);

        actual = threads_count;

        /* do we have the memory we need? */

        size_needed = actual * sizeof(thread_t);
        if (size_needed <= size)
            break;

        lck_mtx_unlock(&tasks_threads_lock);

        if (size != 0)
            kfree(addr, size);

        assert(size_needed > 0);
        size = size_needed;

        addr = kalloc(size);
        if (addr == 0)
            return KERN_RESOURCE_SHORTAGE;
    }

    /* OK, have memory and list is locked */
    thread_list = (thread_t *) addr;
    for (i = 0, thread = (thread_t) queue_first(&threads);
         !queue_end(&threads, (queue_entry_t) thread);
         thread = (thread_t) queue_next(&thread->threads)) {
        thread_reference_internal(thread);
        thread_list[i++] = thread;
    }
    assert(i <= actual);

    lck_mtx_unlock(&tasks_threads_lock);

    /* calculate maxusage and free thread references */

    total = 0;
    maxusage = 0;
    maxstack = 0;
    while (i > 0) {
        thread_t threadref = thread_list[--i];

        if (threadref->kernel_stack != 0)
            total++;

        thread_deallocate(threadref);
    }

    if (size != 0)
        kfree(addr, size);

    *totalp = total;
    *residentp = *spacep = total * round_page(kernel_stack_size);
    *maxusagep = maxusage;
    *maxstackp = maxstack;
    return KERN_SUCCESS;

#endif  /* MACH_DEBUG */
}
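
/*
 * Note on the values reported above: maxusage and maxstack are initialized
 * to zero and never updated in this implementation, so callers always see
 * zero for both.  The space and resident figures are likewise an estimate,
 * counting one full stack for every thread that currently has a kernel
 * stack attached rather than measuring actual page residency.
 */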

vm_offset_t min_valid_stack_address(void)
{
    return (vm_offset_t)vm_map_min(kernel_map);
}

vm_offset_t max_valid_stack_address(void)
{
    return (vm_offset_t)vm_map_max(kernel_map);
}