/*
 * Copyright (c) 2003-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*
 *	Kernel stack management routines.
 */

#include <mach/mach_host.h>
#include <mach/mach_types.h>
#include <mach/processor_set.h>

#include <kern/kern_types.h>
#include <kern/mach_param.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <mach_debug.h>

/*
 *	We allocate stacks from generic kernel VM.
 *
 *	The stack_free_list can only be accessed at splsched,
 *	because stack_alloc_try/thread_invoke operate at splsched.
 */

decl_simple_lock_data(static,stack_lock_data)
#define stack_lock()		simple_lock(&stack_lock_data)
#define stack_unlock()		simple_unlock(&stack_lock_data)

#define STACK_CACHE_SIZE	2

static vm_map_t		stack_map;
static vm_offset_t	stack_free_list;

static unsigned int	stack_free_count, stack_free_hiwat;	/* free list count */
static unsigned int	stack_total, stack_hiwat;		/* current total count */

static unsigned int	stack_free_target;
static int		stack_free_delta;

static unsigned int	stack_new_count;			/* total new stack allocations */

static vm_offset_t	stack_addr_mask;

/*
 *	The free-list link ("next" field) is stored in the last word of
 *	the allocation, which is the base of the stack since stacks grow
 *	down, so the low end is left unsullied.
 */
#define stack_next(stack)	\
	(*((vm_offset_t *)((stack) + KERNEL_STACK_SIZE) - 1))
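
/*
 *	Illustration: the push/pop pattern used on the free list below.
 *
 *		stack_next(stack) = stack_free_list;	// push
 *		stack_free_list = stack;
 *
 *		stack = stack_free_list;		// pop
 *		stack_free_list = stack_next(stack);
 */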

void
stack_init(void)
{
	vm_offset_t		stacks, boundary;
	vm_map_offset_t		map_addr;

	simple_lock_init(&stack_lock_data, 0);

	if (KERNEL_STACK_SIZE < round_page(KERNEL_STACK_SIZE))
		panic("stack_init: stack size %d not a multiple of page size %d\n", KERNEL_STACK_SIZE, PAGE_SIZE);

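	/*
	 *	Compute the smallest power of two strictly greater than
	 *	KERNEL_STACK_SIZE; boundary - 1 is then used as the
	 *	alignment mask for stack allocations.
	 */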
	for (boundary = PAGE_SIZE; boundary <= KERNEL_STACK_SIZE; )
		boundary <<= 1;

	stack_addr_mask = boundary - 1;

	if (kmem_suballoc(kernel_map, &stacks, (boundary * (2 * THREAD_MAX + 64)),
				FALSE, VM_FLAGS_ANYWHERE, &stack_map) != KERN_SUCCESS)
		panic("stack_init: kmem_suballoc");

	map_addr = vm_map_min(stack_map);
	if (vm_map_enter(stack_map, &map_addr, vm_map_round_page(PAGE_SIZE), 0, VM_FLAGS_FIXED,
				VM_OBJECT_NULL, 0, FALSE, VM_PROT_NONE, VM_PROT_NONE, VM_INHERIT_DEFAULT) != KERN_SUCCESS)
		panic("stack_init: vm_map_enter");
}

/*
 *	stack_alloc:
 *
 *	Allocate a stack for a thread; may block.
 */
void
stack_alloc(
	thread_t	thread)
{
	vm_offset_t		stack;
	spl_t			s;

	assert(thread->kernel_stack == 0);

	s = splsched();
	stack_lock();
	stack = stack_free_list;
	if (stack != 0) {
		stack_free_list = stack_next(stack);
		stack_free_count--;
	}
	else {
		if (++stack_total > stack_hiwat)
			stack_hiwat = stack_total;
		stack_new_count++;
	}
	stack_free_delta--;
	stack_unlock();
	splx(s);

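	/*
	 *	No cached stack was available: allocate fresh wired kernel
	 *	memory for a new one.  This is the path that may block.
	 */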
	if (stack == 0) {
		if (kernel_memory_allocate(stack_map, &stack, KERNEL_STACK_SIZE, stack_addr_mask, KMA_KOBJECT) != KERN_SUCCESS)
			panic("stack_alloc: kernel_memory_allocate");
	}

	machine_stack_attach(thread, stack);
}

/*
 *	stack_free:
 *
 *	Detach and free the stack for a thread.
 */
void
stack_free(
	thread_t	thread)
{
	vm_offset_t		stack = machine_stack_detach(thread);

	assert(stack);
	if (stack != thread->reserved_stack) {
		struct stack_cache	*cache;
		spl_t			s;

		s = splsched();
		cache = &PROCESSOR_DATA(current_processor(), stack_cache);
		if (cache->count < STACK_CACHE_SIZE) {
			stack_next(stack) = cache->free;
			cache->free = stack;
			cache->count++;
		}
		else {
			stack_lock();
			stack_next(stack) = stack_free_list;
			stack_free_list = stack;
			if (++stack_free_count > stack_free_hiwat)
				stack_free_hiwat = stack_free_count;
			stack_free_delta++;
			stack_unlock();
		}
		splx(s);
	}
}

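/*
 *	stack_free_stack:
 *
 *	Return a bare stack (already detached from any thread) to the
 *	per-processor cache or the global free list, using the same
 *	policy as stack_free().
 */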
void
stack_free_stack(
	vm_offset_t		stack)
{
	struct stack_cache	*cache;
	spl_t			s;

	s = splsched();
	cache = &PROCESSOR_DATA(current_processor(), stack_cache);
	if (cache->count < STACK_CACHE_SIZE) {
		stack_next(stack) = cache->free;
		cache->free = stack;
		cache->count++;
	}
	else {
		stack_lock();
		stack_next(stack) = stack_free_list;
		stack_free_list = stack;
		if (++stack_free_count > stack_free_hiwat)
			stack_free_hiwat = stack_free_count;
		stack_free_delta++;
		stack_unlock();
	}
	splx(s);
}

/*
 *	stack_alloc_try:
 *
 *	Non-blocking attempt to allocate a
 *	stack for a thread.
 *
 *	Returns TRUE on success.
 *
 *	Called at splsched.
 */
boolean_t
stack_alloc_try(
	thread_t		thread)
{
	struct stack_cache	*cache;
	vm_offset_t		stack;

	cache = &PROCESSOR_DATA(current_processor(), stack_cache);
	stack = cache->free;
	if (stack != 0) {
		cache->free = stack_next(stack);
		cache->count--;
	}
	else {
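		/*
		 *	The processor-local cache is empty.  Peek at the global
		 *	free list without the lock first; if it looks non-empty,
		 *	take stack_lock() and re-check before popping.
		 */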
		if (stack_free_list != 0) {
			stack_lock();
			stack = stack_free_list;
			if (stack != 0) {
				stack_free_list = stack_next(stack);
				stack_free_count--;
				stack_free_delta--;
			}
			stack_unlock();
		}
	}

	if (stack != 0 || (stack = thread->reserved_stack) != 0) {
		machine_stack_attach(thread, stack);
		return (TRUE);
	}

	return (FALSE);
}

static unsigned int		stack_collect_tick, last_stack_tick;

/*
 *	stack_collect:
 *
 *	Free excess kernel stacks; may block.
 */
void
stack_collect(void)
{
	if (stack_collect_tick != last_stack_tick) {
		unsigned int		target;
		vm_offset_t		stack;
		spl_t			s;

		s = splsched();
		stack_lock();

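		/*
		 *	Free target: the computed free-list target, plus room
		 *	for every processor's local cache, plus slack for the
		 *	recent alloc/free imbalance.  Stacks beyond this are
		 *	returned to the VM system below.
		 */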
		target = stack_free_target + (STACK_CACHE_SIZE * processor_count);
		target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;

		while (stack_free_count > target) {
			stack = stack_free_list;
			stack_free_list = stack_next(stack);
			stack_free_count--; stack_total--;
			stack_unlock();
			splx(s);

			if (vm_map_remove(stack_map, vm_map_trunc_page(stack),
					vm_map_round_page(stack + KERNEL_STACK_SIZE), VM_MAP_REMOVE_KUNWIRE) != KERN_SUCCESS)
				panic("stack_collect: vm_map_remove");

			s = splsched();
			stack_lock();

			target = stack_free_target + (STACK_CACHE_SIZE * processor_count);
			target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;
		}

		last_stack_tick = stack_collect_tick;

		stack_unlock();
		splx(s);
	}
}

/*
 *	compute_stack_target:
 *
 *	Computes a new target free list count
 *	based on recent alloc / free activity.
 *
 *	Limits stack collection to once per
 *	computation period.
 */
void
compute_stack_target(
__unused void		*arg)
{
	spl_t		s;

	s = splsched();
	stack_lock();

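	/*
	 *	Decay the previous target by roughly 20% per period, then
	 *	add back the magnitude of the alloc/free imbalance seen
	 *	since the last computation.
	 */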
	if (stack_free_target > 5)
		stack_free_target = (4 * stack_free_target) / 5;
	else
	if (stack_free_target > 0)
		stack_free_target--;

	stack_free_target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;

	stack_free_delta = 0;
	stack_collect_tick++;

	stack_unlock();
	splx(s);
}

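/*
 *	stack_fake_zone_info:
 *
 *	Report the stack pool in zone-like terms (element size, current
 *	and peak sizes) so it can be listed alongside real zones by
 *	zone reporting interfaces.
 */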
void
stack_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
		     vm_size_t *alloc_size, int *collectable, int *exhaustable)
{
	unsigned int	total, hiwat, free;
	spl_t		s;

	s = splsched();
	stack_lock();
	total = stack_total;
	hiwat = stack_hiwat;
	free = stack_free_count;
	stack_unlock();
	splx(s);

	*count      = total - free;
	*cur_size   = KERNEL_STACK_SIZE * total;
	*max_size   = KERNEL_STACK_SIZE * hiwat;
	*elem_size  = KERNEL_STACK_SIZE;
	*alloc_size = KERNEL_STACK_SIZE;
	*collectable = 1;
	*exhaustable = 0;
}

/* OBSOLETE */
void	stack_privilege(
			thread_t	thread);

void
stack_privilege(
	__unused thread_t	thread)
{
	/* OBSOLETE */
}

/*
 *	Return info on stack usage for threads in a specific processor set
 */
kern_return_t
processor_set_stack_usage(
	processor_set_t	pset,
	unsigned int	*totalp,
	vm_size_t	*spacep,
	vm_size_t	*residentp,
	vm_size_t	*maxusagep,
	vm_offset_t	*maxstackp)
{
#if !MACH_DEBUG
	return KERN_NOT_SUPPORTED;
#else
	unsigned int total;
	vm_size_t maxusage;
	vm_offset_t maxstack;

	register thread_t *threads;
	register thread_t thread;

	unsigned int actual;	/* this many things */
	unsigned int i;

	vm_size_t size, size_needed;
	void *addr;

	if (pset == PROCESSOR_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	size = 0; addr = 0;

	for (;;) {
		pset_lock(pset);
		if (!pset->active) {
			pset_unlock(pset);
			return KERN_INVALID_ARGUMENT;
		}

		actual = pset->thread_count;

		/* do we have the memory we need? */

		size_needed = actual * sizeof(thread_t);
		if (size_needed <= size)
			break;

		/* unlock the pset and allocate more memory */
		pset_unlock(pset);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return KERN_RESOURCE_SHORTAGE;
	}

	/* OK, have memory and the processor_set is locked & active */
	threads = (thread_t *) addr;
	for (i = 0, thread = (thread_t) queue_first(&pset->threads);
			!queue_end(&pset->threads, (queue_entry_t) thread);
			thread = (thread_t) queue_next(&thread->pset_threads)) {
		thread_reference_internal(thread);
		threads[i++] = thread;
	}
	assert(i <= actual);

	/* can unlock processor set now that we have the thread refs */
	pset_unlock(pset);

	/* calculate maxusage and free thread references */

	total = 0;
	maxusage = 0;
	maxstack = 0;
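	/*
	 *	Note: only threads that currently have a kernel stack are
	 *	counted; maxusage and maxstack are left at zero here.
	 */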
	while (i > 0) {
		thread_t threadref = threads[--i];

		if (threadref->kernel_stack != 0)
			total++;

		thread_deallocate(threadref);
	}

	if (size != 0)
		kfree(addr, size);

	*totalp = total;
	*residentp = *spacep = total * round_page(KERNEL_STACK_SIZE);
	*maxusagep = maxusage;
	*maxstackp = maxstack;
	return KERN_SUCCESS;

#endif	/* MACH_DEBUG */
}

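/*
 *	Bounds of the stack submap; callers can use these to range-check
 *	candidate kernel stack addresses.
 */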
vm_offset_t min_valid_stack_address(void)
{
	return vm_map_min(stack_map);
}

vm_offset_t max_valid_stack_address(void)
{
	return vm_map_max(stack_map);
}