/*
 * Copyright (c) 2003-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 *	Kernel stack management routines.
 */

#include <mach/mach_host.h>
#include <mach/mach_types.h>
#include <mach/processor_set.h>

#include <kern/kern_types.h>
#include <kern/mach_param.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <mach_debug.h>

/*
 *	We allocate stacks from generic kernel VM.
 *
 *	The stack_free_list can only be accessed at splsched,
 *	because stack_alloc_try/thread_invoke operate at splsched.
 */

decl_simple_lock_data(static,stack_lock_data)
#define stack_lock()		simple_lock(&stack_lock_data)
#define stack_unlock()		simple_unlock(&stack_lock_data)

#define STACK_CACHE_SIZE	2

static vm_offset_t		stack_free_list;

static unsigned int		stack_free_count, stack_free_hiwat;	/* free list count */
static unsigned int		stack_total, stack_hiwat;		/* current total count */

static unsigned int		stack_free_target;
static int			stack_free_delta;

static unsigned int		stack_new_count;			/* total new stack allocations */

static vm_offset_t		stack_addr_mask;

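/*
 *	stack_addr_mask is handed to kernel_memory_allocate() as an
 *	alignment mask, so every stack is aligned to KERNEL_STACK_SIZE
 *	(which must be a power of two for the mask to be well formed).
 *	A stack aligned to its own size lets its base be recovered from
 *	any address within it by masking off the low bits.
 */
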
/*
 *	The next field is at the base of the stack,
 *	so the low end is left unsullied.
 */
#define stack_next(stack)	\
		(*((vm_offset_t *)((stack) + KERNEL_STACK_SIZE) - 1))

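/*
 *	Illustrative layout of one allocation (addresses increase upward;
 *	see the guard page arithmetic in stack_alloc() below):
 *
 *	  +--------------+  <- stack + KERNEL_STACK_SIZE + PAGE_SIZE
 *	  |  guard page  |
 *	  +--------------+  <- stack + KERNEL_STACK_SIZE
 *	  |  stack_next  |     (link used only while on a free list)
 *	  |      ...     |     stack grows downward from here
 *	  +--------------+  <- stack (base handed to machine_stack_attach())
 *	  |  guard page  |
 *	  +--------------+  <- address returned by kernel_memory_allocate()
 */
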
void
stack_init(void)
{
	simple_lock_init(&stack_lock_data, 0);

	if (KERNEL_STACK_SIZE < round_page(KERNEL_STACK_SIZE))
		panic("stack_init: stack size %d not a multiple of page size %d\n", KERNEL_STACK_SIZE, PAGE_SIZE);

	stack_addr_mask = KERNEL_STACK_SIZE - 1;
}

/*
 *	stack_alloc:
 *
 *	Allocate a stack for a thread, may
 *	block.
 */
void
stack_alloc(
	thread_t	thread)
{
	vm_offset_t	stack;
	spl_t		s;
	int		guard_flags;

	assert(thread->kernel_stack == 0);

	s = splsched();
	stack_lock();
	stack = stack_free_list;
	if (stack != 0) {
		stack_free_list = stack_next(stack);
		stack_free_count--;
	}
	else {
		if (++stack_total > stack_hiwat)
			stack_hiwat = stack_total;
		stack_new_count++;
	}
	stack_free_delta--;
	stack_unlock();
	splx(s);

	if (stack == 0) {
		/*
		 * Request guard pages on either side of the stack. Ask
		 * kernel_memory_allocate() for two extra pages to account
		 * for these.
		 */
		guard_flags = KMA_GUARD_FIRST | KMA_GUARD_LAST;
		if (kernel_memory_allocate(kernel_map, &stack,
					   KERNEL_STACK_SIZE + (2*PAGE_SIZE),
					   stack_addr_mask,
					   KMA_KOBJECT | guard_flags)
				!= KERN_SUCCESS)
			panic("stack_alloc: kernel_memory_allocate");

		/*
		 * The stack address that comes back is the address of the lower
		 * guard page. Skip past it to get the actual stack base address.
		 */
		stack += PAGE_SIZE;
	}

	machine_stack_attach(thread, stack);
}

/*
 *	stack_free:
 *
 *	Detach and free the stack for a thread.
 */
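/*
 *	A thread's reserved_stack, if any, is kept for that thread's
 *	exclusive use rather than being returned to the free list here.
 */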
void
stack_free(
	thread_t	thread)
{
	vm_offset_t	stack = machine_stack_detach(thread);

	assert(stack);
	if (stack != thread->reserved_stack)
		stack_free_stack(stack);
}

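/*
 *	Freed stacks go first to the current processor's small private
 *	cache, which can be touched without taking the global stack_lock
 *	(once splsched() is raised, we cannot be preempted off this
 *	processor); only when the cache is full is the lock taken to
 *	push onto the global free list.
 */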
void
stack_free_stack(
	vm_offset_t		stack)
{
	struct stack_cache	*cache;
	spl_t			s;

	s = splsched();
	cache = &PROCESSOR_DATA(current_processor(), stack_cache);
	if (cache->count < STACK_CACHE_SIZE) {
		stack_next(stack) = cache->free;
		cache->free = stack;
		cache->count++;
	}
	else {
		stack_lock();
		stack_next(stack) = stack_free_list;
		stack_free_list = stack;
		if (++stack_free_count > stack_free_hiwat)
			stack_free_hiwat = stack_free_count;
		stack_free_delta++;
		stack_unlock();
	}
	splx(s);
}

/*
 *	stack_alloc_try:
 *
 *	Non-blocking attempt to allocate a
 *	stack for a thread.
 *
 *	Returns TRUE on success.
 *
 *	Called at splsched.
 */
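/*
 *	The unlocked read of stack_free_list below is a racy fast-path
 *	check; the list head is re-read under stack_lock before being
 *	popped, so a stale observation costs at most a missed stack.
 */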
boolean_t
stack_alloc_try(
	thread_t		thread)
{
	struct stack_cache	*cache;
	vm_offset_t		stack;

	cache = &PROCESSOR_DATA(current_processor(), stack_cache);
	stack = cache->free;
	if (stack != 0) {
		cache->free = stack_next(stack);
		cache->count--;
	}
	else {
		if (stack_free_list != 0) {
			stack_lock();
			stack = stack_free_list;
			if (stack != 0) {
				stack_free_list = stack_next(stack);
				stack_free_count--;
				stack_free_delta--;
			}
			stack_unlock();
		}
	}

	if (stack != 0 || (stack = thread->reserved_stack) != 0) {
		machine_stack_attach(thread, stack);
		return (TRUE);
	}

	return (FALSE);
}

static unsigned int		stack_collect_tick, last_stack_tick;

/*
 *	stack_collect:
 *
 *	Free excess kernel stacks, may
 *	block.
 */
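/*
 *	stack_lock (and splsched) must be dropped around the blocking
 *	vm_map_remove() call, so both the list head and the target are
 *	recomputed each time the lock is reacquired.
 */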
void
stack_collect(void)
{
	if (stack_collect_tick != last_stack_tick) {
		unsigned int	target;
		vm_offset_t	stack;
		spl_t		s;

		s = splsched();
		stack_lock();

		target = stack_free_target + (STACK_CACHE_SIZE * processor_count);
		target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;

		while (stack_free_count > target) {
			stack = stack_free_list;
			stack_free_list = stack_next(stack);
			stack_free_count--; stack_total--;
			stack_unlock();
			splx(s);

			/*
			 * Get the stack base address, then decrement by one page
			 * to account for the lower guard page. Add two extra pages
			 * to the size to account for the guard pages on both ends
			 * that were originally requested when the stack was allocated
			 * back in stack_alloc().
			 */
			stack = vm_map_trunc_page(stack);
			stack -= PAGE_SIZE;
			if (vm_map_remove(
					kernel_map,
					stack,
					stack + KERNEL_STACK_SIZE+(2*PAGE_SIZE),
					VM_MAP_REMOVE_KUNWIRE)
						!= KERN_SUCCESS)
				panic("stack_collect: vm_map_remove");
			stack = 0;

			s = splsched();
			stack_lock();

			target = stack_free_target + (STACK_CACHE_SIZE * processor_count);
			target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;
		}

		last_stack_tick = stack_collect_tick;

		stack_unlock();
		splx(s);
	}
}

/*
 *	compute_stack_target:
 *
 *	Computes a new target free list count
 *	based on recent alloc / free activity.
 *
 *	Limits stack collection to once per
 *	computation period.
 */
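/*
 *	The previous target decays geometrically (scaled by 4/5 each
 *	period) and is then boosted by the magnitude of the period's
 *	net alloc/free imbalance, stack_free_delta.
 */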
void
compute_stack_target(
__unused void		*arg)
{
	spl_t		s;

	s = splsched();
	stack_lock();

	if (stack_free_target > 5)
		stack_free_target = (4 * stack_free_target) / 5;
	else
	if (stack_free_target > 0)
		stack_free_target--;

	stack_free_target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;

	stack_free_delta = 0;
	stack_collect_tick++;

	stack_unlock();
	splx(s);
}

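/*
 *	Report kernel stacks in the shape of a zone so that zone
 *	introspection tools (such as the "fake zone" reporting behind
 *	host_zone_info() and zprint) can display stack usage alongside
 *	real zones.
 */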
void
stack_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
		     vm_size_t *alloc_size, int *collectable, int *exhaustable)
{
	unsigned int	total, hiwat, free;
	spl_t		s;

	s = splsched();
	stack_lock();
	total = stack_total;
	hiwat = stack_hiwat;
	free = stack_free_count;
	stack_unlock();
	splx(s);

	*count       = total - free;
	*cur_size    = KERNEL_STACK_SIZE * total;
	*max_size    = KERNEL_STACK_SIZE * hiwat;
	*elem_size   = KERNEL_STACK_SIZE;
	*alloc_size  = KERNEL_STACK_SIZE;
	*collectable = 1;
	*exhaustable = 0;
}

/* OBSOLETE */
void	stack_privilege(
		thread_t	thread);

void
stack_privilege(
	__unused thread_t	thread)
{
	/* OBSOLETE */
}

/*
 *	Return info on stack usage for threads in a specific processor set
 */
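/*
 *	The sizing loop below follows the usual allocate-outside-the-lock
 *	pattern: guess the thread count, allocate, then retry if the count
 *	grew while tasks_threads_lock was dropped, so kalloc() is never
 *	called with the lock held.
 */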
kern_return_t
processor_set_stack_usage(
	processor_set_t	pset,
	unsigned int	*totalp,
	vm_size_t	*spacep,
	vm_size_t	*residentp,
	vm_size_t	*maxusagep,
	vm_offset_t	*maxstackp)
{
#if !MACH_DEBUG
	return KERN_NOT_SUPPORTED;
#else
	unsigned int total;
	vm_size_t maxusage;
	vm_offset_t maxstack;

	register thread_t *thread_list;
	register thread_t thread;

	unsigned int actual;	/* this many things */
	unsigned int i;

	vm_size_t size, size_needed;
	void *addr;

	if (pset == PROCESSOR_SET_NULL || pset != &pset0)
		return KERN_INVALID_ARGUMENT;

	size = 0;
	addr = NULL;

	for (;;) {
		mutex_lock(&tasks_threads_lock);

		actual = threads_count;

		/* do we have the memory we need? */

		size_needed = actual * sizeof(thread_t);
		if (size_needed <= size)
			break;

		mutex_unlock(&tasks_threads_lock);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return KERN_RESOURCE_SHORTAGE;
	}

	/* OK, have memory and list is locked */
	thread_list = (thread_t *) addr;
	for (i = 0, thread = (thread_t) queue_first(&threads);
			!queue_end(&threads, (queue_entry_t) thread);
			thread = (thread_t) queue_next(&thread->threads)) {
		thread_reference_internal(thread);
		thread_list[i++] = thread;
	}
	assert(i <= actual);

	mutex_unlock(&tasks_threads_lock);

	/* calculate maxusage and free thread references */

	total = 0;
	maxusage = 0;
	maxstack = 0;
	while (i > 0) {
		thread_t threadref = thread_list[--i];

		if (threadref->kernel_stack != 0)
			total++;

		thread_deallocate(threadref);
	}

	if (size != 0)
		kfree(addr, size);

	*totalp = total;
	*residentp = *spacep = total * round_page(KERNEL_STACK_SIZE);
	*maxusagep = maxusage;
	*maxstackp = maxstack;
	return KERN_SUCCESS;

#endif	/* MACH_DEBUG */
}

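/*
 *	Bounds used by machine-dependent code (for example, to sanity
 *	check a stack pointer before walking a backtrace) to decide
 *	whether an address could plausibly be a kernel stack address.
 */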
vm_offset_t min_valid_stack_address(void)
{
	return vm_map_min(kernel_map);
}

vm_offset_t max_valid_stack_address(void)
{
	return vm_map_max(kernel_map);
}