/*
 * Copyright (c) 2003-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 *	Kernel stack management routines.
 */

#include <mach/mach_host.h>
#include <mach/mach_types.h>
#include <mach/processor_set.h>

#include <kern/kern_types.h>
#include <kern/mach_param.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <mach_debug.h>

/*
 *	We allocate stacks from generic kernel VM.
 *
 *	The stack_free_list can only be accessed at splsched,
 *	because stack_alloc_try/thread_invoke operate at splsched.
 */

decl_simple_lock_data(static,stack_lock_data)
#define stack_lock()		simple_lock(&stack_lock_data)
#define stack_unlock()		simple_unlock(&stack_lock_data)

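/*
 * Each processor also keeps a small private cache of free stacks
 * (the stack_cache field in its processor data), holding at most
 * STACK_CACHE_SIZE entries, so the common free/allocate path can
 * avoid taking the global stack_lock.
 */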
#define STACK_CACHE_SIZE	2

static vm_map_t		stack_map;
static vm_offset_t	stack_free_list;

static unsigned int	stack_free_count, stack_free_hiwat;	/* free list count */
static unsigned int	stack_total, stack_hiwat;		/* current total count */

static unsigned int	stack_free_target;
static int		stack_free_delta;

static unsigned int	stack_new_count;			/* total new stack allocations */

static vm_offset_t	stack_addr_mask;

/*
 *	The next field is at the base of the stack,
 *	so the low end is left unsullied.
 */
#define stack_next(stack)	\
	(*((vm_offset_t *)((stack) + KERNEL_STACK_SIZE) - 1))

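/*
 *	stack_init:
 *
 *	Set up kernel stack allocation: compute the allocation boundary
 *	(the smallest power of two strictly greater than KERNEL_STACK_SIZE),
 *	carve a submap out of kernel_map with room for (2 * THREAD_MAX + 64)
 *	boundary-sized slots, and map the submap's first page VM_PROT_NONE,
 *	presumably so that no stack is ever handed out at the very bottom
 *	of the map.
 */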
void
stack_init(void)
{
	vm_offset_t		stacks, boundary;
	vm_map_offset_t		map_addr;

	simple_lock_init(&stack_lock_data, 0);

	if (KERNEL_STACK_SIZE < round_page(KERNEL_STACK_SIZE))
		panic("stack_init: stack size %d not a multiple of page size %d\n", KERNEL_STACK_SIZE, PAGE_SIZE);

	for (boundary = PAGE_SIZE; boundary <= KERNEL_STACK_SIZE; )
		boundary <<= 1;

	stack_addr_mask = boundary - 1;

	if (kmem_suballoc(kernel_map, &stacks, (boundary * (2 * THREAD_MAX + 64)),
				FALSE, VM_FLAGS_ANYWHERE, &stack_map) != KERN_SUCCESS)
		panic("stack_init: kmem_suballoc");

	map_addr = vm_map_min(stack_map);
	if (vm_map_enter(stack_map, &map_addr, vm_map_round_page(PAGE_SIZE), 0, VM_FLAGS_FIXED,
				VM_OBJECT_NULL, 0, FALSE, VM_PROT_NONE, VM_PROT_NONE, VM_INHERIT_DEFAULT) != KERN_SUCCESS)
		panic("stack_init: vm_map_enter");
}

/*
 *	stack_alloc:
 *
 *	Allocate a stack for a thread, may
 *	block.
 */
void
stack_alloc(
	thread_t	thread)
{
	vm_offset_t	stack;
	spl_t		s;

	assert(thread->kernel_stack == 0);

	s = splsched();
	stack_lock();
	stack = stack_free_list;
	if (stack != 0) {
		stack_free_list = stack_next(stack);
		stack_free_count--;
	}
	else {
		if (++stack_total > stack_hiwat)
			stack_hiwat = stack_total;
		stack_new_count++;
	}
	stack_free_delta--;
	stack_unlock();
	splx(s);

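	/*
	 * The global free list was empty: allocate a fresh wired stack
	 * from the stack submap, outside the splsched window since
	 * kernel_memory_allocate() may block.  stack_addr_mask keeps
	 * the allocation aligned to the power-of-two boundary computed
	 * in stack_init().
	 */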
	if (stack == 0) {
		if (kernel_memory_allocate(stack_map, &stack, KERNEL_STACK_SIZE, stack_addr_mask, KMA_KOBJECT) != KERN_SUCCESS)
			panic("stack_alloc: kernel_memory_allocate");
	}

	machine_stack_attach(thread, stack);
}

/*
 *	stack_free:
 *
 *	Detach and free the stack for a thread.
 */
void
stack_free(
	thread_t	thread)
{
	vm_offset_t	stack = machine_stack_detach(thread);

	assert(stack);
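	/*
	 * If the detached stack is the thread's reserved_stack it is not
	 * returned to the pools; it remains the thread's reservation.
	 */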
	if (stack != thread->reserved_stack) {
		struct stack_cache	*cache;
		spl_t			s;

		s = splsched();
		cache = &PROCESSOR_DATA(current_processor(), stack_cache);
		if (cache->count < STACK_CACHE_SIZE) {
			stack_next(stack) = cache->free;
			cache->free = stack;
			cache->count++;
		}
		else {
			stack_lock();
			stack_next(stack) = stack_free_list;
			stack_free_list = stack;
			if (++stack_free_count > stack_free_hiwat)
				stack_free_hiwat = stack_free_count;
			stack_free_delta++;
			stack_unlock();
		}
		splx(s);
	}
}

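/*
 *	stack_free_stack:
 *
 *	Return a bare stack to the free pools; the caching logic mirrors
 *	stack_free(), for callers that hold a stack pointer rather than
 *	the thread it came from.
 */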
void
stack_free_stack(
	vm_offset_t	stack)
{
	struct stack_cache	*cache;
	spl_t			s;

	s = splsched();
	cache = &PROCESSOR_DATA(current_processor(), stack_cache);
	if (cache->count < STACK_CACHE_SIZE) {
		stack_next(stack) = cache->free;
		cache->free = stack;
		cache->count++;
	}
	else {
		stack_lock();
		stack_next(stack) = stack_free_list;
		stack_free_list = stack;
		if (++stack_free_count > stack_free_hiwat)
			stack_free_hiwat = stack_free_count;
		stack_free_delta++;
		stack_unlock();
	}
	splx(s);
}

/*
 *	stack_alloc_try:
 *
 *	Non-blocking attempt to allocate a
 *	stack for a thread.
 *
 *	Returns TRUE on success.
 *
 *	Called at splsched.
 */
boolean_t
stack_alloc_try(
	thread_t		thread)
{
	struct stack_cache	*cache;
	vm_offset_t		stack;

	cache = &PROCESSOR_DATA(current_processor(), stack_cache);
	stack = cache->free;
	if (stack != 0) {
		cache->free = stack_next(stack);
		cache->count--;
	}
	else {
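		/*
		 * The per-processor cache is empty; peek at the global free
		 * list without the lock, then re-check it under stack_lock()
		 * since another processor may have emptied it in the meantime.
		 */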
		if (stack_free_list != 0) {
			stack_lock();
			stack = stack_free_list;
			if (stack != 0) {
				stack_free_list = stack_next(stack);
				stack_free_count--;
				stack_free_delta--;
			}
			stack_unlock();
		}
	}

	if (stack != 0 || (stack = thread->reserved_stack) != 0) {
		machine_stack_attach(thread, stack);
		return (TRUE);
	}

	return (FALSE);
}

static unsigned int		stack_collect_tick, last_stack_tick;

/*
 *	stack_collect:
 *
 *	Free excess kernel stacks, may
 *	block.
 */
void
stack_collect(void)
{
	if (stack_collect_tick != last_stack_tick) {
		unsigned int	target;
		vm_offset_t	stack;
		spl_t		s;

		s = splsched();
		stack_lock();

		target = stack_free_target + (STACK_CACHE_SIZE * processor_count);
		target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;

		while (stack_free_count > target) {
			stack = stack_free_list;
			stack_free_list = stack_next(stack);
			stack_free_count--; stack_total--;
			stack_unlock();
			splx(s);

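			/*
			 * Drop the lock and splsched around the unmap, since
			 * vm_map_remove() may block; the target is recomputed
			 * under the re-taken lock because the free list may
			 * have changed in the meantime.
			 */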
			if (vm_map_remove(stack_map, vm_map_trunc_page(stack),
						vm_map_round_page(stack + KERNEL_STACK_SIZE), VM_MAP_REMOVE_KUNWIRE) != KERN_SUCCESS)
				panic("stack_collect: vm_map_remove");

			s = splsched();
			stack_lock();

			target = stack_free_target + (STACK_CACHE_SIZE * processor_count);
			target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;
		}

		last_stack_tick = stack_collect_tick;

		stack_unlock();
		splx(s);
	}
}

/*
 *	compute_stack_target:
 *
 *	Computes a new target free list count
 *	based on recent alloc / free activity.
 *
 *	Limits stack collection to once per
 *	computation period.
 */
void
compute_stack_target(
__unused void		*arg)
{
	spl_t		s;

	s = splsched();
	stack_lock();

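	/*
	 * Decay the previous target by roughly 20% (or by one when it is
	 * already small), then add the magnitude of the alloc/free
	 * imbalance seen since the last computation period.
	 */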
	if (stack_free_target > 5)
		stack_free_target = (4 * stack_free_target) / 5;
	else
	if (stack_free_target > 0)
		stack_free_target--;

	stack_free_target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;

	stack_free_delta = 0;
	stack_collect_tick++;

	stack_unlock();
	splx(s);
}

void
stack_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
		     vm_size_t *alloc_size, int *collectable, int *exhaustable)
{
	unsigned int	total, hiwat, free;
	spl_t		s;

	s = splsched();
	stack_lock();
	total = stack_total;
	hiwat = stack_hiwat;
	free = stack_free_count;
	stack_unlock();
	splx(s);

	*count = total - free;
	*cur_size = KERNEL_STACK_SIZE * total;
	*max_size = KERNEL_STACK_SIZE * hiwat;
	*elem_size = KERNEL_STACK_SIZE;
	*alloc_size = KERNEL_STACK_SIZE;
	*collectable = 1;
	*exhaustable = 0;
}

/* OBSOLETE */
void	stack_privilege(
			thread_t	thread);

void
stack_privilege(
	__unused thread_t	thread)
{
	/* OBSOLETE */
}

/*
 *	Return info on stack usage for threads in a specific processor set
 */
kern_return_t
processor_set_stack_usage(
	processor_set_t	pset,
	unsigned int	*totalp,
	vm_size_t	*spacep,
	vm_size_t	*residentp,
	vm_size_t	*maxusagep,
	vm_offset_t	*maxstackp)
{
#if !MACH_DEBUG
	return KERN_NOT_SUPPORTED;
#else
	unsigned int total;
	vm_size_t maxusage;
	vm_offset_t maxstack;

	register thread_t *threads;
	register thread_t thread;

	unsigned int actual;	/* this many things */
	unsigned int i;

	vm_size_t size, size_needed;
	void *addr;

	if (pset == PROCESSOR_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	size = 0; addr = 0;

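	/*
	 * Guess how much memory the thread list needs, allocate it
	 * unlocked, then re-take the pset lock and re-check the thread
	 * count; if the set grew in the meantime, free the buffer and
	 * try again with the larger size.
	 */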
	for (;;) {
		pset_lock(pset);
		if (!pset->active) {
			pset_unlock(pset);
			return KERN_INVALID_ARGUMENT;
		}

		actual = pset->thread_count;

		/* do we have the memory we need? */

		size_needed = actual * sizeof(thread_t);
		if (size_needed <= size)
			break;

		/* unlock the pset and allocate more memory */
		pset_unlock(pset);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return KERN_RESOURCE_SHORTAGE;
	}

	/* OK, have memory and the processor_set is locked & active */
	threads = (thread_t *) addr;
	for (i = 0, thread = (thread_t) queue_first(&pset->threads);
			!queue_end(&pset->threads, (queue_entry_t) thread);
			thread = (thread_t) queue_next(&thread->pset_threads)) {
		thread_reference_internal(thread);
		threads[i++] = thread;
	}
	assert(i <= actual);

	/* can unlock processor set now that we have the thread refs */
	pset_unlock(pset);

	/* calculate maxusage and free thread references */

	total = 0;
	maxusage = 0;
	maxstack = 0;
	while (i > 0) {
		thread_t threadref = threads[--i];

		if (threadref->kernel_stack != 0)
			total++;

		thread_deallocate(threadref);
	}

	if (size != 0)
		kfree(addr, size);

	*totalp = total;
	*residentp = *spacep = total * round_page(KERNEL_STACK_SIZE);
	*maxusagep = maxusage;
	*maxstackp = maxstack;
	return KERN_SUCCESS;

#endif	/* MACH_DEBUG */
}

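/*
 * Bounds of the kernel stack submap; any valid kernel stack address
 * falls within [min_valid_stack_address(), max_valid_stack_address()).
 */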
vm_offset_t min_valid_stack_address(void)
{
	return vm_map_min(stack_map);
}

vm_offset_t max_valid_stack_address(void)
{
	return vm_map_max(stack_map);
}