/*
 * Copyright (c) 2003-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 *	Kernel stack management routines.
 */

#include <mach/mach_host.h>
#include <mach/mach_types.h>
#include <mach/processor_set.h>

#include <kern/kern_types.h>
#include <kern/mach_param.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <mach_debug.h>

/*
 *	We allocate stacks from generic kernel VM.
 *
 *	The stack_free_list can only be accessed at splsched,
 *	because stack_alloc_try/thread_invoke operate at splsched.
 */

decl_simple_lock_data(static,stack_lock_data)
#define stack_lock()		simple_lock(&stack_lock_data)
#define stack_unlock()		simple_unlock(&stack_lock_data)

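/*
 *	Each processor also keeps a small private cache of free
 *	stacks (see stack_free / stack_alloc_try below), so the
 *	common stack handoff path can run without touching the
 *	global free list or its lock.
 */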
#define STACK_CACHE_SIZE	2

static vm_map_t			stack_map;
static vm_offset_t		stack_free_list;

static unsigned int		stack_free_count, stack_free_hiwat;	/* free list count */
static unsigned int		stack_total, stack_hiwat;		/* current total count */

static unsigned int		stack_free_target;
static int			stack_free_delta;

static unsigned int		stack_new_count;			/* total new stack allocations */

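/*
 *	Stacks are allocated on a power-of-two boundary no smaller
 *	than the stack size (see stack_init), so masking a kernel
 *	stack pointer with stack_addr_mask locates the enclosing
 *	stack.  (The machine-dependent layer is presumed to rely
 *	on this alignment.)
 */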
static vm_offset_t		stack_addr_mask;

/*
 *	The next field is at the base of the stack,
 *	so the low end is left unsullied.
 */
#define stack_next(stack)	\
		(*((vm_offset_t *)((stack) + KERNEL_STACK_SIZE) - 1))
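/*
 *	That is, the free-list link lives in the highest
 *	vm_offset_t-sized word of the stack region; since kernel
 *	stacks grow downward, a free stack's link occupies storage
 *	at the stack base, away from the deep (low) end.
 */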
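/*
 *	stack_init:
 *
 *	Set up the stack sub-map: verify the stack size is
 *	page-aligned, round the allocation boundary up to a power
 *	of two (recorded in stack_addr_mask), carve a sub-map out
 *	of kernel_map sized for the worst case, and map the
 *	sub-map's first page VM_PROT_NONE so it is never handed
 *	out as (part of) a stack -- presumably a red zone at the
 *	bottom of the map.
 */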
void
stack_init(void)
{
	vm_offset_t		stacks, boundary;
	vm_map_offset_t		map_addr;

	simple_lock_init(&stack_lock_data, 0);

	if (KERNEL_STACK_SIZE < round_page(KERNEL_STACK_SIZE))
		panic("stack_init: stack size %d not a multiple of page size %d\n", KERNEL_STACK_SIZE, PAGE_SIZE);

	for (boundary = PAGE_SIZE; boundary <= KERNEL_STACK_SIZE; )
		boundary <<= 1;

	stack_addr_mask = boundary - 1;

	if (kmem_suballoc(kernel_map, &stacks, (boundary * (2 * THREAD_MAX + 64)),
				FALSE, VM_FLAGS_ANYWHERE, &stack_map) != KERN_SUCCESS)
		panic("stack_init: kmem_suballoc");

	map_addr = vm_map_min(stack_map);
	if (vm_map_enter(stack_map, &map_addr, vm_map_round_page(PAGE_SIZE), 0, VM_FLAGS_FIXED,
				VM_OBJECT_NULL, 0, FALSE, VM_PROT_NONE, VM_PROT_NONE, VM_INHERIT_DEFAULT) != KERN_SUCCESS)
		panic("stack_init: vm_map_enter");
}

/*
 *	stack_alloc:
 *
 *	Allocate a stack for a thread, may
 *	block.
 */
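/*
 *	The free-list pop and counter updates happen under the
 *	stack lock at splsched; the fallback allocation from
 *	stack_map is done only after dropping back below splsched,
 *	since kernel_memory_allocate may block.
 */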
void
stack_alloc(
	thread_t	thread)
{
	vm_offset_t	stack;
	spl_t		s;

	assert(thread->kernel_stack == 0);

	s = splsched();
	stack_lock();
	stack = stack_free_list;
	if (stack != 0) {
		stack_free_list = stack_next(stack);
		stack_free_count--;
	}
	else {
		if (++stack_total > stack_hiwat)
			stack_hiwat = stack_total;
		stack_new_count++;
	}
	stack_free_delta--;
	stack_unlock();
	splx(s);

	if (stack == 0) {
		if (kernel_memory_allocate(stack_map, &stack, KERNEL_STACK_SIZE, stack_addr_mask, KMA_KOBJECT) != KERN_SUCCESS)
			panic("stack_alloc: kernel_memory_allocate");
	}

	machine_stack_attach(thread, stack);
}

/*
 *	stack_free:
 *
 *	Detach and free the stack for a thread.
 */
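/*
 *	A thread's reserved_stack is never returned to the free
 *	pool here; it stays bound to the thread.  Other stacks go
 *	first to the current processor's private cache and, once
 *	that is full, to the global free list.
 */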
void
stack_free(
	thread_t	thread)
{
	vm_offset_t	stack = machine_stack_detach(thread);

	assert(stack);
	if (stack != thread->reserved_stack) {
		struct stack_cache	*cache;
		spl_t			s;

		s = splsched();
		cache = &PROCESSOR_DATA(current_processor(), stack_cache);
		if (cache->count < STACK_CACHE_SIZE) {
			stack_next(stack) = cache->free;
			cache->free = stack;
			cache->count++;
		}
		else {
			stack_lock();
			stack_next(stack) = stack_free_list;
			stack_free_list = stack;
			if (++stack_free_count > stack_free_hiwat)
				stack_free_hiwat = stack_free_count;
			stack_free_delta++;
			stack_unlock();
		}
		splx(s);
	}
}
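/*
 *	stack_free_stack:
 *
 *	Free a bare stack that is not currently attached to a
 *	thread, using the same caching policy as stack_free.
 */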
void
stack_free_stack(
	vm_offset_t	stack)
{
	struct stack_cache	*cache;
	spl_t			s;

	s = splsched();
	cache = &PROCESSOR_DATA(current_processor(), stack_cache);
	if (cache->count < STACK_CACHE_SIZE) {
		stack_next(stack) = cache->free;
		cache->free = stack;
		cache->count++;
	}
	else {
		stack_lock();
		stack_next(stack) = stack_free_list;
		stack_free_list = stack;
		if (++stack_free_count > stack_free_hiwat)
			stack_free_hiwat = stack_free_count;
		stack_free_delta++;
		stack_unlock();
	}
	splx(s);
}

/*
 *	stack_alloc_try:
 *
 *	Non-blocking attempt to allocate a
 *	stack for a thread.
 *
 *	Returns TRUE on success.
 *
 *	Called at splsched.
 */
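/*
 *	The unlocked read of stack_free_list below is only a hint:
 *	when it looks non-empty, the list is re-examined under the
 *	stack lock before anything is popped, so a stale read costs
 *	at most a wasted lock acquisition or a missed stack.
 */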
boolean_t
stack_alloc_try(
	thread_t	thread)
{
	struct stack_cache	*cache;
	vm_offset_t		stack;

	cache = &PROCESSOR_DATA(current_processor(), stack_cache);
	stack = cache->free;
	if (stack != 0) {
		cache->free = stack_next(stack);
		cache->count--;
	}
	else {
		if (stack_free_list != 0) {
			stack_lock();
			stack = stack_free_list;
			if (stack != 0) {
				stack_free_list = stack_next(stack);
				stack_free_count--;
				stack_free_delta--;
			}
			stack_unlock();
		}
	}

	if (stack != 0 || (stack = thread->reserved_stack) != 0) {
		machine_stack_attach(thread, stack);
		return (TRUE);
	}

	return (FALSE);
}

static unsigned int stack_collect_tick, last_stack_tick;

/*
 *	stack_collect:
 *
 *	Free excess kernel stacks, may
 *	block.
 */
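/*
 *	The stack lock is dropped (and splsched exited) around each
 *	vm_map_remove call, since unmapping may block; the target is
 *	then recomputed after the lock is retaken, because the free
 *	list can change while it is unlocked.
 */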
void
stack_collect(void)
{
	if (stack_collect_tick != last_stack_tick) {
		unsigned int	target;
		vm_offset_t	stack;
		spl_t		s;

		s = splsched();
		stack_lock();

		target = stack_free_target + (STACK_CACHE_SIZE * processor_count);
		target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;

		while (stack_free_count > target) {
			stack = stack_free_list;
			stack_free_list = stack_next(stack);
			stack_free_count--; stack_total--;
			stack_unlock();
			splx(s);

			if (vm_map_remove(stack_map, vm_map_trunc_page(stack),
					vm_map_round_page(stack + KERNEL_STACK_SIZE), VM_MAP_REMOVE_KUNWIRE) != KERN_SUCCESS)
				panic("stack_collect: vm_map_remove");

			s = splsched();
			stack_lock();

			target = stack_free_target + (STACK_CACHE_SIZE * processor_count);
			target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;
		}

		last_stack_tick = stack_collect_tick;

		stack_unlock();
		splx(s);
	}
}

/*
 *	compute_stack_target:
 *
 *	Computes a new target free list count
 *	based on recent alloc / free activity.
 *
 *	Limits stack collection to once per
 *	computation period.
 */
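/*
 *	Each period the target decays toward zero (by a factor of
 *	4/5 when above 5, by one otherwise) and is then bumped by
 *	the magnitude of the net alloc/free swing since the last
 *	period.  For example, a target of 10 with a delta of -3
 *	becomes (4 * 10) / 5 + 3 = 11.
 */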
void
compute_stack_target(
__unused void		*arg)
{
	spl_t		s;

	s = splsched();
	stack_lock();

	if (stack_free_target > 5)
		stack_free_target = (4 * stack_free_target) / 5;
	else
	if (stack_free_target > 0)
		stack_free_target--;

	stack_free_target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;

	stack_free_delta = 0;
	stack_collect_tick++;

	stack_unlock();
	splx(s);
}

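/*
 *	stack_fake_zone_info:
 *
 *	Report stack statistics in the shape of a zone, so that
 *	zone-reporting interfaces can list kernel stacks alongside
 *	real zones (hence "fake").
 */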
void
stack_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
		     vm_size_t *alloc_size, int *collectable, int *exhaustable)
{
	unsigned int	total, hiwat, free;
	spl_t		s;

	s = splsched();
	stack_lock();
	total = stack_total;
	hiwat = stack_hiwat;
	free = stack_free_count;
	stack_unlock();
	splx(s);

	*count      = total - free;
	*cur_size   = KERNEL_STACK_SIZE * total;
	*max_size   = KERNEL_STACK_SIZE * hiwat;
	*elem_size  = KERNEL_STACK_SIZE;
	*alloc_size = KERNEL_STACK_SIZE;
	*collectable = 1;
	*exhaustable = 0;
}

/* OBSOLETE */
void	stack_privilege(
			thread_t	thread);

void
stack_privilege(
	__unused thread_t	thread)
{
	/* OBSOLETE */
}

/*
 *	Return info on stack usage for threads in a specific processor set
 */
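/*
 *	Uses the classic Mach sizing loop: guess a buffer size, lock
 *	the pset, and if the thread count has outgrown the buffer,
 *	unlock, reallocate, and try again.  Note that maxusage and
 *	maxstack are always returned as zero here; per-stack usage
 *	is not tracked by this implementation.
 */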
kern_return_t
processor_set_stack_usage(
	processor_set_t	pset,
	unsigned int	*totalp,
	vm_size_t	*spacep,
	vm_size_t	*residentp,
	vm_size_t	*maxusagep,
	vm_offset_t	*maxstackp)
{
#if !MACH_DEBUG
	return KERN_NOT_SUPPORTED;
#else
	unsigned int total;
	vm_size_t maxusage;
	vm_offset_t maxstack;

	register thread_t *threads;
	register thread_t thread;

	unsigned int actual;	/* this many things */
	unsigned int i;

	vm_size_t size, size_needed;
	void *addr;

	if (pset == PROCESSOR_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	size = 0; addr = 0;

	for (;;) {
		pset_lock(pset);
		if (!pset->active) {
			pset_unlock(pset);
			return KERN_INVALID_ARGUMENT;
		}

		actual = pset->thread_count;

		/* do we have the memory we need? */

		size_needed = actual * sizeof(thread_t);
		if (size_needed <= size)
			break;

		/* unlock the pset and allocate more memory */
		pset_unlock(pset);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return KERN_RESOURCE_SHORTAGE;
	}

	/* OK, have memory and the processor_set is locked & active */
	threads = (thread_t *) addr;
	for (i = 0, thread = (thread_t) queue_first(&pset->threads);
			!queue_end(&pset->threads, (queue_entry_t) thread);
			thread = (thread_t) queue_next(&thread->pset_threads)) {
		thread_reference_internal(thread);
		threads[i++] = thread;
	}
	assert(i <= actual);

	/* can unlock processor set now that we have the thread refs */
	pset_unlock(pset);

	/* calculate maxusage and free thread references */

	total = 0;
	maxusage = 0;
	maxstack = 0;
	while (i > 0) {
		thread_t threadref = threads[--i];

		if (threadref->kernel_stack != 0)
			total++;

		thread_deallocate(threadref);
	}

	if (size != 0)
		kfree(addr, size);

	*totalp = total;
	*residentp = *spacep = total * round_page(KERNEL_STACK_SIZE);
	*maxusagep = maxusage;
	*maxstackp = maxstack;
	return KERN_SUCCESS;

#endif	/* MACH_DEBUG */
}

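/*
 *	Bounds of the stack sub-map; every mapped address in
 *	[min, max) belongs to a kernel stack.  Useful, e.g., for
 *	validating a candidate frame pointer during a backtrace.
 */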
vm_offset_t min_valid_stack_address(void)
{
	return vm_map_min(stack_map);
}

vm_offset_t max_valid_stack_address(void)
{
	return vm_map_max(stack_map);
}