/*
 * Copyright (c) 2003-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 *	Kernel stack management routines.
 */

#include <mach/mach_host.h>
#include <mach/mach_types.h>
#include <mach/processor_set.h>

#include <kern/kern_types.h>
#include <kern/mach_param.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <mach_debug.h>

/*
 *	We allocate stacks from generic kernel VM.
 *
 *	The stack_free_list can only be accessed at splsched,
 *	because stack_alloc_try/thread_invoke operate at splsched.
 */

decl_simple_lock_data(static,stack_lock_data)
#define stack_lock()		simple_lock(&stack_lock_data)
#define stack_unlock()		simple_unlock(&stack_lock_data)

#define STACK_CACHE_SIZE	2

static vm_map_t			stack_map;
static vm_offset_t		stack_free_list;

static unsigned int		stack_free_count, stack_free_hiwat;	/* free list count */
static unsigned int		stack_total, stack_hiwat;		/* current total count */

static unsigned int		stack_free_target;
static int			stack_free_delta;

static unsigned int		stack_new_count;	/* total new stack allocations */

static vm_offset_t		stack_addr_mask;

/*
 *	The next field is at the base of the stack,
 *	so the low end is left unsullied.
 */
#define stack_next(stack)	\
	(*((vm_offset_t *)((stack) + KERNEL_STACK_SIZE) - 1))
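
/*
 *	Note: stack_next() stores the free-list link in the last
 *	vm_offset_t-sized word of the stack region, i.e. at
 *	stack + KERNEL_STACK_SIZE - sizeof(vm_offset_t).  Kernel stacks grow
 *	downward from that high end, so the link is meaningful only while a
 *	stack sits on a free list, and list management never dirties the
 *	stack's low pages.
 */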

void
stack_init(void)
{
	vm_offset_t		stacks, boundary;
	vm_map_offset_t		map_addr;

	simple_lock_init(&stack_lock_data, 0);

	if (KERNEL_STACK_SIZE < round_page(KERNEL_STACK_SIZE))
		panic("stack_init: stack size %d not a multiple of page size %d\n", KERNEL_STACK_SIZE, PAGE_SIZE);

	for (boundary = PAGE_SIZE; boundary <= KERNEL_STACK_SIZE; )
		boundary <<= 1;

	stack_addr_mask = boundary - 1;

	if (kmem_suballoc(kernel_map, &stacks, (boundary * (2 * THREAD_MAX + 64)),
			  FALSE, VM_FLAGS_ANYWHERE, &stack_map) != KERN_SUCCESS)
		panic("stack_init: kmem_suballoc");

	map_addr = vm_map_min(stack_map);
	if (vm_map_enter(stack_map, &map_addr, vm_map_round_page(PAGE_SIZE), 0,
			 (VM_MAKE_TAG(VM_MEMORY_STACK) | VM_FLAGS_FIXED),
			 VM_OBJECT_NULL, 0, FALSE, VM_PROT_NONE, VM_PROT_NONE,
			 VM_INHERIT_DEFAULT) != KERN_SUCCESS)
		panic("stack_init: vm_map_enter");
}
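
/*
 *	Sizing note: the doubling loop leaves boundary at the first power of
 *	two strictly greater than KERNEL_STACK_SIZE (with 4K pages and a 16K
 *	stack, boundary is 32K and stack_addr_mask is 0x7fff), and
 *	kernel_memory_allocate() later uses stack_addr_mask to align every
 *	stack to that boundary.  The PAGE_SIZE entry mapped VM_PROT_NONE at
 *	the bottom of the submap is never usable memory; it appears intended
 *	as a permanent guard at vm_map_min(stack_map).
 */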

/*
 *	stack_alloc:
 *
 *	Allocate a stack for a thread, may
 *	block.
 */
void
stack_alloc(
	thread_t	thread)
{
	vm_offset_t		stack;
	spl_t			s;

	assert(thread->kernel_stack == 0);

	s = splsched();
	stack_lock();
	stack = stack_free_list;
	if (stack != 0) {
		stack_free_list = stack_next(stack);
		stack_free_count--;
	}
	else {
		if (++stack_total > stack_hiwat)
			stack_hiwat = stack_total;
		stack_new_count++;
	}
	stack_free_delta--;
	stack_unlock();
	splx(s);

	if (stack == 0) {
		if (kernel_memory_allocate(stack_map, &stack, KERNEL_STACK_SIZE, stack_addr_mask, KMA_KOBJECT) != KERN_SUCCESS)
			panic("stack_alloc: kernel_memory_allocate");
	}

	machine_stack_attach(thread, stack);
}
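
/*
 *	The free list is popped and the counters updated under stack_lock at
 *	splsched, but the slow-path kernel_memory_allocate() call is made
 *	only after the lock is dropped: the allocation can block, which is
 *	not legal while holding a simple lock at splsched.
 */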

/*
 *	stack_free:
 *
 *	Detach and free the stack for a thread.
 */
void
stack_free(
	thread_t	thread)
{
	vm_offset_t		stack = machine_stack_detach(thread);

	assert(stack);
	if (stack != thread->reserved_stack) {
		struct stack_cache	*cache;
		spl_t			s;

		s = splsched();
		cache = &PROCESSOR_DATA(current_processor(), stack_cache);
		if (cache->count < STACK_CACHE_SIZE) {
			stack_next(stack) = cache->free;
			cache->free = stack;
			cache->count++;
		}
		else {
			stack_lock();
			stack_next(stack) = stack_free_list;
			stack_free_list = stack;
			if (++stack_free_count > stack_free_hiwat)
				stack_free_hiwat = stack_free_count;
			stack_free_delta++;
			stack_unlock();
		}
		splx(s);
	}
}

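/*
 *	stack_free_stack applies the same caching policy as stack_free for
 *	callers holding a bare stack with no thread attached: the stack goes
 *	to the current processor's cache first, and overflows to the global
 *	free list once the cache holds STACK_CACHE_SIZE entries.
 */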
void
stack_free_stack(
	vm_offset_t		stack)
{
	struct stack_cache	*cache;
	spl_t			s;

	s = splsched();
	cache = &PROCESSOR_DATA(current_processor(), stack_cache);
	if (cache->count < STACK_CACHE_SIZE) {
		stack_next(stack) = cache->free;
		cache->free = stack;
		cache->count++;
	}
	else {
		stack_lock();
		stack_next(stack) = stack_free_list;
		stack_free_list = stack;
		if (++stack_free_count > stack_free_hiwat)
			stack_free_hiwat = stack_free_count;
		stack_free_delta++;
		stack_unlock();
	}
	splx(s);
}

/*
 *	stack_alloc_try:
 *
 *	Non-blocking attempt to allocate a
 *	stack for a thread.
 *
 *	Returns TRUE on success.
 *
 *	Called at splsched.
 */
boolean_t
stack_alloc_try(
	thread_t		thread)
{
	struct stack_cache	*cache;
	vm_offset_t		stack;

	cache = &PROCESSOR_DATA(current_processor(), stack_cache);
	stack = cache->free;
	if (stack != 0) {
		cache->free = stack_next(stack);
		cache->count--;
	}
	else {
		if (stack_free_list != 0) {
			stack_lock();
			stack = stack_free_list;
			if (stack != 0) {
				stack_free_list = stack_next(stack);
				stack_free_count--;
				stack_free_delta--;
			}
			stack_unlock();
		}
	}

	if (stack != 0 || (stack = thread->reserved_stack) != 0) {
		machine_stack_attach(thread, stack);
		return (TRUE);
	}

	return (FALSE);
}
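
/*
 *	The unlocked read of stack_free_list in stack_alloc_try is an
 *	optimistic check: it can race with other processors, so the head is
 *	re-read under stack_lock before the pop.  A stale non-NULL costs one
 *	lock round trip; a stale NULL just falls through to reserved_stack
 *	or a FALSE return.
 */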

static unsigned int		stack_collect_tick, last_stack_tick;

/*
 *	stack_collect:
 *
 *	Free excess kernel stacks, may
 *	block.
 */
void
stack_collect(void)
{
	if (stack_collect_tick != last_stack_tick) {
		unsigned int	target;
		vm_offset_t	stack;
		spl_t		s;

		s = splsched();
		stack_lock();

		target = stack_free_target + (STACK_CACHE_SIZE * processor_count);
		target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;

		while (stack_free_count > target) {
			stack = stack_free_list;
			stack_free_list = stack_next(stack);
			stack_free_count--; stack_total--;
			stack_unlock();
			splx(s);

			if (vm_map_remove(stack_map, vm_map_trunc_page(stack),
					  vm_map_round_page(stack + KERNEL_STACK_SIZE), VM_MAP_REMOVE_KUNWIRE) != KERN_SUCCESS)
				panic("stack_collect: vm_map_remove");

			s = splsched();
			stack_lock();

			target = stack_free_target + (STACK_CACHE_SIZE * processor_count);
			target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;
		}

		last_stack_tick = stack_collect_tick;

		stack_unlock();
		splx(s);
	}
}
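
/*
 *	stack_collect drops stack_lock and splsched around each
 *	vm_map_remove(), since unmapping may block; because the free list
 *	can change while the lock is released, both the list head and the
 *	target are re-evaluated on every iteration rather than computed once.
 */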

/*
 *	compute_stack_target:
 *
 *	Computes a new target free list count
 *	based on recent alloc / free activity.
 *
 *	Limits stack collection to once per
 *	computation period.
 */
void
compute_stack_target(
__unused void		*arg)
{
	spl_t		s;

	s = splsched();
	stack_lock();

	if (stack_free_target > 5)
		stack_free_target = (4 * stack_free_target) / 5;
	else
	if (stack_free_target > 0)
		stack_free_target--;

	stack_free_target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;

	stack_free_delta = 0;
	stack_collect_tick++;

	stack_unlock();
	splx(s);
}
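
/*
 *	Worked example: with stack_free_target at 20 and ten more frees than
 *	allocations in the period (stack_free_delta == 10), the new target is
 *	(4 * 20) / 5 + 10 == 26.  The target thus tracks the recent
 *	allocation/free imbalance while decaying by 20% per period once it
 *	exceeds 5.
 */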

void
stack_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
		     vm_size_t *alloc_size, int *collectable, int *exhaustable)
{
	unsigned int	total, hiwat, free;
	spl_t		s;

	s = splsched();
	stack_lock();
	total = stack_total;
	hiwat = stack_hiwat;
	free = stack_free_count;
	stack_unlock();
	splx(s);

	*count       = total - free;
	*cur_size    = KERNEL_STACK_SIZE * total;
	*max_size    = KERNEL_STACK_SIZE * hiwat;
	*elem_size   = KERNEL_STACK_SIZE;
	*alloc_size  = KERNEL_STACK_SIZE;
	*collectable = 1;
	*exhaustable = 0;
}
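
/*
 *	stack_fake_zone_info presents kernel stacks as a pseudo-zone,
 *	presumably so that zone-reporting interfaces (and tools built on
 *	them, such as zprint) can show stack usage alongside real zalloc
 *	zones even though stacks are not allocated from a zone.
 */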

/* OBSOLETE */
void	stack_privilege(
			thread_t	thread);

void
stack_privilege(
	__unused thread_t	thread)
{
	/* OBSOLETE */
}

/*
 *	Return info on stack usage for threads in a specific processor set
 */
kern_return_t
processor_set_stack_usage(
	processor_set_t	pset,
	unsigned int	*totalp,
	vm_size_t	*spacep,
	vm_size_t	*residentp,
	vm_size_t	*maxusagep,
	vm_offset_t	*maxstackp)
{
#if !MACH_DEBUG
	return KERN_NOT_SUPPORTED;
#else
	unsigned int total;
	vm_size_t maxusage;
	vm_offset_t maxstack;

	register thread_t *threads;
	register thread_t thread;

	unsigned int actual;	/* this many things */
	unsigned int i;

	vm_size_t size, size_needed;
	void *addr;

	if (pset == PROCESSOR_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	size = 0; addr = 0;

	for (;;) {
		pset_lock(pset);
		if (!pset->active) {
			pset_unlock(pset);
			return KERN_INVALID_ARGUMENT;
		}

		actual = pset->thread_count;

		/* do we have the memory we need? */

		size_needed = actual * sizeof(thread_t);
		if (size_needed <= size)
			break;

		/* unlock the pset and allocate more memory */
		pset_unlock(pset);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return KERN_RESOURCE_SHORTAGE;
	}
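
	/*
	 *	The loop above is the classic allocate/relock/recheck pattern:
	 *	the pset lock cannot be held across kalloc() (which may block),
	 *	so the buffer is sized from a snapshot of thread_count and, if
	 *	new threads appeared while the lock was dropped, the buffer is
	 *	resized and the snapshot retried.
	 */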

	/* OK, have memory and the processor_set is locked & active */
	threads = (thread_t *) addr;
	for (i = 0, thread = (thread_t) queue_first(&pset->threads);
	     !queue_end(&pset->threads, (queue_entry_t) thread);
	     thread = (thread_t) queue_next(&thread->pset_threads)) {
		thread_reference_internal(thread);
		threads[i++] = thread;
	}
	assert(i <= actual);

	/* can unlock processor set now that we have the thread refs */
	pset_unlock(pset);

	/* calculate maxusage and free thread references */

	total = 0;
	maxusage = 0;
	maxstack = 0;
	while (i > 0) {
		thread_t threadref = threads[--i];

		if (threadref->kernel_stack != 0)
			total++;

		thread_deallocate(threadref);
	}

	if (size != 0)
		kfree(addr, size);

	*totalp = total;
	*residentp = *spacep = total * round_page(KERNEL_STACK_SIZE);
	*maxusagep = maxusage;
	*maxstackp = maxstack;
	return KERN_SUCCESS;

#endif	/* MACH_DEBUG */
}

vm_offset_t min_valid_stack_address(void)
{
	return vm_map_min(stack_map);
}

vm_offset_t max_valid_stack_address(void)
{
	return vm_map_max(stack_map);
}