1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25 /*
26 * @OSF_FREE_COPYRIGHT@
27 */
28 /*
29 * Mach Operating System
30 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
31 * All Rights Reserved.
32 *
33 * Permission to use, copy, modify and distribute this software and its
34 * documentation is hereby granted, provided that both the copyright
35 * notice and this permission notice appear in all copies of the
36 * software, derivative works or modified versions, and any portions
37 * thereof, and that both notices appear in supporting documentation.
38 *
39 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
40 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
41 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
42 *
43 * Carnegie Mellon requests users of this software to return to
44 *
45 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
46 * School of Computer Science
47 * Carnegie Mellon University
48 * Pittsburgh PA 15213-3890
49 *
50 * any improvements or extensions that they make and grant Carnegie Mellon
51 * the rights to redistribute these changes.
52 */
53 /*
54 */
55 /*
56 * File: kern/thread.c
57 * Author: Avadis Tevanian, Jr., Michael Wayne Young, David Golub
58 * Date: 1986
59 *
60 * Thread/thread_shuttle management primitives implementation.
61 */
62 /*
63 * Copyright (c) 1993 The University of Utah and
64 * the Computer Systems Laboratory (CSL). All rights reserved.
65 *
66 * Permission to use, copy, modify and distribute this software and its
67 * documentation is hereby granted, provided that both the copyright
68 * notice and this permission notice appear in all copies of the
69 * software, derivative works or modified versions, and any portions
70 * thereof, and that both notices appear in supporting documentation.
71 *
72 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
73 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
74 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
75 *
76 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
77 * improvements that they make and grant CSL redistribution rights.
78 *
79 */
80
81 #include <cpus.h>
82 #include <mach_host.h>
83 #include <simple_clock.h>
84 #include <mach_debug.h>
85 #include <mach_prof.h>
86
87 #include <mach/boolean.h>
88 #include <mach/policy.h>
89 #include <mach/thread_info.h>
90 #include <mach/thread_special_ports.h>
91 #include <mach/thread_status.h>
92 #include <mach/time_value.h>
93 #include <mach/vm_param.h>
94 #include <kern/ast.h>
95 #include <kern/cpu_data.h>
96 #include <kern/counters.h>
97 #include <kern/etap_macros.h>
98 #include <kern/ipc_mig.h>
99 #include <kern/ipc_tt.h>
100 #include <kern/mach_param.h>
101 #include <kern/machine.h>
102 #include <kern/misc_protos.h>
103 #include <kern/processor.h>
104 #include <kern/queue.h>
105 #include <kern/sched.h>
106 #include <kern/sched_prim.h>
107 #include <kern/mk_sp.h> /*** ??? fix so this can be removed ***/
108 #include <kern/task.h>
109 #include <kern/thread.h>
110 #include <kern/thread_act.h>
111 #include <kern/thread_swap.h>
112 #include <kern/host.h>
113 #include <kern/zalloc.h>
114 #include <vm/vm_kern.h>
115 #include <ipc/ipc_kmsg.h>
116 #include <ipc/ipc_port.h>
117 #include <machine/thread.h> /* for MACHINE_STACK */
118 #include <kern/profile.h>
119 #include <kern/assert.h>
120 #include <sys/kdebug.h>
121
122 /*
123 * Exported interfaces
124 */
125
126 #include <mach/thread_act_server.h>
127 #include <mach/mach_host_server.h>
128
129 /*
130 * Per-Cpu stashed global state
131 */
132 vm_offset_t active_stacks[NCPUS]; /* per-cpu active stacks */
133 vm_offset_t kernel_stack[NCPUS]; /* top of active stacks */
134 thread_act_t active_kloaded[NCPUS]; /* + act if kernel loaded */
135 boolean_t first_thread;
136
137 struct zone *thread_shuttle_zone;
138
139 queue_head_t reaper_queue;
140 decl_simple_lock_data(,reaper_lock)
141
142 extern int tick;
143
144 extern void pcb_module_init(void);
145
146 struct thread_shuttle pageout_thread;
147
148 /* private */
149 static struct thread_shuttle thr_sh_template;
150
151 #if MACH_DEBUG
152
153 #ifdef MACHINE_STACK
154 extern void stack_statistics(
155 unsigned int *totalp,
156 vm_size_t *maxusagep);
157 #endif /* MACHINE_STACK */
158 #endif /* MACH_DEBUG */
159
160 /* Forwards */
161 void thread_collect_scan(void);
162
163 kern_return_t thread_create_shuttle(
164 thread_act_t thr_act,
165 integer_t priority,
166 void (*start)(void),
167 thread_t *new_thread);
168
169 extern void Load_context(
170 thread_t thread);
171
172
173 /*
174 * Machine-dependent code must define:
175 * thread_machine_init
176 * thread_machine_terminate
177 * thread_machine_collect
178 *
179 * The thread->pcb field is reserved for machine-dependent code.
180 */
181
182 #ifdef MACHINE_STACK
183 /*
184 * Machine-dependent code must define:
185 * stack_alloc_try
186 * stack_alloc
187 * stack_free
188 * stack_free_stack
189 * stack_collect
190 * and if MACH_DEBUG:
191 * stack_statistics
192 */
193 #else /* MACHINE_STACK */
194 /*
195 * We allocate stacks from generic kernel VM.
196 * Machine-dependent code must define:
197 * machine_kernel_stack_init
198 *
199 * The stack_free_list can only be accessed at splsched,
200 * because stack_alloc_try/thread_invoke operate at splsched.
201 */
202
203 decl_simple_lock_data(,stack_lock_data) /* splsched only */
204 #define stack_lock() simple_lock(&stack_lock_data)
205 #define stack_unlock() simple_unlock(&stack_lock_data)
206
207 mutex_t stack_map_lock;	/* Lock when allocating stack maps */
208 vm_map_t stack_map; /* Map for allocating stacks */
209 vm_offset_t stack_free_list; /* splsched only */
210 unsigned int stack_free_max = 0;
211 unsigned int stack_free_count = 0; /* splsched only */
212 unsigned int stack_free_limit = 1; /* Arbitrary */
213
214 unsigned int stack_alloc_hits = 0; /* debugging */
215 unsigned int stack_alloc_misses = 0; /* debugging */
216
217 unsigned int stack_alloc_total = 0;
218 unsigned int stack_alloc_hiwater = 0;
219 unsigned int stack_alloc_bndry = 0;
220
221
222 /*
223  * The next field is stored at the base (highest address) of the
224  * stack, so the low end is left unsullied.
225 */
226
227 #define stack_next(stack) (*((vm_offset_t *)((stack) + KERNEL_STACK_SIZE) - 1))
228
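/*
 * Illustrative sketch (a descriptive note, not new mechanism): the free
 * list is threaded through the free stacks themselves, using the word
 * that stack_next() names at the top of each stack.  Pushing and popping
 * therefore look like
 *
 *	stack_next(stack) = stack_free_list;  stack_free_list = stack;
 *	stack = stack_free_list;  stack_free_list = stack_next(stack);
 *
 * exactly as stack_free() and stack_alloc() below do under stack_lock().
 */
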
229 /*
230 * stack_alloc:
231 *
232 * Allocate a kernel stack for an activation.
233 * May block.
234 */
235 vm_offset_t
236 stack_alloc(
237 thread_t thread,
238 void (*start_pos)(thread_t))
239 {
240 vm_offset_t stack = thread->kernel_stack;
241 spl_t s;
242
243 if (stack)
244 return (stack);
245
246 /*
247  * We first try the free list.  It is probably empty; otherwise
248  * stack_alloc_try would have succeeded.  But possibly a stack was
249  * freed before the swapin thread got to us.
250  *
251  * We allocate stacks from their own map, which is a submap of the
252  * kernel map.  Because we want to have a guard page (at least) in
253  * front of each stack to catch evil code that overruns its stack, we
254  * allocate the stack on aligned boundaries.  The boundary is
255  * calculated as the next power of 2 above the stack size.  For
256  * example, a stack of 4 pages would have a boundary of 8 pages;
257  * likewise, a stack of 5 pages would also round up to 8.
258  *
259  * We limit the number of stacks to be one allocation chunk
260  * (THREAD_CHUNK) more than the maximum number of threads
261  * (THREAD_MAX).  The extra is to allow for privileged threads that
262  * can sometimes have 2 stacks.
263 *
264 */
265
266 s = splsched();
267 stack_lock();
268 stack = stack_free_list;
269 if (stack != 0) {
270 stack_free_list = stack_next(stack);
271 stack_free_count--;
272 }
273 stack_unlock();
274 splx(s);
275
276 if (stack != 0) { /* Did we find a free one? */
277 stack_attach(thread, stack, start_pos); /* Initialize it */
278 return (stack); /* Send it on home */
279 }
280
281 if (kernel_memory_allocate(
282 stack_map, &stack,
283 KERNEL_STACK_SIZE, stack_alloc_bndry - 1,
284 KMA_KOBJECT) != KERN_SUCCESS)
285 panic("stack_alloc: no space left for stack maps");
286
287 stack_alloc_total++;
288 if (stack_alloc_total > stack_alloc_hiwater)
289 stack_alloc_hiwater = stack_alloc_total;
290
291 stack_attach(thread, stack, start_pos);
292 return (stack);
293 }
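
/*
 * Worked example (a sketch, assuming 4 KB pages and a four-page 16 KB
 * kernel stack, as in the comment above): thread_init() below computes
 * stack_alloc_bndry as the next power of two above KERNEL_STACK_SIZE,
 * i.e. 32 KB, so the mask handed to kernel_memory_allocate() is 0x7FFF
 * and every stack begins on a 32 KB boundary.  Only 16 KB of each 32 KB
 * slot is ever mapped, which leaves an unmapped gap in front of the next
 * stack to serve as the guard region discussed above.
 */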
294
295 /*
296 * stack_free:
297 *
298 * Free a kernel stack.
299 * Called at splsched.
300 */
301
302 void
303 stack_free(
304 thread_t thread)
305 {
306 vm_offset_t stack = stack_detach(thread);
307
308 assert(stack);
309 if (stack != thread->stack_privilege) {
310 stack_lock();
311 stack_next(stack) = stack_free_list;
312 stack_free_list = stack;
313 if (++stack_free_count > stack_free_max)
314 stack_free_max = stack_free_count;
315 stack_unlock();
316 }
317 }
318
319 static void
320 stack_free_stack(
321 vm_offset_t stack)
322 {
323 spl_t s;
324
325 s = splsched();
326 stack_lock();
327 stack_next(stack) = stack_free_list;
328 stack_free_list = stack;
329 if (++stack_free_count > stack_free_max)
330 stack_free_max = stack_free_count;
331 stack_unlock();
332 splx(s);
333 }
334
335 /*
336 * stack_collect:
337 *
338 * Free excess kernel stacks.
339 * May block.
340 */
341
342 void
343 stack_collect(void)
344 {
345 vm_offset_t stack;
346 int i;
347 spl_t s;
348
349 s = splsched();
350 stack_lock();
351 while (stack_free_count > stack_free_limit) {
352 stack = stack_free_list;
353 stack_free_list = stack_next(stack);
354 stack_free_count--;
355 stack_unlock();
356 splx(s);
357
358 if (vm_map_remove(
359 stack_map, stack, stack + KERNEL_STACK_SIZE,
360 VM_MAP_REMOVE_KUNWIRE) != KERN_SUCCESS)
361 panic("stack_collect: vm_map_remove failed");
362
363 s = splsched();
364 stack_lock();
365 stack_alloc_total--;
366 }
367 stack_unlock();
368 splx(s);
369 }
370
371
372 #if MACH_DEBUG
373 /*
374 * stack_statistics:
375 *
376 * Return statistics on cached kernel stacks.
377 * *maxusagep must be initialized by the caller.
378 */
379
380 void
381 stack_statistics(
382 unsigned int *totalp,
383 vm_size_t *maxusagep)
384 {
385 spl_t s;
386
387 s = splsched();
388 stack_lock();
389
390 *totalp = stack_free_count;
391 *maxusagep = 0;
392
393 stack_unlock();
394 splx(s);
395 }
396 #endif /* MACH_DEBUG */
397
398 #endif /* MACHINE_STACK */
399
400
401 void stack_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
402 vm_size_t *alloc_size, int *collectable, int *exhaustable)
403 {
404 *count = stack_alloc_total - stack_free_count;
405 *cur_size = KERNEL_STACK_SIZE * stack_alloc_total;
406 *max_size = KERNEL_STACK_SIZE * stack_alloc_hiwater;
407 *elem_size = KERNEL_STACK_SIZE;
408 *alloc_size = KERNEL_STACK_SIZE;
409 *collectable = 1;
410 *exhaustable = 0;
411 }
412
413
414 /*
415 * stack_privilege:
416 *
417 * stack_alloc_try on this thread must always succeed.
418 */
419
420 void
421 stack_privilege(
422 register thread_t thread)
423 {
424 /*
425 * This implementation only works for the current thread.
426 */
427
428 if (thread != current_thread())
429 panic("stack_privilege");
430
431 if (thread->stack_privilege == 0)
432 thread->stack_privilege = current_stack();
433 }
434
435 /*
436 * stack_alloc_try:
437 *
438 * Non-blocking attempt to allocate a kernel stack.
439 * Called at splsched with the thread locked.
440 */
441
442 boolean_t stack_alloc_try(
443 thread_t thread,
444 void (*start_pos)(thread_t))
445 {
446 register vm_offset_t stack = thread->stack_privilege;
447
448 if (stack == 0) {
449 stack_lock();
450
451 stack = stack_free_list;
452 if (stack != (vm_offset_t)0) {
453 stack_free_list = stack_next(stack);
454 stack_free_count--;
455 }
456
457 stack_unlock();
458 }
459
460 if (stack != 0) {
461 stack_attach(thread, stack, start_pos);
462 stack_alloc_hits++;
463
464 return (TRUE);
465 }
466 else {
467 stack_alloc_misses++;
468
469 return (FALSE);
470 }
471 }
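
/*
 * Usage note (descriptive, based on the comments above): stack_alloc_try
 * is the non-blocking fast path used at splsched by the context-switch
 * code (thread_invoke).  It succeeds only when the thread has a
 * privileged stack or one is already sitting on the free list; on a
 * miss the thread goes to the swapin path, which eventually calls
 * stack_alloc() above and may block in kernel_memory_allocate().
 */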
472
473 uint64_t max_unsafe_computation;
474 extern int max_unsafe_quanta;
475
476 uint32_t sched_safe_duration;
477
478 uint64_t max_poll_computation;
479 extern int max_poll_quanta;
480
481 uint32_t std_quantum;
482 uint32_t min_std_quantum;
483
484 uint32_t max_rt_quantum;
485 uint32_t min_rt_quantum;
486
487 void
488 thread_init(void)
489 {
490 kern_return_t ret;
491 unsigned int stack;
492
493 thread_shuttle_zone = zinit(
494 sizeof(struct thread_shuttle),
495 THREAD_MAX * sizeof(struct thread_shuttle),
496 THREAD_CHUNK * sizeof(struct thread_shuttle),
497 "threads");
498
499 /*
500 * Fill in a template thread_shuttle for fast initialization.
501 * [Fields that must be (or are typically) reset at
502 * time of creation are so noted.]
503 */
504
505 /* thr_sh_template.links (none) */
506 thr_sh_template.runq = RUN_QUEUE_NULL;
507
508
509 /* thr_sh_template.task (later) */
510 /* thr_sh_template.thread_list (later) */
511 /* thr_sh_template.pset_threads (later) */
512
513 /* reference for activation */
514 thr_sh_template.ref_count = 1;
515
516 thr_sh_template.reason = AST_NONE;
517 thr_sh_template.at_safe_point = FALSE;
518 thr_sh_template.wait_event = NO_EVENT64;
519 thr_sh_template.wait_queue = WAIT_QUEUE_NULL;
520 thr_sh_template.wait_result = THREAD_WAITING;
521 thr_sh_template.interrupt_level = THREAD_ABORTSAFE;
522 thr_sh_template.state = TH_STACK_HANDOFF | TH_WAIT | TH_UNINT;
523 thr_sh_template.wake_active = FALSE;
524 thr_sh_template.active_callout = FALSE;
525 thr_sh_template.continuation = (void (*)(void))0;
526 thr_sh_template.top_act = THR_ACT_NULL;
527
528 thr_sh_template.importance = 0;
529 thr_sh_template.sched_mode = 0;
530 thr_sh_template.safe_mode = 0;
531
532 thr_sh_template.priority = 0;
533 thr_sh_template.sched_pri = 0;
534 thr_sh_template.max_priority = 0;
535 thr_sh_template.task_priority = 0;
536 thr_sh_template.promotions = 0;
537 thr_sh_template.pending_promoter_index = 0;
538 thr_sh_template.pending_promoter[0] =
539 thr_sh_template.pending_promoter[1] = NULL;
540
541 thr_sh_template.current_quantum = 0;
542
543 thr_sh_template.computation_metered = 0;
544 thr_sh_template.computation_epoch = 0;
545
546 thr_sh_template.cpu_usage = 0;
547 thr_sh_template.cpu_delta = 0;
548 thr_sh_template.sched_usage = 0;
549 thr_sh_template.sched_delta = 0;
550 thr_sh_template.sched_stamp = 0;
551 thr_sh_template.sleep_stamp = 0;
552 thr_sh_template.safe_release = 0;
553
554 thr_sh_template.bound_processor = PROCESSOR_NULL;
555 thr_sh_template.last_processor = PROCESSOR_NULL;
556 thr_sh_template.last_switch = 0;
557
558 thr_sh_template.vm_privilege = FALSE;
559
560 timer_init(&(thr_sh_template.user_timer));
561 timer_init(&(thr_sh_template.system_timer));
562 thr_sh_template.user_timer_save.low = 0;
563 thr_sh_template.user_timer_save.high = 0;
564 thr_sh_template.system_timer_save.low = 0;
565 thr_sh_template.system_timer_save.high = 0;
566
567 thr_sh_template.active = FALSE; /* reset */
568
569 thr_sh_template.processor_set = PROCESSOR_SET_NULL;
570 #if MACH_HOST
571 thr_sh_template.may_assign = TRUE;
572 thr_sh_template.assign_active = FALSE;
573 #endif /* MACH_HOST */
574 thr_sh_template.funnel_state = 0;
575
576 /*
577 * Initialize other data structures used in
578 * this module.
579 */
580
581 queue_init(&reaper_queue);
582 simple_lock_init(&reaper_lock, ETAP_THREAD_REAPER);
583 thr_sh_template.funnel_lock = THR_FUNNEL_NULL;
584
585 #ifndef MACHINE_STACK
586 simple_lock_init(&stack_lock_data, ETAP_THREAD_STACK); /* Initialize the stack lock */
587
588 if (KERNEL_STACK_SIZE < round_page_32(KERNEL_STACK_SIZE)) { /* Kernel stacks must be multiples of pages */
589 panic("thread_init: kernel stack size (%08X) must be a multiple of page size (%08X)\n",
590 KERNEL_STACK_SIZE, PAGE_SIZE);
591 }
592
593 for(stack_alloc_bndry = PAGE_SIZE; stack_alloc_bndry <= KERNEL_STACK_SIZE; stack_alloc_bndry <<= 1); /* Find next power of 2 above stack size */
594
595 ret = kmem_suballoc(kernel_map, /* Suballocate from the kernel map */
596
597 &stack,
598 (stack_alloc_bndry * (2*THREAD_MAX + 64)), /* Allocate enough for all of it */
599 FALSE, /* Say not pageable so that it is wired */
600 TRUE, /* Allocate from anywhere */
601 &stack_map); /* Allocate a submap */
602
603 if(ret != KERN_SUCCESS) { /* Did we get one? */
604 panic("thread_init: kmem_suballoc for stacks failed - ret = %d\n", ret); /* Die */
605 }
606 stack = vm_map_min(stack_map); /* Make sure we skip the first hunk */
607 ret = vm_map_enter(stack_map, &stack, PAGE_SIZE, 0, /* Make sure there is nothing at the start */
608 0, /* Force it at start */
609 VM_OBJECT_NULL, 0, /* No object yet */
610 FALSE, /* No copy */
611 VM_PROT_NONE, /* Allow no access */
612 VM_PROT_NONE, /* Allow no access */
613 VM_INHERIT_DEFAULT); /* Just be normal */
614
615 if(ret != KERN_SUCCESS) { /* Did it work? */
616 panic("thread_init: dummy alignment allocation failed; ret = %d\n", ret);
617 }
618
619 #endif /* MACHINE_STACK */
620
621 #if MACH_LDEBUG
622 thr_sh_template.mutex_count = 0;
623 #endif /* MACH_LDEBUG */
624
625 {
626 uint64_t abstime;
627
628 clock_interval_to_absolutetime_interval(
629 std_quantum_us, NSEC_PER_USEC, &abstime);
630 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
631 std_quantum = abstime;
632
633 /* 250 us */
634 clock_interval_to_absolutetime_interval(250, NSEC_PER_USEC, &abstime);
635 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
636 min_std_quantum = abstime;
637
638 /* 50 us */
639 clock_interval_to_absolutetime_interval(50, NSEC_PER_USEC, &abstime);
640 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
641 min_rt_quantum = abstime;
642
643 /* 50 ms */
644 clock_interval_to_absolutetime_interval(
645 50, 1000*NSEC_PER_USEC, &abstime);
646 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
647 max_rt_quantum = abstime;
648
649 max_unsafe_computation = max_unsafe_quanta * std_quantum;
650 max_poll_computation = max_poll_quanta * std_quantum;
651
652 sched_safe_duration = 2 * max_unsafe_quanta *
653 (std_quantum_us / (1000 * 1000)) *
654 (1 << SCHED_TICK_SHIFT);
655 }
656
657 first_thread = TRUE;
658 /*
659 * Initialize any machine-dependent
660 * per-thread structures necessary.
661 */
662 thread_machine_init();
663 }
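
/*
 * Descriptive note on the quantum setup above (std_quantum_us itself is
 * established elsewhere; 10000 us, i.e. 10 ms, is the usual default):
 * std_quantum, min_std_quantum (250 us), min_rt_quantum (50 us) and
 * max_rt_quantum (50 ms) are all kept in absolute-time units, and each
 * conversion is asserted to fit in 32 bits.  max_unsafe_computation and
 * max_poll_computation are those quanta scaled by the max_unsafe_quanta
 * and max_poll_quanta tunables.
 */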
664
665 /*
666 * Called at splsched.
667 */
668 void
669 thread_reaper_enqueue(
670 thread_t thread)
671 {
672 simple_lock(&reaper_lock);
673 enqueue_tail(&reaper_queue, (queue_entry_t)thread);
674 simple_unlock(&reaper_lock);
675
676 thread_wakeup((event_t)&reaper_queue);
677 }
678
679 void
680 thread_termination_continue(void)
681 {
682 panic("thread_termination_continue");
683 /*NOTREACHED*/
684 }
685
686 /*
687 * Routine: thread_terminate_self
688 *
689 * This routine is called by a thread which has unwound from
690  * its current RPC and kernel contexts and found that its
691 * root activation has been marked for extinction. This lets
692 * it clean up the last few things that can only be cleaned
693 * up in this context and then impale itself on the reaper
694 * queue.
695 *
696 * When the reaper gets the thread, it will deallocate the
697 * thread_act's reference on itself, which in turn will release
698 * its own reference on this thread. By doing things in that
699 * order, a thread_act will always have a valid thread - but the
700 * thread may persist beyond having a thread_act (but must never
701 * run like that).
702 */
703 void
704 thread_terminate_self(void)
705 {
706 thread_act_t thr_act = current_act();
707 thread_t thread;
708 task_t task = thr_act->task;
709 long active_acts;
710 spl_t s;
711
712 /*
713 * We should be at the base of the inheritance chain.
714 */
715 thread = act_lock_thread(thr_act);
716 assert(thr_act->thread == thread);
717
718 /* This will allow no more control ops on this thr_act. */
719 ipc_thr_act_disable(thr_act);
720
721 /* Clean-up any ulocks that are still owned by the thread
722 * activation (acquired but not released or handed-off).
723 */
724 act_ulock_release_all(thr_act);
725
726 act_unlock_thread(thr_act);
727
728 _mk_sp_thread_depress_abort(thread, TRUE);
729
730 /*
731 * Check to see if this is the last active activation. By
732 * this we mean the last activation to call thread_terminate_self.
733 * If so, and the task is associated with a BSD process, we
734 * need to call BSD and let them clean up.
735 */
736 active_acts = hw_atomic_sub(&task->active_act_count, 1);
737
738 if (active_acts == 0 && task->bsd_info)
739 proc_exit(task->bsd_info);
740
741 /* JMM - for now, no migration */
742 assert(!thr_act->lower);
743
744 s = splsched();
745 thread_lock(thread);
746 thread->active = FALSE;
747 thread_unlock(thread);
748 splx(s);
749
750 thread_timer_terminate();
751
752 /* flush any lazy HW state while in own context */
753 thread_machine_flush(thr_act);
754
755 ipc_thread_terminate(thread);
756
757 s = splsched();
758 thread_lock(thread);
759 thread->state |= TH_TERMINATE;
760 assert((thread->state & TH_UNINT) == 0);
761 thread_mark_wait_locked(thread, THREAD_UNINT);
762 assert(thread->promotions == 0);
763 thread_unlock(thread);
764 /* splx(s); */
765
766 ETAP_SET_REASON(thread, BLOCKED_ON_TERMINATION);
767 thread_block(thread_termination_continue);
768 /*NOTREACHED*/
769 }
770
771 /*
772 * Create a new thread.
773  * Doesn't start the thread running; it first must be attached to
774  * an activation.  Then use thread_go to start it.
775 */
776 kern_return_t
777 thread_create_shuttle(
778 thread_act_t thr_act,
779 integer_t priority,
780 void (*start)(void),
781 thread_t *new_thread)
782 {
783 kern_return_t result;
784 thread_t new_shuttle;
785 task_t parent_task = thr_act->task;
786 processor_set_t pset;
787
788 /*
789 * Allocate a thread and initialize static fields
790 */
791 if (first_thread) {
792 new_shuttle = &pageout_thread;
793 first_thread = FALSE;
794 } else
795 new_shuttle = (thread_t)zalloc(thread_shuttle_zone);
796 if (new_shuttle == THREAD_NULL)
797 return (KERN_RESOURCE_SHORTAGE);
798
799 #ifdef DEBUG
800 if (new_shuttle != &pageout_thread)
801 assert(!thr_act->thread);
802 #endif
803
804 *new_shuttle = thr_sh_template;
805
806 thread_lock_init(new_shuttle);
807 wake_lock_init(new_shuttle);
808 new_shuttle->sleep_stamp = sched_tick;
809
810 /*
811 * Thread still isn't runnable yet (our caller will do
812 * that). Initialize runtime-dependent fields here.
813 */
814 result = thread_machine_create(new_shuttle, thr_act, thread_continue);
815 assert (result == KERN_SUCCESS);
816
817 thread_start(new_shuttle, start);
818 thread_timer_setup(new_shuttle);
819 ipc_thread_init(new_shuttle);
820
821 pset = parent_task->processor_set;
822 assert(pset == &default_pset);
823 pset_lock(pset);
824
825 task_lock(parent_task);
826 assert(parent_task->processor_set == pset);
827
828 /*
829 * Don't need to initialize because the context switch
830 * code will set it before it can be used.
831 */
832 if (!parent_task->active) {
833 task_unlock(parent_task);
834 pset_unlock(pset);
835 thread_machine_destroy(new_shuttle);
836 zfree(thread_shuttle_zone, (vm_offset_t) new_shuttle);
837 return (KERN_FAILURE);
838 }
839
840 act_attach(thr_act, new_shuttle, 0);
841
842 /* Chain the thr_act onto the task's list */
843 queue_enter(&parent_task->thr_acts, thr_act, thread_act_t, thr_acts);
844 parent_task->thr_act_count++;
845 parent_task->res_act_count++;
846
847 /* So terminating threads don't need to take the task lock to decrement */
848 hw_atomic_add(&parent_task->active_act_count, 1);
849
850 /* Associate the thread with the processor set */
851 pset_add_thread(pset, new_shuttle);
852
853 /* Set the thread's scheduling parameters */
854 if (parent_task != kernel_task)
855 new_shuttle->sched_mode |= TH_MODE_TIMESHARE;
856 new_shuttle->max_priority = parent_task->max_priority;
857 new_shuttle->task_priority = parent_task->priority;
858 new_shuttle->priority = (priority < 0)? parent_task->priority: priority;
859 if (new_shuttle->priority > new_shuttle->max_priority)
860 new_shuttle->priority = new_shuttle->max_priority;
861 new_shuttle->importance =
862 new_shuttle->priority - new_shuttle->task_priority;
863 new_shuttle->sched_stamp = sched_tick;
864 compute_priority(new_shuttle, FALSE);
865
866 #if ETAP_EVENT_MONITOR
867 	new_shuttle->etap_reason = 0;
868 	new_shuttle->etap_trace = FALSE;
869 #endif /* ETAP_EVENT_MONITOR */
870
871 new_shuttle->active = TRUE;
872 thr_act->active = TRUE;
873
874 *new_thread = new_shuttle;
875
876 {
877 long dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4;
878
879 KERNEL_DEBUG_CONSTANT(
880 TRACEDBG_CODE(DBG_TRACE_DATA, 1) | DBG_FUNC_NONE,
881 (vm_address_t)new_shuttle, 0, 0, 0, 0);
882
883 kdbg_trace_string(parent_task->bsd_info,
884 &dbg_arg1, &dbg_arg2, &dbg_arg3, &dbg_arg4);
885
886 KERNEL_DEBUG_CONSTANT(
887 TRACEDBG_CODE(DBG_TRACE_STRING, 1) | DBG_FUNC_NONE,
888 dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, 0);
889 }
890
891 return (KERN_SUCCESS);
892 }
893
894 extern void thread_bootstrap_return(void);
895
896 kern_return_t
897 thread_create(
898 task_t task,
899 thread_act_t *new_act)
900 {
901 kern_return_t result;
902 thread_t thread;
903 thread_act_t act;
904
905 if (task == TASK_NULL)
906 return KERN_INVALID_ARGUMENT;
907
908 result = act_create(task, &act);
909 if (result != KERN_SUCCESS)
910 return (result);
911
912 result = thread_create_shuttle(act, -1, thread_bootstrap_return, &thread);
913 if (result != KERN_SUCCESS) {
914 act_deallocate(act);
915 return (result);
916 }
917
918 act->user_stop_count = 1;
919 thread_hold(act);
920 if (task->suspend_count > 0)
921 thread_hold(act);
922
923 pset_unlock(task->processor_set);
924 task_unlock(task);
925
926 *new_act = act;
927
928 return (KERN_SUCCESS);
929 }
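
/*
 * Caller-side sketch (hedged; the calls below live outside this file):
 * thread_create() hands back a held, suspended activation.  A typical
 * client then loads a register state and lets the thread run, roughly:
 *
 *	thread_create(task, &act);
 *	thread_set_state(act, flavor, new_state, count);
 *	thread_resume(act);
 *
 * which is essentially what thread_create_running() below performs in a
 * single call, without the initial suspension.
 */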
930
931 kern_return_t
932 thread_create_running(
933 register task_t task,
934 int flavor,
935 thread_state_t new_state,
936 mach_msg_type_number_t new_state_count,
937 thread_act_t *new_act) /* OUT */
938 {
939 register kern_return_t result;
940 thread_t thread;
941 thread_act_t act;
942
943 if (task == TASK_NULL)
944 return KERN_INVALID_ARGUMENT;
945
946 result = act_create(task, &act);
947 if (result != KERN_SUCCESS)
948 return (result);
949
950 result = thread_create_shuttle(act, -1, thread_bootstrap_return, &thread);
951 if (result != KERN_SUCCESS) {
952 act_deallocate(act);
953 return (result);
954 }
955
956 act_lock(act);
957 result = act_machine_set_state(act, flavor, new_state, new_state_count);
958 if (result != KERN_SUCCESS) {
959 act_unlock(act);
960 pset_unlock(task->processor_set);
961 task_unlock(task);
962
963 (void)thread_terminate(act);
964 return (result);
965 }
966
967 clear_wait(thread, THREAD_AWAKENED);
968 act->inited = TRUE;
969 act_unlock(act);
970 pset_unlock(task->processor_set);
971 task_unlock(task);
972
973 *new_act = act;
974
975 return (result);
976 }
977
978 /*
979 * kernel_thread:
980 *
981  * Create a kernel thread in the specified task, and
982 * optionally start it running.
983 */
984 thread_t
985 kernel_thread_with_priority(
986 task_t task,
987 integer_t priority,
988 void (*start)(void),
989 boolean_t alloc_stack,
990 boolean_t start_running)
991 {
992 kern_return_t result;
993 thread_t thread;
994 thread_act_t act;
995
996 result = act_create(task, &act);
997 if (result != KERN_SUCCESS)
998 return (THREAD_NULL);
999
1000 result = thread_create_shuttle(act, priority, start, &thread);
1001 if (result != KERN_SUCCESS) {
1002 act_deallocate(act);
1003 return (THREAD_NULL);
1004 }
1005
1006 pset_unlock(task->processor_set);
1007 task_unlock(task);
1008
1009 if (alloc_stack)
1010 thread_doswapin(thread);
1011
1012 act_lock(act);
1013 if (start_running)
1014 clear_wait(thread, THREAD_AWAKENED);
1015 act->inited = TRUE;
1016 act_unlock(act);
1017
1018 act_deallocate(act);
1019
1020 return (thread);
1021 }
1022
1023 thread_t
1024 kernel_thread(
1025 task_t task,
1026 void (*start)(void))
1027 {
1028 return kernel_thread_with_priority(task, -1, start, FALSE, TRUE);
1029 }
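
/*
 * Typical use (see thread_reaper_init() later in this file):
 *
 *	(void) kernel_thread(kernel_task, reaper_thread);
 *
 * which creates a shuttle in the kernel task at the default priority
 * and immediately starts it running at reaper_thread().
 */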
1030
1031 unsigned int c_weird_pset_ref_exit = 0; /* pset code raced us */
1032
1033 #if MACH_HOST
1034 /* Preclude thread processor set assignment */
1035 #define thread_freeze(thread) assert((thread)->processor_set == &default_pset)
1036
1037 /* Allow thread processor set assignment */
1038 #define thread_unfreeze(thread) assert((thread)->processor_set == &default_pset)
1039
1040 #endif /* MACH_HOST */
1041
1042 void
1043 thread_deallocate(
1044 thread_t thread)
1045 {
1046 task_t task;
1047 processor_set_t pset;
1048 int refs;
1049 spl_t s;
1050
1051 if (thread == THREAD_NULL)
1052 return;
1053
1054 /*
1055 * First, check for new count > 0 (the common case).
1056 * Only the thread needs to be locked.
1057 */
1058 s = splsched();
1059 thread_lock(thread);
1060 refs = --thread->ref_count;
1061 thread_unlock(thread);
1062 splx(s);
1063
1064 if (refs > 0)
1065 return;
1066
1067 if (thread == current_thread())
1068 panic("thread deallocating itself");
1069
1070 /*
1071 * There is a dangling pointer to the thread from the
1072 * processor_set. To clean it up, we freeze the thread
1073 * in the pset (because pset destruction can cause even
1074 * reference-less threads to be reassigned to the default
1075 * pset) and then remove it.
1076 */
1077
1078 #if MACH_HOST
1079 thread_freeze(thread);
1080 #endif
1081
1082 pset = thread->processor_set;
1083 pset_lock(pset);
1084 pset_remove_thread(pset, thread);
1085 pset_unlock(pset);
1086
1087 #if MACH_HOST
1088 thread_unfreeze(thread);
1089 #endif
1090
1091 pset_deallocate(pset);
1092
1093 if (thread->stack_privilege != 0) {
1094 if (thread->stack_privilege != thread->kernel_stack)
1095 stack_free_stack(thread->stack_privilege);
1096 thread->stack_privilege = 0;
1097 }
1098 /* frees kernel stack & other MD resources */
1099 thread_machine_destroy(thread);
1100
1101 zfree(thread_shuttle_zone, (vm_offset_t) thread);
1102 }
1103
1104 void
1105 thread_reference(
1106 thread_t thread)
1107 {
1108 spl_t s;
1109
1110 if (thread == THREAD_NULL)
1111 return;
1112
1113 s = splsched();
1114 thread_lock(thread);
1115 thread_reference_locked(thread);
1116 thread_unlock(thread);
1117 splx(s);
1118 }
1119
1120 /*
1121 * Called with "appropriate" thread-related locks held on
1122 * thread and its top_act for synchrony with RPC (see
1123 * act_lock_thread()).
1124 */
1125 kern_return_t
1126 thread_info_shuttle(
1127 register thread_act_t thr_act,
1128 thread_flavor_t flavor,
1129 thread_info_t thread_info_out, /* ptr to OUT array */
1130 mach_msg_type_number_t *thread_info_count) /*IN/OUT*/
1131 {
1132 register thread_t thread = thr_act->thread;
1133 int state, flags;
1134 spl_t s;
1135
1136 if (thread == THREAD_NULL)
1137 return (KERN_INVALID_ARGUMENT);
1138
1139 if (flavor == THREAD_BASIC_INFO) {
1140 register thread_basic_info_t basic_info;
1141
1142 if (*thread_info_count < THREAD_BASIC_INFO_COUNT)
1143 return (KERN_INVALID_ARGUMENT);
1144
1145 basic_info = (thread_basic_info_t) thread_info_out;
1146
1147 s = splsched();
1148 thread_lock(thread);
1149
1150 /* fill in info */
1151
1152 thread_read_times(thread, &basic_info->user_time,
1153 &basic_info->system_time);
1154
1155 /*
1156 * Update lazy-evaluated scheduler info because someone wants it.
1157 */
1158 if (thread->sched_stamp != sched_tick)
1159 update_priority(thread);
1160
1161 basic_info->sleep_time = 0;
1162
1163 /*
1164 * To calculate cpu_usage, first correct for timer rate,
1165 * then for 5/8 ageing. The correction factor [3/5] is
1166 * (1/(5/8) - 1).
1167 */
1168 basic_info->cpu_usage = (thread->cpu_usage << SCHED_TICK_SHIFT) /
1169 (TIMER_RATE / TH_USAGE_SCALE);
1170 basic_info->cpu_usage = (basic_info->cpu_usage * 3) / 5;
1171 #if SIMPLE_CLOCK
1172 /*
1173 * Clock drift compensation.
1174 */
1175 basic_info->cpu_usage = (basic_info->cpu_usage * 1000000) / sched_usec;
1176 #endif /* SIMPLE_CLOCK */
1177
1178 basic_info->policy = ((thread->sched_mode & TH_MODE_TIMESHARE)?
1179 POLICY_TIMESHARE: POLICY_RR);
1180
1181 flags = 0;
1182 if (thread->state & TH_IDLE)
1183 flags |= TH_FLAGS_IDLE;
1184
1185 if (thread->state & TH_STACK_HANDOFF)
1186 flags |= TH_FLAGS_SWAPPED;
1187
1188 state = 0;
1189 if (thread->state & TH_TERMINATE)
1190 state = TH_STATE_HALTED;
1191 else
1192 if (thread->state & TH_RUN)
1193 state = TH_STATE_RUNNING;
1194 else
1195 if (thread->state & TH_UNINT)
1196 state = TH_STATE_UNINTERRUPTIBLE;
1197 else
1198 if (thread->state & TH_SUSP)
1199 state = TH_STATE_STOPPED;
1200 else
1201 if (thread->state & TH_WAIT)
1202 state = TH_STATE_WAITING;
1203
1204 basic_info->run_state = state;
1205 basic_info->flags = flags;
1206
1207 basic_info->suspend_count = thr_act->user_stop_count;
1208
1209 thread_unlock(thread);
1210 splx(s);
1211
1212 *thread_info_count = THREAD_BASIC_INFO_COUNT;
1213
1214 return (KERN_SUCCESS);
1215 }
1216 else
1217 if (flavor == THREAD_SCHED_TIMESHARE_INFO) {
1218 policy_timeshare_info_t ts_info;
1219
1220 if (*thread_info_count < POLICY_TIMESHARE_INFO_COUNT)
1221 return (KERN_INVALID_ARGUMENT);
1222
1223 ts_info = (policy_timeshare_info_t)thread_info_out;
1224
1225 s = splsched();
1226 thread_lock(thread);
1227
1228 if (!(thread->sched_mode & TH_MODE_TIMESHARE)) {
1229 thread_unlock(thread);
1230 splx(s);
1231
1232 return (KERN_INVALID_POLICY);
1233 }
1234
1235 ts_info->depressed = (thread->sched_mode & TH_MODE_ISDEPRESSED) != 0;
1236 if (ts_info->depressed) {
1237 ts_info->base_priority = DEPRESSPRI;
1238 ts_info->depress_priority = thread->priority;
1239 }
1240 else {
1241 ts_info->base_priority = thread->priority;
1242 ts_info->depress_priority = -1;
1243 }
1244
1245 ts_info->cur_priority = thread->sched_pri;
1246 ts_info->max_priority = thread->max_priority;
1247
1248 thread_unlock(thread);
1249 splx(s);
1250
1251 *thread_info_count = POLICY_TIMESHARE_INFO_COUNT;
1252
1253 return (KERN_SUCCESS);
1254 }
1255 else
1256 if (flavor == THREAD_SCHED_FIFO_INFO) {
1257 if (*thread_info_count < POLICY_FIFO_INFO_COUNT)
1258 return (KERN_INVALID_ARGUMENT);
1259
1260 return (KERN_INVALID_POLICY);
1261 }
1262 else
1263 if (flavor == THREAD_SCHED_RR_INFO) {
1264 policy_rr_info_t rr_info;
1265
1266 if (*thread_info_count < POLICY_RR_INFO_COUNT)
1267 return (KERN_INVALID_ARGUMENT);
1268
1269 rr_info = (policy_rr_info_t) thread_info_out;
1270
1271 s = splsched();
1272 thread_lock(thread);
1273
1274 if (thread->sched_mode & TH_MODE_TIMESHARE) {
1275 thread_unlock(thread);
1276 splx(s);
1277
1278 return (KERN_INVALID_POLICY);
1279 }
1280
1281 rr_info->depressed = (thread->sched_mode & TH_MODE_ISDEPRESSED) != 0;
1282 if (rr_info->depressed) {
1283 rr_info->base_priority = DEPRESSPRI;
1284 rr_info->depress_priority = thread->priority;
1285 }
1286 else {
1287 rr_info->base_priority = thread->priority;
1288 rr_info->depress_priority = -1;
1289 }
1290
1291 rr_info->max_priority = thread->max_priority;
1292 rr_info->quantum = std_quantum_us / 1000;
1293
1294 thread_unlock(thread);
1295 splx(s);
1296
1297 *thread_info_count = POLICY_RR_INFO_COUNT;
1298
1299 return (KERN_SUCCESS);
1300 }
1301
1302 return (KERN_INVALID_ARGUMENT);
1303 }
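
/*
 * Caller-side sketch (illustrative only; the MIG glue lives outside this
 * file): user code typically reaches this routine through thread_info()
 * on a thread port, for example
 *
 *	thread_basic_info_data_t	info;
 *	mach_msg_type_number_t		count = THREAD_BASIC_INFO_COUNT;
 *
 *	kr = thread_info(thread, THREAD_BASIC_INFO,
 *			 (thread_info_t)&info, &count);
 *
 * with the thread_act layer forwarding the request here.
 */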
1304
1305 void
1306 thread_doreap(
1307 register thread_t thread)
1308 {
1309 thread_act_t thr_act;
1310
1311
1312 thr_act = thread_lock_act(thread);
1313 assert(thr_act && thr_act->thread == thread);
1314
1315 act_locked_act_reference(thr_act);
1316
1317 /*
1318 * Replace `act_unlock_thread()' with individual
1319 * calls. (`act_detach()' can change fields used
1320 * to determine which locks are held, confusing
1321 * `act_unlock_thread()'.)
1322 */
1323 act_unlock(thr_act);
1324
1325 /* Remove the reference held by a rooted thread */
1326 act_deallocate(thr_act);
1327
1328 /* Remove the reference held by the thread: */
1329 act_deallocate(thr_act);
1330 }
1331
1332 /*
1333 * reaper_thread:
1334 *
1335 * This kernel thread runs forever looking for terminating
1336 * threads, releasing their "self" references.
1337 */
1338 static void
1339 reaper_thread_continue(void)
1340 {
1341 register thread_t thread;
1342
1343 (void)splsched();
1344 simple_lock(&reaper_lock);
1345
1346 while ((thread = (thread_t) dequeue_head(&reaper_queue)) != THREAD_NULL) {
1347 simple_unlock(&reaper_lock);
1348 (void)spllo();
1349
1350 thread_doreap(thread);
1351
1352 (void)splsched();
1353 simple_lock(&reaper_lock);
1354 }
1355
1356 assert_wait((event_t)&reaper_queue, THREAD_UNINT);
1357 simple_unlock(&reaper_lock);
1358 (void)spllo();
1359
1360 thread_block(reaper_thread_continue);
1361 /*NOTREACHED*/
1362 }
1363
1364 static void
1365 reaper_thread(void)
1366 {
1367 thread_t self = current_thread();
1368
1369 stack_privilege(self);
1370
1371 reaper_thread_continue();
1372 /*NOTREACHED*/
1373 }
1374
1375 void
1376 thread_reaper_init(void)
1377 {
1378 kernel_thread(kernel_task, reaper_thread);
1379 }
1380
1381 kern_return_t
1382 thread_assign(
1383 thread_act_t thr_act,
1384 processor_set_t new_pset)
1385 {
1386 return(KERN_FAILURE);
1387 }
1388
1389 /*
1390 * thread_assign_default:
1391 *
1392 * Special version of thread_assign for assigning threads to default
1393 * processor set.
1394 */
1395 kern_return_t
1396 thread_assign_default(
1397 thread_act_t thr_act)
1398 {
1399 return (thread_assign(thr_act, &default_pset));
1400 }
1401
1402 /*
1403 * thread_get_assignment
1404 *
1405 * Return current assignment for this thread.
1406 */
1407 kern_return_t
1408 thread_get_assignment(
1409 thread_act_t thr_act,
1410 processor_set_t *pset)
1411 {
1412 thread_t thread;
1413
1414 if (thr_act == THR_ACT_NULL)
1415 return(KERN_INVALID_ARGUMENT);
1416 thread = act_lock_thread(thr_act);
1417 if (thread == THREAD_NULL) {
1418 act_unlock_thread(thr_act);
1419 return(KERN_INVALID_ARGUMENT);
1420 }
1421 *pset = thread->processor_set;
1422 act_unlock_thread(thr_act);
1423 pset_reference(*pset);
1424 return(KERN_SUCCESS);
1425 }
1426
1427 /*
1428 * thread_wire:
1429 *
1430 * Specify that the target thread must always be able
1431 * to run and to allocate memory.
1432 */
1433 kern_return_t
1434 thread_wire(
1435 host_priv_t host_priv,
1436 thread_act_t thr_act,
1437 boolean_t wired)
1438 {
1439 spl_t s;
1440 thread_t thread;
1441 extern void vm_page_free_reserve(int pages);
1442
1443 if (thr_act == THR_ACT_NULL || host_priv == HOST_PRIV_NULL)
1444 return (KERN_INVALID_ARGUMENT);
1445
1446 assert(host_priv == &realhost);
1447
1448 thread = act_lock_thread(thr_act);
1449 	if (thread == THREAD_NULL) {
1450 act_unlock_thread(thr_act);
1451 return(KERN_INVALID_ARGUMENT);
1452 }
1453
1454 /*
1455 * This implementation only works for the current thread.
1456 * See stack_privilege.
1457 */
1458 if (thr_act != current_act())
1459 return KERN_INVALID_ARGUMENT;
1460
1461 s = splsched();
1462 thread_lock(thread);
1463
1464 if (wired) {
1465 if (thread->vm_privilege == FALSE)
1466 vm_page_free_reserve(1); /* XXX */
1467 thread->vm_privilege = TRUE;
1468 } else {
1469 if (thread->vm_privilege == TRUE)
1470 vm_page_free_reserve(-1); /* XXX */
1471 thread->vm_privilege = FALSE;
1472 }
1473
1474 thread_unlock(thread);
1475 splx(s);
1476 act_unlock_thread(thr_act);
1477
1478 return KERN_SUCCESS;
1479 }
1480
1481 /*
1482 * thread_collect_scan:
1483 *
1484 * Attempt to free resources owned by threads.
1485 */
1486
1487 void
1488 thread_collect_scan(void)
1489 {
1490 /* This code runs very quickly! */
1491 }
1492
1493 /* Also disabled in vm/vm_pageout.c */
1494 boolean_t thread_collect_allowed = FALSE;
1495 unsigned thread_collect_last_tick = 0;
1496 unsigned thread_collect_max_rate = 0; /* in ticks */
1497
1498 /*
1499 * consider_thread_collect:
1500 *
1501 * Called by the pageout daemon when the system needs more free pages.
1502 */
1503
1504 void
1505 consider_thread_collect(void)
1506 {
1507 /*
1508 * By default, don't attempt thread collection more frequently
1509 * than once a second.
1510 */
1511
1512 if (thread_collect_max_rate == 0)
1513 thread_collect_max_rate = (1 << SCHED_TICK_SHIFT) + 1;
1514
1515 if (thread_collect_allowed &&
1516 (sched_tick >
1517 (thread_collect_last_tick + thread_collect_max_rate))) {
1518 thread_collect_last_tick = sched_tick;
1519 thread_collect_scan();
1520 }
1521 }
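
/*
 * Rate note (a sketch, assuming the common SCHED_TICK_SHIFT of 3, with
 * sched_tick advancing 8 times a second): thread_collect_max_rate
 * defaults to (1 << 3) + 1 = 9 ticks, so collection is attempted at
 * most about once every 1.1 seconds, matching the comment above.
 */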
1522
1523 kern_return_t
1524 host_stack_usage(
1525 host_t host,
1526 vm_size_t *reservedp,
1527 unsigned int *totalp,
1528 vm_size_t *spacep,
1529 vm_size_t *residentp,
1530 vm_size_t *maxusagep,
1531 vm_offset_t *maxstackp)
1532 {
1533 #if !MACH_DEBUG
1534 return KERN_NOT_SUPPORTED;
1535 #else
1536 unsigned int total;
1537 vm_size_t maxusage;
1538
1539 if (host == HOST_NULL)
1540 return KERN_INVALID_HOST;
1541
1542 maxusage = 0;
1543
1544 stack_statistics(&total, &maxusage);
1545
1546 *reservedp = 0;
1547 *totalp = total;
1548 *spacep = *residentp = total * round_page_32(KERNEL_STACK_SIZE);
1549 *maxusagep = maxusage;
1550 *maxstackp = 0;
1551 return KERN_SUCCESS;
1552
1553 #endif /* MACH_DEBUG */
1554 }
1555
1556 /*
1557 * Return info on stack usage for threads in a specific processor set
1558 */
1559 kern_return_t
1560 processor_set_stack_usage(
1561 processor_set_t pset,
1562 unsigned int *totalp,
1563 vm_size_t *spacep,
1564 vm_size_t *residentp,
1565 vm_size_t *maxusagep,
1566 vm_offset_t *maxstackp)
1567 {
1568 #if !MACH_DEBUG
1569 return KERN_NOT_SUPPORTED;
1570 #else
1571 unsigned int total;
1572 vm_size_t maxusage;
1573 vm_offset_t maxstack;
1574
1575 register thread_t *threads;
1576 register thread_t thread;
1577
1578 unsigned int actual; /* this many things */
1579 unsigned int i;
1580
1581 vm_size_t size, size_needed;
1582 vm_offset_t addr;
1583
1584 spl_t s;
1585
1586 if (pset == PROCESSOR_SET_NULL)
1587 return KERN_INVALID_ARGUMENT;
1588
1589 size = 0; addr = 0;
1590
1591 for (;;) {
1592 pset_lock(pset);
1593 if (!pset->active) {
1594 pset_unlock(pset);
1595 return KERN_INVALID_ARGUMENT;
1596 }
1597
1598 actual = pset->thread_count;
1599
1600 /* do we have the memory we need? */
1601
1602 size_needed = actual * sizeof(thread_t);
1603 if (size_needed <= size)
1604 break;
1605
1606 /* unlock the pset and allocate more memory */
1607 pset_unlock(pset);
1608
1609 if (size != 0)
1610 kfree(addr, size);
1611
1612 assert(size_needed > 0);
1613 size = size_needed;
1614
1615 addr = kalloc(size);
1616 if (addr == 0)
1617 return KERN_RESOURCE_SHORTAGE;
1618 }
1619
1620 /* OK, have memory and the processor_set is locked & active */
1621 s = splsched();
1622 threads = (thread_t *) addr;
1623 for (i = 0, thread = (thread_t) queue_first(&pset->threads);
1624 !queue_end(&pset->threads, (queue_entry_t) thread);
1625 thread = (thread_t) queue_next(&thread->pset_threads)) {
1626 thread_lock(thread);
1627 if (thread->ref_count > 0) {
1628 thread_reference_locked(thread);
1629 threads[i++] = thread;
1630 }
1631 thread_unlock(thread);
1632 }
1633 splx(s);
1634 assert(i <= actual);
1635
1636 /* can unlock processor set now that we have the thread refs */
1637 pset_unlock(pset);
1638
1639 /* calculate maxusage and free thread references */
1640
1641 total = 0;
1642 maxusage = 0;
1643 maxstack = 0;
1644 while (i > 0) {
1645 int cpu;
1646 thread_t thread = threads[--i];
1647 vm_offset_t stack = 0;
1648
1649 /*
1650 * thread->kernel_stack is only accurate if the
1651 * thread isn't swapped and is not executing.
1652 *
1653 * Of course, we don't have the appropriate locks
1654 * for these shenanigans.
1655 */
1656
1657 stack = thread->kernel_stack;
1658
1659 for (cpu = 0; cpu < NCPUS; cpu++)
1660 if (cpu_to_processor(cpu)->cpu_data->active_thread == thread) {
1661 stack = active_stacks[cpu];
1662 break;
1663 }
1664
1665 if (stack != 0) {
1666 total++;
1667 }
1668
1669 thread_deallocate(thread);
1670 }
1671
1672 if (size != 0)
1673 kfree(addr, size);
1674
1675 *totalp = total;
1676 *residentp = *spacep = total * round_page_32(KERNEL_STACK_SIZE);
1677 *maxusagep = maxusage;
1678 *maxstackp = maxstack;
1679 return KERN_SUCCESS;
1680
1681 #endif /* MACH_DEBUG */
1682 }
1683
1684 int split_funnel_off = 0;
1685 funnel_t *
1686 funnel_alloc(
1687 int type)
1688 {
1689 mutex_t *m;
1690 funnel_t * fnl;
1691 if ((fnl = (funnel_t *)kalloc(sizeof(funnel_t))) != 0){
1692 bzero((void *)fnl, sizeof(funnel_t));
1693 if ((m = mutex_alloc(0)) == (mutex_t *)NULL) {
1694 kfree((vm_offset_t)fnl, sizeof(funnel_t));
1695 return(THR_FUNNEL_NULL);
1696 }
1697 fnl->fnl_mutex = m;
1698 fnl->fnl_type = type;
1699 }
1700 return(fnl);
1701 }
1702
1703 void
1704 funnel_free(
1705 funnel_t * fnl)
1706 {
1707 mutex_free(fnl->fnl_mutex);
1708 if (fnl->fnl_oldmutex)
1709 mutex_free(fnl->fnl_oldmutex);
1710 kfree((vm_offset_t)fnl, sizeof(funnel_t));
1711 }
1712
1713 void
1714 funnel_lock(
1715 funnel_t * fnl)
1716 {
1717 mutex_t * m;
1718
1719 m = fnl->fnl_mutex;
1720 restart:
1721 mutex_lock(m);
1722 fnl->fnl_mtxholder = current_thread();
1723 if (split_funnel_off && (m != fnl->fnl_mutex)) {
1724 mutex_unlock(m);
1725 m = fnl->fnl_mutex;
1726 goto restart;
1727 }
1728 }
1729
1730 void
1731 funnel_unlock(
1732 funnel_t * fnl)
1733 {
1734 mutex_unlock(fnl->fnl_mutex);
1735 fnl->fnl_mtxrelease = current_thread();
1736 }
1737
1738 funnel_t *
1739 thread_funnel_get(
1740 void)
1741 {
1742 thread_t th = current_thread();
1743
1744 if (th->funnel_state & TH_FN_OWNED) {
1745 return(th->funnel_lock);
1746 }
1747 return(THR_FUNNEL_NULL);
1748 }
1749
1750 boolean_t
1751 thread_funnel_set(
1752 funnel_t * fnl,
1753 boolean_t funneled)
1754 {
1755 thread_t cur_thread;
1756 boolean_t funnel_state_prev;
1757 boolean_t intr;
1758
1759 cur_thread = current_thread();
1760 funnel_state_prev = ((cur_thread->funnel_state & TH_FN_OWNED) == TH_FN_OWNED);
1761
1762 if (funnel_state_prev != funneled) {
1763 intr = ml_set_interrupts_enabled(FALSE);
1764
1765 if (funneled == TRUE) {
1766 if (cur_thread->funnel_lock)
1767 panic("Funnel lock called when holding one %x", cur_thread->funnel_lock);
1768 KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE,
1769 fnl, 1, 0, 0, 0);
1770 funnel_lock(fnl);
1771 KERNEL_DEBUG(0x6032434 | DBG_FUNC_NONE,
1772 fnl, 1, 0, 0, 0);
1773 cur_thread->funnel_state |= TH_FN_OWNED;
1774 cur_thread->funnel_lock = fnl;
1775 } else {
1776 if(cur_thread->funnel_lock->fnl_mutex != fnl->fnl_mutex)
1777 panic("Funnel unlock when not holding funnel");
1778 cur_thread->funnel_state &= ~TH_FN_OWNED;
1779 KERNEL_DEBUG(0x603242c | DBG_FUNC_NONE,
1780 fnl, 1, 0, 0, 0);
1781
1782 cur_thread->funnel_lock = THR_FUNNEL_NULL;
1783 funnel_unlock(fnl);
1784 }
1785 (void)ml_set_interrupts_enabled(intr);
1786 } else {
1787 		/* if we are trying to acquire the funnel recursively,
1788 		 * check that the funnel we already hold is the same one
1789 		 */
1790 if (funneled && (fnl->fnl_mutex != cur_thread->funnel_lock->fnl_mutex)) {
1791 panic("thread_funnel_set: already holding a different funnel");
1792 }
1793 }
1794 return(funnel_state_prev);
1795 }
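
/*
 * Usage sketch (hedged; kernel_flock is the BSD kernel funnel, allocated
 * elsewhere with funnel_alloc(KERNEL_FUNNEL)): code that must run under
 * a funnel typically brackets itself with
 *
 *	boolean_t funnel_state;
 *
 *	funnel_state = thread_funnel_set(kernel_flock, TRUE);
 *	... funneled work ...
 *	(void) thread_funnel_set(kernel_flock, funnel_state);
 *
 * restoring the previous state rather than unconditionally dropping it.
 */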
1796
1797 boolean_t
1798 thread_funnel_merge(
1799 funnel_t * fnl,
1800 funnel_t * otherfnl)
1801 {
1802 mutex_t * m;
1803 mutex_t * otherm;
1804 funnel_t * gfnl;
1805 extern int disable_funnel;
1806
1807 if ((gfnl = thread_funnel_get()) == THR_FUNNEL_NULL)
1808 panic("thread_funnel_merge called with no funnels held");
1809
1810 if (gfnl->fnl_type != 1)
1811 panic("thread_funnel_merge called from non kernel funnel");
1812
1813 if (gfnl != fnl)
1814 panic("thread_funnel_merge incorrect invocation");
1815
1816 if (disable_funnel || split_funnel_off)
1817 return (KERN_FAILURE);
1818
1819 m = fnl->fnl_mutex;
1820 otherm = otherfnl->fnl_mutex;
1821
1822 /* Acquire other funnel mutex */
1823 mutex_lock(otherm);
1824 split_funnel_off = 1;
1825 disable_funnel = 1;
1826 otherfnl->fnl_mutex = m;
1827 otherfnl->fnl_type = fnl->fnl_type;
1828 otherfnl->fnl_oldmutex = otherm; /* save this for future use */
1829
1830 mutex_unlock(otherm);
1831 return(KERN_SUCCESS);
1832 }
1833
1834 void
1835 thread_set_cont_arg(
1836 int arg)
1837 {
1838 thread_t self = current_thread();
1839
1840 self->saved.misc = arg;
1841 }
1842
1843 int
1844 thread_get_cont_arg(void)
1845 {
1846 thread_t self = current_thread();
1847
1848 return (self->saved.misc);
1849 }
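
/*
 * Pattern sketch (illustrative; my_continuation is a hypothetical
 * routine): because the kernel stack is not preserved across a
 * continuation-style block, a routine can stash a single integer for
 * its continuation,
 *
 *	thread_set_cont_arg(arg);
 *	thread_block(my_continuation);
 *
 * and my_continuation() recovers it with
 *
 *	arg = thread_get_cont_arg();
 */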
1850
1851 /*
1852 * Export routines to other components for things that are done as macros
1853 * within the osfmk component.
1854 */
1855 #undef thread_should_halt
1856 boolean_t
1857 thread_should_halt(
1858 thread_shuttle_t th)
1859 {
1860 return(thread_should_halt_fast(th));
1861 }