1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25 /*
26 * @OSF_FREE_COPYRIGHT@
27 */
28 /*
29 * Mach Operating System
30 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
31 * All Rights Reserved.
32 *
33 * Permission to use, copy, modify and distribute this software and its
34 * documentation is hereby granted, provided that both the copyright
35 * notice and this permission notice appear in all copies of the
36 * software, derivative works or modified versions, and any portions
37 * thereof, and that both notices appear in supporting documentation.
38 *
39 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
40 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
41 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
42 *
43 * Carnegie Mellon requests users of this software to return to
44 *
45 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
46 * School of Computer Science
47 * Carnegie Mellon University
48 * Pittsburgh PA 15213-3890
49 *
50 * any improvements or extensions that they make and grant Carnegie Mellon
51 * the rights to redistribute these changes.
52 */
53 /*
54 */
55 /*
56 * File: kern/thread.c
57 * Author: Avadis Tevanian, Jr., Michael Wayne Young, David Golub
58 * Date: 1986
59 *
60 * Thread/thread_shuttle management primitives implementation.
61 */
62 /*
63 * Copyright (c) 1993 The University of Utah and
64 * the Computer Systems Laboratory (CSL). All rights reserved.
65 *
66 * Permission to use, copy, modify and distribute this software and its
67 * documentation is hereby granted, provided that both the copyright
68 * notice and this permission notice appear in all copies of the
69 * software, derivative works or modified versions, and any portions
70 * thereof, and that both notices appear in supporting documentation.
71 *
72 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
73 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
74 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
75 *
76 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
77 * improvements that they make and grant CSL redistribution rights.
78 *
79 */
80
81 #include <cpus.h>
82 #include <mach_host.h>
83 #include <simple_clock.h>
84 #include <mach_debug.h>
85 #include <mach_prof.h>
86
87 #include <mach/boolean.h>
88 #include <mach/policy.h>
89 #include <mach/thread_info.h>
90 #include <mach/thread_special_ports.h>
91 #include <mach/thread_status.h>
92 #include <mach/time_value.h>
93 #include <mach/vm_param.h>
94 #include <kern/ast.h>
95 #include <kern/cpu_data.h>
96 #include <kern/counters.h>
97 #include <kern/etap_macros.h>
98 #include <kern/ipc_mig.h>
99 #include <kern/ipc_tt.h>
100 #include <kern/mach_param.h>
101 #include <kern/machine.h>
102 #include <kern/misc_protos.h>
103 #include <kern/processor.h>
104 #include <kern/queue.h>
105 #include <kern/sched.h>
106 #include <kern/sched_prim.h>
107 #include <kern/mk_sp.h> /*** ??? fix so this can be removed ***/
108 #include <kern/task.h>
109 #include <kern/thread.h>
110 #include <kern/thread_act.h>
111 #include <kern/thread_swap.h>
112 #include <kern/host.h>
113 #include <kern/zalloc.h>
114 #include <vm/vm_kern.h>
115 #include <ipc/ipc_kmsg.h>
116 #include <ipc/ipc_port.h>
117 #include <machine/thread.h> /* for MACHINE_STACK */
118 #include <kern/profile.h>
119 #include <kern/assert.h>
120 #include <sys/kdebug.h>
121
122 /*
123 * Exported interfaces
124 */
125
126 #include <mach/thread_act_server.h>
127 #include <mach/mach_host_server.h>
128
129 static struct zone *thread_zone;
130
131 static queue_head_t reaper_queue;
132 decl_simple_lock_data(static,reaper_lock)
133
134 extern int tick;
135
136 /* private */
137 static struct thread thread_template, init_thread;
138
139 #if MACH_DEBUG
140
141 #ifdef MACHINE_STACK
142 extern void stack_statistics(
143 unsigned int *totalp,
144 vm_size_t *maxusagep);
145 #endif /* MACHINE_STACK */
146 #endif /* MACH_DEBUG */
147
148 #ifdef MACHINE_STACK
149 /*
150 * Machine-dependent code must define:
151 * stack_alloc_try
152 * stack_alloc
153 * stack_free
154 * stack_free_stack
155 * stack_collect
156 * and if MACH_DEBUG:
157 * stack_statistics
158 */
159 #else /* MACHINE_STACK */
160 /*
161 * We allocate stacks from generic kernel VM.
162 * Machine-dependent code must define:
163 * machine_kernel_stack_init
164 *
165 * The stack_free_list can only be accessed at splsched,
166 * because stack_alloc_try/thread_invoke operate at splsched.
167 */
168
169 decl_simple_lock_data(static,stack_lock_data)
170 #define stack_lock() simple_lock(&stack_lock_data)
171 #define stack_unlock() simple_unlock(&stack_lock_data)
172
173 static vm_map_t stack_map;
174 static vm_offset_t stack_free_list;
175
176 static vm_offset_t stack_free_cache[NCPUS];
177
178 unsigned int stack_free_max = 0;
179 unsigned int stack_free_count = 0; /* splsched only */
180 unsigned int stack_free_limit = 1; /* Arbitrary */
181
182 unsigned int stack_cache_hits = 0; /* debugging */
183
184 unsigned int stack_alloc_hits = 0; /* debugging */
185 unsigned int stack_alloc_misses = 0; /* debugging */
186
187 unsigned int stack_alloc_total = 0;
188 unsigned int stack_alloc_hiwater = 0;
189 unsigned int stack_alloc_bndry = 0;
190
191
192 /*
193 * The next field is at the base of the stack,
194 * so the low end is left unsullied.
195 */
196
197 #define stack_next(stack) (*((vm_offset_t *)((stack) + KERNEL_STACK_SIZE) - 1))
198
199 /*
200 * stack_alloc:
201 *
202 * Allocate a kernel stack for a thread.
203 * May block.
204 */
205 vm_offset_t
206 stack_alloc(
207 thread_t thread,
208 void (*start_pos)(thread_t))
209 {
210 vm_offset_t stack = thread->kernel_stack;
211 spl_t s;
212
213 if (stack)
214 return (stack);
215
216 s = splsched();
217 stack_lock();
218 stack = stack_free_list;
219 if (stack != 0) {
220 stack_free_list = stack_next(stack);
221 stack_free_count--;
222 }
223 stack_unlock();
224 splx(s);
225
226 if (stack != 0) {
227 machine_stack_attach(thread, stack, start_pos);
228 return (stack);
229 }
230
231 if (kernel_memory_allocate(
232 stack_map, &stack,
233 KERNEL_STACK_SIZE, stack_alloc_bndry - 1,
234 KMA_KOBJECT) != KERN_SUCCESS)
235 panic("stack_alloc: no space left for stack maps");
236
237 stack_alloc_total++;
238 if (stack_alloc_total > stack_alloc_hiwater)
239 stack_alloc_hiwater = stack_alloc_total;
240
241 machine_stack_attach(thread, stack, start_pos);
242 return (stack);
243 }
244
245 /*
246 * stack_free:
247 *
248 * Free a kernel stack.
249 */
250
251 void
252 stack_free(
253 thread_t thread)
254 {
255 vm_offset_t stack = machine_stack_detach(thread);
256
257 assert(stack);
258 if (stack != thread->reserved_stack) {
259 spl_t s = splsched();
260 vm_offset_t *cache;
261
262 cache = &stack_free_cache[cpu_number()];
263 if (*cache == 0) {
264 *cache = stack;
265 splx(s);
266
267 return;
268 }
269
270 stack_lock();
271 stack_next(stack) = stack_free_list;
272 stack_free_list = stack;
273 if (++stack_free_count > stack_free_max)
274 stack_free_max = stack_free_count;
275 stack_unlock();
276 splx(s);
277 }
278 }
279
280 void
281 stack_free_stack(
282 vm_offset_t stack)
283 {
284 spl_t s = splsched();
285 vm_offset_t *cache;
286
287 cache = &stack_free_cache[cpu_number()];
288 if (*cache == 0) {
289 *cache = stack;
290 splx(s);
291
292 return;
293 }
294
295 stack_lock();
296 stack_next(stack) = stack_free_list;
297 stack_free_list = stack;
298 if (++stack_free_count > stack_free_max)
299 stack_free_max = stack_free_count;
300 stack_unlock();
301 splx(s);
302 }
303
304 /*
305 * stack_collect:
306 *
307 * Free excess kernel stacks.
308 * May block.
309 */
310
311 void
312 stack_collect(void)
313 {
314 spl_t s = splsched();
315
316 stack_lock();
317 while (stack_free_count > stack_free_limit) {
318 vm_offset_t stack = stack_free_list;
319
320 stack_free_list = stack_next(stack);
321 stack_free_count--;
322 stack_unlock();
323 splx(s);
324
325 if (vm_map_remove(
326 stack_map, stack, stack + KERNEL_STACK_SIZE,
327 VM_MAP_REMOVE_KUNWIRE) != KERN_SUCCESS)
328 panic("stack_collect: vm_map_remove failed");
329
330 s = splsched();
331 stack_lock();
332 stack_alloc_total--;
333 }
334 stack_unlock();
335 splx(s);
336 }
337
338 /*
339 * stack_alloc_try:
340 *
341 * Non-blocking attempt to allocate a kernel stack.
342 * Called at splsched with the thread locked.
343 */
344
345 boolean_t stack_alloc_try(
346 thread_t thread,
347 void (*start)(thread_t))
348 {
349 register vm_offset_t stack, *cache;
350
351 cache = &stack_free_cache[cpu_number()];
352 if ((stack = *cache) != 0) {
353 *cache = 0;
354 machine_stack_attach(thread, stack, start);
355 stack_cache_hits++;
356
357 return (TRUE);
358 }
359
360 stack_lock();
361 stack = stack_free_list;
362 if (stack != (vm_offset_t)0) {
363 stack_free_list = stack_next(stack);
364 stack_free_count--;
365 }
366 stack_unlock();
367
368 if (stack == 0)
369 stack = thread->reserved_stack;
370
371 if (stack != 0) {
372 machine_stack_attach(thread, stack, start);
373 stack_alloc_hits++;
374
375 return (TRUE);
376 }
377 else {
378 stack_alloc_misses++;
379
380 return (FALSE);
381 }
382 }
383
384 #if MACH_DEBUG
385 /*
386 * stack_statistics:
387 *
388 * Return statistics on cached kernel stacks.
389 * Stack usage is not tracked here; *maxusagep is returned as zero.
390 */
391
392 void
393 stack_statistics(
394 unsigned int *totalp,
395 vm_size_t *maxusagep)
396 {
397 spl_t s;
398
399 s = splsched();
400 stack_lock();
401
402 *totalp = stack_free_count;
403 *maxusagep = 0;
404
405 stack_unlock();
406 splx(s);
407 }
408 #endif /* MACH_DEBUG */
409
410 #endif /* MACHINE_STACK */
411
412
413 void stack_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
414 vm_size_t *alloc_size, int *collectable, int *exhaustable)
415 {
416 *count = stack_alloc_total - stack_free_count;
417 *cur_size = KERNEL_STACK_SIZE * stack_alloc_total;
418 *max_size = KERNEL_STACK_SIZE * stack_alloc_hiwater;
419 *elem_size = KERNEL_STACK_SIZE;
420 *alloc_size = KERNEL_STACK_SIZE;
421 *collectable = 1;
422 *exhaustable = 0;
423 }
424
425 void
426 stack_privilege(
427 register thread_t thread)
428 {
429 /* OBSOLETE */
430 }
431
432 void
433 thread_bootstrap(void)
434 {
435 /*
436 * Fill in a template thread for fast initialization.
437 */
438
439 thread_template.runq = RUN_QUEUE_NULL;
440
441 thread_template.ref_count = 1;
442
443 thread_template.reason = AST_NONE;
444 thread_template.at_safe_point = FALSE;
445 thread_template.wait_event = NO_EVENT64;
446 thread_template.wait_queue = WAIT_QUEUE_NULL;
447 thread_template.wait_result = THREAD_WAITING;
448 thread_template.interrupt_level = THREAD_ABORTSAFE;
449 thread_template.state = TH_STACK_HANDOFF | TH_WAIT | TH_UNINT;
450 thread_template.wake_active = FALSE;
451 thread_template.active_callout = FALSE;
452 thread_template.continuation = (void (*)(void))0;
453 thread_template.top_act = THR_ACT_NULL;
454
455 thread_template.importance = 0;
456 thread_template.sched_mode = 0;
457 thread_template.safe_mode = 0;
458
459 thread_template.priority = 0;
460 thread_template.sched_pri = 0;
461 thread_template.max_priority = 0;
462 thread_template.task_priority = 0;
463 thread_template.promotions = 0;
464 thread_template.pending_promoter_index = 0;
465 thread_template.pending_promoter[0] =
466 thread_template.pending_promoter[1] = NULL;
467
468 thread_template.realtime.deadline = UINT64_MAX;
469
470 thread_template.current_quantum = 0;
471
472 thread_template.computation_metered = 0;
473 thread_template.computation_epoch = 0;
474
475 thread_template.cpu_usage = 0;
476 thread_template.cpu_delta = 0;
477 thread_template.sched_usage = 0;
478 thread_template.sched_delta = 0;
479 thread_template.sched_stamp = 0;
480 thread_template.sleep_stamp = 0;
481 thread_template.safe_release = 0;
482
483 thread_template.bound_processor = PROCESSOR_NULL;
484 thread_template.last_processor = PROCESSOR_NULL;
485 thread_template.last_switch = 0;
486
487 thread_template.vm_privilege = FALSE;
488
489 timer_init(&(thread_template.user_timer));
490 timer_init(&(thread_template.system_timer));
491 thread_template.user_timer_save.low = 0;
492 thread_template.user_timer_save.high = 0;
493 thread_template.system_timer_save.low = 0;
494 thread_template.system_timer_save.high = 0;
495
496 thread_template.processor_set = PROCESSOR_SET_NULL;
497
498 thread_template.act_ref_count = 2;
499
500 thread_template.special_handler.handler = special_handler;
501 thread_template.special_handler.next = 0;
502
503 #if MACH_HOST
504 thread_template.may_assign = TRUE;
505 thread_template.assign_active = FALSE;
506 #endif /* MACH_HOST */
507 thread_template.funnel_lock = THR_FUNNEL_NULL;
508 thread_template.funnel_state = 0;
509 #if MACH_LDEBUG
510 thread_template.mutex_count = 0;
511 #endif /* MACH_LDEBUG */
512
513 init_thread = thread_template;
514
515 init_thread.top_act = &init_thread;
516 init_thread.thread = &init_thread;
517 machine_thread_set_current(&init_thread);
518 }
519
520 void
521 thread_init(void)
522 {
523 kern_return_t ret;
524 unsigned int stack;
525
526 thread_zone = zinit(
527 sizeof(struct thread),
528 THREAD_MAX * sizeof(struct thread),
529 THREAD_CHUNK * sizeof(struct thread),
530 "threads");
531
532 /*
533 * Initialize other data structures used in
534 * this module.
535 */
536
537 queue_init(&reaper_queue);
538 simple_lock_init(&reaper_lock, ETAP_THREAD_REAPER);
539
540 #ifndef MACHINE_STACK
541 simple_lock_init(&stack_lock_data, ETAP_THREAD_STACK); /* Initialize the stack lock */
542
543 if (KERNEL_STACK_SIZE < round_page_32(KERNEL_STACK_SIZE)) { /* Kernel stacks must be multiples of pages */
544 panic("thread_init: kernel stack size (%08X) must be a multiple of page size (%08X)\n",
545 KERNEL_STACK_SIZE, PAGE_SIZE);
546 }
547
548 for(stack_alloc_bndry = PAGE_SIZE; stack_alloc_bndry <= KERNEL_STACK_SIZE; stack_alloc_bndry <<= 1); /* Find next power of 2 above stack size */
549
550 ret = kmem_suballoc(kernel_map, /* Suballocate from the kernel map */
552 &stack,
553 (stack_alloc_bndry * (2*THREAD_MAX + 64)), /* Allocate enough for all of it */
554 FALSE, /* Say not pageable so that it is wired */
555 TRUE, /* Allocate from anywhere */
556 &stack_map); /* Allocate a submap */
557
558 if(ret != KERN_SUCCESS) { /* Did we get one? */
559 panic("thread_init: kmem_suballoc for stacks failed - ret = %d\n", ret); /* Die */
560 }
561 stack = vm_map_min(stack_map); /* Make sure we skip the first hunk */
562 ret = vm_map_enter(stack_map, &stack, PAGE_SIZE, 0, /* Make sure there is nothing at the start */
563 0, /* Force it at start */
564 VM_OBJECT_NULL, 0, /* No object yet */
565 FALSE, /* No copy */
566 VM_PROT_NONE, /* Allow no access */
567 VM_PROT_NONE, /* Allow no access */
568 VM_INHERIT_DEFAULT); /* Just be normal */
569
570 if(ret != KERN_SUCCESS) { /* Did it work? */
571 panic("thread_init: dummy alignment allocation failed; ret = %d\n", ret);
572 }
573
574 #endif /* MACHINE_STACK */
575
576 /*
577 * Initialize any machine-dependent
578 * per-thread structures necessary.
579 */
580 machine_thread_init();
581 }
582
583 /*
584 * Called at splsched.
585 */
586 void
587 thread_reaper_enqueue(
588 thread_t thread)
589 {
590 simple_lock(&reaper_lock);
591 enqueue_tail(&reaper_queue, (queue_entry_t)thread);
592 simple_unlock(&reaper_lock);
593
594 thread_wakeup((event_t)&reaper_queue);
595 }
596
597 void
598 thread_termination_continue(void)
599 {
600 panic("thread_termination_continue");
601 /*NOTREACHED*/
602 }
603
604 /*
605 * Routine: thread_terminate_self
606 *
607 * This routine is called by a thread which has unwound from
608 * its current RPC and kernel contexts and found that its
609 * root activation has been marked for extinction. This lets
610 * it clean up the last few things that can only be cleaned
611 * up in this context and then impale itself on the reaper
612 * queue.
613 *
614 * When the reaper gets the thread, it will deallocate the
615 * thread_act's reference on itself, which in turn will release
616 * its own reference on this thread. By doing things in that
617 * order, a thread_act will always have a valid thread - but the
618 * thread may persist beyond having a thread_act (but must never
619 * run like that).
620 */
621 void
622 thread_terminate_self(void)
623 {
624 thread_act_t thr_act = current_act();
625 thread_t thread;
626 task_t task = thr_act->task;
627 long active_acts;
628 spl_t s;
629
630 /*
631 * We should be at the base of the inheritance chain.
632 */
633 thread = act_lock_thread(thr_act);
634 assert(thr_act->thread == thread);
635
636 /* This will allow no more control ops on this thr_act. */
637 ipc_thr_act_disable(thr_act);
638
639 /* Clean-up any ulocks that are still owned by the thread
640 * activation (acquired but not released or handed-off).
641 */
642 act_ulock_release_all(thr_act);
643
644 act_unlock_thread(thr_act);
645
646 _mk_sp_thread_depress_abort(thread, TRUE);
647
648 /*
649 * Check to see if this is the last active activation. By
650 * this we mean the last activation to call thread_terminate_self.
651 * If so, and the task is associated with a BSD process, we
652 * need to call BSD and let them clean up.
653 */
654 active_acts = hw_atomic_sub(&task->active_thread_count, 1);
655
656 if (active_acts == 0 && task->bsd_info)
657 proc_exit(task->bsd_info);
658
659 /* JMM - for now, no migration */
660 assert(!thr_act->lower);
661
662 thread_timer_terminate();
663
664 ipc_thread_terminate(thread);
665
666 s = splsched();
667 thread_lock(thread);
668 thread->state |= TH_TERMINATE;
669 assert((thread->state & TH_UNINT) == 0);
670 thread_mark_wait_locked(thread, THREAD_UNINT);
671 assert(thread->promotions == 0);
672 thread_unlock(thread);
673 /* splx(s); */
674
675 ETAP_SET_REASON(thread, BLOCKED_ON_TERMINATION);
676 thread_block(thread_termination_continue);
677 /*NOTREACHED*/
678 }
679
680 /*
681 * Create a new thread.
682 * Doesn't start the thread running.
683 */
684 static kern_return_t
685 thread_create_internal(
686 task_t parent_task,
687 integer_t priority,
688 void (*start)(void),
689 thread_t *out_thread)
690 {
691 thread_t new_thread;
692 processor_set_t pset;
693 static thread_t first_thread;
694
695 /*
696 * Allocate a thread and initialize static fields
697 */
698 if (first_thread == NULL)
699 new_thread = first_thread = current_act();
700 else
701 new_thread = (thread_t)zalloc(thread_zone);
702 if (new_thread == NULL)
703 return (KERN_RESOURCE_SHORTAGE);
704
705 if (new_thread != first_thread)
706 *new_thread = thread_template;
707
708 #ifdef MACH_BSD
709 {
710 extern void *uthread_alloc(task_t, thread_act_t);
711
712 new_thread->uthread = uthread_alloc(parent_task, new_thread);
713 if (new_thread->uthread == NULL) {
714 zfree(thread_zone, (vm_offset_t)new_thread);
715 return (KERN_RESOURCE_SHORTAGE);
716 }
717 }
718 #endif /* MACH_BSD */
719
720 if (machine_thread_create(new_thread, parent_task) != KERN_SUCCESS) {
721 #ifdef MACH_BSD
722 {
723 extern void uthread_free(task_t, void *, void *);
724 void *ut = new_thread->uthread;
725
726 new_thread->uthread = NULL;
727 uthread_free(parent_task, ut, parent_task->bsd_info);
728 }
729 #endif /* MACH_BSD */
730 zfree(thread_zone, (vm_offset_t)new_thread);
731 return (KERN_FAILURE);
732 }
733
734 new_thread->task = parent_task;
735
736 thread_lock_init(new_thread);
737 wake_lock_init(new_thread);
738
739 mutex_init(&new_thread->lock, ETAP_THREAD_ACT);
740
741 ipc_thr_act_init(parent_task, new_thread);
742
743 ipc_thread_init(new_thread);
744 queue_init(&new_thread->held_ulocks);
745 act_prof_init(new_thread, parent_task);
746
747 new_thread->continuation = start;
748 new_thread->sleep_stamp = sched_tick;
749
750 pset = parent_task->processor_set;
751 assert(pset == &default_pset);
752 pset_lock(pset);
753
754 task_lock(parent_task);
755 assert(parent_task->processor_set == pset);
756
757 if ( !parent_task->active ||
758 (parent_task->thread_count >= THREAD_MAX &&
759 parent_task != kernel_task)) {
760 task_unlock(parent_task);
761 pset_unlock(pset);
762
763 #ifdef MACH_BSD
764 {
765 extern void uthread_free(task_t, void *, void *);
766 void *ut = new_thread->uthread;
767
768 new_thread->uthread = NULL;
769 uthread_free(parent_task, ut, parent_task->bsd_info);
770 }
771 #endif /* MACH_BSD */
772 act_prof_deallocate(new_thread);
773 ipc_thr_act_terminate(new_thread);
774 machine_thread_destroy(new_thread);
775 zfree(thread_zone, (vm_offset_t) new_thread);
776 return (KERN_FAILURE);
777 }
778
779 act_attach(new_thread, new_thread);
780
781 task_reference_locked(parent_task);
782
783 /* Cache the task's map */
784 new_thread->map = parent_task->map;
785
786 /* Chain the thread onto the task's list */
787 queue_enter(&parent_task->threads, new_thread, thread_act_t, task_threads);
788 parent_task->thread_count++;
789 parent_task->res_thread_count++;
790
791 /* So terminating threads don't need to take the task lock to decrement */
792 hw_atomic_add(&parent_task->active_thread_count, 1);
793
794 /* Associate the thread with the processor set */
795 pset_add_thread(pset, new_thread);
796
797 thread_timer_setup(new_thread);
798
799 /* Set the thread's scheduling parameters */
800 if (parent_task != kernel_task)
801 new_thread->sched_mode |= TH_MODE_TIMESHARE;
802 new_thread->max_priority = parent_task->max_priority;
803 new_thread->task_priority = parent_task->priority;
804 new_thread->priority = (priority < 0)? parent_task->priority: priority;
805 if (new_thread->priority > new_thread->max_priority)
806 new_thread->priority = new_thread->max_priority;
807 new_thread->importance =
808 new_thread->priority - new_thread->task_priority;
809 new_thread->sched_stamp = sched_tick;
810 compute_priority(new_thread, FALSE);
811
812 #if ETAP_EVENT_MONITOR
813 new_thread->etap_reason = 0;
814 new_thread->etap_trace = FALSE;
815 #endif /* ETAP_EVENT_MONITOR */
816
817 new_thread->active = TRUE;
818
819 *out_thread = new_thread;
820
821 {
822 long dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4;
823
824 kdbg_trace_data(parent_task->bsd_info, &dbg_arg2);
825
826 KERNEL_DEBUG_CONSTANT(
827 TRACEDBG_CODE(DBG_TRACE_DATA, 1) | DBG_FUNC_NONE,
828 (vm_address_t)new_thread, dbg_arg2, 0, 0, 0);
829
830 kdbg_trace_string(parent_task->bsd_info,
831 &dbg_arg1, &dbg_arg2, &dbg_arg3, &dbg_arg4);
832
833 KERNEL_DEBUG_CONSTANT(
834 TRACEDBG_CODE(DBG_TRACE_STRING, 1) | DBG_FUNC_NONE,
835 dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, 0);
836 }
837
838 return (KERN_SUCCESS);
839 }
840
841 extern void thread_bootstrap_return(void);
842
843 kern_return_t
844 thread_create(
845 task_t task,
846 thread_act_t *new_thread)
847 {
848 kern_return_t result;
849 thread_t thread;
850
851 if (task == TASK_NULL || task == kernel_task)
852 return (KERN_INVALID_ARGUMENT);
853
854 result = thread_create_internal(task, -1, thread_bootstrap_return, &thread);
855 if (result != KERN_SUCCESS)
856 return (result);
857
858 thread->user_stop_count = 1;
859 thread_hold(thread);
860 if (task->suspend_count > 0)
861 thread_hold(thread);
862
863 pset_unlock(task->processor_set);
864 task_unlock(task);
865
866 *new_thread = thread;
867
868 return (KERN_SUCCESS);
869 }
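
/*
 * Illustrative sketch (not part of the original source): how a kernel-side
 * caller might use thread_create().  The new activation comes back held
 * (user_stop_count == 1), so it must be given register state and resumed
 * before it will run -- which is essentially what thread_create_running()
 * below does in a single call.  thread_set_state() and thread_resume() are
 * defined elsewhere; reference and right management is elided here.
 */
#if 0
static kern_return_t
example_spawn_thread(
	task_t			task,
	int			flavor,
	thread_state_t		state,
	mach_msg_type_number_t	count)
{
	thread_act_t	act;
	kern_return_t	kr;

	kr = thread_create(task, &act);
	if (kr != KERN_SUCCESS)
		return (kr);

	kr = thread_set_state(act, flavor, state, count);
	if (kr == KERN_SUCCESS)
		kr = thread_resume(act);	/* drops the hold taken by thread_create */

	return (kr);
}
#endif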
870
871 kern_return_t
872 thread_create_running(
873 register task_t task,
874 int flavor,
875 thread_state_t new_state,
876 mach_msg_type_number_t new_state_count,
877 thread_act_t *new_thread)
878 {
879 register kern_return_t result;
880 thread_t thread;
881
882 if (task == TASK_NULL || task == kernel_task)
883 return (KERN_INVALID_ARGUMENT);
884
885 result = thread_create_internal(task, -1, thread_bootstrap_return, &thread);
886 if (result != KERN_SUCCESS)
887 return (result);
888
889 result = machine_thread_set_state(thread, flavor, new_state, new_state_count);
890 if (result != KERN_SUCCESS) {
891 pset_unlock(task->processor_set);
892 task_unlock(task);
893
894 thread_terminate(thread);
895 act_deallocate(thread);
896 return (result);
897 }
898
899 act_lock(thread);
900 clear_wait(thread, THREAD_AWAKENED);
901 thread->started = TRUE;
902 act_unlock(thread);
903 pset_unlock(task->processor_set);
904 task_unlock(task);
905
906 *new_thread = thread;
907
908 return (result);
909 }
910
911 /*
912 * kernel_thread:
913 *
914 * Create a thread in the kernel task
915 * to execute in kernel context.
916 */
917 thread_t
918 kernel_thread_create(
919 void (*start)(void),
920 integer_t priority)
921 {
922 kern_return_t result;
923 task_t task = kernel_task;
924 thread_t thread;
925
926 result = thread_create_internal(task, priority, start, &thread);
927 if (result != KERN_SUCCESS)
928 return (THREAD_NULL);
929
930 pset_unlock(task->processor_set);
931 task_unlock(task);
932
933 thread_doswapin(thread);
934 assert(thread->kernel_stack != 0);
935 thread->reserved_stack = thread->kernel_stack;
936
937 act_deallocate(thread);
938
939 return (thread);
940 }
941
942 thread_t
943 kernel_thread_with_priority(
944 void (*start)(void),
945 integer_t priority)
946 {
947 thread_t thread;
948
949 thread = kernel_thread_create(start, priority);
950 if (thread == THREAD_NULL)
951 return (THREAD_NULL);
952
953 act_lock(thread);
954 clear_wait(thread, THREAD_AWAKENED);
955 thread->started = TRUE;
956 act_unlock(thread);
957
958 #ifdef i386
959 thread_bind(thread, master_processor);
960 #endif /* i386 */
961 return (thread);
962 }
963
964 thread_t
965 kernel_thread(
966 task_t task,
967 void (*start)(void))
968 {
969 if (task != kernel_task)
970 panic("kernel_thread");
971
972 return kernel_thread_with_priority(start, -1);
973 }
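
/*
 * Illustrative sketch (not part of the original source): the typical way a
 * kernel daemon is brought up with the routines above -- a continuation-
 * style loop handed to kernel_thread_with_priority(), the same pattern the
 * reaper thread below uses.  The queue and function names are hypothetical.
 */
#if 0
static queue_head_t example_queue;

static void
example_daemon_continue(void)
{
	(void) splsched();
	/* ... drain example_queue here ... */
	assert_wait((event_t)&example_queue, THREAD_UNINT);
	(void) spllo();

	thread_block(example_daemon_continue);
	/*NOTREACHED*/
}

void
example_daemon_init(void)
{
	kernel_thread_with_priority(example_daemon_continue, MINPRI_KERNEL);
}
#endif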
974
975 unsigned int c_weird_pset_ref_exit = 0; /* pset code raced us */
976
977 #if MACH_HOST
978 /* Preclude thread processor set assignment */
979 #define thread_freeze(thread) assert((thread)->processor_set == &default_pset)
980
981 /* Allow thread processor set assignment */
982 #define thread_unfreeze(thread) assert((thread)->processor_set == &default_pset)
983
984 #endif /* MACH_HOST */
985
986 void
987 thread_deallocate(
988 thread_t thread)
989 {
990 task_t task;
991 processor_set_t pset;
992 int refs;
993 spl_t s;
994
995 if (thread == THREAD_NULL)
996 return;
997
998 /*
999 * First, check for new count > 0 (the common case).
1000 * Only the thread needs to be locked.
1001 */
1002 s = splsched();
1003 thread_lock(thread);
1004 refs = --thread->ref_count;
1005 thread_unlock(thread);
1006 splx(s);
1007
1008 if (refs > 0)
1009 return;
1010
1011 if (thread == current_thread())
1012 panic("thread_deallocate");
1013
1014 /*
1015 * There is a dangling pointer to the thread from the
1016 * processor_set. To clean it up, we freeze the thread
1017 * in the pset (because pset destruction can cause even
1018 * reference-less threads to be reassigned to the default
1019 * pset) and then remove it.
1020 */
1021
1022 #if MACH_HOST
1023 thread_freeze(thread);
1024 #endif
1025
1026 pset = thread->processor_set;
1027 pset_lock(pset);
1028 pset_remove_thread(pset, thread);
1029 pset_unlock(pset);
1030
1031 #if MACH_HOST
1032 thread_unfreeze(thread);
1033 #endif
1034
1035 pset_deallocate(pset);
1036
1037 if (thread->reserved_stack != 0) {
1038 if (thread->reserved_stack != thread->kernel_stack)
1039 stack_free_stack(thread->reserved_stack);
1040 thread->reserved_stack = 0;
1041 }
1042
1043 if (thread->kernel_stack != 0)
1044 stack_free(thread);
1045
1046 machine_thread_destroy(thread);
1047
1048 zfree(thread_zone, (vm_offset_t) thread);
1049 }
1050
1051 void
1052 thread_reference(
1053 thread_t thread)
1054 {
1055 spl_t s;
1056
1057 if (thread == THREAD_NULL)
1058 return;
1059
1060 s = splsched();
1061 thread_lock(thread);
1062 thread_reference_locked(thread);
1063 thread_unlock(thread);
1064 splx(s);
1065 }
1066
1067 /*
1068 * Called with "appropriate" thread-related locks held on
1069 * thread and its top_act for synchrony with RPC (see
1070 * act_lock_thread()).
1071 */
1072 kern_return_t
1073 thread_info_shuttle(
1074 register thread_act_t thr_act,
1075 thread_flavor_t flavor,
1076 thread_info_t thread_info_out, /* ptr to OUT array */
1077 mach_msg_type_number_t *thread_info_count) /*IN/OUT*/
1078 {
1079 register thread_t thread = thr_act->thread;
1080 int state, flags;
1081 spl_t s;
1082
1083 if (thread == THREAD_NULL)
1084 return (KERN_INVALID_ARGUMENT);
1085
1086 if (flavor == THREAD_BASIC_INFO) {
1087 register thread_basic_info_t basic_info;
1088
1089 if (*thread_info_count < THREAD_BASIC_INFO_COUNT)
1090 return (KERN_INVALID_ARGUMENT);
1091
1092 basic_info = (thread_basic_info_t) thread_info_out;
1093
1094 s = splsched();
1095 thread_lock(thread);
1096
1097 /* fill in info */
1098
1099 thread_read_times(thread, &basic_info->user_time,
1100 &basic_info->system_time);
1101
1102 /*
1103 * Update lazy-evaluated scheduler info because someone wants it.
1104 */
1105 if (thread->sched_stamp != sched_tick)
1106 update_priority(thread);
1107
1108 basic_info->sleep_time = 0;
1109
1110 /*
1111 * To calculate cpu_usage, first correct for timer rate,
1112 * then for 5/8 ageing. The correction factor [3/5] is
1113 * (1/(5/8) - 1).
1114 */
1115 basic_info->cpu_usage = (thread->cpu_usage << SCHED_TICK_SHIFT) /
1116 (TIMER_RATE / TH_USAGE_SCALE);
1117 basic_info->cpu_usage = (basic_info->cpu_usage * 3) / 5;
1118 #if SIMPLE_CLOCK
1119 /*
1120 * Clock drift compensation.
1121 */
1122 basic_info->cpu_usage = (basic_info->cpu_usage * 1000000) / sched_usec;
1123 #endif /* SIMPLE_CLOCK */
1124
1125 basic_info->policy = ((thread->sched_mode & TH_MODE_TIMESHARE)?
1126 POLICY_TIMESHARE: POLICY_RR);
1127
1128 flags = 0;
1129 if (thread->state & TH_IDLE)
1130 flags |= TH_FLAGS_IDLE;
1131
1132 if (thread->state & TH_STACK_HANDOFF)
1133 flags |= TH_FLAGS_SWAPPED;
1134
1135 state = 0;
1136 if (thread->state & TH_TERMINATE)
1137 state = TH_STATE_HALTED;
1138 else
1139 if (thread->state & TH_RUN)
1140 state = TH_STATE_RUNNING;
1141 else
1142 if (thread->state & TH_UNINT)
1143 state = TH_STATE_UNINTERRUPTIBLE;
1144 else
1145 if (thread->state & TH_SUSP)
1146 state = TH_STATE_STOPPED;
1147 else
1148 if (thread->state & TH_WAIT)
1149 state = TH_STATE_WAITING;
1150
1151 basic_info->run_state = state;
1152 basic_info->flags = flags;
1153
1154 basic_info->suspend_count = thr_act->user_stop_count;
1155
1156 thread_unlock(thread);
1157 splx(s);
1158
1159 *thread_info_count = THREAD_BASIC_INFO_COUNT;
1160
1161 return (KERN_SUCCESS);
1162 }
1163 else
1164 if (flavor == THREAD_SCHED_TIMESHARE_INFO) {
1165 policy_timeshare_info_t ts_info;
1166
1167 if (*thread_info_count < POLICY_TIMESHARE_INFO_COUNT)
1168 return (KERN_INVALID_ARGUMENT);
1169
1170 ts_info = (policy_timeshare_info_t)thread_info_out;
1171
1172 s = splsched();
1173 thread_lock(thread);
1174
1175 if (!(thread->sched_mode & TH_MODE_TIMESHARE)) {
1176 thread_unlock(thread);
1177 splx(s);
1178
1179 return (KERN_INVALID_POLICY);
1180 }
1181
1182 ts_info->depressed = (thread->sched_mode & TH_MODE_ISDEPRESSED) != 0;
1183 if (ts_info->depressed) {
1184 ts_info->base_priority = DEPRESSPRI;
1185 ts_info->depress_priority = thread->priority;
1186 }
1187 else {
1188 ts_info->base_priority = thread->priority;
1189 ts_info->depress_priority = -1;
1190 }
1191
1192 ts_info->cur_priority = thread->sched_pri;
1193 ts_info->max_priority = thread->max_priority;
1194
1195 thread_unlock(thread);
1196 splx(s);
1197
1198 *thread_info_count = POLICY_TIMESHARE_INFO_COUNT;
1199
1200 return (KERN_SUCCESS);
1201 }
1202 else
1203 if (flavor == THREAD_SCHED_FIFO_INFO) {
1204 if (*thread_info_count < POLICY_FIFO_INFO_COUNT)
1205 return (KERN_INVALID_ARGUMENT);
1206
1207 return (KERN_INVALID_POLICY);
1208 }
1209 else
1210 if (flavor == THREAD_SCHED_RR_INFO) {
1211 policy_rr_info_t rr_info;
1212
1213 if (*thread_info_count < POLICY_RR_INFO_COUNT)
1214 return (KERN_INVALID_ARGUMENT);
1215
1216 rr_info = (policy_rr_info_t) thread_info_out;
1217
1218 s = splsched();
1219 thread_lock(thread);
1220
1221 if (thread->sched_mode & TH_MODE_TIMESHARE) {
1222 thread_unlock(thread);
1223 splx(s);
1224
1225 return (KERN_INVALID_POLICY);
1226 }
1227
1228 rr_info->depressed = (thread->sched_mode & TH_MODE_ISDEPRESSED) != 0;
1229 if (rr_info->depressed) {
1230 rr_info->base_priority = DEPRESSPRI;
1231 rr_info->depress_priority = thread->priority;
1232 }
1233 else {
1234 rr_info->base_priority = thread->priority;
1235 rr_info->depress_priority = -1;
1236 }
1237
1238 rr_info->max_priority = thread->max_priority;
1239 rr_info->quantum = std_quantum_us / 1000;
1240
1241 thread_unlock(thread);
1242 splx(s);
1243
1244 *thread_info_count = POLICY_RR_INFO_COUNT;
1245
1246 return (KERN_SUCCESS);
1247 }
1248
1249 return (KERN_INVALID_ARGUMENT);
1250 }
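
/*
 * Illustrative sketch (not part of the original source): the flavor/count
 * negotiation above as seen from the caller's side of the MIG interface.
 * This is user-level code, shown here for illustration only.
 */
#if 0
#include <mach/mach.h>
#include <stdio.h>

static void
example_report_self_usage(void)
{
	thread_basic_info_data_t	info;
	mach_msg_type_number_t		count = THREAD_BASIC_INFO_COUNT;

	if (thread_info(mach_thread_self(), THREAD_BASIC_INFO,
			(thread_info_t)&info, &count) == KERN_SUCCESS)
		printf("cpu_usage = %d of %d\n", info.cpu_usage, TH_USAGE_SCALE);
}
#endif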
1251
1252 void
1253 thread_doreap(
1254 register thread_t thread)
1255 {
1256 thread_act_t thr_act;
1257
1258
1259 thr_act = thread_lock_act(thread);
1260 assert(thr_act && thr_act->thread == thread);
1261
1262 act_reference_locked(thr_act);
1263
1264 /*
1265 * Replace `act_unlock_thread()' with individual
1266 * calls. (`act_detach()' can change fields used
1267 * to determine which locks are held, confusing
1268 * `act_unlock_thread()'.)
1269 */
1270 act_unlock(thr_act);
1271
1272 /* Remove the reference held by a rooted thread */
1273 act_deallocate(thr_act);
1274
1275 /* Remove the reference held by the thread: */
1276 act_deallocate(thr_act);
1277 }
1278
1279 /*
1280 * reaper_thread:
1281 *
1282 * This kernel thread runs forever looking for terminating
1283 * threads, releasing their "self" references.
1284 */
1285 static void
1286 reaper_thread_continue(void)
1287 {
1288 register thread_t thread;
1289
1290 (void)splsched();
1291 simple_lock(&reaper_lock);
1292
1293 while ((thread = (thread_t) dequeue_head(&reaper_queue)) != THREAD_NULL) {
1294 simple_unlock(&reaper_lock);
1295 (void)spllo();
1296
1297 thread_doreap(thread);
1298
1299 (void)splsched();
1300 simple_lock(&reaper_lock);
1301 }
1302
1303 assert_wait((event_t)&reaper_queue, THREAD_UNINT);
1304 simple_unlock(&reaper_lock);
1305 (void)spllo();
1306
1307 thread_block(reaper_thread_continue);
1308 /*NOTREACHED*/
1309 }
1310
1311 static void
1312 reaper_thread(void)
1313 {
1314 reaper_thread_continue();
1315 /*NOTREACHED*/
1316 }
1317
1318 void
1319 thread_reaper_init(void)
1320 {
1321 kernel_thread_with_priority(reaper_thread, MINPRI_KERNEL);
1322 }
1323
1324 kern_return_t
1325 thread_assign(
1326 thread_act_t thr_act,
1327 processor_set_t new_pset)
1328 {
1329 return(KERN_FAILURE);
1330 }
1331
1332 /*
1333 * thread_assign_default:
1334 *
1335 * Special version of thread_assign for assigning threads to default
1336 * processor set.
1337 */
1338 kern_return_t
1339 thread_assign_default(
1340 thread_act_t thr_act)
1341 {
1342 return (thread_assign(thr_act, &default_pset));
1343 }
1344
1345 /*
1346 * thread_get_assignment
1347 *
1348 * Return current assignment for this thread.
1349 */
1350 kern_return_t
1351 thread_get_assignment(
1352 thread_act_t thr_act,
1353 processor_set_t *pset)
1354 {
1355 thread_t thread;
1356
1357 if (thr_act == THR_ACT_NULL)
1358 return(KERN_INVALID_ARGUMENT);
1359 thread = act_lock_thread(thr_act);
1360 if (thread == THREAD_NULL) {
1361 act_unlock_thread(thr_act);
1362 return(KERN_INVALID_ARGUMENT);
1363 }
1364 *pset = thread->processor_set;
1365 act_unlock_thread(thr_act);
1366 pset_reference(*pset);
1367 return(KERN_SUCCESS);
1368 }
1369
1370 /*
1371 * thread_wire_internal:
1372 *
1373 * Specify that the target thread must always be able
1374 * to run and to allocate memory.
1375 */
1376 kern_return_t
1377 thread_wire_internal(
1378 host_priv_t host_priv,
1379 thread_act_t thr_act,
1380 boolean_t wired,
1381 boolean_t *prev_state)
1382 {
1383 spl_t s;
1384 thread_t thread;
1385 extern void vm_page_free_reserve(int pages);
1386
1387 if (thr_act == THR_ACT_NULL || host_priv == HOST_PRIV_NULL)
1388 return (KERN_INVALID_ARGUMENT);
1389
1390 assert(host_priv == &realhost);
1391
1392 thread = act_lock_thread(thr_act);
1393 if (thread == THREAD_NULL) {
1394 act_unlock_thread(thr_act);
1395 return(KERN_INVALID_ARGUMENT);
1396 }
1397
1398 /*
1399 * This implementation only works for the current thread.
1400 */
1401 if (thr_act != current_act()) {
1402 act_unlock_thread(thr_act);
1402 return (KERN_INVALID_ARGUMENT);
1402 }
1403
1404 s = splsched();
1405 thread_lock(thread);
1406
1407 if (prev_state) {
1408 *prev_state = thread->vm_privilege;
1409 }
1410
1411 if (wired) {
1412 if (thread->vm_privilege == FALSE)
1413 vm_page_free_reserve(1); /* XXX */
1414 thread->vm_privilege = TRUE;
1415 } else {
1416 if (thread->vm_privilege == TRUE)
1417 vm_page_free_reserve(-1); /* XXX */
1418 thread->vm_privilege = FALSE;
1419 }
1420
1421 thread_unlock(thread);
1422 splx(s);
1423 act_unlock_thread(thr_act);
1424
1425 return KERN_SUCCESS;
1426 }
1427
1428
1429 /*
1430 * thread_wire:
1431 *
1432 * User-api wrapper for thread_wire_internal()
1433 */
1434 kern_return_t
1435 thread_wire(
1436 host_priv_t host_priv,
1437 thread_act_t thr_act,
1438 boolean_t wired)
1440 {
1441 return thread_wire_internal(host_priv, thr_act, wired, NULL);
1442 }
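
/*
 * Illustrative sketch (not part of the original source): a kernel thread
 * wiring itself so that it can always run and allocate memory, then
 * restoring the previous state.  Only the current thread may be wired
 * (see the check in thread_wire_internal() above); host_priv_self() is
 * defined elsewhere in the kernel.
 */
#if 0
static void
example_wire_self(void)
{
	boolean_t	prev;

	(void) thread_wire_internal(host_priv_self(), current_act(), TRUE, &prev);

	/* ... work that must not wait for pageout ... */

	(void) thread_wire_internal(host_priv_self(), current_act(), prev, NULL);
}
#endif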
1443
1444 kern_return_t
1445 host_stack_usage(
1446 host_t host,
1447 vm_size_t *reservedp,
1448 unsigned int *totalp,
1449 vm_size_t *spacep,
1450 vm_size_t *residentp,
1451 vm_size_t *maxusagep,
1452 vm_offset_t *maxstackp)
1453 {
1454 #if !MACH_DEBUG
1455 return KERN_NOT_SUPPORTED;
1456 #else
1457 unsigned int total;
1458 vm_size_t maxusage;
1459
1460 if (host == HOST_NULL)
1461 return KERN_INVALID_HOST;
1462
1463 maxusage = 0;
1464
1465 stack_statistics(&total, &maxusage);
1466
1467 *reservedp = 0;
1468 *totalp = total;
1469 *spacep = *residentp = total * round_page_32(KERNEL_STACK_SIZE);
1470 *maxusagep = maxusage;
1471 *maxstackp = 0;
1472 return KERN_SUCCESS;
1473
1474 #endif /* MACH_DEBUG */
1475 }
1476
1477 /*
1478 * Return info on stack usage for threads in a specific processor set
1479 */
1480 kern_return_t
1481 processor_set_stack_usage(
1482 processor_set_t pset,
1483 unsigned int *totalp,
1484 vm_size_t *spacep,
1485 vm_size_t *residentp,
1486 vm_size_t *maxusagep,
1487 vm_offset_t *maxstackp)
1488 {
1489 #if !MACH_DEBUG
1490 return KERN_NOT_SUPPORTED;
1491 #else
1492 unsigned int total;
1493 vm_size_t maxusage;
1494 vm_offset_t maxstack;
1495
1496 register thread_t *threads;
1497 register thread_t thread;
1498
1499 unsigned int actual; /* this many thread pointers */
1500 unsigned int i;
1501
1502 vm_size_t size, size_needed;
1503 vm_offset_t addr;
1504
1505 spl_t s;
1506
1507 if (pset == PROCESSOR_SET_NULL)
1508 return KERN_INVALID_ARGUMENT;
1509
1510 size = 0; addr = 0;
1511
1512 for (;;) {
1513 pset_lock(pset);
1514 if (!pset->active) {
1515 pset_unlock(pset);
1516 return KERN_INVALID_ARGUMENT;
1517 }
1518
1519 actual = pset->thread_count;
1520
1521 /* do we have the memory we need? */
1522
1523 size_needed = actual * sizeof(thread_t);
1524 if (size_needed <= size)
1525 break;
1526
1527 /* unlock the pset and allocate more memory */
1528 pset_unlock(pset);
1529
1530 if (size != 0)
1531 kfree(addr, size);
1532
1533 assert(size_needed > 0);
1534 size = size_needed;
1535
1536 addr = kalloc(size);
1537 if (addr == 0)
1538 return KERN_RESOURCE_SHORTAGE;
1539 }
1540
1541 /* OK, have memory and the processor_set is locked & active */
1542 s = splsched();
1543 threads = (thread_t *) addr;
1544 for (i = 0, thread = (thread_t) queue_first(&pset->threads);
1545 !queue_end(&pset->threads, (queue_entry_t) thread);
1546 thread = (thread_t) queue_next(&thread->pset_threads)) {
1547 thread_lock(thread);
1548 if (thread->ref_count > 0) {
1549 thread_reference_locked(thread);
1550 threads[i++] = thread;
1551 }
1552 thread_unlock(thread);
1553 }
1554 splx(s);
1555 assert(i <= actual);
1556
1557 /* can unlock processor set now that we have the thread refs */
1558 pset_unlock(pset);
1559
1560 /* calculate maxusage and free thread references */
1561
1562 total = 0;
1563 maxusage = 0;
1564 maxstack = 0;
1565 while (i > 0) {
1566 thread_t thread = threads[--i];
1567
1568 if (thread->kernel_stack != 0)
1569 total++;
1570
1571 thread_deallocate(thread);
1572 }
1573
1574 if (size != 0)
1575 kfree(addr, size);
1576
1577 *totalp = total;
1578 *residentp = *spacep = total * round_page_32(KERNEL_STACK_SIZE);
1579 *maxusagep = maxusage;
1580 *maxstackp = maxstack;
1581 return KERN_SUCCESS;
1582
1583 #endif /* MACH_DEBUG */
1584 }
1585
1586 int split_funnel_off = 0;
1587 funnel_t *
1588 funnel_alloc(
1589 int type)
1590 {
1591 mutex_t *m;
1592 funnel_t * fnl;
1593 if ((fnl = (funnel_t *)kalloc(sizeof(funnel_t))) != 0){
1594 bzero((void *)fnl, sizeof(funnel_t));
1595 if ((m = mutex_alloc(0)) == (mutex_t *)NULL) {
1596 kfree((vm_offset_t)fnl, sizeof(funnel_t));
1597 return(THR_FUNNEL_NULL);
1598 }
1599 fnl->fnl_mutex = m;
1600 fnl->fnl_type = type;
1601 }
1602 return(fnl);
1603 }
1604
1605 void
1606 funnel_free(
1607 funnel_t * fnl)
1608 {
1609 mutex_free(fnl->fnl_mutex);
1610 if (fnl->fnl_oldmutex)
1611 mutex_free(fnl->fnl_oldmutex);
1612 kfree((vm_offset_t)fnl, sizeof(funnel_t));
1613 }
1614
1615 void
1616 funnel_lock(
1617 funnel_t * fnl)
1618 {
1619 mutex_t * m;
1620
1621 m = fnl->fnl_mutex;
1622 restart:
1623 mutex_lock(m);
1624 fnl->fnl_mtxholder = current_thread();
1625 if (split_funnel_off && (m != fnl->fnl_mutex)) {
1626 mutex_unlock(m);
1627 m = fnl->fnl_mutex;
1628 goto restart;
1629 }
1630 }
1631
1632 void
1633 funnel_unlock(
1634 funnel_t * fnl)
1635 {
1636 mutex_unlock(fnl->fnl_mutex);
1637 fnl->fnl_mtxrelease = current_thread();
1638 }
1639
1640 int refunnel_hint_enabled = 0;
1641
1642 boolean_t
1643 refunnel_hint(
1644 thread_t thread,
1645 wait_result_t wresult)
1646 {
1647 if ( !(thread->funnel_state & TH_FN_REFUNNEL) ||
1648 wresult != THREAD_AWAKENED )
1649 return (FALSE);
1650
1651 if (!refunnel_hint_enabled)
1652 return (FALSE);
1653
1654 return (mutex_preblock(thread->funnel_lock->fnl_mutex, thread));
1655 }
1656
1657 funnel_t *
1658 thread_funnel_get(
1659 void)
1660 {
1661 thread_t th = current_thread();
1662
1663 if (th->funnel_state & TH_FN_OWNED) {
1664 return(th->funnel_lock);
1665 }
1666 return(THR_FUNNEL_NULL);
1667 }
1668
1669 boolean_t
1670 thread_funnel_set(
1671 funnel_t * fnl,
1672 boolean_t funneled)
1673 {
1674 thread_t cur_thread;
1675 boolean_t funnel_state_prev;
1676 boolean_t intr;
1677
1678 cur_thread = current_thread();
1679 funnel_state_prev = ((cur_thread->funnel_state & TH_FN_OWNED) == TH_FN_OWNED);
1680
1681 if (funnel_state_prev != funneled) {
1682 intr = ml_set_interrupts_enabled(FALSE);
1683
1684 if (funneled == TRUE) {
1685 if (cur_thread->funnel_lock)
1686 panic("Funnel lock called when holding one %x", cur_thread->funnel_lock);
1687 KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE,
1688 fnl, 1, 0, 0, 0);
1689 funnel_lock(fnl);
1690 KERNEL_DEBUG(0x6032434 | DBG_FUNC_NONE,
1691 fnl, 1, 0, 0, 0);
1692 cur_thread->funnel_state |= TH_FN_OWNED;
1693 cur_thread->funnel_lock = fnl;
1694 } else {
1695 if(cur_thread->funnel_lock->fnl_mutex != fnl->fnl_mutex)
1696 panic("Funnel unlock when not holding funnel");
1697 cur_thread->funnel_state &= ~TH_FN_OWNED;
1698 KERNEL_DEBUG(0x603242c | DBG_FUNC_NONE,
1699 fnl, 1, 0, 0, 0);
1700
1701 cur_thread->funnel_lock = THR_FUNNEL_NULL;
1702 funnel_unlock(fnl);
1703 }
1704 (void)ml_set_interrupts_enabled(intr);
1705 } else {
1706 /* If we are asked to acquire a funnel while already funneled,
1707 * it must be the same funnel we are holding.
1708 */
1709 if (funneled && (fnl->fnl_mutex != cur_thread->funnel_lock->fnl_mutex)) {
1710 panic("thread_funnel_set: already holding a different funnel");
1711 }
1712 }
1713 return(funnel_state_prev);
1714 }
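
/*
 * Illustrative sketch (not part of the original source): the usual BSD-side
 * pattern for thread_funnel_set() -- take the kernel funnel, remember the
 * previous state, and restore it on the way out.  kernel_flock is the
 * kernel funnel allocated by the BSD component.
 */
#if 0
extern funnel_t *kernel_flock;

static void
example_with_kernel_funnel(void)
{
	boolean_t	funnel_state;

	funnel_state = thread_funnel_set(kernel_flock, TRUE);

	/* ... code that must run under the kernel funnel ... */

	(void) thread_funnel_set(kernel_flock, funnel_state);
}
#endif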
1715
1716 boolean_t
1717 thread_funnel_merge(
1718 funnel_t * fnl,
1719 funnel_t * otherfnl)
1720 {
1721 mutex_t * m;
1722 mutex_t * otherm;
1723 funnel_t * gfnl;
1724 extern int disable_funnel;
1725
1726 if ((gfnl = thread_funnel_get()) == THR_FUNNEL_NULL)
1727 panic("thread_funnel_merge called with no funnels held");
1728
1729 if (gfnl->fnl_type != 1)
1730 panic("thread_funnel_merge called from non kernel funnel");
1731
1732 if (gfnl != fnl)
1733 panic("thread_funnel_merge incorrect invocation");
1734
1735 if (disable_funnel || split_funnel_off)
1736 return (KERN_FAILURE);
1737
1738 m = fnl->fnl_mutex;
1739 otherm = otherfnl->fnl_mutex;
1740
1741 /* Acquire other funnel mutex */
1742 mutex_lock(otherm);
1743 split_funnel_off = 1;
1744 disable_funnel = 1;
1745 otherfnl->fnl_mutex = m;
1746 otherfnl->fnl_type = fnl->fnl_type;
1747 otherfnl->fnl_oldmutex = otherm; /* save this for future use */
1748
1749 mutex_unlock(otherm);
1750 return(KERN_SUCCESS);
1751 }
1752
1753 void
1754 thread_set_cont_arg(
1755 int arg)
1756 {
1757 thread_t self = current_thread();
1758
1759 self->saved.misc = arg;
1760 }
1761
1762 int
1763 thread_get_cont_arg(void)
1764 {
1765 thread_t self = current_thread();
1766
1767 return (self->saved.misc);
1768 }
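
/*
 * Illustrative sketch (not part of the original source): saved.misc carries
 * a single integer across a continuation, since the kernel stack is given
 * up at thread_block().  The function and event names are hypothetical;
 * thread_syscall_return() is defined elsewhere.
 */
#if 0
static void
example_wait_continue(void)
{
	int	which = thread_get_cont_arg();	/* recover the argument */

	/* ... finish handling `which' on a fresh stack ... */
	thread_syscall_return(KERN_SUCCESS);
	/*NOTREACHED*/
}

static void
example_wait(
	int		which,
	event_t		event)
{
	thread_set_cont_arg(which);		/* survives the stack handoff */
	assert_wait(event, THREAD_UNINT);
	thread_block(example_wait_continue);
	/*NOTREACHED*/
}
#endif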
1769
1770 /*
1771 * Export routines to other components for things that are done as macros
1772 * within the osfmk component.
1773 */
1774 #undef thread_should_halt
1775 boolean_t
1776 thread_should_halt(
1777 thread_t th)
1778 {
1779 return(thread_should_halt_fast(th));
1780 }
1781
1782 vm_offset_t min_valid_stack_address(void)
1783 {
1784 return vm_map_min(stack_map);
1785 }
1786
1787 vm_offset_t max_valid_stack_address(void)
1788 {
1789 return vm_map_max(stack_map);
1790 }