/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young, David Golub
 *
 *	Thread/thread_shuttle management primitives implementation.
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Computer Systems Laboratory (CSL).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
 * improvements that they make and grant CSL redistribution rights.
 */
#include <mach_host.h>
#include <simple_clock.h>
#include <mach_debug.h>
#include <mach_prof.h>
#include <stack_usage.h>

#include <mach/boolean.h>
#include <mach/policy.h>
#include <mach/thread_info.h>
#include <mach/thread_special_ports.h>
#include <mach/thread_status.h>
#include <mach/time_value.h>
#include <mach/vm_param.h>

#include <kern/cpu_data.h>
#include <kern/counters.h>
#include <kern/etap_macros.h>
#include <kern/ipc_mig.h>
#include <kern/ipc_tt.h>
#include <kern/mach_param.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/mk_sp.h>	/*** ??? fix so this can be removed ***/
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_act.h>
#include <kern/thread_swap.h>
#include <kern/host.h>
#include <kern/zalloc.h>
#include <vm/vm_kern.h>
#include <ipc/ipc_kmsg.h>
#include <ipc/ipc_port.h>
#include <machine/thread.h>	/* for MACHINE_STACK */
#include <kern/profile.h>
#include <kern/assert.h>
#include <sys/kdebug.h>

/*
 * Exported interfaces
 */
#include <mach/thread_act_server.h>
#include <mach/mach_host_server.h>
/*
 * Per-Cpu stashed global state
 */
vm_offset_t		active_stacks[NCPUS];	/* per-cpu active stacks	*/
vm_offset_t		kernel_stack[NCPUS];	/* top of active stacks		*/
thread_act_t		active_kloaded[NCPUS];	/*  + act if kernel loaded	*/

struct zone		*thread_shuttle_zone;

queue_head_t		reaper_queue;
decl_simple_lock_data(,reaper_lock)
thread_call_t		thread_reaper_call;
extern void		pcb_module_init(void);

static struct thread_shuttle	thr_sh_template;

#if	MACH_DEBUG
#if	STACK_USAGE
static void	stack_init(vm_offset_t stack, unsigned int bytes);
void		stack_finalize(vm_offset_t stack);
vm_size_t	stack_usage(vm_offset_t stack);
#else	/* STACK_USAGE */
#define stack_init(stack, size)
#define stack_finalize(stack)
#define stack_usage(stack) (vm_size_t)0
#endif	/* STACK_USAGE */

void		stack_statistics(
			unsigned int	*totalp,
			vm_size_t	*maxusagep);

#define	STACK_MARKER	0xdeadbeef
#if	STACK_USAGE
boolean_t	stack_check_usage = TRUE;
#else	/* STACK_USAGE */
boolean_t	stack_check_usage = FALSE;
#endif	/* STACK_USAGE */
decl_simple_lock_data(,stack_usage_lock)
vm_size_t	stack_max_usage = 0;
vm_size_t	stack_max_use = KERNEL_STACK_SIZE - 64;
#endif	/* MACH_DEBUG */
void		thread_collect_scan(void);

kern_return_t	thread_create_shuttle(
			thread_act_t	thr_act,
			integer_t	priority,
			void		(*start)(void),
			thread_t	*new_thread);

extern void	Load_context(thread_t thread);
/*
 *	Machine-dependent code must define:
 *		thread_machine_init
 *		thread_machine_terminate
 *		thread_machine_collect
 *
 *	The thread->pcb field is reserved for machine-dependent code.
 */

#ifdef	MACHINE_STACK
/*
 *	Machine-dependent code must define the stack routines
 *	(stack_alloc, stack_free, stack_collect, stack_alloc_try,
 *	and stack_statistics under MACH_DEBUG).
 */
#else	/* MACHINE_STACK */

/*
 *	We allocate stacks from generic kernel VM.
 *	Machine-dependent code must define:
 *		machine_kernel_stack_init
 *
 *	The stack_free_list can only be accessed at splsched,
 *	because stack_alloc_try/thread_invoke operate at splsched.
 */

decl_simple_lock_data(,stack_lock_data)		/* splsched only */
#define stack_lock()	simple_lock(&stack_lock_data)
#define stack_unlock()	simple_unlock(&stack_lock_data)
vm_offset_t	stack_free_list;	/* splsched only */
unsigned int	stack_free_max = 0;
unsigned int	stack_free_count = 0;	/* splsched only */
unsigned int	stack_free_limit = 1;	/* patchable */

unsigned int	stack_alloc_hits = 0;	/* debugging */
unsigned int	stack_alloc_misses = 0;	/* debugging */

unsigned int	stack_alloc_total = 0;
unsigned int	stack_alloc_hiwater = 0;

/*
 *	The next field is at the base of the stack,
 *	so the low end is left unsullied.
 */

#define stack_next(stack) (*((vm_offset_t *)((stack) + KERNEL_STACK_SIZE) - 1))
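
/*
 * Illustrative sketch (compiled out): the free list links stacks through
 * their own memory via the stack_next() macro above, so no separate list
 * nodes are needed.  The helpers below are examples only, not part of the
 * original interface; locking and spl handling are deliberately elided.
 */
#if 0	/* illustrative example */
static void
example_stack_push(vm_offset_t stack)
{
	/* store the old list head in the top word of the freed stack */
	stack_next(stack) = stack_free_list;
	stack_free_list = stack;
	stack_free_count++;
}

static vm_offset_t
example_stack_pop(void)
{
	vm_offset_t stack = stack_free_list;

	if (stack != 0) {
		stack_free_list = stack_next(stack);
		stack_free_count--;
	}
	return (stack);
}
#endif	/* illustrative example */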
/*
 *	stack_alloc:
 *
 *	Allocate a kernel stack for an activation.
 */
vm_offset_t
stack_alloc(
	thread_t	thread,
	void		(*start_pos)(thread_t))
{
	vm_offset_t	stack;

	/*
	 *	We first try the free list.  It is probably empty,
	 *	or stack_alloc_try would have succeeded, but possibly
	 *	a stack was freed before the swapin thread got to us.
	 */
	stack = stack_free_list;
	if (stack != 0) {
		stack_free_list = stack_next(stack);
		stack_free_count--;
	}

	if (stack == 0) {
		/*
		 *	Kernel stacks should be naturally aligned,
		 *	so that it is easy to find the starting/ending
		 *	addresses of a stack given an address in the middle.
		 */
		if (kmem_alloc_aligned(kernel_map, &stack,
				round_page(KERNEL_STACK_SIZE)) != KERN_SUCCESS)
			panic("stack_alloc");

		stack_alloc_total++;
		if (stack_alloc_total > stack_alloc_hiwater)
			stack_alloc_hiwater = stack_alloc_total;

#if	MACH_DEBUG
		stack_init(stack, round_page(KERNEL_STACK_SIZE));
#endif	/* MACH_DEBUG */

		/*
		 * If using fractional pages, free the remainder(s)
		 */
		if (KERNEL_STACK_SIZE < round_page(KERNEL_STACK_SIZE)) {
			vm_offset_t ptr  = stack + KERNEL_STACK_SIZE;
			vm_offset_t endp = stack + round_page(KERNEL_STACK_SIZE);

			while (ptr < endp) {
#if	MACH_DEBUG
				/*
				 * We need to initialize just the end of the
				 * allocated region.
				 */
				stack_init(ptr, (unsigned int) (endp - ptr));
#endif	/* MACH_DEBUG */
				stack_next(stack) = stack_free_list;
				stack_free_list = stack;
				if (++stack_free_count > stack_free_max)
					stack_free_max = stack_free_count;

				ptr += KERNEL_STACK_SIZE;
			}
		}
	}

	stack_attach(thread, stack, start_pos);
	return (stack);
}
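
/*
 * Illustrative sketch (compiled out): because stacks are allocated with
 * kmem_alloc_aligned(), the enclosing stack can be recovered from any
 * address within it by masking.  This assumes round_page(KERNEL_STACK_SIZE)
 * is a power of two; the helper is an example, not part of this file.
 */
#if 0	/* illustrative example */
static vm_offset_t
example_stack_base(vm_offset_t addr_in_stack)
{
	vm_offset_t mask = round_page(KERNEL_STACK_SIZE) - 1;

	return (addr_in_stack & ~mask);	/* low end of the enclosing stack */
}
#endif	/* illustrative example */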
/*
 *	stack_free:
 *
 *	Free a kernel stack.
 *	Called at splsched.
 */
void
stack_free(
	thread_t	thread)
{
	vm_offset_t stack = stack_detach(thread);

	if (stack != thread->stack_privilege) {
		stack_next(stack) = stack_free_list;
		stack_free_list = stack;
		if (++stack_free_count > stack_free_max)
			stack_free_max = stack_free_count;
	}
}
/*
 *	stack_collect:
 *
 *	Free excess kernel stacks.
 */
void
stack_collect(void)
{
	register vm_offset_t	stack;

	/*
	 * If using fractional pages, we cannot just call kmem_free(),
	 * and we're too lazy to coalesce small chunks.
	 */
	if (KERNEL_STACK_SIZE < round_page(KERNEL_STACK_SIZE))
		return;

	while (stack_free_count > stack_free_limit) {
		stack = stack_free_list;
		stack_free_list = stack_next(stack);
		stack_free_count--;
#if	MACH_DEBUG
		stack_finalize(stack);
#endif	/* MACH_DEBUG */
		kmem_free(kernel_map, stack, KERNEL_STACK_SIZE);
	}
}
#if	MACH_DEBUG
/*
 *	stack_statistics:
 *
 *	Return statistics on cached kernel stacks.
 *	*maxusagep must be initialized by the caller.
 */
void
stack_statistics(
	unsigned int	*totalp,
	vm_size_t	*maxusagep)
{
#if	STACK_USAGE
	if (stack_check_usage) {
		vm_offset_t	stack;

		/*
		 *	This is pretty expensive to do at splsched,
		 *	but it only happens when someone makes
		 *	a debugging call, so it should be OK.
		 */
		for (stack = stack_free_list; stack != 0;
		     stack = stack_next(stack)) {
			vm_size_t usage = stack_usage(stack);

			if (usage > *maxusagep)
				*maxusagep = usage;
		}
	}
#endif	/* STACK_USAGE */

	*totalp = stack_free_count;
}
#endif	/* MACH_DEBUG */

#endif	/* MACHINE_STACK */
void
stack_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size,
		     vm_size_t *elem_size, vm_size_t *alloc_size,
		     int *collectable, int *exhaustable)
{
	*count      = stack_alloc_total - stack_free_count;
	*cur_size   = KERNEL_STACK_SIZE * stack_alloc_total;
	*max_size   = KERNEL_STACK_SIZE * stack_alloc_hiwater;
	*elem_size  = KERNEL_STACK_SIZE;
	*alloc_size = KERNEL_STACK_SIZE;
	*collectable = 1;
	*exhaustable = 0;
}
/*
 *	stack_privilege:
 *
 *	stack_alloc_try on this thread must always succeed.
 */
void
stack_privilege(
	register thread_t	thread)
{
	/*
	 *	This implementation only works for the current thread.
	 */
	if (thread != current_thread())
		panic("stack_privilege");

	if (thread->stack_privilege == 0)
		thread->stack_privilege = current_stack();
}
/*
 *	stack_alloc_try:
 *
 *	Non-blocking attempt to allocate a kernel stack.
 *	Called at splsched with the thread locked.
 */
boolean_t
stack_alloc_try(
	thread_t	thread,
	void		(*start_pos)(thread_t))
{
	register vm_offset_t	stack;

	if ((stack = thread->stack_privilege) == (vm_offset_t)0) {
		stack = stack_free_list;
		if (stack != (vm_offset_t)0) {
			stack_free_list = stack_next(stack);
			stack_free_count--;
		}
	}

	if (stack != 0) {
		stack_attach(thread, stack, start_pos);
		stack_alloc_hits++;
		return (TRUE);
	}
	else {
		stack_alloc_misses++;
		return (FALSE);
	}
}
natural_t	min_quantum_abstime;
extern natural_t	min_quantum_ms;
/*
 *	thread_init:
 *
 *	Initialize the thread module.
 */
void
thread_init(void)
{
	thread_shuttle_zone = zinit(
			sizeof(struct thread_shuttle),
			THREAD_MAX * sizeof(struct thread_shuttle),
			THREAD_CHUNK * sizeof(struct thread_shuttle),
			"threads");

	/*
	 *	Fill in a template thread_shuttle for fast initialization.
	 *	[Fields that must be (or are typically) reset at
	 *	time of creation are so noted.]
	 */

	/* thr_sh_template.links (none) */
	thr_sh_template.runq = RUN_QUEUE_NULL;

	/* thr_sh_template.task (later) */
	/* thr_sh_template.thread_list (later) */
	/* thr_sh_template.pset_threads (later) */

	/* one ref for pset, one for activation */
	thr_sh_template.ref_count = 2;

	thr_sh_template.wait_event = NO_EVENT;
	thr_sh_template.wait_result = KERN_SUCCESS;
	thr_sh_template.wait_queue = WAIT_QUEUE_NULL;
	thr_sh_template.wake_active = FALSE;
	thr_sh_template.state = TH_WAIT|TH_UNINT;
	thr_sh_template.interruptible = TRUE;
	thr_sh_template.continuation = (void (*)(void))0;
	thr_sh_template.top_act = THR_ACT_NULL;

	thr_sh_template.importance = 0;
	thr_sh_template.sched_mode = 0;

	thr_sh_template.priority = 0;
	thr_sh_template.sched_pri = 0;
	thr_sh_template.depress_priority = -1;
	thr_sh_template.max_priority = 0;

	thr_sh_template.cpu_usage = 0;
	thr_sh_template.sched_usage = 0;
	thr_sh_template.sched_stamp = 0;
	thr_sh_template.sleep_stamp = 0;

	thr_sh_template.policy = POLICY_NULL;
	thr_sh_template.sp_state = 0;
	thr_sh_template.unconsumed_quantum = 0;

	thr_sh_template.vm_privilege = FALSE;

	timer_init(&(thr_sh_template.user_timer));
	timer_init(&(thr_sh_template.system_timer));
	thr_sh_template.user_timer_save.low = 0;
	thr_sh_template.user_timer_save.high = 0;
	thr_sh_template.system_timer_save.low = 0;
	thr_sh_template.system_timer_save.high = 0;
	thr_sh_template.cpu_delta = 0;
	thr_sh_template.sched_delta = 0;

	thr_sh_template.active = FALSE;		/* reset */

	/* thr_sh_template.processor_set (later) */

	thr_sh_template.bound_processor = PROCESSOR_NULL;

#if	MACH_HOST
	thr_sh_template.may_assign = TRUE;
	thr_sh_template.assign_active = FALSE;
#endif	/* MACH_HOST */

	thr_sh_template.funnel_state = 0;

#if	NCPUS > 1
	/* thr_sh_template.last_processor (later) */
#endif	/* NCPUS > 1 */

	/*
	 *	Initialize other data structures used in
	 *	this module.
	 */

	queue_init(&reaper_queue);
	simple_lock_init(&reaper_lock, ETAP_THREAD_REAPER);
	thr_sh_template.funnel_lock = THR_FUNNEL_NULL;

#ifndef MACHINE_STACK
	simple_lock_init(&stack_lock_data, ETAP_THREAD_STACK);
#endif	/* MACHINE_STACK */

#if	MACH_DEBUG
	simple_lock_init(&stack_usage_lock, ETAP_THREAD_STACK_USAGE);
#endif	/* MACH_DEBUG */

#if	MACH_LDEBUG
	thr_sh_template.kthread = FALSE;
	thr_sh_template.mutex_count = 0;
#endif	/* MACH_LDEBUG */

	{
		AbsoluteTime	abstime;

		clock_interval_to_absolutetime_interval(
				min_quantum_ms, 1000*NSEC_PER_USEC, &abstime);
		assert(abstime.hi == 0 && abstime.lo != 0);
		min_quantum_abstime = abstime.lo;
	}

	/*
	 *	Initialize any machine-dependent
	 *	per-thread structures necessary.
	 */
	thread_machine_init();
}
void
thread_reaper_enqueue(
	thread_t	thread)
{
	/*
	 * thread lock is already held, splsched()
	 * not necessary here.
	 */
	simple_lock(&reaper_lock);

	enqueue_tail(&reaper_queue, (queue_entry_t)thread);

	/*
	 * Since the thread has been put in the reaper_queue, it must no longer
	 * be preempted (otherwise, it could be put back in a run queue).
	 */
	thread->preempt = TH_NOT_PREEMPTABLE;

	simple_unlock(&reaper_lock);

	thread_call_enter(thread_reaper_call);
}
/*
 *	Routine:	thread_terminate_self
 *
 *	This routine is called by a thread which has unwound from
 *	its current RPC and kernel contexts and found that its
 *	root activation has been marked for extinction.  This lets
 *	it clean up the last few things that can only be cleaned
 *	up in this context and then impale itself on the reaper
 *	queue.
 *
 *	When the reaper gets the thread, it will deallocate the
 *	thread_act's reference on itself, which in turn will release
 *	its own reference on this thread.  By doing things in that
 *	order, a thread_act will always have a valid thread - but the
 *	thread may persist beyond having a thread_act (but must never
 *	run like that).
 */
void
thread_terminate_self(void)
{
	register thread_t	thread = current_thread();
	thread_act_t		thr_act = thread->top_act;
	task_t			task = thr_act->task;
	int			active_acts;

	/*
	 *	We should be at the base of the inheritance chain.
	 */
	assert(thr_act->thread == thread);

	/*
	 *	Check to see if this is the last active activation.  By
	 *	this we mean the last activation to call thread_terminate_self.
	 *	If so, and the task is associated with a BSD process, we
	 *	need to call BSD and let them clean up.
	 */
	task_lock(task);
	active_acts = --task->active_act_count;
	task_unlock(task);

	if (!active_acts && task->bsd_info)
		proc_exit(task->bsd_info);

#ifdef CALLOUT_RPC_MODEL
	if (thr_act->lower) {
		thread_act_t	prev_act;

		/*
		 * JMM - RPC will not be using a callout/stack manipulation
		 * mechanism.  Instead we will let it return normally as if
		 * from a continuation.  Accordingly, these need to be cleaned
		 * up a bit.
		 */
		act_switch_swapcheck(thread, (ipc_port_t)0);
		act_lock(thr_act);	/* hierarchy violation XXX */
		(void) switch_act(THR_ACT_NULL);
		assert(thr_act->ref_count == 1);	/* XXX */
		/* act_deallocate(thr_act);		   XXX */
		prev_act = thread->top_act;
		/*
		 * disable preemption to protect kernel stack changes
		 * disable_preemption();
		 * MACH_RPC_RET(prev_act) = KERN_RPC_SERVER_TERMINATED;
		 * machine_kernel_stack_init(thread, mach_rpc_return_error);
		 *
		 * Load_context(thread);
		 */
	}

#else /* !CALLOUT_RPC_MODEL */

	assert(!thr_act->lower);

#endif /* CALLOUT_RPC_MODEL */

	thread_lock(thread);
	thread->active = FALSE;
	thread_unlock(thread);

	thread_timer_terminate();

	/* flush any lazy HW state while in own context */
	thread_machine_flush(thr_act);

	ipc_thread_terminate(thread);

	thread_lock(thread);

	thread->state |= (TH_HALTED|TH_TERMINATE);
	assert((thread->state & TH_UNINT) == 0);

	/*
	 * Since the thread has been put in the reaper_queue, it must no longer
	 * be preempted (otherwise, it could be put back in a run queue).
	 */
	thread->preempt = TH_NOT_PREEMPTABLE;

	thread_mark_wait_locked(thread, THREAD_UNINT);
	thread_unlock(thread);

	ETAP_SET_REASON(thread, BLOCKED_ON_TERMINATION);
	thread_block((void (*)(void)) 0);
	panic("the zombie walks!");
}
/*
 *	Create a new thread.
 *	Doesn't start the thread running; it first must be attached to
 *	an activation - then use thread_go to start it.
 */
kern_return_t
thread_create_shuttle(
	thread_act_t	thr_act,
	integer_t	priority,
	void		(*start)(void),
	thread_t	*new_thread)
{
	thread_t	new_shuttle;
	task_t		parent_task = thr_act->task;
	processor_set_t	pset;
	kern_return_t	result;
	sched_policy_t	*policy;
	sf_return_t	sfr;
	int		suspcnt;

	assert(!thr_act->thread);
	assert(!thr_act->pool_port);

	/*
	 *	Allocate a thread and initialize static fields
	 */
	new_shuttle = (thread_t)zalloc(thread_shuttle_zone);
	if (new_shuttle == THREAD_NULL)
		return (KERN_RESOURCE_SHORTAGE);

	*new_shuttle = thr_sh_template;

	thread_lock_init(new_shuttle);
	rpc_lock_init(new_shuttle);
	wake_lock_init(new_shuttle);
	new_shuttle->sleep_stamp = sched_tick;

	pset = parent_task->processor_set;
	if (!pset->active)
		pset = &default_pset;

	task_lock(parent_task);

	/*
	 *	Don't need to initialize because the context switch
	 *	code will set it before it can be used.
	 */
	if (!parent_task->active) {
		task_unlock(parent_task);
		zfree(thread_shuttle_zone, (vm_offset_t) new_shuttle);
		return (KERN_FAILURE);
	}

	act_attach(thr_act, new_shuttle, 0);

	/* Chain the thr_act onto the task's list */
	queue_enter(&parent_task->thr_acts, thr_act, thread_act_t, thr_acts);
	parent_task->thr_act_count++;
	parent_task->res_act_count++;
	parent_task->active_act_count++;

	/* Associate the thread with that scheduling policy */
	new_shuttle->policy = parent_task->policy;
	policy = &sched_policy[new_shuttle->policy];
	sfr = policy->sp_ops.sp_thread_attach(policy, new_shuttle);
	if (sfr != SF_SUCCESS)
		panic("thread_create_shuttle: sp_thread_attach");

	/* Associate the thread with the processor set */
	sfr = policy->sp_ops.sp_thread_processor_set(policy, new_shuttle, pset);
	if (sfr != SF_SUCCESS)
		panic("thread_create_shuttle: sp_thread_processor_set");

	/* Set the thread's scheduling parameters */
	new_shuttle->max_priority = parent_task->max_priority;
	new_shuttle->priority = (priority < 0)? parent_task->priority: priority;
	if (new_shuttle->priority > new_shuttle->max_priority)
		new_shuttle->priority = new_shuttle->max_priority;
	sfr = policy->sp_ops.sp_thread_setup(policy, new_shuttle);
	if (sfr != SF_SUCCESS)
		panic("thread_create_shuttle: sp_thread_setup");

#if	ETAP_EVENT_MONITOR
	new_shuttle->etap_reason = 0;
	new_shuttle->etap_trace = FALSE;
#endif	/* ETAP_EVENT_MONITOR */

	new_shuttle->active = TRUE;
	thr_act->active = TRUE;

	/*
	 *	No need to lock thr_act, since it can't be known to anyone --
	 *	we set its suspend_count to one more than the task suspend_count
	 *	by calling thread_hold.
	 */
	thr_act->user_stop_count = 1;
	for (suspcnt = thr_act->task->suspend_count + 1; suspcnt; --suspcnt)
		thread_hold(thr_act);
	task_unlock(parent_task);

	/*
	 *	Thread still isn't runnable yet (our caller will do
	 *	that).  Initialize runtime-dependent fields here.
	 */
	result = thread_machine_create(new_shuttle, thr_act, thread_continue);
	assert (result == KERN_SUCCESS);

	machine_kernel_stack_init(new_shuttle, thread_continue);
	ipc_thread_init(new_shuttle);
	thread_start(new_shuttle, start);
	thread_timer_setup(new_shuttle);

	*new_thread = new_shuttle;

	{
		long	dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4;

		KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_DATA, 1)) | DBG_FUNC_NONE,
			(vm_address_t)new_shuttle, 0, 0, 0, 0);

		kdbg_trace_string(parent_task->bsd_info, &dbg_arg1, &dbg_arg2, &dbg_arg3,
			&dbg_arg4);

		KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_STRING, 1)) | DBG_FUNC_NONE,
			dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, 0);
	}

	return (KERN_SUCCESS);
}
kern_return_t
thread_create(
	task_t		task,
	thread_act_t	*new_act)
{
	thread_act_t	thr_act;
	thread_t	thread;
	kern_return_t	result;
	sched_policy_t	*policy;
	extern void	thread_bootstrap_return(void);

	if (task == TASK_NULL)
		return KERN_INVALID_ARGUMENT;

	result = act_create(task, &thr_act);
	if (result != KERN_SUCCESS)
		return (result);

	result = thread_create_shuttle(thr_act, -1, thread_bootstrap_return, &thread);
	if (result != KERN_SUCCESS) {
		act_deallocate(thr_act);
		return (result);
	}

	if (task->kernel_loaded)
		thread_user_to_kernel(thread);

	/* Start the thread running (it will immediately suspend itself). */
	thread_ast_set(thr_act, AST_APC);
	thread_lock(thread);
	thread_go_locked(thread, THREAD_AWAKENED);
	thread_unlock(thread);

	*new_act = thr_act;

	return (KERN_SUCCESS);
}
/*
 *	Update thread that belongs to a task created via kernel_task_create().
 */
void
thread_user_to_kernel(
	thread_t	thread)
{
	/*
	 * Used to set special swap_func here...
	 */
}
kern_return_t
thread_create_running(
	register task_t		parent_task,
	int			flavor,
	thread_state_t		new_state,
	mach_msg_type_number_t	new_state_count,
	thread_act_t		*child_act)	/* OUT */
{
	register kern_return_t	result;

	result = thread_create(parent_task, child_act);
	if (result != KERN_SUCCESS)
		return (result);

	result = act_machine_set_state(*child_act, flavor,
					new_state, new_state_count);
	if (result != KERN_SUCCESS) {
		(void) thread_terminate(*child_act);
		return (result);
	}

	result = thread_resume(*child_act);
	if (result != KERN_SUCCESS) {
		(void) thread_terminate(*child_act);
		return (result);
	}

	return (result);
}
/*
 *	Create a kernel thread in the specified task, and
 *	optionally start it running.
 */
thread_t
kernel_thread_with_priority(
	task_t		task,
	integer_t	priority,
	void		(*start)(void),
	boolean_t	start_running)
{
	kern_return_t	result;
	thread_t	thread;
	thread_act_t	thr_act;
	sched_policy_t	*policy;

	result = act_create(task, &thr_act);
	if (result != KERN_SUCCESS) {
		return (THREAD_NULL);
	}

	result = thread_create_shuttle(thr_act, priority, start, &thread);
	if (result != KERN_SUCCESS) {
		act_deallocate(thr_act);
		return (THREAD_NULL);
	}

	thread_swappable(thr_act, FALSE);

	thread_lock(thread);

	thr_act = thread->top_act;
#if	MACH_LDEBUG
	thread->kthread = TRUE;
#endif	/* MACH_LDEBUG */

	if (start_running)
		thread_go_locked(thread, THREAD_AWAKENED);

	thread_unlock(thread);

	if (start_running)
		thread_resume(thr_act);

	act_deallocate(thr_act);
	return (thread);
}

thread_t
kernel_thread(
	task_t		task,
	void		(*start)(void))
{
	return kernel_thread_with_priority(task, -1, start, TRUE);
}
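
/*
 * Illustrative usage sketch (compiled out): a typical caller creates a
 * kernel thread in the kernel's own task and lets it start immediately.
 * example_thread_body and example_spawn are hypothetical names; kernel_task
 * is assumed to be the kernel task exported elsewhere in the kernel.
 */
#if 0	/* illustrative example */
extern task_t	kernel_task;

static void
example_thread_body(void)
{
	for (;;) {
		/* do periodic work, then block until awakened */
		assert_wait((event_t)&example_thread_body, THREAD_UNINT);
		thread_block((void (*)(void)) 0);
	}
}

static void
example_spawn(void)
{
	/* creates the shuttle/activation pair and resumes it */
	(void) kernel_thread(kernel_task, example_thread_body);
}
#endif	/* illustrative example */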
unsigned int c_weird_pset_ref_exit = 0;	/* pset code raced us */

void
thread_deallocate(
	thread_t	thread)
{
	processor_set_t	pset;
	sched_policy_t	*policy;
	sf_return_t	sfr;

	if (thread == THREAD_NULL)
		return;

	/*
	 *	First, check for new count > 1 (the common case).
	 *	Only the thread needs to be locked.
	 */
	thread_lock(thread);
	if (--thread->ref_count > 1) {
		thread_unlock(thread);
		return;
	}

	/*
	 *	Down to pset reference, let's try to clean up.
	 *	However, the processor set may make more.  Its lock
	 *	also dominates the thread lock.  So, reverse the
	 *	order of the locks and see if it's still the last
	 *	reference.
	 */
	assert(thread->ref_count == 1);	/* Else this is an extra dealloc! */
	thread_unlock(thread);

#if	MACH_HOST
	thread_freeze(thread);
#endif	/* MACH_HOST */

	pset = thread->processor_set;
	pset_lock(pset);

	thread_lock(thread);

	if (thread->ref_count > 1) {
#if	MACH_HOST
		boolean_t need_wakeup = FALSE;

		/*
		 *	processor_set made extra reference.
		 */
		/* Inline the unfreeze */
		thread->may_assign = TRUE;
		if (thread->assign_active) {
			need_wakeup = TRUE;
			thread->assign_active = FALSE;
		}
#endif	/* MACH_HOST */
		thread_unlock(thread);
		pset_unlock(pset);
#if	MACH_HOST
		if (need_wakeup)
			thread_wakeup((event_t)&thread->assign_active);
#endif	/* MACH_HOST */
		c_weird_pset_ref_exit++;
		return;
	}
#if	MACH_HOST
	assert(thread->assign_active == FALSE);
#endif	/* MACH_HOST */

	/*
	 *	Thread only had pset reference - we can remove it.
	 */
	if (thread == current_thread())
		panic("thread deallocating itself");

	/* Detach thread (shuttle) from its sched policy */
	policy = &sched_policy[thread->policy];
	sfr = policy->sp_ops.sp_thread_detach(policy, thread);
	if (sfr != SF_SUCCESS)
		panic("thread_deallocate: sp_thread_detach");

	pset_remove_thread(pset, thread);
	thread->ref_count = 0;
	thread_unlock(thread);		/* no more references - safe */
	pset_unlock(pset);

	pset_deallocate(thread->processor_set);

	/* frees kernel stack & other MD resources */
	if (thread->stack_privilege && (thread->stack_privilege !=
			thread->kernel_stack)) {
		vm_offset_t stack;

		stack = thread->stack_privilege;
		stack_free(thread);
		thread->kernel_stack = stack;
	}
	thread->stack_privilege = 0;
	thread_machine_destroy(thread);

	zfree(thread_shuttle_zone, (vm_offset_t) thread);
}
void
thread_reference(
	thread_t	thread)
{
	if (thread == THREAD_NULL)
		return;

	thread_lock(thread);
	thread->ref_count++;
	thread_unlock(thread);
}
/*
 *	Called with "appropriate" thread-related locks held on
 *	thread and its top_act for synchrony with RPC (see
 *	act_lock_thread()).
 */
kern_return_t
thread_info_shuttle(
	register thread_act_t	thr_act,
	thread_flavor_t		flavor,
	thread_info_t		thread_info_out,	/* ptr to OUT array */
	mach_msg_type_number_t	*thread_info_count)	/* IN/OUT */
{
	register thread_t	thread = thr_act->thread;
	int			state, flags;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (flavor == THREAD_BASIC_INFO) {
		register thread_basic_info_t	basic_info;

		if (*thread_info_count < THREAD_BASIC_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		basic_info = (thread_basic_info_t) thread_info_out;

		thread_lock(thread);

		thread_read_times(thread, &basic_info->user_time,
						&basic_info->system_time);

		if (thread->policy & (POLICY_TIMESHARE|POLICY_RR|POLICY_FIFO)) {
			/*
			 *	Update lazy-evaluated scheduler info because
			 *	someone wants it.
			 */
			if (thread->sched_stamp != sched_tick)
				update_priority(thread);

			basic_info->sleep_time = 0;

			/*
			 *	To calculate cpu_usage, first correct for
			 *	timer rate, then for 5/8 ageing.  The
			 *	correction factor [3/5] is empirical.
			 */
			basic_info->cpu_usage = (thread->cpu_usage << SCHED_TICK_SHIFT) /
							(TIMER_RATE / TH_USAGE_SCALE);
			basic_info->cpu_usage = (basic_info->cpu_usage * 3) / 5;
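			/*
			 * Illustration: after the timer-rate correction above,
			 * a value of, say, 500 on the TH_USAGE_SCALE scale is
			 * reported as (500 * 3) / 5 == 300; the 3/5 ageing
			 * correction is empirical, per the comment above.
			 */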
#if	SIMPLE_CLOCK
			/*
			 *	Clock drift compensation.
			 */
			basic_info->cpu_usage =
				(basic_info->cpu_usage * 1000000) / sched_usec;
#endif	/* SIMPLE_CLOCK */
		}
		else
			basic_info->sleep_time = basic_info->cpu_usage = 0;

		basic_info->policy = thread->policy;

		flags = 0;
		if (thread->state & TH_SWAPPED_OUT)
			flags = TH_FLAGS_SWAPPED;
		else
		if (thread->state & TH_IDLE)
			flags = TH_FLAGS_IDLE;

		if (thread->state & TH_HALTED)
			state = TH_STATE_HALTED;
		else
		if (thread->state & TH_RUN)
			state = TH_STATE_RUNNING;
		else
		if (thread->state & TH_UNINT)
			state = TH_STATE_UNINTERRUPTIBLE;
		else
		if (thread->state & TH_SUSP)
			state = TH_STATE_STOPPED;
		else
		if (thread->state & TH_WAIT)
			state = TH_STATE_WAITING;

		basic_info->run_state = state;
		basic_info->flags = flags;

		basic_info->suspend_count = thr_act->user_stop_count;

		thread_unlock(thread);

		*thread_info_count = THREAD_BASIC_INFO_COUNT;

		return (KERN_SUCCESS);
	}
	else
	if (flavor == THREAD_SCHED_TIMESHARE_INFO) {
		policy_timeshare_info_t		ts_info;

		if (*thread_info_count < POLICY_TIMESHARE_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		ts_info = (policy_timeshare_info_t)thread_info_out;

		thread_lock(thread);

		if (thread->policy != POLICY_TIMESHARE) {
			thread_unlock(thread);

			return (KERN_INVALID_POLICY);
		}

		ts_info->base_priority = thread->priority;
		ts_info->max_priority = thread->max_priority;
		ts_info->cur_priority = thread->sched_pri;

		ts_info->depressed = (thread->depress_priority >= 0);
		ts_info->depress_priority = thread->depress_priority;

		thread_unlock(thread);

		*thread_info_count = POLICY_TIMESHARE_INFO_COUNT;

		return (KERN_SUCCESS);
	}
	else
	if (flavor == THREAD_SCHED_FIFO_INFO) {
		policy_fifo_info_t		fifo_info;

		if (*thread_info_count < POLICY_FIFO_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		fifo_info = (policy_fifo_info_t)thread_info_out;

		thread_lock(thread);

		if (thread->policy != POLICY_FIFO) {
			thread_unlock(thread);

			return (KERN_INVALID_POLICY);
		}

		fifo_info->base_priority = thread->priority;
		fifo_info->max_priority = thread->max_priority;

		fifo_info->depressed = (thread->depress_priority >= 0);
		fifo_info->depress_priority = thread->depress_priority;

		thread_unlock(thread);

		*thread_info_count = POLICY_FIFO_INFO_COUNT;

		return (KERN_SUCCESS);
	}
	else
	if (flavor == THREAD_SCHED_RR_INFO) {
		policy_rr_info_t		rr_info;

		if (*thread_info_count < POLICY_RR_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		rr_info = (policy_rr_info_t) thread_info_out;

		thread_lock(thread);

		if (thread->policy != POLICY_RR) {
			thread_unlock(thread);

			return (KERN_INVALID_POLICY);
		}

		rr_info->base_priority = thread->priority;
		rr_info->max_priority = thread->max_priority;
		rr_info->quantum = min_quantum_ms;

		rr_info->depressed = (thread->depress_priority >= 0);
		rr_info->depress_priority = thread->depress_priority;

		thread_unlock(thread);

		*thread_info_count = POLICY_RR_INFO_COUNT;

		return (KERN_SUCCESS);
	}

	return (KERN_INVALID_ARGUMENT);
}
void
thread_doreap(
	register thread_t	thread)
{
	thread_act_t	thr_act;
	struct ipc_port	*pool_port;

	thr_act = thread_lock_act(thread);
	assert(thr_act && thr_act->thread == thread);

	act_locked_act_reference(thr_act);
	pool_port = thr_act->pool_port;

	/*
	 *	Replace `act_unlock_thread()' with individual
	 *	calls.  (`act_detach()' can change fields used
	 *	to determine which locks are held, confusing
	 *	`act_unlock_thread()'.)
	 */
	act_detach(thr_act);
	if (pool_port != IP_NULL)
		ip_unlock(pool_port);
	act_unlock(thr_act);

	/* Remove the reference held by a rooted thread */
	if (pool_port == IP_NULL)
		act_deallocate(thr_act);

	/* Remove the reference held by the thread: */
	act_deallocate(thr_act);
}
static thread_call_data_t	thread_reaper_call_data;

/*
 *	This kernel thread runs forever looking for threads to destroy
 *	(when they request that they be destroyed, of course).
 *
 *	The reaper thread will disappear in the next revision of thread
 *	control when its function will be moved into thread_dispatch.
 */
static void
_thread_reaper(
	thread_call_param_t	p0,
	thread_call_param_t	p1)
{
	register thread_t	thread;

	simple_lock(&reaper_lock);

	while ((thread = (thread_t) dequeue_head(&reaper_queue)) != THREAD_NULL) {
		simple_unlock(&reaper_lock);

		/*
		 * wait for run bit to clear
		 */
		thread_lock(thread);
		if (thread->state & TH_RUN)
			panic("thread reaper: TH_RUN");
		thread_unlock(thread);

		thread_doreap(thread);

		simple_lock(&reaper_lock);
	}

	simple_unlock(&reaper_lock);
}

void
thread_reaper(void)
{
	thread_call_setup(&thread_reaper_call_data, _thread_reaper, NULL);
	thread_reaper_call = &thread_reaper_call_data;

	_thread_reaper(NULL, NULL);
}
kern_return_t
thread_assign(
	thread_act_t	thr_act,
	processor_set_t	new_pset)
{
#ifdef	lint
	thread++; new_pset++;
#endif	/* lint */

	return(KERN_FAILURE);
}
/*
 *	thread_assign_default:
 *
 *	Special version of thread_assign for assigning threads to default
 *	processor set.
 */
kern_return_t
thread_assign_default(
	thread_act_t	thr_act)
{
	return (thread_assign(thr_act, &default_pset));
}

/*
 *	thread_get_assignment
 *
 *	Return current assignment for this thread.
 */
kern_return_t
thread_get_assignment(
	thread_act_t	thr_act,
	processor_set_t	*pset)
{
	thread_t	thread;

	if (thr_act == THR_ACT_NULL)
		return(KERN_INVALID_ARGUMENT);
	thread = act_lock_thread(thr_act);
	if (thread == THREAD_NULL) {
		act_unlock_thread(thr_act);
		return(KERN_INVALID_ARGUMENT);
	}
	*pset = thread->processor_set;
	act_unlock_thread(thr_act);
	pset_reference(*pset);
	return(KERN_SUCCESS);
}
/*
 *	thread_wire:
 *
 *	Specify that the target thread must always be able
 *	to run and to allocate memory.
 */
kern_return_t
thread_wire(
	host_priv_t	host_priv,
	thread_act_t	thr_act,
	boolean_t	wired)
{
	thread_t	thread;
	extern void	vm_page_free_reserve(int pages);

	if (thr_act == THR_ACT_NULL || host_priv == HOST_PRIV_NULL)
		return (KERN_INVALID_ARGUMENT);

	assert(host_priv == &realhost);

	thread = act_lock_thread(thr_act);
	if (thread == THREAD_NULL) {
		act_unlock_thread(thr_act);
		return(KERN_INVALID_ARGUMENT);
	}

	/*
	 * This implementation only works for the current thread.
	 * See stack_privilege.
	 */
	if (thr_act != current_act())
		return KERN_INVALID_ARGUMENT;

	thread_lock(thread);

	if (wired) {
		if (thread->vm_privilege == FALSE)
			vm_page_free_reserve(1);	/* XXX */
		thread->vm_privilege = TRUE;
	}
	else {
		if (thread->vm_privilege == TRUE)
			vm_page_free_reserve(-1);	/* XXX */
		thread->vm_privilege = FALSE;
	}

	thread_unlock(thread);

	act_unlock_thread(thr_act);

	/*
	 * Make the thread unswappable.
	 */
	thread_swappable(thr_act, FALSE);

	return KERN_SUCCESS;
}
/*
 *	thread_collect_scan:
 *
 *	Attempt to free resources owned by threads.
 */
void
thread_collect_scan(void)
{
	/* This code runs very quickly! */
}

boolean_t thread_collect_allowed = TRUE;
unsigned thread_collect_last_tick = 0;
unsigned thread_collect_max_rate = 0;		/* in ticks */

/*
 *	consider_thread_collect:
 *
 *	Called by the pageout daemon when the system needs more free pages.
 */
void
consider_thread_collect(void)
{
	/*
	 *	By default, don't attempt thread collection more frequently
	 *	than once a second (one scheduler tick).
	 */
	if (thread_collect_max_rate == 0)
		thread_collect_max_rate = 2;	/* sched_tick has a 1-second resolution; 2 ensures an interval of at least 1 second */

	if (thread_collect_allowed &&
	    (sched_tick >
	     (thread_collect_last_tick + thread_collect_max_rate))) {
		thread_collect_last_tick = sched_tick;
		thread_collect_scan();
	}
}
#if	MACH_DEBUG
#if	STACK_USAGE
/*
 *	Determine how much of a kernel stack has actually been used,
 *	by scanning for the first word that no longer holds STACK_MARKER.
 */
vm_size_t
stack_usage(
	register vm_offset_t	stack)
{
	register unsigned int	i;

	for (i = 0; i < KERNEL_STACK_SIZE/sizeof(unsigned int); i++)
	    if (((unsigned int *)stack)[i] != STACK_MARKER)
		break;

	return KERNEL_STACK_SIZE - i * sizeof(unsigned int);
}

/*
 *	Machine-dependent code should call stack_init
 *	before doing its own initialization of the stack.
 */
static void
stack_init(
	register vm_offset_t	stack,
	unsigned int		bytes)
{
	if (stack_check_usage) {
		unsigned int	i;

		for (i = 0; i < bytes / sizeof(unsigned int); i++)
			((unsigned int *)stack)[i] = STACK_MARKER;
	}
}
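
/*
 * Illustration of the marker scheme (hypothetical sizes assumed: 4-byte
 * unsigned int, 16 KB KERNEL_STACK_SIZE): stack_init() fills the whole
 * stack with STACK_MARKER; since kernel stacks grow downward, the lowest
 * addresses are overwritten last.  If the lowest 12 KB still hold the
 * marker, stack_usage() stops its scan at i == 3072 and reports
 * 16 KB - 12 KB = 4 KB of actual use.
 */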
/*
 *	Machine-dependent code should call stack_finalize
 *	before releasing the stack memory.
 */
void
stack_finalize(
	register vm_offset_t	stack)
{
	if (stack_check_usage) {
		vm_size_t used = stack_usage(stack);

		simple_lock(&stack_usage_lock);
		if (used > stack_max_usage)
			stack_max_usage = used;
		simple_unlock(&stack_usage_lock);

		if (used > stack_max_use) {
			printf("stack usage = %x\n", used);
			panic("stack overflow");
		}
	}
}
#endif	/* STACK_USAGE */
#endif	/* MACH_DEBUG */
kern_return_t
host_stack_usage(
	host_t		host,
	vm_size_t	*reservedp,
	unsigned int	*totalp,
	vm_size_t	*spacep,
	vm_size_t	*residentp,
	vm_size_t	*maxusagep,
	vm_offset_t	*maxstackp)
{
#if	!MACH_DEBUG
	return KERN_NOT_SUPPORTED;
#else
	unsigned int	total;
	vm_size_t	maxusage;

	if (host == HOST_NULL)
		return KERN_INVALID_HOST;

	simple_lock(&stack_usage_lock);
	maxusage = stack_max_usage;
	simple_unlock(&stack_usage_lock);

	stack_statistics(&total, &maxusage);

	*reservedp = 0;
	*totalp = total;
	*spacep = *residentp = total * round_page(KERNEL_STACK_SIZE);
	*maxusagep = maxusage;
	*maxstackp = 0;
	return KERN_SUCCESS;

#endif	/* MACH_DEBUG */
}
/*
 *	Return info on stack usage for threads in a specific processor set
 */
kern_return_t
processor_set_stack_usage(
	processor_set_t	pset,
	unsigned int	*totalp,
	vm_size_t	*spacep,
	vm_size_t	*residentp,
	vm_size_t	*maxusagep,
	vm_offset_t	*maxstackp)
{
#if	!MACH_DEBUG
	return KERN_NOT_SUPPORTED;
#else
	unsigned int	total;
	vm_size_t	maxusage;
	vm_offset_t	maxstack;

	register thread_t	*threads;
	register thread_t	thread;

	unsigned int	actual;		/* this many things */
	unsigned int	i;

	vm_size_t	size, size_needed;
	vm_offset_t	addr;

	if (pset == PROCESSOR_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	size = 0; addr = 0;

	for (;;) {
		pset_lock(pset);
		if (!pset->active) {
			pset_unlock(pset);
			return KERN_INVALID_ARGUMENT;
		}

		actual = pset->thread_count;

		/* do we have the memory we need? */

		size_needed = actual * sizeof(thread_t);
		if (size_needed <= size)
			break;

		/* unlock the pset and allocate more memory */
		pset_unlock(pset);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return KERN_RESOURCE_SHORTAGE;
	}

	/* OK, have memory and the processor_set is locked & active */

	threads = (thread_t *) addr;
	for (i = 0, thread = (thread_t) queue_first(&pset->threads);
	     i < actual;
	     i++, thread = (thread_t) queue_next(&thread->pset_threads)) {
		thread_reference(thread);
		threads[i] = thread;
	}
	assert(queue_end(&pset->threads, (queue_entry_t) thread));

	/* can unlock processor set now that we have the thread refs */
	pset_unlock(pset);

	/* calculate maxusage and free thread references */

	total = 0;
	maxusage = 0;
	maxstack = 0;
	for (i = 0; i < actual; i++) {
		int		cpu;
		thread_t	thread = threads[i];
		vm_offset_t	stack = 0;

		/*
		 *	thread->kernel_stack is only accurate if the
		 *	thread isn't swapped and is not executing.
		 *
		 *	Of course, we don't have the appropriate locks
		 *	for these shenanigans.
		 */

		stack = thread->kernel_stack;

		for (cpu = 0; cpu < NCPUS; cpu++)
			if (cpu_data[cpu].active_thread == thread) {
				stack = active_stacks[cpu];
				break;
			}

		total++;
#if	STACK_USAGE
		if (stack_check_usage) {
			vm_size_t usage = stack_usage(stack);

			if (usage > maxusage) {
				maxusage = usage;
				maxstack = (vm_offset_t) thread;
			}
		}
#endif	/* STACK_USAGE */

		thread_deallocate(thread);
	}

	if (size != 0)
		kfree(addr, size);

	*totalp = actual;
	*residentp = *spacep = total * round_page(KERNEL_STACK_SIZE);
	*maxusagep = maxusage;
	*maxstackp = maxstack;
	return KERN_SUCCESS;

#endif	/* MACH_DEBUG */
}
static int split_funnel_off = 0;

funnel_t *
funnel_alloc(
	int type)
{
	mutex_t		*m;
	funnel_t	*fnl;

	if ((fnl = (funnel_t *)kalloc(sizeof(funnel_t))) != 0){
		bzero(fnl, sizeof(funnel_t));
		if ((m = mutex_alloc(0)) == (mutex_t *)NULL) {
			kfree(fnl, sizeof(funnel_t));
			return(THR_FUNNEL_NULL);
		}
		fnl->fnl_mutex = m;
		fnl->fnl_type = type;
	}
	return(fnl);
}

void
funnel_free(
	funnel_t *fnl)
{
	mutex_free(fnl->fnl_mutex);
	if (fnl->fnl_oldmutex)
		mutex_free(fnl->fnl_oldmutex);
	kfree(fnl, sizeof(funnel_t));
}
void
funnel_lock(
	funnel_t *fnl)
{
	mutex_t *m;

	m = fnl->fnl_mutex;
restart:
	mutex_lock(m);
	fnl->fnl_mtxholder = current_thread();
	if (split_funnel_off && (m != fnl->fnl_mutex)) {
		mutex_unlock(m);
		m = fnl->fnl_mutex;
		goto restart;
	}
}

void
funnel_unlock(
	funnel_t *fnl)
{
	mutex_unlock(fnl->fnl_mutex);
	fnl->fnl_mtxrelease = current_thread();
}
funnel_t *
thread_funnel_get(void)
{
	thread_t th = current_thread();

	if (th->funnel_state & TH_FN_OWNED) {
		return(th->funnel_lock);
	}
	return(THR_FUNNEL_NULL);
}
boolean_t
thread_funnel_set(
	funnel_t	*fnl,
	boolean_t	funneled)
{
	thread_t	cur_thread;
	boolean_t	funnel_state_prev;
	boolean_t	intr;

	cur_thread = current_thread();
	funnel_state_prev = ((cur_thread->funnel_state & TH_FN_OWNED) == TH_FN_OWNED);

	if (funnel_state_prev != funneled) {
		intr = ml_set_interrupts_enabled(FALSE);

		if (funneled == TRUE) {
			if (cur_thread->funnel_lock)
				panic("Funnel lock called when holding one %x", cur_thread->funnel_lock);
			KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE,
					fnl, 1, 0, 0, 0);
			funnel_lock(fnl);
			KERNEL_DEBUG(0x6032434 | DBG_FUNC_NONE,
					fnl, 1, 0, 0, 0);
			cur_thread->funnel_state |= TH_FN_OWNED;
			cur_thread->funnel_lock = fnl;
		} else {
			if(cur_thread->funnel_lock->fnl_mutex != fnl->fnl_mutex)
				panic("Funnel unlock when not holding funnel");
			cur_thread->funnel_state &= ~TH_FN_OWNED;
			KERNEL_DEBUG(0x603242c | DBG_FUNC_NONE,
					fnl, 1, 0, 0, 0);
			cur_thread->funnel_lock = THR_FUNNEL_NULL;
			funnel_unlock(fnl);
		}
		(void)ml_set_interrupts_enabled(intr);
	} else {
		/*
		 * If we are trying to acquire the funnel recursively,
		 * check that it is the funnel we already hold.
		 */
		if (funneled && (fnl->fnl_mutex != cur_thread->funnel_lock->fnl_mutex)) {
			panic("thread_funnel_set: already holding a different funnel");
		}
	}
	return(funnel_state_prev);
}
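
/*
 * Illustrative usage sketch (compiled out): callers typically bracket
 * non-reentrant code with thread_funnel_set(), saving and restoring the
 * previous state.  kernel_flock stands for whatever funnel the caller
 * uses; the helper below is an example, not part of this file.
 */
#if 0	/* illustrative example */
extern funnel_t *kernel_flock;

static void
example_funneled_work(void)
{
	boolean_t funnel_state;

	funnel_state = thread_funnel_set(kernel_flock, TRUE);	/* acquire */

	/* ... code that must run under the funnel ... */

	(void) thread_funnel_set(kernel_flock, funnel_state);	/* restore */
}
#endif	/* illustrative example */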
/*
 * Merge two funnels so that they share a single mutex.
 */
kern_return_t
thread_funnel_merge(
	funnel_t * fnl,
	funnel_t * otherfnl)
{
	mutex_t	  * m;
	mutex_t	  * otherm;
	funnel_t  * gfnl;
	extern int disable_funnel;

	if ((gfnl = thread_funnel_get()) == THR_FUNNEL_NULL)
		panic("thread_funnel_merge called with no funnels held");

	if (gfnl->fnl_type != 1)
		panic("thread_funnel_merge called from non kernel funnel");

	if (gfnl != fnl)
		panic("thread_funnel_merge incorrect invocation");

	if (disable_funnel || split_funnel_off)
		return (KERN_FAILURE);

	m = fnl->fnl_mutex;
	otherm = otherfnl->fnl_mutex;

	/* Acquire other funnel mutex */
	mutex_lock(otherm);
	split_funnel_off = 1;
	otherfnl->fnl_mutex = m;
	otherfnl->fnl_type = fnl->fnl_type;
	otherfnl->fnl_oldmutex = otherm;	/* save this for future use */

	mutex_unlock(otherm);
	return(KERN_SUCCESS);
}
void
thread_set_cont_arg(
	int	arg)
{
	thread_t th = current_thread();

	th->cont_arg = arg;
}

int
thread_get_cont_arg(void)
{
	thread_t th = current_thread();

	return(th->cont_arg);
}
/*
 * Export routines to other components for things that are done as macros
 * within the osfmk component.
 */

#undef thread_should_halt
boolean_t
thread_should_halt(
	thread_shuttle_t	th)
{
	return(thread_should_halt_fast(th));
}