2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
20 * @APPLE_LICENSE_HEADER_END@
23 * @OSF_FREE_COPYRIGHT@
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
28 * All Rights Reserved.
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
40 * Carnegie Mellon requests users of this software to return to
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
54 * Author: Avadis Tevanian, Jr., Michael Wayne Young, David Golub
57 * Thread/thread_shuttle management primitives implementation.
60 * Copyright (c) 1993 The University of Utah and
61 * the Computer Systems Laboratory (CSL). All rights reserved.
63 * Permission to use, copy, modify and distribute this software and its
64 * documentation is hereby granted, provided that both the copyright
65 * notice and this permission notice appear in all copies of the
66 * software, derivative works or modified versions, and any portions
67 * thereof, and that both notices appear in supporting documentation.
69 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
70 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
71 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
73 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
74 * improvements that they make and grant CSL redistribution rights.
79 #include <mach_host.h>
80 #include <simple_clock.h>
81 #include <mach_debug.h>
82 #include <mach_prof.h>
84 #include <mach/boolean.h>
85 #include <mach/policy.h>
86 #include <mach/thread_info.h>
87 #include <mach/thread_special_ports.h>
88 #include <mach/thread_status.h>
89 #include <mach/time_value.h>
90 #include <mach/vm_param.h>
92 #include <kern/cpu_data.h>
93 #include <kern/counters.h>
94 #include <kern/etap_macros.h>
95 #include <kern/ipc_mig.h>
96 #include <kern/ipc_tt.h>
97 #include <kern/mach_param.h>
98 #include <kern/machine.h>
99 #include <kern/misc_protos.h>
100 #include <kern/processor.h>
101 #include <kern/queue.h>
102 #include <kern/sched.h>
103 #include <kern/sched_prim.h>
104 #include <kern/mk_sp.h> /*** ??? fix so this can be removed ***/
105 #include <kern/task.h>
106 #include <kern/thread.h>
107 #include <kern/thread_act.h>
108 #include <kern/thread_swap.h>
109 #include <kern/host.h>
110 #include <kern/zalloc.h>
111 #include <vm/vm_kern.h>
112 #include <ipc/ipc_kmsg.h>
113 #include <ipc/ipc_port.h>
114 #include <machine/thread.h> /* for MACHINE_STACK */
115 #include <kern/profile.h>
116 #include <kern/assert.h>
117 #include <sys/kdebug.h>
120 * Exported interfaces
123 #include <mach/thread_act_server.h>
124 #include <mach/mach_host_server.h>
127 * Per-Cpu stashed global state
129 vm_offset_t active_stacks
[NCPUS
]; /* per-cpu active stacks */
130 vm_offset_t kernel_stack
[NCPUS
]; /* top of active stacks */
131 thread_act_t active_kloaded
[NCPUS
]; /* + act if kernel loaded */
133 struct zone
*thread_shuttle_zone
;
135 queue_head_t reaper_queue
;
136 decl_simple_lock_data(,reaper_lock
)
137 thread_call_t thread_reaper_call
;
141 extern void pcb_module_init(void);
144 static struct thread_shuttle thr_sh_template
;
149 extern void stack_statistics(
150 unsigned int *totalp
,
151 vm_size_t
*maxusagep
);
152 #endif /* MACHINE_STACK */
153 #endif /* MACH_DEBUG */
156 void thread_collect_scan(void);
158 kern_return_t
thread_create_shuttle(
159 thread_act_t thr_act
,
162 thread_t
*new_thread
);
164 extern void Load_context(
169 * Machine-dependent code must define:
170 * thread_machine_init
171 * thread_machine_terminate
172 * thread_machine_collect
174 * The thread->pcb field is reserved for machine-dependent code.
179 * Machine-dependent code must define:
188 #else /* MACHINE_STACK */
190 * We allocate stacks from generic kernel VM.
191 * Machine-dependent code must define:
192 * machine_kernel_stack_init
194 * The stack_free_list can only be accessed at splsched,
195 * because stack_alloc_try/thread_invoke operate at splsched.
198 decl_simple_lock_data(,stack_lock_data
) /* splsched only */
199 #define stack_lock() simple_lock(&stack_lock_data)
200 #define stack_unlock() simple_unlock(&stack_lock_data)
202 mutex_t stack_map_lock
; /* Lock when allocating stacks maps */
203 vm_map_t stack_map
; /* Map for allocating stacks */
204 vm_offset_t stack_free_list
; /* splsched only */
205 unsigned int stack_free_max
= 0;
206 unsigned int stack_free_count
= 0; /* splsched only */
207 unsigned int stack_free_limit
= 1; /* Arbitrary */
209 unsigned int stack_alloc_hits
= 0; /* debugging */
210 unsigned int stack_alloc_misses
= 0; /* debugging */
212 unsigned int stack_alloc_total
= 0;
213 unsigned int stack_alloc_hiwater
= 0;
214 unsigned int stack_alloc_bndry
= 0;
218 * The next field is at the base of the stack,
219 * so the low end is left unsullied.
222 #define stack_next(stack) (*((vm_offset_t *)((stack) + KERNEL_STACK_SIZE) - 1))
227 * Allocate a kernel stack for an activation.
233 void (*start_pos
)(thread_t
))
235 vm_offset_t stack
= thread
->kernel_stack
;
242 * We first try the free list. It is probably empty, or
243 * stack_alloc_try would have succeeded, but possibly a stack was
244 * freed before the swapin thread got to us.
246 * We allocate stacks from their own map which is submaps of the
247 * kernel map. Because we want to have a guard page (at least) in
248 * front of each stack to catch evil code that overruns its stack, we
249 * allocate the stack on aligned boundaries. The boundary is
250 * calculated as the next power of 2 above the stack size. For
251 * example, a stack of 4 pages would have a boundary of 8, likewise 5
254 * We limit the number of stacks to be one allocation chunk
255 * (THREAD_CHUNK) more than the maximum number of threads
256 * (THREAD_MAX). The extra is to allow for privileged threads that
257 * can sometimes have 2 stacks.
263 stack
= stack_free_list
;
265 stack_free_list
= stack_next(stack
);
271 if (stack
!= 0) { /* Did we find a free one? */
272 stack_attach(thread
, stack
, start_pos
); /* Initialize it */
273 return (stack
); /* Send it on home */
276 if (kernel_memory_allocate(
278 KERNEL_STACK_SIZE
, stack_alloc_bndry
- 1,
279 KMA_KOBJECT
) != KERN_SUCCESS
)
280 panic("stack_alloc: no space left for stack maps");
283 if (stack_alloc_total
> stack_alloc_hiwater
)
284 stack_alloc_hiwater
= stack_alloc_total
;
286 stack_attach(thread
, stack
, start_pos
);
293 * Free a kernel stack.
294 * Called at splsched.
301 vm_offset_t stack
= stack_detach(thread
);
304 if (stack
!= thread
->stack_privilege
) {
306 stack_next(stack
) = stack_free_list
;
307 stack_free_list
= stack
;
308 if (++stack_free_count
> stack_free_max
)
309 stack_free_max
= stack_free_count
;
322 stack_next(stack
) = stack_free_list
;
323 stack_free_list
= stack
;
324 if (++stack_free_count
> stack_free_max
)
325 stack_free_max
= stack_free_count
;
333 * Free excess kernel stacks.
346 while (stack_free_count
> stack_free_limit
) {
347 stack
= stack_free_list
;
348 stack_free_list
= stack_next(stack
);
354 stack_map
, stack
, stack
+ KERNEL_STACK_SIZE
,
355 VM_MAP_REMOVE_KUNWIRE
) != KERN_SUCCESS
)
356 panic("stack_collect: vm_map_remove failed");
371 * Return statistics on cached kernel stacks.
372 * *maxusagep must be initialized by the caller.
377 unsigned int *totalp
,
378 vm_size_t
*maxusagep
)
385 *totalp
= stack_free_count
;
391 #endif /* MACH_DEBUG */
393 #endif /* MACHINE_STACK */
396 stack_fake_zone_info(int *count
, vm_size_t
*cur_size
, vm_size_t
*max_size
, vm_size_t
*elem_size
,
397 vm_size_t
*alloc_size
, int *collectable
, int *exhaustable
)
399 *count
= stack_alloc_total
- stack_free_count
;
400 *cur_size
= KERNEL_STACK_SIZE
* stack_alloc_total
;
401 *max_size
= KERNEL_STACK_SIZE
* stack_alloc_hiwater
;
402 *elem_size
= KERNEL_STACK_SIZE
;
403 *alloc_size
= KERNEL_STACK_SIZE
;
412 * stack_alloc_try on this thread must always succeed.
417 register thread_t thread
)
420 * This implementation only works for the current thread.
423 if (thread
!= current_thread())
424 panic("stack_privilege");
426 if (thread
->stack_privilege
== 0)
427 thread
->stack_privilege
= current_stack();
433 * Non-blocking attempt to allocate a kernel stack.
434 * Called at splsched with the thread locked.
437 boolean_t
stack_alloc_try(
439 void (*start_pos
)(thread_t
))
441 register vm_offset_t stack
= thread
->stack_privilege
;
446 stack
= stack_free_list
;
447 if (stack
!= (vm_offset_t
)0) {
448 stack_free_list
= stack_next(stack
);
456 stack_attach(thread
, stack
, start_pos
);
462 stack_alloc_misses
++;
468 uint64_t max_unsafe_computation
;
469 extern int max_unsafe_quanta
;
471 uint32_t sched_safe_duration
;
473 uint64_t max_poll_computation
;
474 extern int max_poll_quanta
;
476 uint32_t std_quantum
;
477 uint32_t min_std_quantum
;
479 uint32_t max_rt_quantum
;
480 uint32_t min_rt_quantum
;
488 thread_shuttle_zone
= zinit(
489 sizeof(struct thread_shuttle
),
490 THREAD_MAX
* sizeof(struct thread_shuttle
),
491 THREAD_CHUNK
* sizeof(struct thread_shuttle
),
495 * Fill in a template thread_shuttle for fast initialization.
496 * [Fields that must be (or are typically) reset at
497 * time of creation are so noted.]
500 /* thr_sh_template.links (none) */
501 thr_sh_template
.runq
= RUN_QUEUE_NULL
;
504 /* thr_sh_template.task (later) */
505 /* thr_sh_template.thread_list (later) */
506 /* thr_sh_template.pset_threads (later) */
508 /* one ref for pset, one for activation */
509 thr_sh_template
.ref_count
= 2;
511 thr_sh_template
.wait_event
= NO_EVENT
;
512 thr_sh_template
.wait_result
= KERN_SUCCESS
;
513 thr_sh_template
.wait_queue
= WAIT_QUEUE_NULL
;
514 thr_sh_template
.wake_active
= FALSE
;
515 thr_sh_template
.state
= TH_STACK_HANDOFF
| TH_WAIT
| TH_UNINT
;
516 thr_sh_template
.interruptible
= TRUE
;
517 thr_sh_template
.continuation
= (void (*)(void))0;
518 thr_sh_template
.top_act
= THR_ACT_NULL
;
520 thr_sh_template
.importance
= 0;
521 thr_sh_template
.sched_mode
= 0;
522 thr_sh_template
.safe_mode
= 0;
524 thr_sh_template
.priority
= 0;
525 thr_sh_template
.sched_pri
= 0;
526 thr_sh_template
.depress_priority
= -1;
527 thr_sh_template
.max_priority
= 0;
528 thr_sh_template
.task_priority
= 0;
530 thr_sh_template
.current_quantum
= 0;
532 thr_sh_template
.metered_computation
= 0;
533 thr_sh_template
.computation_epoch
= 0;
535 thr_sh_template
.cpu_usage
= 0;
536 thr_sh_template
.cpu_delta
= 0;
537 thr_sh_template
.sched_usage
= 0;
538 thr_sh_template
.sched_delta
= 0;
539 thr_sh_template
.sched_stamp
= 0;
540 thr_sh_template
.sleep_stamp
= 0;
541 thr_sh_template
.safe_release
= 0;
543 thr_sh_template
.vm_privilege
= FALSE
;
545 timer_init(&(thr_sh_template
.user_timer
));
546 timer_init(&(thr_sh_template
.system_timer
));
547 thr_sh_template
.user_timer_save
.low
= 0;
548 thr_sh_template
.user_timer_save
.high
= 0;
549 thr_sh_template
.system_timer_save
.low
= 0;
550 thr_sh_template
.system_timer_save
.high
= 0;
552 thr_sh_template
.active
= FALSE
; /* reset */
554 /* thr_sh_template.processor_set (later) */
556 thr_sh_template
.bound_processor
= PROCESSOR_NULL
;
559 thr_sh_template
.may_assign
= TRUE
;
560 thr_sh_template
.assign_active
= FALSE
;
561 #endif /* MACH_HOST */
562 thr_sh_template
.funnel_state
= 0;
565 /* thr_sh_template.last_processor (later) */
566 #endif /* NCPUS > 1 */
569 * Initialize other data structures used in
573 queue_init(&reaper_queue
);
574 simple_lock_init(&reaper_lock
, ETAP_THREAD_REAPER
);
575 thr_sh_template
.funnel_lock
= THR_FUNNEL_NULL
;
577 #ifndef MACHINE_STACK
578 simple_lock_init(&stack_lock_data
, ETAP_THREAD_STACK
); /* Initialize the stack lock */
580 if (KERNEL_STACK_SIZE
< round_page(KERNEL_STACK_SIZE
)) { /* Kernel stacks must be multiples of pages */
581 panic("thread_init: kernel stack size (%08X) must be a multiple of page size (%08X)\n",
582 KERNEL_STACK_SIZE
, PAGE_SIZE
);
585 for(stack_alloc_bndry
= PAGE_SIZE
; stack_alloc_bndry
<= KERNEL_STACK_SIZE
; stack_alloc_bndry
<<= 1); /* Find next power of 2 above stack size */
587 ret
= kmem_suballoc(kernel_map
, /* Suballocate from the kernel map */
590 (stack_alloc_bndry
* (THREAD_MAX
+ 64)), /* Allocate enough for all of it */
591 FALSE
, /* Say not pageable so that it is wired */
592 TRUE
, /* Allocate from anywhere */
593 &stack_map
); /* Allocate a submap */
595 if(ret
!= KERN_SUCCESS
) { /* Did we get one? */
596 panic("thread_init: kmem_suballoc for stacks failed - ret = %d\n", ret
); /* Die */
598 stack
= vm_map_min(stack_map
); /* Make sure we skip the first hunk */
599 ret
= vm_map_enter(stack_map
, &stack
, PAGE_SIZE
, 0, /* Make sure there is nothing at the start */
600 0, /* Force it at start */
601 VM_OBJECT_NULL
, 0, /* No object yet */
603 VM_PROT_NONE
, /* Allow no access */
604 VM_PROT_NONE
, /* Allow no access */
605 VM_INHERIT_DEFAULT
); /* Just be normal */
607 if(ret
!= KERN_SUCCESS
) { /* Did it work? */
608 panic("thread_init: dummy alignment allocation failed; ret = %d\n", ret
);
611 #endif /* MACHINE_STACK */
614 thr_sh_template
.kthread
= FALSE
;
615 thr_sh_template
.mutex_count
= 0;
616 #endif /* MACH_LDEBUG */
621 clock_interval_to_absolutetime_interval(
622 std_quantum_us
, NSEC_PER_USEC
, &abstime
);
623 assert((abstime
>> 32) == 0 && (uint32_t)abstime
!= 0);
624 std_quantum
= abstime
;
627 clock_interval_to_absolutetime_interval(250, NSEC_PER_USEC
, &abstime
);
628 assert((abstime
>> 32) == 0 && (uint32_t)abstime
!= 0);
629 min_std_quantum
= abstime
;
632 clock_interval_to_absolutetime_interval(50, NSEC_PER_USEC
, &abstime
);
633 assert((abstime
>> 32) == 0 && (uint32_t)abstime
!= 0);
634 min_rt_quantum
= abstime
;
637 clock_interval_to_absolutetime_interval(
638 50, 1000*NSEC_PER_USEC
, &abstime
);
639 assert((abstime
>> 32) == 0 && (uint32_t)abstime
!= 0);
640 max_rt_quantum
= abstime
;
642 max_unsafe_computation
= max_unsafe_quanta
* std_quantum
;
643 max_poll_computation
= max_poll_quanta
* std_quantum
;
645 sched_safe_duration
= 2 * max_unsafe_quanta
*
646 (std_quantum_us
/ (1000 * 1000)) *
647 (1 << SCHED_TICK_SHIFT
);
651 * Initialize any machine-dependent
652 * per-thread structures necessary.
654 thread_machine_init();
658 thread_reaper_enqueue(
662 * thread lock is already held, splsched()
663 * not necessary here.
665 simple_lock(&reaper_lock
);
666 enqueue_tail(&reaper_queue
, (queue_entry_t
)thread
);
667 simple_unlock(&reaper_lock
);
669 thread_call_enter(thread_reaper_call
);
674 * Routine: thread_terminate_self
676 * This routine is called by a thread which has unwound from
677 * its current RPC and kernel contexts and found that its
678 * root activation has been marked for extinction. This lets
679 * it clean up the last few things that can only be cleaned
680 * up in this context and then impale itself on the reaper
683 * When the reaper gets the thread, it will deallocate the
684 * thread_act's reference on itself, which in turn will release
685 * its own reference on this thread. By doing things in that
686 * order, a thread_act will always have a valid thread - but the
687 * thread may persist beyond having a thread_act (but must never
691 thread_terminate_self(void)
693 register thread_t thread
= current_thread();
694 thread_act_t thr_act
= thread
->top_act
;
695 task_t task
= thr_act
->task
;
700 * We should be at the base of the inheritance chain.
702 assert(thr_act
->thread
== thread
);
704 _mk_sp_thread_depress_abort(thread
, TRUE
);
707 * Check to see if this is the last active activation. By
708 * this we mean the last activation to call thread_terminate_self.
709 * If so, and the task is associated with a BSD process, we
710 * need to call BSD and let them clean up.
713 active_acts
= --task
->active_act_count
;
715 if (!active_acts
&& task
->bsd_info
)
716 proc_exit(task
->bsd_info
);
718 #ifdef CALLOUT_RPC_MODEL
719 if (thr_act
->lower
) {
721 * JMM - RPC will not be using a callout/stack manipulation
722 * mechanism. instead we will let it return normally as if
723 * from a continuation. Accordingly, these need to be cleaned
726 act_switch_swapcheck(thread
, (ipc_port_t
)0);
727 act_lock(thr_act
); /* hierarchy violation XXX */
728 (void) switch_act(THR_ACT_NULL
);
729 assert(thr_act
->ref_count
== 1); /* XXX */
730 /* act_deallocate(thr_act); XXX */
731 prev_act
= thread
->top_act
;
733 * disable preemption to protect kernel stack changes
734 * disable_preemption();
735 * MACH_RPC_RET(prev_act) = KERN_RPC_SERVER_TERMINATED;
736 * machine_kernel_stack_init(thread, mach_rpc_return_error);
741 * Load_context(thread);
746 #else /* !CALLOUT_RPC_MODEL */
748 assert(!thr_act
->lower
);
750 #endif /* CALLOUT_RPC_MODEL */
754 thread
->active
= FALSE
;
755 thread_unlock(thread
);
758 thread_timer_terminate();
760 /* flush any lazy HW state while in own context */
761 thread_machine_flush(thr_act
);
763 ipc_thread_terminate(thread
);
767 thread
->state
|= (TH_HALTED
|TH_TERMINATE
);
768 assert((thread
->state
& TH_UNINT
) == 0);
769 thread_mark_wait_locked(thread
, THREAD_UNINT
);
770 thread_unlock(thread
);
773 ETAP_SET_REASON(thread
, BLOCKED_ON_TERMINATION
);
774 thread_block((void (*)(void)) 0);
775 panic("the zombie walks!");
781 * Create a new thread.
782 * Doesn't start the thread running; It first must be attached to
783 * an activation - then use thread_go to start it.
786 thread_create_shuttle(
787 thread_act_t thr_act
,
790 thread_t
*new_thread
)
792 thread_t new_shuttle
;
793 task_t parent_task
= thr_act
->task
;
794 processor_set_t pset
;
795 kern_return_t result
;
798 assert(!thr_act
->thread
);
799 assert(!thr_act
->pool_port
);
802 * Allocate a thread and initialize static fields
804 new_shuttle
= (thread_t
)zalloc(thread_shuttle_zone
);
805 if (new_shuttle
== THREAD_NULL
)
806 return (KERN_RESOURCE_SHORTAGE
);
808 *new_shuttle
= thr_sh_template
;
810 thread_lock_init(new_shuttle
);
811 rpc_lock_init(new_shuttle
);
812 wake_lock_init(new_shuttle
);
813 new_shuttle
->sleep_stamp
= sched_tick
;
816 * Thread still isn't runnable yet (our caller will do
817 * that). Initialize runtime-dependent fields here.
819 result
= thread_machine_create(new_shuttle
, thr_act
, thread_continue
);
820 assert (result
== KERN_SUCCESS
);
822 thread_start(new_shuttle
, start
);
823 thread_timer_setup(new_shuttle
);
824 ipc_thread_init(new_shuttle
);
826 pset
= parent_task
->processor_set
;
828 pset
= &default_pset
;
832 task_lock(parent_task
);
835 * Don't need to initialize because the context switch
836 * code will set it before it can be used.
838 if (!parent_task
->active
) {
839 task_unlock(parent_task
);
841 thread_machine_destroy(new_shuttle
);
842 zfree(thread_shuttle_zone
, (vm_offset_t
) new_shuttle
);
843 return (KERN_FAILURE
);
846 act_attach(thr_act
, new_shuttle
, 0);
848 /* Chain the thr_act onto the task's list */
849 queue_enter(&parent_task
->thr_acts
, thr_act
, thread_act_t
, thr_acts
);
850 parent_task
->thr_act_count
++;
851 parent_task
->res_act_count
++;
852 parent_task
->active_act_count
++;
854 /* Associate the thread with the processor set */
855 pset_add_thread(pset
, new_shuttle
);
857 /* Set the thread's scheduling parameters */
858 if (parent_task
!= kernel_task
)
859 new_shuttle
->sched_mode
|= TH_MODE_TIMESHARE
;
860 new_shuttle
->max_priority
= parent_task
->max_priority
;
861 new_shuttle
->task_priority
= parent_task
->priority
;
862 new_shuttle
->priority
= (priority
< 0)? parent_task
->priority
: priority
;
863 if (new_shuttle
->priority
> new_shuttle
->max_priority
)
864 new_shuttle
->priority
= new_shuttle
->max_priority
;
865 new_shuttle
->importance
=
866 new_shuttle
->priority
- new_shuttle
->task_priority
;
867 new_shuttle
->sched_stamp
= sched_tick
;
868 compute_priority(new_shuttle
, TRUE
);
870 #if ETAP_EVENT_MONITOR
871 new_thread
->etap_reason
= 0;
872 new_thread
->etap_trace
= FALSE
;
873 #endif /* ETAP_EVENT_MONITOR */
875 new_shuttle
->active
= TRUE
;
876 thr_act
->active
= TRUE
;
880 * No need to lock thr_act, since it can't be known to anyone --
881 * we set its suspend_count to one more than the task suspend_count
882 * by calling thread_hold.
884 thr_act
->user_stop_count
= 1;
885 for (suspcnt
= thr_act
->task
->suspend_count
+ 1; suspcnt
; --suspcnt
)
886 thread_hold(thr_act
);
887 task_unlock(parent_task
);
889 *new_thread
= new_shuttle
;
892 long dbg_arg1
, dbg_arg2
, dbg_arg3
, dbg_arg4
;
894 KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_DATA
, 1)) | DBG_FUNC_NONE
,
895 (vm_address_t
)new_shuttle
, 0,0,0,0);
897 kdbg_trace_string(parent_task
->bsd_info
, &dbg_arg1
, &dbg_arg2
, &dbg_arg3
,
899 KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_STRING
, 1)) | DBG_FUNC_NONE
,
900 dbg_arg1
, dbg_arg2
, dbg_arg3
, dbg_arg4
, 0);
903 return (KERN_SUCCESS
);
909 thread_act_t
*new_act
)
911 thread_act_t thr_act
;
913 kern_return_t result
;
915 extern void thread_bootstrap_return(void);
917 if (task
== TASK_NULL
)
918 return KERN_INVALID_ARGUMENT
;
920 result
= act_create(task
, &thr_act
);
921 if (result
!= KERN_SUCCESS
)
924 result
= thread_create_shuttle(thr_act
, -1, thread_bootstrap_return
, &thread
);
925 if (result
!= KERN_SUCCESS
) {
926 act_deallocate(thr_act
);
930 if (task
->kernel_loaded
)
931 thread_user_to_kernel(thread
);
933 /* Start the thread running (it will immediately suspend itself). */
935 thread_ast_set(thr_act
, AST_APC
);
937 thread_go_locked(thread
, THREAD_AWAKENED
);
938 thread_unlock(thread
);
943 return (KERN_SUCCESS
);
947 * Update thread that belongs to a task created via kernel_task_create().
950 thread_user_to_kernel(
954 * Used to set special swap_func here...
959 thread_create_running(
960 register task_t parent_task
,
962 thread_state_t new_state
,
963 mach_msg_type_number_t new_state_count
,
964 thread_act_t
*child_act
) /* OUT */
966 register kern_return_t result
;
968 result
= thread_create(parent_task
, child_act
);
969 if (result
!= KERN_SUCCESS
)
972 result
= act_machine_set_state(*child_act
, flavor
,
973 new_state
, new_state_count
);
974 if (result
!= KERN_SUCCESS
) {
975 (void) thread_terminate(*child_act
);
979 result
= thread_resume(*child_act
);
980 if (result
!= KERN_SUCCESS
) {
981 (void) thread_terminate(*child_act
);
991 * Create and kernel thread in the specified task, and
992 * optionally start it running.
995 kernel_thread_with_priority(
999 boolean_t alloc_stack
,
1000 boolean_t start_running
)
1002 kern_return_t result
;
1004 thread_act_t thr_act
;
1007 result
= act_create(task
, &thr_act
);
1008 if (result
!= KERN_SUCCESS
) {
1012 result
= thread_create_shuttle(thr_act
, priority
, start
, &thread
);
1013 if (result
!= KERN_SUCCESS
) {
1014 act_deallocate(thr_act
);
1019 thread_doswapin(thread
);
1022 thread_lock(thread
);
1024 thr_act
= thread
->top_act
;
1026 thread
->kthread
= TRUE
;
1027 #endif /* MACH_LDEBUG */
1030 thread_go_locked(thread
, THREAD_AWAKENED
);
1032 thread_unlock(thread
);
1036 thread_resume(thr_act
);
1038 act_deallocate(thr_act
);
1045 void (*start
)(void))
1047 return kernel_thread_with_priority(task
, -1, start
, FALSE
, TRUE
);
1050 unsigned int c_weird_pset_ref_exit
= 0; /* pset code raced us */
1057 processor_set_t pset
;
1060 if (thread
== THREAD_NULL
)
1064 * First, check for new count > 1 (the common case).
1065 * Only the thread needs to be locked.
1068 thread_lock(thread
);
1069 if (--thread
->ref_count
> 1) {
1070 thread_unlock(thread
);
1076 * Down to pset reference, lets try to clean up.
1077 * However, the processor set may make more. Its lock
1078 * also dominates the thread lock. So, reverse the
1079 * order of the locks and see if it's still the last
1082 assert(thread
->ref_count
== 1); /* Else this is an extra dealloc! */
1083 thread_unlock(thread
);
1087 thread_freeze(thread
);
1088 #endif /* MACH_HOST */
1090 pset
= thread
->processor_set
;
1094 thread_lock(thread
);
1096 if (thread
->ref_count
> 1) {
1098 boolean_t need_wakeup
= FALSE
;
1100 * processor_set made extra reference.
1102 /* Inline the unfreeze */
1103 thread
->may_assign
= TRUE
;
1104 if (thread
->assign_active
) {
1106 thread
->assign_active
= FALSE
;
1108 #endif /* MACH_HOST */
1109 thread_unlock(thread
);
1114 thread_wakeup((event_t
)&thread
->assign_active
);
1115 #endif /* MACH_HOST */
1116 c_weird_pset_ref_exit
++;
1120 assert(thread
->assign_active
== FALSE
);
1121 #endif /* MACH_HOST */
1124 * Thread only had pset reference - we can remove it.
1126 if (thread
== current_thread())
1127 panic("thread deallocating itself");
1129 pset_remove_thread(pset
, thread
);
1130 thread
->ref_count
= 0;
1131 thread_unlock(thread
); /* no more references - safe */
1135 pset_deallocate(thread
->processor_set
);
1137 if (thread
->stack_privilege
!= 0) {
1138 if (thread
->stack_privilege
!= thread
->kernel_stack
)
1139 stack_free_stack(thread
->stack_privilege
);
1140 thread
->stack_privilege
= 0;
1142 /* frees kernel stack & other MD resources */
1143 thread_machine_destroy(thread
);
1145 zfree(thread_shuttle_zone
, (vm_offset_t
) thread
);
1154 if (thread
== THREAD_NULL
)
1158 thread_lock(thread
);
1159 thread
->ref_count
++;
1160 thread_unlock(thread
);
1165 * Called with "appropriate" thread-related locks held on
1166 * thread and its top_act for synchrony with RPC (see
1167 * act_lock_thread()).
1170 thread_info_shuttle(
1171 register thread_act_t thr_act
,
1172 thread_flavor_t flavor
,
1173 thread_info_t thread_info_out
, /* ptr to OUT array */
1174 mach_msg_type_number_t
*thread_info_count
) /*IN/OUT*/
1176 register thread_t thread
= thr_act
->thread
;
1180 if (thread
== THREAD_NULL
)
1181 return (KERN_INVALID_ARGUMENT
);
1183 if (flavor
== THREAD_BASIC_INFO
) {
1184 register thread_basic_info_t basic_info
;
1186 if (*thread_info_count
< THREAD_BASIC_INFO_COUNT
)
1187 return (KERN_INVALID_ARGUMENT
);
1189 basic_info
= (thread_basic_info_t
) thread_info_out
;
1192 thread_lock(thread
);
1196 thread_read_times(thread
, &basic_info
->user_time
,
1197 &basic_info
->system_time
);
1200 * Update lazy-evaluated scheduler info because someone wants it.
1202 if (thread
->sched_stamp
!= sched_tick
)
1203 update_priority(thread
);
1205 basic_info
->sleep_time
= 0;
1208 * To calculate cpu_usage, first correct for timer rate,
1209 * then for 5/8 ageing. The correction factor [3/5] is
1212 basic_info
->cpu_usage
= (thread
->cpu_usage
<< SCHED_TICK_SHIFT
) /
1213 (TIMER_RATE
/ TH_USAGE_SCALE
);
1214 basic_info
->cpu_usage
= (basic_info
->cpu_usage
* 3) / 5;
1217 * Clock drift compensation.
1219 basic_info
->cpu_usage
= (basic_info
->cpu_usage
* 1000000) / sched_usec
;
1220 #endif /* SIMPLE_CLOCK */
1222 basic_info
->policy
= ((thread
->sched_mode
& TH_MODE_TIMESHARE
)?
1223 POLICY_TIMESHARE
: POLICY_RR
);
1226 if (thread
->state
& TH_IDLE
)
1227 flags
|= TH_FLAGS_IDLE
;
1229 if (thread
->state
& TH_STACK_HANDOFF
)
1230 flags
|= TH_FLAGS_SWAPPED
;
1233 if (thread
->state
& TH_HALTED
)
1234 state
= TH_STATE_HALTED
;
1236 if (thread
->state
& TH_RUN
)
1237 state
= TH_STATE_RUNNING
;
1239 if (thread
->state
& TH_UNINT
)
1240 state
= TH_STATE_UNINTERRUPTIBLE
;
1242 if (thread
->state
& TH_SUSP
)
1243 state
= TH_STATE_STOPPED
;
1245 if (thread
->state
& TH_WAIT
)
1246 state
= TH_STATE_WAITING
;
1248 basic_info
->run_state
= state
;
1249 basic_info
->flags
= flags
;
1251 basic_info
->suspend_count
= thr_act
->user_stop_count
;
1253 thread_unlock(thread
);
1256 *thread_info_count
= THREAD_BASIC_INFO_COUNT
;
1258 return (KERN_SUCCESS
);
1261 if (flavor
== THREAD_SCHED_TIMESHARE_INFO
) {
1262 policy_timeshare_info_t ts_info
;
1264 if (*thread_info_count
< POLICY_TIMESHARE_INFO_COUNT
)
1265 return (KERN_INVALID_ARGUMENT
);
1267 ts_info
= (policy_timeshare_info_t
)thread_info_out
;
1270 thread_lock(thread
);
1272 if (!(thread
->sched_mode
& TH_MODE_TIMESHARE
)) {
1273 thread_unlock(thread
);
1276 return (KERN_INVALID_POLICY
);
1279 ts_info
->base_priority
= thread
->priority
;
1280 ts_info
->max_priority
= thread
->max_priority
;
1281 ts_info
->cur_priority
= thread
->sched_pri
;
1283 ts_info
->depressed
= (thread
->depress_priority
>= 0);
1284 ts_info
->depress_priority
= thread
->depress_priority
;
1286 thread_unlock(thread
);
1289 *thread_info_count
= POLICY_TIMESHARE_INFO_COUNT
;
1291 return (KERN_SUCCESS
);
1294 if (flavor
== THREAD_SCHED_FIFO_INFO
) {
1295 if (*thread_info_count
< POLICY_FIFO_INFO_COUNT
)
1296 return (KERN_INVALID_ARGUMENT
);
1298 return (KERN_INVALID_POLICY
);
1301 if (flavor
== THREAD_SCHED_RR_INFO
) {
1302 policy_rr_info_t rr_info
;
1304 if (*thread_info_count
< POLICY_RR_INFO_COUNT
)
1305 return (KERN_INVALID_ARGUMENT
);
1307 rr_info
= (policy_rr_info_t
) thread_info_out
;
1310 thread_lock(thread
);
1312 if (thread
->sched_mode
& TH_MODE_TIMESHARE
) {
1313 thread_unlock(thread
);
1316 return (KERN_INVALID_POLICY
);
1319 rr_info
->base_priority
= thread
->priority
;
1320 rr_info
->max_priority
= thread
->max_priority
;
1321 rr_info
->quantum
= std_quantum_us
/ 1000;
1323 rr_info
->depressed
= (thread
->depress_priority
>= 0);
1324 rr_info
->depress_priority
= thread
->depress_priority
;
1326 thread_unlock(thread
);
1329 *thread_info_count
= POLICY_RR_INFO_COUNT
;
1331 return (KERN_SUCCESS
);
1334 return (KERN_INVALID_ARGUMENT
);
1339 register thread_t thread
)
1341 thread_act_t thr_act
;
1342 struct ipc_port
*pool_port
;
1345 thr_act
= thread_lock_act(thread
);
1346 assert(thr_act
&& thr_act
->thread
== thread
);
1348 act_locked_act_reference(thr_act
);
1349 pool_port
= thr_act
->pool_port
;
1352 * Replace `act_unlock_thread()' with individual
1353 * calls. (`act_detach()' can change fields used
1354 * to determine which locks are held, confusing
1355 * `act_unlock_thread()'.)
1358 if (pool_port
!= IP_NULL
)
1359 ip_unlock(pool_port
);
1360 act_unlock(thr_act
);
1362 /* Remove the reference held by a rooted thread */
1363 if (pool_port
== IP_NULL
)
1364 act_deallocate(thr_act
);
1366 /* Remove the reference held by the thread: */
1367 act_deallocate(thr_act
);
/* Pre-allocated call-out used to run the reaper; see the init code below. */
1370 static thread_call_data_t thread_reaper_call_data
;
1375 * This kernel thread runs forever looking for threads to destroy
1376 * (when they request that they be destroyed, of course).
1378 * The reaper thread will disappear in the next revision of thread
1379 * control when it's function will be moved into thread_dispatch.
/* _thread_reaper(p0, p1): drain reaper_queue, tearing each thread down.
 * Both thread_call parameters are unused. */
1383 thread_call_param_t p0
,
1384 thread_call_param_t p1
)
1386 register thread_t thread
;
/* Hold reaper_lock only while manipulating reaper_queue; drop it around
 * the per-thread work so enqueuers are not blocked. */
1390 simple_lock(&reaper_lock
);
1392 while ((thread
= (thread_t
) dequeue_head(&reaper_queue
)) != THREAD_NULL
) {
1393 simple_unlock(&reaper_lock
);
1396 * wait for run bit to clear
/* A queued thread must no longer be running; TH_RUN here is fatal. */
1398 thread_lock(thread
);
1399 if (thread
->state
& TH_RUN
)
1400 panic("thread reaper: TH_RUN");
1401 thread_unlock(thread
);
/* Release the thread's activation and references. */
1404 thread_doreap(thread
);
/* Re-take the queue lock before the next dequeue. */
1407 simple_lock(&reaper_lock
);
1410 simple_unlock(&reaper_lock
);
/* thread_reaper(): one-time init — publish the call-out, then run one
 * pass immediately to reap anything already queued. */
1417 thread_call_setup(&thread_reaper_call_data
, _thread_reaper
, NULL
);
1418 thread_reaper_call
= &thread_reaper_call_data
;
1420 _thread_reaper(NULL
, NULL
);
/* thread_assign (name inferred from the thread_assign_default comment
 * that follows): reassigning a thread to another processor set is not
 * supported in this implementation — always fails. */
1425 thread_act_t thr_act
,
1426 processor_set_t new_pset
)
/* Presumably lint-silencing touches of the parameters (originally under
 * #ifdef lint). NOTE(review): `thread' does not match the parameter
 * name `thr_act' above — confirm against the full source. */
1429 thread
++; new_pset
++;
1431 return(KERN_FAILURE
);
1435 * thread_assign_default:
1437 * Special version of thread_assign for assigning threads to default
1441 thread_assign_default(
1442 thread_act_t thr_act
)
1444 return (thread_assign(thr_act
, &default_pset
));
1448 * thread_get_assignment
1450 * Return current assignment for this thread.
1453 thread_get_assignment(
1454 thread_act_t thr_act
,
1455 processor_set_t
*pset
)
1459 if (thr_act
== THR_ACT_NULL
)
1460 return(KERN_INVALID_ARGUMENT
);
1461 thread
= act_lock_thread(thr_act
);
1462 if (thread
== THREAD_NULL
) {
1463 act_unlock_thread(thr_act
);
1464 return(KERN_INVALID_ARGUMENT
);
1466 *pset
= thread
->processor_set
;
1467 act_unlock_thread(thr_act
);
1468 pset_reference(*pset
);
1469 return(KERN_SUCCESS
);
/*
 * thread_wire (host-privileged): mark the target thread as VM-privileged
 * so it can always run and allocate memory. NOTE(review): a boolean
 * `wired' parameter and the if/else wrapping the two branches below
 * (original lines ~1482, 1509, 1513) appear to be missing from this
 * extraction — confirm against the full source.
 */
1475 * Specify that the target thread must always be able
1476 * to run and to allocate memory.
1480 host_priv_t host_priv
,
1481 thread_act_t thr_act
,
1486 extern void vm_page_free_reserve(int pages
);
/* Validate both the act and the host-privilege port. */
1488 if (thr_act
== THR_ACT_NULL
|| host_priv
== HOST_PRIV_NULL
)
1489 return (KERN_INVALID_ARGUMENT
);
1491 assert(host_priv
== &realhost
);
1493 thread
= act_lock_thread(thr_act
);
1494 if (thread
==THREAD_NULL
) {
1495 act_unlock_thread(thr_act
);
1496 return(KERN_INVALID_ARGUMENT
);
1500 * This implementation only works for the current thread.
1501 * See stack_privilege.
/* BUG(review): this early return exits while still holding the lock
 * taken by act_lock_thread() at 1493 — it lacks the
 * act_unlock_thread() that the THREAD_NULL path above performs. */
1503 if (thr_act
!= current_act())
1504 return KERN_INVALID_ARGUMENT
;
1507 thread_lock(thread
);
/* Wire branch: the first transition reserves an extra free page. */
1510 if (thread
->vm_privilege
== FALSE
)
1511 vm_page_free_reserve(1); /* XXX */
1512 thread
->vm_privilege
= TRUE
;
/* Unwire branch: give the reserved page back. */
1514 if (thread
->vm_privilege
== TRUE
)
1515 vm_page_free_reserve(-1); /* XXX */
1516 thread
->vm_privilege
= FALSE
;
1519 thread_unlock(thread
);
1521 act_unlock_thread(thr_act
);
1523 return KERN_SUCCESS
;
1527 * thread_collect_scan:
1529 * Attempt to free resources owned by threads.
/* Deliberately a no-op in this configuration — see the note below. */
1533 thread_collect_scan(void)
1535 /* This code runs very quickly! */
/* Throttle state for consider_thread_collect(); collection is off by
 * default here. */
1538 /* Also disabled in vm/vm_pageout.c */
1539 boolean_t thread_collect_allowed
= FALSE
;
1540 unsigned thread_collect_last_tick
= 0;
1541 unsigned thread_collect_max_rate
= 0; /* in ticks */
1544 * consider_thread_collect:
1546 * Called by the pageout daemon when the system needs more free pages.
1550 consider_thread_collect(void)
1553 * By default, don't attempt thread collection more frequently
1554 * than once a second.
/* Lazily initialize the rate limit to roughly one scheduler second. */
1557 if (thread_collect_max_rate
== 0)
1558 thread_collect_max_rate
= (1 << SCHED_TICK_SHIFT
) + 1;
/* NOTE(review): the middle of this condition (presumably a comparison
 * of sched_tick against last-tick + max-rate, original line 1561) is
 * missing from this extraction — verify upstream. */
1560 if (thread_collect_allowed
&&
1562 (thread_collect_last_tick
+ thread_collect_max_rate
))) {
/* Record this attempt, then run the (currently empty) scan. */
1563 thread_collect_last_tick
= sched_tick
;
1564 thread_collect_scan();
/* host_stack_usage (declarator and the leading `host'/`spacep'
 * parameters are not visible in this extraction): report kernel-stack
 * usage statistics for a host; compiles to KERN_NOT_SUPPORTED when
 * MACH_DEBUG is off. */
1571 vm_size_t
*reservedp
,
1572 unsigned int *totalp
,
1574 vm_size_t
*residentp
,
1575 vm_size_t
*maxusagep
,
1576 vm_offset_t
*maxstackp
)
/* !MACH_DEBUG arm: the counters are not maintained. */
1579 return KERN_NOT_SUPPORTED
;
/* MACH_DEBUG arm follows. */
1584 if (host
== HOST_NULL
)
1585 return KERN_INVALID_HOST
;
/* Collect totals from the kernel-stack allocator. */
1589 stack_statistics(&total
, &maxusage
);
/* Space and resident are both the full (page-rounded) stack footprint. */
1593 *spacep
= *residentp
= total
* round_page(KERNEL_STACK_SIZE
);
1594 *maxusagep
= maxusage
;
1596 return KERN_SUCCESS
;
1598 #endif /* MACH_DEBUG */
1602 * Return info on stack usage for threads in a specific processor set
1605 processor_set_stack_usage(
1606 processor_set_t pset
,
1607 unsigned int *totalp
,
1609 vm_size_t
*residentp
,
1610 vm_size_t
*maxusagep
,
1611 vm_offset_t
*maxstackp
)
/* !MACH_DEBUG arm: statistics are not maintained. */
1614 return KERN_NOT_SUPPORTED
;
/* MACH_DEBUG arm: locals for a snapshot of the pset's thread list. */
1618 vm_offset_t maxstack
;
1620 register thread_t
*threads
;
1621 register thread_t thread
;
1623 unsigned int actual
; /* this many things */
1626 vm_size_t size
, size_needed
;
1629 if (pset
== PROCESSOR_SET_NULL
)
1630 return KERN_INVALID_ARGUMENT
;
/* An inactive (being-destroyed) pset cannot be inspected. */
1636 if (!pset
->active
) {
1638 return KERN_INVALID_ARGUMENT
;
1641 actual
= pset
->thread_count
;
1643 /* do we have the memory we need? */
1645 size_needed
= actual
* sizeof(thread_t
);
1646 if (size_needed
<= size
)
1649 /* unlock the pset and allocate more memory */
1655 assert(size_needed
> 0);
/* Classic drop-lock/allocate/retry pattern: kalloc may block, so the
 * pset lock is released first; allocation shortage aborts the call. */
1658 addr
= kalloc(size
);
1660 return KERN_RESOURCE_SHORTAGE
;
1663 /* OK, have memory and the processor_set is locked & active */
1665 threads
= (thread_t
*) addr
;
/* Reference every thread on the pset so the snapshot survives
 * unlocking the pset below. */
1666 for (i
= 0, thread
= (thread_t
) queue_first(&pset
->threads
);
1669 thread
= (thread_t
) queue_next(&thread
->pset_threads
)) {
1670 thread_reference(thread
);
1671 threads
[i
] = thread
;
1673 assert(queue_end(&pset
->threads
, (queue_entry_t
) thread
));
1675 /* can unlock processor set now that we have the thread refs */
1678 /* calculate maxusage and free thread references */
1683 for (i
= 0; i
< actual
; i
++) {
1685 thread_t thread
= threads
[i
];
1686 vm_offset_t stack
= 0;
1689 * thread->kernel_stack is only accurate if the
1690 * thread isn't swapped and is not executing.
1692 * Of course, we don't have the appropriate locks
1693 * for these shenanigans.
1696 stack
= thread
->kernel_stack
;
/* A thread currently running on a cpu uses that cpu's active stack. */
1698 for (cpu
= 0; cpu
< NCPUS
; cpu
++)
1699 if (cpu_data
[cpu
].active_thread
== thread
) {
1700 stack
= active_stacks
[cpu
];
/* Drop the reference taken in the snapshot loop above. */
1708 thread_deallocate(thread
);
/* Publish the results (footprint is page-rounded per stack). */
1715 *residentp
= *spacep
= total
* round_page(KERNEL_STACK_SIZE
);
1716 *maxusagep
= maxusage
;
1717 *maxstackp
= maxstack
;
1718 return KERN_SUCCESS
;
1720 #endif /* MACH_DEBUG */
/* Nonzero once two funnels have been merged; disables further merging. */
1723 static int split_funnel_off
= 0;
/* funnel_alloc (declarator not visible in this extraction): allocate and
 * zero a funnel_t plus its backing mutex; returns THR_FUNNEL_NULL when
 * the mutex cannot be made. NOTE(review): the `fnl->fnl_mutex = m;'
 * store (original ~line 1736) appears to be missing here. */
1730 if ((fnl
= (funnel_t
*)kalloc(sizeof(funnel_t
))) != 0){
1731 bzero((void *)fnl
, sizeof(funnel_t
));
1732 if ((m
= mutex_alloc(0)) == (mutex_t
*)NULL
) {
/* Mutex allocation failed: release the funnel and report failure. */
1733 kfree((vm_offset_t
)fnl
, sizeof(funnel_t
));
1734 return(THR_FUNNEL_NULL
);
1737 fnl
->fnl_type
= type
;
/* funnel_free (declarator not visible): release the funnel's mutex(es)
 * — including a merged-away old mutex, if any — then the funnel. */
1746 mutex_free(fnl
->fnl_mutex
);
1747 if (fnl
->fnl_oldmutex
)
1748 mutex_free(fnl
->fnl_oldmutex
);
1749 kfree((vm_offset_t
)fnl
, sizeof(funnel_t
));
/* funnel_lock tail (the mutex_lock itself is not visible): record the
 * holder; re-check when merge mode swapped the mutex underneath us. */
1761 fnl
->fnl_mtxholder
= current_thread();
1762 if (split_funnel_off
&& (m
!= fnl
->fnl_mutex
)) {
/* funnel_unlock: release the mutex and record the releaser. */
1773 mutex_unlock(fnl
->fnl_mutex
);
1774 fnl
->fnl_mtxrelease
= current_thread();
/* thread_funnel_get: return the funnel the current thread owns, if any. */
1781 thread_t th
= current_thread();
1783 if (th
->funnel_state
& TH_FN_OWNED
) {
1784 return(th
->funnel_lock
);
1786 return(THR_FUNNEL_NULL
);
/* thread_funnel_set (declarator not visible in this extraction): acquire
 * or release funnel `fnl' for the current thread and return the previous
 * ownership state. NOTE(review): the funnel_lock()/funnel_unlock() calls
 * and the KERNEL_DEBUG argument lists are missing here — verify. */
1794 thread_t cur_thread
;
1795 boolean_t funnel_state_prev
;
1798 cur_thread
= current_thread();
/* TRUE iff this thread currently owns a funnel. */
1799 funnel_state_prev
= ((cur_thread
->funnel_state
& TH_FN_OWNED
) == TH_FN_OWNED
);
/* Only act when the requested state differs from the current one;
 * interrupts are disabled across the transition. */
1801 if (funnel_state_prev
!= funneled
) {
1802 intr
= ml_set_interrupts_enabled(FALSE
);
1804 if (funneled
== TRUE
) {
/* Acquiring while already recorded as holding one is fatal. */
1805 if (cur_thread
->funnel_lock
)
1806 panic("Funnel lock called when holding one %x", cur_thread
->funnel_lock
);
1807 KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE
,
1810 KERNEL_DEBUG(0x6032434 | DBG_FUNC_NONE
,
/* Record ownership once the funnel is taken. */
1812 cur_thread
->funnel_state
|= TH_FN_OWNED
;
1813 cur_thread
->funnel_lock
= fnl
;
/* Release path: must be dropping the funnel actually held. */
1815 if(cur_thread
->funnel_lock
->fnl_mutex
!= fnl
->fnl_mutex
)
1816 panic("Funnel unlock when not holding funnel");
1817 cur_thread
->funnel_state
&= ~TH_FN_OWNED
;
1818 KERNEL_DEBUG(0x603242c | DBG_FUNC_NONE
,
1821 cur_thread
->funnel_lock
= THR_FUNNEL_NULL
;
1824 (void)ml_set_interrupts_enabled(intr
);
1826 /* if we are trying to acquire funnel recursively
1827 * check for funnel to be held already
/* Recursive acquisition of a *different* funnel is fatal. */
1829 if (funneled
&& (fnl
->fnl_mutex
!= cur_thread
->funnel_lock
->fnl_mutex
)) {
1830 panic("thread_funnel_set: already holding a different funnel");
1833 return(funnel_state_prev
);
/* thread_funnel_merge: collapse `otherfnl' onto the kernel funnel (the
 * first parameter, presumably `fnl', is not visible in this extraction)
 * so both share one mutex; a one-way operation guarded by
 * split_funnel_off. */
1837 thread_funnel_merge(
1839 funnel_t
* otherfnl
)
1844 extern int disable_funnel
;
/* The caller must currently hold a funnel... */
1846 if ((gfnl
= thread_funnel_get()) == THR_FUNNEL_NULL
)
1847 panic("thread_funnel_merge called with no funnels held");
/* ...and it must be the kernel funnel (type 1). */
1849 if (gfnl
->fnl_type
!= 1)
1850 panic("thread_funnel_merge called from non kernel funnel");
1853 panic("thread_funnel_merge incorrect invocation");
/* Refuse when funnels are disabled or a merge already happened. */
1855 if (disable_funnel
|| split_funnel_off
)
1856 return (KERN_FAILURE
);
1859 otherm
= otherfnl
->fnl_mutex
;
1861 /* Acquire other funnel mutex */
1863 split_funnel_off
= 1;
/* Redirect the other funnel onto this funnel's mutex, remembering the
 * old one so funnel_free() can release it later. */
1865 otherfnl
->fnl_mutex
= m
;
1866 otherfnl
->fnl_type
= fnl
->fnl_type
;
1867 otherfnl
->fnl_oldmutex
= otherm
; /* save this for future use */
1869 mutex_unlock(otherm
);
1870 return(KERN_SUCCESS
);
/* thread_set_cont_arg: stash a scalar continuation argument on the
 * current thread (the `arg' parameter's declaration is not visible in
 * this extraction). */
1874 thread_set_cont_arg(
1877 thread_t self
= current_thread();
1879 self
->saved
.misc
= arg
;
/* thread_get_cont_arg: fetch the value stored by thread_set_cont_arg. */
1883 thread_get_cont_arg(void)
1885 thread_t self
= current_thread();
1887 return (self
->saved
.misc
);
1891 * Export routines to other components for things that are done as macros
1892 * within the osfmk component.
1894 #undef thread_should_halt
1897 thread_shuttle_t th
)
1899 return(thread_should_halt_fast(th
));