/*
 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young, David Golub
 *
 *	Thread management primitives implementation.
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Computer Systems Laboratory (CSL).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
 * improvements that they make and grant CSL redistribution rights.
 */
#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/policy.h>
#include <mach/thread_info.h>
#include <mach/thread_special_ports.h>
#include <mach/thread_status.h>
#include <mach/time_value.h>
#include <mach/vm_param.h>

#include <machine/thread.h>
#include <machine/pal_routines.h>

#include <kern/kern_types.h>
#include <kern/kalloc.h>
#include <kern/cpu_data.h>
#include <kern/counters.h>
#include <kern/extmod_statistics.h>
#include <kern/ipc_mig.h>
#include <kern/ipc_tt.h>
#include <kern/mach_param.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/sync_lock.h>
#include <kern/syscall_subr.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/host.h>
#include <kern/zalloc.h>
#include <kern/assert.h>

#include <ipc/ipc_kmsg.h>
#include <ipc/ipc_port.h>

#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>

#include <sys/kdebug.h>

#include <mach/sdt.h>

/*
 * Exported interfaces
 */
#include <mach/task_server.h>
#include <mach/thread_act_server.h>
#include <mach/mach_host_server.h>
#include <mach/host_priv_server.h>
static struct zone			*thread_zone;
static lck_grp_attr_t		thread_lck_grp_attr;
lck_attr_t					thread_lck_attr;
lck_grp_t					thread_lck_grp;

decl_simple_lock_data(static,thread_stack_lock)
static queue_head_t			thread_stack_queue;

decl_simple_lock_data(static,thread_terminate_lock)
static queue_head_t			thread_terminate_queue;

static struct thread		thread_template, init_thread;
static void		sched_call_null(
					int			type,
					thread_t	thread);
#ifdef MACH_BSD
extern void proc_exit(void *);
extern uint64_t get_dispatchqueue_offset_from_proc(void *);
#endif /* MACH_BSD */
extern int debug_task;

int thread_max = CONFIG_THREAD_MAX;		/* Max number of threads */
int task_threadmax = CONFIG_THREAD_MAX;

static uint64_t		thread_unique_id = 0;
void
thread_bootstrap(void)
{
	/*
	 *	Fill in a template thread for fast initialization.
	 */

	thread_template.runq = PROCESSOR_NULL;

	thread_template.ref_count = 2;

	thread_template.reason = AST_NONE;
	thread_template.at_safe_point = FALSE;
	thread_template.wait_event = NO_EVENT64;
	thread_template.wait_queue = WAIT_QUEUE_NULL;
	thread_template.wait_result = THREAD_WAITING;
	thread_template.options = THREAD_ABORTSAFE;
	thread_template.state = TH_WAIT | TH_UNINT;
	thread_template.wake_active = FALSE;
	thread_template.continuation = THREAD_CONTINUE_NULL;
	thread_template.parameter = NULL;

	thread_template.importance = 0;
	thread_template.sched_mode = TH_MODE_NONE;
	thread_template.sched_flags = 0;
	thread_template.saved_mode = TH_MODE_NONE;
	thread_template.safe_release = 0;

	thread_template.priority = 0;
	thread_template.sched_pri = 0;
	thread_template.max_priority = 0;
	thread_template.task_priority = 0;
	thread_template.promotions = 0;
	thread_template.pending_promoter_index = 0;
	thread_template.pending_promoter[0] =
	thread_template.pending_promoter[1] = NULL;

	thread_template.realtime.deadline = UINT64_MAX;

	thread_template.current_quantum = 0;
	thread_template.last_run_time = 0;
	thread_template.last_quantum_refill_time = 0;

	thread_template.computation_metered = 0;
	thread_template.computation_epoch = 0;
#if defined(CONFIG_SCHED_TRADITIONAL)
	thread_template.sched_stamp = 0;
	thread_template.pri_shift = INT8_MAX;
	thread_template.sched_usage = 0;
	thread_template.cpu_usage = thread_template.cpu_delta = 0;
#endif
	thread_template.c_switch = thread_template.p_switch = thread_template.ps_switch = 0;
	thread_template.bound_processor = PROCESSOR_NULL;
	thread_template.last_processor = PROCESSOR_NULL;

	thread_template.sched_call = sched_call_null;

	timer_init(&thread_template.user_timer);
	timer_init(&thread_template.system_timer);
	thread_template.user_timer_save = 0;
	thread_template.system_timer_save = 0;
	thread_template.vtimer_user_save = 0;
	thread_template.vtimer_prof_save = 0;
	thread_template.vtimer_rlim_save = 0;

	thread_template.wait_timer_is_set = FALSE;
	thread_template.wait_timer_active = 0;

	thread_template.depress_timer_active = 0;

	thread_template.special_handler.handler = special_handler;
	thread_template.special_handler.next = NULL;

	thread_template.funnel_lock = THR_FUNNEL_NULL;
	thread_template.funnel_state = 0;
	thread_template.recover = (vm_offset_t)NULL;

	thread_template.map = VM_MAP_NULL;

#if CONFIG_DTRACE
	thread_template.t_dtrace_predcache = 0;
	thread_template.t_dtrace_vtime = 0;
	thread_template.t_dtrace_tracing = 0;
#endif /* CONFIG_DTRACE */

	thread_template.t_chud = 0;
	thread_template.t_page_creation_count = 0;
	thread_template.t_page_creation_time = 0;

	thread_template.affinity_set = NULL;

	thread_template.syscalls_unix = 0;
	thread_template.syscalls_mach = 0;

	thread_template.tkm_private.alloc = 0;
	thread_template.tkm_private.free = 0;
	thread_template.tkm_shared.alloc = 0;
	thread_template.tkm_shared.free = 0;
	thread_template.actionstate = default_task_null_policy;
	thread_template.ext_actionstate = default_task_null_policy;
	thread_template.policystate = default_task_proc_policy;
	thread_template.ext_policystate = default_task_proc_policy;

	init_thread = thread_template;
	machine_set_current_thread(&init_thread);
}
void
thread_init(void)
{
	thread_zone = zinit(
			sizeof(struct thread),
			thread_max * sizeof(struct thread),
			THREAD_CHUNK * sizeof(struct thread),
			"threads");

	lck_grp_attr_setdefault(&thread_lck_grp_attr);
	lck_grp_init(&thread_lck_grp, "thread", &thread_lck_grp_attr);
	lck_attr_setdefault(&thread_lck_attr);

	stack_init();

	/*
	 *	Initialize any machine-dependent
	 *	per-thread structures necessary.
	 */
	machine_thread_init();
}
static void
thread_terminate_continue(void)
{
	panic("thread_terminate_continue");
	/*NOTREACHED*/
}
/*
 *	thread_terminate_self:
 */
void
thread_terminate_self(void)
{
	thread_t		thread = current_thread();
	task_t			task;
	spl_t			s;
	int				threadcnt;

	pal_thread_terminate_self(thread);

	DTRACE_PROC(lwp__exit);

	thread_mtx_lock(thread);

	ulock_release_all(thread);

	ipc_thread_disable(thread);

	thread_mtx_unlock(thread);

	s = splsched();
	thread_lock(thread);

	/*
	 *	Cancel priority depression, wait for concurrent expirations
	 *	on other processors.
	 */
	if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) {
		thread->sched_flags &= ~TH_SFLAG_DEPRESSED_MASK;

		if (timer_call_cancel(&thread->depress_timer))
			thread->depress_timer_active--;
	}

	while (thread->depress_timer_active > 0) {
		thread_unlock(thread);
		splx(s);

		delay(1);

		s = splsched();
		thread_lock(thread);
	}

	thread_sched_call(thread, NULL);

	thread_unlock(thread);
	splx(s);

	thread_policy_reset(thread);

	task = thread->task;
	uthread_cleanup(task, thread->uthread, task->bsd_info);
	threadcnt = hw_atomic_sub(&task->active_thread_count, 1);

	/*
	 * If we are the last thread to terminate and the task is
	 * associated with a BSD process, perform BSD process exit.
	 */
	if (threadcnt == 0 && task->bsd_info != NULL)
		proc_exit(task->bsd_info);

	uthread_cred_free(thread->uthread);

	s = splsched();
	thread_lock(thread);

	/*
	 *	Cancel wait timer, and wait for
	 *	concurrent expirations.
	 */
	if (thread->wait_timer_is_set) {
		thread->wait_timer_is_set = FALSE;

		if (timer_call_cancel(&thread->wait_timer))
			thread->wait_timer_active--;
	}

	while (thread->wait_timer_active > 0) {
		thread_unlock(thread);
		splx(s);

		delay(1);

		s = splsched();
		thread_lock(thread);
	}

	/*
	 *	If there is a reserved stack, release it.
	 */
	if (thread->reserved_stack != 0) {
		stack_free_reserved(thread);
		thread->reserved_stack = 0;
	}

	/*
	 *	Mark thread as terminating, and block.
	 */
	thread->state |= TH_TERMINATE;
	thread_mark_wait_locked(thread, THREAD_UNINT);
	assert(thread->promotions == 0);
	thread_unlock(thread);
	/* splsched */

	thread_block((thread_continue_t)thread_terminate_continue);
	/*NOTREACHED*/
}
void
thread_deallocate(
	thread_t			thread)
{
	task_t				task;

	if (thread == THREAD_NULL)
		return;

	if (thread_deallocate_internal(thread) > 0)
		return;

	ipc_thread_terminate(thread);

	task = thread->task;

#ifdef MACH_BSD
	{
		void *ut = thread->uthread;

		thread->uthread = NULL;
		uthread_zone_free(ut);
	}
#endif /* MACH_BSD */

	if (thread->kernel_stack != 0)
		stack_free(thread);

	lck_mtx_destroy(&thread->mutex, &thread_lck_grp);
	machine_thread_destroy(thread);

	task_deallocate(task);

	zfree(thread_zone, thread);
}
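
/*
 * Editorial note: a newly created thread carries ref_count == 2 (see
 * thread_template above); one reference is returned to the creator and
 * the other is dropped by the terminate daemon's thread_deallocate(),
 * which performs the final cleanup above and frees into thread_zone.
 */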
/*
 *	thread_terminate_daemon:
 *
 *	Perform final clean up for terminating threads.
 */
static void
thread_terminate_daemon(void)
{
	thread_t	self, thread;
	task_t		task;

	self = current_thread();
	self->options |= TH_OPT_SYSTEM_CRITICAL;

	(void)splsched();
	simple_lock(&thread_terminate_lock);

	while ((thread = (thread_t)dequeue_head(&thread_terminate_queue)) != THREAD_NULL) {
		simple_unlock(&thread_terminate_lock);
		(void)spllo();

		task = thread->task;

		task_lock(task);
		task->total_user_time += timer_grab(&thread->user_timer);
		task->total_system_time += timer_grab(&thread->system_timer);

		task->c_switch += thread->c_switch;
		task->p_switch += thread->p_switch;
		task->ps_switch += thread->ps_switch;

		task->syscalls_unix += thread->syscalls_unix;
		task->syscalls_mach += thread->syscalls_mach;

		task->tkm_private.alloc += thread->tkm_private.alloc;
		task->tkm_private.free += thread->tkm_private.free;
		task->tkm_shared.alloc += thread->tkm_shared.alloc;
		task->tkm_shared.free += thread->tkm_shared.free;

		queue_remove(&task->threads, thread, thread_t, task_threads);
		task->thread_count--;

		/*
		 * If the task is being halted, and there is only one thread
		 * left in the task after this one, then wakeup that thread.
		 */
		if (task->thread_count == 1 && task->halting)
			thread_wakeup((event_t)&task->halting);

		task_unlock(task);

		lck_mtx_lock(&tasks_threads_lock);
		queue_remove(&threads, thread, thread_t, threads);
		threads_count--;
		lck_mtx_unlock(&tasks_threads_lock);

		thread_deallocate(thread);

		(void)splsched();
		simple_lock(&thread_terminate_lock);
	}

	assert_wait((event_t)&thread_terminate_queue, THREAD_UNINT);
	simple_unlock(&thread_terminate_lock);
	(void)spllo();

	self->options &= ~TH_OPT_SYSTEM_CRITICAL;
	thread_block((thread_continue_t)thread_terminate_daemon);
	/*NOTREACHED*/
}
/*
 *	thread_terminate_enqueue:
 *
 *	Enqueue a terminating thread for final disposition.
 *
 *	Called at splsched.
 */
void
thread_terminate_enqueue(
	thread_t		thread)
{
	simple_lock(&thread_terminate_lock);
	enqueue_tail(&thread_terminate_queue, (queue_entry_t)thread);
	simple_unlock(&thread_terminate_lock);

	thread_wakeup((event_t)&thread_terminate_queue);
}
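
/*
 * Editorial note: this enqueue runs at splsched with a simple lock
 * held, so it cannot block; the actual teardown (task accounting and
 * the final thread_deallocate()) is deferred to the schedulable
 * thread_terminate_daemon() above.
 */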
/*
 *	thread_stack_daemon:
 *
 *	Perform stack allocation as required due to
 *	invoke failures.
 */
static void
thread_stack_daemon(void)
{
	thread_t		thread;

	simple_lock(&thread_stack_lock);

	while ((thread = (thread_t)dequeue_head(&thread_stack_queue)) != THREAD_NULL) {
		simple_unlock(&thread_stack_lock);

		stack_alloc(thread);

		(void)splsched();
		thread_lock(thread);
		thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
		thread_unlock(thread);
		(void)spllo();

		simple_lock(&thread_stack_lock);
	}

	assert_wait((event_t)&thread_stack_queue, THREAD_UNINT);
	simple_unlock(&thread_stack_lock);

	thread_block((thread_continue_t)thread_stack_daemon);
	/*NOTREACHED*/
}
/*
 *	thread_stack_enqueue:
 *
 *	Enqueue a thread for stack allocation.
 *
 *	Called at splsched.
 */
void
thread_stack_enqueue(
	thread_t		thread)
{
	simple_lock(&thread_stack_lock);
	enqueue_tail(&thread_stack_queue, (queue_entry_t)thread);
	simple_unlock(&thread_stack_lock);

	thread_wakeup((event_t)&thread_stack_queue);
}
void
thread_daemon_init(void)
{
	kern_return_t	result;
	thread_t		thread = NULL;

	simple_lock_init(&thread_terminate_lock, 0);
	queue_init(&thread_terminate_queue);

	result = kernel_thread_start_priority((thread_continue_t)thread_terminate_daemon, NULL, MINPRI_KERNEL, &thread);
	if (result != KERN_SUCCESS)
		panic("thread_daemon_init: thread_terminate_daemon");

	thread_deallocate(thread);

	simple_lock_init(&thread_stack_lock, 0);
	queue_init(&thread_stack_queue);

	result = kernel_thread_start_priority((thread_continue_t)thread_stack_daemon, NULL, BASEPRI_PREEMPT, &thread);
	if (result != KERN_SUCCESS)
		panic("thread_daemon_init: thread_stack_daemon");

	thread_deallocate(thread);
}
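
/*
 * Editorial note: the thread_deallocate() calls above drop only the
 * creation references returned by kernel_thread_start_priority();
 * the daemons retain their own reference and run forever.
 */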
/*
 *	Create a new thread.
 *	Doesn't start the thread running.
 */
static kern_return_t
thread_create_internal(
	task_t					parent_task,
	integer_t				priority,
	thread_continue_t		continuation,
	int						options,
#define TH_OPTION_NONE		0x00
#define TH_OPTION_NOCRED	0x01
#define TH_OPTION_NOSUSP	0x02
	thread_t				*out_thread)
{
	thread_t				new_thread;
	static thread_t			first_thread;

	/*
	 *	Allocate a thread and initialize static fields
	 */
	if (first_thread == THREAD_NULL)
		new_thread = first_thread = current_thread();
	else
		new_thread = (thread_t)zalloc(thread_zone);
	if (new_thread == THREAD_NULL)
		return (KERN_RESOURCE_SHORTAGE);

	if (new_thread != first_thread)
		*new_thread = thread_template;

#ifdef MACH_BSD
	new_thread->uthread = uthread_alloc(parent_task, new_thread, (options & TH_OPTION_NOCRED) != 0);
	if (new_thread->uthread == NULL) {
		zfree(thread_zone, new_thread);
		return (KERN_RESOURCE_SHORTAGE);
	}
#endif /* MACH_BSD */
	if (machine_thread_create(new_thread, parent_task) != KERN_SUCCESS) {
#ifdef MACH_BSD
		void *ut = new_thread->uthread;

		new_thread->uthread = NULL;
		/* cred free may not be necessary */
		uthread_cleanup(parent_task, ut, parent_task->bsd_info);
		uthread_cred_free(ut);
		uthread_zone_free(ut);
#endif /* MACH_BSD */

		zfree(thread_zone, new_thread);
		return (KERN_FAILURE);
	}
	new_thread->task = parent_task;

	thread_lock_init(new_thread);
	wake_lock_init(new_thread);

	lck_mtx_init(&new_thread->mutex, &thread_lck_grp, &thread_lck_attr);

	ipc_thread_init(new_thread);
	queue_init(&new_thread->held_ulocks);

	new_thread->continuation = continuation;

	lck_mtx_lock(&tasks_threads_lock);
	task_lock(parent_task);

	if (	!parent_task->active || parent_task->halting ||
			((options & TH_OPTION_NOSUSP) != 0 &&
					parent_task->suspend_count > 0) ||
			(parent_task->thread_count >= task_threadmax &&
					parent_task != kernel_task) ) {
		task_unlock(parent_task);
		lck_mtx_unlock(&tasks_threads_lock);

#ifdef MACH_BSD
		{
			void *ut = new_thread->uthread;

			new_thread->uthread = NULL;
			uthread_cleanup(parent_task, ut, parent_task->bsd_info);
			/* cred free may not be necessary */
			uthread_cred_free(ut);
			uthread_zone_free(ut);
		}
#endif /* MACH_BSD */
		ipc_thread_disable(new_thread);
		ipc_thread_terminate(new_thread);
		lck_mtx_destroy(&new_thread->mutex, &thread_lck_grp);
		machine_thread_destroy(new_thread);
		zfree(thread_zone, new_thread);
		return (KERN_FAILURE);
	}
	/* New threads inherit any default state on the task */
	machine_thread_inherit_taskwide(new_thread, parent_task);

	task_reference_internal(parent_task);

	/* Cache the task's map */
	new_thread->map = parent_task->map;

	/* Chain the thread onto the task's list */
	queue_enter(&parent_task->threads, new_thread, thread_t, task_threads);
	parent_task->thread_count++;

	/* So terminating threads don't need to take the task lock to decrement */
	hw_atomic_add(&parent_task->active_thread_count, 1);

	/* Protected by the tasks_threads_lock */
	new_thread->thread_id = ++thread_unique_id;

	queue_enter(&threads, new_thread, thread_t, threads);
	threads_count++;

	timer_call_setup(&new_thread->wait_timer, thread_timer_expire, new_thread);
	timer_call_setup(&new_thread->depress_timer, thread_depress_expire, new_thread);
	/*
	 * If parent task has any reservations, they need to be propagated to this
	 * thread.
	 */
	new_thread->t_chud = (TASK_PMC_FLAG == (parent_task->t_chud & TASK_PMC_FLAG)) ?
		THREAD_PMC_FLAG : 0U;

	/* Set the thread's scheduling parameters */
	new_thread->sched_mode = SCHED(initial_thread_sched_mode)(parent_task);
	new_thread->sched_flags = 0;
	new_thread->max_priority = parent_task->max_priority;
	new_thread->task_priority = parent_task->priority;
	new_thread->priority = (priority < 0)? parent_task->priority: priority;
	if (new_thread->priority > new_thread->max_priority)
		new_thread->priority = new_thread->max_priority;
#if CONFIG_EMBEDDED
	if (new_thread->priority < MAXPRI_THROTTLE) {
		new_thread->priority = MAXPRI_THROTTLE;
	}
#endif /* CONFIG_EMBEDDED */
	new_thread->importance =
					new_thread->priority - new_thread->task_priority;
#if defined(CONFIG_SCHED_TRADITIONAL)
	new_thread->sched_stamp = sched_tick;
	new_thread->pri_shift = sched_pri_shift;
#endif
	SCHED(compute_priority)(new_thread, FALSE);

	new_thread->active = TRUE;

	*out_thread = new_thread;

	{
		long	dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4;

		kdbg_trace_data(parent_task->bsd_info, &dbg_arg2);

		KERNEL_DEBUG_CONSTANT(
					TRACEDBG_CODE(DBG_TRACE_DATA, 1) | DBG_FUNC_NONE,
					(vm_address_t)(uintptr_t)thread_tid(new_thread), dbg_arg2, 0, 0, 0);

		kdbg_trace_string(parent_task->bsd_info,
							&dbg_arg1, &dbg_arg2, &dbg_arg3, &dbg_arg4);

		KERNEL_DEBUG_CONSTANT(
					TRACEDBG_CODE(DBG_TRACE_STRING, 1) | DBG_FUNC_NONE,
					dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, 0);
	}

	DTRACE_PROC1(lwp__create, thread_t, *out_thread);

	return (KERN_SUCCESS);
}
static kern_return_t
thread_create_internal2(
	task_t				task,
	thread_t			*new_thread,
	boolean_t			from_user)
{
	kern_return_t		result;
	thread_t			thread;

	if (task == TASK_NULL || task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	result = thread_create_internal(task, -1, (thread_continue_t)thread_bootstrap_return, TH_OPTION_NONE, &thread);
	if (result != KERN_SUCCESS)
		return (result);

	thread->user_stop_count = 1;
	thread_hold(thread);
	if (task->suspend_count > 0)
		thread_hold(thread);

	if (from_user)
		extmod_statistics_incr_thread_create(task);

	task_unlock(task);
	lck_mtx_unlock(&tasks_threads_lock);

	*new_thread = thread;

	return (KERN_SUCCESS);
}
/* No prototype, since task_server.h has the _from_user version if KERNEL_SERVER */
kern_return_t
thread_create(
	task_t				task,
	thread_t			*new_thread);

kern_return_t
thread_create(
	task_t				task,
	thread_t			*new_thread)
{
	return thread_create_internal2(task, new_thread, FALSE);
}

kern_return_t
thread_create_from_user(
	task_t				task,
	thread_t			*new_thread)
{
	return thread_create_internal2(task, new_thread, TRUE);
}
static kern_return_t
thread_create_running_internal2(
	register task_t         task,
	int                     flavor,
	thread_state_t          new_state,
	mach_msg_type_number_t  new_state_count,
	thread_t				*new_thread,
	boolean_t				from_user)
{
	register kern_return_t  result;
	thread_t				thread;

	if (task == TASK_NULL || task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	result = thread_create_internal(task, -1, (thread_continue_t)thread_bootstrap_return, TH_OPTION_NONE, &thread);
	if (result != KERN_SUCCESS)
		return (result);

	result = machine_thread_set_state(
						thread, flavor, new_state, new_state_count);
	if (result != KERN_SUCCESS) {
		task_unlock(task);
		lck_mtx_unlock(&tasks_threads_lock);

		thread_terminate(thread);
		thread_deallocate(thread);
		return (result);
	}

	thread_mtx_lock(thread);
	thread_start_internal(thread);
	thread_mtx_unlock(thread);

	if (from_user)
		extmod_statistics_incr_thread_create(task);

	task_unlock(task);
	lck_mtx_unlock(&tasks_threads_lock);

	*new_thread = thread;

	return (result);
}
895 thread_create_running(
896 register task_t task
,
898 thread_state_t new_state
,
899 mach_msg_type_number_t new_state_count
,
900 thread_t
*new_thread
);
903 thread_create_running(
904 register task_t task
,
906 thread_state_t new_state
,
907 mach_msg_type_number_t new_state_count
,
908 thread_t
*new_thread
)
910 return thread_create_running_internal2(
911 task
, flavor
, new_state
, new_state_count
,
916 thread_create_running_from_user(
917 register task_t task
,
919 thread_state_t new_state
,
920 mach_msg_type_number_t new_state_count
,
921 thread_t
*new_thread
)
923 return thread_create_running_internal2(
924 task
, flavor
, new_state
, new_state_count
,
kern_return_t
thread_create_workq(
	task_t				task,
	thread_continue_t	thread_return,
	thread_t			*new_thread)
{
	kern_return_t		result;
	thread_t			thread;

	if (task == TASK_NULL || task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	result = thread_create_internal(task, -1, thread_return, TH_OPTION_NOCRED | TH_OPTION_NOSUSP, &thread);
	if (result != KERN_SUCCESS)
		return (result);

	thread->user_stop_count = 1;
	thread_hold(thread);
	if (task->suspend_count > 0)
		thread_hold(thread);

	task_unlock(task);
	lck_mtx_unlock(&tasks_threads_lock);

	*new_thread = thread;

	return (KERN_SUCCESS);
}
/*
 *	kernel_thread_create:
 *
 *	Create a thread in the kernel task
 *	to execute in kernel context.
 */
kern_return_t
kernel_thread_create(
	thread_continue_t	continuation,
	void				*parameter,
	integer_t			priority,
	thread_t			*new_thread)
{
	kern_return_t		result;
	thread_t			thread;
	task_t				task = kernel_task;

	result = thread_create_internal(task, priority, continuation, TH_OPTION_NONE, &thread);
	if (result != KERN_SUCCESS)
		return (result);

	task_unlock(task);
	lck_mtx_unlock(&tasks_threads_lock);

	stack_alloc(thread);
	assert(thread->kernel_stack != 0);
#if CONFIG_EMBEDDED
	if (priority > BASEPRI_KERNEL)
#endif
	thread->reserved_stack = thread->kernel_stack;

	thread->parameter = parameter;

	if (debug_task & 1)
		kprintf("kernel_thread_create: thread = %p continuation = %p\n", thread, continuation);
	*new_thread = thread;

	return (result);
}
kern_return_t
kernel_thread_start_priority(
	thread_continue_t	continuation,
	void				*parameter,
	integer_t			priority,
	thread_t			*new_thread)
{
	kern_return_t	result;
	thread_t		thread;

	result = kernel_thread_create(continuation, parameter, priority, &thread);
	if (result != KERN_SUCCESS)
		return (result);

	*new_thread = thread;

	thread_mtx_lock(thread);
	thread_start_internal(thread);
	thread_mtx_unlock(thread);

	return (result);
}
kern_return_t
kernel_thread_start(
	thread_continue_t	continuation,
	void				*parameter,
	thread_t			*new_thread)
{
	return kernel_thread_start_priority(continuation, parameter, -1, new_thread);
}
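
/*
 * Illustrative sketch (not part of the original source): typical use
 * of the kernel_thread_start() KPI.  The names my_worker and
 * my_start_worker are hypothetical.
 */
#if 0
static void
my_worker(void *parameter, wait_result_t wresult)
{
	/* ... perform work, then self-terminate ... */
	(void)thread_terminate(current_thread());
}

static kern_return_t
my_start_worker(void)
{
	thread_t		thread;
	kern_return_t	result;

	result = kernel_thread_start(my_worker, NULL, &thread);
	if (result != KERN_SUCCESS)
		return (result);

	thread_deallocate(thread);		/* drop the creation reference */
	return (KERN_SUCCESS);
}
#endif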
#ifndef __LP64__

thread_t
kernel_thread(
	task_t			task,
	void			(*start)(void))
{
	kern_return_t	result;
	thread_t		thread;

	if (task != kernel_task)
		panic("kernel_thread");

	result = kernel_thread_start_priority((thread_continue_t)start, NULL, -1, &thread);
	if (result != KERN_SUCCESS)
		return (THREAD_NULL);

	thread_deallocate(thread);

	return (thread);
}

#endif /* __LP64__ */
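
/*
 * Editorial note: kernel_thread() is the older interface; unlike
 * kernel_thread_start(), it deallocates the creation reference before
 * returning, so callers must not rely on the returned pointer staying
 * valid.  New code should use kernel_thread_start().
 */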
kern_return_t
thread_info_internal(
	register thread_t		thread,
	thread_flavor_t			flavor,
	thread_info_t			thread_info_out,	/* ptr to OUT array */
	mach_msg_type_number_t	*thread_info_count)	/*IN/OUT*/
{
	int						state, flags;
	spl_t					s;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);
	if (flavor == THREAD_BASIC_INFO) {
		register thread_basic_info_t	basic_info;

		if (*thread_info_count < THREAD_BASIC_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		basic_info = (thread_basic_info_t) thread_info_out;

		s = splsched();
		thread_lock(thread);

		thread_read_times(thread, &basic_info->user_time,
									&basic_info->system_time);

		/*
		 *	Update lazy-evaluated scheduler info because someone wants it.
		 */
		if (SCHED(can_update_priority)(thread))
			SCHED(update_priority)(thread);

		basic_info->sleep_time = 0;

		/*
		 *	To calculate cpu_usage, first correct for timer rate,
		 *	then for 5/8 ageing.  The correction factor [3/5] is
		 *	(1/(5/8) - 1).
		 */
		basic_info->cpu_usage = 0;
#if defined(CONFIG_SCHED_TRADITIONAL)
		if (sched_tick_interval) {
			basic_info->cpu_usage = (integer_t)(((uint64_t)thread->cpu_usage
										* TH_USAGE_SCALE) / sched_tick_interval);
			basic_info->cpu_usage = (basic_info->cpu_usage * 3) / 5;
		}
#endif

		if (basic_info->cpu_usage > TH_USAGE_SCALE)
			basic_info->cpu_usage = TH_USAGE_SCALE;

		basic_info->policy = ((thread->sched_mode == TH_MODE_TIMESHARE)?
												POLICY_TIMESHARE: POLICY_RR);

		flags = 0;
		if (thread->bound_processor != PROCESSOR_NULL && thread->bound_processor->idle_thread == thread)
			flags |= TH_FLAGS_IDLE;

		if (!thread->kernel_stack)
			flags |= TH_FLAGS_SWAPPED;

		state = 0;
		if (thread->state & TH_TERMINATE)
			state = TH_STATE_HALTED;
		else
		if (thread->state & TH_RUN)
			state = TH_STATE_RUNNING;
		else
		if (thread->state & TH_UNINT)
			state = TH_STATE_UNINTERRUPTIBLE;
		else
		if (thread->state & TH_SUSP)
			state = TH_STATE_STOPPED;
		else
		if (thread->state & TH_WAIT)
			state = TH_STATE_WAITING;

		basic_info->run_state = state;
		basic_info->flags = flags;

		basic_info->suspend_count = thread->user_stop_count;

		thread_unlock(thread);
		splx(s);

		*thread_info_count = THREAD_BASIC_INFO_COUNT;

		return (KERN_SUCCESS);
	}
	if (flavor == THREAD_IDENTIFIER_INFO) {
		register thread_identifier_info_t	identifier_info;

		if (*thread_info_count < THREAD_IDENTIFIER_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		identifier_info = (thread_identifier_info_t) thread_info_out;

		s = splsched();
		thread_lock(thread);

		identifier_info->thread_id = thread->thread_id;
		identifier_info->thread_handle = thread->machine.cthread_self;
		if (thread->task->bsd_info) {
			identifier_info->dispatch_qaddr = identifier_info->thread_handle + get_dispatchqueue_offset_from_proc(thread->task->bsd_info);
		} else {
			thread_unlock(thread);
			splx(s);
			return KERN_INVALID_ARGUMENT;
		}

		thread_unlock(thread);
		splx(s);
		return KERN_SUCCESS;
	}
	if (flavor == THREAD_SCHED_TIMESHARE_INFO) {
		policy_timeshare_info_t		ts_info;

		if (*thread_info_count < POLICY_TIMESHARE_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		ts_info = (policy_timeshare_info_t)thread_info_out;

		s = splsched();
		thread_lock(thread);

		if (thread->sched_mode != TH_MODE_TIMESHARE) {
			thread_unlock(thread);
			splx(s);

			return (KERN_INVALID_POLICY);
		}

		ts_info->depressed = (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) != 0;
		if (ts_info->depressed) {
			ts_info->base_priority = DEPRESSPRI;
			ts_info->depress_priority = thread->priority;
		}
		else {
			ts_info->base_priority = thread->priority;
			ts_info->depress_priority = -1;
		}

		ts_info->cur_priority = thread->sched_pri;
		ts_info->max_priority = thread->max_priority;

		thread_unlock(thread);
		splx(s);

		*thread_info_count = POLICY_TIMESHARE_INFO_COUNT;

		return (KERN_SUCCESS);
	}
	if (flavor == THREAD_SCHED_FIFO_INFO) {
		if (*thread_info_count < POLICY_FIFO_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		return (KERN_INVALID_POLICY);
	}
	if (flavor == THREAD_SCHED_RR_INFO) {
		policy_rr_info_t	rr_info;
		uint32_t			quantum_time;
		uint64_t			quantum_ns;

		if (*thread_info_count < POLICY_RR_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		rr_info = (policy_rr_info_t) thread_info_out;

		s = splsched();
		thread_lock(thread);

		if (thread->sched_mode == TH_MODE_TIMESHARE) {
			thread_unlock(thread);
			splx(s);

			return (KERN_INVALID_POLICY);
		}

		rr_info->depressed = (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) != 0;
		if (rr_info->depressed) {
			rr_info->base_priority = DEPRESSPRI;
			rr_info->depress_priority = thread->priority;
		}
		else {
			rr_info->base_priority = thread->priority;
			rr_info->depress_priority = -1;
		}

		quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
		absolutetime_to_nanoseconds(quantum_time, &quantum_ns);

		rr_info->max_priority = thread->max_priority;
		rr_info->quantum = (uint32_t)(quantum_ns / 1000 / 1000);

		thread_unlock(thread);
		splx(s);

		*thread_info_count = POLICY_RR_INFO_COUNT;

		return (KERN_SUCCESS);
	}
	return (KERN_INVALID_ARGUMENT);
}
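
/*
 * Illustrative sketch (not part of the original source): this routine
 * backs the thread_info() MIG call.  A user-space query for the
 * THREAD_BASIC_INFO flavor handled above looks roughly like:
 */
#if 0
#include <mach/mach.h>

	thread_basic_info_data_t	info;
	mach_msg_type_number_t		count = THREAD_BASIC_INFO_COUNT;
	kern_return_t				kr;

	kr = thread_info(mach_thread_self(), THREAD_BASIC_INFO,
					(thread_info_t)&info, &count);
	/* on success: info.user_time, info.run_state,
	   info.cpu_usage scaled to TH_USAGE_SCALE, ... */
#endif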
void
thread_read_times(
	thread_t		thread,
	time_value_t	*user_time,
	time_value_t	*system_time)
{
	clock_sec_t		secs;
	clock_usec_t	usecs;

	absolutetime_to_microtime(timer_grab(&thread->user_timer), &secs, &usecs);
	user_time->seconds = (typeof(user_time->seconds))secs;
	user_time->microseconds = usecs;

	absolutetime_to_microtime(timer_grab(&thread->system_timer), &secs, &usecs);
	system_time->seconds = (typeof(system_time->seconds))secs;
	system_time->microseconds = usecs;
}
kern_return_t
thread_assign(
	__unused thread_t			thread,
	__unused processor_set_t	new_pset)
{
	return (KERN_FAILURE);
}

/*
 *	thread_assign_default:
 *
 *	Special version of thread_assign for assigning threads to default
 *	processor set.
 */
kern_return_t
thread_assign_default(
	thread_t		thread)
{
	return (thread_assign(thread, &pset0));
}

/*
 *	thread_get_assignment
 *
 *	Return current assignment for this thread.
 */
kern_return_t
thread_get_assignment(
	thread_t		thread,
	processor_set_t	*pset)
{
	if (thread == NULL)
		return (KERN_INVALID_ARGUMENT);

	*pset = &pset0;

	return (KERN_SUCCESS);
}
/*
 *	thread_wire_internal:
 *
 *	Specify that the target thread must always be able
 *	to run and to allocate memory.
 */
kern_return_t
thread_wire_internal(
	host_priv_t		host_priv,
	thread_t		thread,
	boolean_t		wired,
	boolean_t		*prev_state)
{
	if (host_priv == NULL || thread != current_thread())
		return (KERN_INVALID_ARGUMENT);

	assert(host_priv == &realhost);

	if (prev_state)
		*prev_state = (thread->options & TH_OPT_VMPRIV) != 0;

	if (wired) {
		if (!(thread->options & TH_OPT_VMPRIV))
			vm_page_free_reserve(1);	/* XXX */
		thread->options |= TH_OPT_VMPRIV;
	}
	else {
		if (thread->options & TH_OPT_VMPRIV)
			vm_page_free_reserve(-1);	/* XXX */
		thread->options &= ~TH_OPT_VMPRIV;
	}

	return (KERN_SUCCESS);
}
/*
 *	thread_wire:
 *
 *	User-api wrapper for thread_wire_internal()
 */
kern_return_t
thread_wire(
	host_priv_t	host_priv,
	thread_t	thread,
	boolean_t	wired)
{
	return (thread_wire_internal(host_priv, thread, wired, NULL));
}
int		split_funnel_off = 0;
lck_grp_t		*funnel_lck_grp = LCK_GRP_NULL;
lck_grp_attr_t	*funnel_lck_grp_attr;
lck_attr_t		*funnel_lck_attr;

funnel_t *
funnel_alloc(
	int type)
{
	lck_mtx_t	*m;
	funnel_t	*fnl;

	if (funnel_lck_grp == LCK_GRP_NULL) {
		funnel_lck_grp_attr = lck_grp_attr_alloc_init();

		funnel_lck_grp = lck_grp_alloc_init("Funnel", funnel_lck_grp_attr);

		funnel_lck_attr = lck_attr_alloc_init();
	}
	if ((fnl = (funnel_t *)kalloc(sizeof(funnel_t))) != 0){
		bzero((void *)fnl, sizeof(funnel_t));
		if ((m = lck_mtx_alloc_init(funnel_lck_grp, funnel_lck_attr)) == (lck_mtx_t *)NULL) {
			kfree(fnl, sizeof(funnel_t));
			return(THR_FUNNEL_NULL);
		}
		fnl->fnl_mutex = m;
		fnl->fnl_type = type;
	}
	return(fnl);
}

void
funnel_free(
	funnel_t * fnl)
{
	lck_mtx_free(fnl->fnl_mutex, funnel_lck_grp);
	if (fnl->fnl_oldmutex)
		lck_mtx_free(fnl->fnl_oldmutex, funnel_lck_grp);
	kfree(fnl, sizeof(funnel_t));
}
void
funnel_lock(
	funnel_t * fnl)
{
	lck_mtx_lock(fnl->fnl_mutex);
	fnl->fnl_mtxholder = current_thread();
}

void
funnel_unlock(
	funnel_t * fnl)
{
	lck_mtx_unlock(fnl->fnl_mutex);
	fnl->fnl_mtxholder = NULL;
	fnl->fnl_mtxrelease = current_thread();
}
funnel_t *
thread_funnel_get(
	void)
{
	thread_t th = current_thread();

	if (th->funnel_state & TH_FN_OWNED) {
		return(th->funnel_lock);
	}
	return(THR_FUNNEL_NULL);
}
boolean_t
thread_funnel_set(
	funnel_t *	fnl,
	boolean_t	funneled)
{
	thread_t	cur_thread;
	boolean_t	funnel_state_prev;
	boolean_t	intr;

	cur_thread = current_thread();
	funnel_state_prev = ((cur_thread->funnel_state & TH_FN_OWNED) == TH_FN_OWNED);

	if (funnel_state_prev != funneled) {
		intr = ml_set_interrupts_enabled(FALSE);

		if (funneled == TRUE) {
			if (cur_thread->funnel_lock)
				panic("Funnel lock called when holding one %p", cur_thread->funnel_lock);
			KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE,
							fnl, 1, 0, 0, 0);
			funnel_lock(fnl);
			KERNEL_DEBUG(0x6032434 | DBG_FUNC_NONE,
							fnl, 1, 0, 0, 0);
			cur_thread->funnel_state |= TH_FN_OWNED;
			cur_thread->funnel_lock = fnl;
		} else {
			if (cur_thread->funnel_lock->fnl_mutex != fnl->fnl_mutex)
				panic("Funnel unlock when not holding funnel");
			cur_thread->funnel_state &= ~TH_FN_OWNED;
			KERNEL_DEBUG(0x603242c | DBG_FUNC_NONE,
							fnl, 1, 0, 0, 0);

			cur_thread->funnel_lock = THR_FUNNEL_NULL;
			funnel_unlock(fnl);
		}
		(void)ml_set_interrupts_enabled(intr);
	} else {
		/* if we are trying to acquire funnel recursively
		 * check for funnel to be held already
		 */
		if (funneled && (fnl->fnl_mutex != cur_thread->funnel_lock->fnl_mutex)) {
			panic("thread_funnel_set: already holding a different funnel");
		}
	}
	return(funnel_state_prev);
}
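
/*
 * Editorial note: the funnel is the legacy coarse-grained BSD lock, in
 * effect a single mutex whose ownership is mirrored in the thread
 * (funnel_state/funnel_lock).  As the panics above show, taking a
 * second funnel or releasing one that is not held is fatal; interrupts
 * are disabled only while the ownership state is updated.
 */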
static void
sched_call_null(
__unused	int			type,
__unused	thread_t	thread)
{
	return;
}

void
thread_sched_call(
	thread_t		thread,
	sched_call_t	call)
{
	thread->sched_call = (call != NULL)? call: sched_call_null;
}
void
thread_static_param(
	thread_t		thread,
	boolean_t		state)
{
	thread_mtx_lock(thread);
	thread->static_param = state;
	thread_mtx_unlock(thread);
}
uint64_t
thread_tid(
	thread_t	thread)
{
	return (thread != THREAD_NULL? thread->thread_id: 0);
}
uint64_t
thread_dispatchqaddr(
	thread_t		thread)
{
	uint64_t	dispatchqueue_addr = 0;
	uint64_t	thread_handle = 0;

	if (thread != THREAD_NULL) {
		thread_handle = thread->machine.cthread_self;

		if (thread->task->bsd_info)
			dispatchqueue_addr = thread_handle + get_dispatchqueue_offset_from_proc(thread->task->bsd_info);
	}

	return (dispatchqueue_addr);
}
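
/*
 * Editorial note: thread_handle is the user-space TSD base
 * (machine.cthread_self); libdispatch stores the per-thread dispatch
 * queue pointer at a fixed offset from that base, which the BSD proc
 * reports via get_dispatchqueue_offset_from_proc().
 */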
/*
 * Export routines to other components for things that are done as macros
 * within the osfmk component.
 */

#undef thread_reference
void thread_reference(thread_t thread);
void
thread_reference(
	thread_t	thread)
{
	if (thread != THREAD_NULL)
		thread_reference_internal(thread);
}

#undef thread_should_halt

boolean_t
thread_should_halt(
	thread_t		th)
{
	return (thread_should_halt_fast(th));
}
#if CONFIG_DTRACE
uint32_t dtrace_get_thread_predcache(thread_t thread)
{
	if (thread != THREAD_NULL)
		return thread->t_dtrace_predcache;
	else
		return 0;
}

int64_t dtrace_get_thread_vtime(thread_t thread)
{
	if (thread != THREAD_NULL)
		return thread->t_dtrace_vtime;
	else
		return 0;
}

int64_t dtrace_get_thread_tracing(thread_t thread)
{
	if (thread != THREAD_NULL)
		return thread->t_dtrace_tracing;
	else
		return 0;
}

boolean_t dtrace_get_thread_reentering(thread_t thread)
{
	if (thread != THREAD_NULL)
		return (thread->options & TH_OPT_DTRACE) ? TRUE : FALSE;
	else
		return 0;
}

vm_offset_t dtrace_get_kernel_stack(thread_t thread)
{
	if (thread != THREAD_NULL)
		return thread->kernel_stack;
	else
		return 0;
}
int64_t dtrace_calc_thread_recent_vtime(thread_t thread)
{
#if STAT_TIME
	if (thread != THREAD_NULL) {
		return timer_grab(&(thread->system_timer)) + timer_grab(&(thread->user_timer));
	} else
		return 0;
#else
	if (thread != THREAD_NULL) {
		processor_t		processor = current_processor();
		uint64_t		abstime = mach_absolute_time();
		timer_t			timer;

		timer = PROCESSOR_DATA(processor, thread_timer);

		return timer_grab(&(thread->system_timer)) + timer_grab(&(thread->user_timer)) +
				(abstime - timer->tstamp); /* XXX need interrupts off to prevent missed time? */
	} else
		return 0;
#endif
}
void dtrace_set_thread_predcache(thread_t thread, uint32_t predcache)
{
	if (thread != THREAD_NULL)
		thread->t_dtrace_predcache = predcache;
}

void dtrace_set_thread_vtime(thread_t thread, int64_t vtime)
{
	if (thread != THREAD_NULL)
		thread->t_dtrace_vtime = vtime;
}

void dtrace_set_thread_tracing(thread_t thread, int64_t accum)
{
	if (thread != THREAD_NULL)
		thread->t_dtrace_tracing = accum;
}

void dtrace_set_thread_reentering(thread_t thread, boolean_t vbool)
{
	if (thread != THREAD_NULL) {
		if (vbool)
			thread->options |= TH_OPT_DTRACE;
		else
			thread->options &= (~TH_OPT_DTRACE);
	}
}

vm_offset_t dtrace_set_thread_recover(thread_t thread, vm_offset_t recover)
{
	vm_offset_t prev = 0;

	if (thread != THREAD_NULL) {
		prev = thread->recover;
		thread->recover = recover;
	}
	return prev;
}
void dtrace_thread_bootstrap(void)
{
	task_t task = current_task();

	if (task->thread_count == 1) {
		DTRACE_PROC(start);
	}
	DTRACE_PROC(lwp__start);
}
#endif /* CONFIG_DTRACE */