/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	kern/thread.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young, David Golub
 *	Date:	1986
 *
 *	Thread management primitives implementation.
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Computer Systems Laboratory (CSL).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
 * improvements that they make and grant CSL redistribution rights.
 *
 */

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/policy.h>
#include <mach/thread_info.h>
#include <mach/thread_special_ports.h>
#include <mach/thread_status.h>
#include <mach/time_value.h>
#include <mach/vm_param.h>

#include <machine/thread.h>

#include <kern/kern_types.h>
#include <kern/kalloc.h>
#include <kern/cpu_data.h>
#include <kern/counters.h>
#include <kern/ipc_mig.h>
#include <kern/ipc_tt.h>
#include <kern/mach_param.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/sync_lock.h>
#include <kern/syscall_subr.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/host.h>
#include <kern/zalloc.h>
#include <kern/assert.h>

#include <ipc/ipc_kmsg.h>
#include <ipc/ipc_port.h>

#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>

#include <sys/kdebug.h>

#include <mach/sdt.h>

/*
 * Exported interfaces
 */
#include <mach/task_server.h>
#include <mach/thread_act_server.h>
#include <mach/mach_host_server.h>
#include <mach/host_priv_server.h>
static struct zone		*thread_zone;
static lck_grp_attr_t	thread_lck_grp_attr;
lck_attr_t				thread_lck_attr;
lck_grp_t				thread_lck_grp;

decl_simple_lock_data(static,thread_stack_lock)
static queue_head_t		thread_stack_queue;

decl_simple_lock_data(static,thread_terminate_lock)
static queue_head_t		thread_terminate_queue;

static struct thread	thread_template, init_thread;

static void		sched_call_null(
					int			type,
					thread_t	thread);

#ifdef MACH_BSD
extern void proc_exit(void *);
extern uint64_t get_dispatchqueue_offset_from_proc(void *);
#endif /* MACH_BSD */

extern int debug_task;
int thread_max = CONFIG_THREAD_MAX;	/* Max number of threads */
int task_threadmax = CONFIG_THREAD_MAX;

static uint64_t		thread_unique_id = 0;

void
thread_bootstrap(void)
{
	/*
	 *	Fill in a template thread for fast initialization.
	 */

	thread_template.runq = PROCESSOR_NULL;

	thread_template.ref_count = 2;

	thread_template.reason = AST_NONE;
	thread_template.at_safe_point = FALSE;
	thread_template.wait_event = NO_EVENT64;
	thread_template.wait_queue = WAIT_QUEUE_NULL;
	thread_template.wait_result = THREAD_WAITING;
	thread_template.options = THREAD_ABORTSAFE;
	thread_template.state = TH_WAIT | TH_UNINT;
	thread_template.wake_active = FALSE;
	thread_template.continuation = THREAD_CONTINUE_NULL;
	thread_template.parameter = NULL;

	thread_template.importance = 0;
	thread_template.sched_mode = 0;
	thread_template.safe_mode = 0;
	thread_template.safe_release = 0;

	thread_template.priority = 0;
	thread_template.sched_pri = 0;
	thread_template.max_priority = 0;
	thread_template.task_priority = 0;
	thread_template.promotions = 0;
	thread_template.pending_promoter_index = 0;
	thread_template.pending_promoter[0] =
	thread_template.pending_promoter[1] = NULL;

	thread_template.realtime.deadline = UINT64_MAX;

	thread_template.current_quantum = 0;

	thread_template.computation_metered = 0;
	thread_template.computation_epoch = 0;

	thread_template.sched_stamp = 0;
	thread_template.sched_usage = 0;
	thread_template.pri_shift = INT8_MAX;
	thread_template.cpu_usage = thread_template.cpu_delta = 0;
	thread_template.c_switch = thread_template.p_switch = thread_template.ps_switch = 0;

	thread_template.bound_processor = PROCESSOR_NULL;
	thread_template.last_processor = PROCESSOR_NULL;

	thread_template.sched_call = sched_call_null;

	timer_init(&thread_template.user_timer);
	timer_init(&thread_template.system_timer);
	thread_template.user_timer_save = 0;
	thread_template.system_timer_save = 0;
	thread_template.vtimer_user_save = 0;
	thread_template.vtimer_prof_save = 0;
	thread_template.vtimer_rlim_save = 0;

	thread_template.wait_timer_is_set = FALSE;
	thread_template.wait_timer_active = 0;

	thread_template.depress_timer_active = 0;

	thread_template.special_handler.handler = special_handler;
	thread_template.special_handler.next = NULL;

	thread_template.funnel_lock = THR_FUNNEL_NULL;
	thread_template.funnel_state = 0;
	thread_template.recover = (vm_offset_t)NULL;

	thread_template.map = VM_MAP_NULL;

#if CONFIG_DTRACE
	thread_template.t_dtrace_predcache = 0;
	thread_template.t_dtrace_vtime = 0;
	thread_template.t_dtrace_tracing = 0;
#endif /* CONFIG_DTRACE */

	thread_template.t_chud = 0;
	thread_template.t_page_creation_count = 0;
	thread_template.t_page_creation_time = 0;

	thread_template.affinity_set = NULL;

	init_thread = thread_template;
	machine_set_current_thread(&init_thread);
}

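/*
 *	thread_init:
 *
 *	Initialize the thread zone, the thread lock group and
 *	attributes, kernel stacks, and any machine-dependent
 *	per-thread state.  Called once during kernel startup.
 */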
void
thread_init(void)
{
	thread_zone = zinit(
			sizeof(struct thread),
			thread_max * sizeof(struct thread),
			THREAD_CHUNK * sizeof(struct thread),
			"threads");

	lck_grp_attr_setdefault(&thread_lck_grp_attr);
	lck_grp_init(&thread_lck_grp, "thread", &thread_lck_grp_attr);
	lck_attr_setdefault(&thread_lck_attr);

	stack_init();

	/*
	 *	Initialize any machine-dependent
	 *	per-thread structures necessary.
	 */
	machine_thread_init();
}

static void
thread_terminate_continue(void)
{
	panic("thread_terminate_continue");
	/*NOTREACHED*/
}

/*
 *	thread_terminate_self:
 *
 *	Called by a thread to terminate itself.  Cancels any
 *	outstanding depression and wait timers, performs BSD
 *	process exit if this is the last active thread in the
 *	task, then marks the thread TH_TERMINATE and blocks,
 *	leaving final clean up to the terminate daemon.
 */
void
thread_terminate_self(void)
{
	thread_t		thread = current_thread();
	task_t			task;
	spl_t			s;
	int				threadcnt;

	DTRACE_PROC(lwp__exit);

	thread_mtx_lock(thread);

	ulock_release_all(thread);

	ipc_thread_disable(thread);

	thread_mtx_unlock(thread);

	s = splsched();
	thread_lock(thread);

	/*
	 *	Cancel priority depression, wait for concurrent expirations
	 *	on other processors.
	 */
	if (thread->sched_mode & TH_MODE_ISDEPRESSED) {
		thread->sched_mode &= ~TH_MODE_ISDEPRESSED;

		if (timer_call_cancel(&thread->depress_timer))
			thread->depress_timer_active--;
	}

	while (thread->depress_timer_active > 0) {
		thread_unlock(thread);
		splx(s);

		delay(1);

		s = splsched();
		thread_lock(thread);
	}

	thread_sched_call(thread, NULL);

	thread_unlock(thread);
	splx(s);

	thread_policy_reset(thread);

	task = thread->task;
	uthread_cleanup(task, thread->uthread, task->bsd_info);
	threadcnt = hw_atomic_sub(&task->active_thread_count, 1);

	/*
	 * If we are the last thread to terminate and the task is
	 * associated with a BSD process, perform BSD process exit.
	 */
	if (threadcnt == 0 && task->bsd_info != NULL)
		proc_exit(task->bsd_info);

	uthread_cred_free(thread->uthread);

	s = splsched();
	thread_lock(thread);

	/*
	 *	Cancel wait timer, and wait for
	 *	concurrent expirations.
	 */
	if (thread->wait_timer_is_set) {
		thread->wait_timer_is_set = FALSE;

		if (timer_call_cancel(&thread->wait_timer))
			thread->wait_timer_active--;
	}

	while (thread->wait_timer_active > 0) {
		thread_unlock(thread);
		splx(s);

		delay(1);

		s = splsched();
		thread_lock(thread);
	}

	/*
	 *	If there is a reserved stack, release it.
	 */
	if (thread->reserved_stack != 0) {
		if (thread->reserved_stack != thread->kernel_stack)
			stack_free_stack(thread->reserved_stack);
		thread->reserved_stack = 0;
	}

	/*
	 *	Mark thread as terminating, and block.
	 */
	thread->state |= TH_TERMINATE;
	thread_mark_wait_locked(thread, THREAD_UNINT);
	assert(thread->promotions == 0);
	thread_unlock(thread);
	/* splsched */

	thread_block((thread_continue_t)thread_terminate_continue);
	/*NOTREACHED*/
}

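/*
 *	thread_deallocate:
 *
 *	Drop a reference on the specified thread.  On release of
 *	the final reference, tear down the thread's IPC state and
 *	BSD uthread, drop the task reference, free the kernel
 *	stack and machine-dependent state, and return the thread
 *	to its zone.
 */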
void
thread_deallocate(
	thread_t			thread)
{
	task_t				task;

	if (thread == THREAD_NULL)
		return;

	if (thread_deallocate_internal(thread) > 0)
		return;

	ipc_thread_terminate(thread);

	task = thread->task;

#ifdef MACH_BSD
	{
		void *ut = thread->uthread;

		thread->uthread = NULL;
		uthread_zone_free(ut);
	}
#endif /* MACH_BSD */

	task_deallocate(task);

	if (thread->kernel_stack != 0)
		stack_free(thread);

	lck_mtx_destroy(&thread->mutex, &thread_lck_grp);
	machine_thread_destroy(thread);

	zfree(thread_zone, thread);
}

/*
 *	thread_terminate_daemon:
 *
 *	Perform final clean up for terminating threads.
 */
static void
thread_terminate_daemon(void)
{
	thread_t			thread;
	task_t				task;

	(void)splsched();
	simple_lock(&thread_terminate_lock);

	while ((thread = (thread_t)dequeue_head(&thread_terminate_queue)) != THREAD_NULL) {
		simple_unlock(&thread_terminate_lock);
		(void)spllo();

		task = thread->task;

		task_lock(task);
		task->total_user_time += timer_grab(&thread->user_timer);
		task->total_system_time += timer_grab(&thread->system_timer);

		task->c_switch += thread->c_switch;
		task->p_switch += thread->p_switch;
		task->ps_switch += thread->ps_switch;

		queue_remove(&task->threads, thread, thread_t, task_threads);
		task->thread_count--;

		/*
		 * If the task is being halted, and there is only one thread
		 * left in the task after this one, then wakeup that thread.
		 */
		if (task->thread_count == 1 && task->halting)
			thread_wakeup((event_t)&task->halting);

		task_unlock(task);

		lck_mtx_lock(&tasks_threads_lock);
		queue_remove(&threads, thread, thread_t, threads);
		threads_count--;
		lck_mtx_unlock(&tasks_threads_lock);

		thread_deallocate(thread);

		(void)splsched();
		simple_lock(&thread_terminate_lock);
	}

	assert_wait((event_t)&thread_terminate_queue, THREAD_UNINT);
	simple_unlock(&thread_terminate_lock);
	/* splsched */

	thread_block((thread_continue_t)thread_terminate_daemon);
	/*NOTREACHED*/
}

/*
 *	thread_terminate_enqueue:
 *
 *	Enqueue a terminating thread for final disposition.
 *
 *	Called at splsched.
 */
void
thread_terminate_enqueue(
	thread_t		thread)
{
	simple_lock(&thread_terminate_lock);
	enqueue_tail(&thread_terminate_queue, (queue_entry_t)thread);
	simple_unlock(&thread_terminate_lock);

	thread_wakeup((event_t)&thread_terminate_queue);
}

/*
 *	thread_stack_daemon:
 *
 *	Allocate kernel stacks for threads that could not be
 *	handed a stack at dispatch time, then make them
 *	runnable again.
 */
static void
thread_stack_daemon(void)
{
	thread_t		thread;

	simple_lock(&thread_stack_lock);

	while ((thread = (thread_t)dequeue_head(&thread_stack_queue)) != THREAD_NULL) {
		simple_unlock(&thread_stack_lock);

		stack_alloc(thread);

		(void)splsched();
		thread_lock(thread);
		thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
		thread_unlock(thread);
		(void)spllo();

		simple_lock(&thread_stack_lock);
	}

	assert_wait((event_t)&thread_stack_queue, THREAD_UNINT);
	simple_unlock(&thread_stack_lock);

	thread_block((thread_continue_t)thread_stack_daemon);
	/*NOTREACHED*/
}

/*
 *	thread_stack_enqueue:
 *
 *	Enqueue a thread for stack allocation.
 *
 *	Called at splsched.
 */
void
thread_stack_enqueue(
	thread_t		thread)
{
	simple_lock(&thread_stack_lock);
	enqueue_tail(&thread_stack_queue, (queue_entry_t)thread);
	simple_unlock(&thread_stack_lock);

	thread_wakeup((event_t)&thread_stack_queue);
}

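/*
 *	thread_daemon_init:
 *
 *	Start the thread termination and stack allocation daemons.
 */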
void
thread_daemon_init(void)
{
	kern_return_t	result;
	thread_t		thread;

	simple_lock_init(&thread_terminate_lock, 0);
	queue_init(&thread_terminate_queue);

	result = kernel_thread_start_priority((thread_continue_t)thread_terminate_daemon, NULL, MINPRI_KERNEL, &thread);
	if (result != KERN_SUCCESS)
		panic("thread_daemon_init: thread_terminate_daemon");

	thread_deallocate(thread);

	simple_lock_init(&thread_stack_lock, 0);
	queue_init(&thread_stack_queue);

	result = kernel_thread_start_priority((thread_continue_t)thread_stack_daemon, NULL, BASEPRI_PREEMPT, &thread);
	if (result != KERN_SUCCESS)
		panic("thread_daemon_init: thread_stack_daemon");

	thread_deallocate(thread);
}

/*
 *	Create a new thread.
 *	Doesn't start the thread running.
 */
static kern_return_t
thread_create_internal(
	task_t				parent_task,
	integer_t			priority,
	thread_continue_t	continuation,
	int					options,
#define TH_OPTION_NONE		0x00
#define TH_OPTION_NOCRED	0x01
#define TH_OPTION_NOSUSP	0x02
	thread_t			*out_thread)
{
	thread_t			new_thread;
	static thread_t		first_thread;

	/*
	 *	Allocate a thread and initialize static fields
	 */
	if (first_thread == THREAD_NULL)
		new_thread = first_thread = current_thread();
	else
		new_thread = (thread_t)zalloc(thread_zone);
	if (new_thread == THREAD_NULL)
		return (KERN_RESOURCE_SHORTAGE);

	if (new_thread != first_thread)
		*new_thread = thread_template;

#ifdef MACH_BSD
	new_thread->uthread = uthread_alloc(parent_task, new_thread, (options & TH_OPTION_NOCRED) != 0);
	if (new_thread->uthread == NULL) {
		zfree(thread_zone, new_thread);
		return (KERN_RESOURCE_SHORTAGE);
	}
#endif /* MACH_BSD */

	if (machine_thread_create(new_thread, parent_task) != KERN_SUCCESS) {
#ifdef MACH_BSD
		void *ut = new_thread->uthread;

		new_thread->uthread = NULL;
		/* cred free may not be necessary */
		uthread_cleanup(parent_task, ut, parent_task->bsd_info);
		uthread_cred_free(ut);
		uthread_zone_free(ut);
#endif /* MACH_BSD */

		zfree(thread_zone, new_thread);
		return (KERN_FAILURE);
	}

	new_thread->task = parent_task;

	thread_lock_init(new_thread);
	wake_lock_init(new_thread);

	lck_mtx_init(&new_thread->mutex, &thread_lck_grp, &thread_lck_attr);

	ipc_thread_init(new_thread);
	queue_init(&new_thread->held_ulocks);

	new_thread->continuation = continuation;

	lck_mtx_lock(&tasks_threads_lock);
	task_lock(parent_task);

	if (	!parent_task->active || parent_task->halting ||
			((options & TH_OPTION_NOSUSP) != 0 &&
				parent_task->suspend_count > 0) ||
			(parent_task->thread_count >= task_threadmax &&
				parent_task != kernel_task)		) {
		task_unlock(parent_task);
		lck_mtx_unlock(&tasks_threads_lock);

#ifdef MACH_BSD
		{
			void *ut = new_thread->uthread;

			new_thread->uthread = NULL;
			uthread_cleanup(parent_task, ut, parent_task->bsd_info);
			/* cred free may not be necessary */
			uthread_cred_free(ut);
			uthread_zone_free(ut);
		}
#endif /* MACH_BSD */
		ipc_thread_disable(new_thread);
		ipc_thread_terminate(new_thread);
		lck_mtx_destroy(&new_thread->mutex, &thread_lck_grp);
		machine_thread_destroy(new_thread);
		zfree(thread_zone, new_thread);
		return (KERN_FAILURE);
	}

	/* New threads inherit any default state on the task */
	machine_thread_inherit_taskwide(new_thread, parent_task);

	task_reference_internal(parent_task);

	/* Cache the task's map */
	new_thread->map = parent_task->map;

	/* Chain the thread onto the task's list */
	queue_enter(&parent_task->threads, new_thread, thread_t, task_threads);
	parent_task->thread_count++;

	/* So terminating threads don't need to take the task lock to decrement */
	hw_atomic_add(&parent_task->active_thread_count, 1);

	/* Protected by the tasks_threads_lock */
	new_thread->thread_id = ++thread_unique_id;

	queue_enter(&threads, new_thread, thread_t, threads);
	threads_count++;

	timer_call_setup(&new_thread->wait_timer, thread_timer_expire, new_thread);
	timer_call_setup(&new_thread->depress_timer, thread_depress_expire, new_thread);

#if CONFIG_COUNTERS
	/*
	 * If parent task has any reservations, they need to be propagated to this
	 * thread.
	 */
	new_thread->t_chud = (TASK_PMC_FLAG == (parent_task->t_chud & TASK_PMC_FLAG)) ?
		THREAD_PMC_FLAG : 0U;
#endif

	/* Set the thread's scheduling parameters */
	if (parent_task != kernel_task)
		new_thread->sched_mode |= TH_MODE_TIMESHARE;
	new_thread->max_priority = parent_task->max_priority;
	new_thread->task_priority = parent_task->priority;
	new_thread->priority = (priority < 0)? parent_task->priority: priority;
	if (new_thread->priority > new_thread->max_priority)
		new_thread->priority = new_thread->max_priority;
	new_thread->importance =
					new_thread->priority - new_thread->task_priority;
	new_thread->sched_stamp = sched_tick;
	new_thread->pri_shift = sched_pri_shift;
	compute_priority(new_thread, FALSE);

	new_thread->active = TRUE;

	*out_thread = new_thread;

	{
		long	dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4;

		kdbg_trace_data(parent_task->bsd_info, &dbg_arg2);

		KERNEL_DEBUG_CONSTANT(
			TRACEDBG_CODE(DBG_TRACE_DATA, 1) | DBG_FUNC_NONE,
			(vm_address_t)(uintptr_t)thread_tid(new_thread), dbg_arg2, 0, 0, 0);

		kdbg_trace_string(parent_task->bsd_info,
							&dbg_arg1, &dbg_arg2, &dbg_arg3, &dbg_arg4);

		KERNEL_DEBUG_CONSTANT(
			TRACEDBG_CODE(DBG_TRACE_STRING, 1) | DBG_FUNC_NONE,
			dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, 0);
	}

	DTRACE_PROC1(lwp__create, thread_t, *out_thread);

	return (KERN_SUCCESS);
}

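/*
 *	thread_create:
 *
 *	Create a new thread in the specified task.  The thread is
 *	created suspended, with a reference returned to the caller.
 */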
kern_return_t
thread_create(
	task_t				task,
	thread_t			*new_thread)
{
	kern_return_t		result;
	thread_t			thread;

	if (task == TASK_NULL || task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	result = thread_create_internal(task, -1, (thread_continue_t)thread_bootstrap_return, TH_OPTION_NONE, &thread);
	if (result != KERN_SUCCESS)
		return (result);

	thread->user_stop_count = 1;
	thread_hold(thread);
	if (task->suspend_count > 0)
		thread_hold(thread);

	task_unlock(task);
	lck_mtx_unlock(&tasks_threads_lock);

	*new_thread = thread;

	return (KERN_SUCCESS);
}

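/*
 *	thread_create_running:
 *
 *	Create a new thread in the specified task, load the given
 *	machine state into it, and set it running.
 */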
kern_return_t
thread_create_running(
	register task_t			task,
	int						flavor,
	thread_state_t			new_state,
	mach_msg_type_number_t	new_state_count,
	thread_t				*new_thread)
{
	register kern_return_t	result;
	thread_t				thread;

	if (task == TASK_NULL || task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	result = thread_create_internal(task, -1, (thread_continue_t)thread_bootstrap_return, TH_OPTION_NONE, &thread);
	if (result != KERN_SUCCESS)
		return (result);

	result = machine_thread_set_state(
						thread, flavor, new_state, new_state_count);
	if (result != KERN_SUCCESS) {
		task_unlock(task);
		lck_mtx_unlock(&tasks_threads_lock);

		thread_terminate(thread);
		thread_deallocate(thread);
		return (result);
	}

	thread_mtx_lock(thread);
	thread_start_internal(thread);
	thread_mtx_unlock(thread);

	task_unlock(task);
	lck_mtx_unlock(&tasks_threads_lock);

	*new_thread = thread;

	return (result);
}

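/*
 *	thread_create_workq:
 *
 *	Variant of thread_create() for work queue threads: skips
 *	per-thread credential setup (TH_OPTION_NOCRED) and fails
 *	if the task is suspended (TH_OPTION_NOSUSP).
 */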
kern_return_t
thread_create_workq(
	task_t				task,
	thread_t			*new_thread)
{
	kern_return_t		result;
	thread_t			thread;

	if (task == TASK_NULL || task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	result = thread_create_internal(task, -1, (thread_continue_t)thread_bootstrap_return,
					TH_OPTION_NOCRED | TH_OPTION_NOSUSP, &thread);
	if (result != KERN_SUCCESS)
		return (result);

	thread->user_stop_count = 1;
	thread_hold(thread);
	if (task->suspend_count > 0)
		thread_hold(thread);

	task_unlock(task);
	lck_mtx_unlock(&tasks_threads_lock);

	*new_thread = thread;

	return (KERN_SUCCESS);
}

/*
 *	kernel_thread_create:
 *
 *	Create a thread in the kernel task
 *	to execute in kernel context.
 */
kern_return_t
kernel_thread_create(
	thread_continue_t	continuation,
	void				*parameter,
	integer_t			priority,
	thread_t			*new_thread)
{
	kern_return_t		result;
	thread_t			thread;
	task_t				task = kernel_task;

	result = thread_create_internal(task, priority, continuation, TH_OPTION_NONE, &thread);
	if (result != KERN_SUCCESS)
		return (result);

	task_unlock(task);
	lck_mtx_unlock(&tasks_threads_lock);

	stack_alloc(thread);
	assert(thread->kernel_stack != 0);
#if CONFIG_EMBEDDED
	if (priority > BASEPRI_KERNEL)
#endif
	thread->reserved_stack = thread->kernel_stack;

	thread->parameter = parameter;

	if (debug_task & 1)
		kprintf("kernel_thread_create: thread = %p continuation = %p\n", thread, continuation);
	*new_thread = thread;

	return (result);
}

kern_return_t
kernel_thread_start_priority(
	thread_continue_t	continuation,
	void				*parameter,
	integer_t			priority,
	thread_t			*new_thread)
{
	kern_return_t	result;
	thread_t		thread;

	result = kernel_thread_create(continuation, parameter, priority, &thread);
	if (result != KERN_SUCCESS)
		return (result);

	*new_thread = thread;

	thread_mtx_lock(thread);
	thread_start_internal(thread);
	thread_mtx_unlock(thread);

	return (result);
}

kern_return_t
kernel_thread_start(
	thread_continue_t	continuation,
	void				*parameter,
	thread_t			*new_thread)
{
	return kernel_thread_start_priority(continuation, parameter, -1, new_thread);
}

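/*
 * Usage sketch (illustrative only, not an interface of this file):
 * a typical caller starts a kernel thread and immediately drops the
 * reference returned through the out parameter, as thread_daemon_init()
 * does above.  "my_continuation" is a hypothetical continuation routine
 * with the thread_continue_t signature.
 *
 *	static void my_continuation(void *parameter, wait_result_t wresult);
 *
 *	thread_t	thread;
 *
 *	if (kernel_thread_start(my_continuation, NULL, &thread) == KERN_SUCCESS)
 *		thread_deallocate(thread);
 */
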
#ifndef	__LP64__

thread_t
kernel_thread(
	task_t			task,
	void			(*start)(void))
{
	kern_return_t	result;
	thread_t		thread;

	if (task != kernel_task)
		panic("kernel_thread");

	result = kernel_thread_start_priority((thread_continue_t)start, NULL, -1, &thread);
	if (result != KERN_SUCCESS)
		return (THREAD_NULL);

	thread_deallocate(thread);

	return (thread);
}

#endif	/* __LP64__ */

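/*
 *	thread_info_internal:
 *
 *	Return information about the specified thread for the
 *	requested flavor: THREAD_BASIC_INFO, THREAD_IDENTIFIER_INFO,
 *	THREAD_SCHED_TIMESHARE_INFO or THREAD_SCHED_RR_INFO.
 */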
kern_return_t
thread_info_internal(
	register thread_t		thread,
	thread_flavor_t			flavor,
	thread_info_t			thread_info_out,	/* ptr to OUT array */
	mach_msg_type_number_t	*thread_info_count)	/*IN/OUT*/
{
	int						state, flags;
	spl_t					s;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (flavor == THREAD_BASIC_INFO) {
		register thread_basic_info_t	basic_info;

		if (*thread_info_count < THREAD_BASIC_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		basic_info = (thread_basic_info_t) thread_info_out;

		s = splsched();
		thread_lock(thread);

		/* fill in info */

		thread_read_times(thread, &basic_info->user_time,
									&basic_info->system_time);

		/*
		 *	Update lazy-evaluated scheduler info because someone wants it.
		 */
		if (thread->sched_stamp != sched_tick)
			update_priority(thread);

		basic_info->sleep_time = 0;

		/*
		 *	To calculate cpu_usage, first correct for timer rate,
		 *	then for 5/8 ageing.  The correction factor [3/5] is
		 *	(1/(5/8) - 1).
		 */
		basic_info->cpu_usage = (integer_t)(((uint64_t)thread->cpu_usage
										* TH_USAGE_SCALE) / sched_tick_interval);
		basic_info->cpu_usage = (basic_info->cpu_usage * 3) / 5;

		if (basic_info->cpu_usage > TH_USAGE_SCALE)
			basic_info->cpu_usage = TH_USAGE_SCALE;

		basic_info->policy = ((thread->sched_mode & TH_MODE_TIMESHARE)?
												POLICY_TIMESHARE: POLICY_RR);

		flags = 0;
		if (thread->bound_processor != PROCESSOR_NULL && thread->bound_processor->idle_thread == thread)
			flags |= TH_FLAGS_IDLE;

		if (!thread->kernel_stack)
			flags |= TH_FLAGS_SWAPPED;

		state = 0;
		if (thread->state & TH_TERMINATE)
			state = TH_STATE_HALTED;
		else
		if (thread->state & TH_RUN)
			state = TH_STATE_RUNNING;
		else
		if (thread->state & TH_UNINT)
			state = TH_STATE_UNINTERRUPTIBLE;
		else
		if (thread->state & TH_SUSP)
			state = TH_STATE_STOPPED;
		else
		if (thread->state & TH_WAIT)
			state = TH_STATE_WAITING;

		basic_info->run_state = state;
		basic_info->flags = flags;

		basic_info->suspend_count = thread->user_stop_count;

		thread_unlock(thread);
		splx(s);

		*thread_info_count = THREAD_BASIC_INFO_COUNT;

		return (KERN_SUCCESS);
	}
	else
	if (flavor == THREAD_IDENTIFIER_INFO) {
		register thread_identifier_info_t	identifier_info;

		if (*thread_info_count < THREAD_IDENTIFIER_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		identifier_info = (thread_identifier_info_t) thread_info_out;

		s = splsched();
		thread_lock(thread);

		identifier_info->thread_id = thread->thread_id;
#if defined(__ppc__) || defined(__arm__)
		identifier_info->thread_handle = thread->machine.cthread_self;
#else
		identifier_info->thread_handle = thread->machine.pcb->cthread_self;
#endif
		if (thread->task->bsd_info) {
			identifier_info->dispatch_qaddr = identifier_info->thread_handle + get_dispatchqueue_offset_from_proc(thread->task->bsd_info);
		} else {
			thread_unlock(thread);
			splx(s);
			return KERN_INVALID_ARGUMENT;
		}

		thread_unlock(thread);
		splx(s);
		return KERN_SUCCESS;
	}
	else
	if (flavor == THREAD_SCHED_TIMESHARE_INFO) {
		policy_timeshare_info_t		ts_info;

		if (*thread_info_count < POLICY_TIMESHARE_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		ts_info = (policy_timeshare_info_t)thread_info_out;

		s = splsched();
		thread_lock(thread);

		if (!(thread->sched_mode & TH_MODE_TIMESHARE)) {
			thread_unlock(thread);
			splx(s);

			return (KERN_INVALID_POLICY);
		}

		ts_info->depressed = (thread->sched_mode & TH_MODE_ISDEPRESSED) != 0;
		if (ts_info->depressed) {
			ts_info->base_priority = DEPRESSPRI;
			ts_info->depress_priority = thread->priority;
		}
		else {
			ts_info->base_priority = thread->priority;
			ts_info->depress_priority = -1;
		}

		ts_info->cur_priority = thread->sched_pri;
		ts_info->max_priority = thread->max_priority;

		thread_unlock(thread);
		splx(s);

		*thread_info_count = POLICY_TIMESHARE_INFO_COUNT;

		return (KERN_SUCCESS);
	}
	else
	if (flavor == THREAD_SCHED_FIFO_INFO) {
		if (*thread_info_count < POLICY_FIFO_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		return (KERN_INVALID_POLICY);
	}
	else
	if (flavor == THREAD_SCHED_RR_INFO) {
		policy_rr_info_t			rr_info;

		if (*thread_info_count < POLICY_RR_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		rr_info = (policy_rr_info_t) thread_info_out;

		s = splsched();
		thread_lock(thread);

		if (thread->sched_mode & TH_MODE_TIMESHARE) {
			thread_unlock(thread);
			splx(s);

			return (KERN_INVALID_POLICY);
		}

		rr_info->depressed = (thread->sched_mode & TH_MODE_ISDEPRESSED) != 0;
		if (rr_info->depressed) {
			rr_info->base_priority = DEPRESSPRI;
			rr_info->depress_priority = thread->priority;
		}
		else {
			rr_info->base_priority = thread->priority;
			rr_info->depress_priority = -1;
		}

		rr_info->max_priority = thread->max_priority;
		rr_info->quantum = std_quantum_us / 1000;

		thread_unlock(thread);
		splx(s);

		*thread_info_count = POLICY_RR_INFO_COUNT;

		return (KERN_SUCCESS);
	}

	return (KERN_INVALID_ARGUMENT);
}

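/*
 *	thread_read_times:
 *
 *	Sample the thread's accumulated user and system timers and
 *	convert the values to seconds and microseconds.
 */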
void
thread_read_times(
	thread_t		thread,
	time_value_t	*user_time,
	time_value_t	*system_time)
{
	clock_sec_t		secs;
	clock_usec_t	usecs;

	absolutetime_to_microtime(timer_grab(&thread->user_timer), &secs, &usecs);
	user_time->seconds = (typeof(user_time->seconds))secs;
	user_time->microseconds = usecs;

	absolutetime_to_microtime(timer_grab(&thread->system_timer), &secs, &usecs);
	system_time->seconds = (typeof(system_time->seconds))secs;
	system_time->microseconds = usecs;
}

kern_return_t
thread_assign(
	__unused thread_t			thread,
	__unused processor_set_t	new_pset)
{
	return (KERN_FAILURE);
}

/*
 *	thread_assign_default:
 *
 *	Special version of thread_assign for assigning threads to default
 *	processor set.
 */
kern_return_t
thread_assign_default(
	thread_t		thread)
{
	return (thread_assign(thread, &pset0));
}

/*
 *	thread_get_assignment
 *
 *	Return current assignment for this thread.
 */
kern_return_t
thread_get_assignment(
	thread_t		thread,
	processor_set_t	*pset)
{
	if (thread == NULL)
		return (KERN_INVALID_ARGUMENT);

	*pset = &pset0;

	return (KERN_SUCCESS);
}

/*
 *	thread_wire_internal:
 *
 *	Specify that the target thread must always be able
 *	to run and to allocate memory.
 */
kern_return_t
thread_wire_internal(
	host_priv_t		host_priv,
	thread_t		thread,
	boolean_t		wired,
	boolean_t		*prev_state)
{
	if (host_priv == NULL || thread != current_thread())
		return (KERN_INVALID_ARGUMENT);

	assert(host_priv == &realhost);

	if (prev_state)
		*prev_state = (thread->options & TH_OPT_VMPRIV) != 0;

	if (wired) {
		if (!(thread->options & TH_OPT_VMPRIV))
			vm_page_free_reserve(1);	/* XXX */
		thread->options |= TH_OPT_VMPRIV;
	}
	else {
		if (thread->options & TH_OPT_VMPRIV)
			vm_page_free_reserve(-1);	/* XXX */
		thread->options &= ~TH_OPT_VMPRIV;
	}

	return (KERN_SUCCESS);
}


/*
 *	thread_wire:
 *
 *	User-api wrapper for thread_wire_internal()
 */
kern_return_t
thread_wire(
	host_priv_t	host_priv,
	thread_t	thread,
	boolean_t	wired)
{
	return (thread_wire_internal(host_priv, thread, wired, NULL));
}

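/*
 * Funnel support.  Funnels are the legacy coarse-grained locks used
 * to serialize entry into the BSD portion of the kernel.
 */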
int split_funnel_off = 0;
lck_grp_t		*funnel_lck_grp = LCK_GRP_NULL;
lck_grp_attr_t	*funnel_lck_grp_attr;
lck_attr_t		*funnel_lck_attr;

funnel_t *
funnel_alloc(
	int type)
{
	lck_mtx_t	*m;
	funnel_t	*fnl;

	if (funnel_lck_grp == LCK_GRP_NULL) {
		funnel_lck_grp_attr = lck_grp_attr_alloc_init();

		funnel_lck_grp = lck_grp_alloc_init("Funnel", funnel_lck_grp_attr);

		funnel_lck_attr = lck_attr_alloc_init();
	}
	if ((fnl = (funnel_t *)kalloc(sizeof(funnel_t))) != 0){
		bzero((void *)fnl, sizeof(funnel_t));
		if ((m = lck_mtx_alloc_init(funnel_lck_grp, funnel_lck_attr)) == (lck_mtx_t *)NULL) {
			kfree(fnl, sizeof(funnel_t));
			return(THR_FUNNEL_NULL);
		}
		fnl->fnl_mutex = m;
		fnl->fnl_type = type;
	}
	return(fnl);
}

void
funnel_free(
	funnel_t * fnl)
{
	lck_mtx_free(fnl->fnl_mutex, funnel_lck_grp);
	if (fnl->fnl_oldmutex)
		lck_mtx_free(fnl->fnl_oldmutex, funnel_lck_grp);
	kfree(fnl, sizeof(funnel_t));
}

void
funnel_lock(
	funnel_t * fnl)
{
	lck_mtx_lock(fnl->fnl_mutex);
	fnl->fnl_mtxholder = current_thread();
}

void
funnel_unlock(
	funnel_t * fnl)
{
	lck_mtx_unlock(fnl->fnl_mutex);
	fnl->fnl_mtxholder = NULL;
	fnl->fnl_mtxrelease = current_thread();
}

funnel_t *
thread_funnel_get(
	void)
{
	thread_t th = current_thread();

	if (th->funnel_state & TH_FN_OWNED) {
		return(th->funnel_lock);
	}
	return(THR_FUNNEL_NULL);
}

boolean_t
thread_funnel_set(
	funnel_t *	fnl,
	boolean_t	funneled)
{
	thread_t	cur_thread;
	boolean_t	funnel_state_prev;
	boolean_t	intr;

	cur_thread = current_thread();
	funnel_state_prev = ((cur_thread->funnel_state & TH_FN_OWNED) == TH_FN_OWNED);

	if (funnel_state_prev != funneled) {
		intr = ml_set_interrupts_enabled(FALSE);

		if (funneled == TRUE) {
			if (cur_thread->funnel_lock)
				panic("Funnel lock called when holding one %p", cur_thread->funnel_lock);
			KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE,
											fnl, 1, 0, 0, 0);
			funnel_lock(fnl);
			KERNEL_DEBUG(0x6032434 | DBG_FUNC_NONE,
											fnl, 1, 0, 0, 0);
			cur_thread->funnel_state |= TH_FN_OWNED;
			cur_thread->funnel_lock = fnl;
		} else {
			if (cur_thread->funnel_lock->fnl_mutex != fnl->fnl_mutex)
				panic("Funnel unlock when not holding funnel");
			cur_thread->funnel_state &= ~TH_FN_OWNED;
			KERNEL_DEBUG(0x603242c | DBG_FUNC_NONE,
								fnl, 1, 0, 0, 0);

			cur_thread->funnel_lock = THR_FUNNEL_NULL;
			funnel_unlock(fnl);
		}
		(void)ml_set_interrupts_enabled(intr);
	} else {
		/*
		 * Recursive acquisition: verify that the funnel
		 * requested is the one already held.
		 */
		if (funneled && (fnl->fnl_mutex != cur_thread->funnel_lock->fnl_mutex)) {
			panic("thread_funnel_set: already holding a different funnel");
		}
	}
	return(funnel_state_prev);
}

static void
sched_call_null(
__unused	int			type,
__unused	thread_t	thread)
{
	return;
}

void
thread_sched_call(
	thread_t		thread,
	sched_call_t	call)
{
	thread->sched_call = (call != NULL)? call: sched_call_null;
}

void
thread_static_param(
	thread_t		thread,
	boolean_t		state)
{
	thread_mtx_lock(thread);
	thread->static_param = state;
	thread_mtx_unlock(thread);
}

uint64_t
thread_tid(
	thread_t	thread)
{
	return (thread != THREAD_NULL? thread->thread_id: 0);
}

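/*
 *	thread_dispatchqaddr:
 *
 *	Return the user-space address of the thread's dispatch
 *	queue pointer: the thread handle (cthread self pointer)
 *	plus the process's dispatch queue offset.
 */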
uint64_t
thread_dispatchqaddr(
	thread_t thread)
{
	uint64_t	dispatchqueue_addr = 0;
	uint64_t	thread_handle = 0;

	if (thread != THREAD_NULL) {
#if defined(__ppc__) || defined(__arm__)
		thread_handle = thread->machine.cthread_self;
#else
		thread_handle = thread->machine.pcb->cthread_self;
#endif

		if (thread->task->bsd_info)
			dispatchqueue_addr = thread_handle + get_dispatchqueue_offset_from_proc(thread->task->bsd_info);
	}

	return (dispatchqueue_addr);
}

/*
 * Export routines to other components for things that are done as macros
 * within the osfmk component.
 */

#undef thread_reference
void thread_reference(thread_t thread);
void
thread_reference(
	thread_t	thread)
{
	if (thread != THREAD_NULL)
		thread_reference_internal(thread);
}

#undef thread_should_halt

boolean_t
thread_should_halt(
	thread_t		th)
{
	return (thread_should_halt_fast(th));
}

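/*
 * Accessors for per-thread DTrace state, provided here because the
 * thread structure is opaque outside of osfmk.
 */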
#if CONFIG_DTRACE
uint32_t dtrace_get_thread_predcache(thread_t thread)
{
	if (thread != THREAD_NULL)
		return thread->t_dtrace_predcache;
	else
		return 0;
}

int64_t dtrace_get_thread_vtime(thread_t thread)
{
	if (thread != THREAD_NULL)
		return thread->t_dtrace_vtime;
	else
		return 0;
}

int64_t dtrace_get_thread_tracing(thread_t thread)
{
	if (thread != THREAD_NULL)
		return thread->t_dtrace_tracing;
	else
		return 0;
}

boolean_t dtrace_get_thread_reentering(thread_t thread)
{
	if (thread != THREAD_NULL)
		return (thread->options & TH_OPT_DTRACE) ? TRUE : FALSE;
	else
		return 0;
}

vm_offset_t dtrace_get_kernel_stack(thread_t thread)
{
	if (thread != THREAD_NULL)
		return thread->kernel_stack;
	else
		return 0;
}

int64_t dtrace_calc_thread_recent_vtime(thread_t thread)
{
#if STAT_TIME
	if (thread != THREAD_NULL) {
		return timer_grab(&(thread->system_timer)) + timer_grab(&(thread->user_timer));
	} else
		return 0;
#else
	if (thread != THREAD_NULL) {
		processor_t		processor = current_processor();
		uint64_t		abstime = mach_absolute_time();
		timer_t			timer;

		timer = PROCESSOR_DATA(processor, thread_timer);

		return timer_grab(&(thread->system_timer)) + timer_grab(&(thread->user_timer)) +
				(abstime - timer->tstamp); /* XXX need interrupts off to prevent missed time? */
	} else
		return 0;
#endif
}

void dtrace_set_thread_predcache(thread_t thread, uint32_t predcache)
{
	if (thread != THREAD_NULL)
		thread->t_dtrace_predcache = predcache;
}

void dtrace_set_thread_vtime(thread_t thread, int64_t vtime)
{
	if (thread != THREAD_NULL)
		thread->t_dtrace_vtime = vtime;
}

void dtrace_set_thread_tracing(thread_t thread, int64_t accum)
{
	if (thread != THREAD_NULL)
		thread->t_dtrace_tracing = accum;
}

void dtrace_set_thread_reentering(thread_t thread, boolean_t vbool)
{
	if (thread != THREAD_NULL) {
		if (vbool)
			thread->options |= TH_OPT_DTRACE;
		else
			thread->options &= (~TH_OPT_DTRACE);
	}
}

vm_offset_t dtrace_set_thread_recover(thread_t thread, vm_offset_t recover)
{
	vm_offset_t prev = 0;

	if (thread != THREAD_NULL) {
		prev = thread->recover;
		thread->recover = recover;
	}
	return prev;
}

void dtrace_thread_bootstrap(void)
{
	task_t task = current_task();
	if (task->thread_count == 1) {
		DTRACE_PROC(start);
	}
	DTRACE_PROC(lwp__start);

}
#endif /* CONFIG_DTRACE */