apple/xnu (xnu-1699.22.73): osfmk/kern/thread.c
/*
 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	kern/thread.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young, David Golub
 *	Date:	1986
 *
 *	Thread management primitives implementation.
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Computer Systems Laboratory (CSL).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
 * improvements that they make and grant CSL redistribution rights.
 *
 */

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/policy.h>
#include <mach/thread_info.h>
#include <mach/thread_special_ports.h>
#include <mach/thread_status.h>
#include <mach/time_value.h>
#include <mach/vm_param.h>

#include <machine/thread.h>
#include <machine/pal_routines.h>

#include <kern/kern_types.h>
#include <kern/kalloc.h>
#include <kern/cpu_data.h>
#include <kern/counters.h>
#include <kern/extmod_statistics.h>
#include <kern/ipc_mig.h>
#include <kern/ipc_tt.h>
#include <kern/mach_param.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/sync_lock.h>
#include <kern/syscall_subr.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/host.h>
#include <kern/zalloc.h>
#include <kern/assert.h>

#include <ipc/ipc_kmsg.h>
#include <ipc/ipc_port.h>

#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>

#include <sys/kdebug.h>

#include <mach/sdt.h>

/*
 * Exported interfaces
 */
#include <mach/task_server.h>
#include <mach/thread_act_server.h>
#include <mach/mach_host_server.h>
#include <mach/host_priv_server.h>

static struct zone		*thread_zone;
static lck_grp_attr_t		thread_lck_grp_attr;
lck_attr_t			thread_lck_attr;
lck_grp_t			thread_lck_grp;

decl_simple_lock_data(static,thread_stack_lock)
static queue_head_t		thread_stack_queue;

decl_simple_lock_data(static,thread_terminate_lock)
static queue_head_t		thread_terminate_queue;

static struct thread		thread_template, init_thread;

static void	sched_call_null(
			int		type,
			thread_t	thread);

#ifdef MACH_BSD
extern void proc_exit(void *);
extern uint64_t get_dispatchqueue_offset_from_proc(void *);
#endif /* MACH_BSD */

extern int debug_task;
int thread_max = CONFIG_THREAD_MAX;	/* Max number of threads */
int task_threadmax = CONFIG_THREAD_MAX;

static uint64_t			thread_unique_id = 0;

void
thread_bootstrap(void)
{
	/*
	 *	Fill in a template thread for fast initialization.
	 */

	thread_template.runq = PROCESSOR_NULL;

	thread_template.ref_count = 2;

	thread_template.reason = AST_NONE;
	thread_template.at_safe_point = FALSE;
	thread_template.wait_event = NO_EVENT64;
	thread_template.wait_queue = WAIT_QUEUE_NULL;
	thread_template.wait_result = THREAD_WAITING;
	thread_template.options = THREAD_ABORTSAFE;
	thread_template.state = TH_WAIT | TH_UNINT;
	thread_template.wake_active = FALSE;
	thread_template.continuation = THREAD_CONTINUE_NULL;
	thread_template.parameter = NULL;

	thread_template.importance = 0;
	thread_template.sched_mode = TH_MODE_NONE;
	thread_template.sched_flags = 0;
	thread_template.saved_mode = TH_MODE_NONE;
	thread_template.safe_release = 0;

	thread_template.priority = 0;
	thread_template.sched_pri = 0;
	thread_template.max_priority = 0;
	thread_template.task_priority = 0;
	thread_template.promotions = 0;
	thread_template.pending_promoter_index = 0;
	thread_template.pending_promoter[0] =
		thread_template.pending_promoter[1] = NULL;

	thread_template.realtime.deadline = UINT64_MAX;

	thread_template.current_quantum = 0;
	thread_template.last_run_time = 0;
	thread_template.last_quantum_refill_time = 0;

	thread_template.computation_metered = 0;
	thread_template.computation_epoch = 0;

#if defined(CONFIG_SCHED_TRADITIONAL)
	thread_template.sched_stamp = 0;
	thread_template.pri_shift = INT8_MAX;
	thread_template.sched_usage = 0;
	thread_template.cpu_usage = thread_template.cpu_delta = 0;
#endif
	thread_template.c_switch = thread_template.p_switch = thread_template.ps_switch = 0;

	thread_template.bound_processor = PROCESSOR_NULL;
	thread_template.last_processor = PROCESSOR_NULL;

	thread_template.sched_call = sched_call_null;

	timer_init(&thread_template.user_timer);
	timer_init(&thread_template.system_timer);
	thread_template.user_timer_save = 0;
	thread_template.system_timer_save = 0;
	thread_template.vtimer_user_save = 0;
	thread_template.vtimer_prof_save = 0;
	thread_template.vtimer_rlim_save = 0;

	thread_template.wait_timer_is_set = FALSE;
	thread_template.wait_timer_active = 0;

	thread_template.depress_timer_active = 0;

	thread_template.special_handler.handler = special_handler;
	thread_template.special_handler.next = NULL;

	thread_template.funnel_lock = THR_FUNNEL_NULL;
	thread_template.funnel_state = 0;
	thread_template.recover = (vm_offset_t)NULL;

	thread_template.map = VM_MAP_NULL;

#if CONFIG_DTRACE
	thread_template.t_dtrace_predcache = 0;
	thread_template.t_dtrace_vtime = 0;
	thread_template.t_dtrace_tracing = 0;
#endif /* CONFIG_DTRACE */

	thread_template.t_chud = 0;
	thread_template.t_page_creation_count = 0;
	thread_template.t_page_creation_time = 0;

	thread_template.affinity_set = NULL;

	thread_template.syscalls_unix = 0;
	thread_template.syscalls_mach = 0;

	thread_template.tkm_private.alloc = 0;
	thread_template.tkm_private.free = 0;
	thread_template.tkm_shared.alloc = 0;
	thread_template.tkm_shared.free = 0;
	thread_template.actionstate = default_task_null_policy;
	thread_template.ext_actionstate = default_task_null_policy;
	thread_template.policystate = default_task_proc_policy;
	thread_template.ext_policystate = default_task_proc_policy;

	init_thread = thread_template;
	machine_set_current_thread(&init_thread);
}
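
/*
 *	Added note (a sketch, not original text): because thread_bootstrap()
 *	pre-fills thread_template, thread_create_internal() below can
 *	initialize a fresh thread with a single structure assignment,
 *
 *		new_thread = (thread_t)zalloc(thread_zone);
 *		*new_thread = thread_template;
 *
 *	and only per-thread fields are set afterwards.  The template's
 *	ref_count of 2 appears to account for one reference returned to the
 *	creating caller and one dropped by the terminate daemon during
 *	final cleanup (see thread_terminate_daemon below).
 */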

void
thread_init(void)
{
	thread_zone = zinit(
			sizeof(struct thread),
			thread_max * sizeof(struct thread),
			THREAD_CHUNK * sizeof(struct thread),
			"threads");

	zone_change(thread_zone, Z_NOENCRYPT, TRUE);

	lck_grp_attr_setdefault(&thread_lck_grp_attr);
	lck_grp_init(&thread_lck_grp, "thread", &thread_lck_grp_attr);
	lck_attr_setdefault(&thread_lck_attr);

	stack_init();

	/*
	 *	Initialize any machine-dependent
	 *	per-thread structures necessary.
	 */
	machine_thread_init();
}
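
/*
 *	Added note (a sketch, not original text): zinit()'s arguments above
 *	are (element size, maximum zone memory, allocation chunk size,
 *	zone name).  A subsystem creating its own zone would follow the
 *	same shape; my_zone, my_obj, MAX_OBJS and CHUNK_OBJS below are
 *	hypothetical names:
 *
 *		my_zone = zinit(sizeof(struct my_obj),
 *				MAX_OBJS * sizeof(struct my_obj),
 *				CHUNK_OBJS * sizeof(struct my_obj),
 *				"my objects");
 *		obj = (struct my_obj *)zalloc(my_zone);
 *		...
 *		zfree(my_zone, obj);
 */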

static void
thread_terminate_continue(void)
{
	panic("thread_terminate_continue");
	/*NOTREACHED*/
}

/*
 *	thread_terminate_self:
 */
void
thread_terminate_self(void)
{
	thread_t		thread = current_thread();

	task_t			task;
	spl_t			s;
	int			threadcnt;

	pal_thread_terminate_self(thread);

	DTRACE_PROC(lwp__exit);

	thread_mtx_lock(thread);

	ulock_release_all(thread);

	ipc_thread_disable(thread);

	thread_mtx_unlock(thread);

	s = splsched();
	thread_lock(thread);

	/*
	 *	Cancel priority depression, wait for concurrent expirations
	 *	on other processors.
	 */
	if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) {
		thread->sched_flags &= ~TH_SFLAG_DEPRESSED_MASK;

		if (timer_call_cancel(&thread->depress_timer))
			thread->depress_timer_active--;
	}

	while (thread->depress_timer_active > 0) {
		thread_unlock(thread);
		splx(s);

		delay(1);

		s = splsched();
		thread_lock(thread);
	}

	thread_sched_call(thread, NULL);

	thread_unlock(thread);
	splx(s);

	thread_policy_reset(thread);

	task = thread->task;
	uthread_cleanup(task, thread->uthread, task->bsd_info);
	threadcnt = hw_atomic_sub(&task->active_thread_count, 1);

	/*
	 *	If we are the last thread to terminate and the task is
	 *	associated with a BSD process, perform BSD process exit.
	 */
	if (threadcnt == 0 && task->bsd_info != NULL)
		proc_exit(task->bsd_info);

	uthread_cred_free(thread->uthread);

	s = splsched();
	thread_lock(thread);

	/*
	 *	Cancel wait timer, and wait for
	 *	concurrent expirations.
	 */
	if (thread->wait_timer_is_set) {
		thread->wait_timer_is_set = FALSE;

		if (timer_call_cancel(&thread->wait_timer))
			thread->wait_timer_active--;
	}

	while (thread->wait_timer_active > 0) {
		thread_unlock(thread);
		splx(s);

		delay(1);

		s = splsched();
		thread_lock(thread);
	}

	/*
	 *	If there is a reserved stack, release it.
	 */
	if (thread->reserved_stack != 0) {
		stack_free_reserved(thread);
		thread->reserved_stack = 0;
	}

	/*
	 *	Mark thread as terminating, and block.
	 */
	thread->state |= TH_TERMINATE;
	thread_mark_wait_locked(thread, THREAD_UNINT);
	assert(thread->promotions == 0);
	thread_unlock(thread);
	/* splsched */

	thread_block((thread_continue_t)thread_terminate_continue);
	/*NOTREACHED*/
}
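
/*
 *	Added note (commentary, not original text): the cancel-then-drain
 *	pattern used twice above is how this file synchronizes with a
 *	timer_call that may already be firing on another processor:
 *
 *		if (timer_call_cancel(&thread->wait_timer))
 *			thread->wait_timer_active--;	-- cancelled before it fired
 *		while (thread->wait_timer_active > 0) {
 *			-- drop the thread lock, delay(1), relock and retest
 *		}
 *
 *	Only once the active count drains to zero is teardown safe, since
 *	an in-flight expiration handler still references the thread.
 */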

void
thread_deallocate(
	thread_t			thread)
{
	task_t				task;

	if (thread == THREAD_NULL)
		return;

	if (thread_deallocate_internal(thread) > 0)
		return;

	ipc_thread_terminate(thread);

	task = thread->task;

#ifdef MACH_BSD
	{
		void *ut = thread->uthread;

		thread->uthread = NULL;
		uthread_zone_free(ut);
	}
#endif /* MACH_BSD */

	if (thread->kernel_stack != 0)
		stack_free(thread);

	lck_mtx_destroy(&thread->mutex, &thread_lck_grp);
	machine_thread_destroy(thread);

	task_deallocate(task);

	zfree(thread_zone, thread);
}
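
/*
 *	Usage sketch (added, not original text): callers that receive a
 *	thread reference, for example from kernel_thread_start() below,
 *	pair it with thread_deallocate() once they no longer need the
 *	pointer; fn and arg here are hypothetical:
 *
 *		thread_t	thread;
 *
 *		if (kernel_thread_start(fn, arg, &thread) == KERN_SUCCESS)
 *			thread_deallocate(thread);	-- drop creation reference
 *
 *	The final reference is dropped by the terminate daemon, which
 *	drives the teardown path above.
 */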

/*
 *	thread_terminate_daemon:
 *
 *	Perform final clean up for terminating threads.
 */
static void
thread_terminate_daemon(void)
{
	thread_t	self, thread;
	task_t		task;

	self = current_thread();
	self->options |= TH_OPT_SYSTEM_CRITICAL;

	(void)splsched();
	simple_lock(&thread_terminate_lock);

	while ((thread = (thread_t)dequeue_head(&thread_terminate_queue)) != THREAD_NULL) {
		simple_unlock(&thread_terminate_lock);
		(void)spllo();

		task = thread->task;

		task_lock(task);
		task->total_user_time += timer_grab(&thread->user_timer);
		task->total_system_time += timer_grab(&thread->system_timer);

		task->c_switch += thread->c_switch;
		task->p_switch += thread->p_switch;
		task->ps_switch += thread->ps_switch;

		task->syscalls_unix += thread->syscalls_unix;
		task->syscalls_mach += thread->syscalls_mach;

		task->tkm_private.alloc += thread->tkm_private.alloc;
		task->tkm_private.free += thread->tkm_private.free;
		task->tkm_shared.alloc += thread->tkm_shared.alloc;
		task->tkm_shared.free += thread->tkm_shared.free;

		queue_remove(&task->threads, thread, thread_t, task_threads);
		task->thread_count--;

		/*
		 *	If the task is being halted, and there is only one thread
		 *	left in the task after this one, then wakeup that thread.
		 */
		if (task->thread_count == 1 && task->halting)
			thread_wakeup((event_t)&task->halting);

		task_unlock(task);

		lck_mtx_lock(&tasks_threads_lock);
		queue_remove(&threads, thread, thread_t, threads);
		threads_count--;
		lck_mtx_unlock(&tasks_threads_lock);

		thread_deallocate(thread);

		(void)splsched();
		simple_lock(&thread_terminate_lock);
	}

	assert_wait((event_t)&thread_terminate_queue, THREAD_UNINT);
	simple_unlock(&thread_terminate_lock);
	/* splsched */

	self->options &= ~TH_OPT_SYSTEM_CRITICAL;
	thread_block((thread_continue_t)thread_terminate_daemon);
	/*NOTREACHED*/
}

/*
 *	thread_terminate_enqueue:
 *
 *	Enqueue a terminating thread for final disposition.
 *
 *	Called at splsched.
 */
void
thread_terminate_enqueue(
	thread_t		thread)
{
	simple_lock(&thread_terminate_lock);
	enqueue_tail(&thread_terminate_queue, (queue_entry_t)thread);
	simple_unlock(&thread_terminate_lock);

	thread_wakeup((event_t)&thread_terminate_queue);
}
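
/*
 *	Added note (a sketch of the pattern, not original text):
 *	thread_terminate_enqueue() and thread_terminate_daemon() form an
 *	assert_wait()/thread_wakeup() producer-consumer pair keyed on the
 *	address of thread_terminate_queue.  The daemon re-arms itself by
 *	calling assert_wait() while still holding thread_terminate_lock,
 *	so a wakeup cannot be lost between its final empty check and
 *	thread_block().  The generic shape is:
 *
 *		simple_lock(&lock);
 *		while ((elt = dequeue_head(&queue)) != NULL) {
 *			simple_unlock(&lock);
 *			-- process elt with the lock dropped
 *			simple_lock(&lock);
 *		}
 *		assert_wait((event_t)&queue, THREAD_UNINT);
 *		simple_unlock(&lock);
 *		thread_block(continuation);
 */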

/*
 *	thread_stack_daemon:
 *
 *	Perform stack allocation for threads that were enqueued after a
 *	stack handoff (invoke) failure left them without a kernel stack.
 */
static void
thread_stack_daemon(void)
{
	thread_t		thread;

	simple_lock(&thread_stack_lock);

	while ((thread = (thread_t)dequeue_head(&thread_stack_queue)) != THREAD_NULL) {
		simple_unlock(&thread_stack_lock);

		stack_alloc(thread);

		(void)splsched();
		thread_lock(thread);
		thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
		thread_unlock(thread);
		(void)spllo();

		simple_lock(&thread_stack_lock);
	}

	assert_wait((event_t)&thread_stack_queue, THREAD_UNINT);
	simple_unlock(&thread_stack_lock);

	thread_block((thread_continue_t)thread_stack_daemon);
	/*NOTREACHED*/
}

/*
 *	thread_stack_enqueue:
 *
 *	Enqueue a thread for stack allocation.
 *
 *	Called at splsched.
 */
void
thread_stack_enqueue(
	thread_t		thread)
{
	simple_lock(&thread_stack_lock);
	enqueue_tail(&thread_stack_queue, (queue_entry_t)thread);
	simple_unlock(&thread_stack_lock);

	thread_wakeup((event_t)&thread_stack_queue);
}

void
thread_daemon_init(void)
{
	kern_return_t	result;
	thread_t	thread = NULL;

	simple_lock_init(&thread_terminate_lock, 0);
	queue_init(&thread_terminate_queue);

	result = kernel_thread_start_priority((thread_continue_t)thread_terminate_daemon, NULL, MINPRI_KERNEL, &thread);
	if (result != KERN_SUCCESS)
		panic("thread_daemon_init: thread_terminate_daemon");

	thread_deallocate(thread);

	simple_lock_init(&thread_stack_lock, 0);
	queue_init(&thread_stack_queue);

	result = kernel_thread_start_priority((thread_continue_t)thread_stack_daemon, NULL, BASEPRI_PREEMPT, &thread);
	if (result != KERN_SUCCESS)
		panic("thread_daemon_init: thread_stack_daemon");

	thread_deallocate(thread);
}
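
/*
 *	Added note (commentary, not original text):
 *	kernel_thread_start_priority() returns a reference on each daemon
 *	thread; the thread_deallocate() calls above drop only that creation
 *	reference while the daemons keep running.  The stack daemon runs at
 *	BASEPRI_PREEMPT, above the MINPRI_KERNEL terminate daemon,
 *	presumably because runnable threads are stalled until it supplies
 *	their kernel stacks.
 */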

/*
 *	Create a new thread.
 *	Doesn't start the thread running.
 */
static kern_return_t
thread_create_internal(
	task_t				parent_task,
	integer_t			priority,
	thread_continue_t		continuation,
	int				options,
#define TH_OPTION_NONE		0x00
#define TH_OPTION_NOCRED	0x01
#define TH_OPTION_NOSUSP	0x02
	thread_t			*out_thread)
{
	thread_t			new_thread;
	static thread_t			first_thread;

	/*
	 *	Allocate a thread and initialize static fields
	 */
	if (first_thread == THREAD_NULL)
		new_thread = first_thread = current_thread();
	else
		new_thread = (thread_t)zalloc(thread_zone);
	if (new_thread == THREAD_NULL)
		return (KERN_RESOURCE_SHORTAGE);

	if (new_thread != first_thread)
		*new_thread = thread_template;

#ifdef MACH_BSD
	new_thread->uthread = uthread_alloc(parent_task, new_thread, (options & TH_OPTION_NOCRED) != 0);
	if (new_thread->uthread == NULL) {
		zfree(thread_zone, new_thread);
		return (KERN_RESOURCE_SHORTAGE);
	}
#endif /* MACH_BSD */

	if (machine_thread_create(new_thread, parent_task) != KERN_SUCCESS) {
#ifdef MACH_BSD
		void *ut = new_thread->uthread;

		new_thread->uthread = NULL;
		/* cred free may not be necessary */
		uthread_cleanup(parent_task, ut, parent_task->bsd_info);
		uthread_cred_free(ut);
		uthread_zone_free(ut);
#endif /* MACH_BSD */

		zfree(thread_zone, new_thread);
		return (KERN_FAILURE);
	}

	new_thread->task = parent_task;

	thread_lock_init(new_thread);
	wake_lock_init(new_thread);

	lck_mtx_init(&new_thread->mutex, &thread_lck_grp, &thread_lck_attr);

	ipc_thread_init(new_thread);
	queue_init(&new_thread->held_ulocks);

	new_thread->continuation = continuation;

	lck_mtx_lock(&tasks_threads_lock);
	task_lock(parent_task);

	if (	!parent_task->active || parent_task->halting ||
		((options & TH_OPTION_NOSUSP) != 0 &&
			parent_task->suspend_count > 0) ||
		(parent_task->thread_count >= task_threadmax &&
			parent_task != kernel_task)	) {
		task_unlock(parent_task);
		lck_mtx_unlock(&tasks_threads_lock);

#ifdef MACH_BSD
		{
			void *ut = new_thread->uthread;

			new_thread->uthread = NULL;
			uthread_cleanup(parent_task, ut, parent_task->bsd_info);
			/* cred free may not be necessary */
			uthread_cred_free(ut);
			uthread_zone_free(ut);
		}
#endif /* MACH_BSD */
		ipc_thread_disable(new_thread);
		ipc_thread_terminate(new_thread);
		lck_mtx_destroy(&new_thread->mutex, &thread_lck_grp);
		machine_thread_destroy(new_thread);
		zfree(thread_zone, new_thread);
		return (KERN_FAILURE);
	}

	/* New threads inherit any default state on the task */
	machine_thread_inherit_taskwide(new_thread, parent_task);

	task_reference_internal(parent_task);

	/* Cache the task's map */
	new_thread->map = parent_task->map;

	/* Chain the thread onto the task's list */
	queue_enter(&parent_task->threads, new_thread, thread_t, task_threads);
	parent_task->thread_count++;

	/* So terminating threads don't need to take the task lock to decrement */
	hw_atomic_add(&parent_task->active_thread_count, 1);

	/* Protected by the tasks_threads_lock */
	new_thread->thread_id = ++thread_unique_id;

	queue_enter(&threads, new_thread, thread_t, threads);
	threads_count++;

	timer_call_setup(&new_thread->wait_timer, thread_timer_expire, new_thread);
	timer_call_setup(&new_thread->depress_timer, thread_depress_expire, new_thread);

#if CONFIG_COUNTERS
	/*
	 *	If the parent task has any reservations, they need to be
	 *	propagated to this thread.
	 */
	new_thread->t_chud = (TASK_PMC_FLAG == (parent_task->t_chud & TASK_PMC_FLAG)) ?
		THREAD_PMC_FLAG : 0U;
#endif

	/* Set the thread's scheduling parameters */
	new_thread->sched_mode = SCHED(initial_thread_sched_mode)(parent_task);
	new_thread->sched_flags = 0;
	new_thread->max_priority = parent_task->max_priority;
	new_thread->task_priority = parent_task->priority;
	new_thread->priority = (priority < 0)? parent_task->priority: priority;
	if (new_thread->priority > new_thread->max_priority)
		new_thread->priority = new_thread->max_priority;
#if CONFIG_EMBEDDED
	if (new_thread->priority < MAXPRI_THROTTLE) {
		new_thread->priority = MAXPRI_THROTTLE;
	}
#endif /* CONFIG_EMBEDDED */
	new_thread->importance =
		new_thread->priority - new_thread->task_priority;
#if defined(CONFIG_SCHED_TRADITIONAL)
	new_thread->sched_stamp = sched_tick;
	new_thread->pri_shift = sched_pri_shift;
#endif
	SCHED(compute_priority)(new_thread, FALSE);

	new_thread->active = TRUE;

	*out_thread = new_thread;

	{
		long	dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4;

		kdbg_trace_data(parent_task->bsd_info, &dbg_arg2);

		KERNEL_DEBUG_CONSTANT(
			TRACEDBG_CODE(DBG_TRACE_DATA, 1) | DBG_FUNC_NONE,
			(vm_address_t)(uintptr_t)thread_tid(new_thread), dbg_arg2, 0, 0, 0);

		kdbg_trace_string(parent_task->bsd_info,
			&dbg_arg1, &dbg_arg2, &dbg_arg3, &dbg_arg4);

		KERNEL_DEBUG_CONSTANT(
			TRACEDBG_CODE(DBG_TRACE_STRING, 1) | DBG_FUNC_NONE,
			dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, 0);
	}

	DTRACE_PROC1(lwp__create, thread_t, *out_thread);

	return (KERN_SUCCESS);
}
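
/*
 *	Usage sketch (added, not original text): the TH_OPTION_* bits
 *	declared in the parameter list above combine as a mask; the
 *	workqueue path below, for example, passes
 *
 *		thread_create_internal(task, -1, thread_return,
 *			TH_OPTION_NOCRED | TH_OPTION_NOSUSP, &thread);
 *
 *	On success this routine returns with both the task lock and
 *	tasks_threads_lock still held; every caller is responsible for
 *	dropping them once it has finished setting up the new thread.
 */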

static kern_return_t
thread_create_internal2(
	task_t				task,
	thread_t			*new_thread,
	boolean_t			from_user)
{
	kern_return_t			result;
	thread_t			thread;

	if (task == TASK_NULL || task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	result = thread_create_internal(task, -1, (thread_continue_t)thread_bootstrap_return, TH_OPTION_NONE, &thread);
	if (result != KERN_SUCCESS)
		return (result);

	thread->user_stop_count = 1;
	thread_hold(thread);
	if (task->suspend_count > 0)
		thread_hold(thread);

	if (from_user)
		extmod_statistics_incr_thread_create(task);

	task_unlock(task);
	lck_mtx_unlock(&tasks_threads_lock);

	*new_thread = thread;

	return (KERN_SUCCESS);
}

/* No prototype, since task_server.h has the _from_user version if KERNEL_SERVER */
kern_return_t
thread_create(
	task_t				task,
	thread_t			*new_thread);

kern_return_t
thread_create(
	task_t				task,
	thread_t			*new_thread)
{
	return thread_create_internal2(task, new_thread, FALSE);
}

kern_return_t
thread_create_from_user(
	task_t				task,
	thread_t			*new_thread)
{
	return thread_create_internal2(task, new_thread, TRUE);
}
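
/*
 *	Example (a sketch assuming the standard user-level Mach interface
 *	that thread_create() backs, not original text): the new thread is
 *	created suspended (user_stop_count is 1 above), so a client must
 *	set its state and resume it:
 *
 *		thread_act_t	child;
 *		kern_return_t	kr;
 *
 *		kr = thread_create(mach_task_self(), &child);
 *		if (kr == KERN_SUCCESS) {
 *			-- thread_set_state(child, flavor, state, count);
 *			kr = thread_resume(child);
 *		}
 */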

static kern_return_t
thread_create_running_internal2(
	register task_t			task,
	int				flavor,
	thread_state_t			new_state,
	mach_msg_type_number_t		new_state_count,
	thread_t			*new_thread,
	boolean_t			from_user)
{
	register kern_return_t		result;
	thread_t			thread;

	if (task == TASK_NULL || task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	result = thread_create_internal(task, -1, (thread_continue_t)thread_bootstrap_return, TH_OPTION_NONE, &thread);
	if (result != KERN_SUCCESS)
		return (result);

	result = machine_thread_set_state(
			thread, flavor, new_state, new_state_count);
	if (result != KERN_SUCCESS) {
		task_unlock(task);
		lck_mtx_unlock(&tasks_threads_lock);

		thread_terminate(thread);
		thread_deallocate(thread);
		return (result);
	}

	thread_mtx_lock(thread);
	thread_start_internal(thread);
	thread_mtx_unlock(thread);

	if (from_user)
		extmod_statistics_incr_thread_create(task);

	task_unlock(task);
	lck_mtx_unlock(&tasks_threads_lock);

	*new_thread = thread;

	return (result);
}

/* Prototype, see justification above */
kern_return_t
thread_create_running(
	register task_t			task,
	int				flavor,
	thread_state_t			new_state,
	mach_msg_type_number_t		new_state_count,
	thread_t			*new_thread);

kern_return_t
thread_create_running(
	register task_t			task,
	int				flavor,
	thread_state_t			new_state,
	mach_msg_type_number_t		new_state_count,
	thread_t			*new_thread)
{
	return thread_create_running_internal2(
		task, flavor, new_state, new_state_count,
		new_thread, FALSE);
}

kern_return_t
thread_create_running_from_user(
	register task_t			task,
	int				flavor,
	thread_state_t			new_state,
	mach_msg_type_number_t		new_state_count,
	thread_t			*new_thread)
{
	return thread_create_running_internal2(
		task, flavor, new_state, new_state_count,
		new_thread, TRUE);
}

kern_return_t
thread_create_workq(
	task_t				task,
	thread_continue_t		thread_return,
	thread_t			*new_thread)
{
	kern_return_t			result;
	thread_t			thread;

	if (task == TASK_NULL || task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	result = thread_create_internal(task, -1, thread_return, TH_OPTION_NOCRED | TH_OPTION_NOSUSP, &thread);
	if (result != KERN_SUCCESS)
		return (result);

	thread->user_stop_count = 1;
	thread_hold(thread);
	if (task->suspend_count > 0)
		thread_hold(thread);

	task_unlock(task);
	lck_mtx_unlock(&tasks_threads_lock);

	*new_thread = thread;

	return (KERN_SUCCESS);
}

/*
 *	kernel_thread_create:
 *
 *	Create a thread in the kernel task
 *	to execute in kernel context.
 */
kern_return_t
kernel_thread_create(
	thread_continue_t	continuation,
	void			*parameter,
	integer_t		priority,
	thread_t		*new_thread)
{
	kern_return_t		result;
	thread_t		thread;
	task_t			task = kernel_task;

	result = thread_create_internal(task, priority, continuation, TH_OPTION_NONE, &thread);
	if (result != KERN_SUCCESS)
		return (result);

	task_unlock(task);
	lck_mtx_unlock(&tasks_threads_lock);

	stack_alloc(thread);
	assert(thread->kernel_stack != 0);
#if CONFIG_EMBEDDED
	if (priority > BASEPRI_KERNEL)
#endif
	thread->reserved_stack = thread->kernel_stack;

	thread->parameter = parameter;

	if (debug_task & 1)
		kprintf("kernel_thread_create: thread = %p continuation = %p\n", thread, continuation);
	*new_thread = thread;

	return (result);
}

kern_return_t
kernel_thread_start_priority(
	thread_continue_t	continuation,
	void			*parameter,
	integer_t		priority,
	thread_t		*new_thread)
{
	kern_return_t		result;
	thread_t		thread;

	result = kernel_thread_create(continuation, parameter, priority, &thread);
	if (result != KERN_SUCCESS)
		return (result);

	*new_thread = thread;

	thread_mtx_lock(thread);
	thread_start_internal(thread);
	thread_mtx_unlock(thread);

	return (result);
}

kern_return_t
kernel_thread_start(
	thread_continue_t	continuation,
	void			*parameter,
	thread_t		*new_thread)
{
	return kernel_thread_start_priority(continuation, parameter, -1, new_thread);
}
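
/*
 *	Usage sketch (added, not original text; my_worker and my_arg are
 *	hypothetical names):
 *
 *		static void
 *		my_worker(void *param, wait_result_t wr)
 *		{
 *			-- daemon loop: do work, assert_wait, thread_block
 *		}
 *
 *		thread_t	worker;
 *
 *		if (kernel_thread_start(my_worker, my_arg, &worker) == KERN_SUCCESS)
 *			thread_deallocate(worker);	-- drop returned reference
 */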

#ifndef	__LP64__

thread_t
kernel_thread(
	task_t			task,
	void			(*start)(void))
{
	kern_return_t	result;
	thread_t	thread;

	if (task != kernel_task)
		panic("kernel_thread");

	result = kernel_thread_start_priority((thread_continue_t)start, NULL, -1, &thread);
	if (result != KERN_SUCCESS)
		return (THREAD_NULL);

	thread_deallocate(thread);

	return (thread);
}

#endif	/* __LP64__ */

kern_return_t
thread_info_internal(
	register thread_t		thread,
	thread_flavor_t			flavor,
	thread_info_t			thread_info_out,	/* ptr to OUT array */
	mach_msg_type_number_t		*thread_info_count)	/* IN/OUT */
{
	int	state, flags;
	spl_t	s;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (flavor == THREAD_BASIC_INFO) {
		register thread_basic_info_t	basic_info;

		if (*thread_info_count < THREAD_BASIC_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		basic_info = (thread_basic_info_t) thread_info_out;

		s = splsched();
		thread_lock(thread);

		/* fill in info */

		thread_read_times(thread, &basic_info->user_time,
						&basic_info->system_time);

		/*
		 *	Update lazy-evaluated scheduler info because someone wants it.
		 */
		if (SCHED(can_update_priority)(thread))
			SCHED(update_priority)(thread);

		basic_info->sleep_time = 0;

		/*
		 *	To calculate cpu_usage, first correct for timer rate,
		 *	then for 5/8 ageing.  The correction factor [3/5] is
		 *	(1/(5/8) - 1).
		 */
		basic_info->cpu_usage = 0;
#if defined(CONFIG_SCHED_TRADITIONAL)
		if (sched_tick_interval) {
			basic_info->cpu_usage = (integer_t)(((uint64_t)thread->cpu_usage
								* TH_USAGE_SCALE) / sched_tick_interval);
			basic_info->cpu_usage = (basic_info->cpu_usage * 3) / 5;
		}
#endif

		if (basic_info->cpu_usage > TH_USAGE_SCALE)
			basic_info->cpu_usage = TH_USAGE_SCALE;

		basic_info->policy = ((thread->sched_mode == TH_MODE_TIMESHARE)?
						POLICY_TIMESHARE: POLICY_RR);

		flags = 0;
		if (thread->bound_processor != PROCESSOR_NULL && thread->bound_processor->idle_thread == thread)
			flags |= TH_FLAGS_IDLE;

		if (!thread->kernel_stack)
			flags |= TH_FLAGS_SWAPPED;

		state = 0;
		if (thread->state & TH_TERMINATE)
			state = TH_STATE_HALTED;
		else
		if (thread->state & TH_RUN)
			state = TH_STATE_RUNNING;
		else
		if (thread->state & TH_UNINT)
			state = TH_STATE_UNINTERRUPTIBLE;
		else
		if (thread->state & TH_SUSP)
			state = TH_STATE_STOPPED;
		else
		if (thread->state & TH_WAIT)
			state = TH_STATE_WAITING;

		basic_info->run_state = state;
		basic_info->flags = flags;

		basic_info->suspend_count = thread->user_stop_count;

		thread_unlock(thread);
		splx(s);

		*thread_info_count = THREAD_BASIC_INFO_COUNT;

		return (KERN_SUCCESS);
	}
	else
	if (flavor == THREAD_IDENTIFIER_INFO) {
		register thread_identifier_info_t	identifier_info;

		if (*thread_info_count < THREAD_IDENTIFIER_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		identifier_info = (thread_identifier_info_t) thread_info_out;

		s = splsched();
		thread_lock(thread);

		identifier_info->thread_id = thread->thread_id;
		identifier_info->thread_handle = thread->machine.cthread_self;
		if (thread->task->bsd_info) {
			identifier_info->dispatch_qaddr = identifier_info->thread_handle + get_dispatchqueue_offset_from_proc(thread->task->bsd_info);
		} else {
			thread_unlock(thread);
			splx(s);
			return KERN_INVALID_ARGUMENT;
		}

		thread_unlock(thread);
		splx(s);
		return KERN_SUCCESS;
	}
	else
	if (flavor == THREAD_SCHED_TIMESHARE_INFO) {
		policy_timeshare_info_t		ts_info;

		if (*thread_info_count < POLICY_TIMESHARE_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		ts_info = (policy_timeshare_info_t)thread_info_out;

		s = splsched();
		thread_lock(thread);

		if (thread->sched_mode != TH_MODE_TIMESHARE) {
			thread_unlock(thread);
			splx(s);

			return (KERN_INVALID_POLICY);
		}

		ts_info->depressed = (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) != 0;
		if (ts_info->depressed) {
			ts_info->base_priority = DEPRESSPRI;
			ts_info->depress_priority = thread->priority;
		}
		else {
			ts_info->base_priority = thread->priority;
			ts_info->depress_priority = -1;
		}

		ts_info->cur_priority = thread->sched_pri;
		ts_info->max_priority = thread->max_priority;

		thread_unlock(thread);
		splx(s);

		*thread_info_count = POLICY_TIMESHARE_INFO_COUNT;

		return (KERN_SUCCESS);
	}
	else
	if (flavor == THREAD_SCHED_FIFO_INFO) {
		if (*thread_info_count < POLICY_FIFO_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		return (KERN_INVALID_POLICY);
	}
	else
	if (flavor == THREAD_SCHED_RR_INFO) {
		policy_rr_info_t	rr_info;
		uint32_t		quantum_time;
		uint64_t		quantum_ns;

		if (*thread_info_count < POLICY_RR_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		rr_info = (policy_rr_info_t) thread_info_out;

		s = splsched();
		thread_lock(thread);

		if (thread->sched_mode == TH_MODE_TIMESHARE) {
			thread_unlock(thread);
			splx(s);

			return (KERN_INVALID_POLICY);
		}

		rr_info->depressed = (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) != 0;
		if (rr_info->depressed) {
			rr_info->base_priority = DEPRESSPRI;
			rr_info->depress_priority = thread->priority;
		}
		else {
			rr_info->base_priority = thread->priority;
			rr_info->depress_priority = -1;
		}

		quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
		absolutetime_to_nanoseconds(quantum_time, &quantum_ns);

		rr_info->max_priority = thread->max_priority;
		rr_info->quantum = (uint32_t)(quantum_ns / 1000 / 1000);

		thread_unlock(thread);
		splx(s);

		*thread_info_count = POLICY_RR_INFO_COUNT;

		return (KERN_SUCCESS);
	}

	return (KERN_INVALID_ARGUMENT);
}
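
/*
 *	Example (a sketch assuming the standard user-level Mach interface
 *	served by this routine, not original text): querying the basic-info
 *	flavor handled above from user space:
 *
 *		thread_basic_info_data_t	info;
 *		mach_msg_type_number_t		count = THREAD_BASIC_INFO_COUNT;
 *		kern_return_t			kr;
 *
 *		kr = thread_info(mach_thread_self(), THREAD_BASIC_INFO,
 *				(thread_info_t)&info, &count);
 *		if (kr == KERN_SUCCESS) {
 *			-- info.user_time, info.cpu_usage, info.run_state valid
 *		}
 */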

void
thread_read_times(
	thread_t		thread,
	time_value_t		*user_time,
	time_value_t		*system_time)
{
	clock_sec_t		secs;
	clock_usec_t		usecs;

	absolutetime_to_microtime(timer_grab(&thread->user_timer), &secs, &usecs);
	user_time->seconds = (typeof(user_time->seconds))secs;
	user_time->microseconds = usecs;

	absolutetime_to_microtime(timer_grab(&thread->system_timer), &secs, &usecs);
	system_time->seconds = (typeof(system_time->seconds))secs;
	system_time->microseconds = usecs;
}

kern_return_t
thread_assign(
	__unused thread_t		thread,
	__unused processor_set_t	new_pset)
{
	return (KERN_FAILURE);
}

/*
 *	thread_assign_default:
 *
 *	Special version of thread_assign for assigning threads to default
 *	processor set.
 */
kern_return_t
thread_assign_default(
	thread_t		thread)
{
	return (thread_assign(thread, &pset0));
}

/*
 *	thread_get_assignment
 *
 *	Return current assignment for this thread.
 */
kern_return_t
thread_get_assignment(
	thread_t		thread,
	processor_set_t		*pset)
{
	if (thread == NULL)
		return (KERN_INVALID_ARGUMENT);

	*pset = &pset0;

	return (KERN_SUCCESS);
}

/*
 *	thread_wire_internal:
 *
 *	Specify that the target thread must always be able
 *	to run and to allocate memory.
 */
kern_return_t
thread_wire_internal(
	host_priv_t		host_priv,
	thread_t		thread,
	boolean_t		wired,
	boolean_t		*prev_state)
{
	if (host_priv == NULL || thread != current_thread())
		return (KERN_INVALID_ARGUMENT);

	assert(host_priv == &realhost);

	if (prev_state)
		*prev_state = (thread->options & TH_OPT_VMPRIV) != 0;

	if (wired) {
		if (!(thread->options & TH_OPT_VMPRIV))
			vm_page_free_reserve(1);	/* XXX */
		thread->options |= TH_OPT_VMPRIV;
	}
	else {
		if (thread->options & TH_OPT_VMPRIV)
			vm_page_free_reserve(-1);	/* XXX */
		thread->options &= ~TH_OPT_VMPRIV;
	}

	return (KERN_SUCCESS);
}

/*
 *	thread_wire:
 *
 *	User-api wrapper for thread_wire_internal()
 */
kern_return_t
thread_wire(
	host_priv_t	host_priv,
	thread_t	thread,
	boolean_t	wired)
{
	return (thread_wire_internal(host_priv, thread, wired, NULL));
}
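
/*
 *	Added note (commentary, not original text): wiring a thread adjusts
 *	the VM free-page reserve (the XXX calls above) and marks the thread
 *	TH_OPT_VMPRIV, so it can still allocate pages when the free list is
 *	nearly exhausted.  This is intended for threads, such as the
 *	pageout path, whose forward progress is itself required to free
 *	memory.
 */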

int		split_funnel_off = 0;
lck_grp_t	*funnel_lck_grp = LCK_GRP_NULL;
lck_grp_attr_t	*funnel_lck_grp_attr;
lck_attr_t	*funnel_lck_attr;

funnel_t *
funnel_alloc(
	int type)
{
	lck_mtx_t	*m;
	funnel_t	*fnl;

	if (funnel_lck_grp == LCK_GRP_NULL) {
		funnel_lck_grp_attr = lck_grp_attr_alloc_init();

		funnel_lck_grp = lck_grp_alloc_init("Funnel", funnel_lck_grp_attr);

		funnel_lck_attr = lck_attr_alloc_init();
	}
	if ((fnl = (funnel_t *)kalloc(sizeof(funnel_t))) != 0) {
		bzero((void *)fnl, sizeof(funnel_t));
		if ((m = lck_mtx_alloc_init(funnel_lck_grp, funnel_lck_attr)) == (lck_mtx_t *)NULL) {
			kfree(fnl, sizeof(funnel_t));
			return (THR_FUNNEL_NULL);
		}
		fnl->fnl_mutex = m;
		fnl->fnl_type = type;
	}
	return (fnl);
}

void
funnel_free(
	funnel_t * fnl)
{
	lck_mtx_free(fnl->fnl_mutex, funnel_lck_grp);
	if (fnl->fnl_oldmutex)
		lck_mtx_free(fnl->fnl_oldmutex, funnel_lck_grp);
	kfree(fnl, sizeof(funnel_t));
}

void
funnel_lock(
	funnel_t * fnl)
{
	lck_mtx_lock(fnl->fnl_mutex);
	fnl->fnl_mtxholder = current_thread();
}

void
funnel_unlock(
	funnel_t * fnl)
{
	lck_mtx_unlock(fnl->fnl_mutex);
	fnl->fnl_mtxholder = NULL;
	fnl->fnl_mtxrelease = current_thread();
}

funnel_t *
thread_funnel_get(
	void)
{
	thread_t th = current_thread();

	if (th->funnel_state & TH_FN_OWNED) {
		return (th->funnel_lock);
	}
	return (THR_FUNNEL_NULL);
}

boolean_t
thread_funnel_set(
	funnel_t *	fnl,
	boolean_t	funneled)
{
	thread_t	cur_thread;
	boolean_t	funnel_state_prev;
	boolean_t	intr;

	cur_thread = current_thread();
	funnel_state_prev = ((cur_thread->funnel_state & TH_FN_OWNED) == TH_FN_OWNED);

	if (funnel_state_prev != funneled) {
		intr = ml_set_interrupts_enabled(FALSE);

		if (funneled == TRUE) {
			if (cur_thread->funnel_lock)
				panic("Funnel lock called when holding one %p", cur_thread->funnel_lock);
			KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE,
				fnl, 1, 0, 0, 0);
			funnel_lock(fnl);
			KERNEL_DEBUG(0x6032434 | DBG_FUNC_NONE,
				fnl, 1, 0, 0, 0);
			cur_thread->funnel_state |= TH_FN_OWNED;
			cur_thread->funnel_lock = fnl;
		} else {
			if (cur_thread->funnel_lock->fnl_mutex != fnl->fnl_mutex)
				panic("Funnel unlock when not holding funnel");
			cur_thread->funnel_state &= ~TH_FN_OWNED;
			KERNEL_DEBUG(0x603242c | DBG_FUNC_NONE,
				fnl, 1, 0, 0, 0);

			cur_thread->funnel_lock = THR_FUNNEL_NULL;
			funnel_unlock(fnl);
		}
		(void)ml_set_interrupts_enabled(intr);
	} else {
		/*
		 *	If we are trying to acquire the funnel recursively,
		 *	verify that we already hold this same funnel.
		 */
		if (funneled && (fnl->fnl_mutex != cur_thread->funnel_lock->fnl_mutex)) {
			panic("thread_funnel_set: already holding a different funnel");
		}
	}
	return (funnel_state_prev);
}
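
/*
 *	Usage sketch (added, not original text; assumes the BSD kernel
 *	funnel, kernel_flock, defined elsewhere in xnu): legacy
 *	non-reentrant code brackets itself with save/restore of the
 *	funnel state:
 *
 *		boolean_t	funnel_state;
 *
 *		funnel_state = thread_funnel_set(kernel_flock, TRUE);
 *		-- code serialized by the funnel
 *		(void) thread_funnel_set(kernel_flock, funnel_state);
 */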

static void
sched_call_null(
	__unused	int		type,
	__unused	thread_t	thread)
{
	return;
}

void
thread_sched_call(
	thread_t	thread,
	sched_call_t	call)
{
	thread->sched_call = (call != NULL)? call: sched_call_null;
}

void
thread_static_param(
	thread_t	thread,
	boolean_t	state)
{
	thread_mtx_lock(thread);
	thread->static_param = state;
	thread_mtx_unlock(thread);
}

uint64_t
thread_tid(
	thread_t	thread)
{
	return (thread != THREAD_NULL? thread->thread_id: 0);
}

uint64_t
thread_dispatchqaddr(
	thread_t	thread)
{
	uint64_t	dispatchqueue_addr = 0;
	uint64_t	thread_handle = 0;

	if (thread != THREAD_NULL) {
		thread_handle = thread->machine.cthread_self;

		if (thread->task->bsd_info)
			dispatchqueue_addr = thread_handle + get_dispatchqueue_offset_from_proc(thread->task->bsd_info);
	}

	return (dispatchqueue_addr);
}

/*
 * Export routines to other components for things that are done as macros
 * within the osfmk component.
 */

#undef thread_reference
void thread_reference(thread_t thread);
void
thread_reference(
	thread_t	thread)
{
	if (thread != THREAD_NULL)
		thread_reference_internal(thread);
}

#undef thread_should_halt

boolean_t
thread_should_halt(
	thread_t	th)
{
	return (thread_should_halt_fast(th));
}

#if CONFIG_DTRACE
uint32_t dtrace_get_thread_predcache(thread_t thread)
{
	if (thread != THREAD_NULL)
		return thread->t_dtrace_predcache;
	else
		return 0;
}

int64_t dtrace_get_thread_vtime(thread_t thread)
{
	if (thread != THREAD_NULL)
		return thread->t_dtrace_vtime;
	else
		return 0;
}

int64_t dtrace_get_thread_tracing(thread_t thread)
{
	if (thread != THREAD_NULL)
		return thread->t_dtrace_tracing;
	else
		return 0;
}

boolean_t dtrace_get_thread_reentering(thread_t thread)
{
	if (thread != THREAD_NULL)
		return (thread->options & TH_OPT_DTRACE) ? TRUE : FALSE;
	else
		return 0;
}

vm_offset_t dtrace_get_kernel_stack(thread_t thread)
{
	if (thread != THREAD_NULL)
		return thread->kernel_stack;
	else
		return 0;
}

int64_t dtrace_calc_thread_recent_vtime(thread_t thread)
{
#if STAT_TIME
	if (thread != THREAD_NULL) {
		return timer_grab(&(thread->system_timer)) + timer_grab(&(thread->user_timer));
	} else
		return 0;
#else
	if (thread != THREAD_NULL) {
		processor_t	processor = current_processor();
		uint64_t	abstime = mach_absolute_time();
		timer_t		timer;

		timer = PROCESSOR_DATA(processor, thread_timer);

		return timer_grab(&(thread->system_timer)) + timer_grab(&(thread->user_timer)) +
				(abstime - timer->tstamp); /* XXX need interrupts off to prevent missed time? */
	} else
		return 0;
#endif
}

void dtrace_set_thread_predcache(thread_t thread, uint32_t predcache)
{
	if (thread != THREAD_NULL)
		thread->t_dtrace_predcache = predcache;
}

void dtrace_set_thread_vtime(thread_t thread, int64_t vtime)
{
	if (thread != THREAD_NULL)
		thread->t_dtrace_vtime = vtime;
}

void dtrace_set_thread_tracing(thread_t thread, int64_t accum)
{
	if (thread != THREAD_NULL)
		thread->t_dtrace_tracing = accum;
}

void dtrace_set_thread_reentering(thread_t thread, boolean_t vbool)
{
	if (thread != THREAD_NULL) {
		if (vbool)
			thread->options |= TH_OPT_DTRACE;
		else
			thread->options &= (~TH_OPT_DTRACE);
	}
}

vm_offset_t dtrace_set_thread_recover(thread_t thread, vm_offset_t recover)
{
	vm_offset_t prev = 0;

	if (thread != THREAD_NULL) {
		prev = thread->recover;
		thread->recover = recover;
	}
	return prev;
}

void dtrace_thread_bootstrap(void)
{
	task_t task = current_task();

	if (task->thread_count == 1) {
		DTRACE_PROC(start);
	}
	DTRACE_PROC(lwp__start);
}
#endif /* CONFIG_DTRACE */