apple/xnu.git: osfmk/kern/thread.c (blob 9e4360a73afc22a76a46465745d277bc1a18d5dd)
1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_FREE_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: kern/thread.c
60 * Author: Avadis Tevanian, Jr., Michael Wayne Young, David Golub
61 * Date: 1986
62 *
63 * Thread management primitives implementation.
64 */
65 /*
66 * Copyright (c) 1993 The University of Utah and
67 * the Computer Systems Laboratory (CSL). All rights reserved.
68 *
69 * Permission to use, copy, modify and distribute this software and its
70 * documentation is hereby granted, provided that both the copyright
71 * notice and this permission notice appear in all copies of the
72 * software, derivative works or modified versions, and any portions
73 * thereof, and that both notices appear in supporting documentation.
74 *
75 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
76 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
77 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
78 *
79 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
80 * improvements that they make and grant CSL redistribution rights.
81 *
82 */
83
84 #include <mach/mach_types.h>
85 #include <mach/boolean.h>
86 #include <mach/policy.h>
87 #include <mach/thread_info.h>
88 #include <mach/thread_special_ports.h>
89 #include <mach/thread_status.h>
90 #include <mach/time_value.h>
91 #include <mach/vm_param.h>
92
93 #include <machine/thread.h>
94
95 #include <kern/kern_types.h>
96 #include <kern/kalloc.h>
97 #include <kern/cpu_data.h>
98 #include <kern/counters.h>
99 #include <kern/ipc_mig.h>
100 #include <kern/ipc_tt.h>
101 #include <kern/mach_param.h>
102 #include <kern/machine.h>
103 #include <kern/misc_protos.h>
104 #include <kern/processor.h>
105 #include <kern/queue.h>
106 #include <kern/sched.h>
107 #include <kern/sched_prim.h>
108 #include <kern/sync_lock.h>
109 #include <kern/syscall_subr.h>
110 #include <kern/task.h>
111 #include <kern/thread.h>
112 #include <kern/host.h>
113 #include <kern/zalloc.h>
114 #include <kern/assert.h>
115
116 #include <ipc/ipc_kmsg.h>
117 #include <ipc/ipc_port.h>
118
119 #include <vm/vm_kern.h>
120 #include <vm/vm_pageout.h>
121
122 #include <sys/kdebug.h>
123
124 #include <mach/sdt.h>
125
126 /*
127 * Exported interfaces
128 */
129 #include <mach/task_server.h>
130 #include <mach/thread_act_server.h>
131 #include <mach/mach_host_server.h>
132 #include <mach/host_priv_server.h>
133
134 static struct zone *thread_zone;
135
136 decl_simple_lock_data(static,thread_stack_lock)
137 static queue_head_t thread_stack_queue;
138
139 decl_simple_lock_data(static,thread_terminate_lock)
140 static queue_head_t thread_terminate_queue;
141
142 static struct thread thread_template, init_thread;
143
144 static void sched_call_null(
145 int type,
146 thread_t thread);
147 #ifdef MACH_BSD
148 extern void proc_exit(void *);
149 #endif /* MACH_BSD */
150 extern int debug_task;
151
152 void
153 thread_bootstrap(void)
154 {
155 /*
156 * Fill in a template thread for fast initialization.
157 */
158
159 thread_template.runq = PROCESSOR_NULL;
160
161 thread_template.ref_count = 2;
162
163 thread_template.reason = AST_NONE;
164 thread_template.at_safe_point = FALSE;
165 thread_template.wait_event = NO_EVENT64;
166 thread_template.wait_queue = WAIT_QUEUE_NULL;
167 thread_template.wait_result = THREAD_WAITING;
168 thread_template.options = THREAD_ABORTSAFE;
169 thread_template.state = TH_WAIT | TH_UNINT;
170 thread_template.wake_active = FALSE;
171 thread_template.continuation = THREAD_CONTINUE_NULL;
172 thread_template.parameter = NULL;
173
174 thread_template.importance = 0;
175 thread_template.sched_mode = 0;
176 thread_template.safe_mode = 0;
177 thread_template.safe_release = 0;
178
179 thread_template.priority = 0;
180 thread_template.sched_pri = 0;
181 thread_template.max_priority = 0;
182 thread_template.task_priority = 0;
183 thread_template.promotions = 0;
184 thread_template.pending_promoter_index = 0;
185 thread_template.pending_promoter[0] =
186 thread_template.pending_promoter[1] = NULL;
187
188 thread_template.realtime.deadline = UINT64_MAX;
189
190 thread_template.current_quantum = 0;
191
192 thread_template.computation_metered = 0;
193 thread_template.computation_epoch = 0;
194
195 thread_template.sched_stamp = 0;
196 thread_template.sched_usage = 0;
197 thread_template.pri_shift = INT8_MAX;
198 thread_template.cpu_usage = thread_template.cpu_delta = 0;
199 thread_template.c_switch = thread_template.p_switch = thread_template.ps_switch = 0;
200
201 thread_template.bound_processor = PROCESSOR_NULL;
202 thread_template.last_processor = PROCESSOR_NULL;
203 thread_template.last_switch = 0;
204
205 thread_template.sched_call = sched_call_null;
206
207 timer_init(&thread_template.user_timer);
208 timer_init(&thread_template.system_timer);
209 thread_template.user_timer_save = 0;
210 thread_template.system_timer_save = 0;
211 thread_template.vtimer_user_save = 0;
212 thread_template.vtimer_prof_save = 0;
213 thread_template.vtimer_rlim_save = 0;
214
215 thread_template.wait_timer_is_set = FALSE;
216 thread_template.wait_timer_active = 0;
217
218 thread_template.depress_timer_active = 0;
219
220 thread_template.special_handler.handler = special_handler;
221 thread_template.special_handler.next = NULL;
222
223 thread_template.funnel_lock = THR_FUNNEL_NULL;
224 thread_template.funnel_state = 0;
225 thread_template.recover = (vm_offset_t)NULL;
226
227 thread_template.map = VM_MAP_NULL;
228
229 #if CONFIG_DTRACE
230 thread_template.t_dtrace_predcache = 0;
231 thread_template.t_dtrace_vtime = 0;
232 thread_template.t_dtrace_tracing = 0;
233 #endif /* CONFIG_DTRACE */
234
235 thread_template.t_chud = 0;
236
237 thread_template.affinity_set = NULL;
238
239 init_thread = thread_template;
240 machine_set_current_thread(&init_thread);
241 }
242
243 void
244 thread_init(void)
245 {
246 thread_zone = zinit(
247 sizeof(struct thread),
248 THREAD_MAX * sizeof(struct thread),
249 THREAD_CHUNK * sizeof(struct thread),
250 "threads");
251
252 stack_init();
253
254 /*
255 * Initialize any machine-dependent
256 * per-thread structures necessary.
257 */
258 machine_thread_init();
259 }
260
261 static void
262 thread_terminate_continue(void)
263 {
264 panic("thread_terminate_continue");
265 /*NOTREACHED*/
266 }
267
268 /*
269 * thread_terminate_self:
270 */
271 void
272 thread_terminate_self(void)
273 {
274 thread_t thread = current_thread();
275 task_t task;
276 spl_t s;
277 int lastthread = 0;
278
279 thread_mtx_lock(thread);
280
281 ulock_release_all(thread);
282
283 ipc_thread_disable(thread);
284
285 thread_mtx_unlock(thread);
286
287 s = splsched();
288 thread_lock(thread);
289
290 /*
291 * Cancel priority depression, wait for concurrent expirations
292 * on other processors.
293 */
294 if (thread->sched_mode & TH_MODE_ISDEPRESSED) {
295 thread->sched_mode &= ~TH_MODE_ISDEPRESSED;
296
297 if (timer_call_cancel(&thread->depress_timer))
298 thread->depress_timer_active--;
299 }
300
301 while (thread->depress_timer_active > 0) {
302 thread_unlock(thread);
303 splx(s);
304
305 delay(1);
306
307 s = splsched();
308 thread_lock(thread);
309 }
310
311 thread_unlock(thread);
312 splx(s);
313
314 thread_policy_reset(thread);
315
316 /*
317 * If we are the last thread to terminate and the task is
318 * associated with a BSD process, perform BSD process exit.
319 */
320 task = thread->task;
321 uthread_cleanup(task, thread->uthread, task->bsd_info);
322 if (hw_atomic_sub(&task->active_thread_count, 1) == 0 &&
323 task->bsd_info != NULL) {
324 lastthread = 1;
325 }
326
327 if (lastthread != 0)
328 proc_exit(task->bsd_info);
329
330 uthread_cred_free(thread->uthread);
331
332 s = splsched();
333 thread_lock(thread);
334
335 /*
336 * Cancel wait timer, and wait for
337 * concurrent expirations.
338 */
339 if (thread->wait_timer_is_set) {
340 thread->wait_timer_is_set = FALSE;
341
342 if (timer_call_cancel(&thread->wait_timer))
343 thread->wait_timer_active--;
344 }
345
346 while (thread->wait_timer_active > 0) {
347 thread_unlock(thread);
348 splx(s);
349
350 delay(1);
351
352 s = splsched();
353 thread_lock(thread);
354 }
355
356 /*
357 * If there is a reserved stack, release it.
358 */
359 if (thread->reserved_stack != 0) {
360 if (thread->reserved_stack != thread->kernel_stack)
361 stack_free_stack(thread->reserved_stack);
362 thread->reserved_stack = 0;
363 }
364
365 /*
366 * Mark thread as terminating, and block.
367 */
368 thread->state |= TH_TERMINATE;
369 thread_mark_wait_locked(thread, THREAD_UNINT);
370 assert(thread->promotions == 0);
371 thread_unlock(thread);
372 /* splsched */
373
374 thread_block((thread_continue_t)thread_terminate_continue);
375 /*NOTREACHED*/
376 }
377
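/*
 *	thread_deallocate:
 *
 *	Drop a reference on the target thread.  When the last
 *	reference is released, tear down the remaining thread state
 *	(IPC, uthread, kernel stack, machine state) and return the
 *	structure to the thread zone.
 */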
378 void
379 thread_deallocate(
380 thread_t thread)
381 {
382 task_t task;
383
384 if (thread == THREAD_NULL)
385 return;
386
387 if (thread_deallocate_internal(thread) > 0)
388 return;
389
390 ipc_thread_terminate(thread);
391
392 task = thread->task;
393
394 #ifdef MACH_BSD
395 {
396 void *ut = thread->uthread;
397
398 thread->uthread = NULL;
399 uthread_zone_free(ut);
400 }
401 #endif /* MACH_BSD */
402
403 task_deallocate(task);
404
405 if (thread->kernel_stack != 0)
406 stack_free(thread);
407
408 machine_thread_destroy(thread);
409
410 zfree(thread_zone, thread);
411 }
412
413 /*
414 * thread_terminate_daemon:
415 *
416 * Perform final clean up for terminating threads.
417 */
418 static void
419 thread_terminate_daemon(void)
420 {
421 thread_t thread;
422 task_t task;
423
424 (void)splsched();
425 simple_lock(&thread_terminate_lock);
426
427 while ((thread = (thread_t)dequeue_head(&thread_terminate_queue)) != THREAD_NULL) {
428 simple_unlock(&thread_terminate_lock);
429 (void)spllo();
430
431 task = thread->task;
432
433 task_lock(task);
434 task->total_user_time += timer_grab(&thread->user_timer);
435 task->total_system_time += timer_grab(&thread->system_timer);
436
437 task->c_switch += thread->c_switch;
438 task->p_switch += thread->p_switch;
439 task->ps_switch += thread->ps_switch;
440
441 queue_remove(&task->threads, thread, thread_t, task_threads);
442 task->thread_count--;
443 task_unlock(task);
444
445 mutex_lock(&tasks_threads_lock);
446 queue_remove(&threads, thread, thread_t, threads);
447 threads_count--;
448 mutex_unlock(&tasks_threads_lock);
449
450 thread_deallocate(thread);
451
452 (void)splsched();
453 simple_lock(&thread_terminate_lock);
454 }
455
456 assert_wait((event_t)&thread_terminate_queue, THREAD_UNINT);
457 simple_unlock(&thread_terminate_lock);
458 /* splsched */
459
460 thread_block((thread_continue_t)thread_terminate_daemon);
461 /*NOTREACHED*/
462 }
463
464 /*
465 * thread_terminate_enqueue:
466 *
467 * Enqueue a terminating thread for final disposition.
468 *
469 * Called at splsched.
470 */
471 void
472 thread_terminate_enqueue(
473 thread_t thread)
474 {
475 simple_lock(&thread_terminate_lock);
476 enqueue_tail(&thread_terminate_queue, (queue_entry_t)thread);
477 simple_unlock(&thread_terminate_lock);
478
479 thread_wakeup((event_t)&thread_terminate_queue);
480 }
481
482 /*
483 * thread_stack_daemon:
484 *
485 * Perform stack allocation as required due to
486 * invoke failures.
487 */
488 static void
489 thread_stack_daemon(void)
490 {
491 thread_t thread;
492
493 simple_lock(&thread_stack_lock);
494
495 while ((thread = (thread_t)dequeue_head(&thread_stack_queue)) != THREAD_NULL) {
496 simple_unlock(&thread_stack_lock);
497
498 stack_alloc(thread);
499
500 (void)splsched();
501 thread_lock(thread);
502 thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
503 thread_unlock(thread);
504 (void)spllo();
505
506 simple_lock(&thread_stack_lock);
507 }
508
509 assert_wait((event_t)&thread_stack_queue, THREAD_UNINT);
510 simple_unlock(&thread_stack_lock);
511
512 thread_block((thread_continue_t)thread_stack_daemon);
513 /*NOTREACHED*/
514 }
515
516 /*
517 * thread_stack_enqueue:
518 *
519 * Enqueue a thread for stack allocation.
520 *
521 * Called at splsched.
522 */
523 void
524 thread_stack_enqueue(
525 thread_t thread)
526 {
527 simple_lock(&thread_stack_lock);
528 enqueue_tail(&thread_stack_queue, (queue_entry_t)thread);
529 simple_unlock(&thread_stack_lock);
530
531 thread_wakeup((event_t)&thread_stack_queue);
532 }
533
534 void
535 thread_daemon_init(void)
536 {
537 kern_return_t result;
538 thread_t thread;
539
540 simple_lock_init(&thread_terminate_lock, 0);
541 queue_init(&thread_terminate_queue);
542
543 result = kernel_thread_start_priority((thread_continue_t)thread_terminate_daemon, NULL, MINPRI_KERNEL, &thread);
544 if (result != KERN_SUCCESS)
545 panic("thread_daemon_init: thread_terminate_daemon");
546
547 thread_deallocate(thread);
548
549 simple_lock_init(&thread_stack_lock, 0);
550 queue_init(&thread_stack_queue);
551
552 result = kernel_thread_start_priority((thread_continue_t)thread_stack_daemon, NULL, BASEPRI_PREEMPT, &thread);
553 if (result != KERN_SUCCESS)
554 panic("thread_daemon_init: thread_stack_daemon");
555
556 thread_deallocate(thread);
557 }
558
559 /*
560 * Create a new thread.
561 * Doesn't start the thread running.
562 */
563 static kern_return_t
564 thread_create_internal(
565 task_t parent_task,
566 integer_t priority,
567 thread_continue_t continuation,
568 thread_t *out_thread)
569 {
570 thread_t new_thread;
571 static thread_t first_thread;
572
573 /*
574 * Allocate a thread and initialize static fields
575 */
576 if (first_thread == NULL)
577 new_thread = first_thread = current_thread();
578 else
579 new_thread = (thread_t)zalloc(thread_zone);
580 if (new_thread == NULL)
581 return (KERN_RESOURCE_SHORTAGE);
582
583 if (new_thread != first_thread)
584 *new_thread = thread_template;
585
586 #ifdef MACH_BSD
587 {
588 new_thread->uthread = uthread_alloc(parent_task, new_thread);
589 if (new_thread->uthread == NULL) {
590 zfree(thread_zone, new_thread);
591 return (KERN_RESOURCE_SHORTAGE);
592 }
593 }
594 #endif /* MACH_BSD */
595
596 if (machine_thread_create(new_thread, parent_task) != KERN_SUCCESS) {
597 #ifdef MACH_BSD
598 {
599 void *ut = new_thread->uthread;
600
601 new_thread->uthread = NULL;
602 /* cred free may not be necessary */
603 uthread_cleanup(parent_task, ut, parent_task->bsd_info);
604 uthread_cred_free(ut);
605 uthread_zone_free(ut);
606 }
607 #endif /* MACH_BSD */
608 zfree(thread_zone, new_thread);
609 return (KERN_FAILURE);
610 }
611
612 new_thread->task = parent_task;
613
614 thread_lock_init(new_thread);
615 wake_lock_init(new_thread);
616
617 mutex_init(&new_thread->mutex, 0);
618
619 ipc_thread_init(new_thread);
620 queue_init(&new_thread->held_ulocks);
621
622 new_thread->continuation = continuation;
623
624 mutex_lock(&tasks_threads_lock);
625 task_lock(parent_task);
626
627 if ( !parent_task->active ||
628 (parent_task->thread_count >= THREAD_MAX &&
629 parent_task != kernel_task)) {
630 task_unlock(parent_task);
631 mutex_unlock(&tasks_threads_lock);
632
633 #ifdef MACH_BSD
634 {
635 void *ut = new_thread->uthread;
636
637 new_thread->uthread = NULL;
638 uthread_cleanup(parent_task, ut, parent_task->bsd_info);
639 /* cred free may not be necessary */
640 uthread_cred_free(ut);
641 uthread_zone_free(ut);
642 }
643 #endif /* MACH_BSD */
644 ipc_thread_disable(new_thread);
645 ipc_thread_terminate(new_thread);
646 machine_thread_destroy(new_thread);
647 zfree(thread_zone, new_thread);
648 return (KERN_FAILURE);
649 }
650
651 task_reference_internal(parent_task);
652
653 /* Cache the task's map */
654 new_thread->map = parent_task->map;
655
656 /* Chain the thread onto the task's list */
657 queue_enter(&parent_task->threads, new_thread, thread_t, task_threads);
658 parent_task->thread_count++;
659
660 /* So terminating threads don't need to take the task lock to decrement */
661 hw_atomic_add(&parent_task->active_thread_count, 1);
662
663 queue_enter(&threads, new_thread, thread_t, threads);
664 threads_count++;
665
666 timer_call_setup(&new_thread->wait_timer, thread_timer_expire, new_thread);
667 timer_call_setup(&new_thread->depress_timer, thread_depress_expire, new_thread);
668
669 /* Set the thread's scheduling parameters */
670 if (parent_task != kernel_task)
671 new_thread->sched_mode |= TH_MODE_TIMESHARE;
672 new_thread->max_priority = parent_task->max_priority;
673 new_thread->task_priority = parent_task->priority;
674 new_thread->priority = (priority < 0)? parent_task->priority: priority;
675 if (new_thread->priority > new_thread->max_priority)
676 new_thread->priority = new_thread->max_priority;
677 new_thread->importance =
678 new_thread->priority - new_thread->task_priority;
679 new_thread->sched_stamp = sched_tick;
680 new_thread->pri_shift = sched_pri_shift;
681 compute_priority(new_thread, FALSE);
682
683 new_thread->active = TRUE;
684
685 *out_thread = new_thread;
686
687 {
688 long dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4;
689
690 kdbg_trace_data(parent_task->bsd_info, &dbg_arg2);
691
692 KERNEL_DEBUG_CONSTANT(
693 TRACEDBG_CODE(DBG_TRACE_DATA, 1) | DBG_FUNC_NONE,
694 (vm_address_t)new_thread, dbg_arg2, 0, 0, 0);
695
696 kdbg_trace_string(parent_task->bsd_info,
697 &dbg_arg1, &dbg_arg2, &dbg_arg3, &dbg_arg4);
698
699 KERNEL_DEBUG_CONSTANT(
700 TRACEDBG_CODE(DBG_TRACE_STRING, 1) | DBG_FUNC_NONE,
701 dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, 0);
702 }
703
704 DTRACE_PROC1(lwp__create, thread_t, *out_thread);
705
706 return (KERN_SUCCESS);
707 }
708
709 kern_return_t
710 thread_create(
711 task_t task,
712 thread_t *new_thread)
713 {
714 kern_return_t result;
715 thread_t thread;
716
717 if (task == TASK_NULL || task == kernel_task)
718 return (KERN_INVALID_ARGUMENT);
719
720 result = thread_create_internal(task, -1, (thread_continue_t)thread_bootstrap_return, &thread);
721 if (result != KERN_SUCCESS)
722 return (result);
723
724 thread->user_stop_count = 1;
725 thread_hold(thread);
726 if (task->suspend_count > 0)
727 thread_hold(thread);
728
729 task_unlock(task);
730 mutex_unlock(&tasks_threads_lock);
731
732 *new_thread = thread;
733
734 return (KERN_SUCCESS);
735 }
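/*
 *	Usage sketch (illustrative): this is the kernel side of the Mach
 *	thread_create() routine.  The new thread is created suspended, so
 *	a user-space caller typically sets its register state and then
 *	resumes it:
 *
 *		thread_act_t child;
 *
 *		if (thread_create(mach_task_self(), &child) == KERN_SUCCESS) {
 *			thread_set_state(child, flavor, state, count);
 *			thread_resume(child);
 *		}
 *
 *	flavor, state and count stand in for machine-specific register
 *	state prepared by the caller.
 */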
736
737 kern_return_t
738 thread_create_running(
739 register task_t task,
740 int flavor,
741 thread_state_t new_state,
742 mach_msg_type_number_t new_state_count,
743 thread_t *new_thread)
744 {
745 register kern_return_t result;
746 thread_t thread;
747
748 if (task == TASK_NULL || task == kernel_task)
749 return (KERN_INVALID_ARGUMENT);
750
751 result = thread_create_internal(task, -1, (thread_continue_t)thread_bootstrap_return, &thread);
752 if (result != KERN_SUCCESS)
753 return (result);
754
755 result = machine_thread_set_state(
756 thread, flavor, new_state, new_state_count);
757 if (result != KERN_SUCCESS) {
758 task_unlock(task);
759 mutex_unlock(&tasks_threads_lock);
760
761 thread_terminate(thread);
762 thread_deallocate(thread);
763 return (result);
764 }
765
766 thread_mtx_lock(thread);
767 thread_start_internal(thread);
768 thread_mtx_unlock(thread);
769
770 task_unlock(task);
771 mutex_unlock(&tasks_threads_lock);
772
773 *new_thread = thread;
774
775 return (result);
776 }
777
778 /*
779 * kernel_thread_create:
780 *
781 * Create a thread in the kernel task
782 * to execute in kernel context.
783 */
784 kern_return_t
785 kernel_thread_create(
786 thread_continue_t continuation,
787 void *parameter,
788 integer_t priority,
789 thread_t *new_thread)
790 {
791 kern_return_t result;
792 thread_t thread;
793 task_t task = kernel_task;
794
795 result = thread_create_internal(task, priority, continuation, &thread);
796 if (result != KERN_SUCCESS)
797 return (result);
798
799 task_unlock(task);
800 mutex_unlock(&tasks_threads_lock);
801
802 stack_alloc(thread);
803 assert(thread->kernel_stack != 0);
804 #if CONFIG_EMBEDDED
805 if (priority > BASEPRI_KERNEL)
806 #endif
807 thread->reserved_stack = thread->kernel_stack;
808
809 thread->parameter = parameter;
810
811 if(debug_task & 1)
812 kprintf("kernel_thread_create: thread = %p continuation = %p\n", thread, continuation);
813 *new_thread = thread;
814
815 return (result);
816 }
817
818 kern_return_t
819 kernel_thread_start_priority(
820 thread_continue_t continuation,
821 void *parameter,
822 integer_t priority,
823 thread_t *new_thread)
824 {
825 kern_return_t result;
826 thread_t thread;
827
828 result = kernel_thread_create(continuation, parameter, priority, &thread);
829 if (result != KERN_SUCCESS)
830 return (result);
831
832 thread_mtx_lock(thread);
833 thread_start_internal(thread);
834 thread_mtx_unlock(thread);
835
836 *new_thread = thread;
837
838 return (result);
839 }
840
841 kern_return_t
842 kernel_thread_start(
843 thread_continue_t continuation,
844 void *parameter,
845 thread_t *new_thread)
846 {
847 return kernel_thread_start_priority(continuation, parameter, -1, new_thread);
848 }
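/*
 *	Usage sketch (illustrative): in-kernel callers normally start a
 *	daemon with kernel_thread_start() and drop the reference handed
 *	back to them, as thread_daemon_init() does above:
 *
 *		thread_t thread;
 *
 *		if (kernel_thread_start((thread_continue_t)my_daemon,
 *				NULL, &thread) == KERN_SUCCESS)
 *			thread_deallocate(thread);
 *
 *	my_daemon stands in for the caller's continuation routine.
 */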
849
850 thread_t
851 kernel_thread(
852 task_t task,
853 void (*start)(void))
854 {
855 kern_return_t result;
856 thread_t thread;
857
858 if (task != kernel_task)
859 panic("kernel_thread");
860
861 result = kernel_thread_start_priority((thread_continue_t)start, NULL, -1, &thread);
862 if (result != KERN_SUCCESS)
863 return (THREAD_NULL);
864
865 thread_deallocate(thread);
866
867 return (thread);
868 }
869
870 kern_return_t
871 thread_info_internal(
872 register thread_t thread,
873 thread_flavor_t flavor,
874 thread_info_t thread_info_out, /* ptr to OUT array */
875 mach_msg_type_number_t *thread_info_count) /*IN/OUT*/
876 {
877 int state, flags;
878 spl_t s;
879
880 if (thread == THREAD_NULL)
881 return (KERN_INVALID_ARGUMENT);
882
883 if (flavor == THREAD_BASIC_INFO) {
884 register thread_basic_info_t basic_info;
885
886 if (*thread_info_count < THREAD_BASIC_INFO_COUNT)
887 return (KERN_INVALID_ARGUMENT);
888
889 basic_info = (thread_basic_info_t) thread_info_out;
890
891 s = splsched();
892 thread_lock(thread);
893
894 /* fill in info */
895
896 thread_read_times(thread, &basic_info->user_time,
897 &basic_info->system_time);
898
899 /*
900 * Update lazy-evaluated scheduler info because someone wants it.
901 */
902 if (thread->sched_stamp != sched_tick)
903 update_priority(thread);
904
905 basic_info->sleep_time = 0;
906
907 /*
908 * To calculate cpu_usage, first correct for timer rate,
909 * then for 5/8 ageing. The correction factor [3/5] is
910 * (1/(5/8) - 1).
911 */
912 basic_info->cpu_usage = ((uint64_t)thread->cpu_usage
913 * TH_USAGE_SCALE) / sched_tick_interval;
914 basic_info->cpu_usage = (basic_info->cpu_usage * 3) / 5;
915
916 if (basic_info->cpu_usage > TH_USAGE_SCALE)
917 basic_info->cpu_usage = TH_USAGE_SCALE;
918
919 basic_info->policy = ((thread->sched_mode & TH_MODE_TIMESHARE)?
920 POLICY_TIMESHARE: POLICY_RR);
921
922 flags = 0;
923 if (thread->bound_processor != PROCESSOR_NULL && thread->bound_processor->idle_thread == thread)
924 flags |= TH_FLAGS_IDLE;
925
926 if (!thread->kernel_stack)
927 flags |= TH_FLAGS_SWAPPED;
928
929 state = 0;
930 if (thread->state & TH_TERMINATE)
931 state = TH_STATE_HALTED;
932 else
933 if (thread->state & TH_RUN)
934 state = TH_STATE_RUNNING;
935 else
936 if (thread->state & TH_UNINT)
937 state = TH_STATE_UNINTERRUPTIBLE;
938 else
939 if (thread->state & TH_SUSP)
940 state = TH_STATE_STOPPED;
941 else
942 if (thread->state & TH_WAIT)
943 state = TH_STATE_WAITING;
944
945 basic_info->run_state = state;
946 basic_info->flags = flags;
947
948 basic_info->suspend_count = thread->user_stop_count;
949
950 thread_unlock(thread);
951 splx(s);
952
953 *thread_info_count = THREAD_BASIC_INFO_COUNT;
954
955 return (KERN_SUCCESS);
956 }
957 else
958 if (flavor == THREAD_SCHED_TIMESHARE_INFO) {
959 policy_timeshare_info_t ts_info;
960
961 if (*thread_info_count < POLICY_TIMESHARE_INFO_COUNT)
962 return (KERN_INVALID_ARGUMENT);
963
964 ts_info = (policy_timeshare_info_t)thread_info_out;
965
966 s = splsched();
967 thread_lock(thread);
968
969 if (!(thread->sched_mode & TH_MODE_TIMESHARE)) {
970 thread_unlock(thread);
971 splx(s);
972
973 return (KERN_INVALID_POLICY);
974 }
975
976 ts_info->depressed = (thread->sched_mode & TH_MODE_ISDEPRESSED) != 0;
977 if (ts_info->depressed) {
978 ts_info->base_priority = DEPRESSPRI;
979 ts_info->depress_priority = thread->priority;
980 }
981 else {
982 ts_info->base_priority = thread->priority;
983 ts_info->depress_priority = -1;
984 }
985
986 ts_info->cur_priority = thread->sched_pri;
987 ts_info->max_priority = thread->max_priority;
988
989 thread_unlock(thread);
990 splx(s);
991
992 *thread_info_count = POLICY_TIMESHARE_INFO_COUNT;
993
994 return (KERN_SUCCESS);
995 }
996 else
997 if (flavor == THREAD_SCHED_FIFO_INFO) {
998 if (*thread_info_count < POLICY_FIFO_INFO_COUNT)
999 return (KERN_INVALID_ARGUMENT);
1000
1001 return (KERN_INVALID_POLICY);
1002 }
1003 else
1004 if (flavor == THREAD_SCHED_RR_INFO) {
1005 policy_rr_info_t rr_info;
1006
1007 if (*thread_info_count < POLICY_RR_INFO_COUNT)
1008 return (KERN_INVALID_ARGUMENT);
1009
1010 rr_info = (policy_rr_info_t) thread_info_out;
1011
1012 s = splsched();
1013 thread_lock(thread);
1014
1015 if (thread->sched_mode & TH_MODE_TIMESHARE) {
1016 thread_unlock(thread);
1017 splx(s);
1018
1019 return (KERN_INVALID_POLICY);
1020 }
1021
1022 rr_info->depressed = (thread->sched_mode & TH_MODE_ISDEPRESSED) != 0;
1023 if (rr_info->depressed) {
1024 rr_info->base_priority = DEPRESSPRI;
1025 rr_info->depress_priority = thread->priority;
1026 }
1027 else {
1028 rr_info->base_priority = thread->priority;
1029 rr_info->depress_priority = -1;
1030 }
1031
1032 rr_info->max_priority = thread->max_priority;
1033 rr_info->quantum = std_quantum_us / 1000;
1034
1035 thread_unlock(thread);
1036 splx(s);
1037
1038 *thread_info_count = POLICY_RR_INFO_COUNT;
1039
1040 return (KERN_SUCCESS);
1041 }
1042
1043 return (KERN_INVALID_ARGUMENT);
1044 }
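/*
 *	Usage sketch (illustrative): the THREAD_BASIC_INFO flavor handled
 *	above is what a user-space caller reaches through thread_info():
 *
 *		thread_basic_info_data_t info;
 *		mach_msg_type_number_t count = THREAD_BASIC_INFO_COUNT;
 *
 *		kern_return_t kr = thread_info(thread_port, THREAD_BASIC_INFO,
 *				(thread_info_t)&info, &count);
 *
 *	thread_port stands in for a send right to the target thread.
 */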
1045
1046 void
1047 thread_read_times(
1048 thread_t thread,
1049 time_value_t *user_time,
1050 time_value_t *system_time)
1051 {
1052 absolutetime_to_microtime(timer_grab(&thread->user_timer),
1053 (unsigned *)&user_time->seconds,
1054 (unsigned *)&user_time->microseconds);
1055
1056 absolutetime_to_microtime(timer_grab(&thread->system_timer),
1057 (unsigned *)&system_time->seconds,
1058 (unsigned *)&system_time->microseconds);
1059 }
1060
1061 kern_return_t
1062 thread_assign(
1063 __unused thread_t thread,
1064 __unused processor_set_t new_pset)
1065 {
1066 return (KERN_FAILURE);
1067 }
1068
1069 /*
1070 * thread_assign_default:
1071 *
1072 * Special version of thread_assign for assigning threads to default
1073 * processor set.
1074 */
1075 kern_return_t
1076 thread_assign_default(
1077 thread_t thread)
1078 {
1079 return (thread_assign(thread, &pset0));
1080 }
1081
1082 /*
1083 * thread_get_assignment
1084 *
1085 * Return current assignment for this thread.
1086 */
1087 kern_return_t
1088 thread_get_assignment(
1089 thread_t thread,
1090 processor_set_t *pset)
1091 {
1092 if (thread == NULL)
1093 return (KERN_INVALID_ARGUMENT);
1094
1095 *pset = &pset0;
1096
1097 return (KERN_SUCCESS);
1098 }
1099
1100 /*
1101 * thread_wire_internal:
1102 *
1103 * Specify that the target thread must always be able
1104 * to run and to allocate memory.
1105 */
1106 kern_return_t
1107 thread_wire_internal(
1108 host_priv_t host_priv,
1109 thread_t thread,
1110 boolean_t wired,
1111 boolean_t *prev_state)
1112 {
1113 if (host_priv == NULL || thread != current_thread())
1114 return (KERN_INVALID_ARGUMENT);
1115
1116 assert(host_priv == &realhost);
1117
1118 if (prev_state)
1119 *prev_state = (thread->options & TH_OPT_VMPRIV) != 0;
1120
1121 if (wired) {
1122 if (!(thread->options & TH_OPT_VMPRIV))
1123 vm_page_free_reserve(1); /* XXX */
1124 thread->options |= TH_OPT_VMPRIV;
1125 }
1126 else {
1127 if (thread->options & TH_OPT_VMPRIV)
1128 vm_page_free_reserve(-1); /* XXX */
1129 thread->options &= ~TH_OPT_VMPRIV;
1130 }
1131
1132 return (KERN_SUCCESS);
1133 }
1134
1135
1136 /*
1137 * thread_wire:
1138 *
1139 * User-api wrapper for thread_wire_internal()
1140 */
1141 kern_return_t
1142 thread_wire(
1143 host_priv_t host_priv,
1144 thread_t thread,
1145 boolean_t wired)
1146 {
1147 return (thread_wire_internal(host_priv, thread, wired, NULL));
1148 }
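/*
 *	Usage sketch (illustrative): thread_wire() is exported through the
 *	host_priv interface and may only target the calling thread:
 *
 *		kern_return_t kr = thread_wire(host_priv_port,
 *				mach_thread_self(), TRUE);
 *
 *	host_priv_port stands in for a send right to the privileged host
 *	port; passing any thread other than the caller fails with
 *	KERN_INVALID_ARGUMENT.
 */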
1149
1150 int split_funnel_off = 0;
1151 lck_grp_t *funnel_lck_grp = LCK_GRP_NULL;
1152 lck_grp_attr_t *funnel_lck_grp_attr;
1153 lck_attr_t *funnel_lck_attr;
1154
1155 funnel_t *
1156 funnel_alloc(
1157 int type)
1158 {
1159 lck_mtx_t *m;
1160 funnel_t *fnl;
1161
1162 if (funnel_lck_grp == LCK_GRP_NULL) {
1163 funnel_lck_grp_attr = lck_grp_attr_alloc_init();
1164
1165 funnel_lck_grp = lck_grp_alloc_init("Funnel", funnel_lck_grp_attr);
1166
1167 funnel_lck_attr = lck_attr_alloc_init();
1168 }
1169 if ((fnl = (funnel_t *)kalloc(sizeof(funnel_t))) != 0){
1170 bzero((void *)fnl, sizeof(funnel_t));
1171 if ((m = lck_mtx_alloc_init(funnel_lck_grp, funnel_lck_attr)) == (lck_mtx_t *)NULL) {
1172 kfree(fnl, sizeof(funnel_t));
1173 return(THR_FUNNEL_NULL);
1174 }
1175 fnl->fnl_mutex = m;
1176 fnl->fnl_type = type;
1177 }
1178 return(fnl);
1179 }
1180
1181 void
1182 funnel_free(
1183 funnel_t * fnl)
1184 {
1185 lck_mtx_free(fnl->fnl_mutex, funnel_lck_grp);
1186 if (fnl->fnl_oldmutex)
1187 lck_mtx_free(fnl->fnl_oldmutex, funnel_lck_grp);
1188 kfree(fnl, sizeof(funnel_t));
1189 }
1190
1191 void
1192 funnel_lock(
1193 funnel_t * fnl)
1194 {
1195 lck_mtx_lock(fnl->fnl_mutex);
1196 fnl->fnl_mtxholder = current_thread();
1197 }
1198
1199 void
1200 funnel_unlock(
1201 funnel_t * fnl)
1202 {
1203 lck_mtx_unlock(fnl->fnl_mutex);
1204 fnl->fnl_mtxholder = NULL;
1205 fnl->fnl_mtxrelease = current_thread();
1206 }
1207
1208 funnel_t *
1209 thread_funnel_get(
1210 void)
1211 {
1212 thread_t th = current_thread();
1213
1214 if (th->funnel_state & TH_FN_OWNED) {
1215 return(th->funnel_lock);
1216 }
1217 return(THR_FUNNEL_NULL);
1218 }
1219
1220 boolean_t
1221 thread_funnel_set(
1222 funnel_t * fnl,
1223 boolean_t funneled)
1224 {
1225 thread_t cur_thread;
1226 boolean_t funnel_state_prev;
1227 boolean_t intr;
1228
1229 cur_thread = current_thread();
1230 funnel_state_prev = ((cur_thread->funnel_state & TH_FN_OWNED) == TH_FN_OWNED);
1231
1232 if (funnel_state_prev != funneled) {
1233 intr = ml_set_interrupts_enabled(FALSE);
1234
1235 if (funneled == TRUE) {
1236 if (cur_thread->funnel_lock)
1237 panic("Funnel lock called when holding one %p", cur_thread->funnel_lock);
1238 KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE,
1239 fnl, 1, 0, 0, 0);
1240 funnel_lock(fnl);
1241 KERNEL_DEBUG(0x6032434 | DBG_FUNC_NONE,
1242 fnl, 1, 0, 0, 0);
1243 cur_thread->funnel_state |= TH_FN_OWNED;
1244 cur_thread->funnel_lock = fnl;
1245 } else {
1246 if(cur_thread->funnel_lock->fnl_mutex != fnl->fnl_mutex)
1247 panic("Funnel unlock when not holding funnel");
1248 cur_thread->funnel_state &= ~TH_FN_OWNED;
1249 KERNEL_DEBUG(0x603242c | DBG_FUNC_NONE,
1250 fnl, 1, 0, 0, 0);
1251
1252 cur_thread->funnel_lock = THR_FUNNEL_NULL;
1253 funnel_unlock(fnl);
1254 }
1255 (void)ml_set_interrupts_enabled(intr);
1256 } else {
 1257 /* if we are trying to acquire the funnel recursively,
 1258 * verify that the funnel we already hold is the same one
 1259 */
1260 if (funneled && (fnl->fnl_mutex != cur_thread->funnel_lock->fnl_mutex)) {
1261 panic("thread_funnel_set: already holding a different funnel");
1262 }
1263 }
1264 return(funnel_state_prev);
1265 }
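/*
 *	Usage sketch (illustrative): funnelled code brackets its critical
 *	section by saving and restoring the previous funnel state.  Here
 *	kernel_flock is assumed to name the global BSD funnel:
 *
 *		boolean_t funnel_state;
 *
 *		funnel_state = thread_funnel_set(kernel_flock, TRUE);
 *		... code that requires the funnel ...
 *		(void) thread_funnel_set(kernel_flock, funnel_state);
 */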
1266
1267 static void
1268 sched_call_null(
1269 __unused int type,
1270 __unused thread_t thread)
1271 {
1272 return;
1273 }
1274
1275 void
1276 thread_sched_call(
1277 thread_t thread,
1278 sched_call_t call)
1279 {
1280 thread->sched_call = (call != NULL)? call: sched_call_null;
1281 }
1282
1283 void
1284 thread_static_param(
1285 thread_t thread,
1286 boolean_t state)
1287 {
1288 thread_mtx_lock(thread);
1289 thread->static_param = state;
1290 thread_mtx_unlock(thread);
1291 }
1292
1293 /*
1294 * Export routines to other components for things that are done as macros
1295 * within the osfmk component.
1296 */
1297
1298 #undef thread_reference
1299 void thread_reference(thread_t thread);
1300 void
1301 thread_reference(
1302 thread_t thread)
1303 {
1304 if (thread != THREAD_NULL)
1305 thread_reference_internal(thread);
1306 }
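/*
 *	Usage sketch (illustrative): external components pair this with
 *	thread_deallocate() when holding a thread pointer across an
 *	operation that may block or drop locks:
 *
 *		thread_reference(thread);
 *		... use the thread ...
 *		thread_deallocate(thread);
 */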
1307
1308 #undef thread_should_halt
1309
1310 boolean_t
1311 thread_should_halt(
1312 thread_t th)
1313 {
1314 return (thread_should_halt_fast(th));
1315 }
1316
1317 #if CONFIG_DTRACE
1318 uint32_t dtrace_get_thread_predcache(thread_t thread)
1319 {
1320 if (thread != THREAD_NULL)
1321 return thread->t_dtrace_predcache;
1322 else
1323 return 0;
1324 }
1325
1326 int64_t dtrace_get_thread_vtime(thread_t thread)
1327 {
1328 if (thread != THREAD_NULL)
1329 return thread->t_dtrace_vtime;
1330 else
1331 return 0;
1332 }
1333
1334 int64_t dtrace_get_thread_tracing(thread_t thread)
1335 {
1336 if (thread != THREAD_NULL)
1337 return thread->t_dtrace_tracing;
1338 else
1339 return 0;
1340 }
1341
1342 boolean_t dtrace_get_thread_reentering(thread_t thread)
1343 {
1344 if (thread != THREAD_NULL)
1345 return (thread->options & TH_OPT_DTRACE) ? TRUE : FALSE;
1346 else
1347 return 0;
1348 }
1349
1350 vm_offset_t dtrace_get_kernel_stack(thread_t thread)
1351 {
1352 if (thread != THREAD_NULL)
1353 return thread->kernel_stack;
1354 else
1355 return 0;
1356 }
1357
1358 int64_t dtrace_calc_thread_recent_vtime(thread_t thread)
1359 {
1360 #if STAT_TIME
1361 if (thread != THREAD_NULL) {
1362 return timer_grab(&(thread->system_timer)) + timer_grab(&(thread->user_timer));
1363 } else
1364 return 0;
1365 #else
1366 if (thread != THREAD_NULL) {
1367 processor_t processor = current_processor();
1368 uint64_t abstime = mach_absolute_time();
1369 timer_t timer;
1370
1371 timer = PROCESSOR_DATA(processor, thread_timer);
1372
1373 return timer_grab(&(thread->system_timer)) + timer_grab(&(thread->user_timer)) +
1374 (abstime - timer->tstamp); /* XXX need interrupts off to prevent missed time? */
1375 } else
1376 return 0;
1377 #endif
1378 }
1379
1380 void dtrace_set_thread_predcache(thread_t thread, uint32_t predcache)
1381 {
1382 if (thread != THREAD_NULL)
1383 thread->t_dtrace_predcache = predcache;
1384 }
1385
1386 void dtrace_set_thread_vtime(thread_t thread, int64_t vtime)
1387 {
1388 if (thread != THREAD_NULL)
1389 thread->t_dtrace_vtime = vtime;
1390 }
1391
1392 void dtrace_set_thread_tracing(thread_t thread, int64_t accum)
1393 {
1394 if (thread != THREAD_NULL)
1395 thread->t_dtrace_tracing = accum;
1396 }
1397
1398 void dtrace_set_thread_reentering(thread_t thread, boolean_t vbool)
1399 {
1400 if (thread != THREAD_NULL) {
1401 if (vbool)
1402 thread->options |= TH_OPT_DTRACE;
1403 else
1404 thread->options &= (~TH_OPT_DTRACE);
1405 }
1406 }
1407
1408 vm_offset_t dtrace_set_thread_recover(thread_t thread, vm_offset_t recover)
1409 {
1410 vm_offset_t prev = 0;
1411
1412 if (thread != THREAD_NULL) {
1413 prev = thread->recover;
1414 thread->recover = recover;
1415 }
1416 return prev;
1417 }
1418
1419 #endif /* CONFIG_DTRACE */