/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	kern/thread.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young, David Golub
 *	Date:	1986
 *
 *	Thread management primitives implementation.
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Computer Systems Laboratory (CSL).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
 * improvements that they make and grant CSL redistribution rights.
 *
 */

#include <mach_host.h>
#include <mach_prof.h>

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/policy.h>
#include <mach/thread_info.h>
#include <mach/thread_special_ports.h>
#include <mach/thread_status.h>
#include <mach/time_value.h>
#include <mach/vm_param.h>

#include <machine/thread.h>

#include <kern/kern_types.h>
#include <kern/kalloc.h>
#include <kern/cpu_data.h>
#include <kern/counters.h>
#include <kern/ipc_mig.h>
#include <kern/ipc_tt.h>
#include <kern/mach_param.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/sync_lock.h>
#include <kern/syscall_subr.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/host.h>
#include <kern/zalloc.h>
#include <kern/profile.h>
#include <kern/assert.h>

#include <ipc/ipc_kmsg.h>
#include <ipc/ipc_port.h>

#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>

#include <sys/kdebug.h>

/*
 * Exported interfaces
 */
#include <mach/task_server.h>
#include <mach/thread_act_server.h>
#include <mach/mach_host_server.h>
#include <mach/host_priv_server.h>

static struct zone		*thread_zone;

decl_simple_lock_data(static,thread_stack_lock)
static queue_head_t		thread_stack_queue;

decl_simple_lock_data(static,thread_terminate_lock)
static queue_head_t		thread_terminate_queue;

static struct thread	thread_template, init_thread;

#ifdef MACH_BSD
extern void proc_exit(void *);
#endif /* MACH_BSD */

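/*
 *	thread_bootstrap:
 *
 *	Called once during startup.  Fills in thread_template so that
 *	thread_create_internal() can initialize new threads with a single
 *	structure copy, and installs init_thread as the current thread of
 *	the bootstrap processor before any real thread exists.
 */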
void
thread_bootstrap(void)
{
	/*
	 *	Fill in a template thread for fast initialization.
	 */

	thread_template.runq = RUN_QUEUE_NULL;

	thread_template.ref_count = 2;

	thread_template.reason = AST_NONE;
	thread_template.at_safe_point = FALSE;
	thread_template.wait_event = NO_EVENT64;
	thread_template.wait_queue = WAIT_QUEUE_NULL;
	thread_template.wait_result = THREAD_WAITING;
	thread_template.options = THREAD_ABORTSAFE;
	thread_template.state = TH_WAIT | TH_UNINT;
	thread_template.wake_active = FALSE;
	thread_template.continuation = THREAD_CONTINUE_NULL;
	thread_template.parameter = NULL;

	thread_template.importance = 0;
	thread_template.sched_mode = 0;
	thread_template.safe_mode = 0;
	thread_template.safe_release = 0;

	thread_template.priority = 0;
	thread_template.sched_pri = 0;
	thread_template.max_priority = 0;
	thread_template.task_priority = 0;
	thread_template.promotions = 0;
	thread_template.pending_promoter_index = 0;
	thread_template.pending_promoter[0] =
	thread_template.pending_promoter[1] = NULL;

	thread_template.realtime.deadline = UINT64_MAX;

	thread_template.current_quantum = 0;

	thread_template.computation_metered = 0;
	thread_template.computation_epoch = 0;

	thread_template.sched_stamp = 0;
	thread_template.sched_usage = 0;
	thread_template.pri_shift = INT8_MAX;
	thread_template.cpu_usage = thread_template.cpu_delta = 0;

	thread_template.bound_processor = PROCESSOR_NULL;
	thread_template.last_processor = PROCESSOR_NULL;
	thread_template.last_switch = 0;

	timer_init(&thread_template.user_timer);
	timer_init(&thread_template.system_timer);
	thread_template.user_timer_save = 0;
	thread_template.system_timer_save = 0;

	thread_template.wait_timer_is_set = FALSE;
	thread_template.wait_timer_active = 0;

	thread_template.depress_timer_active = 0;

	thread_template.processor_set = PROCESSOR_SET_NULL;

	thread_template.special_handler.handler = special_handler;
	thread_template.special_handler.next = 0;

#if	MACH_HOST
	thread_template.may_assign = TRUE;
	thread_template.assign_active = FALSE;
#endif	/* MACH_HOST */
	thread_template.funnel_lock = THR_FUNNEL_NULL;
	thread_template.funnel_state = 0;
	thread_template.recover = (vm_offset_t)NULL;

	init_thread = thread_template;
	machine_set_current_thread(&init_thread);
}

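/*
 *	thread_init:
 *
 *	Set up the zone from which struct thread is allocated.  The zinit()
 *	arguments are the element size, the maximum memory the zone may
 *	consume, the allocation chunk size, and a name for debugging.
 */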
void
thread_init(void)
{
	thread_zone = zinit(
			sizeof(struct thread),
			THREAD_MAX * sizeof(struct thread),
			THREAD_CHUNK * sizeof(struct thread),
			"threads");

	stack_init();

	/*
	 *	Initialize any machine-dependent
	 *	per-thread structures necessary.
	 */
	machine_thread_init();
}

static void
thread_terminate_continue(void)
{
	panic("thread_terminate_continue");
	/*NOTREACHED*/
}

/*
 *	thread_terminate_self:
 *
 *	Called by a thread on itself: tear down the thread's own
 *	scheduling and IPC state, then mark it terminating and block.
 *	Final disposition happens later on the terminate queue.
 */
void
thread_terminate_self(void)
{
	thread_t		thread = current_thread();
	task_t			task;
	spl_t			s;

	s = splsched();
	thread_lock(thread);

	/*
	 *	Cancel priority depression, reset scheduling parameters,
	 *	and wait for concurrent expirations on other processors.
	 */
	if (thread->sched_mode & TH_MODE_ISDEPRESSED) {
		thread->sched_mode &= ~TH_MODE_ISDEPRESSED;

		if (timer_call_cancel(&thread->depress_timer))
			thread->depress_timer_active--;
	}

	thread_policy_reset(thread);

	while (thread->depress_timer_active > 0) {
		thread_unlock(thread);
		splx(s);

		delay(1);

		s = splsched();
		thread_lock(thread);
	}

	thread_unlock(thread);
	splx(s);

	thread_mtx_lock(thread);

	ulock_release_all(thread);

	ipc_thread_disable(thread);

	thread_mtx_unlock(thread);

	/*
	 *	If we are the last thread to terminate and the task is
	 *	associated with a BSD process, perform BSD process exit.
	 */
	task = thread->task;
	if (	hw_atomic_sub(&task->active_thread_count, 1) == 0	&&
			task->bsd_info != NULL)
		proc_exit(task->bsd_info);

	s = splsched();
	thread_lock(thread);

	/*
	 *	Cancel wait timer, and wait for
	 *	concurrent expirations.
	 */
	if (thread->wait_timer_is_set) {
		thread->wait_timer_is_set = FALSE;

		if (timer_call_cancel(&thread->wait_timer))
			thread->wait_timer_active--;
	}

	while (thread->wait_timer_active > 0) {
		thread_unlock(thread);
		splx(s);

		delay(1);

		s = splsched();
		thread_lock(thread);
	}

	/*
	 *	If there is a reserved stack, release it.
	 */
	if (thread->reserved_stack != 0) {
		if (thread->reserved_stack != thread->kernel_stack)
			stack_free_stack(thread->reserved_stack);
		thread->reserved_stack = 0;
	}

	/*
	 *	Mark thread as terminating, and block.
	 */
	thread->state |= TH_TERMINATE;
	thread_mark_wait_locked(thread, THREAD_UNINT);
	assert(thread->promotions == 0);
	thread_unlock(thread);
	/* splsched */

	thread_block((thread_continue_t)thread_terminate_continue);
	/*NOTREACHED*/
}

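/*
 *	thread_deallocate:
 *
 *	Drop a reference on the thread; the final reference frees the
 *	uthread, drops the task and pset references, releases any
 *	remaining kernel stack and machine-dependent state, and returns
 *	the thread to its zone.
 */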
void
thread_deallocate(
	thread_t			thread)
{
	processor_set_t		pset;
	task_t				task;

	if (thread == THREAD_NULL)
		return;

	if (thread_deallocate_internal(thread) > 0)
		return;

	ipc_thread_terminate(thread);

	task = thread->task;

#ifdef MACH_BSD
	{
		void *ut = thread->uthread;

		thread->uthread = NULL;
		uthread_free(task, ut, task->bsd_info);
	}
#endif  /* MACH_BSD */

	task_deallocate(task);

	pset = thread->processor_set;
	pset_deallocate(pset);

	if (thread->kernel_stack != 0)
		stack_free(thread);

	machine_thread_destroy(thread);

	zfree(thread_zone, thread);
}

/*
 *	thread_terminate_daemon:
 *
 *	Perform final clean up for terminating threads.
 */
static void
thread_terminate_daemon(void)
{
	thread_t			thread;
	task_t				task;
	processor_set_t		pset;

	(void)splsched();
	simple_lock(&thread_terminate_lock);

	while ((thread = (thread_t)dequeue_head(&thread_terminate_queue)) != THREAD_NULL) {
		simple_unlock(&thread_terminate_lock);
		(void)spllo();

		task = thread->task;

		task_lock(task);
		task->total_user_time += timer_grab(&thread->user_timer);
		task->total_system_time += timer_grab(&thread->system_timer);

		queue_remove(&task->threads, thread, thread_t, task_threads);
		task->thread_count--;
		task_unlock(task);

		pset = thread->processor_set;

		pset_lock(pset);
		pset_remove_thread(pset, thread);
		pset_unlock(pset);

		thread_deallocate(thread);

		(void)splsched();
		simple_lock(&thread_terminate_lock);
	}

	assert_wait((event_t)&thread_terminate_queue, THREAD_UNINT);
	simple_unlock(&thread_terminate_lock);
	/* splsched */

	thread_block((thread_continue_t)thread_terminate_daemon);
	/*NOTREACHED*/
}

/*
 *	thread_terminate_enqueue:
 *
 *	Enqueue a terminating thread for final disposition.
 *
 *	Called at splsched.
 */
void
thread_terminate_enqueue(
	thread_t		thread)
{
	simple_lock(&thread_terminate_lock);
	enqueue_tail(&thread_terminate_queue, (queue_entry_t)thread);
	simple_unlock(&thread_terminate_lock);

	thread_wakeup((event_t)&thread_terminate_queue);
}
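
/*
 *	thread_terminate_daemon() sleeps on the address of the terminate
 *	queue and is kicked by the thread_wakeup() above on the same event.
 *	Because the daemon issues assert_wait() while still holding the
 *	simple lock, a producer cannot slip its wakeup in between the final
 *	queue check and the wait registration, so no wakeups are lost.
 */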

/*
 *	thread_stack_daemon:
 *
 *	Perform stack allocation as required due to invoke failures
 *	(threads that could not be dispatched for lack of a kernel stack).
 */
static void
thread_stack_daemon(void)
{
	thread_t		thread;

	(void)splsched();
	simple_lock(&thread_stack_lock);

	while ((thread = (thread_t)dequeue_head(&thread_stack_queue)) != THREAD_NULL) {
		simple_unlock(&thread_stack_lock);
		/* splsched */

		stack_alloc(thread);

		thread_lock(thread);
		thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
		thread_unlock(thread);
		(void)spllo();

		(void)splsched();
		simple_lock(&thread_stack_lock);
	}

	assert_wait((event_t)&thread_stack_queue, THREAD_UNINT);
	simple_unlock(&thread_stack_lock);
	/* splsched */

	thread_block((thread_continue_t)thread_stack_daemon);
	/*NOTREACHED*/
}

/*
 *	thread_stack_enqueue:
 *
 *	Enqueue a thread for stack allocation.
 *
 *	Called at splsched.
 */
void
thread_stack_enqueue(
	thread_t		thread)
{
	simple_lock(&thread_stack_lock);
	enqueue_tail(&thread_stack_queue, (queue_entry_t)thread);
	simple_unlock(&thread_stack_lock);

	thread_wakeup((event_t)&thread_stack_queue);
}

void
thread_daemon_init(void)
{
	kern_return_t	result;
	thread_t		thread;

	simple_lock_init(&thread_terminate_lock, 0);
	queue_init(&thread_terminate_queue);

	result = kernel_thread_start_priority((thread_continue_t)thread_terminate_daemon, NULL, MINPRI_KERNEL, &thread);
	if (result != KERN_SUCCESS)
		panic("thread_daemon_init: thread_terminate_daemon");

	thread_deallocate(thread);

	simple_lock_init(&thread_stack_lock, 0);
	queue_init(&thread_stack_queue);

	result = kernel_thread_start_priority((thread_continue_t)thread_stack_daemon, NULL, BASEPRI_PREEMPT, &thread);
	if (result != KERN_SUCCESS)
		panic("thread_daemon_init: thread_stack_daemon");

	thread_deallocate(thread);
}
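
/*
 *	kernel_thread_start_priority() returns a reference on the new
 *	thread; since the daemons run forever and are never looked up
 *	again, that reference is dropped immediately after creation.
 */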

/*
 *	Create a new thread.
 *	Doesn't start the thread running.
 */
static kern_return_t
thread_create_internal(
	task_t					parent_task,
	integer_t				priority,
	thread_continue_t		continuation,
	thread_t				*out_thread)
{
	thread_t				new_thread;
	processor_set_t			pset;
	static thread_t			first_thread;

	/*
	 *	Allocate a thread and initialize static fields
	 */
	if (first_thread == NULL)
		new_thread = first_thread = current_thread();
	else
		new_thread = (thread_t)zalloc(thread_zone);
	if (new_thread == NULL)
		return (KERN_RESOURCE_SHORTAGE);

	if (new_thread != first_thread)
		*new_thread = thread_template;

#ifdef MACH_BSD
	{
		new_thread->uthread = uthread_alloc(parent_task, new_thread);
		if (new_thread->uthread == NULL) {
			zfree(thread_zone, new_thread);
			return (KERN_RESOURCE_SHORTAGE);
		}
	}
#endif  /* MACH_BSD */

	if (machine_thread_create(new_thread, parent_task) != KERN_SUCCESS) {
#ifdef MACH_BSD
		{
			void *ut = new_thread->uthread;

			new_thread->uthread = NULL;
			uthread_free(parent_task, ut, parent_task->bsd_info);
		}
#endif  /* MACH_BSD */
		zfree(thread_zone, new_thread);
		return (KERN_FAILURE);
	}

	new_thread->task = parent_task;

	thread_lock_init(new_thread);
	wake_lock_init(new_thread);

	mutex_init(&new_thread->mutex, 0);

	ipc_thread_init(new_thread);
	queue_init(&new_thread->held_ulocks);
	thread_prof_init(new_thread, parent_task);

	new_thread->continuation = continuation;

	pset = parent_task->processor_set;
	assert(pset == &default_pset);
	pset_lock(pset);

	task_lock(parent_task);
	assert(parent_task->processor_set == pset);

	if (	!parent_task->active						||
			(parent_task->thread_count >= THREAD_MAX	&&
			 parent_task != kernel_task)) {
		task_unlock(parent_task);
		pset_unlock(pset);

#ifdef MACH_BSD
		{
			void *ut = new_thread->uthread;

			new_thread->uthread = NULL;
			uthread_free(parent_task, ut, parent_task->bsd_info);
		}
#endif  /* MACH_BSD */
		ipc_thread_disable(new_thread);
		ipc_thread_terminate(new_thread);
		machine_thread_destroy(new_thread);
		zfree(thread_zone, new_thread);
		return (KERN_FAILURE);
	}

	task_reference_internal(parent_task);

	/* Cache the task's map */
	new_thread->map = parent_task->map;

	/* Chain the thread onto the task's list */
	queue_enter(&parent_task->threads, new_thread, thread_t, task_threads);
	parent_task->thread_count++;

	/* So terminating threads don't need to take the task lock to decrement */
	hw_atomic_add(&parent_task->active_thread_count, 1);

	/* Associate the thread with the processor set */
	pset_add_thread(pset, new_thread);

	timer_call_setup(&new_thread->wait_timer, thread_timer_expire, new_thread);
	timer_call_setup(&new_thread->depress_timer, thread_depress_expire, new_thread);

	/* Set the thread's scheduling parameters */
	if (parent_task != kernel_task)
		new_thread->sched_mode |= TH_MODE_TIMESHARE;
	new_thread->max_priority = parent_task->max_priority;
	new_thread->task_priority = parent_task->priority;
	new_thread->priority = (priority < 0)? parent_task->priority: priority;
	if (new_thread->priority > new_thread->max_priority)
		new_thread->priority = new_thread->max_priority;
	new_thread->importance =
					new_thread->priority - new_thread->task_priority;
	new_thread->sched_stamp = sched_tick;
	new_thread->pri_shift = new_thread->processor_set->pri_shift;
	compute_priority(new_thread, FALSE);

	new_thread->active = TRUE;

	*out_thread = new_thread;

	{
		long	dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4;

		kdbg_trace_data(parent_task->bsd_info, &dbg_arg2);

		KERNEL_DEBUG_CONSTANT(
					TRACEDBG_CODE(DBG_TRACE_DATA, 1) | DBG_FUNC_NONE,
					(vm_address_t)new_thread, dbg_arg2, 0, 0, 0);

		kdbg_trace_string(parent_task->bsd_info,
					&dbg_arg1, &dbg_arg2, &dbg_arg3, &dbg_arg4);

		KERNEL_DEBUG_CONSTANT(
					TRACEDBG_CODE(DBG_TRACE_STRING, 1) | DBG_FUNC_NONE,
					dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, 0);
	}

	return (KERN_SUCCESS);
}
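
/*
 *	Note: on success, thread_create_internal() returns with both the
 *	parent task and its processor set still locked; each caller below
 *	is responsible for dropping those locks once the new thread is
 *	fully set up.
 */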

kern_return_t
thread_create(
	task_t				task,
	thread_t			*new_thread)
{
	kern_return_t		result;
	thread_t			thread;

	if (task == TASK_NULL || task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	result = thread_create_internal(task, -1, (thread_continue_t)thread_bootstrap_return, &thread);
	if (result != KERN_SUCCESS)
		return (result);

	thread->user_stop_count = 1;
	thread_hold(thread);
	if (task->suspend_count > 0)
		thread_hold(thread);

	pset_unlock(task->processor_set);
	task_unlock(task);

	*new_thread = thread;

	return (KERN_SUCCESS);
}
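
/*
 *	Threads created through thread_create() start suspended: the
 *	user_stop_count of 1 plus the thread_hold() mirror a single
 *	thread_suspend(), so user space must set machine state and call
 *	thread_resume() before the thread will run.  A second hold is
 *	taken when the owning task is itself suspended.
 */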

kern_return_t
thread_create_running(
	register task_t			task,
	int						flavor,
	thread_state_t			new_state,
	mach_msg_type_number_t	new_state_count,
	thread_t				*new_thread)
{
	register kern_return_t	result;
	thread_t				thread;

	if (task == TASK_NULL || task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	result = thread_create_internal(task, -1, (thread_continue_t)thread_bootstrap_return, &thread);
	if (result != KERN_SUCCESS)
		return (result);

	result = machine_thread_set_state(
						thread, flavor, new_state, new_state_count);
	if (result != KERN_SUCCESS) {
		pset_unlock(task->processor_set);
		task_unlock(task);

		thread_terminate(thread);
		thread_deallocate(thread);
		return (result);
	}

	thread_mtx_lock(thread);
	clear_wait(thread, THREAD_AWAKENED);
	thread->started = TRUE;
	thread_mtx_unlock(thread);
	pset_unlock(task->processor_set);
	task_unlock(task);

	*new_thread = thread;

	return (result);
}
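
/*
 *	Unlike thread_create(), thread_create_running() takes caller-supplied
 *	machine state (a flavor/state pair, as with thread_set_state()) and
 *	starts the thread immediately instead of leaving it suspended.
 */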

/*
 *	kernel_thread_create:
 *
 *	Create a thread in the kernel task
 *	to execute in kernel context.
 */
kern_return_t
kernel_thread_create(
	thread_continue_t	continuation,
	void				*parameter,
	integer_t			priority,
	thread_t			*new_thread)
{
	kern_return_t		result;
	thread_t			thread;
	task_t				task = kernel_task;

	result = thread_create_internal(task, priority, continuation, &thread);
	if (result != KERN_SUCCESS)
		return (result);

	pset_unlock(task->processor_set);
	task_unlock(task);

	stack_alloc(thread);
	assert(thread->kernel_stack != 0);
	thread->reserved_stack = thread->kernel_stack;

	thread->parameter = parameter;

	*new_thread = thread;

	return (result);
}

kern_return_t
kernel_thread_start_priority(
	thread_continue_t	continuation,
	void				*parameter,
	integer_t			priority,
	thread_t			*new_thread)
{
	kern_return_t	result;
	thread_t		thread;

	result = kernel_thread_create(continuation, parameter, priority, &thread);
	if (result != KERN_SUCCESS)
		return (result);

	thread_mtx_lock(thread);
	clear_wait(thread, THREAD_AWAKENED);
	thread->started = TRUE;
	thread_mtx_unlock(thread);

	*new_thread = thread;

	return (result);
}

kern_return_t
kernel_thread_start(
	thread_continue_t	continuation,
	void				*parameter,
	thread_t			*new_thread)
{
	return kernel_thread_start_priority(continuation, parameter, -1, new_thread);
}
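
/*
 *	Typical usage, as in thread_daemon_init() above: start the thread,
 *	then drop the returned reference if the caller has no further use
 *	for it (my_service_loop is illustrative, not a kernel symbol):
 *
 *		thread_t	thread;
 *
 *		if (kernel_thread_start(my_service_loop, NULL, &thread) == KERN_SUCCESS)
 *			thread_deallocate(thread);
 */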

thread_t
kernel_thread(
	task_t			task,
	void			(*start)(void))
{
	kern_return_t	result;
	thread_t		thread;

	if (task != kernel_task)
		panic("kernel_thread");

	result = kernel_thread_start_priority((thread_continue_t)start, NULL, -1, &thread);
	if (result != KERN_SUCCESS)
		return (THREAD_NULL);

	thread_deallocate(thread);

	return (thread);
}

kern_return_t
thread_info_internal(
	register thread_t		thread,
	thread_flavor_t			flavor,
	thread_info_t			thread_info_out,	/* ptr to OUT array */
	mach_msg_type_number_t	*thread_info_count)	/*IN/OUT*/
{
	int						state, flags;
	spl_t					s;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (flavor == THREAD_BASIC_INFO) {
		register thread_basic_info_t	basic_info;

		if (*thread_info_count < THREAD_BASIC_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		basic_info = (thread_basic_info_t) thread_info_out;

		s = splsched();
		thread_lock(thread);

		/* fill in info */

		thread_read_times(thread, &basic_info->user_time,
									&basic_info->system_time);

		/*
		 *	Update lazy-evaluated scheduler info because someone wants it.
		 */
		if (thread->sched_stamp != sched_tick)
			update_priority(thread);

		basic_info->sleep_time = 0;

		/*
		 *	To calculate cpu_usage, first correct for timer rate,
		 *	then for 5/8 ageing.  The correction factor [3/5] is
		 *	(1/(5/8) - 1).
		 */
		basic_info->cpu_usage = ((uint64_t)thread->cpu_usage
										* TH_USAGE_SCALE) / sched_tick_interval;
		basic_info->cpu_usage = (basic_info->cpu_usage * 3) / 5;

		if (basic_info->cpu_usage > TH_USAGE_SCALE)
			basic_info->cpu_usage = TH_USAGE_SCALE;

		basic_info->policy = ((thread->sched_mode & TH_MODE_TIMESHARE)?
												POLICY_TIMESHARE: POLICY_RR);

		flags = 0;
		if (thread->state & TH_IDLE)
			flags |= TH_FLAGS_IDLE;

		if (!thread->kernel_stack)
			flags |= TH_FLAGS_SWAPPED;

		state = 0;
		if (thread->state & TH_TERMINATE)
			state = TH_STATE_HALTED;
		else
		if (thread->state & TH_RUN)
			state = TH_STATE_RUNNING;
		else
		if (thread->state & TH_UNINT)
			state = TH_STATE_UNINTERRUPTIBLE;
		else
		if (thread->state & TH_SUSP)
			state = TH_STATE_STOPPED;
		else
		if (thread->state & TH_WAIT)
			state = TH_STATE_WAITING;

		basic_info->run_state = state;
		basic_info->flags = flags;

		basic_info->suspend_count = thread->user_stop_count;

		thread_unlock(thread);
		splx(s);

		*thread_info_count = THREAD_BASIC_INFO_COUNT;

		return (KERN_SUCCESS);
	}
	else
	if (flavor == THREAD_SCHED_TIMESHARE_INFO) {
		policy_timeshare_info_t		ts_info;

		if (*thread_info_count < POLICY_TIMESHARE_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		ts_info = (policy_timeshare_info_t)thread_info_out;

		s = splsched();
		thread_lock(thread);

		if (!(thread->sched_mode & TH_MODE_TIMESHARE)) {
			thread_unlock(thread);
			splx(s);

			return (KERN_INVALID_POLICY);
		}

		ts_info->depressed = (thread->sched_mode & TH_MODE_ISDEPRESSED) != 0;
		if (ts_info->depressed) {
			ts_info->base_priority = DEPRESSPRI;
			ts_info->depress_priority = thread->priority;
		}
		else {
			ts_info->base_priority = thread->priority;
			ts_info->depress_priority = -1;
		}

		ts_info->cur_priority = thread->sched_pri;
		ts_info->max_priority = thread->max_priority;

		thread_unlock(thread);
		splx(s);

		*thread_info_count = POLICY_TIMESHARE_INFO_COUNT;

		return (KERN_SUCCESS);
	}
	else
	if (flavor == THREAD_SCHED_FIFO_INFO) {
		if (*thread_info_count < POLICY_FIFO_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		return (KERN_INVALID_POLICY);
	}
	else
	if (flavor == THREAD_SCHED_RR_INFO) {
		policy_rr_info_t			rr_info;

		if (*thread_info_count < POLICY_RR_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		rr_info = (policy_rr_info_t) thread_info_out;

		s = splsched();
		thread_lock(thread);

		if (thread->sched_mode & TH_MODE_TIMESHARE) {
			thread_unlock(thread);
			splx(s);

			return (KERN_INVALID_POLICY);
		}

		rr_info->depressed = (thread->sched_mode & TH_MODE_ISDEPRESSED) != 0;
		if (rr_info->depressed) {
			rr_info->base_priority = DEPRESSPRI;
			rr_info->depress_priority = thread->priority;
		}
		else {
			rr_info->base_priority = thread->priority;
			rr_info->depress_priority = -1;
		}

		rr_info->max_priority = thread->max_priority;
		rr_info->quantum = std_quantum_us / 1000;

		thread_unlock(thread);
		splx(s);

		*thread_info_count = POLICY_RR_INFO_COUNT;

		return (KERN_SUCCESS);
	}

	return (KERN_INVALID_ARGUMENT);
}
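
/*
 *	The *thread_info_count argument follows the usual Mach in/out
 *	convention: on entry it is the capacity of thread_info_out, and on
 *	success it is overwritten with the count actually returned for the
 *	requested flavor.
 */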

void
thread_read_times(
	thread_t		thread,
	time_value_t	*user_time,
	time_value_t	*system_time)
{
	absolutetime_to_microtime(
						timer_grab(&thread->user_timer),
						&user_time->seconds, &user_time->microseconds);

	absolutetime_to_microtime(
						timer_grab(&thread->system_timer),
						&system_time->seconds, &system_time->microseconds);
}

kern_return_t
thread_assign(
	__unused thread_t			thread,
	__unused processor_set_t	new_pset)
{
	return (KERN_FAILURE);
}

/*
 *	thread_assign_default:
 *
 *	Special version of thread_assign for assigning threads to default
 *	processor set.
 */
kern_return_t
thread_assign_default(
	thread_t		thread)
{
	return (thread_assign(thread, &default_pset));
}

/*
 *	thread_get_assignment
 *
 *	Return current assignment for this thread.
 */
kern_return_t
thread_get_assignment(
	thread_t		thread,
	processor_set_t	*pset)
{
	if (thread == NULL)
		return (KERN_INVALID_ARGUMENT);

	*pset = thread->processor_set;
	pset_reference(*pset);
	return (KERN_SUCCESS);
}

/*
 *	thread_wire_internal:
 *
 *	Specify that the target thread must always be able
 *	to run and to allocate memory.
 */
kern_return_t
thread_wire_internal(
	host_priv_t		host_priv,
	thread_t		thread,
	boolean_t		wired,
	boolean_t		*prev_state)
{
	if (host_priv == NULL || thread != current_thread())
		return (KERN_INVALID_ARGUMENT);

	assert(host_priv == &realhost);

	if (prev_state)
		*prev_state = (thread->options & TH_OPT_VMPRIV) != 0;

	if (wired) {
		if (!(thread->options & TH_OPT_VMPRIV))
			vm_page_free_reserve(1);	/* XXX */
		thread->options |= TH_OPT_VMPRIV;
	}
	else {
		if (thread->options & TH_OPT_VMPRIV)
			vm_page_free_reserve(-1);	/* XXX */
		thread->options &= ~TH_OPT_VMPRIV;
	}

	return (KERN_SUCCESS);
}

/*
 *	thread_wire:
 *
 *	User-API wrapper for thread_wire_internal().
 */
kern_return_t
thread_wire(
	host_priv_t	host_priv,
	thread_t	thread,
	boolean_t	wired)
{
	return (thread_wire_internal(host_priv, thread, wired, NULL));
}
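
/*
 *	TH_OPT_VMPRIV marks the thread as VM-privileged, letting it allocate
 *	pages from the reserved pool when free memory is otherwise exhausted;
 *	vm_page_free_reserve() grows or shrinks that reservation by one page
 *	per wired thread.
 */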

int split_funnel_off = 0;
lck_grp_t		*funnel_lck_grp = LCK_GRP_NULL;
lck_grp_attr_t	*funnel_lck_grp_attr;
lck_attr_t		*funnel_lck_attr;

funnel_t *
funnel_alloc(
	int type)
{
	lck_mtx_t	*m;
	funnel_t	*fnl;

	if (funnel_lck_grp == LCK_GRP_NULL) {
		funnel_lck_grp_attr = lck_grp_attr_alloc_init();

		funnel_lck_grp = lck_grp_alloc_init("Funnel", funnel_lck_grp_attr);

		funnel_lck_attr = lck_attr_alloc_init();
	}
	if ((fnl = (funnel_t *)kalloc(sizeof(funnel_t))) != 0){
		bzero((void *)fnl, sizeof(funnel_t));
		if ((m = lck_mtx_alloc_init(funnel_lck_grp, funnel_lck_attr)) == (lck_mtx_t *)NULL) {
			kfree(fnl, sizeof(funnel_t));
			return (THR_FUNNEL_NULL);
		}
		fnl->fnl_mutex = m;
		fnl->fnl_type = type;
	}
	return (fnl);
}

void
funnel_free(
	funnel_t * fnl)
{
	lck_mtx_free(fnl->fnl_mutex, funnel_lck_grp);
	if (fnl->fnl_oldmutex)
		lck_mtx_free(fnl->fnl_oldmutex, funnel_lck_grp);
	kfree(fnl, sizeof(funnel_t));
}

void
funnel_lock(
	funnel_t * fnl)
{
	lck_mtx_lock(fnl->fnl_mutex);
	fnl->fnl_mtxholder = current_thread();
}

void
funnel_unlock(
	funnel_t * fnl)
{
	lck_mtx_unlock(fnl->fnl_mutex);
	fnl->fnl_mtxrelease = current_thread();
}

funnel_t *
thread_funnel_get(
	void)
{
	thread_t th = current_thread();

	if (th->funnel_state & TH_FN_OWNED) {
		return (th->funnel_lock);
	}
	return (THR_FUNNEL_NULL);
}

boolean_t
thread_funnel_set(
	funnel_t *	fnl,
	boolean_t	funneled)
{
	thread_t	cur_thread;
	boolean_t	funnel_state_prev;
	boolean_t	intr;

	cur_thread = current_thread();
	funnel_state_prev = ((cur_thread->funnel_state & TH_FN_OWNED) == TH_FN_OWNED);

	if (funnel_state_prev != funneled) {
		intr = ml_set_interrupts_enabled(FALSE);

		if (funneled == TRUE) {
			if (cur_thread->funnel_lock)
				panic("Funnel lock called when holding one %p", cur_thread->funnel_lock);
			KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE,
										fnl, 1, 0, 0, 0);
			funnel_lock(fnl);
			KERNEL_DEBUG(0x6032434 | DBG_FUNC_NONE,
										fnl, 1, 0, 0, 0);
			cur_thread->funnel_state |= TH_FN_OWNED;
			cur_thread->funnel_lock = fnl;
		} else {
			if (cur_thread->funnel_lock->fnl_mutex != fnl->fnl_mutex)
				panic("Funnel unlock when not holding funnel");
			cur_thread->funnel_state &= ~TH_FN_OWNED;
			KERNEL_DEBUG(0x603242c | DBG_FUNC_NONE,
										fnl, 1, 0, 0, 0);

			cur_thread->funnel_lock = THR_FUNNEL_NULL;
			funnel_unlock(fnl);
		}
		(void)ml_set_interrupts_enabled(intr);
	} else {
		/*
		 * If we are trying to acquire the funnel recursively,
		 * verify that it is the funnel we already hold.
		 */
		if (funneled && (fnl->fnl_mutex != cur_thread->funnel_lock->fnl_mutex)) {
			panic("thread_funnel_set: already holding a different funnel");
		}
	}
	return (funnel_state_prev);
}
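
/*
 *	A funnel serializes its clients on a single mutex.  The typical
 *	pattern brackets a funneled section with thread_funnel_set() and
 *	restores the previous state on exit (kernel_flock here is assumed
 *	to be the BSD kernel funnel exported elsewhere):
 *
 *		boolean_t	funnel_state;
 *
 *		funnel_state = thread_funnel_set(kernel_flock, TRUE);
 *		... code requiring the funnel ...
 *		(void) thread_funnel_set(kernel_flock, funnel_state);
 */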


/*
 * Export routines to other components for things that are done as macros
 * within the osfmk component.
 */

#undef thread_reference
void thread_reference(thread_t thread);
void
thread_reference(
	thread_t	thread)
{
	if (thread != THREAD_NULL)
		thread_reference_internal(thread);
}

#undef thread_should_halt

boolean_t
thread_should_halt(
	thread_t		th)
{
	return (thread_should_halt_fast(th));
}