/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	kern/thread.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young, David Golub
 *	Date:	1986
 *
 *	Thread management primitives implementation.
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Computer Systems Laboratory (CSL).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
 * improvements that they make and grant CSL redistribution rights.
 *
 */

#include <mach_host.h>
#include <mach_prof.h>

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/policy.h>
#include <mach/thread_info.h>
#include <mach/thread_special_ports.h>
#include <mach/thread_status.h>
#include <mach/time_value.h>
#include <mach/vm_param.h>

#include <machine/thread.h>

#include <kern/kern_types.h>
#include <kern/kalloc.h>
#include <kern/cpu_data.h>
#include <kern/counters.h>
#include <kern/ipc_mig.h>
#include <kern/ipc_tt.h>
#include <kern/mach_param.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/sync_lock.h>
#include <kern/syscall_subr.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/host.h>
#include <kern/zalloc.h>
#include <kern/profile.h>
#include <kern/assert.h>

#include <ipc/ipc_kmsg.h>
#include <ipc/ipc_port.h>

#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>

#include <sys/kdebug.h>

/*
 * Exported interfaces
 */
#include <mach/task_server.h>
#include <mach/thread_act_server.h>
#include <mach/mach_host_server.h>
#include <mach/host_priv_server.h>

static struct zone	*thread_zone;

decl_simple_lock_data(static,thread_stack_lock)
static queue_head_t	thread_stack_queue;

decl_simple_lock_data(static,thread_terminate_lock)
static queue_head_t	thread_terminate_queue;

static struct thread	thread_template, init_thread;

#ifdef MACH_BSD
extern void		proc_exit(void *);
#endif	/* MACH_BSD */

void
thread_bootstrap(void)
{
	/*
	 *	Fill in a template thread for fast initialization.
	 */

	thread_template.runq = RUN_QUEUE_NULL;

	thread_template.ref_count = 2;

	thread_template.reason = AST_NONE;
	thread_template.at_safe_point = FALSE;
	thread_template.wait_event = NO_EVENT64;
	thread_template.wait_queue = WAIT_QUEUE_NULL;
	thread_template.wait_result = THREAD_WAITING;
	thread_template.options = THREAD_ABORTSAFE;
	thread_template.state = TH_WAIT | TH_UNINT;
	thread_template.wake_active = FALSE;
	thread_template.continuation = THREAD_CONTINUE_NULL;
	thread_template.parameter = NULL;

	thread_template.importance = 0;
	thread_template.sched_mode = 0;
	thread_template.safe_mode = 0;
	thread_template.safe_release = 0;

	thread_template.priority = 0;
	thread_template.sched_pri = 0;
	thread_template.max_priority = 0;
	thread_template.task_priority = 0;
	thread_template.promotions = 0;
	thread_template.pending_promoter_index = 0;
	thread_template.pending_promoter[0] =
	thread_template.pending_promoter[1] = NULL;

	thread_template.realtime.deadline = UINT64_MAX;

	thread_template.current_quantum = 0;

	thread_template.computation_metered = 0;
	thread_template.computation_epoch = 0;

	thread_template.sched_stamp = 0;
	thread_template.sched_usage = 0;
	thread_template.pri_shift = INT8_MAX;
	thread_template.cpu_usage = thread_template.cpu_delta = 0;

	thread_template.bound_processor = PROCESSOR_NULL;
	thread_template.last_processor = PROCESSOR_NULL;
	thread_template.last_switch = 0;

	timer_init(&thread_template.user_timer);
	timer_init(&thread_template.system_timer);
	thread_template.user_timer_save = 0;
	thread_template.system_timer_save = 0;

	thread_template.wait_timer_is_set = FALSE;
	thread_template.wait_timer_active = 0;

	thread_template.depress_timer_active = 0;

	thread_template.processor_set = PROCESSOR_SET_NULL;

	thread_template.special_handler.handler = special_handler;
	thread_template.special_handler.next = 0;

#if	MACH_HOST
	thread_template.may_assign = TRUE;
	thread_template.assign_active = FALSE;
#endif	/* MACH_HOST */
	thread_template.funnel_lock = THR_FUNNEL_NULL;
	thread_template.funnel_state = 0;
	thread_template.recover = (vm_offset_t)NULL;

	init_thread = thread_template;
	machine_set_current_thread(&init_thread);
}

void
thread_init(void)
{
	thread_zone = zinit(
			sizeof(struct thread),
			THREAD_MAX * sizeof(struct thread),
			THREAD_CHUNK * sizeof(struct thread),
			"threads");

	stack_init();

	/*
	 *	Initialize any machine-dependent
	 *	per-thread structures necessary.
	 */
	machine_thread_init();
}

static void
thread_terminate_continue(void)
{
	panic("thread_terminate_continue");
	/*NOTREACHED*/
}

/*
 *	thread_terminate_self:
 */
void
thread_terminate_self(void)
{
	thread_t	thread = current_thread();
	task_t		task;
	spl_t		s;

	s = splsched();
	thread_lock(thread);

	/*
	 *	Cancel priority depression, reset scheduling parameters,
	 *	and wait for concurrent expirations on other processors.
	 */
	if (thread->sched_mode & TH_MODE_ISDEPRESSED) {
		thread->sched_mode &= ~TH_MODE_ISDEPRESSED;

		if (timer_call_cancel(&thread->depress_timer))
			thread->depress_timer_active--;
	}

	thread_policy_reset(thread);

	while (thread->depress_timer_active > 0) {
		thread_unlock(thread);
		splx(s);

		delay(1);

		s = splsched();
		thread_lock(thread);
	}

	thread_unlock(thread);
	splx(s);

	thread_mtx_lock(thread);

	ulock_release_all(thread);

	ipc_thread_disable(thread);

	thread_mtx_unlock(thread);

	/*
	 *	If we are the last thread to terminate and the task is
	 *	associated with a BSD process, perform BSD process exit.
	 */
	task = thread->task;
	if (hw_atomic_sub(&task->active_thread_count, 1) == 0 &&
	    task->bsd_info != NULL)
		proc_exit(task->bsd_info);

	s = splsched();
	thread_lock(thread);

	/*
	 *	Cancel wait timer, and wait for
	 *	concurrent expirations.
	 */
	if (thread->wait_timer_is_set) {
		thread->wait_timer_is_set = FALSE;

		if (timer_call_cancel(&thread->wait_timer))
			thread->wait_timer_active--;
	}

	while (thread->wait_timer_active > 0) {
		thread_unlock(thread);
		splx(s);

		delay(1);

		s = splsched();
		thread_lock(thread);
	}

	/*
	 *	If there is a reserved stack, release it.
	 */
	if (thread->reserved_stack != 0) {
		if (thread->reserved_stack != thread->kernel_stack)
			stack_free_stack(thread->reserved_stack);
		thread->reserved_stack = 0;
	}

	/*
	 *	Mark thread as terminating, and block.
	 */
	thread->state |= TH_TERMINATE;
	thread_mark_wait_locked(thread, THREAD_UNINT);
	assert(thread->promotions == 0);
	thread_unlock(thread);
	/* splsched */

	thread_block((thread_continue_t)thread_terminate_continue);
	/*NOTREACHED*/
}
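
/*
 * A sketch of the overall termination flow, as pieced together from the
 * routines in this file (descriptive note, not an authoritative trace):
 *
 *	thread_terminate_self()		marks the thread TH_TERMINATE and
 *					blocks for the last time;
 *	the dispatch path, once the processor has switched away from the
 *	thread, places it on thread_terminate_queue via
 *	thread_terminate_enqueue() at splsched;
 *	thread_terminate_daemon()	dequeues it, charges its CPU time
 *					to the task, unlinks it from the
 *					task and pset, and drops the final
 *					reference via thread_deallocate().
 */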

void
thread_deallocate(
	thread_t		thread)
{
	processor_set_t		pset;
	task_t			task;

	if (thread == THREAD_NULL)
		return;

	if (thread_deallocate_internal(thread) > 0)
		return;

	ipc_thread_terminate(thread);

	task = thread->task;

#ifdef MACH_BSD
	{
		void *ut = thread->uthread;

		thread->uthread = NULL;
		uthread_free(task, ut, task->bsd_info);
	}
#endif	/* MACH_BSD */

	task_deallocate(task);

	pset = thread->processor_set;
	pset_deallocate(pset);

	if (thread->kernel_stack != 0)
		stack_free(thread);

	machine_thread_destroy(thread);

	zfree(thread_zone, thread);
}

/*
 *	thread_terminate_daemon:
 *
 *	Perform final clean up for terminating threads.
 */
static void
thread_terminate_daemon(void)
{
	thread_t		thread;
	task_t			task;
	processor_set_t		pset;

	(void)splsched();
	simple_lock(&thread_terminate_lock);

	while ((thread = (thread_t)dequeue_head(&thread_terminate_queue)) != THREAD_NULL) {
		simple_unlock(&thread_terminate_lock);
		(void)spllo();

		task = thread->task;

		task_lock(task);
		task->total_user_time += timer_grab(&thread->user_timer);
		task->total_system_time += timer_grab(&thread->system_timer);

		queue_remove(&task->threads, thread, thread_t, task_threads);
		task->thread_count--;
		task_unlock(task);

		pset = thread->processor_set;

		pset_lock(pset);
		pset_remove_thread(pset, thread);
		pset_unlock(pset);

		thread_deallocate(thread);

		(void)splsched();
		simple_lock(&thread_terminate_lock);
	}

	assert_wait((event_t)&thread_terminate_queue, THREAD_UNINT);
	simple_unlock(&thread_terminate_lock);
	/* splsched */

	thread_block((thread_continue_t)thread_terminate_daemon);
	/*NOTREACHED*/
}

/*
 *	thread_terminate_enqueue:
 *
 *	Enqueue a terminating thread for final disposition.
 *
 *	Called at splsched.
 */
void
thread_terminate_enqueue(
	thread_t		thread)
{
	simple_lock(&thread_terminate_lock);
	enqueue_tail(&thread_terminate_queue, (queue_entry_t)thread);
	simple_unlock(&thread_terminate_lock);

	thread_wakeup((event_t)&thread_terminate_queue);
}

/*
 *	thread_stack_daemon:
 *
 *	Perform stack allocation as required due to
 *	invoke failures.
 */
static void
thread_stack_daemon(void)
{
	thread_t		thread;

	(void)splsched();
	simple_lock(&thread_stack_lock);

	while ((thread = (thread_t)dequeue_head(&thread_stack_queue)) != THREAD_NULL) {
		simple_unlock(&thread_stack_lock);
		/* splsched */

		stack_alloc(thread);

		thread_lock(thread);
		thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
		thread_unlock(thread);
		(void)spllo();

		(void)splsched();
		simple_lock(&thread_stack_lock);
	}

	assert_wait((event_t)&thread_stack_queue, THREAD_UNINT);
	simple_unlock(&thread_stack_lock);
	/* splsched */

	thread_block((thread_continue_t)thread_stack_daemon);
	/*NOTREACHED*/
}
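
/*
 * Why this daemon exists (descriptive note): a thread that blocked with a
 * continuation gave up its kernel stack, and when it is made runnable again
 * the context-switch ("invoke") path may fail to obtain a stack while at
 * splsched, where it cannot block in the VM system.  Such threads are
 * parked on thread_stack_queue; this daemon then performs the potentially
 * blocking stack_alloc() on their behalf and makes them runnable again
 * with thread_setrun().
 */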

/*
 *	thread_stack_enqueue:
 *
 *	Enqueue a thread for stack allocation.
 *
 *	Called at splsched.
 */
void
thread_stack_enqueue(
	thread_t		thread)
{
	simple_lock(&thread_stack_lock);
	enqueue_tail(&thread_stack_queue, (queue_entry_t)thread);
	simple_unlock(&thread_stack_lock);

	thread_wakeup((event_t)&thread_stack_queue);
}

void
thread_daemon_init(void)
{
	kern_return_t	result;
	thread_t	thread;

	simple_lock_init(&thread_terminate_lock, 0);
	queue_init(&thread_terminate_queue);

	result = kernel_thread_start_priority((thread_continue_t)thread_terminate_daemon, NULL, MINPRI_KERNEL, &thread);
	if (result != KERN_SUCCESS)
		panic("thread_daemon_init: thread_terminate_daemon");

	thread_deallocate(thread);

	simple_lock_init(&thread_stack_lock, 0);
	queue_init(&thread_stack_queue);

	result = kernel_thread_start_priority((thread_continue_t)thread_stack_daemon, NULL, BASEPRI_PREEMPT, &thread);
	if (result != KERN_SUCCESS)
		panic("thread_daemon_init: thread_stack_daemon");

	thread_deallocate(thread);
}

/*
 *	Create a new thread.
 *	Doesn't start the thread running.
 *
 *	On success, returns with the task and its processor set
 *	still locked; the caller is responsible for unlocking them.
 */
static kern_return_t
thread_create_internal(
	task_t			parent_task,
	integer_t		priority,
	thread_continue_t	continuation,
	thread_t		*out_thread)
{
	thread_t		new_thread;
	processor_set_t		pset;
	static thread_t		first_thread;

	/*
	 *	Allocate a thread and initialize static fields
	 */
	if (first_thread == NULL)
		new_thread = first_thread = current_thread();
	else
		new_thread = (thread_t)zalloc(thread_zone);
	if (new_thread == NULL)
		return (KERN_RESOURCE_SHORTAGE);

	if (new_thread != first_thread)
		*new_thread = thread_template;

#ifdef MACH_BSD
	{
		new_thread->uthread = uthread_alloc(parent_task, new_thread);
		if (new_thread->uthread == NULL) {
			zfree(thread_zone, new_thread);
			return (KERN_RESOURCE_SHORTAGE);
		}
	}
#endif	/* MACH_BSD */

	if (machine_thread_create(new_thread, parent_task) != KERN_SUCCESS) {
#ifdef MACH_BSD
		{
			void *ut = new_thread->uthread;

			new_thread->uthread = NULL;
			uthread_free(parent_task, ut, parent_task->bsd_info);
		}
#endif	/* MACH_BSD */
		zfree(thread_zone, new_thread);
		return (KERN_FAILURE);
	}

	new_thread->task = parent_task;

	thread_lock_init(new_thread);
	wake_lock_init(new_thread);

	mutex_init(&new_thread->mutex, 0);

	ipc_thread_init(new_thread);
	queue_init(&new_thread->held_ulocks);
	thread_prof_init(new_thread, parent_task);

	new_thread->continuation = continuation;

	pset = parent_task->processor_set;
	assert(pset == &default_pset);
	pset_lock(pset);

	task_lock(parent_task);
	assert(parent_task->processor_set == pset);

	if (!parent_task->active ||
	    (parent_task->thread_count >= THREAD_MAX &&
	     parent_task != kernel_task)) {
		task_unlock(parent_task);
		pset_unlock(pset);

#ifdef MACH_BSD
		{
			void *ut = new_thread->uthread;

			new_thread->uthread = NULL;
			uthread_free(parent_task, ut, parent_task->bsd_info);
		}
#endif	/* MACH_BSD */
		ipc_thread_disable(new_thread);
		ipc_thread_terminate(new_thread);
		machine_thread_destroy(new_thread);
		zfree(thread_zone, new_thread);
		return (KERN_FAILURE);
	}

	task_reference_internal(parent_task);

	/* Cache the task's map */
	new_thread->map = parent_task->map;

	/* Chain the thread onto the task's list */
	queue_enter(&parent_task->threads, new_thread, thread_t, task_threads);
	parent_task->thread_count++;

	/* So terminating threads don't need to take the task lock to decrement */
	hw_atomic_add(&parent_task->active_thread_count, 1);

	/* Associate the thread with the processor set */
	pset_add_thread(pset, new_thread);

	timer_call_setup(&new_thread->wait_timer, thread_timer_expire, new_thread);
	timer_call_setup(&new_thread->depress_timer, thread_depress_expire, new_thread);

	/* Set the thread's scheduling parameters */
	if (parent_task != kernel_task)
		new_thread->sched_mode |= TH_MODE_TIMESHARE;
	new_thread->max_priority = parent_task->max_priority;
	new_thread->task_priority = parent_task->priority;
	new_thread->priority = (priority < 0)? parent_task->priority: priority;
	if (new_thread->priority > new_thread->max_priority)
		new_thread->priority = new_thread->max_priority;
	new_thread->importance =
			new_thread->priority - new_thread->task_priority;
	new_thread->sched_stamp = sched_tick;
	new_thread->pri_shift = new_thread->processor_set->pri_shift;
	compute_priority(new_thread, FALSE);

	new_thread->active = TRUE;

	*out_thread = new_thread;

	{
		long	dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4;

		kdbg_trace_data(parent_task->bsd_info, &dbg_arg2);

		KERNEL_DEBUG_CONSTANT(
			TRACEDBG_CODE(DBG_TRACE_DATA, 1) | DBG_FUNC_NONE,
			(vm_address_t)new_thread, dbg_arg2, 0, 0, 0);

		kdbg_trace_string(parent_task->bsd_info,
			&dbg_arg1, &dbg_arg2, &dbg_arg3, &dbg_arg4);

		KERNEL_DEBUG_CONSTANT(
			TRACEDBG_CODE(DBG_TRACE_STRING, 1) | DBG_FUNC_NONE,
			dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, 0);
	}

	return (KERN_SUCCESS);
}

kern_return_t
thread_create(
	task_t		task,
	thread_t	*new_thread)
{
	kern_return_t	result;
	thread_t	thread;

	if (task == TASK_NULL || task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	result = thread_create_internal(task, -1, (thread_continue_t)thread_bootstrap_return, &thread);
	if (result != KERN_SUCCESS)
		return (result);

	thread->user_stop_count = 1;
	thread_hold(thread);
	if (task->suspend_count > 0)
		thread_hold(thread);

	pset_unlock(task->processor_set);
	task_unlock(task);

	*new_thread = thread;

	return (KERN_SUCCESS);
}
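
/*
 * Note (descriptive): the thread comes back suspended (user_stop_count is
 * set to 1 and thread_hold() has been applied), so a Mach client typically
 * follows thread_create() with thread_set_state() and thread_resume() to
 * get the new thread running.
 */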

kern_return_t
thread_create_running(
	register task_t		task,
	int			flavor,
	thread_state_t		new_state,
	mach_msg_type_number_t	new_state_count,
	thread_t		*new_thread)
{
	register kern_return_t	result;
	thread_t		thread;

	if (task == TASK_NULL || task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	result = thread_create_internal(task, -1, (thread_continue_t)thread_bootstrap_return, &thread);
	if (result != KERN_SUCCESS)
		return (result);

	result = machine_thread_set_state(
				thread, flavor, new_state, new_state_count);
	if (result != KERN_SUCCESS) {
		pset_unlock(task->processor_set);
		task_unlock(task);

		thread_terminate(thread);
		thread_deallocate(thread);
		return (result);
	}

	thread_mtx_lock(thread);
	clear_wait(thread, THREAD_AWAKENED);
	thread->started = TRUE;
	thread_mtx_unlock(thread);
	pset_unlock(task->processor_set);
	task_unlock(task);

	*new_thread = thread;

	return (result);
}

/*
 *	kernel_thread_create:
 *
 *	Create a thread in the kernel task
 *	to execute in kernel context.
 */
kern_return_t
kernel_thread_create(
	thread_continue_t	continuation,
	void			*parameter,
	integer_t		priority,
	thread_t		*new_thread)
{
	kern_return_t		result;
	thread_t		thread;
	task_t			task = kernel_task;

	result = thread_create_internal(task, priority, continuation, &thread);
	if (result != KERN_SUCCESS)
		return (result);

	pset_unlock(task->processor_set);
	task_unlock(task);

#if !defined(i386)
	stack_alloc(thread);
	assert(thread->kernel_stack != 0);
	thread->reserved_stack = thread->kernel_stack;
#endif	/* !defined(i386) */

	thread->parameter = parameter;

	*new_thread = thread;

	return (result);
}

kern_return_t
kernel_thread_start_priority(
	thread_continue_t	continuation,
	void			*parameter,
	integer_t		priority,
	thread_t		*new_thread)
{
	kern_return_t		result;
	thread_t		thread;

	result = kernel_thread_create(continuation, parameter, priority, &thread);
	if (result != KERN_SUCCESS)
		return (result);

	thread_mtx_lock(thread);
	clear_wait(thread, THREAD_AWAKENED);
	thread->started = TRUE;
	thread_mtx_unlock(thread);

	*new_thread = thread;

	return (result);
}

kern_return_t
kernel_thread_start(
	thread_continue_t	continuation,
	void			*parameter,
	thread_t		*new_thread)
{
	return kernel_thread_start_priority(continuation, parameter, -1, new_thread);
}
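
/*
 * Typical in-kernel usage (a minimal sketch; my_daemon is a hypothetical
 * continuation of the caller's own, following the same pattern as
 * thread_daemon_init() above).  The returned reference is dropped once
 * the caller no longer needs the thread handle:
 *
 *	thread_t	thread;
 *	kern_return_t	result;
 *
 *	result = kernel_thread_start((thread_continue_t)my_daemon,
 *					NULL, &thread);
 *	if (result != KERN_SUCCESS)
 *		panic("failed to start my_daemon");
 *	thread_deallocate(thread);
 */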

thread_t
kernel_thread(
	task_t		task,
	void		(*start)(void))
{
	kern_return_t	result;
	thread_t	thread;

	if (task != kernel_task)
		panic("kernel_thread");

	result = kernel_thread_start_priority((thread_continue_t)start, NULL, -1, &thread);
	if (result != KERN_SUCCESS)
		return (THREAD_NULL);

	thread_deallocate(thread);

	return (thread);
}

kern_return_t
thread_info_internal(
	register thread_t	thread,
	thread_flavor_t		flavor,
	thread_info_t		thread_info_out,	/* ptr to OUT array */
	mach_msg_type_number_t	*thread_info_count)	/*IN/OUT*/
{
	int		state, flags;
	spl_t		s;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (flavor == THREAD_BASIC_INFO) {
		register thread_basic_info_t	basic_info;

		if (*thread_info_count < THREAD_BASIC_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		basic_info = (thread_basic_info_t) thread_info_out;

		s = splsched();
		thread_lock(thread);

		/* fill in info */

		thread_read_times(thread, &basic_info->user_time,
						&basic_info->system_time);

		/*
		 *	Update lazy-evaluated scheduler info because someone wants it.
		 */
		if (thread->sched_stamp != sched_tick)
			update_priority(thread);

		basic_info->sleep_time = 0;

		/*
		 *	To calculate cpu_usage, first correct for timer rate,
		 *	then for 5/8 ageing.  The correction factor [3/5] is
		 *	(1/(5/8) - 1) = 8/5 - 1 = 3/5.
		 */
		basic_info->cpu_usage = ((uint64_t)thread->cpu_usage *
					TH_USAGE_SCALE) / sched_tick_interval;
		basic_info->cpu_usage = (basic_info->cpu_usage * 3) / 5;

		if (basic_info->cpu_usage > TH_USAGE_SCALE)
			basic_info->cpu_usage = TH_USAGE_SCALE;

		basic_info->policy = ((thread->sched_mode & TH_MODE_TIMESHARE)?
						POLICY_TIMESHARE: POLICY_RR);

		flags = 0;
		if (thread->state & TH_IDLE)
			flags |= TH_FLAGS_IDLE;

		if (!thread->kernel_stack)
			flags |= TH_FLAGS_SWAPPED;

		state = 0;
		if (thread->state & TH_TERMINATE)
			state = TH_STATE_HALTED;
		else
		if (thread->state & TH_RUN)
			state = TH_STATE_RUNNING;
		else
		if (thread->state & TH_UNINT)
			state = TH_STATE_UNINTERRUPTIBLE;
		else
		if (thread->state & TH_SUSP)
			state = TH_STATE_STOPPED;
		else
		if (thread->state & TH_WAIT)
			state = TH_STATE_WAITING;

		basic_info->run_state = state;
		basic_info->flags = flags;

		basic_info->suspend_count = thread->user_stop_count;

		thread_unlock(thread);
		splx(s);

		*thread_info_count = THREAD_BASIC_INFO_COUNT;

		return (KERN_SUCCESS);
	}
	else
	if (flavor == THREAD_SCHED_TIMESHARE_INFO) {
		policy_timeshare_info_t		ts_info;

		if (*thread_info_count < POLICY_TIMESHARE_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		ts_info = (policy_timeshare_info_t)thread_info_out;

		s = splsched();
		thread_lock(thread);

		if (!(thread->sched_mode & TH_MODE_TIMESHARE)) {
			thread_unlock(thread);
			splx(s);

			return (KERN_INVALID_POLICY);
		}

		ts_info->depressed = (thread->sched_mode & TH_MODE_ISDEPRESSED) != 0;
		if (ts_info->depressed) {
			ts_info->base_priority = DEPRESSPRI;
			ts_info->depress_priority = thread->priority;
		}
		else {
			ts_info->base_priority = thread->priority;
			ts_info->depress_priority = -1;
		}

		ts_info->cur_priority = thread->sched_pri;
		ts_info->max_priority = thread->max_priority;

		thread_unlock(thread);
		splx(s);

		*thread_info_count = POLICY_TIMESHARE_INFO_COUNT;

		return (KERN_SUCCESS);
	}
	else
	if (flavor == THREAD_SCHED_FIFO_INFO) {
		if (*thread_info_count < POLICY_FIFO_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		return (KERN_INVALID_POLICY);
	}
	else
	if (flavor == THREAD_SCHED_RR_INFO) {
		policy_rr_info_t		rr_info;

		if (*thread_info_count < POLICY_RR_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		rr_info = (policy_rr_info_t) thread_info_out;

		s = splsched();
		thread_lock(thread);

		if (thread->sched_mode & TH_MODE_TIMESHARE) {
			thread_unlock(thread);
			splx(s);

			return (KERN_INVALID_POLICY);
		}

		rr_info->depressed = (thread->sched_mode & TH_MODE_ISDEPRESSED) != 0;
		if (rr_info->depressed) {
			rr_info->base_priority = DEPRESSPRI;
			rr_info->depress_priority = thread->priority;
		}
		else {
			rr_info->base_priority = thread->priority;
			rr_info->depress_priority = -1;
		}

		rr_info->max_priority = thread->max_priority;
		rr_info->quantum = std_quantum_us / 1000;

		thread_unlock(thread);
		splx(s);

		*thread_info_count = POLICY_RR_INFO_COUNT;

		return (KERN_SUCCESS);
	}

	return (KERN_INVALID_ARGUMENT);
}
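
/*
 * Userland view, for reference: thread_info_internal() is reached via the
 * MIG-generated thread_info() routine.  A minimal sketch of a caller
 * (error handling elided; the port returned by mach_thread_self() should
 * be deallocated when no longer needed):
 *
 *	thread_basic_info_data_t	info;
 *	mach_msg_type_number_t		count = THREAD_BASIC_INFO_COUNT;
 *
 *	kern_return_t kr = thread_info(mach_thread_self(), THREAD_BASIC_INFO,
 *					(thread_info_t)&info, &count);
 *	if (kr == KERN_SUCCESS)
 *		printf("cpu usage: %d of %d\n", info.cpu_usage, TH_USAGE_SCALE);
 */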

void
thread_read_times(
	thread_t	thread,
	time_value_t	*user_time,
	time_value_t	*system_time)
{
	absolutetime_to_microtime(timer_grab(&thread->user_timer),
			&user_time->seconds, &user_time->microseconds);

	absolutetime_to_microtime(timer_grab(&thread->system_timer),
			&system_time->seconds, &system_time->microseconds);
}

kern_return_t
thread_assign(
	__unused thread_t		thread,
	__unused processor_set_t	new_pset)
{
	return (KERN_FAILURE);
}

/*
 *	thread_assign_default:
 *
 *	Special version of thread_assign for assigning threads to default
 *	processor set.
 */
kern_return_t
thread_assign_default(
	thread_t	thread)
{
	return (thread_assign(thread, &default_pset));
}

/*
 *	thread_get_assignment
 *
 *	Return current assignment for this thread.
 */
kern_return_t
thread_get_assignment(
	thread_t	thread,
	processor_set_t	*pset)
{
	if (thread == NULL)
		return (KERN_INVALID_ARGUMENT);

	*pset = thread->processor_set;
	pset_reference(*pset);
	return (KERN_SUCCESS);
}

/*
 *	thread_wire_internal:
 *
 *	Specify that the target thread must always be able
 *	to run and to allocate memory.
 */
kern_return_t
thread_wire_internal(
	host_priv_t	host_priv,
	thread_t	thread,
	boolean_t	wired,
	boolean_t	*prev_state)
{
	if (host_priv == NULL || thread != current_thread())
		return (KERN_INVALID_ARGUMENT);

	assert(host_priv == &realhost);

	if (prev_state)
		*prev_state = (thread->options & TH_OPT_VMPRIV) != 0;

	if (wired) {
		if (!(thread->options & TH_OPT_VMPRIV))
			vm_page_free_reserve(1);	/* XXX */
		thread->options |= TH_OPT_VMPRIV;
	}
	else {
		if (thread->options & TH_OPT_VMPRIV)
			vm_page_free_reserve(-1);	/* XXX */
		thread->options &= ~TH_OPT_VMPRIV;
	}

	return (KERN_SUCCESS);
}

/*
 *	thread_wire:
 *
 *	User-api wrapper for thread_wire_internal()
 */
kern_return_t
thread_wire(
	host_priv_t	host_priv,
	thread_t	thread,
	boolean_t	wired)
{
	return (thread_wire_internal(host_priv, thread, wired, NULL));
}
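
/*
 * Usage sketch (hypothetical caller): a VM-critical thread might wire
 * itself so that it can always run and allocate memory during reclaim:
 *
 *	thread_wire(host_priv_self(), current_thread(), TRUE);
 */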

int split_funnel_off = 0;
lck_grp_t	*funnel_lck_grp = LCK_GRP_NULL;
lck_grp_attr_t	*funnel_lck_grp_attr;
lck_attr_t	*funnel_lck_attr;

funnel_t *
funnel_alloc(
	int	type)
{
	lck_mtx_t	*m;
	funnel_t	*fnl;

	if (funnel_lck_grp == LCK_GRP_NULL) {
		funnel_lck_grp_attr = lck_grp_attr_alloc_init();
		//lck_grp_attr_setstat(funnel_lck_grp_attr);

		funnel_lck_grp = lck_grp_alloc_init("Funnel", funnel_lck_grp_attr);

		funnel_lck_attr = lck_attr_alloc_init();
		//lck_attr_setdebug(funnel_lck_attr);
	}
	if ((fnl = (funnel_t *)kalloc(sizeof(funnel_t))) != 0) {
		bzero((void *)fnl, sizeof(funnel_t));
		if ((m = lck_mtx_alloc_init(funnel_lck_grp, funnel_lck_attr)) == (lck_mtx_t *)NULL) {
			kfree(fnl, sizeof(funnel_t));
			return (THR_FUNNEL_NULL);
		}
		fnl->fnl_mutex = m;
		fnl->fnl_type = type;
	}
	return (fnl);
}

void
funnel_free(
	funnel_t *fnl)
{
	lck_mtx_free(fnl->fnl_mutex, funnel_lck_grp);
	if (fnl->fnl_oldmutex)
		lck_mtx_free(fnl->fnl_oldmutex, funnel_lck_grp);
	kfree(fnl, sizeof(funnel_t));
}

void
funnel_lock(
	funnel_t *fnl)
{
	lck_mtx_lock(fnl->fnl_mutex);
	fnl->fnl_mtxholder = current_thread();
}

void
funnel_unlock(
	funnel_t *fnl)
{
	lck_mtx_unlock(fnl->fnl_mutex);
	fnl->fnl_mtxrelease = current_thread();
}

funnel_t *
thread_funnel_get(
	void)
{
	thread_t th = current_thread();

	if (th->funnel_state & TH_FN_OWNED) {
		return (th->funnel_lock);
	}
	return (THR_FUNNEL_NULL);
}

boolean_t
thread_funnel_set(
	funnel_t	*fnl,
	boolean_t	funneled)
{
	thread_t	cur_thread;
	boolean_t	funnel_state_prev;
	boolean_t	intr;

	cur_thread = current_thread();
	funnel_state_prev = ((cur_thread->funnel_state & TH_FN_OWNED) == TH_FN_OWNED);

	if (funnel_state_prev != funneled) {
		intr = ml_set_interrupts_enabled(FALSE);

		if (funneled == TRUE) {
			if (cur_thread->funnel_lock)
				panic("Funnel lock called when holding one %x", cur_thread->funnel_lock);
			KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE,
						fnl, 1, 0, 0, 0);
			funnel_lock(fnl);
			KERNEL_DEBUG(0x6032434 | DBG_FUNC_NONE,
						fnl, 1, 0, 0, 0);
			cur_thread->funnel_state |= TH_FN_OWNED;
			cur_thread->funnel_lock = fnl;
		} else {
			if (cur_thread->funnel_lock->fnl_mutex != fnl->fnl_mutex)
				panic("Funnel unlock when not holding funnel");
			cur_thread->funnel_state &= ~TH_FN_OWNED;
			KERNEL_DEBUG(0x603242c | DBG_FUNC_NONE,
						fnl, 1, 0, 0, 0);

			cur_thread->funnel_lock = THR_FUNNEL_NULL;
			funnel_unlock(fnl);
		}
		(void)ml_set_interrupts_enabled(intr);
	} else {
		/*
		 * If we are trying to acquire the funnel recursively,
		 * verify that it is the funnel we already hold.
		 */
		if (funneled && (fnl->fnl_mutex != cur_thread->funnel_lock->fnl_mutex)) {
			panic("thread_funnel_set: already holding a different funnel");
		}
	}
	return (funnel_state_prev);
}
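
/*
 * Canonical usage from BSD-side code (kernel_flock is the global BSD
 * funnel, allocated at BSD init time; the save/restore idiom tolerates
 * being called while the funnel is already held):
 *
 *	boolean_t	funnel_state;
 *
 *	funnel_state = thread_funnel_set(kernel_flock, TRUE);
 *	... code requiring the BSD funnel ...
 *	(void) thread_funnel_set(kernel_flock, funnel_state);
 */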

/*
 * Export routines to other components for things that are done as macros
 * within the osfmk component.
 */

#undef thread_reference
void thread_reference(thread_t thread);
void
thread_reference(
	thread_t	thread)
{
	if (thread != THREAD_NULL)
		thread_reference_internal(thread);
}

#undef thread_should_halt

boolean_t
thread_should_halt(
	thread_t	th)
{
	return (thread_should_halt_fast(th));
}