/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	kern/thread.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young, David Golub
 *	Date:	1986
 *
 *	Thread management primitives implementation.
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Computer Systems Laboratory (CSL).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
 * improvements that they make and grant CSL redistribution rights.
 *
 */

#include <mach_host.h>
#include <mach_prof.h>

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/policy.h>
#include <mach/thread_info.h>
#include <mach/thread_special_ports.h>
#include <mach/thread_status.h>
#include <mach/time_value.h>
#include <mach/vm_param.h>

#include <machine/thread.h>

#include <kern/kern_types.h>
#include <kern/kalloc.h>
#include <kern/cpu_data.h>
#include <kern/counters.h>
#include <kern/ipc_mig.h>
#include <kern/ipc_tt.h>
#include <kern/mach_param.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/sync_lock.h>
#include <kern/syscall_subr.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/host.h>
#include <kern/zalloc.h>
#include <kern/profile.h>
#include <kern/assert.h>

#include <ipc/ipc_kmsg.h>
#include <ipc/ipc_port.h>

#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>

#include <sys/kdebug.h>
/*
 * Exported interfaces
 */
#include <mach/task_server.h>
#include <mach/thread_act_server.h>
#include <mach/mach_host_server.h>
#include <mach/host_priv_server.h>

static struct zone *thread_zone;

decl_simple_lock_data(static, thread_stack_lock)
static queue_head_t thread_stack_queue;

decl_simple_lock_data(static, thread_terminate_lock)
static queue_head_t thread_terminate_queue;

static struct thread thread_template, init_thread;

#ifdef MACH_BSD
extern void proc_exit(void *);
#endif /* MACH_BSD */

void
thread_bootstrap(void)
{
    /*
     * Fill in a template thread for fast initialization.
     */

    thread_template.runq = RUN_QUEUE_NULL;

    thread_template.ref_count = 2;

    thread_template.reason = AST_NONE;
    thread_template.at_safe_point = FALSE;
    thread_template.wait_event = NO_EVENT64;
    thread_template.wait_queue = WAIT_QUEUE_NULL;
    thread_template.wait_result = THREAD_WAITING;
    thread_template.options = THREAD_ABORTSAFE;
    thread_template.state = TH_WAIT | TH_UNINT;
    thread_template.wake_active = FALSE;
    thread_template.continuation = THREAD_CONTINUE_NULL;
    thread_template.parameter = NULL;

    thread_template.importance = 0;
    thread_template.sched_mode = 0;
    thread_template.safe_mode = 0;
    thread_template.safe_release = 0;

    thread_template.priority = 0;
    thread_template.sched_pri = 0;
    thread_template.max_priority = 0;
    thread_template.task_priority = 0;
    thread_template.promotions = 0;
    thread_template.pending_promoter_index = 0;
    thread_template.pending_promoter[0] =
        thread_template.pending_promoter[1] = NULL;

    thread_template.realtime.deadline = UINT64_MAX;

    thread_template.current_quantum = 0;

    thread_template.computation_metered = 0;
    thread_template.computation_epoch = 0;

    thread_template.sched_stamp = 0;
    thread_template.sched_usage = 0;
    thread_template.pri_shift = INT8_MAX;
    thread_template.cpu_usage = thread_template.cpu_delta = 0;

    thread_template.bound_processor = PROCESSOR_NULL;
    thread_template.last_processor = PROCESSOR_NULL;
    thread_template.last_switch = 0;

    timer_init(&thread_template.user_timer);
    timer_init(&thread_template.system_timer);
    thread_template.user_timer_save = 0;
    thread_template.system_timer_save = 0;

    thread_template.wait_timer_is_set = FALSE;
    thread_template.wait_timer_active = 0;

    thread_template.depress_timer_active = 0;

    thread_template.processor_set = PROCESSOR_SET_NULL;

    thread_template.special_handler.handler = special_handler;
    thread_template.special_handler.next = 0;

#if MACH_HOST
    thread_template.may_assign = TRUE;
    thread_template.assign_active = FALSE;
#endif /* MACH_HOST */
    thread_template.funnel_lock = THR_FUNNEL_NULL;
    thread_template.funnel_state = 0;
    thread_template.recover = (vm_offset_t)NULL;

    init_thread = thread_template;
    machine_set_current_thread(&init_thread);
}
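
/*
 * Illustrative note (added commentary, not original source): the template
 * filled in above is consumed by thread_create_internal() below, which
 * initializes each new thread with a single structure copy rather than
 * assigning every field individually:
 *
 *	new_thread = (thread_t)zalloc(thread_zone);
 *	*new_thread = thread_template;		-- all defaults in one copy
 *
 * Only the fields that differ per thread (task, priority, timers, and
 * so on) are then set explicitly.
 */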

void
thread_init(void)
{
    thread_zone = zinit(
        sizeof(struct thread),
        THREAD_MAX * sizeof(struct thread),
        THREAD_CHUNK * sizeof(struct thread),
        "threads");

    stack_init();

    /*
     * Initialize any machine-dependent
     * per-thread structures necessary.
     */
    machine_thread_init();
}
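
/*
 * Note on the zinit() call above (added commentary): the arguments are,
 * in order, the element size, the maximum amount of memory the zone may
 * consume (THREAD_MAX threads), the increment in which backing memory is
 * allocated (THREAD_CHUNK threads), and a name for the zone used in
 * debugging output.
 */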

static void
thread_terminate_continue(void)
{
    panic("thread_terminate_continue");
    /*NOTREACHED*/
}

/*
 * thread_terminate_self:
 */
void
thread_terminate_self(void)
{
    thread_t thread = current_thread();
    task_t task;
    spl_t s;

    s = splsched();
    thread_lock(thread);

    /*
     * Cancel priority depression, reset scheduling parameters,
     * and wait for concurrent expirations on other processors.
     */
    if (thread->sched_mode & TH_MODE_ISDEPRESSED) {
        thread->sched_mode &= ~TH_MODE_ISDEPRESSED;

        if (timer_call_cancel(&thread->depress_timer))
            thread->depress_timer_active--;
    }

    thread_policy_reset(thread);

    while (thread->depress_timer_active > 0) {
        thread_unlock(thread);
        splx(s);

        delay(1);

        s = splsched();
        thread_lock(thread);
    }

    thread_unlock(thread);
    splx(s);

    thread_mtx_lock(thread);

    ulock_release_all(thread);

    ipc_thread_disable(thread);

    thread_mtx_unlock(thread);

    /*
     * If we are the last thread to terminate and the task is
     * associated with a BSD process, perform BSD process exit.
     */
    task = thread->task;
    if (hw_atomic_sub(&task->active_thread_count, 1) == 0 &&
            task->bsd_info != NULL)
        proc_exit(task->bsd_info);

    s = splsched();
    thread_lock(thread);

    /*
     * Cancel wait timer, and wait for
     * concurrent expirations.
     */
    if (thread->wait_timer_is_set) {
        thread->wait_timer_is_set = FALSE;

        if (timer_call_cancel(&thread->wait_timer))
            thread->wait_timer_active--;
    }

    while (thread->wait_timer_active > 0) {
        thread_unlock(thread);
        splx(s);

        delay(1);

        s = splsched();
        thread_lock(thread);
    }

    /*
     * If there is a reserved stack, release it.
     */
    if (thread->reserved_stack != 0) {
        if (thread->reserved_stack != thread->kernel_stack)
            stack_free_stack(thread->reserved_stack);
        thread->reserved_stack = 0;
    }

    /*
     * Mark thread as terminating, and block.
     */
    thread->state |= TH_TERMINATE;
    thread_mark_wait_locked(thread, THREAD_UNINT);
    assert(thread->promotions == 0);
    thread_unlock(thread);
    /* splsched */

    thread_block((thread_continue_t)thread_terminate_continue);
    /*NOTREACHED*/
}
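
/*
 * A note on the cancel-and-drain idiom used twice above (added
 * commentary): timer_call_cancel() succeeds only if it removes the
 * callout before it fires, in which case we decrement the active count
 * ourselves; otherwise the expiration routine is assumed to be running
 * concurrently on another processor and to perform its own decrement.
 * The while loop then drops the thread lock (so the expiration routine
 * can take it) and spins until the count drains:
 *
 *	if (timer_call_cancel(&thread->wait_timer))
 *		thread->wait_timer_active--;	-- we beat the callout
 *	while (thread->wait_timer_active > 0)
 *		...unlock, delay(1), relock...	-- callout still in flight
 */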

void
thread_deallocate(
    thread_t thread)
{
    processor_set_t pset;
    task_t task;

    if (thread == THREAD_NULL)
        return;

    if (thread_deallocate_internal(thread) > 0)
        return;

    ipc_thread_terminate(thread);

    task = thread->task;

#ifdef MACH_BSD
    {
        void *ut = thread->uthread;

        thread->uthread = NULL;
        uthread_free(task, ut, task->bsd_info);
    }
#endif /* MACH_BSD */

    task_deallocate(task);

    pset = thread->processor_set;
    pset_deallocate(pset);

    if (thread->kernel_stack != 0)
        stack_free(thread);

    machine_thread_destroy(thread);

    zfree(thread_zone, thread);
}
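
/*
 * Reference discipline (added commentary): thread_template starts
 * ref_count at 2 -- one reference belongs to the thread itself, dropped
 * by thread_terminate_daemon() below, and one belongs to the creator.
 * A typical creator therefore releases its reference once it is done
 * with the thread_t, as thread_daemon_init() and kernel_thread() in
 * this file do:
 *
 *	kernel_thread_start_priority(continuation, NULL, pri, &thread);
 *	...
 *	thread_deallocate(thread);	-- drop the creation reference
 */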

/*
 * thread_terminate_daemon:
 *
 * Perform final clean up for terminating threads.
 */
static void
thread_terminate_daemon(void)
{
    thread_t thread;
    task_t task;
    processor_set_t pset;

    (void)splsched();
    simple_lock(&thread_terminate_lock);

    while ((thread = (thread_t)dequeue_head(&thread_terminate_queue)) != THREAD_NULL) {
        simple_unlock(&thread_terminate_lock);
        (void)spllo();

        task = thread->task;

        task_lock(task);
        task->total_user_time += timer_grab(&thread->user_timer);
        task->total_system_time += timer_grab(&thread->system_timer);

        queue_remove(&task->threads, thread, thread_t, task_threads);
        task->thread_count--;
        task_unlock(task);

        pset = thread->processor_set;

        pset_lock(pset);
        pset_remove_thread(pset, thread);
        pset_unlock(pset);

        thread_deallocate(thread);

        (void)splsched();
        simple_lock(&thread_terminate_lock);
    }

    assert_wait((event_t)&thread_terminate_queue, THREAD_UNINT);
    simple_unlock(&thread_terminate_lock);
    /* splsched */

    thread_block((thread_continue_t)thread_terminate_daemon);
    /*NOTREACHED*/
}

/*
 * thread_terminate_enqueue:
 *
 * Enqueue a terminating thread for final disposition.
 *
 * Called at splsched.
 */
void
thread_terminate_enqueue(
    thread_t thread)
{
    simple_lock(&thread_terminate_lock);
    enqueue_tail(&thread_terminate_queue, (queue_entry_t)thread);
    simple_unlock(&thread_terminate_lock);

    thread_wakeup((event_t)&thread_terminate_queue);
}
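
/*
 * Added commentary: thread_terminate_enqueue() and
 * thread_terminate_daemon() form a simple producer/consumer pair.  The
 * producer appends work under the queue lock and then issues
 * thread_wakeup() on the queue address; the consumer drains the queue
 * and, before releasing the lock for the last time, calls assert_wait()
 * on that same address, so a wakeup cannot slip in between the final
 * dequeue and the block.  thread_stack_enqueue() and
 * thread_stack_daemon() below use the identical pattern.
 */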

/*
 * thread_stack_daemon:
 *
 * Perform stack allocation as required due to invoke
 * failures, i.e. for threads that were made runnable
 * but could not be handed a kernel stack at dispatch time.
 */
static void
thread_stack_daemon(void)
{
    thread_t thread;

    (void)splsched();
    simple_lock(&thread_stack_lock);

    while ((thread = (thread_t)dequeue_head(&thread_stack_queue)) != THREAD_NULL) {
        simple_unlock(&thread_stack_lock);
        /* splsched */

        stack_alloc(thread);

        thread_lock(thread);
        thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
        thread_unlock(thread);
        (void)spllo();

        (void)splsched();
        simple_lock(&thread_stack_lock);
    }

    assert_wait((event_t)&thread_stack_queue, THREAD_UNINT);
    simple_unlock(&thread_stack_lock);
    /* splsched */

    thread_block((thread_continue_t)thread_stack_daemon);
    /*NOTREACHED*/
}

/*
 * thread_stack_enqueue:
 *
 * Enqueue a thread for stack allocation.
 *
 * Called at splsched.
 */
void
thread_stack_enqueue(
    thread_t thread)
{
    simple_lock(&thread_stack_lock);
    enqueue_tail(&thread_stack_queue, (queue_entry_t)thread);
    simple_unlock(&thread_stack_lock);

    thread_wakeup((event_t)&thread_stack_queue);
}

void
thread_daemon_init(void)
{
    kern_return_t result;
    thread_t thread;

    simple_lock_init(&thread_terminate_lock, 0);
    queue_init(&thread_terminate_queue);

    result = kernel_thread_start_priority((thread_continue_t)thread_terminate_daemon, NULL, MINPRI_KERNEL, &thread);
    if (result != KERN_SUCCESS)
        panic("thread_daemon_init: thread_terminate_daemon");

    thread_deallocate(thread);

    simple_lock_init(&thread_stack_lock, 0);
    queue_init(&thread_stack_queue);

    result = kernel_thread_start_priority((thread_continue_t)thread_stack_daemon, NULL, BASEPRI_PREEMPT, &thread);
    if (result != KERN_SUCCESS)
        panic("thread_daemon_init: thread_stack_daemon");

    thread_deallocate(thread);
}

/*
 * Create a new thread.
 * Doesn't start the thread running.
 *
 * On success, returns with the parent task and its processor set
 * still locked; the caller is responsible for unlocking both.
 */
static kern_return_t
thread_create_internal(
    task_t parent_task,
    integer_t priority,
    thread_continue_t continuation,
    thread_t *out_thread)
{
    thread_t new_thread;
    processor_set_t pset;
    static thread_t first_thread;

    /*
     * Allocate a thread and initialize static fields
     */
    if (first_thread == NULL)
        new_thread = first_thread = current_thread();
    else
        new_thread = (thread_t)zalloc(thread_zone);
    if (new_thread == NULL)
        return (KERN_RESOURCE_SHORTAGE);

    if (new_thread != first_thread)
        *new_thread = thread_template;

#ifdef MACH_BSD
    {
        new_thread->uthread = uthread_alloc(parent_task, new_thread);
        if (new_thread->uthread == NULL) {
            zfree(thread_zone, new_thread);
            return (KERN_RESOURCE_SHORTAGE);
        }
    }
#endif /* MACH_BSD */

    if (machine_thread_create(new_thread, parent_task) != KERN_SUCCESS) {
#ifdef MACH_BSD
        {
            void *ut = new_thread->uthread;

            new_thread->uthread = NULL;
            uthread_free(parent_task, ut, parent_task->bsd_info);
        }
#endif /* MACH_BSD */
        zfree(thread_zone, new_thread);
        return (KERN_FAILURE);
    }

    new_thread->task = parent_task;

    thread_lock_init(new_thread);
    wake_lock_init(new_thread);

    mutex_init(&new_thread->mutex, 0);

    ipc_thread_init(new_thread);
    queue_init(&new_thread->held_ulocks);
    thread_prof_init(new_thread, parent_task);

    new_thread->continuation = continuation;

    pset = parent_task->processor_set;
    assert(pset == &default_pset);
    pset_lock(pset);

    task_lock(parent_task);
    assert(parent_task->processor_set == pset);

    if (!parent_task->active ||
            (parent_task->thread_count >= THREAD_MAX &&
             parent_task != kernel_task)) {
        task_unlock(parent_task);
        pset_unlock(pset);

#ifdef MACH_BSD
        {
            void *ut = new_thread->uthread;

            new_thread->uthread = NULL;
            uthread_free(parent_task, ut, parent_task->bsd_info);
        }
#endif /* MACH_BSD */
        ipc_thread_disable(new_thread);
        ipc_thread_terminate(new_thread);
        machine_thread_destroy(new_thread);
        zfree(thread_zone, new_thread);
        return (KERN_FAILURE);
    }

    task_reference_internal(parent_task);

    /* Cache the task's map */
    new_thread->map = parent_task->map;

    /* Chain the thread onto the task's list */
    queue_enter(&parent_task->threads, new_thread, thread_t, task_threads);
    parent_task->thread_count++;

    /* So terminating threads don't need to take the task lock to decrement */
    hw_atomic_add(&parent_task->active_thread_count, 1);

    /* Associate the thread with the processor set */
    pset_add_thread(pset, new_thread);

    timer_call_setup(&new_thread->wait_timer, thread_timer_expire, new_thread);
    timer_call_setup(&new_thread->depress_timer, thread_depress_expire, new_thread);

    /* Set the thread's scheduling parameters */
    if (parent_task != kernel_task)
        new_thread->sched_mode |= TH_MODE_TIMESHARE;
    new_thread->max_priority = parent_task->max_priority;
    new_thread->task_priority = parent_task->priority;
    new_thread->priority = (priority < 0)? parent_task->priority: priority;
    if (new_thread->priority > new_thread->max_priority)
        new_thread->priority = new_thread->max_priority;
    new_thread->importance =
        new_thread->priority - new_thread->task_priority;
    new_thread->sched_stamp = sched_tick;
    new_thread->pri_shift = new_thread->processor_set->pri_shift;
    compute_priority(new_thread, FALSE);

    new_thread->active = TRUE;

    *out_thread = new_thread;

    {
        long dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4;

        kdbg_trace_data(parent_task->bsd_info, &dbg_arg2);

        KERNEL_DEBUG_CONSTANT(
            TRACEDBG_CODE(DBG_TRACE_DATA, 1) | DBG_FUNC_NONE,
            (vm_address_t)new_thread, dbg_arg2, 0, 0, 0);

        kdbg_trace_string(parent_task->bsd_info,
            &dbg_arg1, &dbg_arg2, &dbg_arg3, &dbg_arg4);

        KERNEL_DEBUG_CONSTANT(
            TRACEDBG_CODE(DBG_TRACE_STRING, 1) | DBG_FUNC_NONE,
            dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, 0);
    }

    return (KERN_SUCCESS);
}

kern_return_t
thread_create(
    task_t task,
    thread_t *new_thread)
{
    kern_return_t result;
    thread_t thread;

    if (task == TASK_NULL || task == kernel_task)
        return (KERN_INVALID_ARGUMENT);

    result = thread_create_internal(task, -1, (thread_continue_t)thread_bootstrap_return, &thread);
    if (result != KERN_SUCCESS)
        return (result);

    thread->user_stop_count = 1;
    thread_hold(thread);
    if (task->suspend_count > 0)
        thread_hold(thread);

    pset_unlock(task->processor_set);
    task_unlock(task);

    *new_thread = thread;

    return (KERN_SUCCESS);
}

kern_return_t
thread_create_running(
    register task_t task,
    int flavor,
    thread_state_t new_state,
    mach_msg_type_number_t new_state_count,
    thread_t *new_thread)
{
    register kern_return_t result;
    thread_t thread;

    if (task == TASK_NULL || task == kernel_task)
        return (KERN_INVALID_ARGUMENT);

    result = thread_create_internal(task, -1, (thread_continue_t)thread_bootstrap_return, &thread);
    if (result != KERN_SUCCESS)
        return (result);

    result = machine_thread_set_state(
        thread, flavor, new_state, new_state_count);
    if (result != KERN_SUCCESS) {
        pset_unlock(task->processor_set);
        task_unlock(task);

        thread_terminate(thread);
        thread_deallocate(thread);
        return (result);
    }

    thread_mtx_lock(thread);
    clear_wait(thread, THREAD_AWAKENED);
    thread->started = TRUE;
    thread_mtx_unlock(thread);
    pset_unlock(task->processor_set);
    task_unlock(task);

    *new_thread = thread;

    return (result);
}

/*
 * kernel_thread_create:
 *
 * Create a thread in the kernel task
 * to execute in kernel context.
 */
kern_return_t
kernel_thread_create(
    thread_continue_t continuation,
    void *parameter,
    integer_t priority,
    thread_t *new_thread)
{
    kern_return_t result;
    thread_t thread;
    task_t task = kernel_task;

    result = thread_create_internal(task, priority, continuation, &thread);
    if (result != KERN_SUCCESS)
        return (result);

    pset_unlock(task->processor_set);
    task_unlock(task);

    stack_alloc(thread);
    assert(thread->kernel_stack != 0);
    thread->reserved_stack = thread->kernel_stack;

    thread->parameter = parameter;

    *new_thread = thread;

    return (result);
}

kern_return_t
kernel_thread_start_priority(
    thread_continue_t continuation,
    void *parameter,
    integer_t priority,
    thread_t *new_thread)
{
    kern_return_t result;
    thread_t thread;

    result = kernel_thread_create(continuation, parameter, priority, &thread);
    if (result != KERN_SUCCESS)
        return (result);

    thread_mtx_lock(thread);
    clear_wait(thread, THREAD_AWAKENED);
    thread->started = TRUE;
    thread_mtx_unlock(thread);

    *new_thread = thread;

    return (result);
}

kern_return_t
kernel_thread_start(
    thread_continue_t continuation,
    void *parameter,
    thread_t *new_thread)
{
    return kernel_thread_start_priority(continuation, parameter, -1, new_thread);
}
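
/*
 * Usage sketch (added commentary; my_daemon is a hypothetical example):
 *
 *	static void
 *	my_daemon(void *parameter, wait_result_t wait_result)
 *	{
 *		for (;;) {
 *			-- wait for and process work --
 *		}
 *	}
 *
 *	thread_t thread;
 *
 *	if (kernel_thread_start((thread_continue_t)my_daemon,
 *			NULL, &thread) == KERN_SUCCESS)
 *		thread_deallocate(thread);	-- drop creation reference
 *
 * thread_daemon_init() above follows exactly this pattern, with an
 * explicit priority via kernel_thread_start_priority().
 */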

thread_t
kernel_thread(
    task_t task,
    void (*start)(void))
{
    kern_return_t result;
    thread_t thread;

    if (task != kernel_task)
        panic("kernel_thread");

    result = kernel_thread_start_priority((thread_continue_t)start, NULL, -1, &thread);
    if (result != KERN_SUCCESS)
        return (THREAD_NULL);

    thread_deallocate(thread);

    return (thread);
}

kern_return_t
thread_info_internal(
    register thread_t thread,
    thread_flavor_t flavor,
    thread_info_t thread_info_out,	/* ptr to OUT array */
    mach_msg_type_number_t *thread_info_count)	/* IN/OUT */
{
    int state, flags;
    spl_t s;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    if (flavor == THREAD_BASIC_INFO) {
        register thread_basic_info_t basic_info;

        if (*thread_info_count < THREAD_BASIC_INFO_COUNT)
            return (KERN_INVALID_ARGUMENT);

        basic_info = (thread_basic_info_t)thread_info_out;

        s = splsched();
        thread_lock(thread);

        /* fill in info */

        thread_read_times(thread, &basic_info->user_time,
            &basic_info->system_time);

        /*
         * Update lazy-evaluated scheduler info because someone wants it.
         */
        if (thread->sched_stamp != sched_tick)
            update_priority(thread);

        basic_info->sleep_time = 0;

        /*
         * To calculate cpu_usage, first correct for timer rate,
         * then for 5/8 ageing.  The correction factor [3/5] is
         * (1/(5/8) - 1).
         */
        basic_info->cpu_usage = ((uint64_t)thread->cpu_usage
            * TH_USAGE_SCALE) / sched_tick_interval;
        basic_info->cpu_usage = (basic_info->cpu_usage * 3) / 5;

        if (basic_info->cpu_usage > TH_USAGE_SCALE)
            basic_info->cpu_usage = TH_USAGE_SCALE;
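
        /*
         * Worked derivation of the 3/5 factor (added commentary): with
         * 5/8 ageing, a continuously running thread settles at the
         * steady state u = (u + t) * 5/8, i.e. u = (5/3) * t, so the
         * raw figure overshoots full scale by a factor of 5/3.
         * Multiplying by 3/5 = 1/(5/3) = (1/(5/8) - 1) brings a fully
         * busy thread back to approximately TH_USAGE_SCALE, and the
         * clamp above absorbs any residual overshoot.
         */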

        basic_info->policy = ((thread->sched_mode & TH_MODE_TIMESHARE)?
            POLICY_TIMESHARE: POLICY_RR);

        flags = 0;
        if (thread->state & TH_IDLE)
            flags |= TH_FLAGS_IDLE;

        if (!thread->kernel_stack)
            flags |= TH_FLAGS_SWAPPED;

        state = 0;
        if (thread->state & TH_TERMINATE)
            state = TH_STATE_HALTED;
        else
        if (thread->state & TH_RUN)
            state = TH_STATE_RUNNING;
        else
        if (thread->state & TH_UNINT)
            state = TH_STATE_UNINTERRUPTIBLE;
        else
        if (thread->state & TH_SUSP)
            state = TH_STATE_STOPPED;
        else
        if (thread->state & TH_WAIT)
            state = TH_STATE_WAITING;

        basic_info->run_state = state;
        basic_info->flags = flags;

        basic_info->suspend_count = thread->user_stop_count;

        thread_unlock(thread);
        splx(s);

        *thread_info_count = THREAD_BASIC_INFO_COUNT;

        return (KERN_SUCCESS);
    }
    else
    if (flavor == THREAD_SCHED_TIMESHARE_INFO) {
        policy_timeshare_info_t ts_info;

        if (*thread_info_count < POLICY_TIMESHARE_INFO_COUNT)
            return (KERN_INVALID_ARGUMENT);

        ts_info = (policy_timeshare_info_t)thread_info_out;

        s = splsched();
        thread_lock(thread);

        if (!(thread->sched_mode & TH_MODE_TIMESHARE)) {
            thread_unlock(thread);
            splx(s);

            return (KERN_INVALID_POLICY);
        }

        ts_info->depressed = (thread->sched_mode & TH_MODE_ISDEPRESSED) != 0;
        if (ts_info->depressed) {
            ts_info->base_priority = DEPRESSPRI;
            ts_info->depress_priority = thread->priority;
        }
        else {
            ts_info->base_priority = thread->priority;
            ts_info->depress_priority = -1;
        }

        ts_info->cur_priority = thread->sched_pri;
        ts_info->max_priority = thread->max_priority;

        thread_unlock(thread);
        splx(s);

        *thread_info_count = POLICY_TIMESHARE_INFO_COUNT;

        return (KERN_SUCCESS);
    }
    else
    if (flavor == THREAD_SCHED_FIFO_INFO) {
        if (*thread_info_count < POLICY_FIFO_INFO_COUNT)
            return (KERN_INVALID_ARGUMENT);

        return (KERN_INVALID_POLICY);
    }
    else
    if (flavor == THREAD_SCHED_RR_INFO) {
        policy_rr_info_t rr_info;

        if (*thread_info_count < POLICY_RR_INFO_COUNT)
            return (KERN_INVALID_ARGUMENT);

        rr_info = (policy_rr_info_t)thread_info_out;

        s = splsched();
        thread_lock(thread);

        if (thread->sched_mode & TH_MODE_TIMESHARE) {
            thread_unlock(thread);
            splx(s);

            return (KERN_INVALID_POLICY);
        }

        rr_info->depressed = (thread->sched_mode & TH_MODE_ISDEPRESSED) != 0;
        if (rr_info->depressed) {
            rr_info->base_priority = DEPRESSPRI;
            rr_info->depress_priority = thread->priority;
        }
        else {
            rr_info->base_priority = thread->priority;
            rr_info->depress_priority = -1;
        }

        rr_info->max_priority = thread->max_priority;
        rr_info->quantum = std_quantum_us / 1000;

        thread_unlock(thread);
        splx(s);

        *thread_info_count = POLICY_RR_INFO_COUNT;

        return (KERN_SUCCESS);
    }

    return (KERN_INVALID_ARGUMENT);
}
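
/*
 * Added commentary: this routine backs the Mach thread_info() interface.
 * A sketch of a typical user-space caller of the THREAD_BASIC_INFO
 * flavor handled above:
 *
 *	thread_basic_info_data_t info;
 *	mach_msg_type_number_t count = THREAD_BASIC_INFO_COUNT;
 *	kern_return_t kr;
 *
 *	kr = thread_info(mach_thread_self(), THREAD_BASIC_INFO,
 *			(thread_info_t)&info, &count);
 *
 * On success, info.cpu_usage is scaled so that TH_USAGE_SCALE
 * corresponds to 100% of one processor.
 */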

void
thread_read_times(
    thread_t thread,
    time_value_t *user_time,
    time_value_t *system_time)
{
    absolutetime_to_microtime(
        timer_grab(&thread->user_timer),
        &user_time->seconds, &user_time->microseconds);

    absolutetime_to_microtime(
        timer_grab(&thread->system_timer),
        &system_time->seconds, &system_time->microseconds);
}

kern_return_t
thread_assign(
    __unused thread_t thread,
    __unused processor_set_t new_pset)
{
    return (KERN_FAILURE);
}

/*
 * thread_assign_default:
 *
 * Special version of thread_assign for assigning threads to default
 * processor set.
 */
kern_return_t
thread_assign_default(
    thread_t thread)
{
    return (thread_assign(thread, &default_pset));
}

/*
 * thread_get_assignment
 *
 * Return current assignment for this thread.
 */
kern_return_t
thread_get_assignment(
    thread_t thread,
    processor_set_t *pset)
{
    if (thread == NULL)
        return (KERN_INVALID_ARGUMENT);

    *pset = thread->processor_set;
    pset_reference(*pset);
    return (KERN_SUCCESS);
}

/*
 * thread_wire_internal:
 *
 * Specify that the target thread must always be able
 * to run and to allocate memory.
 */
kern_return_t
thread_wire_internal(
    host_priv_t host_priv,
    thread_t thread,
    boolean_t wired,
    boolean_t *prev_state)
{
    if (host_priv == NULL || thread != current_thread())
        return (KERN_INVALID_ARGUMENT);

    assert(host_priv == &realhost);

    if (prev_state)
        *prev_state = (thread->options & TH_OPT_VMPRIV) != 0;

    if (wired) {
        if (!(thread->options & TH_OPT_VMPRIV))
            vm_page_free_reserve(1);	/* XXX */
        thread->options |= TH_OPT_VMPRIV;
    }
    else {
        if (thread->options & TH_OPT_VMPRIV)
            vm_page_free_reserve(-1);	/* XXX */
        thread->options &= ~TH_OPT_VMPRIV;
    }

    return (KERN_SUCCESS);
}


/*
 * thread_wire:
 *
 * User-api wrapper for thread_wire_internal()
 */
kern_return_t
thread_wire(
    host_priv_t host_priv,
    thread_t thread,
    boolean_t wired)
{
    return (thread_wire_internal(host_priv, thread, wired, NULL));
}

int split_funnel_off = 0;
lck_grp_t *funnel_lck_grp = LCK_GRP_NULL;
lck_grp_attr_t *funnel_lck_grp_attr;
lck_attr_t *funnel_lck_attr;

funnel_t *
funnel_alloc(
    int type)
{
    lck_mtx_t *m;
    funnel_t *fnl;

    if (funnel_lck_grp == LCK_GRP_NULL) {
        funnel_lck_grp_attr = lck_grp_attr_alloc_init();

        funnel_lck_grp = lck_grp_alloc_init("Funnel", funnel_lck_grp_attr);

        funnel_lck_attr = lck_attr_alloc_init();
    }
    if ((fnl = (funnel_t *)kalloc(sizeof(funnel_t))) != 0) {
        bzero((void *)fnl, sizeof(funnel_t));
        if ((m = lck_mtx_alloc_init(funnel_lck_grp, funnel_lck_attr)) == (lck_mtx_t *)NULL) {
            kfree(fnl, sizeof(funnel_t));
            return (THR_FUNNEL_NULL);
        }
        fnl->fnl_mutex = m;
        fnl->fnl_type = type;
    }
    return (fnl);
}

void
funnel_free(
    funnel_t *fnl)
{
    lck_mtx_free(fnl->fnl_mutex, funnel_lck_grp);
    if (fnl->fnl_oldmutex)
        lck_mtx_free(fnl->fnl_oldmutex, funnel_lck_grp);
    kfree(fnl, sizeof(funnel_t));
}

void
funnel_lock(
    funnel_t *fnl)
{
    lck_mtx_lock(fnl->fnl_mutex);
    fnl->fnl_mtxholder = current_thread();
}

void
funnel_unlock(
    funnel_t *fnl)
{
    lck_mtx_unlock(fnl->fnl_mutex);
    fnl->fnl_mtxrelease = current_thread();
}

funnel_t *
thread_funnel_get(
    void)
{
    thread_t th = current_thread();

    if (th->funnel_state & TH_FN_OWNED) {
        return (th->funnel_lock);
    }
    return (THR_FUNNEL_NULL);
}

boolean_t
thread_funnel_set(
    funnel_t *fnl,
    boolean_t funneled)
{
    thread_t cur_thread;
    boolean_t funnel_state_prev;
    boolean_t intr;

    cur_thread = current_thread();
    funnel_state_prev = ((cur_thread->funnel_state & TH_FN_OWNED) == TH_FN_OWNED);

    if (funnel_state_prev != funneled) {
        intr = ml_set_interrupts_enabled(FALSE);

        if (funneled == TRUE) {
            if (cur_thread->funnel_lock)
                panic("Funnel lock called when holding one %x", cur_thread->funnel_lock);
            KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE,
                fnl, 1, 0, 0, 0);
            funnel_lock(fnl);
            KERNEL_DEBUG(0x6032434 | DBG_FUNC_NONE,
                fnl, 1, 0, 0, 0);
            cur_thread->funnel_state |= TH_FN_OWNED;
            cur_thread->funnel_lock = fnl;
        } else {
            if (cur_thread->funnel_lock->fnl_mutex != fnl->fnl_mutex)
                panic("Funnel unlock when not holding funnel");
            cur_thread->funnel_state &= ~TH_FN_OWNED;
            KERNEL_DEBUG(0x603242c | DBG_FUNC_NONE,
                fnl, 1, 0, 0, 0);

            cur_thread->funnel_lock = THR_FUNNEL_NULL;
            funnel_unlock(fnl);
        }
        (void)ml_set_interrupts_enabled(intr);
    } else {
        /*
         * If we are trying to acquire the funnel recursively,
         * verify that it is the funnel we already hold.
         */
        if (funneled && (fnl->fnl_mutex != cur_thread->funnel_lock->fnl_mutex)) {
            panic("thread_funnel_set: already holding a different funnel");
        }
    }
    return (funnel_state_prev);
}
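
/*
 * Usage sketch (added commentary): callers acquire the funnel before
 * entering funneled code and restore the previous state on the way out.
 * With the BSD funnel (kernel_flock), for example:
 *
 *	boolean_t funnel_state;
 *
 *	funnel_state = thread_funnel_set(kernel_flock, TRUE);
 *	-- code that must run under the funnel --
 *	(void) thread_funnel_set(kernel_flock, funnel_state);
 */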

/*
 * Export routines to other components for things that are done as macros
 * within the osfmk component.
 */

#undef thread_reference
void thread_reference(thread_t thread);
void
thread_reference(
    thread_t thread)
{
    if (thread != THREAD_NULL)
        thread_reference_internal(thread);
}

#undef thread_should_halt

boolean_t
thread_should_halt(
    thread_t th)
{
    return (thread_should_halt_fast(th));
}