/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	kern/thread.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young, David Golub
 *	Date:	1986
 *
 *	Thread management primitives implementation.
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Computer Systems Laboratory (CSL).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
 * improvements that they make and grant CSL redistribution rights.
 *
 */

#include <mach_host.h>
#include <mach_prof.h>

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/policy.h>
#include <mach/thread_info.h>
#include <mach/thread_special_ports.h>
#include <mach/thread_status.h>
#include <mach/time_value.h>
#include <mach/vm_param.h>

#include <machine/thread.h>

#include <kern/kern_types.h>
#include <kern/kalloc.h>
#include <kern/cpu_data.h>
#include <kern/counters.h>
#include <kern/ipc_mig.h>
#include <kern/ipc_tt.h>
#include <kern/mach_param.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/sync_lock.h>
#include <kern/syscall_subr.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/host.h>
#include <kern/zalloc.h>
#include <kern/profile.h>
#include <kern/assert.h>

#include <ipc/ipc_kmsg.h>
#include <ipc/ipc_port.h>

#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>

#include <sys/kdebug.h>

/*
 * Exported interfaces
 */
#include <mach/task_server.h>
#include <mach/thread_act_server.h>
#include <mach/mach_host_server.h>
#include <mach/host_priv_server.h>

static struct zone *thread_zone;

decl_simple_lock_data(static,thread_stack_lock)
static queue_head_t thread_stack_queue;

decl_simple_lock_data(static,thread_terminate_lock)
static queue_head_t thread_terminate_queue;

static struct thread thread_template, init_thread;

#ifdef MACH_BSD
extern void proc_exit(void *);
#endif	/* MACH_BSD */

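/*
 * thread_bootstrap:
 *
 * Fill in the template used to fast-initialize new threads,
 * and install the statically allocated init thread as the
 * current thread.  Called once during early startup, before
 * thread_init().
 */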
void
thread_bootstrap(void)
{
	/*
	 * Fill in a template thread for fast initialization.
	 */

	thread_template.runq = RUN_QUEUE_NULL;

	thread_template.ref_count = 2;

	thread_template.reason = AST_NONE;
	thread_template.at_safe_point = FALSE;
	thread_template.wait_event = NO_EVENT64;
	thread_template.wait_queue = WAIT_QUEUE_NULL;
	thread_template.wait_result = THREAD_WAITING;
	thread_template.options = THREAD_ABORTSAFE;
	thread_template.state = TH_WAIT | TH_UNINT;
	thread_template.wake_active = FALSE;
	thread_template.continuation = THREAD_CONTINUE_NULL;
	thread_template.parameter = NULL;

	thread_template.importance = 0;
	thread_template.sched_mode = 0;
	thread_template.safe_mode = 0;
	thread_template.safe_release = 0;

	thread_template.priority = 0;
	thread_template.sched_pri = 0;
	thread_template.max_priority = 0;
	thread_template.task_priority = 0;
	thread_template.promotions = 0;
	thread_template.pending_promoter_index = 0;
	thread_template.pending_promoter[0] =
		thread_template.pending_promoter[1] = NULL;

	thread_template.realtime.deadline = UINT64_MAX;

	thread_template.current_quantum = 0;

	thread_template.computation_metered = 0;
	thread_template.computation_epoch = 0;

	thread_template.sched_stamp = 0;
	thread_template.sched_usage = 0;
	thread_template.pri_shift = INT8_MAX;
	thread_template.cpu_usage = thread_template.cpu_delta = 0;

	thread_template.bound_processor = PROCESSOR_NULL;
	thread_template.last_processor = PROCESSOR_NULL;
	thread_template.last_switch = 0;

	timer_init(&thread_template.user_timer);
	timer_init(&thread_template.system_timer);
	thread_template.user_timer_save = 0;
	thread_template.system_timer_save = 0;

	thread_template.wait_timer_is_set = FALSE;
	thread_template.wait_timer_active = 0;

	thread_template.depress_timer_active = 0;

	thread_template.processor_set = PROCESSOR_SET_NULL;

	thread_template.special_handler.handler = special_handler;
	thread_template.special_handler.next = 0;

#if	MACH_HOST
	thread_template.may_assign = TRUE;
	thread_template.assign_active = FALSE;
#endif	/* MACH_HOST */
	thread_template.funnel_lock = THR_FUNNEL_NULL;
	thread_template.funnel_state = 0;
	thread_template.recover = (vm_offset_t)NULL;

	init_thread = thread_template;
	machine_set_current_thread(&init_thread);
}

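/*
 * thread_init:
 *
 * Allocate the zone from which thread structures are carved,
 * initialize the kernel stack subsystem, and perform any
 * machine-dependent per-thread setup.
 */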
void
thread_init(void)
{
	thread_zone = zinit(
			sizeof(struct thread),
			THREAD_MAX * sizeof(struct thread),
			THREAD_CHUNK * sizeof(struct thread),
			"threads");

	stack_init();

	/*
	 * Initialize any machine-dependent
	 * per-thread structures necessary.
	 */
	machine_thread_init();
}

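/*
 * thread_terminate_continue:
 *
 * A thread that has blocked for termination should never
 * resume; this continuation exists only to catch that error.
 */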
static void
thread_terminate_continue(void)
{
	panic("thread_terminate_continue");
	/*NOTREACHED*/
}

/*
 * thread_terminate_self:
 *
 * Perform the self-directed portion of thread termination:
 * cancel outstanding timers, release ulocks and IPC state,
 * run BSD process exit for the last thread of a task, then
 * mark the thread TH_TERMINATE and block for final clean up
 * by the terminate daemon.
 */
void
thread_terminate_self(void)
{
	thread_t thread = current_thread();
	task_t task;
	spl_t s;

	s = splsched();
	thread_lock(thread);

	/*
	 * Cancel priority depression, reset scheduling parameters,
	 * and wait for concurrent expirations on other processors.
	 */
	if (thread->sched_mode & TH_MODE_ISDEPRESSED) {
		thread->sched_mode &= ~TH_MODE_ISDEPRESSED;

		if (timer_call_cancel(&thread->depress_timer))
			thread->depress_timer_active--;
	}

	thread_policy_reset(thread);

	while (thread->depress_timer_active > 0) {
		thread_unlock(thread);
		splx(s);

		delay(1);

		s = splsched();
		thread_lock(thread);
	}

	thread_unlock(thread);
	splx(s);

	thread_mtx_lock(thread);

	ulock_release_all(thread);

	ipc_thread_disable(thread);

	thread_mtx_unlock(thread);

	/*
	 * If we are the last thread to terminate and the task is
	 * associated with a BSD process, perform BSD process exit.
	 */
	task = thread->task;
	if (hw_atomic_sub(&task->active_thread_count, 1) == 0 &&
	    task->bsd_info != NULL)
		proc_exit(task->bsd_info);

	s = splsched();
	thread_lock(thread);

	/*
	 * Cancel wait timer, and wait for
	 * concurrent expirations.
	 */
	if (thread->wait_timer_is_set) {
		thread->wait_timer_is_set = FALSE;

		if (timer_call_cancel(&thread->wait_timer))
			thread->wait_timer_active--;
	}

	while (thread->wait_timer_active > 0) {
		thread_unlock(thread);
		splx(s);

		delay(1);

		s = splsched();
		thread_lock(thread);
	}

	/*
	 * If there is a reserved stack, release it.
	 */
	if (thread->reserved_stack != 0) {
		if (thread->reserved_stack != thread->kernel_stack)
			stack_free_stack(thread->reserved_stack);
		thread->reserved_stack = 0;
	}

	/*
	 * Mark thread as terminating, and block.
	 */
	thread->state |= TH_TERMINATE;
	thread_mark_wait_locked(thread, THREAD_UNINT);
	assert(thread->promotions == 0);
	thread_unlock(thread);
	/* splsched */

	thread_block((thread_continue_t)thread_terminate_continue);
	/*NOTREACHED*/
}

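/*
 * thread_deallocate:
 *
 * Drop a reference on the target thread.  On release of the
 * last reference, tear down remaining IPC, uthread, stack,
 * and machine-dependent state, drop the task and processor
 * set references, and free the structure back to the
 * thread zone.
 */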
void
thread_deallocate(
	thread_t thread)
{
	processor_set_t pset;
	task_t task;

	if (thread == THREAD_NULL)
		return;

	if (thread_deallocate_internal(thread) > 0)
		return;

	ipc_thread_terminate(thread);

	task = thread->task;

#ifdef MACH_BSD
	{
		void *ut = thread->uthread;

		thread->uthread = NULL;
		uthread_free(task, ut, task->bsd_info);
	}
#endif	/* MACH_BSD */

	task_deallocate(task);

	pset = thread->processor_set;
	pset_deallocate(pset);

	if (thread->kernel_stack != 0)
		stack_free(thread);

	machine_thread_destroy(thread);

	zfree(thread_zone, thread);
}

/*
 * thread_terminate_daemon:
 *
 * Perform final clean up for terminating threads.
 */
static void
thread_terminate_daemon(void)
{
	thread_t thread;
	task_t task;
	processor_set_t pset;

	(void)splsched();
	simple_lock(&thread_terminate_lock);

	while ((thread = (thread_t)dequeue_head(&thread_terminate_queue)) != THREAD_NULL) {
		simple_unlock(&thread_terminate_lock);
		(void)spllo();

		task = thread->task;

		task_lock(task);
		task->total_user_time += timer_grab(&thread->user_timer);
		task->total_system_time += timer_grab(&thread->system_timer);

		queue_remove(&task->threads, thread, thread_t, task_threads);
		task->thread_count--;
		task_unlock(task);

		pset = thread->processor_set;

		pset_lock(pset);
		pset_remove_thread(pset, thread);
		pset_unlock(pset);

		thread_deallocate(thread);

		(void)splsched();
		simple_lock(&thread_terminate_lock);
	}

	assert_wait((event_t)&thread_terminate_queue, THREAD_UNINT);
	simple_unlock(&thread_terminate_lock);
	/* splsched */

	thread_block((thread_continue_t)thread_terminate_daemon);
	/*NOTREACHED*/
}

/*
 * thread_terminate_enqueue:
 *
 * Enqueue a terminating thread for final disposition.
 *
 * Called at splsched.
 */
void
thread_terminate_enqueue(
	thread_t thread)
{
	simple_lock(&thread_terminate_lock);
	enqueue_tail(&thread_terminate_queue, (queue_entry_t)thread);
	simple_unlock(&thread_terminate_lock);

	thread_wakeup((event_t)&thread_terminate_queue);
}

/*
 * thread_stack_daemon:
 *
 * Perform stack allocation on behalf of threads that
 * could not be dispatched for lack of a kernel stack.
 */
static void
thread_stack_daemon(void)
{
	thread_t thread;

	(void)splsched();
	simple_lock(&thread_stack_lock);

	while ((thread = (thread_t)dequeue_head(&thread_stack_queue)) != THREAD_NULL) {
		simple_unlock(&thread_stack_lock);
		/* splsched */

		stack_alloc(thread);

		thread_lock(thread);
		thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
		thread_unlock(thread);
		(void)spllo();

		(void)splsched();
		simple_lock(&thread_stack_lock);
	}

	assert_wait((event_t)&thread_stack_queue, THREAD_UNINT);
	simple_unlock(&thread_stack_lock);
	/* splsched */

	thread_block((thread_continue_t)thread_stack_daemon);
	/*NOTREACHED*/
}

/*
 * thread_stack_enqueue:
 *
 * Enqueue a thread for stack allocation.
 *
 * Called at splsched.
 */
void
thread_stack_enqueue(
	thread_t thread)
{
	simple_lock(&thread_stack_lock);
	enqueue_tail(&thread_stack_queue, (queue_entry_t)thread);
	simple_unlock(&thread_stack_lock);

	thread_wakeup((event_t)&thread_stack_queue);
}

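/*
 * thread_daemon_init:
 *
 * Initialize the termination and stack allocation queues,
 * and start the kernel daemon threads that service them.
 */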
void
thread_daemon_init(void)
{
	kern_return_t result;
	thread_t thread;

	simple_lock_init(&thread_terminate_lock, 0);
	queue_init(&thread_terminate_queue);

	result = kernel_thread_start_priority((thread_continue_t)thread_terminate_daemon, NULL, MINPRI_KERNEL, &thread);
	if (result != KERN_SUCCESS)
		panic("thread_daemon_init: thread_terminate_daemon");

	thread_deallocate(thread);

	simple_lock_init(&thread_stack_lock, 0);
	queue_init(&thread_stack_queue);

	result = kernel_thread_start_priority((thread_continue_t)thread_stack_daemon, NULL, BASEPRI_PREEMPT, &thread);
	if (result != KERN_SUCCESS)
		panic("thread_daemon_init: thread_stack_daemon");

	thread_deallocate(thread);
}

/*
 * thread_create_internal:
 *
 * Create a new thread; doesn't start the thread running.
 * On success, returns with the task and processor set still
 * locked; the caller is responsible for unlocking them.
 */
static kern_return_t
thread_create_internal(
	task_t parent_task,
	integer_t priority,
	thread_continue_t continuation,
	thread_t *out_thread)
{
	thread_t new_thread;
	processor_set_t pset;
	static thread_t first_thread;

	/*
	 * Allocate a thread and initialize static fields
	 */
	if (first_thread == NULL)
		new_thread = first_thread = current_thread();
	else
		new_thread = (thread_t)zalloc(thread_zone);
	if (new_thread == NULL)
		return (KERN_RESOURCE_SHORTAGE);

	if (new_thread != first_thread)
		*new_thread = thread_template;

#ifdef MACH_BSD
	{
		new_thread->uthread = uthread_alloc(parent_task, new_thread);
		if (new_thread->uthread == NULL) {
			zfree(thread_zone, new_thread);
			return (KERN_RESOURCE_SHORTAGE);
		}
	}
#endif	/* MACH_BSD */

	if (machine_thread_create(new_thread, parent_task) != KERN_SUCCESS) {
#ifdef MACH_BSD
		{
			void *ut = new_thread->uthread;

			new_thread->uthread = NULL;
			uthread_free(parent_task, ut, parent_task->bsd_info);
		}
#endif	/* MACH_BSD */
		zfree(thread_zone, new_thread);
		return (KERN_FAILURE);
	}

	new_thread->task = parent_task;

	thread_lock_init(new_thread);
	wake_lock_init(new_thread);

	mutex_init(&new_thread->mutex, 0);

	ipc_thread_init(new_thread);
	queue_init(&new_thread->held_ulocks);
	thread_prof_init(new_thread, parent_task);

	new_thread->continuation = continuation;

	pset = parent_task->processor_set;
	assert(pset == &default_pset);
	pset_lock(pset);

	task_lock(parent_task);
	assert(parent_task->processor_set == pset);

	if (!parent_task->active ||
	    (parent_task->thread_count >= THREAD_MAX &&
	     parent_task != kernel_task)) {
		task_unlock(parent_task);
		pset_unlock(pset);

#ifdef MACH_BSD
		{
			void *ut = new_thread->uthread;

			new_thread->uthread = NULL;
			uthread_free(parent_task, ut, parent_task->bsd_info);
		}
#endif	/* MACH_BSD */
		ipc_thread_disable(new_thread);
		ipc_thread_terminate(new_thread);
		machine_thread_destroy(new_thread);
		zfree(thread_zone, new_thread);
		return (KERN_FAILURE);
	}

	task_reference_internal(parent_task);

	/* Cache the task's map */
	new_thread->map = parent_task->map;

	/* Chain the thread onto the task's list */
	queue_enter(&parent_task->threads, new_thread, thread_t, task_threads);
	parent_task->thread_count++;

	/* So terminating threads don't need to take the task lock to decrement */
	hw_atomic_add(&parent_task->active_thread_count, 1);

	/* Associate the thread with the processor set */
	pset_add_thread(pset, new_thread);

	timer_call_setup(&new_thread->wait_timer, thread_timer_expire, new_thread);
	timer_call_setup(&new_thread->depress_timer, thread_depress_expire, new_thread);

	/* Set the thread's scheduling parameters */
	if (parent_task != kernel_task)
		new_thread->sched_mode |= TH_MODE_TIMESHARE;
	new_thread->max_priority = parent_task->max_priority;
	new_thread->task_priority = parent_task->priority;
	new_thread->priority = (priority < 0)? parent_task->priority: priority;
	if (new_thread->priority > new_thread->max_priority)
		new_thread->priority = new_thread->max_priority;
	new_thread->importance =
		new_thread->priority - new_thread->task_priority;
	new_thread->sched_stamp = sched_tick;
	new_thread->pri_shift = new_thread->processor_set->pri_shift;
	compute_priority(new_thread, FALSE);

	new_thread->active = TRUE;

	*out_thread = new_thread;

	{
		long dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4;

		kdbg_trace_data(parent_task->bsd_info, &dbg_arg2);

		KERNEL_DEBUG_CONSTANT(
			TRACEDBG_CODE(DBG_TRACE_DATA, 1) | DBG_FUNC_NONE,
			(vm_address_t)new_thread, dbg_arg2, 0, 0, 0);

		kdbg_trace_string(parent_task->bsd_info,
			&dbg_arg1, &dbg_arg2, &dbg_arg3, &dbg_arg4);

		KERNEL_DEBUG_CONSTANT(
			TRACEDBG_CODE(DBG_TRACE_STRING, 1) | DBG_FUNC_NONE,
			dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, 0);
	}

	return (KERN_SUCCESS);
}

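/*
 * thread_create:
 *
 * Create a new thread in the target task.  The thread is
 * created held (suspended); the caller must resume it to
 * set it running.  Not permitted on the kernel task.
 */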
kern_return_t
thread_create(
	task_t task,
	thread_t *new_thread)
{
	kern_return_t result;
	thread_t thread;

	if (task == TASK_NULL || task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	result = thread_create_internal(task, -1, (thread_continue_t)thread_bootstrap_return, &thread);
	if (result != KERN_SUCCESS)
		return (result);

	thread->user_stop_count = 1;
	thread_hold(thread);
	if (task->suspend_count > 0)
		thread_hold(thread);

	pset_unlock(task->processor_set);
	task_unlock(task);

	*new_thread = thread;

	return (KERN_SUCCESS);
}

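/*
 * thread_create_running:
 *
 * Create a new thread in the target task, set its machine
 * state from the supplied flavor, and start it running.
 */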
kern_return_t
thread_create_running(
	register task_t task,
	int flavor,
	thread_state_t new_state,
	mach_msg_type_number_t new_state_count,
	thread_t *new_thread)
{
	register kern_return_t result;
	thread_t thread;

	if (task == TASK_NULL || task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	result = thread_create_internal(task, -1, (thread_continue_t)thread_bootstrap_return, &thread);
	if (result != KERN_SUCCESS)
		return (result);

	result = machine_thread_set_state(
				thread, flavor, new_state, new_state_count);
	if (result != KERN_SUCCESS) {
		pset_unlock(task->processor_set);
		task_unlock(task);

		thread_terminate(thread);
		thread_deallocate(thread);
		return (result);
	}

	thread_mtx_lock(thread);
	clear_wait(thread, THREAD_AWAKENED);
	thread->started = TRUE;
	thread_mtx_unlock(thread);
	pset_unlock(task->processor_set);
	task_unlock(task);

	*new_thread = thread;

	return (result);
}

/*
 * kernel_thread_create:
 *
 * Create a thread in the kernel task
 * to execute in kernel context.
 */
kern_return_t
kernel_thread_create(
	thread_continue_t continuation,
	void *parameter,
	integer_t priority,
	thread_t *new_thread)
{
	kern_return_t result;
	thread_t thread;
	task_t task = kernel_task;

	result = thread_create_internal(task, priority, continuation, &thread);
	if (result != KERN_SUCCESS)
		return (result);

	pset_unlock(task->processor_set);
	task_unlock(task);

#if !defined(i386)
	stack_alloc(thread);
	assert(thread->kernel_stack != 0);
	thread->reserved_stack = thread->kernel_stack;
#endif	/* !defined(i386) */

	thread->parameter = parameter;

	*new_thread = thread;

	return (result);
}

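/*
 * kernel_thread_start_priority:
 *
 * Create a kernel thread at the specified priority and
 * start it running.  The caller receives a reference on
 * the new thread, to be released with thread_deallocate()
 * when no longer needed.
 */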
kern_return_t
kernel_thread_start_priority(
	thread_continue_t continuation,
	void *parameter,
	integer_t priority,
	thread_t *new_thread)
{
	kern_return_t result;
	thread_t thread;

	result = kernel_thread_create(continuation, parameter, priority, &thread);
	if (result != KERN_SUCCESS)
		return (result);

	thread_mtx_lock(thread);
	clear_wait(thread, THREAD_AWAKENED);
	thread->started = TRUE;
	thread_mtx_unlock(thread);

	*new_thread = thread;

	return (result);
}

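/*
 * kernel_thread_start:
 *
 * Create and start a kernel thread at the default priority;
 * equivalent to kernel_thread_start_priority() with a
 * priority of -1.
 *
 * Typical usage (an illustrative sketch only; `demo_daemon'
 * is a hypothetical continuation function, following the
 * pattern used by thread_daemon_init() above):
 *
 *	result = kernel_thread_start((thread_continue_t)demo_daemon,
 *					NULL, &thread);
 *	if (result == KERN_SUCCESS)
 *		thread_deallocate(thread);
 */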
kern_return_t
kernel_thread_start(
	thread_continue_t continuation,
	void *parameter,
	thread_t *new_thread)
{
	return kernel_thread_start_priority(continuation, parameter, -1, new_thread);
}

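/*
 * kernel_thread:
 *
 * Legacy interface; create and start a kernel thread and
 * return it without a caller-held reference.  The task
 * argument must be the kernel task.
 */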
thread_t
kernel_thread(
	task_t task,
	void (*start)(void))
{
	kern_return_t result;
	thread_t thread;

	if (task != kernel_task)
		panic("kernel_thread");

	result = kernel_thread_start_priority((thread_continue_t)start, NULL, -1, &thread);
	if (result != KERN_SUCCESS)
		return (THREAD_NULL);

	thread_deallocate(thread);

	return (thread);
}

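/*
 * thread_info_internal:
 *
 * Return information about the target thread for the
 * requested flavor: THREAD_BASIC_INFO or one of the
 * scheduling policy flavors.
 */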
kern_return_t
thread_info_internal(
	register thread_t thread,
	thread_flavor_t flavor,
	thread_info_t thread_info_out,	/* ptr to OUT array */
	mach_msg_type_number_t *thread_info_count)	/*IN/OUT*/
{
	int state, flags;
	spl_t s;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (flavor == THREAD_BASIC_INFO) {
		register thread_basic_info_t basic_info;

		if (*thread_info_count < THREAD_BASIC_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		basic_info = (thread_basic_info_t)thread_info_out;

		s = splsched();
		thread_lock(thread);

		/* fill in info */

		thread_read_times(thread, &basic_info->user_time,
						&basic_info->system_time);

		/*
		 * Update lazy-evaluated scheduler info because someone wants it.
		 */
		if (thread->sched_stamp != sched_tick)
			update_priority(thread);

		basic_info->sleep_time = 0;

		/*
		 * To calculate cpu_usage, first correct for timer rate,
		 * then for 5/8 ageing.  The correction factor [3/5] is
		 * (1/(5/8) - 1).
		 */
		basic_info->cpu_usage = ((uint64_t)thread->cpu_usage
						* TH_USAGE_SCALE) / sched_tick_interval;
		basic_info->cpu_usage = (basic_info->cpu_usage * 3) / 5;

		if (basic_info->cpu_usage > TH_USAGE_SCALE)
			basic_info->cpu_usage = TH_USAGE_SCALE;

		basic_info->policy = ((thread->sched_mode & TH_MODE_TIMESHARE)?
						POLICY_TIMESHARE: POLICY_RR);

		flags = 0;
		if (thread->state & TH_IDLE)
			flags |= TH_FLAGS_IDLE;

		if (!thread->kernel_stack)
			flags |= TH_FLAGS_SWAPPED;

		state = 0;
		if (thread->state & TH_TERMINATE)
			state = TH_STATE_HALTED;
		else
		if (thread->state & TH_RUN)
			state = TH_STATE_RUNNING;
		else
		if (thread->state & TH_UNINT)
			state = TH_STATE_UNINTERRUPTIBLE;
		else
		if (thread->state & TH_SUSP)
			state = TH_STATE_STOPPED;
		else
		if (thread->state & TH_WAIT)
			state = TH_STATE_WAITING;

		basic_info->run_state = state;
		basic_info->flags = flags;

		basic_info->suspend_count = thread->user_stop_count;

		thread_unlock(thread);
		splx(s);

		*thread_info_count = THREAD_BASIC_INFO_COUNT;

		return (KERN_SUCCESS);
	}
	else
	if (flavor == THREAD_SCHED_TIMESHARE_INFO) {
		policy_timeshare_info_t ts_info;

		if (*thread_info_count < POLICY_TIMESHARE_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		ts_info = (policy_timeshare_info_t)thread_info_out;

		s = splsched();
		thread_lock(thread);

		if (!(thread->sched_mode & TH_MODE_TIMESHARE)) {
			thread_unlock(thread);
			splx(s);

			return (KERN_INVALID_POLICY);
		}

		ts_info->depressed = (thread->sched_mode & TH_MODE_ISDEPRESSED) != 0;
		if (ts_info->depressed) {
			ts_info->base_priority = DEPRESSPRI;
			ts_info->depress_priority = thread->priority;
		}
		else {
			ts_info->base_priority = thread->priority;
			ts_info->depress_priority = -1;
		}

		ts_info->cur_priority = thread->sched_pri;
		ts_info->max_priority = thread->max_priority;

		thread_unlock(thread);
		splx(s);

		*thread_info_count = POLICY_TIMESHARE_INFO_COUNT;

		return (KERN_SUCCESS);
	}
	else
	if (flavor == THREAD_SCHED_FIFO_INFO) {
		if (*thread_info_count < POLICY_FIFO_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		return (KERN_INVALID_POLICY);
	}
	else
	if (flavor == THREAD_SCHED_RR_INFO) {
		policy_rr_info_t rr_info;

		if (*thread_info_count < POLICY_RR_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		rr_info = (policy_rr_info_t)thread_info_out;

		s = splsched();
		thread_lock(thread);

		if (thread->sched_mode & TH_MODE_TIMESHARE) {
			thread_unlock(thread);
			splx(s);

			return (KERN_INVALID_POLICY);
		}

		rr_info->depressed = (thread->sched_mode & TH_MODE_ISDEPRESSED) != 0;
		if (rr_info->depressed) {
			rr_info->base_priority = DEPRESSPRI;
			rr_info->depress_priority = thread->priority;
		}
		else {
			rr_info->base_priority = thread->priority;
			rr_info->depress_priority = -1;
		}

		rr_info->max_priority = thread->max_priority;
		rr_info->quantum = std_quantum_us / 1000;

		thread_unlock(thread);
		splx(s);

		*thread_info_count = POLICY_RR_INFO_COUNT;

		return (KERN_SUCCESS);
	}

	return (KERN_INVALID_ARGUMENT);
}

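/*
 * thread_read_times:
 *
 * Return the accumulated user and system time for the
 * target thread, at microsecond granularity.
 */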
void
thread_read_times(
	thread_t thread,
	time_value_t *user_time,
	time_value_t *system_time)
{
	absolutetime_to_microtime(
		timer_grab(&thread->user_timer),
		&user_time->seconds, &user_time->microseconds);

	absolutetime_to_microtime(
		timer_grab(&thread->system_timer),
		&system_time->seconds, &system_time->microseconds);
}

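/*
 * thread_assign:
 *
 * Thread assignment to processor sets is not supported;
 * always fails.
 */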
kern_return_t
thread_assign(
	__unused thread_t thread,
	__unused processor_set_t new_pset)
{
	return (KERN_FAILURE);
}

/*
 * thread_assign_default:
 *
 * Special version of thread_assign for assigning threads to default
 * processor set.
 */
kern_return_t
thread_assign_default(
	thread_t thread)
{
	return (thread_assign(thread, &default_pset));
}

/*
 * thread_get_assignment:
 *
 * Return current assignment for this thread.
 */
kern_return_t
thread_get_assignment(
	thread_t thread,
	processor_set_t *pset)
{
	if (thread == NULL)
		return (KERN_INVALID_ARGUMENT);

	*pset = thread->processor_set;
	pset_reference(*pset);
	return (KERN_SUCCESS);
}

/*
 * thread_wire_internal:
 *
 * Specify that the target thread must always be able
 * to run and to allocate memory.
 */
kern_return_t
thread_wire_internal(
	host_priv_t host_priv,
	thread_t thread,
	boolean_t wired,
	boolean_t *prev_state)
{
	if (host_priv == NULL || thread != current_thread())
		return (KERN_INVALID_ARGUMENT);

	assert(host_priv == &realhost);

	if (prev_state)
		*prev_state = (thread->options & TH_OPT_VMPRIV) != 0;

	if (wired) {
		if (!(thread->options & TH_OPT_VMPRIV))
			vm_page_free_reserve(1);	/* XXX */
		thread->options |= TH_OPT_VMPRIV;
	}
	else {
		if (thread->options & TH_OPT_VMPRIV)
			vm_page_free_reserve(-1);	/* XXX */
		thread->options &= ~TH_OPT_VMPRIV;
	}

	return (KERN_SUCCESS);
}


/*
 * thread_wire:
 *
 * User API wrapper for thread_wire_internal().
 */
kern_return_t
thread_wire(
	host_priv_t host_priv,
	thread_t thread,
	boolean_t wired)
{
	return (thread_wire_internal(host_priv, thread, wired, NULL));
}

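/*
 * Funnels.
 *
 * A funnel is a coarse-grained lock used to serialize entry
 * into kernel components that are not multiprocessor safe.
 * A thread acquires and releases a funnel through
 * thread_funnel_set(), and may hold at most one at a time.
 */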
int split_funnel_off = 0;
lck_grp_t *funnel_lck_grp = LCK_GRP_NULL;
lck_grp_attr_t *funnel_lck_grp_attr;
lck_attr_t *funnel_lck_attr;

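/*
 * funnel_alloc:
 *
 * Allocate and initialize a funnel of the given type,
 * creating the funnel lock group and attributes on
 * first use.
 */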
funnel_t *
funnel_alloc(
	int type)
{
	lck_mtx_t *m;
	funnel_t *fnl;

	if (funnel_lck_grp == LCK_GRP_NULL) {
		funnel_lck_grp_attr = lck_grp_attr_alloc_init();
		//lck_grp_attr_setstat(funnel_lck_grp_attr);

		funnel_lck_grp = lck_grp_alloc_init("Funnel", funnel_lck_grp_attr);

		funnel_lck_attr = lck_attr_alloc_init();
		//lck_attr_setdebug(funnel_lck_attr);
	}
	if ((fnl = (funnel_t *)kalloc(sizeof(funnel_t))) != 0) {
		bzero((void *)fnl, sizeof(funnel_t));
		if ((m = lck_mtx_alloc_init(funnel_lck_grp, funnel_lck_attr)) == (lck_mtx_t *)NULL) {
			kfree(fnl, sizeof(funnel_t));
			return (THR_FUNNEL_NULL);
		}
		fnl->fnl_mutex = m;
		fnl->fnl_type = type;
	}
	return (fnl);
}

void
funnel_free(
	funnel_t * fnl)
{
	lck_mtx_free(fnl->fnl_mutex, funnel_lck_grp);
	if (fnl->fnl_oldmutex)
		lck_mtx_free(fnl->fnl_oldmutex, funnel_lck_grp);
	kfree(fnl, sizeof(funnel_t));
}

void
funnel_lock(
	funnel_t * fnl)
{
	lck_mtx_lock(fnl->fnl_mutex);
	fnl->fnl_mtxholder = current_thread();
}

void
funnel_unlock(
	funnel_t * fnl)
{
	lck_mtx_unlock(fnl->fnl_mutex);
	fnl->fnl_mtxrelease = current_thread();
}

funnel_t *
thread_funnel_get(
	void)
{
	thread_t th = current_thread();

	if (th->funnel_state & TH_FN_OWNED) {
		return (th->funnel_lock);
	}
	return (THR_FUNNEL_NULL);
}

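/*
 * thread_funnel_set:
 *
 * Acquire (funneled == TRUE) or release (funneled == FALSE)
 * the given funnel on behalf of the current thread, and
 * return the previous funnel ownership state.
 */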
boolean_t
thread_funnel_set(
	funnel_t * fnl,
	boolean_t funneled)
{
	thread_t cur_thread;
	boolean_t funnel_state_prev;
	boolean_t intr;

	cur_thread = current_thread();
	funnel_state_prev = ((cur_thread->funnel_state & TH_FN_OWNED) == TH_FN_OWNED);

	if (funnel_state_prev != funneled) {
		intr = ml_set_interrupts_enabled(FALSE);

		if (funneled == TRUE) {
			if (cur_thread->funnel_lock)
				panic("Funnel lock called when holding one %x", cur_thread->funnel_lock);
			KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE,
						fnl, 1, 0, 0, 0);
			funnel_lock(fnl);
			KERNEL_DEBUG(0x6032434 | DBG_FUNC_NONE,
						fnl, 1, 0, 0, 0);
			cur_thread->funnel_state |= TH_FN_OWNED;
			cur_thread->funnel_lock = fnl;
		} else {
			if (cur_thread->funnel_lock->fnl_mutex != fnl->fnl_mutex)
				panic("Funnel unlock when not holding funnel");
			cur_thread->funnel_state &= ~TH_FN_OWNED;
			KERNEL_DEBUG(0x603242c | DBG_FUNC_NONE,
						fnl, 1, 0, 0, 0);

			cur_thread->funnel_lock = THR_FUNNEL_NULL;
			funnel_unlock(fnl);
		}
		(void)ml_set_interrupts_enabled(intr);
	} else {
		/*
		 * If we are trying to acquire the funnel recursively,
		 * verify that it is the funnel we already hold.
		 */
		if (funneled && (fnl->fnl_mutex != cur_thread->funnel_lock->fnl_mutex)) {
			panic("thread_funnel_set: already holding a different funnel");
		}
	}
	return (funnel_state_prev);
}


/*
 * Export routines to other components for things that are done as macros
 * within the osfmk component.
 */

#undef thread_reference
void thread_reference(thread_t thread);
void
thread_reference(
	thread_t thread)
{
	if (thread != THREAD_NULL)
		thread_reference_internal(thread);
}

#undef thread_should_halt

boolean_t
thread_should_halt(
	thread_t th)
{
	return (thread_should_halt_fast(th));
}