]> git.saurik.com Git - apple/xnu.git/blame - osfmk/kern/thread.c
xnu-792.tar.gz
[apple/xnu.git] / osfmk / kern / thread.c
CommitLineData
1c79356b 1/*
91447636 2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
1c79356b
A
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
e5568f75
A
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
1c79356b 11 *
e5568f75
A
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
1c79356b
A
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
e5568f75
A
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
1c79356b
A
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22/*
23 * @OSF_FREE_COPYRIGHT@
24 */
25/*
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
28 * All Rights Reserved.
29 *
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
35 *
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 *
40 * Carnegie Mellon requests users of this software to return to
41 *
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
46 *
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
49 */
50/*
51 */
52/*
53 * File: kern/thread.c
54 * Author: Avadis Tevanian, Jr., Michael Wayne Young, David Golub
55 * Date: 1986
56 *
91447636 57 * Thread management primitives implementation.
1c79356b
A
58 */
59/*
60 * Copyright (c) 1993 The University of Utah and
61 * the Computer Systems Laboratory (CSL). All rights reserved.
62 *
63 * Permission to use, copy, modify and distribute this software and its
64 * documentation is hereby granted, provided that both the copyright
65 * notice and this permission notice appear in all copies of the
66 * software, derivative works or modified versions, and any portions
67 * thereof, and that both notices appear in supporting documentation.
68 *
69 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
70 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
71 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
72 *
73 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
74 * improvements that they make and grant CSL redistribution rights.
75 *
76 */
77
1c79356b 78#include <mach_host.h>
1c79356b 79#include <mach_prof.h>
1c79356b 80
91447636 81#include <mach/mach_types.h>
1c79356b
A
82#include <mach/boolean.h>
83#include <mach/policy.h>
84#include <mach/thread_info.h>
85#include <mach/thread_special_ports.h>
86#include <mach/thread_status.h>
87#include <mach/time_value.h>
88#include <mach/vm_param.h>
91447636
A
89
90#include <machine/thread.h>
91
92#include <kern/kern_types.h>
93#include <kern/kalloc.h>
1c79356b
A
94#include <kern/cpu_data.h>
95#include <kern/counters.h>
1c79356b
A
96#include <kern/ipc_mig.h>
97#include <kern/ipc_tt.h>
98#include <kern/mach_param.h>
99#include <kern/machine.h>
100#include <kern/misc_protos.h>
101#include <kern/processor.h>
102#include <kern/queue.h>
103#include <kern/sched.h>
104#include <kern/sched_prim.h>
91447636
A
105#include <kern/sync_lock.h>
106#include <kern/syscall_subr.h>
1c79356b
A
107#include <kern/task.h>
108#include <kern/thread.h>
1c79356b
A
109#include <kern/host.h>
110#include <kern/zalloc.h>
1c79356b
A
111#include <kern/profile.h>
112#include <kern/assert.h>
91447636
A
113
114#include <ipc/ipc_kmsg.h>
115#include <ipc/ipc_port.h>
116
117#include <vm/vm_kern.h>
118#include <vm/vm_pageout.h>
119
1c79356b
A
120#include <sys/kdebug.h>
121
122/*
123 * Exported interfaces
124 */
91447636 125#include <mach/task_server.h>
1c79356b
A
126#include <mach/thread_act_server.h>
127#include <mach/mach_host_server.h>
91447636 128#include <mach/host_priv_server.h>
1c79356b 129
55e303ae 130static struct zone *thread_zone;
1c79356b 131
91447636
A
132decl_simple_lock_data(static,thread_stack_lock)
133static queue_head_t thread_stack_queue;
1c79356b 134
91447636
A
135decl_simple_lock_data(static,thread_terminate_lock)
136static queue_head_t thread_terminate_queue;
1c79356b 137
55e303ae 138static struct thread thread_template, init_thread;
1c79356b 139
91447636
A
140#ifdef MACH_BSD
141extern void proc_exit(void *);
142#endif /* MACH_BSD */
55e303ae 143
91447636
A
144void
145thread_bootstrap(void)
146{
147 /*
148 * Fill in a template thread for fast initialization.
149 */
1c79356b 150
91447636 151 thread_template.runq = RUN_QUEUE_NULL;
1c79356b 152
91447636 153 thread_template.ref_count = 2;
55e303ae 154
91447636
A
155 thread_template.reason = AST_NONE;
156 thread_template.at_safe_point = FALSE;
157 thread_template.wait_event = NO_EVENT64;
158 thread_template.wait_queue = WAIT_QUEUE_NULL;
159 thread_template.wait_result = THREAD_WAITING;
160 thread_template.options = THREAD_ABORTSAFE;
161 thread_template.state = TH_WAIT | TH_UNINT;
162 thread_template.wake_active = FALSE;
163 thread_template.continuation = THREAD_CONTINUE_NULL;
164 thread_template.parameter = NULL;
1c79356b 165
91447636
A
166 thread_template.importance = 0;
167 thread_template.sched_mode = 0;
168 thread_template.safe_mode = 0;
169 thread_template.safe_release = 0;
0b4e3aa0 170
91447636
A
171 thread_template.priority = 0;
172 thread_template.sched_pri = 0;
173 thread_template.max_priority = 0;
174 thread_template.task_priority = 0;
175 thread_template.promotions = 0;
176 thread_template.pending_promoter_index = 0;
177 thread_template.pending_promoter[0] =
178 thread_template.pending_promoter[1] = NULL;
1c79356b 179
91447636 180 thread_template.realtime.deadline = UINT64_MAX;
1c79356b 181
91447636 182 thread_template.current_quantum = 0;
1c79356b 183
91447636
A
184 thread_template.computation_metered = 0;
185 thread_template.computation_epoch = 0;
1c79356b 186
91447636
A
187 thread_template.sched_stamp = 0;
188 thread_template.sched_usage = 0;
189 thread_template.pri_shift = INT8_MAX;
190 thread_template.cpu_usage = thread_template.cpu_delta = 0;
0b4e3aa0 191
91447636
A
192 thread_template.bound_processor = PROCESSOR_NULL;
193 thread_template.last_processor = PROCESSOR_NULL;
194 thread_template.last_switch = 0;
1c79356b 195
91447636
A
196 timer_init(&thread_template.user_timer);
197 timer_init(&thread_template.system_timer);
198 thread_template.user_timer_save = 0;
199 thread_template.system_timer_save = 0;
1c79356b 200
91447636
A
201 thread_template.wait_timer_is_set = FALSE;
202 thread_template.wait_timer_active = 0;
1c79356b 203
91447636 204 thread_template.depress_timer_active = 0;
0b4e3aa0 205
91447636 206 thread_template.processor_set = PROCESSOR_SET_NULL;
55e303ae 207
91447636
A
208 thread_template.special_handler.handler = special_handler;
209 thread_template.special_handler.next = 0;
55e303ae 210
91447636
A
211#if MACH_HOST
212 thread_template.may_assign = TRUE;
213 thread_template.assign_active = FALSE;
214#endif /* MACH_HOST */
215 thread_template.funnel_lock = THR_FUNNEL_NULL;
216 thread_template.funnel_state = 0;
217 thread_template.recover = (vm_offset_t)NULL;
55e303ae 218
91447636
A
219 init_thread = thread_template;
220 machine_set_current_thread(&init_thread);
1c79356b
A
221}
222
55e303ae 223void
91447636 224thread_init(void)
0b4e3aa0 225{
91447636
A
226 thread_zone = zinit(
227 sizeof(struct thread),
228 THREAD_MAX * sizeof(struct thread),
229 THREAD_CHUNK * sizeof(struct thread),
230 "threads");
55e303ae 231
91447636 232 stack_init();
55e303ae 233
91447636
A
234 /*
235 * Initialize any machine-dependent
236 * per-thread structures necessary.
237 */
238 machine_thread_init();
239}
0b4e3aa0 240
91447636
A
/*
 *	Continuation installed when a terminating thread blocks for the
 *	last time in thread_terminate_self; it must never actually run.
 */
static void
thread_terminate_continue(void)
{
	panic("thread_terminate_continue");
	/*NOTREACHED*/
}
247
1c79356b 248/*
91447636 249 * thread_terminate_self:
1c79356b 250 */
1c79356b 251void
91447636 252thread_terminate_self(void)
1c79356b 253{
91447636
A
254 thread_t thread = current_thread();
255 task_t task;
256 spl_t s;
55e303ae 257
91447636
A
258 s = splsched();
259 thread_lock(thread);
1c79356b 260
91447636
A
261 /*
262 * Cancel priority depression, reset scheduling parameters,
263 * and wait for concurrent expirations on other processors.
264 */
265 if (thread->sched_mode & TH_MODE_ISDEPRESSED) {
266 thread->sched_mode &= ~TH_MODE_ISDEPRESSED;
1c79356b 267
91447636
A
268 if (timer_call_cancel(&thread->depress_timer))
269 thread->depress_timer_active--;
1c79356b 270 }
1c79356b 271
91447636 272 thread_policy_reset(thread);
55e303ae 273
91447636
A
274 while (thread->depress_timer_active > 0) {
275 thread_unlock(thread);
276 splx(s);
55e303ae 277
91447636 278 delay(1);
55e303ae 279
91447636
A
280 s = splsched();
281 thread_lock(thread);
55e303ae
A
282 }
283
91447636
A
284 thread_unlock(thread);
285 splx(s);
55e303ae 286
91447636 287 thread_mtx_lock(thread);
55e303ae 288
91447636 289 ulock_release_all(thread);
55e303ae 290
91447636
A
291 ipc_thread_disable(thread);
292
293 thread_mtx_unlock(thread);
55e303ae 294
91447636
A
295 /*
296 * If we are the last thread to terminate and the task is
297 * associated with a BSD process, perform BSD process exit.
298 */
299 task = thread->task;
300 if ( hw_atomic_sub(&task->active_thread_count, 1) == 0 &&
301 task->bsd_info != NULL )
302 proc_exit(task->bsd_info);
1c79356b 303
91447636
A
304 s = splsched();
305 thread_lock(thread);
1c79356b 306
91447636
A
307 /*
308 * Cancel wait timer, and wait for
309 * concurrent expirations.
310 */
311 if (thread->wait_timer_is_set) {
312 thread->wait_timer_is_set = FALSE;
1c79356b 313
91447636
A
314 if (timer_call_cancel(&thread->wait_timer))
315 thread->wait_timer_active--;
316 }
1c79356b 317
91447636
A
318 while (thread->wait_timer_active > 0) {
319 thread_unlock(thread);
320 splx(s);
0b4e3aa0 321
91447636 322 delay(1);
1c79356b 323
91447636
A
324 s = splsched();
325 thread_lock(thread);
326 }
1c79356b 327
91447636
A
328 /*
329 * If there is a reserved stack, release it.
330 */
331 if (thread->reserved_stack != 0) {
332 if (thread->reserved_stack != thread->kernel_stack)
333 stack_free_stack(thread->reserved_stack);
334 thread->reserved_stack = 0;
335 }
1c79356b 336
91447636
A
337 /*
338 * Mark thread as terminating, and block.
339 */
340 thread->state |= TH_TERMINATE;
341 thread_mark_wait_locked(thread, THREAD_UNINT);
342 assert(thread->promotions == 0);
343 thread_unlock(thread);
344 /* splsched */
1c79356b 345
91447636
A
346 thread_block((thread_continue_t)thread_terminate_continue);
347 /*NOTREACHED*/
55e303ae
A
348}
349
350void
91447636
A
351thread_deallocate(
352 thread_t thread)
1c79356b 353{
91447636
A
354 processor_set_t pset;
355 task_t task;
1c79356b 356
91447636
A
357 if (thread == THREAD_NULL)
358 return;
1c79356b 359
91447636
A
360 if (thread_deallocate_internal(thread) > 0)
361 return;
1c79356b 362
91447636 363 ipc_thread_terminate(thread);
1c79356b 364
91447636 365 task = thread->task;
0b4e3aa0 366
91447636
A
367#ifdef MACH_BSD
368 {
369 void *ut = thread->uthread;
0b4e3aa0 370
91447636
A
371 thread->uthread = NULL;
372 uthread_free(task, ut, task->bsd_info);
373 }
374#endif /* MACH_BSD */
0b4e3aa0 375
91447636 376 task_deallocate(task);
1c79356b 377
91447636
A
378 pset = thread->processor_set;
379 pset_deallocate(pset);
0b4e3aa0 380
91447636
A
381 if (thread->kernel_stack != 0)
382 stack_free(thread);
0b4e3aa0 383
91447636 384 machine_thread_destroy(thread);
1c79356b 385
91447636
A
386 zfree(thread_zone, thread);
387}
0b4e3aa0 388
91447636
A
389/*
390 * thread_terminate_daemon:
391 *
392 * Perform final clean up for terminating threads.
393 */
394static void
395thread_terminate_daemon(void)
396{
397 thread_t thread;
398 task_t task;
399 processor_set_t pset;
0b4e3aa0 400
91447636
A
401 (void)splsched();
402 simple_lock(&thread_terminate_lock);
0b4e3aa0 403
91447636
A
404 while ((thread = (thread_t)dequeue_head(&thread_terminate_queue)) != THREAD_NULL) {
405 simple_unlock(&thread_terminate_lock);
406 (void)spllo();
0b4e3aa0 407
91447636 408 task = thread->task;
55e303ae 409
91447636
A
410 task_lock(task);
411 task->total_user_time += timer_grab(&thread->user_timer);
412 task->total_system_time += timer_grab(&thread->system_timer);
55e303ae 413
91447636
A
414 queue_remove(&task->threads, thread, thread_t, task_threads);
415 task->thread_count--;
416 task_unlock(task);
1c79356b 417
91447636 418 pset = thread->processor_set;
1c79356b 419
91447636
A
420 pset_lock(pset);
421 pset_remove_thread(pset, thread);
422 pset_unlock(pset);
1c79356b 423
91447636 424 thread_deallocate(thread);
1c79356b 425
91447636
A
426 (void)splsched();
427 simple_lock(&thread_terminate_lock);
0b4e3aa0 428 }
1c79356b 429
91447636
A
430 assert_wait((event_t)&thread_terminate_queue, THREAD_UNINT);
431 simple_unlock(&thread_terminate_lock);
432 /* splsched */
433
434 thread_block((thread_continue_t)thread_terminate_daemon);
435 /*NOTREACHED*/
1c79356b
A
436}
437
9bccf70c 438/*
91447636
A
439 * thread_terminate_enqueue:
440 *
441 * Enqueue a terminating thread for final disposition.
442 *
443 * Called at splsched.
9bccf70c 444 */
1c79356b 445void
91447636 446thread_terminate_enqueue(
1c79356b
A
447 thread_t thread)
448{
91447636
A
449 simple_lock(&thread_terminate_lock);
450 enqueue_tail(&thread_terminate_queue, (queue_entry_t)thread);
451 simple_unlock(&thread_terminate_lock);
1c79356b 452
91447636 453 thread_wakeup((event_t)&thread_terminate_queue);
1c79356b
A
454}
455
91447636
A
456/*
457 * thread_stack_daemon:
458 *
459 * Perform stack allocation as required due to
460 * invoke failures.
461 */
462static void
463thread_stack_daemon(void)
9bccf70c 464{
91447636
A
465 thread_t thread;
466
467 (void)splsched();
468 simple_lock(&thread_stack_lock);
469
470 while ((thread = (thread_t)dequeue_head(&thread_stack_queue)) != THREAD_NULL) {
471 simple_unlock(&thread_stack_lock);
472 /* splsched */
473
474 stack_alloc(thread);
475
476 thread_lock(thread);
477 thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
478 thread_unlock(thread);
479 (void)spllo();
480
481 (void)splsched();
482 simple_lock(&thread_stack_lock);
483 }
484
485 assert_wait((event_t)&thread_stack_queue, THREAD_UNINT);
486 simple_unlock(&thread_stack_lock);
487 /* splsched */
488
489 thread_block((thread_continue_t)thread_stack_daemon);
9bccf70c
A
490 /*NOTREACHED*/
491}
1c79356b
A
492
493/*
91447636 494 * thread_stack_enqueue:
1c79356b 495 *
91447636 496 * Enqueue a thread for stack allocation.
1c79356b 497 *
91447636 498 * Called at splsched.
1c79356b
A
499 */
500void
91447636
A
501thread_stack_enqueue(
502 thread_t thread)
1c79356b 503{
91447636
A
504 simple_lock(&thread_stack_lock);
505 enqueue_tail(&thread_stack_queue, (queue_entry_t)thread);
506 simple_unlock(&thread_stack_lock);
1c79356b 507
91447636
A
508 thread_wakeup((event_t)&thread_stack_queue);
509}
9bccf70c 510
91447636
A
511void
512thread_daemon_init(void)
513{
514 kern_return_t result;
515 thread_t thread;
0b4e3aa0 516
91447636
A
517 simple_lock_init(&thread_terminate_lock, 0);
518 queue_init(&thread_terminate_queue);
1c79356b 519
91447636
A
520 result = kernel_thread_start_priority((thread_continue_t)thread_terminate_daemon, NULL, MINPRI_KERNEL, &thread);
521 if (result != KERN_SUCCESS)
522 panic("thread_daemon_init: thread_terminate_daemon");
1c79356b 523
91447636 524 thread_deallocate(thread);
1c79356b 525
91447636
A
526 simple_lock_init(&thread_stack_lock, 0);
527 queue_init(&thread_stack_queue);
1c79356b 528
91447636
A
529 result = kernel_thread_start_priority((thread_continue_t)thread_stack_daemon, NULL, BASEPRI_PREEMPT, &thread);
530 if (result != KERN_SUCCESS)
531 panic("thread_daemon_init: thread_stack_daemon");
1c79356b 532
91447636 533 thread_deallocate(thread);
1c79356b
A
534}
535
1c79356b
A
536/*
537 * Create a new thread.
55e303ae 538 * Doesn't start the thread running.
1c79356b 539 */
55e303ae
A
540static kern_return_t
541thread_create_internal(
542 task_t parent_task,
1c79356b 543 integer_t priority,
91447636 544 thread_continue_t continuation,
55e303ae 545 thread_t *out_thread)
1c79356b 546{
55e303ae 547 thread_t new_thread;
1c79356b 548 processor_set_t pset;
55e303ae 549 static thread_t first_thread;
1c79356b
A
550
551 /*
552 * Allocate a thread and initialize static fields
553 */
55e303ae 554 if (first_thread == NULL)
91447636 555 new_thread = first_thread = current_thread();
55e303ae
A
556 else
557 new_thread = (thread_t)zalloc(thread_zone);
558 if (new_thread == NULL)
1c79356b
A
559 return (KERN_RESOURCE_SHORTAGE);
560
55e303ae
A
561 if (new_thread != first_thread)
562 *new_thread = thread_template;
563
564#ifdef MACH_BSD
565 {
55e303ae
A
566 new_thread->uthread = uthread_alloc(parent_task, new_thread);
567 if (new_thread->uthread == NULL) {
91447636 568 zfree(thread_zone, new_thread);
55e303ae
A
569 return (KERN_RESOURCE_SHORTAGE);
570 }
571 }
572#endif /* MACH_BSD */
1c79356b 573
55e303ae
A
574 if (machine_thread_create(new_thread, parent_task) != KERN_SUCCESS) {
575#ifdef MACH_BSD
576 {
55e303ae 577 void *ut = new_thread->uthread;
1c79356b 578
55e303ae 579 new_thread->uthread = NULL;
91447636 580 uthread_free(parent_task, ut, parent_task->bsd_info);
55e303ae
A
581 }
582#endif /* MACH_BSD */
91447636 583 zfree(thread_zone, new_thread);
55e303ae
A
584 return (KERN_FAILURE);
585 }
586
587 new_thread->task = parent_task;
588
589 thread_lock_init(new_thread);
590 wake_lock_init(new_thread);
591
91447636 592 mutex_init(&new_thread->mutex, 0);
0b4e3aa0 593
55e303ae
A
594 ipc_thread_init(new_thread);
595 queue_init(&new_thread->held_ulocks);
91447636 596 thread_prof_init(new_thread, parent_task);
55e303ae 597
91447636 598 new_thread->continuation = continuation;
0b4e3aa0 599
1c79356b 600 pset = parent_task->processor_set;
9bccf70c 601 assert(pset == &default_pset);
1c79356b
A
602 pset_lock(pset);
603
604 task_lock(parent_task);
9bccf70c 605 assert(parent_task->processor_set == pset);
1c79356b 606
55e303ae
A
607 if ( !parent_task->active ||
608 (parent_task->thread_count >= THREAD_MAX &&
609 parent_task != kernel_task)) {
1c79356b
A
610 task_unlock(parent_task);
611 pset_unlock(pset);
55e303ae
A
612
613#ifdef MACH_BSD
614 {
55e303ae
A
615 void *ut = new_thread->uthread;
616
617 new_thread->uthread = NULL;
91447636 618 uthread_free(parent_task, ut, parent_task->bsd_info);
55e303ae
A
619 }
620#endif /* MACH_BSD */
91447636
A
621 ipc_thread_disable(new_thread);
622 ipc_thread_terminate(new_thread);
55e303ae 623 machine_thread_destroy(new_thread);
91447636 624 zfree(thread_zone, new_thread);
1c79356b
A
625 return (KERN_FAILURE);
626 }
627
91447636 628 task_reference_internal(parent_task);
55e303ae
A
629
630 /* Cache the task's map */
631 new_thread->map = parent_task->map;
1c79356b 632
55e303ae 633 /* Chain the thread onto the task's list */
91447636 634 queue_enter(&parent_task->threads, new_thread, thread_t, task_threads);
55e303ae 635 parent_task->thread_count++;
9bccf70c
A
636
637 /* So terminating threads don't need to take the task lock to decrement */
55e303ae 638 hw_atomic_add(&parent_task->active_thread_count, 1);
1c79356b 639
1c79356b 640 /* Associate the thread with the processor set */
55e303ae
A
641 pset_add_thread(pset, new_thread);
642
91447636
A
643 timer_call_setup(&new_thread->wait_timer, thread_timer_expire, new_thread);
644 timer_call_setup(&new_thread->depress_timer, thread_depress_expire, new_thread);
1c79356b
A
645
646 /* Set the thread's scheduling parameters */
0b4e3aa0 647 if (parent_task != kernel_task)
55e303ae
A
648 new_thread->sched_mode |= TH_MODE_TIMESHARE;
649 new_thread->max_priority = parent_task->max_priority;
650 new_thread->task_priority = parent_task->priority;
651 new_thread->priority = (priority < 0)? parent_task->priority: priority;
652 if (new_thread->priority > new_thread->max_priority)
653 new_thread->priority = new_thread->max_priority;
654 new_thread->importance =
655 new_thread->priority - new_thread->task_priority;
656 new_thread->sched_stamp = sched_tick;
91447636 657 new_thread->pri_shift = new_thread->processor_set->pri_shift;
55e303ae 658 compute_priority(new_thread, FALSE);
1c79356b 659
55e303ae 660 new_thread->active = TRUE;
1c79356b 661
55e303ae 662 *out_thread = new_thread;
1c79356b
A
663
664 {
9bccf70c 665 long dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4;
1c79356b 666
55e303ae
A
667 kdbg_trace_data(parent_task->bsd_info, &dbg_arg2);
668
9bccf70c
A
669 KERNEL_DEBUG_CONSTANT(
670 TRACEDBG_CODE(DBG_TRACE_DATA, 1) | DBG_FUNC_NONE,
55e303ae 671 (vm_address_t)new_thread, dbg_arg2, 0, 0, 0);
1c79356b 672
9bccf70c
A
673 kdbg_trace_string(parent_task->bsd_info,
674 &dbg_arg1, &dbg_arg2, &dbg_arg3, &dbg_arg4);
675
676 KERNEL_DEBUG_CONSTANT(
677 TRACEDBG_CODE(DBG_TRACE_STRING, 1) | DBG_FUNC_NONE,
678 dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, 0);
1c79356b
A
679 }
680
681 return (KERN_SUCCESS);
682}
683
684kern_return_t
685thread_create(
686 task_t task,
91447636 687 thread_t *new_thread)
1c79356b 688{
1c79356b 689 kern_return_t result;
9bccf70c 690 thread_t thread;
1c79356b 691
55e303ae
A
692 if (task == TASK_NULL || task == kernel_task)
693 return (KERN_INVALID_ARGUMENT);
1c79356b 694
91447636 695 result = thread_create_internal(task, -1, (thread_continue_t)thread_bootstrap_return, &thread);
1c79356b
A
696 if (result != KERN_SUCCESS)
697 return (result);
698
55e303ae
A
699 thread->user_stop_count = 1;
700 thread_hold(thread);
9bccf70c 701 if (task->suspend_count > 0)
55e303ae 702 thread_hold(thread);
1c79356b 703
9bccf70c
A
704 pset_unlock(task->processor_set);
705 task_unlock(task);
1c79356b 706
55e303ae 707 *new_thread = thread;
1c79356b
A
708
709 return (KERN_SUCCESS);
710}
711
1c79356b
A
712kern_return_t
713thread_create_running(
9bccf70c 714 register task_t task,
1c79356b
A
715 int flavor,
716 thread_state_t new_state,
717 mach_msg_type_number_t new_state_count,
91447636 718 thread_t *new_thread)
1c79356b
A
719{
720 register kern_return_t result;
9bccf70c 721 thread_t thread;
9bccf70c 722
55e303ae
A
723 if (task == TASK_NULL || task == kernel_task)
724 return (KERN_INVALID_ARGUMENT);
1c79356b 725
91447636 726 result = thread_create_internal(task, -1, (thread_continue_t)thread_bootstrap_return, &thread);
1c79356b
A
727 if (result != KERN_SUCCESS)
728 return (result);
729
91447636
A
730 result = machine_thread_set_state(
731 thread, flavor, new_state, new_state_count);
1c79356b 732 if (result != KERN_SUCCESS) {
9bccf70c
A
733 pset_unlock(task->processor_set);
734 task_unlock(task);
735
55e303ae 736 thread_terminate(thread);
91447636 737 thread_deallocate(thread);
1c79356b
A
738 return (result);
739 }
740
91447636 741 thread_mtx_lock(thread);
9bccf70c 742 clear_wait(thread, THREAD_AWAKENED);
55e303ae 743 thread->started = TRUE;
91447636 744 thread_mtx_unlock(thread);
9bccf70c
A
745 pset_unlock(task->processor_set);
746 task_unlock(task);
747
55e303ae 748 *new_thread = thread;
9bccf70c 749
1c79356b
A
750 return (result);
751}
752
753/*
91447636 754 * kernel_thread_create:
1c79356b 755 *
55e303ae
A
756 * Create a thread in the kernel task
757 * to execute in kernel context.
1c79356b 758 */
91447636 759kern_return_t
55e303ae 760kernel_thread_create(
91447636
A
761 thread_continue_t continuation,
762 void *parameter,
763 integer_t priority,
764 thread_t *new_thread)
1c79356b
A
765{
766 kern_return_t result;
767 thread_t thread;
91447636 768 task_t task = kernel_task;
1c79356b 769
91447636 770 result = thread_create_internal(task, priority, continuation, &thread);
9bccf70c 771 if (result != KERN_SUCCESS)
91447636 772 return (result);
1c79356b 773
9bccf70c
A
774 pset_unlock(task->processor_set);
775 task_unlock(task);
776
91447636
A
777#if !defined(i386)
778 stack_alloc(thread);
55e303ae
A
779 assert(thread->kernel_stack != 0);
780 thread->reserved_stack = thread->kernel_stack;
91447636 781#endif /* !defined(i386) */
55e303ae 782
91447636 783 thread->parameter = parameter;
55e303ae 784
91447636
A
785 *new_thread = thread;
786
787 return (result);
55e303ae
A
788}
789
91447636
A
790kern_return_t
791kernel_thread_start_priority(
792 thread_continue_t continuation,
793 void *parameter,
794 integer_t priority,
795 thread_t *new_thread)
55e303ae 796{
91447636 797 kern_return_t result;
55e303ae 798 thread_t thread;
1c79356b 799
91447636
A
800 result = kernel_thread_create(continuation, parameter, priority, &thread);
801 if (result != KERN_SUCCESS)
802 return (result);
1c79356b 803
91447636 804 thread_mtx_lock(thread);
55e303ae
A
805 clear_wait(thread, THREAD_AWAKENED);
806 thread->started = TRUE;
91447636 807 thread_mtx_unlock(thread);
1c79356b 808
91447636
A
809 *new_thread = thread;
810
811 return (result);
812}
813
814kern_return_t
815kernel_thread_start(
816 thread_continue_t continuation,
817 void *parameter,
818 thread_t *new_thread)
819{
820 return kernel_thread_start_priority(continuation, parameter, -1, new_thread);
1c79356b
A
821}
822
823thread_t
824kernel_thread(
825 task_t task,
826 void (*start)(void))
827{
91447636
A
828 kern_return_t result;
829 thread_t thread;
830
55e303ae
A
831 if (task != kernel_task)
832 panic("kernel_thread");
833
91447636
A
834 result = kernel_thread_start_priority((thread_continue_t)start, NULL, -1, &thread);
835 if (result != KERN_SUCCESS)
836 return (THREAD_NULL);
1c79356b 837
91447636 838 thread_deallocate(thread);
1c79356b 839
91447636 840 return (thread);
1c79356b
A
841}
842
1c79356b 843kern_return_t
91447636
A
844thread_info_internal(
845 register thread_t thread,
1c79356b
A
846 thread_flavor_t flavor,
847 thread_info_t thread_info_out, /* ptr to OUT array */
848 mach_msg_type_number_t *thread_info_count) /*IN/OUT*/
849{
1c79356b
A
850 int state, flags;
851 spl_t s;
852
853 if (thread == THREAD_NULL)
854 return (KERN_INVALID_ARGUMENT);
855
856 if (flavor == THREAD_BASIC_INFO) {
857 register thread_basic_info_t basic_info;
858
859 if (*thread_info_count < THREAD_BASIC_INFO_COUNT)
860 return (KERN_INVALID_ARGUMENT);
861
862 basic_info = (thread_basic_info_t) thread_info_out;
863
864 s = splsched();
865 thread_lock(thread);
866
867 /* fill in info */
868
869 thread_read_times(thread, &basic_info->user_time,
870 &basic_info->system_time);
871
0b4e3aa0
A
872 /*
873 * Update lazy-evaluated scheduler info because someone wants it.
874 */
875 if (thread->sched_stamp != sched_tick)
876 update_priority(thread);
877
878 basic_info->sleep_time = 0;
879
880 /*
881 * To calculate cpu_usage, first correct for timer rate,
882 * then for 5/8 ageing. The correction factor [3/5] is
883 * (1/(5/8) - 1).
884 */
91447636
A
885 basic_info->cpu_usage = ((uint64_t)thread->cpu_usage
886 * TH_USAGE_SCALE) / sched_tick_interval;
0b4e3aa0 887 basic_info->cpu_usage = (basic_info->cpu_usage * 3) / 5;
91447636
A
888
889 if (basic_info->cpu_usage > TH_USAGE_SCALE)
890 basic_info->cpu_usage = TH_USAGE_SCALE;
1c79356b 891
0b4e3aa0
A
892 basic_info->policy = ((thread->sched_mode & TH_MODE_TIMESHARE)?
893 POLICY_TIMESHARE: POLICY_RR);
1c79356b
A
894
895 flags = 0;
1c79356b 896 if (thread->state & TH_IDLE)
0b4e3aa0
A
897 flags |= TH_FLAGS_IDLE;
898
91447636 899 if (!thread->kernel_stack)
0b4e3aa0 900 flags |= TH_FLAGS_SWAPPED;
1c79356b
A
901
902 state = 0;
9bccf70c 903 if (thread->state & TH_TERMINATE)
1c79356b
A
904 state = TH_STATE_HALTED;
905 else
906 if (thread->state & TH_RUN)
907 state = TH_STATE_RUNNING;
908 else
909 if (thread->state & TH_UNINT)
910 state = TH_STATE_UNINTERRUPTIBLE;
911 else
912 if (thread->state & TH_SUSP)
913 state = TH_STATE_STOPPED;
914 else
915 if (thread->state & TH_WAIT)
916 state = TH_STATE_WAITING;
917
918 basic_info->run_state = state;
919 basic_info->flags = flags;
920
91447636 921 basic_info->suspend_count = thread->user_stop_count;
1c79356b
A
922
923 thread_unlock(thread);
924 splx(s);
925
926 *thread_info_count = THREAD_BASIC_INFO_COUNT;
927
928 return (KERN_SUCCESS);
929 }
930 else
931 if (flavor == THREAD_SCHED_TIMESHARE_INFO) {
932 policy_timeshare_info_t ts_info;
933
934 if (*thread_info_count < POLICY_TIMESHARE_INFO_COUNT)
935 return (KERN_INVALID_ARGUMENT);
936
937 ts_info = (policy_timeshare_info_t)thread_info_out;
938
939 s = splsched();
940 thread_lock(thread);
941
0b4e3aa0 942 if (!(thread->sched_mode & TH_MODE_TIMESHARE)) {
1c79356b
A
943 thread_unlock(thread);
944 splx(s);
945
946 return (KERN_INVALID_POLICY);
947 }
948
9bccf70c
A
949 ts_info->depressed = (thread->sched_mode & TH_MODE_ISDEPRESSED) != 0;
950 if (ts_info->depressed) {
951 ts_info->base_priority = DEPRESSPRI;
952 ts_info->depress_priority = thread->priority;
953 }
954 else {
955 ts_info->base_priority = thread->priority;
956 ts_info->depress_priority = -1;
957 }
1c79356b 958
9bccf70c
A
959 ts_info->cur_priority = thread->sched_pri;
960 ts_info->max_priority = thread->max_priority;
1c79356b
A
961
962 thread_unlock(thread);
963 splx(s);
964
965 *thread_info_count = POLICY_TIMESHARE_INFO_COUNT;
966
967 return (KERN_SUCCESS);
968 }
969 else
970 if (flavor == THREAD_SCHED_FIFO_INFO) {
1c79356b
A
971 if (*thread_info_count < POLICY_FIFO_INFO_COUNT)
972 return (KERN_INVALID_ARGUMENT);
973
0b4e3aa0 974 return (KERN_INVALID_POLICY);
1c79356b
A
975 }
976 else
977 if (flavor == THREAD_SCHED_RR_INFO) {
978 policy_rr_info_t rr_info;
979
980 if (*thread_info_count < POLICY_RR_INFO_COUNT)
981 return (KERN_INVALID_ARGUMENT);
982
983 rr_info = (policy_rr_info_t) thread_info_out;
984
985 s = splsched();
986 thread_lock(thread);
987
0b4e3aa0 988 if (thread->sched_mode & TH_MODE_TIMESHARE) {
1c79356b
A
989 thread_unlock(thread);
990 splx(s);
991
992 return (KERN_INVALID_POLICY);
993 }
994
9bccf70c
A
995 rr_info->depressed = (thread->sched_mode & TH_MODE_ISDEPRESSED) != 0;
996 if (rr_info->depressed) {
997 rr_info->base_priority = DEPRESSPRI;
998 rr_info->depress_priority = thread->priority;
999 }
1000 else {
1001 rr_info->base_priority = thread->priority;
1002 rr_info->depress_priority = -1;
1003 }
1004
1c79356b 1005 rr_info->max_priority = thread->max_priority;
0b4e3aa0 1006 rr_info->quantum = std_quantum_us / 1000;
1c79356b 1007
1c79356b
A
1008 thread_unlock(thread);
1009 splx(s);
1010
1011 *thread_info_count = POLICY_RR_INFO_COUNT;
1012
1013 return (KERN_SUCCESS);
1014 }
1015
1016 return (KERN_INVALID_ARGUMENT);
1017}
1018
1019void
91447636
A
1020thread_read_times(
1021 thread_t thread,
1022 time_value_t *user_time,
1023 time_value_t *system_time)
1c79356b 1024{
91447636
A
1025 absolutetime_to_microtime(
1026 timer_grab(&thread->user_timer),
1027 &user_time->seconds, &user_time->microseconds);
9bccf70c 1028
91447636
A
1029 absolutetime_to_microtime(
1030 timer_grab(&thread->system_timer),
1031 &system_time->seconds, &system_time->microseconds);
1c79356b
A
1032}
1033
1034kern_return_t
1035thread_assign(
91447636
A
1036 __unused thread_t thread,
1037 __unused processor_set_t new_pset)
1c79356b 1038{
91447636 1039 return (KERN_FAILURE);
1c79356b
A
1040}
1041
1042/*
1043 * thread_assign_default:
1044 *
1045 * Special version of thread_assign for assigning threads to default
1046 * processor set.
1047 */
1048kern_return_t
1049thread_assign_default(
91447636 1050 thread_t thread)
1c79356b 1051{
91447636 1052 return (thread_assign(thread, &default_pset));
1c79356b
A
1053}
1054
1055/*
1056 * thread_get_assignment
1057 *
1058 * Return current assignment for this thread.
1059 */
1060kern_return_t
1061thread_get_assignment(
91447636 1062 thread_t thread,
1c79356b
A
1063 processor_set_t *pset)
1064{
91447636
A
1065 if (thread == NULL)
1066 return (KERN_INVALID_ARGUMENT);
1067
1c79356b 1068 *pset = thread->processor_set;
1c79356b 1069 pset_reference(*pset);
91447636 1070 return (KERN_SUCCESS);
1c79356b
A
1071}
1072
1073/*
55e303ae 1074 * thread_wire_internal:
1c79356b
A
1075 *
1076 * Specify that the target thread must always be able
1077 * to run and to allocate memory.
1078 */
1079kern_return_t
55e303ae 1080thread_wire_internal(
91447636
A
1081 host_priv_t host_priv,
1082 thread_t thread,
1083 boolean_t wired,
1084 boolean_t *prev_state)
1c79356b 1085{
91447636 1086 if (host_priv == NULL || thread != current_thread())
1c79356b
A
1087 return (KERN_INVALID_ARGUMENT);
1088
1089 assert(host_priv == &realhost);
1090
91447636
A
1091 if (prev_state)
1092 *prev_state = (thread->options & TH_OPT_VMPRIV) != 0;
55e303ae 1093
1c79356b 1094 if (wired) {
91447636 1095 if (!(thread->options & TH_OPT_VMPRIV))
1c79356b 1096 vm_page_free_reserve(1); /* XXX */
91447636
A
1097 thread->options |= TH_OPT_VMPRIV;
1098 }
1099 else {
1100 if (thread->options & TH_OPT_VMPRIV)
1c79356b 1101 vm_page_free_reserve(-1); /* XXX */
91447636 1102 thread->options &= ~TH_OPT_VMPRIV;
1c79356b
A
1103 }
1104
91447636 1105 return (KERN_SUCCESS);
1c79356b
A
1106}
1107
1c79356b
A
1108
1109/*
55e303ae 1110 * thread_wire:
1c79356b 1111 *
55e303ae 1112 * User-api wrapper for thread_wire_internal()
1c79356b 1113 */
55e303ae
A
1114kern_return_t
1115thread_wire(
1116 host_priv_t host_priv,
91447636 1117 thread_t thread,
55e303ae 1118 boolean_t wired)
1c79356b 1119{
91447636 1120 return (thread_wire_internal(host_priv, thread, wired, NULL));
1c79356b
A
1121}
1122
91447636
A
/*
 * Funnel bookkeeping.  The lock group/attributes are created lazily
 * on the first funnel_alloc() call (see funnel_alloc()).
 */
int split_funnel_off = 0;
lck_grp_t *funnel_lck_grp = LCK_GRP_NULL;	/* LCK_GRP_NULL until first funnel_alloc() */
lck_grp_attr_t *funnel_lck_grp_attr;
lck_attr_t *funnel_lck_attr;
1c79356b 1127
91447636
A
1128funnel_t *
1129funnel_alloc(
1130 int type)
1c79356b 1131{
91447636
A
1132 lck_mtx_t *m;
1133 funnel_t *fnl;
1c79356b 1134
91447636
A
1135 if (funnel_lck_grp == LCK_GRP_NULL) {
1136 funnel_lck_grp_attr = lck_grp_attr_alloc_init();
1137 //lck_grp_attr_setstat(funnel_lck_grp_attr);
1c79356b 1138
91447636 1139 funnel_lck_grp = lck_grp_alloc_init("Funnel", funnel_lck_grp_attr);
1c79356b 1140
91447636
A
1141 funnel_lck_attr = lck_attr_alloc_init();
1142 //lck_attr_setdebug(funnel_lck_attr);
1c79356b 1143 }
1c79356b 1144 if ((fnl = (funnel_t *)kalloc(sizeof(funnel_t))) != 0){
0b4e3aa0 1145 bzero((void *)fnl, sizeof(funnel_t));
91447636
A
1146 if ((m = lck_mtx_alloc_init(funnel_lck_grp, funnel_lck_attr)) == (lck_mtx_t *)NULL) {
1147 kfree(fnl, sizeof(funnel_t));
1c79356b
A
1148 return(THR_FUNNEL_NULL);
1149 }
1150 fnl->fnl_mutex = m;
1151 fnl->fnl_type = type;
1152 }
1153 return(fnl);
1154}
1155
1156void
1157funnel_free(
1158 funnel_t * fnl)
1159{
91447636 1160 lck_mtx_free(fnl->fnl_mutex, funnel_lck_grp);
1c79356b 1161 if (fnl->fnl_oldmutex)
91447636
A
1162 lck_mtx_free(fnl->fnl_oldmutex, funnel_lck_grp);
1163 kfree(fnl, sizeof(funnel_t));
1c79356b
A
1164}
1165
1166void
1167funnel_lock(
1168 funnel_t * fnl)
1169{
91447636 1170 lck_mtx_lock(fnl->fnl_mutex);
1c79356b 1171 fnl->fnl_mtxholder = current_thread();
1c79356b
A
1172}
1173
1174void
1175funnel_unlock(
1176 funnel_t * fnl)
1177{
91447636 1178 lck_mtx_unlock(fnl->fnl_mutex);
1c79356b
A
1179 fnl->fnl_mtxrelease = current_thread();
1180}
1181
1182funnel_t *
1183thread_funnel_get(
1184 void)
1185{
1186 thread_t th = current_thread();
1187
1188 if (th->funnel_state & TH_FN_OWNED) {
1189 return(th->funnel_lock);
1190 }
1191 return(THR_FUNNEL_NULL);
1192}
1193
/*
 *	thread_funnel_set:
 *
 *	Acquire (funneled == TRUE) or release (funneled == FALSE) the
 *	given funnel on behalf of the current thread, updating the
 *	thread's funnel_state/funnel_lock bookkeeping.
 *
 *	Returns the previous ownership state so callers can restore it.
 *
 *	Panics on mismatched use: acquiring while already holding a
 *	funnel, or releasing a funnel the thread does not hold.
 */
boolean_t
thread_funnel_set(
        funnel_t *	fnl,
	boolean_t	funneled)
{
	thread_t	cur_thread;
	boolean_t	funnel_state_prev;
	boolean_t	intr;
	
	cur_thread = current_thread();
	funnel_state_prev = ((cur_thread->funnel_state & TH_FN_OWNED) == TH_FN_OWNED);

	if (funnel_state_prev != funneled) {
		/*
		 * State transition: interrupts are disabled while the
		 * funnel fields and the mutex are updated together.
		 */
		intr = ml_set_interrupts_enabled(FALSE);

		if (funneled == TRUE) {
			if (cur_thread->funnel_lock)
				panic("Funnel lock called when holding one %x", cur_thread->funnel_lock);
			/* KERNEL_DEBUG codes bracket the potentially-blocking acquire */
			KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE,
											fnl, 1, 0, 0, 0);
			funnel_lock(fnl);
			KERNEL_DEBUG(0x6032434 | DBG_FUNC_NONE,
											fnl, 1, 0, 0, 0);
			cur_thread->funnel_state |= TH_FN_OWNED;
			cur_thread->funnel_lock = fnl;
		} else {
			/* compare underlying mutexes: a split funnel may alias */
			if(cur_thread->funnel_lock->fnl_mutex != fnl->fnl_mutex)
				panic("Funnel unlock when not holding funnel");
			/* clear ownership before dropping the mutex */
			cur_thread->funnel_state &= ~TH_FN_OWNED;
			KERNEL_DEBUG(0x603242c | DBG_FUNC_NONE,
								fnl, 1, 0, 0, 0);

			cur_thread->funnel_lock = THR_FUNNEL_NULL;
			funnel_unlock(fnl);
		}
		(void)ml_set_interrupts_enabled(intr);
	} else {
		/* if we are trying to acquire funnel recursively
		 * check for funnel to be held already
		 */
		if (funneled && (fnl->fnl_mutex != cur_thread->funnel_lock->fnl_mutex)) {
			panic("thread_funnel_set: already holding a different funnel");
		}
	}
	return(funnel_state_prev);
}
1240
1c79356b 1241
91447636
A
1242/*
1243 * Export routines to other components for things that are done as macros
1244 * within the osfmk component.
1245 */
1c79356b 1246
91447636
A
1247#undef thread_reference
1248void thread_reference(thread_t thread);
1c79356b 1249void
91447636
A
1250thread_reference(
1251 thread_t thread)
1c79356b 1252{
91447636
A
1253 if (thread != THREAD_NULL)
1254 thread_reference_internal(thread);
1c79356b
A
1255}
1256
1c79356b 1257#undef thread_should_halt
91447636 1258
1c79356b
A
1259boolean_t
1260thread_should_halt(
55e303ae 1261 thread_t th)
1c79356b 1262{
91447636 1263 return (thread_should_halt_fast(th));
55e303ae 1264}