]> git.saurik.com Git - apple/xnu.git/blame - osfmk/kern/thread.c
xnu-792.17.14.tar.gz
[apple/xnu.git] / osfmk / kern / thread.c
CommitLineData
1c79356b 1/*
91447636 2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
1c79356b 3 *
8f6c56a5 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
1c79356b 5 *
8f6c56a5
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
8ad349bb 24 * limitations under the License.
8f6c56a5
A
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b
A
27 */
28/*
29 * @OSF_FREE_COPYRIGHT@
30 */
31/*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56/*
57 */
58/*
59 * File: kern/thread.c
60 * Author: Avadis Tevanian, Jr., Michael Wayne Young, David Golub
61 * Date: 1986
62 *
91447636 63 * Thread management primitives implementation.
1c79356b
A
64 */
65/*
66 * Copyright (c) 1993 The University of Utah and
67 * the Computer Systems Laboratory (CSL). All rights reserved.
68 *
69 * Permission to use, copy, modify and distribute this software and its
70 * documentation is hereby granted, provided that both the copyright
71 * notice and this permission notice appear in all copies of the
72 * software, derivative works or modified versions, and any portions
73 * thereof, and that both notices appear in supporting documentation.
74 *
75 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
76 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
77 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
78 *
79 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
80 * improvements that they make and grant CSL redistribution rights.
81 *
82 */
83
1c79356b 84#include <mach_host.h>
1c79356b 85#include <mach_prof.h>
1c79356b 86
91447636 87#include <mach/mach_types.h>
1c79356b
A
88#include <mach/boolean.h>
89#include <mach/policy.h>
90#include <mach/thread_info.h>
91#include <mach/thread_special_ports.h>
92#include <mach/thread_status.h>
93#include <mach/time_value.h>
94#include <mach/vm_param.h>
91447636
A
95
96#include <machine/thread.h>
97
98#include <kern/kern_types.h>
99#include <kern/kalloc.h>
1c79356b
A
100#include <kern/cpu_data.h>
101#include <kern/counters.h>
1c79356b
A
102#include <kern/ipc_mig.h>
103#include <kern/ipc_tt.h>
104#include <kern/mach_param.h>
105#include <kern/machine.h>
106#include <kern/misc_protos.h>
107#include <kern/processor.h>
108#include <kern/queue.h>
109#include <kern/sched.h>
110#include <kern/sched_prim.h>
91447636
A
111#include <kern/sync_lock.h>
112#include <kern/syscall_subr.h>
1c79356b
A
113#include <kern/task.h>
114#include <kern/thread.h>
1c79356b
A
115#include <kern/host.h>
116#include <kern/zalloc.h>
1c79356b
A
117#include <kern/profile.h>
118#include <kern/assert.h>
91447636
A
119
120#include <ipc/ipc_kmsg.h>
121#include <ipc/ipc_port.h>
122
123#include <vm/vm_kern.h>
124#include <vm/vm_pageout.h>
125
1c79356b
A
126#include <sys/kdebug.h>
127
128/*
129 * Exported interfaces
130 */
91447636 131#include <mach/task_server.h>
1c79356b
A
132#include <mach/thread_act_server.h>
133#include <mach/mach_host_server.h>
91447636 134#include <mach/host_priv_server.h>
1c79356b 135
/* Zone backing all struct thread allocations (created in thread_init). */
static struct zone			*thread_zone;

/* Threads waiting for a kernel stack; serviced by thread_stack_daemon. */
decl_simple_lock_data(static,thread_stack_lock)
static queue_head_t		thread_stack_queue;

/* Terminated threads awaiting final reaping by thread_terminate_daemon. */
decl_simple_lock_data(static,thread_terminate_lock)
static queue_head_t		thread_terminate_queue;

/*
 * thread_template: prototype copied into each new thread.
 * init_thread: statically allocated thread for the bootstrap processor.
 */
static struct thread	thread_template, init_thread;

#ifdef MACH_BSD
/* BSD process exit hook, invoked when the last thread of a task terminates. */
extern void		proc_exit(void *);
#endif	/* MACH_BSD */
55e303ae 149
91447636
A
/*
 *	thread_bootstrap:
 *
 *	Initialize the thread template used to fast-initialize every
 *	new thread, then hand-initialize the bootstrap thread and make
 *	it the current thread on this processor.  Called once, very
 *	early, before zones or scheduling are available.
 */
void
thread_bootstrap(void)
{
	/*
	 *	Fill in a template thread for fast initialization.
	 */

	thread_template.runq = RUN_QUEUE_NULL;

	/* NOTE(review): two initial references — presumably one for the
	   thread's own existence and one dropped at reap time; confirm. */
	thread_template.ref_count = 2;

	thread_template.reason = AST_NONE;
	thread_template.at_safe_point = FALSE;
	thread_template.wait_event = NO_EVENT64;
	thread_template.wait_queue = WAIT_QUEUE_NULL;
	thread_template.wait_result = THREAD_WAITING;
	thread_template.options = THREAD_ABORTSAFE;
	/* New threads start blocked and uninterruptible until started. */
	thread_template.state = TH_WAIT | TH_UNINT;
	thread_template.wake_active = FALSE;
	thread_template.continuation = THREAD_CONTINUE_NULL;
	thread_template.parameter = NULL;

	/* Scheduling state: all zero until set from the parent task. */
	thread_template.importance = 0;
	thread_template.sched_mode = 0;
	thread_template.safe_mode = 0;
	thread_template.safe_release = 0;

	thread_template.priority = 0;
	thread_template.sched_pri = 0;
	thread_template.max_priority = 0;
	thread_template.task_priority = 0;
	thread_template.promotions = 0;
	thread_template.pending_promoter_index = 0;
	thread_template.pending_promoter[0] =
	thread_template.pending_promoter[1] = NULL;

	/* No realtime deadline pending. */
	thread_template.realtime.deadline = UINT64_MAX;

	thread_template.current_quantum = 0;

	thread_template.computation_metered = 0;
	thread_template.computation_epoch = 0;

	thread_template.sched_stamp = 0;
	thread_template.sched_usage = 0;
	thread_template.pri_shift = INT8_MAX;
	thread_template.cpu_usage = thread_template.cpu_delta = 0;

	/* Unbound: no processor affinity yet. */
	thread_template.bound_processor = PROCESSOR_NULL;
	thread_template.last_processor = PROCESSOR_NULL;
	thread_template.last_switch = 0;

	/* Zeroed CPU accounting timers. */
	timer_init(&thread_template.user_timer);
	timer_init(&thread_template.system_timer);
	thread_template.user_timer_save = 0;
	thread_template.system_timer_save = 0;

	thread_template.wait_timer_is_set = FALSE;
	thread_template.wait_timer_active = 0;

	thread_template.depress_timer_active = 0;

	thread_template.processor_set = PROCESSOR_SET_NULL;

	thread_template.special_handler.handler = special_handler;
	thread_template.special_handler.next = 0;

#if MACH_HOST
	thread_template.may_assign = TRUE;
	thread_template.assign_active = FALSE;
#endif	/* MACH_HOST */
	thread_template.funnel_lock = THR_FUNNEL_NULL;
	thread_template.funnel_state = 0;
	thread_template.recover = (vm_offset_t)NULL;

	/* The bootstrap thread is a copy of the template, installed as
	   the current thread on this (boot) processor. */
	init_thread = thread_template;
	machine_set_current_thread(&init_thread);
}
228
55e303ae 229void
91447636 230thread_init(void)
0b4e3aa0 231{
91447636
A
232 thread_zone = zinit(
233 sizeof(struct thread),
234 THREAD_MAX * sizeof(struct thread),
235 THREAD_CHUNK * sizeof(struct thread),
236 "threads");
55e303ae 237
91447636 238 stack_init();
55e303ae 239
91447636
A
240 /*
241 * Initialize any machine-dependent
242 * per-thread structures necessary.
243 */
244 machine_thread_init();
245}
0b4e3aa0 246
91447636
A
/*
 *	thread_terminate_continue:
 *
 *	Continuation installed by thread_terminate_self; a terminating
 *	thread must never resume execution, so reaching here is fatal.
 */
static void
thread_terminate_continue(void)
{
	panic("thread_terminate_continue");
	/*NOTREACHED*/
}
253
/*
 *	thread_terminate_self:
 *
 *	Self-termination path: tear down the calling thread's scheduling
 *	state, IPC, and timers, optionally run BSD process exit if this is
 *	the task's last active thread, then mark TH_TERMINATE and block
 *	forever.  Final reaping happens in thread_terminate_daemon.
 */
void
thread_terminate_self(void)
{
	thread_t		thread = current_thread();
	task_t			task;
	spl_t			s;

	s = splsched();
	thread_lock(thread);

	/*
	 *	Cancel priority depression, reset scheduling parameters,
	 *	and wait for concurrent expirations on other processors.
	 */
	if (thread->sched_mode & TH_MODE_ISDEPRESSED) {
		thread->sched_mode &= ~TH_MODE_ISDEPRESSED;

		/* A successful cancel means the callout will not fire;
		   drop the count it would have dropped itself. */
		if (timer_call_cancel(&thread->depress_timer))
			thread->depress_timer_active--;
	}

	thread_policy_reset(thread);

	/* Spin (dropping the lock) until in-flight depress-timer
	   expirations on other processors have drained. */
	while (thread->depress_timer_active > 0) {
		thread_unlock(thread);
		splx(s);

		delay(1);

		s = splsched();
		thread_lock(thread);
	}

	thread_unlock(thread);
	splx(s);

	thread_mtx_lock(thread);

	/* Release any user-level locks still held by this thread. */
	ulock_release_all(thread);

	/* No further IPC may target this thread. */
	ipc_thread_disable(thread);

	thread_mtx_unlock(thread);

	/*
	 * If we are the last thread to terminate and the task is
	 * associated with a BSD process, perform BSD process exit.
	 */
	task = thread->task;
	if (	hw_atomic_sub(&task->active_thread_count, 1) == 0	&&
			task->bsd_info != NULL )
		proc_exit(task->bsd_info);

	s = splsched();
	thread_lock(thread);

	/*
	 *	Cancel wait timer, and wait for
	 *	concurrent expirations.
	 */
	if (thread->wait_timer_is_set) {
		thread->wait_timer_is_set = FALSE;

		if (timer_call_cancel(&thread->wait_timer))
			thread->wait_timer_active--;
	}

	/* Same drain pattern as for the depress timer above. */
	while (thread->wait_timer_active > 0) {
		thread_unlock(thread);
		splx(s);

		delay(1);

		s = splsched();
		thread_lock(thread);
	}

	/*
	 *	If there is a reserved stack, release it.
	 */
	if (thread->reserved_stack != 0) {
		if (thread->reserved_stack != thread->kernel_stack)
			stack_free_stack(thread->reserved_stack);
		thread->reserved_stack = 0;
	}

	/*
	 *	Mark thread as terminating, and block.
	 */
	thread->state |= TH_TERMINATE;
	thread_mark_wait_locked(thread, THREAD_UNINT);
	assert(thread->promotions == 0);
	thread_unlock(thread);
	/* splsched */

	/* Never returns; the continuation panics if ever run. */
	thread_block((thread_continue_t)thread_terminate_continue);
	/*NOTREACHED*/
}
355
/*
 *	thread_deallocate:
 *
 *	Drop a reference on a thread; when the last reference is
 *	released, free all remaining thread resources (IPC state,
 *	uthread, task and pset references, kernel stack, machine
 *	state) and return the structure to the zone.
 */
void
thread_deallocate(
	thread_t			thread)
{
	processor_set_t		pset;
	task_t				task;

	if (thread == THREAD_NULL)
		return;

	/* Non-zero result: other references remain; nothing else to do. */
	if (thread_deallocate_internal(thread) > 0)
		return;

	ipc_thread_terminate(thread);

	task = thread->task;

#ifdef MACH_BSD
	{
		void *ut = thread->uthread;

		/* Clear before freeing so no stale pointer survives. */
		thread->uthread = NULL;
		uthread_free(task, ut, task->bsd_info);
	}
#endif	/* MACH_BSD */

	/* Drop the task reference taken in thread_create_internal. */
	task_deallocate(task);

	pset = thread->processor_set;
	pset_deallocate(pset);

	if (thread->kernel_stack != 0)
		stack_free(thread);

	machine_thread_destroy(thread);

	zfree(thread_zone, thread);
}
0b4e3aa0 394
91447636
A
/*
 *	thread_terminate_daemon:
 *
 *	Perform final clean up for terminating threads.
 *
 *	Runs as a dedicated kernel thread: drains the terminate queue,
 *	rolling each dead thread's CPU time into its task, unlinking it
 *	from task and pset, and dropping the final reference; then waits
 *	for more work.  Queue is protected by thread_terminate_lock,
 *	taken at splsched.
 */
static void
thread_terminate_daemon(void)
{
	thread_t			thread;
	task_t				task;
	processor_set_t		pset;

	(void)splsched();
	simple_lock(&thread_terminate_lock);

	while ((thread = (thread_t)dequeue_head(&thread_terminate_queue)) != THREAD_NULL) {
		/* Drop the spl-protected lock before doing blocking work. */
		simple_unlock(&thread_terminate_lock);
		(void)spllo();

		task = thread->task;

		/* Credit the dead thread's accumulated time to the task. */
		task_lock(task);
		task->total_user_time += timer_grab(&thread->user_timer);
		task->total_system_time += timer_grab(&thread->system_timer);

		queue_remove(&task->threads, thread, thread_t, task_threads);
		task->thread_count--;
		task_unlock(task);

		pset = thread->processor_set;

		pset_lock(pset);
		pset_remove_thread(pset, thread);
		pset_unlock(pset);

		/* Drop the final reference; frees the thread. */
		thread_deallocate(thread);

		/* Re-acquire for the next iteration's dequeue. */
		(void)splsched();
		simple_lock(&thread_terminate_lock);
	}

	/* Queue empty: sleep until thread_terminate_enqueue wakes us. */
	assert_wait((event_t)&thread_terminate_queue, THREAD_UNINT);
	simple_unlock(&thread_terminate_lock);
	/* splsched */

	thread_block((thread_continue_t)thread_terminate_daemon);
	/*NOTREACHED*/
}
443
9bccf70c 444/*
91447636
A
445 * thread_terminate_enqueue:
446 *
447 * Enqueue a terminating thread for final disposition.
448 *
449 * Called at splsched.
9bccf70c 450 */
1c79356b 451void
91447636 452thread_terminate_enqueue(
1c79356b
A
453 thread_t thread)
454{
91447636
A
455 simple_lock(&thread_terminate_lock);
456 enqueue_tail(&thread_terminate_queue, (queue_entry_t)thread);
457 simple_unlock(&thread_terminate_lock);
1c79356b 458
91447636 459 thread_wakeup((event_t)&thread_terminate_queue);
1c79356b
A
460}
461
91447636
A
/*
 *	thread_stack_daemon:
 *
 *	Perform stack allocation as required due to
 *	invoke failures.
 *
 *	Runs as a dedicated kernel thread: for each queued thread,
 *	allocates a kernel stack (may block) and places the thread
 *	back on a run queue.  Queue is protected by thread_stack_lock,
 *	taken at splsched.
 */
static void
thread_stack_daemon(void)
{
	thread_t		thread;

	(void)splsched();
	simple_lock(&thread_stack_lock);

	while ((thread = (thread_t)dequeue_head(&thread_stack_queue)) != THREAD_NULL) {
		simple_unlock(&thread_stack_lock);
		/* splsched */

		/* May block; safe because the queue lock is not held. */
		stack_alloc(thread);

		/* Stack in hand: make the thread runnable again. */
		thread_lock(thread);
		thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
		thread_unlock(thread);
		(void)spllo();

		(void)splsched();
		simple_lock(&thread_stack_lock);
	}

	/* Queue empty: sleep until thread_stack_enqueue wakes us. */
	assert_wait((event_t)&thread_stack_queue, THREAD_UNINT);
	simple_unlock(&thread_stack_lock);
	/* splsched */

	thread_block((thread_continue_t)thread_stack_daemon);
	/*NOTREACHED*/
}
1c79356b
A
498
499/*
91447636 500 * thread_stack_enqueue:
1c79356b 501 *
91447636 502 * Enqueue a thread for stack allocation.
1c79356b 503 *
91447636 504 * Called at splsched.
1c79356b
A
505 */
506void
91447636
A
507thread_stack_enqueue(
508 thread_t thread)
1c79356b 509{
91447636
A
510 simple_lock(&thread_stack_lock);
511 enqueue_tail(&thread_stack_queue, (queue_entry_t)thread);
512 simple_unlock(&thread_stack_lock);
1c79356b 513
91447636
A
514 thread_wakeup((event_t)&thread_stack_queue);
515}
9bccf70c 516
91447636
A
517void
518thread_daemon_init(void)
519{
520 kern_return_t result;
521 thread_t thread;
0b4e3aa0 522
91447636
A
523 simple_lock_init(&thread_terminate_lock, 0);
524 queue_init(&thread_terminate_queue);
1c79356b 525
91447636
A
526 result = kernel_thread_start_priority((thread_continue_t)thread_terminate_daemon, NULL, MINPRI_KERNEL, &thread);
527 if (result != KERN_SUCCESS)
528 panic("thread_daemon_init: thread_terminate_daemon");
1c79356b 529
91447636 530 thread_deallocate(thread);
1c79356b 531
91447636
A
532 simple_lock_init(&thread_stack_lock, 0);
533 queue_init(&thread_stack_queue);
1c79356b 534
91447636
A
535 result = kernel_thread_start_priority((thread_continue_t)thread_stack_daemon, NULL, BASEPRI_PREEMPT, &thread);
536 if (result != KERN_SUCCESS)
537 panic("thread_daemon_init: thread_stack_daemon");
1c79356b 538
91447636 539 thread_deallocate(thread);
1c79356b
A
540}
541
1c79356b
A
/*
 *	Create a new thread.
 *	Doesn't start the thread running.
 *
 *	On KERN_SUCCESS, returns with BOTH the parent task lock and the
 *	processor-set lock still held; callers are responsible for
 *	unlocking them (see thread_create / kernel_thread_create).
 */
static kern_return_t
thread_create_internal(
	task_t					parent_task,
	integer_t				priority,
	thread_continue_t		continuation,
	thread_t				*out_thread)
{
	thread_t				new_thread;
	processor_set_t			pset;
	static thread_t			first_thread;

	/*
	 *	Allocate a thread and initialize static fields
	 */
	/* The very first thread reuses the statically allocated bootstrap
	   thread rather than allocating from the (not yet used) zone.
	   NOTE(review): the failure paths below zfree() unconditionally,
	   which would be wrong for first_thread — presumably unreachable
	   that early in boot; confirm. */
	if (first_thread == NULL)
		new_thread = first_thread = current_thread();
	else
		new_thread = (thread_t)zalloc(thread_zone);
	if (new_thread == NULL)
		return (KERN_RESOURCE_SHORTAGE);

	/* The bootstrap thread was already initialized by thread_bootstrap. */
	if (new_thread != first_thread)
		*new_thread = thread_template;

#ifdef MACH_BSD
	{
		new_thread->uthread = uthread_alloc(parent_task, new_thread);
		if (new_thread->uthread == NULL) {
			zfree(thread_zone, new_thread);
			return (KERN_RESOURCE_SHORTAGE);
		}
	}
#endif	/* MACH_BSD */

	if (machine_thread_create(new_thread, parent_task) != KERN_SUCCESS) {
#ifdef MACH_BSD
		{
			void *ut = new_thread->uthread;

			new_thread->uthread = NULL;
			uthread_free(parent_task, ut, parent_task->bsd_info);
		}
#endif	/* MACH_BSD */
		zfree(thread_zone, new_thread);
		return (KERN_FAILURE);
	}

	new_thread->task = parent_task;

	thread_lock_init(new_thread);
	wake_lock_init(new_thread);

	mutex_init(&new_thread->mutex, 0);

	ipc_thread_init(new_thread);
	queue_init(&new_thread->held_ulocks);
	thread_prof_init(new_thread, parent_task);

	new_thread->continuation = continuation;

	pset = parent_task->processor_set;
	assert(pset == &default_pset);
	pset_lock(pset);

	task_lock(parent_task);
	assert(parent_task->processor_set == pset);

	/* Refuse threads for dying tasks, and enforce the per-task
	   thread limit for everything but the kernel task. */
	if (	!parent_task->active					||
			(parent_task->thread_count >= THREAD_MAX	&&
			 parent_task != kernel_task)) {
		task_unlock(parent_task);
		pset_unlock(pset);

#ifdef MACH_BSD
		{
			void *ut = new_thread->uthread;

			new_thread->uthread = NULL;
			uthread_free(parent_task, ut, parent_task->bsd_info);
		}
#endif	/* MACH_BSD */
		ipc_thread_disable(new_thread);
		ipc_thread_terminate(new_thread);
		machine_thread_destroy(new_thread);
		zfree(thread_zone, new_thread);
		return (KERN_FAILURE);
	}

	/* Reference dropped in thread_deallocate when the thread dies. */
	task_reference_internal(parent_task);

	/* Cache the task's map */
	new_thread->map = parent_task->map;

	/* Chain the thread onto the task's list */
	queue_enter(&parent_task->threads, new_thread, thread_t, task_threads);
	parent_task->thread_count++;

	/* So terminating threads don't need to take the task lock to decrement */
	hw_atomic_add(&parent_task->active_thread_count, 1);

	/* Associate the thread with the processor set */
	pset_add_thread(pset, new_thread);

	timer_call_setup(&new_thread->wait_timer, thread_timer_expire, new_thread);
	timer_call_setup(&new_thread->depress_timer, thread_depress_expire, new_thread);

	/* Set the thread's scheduling parameters */
	if (parent_task != kernel_task)
		new_thread->sched_mode |= TH_MODE_TIMESHARE;
	new_thread->max_priority = parent_task->max_priority;
	new_thread->task_priority = parent_task->priority;
	/* priority < 0 means "inherit the task's priority". */
	new_thread->priority = (priority < 0)? parent_task->priority: priority;
	if (new_thread->priority > new_thread->max_priority)
		new_thread->priority = new_thread->max_priority;
	new_thread->importance =
					new_thread->priority - new_thread->task_priority;
	new_thread->sched_stamp = sched_tick;
	new_thread->pri_shift = new_thread->processor_set->pri_shift;
	compute_priority(new_thread, FALSE);

	new_thread->active = TRUE;

	*out_thread = new_thread;

	{
		/* Emit kdebug trace records identifying the new thread. */
		long	dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4;

		kdbg_trace_data(parent_task->bsd_info, &dbg_arg2);

		KERNEL_DEBUG_CONSTANT(
					TRACEDBG_CODE(DBG_TRACE_DATA, 1) | DBG_FUNC_NONE,
							(vm_address_t)new_thread, dbg_arg2, 0, 0, 0);

		kdbg_trace_string(parent_task->bsd_info,
							&dbg_arg1, &dbg_arg2, &dbg_arg3, &dbg_arg4);

		KERNEL_DEBUG_CONSTANT(
					TRACEDBG_CODE(DBG_TRACE_STRING, 1) | DBG_FUNC_NONE,
							dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, 0);
	}

	return (KERN_SUCCESS);
}
689
/*
 *	thread_create:
 *
 *	Create a suspended user thread in the given task.  The caller
 *	must thread_resume() it before it will run.  Not valid for the
 *	kernel task.
 */
kern_return_t
thread_create(
	task_t				task,
	thread_t			*new_thread)
{
	kern_return_t		result;
	thread_t			thread;

	if (task == TASK_NULL || task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	/* On success this returns with task and pset still locked. */
	result = thread_create_internal(task, -1, (thread_continue_t)thread_bootstrap_return, &thread);
	if (result != KERN_SUCCESS)
		return (result);

	/* New user threads start with one suspension; if the task itself
	   is suspended, hold the thread an extra time to match. */
	thread->user_stop_count = 1;
	thread_hold(thread);
	if (task->suspend_count > 0)
		thread_hold(thread);

	/* Release the locks left held by thread_create_internal. */
	pset_unlock(task->processor_set);
	task_unlock(task);

	*new_thread = thread;

	return (KERN_SUCCESS);
}
717
1c79356b
A
/*
 *	thread_create_running:
 *
 *	Create a user thread in the given task, set its machine state
 *	from (flavor, new_state, new_state_count), and start it running
 *	immediately.  Not valid for the kernel task.
 */
kern_return_t
thread_create_running(
	register task_t         task,
	int                     flavor,
	thread_state_t          new_state,
	mach_msg_type_number_t  new_state_count,
	thread_t				*new_thread)
{
	register kern_return_t  result;
	thread_t				thread;

	if (task == TASK_NULL || task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	/* On success this returns with task and pset still locked. */
	result = thread_create_internal(task, -1, (thread_continue_t)thread_bootstrap_return, &thread);
	if (result != KERN_SUCCESS)
		return (result);

	result = machine_thread_set_state(
						thread, flavor, new_state, new_state_count);
	if (result != KERN_SUCCESS) {
		/* Unlock first; terminate/deallocate take their own locks. */
		pset_unlock(task->processor_set);
		task_unlock(task);

		thread_terminate(thread);
		thread_deallocate(thread);
		return (result);
	}

	/* Mark started and release it from its initial wait. */
	thread_mtx_lock(thread);
	clear_wait(thread, THREAD_AWAKENED);
	thread->started = TRUE;
	thread_mtx_unlock(thread);
	pset_unlock(task->processor_set);
	task_unlock(task);

	*new_thread = thread;

	return (result);
}
758
/*
 *	kernel_thread_create:
 *
 *	Create a thread in the kernel task
 *	to execute in kernel context.
 *
 *	The thread is created stopped; use kernel_thread_start_priority
 *	(or clear its wait directly) to run it.
 */
kern_return_t
kernel_thread_create(
	thread_continue_t	continuation,
	void				*parameter,
	integer_t			priority,
	thread_t			*new_thread)
{
	kern_return_t		result;
	thread_t			thread;
	task_t				task = kernel_task;

	/* On success this returns with task and pset still locked. */
	result = thread_create_internal(task, priority, continuation, &thread);
	if (result != KERN_SUCCESS)
		return (result);

	pset_unlock(task->processor_set);
	task_unlock(task);

#if !defined(i386)
	/* Non-i386: bind a permanent kernel stack to the thread now,
	   so it can never be caught stackless. */
	stack_alloc(thread);
	assert(thread->kernel_stack != 0);
	thread->reserved_stack = thread->kernel_stack;
#endif /* !defined(i386) */

	thread->parameter = parameter;

	*new_thread = thread;

	return (result);
}
795
91447636
A
796kern_return_t
797kernel_thread_start_priority(
798 thread_continue_t continuation,
799 void *parameter,
800 integer_t priority,
801 thread_t *new_thread)
55e303ae 802{
91447636 803 kern_return_t result;
55e303ae 804 thread_t thread;
1c79356b 805
91447636
A
806 result = kernel_thread_create(continuation, parameter, priority, &thread);
807 if (result != KERN_SUCCESS)
808 return (result);
1c79356b 809
91447636 810 thread_mtx_lock(thread);
55e303ae
A
811 clear_wait(thread, THREAD_AWAKENED);
812 thread->started = TRUE;
91447636 813 thread_mtx_unlock(thread);
1c79356b 814
91447636
A
815 *new_thread = thread;
816
817 return (result);
818}
819
820kern_return_t
821kernel_thread_start(
822 thread_continue_t continuation,
823 void *parameter,
824 thread_t *new_thread)
825{
826 return kernel_thread_start_priority(continuation, parameter, -1, new_thread);
1c79356b
A
827}
828
829thread_t
830kernel_thread(
831 task_t task,
832 void (*start)(void))
833{
91447636
A
834 kern_return_t result;
835 thread_t thread;
836
55e303ae
A
837 if (task != kernel_task)
838 panic("kernel_thread");
839
91447636
A
840 result = kernel_thread_start_priority((thread_continue_t)start, NULL, -1, &thread);
841 if (result != KERN_SUCCESS)
842 return (THREAD_NULL);
1c79356b 843
91447636 844 thread_deallocate(thread);
1c79356b 845
91447636 846 return (thread);
1c79356b
A
847}
848
/*
 *	thread_info_internal:
 *
 *	Return information about a thread.
 *
 *	flavor selects the record written to thread_info_out:
 *	THREAD_BASIC_INFO, THREAD_SCHED_TIMESHARE_INFO,
 *	THREAD_SCHED_FIFO_INFO (always KERN_INVALID_POLICY here),
 *	or THREAD_SCHED_RR_INFO.  thread_info_count is in/out: it
 *	must be at least the flavor's count on entry and is set to
 *	the count actually written.
 */
kern_return_t
thread_info_internal(
	register thread_t		thread,
	thread_flavor_t			flavor,
	thread_info_t			thread_info_out,	/* ptr to OUT array */
	mach_msg_type_number_t	*thread_info_count)	/*IN/OUT*/
{
	int				state, flags;
	spl_t			s;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (flavor == THREAD_BASIC_INFO) {
		register thread_basic_info_t	basic_info;

		if (*thread_info_count < THREAD_BASIC_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		basic_info = (thread_basic_info_t) thread_info_out;

		s = splsched();
		thread_lock(thread);

		/* fill in info */

		thread_read_times(thread, &basic_info->user_time,
									&basic_info->system_time);

		/*
		 *	Update lazy-evaluated scheduler info because someone wants it.
		 */
		if (thread->sched_stamp != sched_tick)
			update_priority(thread);

		basic_info->sleep_time = 0;

		/*
		 *	To calculate cpu_usage, first correct for timer rate,
		 *	then for 5/8 ageing.  The correction factor [3/5] is
		 *	(1/(5/8) - 1).
		 */
		basic_info->cpu_usage = ((uint64_t)thread->cpu_usage
										* TH_USAGE_SCALE) /	sched_tick_interval;
		basic_info->cpu_usage = (basic_info->cpu_usage * 3) / 5;

		/* Clamp to full scale. */
		if (basic_info->cpu_usage > TH_USAGE_SCALE)
			basic_info->cpu_usage = TH_USAGE_SCALE;

		basic_info->policy = ((thread->sched_mode & TH_MODE_TIMESHARE)?
												POLICY_TIMESHARE: POLICY_RR);

		flags = 0;
		if (thread->state & TH_IDLE)
			flags |= TH_FLAGS_IDLE;

		if (!thread->kernel_stack)
			flags |= TH_FLAGS_SWAPPED;

		/* Map internal state bits to the single exported run state;
		   order of these tests establishes precedence. */
		state = 0;
		if (thread->state & TH_TERMINATE)
			state = TH_STATE_HALTED;
		else
		if (thread->state & TH_RUN)
			state = TH_STATE_RUNNING;
		else
		if (thread->state & TH_UNINT)
			state = TH_STATE_UNINTERRUPTIBLE;
		else
		if (thread->state & TH_SUSP)
			state = TH_STATE_STOPPED;
		else
		if (thread->state & TH_WAIT)
			state = TH_STATE_WAITING;

		basic_info->run_state = state;
		basic_info->flags = flags;

		basic_info->suspend_count = thread->user_stop_count;

		thread_unlock(thread);
		splx(s);

		*thread_info_count = THREAD_BASIC_INFO_COUNT;

		return (KERN_SUCCESS);
	}
	else
	if (flavor == THREAD_SCHED_TIMESHARE_INFO) {
		policy_timeshare_info_t		ts_info;

		if (*thread_info_count < POLICY_TIMESHARE_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		ts_info = (policy_timeshare_info_t)thread_info_out;

		s = splsched();
		thread_lock(thread);

		/* Flavor only valid for timeshare threads. */
		if (!(thread->sched_mode & TH_MODE_TIMESHARE)) {
			thread_unlock(thread);
			splx(s);

			return (KERN_INVALID_POLICY);
		}

		ts_info->depressed = (thread->sched_mode & TH_MODE_ISDEPRESSED) != 0;
		if (ts_info->depressed) {
			ts_info->base_priority = DEPRESSPRI;
			ts_info->depress_priority = thread->priority;
		}
		else {
			ts_info->base_priority = thread->priority;
			ts_info->depress_priority = -1;
		}

		ts_info->cur_priority = thread->sched_pri;
		ts_info->max_priority =	thread->max_priority;

		thread_unlock(thread);
		splx(s);

		*thread_info_count = POLICY_TIMESHARE_INFO_COUNT;

		return (KERN_SUCCESS);
	}
	else
	if (flavor == THREAD_SCHED_FIFO_INFO) {
		if (*thread_info_count < POLICY_FIFO_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		/* FIFO scheduling info is not supported. */
		return (KERN_INVALID_POLICY);
	}
	else
	if (flavor == THREAD_SCHED_RR_INFO) {
		policy_rr_info_t			rr_info;

		if (*thread_info_count < POLICY_RR_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		rr_info = (policy_rr_info_t) thread_info_out;

		s = splsched();
		thread_lock(thread);

		/* Flavor only valid for non-timeshare threads. */
		if (thread->sched_mode & TH_MODE_TIMESHARE) {
			thread_unlock(thread);
			splx(s);

			return (KERN_INVALID_POLICY);
		}

		rr_info->depressed = (thread->sched_mode & TH_MODE_ISDEPRESSED) != 0;
		if (rr_info->depressed) {
			rr_info->base_priority = DEPRESSPRI;
			rr_info->depress_priority = thread->priority;
		}
		else {
			rr_info->base_priority = thread->priority;
			rr_info->depress_priority = -1;
		}

		rr_info->max_priority = thread->max_priority;
		/* Quantum reported in milliseconds. */
		rr_info->quantum = std_quantum_us / 1000;

		thread_unlock(thread);
		splx(s);

		*thread_info_count = POLICY_RR_INFO_COUNT;

		return (KERN_SUCCESS);
	}

	return (KERN_INVALID_ARGUMENT);
}
1024
1025void
91447636
A
1026thread_read_times(
1027 thread_t thread,
1028 time_value_t *user_time,
1029 time_value_t *system_time)
1c79356b 1030{
91447636
A
1031 absolutetime_to_microtime(
1032 timer_grab(&thread->user_timer),
1033 &user_time->seconds, &user_time->microseconds);
9bccf70c 1034
91447636
A
1035 absolutetime_to_microtime(
1036 timer_grab(&thread->system_timer),
1037 &system_time->seconds, &system_time->microseconds);
1c79356b
A
1038}
1039
1040kern_return_t
1041thread_assign(
91447636
A
1042 __unused thread_t thread,
1043 __unused processor_set_t new_pset)
1c79356b 1044{
91447636 1045 return (KERN_FAILURE);
1c79356b
A
1046}
1047
1048/*
1049 * thread_assign_default:
1050 *
1051 * Special version of thread_assign for assigning threads to default
1052 * processor set.
1053 */
1054kern_return_t
1055thread_assign_default(
91447636 1056 thread_t thread)
1c79356b 1057{
91447636 1058 return (thread_assign(thread, &default_pset));
1c79356b
A
1059}
1060
1061/*
1062 * thread_get_assignment
1063 *
1064 * Return current assignment for this thread.
1065 */
1066kern_return_t
1067thread_get_assignment(
91447636 1068 thread_t thread,
1c79356b
A
1069 processor_set_t *pset)
1070{
91447636
A
1071 if (thread == NULL)
1072 return (KERN_INVALID_ARGUMENT);
1073
1c79356b 1074 *pset = thread->processor_set;
1c79356b 1075 pset_reference(*pset);
91447636 1076 return (KERN_SUCCESS);
1c79356b
A
1077}
1078
1079/*
55e303ae 1080 * thread_wire_internal:
1c79356b
A
1081 *
1082 * Specify that the target thread must always be able
1083 * to run and to allocate memory.
1084 */
1085kern_return_t
55e303ae 1086thread_wire_internal(
91447636
A
1087 host_priv_t host_priv,
1088 thread_t thread,
1089 boolean_t wired,
1090 boolean_t *prev_state)
1c79356b 1091{
91447636 1092 if (host_priv == NULL || thread != current_thread())
1c79356b
A
1093 return (KERN_INVALID_ARGUMENT);
1094
1095 assert(host_priv == &realhost);
1096
91447636
A
1097 if (prev_state)
1098 *prev_state = (thread->options & TH_OPT_VMPRIV) != 0;
55e303ae 1099
1c79356b 1100 if (wired) {
91447636 1101 if (!(thread->options & TH_OPT_VMPRIV))
1c79356b 1102 vm_page_free_reserve(1); /* XXX */
91447636
A
1103 thread->options |= TH_OPT_VMPRIV;
1104 }
1105 else {
1106 if (thread->options & TH_OPT_VMPRIV)
1c79356b 1107 vm_page_free_reserve(-1); /* XXX */
91447636 1108 thread->options &= ~TH_OPT_VMPRIV;
1c79356b
A
1109 }
1110
91447636 1111 return (KERN_SUCCESS);
1c79356b
A
1112}
1113
1c79356b
A
1114
1115/*
55e303ae 1116 * thread_wire:
1c79356b 1117 *
55e303ae 1118 * User-api wrapper for thread_wire_internal()
1c79356b 1119 */
55e303ae
A
1120kern_return_t
1121thread_wire(
1122 host_priv_t host_priv,
91447636 1123 thread_t thread,
55e303ae 1124 boolean_t wired)
1c79356b 1125{
91447636 1126 return (thread_wire_internal(host_priv, thread, wired, NULL));
1c79356b
A
1127}
1128
91447636
A
/*
 * Funnel support: lock group and attributes shared by all funnels,
 * lazily initialized on the first funnel_alloc() call.
 */
int split_funnel_off = 0;
lck_grp_t *funnel_lck_grp = LCK_GRP_NULL;	/* LCK_GRP_NULL until first funnel_alloc() */
lck_grp_attr_t *funnel_lck_grp_attr;
lck_attr_t *funnel_lck_attr;
91447636
A
1134funnel_t *
1135funnel_alloc(
1136 int type)
1c79356b 1137{
91447636
A
1138 lck_mtx_t *m;
1139 funnel_t *fnl;
1c79356b 1140
91447636
A
1141 if (funnel_lck_grp == LCK_GRP_NULL) {
1142 funnel_lck_grp_attr = lck_grp_attr_alloc_init();
8f6c56a5 1143 //lck_grp_attr_setstat(funnel_lck_grp_attr);
1c79356b 1144
91447636 1145 funnel_lck_grp = lck_grp_alloc_init("Funnel", funnel_lck_grp_attr);
1c79356b 1146
91447636 1147 funnel_lck_attr = lck_attr_alloc_init();
8f6c56a5 1148 //lck_attr_setdebug(funnel_lck_attr);
1c79356b 1149 }
1c79356b 1150 if ((fnl = (funnel_t *)kalloc(sizeof(funnel_t))) != 0){
0b4e3aa0 1151 bzero((void *)fnl, sizeof(funnel_t));
91447636
A
1152 if ((m = lck_mtx_alloc_init(funnel_lck_grp, funnel_lck_attr)) == (lck_mtx_t *)NULL) {
1153 kfree(fnl, sizeof(funnel_t));
1c79356b
A
1154 return(THR_FUNNEL_NULL);
1155 }
1156 fnl->fnl_mutex = m;
1157 fnl->fnl_type = type;
1158 }
1159 return(fnl);
1160}
1161
1162void
1163funnel_free(
1164 funnel_t * fnl)
1165{
91447636 1166 lck_mtx_free(fnl->fnl_mutex, funnel_lck_grp);
1c79356b 1167 if (fnl->fnl_oldmutex)
91447636
A
1168 lck_mtx_free(fnl->fnl_oldmutex, funnel_lck_grp);
1169 kfree(fnl, sizeof(funnel_t));
1c79356b
A
1170}
1171
1172void
1173funnel_lock(
1174 funnel_t * fnl)
1175{
91447636 1176 lck_mtx_lock(fnl->fnl_mutex);
1c79356b 1177 fnl->fnl_mtxholder = current_thread();
1c79356b
A
1178}
1179
1180void
1181funnel_unlock(
1182 funnel_t * fnl)
1183{
91447636 1184 lck_mtx_unlock(fnl->fnl_mutex);
1c79356b
A
1185 fnl->fnl_mtxrelease = current_thread();
1186}
1187
1188funnel_t *
1189thread_funnel_get(
1190 void)
1191{
1192 thread_t th = current_thread();
1193
1194 if (th->funnel_state & TH_FN_OWNED) {
1195 return(th->funnel_lock);
1196 }
1197 return(THR_FUNNEL_NULL);
1198}
1199
1200boolean_t
1201thread_funnel_set(
1202 funnel_t * fnl,
1203 boolean_t funneled)
1204{
1205 thread_t cur_thread;
1206 boolean_t funnel_state_prev;
1207 boolean_t intr;
1208
1209 cur_thread = current_thread();
1210 funnel_state_prev = ((cur_thread->funnel_state & TH_FN_OWNED) == TH_FN_OWNED);
1211
1212 if (funnel_state_prev != funneled) {
1213 intr = ml_set_interrupts_enabled(FALSE);
1214
1215 if (funneled == TRUE) {
1216 if (cur_thread->funnel_lock)
1217 panic("Funnel lock called when holding one %x", cur_thread->funnel_lock);
1218 KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE,
1219 fnl, 1, 0, 0, 0);
1220 funnel_lock(fnl);
1221 KERNEL_DEBUG(0x6032434 | DBG_FUNC_NONE,
1222 fnl, 1, 0, 0, 0);
1223 cur_thread->funnel_state |= TH_FN_OWNED;
1224 cur_thread->funnel_lock = fnl;
1225 } else {
1226 if(cur_thread->funnel_lock->fnl_mutex != fnl->fnl_mutex)
1227 panic("Funnel unlock when not holding funnel");
1228 cur_thread->funnel_state &= ~TH_FN_OWNED;
1229 KERNEL_DEBUG(0x603242c | DBG_FUNC_NONE,
1230 fnl, 1, 0, 0, 0);
1231
1232 cur_thread->funnel_lock = THR_FUNNEL_NULL;
1233 funnel_unlock(fnl);
1234 }
1235 (void)ml_set_interrupts_enabled(intr);
1236 } else {
1237 /* if we are trying to acquire funnel recursively
1238 * check for funnel to be held already
1239 */
1240 if (funneled && (fnl->fnl_mutex != cur_thread->funnel_lock->fnl_mutex)) {
1241 panic("thread_funnel_set: already holding a different funnel");
1242 }
1243 }
1244 return(funnel_state_prev);
1245}
1246
1c79356b 1247
91447636
A
1248/*
1249 * Export routines to other components for things that are done as macros
1250 * within the osfmk component.
1251 */
1c79356b 1252
91447636
A
1253#undef thread_reference
1254void thread_reference(thread_t thread);
1c79356b 1255void
91447636
A
1256thread_reference(
1257 thread_t thread)
1c79356b 1258{
91447636
A
1259 if (thread != THREAD_NULL)
1260 thread_reference_internal(thread);
1c79356b
A
1261}
1262
1c79356b 1263#undef thread_should_halt
91447636 1264
1c79356b
A
1265boolean_t
1266thread_should_halt(
55e303ae 1267 thread_t th)
1c79356b 1268{
91447636 1269 return (thread_should_halt_fast(th));
55e303ae 1270}