1/*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22/*
23 * @OSF_FREE_COPYRIGHT@
24 */
25/*
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
28 * All Rights Reserved.
29 *
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
35 *
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 *
40 * Carnegie Mellon requests users of this software to return to
41 *
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
46 *
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
49 */
50/*
51 */
52/*
53 * File: kern/thread.c
54 * Author: Avadis Tevanian, Jr., Michael Wayne Young, David Golub
55 * Date: 1986
56 *
57 * Thread/thread_shuttle management primitives implementation.
58 */
59/*
60 * Copyright (c) 1993 The University of Utah and
61 * the Computer Systems Laboratory (CSL). All rights reserved.
62 *
63 * Permission to use, copy, modify and distribute this software and its
64 * documentation is hereby granted, provided that both the copyright
65 * notice and this permission notice appear in all copies of the
66 * software, derivative works or modified versions, and any portions
67 * thereof, and that both notices appear in supporting documentation.
68 *
69 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
70 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
71 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
72 *
73 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
74 * improvements that they make and grant CSL redistribution rights.
75 *
76 */
77
78#include <cpus.h>
79#include <mach_host.h>
80#include <simple_clock.h>
81#include <mach_debug.h>
82#include <mach_prof.h>
83
84#include <mach/boolean.h>
85#include <mach/policy.h>
86#include <mach/thread_info.h>
87#include <mach/thread_special_ports.h>
88#include <mach/thread_status.h>
89#include <mach/time_value.h>
90#include <mach/vm_param.h>
91#include <kern/ast.h>
92#include <kern/cpu_data.h>
93#include <kern/counters.h>
94#include <kern/etap_macros.h>
95#include <kern/ipc_mig.h>
96#include <kern/ipc_tt.h>
97#include <kern/mach_param.h>
98#include <kern/machine.h>
99#include <kern/misc_protos.h>
100#include <kern/processor.h>
101#include <kern/queue.h>
102#include <kern/sched.h>
103#include <kern/sched_prim.h>
104#include <kern/mk_sp.h> /*** ??? fix so this can be removed ***/
105#include <kern/task.h>
106#include <kern/thread.h>
107#include <kern/thread_act.h>
108#include <kern/thread_swap.h>
109#include <kern/host.h>
110#include <kern/zalloc.h>
111#include <vm/vm_kern.h>
112#include <ipc/ipc_kmsg.h>
113#include <ipc/ipc_port.h>
114#include <machine/thread.h> /* for MACHINE_STACK */
115#include <kern/profile.h>
116#include <kern/assert.h>
117#include <sys/kdebug.h>
118
119/*
120 * Exported interfaces
121 */
122
123#include <mach/thread_act_server.h>
124#include <mach/mach_host_server.h>
125
126static struct zone *thread_zone;
127
128static queue_head_t reaper_queue;
129decl_simple_lock_data(static,reaper_lock)
130
131extern int tick;
132
133/* private */
134static struct thread thread_template, init_thread;
135
136#if MACH_DEBUG
137
138#ifdef MACHINE_STACK
139extern void stack_statistics(
140 unsigned int *totalp,
141 vm_size_t *maxusagep);
142#endif /* MACHINE_STACK */
143#endif /* MACH_DEBUG */
144
145#ifdef MACHINE_STACK
146/*
147 * Machine-dependent code must define:
148 * stack_alloc_try
149 * stack_alloc
150 * stack_free
151 * stack_free_stack
152 * stack_collect
153 * and if MACH_DEBUG:
154 * stack_statistics
155 */
156#else /* MACHINE_STACK */
157/*
158 * We allocate stacks from generic kernel VM.
159 * Machine-dependent code must define:
160 * machine_kernel_stack_init
161 *
162 * The stack_free_list can only be accessed at splsched,
163 * because stack_alloc_try/thread_invoke operate at splsched.
164 */
165
166decl_simple_lock_data(static,stack_lock_data)
167#define stack_lock() simple_lock(&stack_lock_data)
168#define stack_unlock() simple_unlock(&stack_lock_data)
169
170static vm_map_t stack_map;
171static vm_offset_t stack_free_list;
172
173static vm_offset_t stack_free_cache[NCPUS];
174
175unsigned int stack_free_max = 0;
176unsigned int stack_free_count = 0; /* splsched only */
177unsigned int stack_free_limit = 1; /* Arbitrary */
178
179unsigned int stack_cache_hits = 0; /* debugging */
180
181unsigned int stack_alloc_hits = 0; /* debugging */
182unsigned int stack_alloc_misses = 0; /* debugging */
183
184unsigned int stack_alloc_total = 0;
185unsigned int stack_alloc_hiwater = 0;
186unsigned int stack_alloc_bndry = 0;
187
188
189/*
190 * The next field is at the base of the stack,
191 * so the low end is left unsullied.
192 */
193
194#define stack_next(stack) (*((vm_offset_t *)((stack) + KERNEL_STACK_SIZE) - 1))
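/*
 * Illustrative sketch only (not compiled): the free list is threaded
 * through the stacks themselves via stack_next(), so walking it looks
 * like this (caller holds stack_lock() at splsched):
 */
#if 0
	vm_offset_t s;

	for (s = stack_free_list; s != 0; s = stack_next(s))
		;	/* each s is the base address of a free kernel stack */
#endif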
195
196/*
197 * stack_alloc:
198 *
199 * Allocate a kernel stack for a thread.
200 * May block.
201 */
202vm_offset_t
203stack_alloc(
204 thread_t thread,
205 void (*start_pos)(thread_t))
206{
207 vm_offset_t stack = thread->kernel_stack;
208 spl_t s;
209
210 if (stack)
211 return (stack);
212
213 s = splsched();
214 stack_lock();
215 stack = stack_free_list;
216 if (stack != 0) {
217 stack_free_list = stack_next(stack);
218 stack_free_count--;
219 }
220 stack_unlock();
221 splx(s);
222
223 if (stack != 0) {
224 machine_stack_attach(thread, stack, start_pos);
225 return (stack);
226 }
227
228 if (kernel_memory_allocate(
229 stack_map, &stack,
230 KERNEL_STACK_SIZE, stack_alloc_bndry - 1,
231 KMA_KOBJECT) != KERN_SUCCESS)
232 panic("stack_alloc: no space left for stack maps");
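	/*
	 * Note: the (stack_alloc_bndry - 1) argument is an alignment mask,
	 * so the allocation above is aligned to stack_alloc_bndry, the
	 * power of two computed in thread_init() just above the stack size.
	 */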
233
234 stack_alloc_total++;
235 if (stack_alloc_total > stack_alloc_hiwater)
236 stack_alloc_hiwater = stack_alloc_total;
237
238 machine_stack_attach(thread, stack, start_pos);
239 return (stack);
240}
241
242/*
243 * stack_free:
244 *
245 * Free a kernel stack.
246 */
247
248void
249stack_free(
250 thread_t thread)
251{
252 vm_offset_t stack = machine_stack_detach(thread);
253
254 assert(stack);
255 if (stack != thread->reserved_stack) {
256 spl_t s = splsched();
257 vm_offset_t *cache;
258
259 cache = &stack_free_cache[cpu_number()];
260 if (*cache == 0) {
261 *cache = stack;
262 splx(s);
263
264 return;
265 }
266
267 stack_lock();
268 stack_next(stack) = stack_free_list;
269 stack_free_list = stack;
270 if (++stack_free_count > stack_free_max)
271 stack_free_max = stack_free_count;
272 stack_unlock();
273 splx(s);
274 }
275}
276
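/*
 * stack_free_stack:
 *
 * Free a kernel stack directly (no thread); same per-processor
 * one-entry cache and global free-list path as stack_free() above.
 */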
277void
278stack_free_stack(
279 vm_offset_t stack)
280{
281 spl_t s = splsched();
282 vm_offset_t *cache;
283
284 cache = &stack_free_cache[cpu_number()];
285 if (*cache == 0) {
286 *cache = stack;
287 splx(s);
288
289 return;
290 }
291
292 stack_lock();
293 stack_next(stack) = stack_free_list;
294 stack_free_list = stack;
295 if (++stack_free_count > stack_free_max)
296 stack_free_max = stack_free_count;
297 stack_unlock();
298 splx(s);
299}
300
301/*
302 * stack_collect:
303 *
304 * Free excess kernel stacks.
305 * May block.
306 */
307
308void
309stack_collect(void)
310{
311 spl_t s = splsched();
312
313 stack_lock();
314 while (stack_free_count > stack_free_limit) {
315 vm_offset_t stack = stack_free_list;
316
317 stack_free_list = stack_next(stack);
318 stack_free_count--;
319 stack_unlock();
320 splx(s);
321
322 if (vm_map_remove(
323 stack_map, stack, stack + KERNEL_STACK_SIZE,
324 VM_MAP_REMOVE_KUNWIRE) != KERN_SUCCESS)
325 panic("stack_collect: vm_map_remove failed");
326
327 s = splsched();
328 stack_lock();
329 stack_alloc_total--;
330 }
331 stack_unlock();
332 splx(s);
333}
334
335/*
336 * stack_alloc_try:
337 *
338 * Non-blocking attempt to allocate a kernel stack.
339 * Called at splsched with the thread locked.
340 */
341
342boolean_t stack_alloc_try(
343 thread_t thread,
344 void (*start)(thread_t))
345{
346 register vm_offset_t stack, *cache;
347
348 cache = &stack_free_cache[cpu_number()];
 349	if ((stack = *cache) != 0) {
350 *cache = 0;
351 machine_stack_attach(thread, stack, start);
352 stack_cache_hits++;
353
354 return (TRUE);
355 }
356
357 stack_lock();
358 stack = stack_free_list;
359 if (stack != (vm_offset_t)0) {
360 stack_free_list = stack_next(stack);
361 stack_free_count--;
362 }
363 stack_unlock();
364
365 if (stack == 0)
366 stack = thread->reserved_stack;
367
368 if (stack != 0) {
369 machine_stack_attach(thread, stack, start);
370 stack_alloc_hits++;
371
372 return (TRUE);
373 }
374 else {
375 stack_alloc_misses++;
376
377 return (FALSE);
378 }
379}
380
381#if MACH_DEBUG
382/*
383 * stack_statistics:
384 *
385 * Return statistics on cached kernel stacks.
386 * *maxusagep must be initialized by the caller.
387 */
388
389void
390stack_statistics(
391 unsigned int *totalp,
392 vm_size_t *maxusagep)
393{
394 spl_t s;
395
396 s = splsched();
397 stack_lock();
398
399 *totalp = stack_free_count;
400 *maxusagep = 0;
401
402 stack_unlock();
403 splx(s);
404}
405#endif /* MACH_DEBUG */
406
407#endif /* MACHINE_STACK */
408
409
void
410stack_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
 411	vm_size_t *alloc_size, int *collectable, int *exhaustable)
412{
413 *count = stack_alloc_total - stack_free_count;
414 *cur_size = KERNEL_STACK_SIZE * stack_alloc_total;
415 *max_size = KERNEL_STACK_SIZE * stack_alloc_hiwater;
416 *elem_size = KERNEL_STACK_SIZE;
417 *alloc_size = KERNEL_STACK_SIZE;
418 *collectable = 1;
419 *exhaustable = 0;
420}
421
422void
423stack_privilege(
424 register thread_t thread)
425{
426 /* OBSOLETE */
427}
428
429void
430thread_bootstrap(void)
431{
432 /*
433 * Fill in a template thread for fast initialization.
434 */
435
436 thread_template.runq = RUN_QUEUE_NULL;
437
438 thread_template.ref_count = 1;
439
440 thread_template.reason = AST_NONE;
441 thread_template.at_safe_point = FALSE;
442 thread_template.wait_event = NO_EVENT64;
443 thread_template.wait_queue = WAIT_QUEUE_NULL;
444 thread_template.wait_result = THREAD_WAITING;
445 thread_template.interrupt_level = THREAD_ABORTSAFE;
446 thread_template.state = TH_STACK_HANDOFF | TH_WAIT | TH_UNINT;
447 thread_template.wake_active = FALSE;
448 thread_template.active_callout = FALSE;
449 thread_template.continuation = (void (*)(void))0;
450 thread_template.top_act = THR_ACT_NULL;
451
452 thread_template.importance = 0;
453 thread_template.sched_mode = 0;
454 thread_template.safe_mode = 0;
455
456 thread_template.priority = 0;
457 thread_template.sched_pri = 0;
458 thread_template.max_priority = 0;
459 thread_template.task_priority = 0;
460 thread_template.promotions = 0;
461 thread_template.pending_promoter_index = 0;
462 thread_template.pending_promoter[0] =
463 thread_template.pending_promoter[1] = NULL;
464
465 thread_template.realtime.deadline = UINT64_MAX;
466
467 thread_template.current_quantum = 0;
468
469 thread_template.computation_metered = 0;
470 thread_template.computation_epoch = 0;
471
472 thread_template.cpu_usage = 0;
473 thread_template.cpu_delta = 0;
474 thread_template.sched_usage = 0;
475 thread_template.sched_delta = 0;
476 thread_template.sched_stamp = 0;
477 thread_template.sleep_stamp = 0;
478 thread_template.safe_release = 0;
479
480 thread_template.bound_processor = PROCESSOR_NULL;
481 thread_template.last_processor = PROCESSOR_NULL;
482 thread_template.last_switch = 0;
483
484 thread_template.vm_privilege = FALSE;
485
486 timer_init(&(thread_template.user_timer));
487 timer_init(&(thread_template.system_timer));
488 thread_template.user_timer_save.low = 0;
489 thread_template.user_timer_save.high = 0;
490 thread_template.system_timer_save.low = 0;
491 thread_template.system_timer_save.high = 0;
492
493 thread_template.processor_set = PROCESSOR_SET_NULL;
494
495 thread_template.act_ref_count = 2;
496
497 thread_template.special_handler.handler = special_handler;
498 thread_template.special_handler.next = 0;
499
500#if MACH_HOST
501 thread_template.may_assign = TRUE;
502 thread_template.assign_active = FALSE;
503#endif /* MACH_HOST */
504 thread_template.funnel_lock = THR_FUNNEL_NULL;
505 thread_template.funnel_state = 0;
506#if MACH_LDEBUG
507 thread_template.mutex_count = 0;
508#endif /* MACH_LDEBUG */
509
510 init_thread = thread_template;
511
512 init_thread.top_act = &init_thread;
513 init_thread.thread = &init_thread;
514 machine_thread_set_current(&init_thread);
515}
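/*
 * Note: thread_create_internal() consumes the template filled in above by
 * starting each new thread as a copy:
 *
 *	*new_thread = thread_template;
 *
 * so any field not explicitly set there inherits its thread_bootstrap() value.
 */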
516
517void
518thread_init(void)
519{
520 kern_return_t ret;
521 unsigned int stack;
522
523 thread_zone = zinit(
524 sizeof(struct thread),
525 THREAD_MAX * sizeof(struct thread),
526 THREAD_CHUNK * sizeof(struct thread),
527 "threads");
528
529 /*
530 * Initialize other data structures used in
531 * this module.
532 */
533
534 queue_init(&reaper_queue);
535 simple_lock_init(&reaper_lock, ETAP_THREAD_REAPER);
536
537#ifndef MACHINE_STACK
538 simple_lock_init(&stack_lock_data, ETAP_THREAD_STACK); /* Initialize the stack lock */
539
540 if (KERNEL_STACK_SIZE < round_page_32(KERNEL_STACK_SIZE)) { /* Kernel stacks must be multiples of pages */
541 panic("thread_init: kernel stack size (%08X) must be a multiple of page size (%08X)\n",
542 KERNEL_STACK_SIZE, PAGE_SIZE);
543 }
544
545 for(stack_alloc_bndry = PAGE_SIZE; stack_alloc_bndry <= KERNEL_STACK_SIZE; stack_alloc_bndry <<= 1); /* Find next power of 2 above stack size */
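	/*
	 * Worked example (sizes are machine-dependent): with a 4096-byte page
	 * and a 16384-byte kernel stack the loop runs 4096 -> 8192 -> 16384
	 * -> 32768 and stops, leaving stack_alloc_bndry = 32768, the smallest
	 * power of two strictly greater than KERNEL_STACK_SIZE.
	 */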
546
 547	ret = kmem_suballoc(kernel_map,		/* Suballocate from the kernel map */
 549		&stack,
550 (stack_alloc_bndry * (2*THREAD_MAX + 64)), /* Allocate enough for all of it */
551 FALSE, /* Say not pageable so that it is wired */
552 TRUE, /* Allocate from anywhere */
553 &stack_map); /* Allocate a submap */
554
555 if(ret != KERN_SUCCESS) { /* Did we get one? */
556 panic("thread_init: kmem_suballoc for stacks failed - ret = %d\n", ret); /* Die */
557 }
558 stack = vm_map_min(stack_map); /* Make sure we skip the first hunk */
559 ret = vm_map_enter(stack_map, &stack, PAGE_SIZE, 0, /* Make sure there is nothing at the start */
560 0, /* Force it at start */
561 VM_OBJECT_NULL, 0, /* No object yet */
562 FALSE, /* No copy */
563 VM_PROT_NONE, /* Allow no access */
564 VM_PROT_NONE, /* Allow no access */
565 VM_INHERIT_DEFAULT); /* Just be normal */
566
567 if(ret != KERN_SUCCESS) { /* Did it work? */
568 panic("thread_init: dummy alignment allocation failed; ret = %d\n", ret);
569 }
570
571#endif /* MACHINE_STACK */
572
573 /*
574 * Initialize any machine-dependent
575 * per-thread structures necessary.
576 */
577 machine_thread_init();
578}
579
580/*
581 * Called at splsched.
582 */
583void
584thread_reaper_enqueue(
585 thread_t thread)
586{
587 simple_lock(&reaper_lock);
588 enqueue_tail(&reaper_queue, (queue_entry_t)thread);
589 simple_unlock(&reaper_lock);
590
591 thread_wakeup((event_t)&reaper_queue);
592}
593
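/*
 * Continuation installed by thread_terminate_self() for its final
 * thread_block(); a terminating thread must never be resumed, so
 * running this continuation indicates a scheduling error.
 */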
594void
595thread_termination_continue(void)
596{
597 panic("thread_termination_continue");
598 /*NOTREACHED*/
599}
600
601/*
602 * Routine: thread_terminate_self
603 *
604 * This routine is called by a thread which has unwound from
 605 * its current RPC and kernel contexts and found that its
606 * root activation has been marked for extinction. This lets
607 * it clean up the last few things that can only be cleaned
608 * up in this context and then impale itself on the reaper
609 * queue.
610 *
611 * When the reaper gets the thread, it will deallocate the
612 * thread_act's reference on itself, which in turn will release
613 * its own reference on this thread. By doing things in that
614 * order, a thread_act will always have a valid thread - but the
615 * thread may persist beyond having a thread_act (but must never
616 * run like that).
617 */
618void
619thread_terminate_self(void)
620{
621 thread_act_t thr_act = current_act();
622 thread_t thread;
623 task_t task = thr_act->task;
624 long active_acts;
625 spl_t s;
626
627 /*
628 * We should be at the base of the inheritance chain.
629 */
630 thread = act_lock_thread(thr_act);
631 assert(thr_act->thread == thread);
632
633 /* This will allow no more control ops on this thr_act. */
634 ipc_thr_act_disable(thr_act);
635
 636	/* Clean up any ulocks that are still owned by the thread
637 * activation (acquired but not released or handed-off).
638 */
639 act_ulock_release_all(thr_act);
640
641 act_unlock_thread(thr_act);
642
643 _mk_sp_thread_depress_abort(thread, TRUE);
644
645 /*
646 * Check to see if this is the last active activation. By
647 * this we mean the last activation to call thread_terminate_self.
648 * If so, and the task is associated with a BSD process, we
649 * need to call BSD and let them clean up.
650 */
651 active_acts = hw_atomic_sub(&task->active_thread_count, 1);
652
653 if (active_acts == 0 && task->bsd_info)
654 proc_exit(task->bsd_info);
655
656 /* JMM - for now, no migration */
657 assert(!thr_act->lower);
658
659 thread_timer_terminate();
660
661 ipc_thread_terminate(thread);
662
663 s = splsched();
664 thread_lock(thread);
665 thread->state |= TH_TERMINATE;
666 assert((thread->state & TH_UNINT) == 0);
667 thread_mark_wait_locked(thread, THREAD_UNINT);
668 assert(thread->promotions == 0);
669 thread_unlock(thread);
670 /* splx(s); */
671
672 ETAP_SET_REASON(thread, BLOCKED_ON_TERMINATION);
673 thread_block(thread_termination_continue);
674 /*NOTREACHED*/
675}
676
677/*
 678 * Create a new thread.
 679 * Doesn't start the thread running.  On success, returns with the
 * parent task and its processor set still locked; the caller is
 * responsible for unlocking both.
680 */
681static kern_return_t
682thread_create_internal(
683 task_t parent_task,
684 integer_t priority,
685 void (*start)(void),
686 thread_t *out_thread)
687{
688 thread_t new_thread;
689 processor_set_t pset;
690 static thread_t first_thread;
691
692 /*
693 * Allocate a thread and initialize static fields
694 */
695 if (first_thread == NULL)
696 new_thread = first_thread = current_act();
697 else
698 new_thread = (thread_t)zalloc(thread_zone);
699 if (new_thread == NULL)
700 return (KERN_RESOURCE_SHORTAGE);
701
702 if (new_thread != first_thread)
703 *new_thread = thread_template;
704
705#ifdef MACH_BSD
706 {
707 extern void *uthread_alloc(task_t, thread_act_t);
708
709 new_thread->uthread = uthread_alloc(parent_task, new_thread);
710 if (new_thread->uthread == NULL) {
711 zfree(thread_zone, (vm_offset_t)new_thread);
712 return (KERN_RESOURCE_SHORTAGE);
713 }
714 }
715#endif /* MACH_BSD */
716
717 if (machine_thread_create(new_thread, parent_task) != KERN_SUCCESS) {
718#ifdef MACH_BSD
719 {
720 extern void uthread_free(task_t, void *, void *, void *);
721 void *ut = new_thread->uthread;
722
723 new_thread->uthread = NULL;
724 uthread_free(parent_task, (void *)new_thread, ut, parent_task->bsd_info);
725 }
726#endif /* MACH_BSD */
727 zfree(thread_zone, (vm_offset_t)new_thread);
728 return (KERN_FAILURE);
729 }
730
731 new_thread->task = parent_task;
732
733 thread_lock_init(new_thread);
734 wake_lock_init(new_thread);
735
736 mutex_init(&new_thread->lock, ETAP_THREAD_ACT);
737
738 ipc_thr_act_init(parent_task, new_thread);
739
740 ipc_thread_init(new_thread);
741 queue_init(&new_thread->held_ulocks);
742 act_prof_init(new_thread, parent_task);
743
744 new_thread->continuation = start;
745 new_thread->sleep_stamp = sched_tick;
746
747 pset = parent_task->processor_set;
748 assert(pset == &default_pset);
749 pset_lock(pset);
750
751 task_lock(parent_task);
752 assert(parent_task->processor_set == pset);
753
754 if ( !parent_task->active ||
755 (parent_task->thread_count >= THREAD_MAX &&
756 parent_task != kernel_task)) {
757 task_unlock(parent_task);
758 pset_unlock(pset);
759
760#ifdef MACH_BSD
761 {
762 extern void uthread_free(task_t, void *, void *, void *);
763 void *ut = new_thread->uthread;
764
765 new_thread->uthread = NULL;
766 uthread_free(parent_task, (void *)new_thread, ut, parent_task->bsd_info);
767 }
768#endif /* MACH_BSD */
769 act_prof_deallocate(new_thread);
770 ipc_thr_act_terminate(new_thread);
771 machine_thread_destroy(new_thread);
772 zfree(thread_zone, (vm_offset_t) new_thread);
773 return (KERN_FAILURE);
774 }
775
776 act_attach(new_thread, new_thread);
777
778 task_reference_locked(parent_task);
779
780 /* Cache the task's map */
781 new_thread->map = parent_task->map;
782
783 /* Chain the thread onto the task's list */
784 queue_enter(&parent_task->threads, new_thread, thread_act_t, task_threads);
785 parent_task->thread_count++;
786 parent_task->res_thread_count++;
787
788 /* So terminating threads don't need to take the task lock to decrement */
789 hw_atomic_add(&parent_task->active_thread_count, 1);
790
791 /* Associate the thread with the processor set */
792 pset_add_thread(pset, new_thread);
793
794 thread_timer_setup(new_thread);
795
796 /* Set the thread's scheduling parameters */
797 if (parent_task != kernel_task)
798 new_thread->sched_mode |= TH_MODE_TIMESHARE;
799 new_thread->max_priority = parent_task->max_priority;
800 new_thread->task_priority = parent_task->priority;
801 new_thread->priority = (priority < 0)? parent_task->priority: priority;
802 if (new_thread->priority > new_thread->max_priority)
803 new_thread->priority = new_thread->max_priority;
804 new_thread->importance =
805 new_thread->priority - new_thread->task_priority;
806 new_thread->sched_stamp = sched_tick;
807 compute_priority(new_thread, FALSE);
808
809#if ETAP_EVENT_MONITOR
810 new_thread->etap_reason = 0;
811 new_thread->etap_trace = FALSE;
812#endif /* ETAP_EVENT_MONITOR */
813
814 new_thread->active = TRUE;
815
816 *out_thread = new_thread;
817
818 {
819 long dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4;
820
821 kdbg_trace_data(parent_task->bsd_info, &dbg_arg2);
822
823 KERNEL_DEBUG_CONSTANT(
824 TRACEDBG_CODE(DBG_TRACE_DATA, 1) | DBG_FUNC_NONE,
825 (vm_address_t)new_thread, dbg_arg2, 0, 0, 0);
826
827 kdbg_trace_string(parent_task->bsd_info,
828 &dbg_arg1, &dbg_arg2, &dbg_arg3, &dbg_arg4);
829
830 KERNEL_DEBUG_CONSTANT(
831 TRACEDBG_CODE(DBG_TRACE_STRING, 1) | DBG_FUNC_NONE,
832 dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, 0);
833 }
834
835 return (KERN_SUCCESS);
836}
837
838extern void thread_bootstrap_return(void);
839
840kern_return_t
841thread_create(
842 task_t task,
843 thread_act_t *new_thread)
844{
845 kern_return_t result;
846 thread_t thread;
847
848 if (task == TASK_NULL || task == kernel_task)
849 return (KERN_INVALID_ARGUMENT);
850
851 result = thread_create_internal(task, -1, thread_bootstrap_return, &thread);
852 if (result != KERN_SUCCESS)
853 return (result);
854
855 thread->user_stop_count = 1;
856 thread_hold(thread);
857 if (task->suspend_count > 0)
858 thread_hold(thread);
859
860 pset_unlock(task->processor_set);
861 task_unlock(task);
862
863 *new_thread = thread;
864
865 return (KERN_SUCCESS);
866}
867
868kern_return_t
869thread_create_running(
870 register task_t task,
871 int flavor,
872 thread_state_t new_state,
873 mach_msg_type_number_t new_state_count,
874 thread_act_t *new_thread)
875{
876 register kern_return_t result;
877 thread_t thread;
878
879 if (task == TASK_NULL || task == kernel_task)
880 return (KERN_INVALID_ARGUMENT);
881
882 result = thread_create_internal(task, -1, thread_bootstrap_return, &thread);
883 if (result != KERN_SUCCESS)
884 return (result);
885
886 result = machine_thread_set_state(thread, flavor, new_state, new_state_count);
887 if (result != KERN_SUCCESS) {
888 pset_unlock(task->processor_set);
889 task_unlock(task);
890
891 thread_terminate(thread);
892 act_deallocate(thread);
893 return (result);
894 }
895
896 act_lock(thread);
897 clear_wait(thread, THREAD_AWAKENED);
898 thread->started = TRUE;
899 act_unlock(thread);
900 pset_unlock(task->processor_set);
901 task_unlock(task);
902
903 *new_thread = thread;
904
905 return (result);
906}
907
908/*
909 * kernel_thread:
910 *
911 * Create a thread in the kernel task
912 * to execute in kernel context.
913 */
914thread_t
915kernel_thread_create(
916 void (*start)(void),
917 integer_t priority)
918{
919 kern_return_t result;
920 task_t task = kernel_task;
921 thread_t thread;
922
923 result = thread_create_internal(task, priority, start, &thread);
924 if (result != KERN_SUCCESS)
925 return (THREAD_NULL);
926
927 pset_unlock(task->processor_set);
928 task_unlock(task);
929
930 thread_doswapin(thread);
931 assert(thread->kernel_stack != 0);
932 thread->reserved_stack = thread->kernel_stack;
933
934 act_deallocate(thread);
935
936 return (thread);
937}
938
939thread_t
940kernel_thread_with_priority(
941 void (*start)(void),
942 integer_t priority)
943{
944 thread_t thread;
945
946 thread = kernel_thread_create(start, priority);
947 if (thread == THREAD_NULL)
948 return (THREAD_NULL);
949
950 act_lock(thread);
951 clear_wait(thread, THREAD_AWAKENED);
952 thread->started = TRUE;
953 act_unlock(thread);
954
955#ifdef i386
956 thread_bind(thread, master_processor);
957#endif /* i386 */
958 return (thread);
959}
960
961thread_t
962kernel_thread(
963 task_t task,
964 void (*start)(void))
965{
966 if (task != kernel_task)
967 panic("kernel_thread");
968
969 return kernel_thread_with_priority(start, -1);
970}
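#if 0
/*
 * Illustrative sketch only (my_service_loop/my_service_init are
 * hypothetical): how a kernel subsystem typically spawns a service
 * thread; see thread_reaper_init() below for an in-tree example that
 * uses kernel_thread_with_priority() instead.
 */
static void
my_service_loop(void)
{
	for (;;) {
		/* ... wait for and process work ... */
	}
}

void
my_service_init(void)
{
	(void) kernel_thread(kernel_task, my_service_loop);
}
#endif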
971
972unsigned int c_weird_pset_ref_exit = 0; /* pset code raced us */
973
974#if MACH_HOST
975/* Preclude thread processor set assignment */
976#define thread_freeze(thread) assert((thread)->processor_set == &default_pset)
977
978/* Allow thread processor set assignment */
979#define thread_unfreeze(thread) assert((thread)->processor_set == &default_pset)
980
981#endif /* MACH_HOST */
982
983void
984thread_deallocate(
985 thread_t thread)
986{
987 task_t task;
988 processor_set_t pset;
989 int refs;
990 spl_t s;
991
992 if (thread == THREAD_NULL)
993 return;
994
995 /*
996 * First, check for new count > 0 (the common case).
997 * Only the thread needs to be locked.
998 */
999 s = splsched();
1000 thread_lock(thread);
1001 refs = --thread->ref_count;
1002 thread_unlock(thread);
1003 splx(s);
1004
1005 if (refs > 0)
1006 return;
1007
1008 if (thread == current_thread())
1009 panic("thread_deallocate");
1010
1011 /*
1012 * There is a dangling pointer to the thread from the
1013 * processor_set. To clean it up, we freeze the thread
1014 * in the pset (because pset destruction can cause even
1015 * reference-less threads to be reassigned to the default
1016 * pset) and then remove it.
1017 */
1018
1019#if MACH_HOST
1020 thread_freeze(thread);
1021#endif
1022
1023 pset = thread->processor_set;
1024 pset_lock(pset);
1025 pset_remove_thread(pset, thread);
1026 pset_unlock(pset);
1027
1028#if MACH_HOST
1029 thread_unfreeze(thread);
1030#endif
1031
1032 pset_deallocate(pset);
1033
1034 if (thread->reserved_stack != 0) {
1035 if (thread->reserved_stack != thread->kernel_stack)
1036 stack_free_stack(thread->reserved_stack);
1037 thread->reserved_stack = 0;
1038 }
1039
1040 if (thread->kernel_stack != 0)
1041 stack_free(thread);
1042
1043 machine_thread_destroy(thread);
1044
1045 zfree(thread_zone, (vm_offset_t) thread);
1046}
1047
1048void
1049thread_reference(
1050 thread_t thread)
1051{
1052 spl_t s;
1053
1054 if (thread == THREAD_NULL)
1055 return;
1056
1057 s = splsched();
1058 thread_lock(thread);
1059 thread_reference_locked(thread);
1060 thread_unlock(thread);
1061 splx(s);
1062}
1063
1064/*
1065 * Called with "appropriate" thread-related locks held on
1066 * thread and its top_act for synchrony with RPC (see
1067 * act_lock_thread()).
1068 */
1069kern_return_t
1070thread_info_shuttle(
1071 register thread_act_t thr_act,
1072 thread_flavor_t flavor,
1073 thread_info_t thread_info_out, /* ptr to OUT array */
1074 mach_msg_type_number_t *thread_info_count) /*IN/OUT*/
1075{
1076 register thread_t thread = thr_act->thread;
1077 int state, flags;
1078 spl_t s;
1079
1080 if (thread == THREAD_NULL)
1081 return (KERN_INVALID_ARGUMENT);
1082
1083 if (flavor == THREAD_BASIC_INFO) {
1084 register thread_basic_info_t basic_info;
1085
1086 if (*thread_info_count < THREAD_BASIC_INFO_COUNT)
1087 return (KERN_INVALID_ARGUMENT);
1088
1089 basic_info = (thread_basic_info_t) thread_info_out;
1090
1091 s = splsched();
1092 thread_lock(thread);
1093
1094 /* fill in info */
1095
1096 thread_read_times(thread, &basic_info->user_time,
1097 &basic_info->system_time);
1098
1099 /*
1100 * Update lazy-evaluated scheduler info because someone wants it.
1101 */
1102 if (thread->sched_stamp != sched_tick)
1103 update_priority(thread);
1104
1105 basic_info->sleep_time = 0;
1106
1107 /*
1108 * To calculate cpu_usage, first correct for timer rate,
 1109	 * then for 5/8 ageing.  The correction factor [3/5] is
 1110	 * (1/(5/8) - 1) = 8/5 - 1 = 3/5.
1111 */
1112 basic_info->cpu_usage = (thread->cpu_usage << SCHED_TICK_SHIFT) /
1113 (TIMER_RATE / TH_USAGE_SCALE);
1114 basic_info->cpu_usage = (basic_info->cpu_usage * 3) / 5;
1115#if SIMPLE_CLOCK
1116 /*
1117 * Clock drift compensation.
1118 */
1119 basic_info->cpu_usage = (basic_info->cpu_usage * 1000000) / sched_usec;
1120#endif /* SIMPLE_CLOCK */
1121
1122 basic_info->policy = ((thread->sched_mode & TH_MODE_TIMESHARE)?
1123 POLICY_TIMESHARE: POLICY_RR);
1124
1125 flags = 0;
1126 if (thread->state & TH_IDLE)
1127 flags |= TH_FLAGS_IDLE;
1128
1129 if (thread->state & TH_STACK_HANDOFF)
1130 flags |= TH_FLAGS_SWAPPED;
1131
1132 state = 0;
1133 if (thread->state & TH_TERMINATE)
1134 state = TH_STATE_HALTED;
1135 else
1136 if (thread->state & TH_RUN)
1137 state = TH_STATE_RUNNING;
1138 else
1139 if (thread->state & TH_UNINT)
1140 state = TH_STATE_UNINTERRUPTIBLE;
1141 else
1142 if (thread->state & TH_SUSP)
1143 state = TH_STATE_STOPPED;
1144 else
1145 if (thread->state & TH_WAIT)
1146 state = TH_STATE_WAITING;
1147
1148 basic_info->run_state = state;
1149 basic_info->flags = flags;
1150
1151 basic_info->suspend_count = thr_act->user_stop_count;
1152
1153 thread_unlock(thread);
1154 splx(s);
1155
1156 *thread_info_count = THREAD_BASIC_INFO_COUNT;
1157
1158 return (KERN_SUCCESS);
1159 }
1160 else
1161 if (flavor == THREAD_SCHED_TIMESHARE_INFO) {
1162 policy_timeshare_info_t ts_info;
1163
1164 if (*thread_info_count < POLICY_TIMESHARE_INFO_COUNT)
1165 return (KERN_INVALID_ARGUMENT);
1166
1167 ts_info = (policy_timeshare_info_t)thread_info_out;
1168
1169 s = splsched();
1170 thread_lock(thread);
1171
1172 if (!(thread->sched_mode & TH_MODE_TIMESHARE)) {
1173 thread_unlock(thread);
1174 splx(s);
1175
1176 return (KERN_INVALID_POLICY);
1177 }
1178
1179 ts_info->depressed = (thread->sched_mode & TH_MODE_ISDEPRESSED) != 0;
1180 if (ts_info->depressed) {
1181 ts_info->base_priority = DEPRESSPRI;
1182 ts_info->depress_priority = thread->priority;
1183 }
1184 else {
1185 ts_info->base_priority = thread->priority;
1186 ts_info->depress_priority = -1;
1187 }
1188
1189 ts_info->cur_priority = thread->sched_pri;
1190 ts_info->max_priority = thread->max_priority;
1191
1192 thread_unlock(thread);
1193 splx(s);
1194
1195 *thread_info_count = POLICY_TIMESHARE_INFO_COUNT;
1196
1197 return (KERN_SUCCESS);
1198 }
1199 else
1200 if (flavor == THREAD_SCHED_FIFO_INFO) {
1201 if (*thread_info_count < POLICY_FIFO_INFO_COUNT)
1202 return (KERN_INVALID_ARGUMENT);
1203
1204 return (KERN_INVALID_POLICY);
1205 }
1206 else
1207 if (flavor == THREAD_SCHED_RR_INFO) {
1208 policy_rr_info_t rr_info;
1209
1210 if (*thread_info_count < POLICY_RR_INFO_COUNT)
1211 return (KERN_INVALID_ARGUMENT);
1212
1213 rr_info = (policy_rr_info_t) thread_info_out;
1214
1215 s = splsched();
1216 thread_lock(thread);
1217
1218 if (thread->sched_mode & TH_MODE_TIMESHARE) {
1219 thread_unlock(thread);
1220 splx(s);
1221
1222 return (KERN_INVALID_POLICY);
1223 }
1224
1225 rr_info->depressed = (thread->sched_mode & TH_MODE_ISDEPRESSED) != 0;
1226 if (rr_info->depressed) {
1227 rr_info->base_priority = DEPRESSPRI;
1228 rr_info->depress_priority = thread->priority;
1229 }
1230 else {
1231 rr_info->base_priority = thread->priority;
1232 rr_info->depress_priority = -1;
1233 }
1234
1235 rr_info->max_priority = thread->max_priority;
1236 rr_info->quantum = std_quantum_us / 1000;
1237
1238 thread_unlock(thread);
1239 splx(s);
1240
1241 *thread_info_count = POLICY_RR_INFO_COUNT;
1242
1243 return (KERN_SUCCESS);
1244 }
1245
1246 return (KERN_INVALID_ARGUMENT);
1247}
1248
1249void
1250thread_doreap(
1251 register thread_t thread)
1252{
1253 thread_act_t thr_act;
1254
1255
1256 thr_act = thread_lock_act(thread);
1257 assert(thr_act && thr_act->thread == thread);
1258
1259 act_reference_locked(thr_act);
1260
1261 /*
1262 * Replace `act_unlock_thread()' with individual
1263 * calls. (`act_detach()' can change fields used
1264 * to determine which locks are held, confusing
1265 * `act_unlock_thread()'.)
1266 */
1267 act_unlock(thr_act);
1268
1269 /* Remove the reference held by a rooted thread */
1270 act_deallocate(thr_act);
1271
1272 /* Remove the reference held by the thread: */
1273 act_deallocate(thr_act);
1274}
1275
1276/*
1277 * reaper_thread:
1278 *
1279 * This kernel thread runs forever looking for terminating
1280 * threads, releasing their "self" references.
1281 */
1282static void
1283reaper_thread_continue(void)
1284{
1285 register thread_t thread;
1286
1287 (void)splsched();
1288 simple_lock(&reaper_lock);
1289
1290 while ((thread = (thread_t) dequeue_head(&reaper_queue)) != THREAD_NULL) {
1291 simple_unlock(&reaper_lock);
1292 (void)spllo();
1293
1294 thread_doreap(thread);
1295
1296 (void)splsched();
1297 simple_lock(&reaper_lock);
1298 }
1299
1300 assert_wait((event_t)&reaper_queue, THREAD_UNINT);
1301 simple_unlock(&reaper_lock);
1302 (void)spllo();
1303
1304 thread_block(reaper_thread_continue);
1305 /*NOTREACHED*/
1306}
1307
1308static void
1309reaper_thread(void)
1310{
1311 reaper_thread_continue();
1312 /*NOTREACHED*/
1313}
1314
1315void
1316thread_reaper_init(void)
1317{
1318 kernel_thread_with_priority(reaper_thread, MINPRI_KERNEL);
1319}
1320
1321kern_return_t
1322thread_assign(
1323 thread_act_t thr_act,
1324 processor_set_t new_pset)
1325{
1326 return(KERN_FAILURE);
1327}
1328
1329/*
1330 * thread_assign_default:
1331 *
1332 * Special version of thread_assign for assigning threads to default
1333 * processor set.
1334 */
1335kern_return_t
1336thread_assign_default(
1337 thread_act_t thr_act)
1338{
1339 return (thread_assign(thr_act, &default_pset));
1340}
1341
1342/*
1343 * thread_get_assignment
1344 *
1345 * Return current assignment for this thread.
1346 */
1347kern_return_t
1348thread_get_assignment(
1349 thread_act_t thr_act,
1350 processor_set_t *pset)
1351{
1352 thread_t thread;
1353
1354 if (thr_act == THR_ACT_NULL)
1355 return(KERN_INVALID_ARGUMENT);
1356 thread = act_lock_thread(thr_act);
1357 if (thread == THREAD_NULL) {
1358 act_unlock_thread(thr_act);
1359 return(KERN_INVALID_ARGUMENT);
1360 }
1361 *pset = thread->processor_set;
1362 act_unlock_thread(thr_act);
1363 pset_reference(*pset);
1364 return(KERN_SUCCESS);
1365}
1366
1367/*
1368 * thread_wire_internal:
1369 *
1370 * Specify that the target thread must always be able
1371 * to run and to allocate memory.
1372 */
1373kern_return_t
1374thread_wire_internal(
1375 host_priv_t host_priv,
1376 thread_act_t thr_act,
1377 boolean_t wired,
1378 boolean_t *prev_state)
1379{
1380 spl_t s;
1381 thread_t thread;
1382 extern void vm_page_free_reserve(int pages);
1383
1384 if (thr_act == THR_ACT_NULL || host_priv == HOST_PRIV_NULL)
1385 return (KERN_INVALID_ARGUMENT);
1386
1387 assert(host_priv == &realhost);
1388
1389 thread = act_lock_thread(thr_act);
 1390	if (thread == THREAD_NULL) {
1391 act_unlock_thread(thr_act);
1392 return(KERN_INVALID_ARGUMENT);
1393 }
1394
 1395	/*
 1396	 *	This implementation only works for the current thread.
 1397	 */
 1398	if (thr_act != current_act()) {
		act_unlock_thread(thr_act);
 1399		return (KERN_INVALID_ARGUMENT);
	}
1400
1401 s = splsched();
1402 thread_lock(thread);
1403
1404 if (prev_state) {
1405 *prev_state = thread->vm_privilege;
1406 }
1407
1408 if (wired) {
1409 if (thread->vm_privilege == FALSE)
1410 vm_page_free_reserve(1); /* XXX */
1411 thread->vm_privilege = TRUE;
1412 } else {
1413 if (thread->vm_privilege == TRUE)
1414 vm_page_free_reserve(-1); /* XXX */
1415 thread->vm_privilege = FALSE;
1416 }
1417
1418 thread_unlock(thread);
1419 splx(s);
1420 act_unlock_thread(thr_act);
1421
1422 return KERN_SUCCESS;
1423}
1424
1425
1426/*
1427 * thread_wire:
1428 *
1429 * User-api wrapper for thread_wire_internal()
1430 */
1431kern_return_t
1432thread_wire(
1433 host_priv_t host_priv,
1434 thread_act_t thr_act,
1435 boolean_t wired)
1436
1437{
1438 return thread_wire_internal(host_priv, thr_act, wired, NULL);
1439}
1440
1441kern_return_t
1442host_stack_usage(
1443 host_t host,
1444 vm_size_t *reservedp,
1445 unsigned int *totalp,
1446 vm_size_t *spacep,
1447 vm_size_t *residentp,
1448 vm_size_t *maxusagep,
1449 vm_offset_t *maxstackp)
1450{
1451#if !MACH_DEBUG
1452 return KERN_NOT_SUPPORTED;
1453#else
1454 unsigned int total;
1455 vm_size_t maxusage;
1456
1457 if (host == HOST_NULL)
1458 return KERN_INVALID_HOST;
1459
1460 maxusage = 0;
1461
1462 stack_statistics(&total, &maxusage);
1463
1464 *reservedp = 0;
1465 *totalp = total;
1466 *spacep = *residentp = total * round_page_32(KERNEL_STACK_SIZE);
1467 *maxusagep = maxusage;
1468 *maxstackp = 0;
1469 return KERN_SUCCESS;
1470
1471#endif /* MACH_DEBUG */
1472}
1473
1474/*
1475 * Return info on stack usage for threads in a specific processor set
1476 */
1477kern_return_t
1478processor_set_stack_usage(
1479 processor_set_t pset,
1480 unsigned int *totalp,
1481 vm_size_t *spacep,
1482 vm_size_t *residentp,
1483 vm_size_t *maxusagep,
1484 vm_offset_t *maxstackp)
1485{
1486#if !MACH_DEBUG
1487 return KERN_NOT_SUPPORTED;
1488#else
1489 unsigned int total;
1490 vm_size_t maxusage;
1491 vm_offset_t maxstack;
1492
1493 register thread_t *threads;
1494 register thread_t thread;
1495
1496 unsigned int actual; /* this many things */
1497 unsigned int i;
1498
1499 vm_size_t size, size_needed;
1500 vm_offset_t addr;
1501
1502 spl_t s;
1503
1504 if (pset == PROCESSOR_SET_NULL)
1505 return KERN_INVALID_ARGUMENT;
1506
1507 size = 0; addr = 0;
1508
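	/*
	 * Size the snapshot without holding the pset lock across allocation:
	 * guess, allocate, re-check the thread count under the lock, and
	 * retry with a larger buffer if the set grew in the meantime.
	 */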
1509 for (;;) {
1510 pset_lock(pset);
1511 if (!pset->active) {
1512 pset_unlock(pset);
1513 return KERN_INVALID_ARGUMENT;
1514 }
1515
1516 actual = pset->thread_count;
1517
1518 /* do we have the memory we need? */
1519
1520 size_needed = actual * sizeof(thread_t);
1521 if (size_needed <= size)
1522 break;
1523
1524 /* unlock the pset and allocate more memory */
1525 pset_unlock(pset);
1526
1527 if (size != 0)
1528 kfree(addr, size);
1529
1530 assert(size_needed > 0);
1531 size = size_needed;
1532
1533 addr = kalloc(size);
1534 if (addr == 0)
1535 return KERN_RESOURCE_SHORTAGE;
1536 }
1537
1538 /* OK, have memory and the processor_set is locked & active */
1539 s = splsched();
1540 threads = (thread_t *) addr;
1541 for (i = 0, thread = (thread_t) queue_first(&pset->threads);
1542 !queue_end(&pset->threads, (queue_entry_t) thread);
1543 thread = (thread_t) queue_next(&thread->pset_threads)) {
1544 thread_lock(thread);
1545 if (thread->ref_count > 0) {
1546 thread_reference_locked(thread);
1547 threads[i++] = thread;
1548 }
1549 thread_unlock(thread);
1550 }
1551 splx(s);
1552 assert(i <= actual);
1553
1554 /* can unlock processor set now that we have the thread refs */
1555 pset_unlock(pset);
1556
1557 /* calculate maxusage and free thread references */
1558
1559 total = 0;
1560 maxusage = 0;
1561 maxstack = 0;
1562 while (i > 0) {
1563 thread_t thread = threads[--i];
1564
1565 if (thread->kernel_stack != 0)
1566 total++;
1567
1568 thread_deallocate(thread);
1569 }
1570
1571 if (size != 0)
1572 kfree(addr, size);
1573
1574 *totalp = total;
1575 *residentp = *spacep = total * round_page_32(KERNEL_STACK_SIZE);
1576 *maxusagep = maxusage;
1577 *maxstackp = maxstack;
1578 return KERN_SUCCESS;
1579
1580#endif /* MACH_DEBUG */
1581}
1582
1583int split_funnel_off = 0;
1584funnel_t *
1585funnel_alloc(
1586 int type)
1587{
1588 mutex_t *m;
1589 funnel_t * fnl;
1590 if ((fnl = (funnel_t *)kalloc(sizeof(funnel_t))) != 0){
1591 bzero((void *)fnl, sizeof(funnel_t));
1592 if ((m = mutex_alloc(0)) == (mutex_t *)NULL) {
1593 kfree((vm_offset_t)fnl, sizeof(funnel_t));
1594 return(THR_FUNNEL_NULL);
1595 }
1596 fnl->fnl_mutex = m;
1597 fnl->fnl_type = type;
1598 }
1599 return(fnl);
1600}
1601
1602void
1603funnel_free(
1604 funnel_t * fnl)
1605{
1606 mutex_free(fnl->fnl_mutex);
1607 if (fnl->fnl_oldmutex)
1608 mutex_free(fnl->fnl_oldmutex);
1609 kfree((vm_offset_t)fnl, sizeof(funnel_t));
1610}
1611
1612void
1613funnel_lock(
1614 funnel_t * fnl)
1615{
1616 mutex_t * m;
1617
1618 m = fnl->fnl_mutex;
1619restart:
1620 mutex_lock(m);
1621 fnl->fnl_mtxholder = current_thread();
1622 if (split_funnel_off && (m != fnl->fnl_mutex)) {
1623 mutex_unlock(m);
1624 m = fnl->fnl_mutex;
1625 goto restart;
1626 }
1627}
1628
1629void
1630funnel_unlock(
1631 funnel_t * fnl)
1632{
1633 mutex_unlock(fnl->fnl_mutex);
1634 fnl->fnl_mtxrelease = current_thread();
1635}
1636
1637int refunnel_hint_enabled = 0;
1638
1639boolean_t
1640refunnel_hint(
1641 thread_t thread,
1642 wait_result_t wresult)
1643{
1644 if ( !(thread->funnel_state & TH_FN_REFUNNEL) ||
1645 wresult != THREAD_AWAKENED )
1646 return (FALSE);
1647
1648 if (!refunnel_hint_enabled)
1649 return (FALSE);
1650
1651 return (mutex_preblock(thread->funnel_lock->fnl_mutex, thread));
1652}
1653
1654funnel_t *
1655thread_funnel_get(
1656 void)
1657{
1658 thread_t th = current_thread();
1659
1660 if (th->funnel_state & TH_FN_OWNED) {
1661 return(th->funnel_lock);
1662 }
1663 return(THR_FUNNEL_NULL);
1664}
1665
1666boolean_t
1667thread_funnel_set(
1668 funnel_t * fnl,
1669 boolean_t funneled)
1670{
1671 thread_t cur_thread;
1672 boolean_t funnel_state_prev;
1673 boolean_t intr;
1674
1675 cur_thread = current_thread();
1676 funnel_state_prev = ((cur_thread->funnel_state & TH_FN_OWNED) == TH_FN_OWNED);
1677
1678 if (funnel_state_prev != funneled) {
1679 intr = ml_set_interrupts_enabled(FALSE);
1680
1681 if (funneled == TRUE) {
1682 if (cur_thread->funnel_lock)
1683 panic("Funnel lock called when holding one %x", cur_thread->funnel_lock);
1684 KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE,
1685 fnl, 1, 0, 0, 0);
1686 funnel_lock(fnl);
1687 KERNEL_DEBUG(0x6032434 | DBG_FUNC_NONE,
1688 fnl, 1, 0, 0, 0);
1689 cur_thread->funnel_state |= TH_FN_OWNED;
1690 cur_thread->funnel_lock = fnl;
1691 } else {
1692 if(cur_thread->funnel_lock->fnl_mutex != fnl->fnl_mutex)
1693 panic("Funnel unlock when not holding funnel");
1694 cur_thread->funnel_state &= ~TH_FN_OWNED;
1695 KERNEL_DEBUG(0x603242c | DBG_FUNC_NONE,
1696 fnl, 1, 0, 0, 0);
1697
1698 cur_thread->funnel_lock = THR_FUNNEL_NULL;
1699 funnel_unlock(fnl);
1700 }
1701 (void)ml_set_interrupts_enabled(intr);
1702 } else {
 1703		/* If we are trying to acquire the funnel recursively,
 1704		 * make sure it is the same funnel we already hold.
 1705		 */
1706 if (funneled && (fnl->fnl_mutex != cur_thread->funnel_lock->fnl_mutex)) {
1707 panic("thread_funnel_set: already holding a different funnel");
1708 }
1709 }
1710 return(funnel_state_prev);
1711}
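#if 0
/*
 * Illustrative sketch only (my_funneled_operation is hypothetical):
 * the usual acquire/restore pattern for thread_funnel_set() callers;
 * kernel_flock stands for whichever funnel the caller requires.
 */
static void
my_funneled_operation(void)
{
	boolean_t funnel_state;

	funnel_state = thread_funnel_set(kernel_flock, TRUE);
	/* ... code that must run under the funnel ... */
	(void) thread_funnel_set(kernel_flock, funnel_state);
}
#endif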
1712
1713boolean_t
1714thread_funnel_merge(
1715 funnel_t * fnl,
1716 funnel_t * otherfnl)
1717{
1718 mutex_t * m;
1719 mutex_t * otherm;
1720 funnel_t * gfnl;
1721 extern int disable_funnel;
1722
1723 if ((gfnl = thread_funnel_get()) == THR_FUNNEL_NULL)
1724 panic("thread_funnel_merge called with no funnels held");
1725
1726 if (gfnl->fnl_type != 1)
1727 panic("thread_funnel_merge called from non kernel funnel");
1728
1729 if (gfnl != fnl)
1730 panic("thread_funnel_merge incorrect invocation");
1731
1732 if (disable_funnel || split_funnel_off)
1733 return (KERN_FAILURE);
1734
1735 m = fnl->fnl_mutex;
1736 otherm = otherfnl->fnl_mutex;
1737
1738 /* Acquire other funnel mutex */
1739 mutex_lock(otherm);
1740 split_funnel_off = 1;
1741 disable_funnel = 1;
1742 otherfnl->fnl_mutex = m;
1743 otherfnl->fnl_type = fnl->fnl_type;
1744 otherfnl->fnl_oldmutex = otherm; /* save this for future use */
1745
1746 mutex_unlock(otherm);
1747 return(KERN_SUCCESS);
1748}
1749
1750void
1751thread_set_cont_arg(
1752 int arg)
1753{
1754 thread_t self = current_thread();
1755
1756 self->saved.misc = arg;
1757}
1758
1759int
1760thread_get_cont_arg(void)
1761{
1762 thread_t self = current_thread();
1763
1764 return (self->saved.misc);
1765}
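#if 0
/*
 * Illustrative sketch only (my_wait/my_continuation and my_event are
 * hypothetical): saved.misc lets a small argument survive the loss of
 * the kernel stack across a continuation-style block.
 */
static void
my_continuation(void)
{
	int arg = thread_get_cont_arg();

	/* ... resume work using arg ... */
}

static void
my_wait(int arg, event_t my_event)
{
	thread_set_cont_arg(arg);
	assert_wait(my_event, THREAD_UNINT);
	thread_block(my_continuation);
	/*NOTREACHED*/
}
#endif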
1766
1767/*
1768 * Export routines to other components for things that are done as macros
1769 * within the osfmk component.
1770 */
1771#undef thread_should_halt
1772boolean_t
1773thread_should_halt(
1774 thread_t th)
1775{
1776 return(thread_should_halt_fast(th));
1777}
1778
1779vm_offset_t min_valid_stack_address(void)
1780{
1781 return vm_map_min(stack_map);
1782}
1783
1784vm_offset_t max_valid_stack_address(void)
1785{
1786 return vm_map_max(stack_map);
1787}