1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * @OSF_FREE_COPYRIGHT@
24 */
25 /*
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
28 * All Rights Reserved.
29 *
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
35 *
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 *
40 * Carnegie Mellon requests users of this software to return to
41 *
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
46 *
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
49 */
50 /*
51 */
52 /*
53 * File: kern/thread.c
54 * Author: Avadis Tevanian, Jr., Michael Wayne Young, David Golub
55 * Date: 1986
56 *
57 * Thread/thread_shuttle management primitives implementation.
58 */
59 /*
60 * Copyright (c) 1993 The University of Utah and
61 * the Computer Systems Laboratory (CSL). All rights reserved.
62 *
63 * Permission to use, copy, modify and distribute this software and its
64 * documentation is hereby granted, provided that both the copyright
65 * notice and this permission notice appear in all copies of the
66 * software, derivative works or modified versions, and any portions
67 * thereof, and that both notices appear in supporting documentation.
68 *
69 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
70 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
71 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
72 *
73 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
74 * improvements that they make and grant CSL redistribution rights.
75 *
76 */
77
78 #include <cpus.h>
79 #include <mach_host.h>
80 #include <simple_clock.h>
81 #include <mach_debug.h>
82 #include <mach_prof.h>
83
84 #include <mach/boolean.h>
85 #include <mach/policy.h>
86 #include <mach/thread_info.h>
87 #include <mach/thread_special_ports.h>
88 #include <mach/thread_status.h>
89 #include <mach/time_value.h>
90 #include <mach/vm_param.h>
91 #include <kern/ast.h>
92 #include <kern/cpu_data.h>
93 #include <kern/counters.h>
94 #include <kern/etap_macros.h>
95 #include <kern/ipc_mig.h>
96 #include <kern/ipc_tt.h>
97 #include <kern/mach_param.h>
98 #include <kern/machine.h>
99 #include <kern/misc_protos.h>
100 #include <kern/processor.h>
101 #include <kern/queue.h>
102 #include <kern/sched.h>
103 #include <kern/sched_prim.h>
104 #include <kern/mk_sp.h> /*** ??? fix so this can be removed ***/
105 #include <kern/task.h>
106 #include <kern/thread.h>
107 #include <kern/thread_act.h>
108 #include <kern/thread_swap.h>
109 #include <kern/host.h>
110 #include <kern/zalloc.h>
111 #include <vm/vm_kern.h>
112 #include <ipc/ipc_kmsg.h>
113 #include <ipc/ipc_port.h>
114 #include <machine/thread.h> /* for MACHINE_STACK */
115 #include <kern/profile.h>
116 #include <kern/assert.h>
117 #include <sys/kdebug.h>
118
119 /*
120 * Exported interfaces
121 */
122
123 #include <mach/thread_act_server.h>
124 #include <mach/mach_host_server.h>
125
126 /*
127 * Per-Cpu stashed global state
128 */
129 vm_offset_t active_stacks[NCPUS]; /* per-cpu active stacks */
130 vm_offset_t kernel_stack[NCPUS]; /* top of active stacks */
131 thread_act_t active_kloaded[NCPUS]; /* + act if kernel loaded */
132
133 struct zone *thread_shuttle_zone;
134
135 queue_head_t reaper_queue;
136 decl_simple_lock_data(,reaper_lock)
137 thread_call_t thread_reaper_call;
138
139 extern int tick;
140
141 extern void pcb_module_init(void);
142
143 /* private */
144 static struct thread_shuttle thr_sh_template;
145
146 #if MACH_DEBUG
147
148 #ifdef MACHINE_STACK
149 extern void stack_statistics(
150 unsigned int *totalp,
151 vm_size_t *maxusagep);
152 #endif /* MACHINE_STACK */
153 #endif /* MACH_DEBUG */
154
155 /* Forwards */
156 void thread_collect_scan(void);
157
158 kern_return_t thread_create_shuttle(
159 thread_act_t thr_act,
160 integer_t priority,
161 void (*start)(void),
162 thread_t *new_thread);
163
164 extern void Load_context(
165 thread_t thread);
166
167
168 /*
169 * Machine-dependent code must define:
170 * thread_machine_init
171 * thread_machine_terminate
172 * thread_machine_collect
173 *
174 * The thread->pcb field is reserved for machine-dependent code.
175 */
176
177 #ifdef MACHINE_STACK
178 /*
179 * Machine-dependent code must define:
180 * stack_alloc_try
181 * stack_alloc
182 * stack_free
183 * stack_free_stack
184 * stack_collect
185 * and if MACH_DEBUG:
186 * stack_statistics
187 */
188 #else /* MACHINE_STACK */
189 /*
190 * We allocate stacks from generic kernel VM.
191 * Machine-dependent code must define:
192 * machine_kernel_stack_init
193 *
194 * The stack_free_list can only be accessed at splsched,
195 * because stack_alloc_try/thread_invoke operate at splsched.
196 */
197
198 decl_simple_lock_data(,stack_lock_data) /* splsched only */
199 #define stack_lock() simple_lock(&stack_lock_data)
200 #define stack_unlock() simple_unlock(&stack_lock_data)
201
202 mutex_t stack_map_lock; /* Lock when allocating stack maps */
203 vm_map_t stack_map; /* Map for allocating stacks */
204 vm_offset_t stack_free_list; /* splsched only */
205 unsigned int stack_free_max = 0;
206 unsigned int stack_free_count = 0; /* splsched only */
207 unsigned int stack_free_limit = 1; /* Arbitrary */
208
209 unsigned int stack_alloc_hits = 0; /* debugging */
210 unsigned int stack_alloc_misses = 0; /* debugging */
211
212 unsigned int stack_alloc_total = 0;
213 unsigned int stack_alloc_hiwater = 0;
214 unsigned int stack_alloc_bndry = 0;
215
216
217 /*
218 * The next field is at the base of the stack,
219 * so the low end is left unsullied.
220 */
221
222 #define stack_next(stack) (*((vm_offset_t *)((stack) + KERNEL_STACK_SIZE) - 1))
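/*
 * The free list is threaded through that word; pushing and popping a
 * cached stack (as the routines below do) amounts to:
 *
 *	stack_next(stack) = stack_free_list;		push
 *	stack_free_list = stack;
 *
 *	stack = stack_free_list;			pop
 *	stack_free_list = stack_next(stack);
 */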
223
224 /*
225 * stack_alloc:
226 *
227 * Allocate a kernel stack for an activation.
228 * May block.
229 */
230 vm_offset_t
231 stack_alloc(
232 thread_t thread,
233 void (*start_pos)(thread_t))
234 {
235 vm_offset_t stack = thread->kernel_stack;
236 spl_t s;
237
238 if (stack)
239 return (stack);
240
241 /*
242 * We first try the free list. It is probably empty, or
243 * stack_alloc_try would have succeeded, but possibly a stack was
244 * freed before the swapin thread got to us.
245 *
246 * We allocate stacks from their own map, which is a submap of the
247 * kernel map. Because we want to have a guard page (at least) in
248 * front of each stack to catch evil code that overruns its stack, we
249 * allocate the stack on aligned boundaries. The boundary is
250 * calculated as the next power of 2 above the stack size. For
251 * example, a stack of 4 pages would have a boundary of 8; likewise, a
252 * stack of 5 pages would also round up to 8.
253 *
254 * We limit the number of stacks to one allocation chunk
255 * (THREAD_CHUNK) more than the maximum number of threads
256 * (THREAD_MAX). The extra is to allow for privileged threads that
257 * can sometimes have 2 stacks.
258 *
259 */
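	/*
	 * Worked example of the round-up described above (thread_init()
	 * below computes it once, into stack_alloc_bndry, with essentially
	 * this loop):
	 *
	 *	for (bndry = PAGE_SIZE; bndry <= KERNEL_STACK_SIZE; bndry <<= 1)
	 *		continue;
	 *
	 * so a 4-page stack yields an 8-page boundary, a 5-page stack also
	 * rounds up to 8, and the slack in front of each stack stays
	 * unmapped to serve as guard pages.
	 */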
260
261 s = splsched();
262 stack_lock();
263 stack = stack_free_list;
264 if (stack != 0) {
265 stack_free_list = stack_next(stack);
266 stack_free_count--;
267 }
268 stack_unlock();
269 splx(s);
270
271 if (stack != 0) { /* Did we find a free one? */
272 stack_attach(thread, stack, start_pos); /* Initialize it */
273 return (stack); /* Send it on home */
274 }
275
276 if (kernel_memory_allocate(
277 stack_map, &stack,
278 KERNEL_STACK_SIZE, stack_alloc_bndry - 1,
279 KMA_KOBJECT) != KERN_SUCCESS)
280 panic("stack_alloc: no space left for stack maps");
281
282 stack_alloc_total++;
283 if (stack_alloc_total > stack_alloc_hiwater)
284 stack_alloc_hiwater = stack_alloc_total;
285
286 stack_attach(thread, stack, start_pos);
287 return (stack);
288 }
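#if 0	/* Usage sketch (hypothetical caller, not compiled): a swapin path
	 * attaches a fresh stack and arranges for the thread to resume at
	 * its continuation.
	 */
static void
example_swapin(thread_t thread)
{
	(void) stack_alloc(thread, thread_continue);
	/* the stack is now attached; stack_attach() did the bookkeeping */
}
#endif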
289
290 /*
291 * stack_free:
292 *
293 * Free a kernel stack.
294 * Called at splsched.
295 */
296
297 void
298 stack_free(
299 thread_t thread)
300 {
301 vm_offset_t stack = stack_detach(thread);
302
303 assert(stack);
304 if (stack != thread->stack_privilege) {
305 stack_lock();
306 stack_next(stack) = stack_free_list;
307 stack_free_list = stack;
308 if (++stack_free_count > stack_free_max)
309 stack_free_max = stack_free_count;
310 stack_unlock();
311 }
312 }
313
314 static void
315 stack_free_stack(
316 vm_offset_t stack)
317 {
318 spl_t s;
319
320 s = splsched();
321 stack_lock();
322 stack_next(stack) = stack_free_list;
323 stack_free_list = stack;
324 if (++stack_free_count > stack_free_max)
325 stack_free_max = stack_free_count;
326 stack_unlock();
327 splx(s);
328 }
329
330 /*
331 * stack_collect:
332 *
333 * Free excess kernel stacks.
334 * May block.
335 */
336
337 void
338 stack_collect(void)
339 {
340 vm_offset_t stack;
341 int i;
342 spl_t s;
343
344 s = splsched();
345 stack_lock();
346 while (stack_free_count > stack_free_limit) {
347 stack = stack_free_list;
348 stack_free_list = stack_next(stack);
349 stack_free_count--;
350 stack_unlock();
351 splx(s);
352
353 if (vm_map_remove(
354 stack_map, stack, stack + KERNEL_STACK_SIZE,
355 VM_MAP_REMOVE_KUNWIRE) != KERN_SUCCESS)
356 panic("stack_collect: vm_map_remove failed");
357
358 s = splsched();
359 stack_lock();
360 stack_alloc_total--;
361 }
362 stack_unlock();
363 splx(s);
364 }
365
366
367 #if MACH_DEBUG
368 /*
369 * stack_statistics:
370 *
371 * Return statistics on cached kernel stacks.
372 * *maxusagep must be initialized by the caller.
373 */
374
375 void
376 stack_statistics(
377 unsigned int *totalp,
378 vm_size_t *maxusagep)
379 {
380 spl_t s;
381
382 s = splsched();
383 stack_lock();
384
385 *totalp = stack_free_count;
386 *maxusagep = 0;
387
388 stack_unlock();
389 splx(s);
390 }
391 #endif /* MACH_DEBUG */
392
393 #endif /* MACHINE_STACK */
394
395
396 void stack_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
397 vm_size_t *alloc_size, int *collectable, int *exhaustable)
398 {
399 *count = stack_alloc_total - stack_free_count;
400 *cur_size = KERNEL_STACK_SIZE * stack_alloc_total;
401 *max_size = KERNEL_STACK_SIZE * stack_alloc_hiwater;
402 *elem_size = KERNEL_STACK_SIZE;
403 *alloc_size = KERNEL_STACK_SIZE;
404 *collectable = 1;
405 *exhaustable = 0;
406 }
407
408
409 /*
410 * stack_privilege:
411 *
412 * stack_alloc_try on this thread must always succeed.
413 */
414
415 void
416 stack_privilege(
417 register thread_t thread)
418 {
419 /*
420 * This implementation only works for the current thread.
421 */
422
423 if (thread != current_thread())
424 panic("stack_privilege");
425
426 if (thread->stack_privilege == 0)
427 thread->stack_privilege = current_stack();
428 }
429
430 /*
431 * stack_alloc_try:
432 *
433 * Non-blocking attempt to allocate a kernel stack.
434 * Called at splsched with the thread locked.
435 */
436
437 boolean_t stack_alloc_try(
438 thread_t thread,
439 void (*start_pos)(thread_t))
440 {
441 register vm_offset_t stack = thread->stack_privilege;
442
443 if (stack == 0) {
444 stack_lock();
445
446 stack = stack_free_list;
447 if (stack != (vm_offset_t)0) {
448 stack_free_list = stack_next(stack);
449 stack_free_count--;
450 }
451
452 stack_unlock();
453 }
454
455 if (stack != 0) {
456 stack_attach(thread, stack, start_pos);
457 stack_alloc_hits++;
458
459 return (TRUE);
460 }
461 else {
462 stack_alloc_misses++;
463
464 return (FALSE);
465 }
466 }
467
468 uint64_t max_unsafe_computation;
469 extern int max_unsafe_quanta;
470
471 uint32_t sched_safe_duration;
472
473 uint64_t max_poll_computation;
474 extern int max_poll_quanta;
475
476 uint32_t std_quantum;
477 uint32_t min_std_quantum;
478
479 uint32_t max_rt_quantum;
480 uint32_t min_rt_quantum;
481
482 void
483 thread_init(void)
484 {
485 kern_return_t ret;
486 unsigned int stack;
487
488 thread_shuttle_zone = zinit(
489 sizeof(struct thread_shuttle),
490 THREAD_MAX * sizeof(struct thread_shuttle),
491 THREAD_CHUNK * sizeof(struct thread_shuttle),
492 "threads");
493
494 /*
495 * Fill in a template thread_shuttle for fast initialization.
496 * [Fields that must be (or are typically) reset at
497 * time of creation are so noted.]
498 */
499
500 /* thr_sh_template.links (none) */
501 thr_sh_template.runq = RUN_QUEUE_NULL;
502
503
504 /* thr_sh_template.task (later) */
505 /* thr_sh_template.thread_list (later) */
506 /* thr_sh_template.pset_threads (later) */
507
508 /* one ref for pset, one for activation */
509 thr_sh_template.ref_count = 2;
510
511 thr_sh_template.wait_event = NO_EVENT;
512 thr_sh_template.wait_result = KERN_SUCCESS;
513 thr_sh_template.wait_queue = WAIT_QUEUE_NULL;
514 thr_sh_template.wake_active = FALSE;
515 thr_sh_template.state = TH_STACK_HANDOFF | TH_WAIT | TH_UNINT;
516 thr_sh_template.interruptible = TRUE;
517 thr_sh_template.continuation = (void (*)(void))0;
518 thr_sh_template.top_act = THR_ACT_NULL;
519
520 thr_sh_template.importance = 0;
521 thr_sh_template.sched_mode = 0;
522 thr_sh_template.safe_mode = 0;
523
524 thr_sh_template.priority = 0;
525 thr_sh_template.sched_pri = 0;
526 thr_sh_template.depress_priority = -1;
527 thr_sh_template.max_priority = 0;
528 thr_sh_template.task_priority = 0;
529
530 thr_sh_template.current_quantum = 0;
531
532 thr_sh_template.metered_computation = 0;
533 thr_sh_template.computation_epoch = 0;
534
535 thr_sh_template.cpu_usage = 0;
536 thr_sh_template.cpu_delta = 0;
537 thr_sh_template.sched_usage = 0;
538 thr_sh_template.sched_delta = 0;
539 thr_sh_template.sched_stamp = 0;
540 thr_sh_template.sleep_stamp = 0;
541 thr_sh_template.safe_release = 0;
542
543 thr_sh_template.vm_privilege = FALSE;
544
545 timer_init(&(thr_sh_template.user_timer));
546 timer_init(&(thr_sh_template.system_timer));
547 thr_sh_template.user_timer_save.low = 0;
548 thr_sh_template.user_timer_save.high = 0;
549 thr_sh_template.system_timer_save.low = 0;
550 thr_sh_template.system_timer_save.high = 0;
551
552 thr_sh_template.active = FALSE; /* reset */
553
554 /* thr_sh_template.processor_set (later) */
555 #if NCPUS > 1
556 thr_sh_template.bound_processor = PROCESSOR_NULL;
557 #endif /*NCPUS > 1*/
558 #if MACH_HOST
559 thr_sh_template.may_assign = TRUE;
560 thr_sh_template.assign_active = FALSE;
561 #endif /* MACH_HOST */
562 thr_sh_template.funnel_state = 0;
563
564 #if NCPUS > 1
565 /* thr_sh_template.last_processor (later) */
566 #endif /* NCPUS > 1 */
567
568 /*
569 * Initialize other data structures used in
570 * this module.
571 */
572
573 queue_init(&reaper_queue);
574 simple_lock_init(&reaper_lock, ETAP_THREAD_REAPER);
575 thr_sh_template.funnel_lock = THR_FUNNEL_NULL;
576
577 #ifndef MACHINE_STACK
578 simple_lock_init(&stack_lock_data, ETAP_THREAD_STACK); /* Initialize the stack lock */
579
580 if (KERNEL_STACK_SIZE < round_page(KERNEL_STACK_SIZE)) { /* Kernel stacks must be multiples of pages */
581 panic("thread_init: kernel stack size (%08X) must be a multiple of page size (%08X)\n",
582 KERNEL_STACK_SIZE, PAGE_SIZE);
583 }
584
585 for(stack_alloc_bndry = PAGE_SIZE; stack_alloc_bndry <= KERNEL_STACK_SIZE; stack_alloc_bndry <<= 1); /* Find next power of 2 above stack size */
586
587 ret = kmem_suballoc(kernel_map, /* Suballocate from the kernel map */
588
589 &stack,
590 (stack_alloc_bndry * (THREAD_MAX + 64)), /* Allocate enough for all of it */
591 FALSE, /* Say not pageable so that it is wired */
592 TRUE, /* Allocate from anywhere */
593 &stack_map); /* Allocate a submap */
594
595 if(ret != KERN_SUCCESS) { /* Did we get one? */
596 panic("thread_init: kmem_suballoc for stacks failed - ret = %d\n", ret); /* Die */
597 }
598 stack = vm_map_min(stack_map); /* Make sure we skip the first hunk */
599 ret = vm_map_enter(stack_map, &stack, PAGE_SIZE, 0, /* Make sure there is nothing at the start */
600 0, /* Force it at start */
601 VM_OBJECT_NULL, 0, /* No object yet */
602 FALSE, /* No copy */
603 VM_PROT_NONE, /* Allow no access */
604 VM_PROT_NONE, /* Allow no access */
605 VM_INHERIT_DEFAULT); /* Just be normal */
606
607 if(ret != KERN_SUCCESS) { /* Did it work? */
608 panic("thread_init: dummy alignment allocation failed; ret = %d\n", ret);
609 }
610
611 #endif /* MACHINE_STACK */
612
613 #if MACH_LDEBUG
614 thr_sh_template.kthread = FALSE;
615 thr_sh_template.mutex_count = 0;
616 #endif /* MACH_LDEBUG */
617
618 {
619 uint64_t abstime;
620
621 clock_interval_to_absolutetime_interval(
622 std_quantum_us, NSEC_PER_USEC, &abstime);
623 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
624 std_quantum = abstime;
625
626 /* 250 us */
627 clock_interval_to_absolutetime_interval(250, NSEC_PER_USEC, &abstime);
628 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
629 min_std_quantum = abstime;
630
631 /* 50 us */
632 clock_interval_to_absolutetime_interval(50, NSEC_PER_USEC, &abstime);
633 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
634 min_rt_quantum = abstime;
635
636 /* 50 ms */
637 clock_interval_to_absolutetime_interval(
638 50, 1000*NSEC_PER_USEC, &abstime);
639 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
640 max_rt_quantum = abstime;
641
642 max_unsafe_computation = max_unsafe_quanta * std_quantum;
643 max_poll_computation = max_poll_quanta * std_quantum;
644
645 sched_safe_duration = 2 * max_unsafe_quanta *
646 (std_quantum_us / (1000 * 1000)) *
647 (1 << SCHED_TICK_SHIFT);
648 }
649
650 /*
651 * Initialize any machine-dependent
652 * per-thread structures necessary.
653 */
654 thread_machine_init();
655 }
656
657 void
658 thread_reaper_enqueue(
659 thread_t thread)
660 {
661 /*
662 * thread lock is already held, splsched()
663 * not necessary here.
664 */
665 simple_lock(&reaper_lock);
666 enqueue_tail(&reaper_queue, (queue_entry_t)thread);
667 simple_unlock(&reaper_lock);
668
669 thread_call_enter(thread_reaper_call);
670 }
671
672
673 /*
674 * Routine: thread_terminate_self
675 *
676 * This routine is called by a thread which has unwound from
677 * its current RPC and kernel contexts and found that it's
678 * root activation has been marked for extinction. This lets
679 * it clean up the last few things that can only be cleaned
680 * up in this context and then impale itself on the reaper
681 * queue.
682 *
683 * When the reaper gets the thread, it will deallocate the
684 * thread_act's reference on itself, which in turn will release
685 * its own reference on this thread. By doing things in that
686 * order, a thread_act will always have a valid thread - but the
687 * thread may persist beyond having a thread_act (but must never
688 * run like that).
689 */
690 void
691 thread_terminate_self(void)
692 {
693 register thread_t thread = current_thread();
694 thread_act_t thr_act = thread->top_act;
695 task_t task = thr_act->task;
696 int active_acts;
697 spl_t s;
698
699 /*
700 * We should be at the base of the inheritance chain.
701 */
702 assert(thr_act->thread == thread);
703
704 _mk_sp_thread_depress_abort(thread, TRUE);
705
706 /*
707 * Check to see if this is the last active activation. By
708 * this we mean the last activation to call thread_terminate_self.
709 * If so, and the task is associated with a BSD process, we
710 * need to call BSD and let them clean up.
711 */
712 task_lock(task);
713 active_acts = --task->active_act_count;
714 task_unlock(task);
715 if (!active_acts && task->bsd_info)
716 proc_exit(task->bsd_info);
717
718 #ifdef CALLOUT_RPC_MODEL
719 if (thr_act->lower) {
720 /*
721 * JMM - RPC will not be using a callout/stack manipulation
722 * mechanism. instead we will let it return normally as if
723 * from a continuation. Accordingly, these need to be cleaned
724 * up a bit.
725 */
726 act_switch_swapcheck(thread, (ipc_port_t)0);
727 act_lock(thr_act); /* hierarchy violation XXX */
728 (void) switch_act(THR_ACT_NULL);
729 assert(thr_act->ref_count == 1); /* XXX */
730 /* act_deallocate(thr_act); XXX */
731 prev_act = thread->top_act;
732 /*
733 * disable preemption to protect kernel stack changes
734 * disable_preemption();
735 * MACH_RPC_RET(prev_act) = KERN_RPC_SERVER_TERMINATED;
736 * machine_kernel_stack_init(thread, mach_rpc_return_error);
737 */
738 act_unlock(thr_act);
739
740 /*
741 * Load_context(thread);
742 */
743 /* NOTREACHED */
744 }
745
746 #else /* !CALLOUT_RPC_MODEL */
747
748 assert(!thr_act->lower);
749
750 #endif /* CALLOUT_RPC_MODEL */
751
752 s = splsched();
753 thread_lock(thread);
754 thread->active = FALSE;
755 thread_unlock(thread);
756 splx(s);
757
758 thread_timer_terminate();
759
760 /* flush any lazy HW state while in own context */
761 thread_machine_flush(thr_act);
762
763 ipc_thread_terminate(thread);
764
765 s = splsched();
766 thread_lock(thread);
767 thread->state |= (TH_HALTED|TH_TERMINATE);
768 assert((thread->state & TH_UNINT) == 0);
769 thread_mark_wait_locked(thread, THREAD_UNINT);
770 thread_unlock(thread);
771 /* splx(s); */
772
773 ETAP_SET_REASON(thread, BLOCKED_ON_TERMINATION);
774 thread_block((void (*)(void)) 0);
775 panic("the zombie walks!");
776 /*NOTREACHED*/
777 }
778
779
780 /*
781 * Create a new thread.
782 * Doesn't start the thread running; it first must be attached to
783 * an activation - then use thread_go to start it.
784 */
785 kern_return_t
786 thread_create_shuttle(
787 thread_act_t thr_act,
788 integer_t priority,
789 void (*start)(void),
790 thread_t *new_thread)
791 {
792 thread_t new_shuttle;
793 task_t parent_task = thr_act->task;
794 processor_set_t pset;
795 kern_return_t result;
796 int suspcnt;
797
798 assert(!thr_act->thread);
799 assert(!thr_act->pool_port);
800
801 /*
802 * Allocate a thread and initialize static fields
803 */
804 new_shuttle = (thread_t)zalloc(thread_shuttle_zone);
805 if (new_shuttle == THREAD_NULL)
806 return (KERN_RESOURCE_SHORTAGE);
807
808 *new_shuttle = thr_sh_template;
809
810 thread_lock_init(new_shuttle);
811 rpc_lock_init(new_shuttle);
812 wake_lock_init(new_shuttle);
813 new_shuttle->sleep_stamp = sched_tick;
814
815 /*
816 * The thread isn't runnable yet (our caller will take care of
817 * that). Initialize runtime-dependent fields here.
818 */
819 result = thread_machine_create(new_shuttle, thr_act, thread_continue);
820 assert (result == KERN_SUCCESS);
821
822 thread_start(new_shuttle, start);
823 thread_timer_setup(new_shuttle);
824 ipc_thread_init(new_shuttle);
825
826 pset = parent_task->processor_set;
827 if (!pset->active) {
828 pset = &default_pset;
829 }
830 pset_lock(pset);
831
832 task_lock(parent_task);
833
834 /*
835 * Don't need to initialize because the context switch
836 * code will set it before it can be used.
837 */
838 if (!parent_task->active) {
839 task_unlock(parent_task);
840 pset_unlock(pset);
841 thread_machine_destroy(new_shuttle);
842 zfree(thread_shuttle_zone, (vm_offset_t) new_shuttle);
843 return (KERN_FAILURE);
844 }
845
846 act_attach(thr_act, new_shuttle, 0);
847
848 /* Chain the thr_act onto the task's list */
849 queue_enter(&parent_task->thr_acts, thr_act, thread_act_t, thr_acts);
850 parent_task->thr_act_count++;
851 parent_task->res_act_count++;
852 parent_task->active_act_count++;
853
854 /* Associate the thread with the processor set */
855 pset_add_thread(pset, new_shuttle);
856
857 /* Set the thread's scheduling parameters */
858 if (parent_task != kernel_task)
859 new_shuttle->sched_mode |= TH_MODE_TIMESHARE;
860 new_shuttle->max_priority = parent_task->max_priority;
861 new_shuttle->task_priority = parent_task->priority;
862 new_shuttle->priority = (priority < 0)? parent_task->priority: priority;
863 if (new_shuttle->priority > new_shuttle->max_priority)
864 new_shuttle->priority = new_shuttle->max_priority;
865 new_shuttle->importance =
866 new_shuttle->priority - new_shuttle->task_priority;
867 new_shuttle->sched_stamp = sched_tick;
868 compute_priority(new_shuttle, TRUE);
869
870 #if ETAP_EVENT_MONITOR
871 new_shuttle->etap_reason = 0;
872 new_shuttle->etap_trace = FALSE;
873 #endif /* ETAP_EVENT_MONITOR */
874
875 new_shuttle->active = TRUE;
876 thr_act->active = TRUE;
877 pset_unlock(pset);
878
879 /*
880 * No need to lock thr_act, since it can't be known to anyone --
881 * we set its suspend_count to one more than the task suspend_count
882 * by calling thread_hold.
883 */
884 thr_act->user_stop_count = 1;
885 for (suspcnt = thr_act->task->suspend_count + 1; suspcnt; --suspcnt)
886 thread_hold(thr_act);
887 task_unlock(parent_task);
888
889 *new_thread = new_shuttle;
890
891 {
892 long dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4;
893
894 KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_DATA, 1)) | DBG_FUNC_NONE,
895 (vm_address_t)new_shuttle, 0,0,0,0);
896
897 kdbg_trace_string(parent_task->bsd_info, &dbg_arg1, &dbg_arg2, &dbg_arg3,
898 &dbg_arg4);
899 KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_STRING, 1)) | DBG_FUNC_NONE,
900 dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, 0);
901 }
902
903 return (KERN_SUCCESS);
904 }
905
906 kern_return_t
907 thread_create(
908 task_t task,
909 thread_act_t *new_act)
910 {
911 thread_act_t thr_act;
912 thread_t thread;
913 kern_return_t result;
914 spl_t s;
915 extern void thread_bootstrap_return(void);
916
917 if (task == TASK_NULL)
918 return KERN_INVALID_ARGUMENT;
919
920 result = act_create(task, &thr_act);
921 if (result != KERN_SUCCESS)
922 return (result);
923
924 result = thread_create_shuttle(thr_act, -1, thread_bootstrap_return, &thread);
925 if (result != KERN_SUCCESS) {
926 act_deallocate(thr_act);
927 return (result);
928 }
929
930 if (task->kernel_loaded)
931 thread_user_to_kernel(thread);
932
933 /* Start the thread running (it will immediately suspend itself). */
934 s = splsched();
935 thread_ast_set(thr_act, AST_APC);
936 thread_lock(thread);
937 thread_go_locked(thread, THREAD_AWAKENED);
938 thread_unlock(thread);
939 splx(s);
940
941 *new_act = thr_act;
942
943 return (KERN_SUCCESS);
944 }
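#if 0	/* Usage sketch (hypothetical caller, not compiled): create a suspended
	 * activation in a task and let it run -- the same sequence that
	 * thread_create_running() performs below.
	 */
static kern_return_t
example_spawn(task_t task)
{
	thread_act_t act;
	kern_return_t kr;

	kr = thread_create(task, &act);
	if (kr != KERN_SUCCESS)
		return (kr);
	/* ... optionally act_machine_set_state(act, flavor, state, count) ... */
	return (thread_resume(act));
}
#endif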
945
946 /*
947 * Update thread that belongs to a task created via kernel_task_create().
948 */
949 void
950 thread_user_to_kernel(
951 thread_t thread)
952 {
953 /*
954 * Used to set special swap_func here...
955 */
956 }
957
958 kern_return_t
959 thread_create_running(
960 register task_t parent_task,
961 int flavor,
962 thread_state_t new_state,
963 mach_msg_type_number_t new_state_count,
964 thread_act_t *child_act) /* OUT */
965 {
966 register kern_return_t result;
967
968 result = thread_create(parent_task, child_act);
969 if (result != KERN_SUCCESS)
970 return (result);
971
972 result = act_machine_set_state(*child_act, flavor,
973 new_state, new_state_count);
974 if (result != KERN_SUCCESS) {
975 (void) thread_terminate(*child_act);
976 return (result);
977 }
978
979 result = thread_resume(*child_act);
980 if (result != KERN_SUCCESS) {
981 (void) thread_terminate(*child_act);
982 return (result);
983 }
984
985 return (result);
986 }
987
988 /*
989 * kernel_thread:
990 *
991 * Create a kernel thread in the specified task, and
992 * optionally start it running.
993 */
994 thread_t
995 kernel_thread_with_priority(
996 task_t task,
997 integer_t priority,
998 void (*start)(void),
999 boolean_t alloc_stack,
1000 boolean_t start_running)
1001 {
1002 kern_return_t result;
1003 thread_t thread;
1004 thread_act_t thr_act;
1005 spl_t s;
1006
1007 result = act_create(task, &thr_act);
1008 if (result != KERN_SUCCESS) {
1009 return THREAD_NULL;
1010 }
1011
1012 result = thread_create_shuttle(thr_act, priority, start, &thread);
1013 if (result != KERN_SUCCESS) {
1014 act_deallocate(thr_act);
1015 return THREAD_NULL;
1016 }
1017
1018 if (alloc_stack)
1019 thread_doswapin(thread);
1020
1021 s = splsched();
1022 thread_lock(thread);
1023
1024 thr_act = thread->top_act;
1025 #if MACH_LDEBUG
1026 thread->kthread = TRUE;
1027 #endif /* MACH_LDEBUG */
1028
1029 if (start_running)
1030 thread_go_locked(thread, THREAD_AWAKENED);
1031
1032 thread_unlock(thread);
1033 splx(s);
1034
1035 if (start_running)
1036 thread_resume(thr_act);
1037
1038 act_deallocate(thr_act);
1039 return (thread);
1040 }
1041
1042 thread_t
1043 kernel_thread(
1044 task_t task,
1045 void (*start)(void))
1046 {
1047 return kernel_thread_with_priority(task, -1, start, FALSE, TRUE);
1048 }
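#if 0	/* Usage sketch (hypothetical caller, not compiled): start a kernel
	 * daemon in the kernel task; my_daemon_loop stands for a function
	 * that never returns.
	 */
extern void my_daemon_loop(void);

static void
example_start_daemon(void)
{
	(void) kernel_thread(kernel_task, my_daemon_loop);
}
#endif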
1049
1050 unsigned int c_weird_pset_ref_exit = 0; /* pset code raced us */
1051
1052 void
1053 thread_deallocate(
1054 thread_t thread)
1055 {
1056 task_t task;
1057 processor_set_t pset;
1058 spl_t s;
1059
1060 if (thread == THREAD_NULL)
1061 return;
1062
1063 /*
1064 * First, check for new count > 1 (the common case).
1065 * Only the thread needs to be locked.
1066 */
1067 s = splsched();
1068 thread_lock(thread);
1069 if (--thread->ref_count > 1) {
1070 thread_unlock(thread);
1071 splx(s);
1072 return;
1073 }
1074
1075 /*
1076 * We are down to the pset reference; let's try to clean up.
1077 * However, the processor set may make more. Its lock
1078 * also dominates the thread lock, so reverse the
1079 * order of the locks and check whether this is still the
1080 * last reference.
1081 */
1082 assert(thread->ref_count == 1); /* Else this is an extra dealloc! */
1083 thread_unlock(thread);
1084 splx(s);
1085
1086 #if MACH_HOST
1087 thread_freeze(thread);
1088 #endif /* MACH_HOST */
1089
1090 pset = thread->processor_set;
1091 pset_lock(pset);
1092
1093 s = splsched();
1094 thread_lock(thread);
1095
1096 if (thread->ref_count > 1) {
1097 #if MACH_HOST
1098 boolean_t need_wakeup = FALSE;
1099 /*
1100 * The processor set made an extra reference.
1101 */
1102 /* Inline the unfreeze */
1103 thread->may_assign = TRUE;
1104 if (thread->assign_active) {
1105 need_wakeup = TRUE;
1106 thread->assign_active = FALSE;
1107 }
1108 #endif /* MACH_HOST */
1109 thread_unlock(thread);
1110 splx(s);
1111 pset_unlock(pset);
1112 #if MACH_HOST
1113 if (need_wakeup)
1114 thread_wakeup((event_t)&thread->assign_active);
1115 #endif /* MACH_HOST */
1116 c_weird_pset_ref_exit++;
1117 return;
1118 }
1119 #if MACH_HOST
1120 assert(thread->assign_active == FALSE);
1121 #endif /* MACH_HOST */
1122
1123 /*
1124 * Thread only had pset reference - we can remove it.
1125 */
1126 if (thread == current_thread())
1127 panic("thread deallocating itself");
1128
1129 pset_remove_thread(pset, thread);
1130 thread->ref_count = 0;
1131 thread_unlock(thread); /* no more references - safe */
1132 splx(s);
1133 pset_unlock(pset);
1134
1135 pset_deallocate(thread->processor_set);
1136
1137 if (thread->stack_privilege != 0) {
1138 if (thread->stack_privilege != thread->kernel_stack)
1139 stack_free_stack(thread->stack_privilege);
1140 thread->stack_privilege = 0;
1141 }
1142 /* frees kernel stack & other MD resources */
1143 thread_machine_destroy(thread);
1144
1145 zfree(thread_shuttle_zone, (vm_offset_t) thread);
1146 }
1147
1148 void
1149 thread_reference(
1150 thread_t thread)
1151 {
1152 spl_t s;
1153
1154 if (thread == THREAD_NULL)
1155 return;
1156
1157 s = splsched();
1158 thread_lock(thread);
1159 thread->ref_count++;
1160 thread_unlock(thread);
1161 splx(s);
1162 }
1163
1164 /*
1165 * Called with "appropriate" thread-related locks held on
1166 * thread and its top_act for synchrony with RPC (see
1167 * act_lock_thread()).
1168 */
1169 kern_return_t
1170 thread_info_shuttle(
1171 register thread_act_t thr_act,
1172 thread_flavor_t flavor,
1173 thread_info_t thread_info_out, /* ptr to OUT array */
1174 mach_msg_type_number_t *thread_info_count) /*IN/OUT*/
1175 {
1176 register thread_t thread = thr_act->thread;
1177 int state, flags;
1178 spl_t s;
1179
1180 if (thread == THREAD_NULL)
1181 return (KERN_INVALID_ARGUMENT);
1182
1183 if (flavor == THREAD_BASIC_INFO) {
1184 register thread_basic_info_t basic_info;
1185
1186 if (*thread_info_count < THREAD_BASIC_INFO_COUNT)
1187 return (KERN_INVALID_ARGUMENT);
1188
1189 basic_info = (thread_basic_info_t) thread_info_out;
1190
1191 s = splsched();
1192 thread_lock(thread);
1193
1194 /* fill in info */
1195
1196 thread_read_times(thread, &basic_info->user_time,
1197 &basic_info->system_time);
1198
1199 /*
1200 * Update lazy-evaluated scheduler info because someone wants it.
1201 */
1202 if (thread->sched_stamp != sched_tick)
1203 update_priority(thread);
1204
1205 basic_info->sleep_time = 0;
1206
1207 /*
1208 * To calculate cpu_usage, first correct for timer rate,
1209 * then for 5/8 ageing. The correction factor [3/5] is
1210 * (1/(5/8) - 1).
1211 */
1212 basic_info->cpu_usage = (thread->cpu_usage << SCHED_TICK_SHIFT) /
1213 (TIMER_RATE / TH_USAGE_SCALE);
1214 basic_info->cpu_usage = (basic_info->cpu_usage * 3) / 5;
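		/*
		 * (Worked arithmetic for the factor above:
		 *  1/(5/8) = 8/5, and 8/5 - 1 = 3/5.)
		 */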
1215 #if SIMPLE_CLOCK
1216 /*
1217 * Clock drift compensation.
1218 */
1219 basic_info->cpu_usage = (basic_info->cpu_usage * 1000000) / sched_usec;
1220 #endif /* SIMPLE_CLOCK */
1221
1222 basic_info->policy = ((thread->sched_mode & TH_MODE_TIMESHARE)?
1223 POLICY_TIMESHARE: POLICY_RR);
1224
1225 flags = 0;
1226 if (thread->state & TH_IDLE)
1227 flags |= TH_FLAGS_IDLE;
1228
1229 if (thread->state & TH_STACK_HANDOFF)
1230 flags |= TH_FLAGS_SWAPPED;
1231
1232 state = 0;
1233 if (thread->state & TH_HALTED)
1234 state = TH_STATE_HALTED;
1235 else
1236 if (thread->state & TH_RUN)
1237 state = TH_STATE_RUNNING;
1238 else
1239 if (thread->state & TH_UNINT)
1240 state = TH_STATE_UNINTERRUPTIBLE;
1241 else
1242 if (thread->state & TH_SUSP)
1243 state = TH_STATE_STOPPED;
1244 else
1245 if (thread->state & TH_WAIT)
1246 state = TH_STATE_WAITING;
1247
1248 basic_info->run_state = state;
1249 basic_info->flags = flags;
1250
1251 basic_info->suspend_count = thr_act->user_stop_count;
1252
1253 thread_unlock(thread);
1254 splx(s);
1255
1256 *thread_info_count = THREAD_BASIC_INFO_COUNT;
1257
1258 return (KERN_SUCCESS);
1259 }
1260 else
1261 if (flavor == THREAD_SCHED_TIMESHARE_INFO) {
1262 policy_timeshare_info_t ts_info;
1263
1264 if (*thread_info_count < POLICY_TIMESHARE_INFO_COUNT)
1265 return (KERN_INVALID_ARGUMENT);
1266
1267 ts_info = (policy_timeshare_info_t)thread_info_out;
1268
1269 s = splsched();
1270 thread_lock(thread);
1271
1272 if (!(thread->sched_mode & TH_MODE_TIMESHARE)) {
1273 thread_unlock(thread);
1274 splx(s);
1275
1276 return (KERN_INVALID_POLICY);
1277 }
1278
1279 ts_info->base_priority = thread->priority;
1280 ts_info->max_priority = thread->max_priority;
1281 ts_info->cur_priority = thread->sched_pri;
1282
1283 ts_info->depressed = (thread->depress_priority >= 0);
1284 ts_info->depress_priority = thread->depress_priority;
1285
1286 thread_unlock(thread);
1287 splx(s);
1288
1289 *thread_info_count = POLICY_TIMESHARE_INFO_COUNT;
1290
1291 return (KERN_SUCCESS);
1292 }
1293 else
1294 if (flavor == THREAD_SCHED_FIFO_INFO) {
1295 if (*thread_info_count < POLICY_FIFO_INFO_COUNT)
1296 return (KERN_INVALID_ARGUMENT);
1297
1298 return (KERN_INVALID_POLICY);
1299 }
1300 else
1301 if (flavor == THREAD_SCHED_RR_INFO) {
1302 policy_rr_info_t rr_info;
1303
1304 if (*thread_info_count < POLICY_RR_INFO_COUNT)
1305 return (KERN_INVALID_ARGUMENT);
1306
1307 rr_info = (policy_rr_info_t) thread_info_out;
1308
1309 s = splsched();
1310 thread_lock(thread);
1311
1312 if (thread->sched_mode & TH_MODE_TIMESHARE) {
1313 thread_unlock(thread);
1314 splx(s);
1315
1316 return (KERN_INVALID_POLICY);
1317 }
1318
1319 rr_info->base_priority = thread->priority;
1320 rr_info->max_priority = thread->max_priority;
1321 rr_info->quantum = std_quantum_us / 1000;
1322
1323 rr_info->depressed = (thread->depress_priority >= 0);
1324 rr_info->depress_priority = thread->depress_priority;
1325
1326 thread_unlock(thread);
1327 splx(s);
1328
1329 *thread_info_count = POLICY_RR_INFO_COUNT;
1330
1331 return (KERN_SUCCESS);
1332 }
1333
1334 return (KERN_INVALID_ARGUMENT);
1335 }
1336
1337 void
1338 thread_doreap(
1339 register thread_t thread)
1340 {
1341 thread_act_t thr_act;
1342 struct ipc_port *pool_port;
1343
1344
1345 thr_act = thread_lock_act(thread);
1346 assert(thr_act && thr_act->thread == thread);
1347
1348 act_locked_act_reference(thr_act);
1349 pool_port = thr_act->pool_port;
1350
1351 /*
1352 * Replace `act_unlock_thread()' with individual
1353 * calls. (`act_detach()' can change fields used
1354 * to determine which locks are held, confusing
1355 * `act_unlock_thread()'.)
1356 */
1357 rpc_unlock(thread);
1358 if (pool_port != IP_NULL)
1359 ip_unlock(pool_port);
1360 act_unlock(thr_act);
1361
1362 /* Remove the reference held by a rooted thread */
1363 if (pool_port == IP_NULL)
1364 act_deallocate(thr_act);
1365
1366 /* Remove the reference held by the thread: */
1367 act_deallocate(thr_act);
1368 }
1369
1370 static thread_call_data_t thread_reaper_call_data;
1371
1372 /*
1373 * reaper_thread:
1374 *
1375 * This kernel thread runs forever looking for threads to destroy
1376 * (when they request that they be destroyed, of course).
1377 *
1378 * The reaper thread will disappear in the next revision of thread
1379 * control, when its function will be moved into thread_dispatch.
1380 */
1381 static void
1382 _thread_reaper(
1383 thread_call_param_t p0,
1384 thread_call_param_t p1)
1385 {
1386 register thread_t thread;
1387 spl_t s;
1388
1389 s = splsched();
1390 simple_lock(&reaper_lock);
1391
1392 while ((thread = (thread_t) dequeue_head(&reaper_queue)) != THREAD_NULL) {
1393 simple_unlock(&reaper_lock);
1394
1395 /*
1396 * wait for run bit to clear
1397 */
1398 thread_lock(thread);
1399 if (thread->state & TH_RUN)
1400 panic("thread reaper: TH_RUN");
1401 thread_unlock(thread);
1402 splx(s);
1403
1404 thread_doreap(thread);
1405
1406 s = splsched();
1407 simple_lock(&reaper_lock);
1408 }
1409
1410 simple_unlock(&reaper_lock);
1411 splx(s);
1412 }
1413
1414 void
1415 thread_reaper(void)
1416 {
1417 thread_call_setup(&thread_reaper_call_data, _thread_reaper, NULL);
1418 thread_reaper_call = &thread_reaper_call_data;
1419
1420 _thread_reaper(NULL, NULL);
1421 }
1422
1423 kern_return_t
1424 thread_assign(
1425 thread_act_t thr_act,
1426 processor_set_t new_pset)
1427 {
1428 #ifdef lint
1429 thr_act++; new_pset++;
1430 #endif /* lint */
1431 return(KERN_FAILURE);
1432 }
1433
1434 /*
1435 * thread_assign_default:
1436 *
1437 * Special version of thread_assign for assigning threads to default
1438 * processor set.
1439 */
1440 kern_return_t
1441 thread_assign_default(
1442 thread_act_t thr_act)
1443 {
1444 return (thread_assign(thr_act, &default_pset));
1445 }
1446
1447 /*
1448 * thread_get_assignment
1449 *
1450 * Return current assignment for this thread.
1451 */
1452 kern_return_t
1453 thread_get_assignment(
1454 thread_act_t thr_act,
1455 processor_set_t *pset)
1456 {
1457 thread_t thread;
1458
1459 if (thr_act == THR_ACT_NULL)
1460 return(KERN_INVALID_ARGUMENT);
1461 thread = act_lock_thread(thr_act);
1462 if (thread == THREAD_NULL) {
1463 act_unlock_thread(thr_act);
1464 return(KERN_INVALID_ARGUMENT);
1465 }
1466 *pset = thread->processor_set;
1467 act_unlock_thread(thr_act);
1468 pset_reference(*pset);
1469 return(KERN_SUCCESS);
1470 }
1471
1472 /*
1473 * thread_wire:
1474 *
1475 * Specify that the target thread must always be able
1476 * to run and to allocate memory.
1477 */
1478 kern_return_t
1479 thread_wire(
1480 host_priv_t host_priv,
1481 thread_act_t thr_act,
1482 boolean_t wired)
1483 {
1484 spl_t s;
1485 thread_t thread;
1486 extern void vm_page_free_reserve(int pages);
1487
1488 if (thr_act == THR_ACT_NULL || host_priv == HOST_PRIV_NULL)
1489 return (KERN_INVALID_ARGUMENT);
1490
1491 assert(host_priv == &realhost);
1492
1493 thread = act_lock_thread(thr_act);
1494 if (thread == THREAD_NULL) {
1495 act_unlock_thread(thr_act);
1496 return(KERN_INVALID_ARGUMENT);
1497 }
1498
1499 /*
1500 * This implementation only works for the current thread.
1501 * See stack_privilege.
1502 */
1503 if (thr_act != current_act()) {
1504 act_unlock_thread(thr_act);
1504 return (KERN_INVALID_ARGUMENT);
1504 }
1505
1506 s = splsched();
1507 thread_lock(thread);
1508
1509 if (wired) {
1510 if (thread->vm_privilege == FALSE)
1511 vm_page_free_reserve(1); /* XXX */
1512 thread->vm_privilege = TRUE;
1513 } else {
1514 if (thread->vm_privilege == TRUE)
1515 vm_page_free_reserve(-1); /* XXX */
1516 thread->vm_privilege = FALSE;
1517 }
1518
1519 thread_unlock(thread);
1520 splx(s);
1521 act_unlock_thread(thr_act);
1522
1523 return KERN_SUCCESS;
1524 }
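#if 0	/* Usage sketch (hypothetical caller, not compiled): a critical daemon,
	 * such as a pageout thread, wires itself so it can always run and
	 * allocate memory; host_priv_self() is assumed available to the caller.
	 */
static void
example_wire_self(void)
{
	(void) thread_wire(host_priv_self(), current_act(), TRUE);
}
#endif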
1525
1526 /*
1527 * thread_collect_scan:
1528 *
1529 * Attempt to free resources owned by threads.
1530 */
1531
1532 void
1533 thread_collect_scan(void)
1534 {
1535 /* This code runs very quickly! */
1536 }
1537
1538 /* Also disabled in vm/vm_pageout.c */
1539 boolean_t thread_collect_allowed = FALSE;
1540 unsigned thread_collect_last_tick = 0;
1541 unsigned thread_collect_max_rate = 0; /* in ticks */
1542
1543 /*
1544 * consider_thread_collect:
1545 *
1546 * Called by the pageout daemon when the system needs more free pages.
1547 */
1548
1549 void
1550 consider_thread_collect(void)
1551 {
1552 /*
1553 * By default, don't attempt thread collection more frequently
1554 * than once a second.
1555 */
1556
1557 if (thread_collect_max_rate == 0)
1558 thread_collect_max_rate = (1 << SCHED_TICK_SHIFT) + 1;
1559
1560 if (thread_collect_allowed &&
1561 (sched_tick >
1562 (thread_collect_last_tick + thread_collect_max_rate))) {
1563 thread_collect_last_tick = sched_tick;
1564 thread_collect_scan();
1565 }
1566 }
1567
1568 kern_return_t
1569 host_stack_usage(
1570 host_t host,
1571 vm_size_t *reservedp,
1572 unsigned int *totalp,
1573 vm_size_t *spacep,
1574 vm_size_t *residentp,
1575 vm_size_t *maxusagep,
1576 vm_offset_t *maxstackp)
1577 {
1578 #if !MACH_DEBUG
1579 return KERN_NOT_SUPPORTED;
1580 #else
1581 unsigned int total;
1582 vm_size_t maxusage;
1583
1584 if (host == HOST_NULL)
1585 return KERN_INVALID_HOST;
1586
1587 maxusage = 0;
1588
1589 stack_statistics(&total, &maxusage);
1590
1591 *reservedp = 0;
1592 *totalp = total;
1593 *spacep = *residentp = total * round_page(KERNEL_STACK_SIZE);
1594 *maxusagep = maxusage;
1595 *maxstackp = 0;
1596 return KERN_SUCCESS;
1597
1598 #endif /* MACH_DEBUG */
1599 }
1600
1601 /*
1602 * Return info on stack usage for threads in a specific processor set
1603 */
1604 kern_return_t
1605 processor_set_stack_usage(
1606 processor_set_t pset,
1607 unsigned int *totalp,
1608 vm_size_t *spacep,
1609 vm_size_t *residentp,
1610 vm_size_t *maxusagep,
1611 vm_offset_t *maxstackp)
1612 {
1613 #if !MACH_DEBUG
1614 return KERN_NOT_SUPPORTED;
1615 #else
1616 unsigned int total;
1617 vm_size_t maxusage;
1618 vm_offset_t maxstack;
1619
1620 register thread_t *threads;
1621 register thread_t thread;
1622
1623 unsigned int actual; /* this many things */
1624 unsigned int i;
1625
1626 vm_size_t size, size_needed;
1627 vm_offset_t addr;
1628
1629 if (pset == PROCESSOR_SET_NULL)
1630 return KERN_INVALID_ARGUMENT;
1631
1632 size = 0; addr = 0;
1633
1634 for (;;) {
1635 pset_lock(pset);
1636 if (!pset->active) {
1637 pset_unlock(pset);
1638 return KERN_INVALID_ARGUMENT;
1639 }
1640
1641 actual = pset->thread_count;
1642
1643 /* do we have the memory we need? */
1644
1645 size_needed = actual * sizeof(thread_t);
1646 if (size_needed <= size)
1647 break;
1648
1649 /* unlock the pset and allocate more memory */
1650 pset_unlock(pset);
1651
1652 if (size != 0)
1653 kfree(addr, size);
1654
1655 assert(size_needed > 0);
1656 size = size_needed;
1657
1658 addr = kalloc(size);
1659 if (addr == 0)
1660 return KERN_RESOURCE_SHORTAGE;
1661 }
1662
1663 /* OK, have memory and the processor_set is locked & active */
1664
1665 threads = (thread_t *) addr;
1666 for (i = 0, thread = (thread_t) queue_first(&pset->threads);
1667 i < actual;
1668 i++,
1669 thread = (thread_t) queue_next(&thread->pset_threads)) {
1670 thread_reference(thread);
1671 threads[i] = thread;
1672 }
1673 assert(queue_end(&pset->threads, (queue_entry_t) thread));
1674
1675 /* can unlock processor set now that we have the thread refs */
1676 pset_unlock(pset);
1677
1678 /* calculate maxusage and free thread references */
1679
1680 total = 0;
1681 maxusage = 0;
1682 maxstack = 0;
1683 for (i = 0; i < actual; i++) {
1684 int cpu;
1685 thread_t thread = threads[i];
1686 vm_offset_t stack = 0;
1687
1688 /*
1689 * thread->kernel_stack is only accurate if the
1690 * thread isn't swapped and is not executing.
1691 *
1692 * Of course, we don't have the appropriate locks
1693 * for these shenanigans.
1694 */
1695
1696 stack = thread->kernel_stack;
1697
1698 for (cpu = 0; cpu < NCPUS; cpu++)
1699 if (cpu_data[cpu].active_thread == thread) {
1700 stack = active_stacks[cpu];
1701 break;
1702 }
1703
1704 if (stack != 0) {
1705 total++;
1706 }
1707
1708 thread_deallocate(thread);
1709 }
1710
1711 if (size != 0)
1712 kfree(addr, size);
1713
1714 *totalp = total;
1715 *residentp = *spacep = total * round_page(KERNEL_STACK_SIZE);
1716 *maxusagep = maxusage;
1717 *maxstackp = maxstack;
1718 return KERN_SUCCESS;
1719
1720 #endif /* MACH_DEBUG */
1721 }
1722
1723 static int split_funnel_off = 0;
1724 funnel_t *
1725 funnel_alloc(
1726 int type)
1727 {
1728 mutex_t *m;
1729 funnel_t * fnl;
1730 if ((fnl = (funnel_t *)kalloc(sizeof(funnel_t))) != 0){
1731 bzero((void *)fnl, sizeof(funnel_t));
1732 if ((m = mutex_alloc(0)) == (mutex_t *)NULL) {
1733 kfree((vm_offset_t)fnl, sizeof(funnel_t));
1734 return(THR_FUNNEL_NULL);
1735 }
1736 fnl->fnl_mutex = m;
1737 fnl->fnl_type = type;
1738 }
1739 return(fnl);
1740 }
1741
1742 void
1743 funnel_free(
1744 funnel_t * fnl)
1745 {
1746 mutex_free(fnl->fnl_mutex);
1747 if (fnl->fnl_oldmutex)
1748 mutex_free(fnl->fnl_oldmutex);
1749 kfree((vm_offset_t)fnl, sizeof(funnel_t));
1750 }
1751
1752 void
1753 funnel_lock(
1754 funnel_t * fnl)
1755 {
1756 mutex_t * m;
1757
1758 m = fnl->fnl_mutex;
1759 restart:
1760 mutex_lock(m);
1761 fnl->fnl_mtxholder = current_thread();
1762 if (split_funnel_off && (m != fnl->fnl_mutex)) {
1763 mutex_unlock(m);
1764 m = fnl->fnl_mutex;
1765 goto restart;
1766 }
1767 }
1768
1769 void
1770 funnel_unlock(
1771 funnel_t * fnl)
1772 {
1773 mutex_unlock(fnl->fnl_mutex);
1774 fnl->fnl_mtxrelease = current_thread();
1775 }
1776
1777 funnel_t *
1778 thread_funnel_get(
1779 void)
1780 {
1781 thread_t th = current_thread();
1782
1783 if (th->funnel_state & TH_FN_OWNED) {
1784 return(th->funnel_lock);
1785 }
1786 return(THR_FUNNEL_NULL);
1787 }
1788
1789 boolean_t
1790 thread_funnel_set(
1791 funnel_t * fnl,
1792 boolean_t funneled)
1793 {
1794 thread_t cur_thread;
1795 boolean_t funnel_state_prev;
1796 boolean_t intr;
1797
1798 cur_thread = current_thread();
1799 funnel_state_prev = ((cur_thread->funnel_state & TH_FN_OWNED) == TH_FN_OWNED);
1800
1801 if (funnel_state_prev != funneled) {
1802 intr = ml_set_interrupts_enabled(FALSE);
1803
1804 if (funneled == TRUE) {
1805 if (cur_thread->funnel_lock)
1806 panic("Funnel lock called when holding one %x", cur_thread->funnel_lock);
1807 KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE,
1808 fnl, 1, 0, 0, 0);
1809 funnel_lock(fnl);
1810 KERNEL_DEBUG(0x6032434 | DBG_FUNC_NONE,
1811 fnl, 1, 0, 0, 0);
1812 cur_thread->funnel_state |= TH_FN_OWNED;
1813 cur_thread->funnel_lock = fnl;
1814 } else {
1815 if(cur_thread->funnel_lock->fnl_mutex != fnl->fnl_mutex)
1816 panic("Funnel unlock when not holding funnel");
1817 cur_thread->funnel_state &= ~TH_FN_OWNED;
1818 KERNEL_DEBUG(0x603242c | DBG_FUNC_NONE,
1819 fnl, 1, 0, 0, 0);
1820
1821 cur_thread->funnel_lock = THR_FUNNEL_NULL;
1822 funnel_unlock(fnl);
1823 }
1824 (void)ml_set_interrupts_enabled(intr);
1825 } else {
1826 /* If we are trying to acquire the funnel recursively,
1827 * check that the funnel we already hold is the same one.
1828 */
1829 if (funneled && (fnl->fnl_mutex != cur_thread->funnel_lock->fnl_mutex)) {
1830 panic("thread_funnel_set: already holding a different funnel");
1831 }
1832 }
1833 return(funnel_state_prev);
1834 }
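#if 0	/* Usage sketch (hypothetical caller, not compiled): the usual funnel
	 * discipline, with kernel_flock standing for an already-allocated
	 * funnel.  The previous state is saved and restored so nested
	 * callers behave correctly.
	 */
extern funnel_t *kernel_flock;	/* assumed: an already-allocated funnel */

static void
example_under_funnel(void)
{
	boolean_t funnel_state;

	funnel_state = thread_funnel_set(kernel_flock, TRUE);	/* acquire */
	/* ... code that must run under the funnel ... */
	(void) thread_funnel_set(kernel_flock, funnel_state);	/* restore */
}
#endif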
1835
1836 boolean_t
1837 thread_funnel_merge(
1838 funnel_t * fnl,
1839 funnel_t * otherfnl)
1840 {
1841 mutex_t * m;
1842 mutex_t * otherm;
1843 funnel_t * gfnl;
1844 extern int disable_funnel;
1845
1846 if ((gfnl = thread_funnel_get()) == THR_FUNNEL_NULL)
1847 panic("thread_funnel_merge called with no funnels held");
1848
1849 if (gfnl->fnl_type != 1)
1850 panic("thread_funnel_merge called from non kernel funnel");
1851
1852 if (gfnl != fnl)
1853 panic("thread_funnel_merge incorrect invocation");
1854
1855 if (disable_funnel || split_funnel_off)
1856 return (KERN_FAILURE);
1857
1858 m = fnl->fnl_mutex;
1859 otherm = otherfnl->fnl_mutex;
1860
1861 /* Acquire other funnel mutex */
1862 mutex_lock(otherm);
1863 split_funnel_off = 1;
1864 disable_funnel = 1;
1865 otherfnl->fnl_mutex = m;
1866 otherfnl->fnl_type = fnl->fnl_type;
1867 otherfnl->fnl_oldmutex = otherm; /* save this for future use */
1868
1869 mutex_unlock(otherm);
1870 return(KERN_SUCCESS);
1871 }
1872
1873 void
1874 thread_set_cont_arg(
1875 int arg)
1876 {
1877 thread_t self = current_thread();
1878
1879 self->saved.misc = arg;
1880 }
1881
1882 int
1883 thread_get_cont_arg(void)
1884 {
1885 thread_t self = current_thread();
1886
1887 return (self->saved.misc);
1888 }
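#if 0	/* Usage sketch (hypothetical continuation, not compiled): a thread
	 * stashes a small integer before blocking with a continuation, then
	 * recovers it when the continuation runs on a fresh kernel stack.
	 */
static void
my_continuation(void)
{
	int arg = thread_get_cont_arg();	/* the value saved below */

	/* ... finish the deferred work using arg ... */
}

static void
my_blocking_path(void)
{
	thread_set_cont_arg(42);
	thread_block(my_continuation);
	/*NOTREACHED*/
}
#endif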
1889
1890 /*
1891 * Export routines to other components for things that are done as macros
1892 * within the osfmk component.
1893 */
1894 #undef thread_should_halt
1895 boolean_t
1896 thread_should_halt(
1897 thread_shuttle_t th)
1898 {
1899 return(thread_should_halt_fast(th));
1900 }
1901