1/*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22/*
23 * @OSF_FREE_COPYRIGHT@
24 */
25/*
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
28 * All Rights Reserved.
29 *
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
35 *
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 *
40 * Carnegie Mellon requests users of this software to return to
41 *
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
46 *
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
49 */
50/*
51 */
52/*
53 * File: kern/thread.c
54 * Author: Avadis Tevanian, Jr., Michael Wayne Young, David Golub
55 * Date: 1986
56 *
57 * Thread/thread_shuttle management primitives implementation.
58 */
59/*
60 * Copyright (c) 1993 The University of Utah and
61 * the Computer Systems Laboratory (CSL). All rights reserved.
62 *
63 * Permission to use, copy, modify and distribute this software and its
64 * documentation is hereby granted, provided that both the copyright
65 * notice and this permission notice appear in all copies of the
66 * software, derivative works or modified versions, and any portions
67 * thereof, and that both notices appear in supporting documentation.
68 *
69 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
70 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
71 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
72 *
73 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
74 * improvements that they make and grant CSL redistribution rights.
75 *
76 */
77
78#include <cpus.h>
79#include <mach_host.h>
80#include <simple_clock.h>
81#include <mach_debug.h>
82#include <mach_prof.h>
83
84#include <mach/boolean.h>
85#include <mach/policy.h>
86#include <mach/thread_info.h>
87#include <mach/thread_special_ports.h>
88#include <mach/thread_status.h>
89#include <mach/time_value.h>
90#include <mach/vm_param.h>
91#include <kern/ast.h>
92#include <kern/cpu_data.h>
93#include <kern/counters.h>
94#include <kern/etap_macros.h>
95#include <kern/ipc_mig.h>
96#include <kern/ipc_tt.h>
97#include <kern/mach_param.h>
98#include <kern/machine.h>
99#include <kern/misc_protos.h>
100#include <kern/processor.h>
101#include <kern/queue.h>
102#include <kern/sched.h>
103#include <kern/sched_prim.h>
104#include <kern/mk_sp.h> /*** ??? fix so this can be removed ***/
105#include <kern/task.h>
106#include <kern/thread.h>
107#include <kern/thread_act.h>
108#include <kern/thread_swap.h>
109#include <kern/host.h>
110#include <kern/zalloc.h>
111#include <vm/vm_kern.h>
112#include <ipc/ipc_kmsg.h>
113#include <ipc/ipc_port.h>
114#include <machine/thread.h> /* for MACHINE_STACK */
115#include <kern/profile.h>
116#include <kern/assert.h>
117#include <sys/kdebug.h>
118
119/*
120 * Exported interfaces
121 */
122
123#include <mach/thread_act_server.h>
124#include <mach/mach_host_server.h>
125
126/*
127 * Per-Cpu stashed global state
128 */
129vm_offset_t active_stacks[NCPUS]; /* per-cpu active stacks */
130vm_offset_t kernel_stack[NCPUS]; /* top of active stacks */
131thread_act_t active_kloaded[NCPUS]; /* + act if kernel loaded */
 132boolean_t first_thread;
133
134struct zone *thread_shuttle_zone;
135
136queue_head_t reaper_queue;
137decl_simple_lock_data(,reaper_lock)
138
139extern int tick;
140
141extern void pcb_module_init(void);
142
143struct thread_shuttle pageout_thread;
144
145/* private */
146static struct thread_shuttle thr_sh_template;
147
148#if MACH_DEBUG
149
150#ifdef MACHINE_STACK
 151extern void stack_statistics(
152 unsigned int *totalp,
153 vm_size_t *maxusagep);
 154#endif /* MACHINE_STACK */
155#endif /* MACH_DEBUG */
156
157/* Forwards */
158void thread_collect_scan(void);
159
160kern_return_t thread_create_shuttle(
161 thread_act_t thr_act,
162 integer_t priority,
163 void (*start)(void),
164 thread_t *new_thread);
165
166extern void Load_context(
167 thread_t thread);
168
169
170/*
171 * Machine-dependent code must define:
172 * thread_machine_init
173 * thread_machine_terminate
174 * thread_machine_collect
175 *
176 * The thread->pcb field is reserved for machine-dependent code.
177 */
178
179#ifdef MACHINE_STACK
180/*
181 * Machine-dependent code must define:
182 * stack_alloc_try
183 * stack_alloc
184 * stack_free
 185 * stack_free_stack
186 * stack_collect
187 * and if MACH_DEBUG:
188 * stack_statistics
189 */
190#else /* MACHINE_STACK */
191/*
192 * We allocate stacks from generic kernel VM.
193 * Machine-dependent code must define:
194 * machine_kernel_stack_init
195 *
196 * The stack_free_list can only be accessed at splsched,
197 * because stack_alloc_try/thread_invoke operate at splsched.
198 */
199
200decl_simple_lock_data(,stack_lock_data) /* splsched only */
201#define stack_lock() simple_lock(&stack_lock_data)
202#define stack_unlock() simple_unlock(&stack_lock_data)
203
204mutex_t stack_map_lock; /* Lock when allocating stacks maps */
205vm_map_t stack_map; /* Map for allocating stacks */
206vm_offset_t stack_free_list; /* splsched only */
207unsigned int stack_free_max = 0;
208unsigned int stack_free_count = 0; /* splsched only */
 209unsigned int stack_free_limit = 1; /* Arbitrary */
210
211unsigned int stack_alloc_hits = 0; /* debugging */
212unsigned int stack_alloc_misses = 0; /* debugging */
213
214unsigned int stack_alloc_total = 0;
215unsigned int stack_alloc_hiwater = 0;
 216unsigned int stack_alloc_bndry = 0;
217
218
219/*
220 * The next field is at the base of the stack,
221 * so the low end is left unsullied.
222 */
223
224#define stack_next(stack) (*((vm_offset_t *)((stack) + KERNEL_STACK_SIZE) - 1))
225
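/*
 * Illustrative sketch (assuming the free-list usage in this file):
 * stack_next() names the single vm_offset_t stored at
 * (stack + KERNEL_STACK_SIZE - sizeof(vm_offset_t)), so a free stack is
 * pushed and popped like this:
 *
 *	stack_next(stack) = stack_free_list;	\/\* push \*\/
 *	stack_free_list = stack;
 *
 *	stack = stack_free_list;		\/\* pop \*\/
 *	stack_free_list = stack_next(stack);
 */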
226/*
227 * stack_alloc:
228 *
229 * Allocate a kernel stack for an activation.
230 * May block.
231 */
232vm_offset_t
233stack_alloc(
234 thread_t thread,
235 void (*start_pos)(thread_t))
236{
237 vm_offset_t stack = thread->kernel_stack;
238 spl_t s;
 239
240 if (stack)
241 return (stack);
242
243/*
244 * We first try the free list. It is probably empty, or
245 * stack_alloc_try would have succeeded, but possibly a stack was
246 * freed before the swapin thread got to us.
247 *
 248 * We allocate stacks from their own map, which is a submap of the
249 * kernel map. Because we want to have a guard page (at least) in
250 * front of each stack to catch evil code that overruns its stack, we
251 * allocate the stack on aligned boundaries. The boundary is
252 * calculated as the next power of 2 above the stack size. For
253 * example, a stack of 4 pages would have a boundry of 8, likewise 5
254 * would also be 8.
255 *
256 * We limit the number of stacks to be one allocation chunk
257 * (THREAD_CHUNK) more than the maximum number of threads
 258 * (THREAD_MAX). The extra is to allow for privileged threads that
259 * can sometimes have 2 stacks.
260 *
261 */
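/*
 * A worked example, assuming PAGE_SIZE is 4K and a 4-page (16K) kernel
 * stack: thread_init() computes a 32K boundary, so every stack begins on
 * a 32K-aligned address and occupies only the first 16K of its slot.  Any
 * two stacks are therefore separated by at least 16K of unmapped submap
 * space, which acts as the guard area described above.
 */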
262
263 s = splsched();
264 stack_lock();
265 stack = stack_free_list;
266 if (stack != 0) {
267 stack_free_list = stack_next(stack);
268 stack_free_count--;
269 }
270 stack_unlock();
271 splx(s);
272
273 if (stack != 0) { /* Did we find a free one? */
274 stack_attach(thread, stack, start_pos); /* Initialize it */
275 return (stack); /* Send it on home */
276 }
277
278 if (kernel_memory_allocate(
279 stack_map, &stack,
280 KERNEL_STACK_SIZE, stack_alloc_bndry - 1,
281 KMA_KOBJECT) != KERN_SUCCESS)
282 panic("stack_alloc: no space left for stack maps");
 283
284 stack_alloc_total++;
285 if (stack_alloc_total > stack_alloc_hiwater)
286 stack_alloc_hiwater = stack_alloc_total;
 287
288 stack_attach(thread, stack, start_pos);
289 return (stack);
290}
291
292/*
293 * stack_free:
294 *
295 * Free a kernel stack.
296 * Called at splsched.
297 */
298
299void
300stack_free(
301 thread_t thread)
302{
303 vm_offset_t stack = stack_detach(thread);
 304
305 assert(stack);
306 if (stack != thread->stack_privilege) {
307 stack_lock();
308 stack_next(stack) = stack_free_list;
309 stack_free_list = stack;
310 if (++stack_free_count > stack_free_max)
311 stack_free_max = stack_free_count;
312 stack_unlock();
313 }
314}
315
316static void
317stack_free_stack(
318 vm_offset_t stack)
319{
320 spl_t s;
321
322 s = splsched();
323 stack_lock();
324 stack_next(stack) = stack_free_list;
325 stack_free_list = stack;
326 if (++stack_free_count > stack_free_max)
327 stack_free_max = stack_free_count;
328 stack_unlock();
329 splx(s);
330}
331
332/*
333 * stack_collect:
334 *
335 * Free excess kernel stacks.
336 * May block.
337 */
338
339void
340stack_collect(void)
341{
342 vm_offset_t stack;
343 int i;
344 spl_t s;
345
346 s = splsched();
347 stack_lock();
348 while (stack_free_count > stack_free_limit) {
349 stack = stack_free_list;
350 stack_free_list = stack_next(stack);
351 stack_free_count--;
352 stack_unlock();
353 splx(s);
354
355 if (vm_map_remove(
356 stack_map, stack, stack + KERNEL_STACK_SIZE,
357 VM_MAP_REMOVE_KUNWIRE) != KERN_SUCCESS)
358 panic("stack_collect: vm_map_remove failed");
359
360 s = splsched();
 361 stack_lock();
 362 stack_alloc_total--;
363 }
364 stack_unlock();
365 splx(s);
366}
367
368
369#if MACH_DEBUG
370/*
371 * stack_statistics:
372 *
373 * Return statistics on cached kernel stacks.
374 * *maxusagep must be initialized by the caller.
375 */
376
377void
378stack_statistics(
379 unsigned int *totalp,
380 vm_size_t *maxusagep)
381{
382 spl_t s;
383
384 s = splsched();
385 stack_lock();
386
 387 *totalp = stack_free_count;
388 *maxusagep = 0;
389
1c79356b
A
390 stack_unlock();
391 splx(s);
392}
393#endif /* MACH_DEBUG */
394
395#endif /* MACHINE_STACK */
396
397
 398void stack_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
399 vm_size_t *alloc_size, int *collectable, int *exhaustable)
400{
401 *count = stack_alloc_total - stack_free_count;
402 *cur_size = KERNEL_STACK_SIZE * stack_alloc_total;
403 *max_size = KERNEL_STACK_SIZE * stack_alloc_hiwater;
404 *elem_size = KERNEL_STACK_SIZE;
405 *alloc_size = KERNEL_STACK_SIZE;
406 *collectable = 1;
407 *exhaustable = 0;
408}
409
410
411/*
412 * stack_privilege:
413 *
414 * stack_alloc_try on this thread must always succeed.
415 */
416
417void
418stack_privilege(
419 register thread_t thread)
420{
421 /*
422 * This implementation only works for the current thread.
423 */
424
425 if (thread != current_thread())
426 panic("stack_privilege");
427
428 if (thread->stack_privilege == 0)
429 thread->stack_privilege = current_stack();
430}
431
432/*
433 * stack_alloc_try:
434 *
435 * Non-blocking attempt to allocate a kernel stack.
436 * Called at splsched with the thread locked.
437 */
438
439boolean_t stack_alloc_try(
440 thread_t thread,
441 void (*start_pos)(thread_t))
442{
443 register vm_offset_t stack = thread->stack_privilege;
444
445 if (stack == 0) {
446 stack_lock();
447
448 stack = stack_free_list;
449 if (stack != (vm_offset_t)0) {
450 stack_free_list = stack_next(stack);
451 stack_free_count--;
452 }
453
454 stack_unlock();
455 }
456
457 if (stack != 0) {
458 stack_attach(thread, stack, start_pos);
459 stack_alloc_hits++;
460
461 return (TRUE);
462 }
463 else {
 464 stack_alloc_misses++;
465
466 return (FALSE);
467 }
468}
469
470uint64_t max_unsafe_computation;
471extern int max_unsafe_quanta;
472
473uint32_t sched_safe_duration;
474
475uint64_t max_poll_computation;
476extern int max_poll_quanta;
477
478uint32_t std_quantum;
479uint32_t min_std_quantum;
480
481uint32_t max_rt_quantum;
482uint32_t min_rt_quantum;
483
484void
485thread_init(void)
486{
487 kern_return_t ret;
488 unsigned int stack;
489
490 thread_shuttle_zone = zinit(
491 sizeof(struct thread_shuttle),
492 THREAD_MAX * sizeof(struct thread_shuttle),
493 THREAD_CHUNK * sizeof(struct thread_shuttle),
494 "threads");
495
496 /*
497 * Fill in a template thread_shuttle for fast initialization.
498 * [Fields that must be (or are typically) reset at
499 * time of creation are so noted.]
500 */
501
502 /* thr_sh_template.links (none) */
503 thr_sh_template.runq = RUN_QUEUE_NULL;
504
505
506 /* thr_sh_template.task (later) */
507 /* thr_sh_template.thread_list (later) */
508 /* thr_sh_template.pset_threads (later) */
509
510 /* reference for activation */
511 thr_sh_template.ref_count = 1;
 512
513 thr_sh_template.reason = AST_NONE;
514 thr_sh_template.at_safe_point = FALSE;
515 thr_sh_template.wait_event = NO_EVENT64;
 516 thr_sh_template.wait_queue = WAIT_QUEUE_NULL;
517 thr_sh_template.wait_result = THREAD_WAITING;
518 thr_sh_template.interrupt_level = THREAD_ABORTSAFE;
 519 thr_sh_template.state = TH_STACK_HANDOFF | TH_WAIT | TH_UNINT;
520 thr_sh_template.wake_active = FALSE;
521 thr_sh_template.active_callout = FALSE;
522 thr_sh_template.continuation = (void (*)(void))0;
523 thr_sh_template.top_act = THR_ACT_NULL;
524
525 thr_sh_template.importance = 0;
526 thr_sh_template.sched_mode = 0;
 527 thr_sh_template.safe_mode = 0;
528
529 thr_sh_template.priority = 0;
530 thr_sh_template.sched_pri = 0;
 531 thr_sh_template.max_priority = 0;
 532 thr_sh_template.task_priority = 0;
533 thr_sh_template.promotions = 0;
534 thr_sh_template.pending_promoter_index = 0;
535 thr_sh_template.pending_promoter[0] =
536 thr_sh_template.pending_promoter[1] = NULL;
537
538 thr_sh_template.current_quantum = 0;
539
 540 thr_sh_template.computation_metered = 0;
 541 thr_sh_template.computation_epoch = 0;
542
543 thr_sh_template.cpu_usage = 0;
 544 thr_sh_template.cpu_delta = 0;
 545 thr_sh_template.sched_usage = 0;
 546 thr_sh_template.sched_delta = 0;
547 thr_sh_template.sched_stamp = 0;
548 thr_sh_template.sleep_stamp = 0;
 549 thr_sh_template.safe_release = 0;
 550
551 thr_sh_template.bound_processor = PROCESSOR_NULL;
552 thr_sh_template.last_processor = PROCESSOR_NULL;
553 thr_sh_template.last_switch = 0;
554
555 thr_sh_template.vm_privilege = FALSE;
556
557 timer_init(&(thr_sh_template.user_timer));
558 timer_init(&(thr_sh_template.system_timer));
559 thr_sh_template.user_timer_save.low = 0;
560 thr_sh_template.user_timer_save.high = 0;
561 thr_sh_template.system_timer_save.low = 0;
562 thr_sh_template.system_timer_save.high = 0;
563
564 thr_sh_template.active = FALSE; /* reset */
565
 566 thr_sh_template.processor_set = PROCESSOR_SET_NULL;
567#if MACH_HOST
568 thr_sh_template.may_assign = TRUE;
569 thr_sh_template.assign_active = FALSE;
570#endif /* MACH_HOST */
571 thr_sh_template.funnel_state = 0;
572
573 /*
574 * Initialize other data structures used in
575 * this module.
576 */
577
578 queue_init(&reaper_queue);
579 simple_lock_init(&reaper_lock, ETAP_THREAD_REAPER);
580 thr_sh_template.funnel_lock = THR_FUNNEL_NULL;
581
582#ifndef MACHINE_STACK
583 simple_lock_init(&stack_lock_data, ETAP_THREAD_STACK); /* Initialize the stack lock */
584
585 if (KERNEL_STACK_SIZE < round_page(KERNEL_STACK_SIZE)) { /* Kernel stacks must be multiples of pages */
586 panic("thread_init: kernel stack size (%08X) must be a multiple of page size (%08X)\n",
587 KERNEL_STACK_SIZE, PAGE_SIZE);
588 }
589
590 for(stack_alloc_bndry = PAGE_SIZE; stack_alloc_bndry <= KERNEL_STACK_SIZE; stack_alloc_bndry <<= 1); /* Find next power of 2 above stack size */
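	/*
	 * For example, assuming PAGE_SIZE = 4096 and KERNEL_STACK_SIZE = 16384
	 * (4 pages), the loop above doubles 4096 -> 8192 -> 16384 -> 32768 and
	 * stops at 32768, the first power of 2 strictly greater than the
	 * stack size.
	 */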
591
592 ret = kmem_suballoc(kernel_map, /* Suballocate from the kernel map */
593
594 &stack,
 595 (stack_alloc_bndry * (2*THREAD_MAX + 64)), /* Allocate enough for all of it */
596 FALSE, /* Say not pageable so that it is wired */
597 TRUE, /* Allocate from anywhere */
598 &stack_map); /* Allocate a submap */
599
600 if(ret != KERN_SUCCESS) { /* Did we get one? */
601 panic("thread_init: kmem_suballoc for stacks failed - ret = %d\n", ret); /* Die */
602 }
603 stack = vm_map_min(stack_map); /* Make sure we skip the first hunk */
604 ret = vm_map_enter(stack_map, &stack, PAGE_SIZE, 0, /* Make sure there is nothing at the start */
605 0, /* Force it at start */
606 VM_OBJECT_NULL, 0, /* No object yet */
607 FALSE, /* No copy */
608 VM_PROT_NONE, /* Allow no access */
609 VM_PROT_NONE, /* Allow no access */
610 VM_INHERIT_DEFAULT); /* Just be normal */
611
612 if(ret != KERN_SUCCESS) { /* Did it work? */
613 panic("thread_init: dummy alignment allocation failed; ret = %d\n", ret);
614 }
615
616#endif /* MACHINE_STACK */
617
 618#if MACH_LDEBUG
619 thr_sh_template.mutex_count = 0;
620#endif /* MACH_LDEBUG */
621
622 {
623 uint64_t abstime;
624
625 clock_interval_to_absolutetime_interval(
626 std_quantum_us, NSEC_PER_USEC, &abstime);
627 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
628 std_quantum = abstime;
 629
630 /* 250 us */
631 clock_interval_to_absolutetime_interval(250, NSEC_PER_USEC, &abstime);
632 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
633 min_std_quantum = abstime;
634
635 /* 50 us */
636 clock_interval_to_absolutetime_interval(50, NSEC_PER_USEC, &abstime);
637 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
638 min_rt_quantum = abstime;
639
640 /* 50 ms */
 641 clock_interval_to_absolutetime_interval(
642 50, 1000*NSEC_PER_USEC, &abstime);
643 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
644 max_rt_quantum = abstime;
645
646 max_unsafe_computation = max_unsafe_quanta * std_quantum;
647 max_poll_computation = max_poll_quanta * std_quantum;
648
649 sched_safe_duration = 2 * max_unsafe_quanta *
650 (std_quantum_us / (1000 * 1000)) *
651 (1 << SCHED_TICK_SHIFT);
652 }
653
 654 first_thread = TRUE;
655 /*
656 * Initialize any machine-dependent
657 * per-thread structures necessary.
658 */
659 thread_machine_init();
660}
661
662/*
663 * Called at splsched.
664 */
665void
666thread_reaper_enqueue(
667 thread_t thread)
668{
 669 simple_lock(&reaper_lock);
 670 enqueue_tail(&reaper_queue, (queue_entry_t)thread);
671 simple_unlock(&reaper_lock);
672
 673 thread_wakeup((event_t)&reaper_queue);
674}
675
676void
677thread_termination_continue(void)
678{
679 panic("thread_termination_continue");
680 /*NOTREACHED*/
681}
682
683/*
684 * Routine: thread_terminate_self
685 *
686 * This routine is called by a thread which has unwound from
 687 * its current RPC and kernel contexts and found that its
688 * root activation has been marked for extinction. This lets
689 * it clean up the last few things that can only be cleaned
690 * up in this context and then impale itself on the reaper
691 * queue.
692 *
693 * When the reaper gets the thread, it will deallocate the
694 * thread_act's reference on itself, which in turn will release
695 * its own reference on this thread. By doing things in that
696 * order, a thread_act will always have a valid thread - but the
697 * thread may persist beyond having a thread_act (but must never
698 * run like that).
699 */
700void
701thread_terminate_self(void)
702{
703 thread_act_t thr_act = current_act();
704 thread_t thread;
 705 task_t task = thr_act->task;
 706 long active_acts;
707 spl_t s;
708
709 /*
710 * We should be at the base of the inheritance chain.
711 */
 712 thread = act_lock_thread(thr_act);
713 assert(thr_act->thread == thread);
714
715 /* This will allow no more control ops on this thr_act. */
716 ipc_thr_act_disable(thr_act);
717
718 /* Clean-up any ulocks that are still owned by the thread
719 * activation (acquired but not released or handed-off).
720 */
721 act_ulock_release_all(thr_act);
722
723 act_unlock_thread(thr_act);
724
725 _mk_sp_thread_depress_abort(thread, TRUE);
726
727 /*
728 * Check to see if this is the last active activation. By
729 * this we mean the last activation to call thread_terminate_self.
730 * If so, and the task is associated with a BSD process, we
731 * need to call BSD and let them clean up.
732 */
 733 active_acts = hw_atomic_sub(&task->active_act_count, 1);
 734
735 if (active_acts == 0 && task->bsd_info)
736 proc_exit(task->bsd_info);
 737
 738 /* JMM - for now, no migration */
739 assert(!thr_act->lower);
740
741 s = splsched();
742 thread_lock(thread);
743 thread->active = FALSE;
744 thread_unlock(thread);
745 splx(s);
746
747 thread_timer_terminate();
748
749 /* flush any lazy HW state while in own context */
750 thread_machine_flush(thr_act);
751
752 ipc_thread_terminate(thread);
753
754 s = splsched();
755 thread_lock(thread);
 756 thread->state |= TH_TERMINATE;
 757 assert((thread->state & TH_UNINT) == 0);
 758 thread_mark_wait_locked(thread, THREAD_UNINT);
 759 assert(thread->promotions == 0);
760 thread_unlock(thread);
761 /* splx(s); */
762
763 ETAP_SET_REASON(thread, BLOCKED_ON_TERMINATION);
 764 thread_block(thread_termination_continue);
765 /*NOTREACHED*/
766}
767
768/*
769 * Create a new thread.
 770 * Doesn't start the thread running; it first must be attached to
771 * an activation - then use thread_go to start it.
772 */
773kern_return_t
774thread_create_shuttle(
775 thread_act_t thr_act,
776 integer_t priority,
777 void (*start)(void),
778 thread_t *new_thread)
779{
 780 kern_return_t result;
781 thread_t new_shuttle;
782 task_t parent_task = thr_act->task;
783 processor_set_t pset;
784
785 /*
786 * Allocate a thread and initialize static fields
787 */
788 if (first_thread) {
789 new_shuttle = &pageout_thread;
790 first_thread = FALSE;
791 } else
792 new_shuttle = (thread_t)zalloc(thread_shuttle_zone);
793 if (new_shuttle == THREAD_NULL)
794 return (KERN_RESOURCE_SHORTAGE);
795
796#ifdef DEBUG
797 if (new_shuttle != &pageout_thread)
798 assert(!thr_act->thread);
799#endif
800
801 *new_shuttle = thr_sh_template;
802
803 thread_lock_init(new_shuttle);
804 wake_lock_init(new_shuttle);
805 new_shuttle->sleep_stamp = sched_tick;
806
807 /*
808 * Thread still isn't runnable yet (our caller will do
809 * that). Initialize runtime-dependent fields here.
810 */
811 result = thread_machine_create(new_shuttle, thr_act, thread_continue);
812 assert (result == KERN_SUCCESS);
813
814 thread_start(new_shuttle, start);
815 thread_timer_setup(new_shuttle);
816 ipc_thread_init(new_shuttle);
817
 818 pset = parent_task->processor_set;
 819 assert(pset == &default_pset);
820 pset_lock(pset);
821
822 task_lock(parent_task);
 823 assert(parent_task->processor_set == pset);
824
825 /*
826 * Don't need to initialize because the context switch
827 * code will set it before it can be used.
828 */
829 if (!parent_task->active) {
830 task_unlock(parent_task);
831 pset_unlock(pset);
 832 thread_machine_destroy(new_shuttle);
833 zfree(thread_shuttle_zone, (vm_offset_t) new_shuttle);
834 return (KERN_FAILURE);
835 }
836
837 act_attach(thr_act, new_shuttle, 0);
838
839 /* Chain the thr_act onto the task's list */
840 queue_enter(&parent_task->thr_acts, thr_act, thread_act_t, thr_acts);
841 parent_task->thr_act_count++;
842 parent_task->res_act_count++;
843
844 /* So terminating threads don't need to take the task lock to decrement */
845 hw_atomic_add(&parent_task->active_act_count, 1);
 846
 847 /* Associate the thread with the processor set */
 848 pset_add_thread(pset, new_shuttle);
849
850 /* Set the thread's scheduling parameters */
851 if (parent_task != kernel_task)
852 new_shuttle->sched_mode |= TH_MODE_TIMESHARE;
 853 new_shuttle->max_priority = parent_task->max_priority;
 854 new_shuttle->task_priority = parent_task->priority;
855 new_shuttle->priority = (priority < 0)? parent_task->priority: priority;
856 if (new_shuttle->priority > new_shuttle->max_priority)
857 new_shuttle->priority = new_shuttle->max_priority;
858 new_shuttle->importance =
859 new_shuttle->priority - new_shuttle->task_priority;
860 new_shuttle->sched_stamp = sched_tick;
 861 compute_priority(new_shuttle, FALSE);
862
863#if ETAP_EVENT_MONITOR
 864 new_shuttle->etap_reason = 0;
 865 new_shuttle->etap_trace = FALSE;
866#endif /* ETAP_EVENT_MONITOR */
867
868 new_shuttle->active = TRUE;
869 thr_act->active = TRUE;
 870
871 *new_thread = new_shuttle;
872
873 {
 874 long dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4;
 875
876 KERNEL_DEBUG_CONSTANT(
877 TRACEDBG_CODE(DBG_TRACE_DATA, 1) | DBG_FUNC_NONE,
878 (vm_address_t)new_shuttle, 0, 0, 0, 0);
 879
880 kdbg_trace_string(parent_task->bsd_info,
881 &dbg_arg1, &dbg_arg2, &dbg_arg3, &dbg_arg4);
882
883 KERNEL_DEBUG_CONSTANT(
884 TRACEDBG_CODE(DBG_TRACE_STRING, 1) | DBG_FUNC_NONE,
885 dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, 0);
886 }
887
888 return (KERN_SUCCESS);
889}
890
891extern void thread_bootstrap_return(void);
892
893kern_return_t
894thread_create(
895 task_t task,
896 thread_act_t *new_act)
897{
 898 kern_return_t result;
899 thread_t thread;
900 thread_act_t act;
901
902 if (task == TASK_NULL)
903 return KERN_INVALID_ARGUMENT;
904
 905 result = act_create(task, &act);
906 if (result != KERN_SUCCESS)
907 return (result);
908
 909 result = thread_create_shuttle(act, -1, thread_bootstrap_return, &thread);
 910 if (result != KERN_SUCCESS) {
 911 act_deallocate(act);
912 return (result);
913 }
914
915 act->user_stop_count = 1;
916 thread_hold(act);
917 if (task->suspend_count > 0)
918 thread_hold(act);
 919
920 pset_unlock(task->processor_set);
921 task_unlock(task);
 922
 923 *new_act = act;
924
925 return (KERN_SUCCESS);
926}
927
928kern_return_t
929thread_create_running(
 930 register task_t task,
931 int flavor,
932 thread_state_t new_state,
933 mach_msg_type_number_t new_state_count,
 934 thread_act_t *new_act) /* OUT */
935{
936 register kern_return_t result;
937 thread_t thread;
938 thread_act_t act;
939
940 if (task == TASK_NULL)
941 return KERN_INVALID_ARGUMENT;
 942
 943 result = act_create(task, &act);
944 if (result != KERN_SUCCESS)
945 return (result);
946
 947 result = thread_create_shuttle(act, -1, thread_bootstrap_return, &thread);
 948 if (result != KERN_SUCCESS) {
 949 act_deallocate(act);
950 return (result);
951 }
952
953 act_lock(act);
954 result = act_machine_set_state(act, flavor, new_state, new_state_count);
 955 if (result != KERN_SUCCESS) {
956 act_unlock(act);
957 pset_unlock(task->processor_set);
958 task_unlock(task);
959
960 (void)thread_terminate(act);
961 return (result);
962 }
963
964 clear_wait(thread, THREAD_AWAKENED);
965 act->inited = TRUE;
966 act_unlock(act);
967 pset_unlock(task->processor_set);
968 task_unlock(task);
969
970 *new_act = act;
971
972 return (result);
973}
974
975/*
976 * kernel_thread:
977 *
 978 * Create a kernel thread in the specified task, and
979 * optionally start it running.
980 */
981thread_t
982kernel_thread_with_priority(
983 task_t task,
984 integer_t priority,
985 void (*start)(void),
 986 boolean_t alloc_stack,
987 boolean_t start_running)
988{
989 kern_return_t result;
990 thread_t thread;
 991 thread_act_t act;
 992
993 result = act_create(task, &act);
994 if (result != KERN_SUCCESS)
995 return (THREAD_NULL);
 996
 997 result = thread_create_shuttle(act, priority, start, &thread);
 998 if (result != KERN_SUCCESS) {
999 act_deallocate(act);
1000 return (THREAD_NULL);
1001 }
1002
1003 pset_unlock(task->processor_set);
1004 task_unlock(task);
1005
1006 if (alloc_stack)
1007 thread_doswapin(thread);
 1008
 1009 act_lock(act);
 1010 if (start_running)
1011 clear_wait(thread, THREAD_AWAKENED);
1012 act->inited = TRUE;
1013 act_unlock(act);
 1014
 1015 act_deallocate(act);
 1016
1017 return (thread);
1018}
1019
1020thread_t
1021kernel_thread(
1022 task_t task,
1023 void (*start)(void))
1024{
 1025 return kernel_thread_with_priority(task, -1, start, FALSE, TRUE);
1026}
1027
1028unsigned int c_weird_pset_ref_exit = 0; /* pset code raced us */
1029
1030#if MACH_HOST
 1031/* Preclude thread processor set assignment */
1032#define thread_freeze(thread) assert((thread)->processor_set == &default_pset)
1033
 1034/* Allow thread processor set assignment */
1035#define thread_unfreeze(thread) assert((thread)->processor_set == &default_pset)
1036
1037#endif /* MACH_HOST */
1038
1039void
1040thread_deallocate(
1041 thread_t thread)
1042{
1043 task_t task;
1044 processor_set_t pset;
 1045 int refs;
1046 spl_t s;
1047
1048 if (thread == THREAD_NULL)
1049 return;
1050
1051 /*
 1052 * First, check for new count > 0 (the common case).
1053 * Only the thread needs to be locked.
1054 */
1055 s = splsched();
1056 thread_lock(thread);
1057 refs = --thread->ref_count;
1058 thread_unlock(thread);
1059 splx(s);
1060
1061 if (refs > 0)
 1062 return;
1063
1064 if (thread == current_thread())
1065 panic("thread deallocating itself");
1066
1067 /*
1068 * There is a dangling pointer to the thread from the
1069 * processor_set. To clean it up, we freeze the thread
1070 * in the pset (because pset destruction can cause even
1071 * reference-less threads to be reassigned to the default
1072 * pset) and then remove it.
 1073 */
 1074
 1075#if MACH_HOST
 1076 thread_freeze(thread);
 1077#endif
1078
1079 pset = thread->processor_set;
1080 pset_lock(pset);
 1081 pset_remove_thread(pset, thread);
1082 pset_unlock(pset);
1083
1084#if MACH_HOST
1085 thread_unfreeze(thread);
1086#endif
1087
1088 pset_deallocate(pset);
 1089
1090 if (thread->stack_privilege != 0) {
1091 if (thread->stack_privilege != thread->kernel_stack)
1092 stack_free_stack(thread->stack_privilege);
1093 thread->stack_privilege = 0;
 1094 }
 1095 /* frees kernel stack & other MD resources */
1096 thread_machine_destroy(thread);
1097
1098 zfree(thread_shuttle_zone, (vm_offset_t) thread);
1099}
1100
1101void
1102thread_reference(
1103 thread_t thread)
1104{
1105 spl_t s;
1106
1107 if (thread == THREAD_NULL)
1108 return;
1109
1110 s = splsched();
1111 thread_lock(thread);
 1112 thread_reference_locked(thread);
1113 thread_unlock(thread);
1114 splx(s);
1115}
1116
1117/*
1118 * Called with "appropriate" thread-related locks held on
1119 * thread and its top_act for synchrony with RPC (see
1120 * act_lock_thread()).
1121 */
1122kern_return_t
1123thread_info_shuttle(
1124 register thread_act_t thr_act,
1125 thread_flavor_t flavor,
1126 thread_info_t thread_info_out, /* ptr to OUT array */
1127 mach_msg_type_number_t *thread_info_count) /*IN/OUT*/
1128{
1129 register thread_t thread = thr_act->thread;
1130 int state, flags;
1131 spl_t s;
1132
1133 if (thread == THREAD_NULL)
1134 return (KERN_INVALID_ARGUMENT);
1135
1136 if (flavor == THREAD_BASIC_INFO) {
1137 register thread_basic_info_t basic_info;
1138
1139 if (*thread_info_count < THREAD_BASIC_INFO_COUNT)
1140 return (KERN_INVALID_ARGUMENT);
1141
1142 basic_info = (thread_basic_info_t) thread_info_out;
1143
1144 s = splsched();
1145 thread_lock(thread);
1146
1147 /* fill in info */
1148
1149 thread_read_times(thread, &basic_info->user_time,
1150 &basic_info->system_time);
1151
1152 /*
1153 * Update lazy-evaluated scheduler info because someone wants it.
1154 */
1155 if (thread->sched_stamp != sched_tick)
1156 update_priority(thread);
1157
1158 basic_info->sleep_time = 0;
1159
1160 /*
1161 * To calculate cpu_usage, first correct for timer rate,
1162 * then for 5/8 ageing. The correction factor [3/5] is
1163 * (1/(5/8) - 1).
1164 */
1165 basic_info->cpu_usage = (thread->cpu_usage << SCHED_TICK_SHIFT) /
1166 (TIMER_RATE / TH_USAGE_SCALE);
1167 basic_info->cpu_usage = (basic_info->cpu_usage * 3) / 5;
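		/* i.e. the factor named above: (1/(5/8)) - 1 = 8/5 - 1 = 3/5 */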
 1168#if SIMPLE_CLOCK
1169 /*
1170 * Clock drift compensation.
1171 */
1172 basic_info->cpu_usage = (basic_info->cpu_usage * 1000000) / sched_usec;
 1173#endif /* SIMPLE_CLOCK */
 1174
1175 basic_info->policy = ((thread->sched_mode & TH_MODE_TIMESHARE)?
1176 POLICY_TIMESHARE: POLICY_RR);
1177
1178 flags = 0;
 1179 if (thread->state & TH_IDLE)
1180 flags |= TH_FLAGS_IDLE;
1181
1182 if (thread->state & TH_STACK_HANDOFF)
1183 flags |= TH_FLAGS_SWAPPED;
1184
1185 state = 0;
 1186 if (thread->state & TH_TERMINATE)
1187 state = TH_STATE_HALTED;
1188 else
1189 if (thread->state & TH_RUN)
1190 state = TH_STATE_RUNNING;
1191 else
1192 if (thread->state & TH_UNINT)
1193 state = TH_STATE_UNINTERRUPTIBLE;
1194 else
1195 if (thread->state & TH_SUSP)
1196 state = TH_STATE_STOPPED;
1197 else
1198 if (thread->state & TH_WAIT)
1199 state = TH_STATE_WAITING;
1200
1201 basic_info->run_state = state;
1202 basic_info->flags = flags;
1203
1204 basic_info->suspend_count = thr_act->user_stop_count;
1205
1206 thread_unlock(thread);
1207 splx(s);
1208
1209 *thread_info_count = THREAD_BASIC_INFO_COUNT;
1210
1211 return (KERN_SUCCESS);
1212 }
1213 else
1214 if (flavor == THREAD_SCHED_TIMESHARE_INFO) {
1215 policy_timeshare_info_t ts_info;
1216
1217 if (*thread_info_count < POLICY_TIMESHARE_INFO_COUNT)
1218 return (KERN_INVALID_ARGUMENT);
1219
1220 ts_info = (policy_timeshare_info_t)thread_info_out;
1221
1222 s = splsched();
1223 thread_lock(thread);
1224
 1225 if (!(thread->sched_mode & TH_MODE_TIMESHARE)) {
1226 thread_unlock(thread);
1227 splx(s);
1228
1229 return (KERN_INVALID_POLICY);
1230 }
1231
1232 ts_info->depressed = (thread->sched_mode & TH_MODE_ISDEPRESSED) != 0;
1233 if (ts_info->depressed) {
1234 ts_info->base_priority = DEPRESSPRI;
1235 ts_info->depress_priority = thread->priority;
1236 }
1237 else {
1238 ts_info->base_priority = thread->priority;
1239 ts_info->depress_priority = -1;
1240 }
 1241
1242 ts_info->cur_priority = thread->sched_pri;
1243 ts_info->max_priority = thread->max_priority;
1244
1245 thread_unlock(thread);
1246 splx(s);
1247
1248 *thread_info_count = POLICY_TIMESHARE_INFO_COUNT;
1249
1250 return (KERN_SUCCESS);
1251 }
1252 else
1253 if (flavor == THREAD_SCHED_FIFO_INFO) {
1254 if (*thread_info_count < POLICY_FIFO_INFO_COUNT)
1255 return (KERN_INVALID_ARGUMENT);
1256
 1257 return (KERN_INVALID_POLICY);
1258 }
1259 else
1260 if (flavor == THREAD_SCHED_RR_INFO) {
1261 policy_rr_info_t rr_info;
1262
1263 if (*thread_info_count < POLICY_RR_INFO_COUNT)
1264 return (KERN_INVALID_ARGUMENT);
1265
1266 rr_info = (policy_rr_info_t) thread_info_out;
1267
1268 s = splsched();
1269 thread_lock(thread);
1270
 1271 if (thread->sched_mode & TH_MODE_TIMESHARE) {
1272 thread_unlock(thread);
1273 splx(s);
1274
1275 return (KERN_INVALID_POLICY);
1276 }
1277
1278 rr_info->depressed = (thread->sched_mode & TH_MODE_ISDEPRESSED) != 0;
1279 if (rr_info->depressed) {
1280 rr_info->base_priority = DEPRESSPRI;
1281 rr_info->depress_priority = thread->priority;
1282 }
1283 else {
1284 rr_info->base_priority = thread->priority;
1285 rr_info->depress_priority = -1;
1286 }
1287
 1288 rr_info->max_priority = thread->max_priority;
 1289 rr_info->quantum = std_quantum_us / 1000;
 1290
1291 thread_unlock(thread);
1292 splx(s);
1293
1294 *thread_info_count = POLICY_RR_INFO_COUNT;
1295
1296 return (KERN_SUCCESS);
1297 }
1298
1299 return (KERN_INVALID_ARGUMENT);
1300}
1301
1302void
1303thread_doreap(
1304 register thread_t thread)
1305{
1306 thread_act_t thr_act;
1307
1308
1309 thr_act = thread_lock_act(thread);
1310 assert(thr_act && thr_act->thread == thread);
1311
1312 act_locked_act_reference(thr_act);
1313
1314 /*
1315 * Replace `act_unlock_thread()' with individual
1316 * calls. (`act_detach()' can change fields used
1317 * to determine which locks are held, confusing
1318 * `act_unlock_thread()'.)
1319 */
1320 act_unlock(thr_act);
1321
1322 /* Remove the reference held by a rooted thread */
 1323 act_deallocate(thr_act);
1324
1325 /* Remove the reference held by the thread: */
1326 act_deallocate(thr_act);
1327}
1328
1329/*
1330 * reaper_thread:
1331 *
1332 * This kernel thread runs forever looking for terminating
1333 * threads, releasing their "self" references.
1334 */
1335static void
 1336reaper_thread_continue(void)
1337{
1338 register thread_t thread;
 1339
 1340 (void)splsched();
1341 simple_lock(&reaper_lock);
1342
1343 while ((thread = (thread_t) dequeue_head(&reaper_queue)) != THREAD_NULL) {
1344 simple_unlock(&reaper_lock);
 1345 (void)spllo();
1346
1347 thread_doreap(thread);
1348
 1349 (void)splsched();
1350 simple_lock(&reaper_lock);
1351 }
1352
 1353 assert_wait((event_t)&reaper_queue, THREAD_UNINT);
 1354 simple_unlock(&reaper_lock);
1355 (void)spllo();
1356
1357 thread_block(reaper_thread_continue);
1358 /*NOTREACHED*/
1359}
1360
1361static void
1362reaper_thread(void)
 1363{
1364 thread_t self = current_thread();
1365
1366 stack_privilege(self);
 1367
1368 reaper_thread_continue();
1369 /*NOTREACHED*/
1370}
1371
1372void
1373thread_reaper_init(void)
1374{
1375 kernel_thread(kernel_task, reaper_thread);
1376}
1377
1378kern_return_t
1379thread_assign(
1380 thread_act_t thr_act,
1381 processor_set_t new_pset)
1382{
1383 return(KERN_FAILURE);
1384}
1385
1386/*
1387 * thread_assign_default:
1388 *
1389 * Special version of thread_assign for assigning threads to default
1390 * processor set.
1391 */
1392kern_return_t
1393thread_assign_default(
1394 thread_act_t thr_act)
1395{
1396 return (thread_assign(thr_act, &default_pset));
1397}
1398
1399/*
1400 * thread_get_assignment
1401 *
1402 * Return current assignment for this thread.
1403 */
1404kern_return_t
1405thread_get_assignment(
1406 thread_act_t thr_act,
1407 processor_set_t *pset)
1408{
1409 thread_t thread;
1410
1411 if (thr_act == THR_ACT_NULL)
1412 return(KERN_INVALID_ARGUMENT);
1413 thread = act_lock_thread(thr_act);
1414 if (thread == THREAD_NULL) {
1415 act_unlock_thread(thr_act);
1416 return(KERN_INVALID_ARGUMENT);
1417 }
1418 *pset = thread->processor_set;
1419 act_unlock_thread(thr_act);
1420 pset_reference(*pset);
1421 return(KERN_SUCCESS);
1422}
1423
1424/*
1425 * thread_wire:
1426 *
1427 * Specify that the target thread must always be able
1428 * to run and to allocate memory.
1429 */
1430kern_return_t
1431thread_wire(
1432 host_priv_t host_priv,
1433 thread_act_t thr_act,
1434 boolean_t wired)
1435{
1436 spl_t s;
1437 thread_t thread;
1438 extern void vm_page_free_reserve(int pages);
1439
1440 if (thr_act == THR_ACT_NULL || host_priv == HOST_PRIV_NULL)
1441 return (KERN_INVALID_ARGUMENT);
1442
1443 assert(host_priv == &realhost);
1444
1445 thread = act_lock_thread(thr_act);
 1446 if (thread == THREAD_NULL) {
1447 act_unlock_thread(thr_act);
1448 return(KERN_INVALID_ARGUMENT);
1449 }
1450
1451 /*
1452 * This implementation only works for the current thread.
1453 * See stack_privilege.
1454 */
1455 if (thr_act != current_act())
1456 return KERN_INVALID_ARGUMENT;
1457
1458 s = splsched();
1459 thread_lock(thread);
1460
1461 if (wired) {
1462 if (thread->vm_privilege == FALSE)
1463 vm_page_free_reserve(1); /* XXX */
1464 thread->vm_privilege = TRUE;
1465 } else {
1466 if (thread->vm_privilege == TRUE)
1467 vm_page_free_reserve(-1); /* XXX */
1468 thread->vm_privilege = FALSE;
1469 }
1470
1471 thread_unlock(thread);
1472 splx(s);
1473 act_unlock_thread(thr_act);
1474
1475 return KERN_SUCCESS;
1476}
1477
1478/*
1479 * thread_collect_scan:
1480 *
1481 * Attempt to free resources owned by threads.
1482 */
1483
1484void
1485thread_collect_scan(void)
1486{
1487 /* This code runs very quickly! */
1488}
1489
1490/* Also disabled in vm/vm_pageout.c */
1491boolean_t thread_collect_allowed = FALSE;
1492unsigned thread_collect_last_tick = 0;
1493unsigned thread_collect_max_rate = 0; /* in ticks */
1494
1495/*
1496 * consider_thread_collect:
1497 *
1498 * Called by the pageout daemon when the system needs more free pages.
1499 */
1500
1501void
1502consider_thread_collect(void)
1503{
1504 /*
1505 * By default, don't attempt thread collection more frequently
 1506 * than once a second.
1507 */
1508
1509 if (thread_collect_max_rate == 0)
 1510 thread_collect_max_rate = (1 << SCHED_TICK_SHIFT) + 1;
1511
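	/*
	 * For example, assuming SCHED_TICK_SHIFT is 3 (eight scheduler ticks
	 * per second), the default above is (1 << 3) + 1 = 9 ticks, i.e. just
	 * over one second between collection attempts.
	 */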
1512 if (thread_collect_allowed &&
1513 (sched_tick >
1514 (thread_collect_last_tick + thread_collect_max_rate))) {
1515 thread_collect_last_tick = sched_tick;
1516 thread_collect_scan();
1517 }
1518}
1519
1520kern_return_t
1521host_stack_usage(
1522 host_t host,
1523 vm_size_t *reservedp,
1524 unsigned int *totalp,
1525 vm_size_t *spacep,
1526 vm_size_t *residentp,
1527 vm_size_t *maxusagep,
1528 vm_offset_t *maxstackp)
1529{
1530#if !MACH_DEBUG
1531 return KERN_NOT_SUPPORTED;
1532#else
1533 unsigned int total;
1534 vm_size_t maxusage;
1535
1536 if (host == HOST_NULL)
1537 return KERN_INVALID_HOST;
1538
 1539 maxusage = 0;
1540
1541 stack_statistics(&total, &maxusage);
1542
1543 *reservedp = 0;
1544 *totalp = total;
1545 *spacep = *residentp = total * round_page(KERNEL_STACK_SIZE);
1546 *maxusagep = maxusage;
1547 *maxstackp = 0;
1548 return KERN_SUCCESS;
1549
1550#endif /* MACH_DEBUG */
1551}
1552
1553/*
1554 * Return info on stack usage for threads in a specific processor set
1555 */
1556kern_return_t
1557processor_set_stack_usage(
1558 processor_set_t pset,
1559 unsigned int *totalp,
1560 vm_size_t *spacep,
1561 vm_size_t *residentp,
1562 vm_size_t *maxusagep,
1563 vm_offset_t *maxstackp)
1564{
1565#if !MACH_DEBUG
1566 return KERN_NOT_SUPPORTED;
1567#else
1568 unsigned int total;
1569 vm_size_t maxusage;
1570 vm_offset_t maxstack;
1571
1572 register thread_t *threads;
1573 register thread_t thread;
1574
1575 unsigned int actual; /* this many things */
1576 unsigned int i;
1577
1578 vm_size_t size, size_needed;
1579 vm_offset_t addr;
1580
1581 spl_t s;
1582
1583 if (pset == PROCESSOR_SET_NULL)
1584 return KERN_INVALID_ARGUMENT;
1585
1586 size = 0; addr = 0;
1587
1588 for (;;) {
1589 pset_lock(pset);
1590 if (!pset->active) {
1591 pset_unlock(pset);
1592 return KERN_INVALID_ARGUMENT;
1593 }
1594
1595 actual = pset->thread_count;
1596
1597 /* do we have the memory we need? */
1598
1599 size_needed = actual * sizeof(thread_t);
1600 if (size_needed <= size)
1601 break;
1602
1603 /* unlock the pset and allocate more memory */
1604 pset_unlock(pset);
1605
1606 if (size != 0)
1607 kfree(addr, size);
1608
1609 assert(size_needed > 0);
1610 size = size_needed;
1611
1612 addr = kalloc(size);
1613 if (addr == 0)
1614 return KERN_RESOURCE_SHORTAGE;
1615 }
1616
1617 /* OK, have memory and the processor_set is locked & active */
 1618 s = splsched();
1619 threads = (thread_t *) addr;
1620 for (i = 0, thread = (thread_t) queue_first(&pset->threads);
 1621 !queue_end(&pset->threads, (queue_entry_t) thread);
 1622 thread = (thread_t) queue_next(&thread->pset_threads)) {
1623 thread_lock(thread);
1624 if (thread->ref_count > 0) {
1625 thread_reference_locked(thread);
1626 threads[i++] = thread;
1627 }
1628 thread_unlock(thread);
 1629 }
1630 splx(s);
1631 assert(i <= actual);
1632
1633 /* can unlock processor set now that we have the thread refs */
1634 pset_unlock(pset);
1635
1636 /* calculate maxusage and free thread references */
1637
1638 total = 0;
1639 maxusage = 0;
1640 maxstack = 0;
 1641 while (i > 0) {
 1642 int cpu;
 1643 thread_t thread = threads[--i];
1644 vm_offset_t stack = 0;
1645
1646 /*
1647 * thread->kernel_stack is only accurate if the
1648 * thread isn't swapped and is not executing.
1649 *
1650 * Of course, we don't have the appropriate locks
1651 * for these shenanigans.
1652 */
1653
1654 stack = thread->kernel_stack;
1655
1656 for (cpu = 0; cpu < NCPUS; cpu++)
 1657 if (cpu_to_processor(cpu)->cpu_data->active_thread == thread) {
1658 stack = active_stacks[cpu];
1659 break;
1660 }
1661
1662 if (stack != 0) {
1663 total++;
1664 }
1665
1666 thread_deallocate(thread);
1667 }
1668
1669 if (size != 0)
1670 kfree(addr, size);
1671
1672 *totalp = total;
1673 *residentp = *spacep = total * round_page(KERNEL_STACK_SIZE);
1674 *maxusagep = maxusage;
1675 *maxstackp = maxstack;
1676 return KERN_SUCCESS;
1677
1678#endif /* MACH_DEBUG */
1679}
1680
 1681int split_funnel_off = 0;
1682funnel_t *
1683funnel_alloc(
1684 int type)
1685{
1686 mutex_t *m;
1687 funnel_t * fnl;
1688 if ((fnl = (funnel_t *)kalloc(sizeof(funnel_t))) != 0){
 1689 bzero((void *)fnl, sizeof(funnel_t));
 1690 if ((m = mutex_alloc(0)) == (mutex_t *)NULL) {
 1691 kfree((vm_offset_t)fnl, sizeof(funnel_t));
1692 return(THR_FUNNEL_NULL);
1693 }
1694 fnl->fnl_mutex = m;
1695 fnl->fnl_type = type;
1696 }
1697 return(fnl);
1698}
1699
1700void
1701funnel_free(
1702 funnel_t * fnl)
1703{
1704 mutex_free(fnl->fnl_mutex);
1705 if (fnl->fnl_oldmutex)
1706 mutex_free(fnl->fnl_oldmutex);
 1707 kfree((vm_offset_t)fnl, sizeof(funnel_t));
1708}
1709
1710void
1711funnel_lock(
1712 funnel_t * fnl)
1713{
1714 mutex_t * m;
1715
1716 m = fnl->fnl_mutex;
1717restart:
1718 mutex_lock(m);
1719 fnl->fnl_mtxholder = current_thread();
1720 if (split_funnel_off && (m != fnl->fnl_mutex)) {
1721 mutex_unlock(m);
1722 m = fnl->fnl_mutex;
1723 goto restart;
1724 }
1725}
1726
1727void
1728funnel_unlock(
1729 funnel_t * fnl)
1730{
1731 mutex_unlock(fnl->fnl_mutex);
1732 fnl->fnl_mtxrelease = current_thread();
1733}
1734
1735funnel_t *
1736thread_funnel_get(
1737 void)
1738{
1739 thread_t th = current_thread();
1740
1741 if (th->funnel_state & TH_FN_OWNED) {
1742 return(th->funnel_lock);
1743 }
1744 return(THR_FUNNEL_NULL);
1745}
1746
1747boolean_t
1748thread_funnel_set(
1749 funnel_t * fnl,
1750 boolean_t funneled)
1751{
1752 thread_t cur_thread;
1753 boolean_t funnel_state_prev;
1754 boolean_t intr;
1755
1756 cur_thread = current_thread();
1757 funnel_state_prev = ((cur_thread->funnel_state & TH_FN_OWNED) == TH_FN_OWNED);
1758
1759 if (funnel_state_prev != funneled) {
1760 intr = ml_set_interrupts_enabled(FALSE);
1761
1762 if (funneled == TRUE) {
1763 if (cur_thread->funnel_lock)
1764 panic("Funnel lock called when holding one %x", cur_thread->funnel_lock);
1765 KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE,
1766 fnl, 1, 0, 0, 0);
1767 funnel_lock(fnl);
1768 KERNEL_DEBUG(0x6032434 | DBG_FUNC_NONE,
1769 fnl, 1, 0, 0, 0);
1770 cur_thread->funnel_state |= TH_FN_OWNED;
1771 cur_thread->funnel_lock = fnl;
1772 } else {
1773 if(cur_thread->funnel_lock->fnl_mutex != fnl->fnl_mutex)
1774 panic("Funnel unlock when not holding funnel");
1775 cur_thread->funnel_state &= ~TH_FN_OWNED;
1776 KERNEL_DEBUG(0x603242c | DBG_FUNC_NONE,
1777 fnl, 1, 0, 0, 0);
1778
1779 cur_thread->funnel_lock = THR_FUNNEL_NULL;
1780 funnel_unlock(fnl);
1781 }
1782 (void)ml_set_interrupts_enabled(intr);
1783 } else {
1784 /* if we are trying to acquire funnel recursively
1785 * check for funnel to be held already
1786 */
1787 if (funneled && (fnl->fnl_mutex != cur_thread->funnel_lock->fnl_mutex)) {
1788 panic("thread_funnel_set: already holding a different funnel");
1789 }
1790 }
1791 return(funnel_state_prev);
1792}
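/*
 * Typical save/restore usage, sketched with a hypothetical funnel pointer
 * (any funnel_t obtained from funnel_alloc() or thread_funnel_get()):
 *
 *	boolean_t funnel_state = thread_funnel_set(some_fnl, TRUE);
 *	... code that must run funneled ...
 *	(void) thread_funnel_set(some_fnl, funnel_state);
 *
 * Because the previous ownership state is returned, a nested caller
 * restores the prior state instead of unconditionally dropping the funnel.
 */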
1793
1794boolean_t
1795thread_funnel_merge(
1796 funnel_t * fnl,
1797 funnel_t * otherfnl)
1798{
1799 mutex_t * m;
1800 mutex_t * otherm;
1801 funnel_t * gfnl;
1802 extern int disable_funnel;
1803
1804 if ((gfnl = thread_funnel_get()) == THR_FUNNEL_NULL)
1805 panic("thread_funnel_merge called with no funnels held");
1806
1807 if (gfnl->fnl_type != 1)
1808 panic("thread_funnel_merge called from non kernel funnel");
1809
1810 if (gfnl != fnl)
1811 panic("thread_funnel_merge incorrect invocation");
1812
1813 if (disable_funnel || split_funnel_off)
1814 return (KERN_FAILURE);
1815
1816 m = fnl->fnl_mutex;
1817 otherm = otherfnl->fnl_mutex;
1818
1819 /* Acquire other funnel mutex */
1820 mutex_lock(otherm);
1821 split_funnel_off = 1;
1822 disable_funnel = 1;
1823 otherfnl->fnl_mutex = m;
1824 otherfnl->fnl_type = fnl->fnl_type;
1825 otherfnl->fnl_oldmutex = otherm; /* save this for future use */
1826
1827 mutex_unlock(otherm);
1828 return(KERN_SUCCESS);
1829}
1830
1831void
1832thread_set_cont_arg(
1833 int arg)
 1834{
1835 thread_t self = current_thread();
1836
1837 self->saved.misc = arg;
1838}
1839
1840int
1841thread_get_cont_arg(void)
1842{
1843 thread_t self = current_thread();
1844
1845 return (self->saved.misc);
1846}
1847
1848/*
1849 * Export routines to other components for things that are done as macros
1850 * within the osfmk component.
1851 */
1852#undef thread_should_halt
1853boolean_t
1854thread_should_halt(
1855 thread_shuttle_t th)
1856{
1857 return(thread_should_halt_fast(th));
1858}