/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	kern/thread.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young, David Golub
 *	Date:	1986
 *
 *	Thread/thread_shuttle management primitives implementation.
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Computer Systems Laboratory (CSL).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
 * improvements that they make and grant CSL redistribution rights.
 *
 */

#include <cpus.h>
#include <mach_host.h>
#include <simple_clock.h>
#include <mach_debug.h>
#include <mach_prof.h>

#include <mach/boolean.h>
#include <mach/policy.h>
#include <mach/thread_info.h>
#include <mach/thread_special_ports.h>
#include <mach/thread_status.h>
#include <mach/time_value.h>
#include <mach/vm_param.h>
#include <kern/ast.h>
#include <kern/cpu_data.h>
#include <kern/counters.h>
#include <kern/etap_macros.h>
#include <kern/ipc_mig.h>
#include <kern/ipc_tt.h>
#include <kern/mach_param.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/mk_sp.h>	/*** ??? fix so this can be removed ***/
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_act.h>
#include <kern/thread_swap.h>
#include <kern/host.h>
#include <kern/zalloc.h>
#include <vm/vm_kern.h>
#include <ipc/ipc_kmsg.h>
#include <ipc/ipc_port.h>
#include <machine/thread.h>		/* for MACHINE_STACK */
#include <kern/profile.h>
#include <kern/assert.h>
#include <sys/kdebug.h>

/*
 * Exported interfaces
 */

#include <mach/thread_act_server.h>
#include <mach/mach_host_server.h>

static struct zone		*thread_zone;

static queue_head_t		reaper_queue;
decl_simple_lock_data(static,reaper_lock)

extern int		tick;

/* private */
static struct thread	thread_template, init_thread;

#if	MACH_DEBUG

#ifdef	MACHINE_STACK
extern void	stack_statistics(
			unsigned int	*totalp,
			vm_size_t	*maxusagep);
#endif	/* MACHINE_STACK */
#endif	/* MACH_DEBUG */

#ifdef	MACHINE_STACK
/*
 *	Machine-dependent code must define:
 *		stack_alloc_try
 *		stack_alloc
 *		stack_free
 *		stack_free_stack
 *		stack_collect
 *	and if MACH_DEBUG:
 *		stack_statistics
 */
#else	/* MACHINE_STACK */
/*
 *	We allocate stacks from generic kernel VM.
 *	Machine-dependent code must define:
 *		machine_kernel_stack_init
 *
 *	The stack_free_list can only be accessed at splsched,
 *	because stack_alloc_try/thread_invoke operate at splsched.
 */

decl_simple_lock_data(static,stack_lock_data)
#define stack_lock()	simple_lock(&stack_lock_data)
#define stack_unlock()	simple_unlock(&stack_lock_data)

static vm_map_t		stack_map;
static vm_offset_t	stack_free_list;

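/*
 *	One-entry per-CPU cache of recently freed stacks: stack_free() and
 *	stack_free_stack() park a stack here when the slot is empty, and
 *	stack_alloc_try() consumes it before falling back to the global
 *	stack_free_list (which requires taking stack_lock()).
 */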
static vm_offset_t	stack_free_cache[NCPUS];

unsigned int stack_free_max = 0;
unsigned int stack_free_count = 0;		/* splsched only */
unsigned int stack_free_limit = 1;		/* Arbitrary  */

unsigned int stack_cache_hits = 0;		/* debugging */

unsigned int stack_alloc_hits = 0;		/* debugging */
unsigned int stack_alloc_misses = 0;	/* debugging */

unsigned int stack_alloc_total = 0;
unsigned int stack_alloc_hiwater = 0;
unsigned int stack_alloc_bndry = 0;


/*
 *	The next field is at the base of the stack,
 *	so the low end is left unsullied.
 */

#define stack_next(stack)	(*((vm_offset_t *)((stack) + KERNEL_STACK_SIZE) - 1))

/*
 *	stack_alloc:
 *
 *	Allocate a kernel stack for a thread.
 *	May block.
 */
vm_offset_t
stack_alloc(
	thread_t thread,
	void (*start_pos)(thread_t))
{
	vm_offset_t		stack = thread->kernel_stack;
	spl_t			s;

	if (stack)
		return (stack);

	s = splsched();
	stack_lock();
	stack = stack_free_list;
	if (stack != 0) {
		stack_free_list = stack_next(stack);
		stack_free_count--;
	}
	stack_unlock();
	splx(s);

	if (stack != 0) {
		machine_stack_attach(thread, stack, start_pos);
		return (stack);
	}

	if (kernel_memory_allocate(
					stack_map, &stack,
					KERNEL_STACK_SIZE, stack_alloc_bndry - 1,
					KMA_KOBJECT) != KERN_SUCCESS)
		panic("stack_alloc: no space left for stack maps");

	stack_alloc_total++;
	if (stack_alloc_total > stack_alloc_hiwater)
		stack_alloc_hiwater = stack_alloc_total;

	machine_stack_attach(thread, stack, start_pos);
	return (stack);
}

/*
 *	stack_free:
 *
 *	Free a kernel stack.
 */

void
stack_free(
	thread_t thread)
{
	vm_offset_t		stack = machine_stack_detach(thread);

	assert(stack);
	if (stack != thread->reserved_stack) {
		spl_t			s = splsched();
		vm_offset_t		*cache;

		cache = &stack_free_cache[cpu_number()];
		if (*cache == 0) {
			*cache = stack;
			splx(s);

			return;
		}

		stack_lock();
		stack_next(stack) = stack_free_list;
		stack_free_list = stack;
		if (++stack_free_count > stack_free_max)
			stack_free_max = stack_free_count;
		stack_unlock();
		splx(s);
	}
}

void
stack_free_stack(
	vm_offset_t		stack)
{
	spl_t			s = splsched();
	vm_offset_t		*cache;

	cache = &stack_free_cache[cpu_number()];
	if (*cache == 0) {
		*cache = stack;
		splx(s);

		return;
	}

	stack_lock();
	stack_next(stack) = stack_free_list;
	stack_free_list = stack;
	if (++stack_free_count > stack_free_max)
		stack_free_max = stack_free_count;
	stack_unlock();
	splx(s);
}

/*
 *	stack_collect:
 *
 *	Free excess kernel stacks.
 *	May block.
 */

void
stack_collect(void)
{
	spl_t		s = splsched();

	stack_lock();
	while (stack_free_count > stack_free_limit) {
		vm_offset_t		stack = stack_free_list;

		stack_free_list = stack_next(stack);
		stack_free_count--;
		stack_unlock();
		splx(s);

		if (vm_map_remove(
					stack_map, stack, stack + KERNEL_STACK_SIZE,
					VM_MAP_REMOVE_KUNWIRE) != KERN_SUCCESS)
			panic("stack_collect: vm_map_remove failed");

		s = splsched();
		stack_lock();
		stack_alloc_total--;
	}
	stack_unlock();
	splx(s);
}

/*
 *	stack_alloc_try:
 *
 *	Non-blocking attempt to allocate a kernel stack.
 *	Called at splsched with the thread locked.
 */

boolean_t stack_alloc_try(
	thread_t		thread,
	void			(*start)(thread_t))
{
	register vm_offset_t	stack, *cache;

	cache = &stack_free_cache[cpu_number()];
	if (stack = *cache) {
		*cache = 0;
		machine_stack_attach(thread, stack, start);
		stack_cache_hits++;

		return (TRUE);
	}

	stack_lock();
	stack = stack_free_list;
	if (stack != (vm_offset_t)0) {
		stack_free_list = stack_next(stack);
		stack_free_count--;
	}
	stack_unlock();

	if (stack == 0)
		stack = thread->reserved_stack;

	if (stack != 0) {
		machine_stack_attach(thread, stack, start);
		stack_alloc_hits++;

		return (TRUE);
	}
	else {
		stack_alloc_misses++;

		return (FALSE);
	}
}

#if	MACH_DEBUG
/*
 *	stack_statistics:
 *
 *	Return statistics on cached kernel stacks.
 *	*maxusagep must be initialized by the caller.
 */

void
stack_statistics(
	unsigned int	*totalp,
	vm_size_t	*maxusagep)
{
	spl_t	s;

	s = splsched();
	stack_lock();

	*totalp = stack_free_count;
	*maxusagep = 0;

	stack_unlock();
	splx(s);
}
#endif	/* MACH_DEBUG */

#endif	/* MACHINE_STACK */

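/*
 *	Report kernel-stack usage in zone_info style (stacks are carved out of
 *	a dedicated submap rather than a real zone), so stack consumption can
 *	be shown alongside the ordinary zones by zone-reporting interfaces.
 */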
stack_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
		     vm_size_t *alloc_size, int *collectable, int *exhaustable)
{
	*count      = stack_alloc_total - stack_free_count;
	*cur_size   = KERNEL_STACK_SIZE * stack_alloc_total;
	*max_size   = KERNEL_STACK_SIZE * stack_alloc_hiwater;
	*elem_size  = KERNEL_STACK_SIZE;
	*alloc_size = KERNEL_STACK_SIZE;
	*collectable = 1;
	*exhaustable = 0;
}

void
stack_privilege(
	register thread_t	thread)
{
	/* OBSOLETE */
}

void
thread_bootstrap(void)
{
	/*
	 *	Fill in a template thread for fast initialization.
	 */

	thread_template.runq = RUN_QUEUE_NULL;

	thread_template.ref_count = 1;

	thread_template.reason = AST_NONE;
	thread_template.at_safe_point = FALSE;
	thread_template.wait_event = NO_EVENT64;
	thread_template.wait_queue = WAIT_QUEUE_NULL;
	thread_template.wait_result = THREAD_WAITING;
	thread_template.interrupt_level = THREAD_ABORTSAFE;
	thread_template.state = TH_STACK_HANDOFF | TH_WAIT | TH_UNINT;
	thread_template.wake_active = FALSE;
	thread_template.active_callout = FALSE;
	thread_template.continuation = (void (*)(void))0;
	thread_template.top_act = THR_ACT_NULL;

	thread_template.importance = 0;
	thread_template.sched_mode = 0;
	thread_template.safe_mode = 0;

	thread_template.priority = 0;
	thread_template.sched_pri = 0;
	thread_template.max_priority = 0;
	thread_template.task_priority = 0;
	thread_template.promotions = 0;
	thread_template.pending_promoter_index = 0;
	thread_template.pending_promoter[0] =
	thread_template.pending_promoter[1] = NULL;

	thread_template.realtime.deadline = UINT64_MAX;

	thread_template.current_quantum = 0;

	thread_template.computation_metered = 0;
	thread_template.computation_epoch = 0;

	thread_template.cpu_usage = 0;
	thread_template.cpu_delta = 0;
	thread_template.sched_usage = 0;
	thread_template.sched_delta = 0;
	thread_template.sched_stamp = 0;
	thread_template.sleep_stamp = 0;
	thread_template.safe_release = 0;

	thread_template.bound_processor = PROCESSOR_NULL;
	thread_template.last_processor = PROCESSOR_NULL;
	thread_template.last_switch = 0;

	thread_template.vm_privilege = FALSE;

	timer_init(&(thread_template.user_timer));
	timer_init(&(thread_template.system_timer));
	thread_template.user_timer_save.low = 0;
	thread_template.user_timer_save.high = 0;
	thread_template.system_timer_save.low = 0;
	thread_template.system_timer_save.high = 0;

	thread_template.processor_set = PROCESSOR_SET_NULL;

	thread_template.act_ref_count = 2;

	thread_template.special_handler.handler = special_handler;
	thread_template.special_handler.next = 0;

#if	MACH_HOST
	thread_template.may_assign = TRUE;
	thread_template.assign_active = FALSE;
#endif	/* MACH_HOST */
	thread_template.funnel_lock = THR_FUNNEL_NULL;
	thread_template.funnel_state = 0;
#if	MACH_LDEBUG
	thread_template.mutex_count = 0;
#endif	/* MACH_LDEBUG */

	init_thread = thread_template;

	init_thread.top_act = &init_thread;
	init_thread.thread = &init_thread;
	machine_thread_set_current(&init_thread);
}

void
thread_init(void)
{
	kern_return_t ret;
	unsigned int stack;

	thread_zone = zinit(
			sizeof(struct thread),
			THREAD_MAX * sizeof(struct thread),
			THREAD_CHUNK * sizeof(struct thread),
			"threads");

	/*
	 *	Initialize other data structures used in
	 *	this module.
	 */

	queue_init(&reaper_queue);
	simple_lock_init(&reaper_lock, ETAP_THREAD_REAPER);

#ifndef MACHINE_STACK
	simple_lock_init(&stack_lock_data, ETAP_THREAD_STACK);	/* Initialize the stack lock */

	if (KERNEL_STACK_SIZE < round_page_32(KERNEL_STACK_SIZE)) {	/* Kernel stacks must be multiples of pages */
		panic("thread_init: kernel stack size (%08X) must be a multiple of page size (%08X)\n",
			KERNEL_STACK_SIZE, PAGE_SIZE);
	}

	for(stack_alloc_bndry = PAGE_SIZE; stack_alloc_bndry <= KERNEL_STACK_SIZE; stack_alloc_bndry <<= 1);	/* Find next power of 2 above stack size */

	ret = kmem_suballoc(kernel_map,			/* Suballocate from the kernel map */
		&stack,
		(stack_alloc_bndry * (2*THREAD_MAX + 64)),	/* Allocate enough for all of it */
		FALSE,	/* Say not pageable so that it is wired */
		TRUE,	/* Allocate from anywhere */
		&stack_map);	/* Allocate a submap */

	if(ret != KERN_SUCCESS) {	/* Did we get one? */
		panic("thread_init: kmem_suballoc for stacks failed - ret = %d\n", ret);	/* Die */
	}
	stack = vm_map_min(stack_map);	/* Make sure we skip the first hunk */
	ret = vm_map_enter(stack_map, &stack, PAGE_SIZE, 0,	/* Make sure there is nothing at the start */
		0,	/* Force it at start */
		VM_OBJECT_NULL, 0,	/* No object yet */
		FALSE,	/* No copy */
		VM_PROT_NONE,	/* Allow no access */
		VM_PROT_NONE,	/* Allow no access */
		VM_INHERIT_DEFAULT);	/* Just be normal */

	if(ret != KERN_SUCCESS) {	/* Did it work? */
		panic("thread_init: dummy alignment allocation failed; ret = %d\n", ret);
	}

#endif  /* MACHINE_STACK */

	/*
	 *	Initialize any machine-dependent
	 *	per-thread structures necessary.
	 */
	machine_thread_init();
}

/*
 * Called at splsched.
 */
void
thread_reaper_enqueue(
	thread_t		thread)
{
	simple_lock(&reaper_lock);
	enqueue_tail(&reaper_queue, (queue_entry_t)thread);
	simple_unlock(&reaper_lock);

	thread_wakeup((event_t)&reaper_queue);
}

void
thread_termination_continue(void)
{
	panic("thread_termination_continue");
	/*NOTREACHED*/
}

/*
 *	Routine: thread_terminate_self
 *
 *		This routine is called by a thread which has unwound from
 *		its current RPC and kernel contexts and found that its
 *		root activation has been marked for extinction.  This lets
 *		it clean up the last few things that can only be cleaned
 *		up in this context and then impale itself on the reaper
 *		queue.
 *
 *		When the reaper gets the thread, it will deallocate the
 *		thread_act's reference on itself, which in turn will release
 *		its own reference on this thread.  By doing things in that
 *		order, a thread_act will always have a valid thread - but the
 *		thread may persist beyond having a thread_act (but must never
 *		run like that).
 */
void
thread_terminate_self(void)
{
	thread_act_t	thr_act = current_act();
	thread_t		thread;
	task_t			task = thr_act->task;
	long			active_acts;
	spl_t			s;

	/*
	 * We should be at the base of the inheritance chain.
	 */
	thread = act_lock_thread(thr_act);
	assert(thr_act->thread == thread);

	/* This will allow no more control ops on this thr_act. */
	ipc_thr_act_disable(thr_act);

	/* Clean-up any ulocks that are still owned by the thread
	 * activation (acquired but not released or handed-off).
	 */
	act_ulock_release_all(thr_act);

	act_unlock_thread(thr_act);

	_mk_sp_thread_depress_abort(thread, TRUE);

	/*
	 * Check to see if this is the last active activation.  By
	 * this we mean the last activation to call thread_terminate_self.
	 * If so, and the task is associated with a BSD process, we
	 * need to call BSD and let them clean up.
	 */
	active_acts = hw_atomic_sub(&task->active_thread_count, 1);

	if (active_acts == 0 && task->bsd_info)
		proc_exit(task->bsd_info);

	/* JMM - for now, no migration */
	assert(!thr_act->lower);

	thread_timer_terminate();

	ipc_thread_terminate(thread);

	s = splsched();
	thread_lock(thread);
	thread->state |= TH_TERMINATE;
	assert((thread->state & TH_UNINT) == 0);
	thread_mark_wait_locked(thread, THREAD_UNINT);
	assert(thread->promotions == 0);
	thread_unlock(thread);
	/* splx(s); */

	ETAP_SET_REASON(thread, BLOCKED_ON_TERMINATION);
	thread_block(thread_termination_continue);
	/*NOTREACHED*/
}

/*
 * Create a new thread.
 * Doesn't start the thread running.
 */
static kern_return_t
thread_create_internal(
	task_t					parent_task,
	integer_t				priority,
	void					(*start)(void),
	thread_t				*out_thread)
{
	thread_t				new_thread;
	processor_set_t			pset;
	static thread_t			first_thread;

	/*
	 *	Allocate a thread and initialize static fields
	 */
	if (first_thread == NULL)
		new_thread = first_thread = current_act();
	else
		new_thread = (thread_t)zalloc(thread_zone);
	if (new_thread == NULL)
		return (KERN_RESOURCE_SHORTAGE);

	if (new_thread != first_thread)
		*new_thread = thread_template;

#ifdef MACH_BSD
	{
		extern void	*uthread_alloc(task_t, thread_act_t);

		new_thread->uthread = uthread_alloc(parent_task, new_thread);
		if (new_thread->uthread == NULL) {
			zfree(thread_zone, (vm_offset_t)new_thread);
			return (KERN_RESOURCE_SHORTAGE);
		}
	}
#endif  /* MACH_BSD */

	if (machine_thread_create(new_thread, parent_task) != KERN_SUCCESS) {
#ifdef MACH_BSD
		{
			extern void uthread_free(task_t, void *, void *);
			void *ut = new_thread->uthread;

			new_thread->uthread = NULL;
			uthread_free(parent_task, ut, parent_task->bsd_info);
		}
#endif  /* MACH_BSD */
		zfree(thread_zone, (vm_offset_t)new_thread);
		return (KERN_FAILURE);
	}

	new_thread->task = parent_task;

	thread_lock_init(new_thread);
	wake_lock_init(new_thread);

	mutex_init(&new_thread->lock, ETAP_THREAD_ACT);

	ipc_thr_act_init(parent_task, new_thread);

	ipc_thread_init(new_thread);
	queue_init(&new_thread->held_ulocks);
	act_prof_init(new_thread, parent_task);

	new_thread->continuation = start;
	new_thread->sleep_stamp = sched_tick;

	pset = parent_task->processor_set;
	assert(pset == &default_pset);
	pset_lock(pset);

	task_lock(parent_task);
	assert(parent_task->processor_set == pset);

	if (	!parent_task->active						||
			(parent_task->thread_count >= THREAD_MAX	&&
			 parent_task != kernel_task)) {
		task_unlock(parent_task);
		pset_unlock(pset);

#ifdef MACH_BSD
		{
			extern void uthread_free(task_t, void *, void *);
			void *ut = new_thread->uthread;

			new_thread->uthread = NULL;
			uthread_free(parent_task, ut, parent_task->bsd_info);
		}
#endif  /* MACH_BSD */
		act_prof_deallocate(new_thread);
		ipc_thr_act_terminate(new_thread);
		machine_thread_destroy(new_thread);
		zfree(thread_zone, (vm_offset_t) new_thread);
		return (KERN_FAILURE);
	}

	act_attach(new_thread, new_thread);

	task_reference_locked(parent_task);

	/* Cache the task's map */
	new_thread->map = parent_task->map;

	/* Chain the thread onto the task's list */
	queue_enter(&parent_task->threads, new_thread, thread_act_t, task_threads);
	parent_task->thread_count++;
	parent_task->res_thread_count++;

	/* So terminating threads don't need to take the task lock to decrement */
	hw_atomic_add(&parent_task->active_thread_count, 1);

	/* Associate the thread with the processor set */
	pset_add_thread(pset, new_thread);

	thread_timer_setup(new_thread);

	/* Set the thread's scheduling parameters */
	if (parent_task != kernel_task)
		new_thread->sched_mode |= TH_MODE_TIMESHARE;
	new_thread->max_priority = parent_task->max_priority;
	new_thread->task_priority = parent_task->priority;
	new_thread->priority = (priority < 0)? parent_task->priority: priority;
	if (new_thread->priority > new_thread->max_priority)
		new_thread->priority = new_thread->max_priority;
	new_thread->importance =
					new_thread->priority - new_thread->task_priority;
	new_thread->sched_stamp = sched_tick;
	compute_priority(new_thread, FALSE);

#if	ETAP_EVENT_MONITOR
	new_thread->etap_reason = 0;
	new_thread->etap_trace  = FALSE;
#endif	/* ETAP_EVENT_MONITOR */

	new_thread->active = TRUE;

	*out_thread = new_thread;

	{
		long	dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4;

		kdbg_trace_data(parent_task->bsd_info, &dbg_arg2);

		KERNEL_DEBUG_CONSTANT(
			TRACEDBG_CODE(DBG_TRACE_DATA, 1) | DBG_FUNC_NONE,
			(vm_address_t)new_thread, dbg_arg2, 0, 0, 0);

		kdbg_trace_string(parent_task->bsd_info,
							&dbg_arg1, &dbg_arg2, &dbg_arg3, &dbg_arg4);

		KERNEL_DEBUG_CONSTANT(
			TRACEDBG_CODE(DBG_TRACE_STRING, 1) | DBG_FUNC_NONE,
			dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, 0);
	}

	return (KERN_SUCCESS);
}

extern void thread_bootstrap_return(void);

kern_return_t
thread_create(
	task_t				task,
	thread_act_t		*new_thread)
{
	kern_return_t		result;
	thread_t			thread;

	if (task == TASK_NULL || task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	result = thread_create_internal(task, -1, thread_bootstrap_return, &thread);
	if (result != KERN_SUCCESS)
		return (result);

	thread->user_stop_count = 1;
	thread_hold(thread);
	if (task->suspend_count > 0)
		thread_hold(thread);

	pset_unlock(task->processor_set);
	task_unlock(task);

	*new_thread = thread;

	return (KERN_SUCCESS);
}

kern_return_t
thread_create_running(
	register task_t			task,
	int						flavor,
	thread_state_t			new_state,
	mach_msg_type_number_t	new_state_count,
	thread_act_t			*new_thread)
{
	register kern_return_t	result;
	thread_t				thread;

	if (task == TASK_NULL || task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	result = thread_create_internal(task, -1, thread_bootstrap_return, &thread);
	if (result != KERN_SUCCESS)
		return (result);

	result = machine_thread_set_state(thread, flavor, new_state, new_state_count);
	if (result != KERN_SUCCESS) {
		pset_unlock(task->processor_set);
		task_unlock(task);

		thread_terminate(thread);
		act_deallocate(thread);
		return (result);
	}

	act_lock(thread);
	clear_wait(thread, THREAD_AWAKENED);
	thread->started = TRUE;
	act_unlock(thread);
	pset_unlock(task->processor_set);
	task_unlock(task);

	*new_thread = thread;

	return (result);
}

/*
 *	kernel_thread:
 *
 *	Create a thread in the kernel task
 *	to execute in kernel context.
 */
thread_t
kernel_thread_create(
	void			(*start)(void),
	integer_t		priority)
{
	kern_return_t	result;
	task_t			task = kernel_task;
	thread_t		thread;

	result = thread_create_internal(task, priority, start, &thread);
	if (result != KERN_SUCCESS)
		return (THREAD_NULL);

	pset_unlock(task->processor_set);
	task_unlock(task);

	thread_doswapin(thread);
	assert(thread->kernel_stack != 0);
	thread->reserved_stack = thread->kernel_stack;

	act_deallocate(thread);

	return (thread);
}

thread_t
kernel_thread_with_priority(
	void			(*start)(void),
	integer_t		priority)
{
	thread_t	thread;

	thread = kernel_thread_create(start, priority);
	if (thread == THREAD_NULL)
		return (THREAD_NULL);

	act_lock(thread);
	clear_wait(thread, THREAD_AWAKENED);
	thread->started = TRUE;
	act_unlock(thread);

#ifdef i386
	thread_bind(thread, master_processor);
#endif /* i386 */
	return (thread);
}

thread_t
kernel_thread(
	task_t			task,
	void			(*start)(void))
{
	if (task != kernel_task)
		panic("kernel_thread");

	return kernel_thread_with_priority(start, -1);
}

unsigned int c_weird_pset_ref_exit = 0;	/* pset code raced us */

#if	MACH_HOST
/* Preclude thread processor set assignment */
#define thread_freeze(thread)	assert((thread)->processor_set == &default_pset)

/* Allow thread processor set assignment */
#define thread_unfreeze(thread)	assert((thread)->processor_set == &default_pset)

#endif	/* MACH_HOST */

void
thread_deallocate(
	thread_t			thread)
{
	task_t				task;
	processor_set_t		pset;
	int					refs;
	spl_t				s;

	if (thread == THREAD_NULL)
		return;

	/*
	 *	First, check for new count > 0 (the common case).
	 *	Only the thread needs to be locked.
	 */
	s = splsched();
	thread_lock(thread);
	refs = --thread->ref_count;
	thread_unlock(thread);
	splx(s);

	if (refs > 0)
		return;

	if (thread == current_thread())
		panic("thread_deallocate");

	/*
	 *	There is a dangling pointer to the thread from the
	 *	processor_set.  To clean it up, we freeze the thread
	 *	in the pset (because pset destruction can cause even
	 *	reference-less threads to be reassigned to the default
	 *	pset) and then remove it.
	 */

#if MACH_HOST
	thread_freeze(thread);
#endif

	pset = thread->processor_set;
	pset_lock(pset);
	pset_remove_thread(pset, thread);
	pset_unlock(pset);

#if MACH_HOST
	thread_unfreeze(thread);
#endif

	pset_deallocate(pset);

	if (thread->reserved_stack != 0) {
		if (thread->reserved_stack != thread->kernel_stack)
			stack_free_stack(thread->reserved_stack);
		thread->reserved_stack = 0;
	}

	if (thread->kernel_stack != 0)
		stack_free(thread);

	machine_thread_destroy(thread);

	zfree(thread_zone, (vm_offset_t) thread);
}

void
thread_reference(
	thread_t	thread)
{
	spl_t		s;

	if (thread == THREAD_NULL)
		return;

	s = splsched();
	thread_lock(thread);
	thread_reference_locked(thread);
	thread_unlock(thread);
	splx(s);
}

/*
 * Called with "appropriate" thread-related locks held on
 * thread and its top_act for synchrony with RPC (see
 * act_lock_thread()).
 */
kern_return_t
thread_info_shuttle(
	register thread_act_t	thr_act,
	thread_flavor_t			flavor,
	thread_info_t			thread_info_out,	/* ptr to OUT array */
	mach_msg_type_number_t	*thread_info_count)	/*IN/OUT*/
{
	register thread_t		thread = thr_act->thread;
	int						state, flags;
	spl_t					s;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (flavor == THREAD_BASIC_INFO) {
		register thread_basic_info_t	basic_info;

		if (*thread_info_count < THREAD_BASIC_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		basic_info = (thread_basic_info_t) thread_info_out;

		s = splsched();
		thread_lock(thread);

		/* fill in info */

		thread_read_times(thread, &basic_info->user_time,
									&basic_info->system_time);

		/*
		 *	Update lazy-evaluated scheduler info because someone wants it.
		 */
		if (thread->sched_stamp != sched_tick)
			update_priority(thread);

		basic_info->sleep_time = 0;

		/*
		 *	To calculate cpu_usage, first correct for timer rate,
		 *	then for 5/8 ageing.  The correction factor [3/5] is
		 *	(1/(5/8) - 1).
		 */
		basic_info->cpu_usage = (thread->cpu_usage << SCHED_TICK_SHIFT) /
												(TIMER_RATE / TH_USAGE_SCALE);
		basic_info->cpu_usage = (basic_info->cpu_usage * 3) / 5;
#if	SIMPLE_CLOCK
		/*
		 *	Clock drift compensation.
		 */
		basic_info->cpu_usage = (basic_info->cpu_usage * 1000000) / sched_usec;
#endif	/* SIMPLE_CLOCK */

		basic_info->policy = ((thread->sched_mode & TH_MODE_TIMESHARE)?
												POLICY_TIMESHARE: POLICY_RR);

		flags = 0;
		if (thread->state & TH_IDLE)
			flags |= TH_FLAGS_IDLE;

		if (thread->state & TH_STACK_HANDOFF)
			flags |= TH_FLAGS_SWAPPED;

		state = 0;
		if (thread->state & TH_TERMINATE)
			state = TH_STATE_HALTED;
		else
		if (thread->state & TH_RUN)
			state = TH_STATE_RUNNING;
		else
		if (thread->state & TH_UNINT)
			state = TH_STATE_UNINTERRUPTIBLE;
		else
		if (thread->state & TH_SUSP)
			state = TH_STATE_STOPPED;
		else
		if (thread->state & TH_WAIT)
			state = TH_STATE_WAITING;

		basic_info->run_state = state;
		basic_info->flags = flags;

		basic_info->suspend_count = thr_act->user_stop_count;

		thread_unlock(thread);
		splx(s);

		*thread_info_count = THREAD_BASIC_INFO_COUNT;

		return (KERN_SUCCESS);
	}
	else
	if (flavor == THREAD_SCHED_TIMESHARE_INFO) {
		policy_timeshare_info_t		ts_info;

		if (*thread_info_count < POLICY_TIMESHARE_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		ts_info = (policy_timeshare_info_t)thread_info_out;

		s = splsched();
		thread_lock(thread);

		if (!(thread->sched_mode & TH_MODE_TIMESHARE)) {
			thread_unlock(thread);
			splx(s);

			return (KERN_INVALID_POLICY);
		}

		ts_info->depressed = (thread->sched_mode & TH_MODE_ISDEPRESSED) != 0;
		if (ts_info->depressed) {
			ts_info->base_priority = DEPRESSPRI;
			ts_info->depress_priority = thread->priority;
		}
		else {
			ts_info->base_priority = thread->priority;
			ts_info->depress_priority = -1;
		}

		ts_info->cur_priority = thread->sched_pri;
		ts_info->max_priority = thread->max_priority;

		thread_unlock(thread);
		splx(s);

		*thread_info_count = POLICY_TIMESHARE_INFO_COUNT;

		return (KERN_SUCCESS);
	}
	else
	if (flavor == THREAD_SCHED_FIFO_INFO) {
		if (*thread_info_count < POLICY_FIFO_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		return (KERN_INVALID_POLICY);
	}
	else
	if (flavor == THREAD_SCHED_RR_INFO) {
		policy_rr_info_t			rr_info;

		if (*thread_info_count < POLICY_RR_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		rr_info = (policy_rr_info_t) thread_info_out;

		s = splsched();
		thread_lock(thread);

		if (thread->sched_mode & TH_MODE_TIMESHARE) {
			thread_unlock(thread);
			splx(s);

			return (KERN_INVALID_POLICY);
		}

		rr_info->depressed = (thread->sched_mode & TH_MODE_ISDEPRESSED) != 0;
		if (rr_info->depressed) {
			rr_info->base_priority = DEPRESSPRI;
			rr_info->depress_priority = thread->priority;
		}
		else {
			rr_info->base_priority = thread->priority;
			rr_info->depress_priority = -1;
		}

		rr_info->max_priority = thread->max_priority;
		rr_info->quantum = std_quantum_us / 1000;

		thread_unlock(thread);
		splx(s);

		*thread_info_count = POLICY_RR_INFO_COUNT;

		return (KERN_SUCCESS);
	}

	return (KERN_INVALID_ARGUMENT);
}

void
thread_doreap(
	register thread_t	thread)
{
	thread_act_t		thr_act;


	thr_act = thread_lock_act(thread);
	assert(thr_act && thr_act->thread == thread);

	act_reference_locked(thr_act);

	/*
	 * Replace `act_unlock_thread()' with individual
	 * calls.  (`act_detach()' can change fields used
	 * to determine which locks are held, confusing
	 * `act_unlock_thread()'.)
	 */
	act_unlock(thr_act);

	/* Remove the reference held by a rooted thread */
	act_deallocate(thr_act);

	/* Remove the reference held by the thread: */
	act_deallocate(thr_act);
}

/*
 *	reaper_thread:
 *
 *	This kernel thread runs forever looking for terminating
 *	threads, releasing their "self" references.
 */
static void
reaper_thread_continue(void)
{
	register thread_t	thread;

	(void)splsched();
	simple_lock(&reaper_lock);

	while ((thread = (thread_t) dequeue_head(&reaper_queue)) != THREAD_NULL) {
		simple_unlock(&reaper_lock);
		(void)spllo();

		thread_doreap(thread);

		(void)splsched();
		simple_lock(&reaper_lock);
	}

	assert_wait((event_t)&reaper_queue, THREAD_UNINT);
	simple_unlock(&reaper_lock);
	(void)spllo();

	thread_block(reaper_thread_continue);
	/*NOTREACHED*/
}

static void
reaper_thread(void)
{
	reaper_thread_continue();
	/*NOTREACHED*/
}

void
thread_reaper_init(void)
{
	kernel_thread_with_priority(reaper_thread, MINPRI_KERNEL);
}

kern_return_t
thread_assign(
	thread_act_t	thr_act,
	processor_set_t	new_pset)
{
	return(KERN_FAILURE);
}

/*
 *	thread_assign_default:
 *
 *	Special version of thread_assign for assigning threads to default
 *	processor set.
 */
kern_return_t
thread_assign_default(
	thread_act_t	thr_act)
{
	return (thread_assign(thr_act, &default_pset));
}

/*
 *	thread_get_assignment
 *
 *	Return current assignment for this thread.
 */
kern_return_t
thread_get_assignment(
	thread_act_t	thr_act,
	processor_set_t	*pset)
{
	thread_t	thread;

	if (thr_act == THR_ACT_NULL)
		return(KERN_INVALID_ARGUMENT);
	thread = act_lock_thread(thr_act);
	if (thread == THREAD_NULL) {
		act_unlock_thread(thr_act);
		return(KERN_INVALID_ARGUMENT);
	}
	*pset = thread->processor_set;
	act_unlock_thread(thr_act);
	pset_reference(*pset);
	return(KERN_SUCCESS);
}

/*
 *	thread_wire_internal:
 *
 *	Specify that the target thread must always be able
 *	to run and to allocate memory.
 */
kern_return_t
thread_wire_internal(
	host_priv_t	host_priv,
	thread_act_t	thr_act,
	boolean_t	wired,
	boolean_t	*prev_state)
{
	spl_t		s;
	thread_t	thread;
	extern void vm_page_free_reserve(int pages);

	if (thr_act == THR_ACT_NULL || host_priv == HOST_PRIV_NULL)
		return (KERN_INVALID_ARGUMENT);

	assert(host_priv == &realhost);

	thread = act_lock_thread(thr_act);
	if (thread == THREAD_NULL) {
		act_unlock_thread(thr_act);
		return(KERN_INVALID_ARGUMENT);
	}

	/*
	 * This implementation only works for the current thread.
	 */
	if (thr_act != current_act())
		return KERN_INVALID_ARGUMENT;

	s = splsched();
	thread_lock(thread);

	if (prev_state) {
		*prev_state = thread->vm_privilege;
	}

	if (wired) {
		if (thread->vm_privilege == FALSE)
			vm_page_free_reserve(1);	/* XXX */
		thread->vm_privilege = TRUE;
	} else {
		if (thread->vm_privilege == TRUE)
			vm_page_free_reserve(-1);	/* XXX */
		thread->vm_privilege = FALSE;
	}

	thread_unlock(thread);
	splx(s);
	act_unlock_thread(thr_act);

	return KERN_SUCCESS;
}


/*
 *	thread_wire:
 *
 *	User-api wrapper for thread_wire_internal()
 */
kern_return_t
thread_wire(
	host_priv_t	host_priv,
	thread_act_t	thr_act,
	boolean_t	wired)
{
	return thread_wire_internal(host_priv, thr_act, wired, NULL);
}

kern_return_t
host_stack_usage(
	host_t		host,
	vm_size_t	*reservedp,
	unsigned int	*totalp,
	vm_size_t	*spacep,
	vm_size_t	*residentp,
	vm_size_t	*maxusagep,
	vm_offset_t	*maxstackp)
{
#if !MACH_DEBUG
	return KERN_NOT_SUPPORTED;
#else
	unsigned int total;
	vm_size_t maxusage;

	if (host == HOST_NULL)
		return KERN_INVALID_HOST;

	maxusage = 0;

	stack_statistics(&total, &maxusage);

	*reservedp = 0;
	*totalp = total;
	*spacep = *residentp = total * round_page_32(KERNEL_STACK_SIZE);
	*maxusagep = maxusage;
	*maxstackp = 0;
	return KERN_SUCCESS;

#endif /* MACH_DEBUG */
}

/*
 *	Return info on stack usage for threads in a specific processor set
 */
kern_return_t
processor_set_stack_usage(
	processor_set_t	pset,
	unsigned int	*totalp,
	vm_size_t	*spacep,
	vm_size_t	*residentp,
	vm_size_t	*maxusagep,
	vm_offset_t	*maxstackp)
{
#if !MACH_DEBUG
	return KERN_NOT_SUPPORTED;
#else
	unsigned int total;
	vm_size_t maxusage;
	vm_offset_t maxstack;

	register thread_t *threads;
	register thread_t thread;

	unsigned int actual;	/* this many things */
	unsigned int i;

	vm_size_t size, size_needed;
	vm_offset_t addr;

	spl_t s;

	if (pset == PROCESSOR_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	size = 0; addr = 0;

	for (;;) {
		pset_lock(pset);
		if (!pset->active) {
			pset_unlock(pset);
			return KERN_INVALID_ARGUMENT;
		}

		actual = pset->thread_count;

		/* do we have the memory we need? */

		size_needed = actual * sizeof(thread_t);
		if (size_needed <= size)
			break;

		/* unlock the pset and allocate more memory */
		pset_unlock(pset);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return KERN_RESOURCE_SHORTAGE;
	}

	/* OK, have memory and the processor_set is locked & active */
	s = splsched();
	threads = (thread_t *) addr;
	for (i = 0, thread = (thread_t) queue_first(&pset->threads);
				!queue_end(&pset->threads, (queue_entry_t) thread);
				thread = (thread_t) queue_next(&thread->pset_threads)) {
		thread_lock(thread);
		if (thread->ref_count > 0) {
			thread_reference_locked(thread);
			threads[i++] = thread;
		}
		thread_unlock(thread);
	}
	splx(s);
	assert(i <= actual);

	/* can unlock processor set now that we have the thread refs */
	pset_unlock(pset);

	/* calculate maxusage and free thread references */

	total = 0;
	maxusage = 0;
	maxstack = 0;
	while (i > 0) {
		thread_t thread = threads[--i];

		if (thread->kernel_stack != 0)
			total++;

		thread_deallocate(thread);
	}

	if (size != 0)
		kfree(addr, size);

	*totalp = total;
	*residentp = *spacep = total * round_page_32(KERNEL_STACK_SIZE);
	*maxusagep = maxusage;
	*maxstackp = maxstack;
	return KERN_SUCCESS;

#endif	/* MACH_DEBUG */
}

int split_funnel_off = 0;
funnel_t *
funnel_alloc(
	int type)
{
	mutex_t *m;
	funnel_t * fnl;
	if ((fnl = (funnel_t *)kalloc(sizeof(funnel_t))) != 0){
		bzero((void *)fnl, sizeof(funnel_t));
		if ((m = mutex_alloc(0)) == (mutex_t *)NULL) {
			kfree((vm_offset_t)fnl, sizeof(funnel_t));
			return(THR_FUNNEL_NULL);
		}
		fnl->fnl_mutex = m;
		fnl->fnl_type = type;
	}
	return(fnl);
}

void
funnel_free(
	funnel_t * fnl)
{
	mutex_free(fnl->fnl_mutex);
	if (fnl->fnl_oldmutex)
		mutex_free(fnl->fnl_oldmutex);
	kfree((vm_offset_t)fnl, sizeof(funnel_t));
}

void
funnel_lock(
	funnel_t * fnl)
{
	mutex_t * m;

	m = fnl->fnl_mutex;
restart:
	mutex_lock(m);
	fnl->fnl_mtxholder = current_thread();
	if (split_funnel_off && (m != fnl->fnl_mutex)) {
		mutex_unlock(m);
		m = fnl->fnl_mutex;
		goto restart;
	}
}

void
funnel_unlock(
	funnel_t * fnl)
{
	mutex_unlock(fnl->fnl_mutex);
	fnl->fnl_mtxrelease = current_thread();
}

int		refunnel_hint_enabled = 0;

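/*
 *	refunnel_hint:
 *
 *	Consulted when a thread that gave up the funnel while waiting
 *	(TH_FN_REFUNNEL) completes a wait: if the wait ended with
 *	THREAD_AWAKENED and the hint is enabled, returns the result of
 *	mutex_preblock() on the funnel mutex, i.e. TRUE when the thread
 *	should be pre-blocked on the funnel rather than made runnable.
 */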
boolean_t
refunnel_hint(
	thread_t		thread,
	wait_result_t	wresult)
{
	if (	!(thread->funnel_state & TH_FN_REFUNNEL)	||
			wresult != THREAD_AWAKENED					)
		return (FALSE);

	if (!refunnel_hint_enabled)
		return (FALSE);

	return (mutex_preblock(thread->funnel_lock->fnl_mutex, thread));
}

funnel_t *
thread_funnel_get(
	void)
{
	thread_t th = current_thread();

	if (th->funnel_state & TH_FN_OWNED) {
		return(th->funnel_lock);
	}
	return(THR_FUNNEL_NULL);
}

boolean_t
thread_funnel_set(
	funnel_t *	fnl,
	boolean_t	funneled)
{
	thread_t	cur_thread;
	boolean_t	funnel_state_prev;
	boolean_t	intr;

	cur_thread = current_thread();
	funnel_state_prev = ((cur_thread->funnel_state & TH_FN_OWNED) == TH_FN_OWNED);

	if (funnel_state_prev != funneled) {
		intr = ml_set_interrupts_enabled(FALSE);

		if (funneled == TRUE) {
			if (cur_thread->funnel_lock)
				panic("Funnel lock called when holding one %x", cur_thread->funnel_lock);
			KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE,
											fnl, 1, 0, 0, 0);
			funnel_lock(fnl);
			KERNEL_DEBUG(0x6032434 | DBG_FUNC_NONE,
											fnl, 1, 0, 0, 0);
			cur_thread->funnel_state |= TH_FN_OWNED;
			cur_thread->funnel_lock = fnl;
		} else {
			if(cur_thread->funnel_lock->fnl_mutex != fnl->fnl_mutex)
				panic("Funnel unlock when not holding funnel");
			cur_thread->funnel_state &= ~TH_FN_OWNED;
			KERNEL_DEBUG(0x603242c | DBG_FUNC_NONE,
								fnl, 1, 0, 0, 0);

			cur_thread->funnel_lock = THR_FUNNEL_NULL;
			funnel_unlock(fnl);
		}
		(void)ml_set_interrupts_enabled(intr);
	} else {
		/* if we are trying to acquire funnel recursively
		 * check for funnel to be held already
		 */
		if (funneled && (fnl->fnl_mutex != cur_thread->funnel_lock->fnl_mutex)) {
			panic("thread_funnel_set: already holding a different funnel");
		}
	}
	return(funnel_state_prev);
}

boolean_t
thread_funnel_merge(
	funnel_t * fnl,
	funnel_t * otherfnl)
{
	mutex_t * m;
	mutex_t * otherm;
	funnel_t * gfnl;
	extern int disable_funnel;

	if ((gfnl = thread_funnel_get()) == THR_FUNNEL_NULL)
		panic("thread_funnel_merge called with no funnels held");

	if (gfnl->fnl_type != 1)
		panic("thread_funnel_merge called from non kernel funnel");

	if (gfnl != fnl)
		panic("thread_funnel_merge incorrect invocation");

	if (disable_funnel || split_funnel_off)
		return (KERN_FAILURE);

	m = fnl->fnl_mutex;
	otherm = otherfnl->fnl_mutex;

	/* Acquire other funnel mutex */
	mutex_lock(otherm);
	split_funnel_off = 1;
	disable_funnel = 1;
	otherfnl->fnl_mutex = m;
	otherfnl->fnl_type = fnl->fnl_type;
	otherfnl->fnl_oldmutex = otherm;	/* save this for future use */

	mutex_unlock(otherm);
	return(KERN_SUCCESS);
}

void
thread_set_cont_arg(
	int				arg)
{
	thread_t		self = current_thread();

	self->saved.misc = arg;
}

int
thread_get_cont_arg(void)
{
	thread_t		self = current_thread();

	return (self->saved.misc);
}

/*
 * Export routines to other components for things that are done as macros
 * within the osfmk component.
 */
#undef thread_should_halt
boolean_t
thread_should_halt(
	thread_t		th)
{
	return(thread_should_halt_fast(th));
}

vm_offset_t min_valid_stack_address(void)
{
	return vm_map_min(stack_map);
}

vm_offset_t max_valid_stack_address(void)
{
	return vm_map_max(stack_map);
}