[apple/xnu.git] / osfmk / kern / thread_act.c  (xnu-344.21.73)
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Center for Software Science (CSS).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSS requests users of this software to return to css-dist@cs.utah.edu any
 * improvements that they make and grant CSS redistribution rights.
 *
 * Author:	Bryan Ford, University of Utah CSS
 *
 *	Thread_Activation management routines
 */

#include <cpus.h>
#include <task_swapper.h>
#include <mach/kern_return.h>
#include <mach/alert.h>
#include <kern/etap_macros.h>
#include <kern/mach_param.h>
#include <kern/zalloc.h>
#include <kern/thread.h>
#include <kern/thread_swap.h>
#include <kern/task.h>
#include <kern/task_swap.h>
#include <kern/thread_act.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/exception.h>
#include <kern/ipc_mig.h>
#include <kern/ipc_tt.h>
#include <kern/profile.h>
#include <kern/machine.h>
#include <kern/spl.h>
#include <kern/syscall_subr.h>
#include <kern/sync_lock.h>
#include <kern/mk_sp.h>	/*** ??? fix so this can be removed ***/
#include <kern/processor.h>
#include <mach_prof.h>
#include <mach/rpc.h>

/*
 * Debugging printf control
 */
#if	MACH_ASSERT
unsigned int	watchacts = 0 /* WA_ALL */
		;	/* Do-it-yourself & patchable */
#endif

/*
 * Track the number of times we need to swapin a thread to deallocate it.
 */
int act_free_swapin = 0;
boolean_t first_act;

/*
 * Forward declarations for functions local to this file.
 */
kern_return_t	act_abort(thread_act_t, boolean_t);
void		special_handler(ReturnHandler *, thread_act_t);
kern_return_t	act_set_state_locked(thread_act_t, int,
			thread_state_t,
			mach_msg_type_number_t);
kern_return_t	act_get_state_locked(thread_act_t, int,
			thread_state_t,
			mach_msg_type_number_t *);
void		act_set_astbsd(thread_act_t);
void		act_set_apc(thread_act_t);
void		act_user_to_kernel(thread_act_t);
void		act_ulock_release_all(thread_act_t thr_act);

void		install_special_handler_locked(thread_act_t);

static void	act_disable(thread_act_t);

struct thread_activation	pageout_act;

static zone_t	thr_act_zone;

/*
 * Thread interfaces accessed via a thread_activation:
 */


/*
 * Internal routine to terminate a thread.
 * Sometimes called with task already locked.
 */
kern_return_t
thread_terminate_internal(
	register thread_act_t	act)
{
	kern_return_t	result;
	thread_t	thread;

	thread = act_lock_thread(act);

	if (!act->active) {
		act_unlock_thread(act);
		return (KERN_TERMINATED);
	}

	act_disable(act);
	result = act_abort(act, FALSE);

	/*
	 * Make sure this thread enters the kernel
	 * Must unlock the act, but leave the shuttle
	 * captured in this act.
	 */
	if (thread != current_thread()) {
		act_unlock(act);

		if (thread_stop(thread))
			thread_unstop(thread);
		else
			result = KERN_ABORTED;

		act_lock(act);
	}

	clear_wait(thread, act->inited? THREAD_INTERRUPTED: THREAD_AWAKENED);
	act_unlock_thread(act);

	return (result);
}

/*
 * Terminate a thread.
 */
kern_return_t
thread_terminate(
	register thread_act_t	act)
{
	kern_return_t	result;

	if (act == THR_ACT_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (	(act->task == kernel_task	||
			act->kernel_loaded	)	&&
			act != current_act()		)
		return (KERN_FAILURE);

	result = thread_terminate_internal(act);

	/*
	 * If a kernel thread is terminating itself, force an AST here.
	 * Kernel threads don't normally pass through the AST checking
	 * code - and all threads finish their own termination in the
	 * special handler APC.
	 */
	if (	act->task == kernel_task	||
			act->kernel_loaded	) {
		assert(act == current_act());
		ast_taken(AST_APC, FALSE);
		panic("thread_terminate");
	}

	return (result);
}
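
/*
 * Illustrative sketch (not part of the original file): a minimal,
 * hypothetical in-kernel caller of thread_terminate().  Note that a kernel
 * thread terminating itself never returns from the call (see the
 * ast_taken()/panic() path above).
 */
#if 0
static void
example_terminate_target(thread_act_t target)
{
	kern_return_t kr = thread_terminate(target);	/* target is a held reference */

	if (kr == KERN_TERMINATED) {
		/* the activation had already been shut down */
	}
}
#endif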

/*
 * Suspend execution of the specified thread.
 * This is a recursive-style suspension of the thread, a count of
 * suspends is maintained.
 *
 * Called with act_lock held.
 */
void
thread_hold(
	register thread_act_t	act)
{
	thread_t	thread = act->thread;

	if (act->suspend_count++ == 0) {
		install_special_handler(act);
		if (	act->inited		&&
				thread != THREAD_NULL	&&
				thread->top_act == act	)
			thread_wakeup_one(&act->suspend_count);
	}
}

/*
 * Decrement internal suspension count for thr_act, setting thread
 * runnable when count falls to zero.
 *
 * Called with act_lock held.
 */
void
thread_release(
	register thread_act_t	act)
{
	thread_t	thread = act->thread;

	if (	act->suspend_count > 0		&&
			--act->suspend_count == 0	&&
			thread != THREAD_NULL		&&
			thread->top_act == act		) {
		if (!act->inited) {
			clear_wait(thread, THREAD_AWAKENED);
			act->inited = TRUE;
		}
		else
			thread_wakeup_one(&act->suspend_count);
	}
}
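
/*
 * Illustrative sketch (hypothetical, not original code): thread_hold() and
 * thread_release() maintain the internal, recursive suspend count and must
 * be bracketed by the act lock, as their comments require.
 */
#if 0
static void
example_hold_then_release(thread_act_t act)
{
	(void) act_lock_thread(act);	/* take the act lock; returns the shuttle */

	thread_hold(act);		/* count 0 -> 1: installs the special handler */
	/* ... examine or manipulate the held activation ... */
	thread_release(act);		/* count 1 -> 0: wakes or starts the thread */

	act_unlock_thread(act);
}
#endif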

kern_return_t
thread_suspend(
	register thread_act_t	act)
{
	thread_t	thread;

	if (act == THR_ACT_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(act);

	if (!act->active) {
		act_unlock_thread(act);
		return (KERN_TERMINATED);
	}

	if (	act->user_stop_count++ == 0	&&
			act->suspend_count++ == 0	) {
		install_special_handler(act);
		if (	thread != current_thread()	&&
				thread != THREAD_NULL		&&
				thread->top_act == act		) {
			assert(act->inited);
			thread_wakeup_one(&act->suspend_count);
			act_unlock_thread(act);

			thread_wait(thread);
		}
		else
			act_unlock_thread(act);
	}
	else
		act_unlock_thread(act);

	return (KERN_SUCCESS);
}

kern_return_t
thread_resume(
	register thread_act_t	act)
{
	kern_return_t	result = KERN_SUCCESS;
	thread_t	thread;

	if (act == THR_ACT_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(act);

	if (act->active) {
		if (act->user_stop_count > 0) {
			if (	--act->user_stop_count == 0	&&
					--act->suspend_count == 0	&&
					thread != THREAD_NULL		&&
					thread->top_act == act		) {
				if (!act->inited) {
					clear_wait(thread, THREAD_AWAKENED);
					act->inited = TRUE;
				}
				else
					thread_wakeup_one(&act->suspend_count);
			}
		}
		else
			result = KERN_FAILURE;
	}
	else
		result = KERN_TERMINATED;

	act_unlock_thread(act);

	return (result);
}
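
/*
 * Illustrative sketch (hypothetical caller): thread_suspend() and
 * thread_resume() keep a separate user_stop_count, so every successful
 * suspend must be balanced by exactly one resume before the target can run
 * again.  For a remote, running target thread_suspend() also waits (via
 * thread_wait()) until it has actually stopped.
 */
#if 0
static kern_return_t
example_suspend_resume(thread_act_t act)
{
	kern_return_t kr = thread_suspend(act);

	if (kr != KERN_SUCCESS)
		return (kr);

	/* ... the target is stopped at this point ... */

	return (thread_resume(act));	/* balances the suspend above */
}
#endif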

/*
 * This routine walks toward the head of an RPC chain starting at
 * a specified thread activation. An alert bit is set and a special
 * handler is installed for each thread it encounters.
 *
 * The target thread act and thread shuttle are already locked.
 */
kern_return_t
post_alert(
	register thread_act_t	act,
	unsigned		alert_bits)
{
	panic("post_alert");
}

/*
 *	thread_depress_abort:
 *
 *	Prematurely abort priority depression if there is one.
 */
kern_return_t
thread_depress_abort(
	register thread_act_t	thr_act)
{
	register thread_t	thread;
	kern_return_t		result;

	if (thr_act == THR_ACT_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(thr_act);
	/* if activation is terminating, this operation is not meaningful */
	if (!thr_act->active) {
		act_unlock_thread(thr_act);

		return (KERN_TERMINATED);
	}

	result = _mk_sp_thread_depress_abort(thread, FALSE);

	act_unlock_thread(thr_act);

	return (result);
}


/*
 * Indicate that the activation should run its
 * special handler to detect the condition.
 *
 * Called with act_lock held.
 */
kern_return_t
act_abort(
	thread_act_t	act,
	boolean_t	chain_break )
{
	thread_t	thread = act->thread;
	spl_t		s = splsched();

	assert(thread->top_act == act);

	thread_lock(thread);
	if (!(thread->state & TH_ABORT)) {
		thread->state |= TH_ABORT;
		install_special_handler_locked(act);
	} else {
		thread->state &= ~TH_ABORT_SAFELY;
	}
	thread_unlock(thread);
	splx(s);

	return (KERN_SUCCESS);
}

kern_return_t
thread_abort(
	register thread_act_t	act)
{
	kern_return_t	result;
	thread_t	thread;

	if (act == THR_ACT_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(act);

	if (!act->active) {
		act_unlock_thread(act);
		return (KERN_TERMINATED);
	}

	result = act_abort(act, FALSE);
	clear_wait(thread, THREAD_INTERRUPTED);
	act_unlock_thread(act);

	return (result);
}

kern_return_t
thread_abort_safely(
	thread_act_t	act)
{
	thread_t	thread;
	kern_return_t	ret;
	spl_t		s;

	if ( act == THR_ACT_NULL )
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(act);

	if (!act->active) {
		act_unlock_thread(act);
		return (KERN_TERMINATED);
	}

	s = splsched();
	thread_lock(thread);
	if (!thread->at_safe_point ||
		clear_wait_internal(thread, THREAD_INTERRUPTED) != KERN_SUCCESS) {
		if (!(thread->state & TH_ABORT)) {
			thread->state |= (TH_ABORT|TH_ABORT_SAFELY);
			install_special_handler_locked(act);
		}
	}
	thread_unlock(thread);
	splx(s);

	act_unlock_thread(act);

	return (KERN_SUCCESS);
}
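
/*
 * Illustrative sketch (hypothetical): choosing between the two abort
 * flavors.  thread_abort() interrupts the target's wait unconditionally,
 * while thread_abort_safely() only interrupts a wait that is marked as a
 * safe point and otherwise defers the abort to the special handler.
 */
#if 0
static kern_return_t
example_abort(thread_act_t act, boolean_t only_at_safe_point)
{
	if (only_at_safe_point)
		return (thread_abort_safely(act));

	return (thread_abort(act));
}
#endif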

/*** backward compatibility hacks ***/
#include <mach/thread_info.h>
#include <mach/thread_special_ports.h>
#include <ipc/ipc_port.h>
#include <mach/thread_act_server.h>

kern_return_t
thread_info(
	thread_act_t		thr_act,
	thread_flavor_t		flavor,
	thread_info_t		thread_info_out,
	mach_msg_type_number_t	*thread_info_count)
{
	register thread_t	thread;
	kern_return_t		result;

	if (thr_act == THR_ACT_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(thr_act);
	if (!thr_act->active) {
		act_unlock_thread(thr_act);

		return (KERN_TERMINATED);
	}

	result = thread_info_shuttle(thr_act, flavor,
					thread_info_out, thread_info_count);

	act_unlock_thread(thr_act);

	return (result);
}

/*
 *	Routine:	thread_get_special_port [kernel call]
 *	Purpose:
 *		Clones a send right for one of the thread's
 *		special ports.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		KERN_SUCCESS		Extracted a send right.
 *		KERN_INVALID_ARGUMENT	The thread is null.
 *		KERN_FAILURE		The thread is dead.
 *		KERN_INVALID_ARGUMENT	Invalid special port.
 */

kern_return_t
thread_get_special_port(
	thread_act_t	thr_act,
	int		which,
	ipc_port_t	*portp)
{
	ipc_port_t	*whichp;
	ipc_port_t	port;
	thread_t	thread;

#if	MACH_ASSERT
	if (watchacts & WA_PORT)
		printf("thread_get_special_port(thr_act=%x, which=%x port@%x=%x\n",
			thr_act, which, portp, (portp ? *portp : 0));
#endif	/* MACH_ASSERT */

	if (!thr_act)
		return KERN_INVALID_ARGUMENT;
	thread = act_lock_thread(thr_act);
	switch (which) {
	case THREAD_KERNEL_PORT:
		whichp = &thr_act->ith_sself;
		break;

	default:
		act_unlock_thread(thr_act);
		return KERN_INVALID_ARGUMENT;
	}

	if (!thr_act->active) {
		act_unlock_thread(thr_act);
		return KERN_FAILURE;
	}

	port = ipc_port_copy_send(*whichp);
	act_unlock_thread(thr_act);

	*portp = port;
	return KERN_SUCCESS;
}

/*
 *	Routine:	thread_set_special_port [kernel call]
 *	Purpose:
 *		Changes one of the thread's special ports,
 *		setting it to the supplied send right.
 *	Conditions:
 *		Nothing locked.  If successful, consumes
 *		the supplied send right.
 *	Returns:
 *		KERN_SUCCESS		Changed the special port.
 *		KERN_INVALID_ARGUMENT	The thread is null.
 *		KERN_FAILURE		The thread is dead.
 *		KERN_INVALID_ARGUMENT	Invalid special port.
 */

kern_return_t
thread_set_special_port(
	thread_act_t	thr_act,
	int		which,
	ipc_port_t	port)
{
	ipc_port_t	*whichp;
	ipc_port_t	old;
	thread_t	thread;

#if	MACH_ASSERT
	if (watchacts & WA_PORT)
		printf("thread_set_special_port(thr_act=%x,which=%x,port=%x\n",
			thr_act, which, port);
#endif	/* MACH_ASSERT */

	if (thr_act == 0)
		return KERN_INVALID_ARGUMENT;

	thread = act_lock_thread(thr_act);
	switch (which) {
	case THREAD_KERNEL_PORT:
		whichp = &thr_act->ith_self;
		break;

	default:
		act_unlock_thread(thr_act);
		return KERN_INVALID_ARGUMENT;
	}

	if (!thr_act->active) {
		act_unlock_thread(thr_act);
		return KERN_FAILURE;
	}

	old = *whichp;
	*whichp = port;
	act_unlock_thread(thr_act);

	if (IP_VALID(old))
		ipc_port_release_send(old);
	return KERN_SUCCESS;
}

/*
 *  thread state should always be accessible by locking the thread
 *  and copying it.  The activation messes things up so for right
 *  now if it's not the top of the chain, use a special handler to
 *  get the information when the shuttle returns to the activation.
 */
kern_return_t
thread_get_state(
	register thread_act_t	act,
	int			flavor,
	thread_state_t		state,		/* pointer to OUT array */
	mach_msg_type_number_t	*state_count)	/*IN/OUT*/
{
	kern_return_t		result = KERN_SUCCESS;
	thread_t		thread;

	if (act == THR_ACT_NULL || act == current_act())
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(act);

	if (!act->active) {
		act_unlock_thread(act);
		return (KERN_TERMINATED);
	}

	thread_hold(act);

	for (;;) {
		thread_t	thread1;

		if (	thread == THREAD_NULL	||
				thread->top_act != act	)
			break;
		act_unlock_thread(act);

		if (!thread_stop(thread)) {
			result = KERN_ABORTED;
			(void)act_lock_thread(act);
			thread = THREAD_NULL;
			break;
		}

		thread1 = act_lock_thread(act);
		if (thread1 == thread)
			break;

		thread_unstop(thread);
		thread = thread1;
	}

	if (result == KERN_SUCCESS)
		result = act_machine_get_state(act, flavor, state, state_count);

	if (	thread != THREAD_NULL	&&
			thread->top_act == act	)
		thread_unstop(thread);

	thread_release(act);
	act_unlock_thread(act);

	return (result);
}
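
/*
 * Illustrative sketch (hypothetical, machine-independent caller): fetching
 * another activation's register state.  The MACHINE_THREAD_STATE[_COUNT]
 * flavor macros and THREAD_STATE_MAX are the generic names from
 * <mach/thread_status.h>; the stop/restart loop above is hidden entirely
 * behind thread_get_state() itself.
 */
#if 0
static kern_return_t
example_fetch_state(thread_act_t act)
{
	natural_t		buf[THREAD_STATE_MAX];
	mach_msg_type_number_t	count = MACHINE_THREAD_STATE_COUNT;

	/* fails with KERN_INVALID_ARGUMENT if act is the caller's own act */
	return (thread_get_state(act, MACHINE_THREAD_STATE,
					(thread_state_t)buf, &count));
}
#endif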

/*
 *	Change thread's machine-dependent state.  Called with nothing
 *	locked.  Returns same way.
 */
kern_return_t
thread_set_state(
	register thread_act_t	act,
	int			flavor,
	thread_state_t		state,
	mach_msg_type_number_t	state_count)
{
	kern_return_t		result = KERN_SUCCESS;
	thread_t		thread;

	if (act == THR_ACT_NULL || act == current_act())
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(act);

	if (!act->active) {
		act_unlock_thread(act);
		return (KERN_TERMINATED);
	}

	thread_hold(act);

	for (;;) {
		thread_t	thread1;

		if (	thread == THREAD_NULL	||
				thread->top_act != act	)
			break;
		act_unlock_thread(act);

		if (!thread_stop(thread)) {
			result = KERN_ABORTED;
			(void)act_lock_thread(act);
			thread = THREAD_NULL;
			break;
		}

		thread1 = act_lock_thread(act);
		if (thread1 == thread)
			break;

		thread_unstop(thread);
		thread = thread1;
	}

	if (result == KERN_SUCCESS)
		result = act_machine_set_state(act, flavor, state, state_count);

	if (	thread != THREAD_NULL	&&
			thread->top_act == act	)
		thread_unstop(thread);

	thread_release(act);
	act_unlock_thread(act);

	return (result);
}

/*
 * Kernel-internal "thread" interfaces used outside this file:
 */

kern_return_t
thread_dup(
	register thread_act_t	target)
{
	kern_return_t		result = KERN_SUCCESS;
	thread_act_t		self = current_act();
	thread_t		thread;

	if (target == THR_ACT_NULL || target == self)
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(target);

	if (!target->active) {
		act_unlock_thread(target);
		return (KERN_TERMINATED);
	}

	thread_hold(target);

	for (;;) {
		thread_t	thread1;

		if (	thread == THREAD_NULL		||
				thread->top_act != target	)
			break;
		act_unlock_thread(target);

		if (!thread_stop(thread)) {
			result = KERN_ABORTED;
			(void)act_lock_thread(target);
			thread = THREAD_NULL;
			break;
		}

		thread1 = act_lock_thread(target);
		if (thread1 == thread)
			break;

		thread_unstop(thread);
		thread = thread1;
	}

	if (result == KERN_SUCCESS)
		result = act_thread_dup(self, target);

	if (	thread != THREAD_NULL		&&
			thread->top_act == target	)
		thread_unstop(thread);

	thread_release(target);
	act_unlock_thread(target);

	return (result);
}


/*
 *	thread_setstatus:
 *
 *	Set the status of the specified thread.
 *	Called with (and returns with) no locks held.
 */
kern_return_t
thread_setstatus(
	register thread_act_t	act,
	int			flavor,
	thread_state_t		tstate,
	mach_msg_type_number_t	count)
{
	kern_return_t		result = KERN_SUCCESS;
	thread_t		thread;

	thread = act_lock_thread(act);

	if (	act != current_act()		&&
			(act->suspend_count == 0	||
			 thread == THREAD_NULL		||
			 (thread->state & TH_RUN)	||
			 thread->top_act != act)	)
		result = KERN_FAILURE;

	if (result == KERN_SUCCESS)
		result = act_machine_set_state(act, flavor, tstate, count);

	act_unlock_thread(act);

	return (result);
}

/*
 *	thread_getstatus:
 *
 *	Get the status of the specified thread.
 */
kern_return_t
thread_getstatus(
	register thread_act_t	act,
	int			flavor,
	thread_state_t		tstate,
	mach_msg_type_number_t	*count)
{
	kern_return_t		result = KERN_SUCCESS;
	thread_t		thread;

	thread = act_lock_thread(act);

	if (	act != current_act()		&&
			(act->suspend_count == 0	||
			 thread == THREAD_NULL		||
			 (thread->state & TH_RUN)	||
			 thread->top_act != act)	)
		result = KERN_FAILURE;

	if (result == KERN_SUCCESS)
		result = act_machine_get_state(act, flavor, tstate, count);

	act_unlock_thread(act);

	return (result);
}

/*
 * Kernel-internal thread_activation interfaces used outside this file:
 */

/*
 * act_init()	- Initialize activation handling code
 */
void
act_init()
{
	thr_act_zone = zinit(
			sizeof(struct thread_activation),
			ACT_MAX * sizeof(struct thread_activation), /* XXX */
			ACT_CHUNK * sizeof(struct thread_activation),
			"activations");
	first_act = TRUE;
	act_machine_init();
}


/*
 * act_create	- Create a new activation in a specific task.
 */
kern_return_t
act_create(task_t task,
	   thread_act_t *new_act)
{
	thread_act_t thr_act;
	int rc;
	vm_map_t map;

	if (first_act) {
		thr_act = &pageout_act;
		first_act = FALSE;
	} else
		thr_act = (thread_act_t)zalloc(thr_act_zone);
	if (thr_act == 0)
		return(KERN_RESOURCE_SHORTAGE);

#if	MACH_ASSERT
	if (watchacts & WA_ACT_LNK)
		printf("act_create(task=%x,thr_act@%x=%x)\n",
			task, new_act, thr_act);
#endif	/* MACH_ASSERT */

	/* Start by zeroing everything; then init non-zero items only */
	bzero((char *)thr_act, sizeof(*thr_act));

	if (thr_act == &pageout_act)
		thr_act->thread = &pageout_thread;

#ifdef MACH_BSD
	{
		/*
		 * Take care of the uthread allocation
		 * do it early in order to make KERN_RESOURCE_SHORTAGE
		 * handling trivial
		 * uthread_alloc() will bzero the storage allocated.
		 */
		extern void *uthread_alloc(task_t, thread_act_t);

		thr_act->uthread = uthread_alloc(task, thr_act);
		if(thr_act->uthread == 0) {
			/* Put the thr_act back on the thr_act zone */
			zfree(thr_act_zone, (vm_offset_t)thr_act);
			return(KERN_RESOURCE_SHORTAGE);
		}
	}
#endif	/* MACH_BSD */

	/*
	 * Start with one reference for the caller and one for the
	 * act being alive.
	 */
	act_lock_init(thr_act);
	thr_act->ref_count = 2;

	/* Latch onto the task.  */
	thr_act->task = task;
	task_reference(task);

	/* special_handler will always be last on the returnhandlers list.  */
	thr_act->special_handler.next = 0;
	thr_act->special_handler.handler = special_handler;

#if	MACH_PROF
	thr_act->act_profiled = FALSE;
	thr_act->act_profiled_own = FALSE;
	thr_act->profil_buffer = NULLPROFDATA;
#endif

	/* Initialize the held_ulocks queue as empty */
	queue_init(&thr_act->held_ulocks);

	/* Inherit the profiling status of the parent task */
	act_prof_init(thr_act, task);

	ipc_thr_act_init(task, thr_act);
	act_machine_create(task, thr_act);

	/*
	 * If thr_act created in kernel-loaded task, alter its saved
	 * state to so indicate
	 */
	if (task->kernel_loaded) {
		act_user_to_kernel(thr_act);
	}

	/* Cache the task's map and take a reference to it */
	map = task->map;
	thr_act->map = map;

	/* Inline vm_map_reference cause we don't want to increment res_count */
	mutex_lock(&map->s_lock);
	map->ref_count++;
	mutex_unlock(&map->s_lock);

	*new_act = thr_act;
	return KERN_SUCCESS;
}
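
/*
 * Illustrative sketch (hypothetical): the reference-count contract of
 * act_create().  The new activation comes back with two references, one for
 * the caller and one for being active, so a caller that only created it and
 * handed it off must still drop its own reference.
 */
#if 0
static kern_return_t
example_create_act(task_t task)
{
	thread_act_t	new_act;
	kern_return_t	kr = act_create(task, &new_act);

	if (kr != KERN_SUCCESS)
		return (kr);

	/* ... hand new_act to whoever binds it to a shuttle ... */

	act_deallocate(new_act);	/* drop the caller's reference */
	return (KERN_SUCCESS);
}
#endif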

/*
 * act_free	- called when a thr_act's ref_count drops to zero.
 *
 * This can only happen after the activation has been reaped, and
 * all other references to it have gone away.  We can now release
 * the last critical resources, unlink the activation from the
 * task, and release the reference on the thread shuttle itself.
 *
 * Called with activation locked.
 */
#if	MACH_ASSERT
int	dangerous_bzero = 1;	/* paranoia & safety */
#endif

void
act_free(thread_act_t thr_act)
{
	task_t		task;
	thread_t	thr;
	vm_map_t	map;
	unsigned int	ref;
	void		*task_proc;

#if	MACH_ASSERT
	if (watchacts & WA_EXIT)
		printf("act_free(%x(%d)) thr=%x tsk=%x(%d) %sactive\n",
			thr_act, thr_act->ref_count, thr_act->thread,
			thr_act->task,
			thr_act->task ? thr_act->task->ref_count : 0,
			thr_act->active ? " " : " !");
#endif	/* MACH_ASSERT */

	assert(!thr_act->active);

	task = thr_act->task;
	task_lock(task);

	task_proc = task->bsd_info;
	if (thr = thr_act->thread) {
		time_value_t	user_time, system_time;

		thread_read_times(thr, &user_time, &system_time);
		time_value_add(&task->total_user_time, &user_time);
		time_value_add(&task->total_system_time, &system_time);

		/* Unlink the thr_act from the task's thr_act list,
		 * so it doesn't appear in calls to task_threads and such.
		 * The thr_act still keeps its ref on the task, however.
		 */
		queue_remove(&task->thr_acts, thr_act, thread_act_t, thr_acts);
		thr_act->thr_acts.next = NULL;
		task->thr_act_count--;
		task->res_act_count--;
		task_unlock(task);
		task_deallocate(task);
		thread_deallocate(thr);
		act_machine_destroy(thr_act);
	} else {
		/*
		 * Must have never really gotten started
		 * no unlinking from the task and no need
		 * to free the shuttle.
		 */
		task_unlock(task);
		task_deallocate(task);
	}

	act_prof_deallocate(thr_act);
	ipc_thr_act_terminate(thr_act);

	/*
	 * Drop the cached map reference.
	 * Inline version of vm_map_deallocate() because we
	 * don't want to decrement the map's residence count here.
	 */
	map = thr_act->map;
	mutex_lock(&map->s_lock);
	ref = --map->ref_count;
	mutex_unlock(&map->s_lock);
	if (ref == 0)
		vm_map_destroy(map);

#ifdef MACH_BSD
	{
		/*
		 * Free uthread BEFORE the bzero.
		 * Not doing so will result in a leak.
		 */
		extern void uthread_free(task_t, void *, void *);

		void *ut = thr_act->uthread;
		thr_act->uthread = 0;
		uthread_free(task, ut, task_proc);
	}
#endif	/* MACH_BSD */

#if	MACH_ASSERT
	if (dangerous_bzero)	/* dangerous if we're still using it! */
		bzero((char *)thr_act, sizeof(*thr_act));
#endif	/* MACH_ASSERT */
	/* Put the thr_act back on the thr_act zone */
	zfree(thr_act_zone, (vm_offset_t)thr_act);
}


/*
 * act_attach	- Attach a thr_act to the top of a thread ("push the stack").
 *
 * The thread_shuttle must be either the current one or a brand-new one.
 * Assumes the thr_act is active but not in use.
 *
 * Already locked: thr_act plus "appropriate" thread-related locks
 * (see act_lock_thread()).
 */
void
act_attach(
	thread_act_t	thr_act,
	thread_t	thread,
	unsigned	init_alert_mask)
{
	thread_act_t	lower;

#if	MACH_ASSERT
	assert(thread == current_thread() || thread->top_act == THR_ACT_NULL);
	if (watchacts & WA_ACT_LNK)
		printf("act_attach(thr_act %x(%d) thread %x(%d) mask %d)\n",
		       thr_act, thr_act->ref_count, thread, thread->ref_count,
		       init_alert_mask);
#endif	/* MACH_ASSERT */

	/*
	 *	Chain the thr_act onto the thread's thr_act stack.
	 *	Set mask and auto-propagate alerts from below.
	 */
	thr_act->ref_count++;
	thr_act->thread = thread;
	thr_act->higher = THR_ACT_NULL;  /*safety*/
	thr_act->alerts = 0;
	thr_act->alert_mask = init_alert_mask;
	lower = thr_act->lower = thread->top_act;

	if (lower != THR_ACT_NULL) {
		lower->higher = thr_act;
		thr_act->alerts = (lower->alerts & init_alert_mask);
	}

	thread->top_act = thr_act;
}

/*
 * act_detach
 *
 *	Remove the current thr_act from the top of the current thread, i.e.
 *	"pop the stack". Assumes already locked: thr_act plus "appropriate"
 *	thread-related locks (see act_lock_thread).
 */
void
act_detach(
	thread_act_t	cur_act)
{
	thread_t	cur_thread = cur_act->thread;

#if	MACH_ASSERT
	if (watchacts & (WA_EXIT|WA_ACT_LNK))
		printf("act_detach: thr_act %x(%d), thrd %x(%d) task=%x(%d)\n",
		       cur_act, cur_act->ref_count,
		       cur_thread, cur_thread->ref_count,
		       cur_act->task,
		       cur_act->task ? cur_act->task->ref_count : 0);
#endif	/* MACH_ASSERT */

	/* Unlink the thr_act from the thread's thr_act stack */
	cur_thread->top_act = cur_act->lower;
	cur_act->thread = 0;
	cur_act->ref_count--;
	assert(cur_act->ref_count > 0);

#if	MACH_ASSERT
	cur_act->lower = cur_act->higher = THR_ACT_NULL;
	if (cur_thread->top_act)
		cur_thread->top_act->higher = THR_ACT_NULL;
#endif	/* MACH_ASSERT */

	return;
}
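
/*
 * Illustrative sketch (hypothetical): act_attach()/act_detach() maintain the
 * activation stack of a shuttle.  The helper below only shows the pairing;
 * the act and thread locks must be held as described in the comments above.
 */
#if 0
static void
example_push_pop(thread_act_t new_act)
{
	thread_t	thread = current_thread();

	act_attach(new_act, thread, 0);		/* push: new_act becomes top_act */
	/* ... run on behalf of new_act ... */
	act_detach(new_act);			/* pop: the previous top_act is restored */
}
#endif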


/*
 * Synchronize a thread operation with migration.
 * Called with nothing locked.
 * Returns with thr_act locked.
 */
thread_t
act_lock_thread(
	thread_act_t	thr_act)
{

	/*
	 * JMM - We have moved away from explicit RPC locks
	 * and towards a generic migration approach.  The wait
	 * queue lock will be the point of synchronization for
	 * the shuttle linkage when this is rolled out.  Until
	 * then, just lock the act.
	 */
	act_lock(thr_act);
	return (thr_act->thread);
}

/*
 * Unsynchronize with migration (i.e., undo an act_lock_thread() call).
 * Called with thr_act locked, plus thread locks held that are
 * "correct" for thr_act's state.  Returns with nothing locked.
 */
void
act_unlock_thread(thread_act_t	thr_act)
{
	act_unlock(thr_act);
}

/*
 * Synchronize with migration given a pointer to a shuttle (instead of an
 * activation).  Called with nothing locked; returns with all
 * "appropriate" thread-related locks held (see act_lock_thread()).
 */
thread_act_t
thread_lock_act(
	thread_t	thread)
{
	thread_act_t	thr_act;

	while (1) {
		thr_act = thread->top_act;
		if (!thr_act)
			break;
		if (!act_lock_try(thr_act)) {
			mutex_pause();
			continue;
		}
		break;
	}
	return (thr_act);
}

/*
 * Unsynchronize with an activation starting from a pointer to
 * a shuttle.
 */
void
thread_unlock_act(
	thread_t	thread)
{
	thread_act_t	thr_act;

	if (thr_act = thread->top_act) {
		act_unlock(thr_act);
	}
}

/*
 * switch_act
 *
 * If a new activation is given, switch to it. If not,
 * switch to the lower activation (pop). Returns the old
 * activation. This is for migration support.
 */
thread_act_t
switch_act(
	thread_act_t act)
{
	thread_t	thread;
	thread_act_t	old, new;
	unsigned	cpu;
	spl_t		spl;


	disable_preemption();

	cpu = cpu_number();
	thread = current_thread();

	/*
	 *	Find the old and new activation for switch.
	 */
	old = thread->top_act;

	if (act) {
		new = act;
		new->thread = thread;
	}
	else {
		new = old->lower;
	}

	assert(new != THR_ACT_NULL);
	assert(cpu_to_processor(cpu)->cpu_data->active_thread == thread);
	active_kloaded[cpu] = (new->kernel_loaded) ? new : 0;

	/* This is where all the work happens */
	machine_switch_act(thread, old, new, cpu);

	/*
	 *	Push or pop an activation on the chain.
	 */
	if (act) {
		act_attach(new, thread, 0);
	}
	else {
		act_detach(old);
	}

	enable_preemption();

	return(old);
}

/*
 * install_special_handler
 *	Install the special returnhandler that handles suspension and
 *	termination, if it hasn't been installed already.
 *
 *	Already locked: RPC-related locks for thr_act, but not
 *	scheduling lock (thread_lock()) of the associated thread.
 */
void
install_special_handler(
	thread_act_t	thr_act)
{
	spl_t		spl;
	thread_t	thread = thr_act->thread;

#if	MACH_ASSERT
	if (watchacts & WA_ACT_HDLR)
	    printf("act_%x: install_special_hdlr(%x)\n",current_act(),thr_act);
#endif	/* MACH_ASSERT */

	spl = splsched();
	thread_lock(thread);
	install_special_handler_locked(thr_act);
	thread_unlock(thread);
	splx(spl);
}

/*
 * install_special_handler_locked
 *	Do the work of installing the special_handler.
 *
 *	Already locked: RPC-related locks for thr_act, plus the
 *	scheduling lock (thread_lock()) of the associated thread.
 */
void
install_special_handler_locked(
	thread_act_t		act)
{
	thread_t		thread = act->thread;
	ReturnHandler		**rh;

	/* The work handler must always be the last ReturnHandler on the list,
	   because it can do tricky things like detach the thr_act.  */
	for (rh = &act->handlers; *rh; rh = &(*rh)->next)
		continue;
	if (rh != &act->special_handler.next)
		*rh = &act->special_handler;

	if (act == thread->top_act) {
		/*
		 * Temporarily undepress, so target has
		 * a chance to do locking required to
		 * block itself in special_handler().
		 */
		if (thread->sched_mode & TH_MODE_ISDEPRESSED)
			compute_priority(thread, TRUE);
	}

	thread_ast_set(act, AST_APC);
	if (act == current_act())
		ast_propagate(act->ast);
	else {
		processor_t		processor = thread->last_processor;

		if (	processor != PROCESSOR_NULL			&&
				processor->state == PROCESSOR_RUNNING	&&
				processor->cpu_data->active_thread == thread	)
			cause_ast_check(processor);
	}
}
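
/*
 * Illustrative sketch (hypothetical): arranging for a target activation to
 * run its special handler.  With the act lock held, install_special_handler()
 * queues the handler and sets AST_APC; the target then executes it from
 * act_execute_returnhandlers() on its next trip back toward user mode.
 */
#if 0
static void
example_force_special_handler(thread_act_t act)
{
	(void) act_lock_thread(act);
	install_special_handler(act);	/* takes the thread lock internally */
	act_unlock_thread(act);
}
#endif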

kern_return_t
thread_apc_set(
	thread_act_t		act,
	thread_apc_handler_t	apc)
{
	extern thread_apc_handler_t	bsd_ast;

	assert(apc == bsd_ast);
	return (KERN_FAILURE);
}

kern_return_t
thread_apc_clear(
	thread_act_t		act,
	thread_apc_handler_t	apc)
{
	extern thread_apc_handler_t	bsd_ast;

	assert(apc == bsd_ast);
	return (KERN_FAILURE);
}

/*
 * Activation control support routines internal to this file:
 */

/*
 * act_execute_returnhandlers()	- does just what the name says
 *
 * This is called by system-dependent code when it detects that
 * thr_act->handlers is non-null while returning into user mode.
 */
void
act_execute_returnhandlers(void)
{
	thread_act_t	act = current_act();

#if	MACH_ASSERT
	if (watchacts & WA_ACT_HDLR)
		printf("execute_rtn_hdlrs: act=%x\n", act);
#endif	/* MACH_ASSERT */

	thread_ast_clear(act, AST_APC);
	spllo();

	for (;;) {
		ReturnHandler	*rh;
		thread_t	thread = act_lock_thread(act);

		(void)splsched();
		thread_lock(thread);
		rh = act->handlers;
		if (!rh) {
			thread_unlock(thread);
			spllo();
			act_unlock_thread(act);
			return;
		}
		act->handlers = rh->next;
		thread_unlock(thread);
		spllo();
		act_unlock_thread(act);

#if	MACH_ASSERT
		if (watchacts & WA_ACT_HDLR)
		    printf( (rh == &act->special_handler) ?
			"\tspecial_handler\n" : "\thandler=%x\n", rh->handler);
#endif	/* MACH_ASSERT */

		/* Execute it */
		(*rh->handler)(rh, act);
	}
}

/*
 * special_handler_continue
 *
 * Continuation routine for the special handler blocks.  It checks
 * to see whether there have been any new suspensions.  If so, it
 * installs the special handler again.  Otherwise, it checks to see
 * if the current depression needs to be re-instated (it may have
 * been temporarily removed in order to get to this point in a hurry).
 */
void
special_handler_continue(void)
{
	thread_act_t		self = current_act();

	if (self->suspend_count > 0)
		install_special_handler(self);
	else {
		thread_t		thread = self->thread;
		spl_t			s = splsched();

		thread_lock(thread);
		if (thread->sched_mode & TH_MODE_ISDEPRESSED) {
			processor_t		myprocessor = thread->last_processor;

			thread->sched_pri = DEPRESSPRI;
			myprocessor->current_pri = thread->sched_pri;
			thread->sched_mode &= ~TH_MODE_PREEMPT;
		}
		thread_unlock(thread);
		splx(s);
	}

	thread_exception_return();
	/*NOTREACHED*/
}

/*
 * special_handler	- handles suspension, termination.  Called
 * with nothing locked.  Returns (if it returns) the same way.
 */
void
special_handler(
	ReturnHandler	*rh,
	thread_act_t	self)
{
	thread_t	thread = act_lock_thread(self);
	spl_t		s;

	assert(thread != THREAD_NULL);

	s = splsched();
	thread_lock(thread);
	thread->state &= ~(TH_ABORT|TH_ABORT_SAFELY);	/* clear any aborts */
	thread_unlock(thread);
	splx(s);

	/*
	 * If someone has killed this invocation,
	 * invoke the return path with a terminated exception.
	 */
	if (!self->active) {
		act_unlock_thread(self);
		act_machine_return(KERN_TERMINATED);
	}

	/*
	 * If we're suspended, go to sleep and wait for someone to wake us up.
	 */
	if (self->suspend_count > 0) {
		if (self->handlers == NULL) {
			assert_wait(&self->suspend_count, THREAD_ABORTSAFE);
			act_unlock_thread(self);
			thread_block(special_handler_continue);
			/* NOTREACHED */
		}

		act_unlock_thread(self);

		special_handler_continue();
		/*NOTREACHED*/
	}

	act_unlock_thread(self);
}
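
/*
 * Illustrative sketch (hypothetical): the other half of the suspend
 * handshake.  special_handler() blocks the suspended activation on
 * &self->suspend_count; any path that drops the suspend count to zero
 * (thread_release(), thread_resume()) wakes it on the same event.
 */
#if 0
static void
example_wake_suspended(thread_act_t act)
{
	(void) act_lock_thread(act);
	thread_release(act);	/* may thread_wakeup_one(&act->suspend_count) */
	act_unlock_thread(act);
}
#endif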

/*
 * Update activation that belongs to a task created via kernel_task_create().
 */
void
act_user_to_kernel(
	thread_act_t	thr_act)
{
	pcb_user_to_kernel(thr_act);
	thr_act->kernel_loading = TRUE;
}

/*
 * Already locked: activation (shuttle frozen within)
 *
 * Mark an activation inactive, and prepare it to terminate
 * itself.
 */
static void
act_disable(
	thread_act_t	thr_act)
{

#if	MACH_ASSERT
	if (watchacts & WA_EXIT) {
		printf("act_%x: act_disable_tl(thr_act=%x(%d))%sactive",
			current_act(), thr_act, thr_act->ref_count,
			(thr_act->active ? " " : " !"));
		printf("\n");
		(void) dump_act(thr_act);
	}
#endif	/* MACH_ASSERT */

	thr_act->active = 0;

	/* Drop the thr_act reference taken for being active.
	 * (There is still at least one reference left:
	 * the one we were passed.)
	 * Inline the deallocate because thr_act is locked.
	 */
	act_locked_act_deallocate(thr_act);
}

/*
 * act_alert	- Register an alert from this activation.
 *
 * Each set bit is propagated upward from (but not including) this activation,
 * until the top of the chain is reached or the bit is masked.
 */
kern_return_t
act_alert(thread_act_t thr_act, unsigned alerts)
{
	thread_t thread = act_lock_thread(thr_act);

#if	MACH_ASSERT
	if (watchacts & WA_ACT_LNK)
		printf("act_alert %x: %x\n", thr_act, alerts);
#endif	/* MACH_ASSERT */

	if (thread) {
		thread_act_t act_up = thr_act;
		while ((alerts) && (act_up != thread->top_act)) {
			act_up = act_up->higher;
			alerts &= act_up->alert_mask;
			act_up->alerts |= alerts;
		}
		/*
		 * XXXX If we reach the top, and it is blocked in glue
		 * code, do something to kick it.  XXXX
		 */
	}
	act_unlock_thread(thr_act);

	return KERN_SUCCESS;
}

kern_return_t act_alert_mask(thread_act_t thr_act, unsigned alert_mask)
{
	panic("act_alert_mask NOT YET IMPLEMENTED\n");
	return KERN_SUCCESS;
}

typedef struct GetSetState {
	struct ReturnHandler rh;
	int flavor;
	void *state;
	int *pcount;
	int result;
} GetSetState;

/* Local Forward decls */
kern_return_t get_set_state(
			thread_act_t thr_act, int flavor,
			thread_state_t state, int *pcount,
			void (*handler)(ReturnHandler *rh, thread_act_t thr_act));
void get_state_handler(ReturnHandler *rh, thread_act_t thr_act);
void set_state_handler(ReturnHandler *rh, thread_act_t thr_act);

/*
 * get_set_state(thr_act ...)
 *
 * General code to install g/set_state handler.
 * Called with thr_act's act_lock() and "appropriate"
 * thread-related locks held.  (See act_lock_thread().)
 */
kern_return_t
get_set_state(
	thread_act_t	act,
	int		flavor,
	thread_state_t	state,
	int		*pcount,
	void		(*handler)(
				ReturnHandler	*rh,
				thread_act_t	act))
{
	GetSetState	gss;

	/* Initialize a small parameter structure */
	gss.rh.handler = handler;
	gss.flavor = flavor;
	gss.state = state;
	gss.pcount = pcount;
	gss.result = KERN_ABORTED;	/* iff wait below is interrupted */

	/* Add it to the thr_act's return handler list */
	gss.rh.next = act->handlers;
	act->handlers = &gss.rh;

	act_set_apc(act);

#if	MACH_ASSERT
	if (watchacts & WA_ACT_HDLR) {
		printf("act_%x: get_set_state(act=%x flv=%x state=%x ptr@%x=%x)",
			current_act(), act, flavor, state,
			pcount, (pcount ? *pcount : 0));
		printf((handler == get_state_handler ? "get_state_hdlr\n" :
			(handler == set_state_handler ? "set_state_hdlr\n" :
				"hndler=%x\n")), handler);
	}
#endif	/* MACH_ASSERT */

	assert(act->thread);
	assert(act != current_act());

	for (;;) {
		wait_result_t	result;

		if (	act->inited			&&
				act->thread->top_act == act	)
			thread_wakeup_one(&act->suspend_count);

		/*
		 * Wait must be interruptible to avoid deadlock (e.g.) with
		 * task_suspend() when caller and target of get_set_state()
		 * are in same task.
		 */
		result = assert_wait(&gss, THREAD_ABORTSAFE);
		act_unlock_thread(act);

		if (result == THREAD_WAITING)
			result = thread_block(THREAD_CONTINUE_NULL);

		assert(result != THREAD_WAITING);

		if (gss.result != KERN_ABORTED) {
			assert(result != THREAD_INTERRUPTED);
			break;
		}

		/* JMM - What about other aborts (like BSD signals)? */
		if (current_act()->handlers)
			act_execute_returnhandlers();

		act_lock_thread(act);
	}

#if	MACH_ASSERT
	if (watchacts & WA_ACT_HDLR)
		printf("act_%x: get_set_state returns %x\n",
			current_act(), gss.result);
#endif	/* MACH_ASSERT */

	return (gss.result);
}
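
/*
 * Illustrative sketch (hypothetical): act_get_state()/act_set_state() below
 * are thin wrappers that take the act lock and then ride the ReturnHandler
 * machinery above; the target activation copies its own state from
 * get_state_handler() and wakes the waiter.  MACHINE_THREAD_STATE is the
 * generic flavor macro and is only an assumption here.
 */
#if 0
static kern_return_t
example_remote_get_state(thread_act_t act, thread_state_t state,
		mach_msg_type_number_t *count)
{
	/* blocks until the target runs its handler (or the wait is aborted) */
	return (act_get_state(act, MACHINE_THREAD_STATE, state, count));
}
#endif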

void
set_state_handler(ReturnHandler *rh, thread_act_t thr_act)
{
	GetSetState *gss = (GetSetState*)rh;

#if	MACH_ASSERT
	if (watchacts & WA_ACT_HDLR)
		printf("act_%x: set_state_handler(rh=%x,thr_act=%x)\n",
			current_act(), rh, thr_act);
#endif	/* MACH_ASSERT */

	gss->result = act_machine_set_state(thr_act, gss->flavor,
						gss->state, *gss->pcount);
	thread_wakeup((event_t)gss);
}

void
get_state_handler(ReturnHandler *rh, thread_act_t thr_act)
{
	GetSetState *gss = (GetSetState*)rh;

#if	MACH_ASSERT
	if (watchacts & WA_ACT_HDLR)
		printf("act_%x: get_state_handler(rh=%x,thr_act=%x)\n",
			current_act(), rh, thr_act);
#endif	/* MACH_ASSERT */

	gss->result = act_machine_get_state(thr_act, gss->flavor,
			gss->state,
			(mach_msg_type_number_t *) gss->pcount);
	thread_wakeup((event_t)gss);
}

kern_return_t
act_get_state_locked(thread_act_t thr_act, int flavor, thread_state_t state,
					mach_msg_type_number_t *pcount)
{
#if	MACH_ASSERT
	if (watchacts & WA_ACT_HDLR)
		printf("act_%x: act_get_state_L(thr_act=%x,flav=%x,st=%x,pcnt@%x=%x)\n",
			current_act(), thr_act, flavor, state, pcount,
			(pcount ? *pcount : 0));
#endif	/* MACH_ASSERT */

	return(get_set_state(thr_act, flavor, state, (int*)pcount, get_state_handler));
}

kern_return_t
act_set_state_locked(thread_act_t thr_act, int flavor, thread_state_t state,
					mach_msg_type_number_t count)
{
#if	MACH_ASSERT
	if (watchacts & WA_ACT_HDLR)
		printf("act_%x: act_set_state_L(thr_act=%x,flav=%x,st=%x,pcnt@%x=%x)\n",
			current_act(), thr_act, flavor, state, count, count);
#endif	/* MACH_ASSERT */

	return(get_set_state(thr_act, flavor, state, (int*)&count, set_state_handler));
}

kern_return_t
act_set_state(thread_act_t thr_act, int flavor, thread_state_t state,
				mach_msg_type_number_t count)
{
	if (thr_act == THR_ACT_NULL || thr_act == current_act())
		return(KERN_INVALID_ARGUMENT);

	act_lock_thread(thr_act);
	return(act_set_state_locked(thr_act, flavor, state, count));

}

kern_return_t
act_get_state(thread_act_t thr_act, int flavor, thread_state_t state,
				mach_msg_type_number_t *pcount)
{
	if (thr_act == THR_ACT_NULL || thr_act == current_act())
		return(KERN_INVALID_ARGUMENT);

	act_lock_thread(thr_act);
	return(act_get_state_locked(thr_act, flavor, state, pcount));
}

void
act_set_astbsd(
	thread_act_t	act)
{
	spl_t		s = splsched();

	if (act == current_act()) {
		thread_ast_set(act, AST_BSD);
		ast_propagate(act->ast);
	}
	else {
		thread_t	thread = act->thread;
		processor_t	processor;

		thread_lock(thread);
		thread_ast_set(act, AST_BSD);
		processor = thread->last_processor;
		if (	processor != PROCESSOR_NULL			&&
				processor->state == PROCESSOR_RUNNING	&&
				processor->cpu_data->active_thread == thread	)
			cause_ast_check(processor);
		thread_unlock(thread);
	}

	splx(s);
}

void
act_set_apc(
	thread_act_t	act)
{
	spl_t		s = splsched();

	if (act == current_act()) {
		thread_ast_set(act, AST_APC);
		ast_propagate(act->ast);
	}
	else {
		thread_t	thread = act->thread;
		processor_t	processor;

		thread_lock(thread);
		thread_ast_set(act, AST_APC);
		processor = thread->last_processor;
		if (	processor != PROCESSOR_NULL			&&
				processor->state == PROCESSOR_RUNNING	&&
				processor->cpu_data->active_thread == thread	)
			cause_ast_check(processor);
		thread_unlock(thread);
	}

	splx(s);
}
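
/*
 * Illustrative sketch (hypothetical): act_set_astbsd() is the hook BSD
 * signal delivery uses to nudge a thread.  Setting the AST on a remote
 * activation also pokes the processor it last ran on, so the AST is noticed
 * promptly rather than at the next natural AST check.
 */
#if 0
static void
example_post_bsd_ast(thread_act_t act)
{
	act_set_astbsd(act);	/* target handles AST_BSD on its next AST check */
}
#endif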

void
act_ulock_release_all(thread_act_t thr_act)
{
	ulock_t	ulock;

	while (!queue_empty(&thr_act->held_ulocks)) {
		ulock = (ulock_t) queue_first(&thr_act->held_ulocks);
		(void) lock_make_unstable(ulock, thr_act);
		(void) lock_release_internal(ulock, thr_act);
	}
}

/*
 * Provide routines (for export to other components) of things that
 * are implemented as macros internally.
 */
thread_act_t
thread_self(void)
{
	thread_act_t self = current_act_fast();

	act_reference(self);
	return self;
}

thread_act_t
mach_thread_self(void)
{
	thread_act_t self = current_act_fast();

	act_reference(self);
	return self;
}

#undef act_reference
void
act_reference(
	thread_act_t thr_act)
{
	act_reference_fast(thr_act);
}

#undef act_deallocate
void
act_deallocate(
	thread_act_t thr_act)
{
	act_deallocate_fast(thr_act);
}