]> git.saurik.com Git - apple/xnu.git/blame - osfmk/kern/ipc_tt.c
xnu-344.49.tar.gz
[apple/xnu.git] / osfmk / kern / ipc_tt.c
CommitLineData
1c79356b
A
1/*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
43866e37
A
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
1c79356b
A
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
43866e37
A
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
1c79356b
A
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25/*
26 * @OSF_COPYRIGHT@
27 */
28/*
29 * Mach Operating System
30 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
31 * All Rights Reserved.
32 *
33 * Permission to use, copy, modify and distribute this software and its
34 * documentation is hereby granted, provided that both the copyright
35 * notice and this permission notice appear in all copies of the
36 * software, derivative works or modified versions, and any portions
37 * thereof, and that both notices appear in supporting documentation.
38 *
39 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
40 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
41 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
42 *
43 * Carnegie Mellon requests users of this software to return to
44 *
45 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
46 * School of Computer Science
47 * Carnegie Mellon University
48 * Pittsburgh PA 15213-3890
49 *
50 * any improvements or extensions that they make and grant Carnegie Mellon
51 * the rights to redistribute these changes.
52 */
53/*
54 */
55
56/*
57 * File: ipc_tt.c
58 * Purpose:
59 * Task and thread related IPC functions.
60 */
61
62#include <mach/boolean.h>
63#include <mach_rt.h>
64#include <mach/kern_return.h>
65#include <mach/mach_param.h>
66#include <mach/task_special_ports.h>
67#include <mach/thread_special_ports.h>
68#include <mach/thread_status.h>
69#include <mach/exception_types.h>
70#include <mach/mach_traps.h>
71#include <mach/task_server.h>
72#include <mach/thread_act_server.h>
73#include <mach/mach_host_server.h>
74#include <mach/vm_map_server.h>
75#include <kern/host.h>
76#include <kern/ipc_tt.h>
77#include <kern/thread_act.h>
78#include <kern/misc_protos.h>
79#include <vm/vm_pageout.h>
80
81/*
82 * Routine: ipc_task_init
83 * Purpose:
84 * Initialize a task's IPC state.
85 *
86 * If non-null, some state will be inherited from the parent.
87 * The parent must be appropriately initialized.
88 * Conditions:
89 * Nothing locked.
90 */
91
void
ipc_task_init(
	task_t		task,
	task_t		parent)
{
	ipc_space_t space;
	ipc_port_t kport;
	kern_return_t kr;
	int i;

	/* Create the task's IPC space; a failure here is unrecoverable. */
	kr = ipc_space_create(&ipc_table_entries[0], &space);
	if (kr != KERN_SUCCESS)
		panic("ipc_task_init");

	/* Allocate the kernel port that represents the task itself. */
	kport = ipc_port_alloc_kernel();
	if (kport == IP_NULL)
		panic("ipc_task_init");

	itk_lock_init(task);
	task->itk_self = kport;
	/* itk_sself is the user-visible "self" send right (may later be interposed). */
	task->itk_sself = ipc_port_make_send(kport);
	task->itk_space = space;
	/* NOTE(review): fast (simplified) space for kernel-loaded tasks — confirm. */
	space->is_fast = task->kernel_loaded;

	if (parent == TASK_NULL) {
		/* No parent: start with null exception/bootstrap/registered ports. */
		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			task->exc_actions[i].port = IP_NULL;
		}/* for */
		task->itk_host = ipc_port_make_send(realhost.host_self);
		task->itk_bootstrap = IP_NULL;
		for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
			task->itk_registered[i] = IP_NULL;
	} else {
		/* Inherit rights from the parent; parent stays locked so its
		 * ports cannot change underneath us while we copy them. */
		itk_lock(parent);
		assert(parent->itk_self != IP_NULL);

		/* inherit registered ports */

		for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
			task->itk_registered[i] =
				ipc_port_copy_send(parent->itk_registered[i]);

		/* inherit exception and bootstrap ports */

		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			task->exc_actions[i].port =
				ipc_port_copy_send(parent->exc_actions[i].port);
			task->exc_actions[i].flavor =
				parent->exc_actions[i].flavor;
			task->exc_actions[i].behavior =
				parent->exc_actions[i].behavior;
		}/* for */
		task->itk_host =
			ipc_port_copy_send(parent->itk_host);

		task->itk_bootstrap =
			ipc_port_copy_send(parent->itk_bootstrap);

		itk_unlock(parent);
	}
}
155
156/*
157 * Routine: ipc_task_enable
158 * Purpose:
159 * Enable a task for IPC access.
160 * Conditions:
161 * Nothing locked.
162 */
163
164void
165ipc_task_enable(
166 task_t task)
167{
168 ipc_port_t kport;
169
170 itk_lock(task);
171 kport = task->itk_self;
172 if (kport != IP_NULL)
173 ipc_kobject_set(kport, (ipc_kobject_t) task, IKOT_TASK);
174 itk_unlock(task);
175}
176
177/*
178 * Routine: ipc_task_disable
179 * Purpose:
180 * Disable IPC access to a task.
181 * Conditions:
182 * Nothing locked.
183 */
184
185void
186ipc_task_disable(
187 task_t task)
188{
189 ipc_port_t kport;
190
191 itk_lock(task);
192 kport = task->itk_self;
193 if (kport != IP_NULL)
194 ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
195 itk_unlock(task);
196}
197
198/*
199 * Routine: ipc_task_terminate
200 * Purpose:
201 * Clean up and destroy a task's IPC state.
202 * Conditions:
203 * Nothing locked. The task must be suspended.
204 * (Or the current thread must be in the task.)
205 */
206
207void
208ipc_task_terminate(
209 task_t task)
210{
211 ipc_port_t kport;
212 int i;
213
214 itk_lock(task);
215 kport = task->itk_self;
216
217 if (kport == IP_NULL) {
218 /* the task is already terminated (can this happen?) */
219 itk_unlock(task);
220 return;
221 }
222
223 task->itk_self = IP_NULL;
224 itk_unlock(task);
225
226 /* release the naked send rights */
227
228 if (IP_VALID(task->itk_sself))
229 ipc_port_release_send(task->itk_sself);
230
231 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
232 if (IP_VALID(task->exc_actions[i].port)) {
233 ipc_port_release_send(task->exc_actions[i].port);
234 }
235 }/* for */
236 if (IP_VALID(task->itk_host))
237 ipc_port_release_send(task->itk_host);
238
239 if (IP_VALID(task->itk_bootstrap))
240 ipc_port_release_send(task->itk_bootstrap);
241
242 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
243 if (IP_VALID(task->itk_registered[i]))
244 ipc_port_release_send(task->itk_registered[i]);
245
246 ipc_port_release_send(task->wired_ledger_port);
247 ipc_port_release_send(task->paged_ledger_port);
248
249 /* destroy the kernel port */
250 ipc_port_dealloc_kernel(kport);
251}
252
253/*
254 * Routine: ipc_thread_init
255 * Purpose:
256 * Initialize a thread's IPC state.
257 * Conditions:
258 * Nothing locked.
259 */
260
261void
262ipc_thread_init(
263 thread_t thread)
264{
265 ipc_kmsg_queue_init(&thread->ith_messages);
266 thread->ith_mig_reply = MACH_PORT_NULL;
267 thread->ith_rpc_reply = IP_NULL;
268}
269
270/*
271 * Routine: ipc_thread_terminate
272 * Purpose:
273 * Clean up and destroy a thread's IPC state.
274 * Conditions:
275 * Nothing locked. The thread must be suspended.
276 * (Or be the current thread.)
277 */
278
279void
280ipc_thread_terminate(
281 thread_t thread)
282{
283 assert(ipc_kmsg_queue_empty(&thread->ith_messages));
284
285 if (thread->ith_rpc_reply != IP_NULL)
286 ipc_port_dealloc_reply(thread->ith_rpc_reply);
287 thread->ith_rpc_reply = IP_NULL;
288}
289
290/*
291 * Routine: ipc_thr_act_init
292 * Purpose:
293 * Initialize an thr_act's IPC state.
294 * Conditions:
295 * Nothing locked.
296 */
297
298void
299ipc_thr_act_init(task_t task, thread_act_t thr_act)
300{
301 ipc_port_t kport; int i;
302
303 kport = ipc_port_alloc_kernel();
304 if (kport == IP_NULL)
305 panic("ipc_thr_act_init");
306
307 thr_act->ith_self = kport;
308 thr_act->ith_sself = ipc_port_make_send(kport);
309
310 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++)
311 thr_act->exc_actions[i].port = IP_NULL;
312
1c79356b
A
313 ipc_kobject_set(kport, (ipc_kobject_t) thr_act, IKOT_ACT);
314}
315
316void
317ipc_thr_act_disable(thread_act_t thr_act)
318{
319 int i;
320 ipc_port_t kport;
321
322 kport = thr_act->ith_self;
323
324 if (kport != IP_NULL)
325 ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
326}
327
328void
329ipc_thr_act_terminate(thread_act_t thr_act)
330{
331 ipc_port_t kport; int i;
332
333 kport = thr_act->ith_self;
334
335 if (kport == IP_NULL) {
336 /* the thread is already terminated (can this happen?) */
337 return;
338 }
339
340 thr_act->ith_self = IP_NULL;
341
342 /* release the naked send rights */
343
344 if (IP_VALID(thr_act->ith_sself))
345 ipc_port_release_send(thr_act->ith_sself);
346 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
347 if (IP_VALID(thr_act->exc_actions[i].port))
348 ipc_port_release_send(thr_act->exc_actions[i].port);
349 }
350
351 /* destroy the kernel port */
352 ipc_port_dealloc_kernel(kport);
353}
354
355/*
356 * Routine: retrieve_task_self_fast
357 * Purpose:
358 * Optimized version of retrieve_task_self,
359 * that only works for the current task.
360 *
361 * Return a send right (possibly null/dead)
362 * for the task's user-visible self port.
363 * Conditions:
364 * Nothing locked.
365 */
366
ipc_port_t
retrieve_task_self_fast(
	register task_t		task)
{
	register ipc_port_t port;

	/* Fast path is only valid for the caller's own task. */
	assert(task == current_task());

	itk_lock(task);
	assert(task->itk_self != IP_NULL);

	if ((port = task->itk_sself) == task->itk_self) {
		/* no interposing */

		/* Make a send right by hand (reference + srights bump)
		 * instead of going through ipc_port_make_send; the port is
		 * known active because it is the task's live kernel port. */
		ip_lock(port);
		assert(ip_active(port));
		ip_reference(port);
		port->ip_srights++;
		ip_unlock(port);
	} else
		/* Interposed self port: may be null or dead, so take the
		 * general copy-send path. */
		port = ipc_port_copy_send(port);
	itk_unlock(task);

	return port;
}
392
393/*
394 * Routine: retrieve_act_self_fast
395 * Purpose:
396 * Optimized version of retrieve_thread_self,
397 * that only works for the current thread.
398 *
399 * Return a send right (possibly null/dead)
400 * for the thread's user-visible self port.
401 * Conditions:
402 * Nothing locked.
403 */
404
ipc_port_t
retrieve_act_self_fast(thread_act_t thr_act)
{
	register ipc_port_t port;

	/* Fast path is only valid for the current activation. */
	assert(thr_act == current_act());
	act_lock(thr_act);
	assert(thr_act->ith_self != IP_NULL);

	if ((port = thr_act->ith_sself) == thr_act->ith_self) {
		/* no interposing */

		/* Hand-rolled ipc_port_make_send: the port is the live
		 * kernel port, so it must be active. */
		ip_lock(port);
		assert(ip_active(port));
		ip_reference(port);
		port->ip_srights++;
		ip_unlock(port);
	} else
		/* Interposed self port: may be null/dead; copy generally. */
		port = ipc_port_copy_send(port);
	act_unlock(thr_act);

	return port;
}
428
429/*
430 * Routine: task_self_trap [mach trap]
431 * Purpose:
432 * Give the caller send rights for his own task port.
433 * Conditions:
434 * Nothing locked.
435 * Returns:
436 * MACH_PORT_NULL if there are any resource failures
437 * or other errors.
438 */
439
440mach_port_name_t
441task_self_trap(void)
442{
443 task_t task = current_task();
444 ipc_port_t sright;
445
446 sright = retrieve_task_self_fast(task);
447 return ipc_port_copyout_send(sright, task->itk_space);
448}
449
450/*
451 * Routine: thread_self_trap [mach trap]
452 * Purpose:
453 * Give the caller send rights for his own thread port.
454 * Conditions:
455 * Nothing locked.
456 * Returns:
457 * MACH_PORT_NULL if there are any resource failures
458 * or other errors.
459 */
460
461mach_port_name_t
462thread_self_trap(void)
463{
464 thread_act_t thr_act = current_act();
465 task_t task = thr_act->task;
466 ipc_port_t sright;
467
468 sright = retrieve_act_self_fast(thr_act);
469 return ipc_port_copyout_send(sright, task->itk_space);
470}
471
472/*
473 * Routine: mach_reply_port [mach trap]
474 * Purpose:
475 * Allocate a port for the caller.
476 * Conditions:
477 * Nothing locked.
478 * Returns:
479 * MACH_PORT_NULL if there are any resource failures
480 * or other errors.
481 */
482
483mach_port_name_t
484mach_reply_port(void)
485{
486 ipc_port_t port;
487 mach_port_name_t name;
488 kern_return_t kr;
489
490 kr = ipc_port_alloc(current_task()->itk_space, &name, &port);
491 if (kr == KERN_SUCCESS)
492 ip_unlock(port);
493 else
494 name = MACH_PORT_NULL;
495
496 return name;
497}
498
499/*
500 * Routine: task_get_special_port [kernel call]
501 * Purpose:
502 * Clones a send right for one of the task's
503 * special ports.
504 * Conditions:
505 * Nothing locked.
506 * Returns:
507 * KERN_SUCCESS Extracted a send right.
508 * KERN_INVALID_ARGUMENT The task is null.
509 * KERN_FAILURE The task/space is dead.
510 * KERN_INVALID_ARGUMENT Invalid special port.
511 */
512
513kern_return_t
514task_get_special_port(
515 task_t task,
516 int which,
517 ipc_port_t *portp)
518{
519 ipc_port_t *whichp;
520 ipc_port_t port;
521
522 if (task == TASK_NULL)
523 return KERN_INVALID_ARGUMENT;
524
525 switch (which) {
526 case TASK_KERNEL_PORT:
527 whichp = &task->itk_sself;
528 break;
529
530 case TASK_HOST_PORT:
531 whichp = &task->itk_host;
532 break;
533
534 case TASK_BOOTSTRAP_PORT:
535 whichp = &task->itk_bootstrap;
536 break;
537
538 case TASK_WIRED_LEDGER_PORT:
539 whichp = &task->wired_ledger_port;
540 break;
541
542 case TASK_PAGED_LEDGER_PORT:
543 whichp = &task->paged_ledger_port;
544 break;
545
546 default:
547 return KERN_INVALID_ARGUMENT;
548 }
549
550 itk_lock(task);
551 if (task->itk_self == IP_NULL) {
552 itk_unlock(task);
553 return KERN_FAILURE;
554 }
555
556 port = ipc_port_copy_send(*whichp);
557 itk_unlock(task);
558
559 *portp = port;
560 return KERN_SUCCESS;
561}
562
563/*
564 * Routine: task_set_special_port [kernel call]
565 * Purpose:
566 * Changes one of the task's special ports,
567 * setting it to the supplied send right.
568 * Conditions:
569 * Nothing locked. If successful, consumes
570 * the supplied send right.
571 * Returns:
572 * KERN_SUCCESS Changed the special port.
573 * KERN_INVALID_ARGUMENT The task is null.
574 * KERN_FAILURE The task/space is dead.
575 * KERN_INVALID_ARGUMENT Invalid special port.
576 */
577
578kern_return_t
579task_set_special_port(
580 task_t task,
581 int which,
582 ipc_port_t port)
583{
584 ipc_port_t *whichp;
585 ipc_port_t old;
586
587 if (task == TASK_NULL)
588 return KERN_INVALID_ARGUMENT;
589
590 switch (which) {
591 case TASK_KERNEL_PORT:
592 whichp = &task->itk_sself;
593 break;
594
595 case TASK_HOST_PORT:
596 whichp = &task->itk_host;
597 break;
598
599 case TASK_BOOTSTRAP_PORT:
600 whichp = &task->itk_bootstrap;
601 break;
602
603 case TASK_WIRED_LEDGER_PORT:
604 whichp = &task->wired_ledger_port;
605 break;
606
607 case TASK_PAGED_LEDGER_PORT:
608 whichp = &task->paged_ledger_port;
609 break;
610
611 default:
612 return KERN_INVALID_ARGUMENT;
613 }/* switch */
614
615 itk_lock(task);
616 if (task->itk_self == IP_NULL) {
617 itk_unlock(task);
618 return KERN_FAILURE;
619 }
620
621 old = *whichp;
622 *whichp = port;
623 itk_unlock(task);
624
625 if (IP_VALID(old))
626 ipc_port_release_send(old);
627 return KERN_SUCCESS;
628}
629
630
631/*
632 * Routine: mach_ports_register [kernel call]
633 * Purpose:
634 * Stash a handful of port send rights in the task.
635 * Child tasks will inherit these rights, but they
636 * must use mach_ports_lookup to acquire them.
637 *
638 * The rights are supplied in a (wired) kalloc'd segment.
639 * Rights which aren't supplied are assumed to be null.
640 * Conditions:
641 * Nothing locked. If successful, consumes
642 * the supplied rights and memory.
643 * Returns:
644 * KERN_SUCCESS Stashed the port rights.
645 * KERN_INVALID_ARGUMENT The task is null.
646 * KERN_INVALID_ARGUMENT The task is dead.
647 * KERN_INVALID_ARGUMENT Too many port rights supplied.
648 */
649
kern_return_t
mach_ports_register(
	task_t			task,
	mach_port_array_t	memory,
	mach_msg_type_number_t	portsCnt)
{
	ipc_port_t ports[TASK_PORT_REGISTER_MAX];
	int i;

	if ((task == TASK_NULL) ||
	    (portsCnt > TASK_PORT_REGISTER_MAX))
		return KERN_INVALID_ARGUMENT;

	/*
	 *	Pad the port rights with nulls.
	 */

	/* Copy into a fixed-size local array so the task lock is never
	 * held while touching caller-supplied memory. */
	for (i = 0; i < portsCnt; i++)
		ports[i] = memory[i];
	for (; i < TASK_PORT_REGISTER_MAX; i++)
		ports[i] = IP_NULL;

	itk_lock(task);
	if (task->itk_self == IP_NULL) {
		itk_unlock(task);
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 *	Replace the old send rights with the new.
	 *	Release the old rights after unlocking.
	 */

	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
		ipc_port_t old;

		old = task->itk_registered[i];
		task->itk_registered[i] = ports[i];
		ports[i] = old;		/* stash the displaced right for release below */
	}

	itk_unlock(task);

	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
		if (IP_VALID(ports[i]))
			ipc_port_release_send(ports[i]);

	/*
	 *	Now that the operation is known to be successful,
	 *	we can free the memory.
	 */

	/* On success this call consumes the caller's kalloc'd array;
	 * on the error returns above the caller keeps ownership. */
	if (portsCnt != 0)
		kfree((vm_offset_t) memory,
		      (vm_size_t) (portsCnt * sizeof(mach_port_t)));

	return KERN_SUCCESS;
}
708
709/*
710 * Routine: mach_ports_lookup [kernel call]
711 * Purpose:
712 * Retrieves (clones) the stashed port send rights.
713 * Conditions:
714 * Nothing locked. If successful, the caller gets
715 * rights and memory.
716 * Returns:
717 * KERN_SUCCESS Retrieved the send rights.
718 * KERN_INVALID_ARGUMENT The task is null.
719 * KERN_INVALID_ARGUMENT The task is dead.
720 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
721 */
722
kern_return_t
mach_ports_lookup(
	task_t			task,
	mach_port_array_t	*portsp,
	mach_msg_type_number_t	*portsCnt)
{
	vm_offset_t memory;
	vm_size_t size;
	ipc_port_t *ports;
	int i;

	kern_return_t kr;

	if (task == TASK_NULL)
		return KERN_INVALID_ARGUMENT;

	size = (vm_size_t) (TASK_PORT_REGISTER_MAX * sizeof(ipc_port_t));

	/* Allocate before taking the lock: kalloc may block. */
	memory = kalloc(size);
	if (memory == 0)
		return KERN_RESOURCE_SHORTAGE;

	itk_lock(task);
	if (task->itk_self == IP_NULL) {
		itk_unlock(task);

		kfree(memory, size);
		return KERN_INVALID_ARGUMENT;
	}

	ports = (ipc_port_t *) memory;

	/*
	 *	Clone port rights.  Because kalloc'd memory
	 *	is wired, we won't fault while holding the task lock.
	 */

	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
		ports[i] = ipc_port_copy_send(task->itk_registered[i]);

	itk_unlock(task);

	/* Caller takes ownership of the array and the cloned rights. */
	*portsp = (mach_port_array_t) ports;
	*portsCnt = TASK_PORT_REGISTER_MAX;
	return KERN_SUCCESS;
}
769
770/*
771 * Routine: convert_port_to_locked_task
772 * Purpose:
773 * Internal helper routine to convert from a port to a locked
774 * task. Used by several routines that try to convert from a
775 * task port to a reference on some task related object.
776 * Conditions:
777 * Nothing locked, blocking OK.
778 */
task_t
convert_port_to_locked_task(ipc_port_t port)
{
	while (IP_VALID(port)) {
		task_t task;

		ip_lock(port);
		if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK)) {
			ip_unlock(port);
			return TASK_NULL;
		}
		task = (task_t) port->ip_kobject;
		assert(task != TASK_NULL);

		/*
		 * Normal lock ordering puts task_lock() before ip_lock().
		 * Attempt out-of-order locking here.
		 */
		if (task_lock_try(task)) {
			ip_unlock(port);
			/* Returns with the task LOCKED; caller must unlock. */
			return(task);
		}

		/* Lock-order conflict: back off, pause, and retry from
		 * scratch (the port may have died meanwhile). */
		ip_unlock(port);
		mutex_pause();
	}
	return TASK_NULL;
}
807
808/*
809 * Routine: convert_port_to_task
810 * Purpose:
811 * Convert from a port to a task.
812 * Doesn't consume the port ref; produces a task ref,
813 * which may be null.
814 * Conditions:
815 * Nothing locked.
816 */
task_t
convert_port_to_task(
	ipc_port_t		port)
{
	task_t task;

	task = convert_port_to_locked_task(port);
	if (task) {
		/* Raw reference bump is safe here: the task lock is held,
		 * which is what protects ref_count in this code base. */
		task->ref_count++;
		task_unlock(task);
	}
	return task;
}
830
831/*
832 * Routine: convert_port_to_space
833 * Purpose:
834 * Convert from a port to a space.
835 * Doesn't consume the port ref; produces a space ref,
836 * which may be null.
837 * Conditions:
838 * Nothing locked.
839 */
840ipc_space_t
841convert_port_to_space(
842 ipc_port_t port)
843{
844 ipc_space_t space;
845 task_t task;
846
847 task = convert_port_to_locked_task(port);
848
849 if (task == TASK_NULL)
850 return IPC_SPACE_NULL;
851
852 if (!task->active) {
853 task_unlock(task);
854 return IPC_SPACE_NULL;
855 }
856
857 space = task->itk_space;
858 is_reference(space);
859 task_unlock(task);
860 return (space);
861}
862
863upl_t
864convert_port_to_upl(
865 ipc_port_t port)
866{
867 upl_t upl;
868
869 ip_lock(port);
870 if (!ip_active(port) || (ip_kotype(port) != IKOT_UPL)) {
871 ip_unlock(port);
872 return (upl_t)NULL;
873 }
874 upl = (upl_t) port->ip_kobject;
875 ip_unlock(port);
876 upl_lock(upl);
877 upl->ref_count+=1;
878 upl_unlock(upl);
879 return upl;
880}
881
0b4e3aa0
A
/* Stub: converting a upl back to a port is not supported; always
 * returns MACH_PORT_NULL. */
mach_port_t
convert_upl_to_port(
	upl_t		upl)
{
	return MACH_PORT_NULL;
}
888
/* No-senders notification handler for upl ports: intentionally a no-op. */
__private_extern__ void
upl_no_senders(
	upl_t			upl,
	mach_port_mscount_t	mscount)
{
	return;
}
896
1c79356b
A
897/*
898 * Routine: convert_port_entry_to_map
899 * Purpose:
900 * Convert from a port specifying an entry or a task
901 * to a map. Doesn't consume the port ref; produces a map ref,
902 * which may be null. Unlike convert_port_to_map, the
903 * port may be task or a named entry backed.
904 * Conditions:
905 * Nothing locked.
906 */
907
908
909vm_map_t
910convert_port_entry_to_map(
911 ipc_port_t port)
912{
913 task_t task;
914 vm_map_t map;
915 vm_named_entry_t named_entry;
916
917 if(IP_VALID(port) && (ip_kotype(port) == IKOT_NAMED_ENTRY)) {
918 while(TRUE) {
919 ip_lock(port);
920 if(ip_active(port) && (ip_kotype(port)
921 == IKOT_NAMED_ENTRY)) {
922 named_entry =
923 (vm_named_entry_t)port->ip_kobject;
924 if (!(mutex_try(&(named_entry)->Lock))) {
925 ip_unlock(port);
926 mutex_pause();
927 continue;
928 }
929 named_entry->ref_count++;
930 mutex_unlock(&(named_entry)->Lock);
931 ip_unlock(port);
932 if ((named_entry->is_sub_map) &&
933 (named_entry->protection
934 & VM_PROT_WRITE)) {
935 map = named_entry->backing.map;
936 } else {
937 mach_destroy_memory_entry(port);
938 return VM_MAP_NULL;
939 }
940 vm_map_reference_swap(map);
941 mach_destroy_memory_entry(port);
942 break;
943 }
944 else
945 return VM_MAP_NULL;
946 }
947 } else {
948 task_t task;
949
950 task = convert_port_to_locked_task(port);
951
952 if (task == TASK_NULL)
953 return VM_MAP_NULL;
954
955 if (!task->active) {
956 task_unlock(task);
957 return VM_MAP_NULL;
958 }
959
960 map = task->map;
961 vm_map_reference_swap(map);
962 task_unlock(task);
963 }
964
965 return map;
966}
967
968/*
969 * Routine: convert_port_entry_to_object
970 * Purpose:
971 * Convert from a port specifying a named entry to an
972 * object. Doesn't consume the port ref; produces a map ref,
973 * which may be null.
974 * Conditions:
975 * Nothing locked.
976 */
977
978
vm_object_t
convert_port_entry_to_object(
	ipc_port_t	port)
{
	vm_object_t object;
	vm_named_entry_t named_entry;

	if(IP_VALID(port) && (ip_kotype(port) == IKOT_NAMED_ENTRY)) {
		while(TRUE) {
			ip_lock(port);
			if(ip_active(port) && (ip_kotype(port)
						== IKOT_NAMED_ENTRY)) {
				named_entry =
					(vm_named_entry_t)port->ip_kobject;
				/* Out-of-order lock attempt on the entry's
				 * mutex; back off and retry on conflict. */
				if (!(mutex_try(&(named_entry)->Lock))) {
					ip_unlock(port);
					mutex_pause();
					continue;
				}
				named_entry->ref_count++;
				mutex_unlock(&(named_entry)->Lock);
				ip_unlock(port);
				/* Only writable non-sub-map entries yield
				 * an object. */
				if ((!named_entry->is_sub_map) &&
				    (named_entry->protection
				     & VM_PROT_WRITE)) {
					object = named_entry->object;
				} else {
					/* drop the ref taken above */
					mach_destroy_memory_entry(port);
					return (vm_object_t)NULL;
				}
				vm_object_reference(named_entry->object);
				mach_destroy_memory_entry(port);
				break;
			}
			else
				return (vm_object_t)NULL;
		}
	} else {
		return (vm_object_t)NULL;
	}

	return object;
}
1022
1023/*
1024 * Routine: convert_port_to_map
1025 * Purpose:
1026 * Convert from a port to a map.
1027 * Doesn't consume the port ref; produces a map ref,
1028 * which may be null.
1029 * Conditions:
1030 * Nothing locked.
1031 */
1032
1033vm_map_t
1034convert_port_to_map(
1035 ipc_port_t port)
1036{
1037 task_t task;
1038 vm_map_t map;
1039
1040 task = convert_port_to_locked_task(port);
1041
1042 if (task == TASK_NULL)
1043 return VM_MAP_NULL;
1044
1045 if (!task->active) {
1046 task_unlock(task);
1047 return VM_MAP_NULL;
1048 }
1049
1050 map = task->map;
1051 vm_map_reference_swap(map);
1052 task_unlock(task);
1053 return map;
1054}
1055
1056
1057/*
1058 * Routine: convert_port_to_act
1059 * Purpose:
1060 * Convert from a port to a thr_act.
1061 * Doesn't consume the port ref; produces an thr_act ref,
1062 * which may be null.
1063 * Conditions:
1064 * Nothing locked.
1065 */
1066
1067thread_act_t
1068convert_port_to_act( ipc_port_t port )
1069{
1070 boolean_t r;
1071 thread_act_t thr_act = 0;
1072
1073 r = FALSE;
1074 while (!r && IP_VALID(port)) {
1075 ip_lock(port);
1076 r = ref_act_port_locked(port, &thr_act);
1077 /* port unlocked */
1078 }
1079 return (thr_act);
1080}
1081
1082boolean_t
1083ref_act_port_locked( ipc_port_t port, thread_act_t *pthr_act )
1084{
1085 thread_act_t thr_act;
1086
1087 thr_act = 0;
1088 if (ip_active(port) &&
1089 (ip_kotype(port) == IKOT_ACT)) {
1090 thr_act = (thread_act_t) port->ip_kobject;
1091 assert(thr_act != THR_ACT_NULL);
1092
1093 /*
1094 * Normal lock ordering is act_lock(), then ip_lock().
1095 * Allow out-of-order locking here, using
1096 * act_reference_act_locked() to accomodate it.
1097 */
1098 if (!act_lock_try(thr_act)) {
1099 ip_unlock(port);
1100 mutex_pause();
1101 return (FALSE);
1102 }
1103 act_locked_act_reference(thr_act);
1104 act_unlock(thr_act);
1105 }
1106 *pthr_act = thr_act;
1107 ip_unlock(port);
1108 return (TRUE);
1109}
1110
1111/*
1112 * Routine: port_name_to_act
1113 * Purpose:
1114 * Convert from a port name to an act reference
1115 * A name of MACH_PORT_NULL is valid for the null act
1116 * Conditions:
1117 * Nothing locked.
1118 */
1119thread_act_t
1120port_name_to_act(
1121 mach_port_name_t name)
1122{
1123 thread_act_t thr_act = THR_ACT_NULL;
1124 ipc_port_t kern_port;
1125 kern_return_t kr;
1126
1127 if (MACH_PORT_VALID(name)) {
1128 kr = ipc_object_copyin(current_space(), name,
1129 MACH_MSG_TYPE_COPY_SEND,
1130 (ipc_object_t *) &kern_port);
1131 if (kr != KERN_SUCCESS)
1132 return THR_ACT_NULL;
1133
1134 thr_act = convert_port_to_act(kern_port);
1135
1136 if (IP_VALID(kern_port))
1137 ipc_port_release_send(kern_port);
1138 }
1139 return thr_act;
1140}
1141
1142task_t
1143port_name_to_task(
1144 mach_port_name_t name)
1145{
1146 ipc_port_t kern_port;
1147 kern_return_t kr;
1148 task_t task = TASK_NULL;
1149
1150 if (MACH_PORT_VALID(name)) {
1151 kr = ipc_object_copyin(current_space(), name,
1152 MACH_MSG_TYPE_COPY_SEND,
1153 (ipc_object_t *) &kern_port);
1154 if (kr != KERN_SUCCESS)
1155 return TASK_NULL;
1156
1157 task = convert_port_to_task(kern_port);
1158
1159 if (IP_VALID(kern_port))
1160 ipc_port_release_send(kern_port);
1161 }
1162 return task;
1163}
1164
1165/*
1166 * Routine: convert_task_to_port
1167 * Purpose:
1168 * Convert from a task to a port.
1169 * Consumes a task ref; produces a naked send right
1170 * which may be invalid.
1171 * Conditions:
1172 * Nothing locked.
1173 */
1174
ipc_port_t
convert_task_to_port(
	task_t		task)
{
	ipc_port_t port;

	itk_lock(task);
	/* NOTE(review): the NORMA_TASK conditional nests an if/else inside
	 * this outer if/else with no braces — fragile but parses as
	 * intended (the final else pairs with the outer if). */
	if (task->itk_self != IP_NULL)
#if	NORMA_TASK
		if (task->map == VM_MAP_NULL)
			/* norma placeholder task */
			port = ipc_port_copy_send(task->itk_self);
		else
#endif	/* NORMA_TASK */
		port = ipc_port_make_send(task->itk_self);
	else
		port = IP_NULL;
	itk_unlock(task);

	/* Consumes the caller's task reference. */
	task_deallocate(task);
	return port;
}
1197
1198/*
1199 * Routine: convert_act_to_port
1200 * Purpose:
1201 * Convert from a thr_act to a port.
1202 * Consumes an thr_act ref; produces a naked send right
1203 * which may be invalid.
1204 * Conditions:
1205 * Nothing locked.
1206 */
1207
1208ipc_port_t
1209convert_act_to_port(thr_act)
1210 thread_act_t thr_act;
1211{
1212 ipc_port_t port;
1213
1214 act_lock(thr_act);
1215 if (thr_act->ith_self != IP_NULL)
1216 port = ipc_port_make_send(thr_act->ith_self);
1217 else
1218 port = IP_NULL;
1219 act_unlock(thr_act);
1220
1221 act_deallocate(thr_act);
1222 return port;
1223}
1224
1225/*
1226 * Routine: space_deallocate
1227 * Purpose:
1228 * Deallocate a space ref produced by convert_port_to_space.
1229 * Conditions:
1230 * Nothing locked.
1231 */
1232
1233void
1234space_deallocate(
1235 ipc_space_t space)
1236{
1237 if (space != IS_NULL)
1238 is_release(space);
1239}
1240
1241/*
1242 * Routine: thread/task_set_exception_ports [kernel call]
1243 * Purpose:
1244 * Sets the thread/task exception port, flavor and
1245 * behavior for the exception types specified by the mask.
1246 * There will be one send right per exception per valid
1247 * port.
1248 * Conditions:
1249 * Nothing locked. If successful, consumes
1250 * the supplied send right.
1251 * Returns:
1252 * KERN_SUCCESS Changed the special port.
1253 * KERN_INVALID_ARGUMENT The thread is null,
1254 * Illegal mask bit set.
1255 * Illegal exception behavior
1256 * KERN_FAILURE The thread is dead.
1257 */
1258
kern_return_t
thread_set_exception_ports(
	thread_act_t		thr_act,
	exception_mask_t	exception_mask,
	ipc_port_t		new_port,
	exception_behavior_t	new_behavior,
	thread_state_flavor_t	new_flavor)
{
	register int i;
	ipc_port_t old_port[EXC_TYPES_COUNT];

	if (!thr_act)
		return KERN_INVALID_ARGUMENT;

	if (exception_mask & ~EXC_MASK_ALL)
		return KERN_INVALID_ARGUMENT;

	/* Behavior is only validated when a real port is being installed. */
	if (IP_VALID(new_port)) {
		switch (new_behavior) {
		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
			break;
		default:
			return KERN_INVALID_ARGUMENT;
		}
	}

	/*
	 * Check the validity of the thread_state_flavor by calling the
	 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
	 * osfmk/mach/ARCHITECTURE/thread_status.h
	 */
	if (!VALID_THREAD_STATE_FLAVOR(new_flavor)) {
		return KERN_INVALID_ARGUMENT;
	}

	act_lock(thr_act);
	if (!thr_act->active) {
		act_unlock(thr_act);
		return KERN_FAILURE;
	}

	/* Install one copied send right per selected exception type,
	 * stashing each displaced right for release after unlocking. */
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (exception_mask & (1 << i)) {
			old_port[i] = thr_act->exc_actions[i].port;
			thr_act->exc_actions[i].port =
				ipc_port_copy_send(new_port);
			thr_act->exc_actions[i].behavior = new_behavior;
			thr_act->exc_actions[i].flavor = new_flavor;
		} else
			old_port[i] = IP_NULL;
	}/* for */
	/*
	 * Consume send rights without any lock held.
	 */
	act_unlock(thr_act);
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++)
		if (IP_VALID(old_port[i]))
			ipc_port_release_send(old_port[i]);
	if (IP_VALID(new_port))		 /* consume send right */
		ipc_port_release_send(new_port);

	return KERN_SUCCESS;
}/* thread_set_exception_port */
1324
kern_return_t
task_set_exception_ports(
	task_t			task,
	exception_mask_t	exception_mask,
	ipc_port_t		new_port,
	exception_behavior_t	new_behavior,
	thread_state_flavor_t	new_flavor)
{
	register int i;
	ipc_port_t old_port[EXC_TYPES_COUNT];

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (exception_mask & ~EXC_MASK_ALL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Behavior is only validated when a real port is being installed. */
	if (IP_VALID(new_port)) {
		switch (new_behavior) {
		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
			break;
		default:
			return KERN_INVALID_ARGUMENT;
		}
	}
	/* Cannot easily check "new_flavor", but that just means that
	 * the flavor in the generated exception message might be garbage:
	 * GIGO */

	itk_lock(task);
	if (task->itk_self == IP_NULL) {
		itk_unlock(task);
		return KERN_FAILURE;
	}

	/* Install one copied send right per selected exception type,
	 * stashing each displaced right for release after unlocking. */
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (exception_mask & (1 << i)) {
			old_port[i] = task->exc_actions[i].port;
			task->exc_actions[i].port =
				ipc_port_copy_send(new_port);
			task->exc_actions[i].behavior = new_behavior;
			task->exc_actions[i].flavor = new_flavor;
		} else
			old_port[i] = IP_NULL;
	}/* for */

	/*
	 * Consume send rights without any lock held.
	 */
	itk_unlock(task);
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++)
		if (IP_VALID(old_port[i]))
			ipc_port_release_send(old_port[i]);
	if (IP_VALID(new_port))		 /* consume send right */
		ipc_port_release_send(new_port);

	return KERN_SUCCESS;
}/* task_set_exception_port */
1387
1388/*
1389 * Routine: thread/task_swap_exception_ports [kernel call]
1390 * Purpose:
1391 * Sets the thread/task exception port, flavor and
1392 * behavior for the exception types specified by the
1393 * mask.
1394 *
1395 * The old ports, behavior and flavors are returned
1396 * Count specifies the array sizes on input and
1397 * the number of returned ports etc. on output. The
1398 * arrays must be large enough to hold all the returned
 * data, MIG returns an error otherwise. The masks
1400 * array specifies the corresponding exception type(s).
1401 *
1402 * Conditions:
1403 * Nothing locked. If successful, consumes
1404 * the supplied send right.
1405 *
 * Returns up to [in] CountCnt elements.
1407 * Returns:
1408 * KERN_SUCCESS Changed the special port.
1409 * KERN_INVALID_ARGUMENT The thread is null,
1410 * Illegal mask bit set.
1411 * Illegal exception behavior
1412 * KERN_FAILURE The thread is dead.
1413 */
1414
1415kern_return_t
1416thread_swap_exception_ports(
1417 thread_act_t thr_act,
1418 exception_mask_t exception_mask,
1419 ipc_port_t new_port,
1420 exception_behavior_t new_behavior,
1421 thread_state_flavor_t new_flavor,
1422 exception_mask_array_t masks,
1423 mach_msg_type_number_t * CountCnt,
1424 exception_port_array_t ports,
1425 exception_behavior_array_t behaviors,
1426 thread_state_flavor_array_t flavors )
1427{
1428 register int i,
1429 j,
1430 count;
1431 ipc_port_t old_port[EXC_TYPES_COUNT];
1432
1433 if (!thr_act)
1434 return KERN_INVALID_ARGUMENT;
1435
1436 if (exception_mask & ~EXC_MASK_ALL) {
1437 return KERN_INVALID_ARGUMENT;
1438 }
1439
1440 if (IP_VALID(new_port)) {
1441 switch (new_behavior) {
1442 case EXCEPTION_DEFAULT:
1443 case EXCEPTION_STATE:
1444 case EXCEPTION_STATE_IDENTITY:
1445 break;
1446 default:
1447 return KERN_INVALID_ARGUMENT;
1448 }
1449 }
1450 /* Cannot easily check "new_flavor", but that just means that
1451 * the flavor in the generated exception message might be garbage:
1452 * GIGO */
1453
1454 act_lock(thr_act);
1455 if (!thr_act->active) {
1456 act_unlock(thr_act);
1457 return KERN_FAILURE;
1458 }
1459
1460 count = 0;
1461
1462 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
1463 if (exception_mask & (1 << i)) {
1464 for (j = 0; j < count; j++) {
1465/*
1466 * search for an identical entry, if found
1467 * set corresponding mask for this exception.
1468 */
1469 if (thr_act->exc_actions[i].port == ports[j] &&
1470 thr_act->exc_actions[i].behavior ==behaviors[j]
1471 && thr_act->exc_actions[i].flavor ==flavors[j])
1472 {
1473 masks[j] |= (1 << i);
1474 break;
1475 }
1476 }/* for */
1477 if (j == count) {
1478 masks[j] = (1 << i);
1479 ports[j] =
1480 ipc_port_copy_send(thr_act->exc_actions[i].port);
1481
1482 behaviors[j] = thr_act->exc_actions[i].behavior;
1483 flavors[j] = thr_act->exc_actions[i].flavor;
1484 count++;
1485 }
1486
1487 old_port[i] = thr_act->exc_actions[i].port;
1488 thr_act->exc_actions[i].port =
1489 ipc_port_copy_send(new_port);
1490 thr_act->exc_actions[i].behavior = new_behavior;
1491 thr_act->exc_actions[i].flavor = new_flavor;
1492 if (count > *CountCnt) {
1493 break;
1494 }
1495 } else
1496 old_port[i] = IP_NULL;
1497 }/* for */
1498
1499 /*
1500 * Consume send rights without any lock held.
1501 */
1502 act_unlock(thr_act);
1503 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++)
1504 if (IP_VALID(old_port[i]))
1505 ipc_port_release_send(old_port[i]);
1506 if (IP_VALID(new_port)) /* consume send right */
1507 ipc_port_release_send(new_port);
1508 *CountCnt = count;
1509 return KERN_SUCCESS;
1510}/* thread_swap_exception_ports */
1511
1512kern_return_t
1513task_swap_exception_ports(
1514 task_t task,
1515 exception_mask_t exception_mask,
1516 ipc_port_t new_port,
1517 exception_behavior_t new_behavior,
1518 thread_state_flavor_t new_flavor,
1519 exception_mask_array_t masks,
1520 mach_msg_type_number_t * CountCnt,
1521 exception_port_array_t ports,
1522 exception_behavior_array_t behaviors,
1523 thread_state_flavor_array_t flavors )
1524{
1525 register int i,
1526 j,
1527 count;
1528 ipc_port_t old_port[EXC_TYPES_COUNT];
1529
1530 if (task == TASK_NULL)
1531 return KERN_INVALID_ARGUMENT;
1532
1533 if (exception_mask & ~EXC_MASK_ALL) {
1534 return KERN_INVALID_ARGUMENT;
1535 }
1536
1537 if (IP_VALID(new_port)) {
1538 switch (new_behavior) {
1539 case EXCEPTION_DEFAULT:
1540 case EXCEPTION_STATE:
1541 case EXCEPTION_STATE_IDENTITY:
1542 break;
1543 default:
1544 return KERN_INVALID_ARGUMENT;
1545 }
1546 }
1547 /* Cannot easily check "new_flavor", but that just means that
1548 * the flavor in the generated exception message might be garbage:
1549 * GIGO */
1550
1551 itk_lock(task);
1552 if (task->itk_self == IP_NULL) {
1553 itk_unlock(task);
1554 return KERN_FAILURE;
1555 }
1556
1557 count = 0;
1558
1559 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
1560 if (exception_mask & (1 << i)) {
1561 for (j = 0; j < count; j++) {
1562/*
1563 * search for an identical entry, if found
1564 * set corresponding mask for this exception.
1565 */
1566 if (task->exc_actions[i].port == ports[j] &&
1567 task->exc_actions[i].behavior == behaviors[j]
1568 && task->exc_actions[i].flavor == flavors[j])
1569 {
1570 masks[j] |= (1 << i);
1571 break;
1572 }
1573 }/* for */
1574 if (j == count) {
1575 masks[j] = (1 << i);
1576 ports[j] =
1577 ipc_port_copy_send(task->exc_actions[i].port);
1578 behaviors[j] = task->exc_actions[i].behavior;
1579 flavors[j] = task->exc_actions[i].flavor;
1580 count++;
1581 }
1582 old_port[i] = task->exc_actions[i].port;
1583 task->exc_actions[i].port =
1584 ipc_port_copy_send(new_port);
1585 task->exc_actions[i].behavior = new_behavior;
1586 task->exc_actions[i].flavor = new_flavor;
1587 if (count > *CountCnt) {
1588 break;
1589 }
1590 } else
1591 old_port[i] = IP_NULL;
1592 }/* for */
1593
1594
1595 /*
1596 * Consume send rights without any lock held.
1597 */
1598 itk_unlock(task);
1599 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++)
1600 if (IP_VALID(old_port[i]))
1601 ipc_port_release_send(old_port[i]);
1602 if (IP_VALID(new_port)) /* consume send right */
1603 ipc_port_release_send(new_port);
1604 *CountCnt = count;
1605
1606 return KERN_SUCCESS;
1607}/* task_swap_exception_ports */
1608
1609/*
1610 * Routine: thread/task_get_exception_ports [kernel call]
1611 * Purpose:
1612 * Clones a send right for each of the thread/task's exception
 * ports specified in the mask and returns the behavior
1614 * and flavor of said port.
1615 *
 * Returns up to [in] CountCnt elements.
1617 *
1618 * Conditions:
1619 * Nothing locked.
1620 * Returns:
1621 * KERN_SUCCESS Extracted a send right.
1622 * KERN_INVALID_ARGUMENT The thread is null,
1623 * Invalid special port,
1624 * Illegal mask bit set.
1625 * KERN_FAILURE The thread is dead.
1626 */
1627
1628kern_return_t
1629thread_get_exception_ports(
1630 thread_act_t thr_act,
1631 exception_mask_t exception_mask,
1632 exception_mask_array_t masks,
1633 mach_msg_type_number_t * CountCnt,
1634 exception_port_array_t ports,
1635 exception_behavior_array_t behaviors,
1636 thread_state_flavor_array_t flavors )
1637{
1638 register int i,
1639 j,
1640 count;
1641
1642 if (!thr_act)
1643 return KERN_INVALID_ARGUMENT;
1644
1645 if (exception_mask & ~EXC_MASK_ALL) {
1646 return KERN_INVALID_ARGUMENT;
1647 }
1648
1649 act_lock(thr_act);
1650 if (!thr_act->active) {
1651 act_unlock(thr_act);
1652 return KERN_FAILURE;
1653 }
1654
1655 count = 0;
1656
1657 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
1658 if (exception_mask & (1 << i)) {
1659 for (j = 0; j < count; j++) {
1660/*
1661 * search for an identical entry, if found
1662 * set corresponding mask for this exception.
1663 */
1664 if (thr_act->exc_actions[i].port == ports[j] &&
1665 thr_act->exc_actions[i].behavior ==behaviors[j]
1666 && thr_act->exc_actions[i].flavor == flavors[j])
1667 {
1668 masks[j] |= (1 << i);
1669 break;
1670 }
1671 }/* for */
1672 if (j == count) {
1673 masks[j] = (1 << i);
1674 ports[j] =
1675 ipc_port_copy_send(thr_act->exc_actions[i].port);
1676 behaviors[j] = thr_act->exc_actions[i].behavior;
1677 flavors[j] = thr_act->exc_actions[i].flavor;
1678 count++;
1679 if (count >= *CountCnt) {
1680 break;
1681 }
1682 }
1683 }
1684 }/* for */
1685
1686 act_unlock(thr_act);
1687
1688 *CountCnt = count;
1689 return KERN_SUCCESS;
1690}/* thread_get_exception_ports */
1691
1692kern_return_t
1693task_get_exception_ports(
1694 task_t task,
1695 exception_mask_t exception_mask,
1696 exception_mask_array_t masks,
1697 mach_msg_type_number_t * CountCnt,
1698 exception_port_array_t ports,
1699 exception_behavior_array_t behaviors,
1700 thread_state_flavor_array_t flavors )
1701{
1702 register int i,
1703 j,
1704 count;
1705
1706 if (task == TASK_NULL)
1707 return KERN_INVALID_ARGUMENT;
1708
1709 if (exception_mask & ~EXC_MASK_ALL) {
1710 return KERN_INVALID_ARGUMENT;
1711 }
1712
1713 itk_lock(task);
1714 if (task->itk_self == IP_NULL) {
1715 itk_unlock(task);
1716 return KERN_FAILURE;
1717 }
1718
1719 count = 0;
1720
1721 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
1722 if (exception_mask & (1 << i)) {
1723 for (j = 0; j < count; j++) {
1724/*
1725 * search for an identical entry, if found
1726 * set corresponding mask for this exception.
1727 */
1728 if (task->exc_actions[i].port == ports[j] &&
1729 task->exc_actions[i].behavior == behaviors[j]
1730 && task->exc_actions[i].flavor == flavors[j])
1731 {
1732 masks[j] |= (1 << i);
1733 break;
1734 }
1735 }/* for */
1736 if (j == count) {
1737 masks[j] = (1 << i);
1738 ports[j] =
1739 ipc_port_copy_send(task->exc_actions[i].port);
1740 behaviors[j] = task->exc_actions[i].behavior;
1741 flavors[j] = task->exc_actions[i].flavor;
1742 count++;
1743 if (count > *CountCnt) {
1744 break;
1745 }
1746 }
1747 }
1748 }/* for */
1749
1750 itk_unlock(task);
1751
1752 *CountCnt = count;
1753 return KERN_SUCCESS;
1754}/* task_get_exception_ports */