1/*
2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23/*
24 * @OSF_COPYRIGHT@
25 */
26/*
27 * Mach Operating System
28 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
29 * All Rights Reserved.
30 *
31 * Permission to use, copy, modify and distribute this software and its
32 * documentation is hereby granted, provided that both the copyright
33 * notice and this permission notice appear in all copies of the
34 * software, derivative works or modified versions, and any portions
35 * thereof, and that both notices appear in supporting documentation.
36 *
37 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
38 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
39 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
40 *
41 * Carnegie Mellon requests users of this software to return to
42 *
43 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
44 * School of Computer Science
45 * Carnegie Mellon University
46 * Pittsburgh PA 15213-3890
47 *
48 * any improvements or extensions that they make and grant Carnegie Mellon
49 * the rights to redistribute these changes.
50 */
51/*
52 */
53
54/*
55 * File: ipc_tt.c
56 * Purpose:
57 * Task and thread related IPC functions.
58 */
59
60#include <mach/mach_types.h>
61#include <mach/boolean.h>
62#include <mach/kern_return.h>
63#include <mach/mach_param.h>
64#include <mach/task_special_ports.h>
65#include <mach/thread_special_ports.h>
66#include <mach/thread_status.h>
67#include <mach/exception_types.h>
68#include <mach/memory_object_types.h>
69#include <mach/mach_traps.h>
70#include <mach/task_server.h>
71#include <mach/thread_act_server.h>
72#include <mach/mach_host_server.h>
73#include <mach/host_priv_server.h>
74#include <mach/vm_map_server.h>
75
76#include <kern/kern_types.h>
77#include <kern/host.h>
78#include <kern/ipc_kobject.h>
79#include <kern/ipc_tt.h>
80#include <kern/kalloc.h>
81#include <kern/thread.h>
82#include <kern/misc_protos.h>
83
84#include <vm/vm_map.h>
85#include <vm/vm_pageout.h>
86#include <vm/vm_shared_memory_server.h>
87#include <vm/vm_protos.h>
88
89/* forward declarations */
90task_t convert_port_to_locked_task(ipc_port_t port);
91
92
93/*
94 * Routine: ipc_task_init
95 * Purpose:
96 * Initialize a task's IPC state.
97 *
 98 * If the parent is non-null, some state is inherited from it.
 99 * The parent must be appropriately initialized.
100 * Conditions:
101 * Nothing locked.
102 */
103
104void
105ipc_task_init(
106 task_t task,
107 task_t parent)
108{
109 ipc_space_t space;
110 ipc_port_t kport;
111 kern_return_t kr;
112 int i;
113
114
115 kr = ipc_space_create(&ipc_table_entries[0], &space);
116 if (kr != KERN_SUCCESS)
117 panic("ipc_task_init");
118
119
120 kport = ipc_port_alloc_kernel();
121 if (kport == IP_NULL)
122 panic("ipc_task_init");
123
124 itk_lock_init(task);
125 task->itk_self = kport;
126 task->itk_sself = ipc_port_make_send(kport);
127 task->itk_space = space;
128 space->is_fast = FALSE;
129
130 if (parent == TASK_NULL) {
131 ipc_port_t port;
132
133 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
134 task->exc_actions[i].port = IP_NULL;
135 }/* for */
136
137 kr = host_get_host_port(host_priv_self(), &port);
138 assert(kr == KERN_SUCCESS);
139 task->itk_host = port;
140
141 task->itk_bootstrap = IP_NULL;
142
143 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
144 task->itk_registered[i] = IP_NULL;
145 } else {
146 itk_lock(parent);
147 assert(parent->itk_self != IP_NULL);
148
149 /* inherit registered ports */
150
151 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
152 task->itk_registered[i] =
153 ipc_port_copy_send(parent->itk_registered[i]);
154
155 /* inherit exception and bootstrap ports */
156
157 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
158 task->exc_actions[i].port =
159 ipc_port_copy_send(parent->exc_actions[i].port);
160 task->exc_actions[i].flavor =
161 parent->exc_actions[i].flavor;
162 task->exc_actions[i].behavior =
163 parent->exc_actions[i].behavior;
164 }/* for */
165 task->itk_host =
166 ipc_port_copy_send(parent->itk_host);
167
168 task->itk_bootstrap =
169 ipc_port_copy_send(parent->itk_bootstrap);
170
171 itk_unlock(parent);
172 }
173}
174
175/*
176 * Routine: ipc_task_enable
177 * Purpose:
178 * Enable a task for IPC access.
179 * Conditions:
180 * Nothing locked.
181 */
182
183void
184ipc_task_enable(
185 task_t task)
186{
187 ipc_port_t kport;
188
189 itk_lock(task);
190 kport = task->itk_self;
191 if (kport != IP_NULL)
192 ipc_kobject_set(kport, (ipc_kobject_t) task, IKOT_TASK);
193 itk_unlock(task);
194}
195
196/*
197 * Routine: ipc_task_disable
198 * Purpose:
199 * Disable IPC access to a task.
200 * Conditions:
201 * Nothing locked.
202 */
203
204void
205ipc_task_disable(
206 task_t task)
207{
208 ipc_port_t kport;
209
210 itk_lock(task);
211 kport = task->itk_self;
212 if (kport != IP_NULL)
213 ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
214 itk_unlock(task);
215}
216
217/*
218 * Routine: ipc_task_terminate
219 * Purpose:
220 * Clean up and destroy a task's IPC state.
221 * Conditions:
222 * Nothing locked. The task must be suspended.
223 * (Or the current thread must be in the task.)
224 */
225
226void
227ipc_task_terminate(
228 task_t task)
229{
230 ipc_port_t kport;
231 int i;
232
233 itk_lock(task);
234 kport = task->itk_self;
235
236 if (kport == IP_NULL) {
237 /* the task is already terminated (can this happen?) */
238 itk_unlock(task);
239 return;
240 }
241
242 task->itk_self = IP_NULL;
243 itk_unlock(task);
244
245 /* release the naked send rights */
246
247 if (IP_VALID(task->itk_sself))
248 ipc_port_release_send(task->itk_sself);
249
250 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
251 if (IP_VALID(task->exc_actions[i].port)) {
252 ipc_port_release_send(task->exc_actions[i].port);
253 }
254 }
255
256 if (IP_VALID(task->itk_host))
257 ipc_port_release_send(task->itk_host);
258
259 if (IP_VALID(task->itk_bootstrap))
260 ipc_port_release_send(task->itk_bootstrap);
261
262 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
263 if (IP_VALID(task->itk_registered[i]))
264 ipc_port_release_send(task->itk_registered[i]);
265
266 ipc_port_release_send(task->wired_ledger_port);
267 ipc_port_release_send(task->paged_ledger_port);
268
269 /* destroy the kernel port */
270 ipc_port_dealloc_kernel(kport);
271}
272
273/*
274 * Routine: ipc_task_reset
275 * Purpose:
276 * Reset a task's IPC state to protect it when
277 * it enters an elevated security context.
278 * Conditions:
279 * Nothing locked. The task must be suspended.
280 * (Or the current thread must be in the task.)
281 */
282
283void
284ipc_task_reset(
285 task_t task)
286{
287 ipc_port_t old_kport, new_kport;
288 ipc_port_t old_sself;
289#if 0
290 ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
291 int i;
292#endif
293
294 new_kport = ipc_port_alloc_kernel();
295 if (new_kport == IP_NULL)
296 panic("ipc_task_reset");
297
298 itk_lock(task);
299
300 old_kport = task->itk_self;
301
302 if (old_kport == IP_NULL) {
303 /* the task is already terminated (can this happen?) */
304 itk_unlock(task);
305 ipc_port_dealloc_kernel(new_kport);
306 return;
307 }
308
309 task->itk_self = new_kport;
310 old_sself = task->itk_sself;
311 task->itk_sself = ipc_port_make_send(new_kport);
312 ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
313 ipc_kobject_set(new_kport, (ipc_kobject_t) task, IKOT_TASK);
314
315#if 0
316 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
 317		old_exc_actions[i] = task->exc_actions[i].port;
318 task->exc_actions[i].port = IP_NULL;
319 }/* for */
320#endif
321
322 itk_unlock(task);
323
324 /* release the naked send rights */
325
326 if (IP_VALID(old_sself))
327 ipc_port_release_send(old_sself);
328
329#if 0
330 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
331 if (IP_VALID(old_exc_actions[i])) {
332 ipc_port_release_send(old_exc_actions[i]);
333 }
334 }/* for */
335#endif
336
337 /* destroy the kernel port */
338 ipc_port_dealloc_kernel(old_kport);
339}
340
341/*
342 * Routine: ipc_thread_init
343 * Purpose:
344 * Initialize a thread's IPC state.
345 * Conditions:
346 * Nothing locked.
347 */
348
349void
350ipc_thread_init(
351 thread_t thread)
352{
353 ipc_port_t kport;
354 int i;
355
356 kport = ipc_port_alloc_kernel();
357 if (kport == IP_NULL)
358 panic("ipc_thread_init");
359
360 thread->ith_self = kport;
361 thread->ith_sself = ipc_port_make_send(kport);
362
363 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
364 thread->exc_actions[i].port = IP_NULL;
365
366 ipc_kobject_set(kport, (ipc_kobject_t)thread, IKOT_THREAD);
367
368 ipc_kmsg_queue_init(&thread->ith_messages);
369
370 thread->ith_rpc_reply = IP_NULL;
371}
372
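/*
 *	Routine:	ipc_thread_disable
 *	Purpose:
 *		Disable IPC access to a thread.
 *	Conditions:
 *		Nothing locked.
 */
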
373void
374ipc_thread_disable(
375 thread_t thread)
376{
377 ipc_port_t kport = thread->ith_self;
378
379 if (kport != IP_NULL)
380 ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
381}
382
383/*
384 * Routine: ipc_thread_terminate
385 * Purpose:
386 * Clean up and destroy a thread's IPC state.
387 * Conditions:
388 * Nothing locked.
389 */
390
391void
392ipc_thread_terminate(
393 thread_t thread)
394{
395 ipc_port_t kport = thread->ith_self;
396
397 if (kport != IP_NULL) {
398 int i;
399
400 if (IP_VALID(thread->ith_sself))
401 ipc_port_release_send(thread->ith_sself);
402
403 thread->ith_sself = thread->ith_self = IP_NULL;
404
405 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
406 if (IP_VALID(thread->exc_actions[i].port))
407 ipc_port_release_send(thread->exc_actions[i].port);
408 }
409
410 ipc_port_dealloc_kernel(kport);
411 }
412
413 assert(ipc_kmsg_queue_empty(&thread->ith_messages));
414
415 if (thread->ith_rpc_reply != IP_NULL)
416 ipc_port_dealloc_reply(thread->ith_rpc_reply);
417
418 thread->ith_rpc_reply = IP_NULL;
419}
420
421/*
422 * Routine: retrieve_task_self_fast
423 * Purpose:
 424 * Optimized version of retrieve_task_self
 425 * that only works for the current task.
426 *
427 * Return a send right (possibly null/dead)
428 * for the task's user-visible self port.
429 * Conditions:
430 * Nothing locked.
431 */
432
433ipc_port_t
434retrieve_task_self_fast(
435 register task_t task)
436{
437 register ipc_port_t port;
438
439 assert(task == current_task());
440
441 itk_lock(task);
442 assert(task->itk_self != IP_NULL);
443
444 if ((port = task->itk_sself) == task->itk_self) {
445 /* no interposing */
446
447 ip_lock(port);
448 assert(ip_active(port));
449 ip_reference(port);
450 port->ip_srights++;
451 ip_unlock(port);
452 } else
453 port = ipc_port_copy_send(port);
454 itk_unlock(task);
455
456 return port;
457}
458
459/*
460 * Routine: retrieve_thread_self_fast
461 * Purpose:
462 * Return a send right (possibly null/dead)
463 * for the thread's user-visible self port.
464 *
465 * Only works for the current thread.
466 *
467 * Conditions:
468 * Nothing locked.
469 */
470
471ipc_port_t
472retrieve_thread_self_fast(
473 thread_t thread)
474{
475 register ipc_port_t port;
476
477 assert(thread == current_thread());
478
479 thread_mtx_lock(thread);
480
481 assert(thread->ith_self != IP_NULL);
482
483 if ((port = thread->ith_sself) == thread->ith_self) {
484 /* no interposing */
485
486 ip_lock(port);
487 assert(ip_active(port));
488 ip_reference(port);
489 port->ip_srights++;
490 ip_unlock(port);
491 }
492 else
493 port = ipc_port_copy_send(port);
494
495 thread_mtx_unlock(thread);
496
497 return port;
498}
499
500/*
501 * Routine: task_self_trap [mach trap]
502 * Purpose:
503 * Give the caller send rights for his own task port.
504 * Conditions:
505 * Nothing locked.
506 * Returns:
507 * MACH_PORT_NULL if there are any resource failures
508 * or other errors.
509 */
510
511mach_port_name_t
512task_self_trap(
513 __unused struct task_self_trap_args *args)
514{
515 task_t task = current_task();
516 ipc_port_t sright;
517 mach_port_name_t name;
518
519 sright = retrieve_task_self_fast(task);
520 name = ipc_port_copyout_send(sright, task->itk_space);
521 return name;
522}
523
524/*
525 * Routine: thread_self_trap [mach trap]
526 * Purpose:
527 * Give the caller send rights for his own thread port.
528 * Conditions:
529 * Nothing locked.
530 * Returns:
531 * MACH_PORT_NULL if there are any resource failures
532 * or other errors.
533 */
534
535mach_port_name_t
536thread_self_trap(
537 __unused struct thread_self_trap_args *args)
538{
539 thread_t thread = current_thread();
540 task_t task = thread->task;
541 ipc_port_t sright;
542 mach_port_name_t name;
543
544 sright = retrieve_thread_self_fast(thread);
545 name = ipc_port_copyout_send(sright, task->itk_space);
546 return name;
547
548}
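
/*
 * Illustrative user-space sketch (not part of the original source): these
 * traps back the familiar mach_task_self()/mach_thread_self() calls.
 * mach_thread_self() returns a fresh send right on every call, so callers
 * are expected to deallocate it; mach_task_self() is normally a cached
 * name and needs no matching deallocation.  The example function name is
 * ours; everything else is the standard user-level Mach interface.
 */
#if 0	/* user-space example; not compiled into the kernel */
#include <mach/mach.h>

static void
self_ports_example(void)
{
	mach_port_t task = mach_task_self();		/* cached task self port */
	mach_port_t thread = mach_thread_self();	/* new send right each call */

	/* ... use the ports with task_info(), thread_info(), etc. ... */

	(void) mach_port_deallocate(mach_task_self(), thread);
	(void) task;
}
#endif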
549
550/*
551 * Routine: mach_reply_port [mach trap]
552 * Purpose:
553 * Allocate a port for the caller.
554 * Conditions:
555 * Nothing locked.
556 * Returns:
557 * MACH_PORT_NULL if there are any resource failures
558 * or other errors.
559 */
560
561mach_port_name_t
562mach_reply_port(
563 __unused struct mach_reply_port_args *args)
564{
565 ipc_port_t port;
566 mach_port_name_t name;
567 kern_return_t kr;
568
569 kr = ipc_port_alloc(current_task()->itk_space, &name, &port);
570 if (kr == KERN_SUCCESS)
571 ip_unlock(port);
572 else
573 name = MACH_PORT_NULL;
574 return name;
575}
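
/*
 * Illustrative user-space sketch (not part of the original source),
 * assuming the mach_reply_port() trap wrapper exported by the system's
 * Mach library: a reply port is typically used as the local (reply)
 * right when receiving, e.g. by the MIG runtime.  The example function
 * name is ours.
 */
#if 0	/* user-space example; not compiled into the kernel */
#include <mach/mach.h>

static kern_return_t
reply_port_example(void)
{
	mach_port_t reply = mach_reply_port();
	struct {
		mach_msg_header_t	header;
		mach_msg_trailer_t	trailer;
	} buffer;

	/* wait for a (bodyless) message addressed to the reply port */
	return mach_msg(&buffer.header, MACH_RCV_MSG, 0, sizeof buffer,
	    reply, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
}
#endif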
576
577/*
578 * Routine: thread_get_special_port [kernel call]
579 * Purpose:
580 * Clones a send right for one of the thread's
581 * special ports.
582 * Conditions:
583 * Nothing locked.
584 * Returns:
585 * KERN_SUCCESS Extracted a send right.
586 * KERN_INVALID_ARGUMENT The thread is null.
587 * KERN_FAILURE The thread is dead.
588 * KERN_INVALID_ARGUMENT Invalid special port.
589 */
590
591kern_return_t
592thread_get_special_port(
593 thread_t thread,
594 int which,
595 ipc_port_t *portp)
596{
597 kern_return_t result = KERN_SUCCESS;
598 ipc_port_t *whichp;
599
600 if (thread == THREAD_NULL)
601 return (KERN_INVALID_ARGUMENT);
602
603 switch (which) {
604
605 case THREAD_KERNEL_PORT:
606 whichp = &thread->ith_sself;
607 break;
608
609 default:
610 return (KERN_INVALID_ARGUMENT);
611 }
612
613 thread_mtx_lock(thread);
614
615 if (thread->active)
616 *portp = ipc_port_copy_send(*whichp);
617 else
618 result = KERN_FAILURE;
619
620 thread_mtx_unlock(thread);
621
622 return (result);
623}
624
625/*
626 * Routine: thread_set_special_port [kernel call]
627 * Purpose:
628 * Changes one of the thread's special ports,
629 * setting it to the supplied send right.
630 * Conditions:
631 * Nothing locked. If successful, consumes
632 * the supplied send right.
633 * Returns:
634 * KERN_SUCCESS Changed the special port.
635 * KERN_INVALID_ARGUMENT The thread is null.
636 * KERN_FAILURE The thread is dead.
637 * KERN_INVALID_ARGUMENT Invalid special port.
638 */
639
640kern_return_t
641thread_set_special_port(
642 thread_t thread,
643 int which,
644 ipc_port_t port)
645{
646 kern_return_t result = KERN_SUCCESS;
647 ipc_port_t *whichp, old = IP_NULL;
648
649 if (thread == THREAD_NULL)
650 return (KERN_INVALID_ARGUMENT);
651
652 switch (which) {
653
654 case THREAD_KERNEL_PORT:
655 whichp = &thread->ith_sself;
656 break;
657
658 default:
659 return (KERN_INVALID_ARGUMENT);
660 }
661
662 thread_mtx_lock(thread);
663
664 if (thread->active) {
665 old = *whichp;
666 *whichp = port;
667 }
668 else
669 result = KERN_FAILURE;
670
671 thread_mtx_unlock(thread);
672
673 if (IP_VALID(old))
674 ipc_port_release_send(old);
675
676 return (result);
677}
678
679/*
680 * Routine: task_get_special_port [kernel call]
681 * Purpose:
682 * Clones a send right for one of the task's
683 * special ports.
684 * Conditions:
685 * Nothing locked.
686 * Returns:
687 * KERN_SUCCESS Extracted a send right.
688 * KERN_INVALID_ARGUMENT The task is null.
689 * KERN_FAILURE The task/space is dead.
690 * KERN_INVALID_ARGUMENT Invalid special port.
691 */
692
693kern_return_t
694task_get_special_port(
695 task_t task,
696 int which,
697 ipc_port_t *portp)
698{
699 ipc_port_t *whichp;
700 ipc_port_t port;
701
702 if (task == TASK_NULL)
703 return KERN_INVALID_ARGUMENT;
704
705 switch (which) {
706 case TASK_KERNEL_PORT:
707 whichp = &task->itk_sself;
708 break;
709
710 case TASK_HOST_PORT:
711 whichp = &task->itk_host;
712 break;
713
714 case TASK_BOOTSTRAP_PORT:
715 whichp = &task->itk_bootstrap;
716 break;
717
718 case TASK_WIRED_LEDGER_PORT:
719 whichp = &task->wired_ledger_port;
720 break;
721
722 case TASK_PAGED_LEDGER_PORT:
723 whichp = &task->paged_ledger_port;
724 break;
725
726 default:
727 return KERN_INVALID_ARGUMENT;
728 }
729
730 itk_lock(task);
731 if (task->itk_self == IP_NULL) {
732 itk_unlock(task);
733 return KERN_FAILURE;
734 }
735
736 port = ipc_port_copy_send(*whichp);
737 itk_unlock(task);
738
739 *portp = port;
740 return KERN_SUCCESS;
741}
742
743/*
744 * Routine: task_set_special_port [kernel call]
745 * Purpose:
746 * Changes one of the task's special ports,
747 * setting it to the supplied send right.
748 * Conditions:
749 * Nothing locked. If successful, consumes
750 * the supplied send right.
751 * Returns:
752 * KERN_SUCCESS Changed the special port.
753 * KERN_INVALID_ARGUMENT The task is null.
754 * KERN_FAILURE The task/space is dead.
755 * KERN_INVALID_ARGUMENT Invalid special port.
756 */
757
758kern_return_t
759task_set_special_port(
760 task_t task,
761 int which,
762 ipc_port_t port)
763{
764 ipc_port_t *whichp;
765 ipc_port_t old;
766
767 if (task == TASK_NULL)
768 return KERN_INVALID_ARGUMENT;
769
770 switch (which) {
771 case TASK_KERNEL_PORT:
772 whichp = &task->itk_sself;
773 break;
774
775 case TASK_HOST_PORT:
776 whichp = &task->itk_host;
777 break;
778
779 case TASK_BOOTSTRAP_PORT:
780 whichp = &task->itk_bootstrap;
781 break;
782
783 case TASK_WIRED_LEDGER_PORT:
784 whichp = &task->wired_ledger_port;
785 break;
786
787 case TASK_PAGED_LEDGER_PORT:
788 whichp = &task->paged_ledger_port;
789 break;
790
791 default:
792 return KERN_INVALID_ARGUMENT;
793 }/* switch */
794
795 itk_lock(task);
796 if (task->itk_self == IP_NULL) {
797 itk_unlock(task);
798 return KERN_FAILURE;
799 }
800
801 old = *whichp;
802 *whichp = port;
803 itk_unlock(task);
804
805 if (IP_VALID(old))
806 ipc_port_release_send(old);
807 return KERN_SUCCESS;
808}
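
/*
 * Illustrative user-space sketch (not part of the original source): reading
 * and replacing one of the caller's own special ports through the
 * task_get/set_special_port interfaces implemented above, using the
 * bootstrap port as the usual example.  The example function name is ours.
 */
#if 0	/* user-space example; not compiled into the kernel */
#include <mach/mach.h>

static kern_return_t
bootstrap_port_example(mach_port_t new_bootstrap)
{
	mach_port_t old_bootstrap;
	kern_return_t kr;

	kr = task_get_special_port(mach_task_self(), TASK_BOOTSTRAP_PORT,
	    &old_bootstrap);
	if (kr != KERN_SUCCESS)
		return kr;

	/* the right we were handed is ours to release when done */
	(void) mach_port_deallocate(mach_task_self(), old_bootstrap);

	/* install the replacement bootstrap port */
	return task_set_special_port(mach_task_self(), TASK_BOOTSTRAP_PORT,
	    new_bootstrap);
}
#endif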
809
810
811/*
812 * Routine: mach_ports_register [kernel call]
813 * Purpose:
814 * Stash a handful of port send rights in the task.
815 * Child tasks will inherit these rights, but they
816 * must use mach_ports_lookup to acquire them.
817 *
818 * The rights are supplied in a (wired) kalloc'd segment.
819 * Rights which aren't supplied are assumed to be null.
820 * Conditions:
821 * Nothing locked. If successful, consumes
822 * the supplied rights and memory.
823 * Returns:
824 * KERN_SUCCESS Stashed the port rights.
825 * KERN_INVALID_ARGUMENT The task is null.
826 * KERN_INVALID_ARGUMENT The task is dead.
827 * KERN_INVALID_ARGUMENT Too many port rights supplied.
828 */
829
830kern_return_t
831mach_ports_register(
832 task_t task,
833 mach_port_array_t memory,
834 mach_msg_type_number_t portsCnt)
835{
836 ipc_port_t ports[TASK_PORT_REGISTER_MAX];
837 unsigned int i;
838
839 if ((task == TASK_NULL) ||
840 (portsCnt > TASK_PORT_REGISTER_MAX))
841 return KERN_INVALID_ARGUMENT;
842
843 /*
844 * Pad the port rights with nulls.
845 */
846
847 for (i = 0; i < portsCnt; i++)
848 ports[i] = memory[i];
849 for (; i < TASK_PORT_REGISTER_MAX; i++)
850 ports[i] = IP_NULL;
851
852 itk_lock(task);
853 if (task->itk_self == IP_NULL) {
854 itk_unlock(task);
855 return KERN_INVALID_ARGUMENT;
856 }
857
858 /*
859 * Replace the old send rights with the new.
860 * Release the old rights after unlocking.
861 */
862
863 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
864 ipc_port_t old;
865
866 old = task->itk_registered[i];
867 task->itk_registered[i] = ports[i];
868 ports[i] = old;
869 }
870
871 itk_unlock(task);
872
873 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
874 if (IP_VALID(ports[i]))
875 ipc_port_release_send(ports[i]);
876
877 /*
878 * Now that the operation is known to be successful,
879 * we can free the memory.
880 */
881
882 if (portsCnt != 0)
883 kfree(memory,
884 (vm_size_t) (portsCnt * sizeof(mach_port_t)));
885
886 return KERN_SUCCESS;
887}
888
889/*
890 * Routine: mach_ports_lookup [kernel call]
891 * Purpose:
892 * Retrieves (clones) the stashed port send rights.
893 * Conditions:
894 * Nothing locked. If successful, the caller gets
895 * rights and memory.
896 * Returns:
897 * KERN_SUCCESS Retrieved the send rights.
898 * KERN_INVALID_ARGUMENT The task is null.
899 * KERN_INVALID_ARGUMENT The task is dead.
900 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
901 */
902
903kern_return_t
904mach_ports_lookup(
905 task_t task,
906 mach_port_array_t *portsp,
907 mach_msg_type_number_t *portsCnt)
908{
909 void *memory;
910 vm_size_t size;
911 ipc_port_t *ports;
912 int i;
913
914 if (task == TASK_NULL)
915 return KERN_INVALID_ARGUMENT;
916
917 size = (vm_size_t) (TASK_PORT_REGISTER_MAX * sizeof(ipc_port_t));
918
919 memory = kalloc(size);
920 if (memory == 0)
921 return KERN_RESOURCE_SHORTAGE;
922
923 itk_lock(task);
924 if (task->itk_self == IP_NULL) {
925 itk_unlock(task);
926
927 kfree(memory, size);
928 return KERN_INVALID_ARGUMENT;
929 }
930
931 ports = (ipc_port_t *) memory;
932
933 /*
934 * Clone port rights. Because kalloc'd memory
935 * is wired, we won't fault while holding the task lock.
936 */
937
938 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
939 ports[i] = ipc_port_copy_send(task->itk_registered[i]);
940
941 itk_unlock(task);
942
943 *portsp = (mach_port_array_t) ports;
944 *portsCnt = TASK_PORT_REGISTER_MAX;
945 return KERN_SUCCESS;
946}
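
/*
 * Illustrative user-space sketch (not part of the original source): rights
 * stashed with mach_ports_register() are retrieved with mach_ports_lookup(),
 * which hands the caller an out-of-line array.  Both the port rights and
 * the array itself belong to the caller afterwards.  The example function
 * name is ours.
 */
#if 0	/* user-space example; not compiled into the kernel */
#include <mach/mach.h>

static void
registered_ports_example(void)
{
	mach_port_array_t ports;
	mach_msg_type_number_t count;
	unsigned int i;

	if (mach_ports_lookup(mach_task_self(), &ports, &count) != KERN_SUCCESS)
		return;

	for (i = 0; i < count; i++) {
		/* ... use ports[i] ... */
		if (MACH_PORT_VALID(ports[i]))
			(void) mach_port_deallocate(mach_task_self(), ports[i]);
	}

	/* the array arrives as out-of-line memory */
	(void) vm_deallocate(mach_task_self(), (vm_address_t) ports,
	    count * sizeof (ports[0]));
}
#endif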
947
948/*
949 * Routine: convert_port_to_locked_task
950 * Purpose:
951 * Internal helper routine to convert from a port to a locked
952 * task. Used by several routines that try to convert from a
953 * task port to a reference on some task related object.
954 * Conditions:
955 * Nothing locked, blocking OK.
956 */
957task_t
958convert_port_to_locked_task(ipc_port_t port)
959{
960 while (IP_VALID(port)) {
961 task_t task;
962
963 ip_lock(port);
964 if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK)) {
965 ip_unlock(port);
966 return TASK_NULL;
967 }
968 task = (task_t) port->ip_kobject;
969 assert(task != TASK_NULL);
970
971 /*
972 * Normal lock ordering puts task_lock() before ip_lock().
973 * Attempt out-of-order locking here.
974 */
975 if (task_lock_try(task)) {
976 ip_unlock(port);
977 return(task);
978 }
979
980 ip_unlock(port);
981 mutex_pause();
982 }
983 return TASK_NULL;
984}
985
986/*
987 * Routine: convert_port_to_task
988 * Purpose:
989 * Convert from a port to a task.
990 * Doesn't consume the port ref; produces a task ref,
991 * which may be null.
992 * Conditions:
993 * Nothing locked.
994 */
995task_t
996convert_port_to_task(
997 ipc_port_t port)
998{
999 task_t task = TASK_NULL;
1000
1001 if (IP_VALID(port)) {
1002 ip_lock(port);
1003
1004 if ( ip_active(port) &&
1005 ip_kotype(port) == IKOT_TASK ) {
1006 task = (task_t)port->ip_kobject;
1007 assert(task != TASK_NULL);
1008
1009 task_reference_internal(task);
1010 }
1011
1012 ip_unlock(port);
1013 }
1014
1015 return (task);
1016}
1017
1018/*
1019 * Routine: convert_port_to_space
1020 * Purpose:
1021 * Convert from a port to a space.
1022 * Doesn't consume the port ref; produces a space ref,
1023 * which may be null.
1024 * Conditions:
1025 * Nothing locked.
1026 */
1027ipc_space_t
1028convert_port_to_space(
1029 ipc_port_t port)
1030{
1031 ipc_space_t space;
1032 task_t task;
1033
1034 task = convert_port_to_locked_task(port);
1035
1036 if (task == TASK_NULL)
1037 return IPC_SPACE_NULL;
1038
1039 if (!task->active) {
1040 task_unlock(task);
1041 return IPC_SPACE_NULL;
1042 }
1043
1044 space = task->itk_space;
1045 is_reference(space);
1046 task_unlock(task);
1047 return (space);
1048}
1049
1050/*
1051 * Routine: convert_port_to_map
1052 * Purpose:
1053 * Convert from a port to a map.
1054 * Doesn't consume the port ref; produces a map ref,
1055 * which may be null.
1056 * Conditions:
1057 * Nothing locked.
1058 */
1059
1060vm_map_t
1061convert_port_to_map(
1062 ipc_port_t port)
1063{
1064 task_t task;
1065 vm_map_t map;
1066
1067 task = convert_port_to_locked_task(port);
1068
1069 if (task == TASK_NULL)
1070 return VM_MAP_NULL;
1071
1072 if (!task->active) {
1073 task_unlock(task);
1074 return VM_MAP_NULL;
1075 }
1076
1077 map = task->map;
1078 vm_map_reference_swap(map);
1079 task_unlock(task);
1080 return map;
1081}
1082
1083
1084/*
1085 * Routine: convert_port_to_thread
1086 * Purpose:
1087 * Convert from a port to a thread.
 1088 * Doesn't consume the port ref; produces a thread ref,
1089 * which may be null.
1090 * Conditions:
1091 * Nothing locked.
1092 */
1093
1094thread_t
1095convert_port_to_thread(
1096 ipc_port_t port)
1097{
1098 thread_t thread = THREAD_NULL;
1099
1100 if (IP_VALID(port)) {
1101 ip_lock(port);
1102
1103 if ( ip_active(port) &&
1104 ip_kotype(port) == IKOT_THREAD ) {
1105 thread = (thread_t)port->ip_kobject;
1106 assert(thread != THREAD_NULL);
1107
1108 thread_reference_internal(thread);
1109 }
1110
1111 ip_unlock(port);
1112 }
1113
1114 return (thread);
1115}
1116
1117/*
1118 * Routine: port_name_to_thread
1119 * Purpose:
 1120 * Convert from a port name to a thread reference.
1121 * A name of MACH_PORT_NULL is valid for the null thread.
1122 * Conditions:
1123 * Nothing locked.
1124 */
1125thread_t
1126port_name_to_thread(
1127 mach_port_name_t name)
1128{
1129 thread_t thread = THREAD_NULL;
1130 ipc_port_t kport;
1131
1132 if (MACH_PORT_VALID(name)) {
1133 if (ipc_object_copyin(current_space(), name,
1134 MACH_MSG_TYPE_COPY_SEND,
1135 (ipc_object_t *)&kport) != KERN_SUCCESS)
1136 return (THREAD_NULL);
1137
1138 thread = convert_port_to_thread(kport);
1139
1140 if (IP_VALID(kport))
1141 ipc_port_release_send(kport);
1142 }
1143
1144 return (thread);
1145}
1146
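/*
 *	Routine:	port_name_to_task
 *	Purpose:
 *		Convert from a port name to a task reference.
 *		A name of MACH_PORT_NULL is valid for the null task.
 *	Conditions:
 *		Nothing locked.
 */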
1147task_t
1148port_name_to_task(
1149 mach_port_name_t name)
1150{
1151 ipc_port_t kern_port;
1152 kern_return_t kr;
1153 task_t task = TASK_NULL;
1154
1155 if (MACH_PORT_VALID(name)) {
1156 kr = ipc_object_copyin(current_space(), name,
1157 MACH_MSG_TYPE_COPY_SEND,
1158 (ipc_object_t *) &kern_port);
1159 if (kr != KERN_SUCCESS)
1160 return TASK_NULL;
1161
1162 task = convert_port_to_task(kern_port);
1163
1164 if (IP_VALID(kern_port))
1165 ipc_port_release_send(kern_port);
1166 }
1167 return task;
1168}
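
/*
 * Illustrative in-kernel sketch (not part of the original source): callers
 * of port_name_to_thread()/port_name_to_task() own the returned reference
 * and must drop it when they are done.  The example function name is ours.
 */
#if 0	/* illustrative only */
static void
port_name_to_task_example(mach_port_name_t name)
{
	task_t task = port_name_to_task(name);

	if (task != TASK_NULL) {
		/* ... operate on the task ... */
		task_deallocate(task);	/* drop the reference produced above */
	}
}
#endif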
1169
1170/*
1171 * Routine: convert_task_to_port
1172 * Purpose:
1173 * Convert from a task to a port.
1174 * Consumes a task ref; produces a naked send right
1175 * which may be invalid.
1176 * Conditions:
1177 * Nothing locked.
1178 */
1179
1180ipc_port_t
1181convert_task_to_port(
1182 task_t task)
1183{
1184 ipc_port_t port;
1185
1186 itk_lock(task);
1187 if (task->itk_self != IP_NULL)
1188 port = ipc_port_make_send(task->itk_self);
1189 else
1190 port = IP_NULL;
1191 itk_unlock(task);
1192
1193 task_deallocate(task);
1194 return port;
1195}
1196
1197/*
1198 * Routine: convert_thread_to_port
1199 * Purpose:
1200 * Convert from a thread to a port.
 1201 * Consumes a thread ref; produces a naked send right
1202 * which may be invalid.
1203 * Conditions:
1204 * Nothing locked.
1205 */
1206
1207ipc_port_t
1208convert_thread_to_port(
1209 thread_t thread)
1210{
1211 ipc_port_t port;
1212
1213 thread_mtx_lock(thread);
1214
1215 if (thread->ith_self != IP_NULL)
1216 port = ipc_port_make_send(thread->ith_self);
1217 else
1218 port = IP_NULL;
1219
1220 thread_mtx_unlock(thread);
1221
1222 thread_deallocate(thread);
1223
1224 return (port);
1225}
1226
1227/*
1228 * Routine: space_deallocate
1229 * Purpose:
1230 * Deallocate a space ref produced by convert_port_to_space.
1231 * Conditions:
1232 * Nothing locked.
1233 */
1234
1235void
1236space_deallocate(
1237 ipc_space_t space)
1238{
1239 if (space != IS_NULL)
1240 is_release(space);
1241}
1242
1243/*
1244 * Routine: thread/task_set_exception_ports [kernel call]
1245 * Purpose:
1246 * Sets the thread/task exception port, flavor and
1247 * behavior for the exception types specified by the mask.
1248 * There will be one send right per exception per valid
1249 * port.
1250 * Conditions:
1251 * Nothing locked. If successful, consumes
1252 * the supplied send right.
1253 * Returns:
1254 * KERN_SUCCESS Changed the special port.
1255 * KERN_INVALID_ARGUMENT The thread is null,
 1256 * Illegal mask bit set,
 1257 * Illegal exception behavior.
1258 * KERN_FAILURE The thread is dead.
1259 */
1260
1261kern_return_t
1262thread_set_exception_ports(
1263 thread_t thread,
1264 exception_mask_t exception_mask,
1265 ipc_port_t new_port,
1266 exception_behavior_t new_behavior,
1267 thread_state_flavor_t new_flavor)
1268{
1269 ipc_port_t old_port[EXC_TYPES_COUNT];
1270 register int i;
1271
1272 if (thread == THREAD_NULL)
1273 return (KERN_INVALID_ARGUMENT);
1274
1275 if (exception_mask & ~EXC_MASK_ALL)
1276 return (KERN_INVALID_ARGUMENT);
1277
1278 if (IP_VALID(new_port)) {
1279 switch (new_behavior) {
1280
1281 case EXCEPTION_DEFAULT:
1282 case EXCEPTION_STATE:
1283 case EXCEPTION_STATE_IDENTITY:
1284 break;
1285
1286 default:
1287 return (KERN_INVALID_ARGUMENT);
1288 }
1289 }
1290
1291 /*
1292 * Check the validity of the thread_state_flavor by calling the
1293 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
1294 * osfmk/mach/ARCHITECTURE/thread_status.h
1295 */
1296 if (!VALID_THREAD_STATE_FLAVOR(new_flavor))
1297 return (KERN_INVALID_ARGUMENT);
1298
1299 thread_mtx_lock(thread);
1300
1301 if (!thread->active) {
1302 thread_mtx_unlock(thread);
1303
1304 return (KERN_FAILURE);
1305 }
1306
1307 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1308 if (exception_mask & (1 << i)) {
1309 old_port[i] = thread->exc_actions[i].port;
1310 thread->exc_actions[i].port = ipc_port_copy_send(new_port);
1311 thread->exc_actions[i].behavior = new_behavior;
1312 thread->exc_actions[i].flavor = new_flavor;
1313 }
1314 else
1315 old_port[i] = IP_NULL;
1316 }
1317
1318 thread_mtx_unlock(thread);
1319
1320 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
1321 if (IP_VALID(old_port[i]))
1322 ipc_port_release_send(old_port[i]);
1323
1324 if (IP_VALID(new_port)) /* consume send right */
1325 ipc_port_release_send(new_port);
1326
1327 return (KERN_SUCCESS);
1328}
1329
1330kern_return_t
1331task_set_exception_ports(
1332 task_t task,
1333 exception_mask_t exception_mask,
1334 ipc_port_t new_port,
1335 exception_behavior_t new_behavior,
1336 thread_state_flavor_t new_flavor)
1337{
1338 ipc_port_t old_port[EXC_TYPES_COUNT];
1339 register int i;
1340
1341 if (task == TASK_NULL)
1342 return (KERN_INVALID_ARGUMENT);
1343
1344 if (exception_mask & ~EXC_MASK_ALL)
1345 return (KERN_INVALID_ARGUMENT);
1346
1347 if (IP_VALID(new_port)) {
1348 switch (new_behavior) {
1349
1350 case EXCEPTION_DEFAULT:
1351 case EXCEPTION_STATE:
1352 case EXCEPTION_STATE_IDENTITY:
1353 break;
1354
1355 default:
1356 return (KERN_INVALID_ARGUMENT);
1357 }
1358 }
1359
1360 itk_lock(task);
1361
1362 if (task->itk_self == IP_NULL) {
1363 itk_unlock(task);
1364
1365 return (KERN_FAILURE);
1366 }
1367
1368 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1369 if (exception_mask & (1 << i)) {
1370 old_port[i] = task->exc_actions[i].port;
1371 task->exc_actions[i].port =
1372 ipc_port_copy_send(new_port);
1373 task->exc_actions[i].behavior = new_behavior;
1374 task->exc_actions[i].flavor = new_flavor;
1375 }
1376 else
1377 old_port[i] = IP_NULL;
1378 }
1379
1380 itk_unlock(task);
1381
1382 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
1383 if (IP_VALID(old_port[i]))
1384 ipc_port_release_send(old_port[i]);
1385
1386 if (IP_VALID(new_port)) /* consume send right */
1387 ipc_port_release_send(new_port);
1388
1389 return (KERN_SUCCESS);
1390}
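
/*
 * Illustrative user-space sketch (not part of the original source): a
 * typical client allocates a receive right, makes a send right for it and
 * registers it with task_set_exception_ports().  The mask/behavior/flavor
 * combination below is just one common choice; the example function name
 * is ours.
 */
#if 0	/* user-space example; not compiled into the kernel */
#include <mach/mach.h>

static kern_return_t
install_exception_port_example(mach_port_t *out_port)
{
	mach_port_t exc_port;
	kern_return_t kr;

	kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE,
	    &exc_port);
	if (kr != KERN_SUCCESS)
		return kr;

	kr = mach_port_insert_right(mach_task_self(), exc_port, exc_port,
	    MACH_MSG_TYPE_MAKE_SEND);
	if (kr != KERN_SUCCESS)
		return kr;

	kr = task_set_exception_ports(mach_task_self(), EXC_MASK_BAD_ACCESS,
	    exc_port, EXCEPTION_DEFAULT, THREAD_STATE_NONE);
	if (kr == KERN_SUCCESS)
		*out_port = exc_port;
	return kr;
}
#endif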
1391
1392/*
1393 * Routine: thread/task_swap_exception_ports [kernel call]
1394 * Purpose:
1395 * Sets the thread/task exception port, flavor and
1396 * behavior for the exception types specified by the
1397 * mask.
1398 *
 1399 * The old ports, behaviors and flavors are returned.
 1400 * Count specifies the array sizes on input and
 1401 * the number of returned ports etc. on output. The
 1402 * arrays must be large enough to hold all the returned
 1403 * data; MIG returns an error otherwise. The masks
1404 * array specifies the corresponding exception type(s).
1405 *
1406 * Conditions:
1407 * Nothing locked. If successful, consumes
1408 * the supplied send right.
1409 *
 1410 * Returns up to [in] CountCnt elements.
1411 * Returns:
1412 * KERN_SUCCESS Changed the special port.
1413 * KERN_INVALID_ARGUMENT The thread is null,
 1414 * Illegal mask bit set,
 1415 * Illegal exception behavior.
1416 * KERN_FAILURE The thread is dead.
1417 */
1418
1419kern_return_t
1420thread_swap_exception_ports(
1421 thread_t thread,
1422 exception_mask_t exception_mask,
1423 ipc_port_t new_port,
1424 exception_behavior_t new_behavior,
1425 thread_state_flavor_t new_flavor,
1426 exception_mask_array_t masks,
1427 mach_msg_type_number_t *CountCnt,
1428 exception_port_array_t ports,
1429 exception_behavior_array_t behaviors,
1430 thread_state_flavor_array_t flavors)
1431{
1432 ipc_port_t old_port[EXC_TYPES_COUNT];
1433 unsigned int i, j, count;
1434
1435 if (thread == THREAD_NULL)
1436 return (KERN_INVALID_ARGUMENT);
1437
1438 if (exception_mask & ~EXC_MASK_ALL)
1439 return (KERN_INVALID_ARGUMENT);
1440
1441 if (IP_VALID(new_port)) {
1442 switch (new_behavior) {
1443
1444 case EXCEPTION_DEFAULT:
1445 case EXCEPTION_STATE:
1446 case EXCEPTION_STATE_IDENTITY:
1447 break;
1448
1449 default:
1450 return (KERN_INVALID_ARGUMENT);
1451 }
1452 }
1453
1454 thread_mtx_lock(thread);
1455
1456 if (!thread->active) {
1457 thread_mtx_unlock(thread);
1458
1459 return (KERN_FAILURE);
1460 }
1461
1462 count = 0;
1463
1464 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1465 if (exception_mask & (1 << i)) {
1466 for (j = 0; j < count; ++j) {
1467 /*
 1468 * search for an identical entry; if found,
 1469 * set the corresponding mask for this exception.
1470 */
1471 if ( thread->exc_actions[i].port == ports[j] &&
1472 thread->exc_actions[i].behavior == behaviors[j] &&
1473 thread->exc_actions[i].flavor == flavors[j] ) {
1474 masks[j] |= (1 << i);
1475 break;
1476 }
1477 }
1478
1479 if (j == count) {
1480 masks[j] = (1 << i);
1481 ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);
1482
1483 behaviors[j] = thread->exc_actions[i].behavior;
1484 flavors[j] = thread->exc_actions[i].flavor;
1485 ++count;
1486 }
1487
1488 old_port[i] = thread->exc_actions[i].port;
1489 thread->exc_actions[i].port = ipc_port_copy_send(new_port);
1490 thread->exc_actions[i].behavior = new_behavior;
1491 thread->exc_actions[i].flavor = new_flavor;
1492 if (count > *CountCnt)
1493 break;
1494 }
1495 else
1496 old_port[i] = IP_NULL;
1497 }
1498
1499 thread_mtx_unlock(thread);
1500
1501 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
1502 if (IP_VALID(old_port[i]))
1503 ipc_port_release_send(old_port[i]);
1504
1505 if (IP_VALID(new_port)) /* consume send right */
1506 ipc_port_release_send(new_port);
1507
1508 *CountCnt = count;
1509
1510 return (KERN_SUCCESS);
1511}
1512
1513kern_return_t
1514task_swap_exception_ports(
1515 task_t task,
1516 exception_mask_t exception_mask,
1517 ipc_port_t new_port,
1518 exception_behavior_t new_behavior,
1519 thread_state_flavor_t new_flavor,
1520 exception_mask_array_t masks,
1521 mach_msg_type_number_t *CountCnt,
1522 exception_port_array_t ports,
1523 exception_behavior_array_t behaviors,
1524 thread_state_flavor_array_t flavors)
1525{
1526 ipc_port_t old_port[EXC_TYPES_COUNT];
1527 unsigned int i, j, count;
1528
1529 if (task == TASK_NULL)
1530 return (KERN_INVALID_ARGUMENT);
1531
1532 if (exception_mask & ~EXC_MASK_ALL)
1533 return (KERN_INVALID_ARGUMENT);
1534
1535 if (IP_VALID(new_port)) {
1536 switch (new_behavior) {
1537
1538 case EXCEPTION_DEFAULT:
1539 case EXCEPTION_STATE:
1540 case EXCEPTION_STATE_IDENTITY:
1541 break;
1542
1543 default:
1544 return (KERN_INVALID_ARGUMENT);
1545 }
1546 }
1547
1548 itk_lock(task);
1549
1550 if (task->itk_self == IP_NULL) {
1551 itk_unlock(task);
1552
1553 return (KERN_FAILURE);
1554 }
1555
1556 count = 0;
1557
1558 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1559 if (exception_mask & (1 << i)) {
1560 for (j = 0; j < count; j++) {
1561 /*
 1562 * search for an identical entry; if found,
 1563 * set the corresponding mask for this exception.
1564 */
1565 if ( task->exc_actions[i].port == ports[j] &&
1566 task->exc_actions[i].behavior == behaviors[j] &&
1567 task->exc_actions[i].flavor == flavors[j] ) {
1568 masks[j] |= (1 << i);
1569 break;
1570 }
1571 }
1572
1573 if (j == count) {
1574 masks[j] = (1 << i);
1575 ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
1576 behaviors[j] = task->exc_actions[i].behavior;
1577 flavors[j] = task->exc_actions[i].flavor;
1578 ++count;
1579 }
1580
1581 old_port[i] = task->exc_actions[i].port;
1582 task->exc_actions[i].port = ipc_port_copy_send(new_port);
1583 task->exc_actions[i].behavior = new_behavior;
1584 task->exc_actions[i].flavor = new_flavor;
1585 if (count > *CountCnt)
1586 break;
1587 }
1588 else
1589 old_port[i] = IP_NULL;
1590 }
1591
1592 itk_unlock(task);
1593
1594 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++)
1595 if (IP_VALID(old_port[i]))
1596 ipc_port_release_send(old_port[i]);
1597
1598 if (IP_VALID(new_port)) /* consume send right */
1599 ipc_port_release_send(new_port);
1600
1601 *CountCnt = count;
1602
1603 return (KERN_SUCCESS);
1604}
1605
1606/*
1607 * Routine: thread/task_get_exception_ports [kernel call]
1608 * Purpose:
1609 * Clones a send right for each of the thread/task's exception
 1610 * ports specified in the mask and returns the behavior
 1611 * and flavor of each port.
1612 *
 1613 * Returns up to [in] CountCnt elements.
1614 *
1615 * Conditions:
1616 * Nothing locked.
1617 * Returns:
1618 * KERN_SUCCESS Extracted a send right.
1619 * KERN_INVALID_ARGUMENT The thread is null,
1620 * Invalid special port,
1621 * Illegal mask bit set.
1622 * KERN_FAILURE The thread is dead.
1623 */
1624
1625kern_return_t
1626thread_get_exception_ports(
1627 thread_t thread,
1628 exception_mask_t exception_mask,
1629 exception_mask_array_t masks,
1630 mach_msg_type_number_t *CountCnt,
1631 exception_port_array_t ports,
1632 exception_behavior_array_t behaviors,
1633 thread_state_flavor_array_t flavors)
1634{
1635 unsigned int i, j, count;
1636
1637 if (thread == THREAD_NULL)
1638 return (KERN_INVALID_ARGUMENT);
1639
1640 if (exception_mask & ~EXC_MASK_ALL)
1641 return (KERN_INVALID_ARGUMENT);
1642
1643 thread_mtx_lock(thread);
1644
1645 if (!thread->active) {
1646 thread_mtx_unlock(thread);
1647
1648 return (KERN_FAILURE);
1649 }
1650
1651 count = 0;
1652
1653 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1654 if (exception_mask & (1 << i)) {
1655 for (j = 0; j < count; ++j) {
1656 /*
 1657 * search for an identical entry; if found,
 1658 * set the corresponding mask for this exception.
1659 */
1660 if ( thread->exc_actions[i].port == ports[j] &&
 1661			    thread->exc_actions[i].behavior == behaviors[j] &&
1662 thread->exc_actions[i].flavor == flavors[j] ) {
1663 masks[j] |= (1 << i);
1664 break;
1665 }
1666 }
1667
1668 if (j == count) {
1669 masks[j] = (1 << i);
1670 ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);
1671 behaviors[j] = thread->exc_actions[i].behavior;
1672 flavors[j] = thread->exc_actions[i].flavor;
1673 ++count;
1674 if (count >= *CountCnt)
1675 break;
1676 }
1677 }
1678 }
1679
1680 thread_mtx_unlock(thread);
1681
1682 *CountCnt = count;
1683
1684 return (KERN_SUCCESS);
1685}
1686
1687kern_return_t
1688task_get_exception_ports(
1689 task_t task,
1690 exception_mask_t exception_mask,
1691 exception_mask_array_t masks,
1692 mach_msg_type_number_t *CountCnt,
1693 exception_port_array_t ports,
1694 exception_behavior_array_t behaviors,
1695 thread_state_flavor_array_t flavors)
1696{
1697 unsigned int i, j, count;
1698
1699 if (task == TASK_NULL)
1700 return (KERN_INVALID_ARGUMENT);
1701
1702 if (exception_mask & ~EXC_MASK_ALL)
1703 return (KERN_INVALID_ARGUMENT);
1704
1705 itk_lock(task);
1706
1707 if (task->itk_self == IP_NULL) {
1708 itk_unlock(task);
1709
1710 return (KERN_FAILURE);
1711 }
1712
1713 count = 0;
1714
1715 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1716 if (exception_mask & (1 << i)) {
1717 for (j = 0; j < count; ++j) {
1718 /*
 1719 * search for an identical entry; if found,
 1720 * set the corresponding mask for this exception.
1721 */
1722 if ( task->exc_actions[i].port == ports[j] &&
1723 task->exc_actions[i].behavior == behaviors[j] &&
1724 task->exc_actions[i].flavor == flavors[j] ) {
1725 masks[j] |= (1 << i);
1726 break;
1727 }
1728 }
1729
1730 if (j == count) {
1731 masks[j] = (1 << i);
1732 ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
1733 behaviors[j] = task->exc_actions[i].behavior;
1734 flavors[j] = task->exc_actions[i].flavor;
1735 ++count;
1736 if (count > *CountCnt)
1737 break;
1738 }
1739 }
1740 }
1741
1742 itk_unlock(task);
1743
1744 *CountCnt = count;
1745
1746 return (KERN_SUCCESS);
1747}
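
/*
 * Illustrative user-space sketch (not part of the original source): the
 * get interfaces fill caller-supplied arrays, so clients size them at
 * EXC_TYPES_COUNT and pass the capacity in and out through the count
 * argument.  The example function name is ours.
 */
#if 0	/* user-space example; not compiled into the kernel */
#include <mach/mach.h>

static void
dump_exception_ports_example(void)
{
	exception_mask_t masks[EXC_TYPES_COUNT];
	mach_port_t ports[EXC_TYPES_COUNT];
	exception_behavior_t behaviors[EXC_TYPES_COUNT];
	thread_state_flavor_t flavors[EXC_TYPES_COUNT];
	mach_msg_type_number_t count = EXC_TYPES_COUNT;
	unsigned int i;

	if (task_get_exception_ports(mach_task_self(), EXC_MASK_ALL,
	    masks, &count, ports, behaviors, flavors) != KERN_SUCCESS)
		return;

	for (i = 0; i < count; i++) {
		/* ... inspect masks[i], ports[i], behaviors[i], flavors[i] ... */
		if (MACH_PORT_VALID(ports[i]))
			(void) mach_port_deallocate(mach_task_self(), ports[i]);
	}
}
#endif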