/*
 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
/*
 * Task and thread related IPC functions.
 */
#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mach_param.h>
#include <mach/task_special_ports.h>
#include <mach/thread_special_ports.h>
#include <mach/thread_status.h>
#include <mach/exception_types.h>
#include <mach/memory_object_types.h>
#include <mach/mach_traps.h>
#include <mach/task_server.h>
#include <mach/thread_act_server.h>
#include <mach/mach_host_server.h>
#include <mach/host_priv_server.h>
#include <mach/vm_map_server.h>

#include <kern/kern_types.h>
#include <kern/host.h>
#include <kern/ipc_kobject.h>
#include <kern/ipc_tt.h>
#include <kern/kalloc.h>
#include <kern/thread.h>
#include <kern/misc_protos.h>

#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>

#include <security/mac_mach_internal.h>
/* forward declarations */
task_t convert_port_to_locked_task(ipc_port_t port);
/*
 *  Routine:    ipc_task_init
 *      Initialize a task's IPC state.
 *
 *      If non-null, some state will be inherited from the parent.
 *      The parent must be appropriately initialized.
 */
void
ipc_task_init(
    task_t      task,
    task_t      parent)
{
    ipc_space_t space;
    ipc_port_t kport;
    ipc_port_t nport;
    kern_return_t kr;
    int i;

    kr = ipc_space_create(&ipc_table_entries[0], &space);
    if (kr != KERN_SUCCESS)
        panic("ipc_task_init");

    space->is_task = task;

    kport = ipc_port_alloc_kernel();
    if (kport == IP_NULL)
        panic("ipc_task_init");

    nport = ipc_port_alloc_kernel();
    if (nport == IP_NULL)
        panic("ipc_task_init");

    task->itk_self = kport;
    task->itk_nself = nport;
    task->itk_resume = IP_NULL; /* Lazily allocated on-demand */
    task->itk_sself = ipc_port_make_send(kport);
    task->itk_space = space;

#if CONFIG_MACF_MACH
    if (parent)
        mac_task_label_associate(parent, task, &parent->maclabel,
            &task->maclabel, &kport->ip_label);
    else
        mac_task_label_associate_kernel(task, &task->maclabel, &kport->ip_label);
#endif

    if (parent == TASK_NULL) {
        ipc_port_t port;

        for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
            task->exc_actions[i].port = IP_NULL;
        }

        kr = host_get_host_port(host_priv_self(), &port);
        assert(kr == KERN_SUCCESS);
        task->itk_host = port;

        task->itk_bootstrap = IP_NULL;
        task->itk_seatbelt = IP_NULL;
        task->itk_gssd = IP_NULL;
        task->itk_task_access = IP_NULL;

        for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
            task->itk_registered[i] = IP_NULL;
    } else {
        itk_lock(parent);
        assert(parent->itk_self != IP_NULL);

        /* inherit registered ports */

        for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
            task->itk_registered[i] =
                ipc_port_copy_send(parent->itk_registered[i]);

        /* inherit exception and bootstrap ports */

        for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
            task->exc_actions[i].port =
                ipc_port_copy_send(parent->exc_actions[i].port);
            task->exc_actions[i].flavor =
                parent->exc_actions[i].flavor;
            task->exc_actions[i].behavior =
                parent->exc_actions[i].behavior;
            task->exc_actions[i].privileged =
                parent->exc_actions[i].privileged;
        }

        task->itk_host =
            ipc_port_copy_send(parent->itk_host);

        task->itk_bootstrap =
            ipc_port_copy_send(parent->itk_bootstrap);

        task->itk_seatbelt =
            ipc_port_copy_send(parent->itk_seatbelt);

        task->itk_gssd =
            ipc_port_copy_send(parent->itk_gssd);

        task->itk_task_access =
            ipc_port_copy_send(parent->itk_task_access);

        itk_unlock(parent);
    }
}
/*
 *  Routine:    ipc_task_enable
 *      Enable a task for IPC access.
 */
void
ipc_task_enable(
    task_t      task)
{
    ipc_port_t kport;
    ipc_port_t nport;

    itk_lock(task);
    kport = task->itk_self;
    if (kport != IP_NULL)
        ipc_kobject_set(kport, (ipc_kobject_t) task, IKOT_TASK);
    nport = task->itk_nself;
    if (nport != IP_NULL)
        ipc_kobject_set(nport, (ipc_kobject_t) task, IKOT_TASK_NAME);
    itk_unlock(task);
}
/*
 *  Routine:    ipc_task_disable
 *      Disable IPC access to a task.
 */
void
ipc_task_disable(
    task_t      task)
{
    ipc_port_t kport;
    ipc_port_t nport;
    ipc_port_t rport;

    itk_lock(task);
    kport = task->itk_self;
    if (kport != IP_NULL)
        ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
    nport = task->itk_nself;
    if (nport != IP_NULL)
        ipc_kobject_set(nport, IKO_NULL, IKOT_NONE);

    rport = task->itk_resume;
    if (rport != IP_NULL) {
        /*
         * From this point onwards this task is no longer accepting
         * new suspensions.
         *
         * There are still outstanding suspensions on this task,
         * even as it is being torn down. Disconnect the task
         * from the rport, thereby "orphaning" the rport. The rport
         * itself will go away only when the last suspension holder
         * destroys his SO right to it -- when he either
         * exits, or tries to actually use that last SO right to
         * resume this (now non-existent) task.
         */
        ipc_kobject_set(rport, IKO_NULL, IKOT_NONE);
    }
    itk_unlock(task);
}
/*
 *  Routine:    ipc_task_terminate
 *      Clean up and destroy a task's IPC state.
 *
 *      Nothing locked. The task must be suspended.
 *      (Or the current thread must be in the task.)
 */
void
ipc_task_terminate(
    task_t      task)
{
    ipc_port_t kport;
    ipc_port_t nport;
    ipc_port_t rport;
    int i;

    itk_lock(task);
    kport = task->itk_self;

    if (kport == IP_NULL) {
        /* the task is already terminated (can this happen?) */
        itk_unlock(task);
        return;
    }
    task->itk_self = IP_NULL;

    nport = task->itk_nself;
    assert(nport != IP_NULL);
    task->itk_nself = IP_NULL;

    rport = task->itk_resume;
    task->itk_resume = IP_NULL;

    itk_unlock(task);

    /* release the naked send rights */

    if (IP_VALID(task->itk_sself))
        ipc_port_release_send(task->itk_sself);

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
        if (IP_VALID(task->exc_actions[i].port)) {
            ipc_port_release_send(task->exc_actions[i].port);
        }
    }

    if (IP_VALID(task->itk_host))
        ipc_port_release_send(task->itk_host);

    if (IP_VALID(task->itk_bootstrap))
        ipc_port_release_send(task->itk_bootstrap);

    if (IP_VALID(task->itk_seatbelt))
        ipc_port_release_send(task->itk_seatbelt);

    if (IP_VALID(task->itk_gssd))
        ipc_port_release_send(task->itk_gssd);

    if (IP_VALID(task->itk_task_access))
        ipc_port_release_send(task->itk_task_access);

    for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
        if (IP_VALID(task->itk_registered[i]))
            ipc_port_release_send(task->itk_registered[i]);

    /* destroy the kernel ports */
    ipc_port_dealloc_kernel(kport);
    ipc_port_dealloc_kernel(nport);
    if (rport != IP_NULL)
        ipc_port_dealloc_kernel(rport);

    itk_lock_destroy(task);
}
/*
 *  Routine:    ipc_task_reset
 *      Reset a task's IPC state to protect it when
 *      it enters an elevated security context. The
 *      task name port can remain the same - since
 *      it represents no specific privilege.
 *
 *      Nothing locked. The task must be suspended.
 *      (Or the current thread must be in the task.)
 */
void
ipc_task_reset(
    task_t      task)
{
    ipc_port_t old_kport, new_kport;
    ipc_port_t old_sself;
    ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
    int i;

    new_kport = ipc_port_alloc_kernel();
    if (new_kport == IP_NULL)
        panic("ipc_task_reset");

    itk_lock(task);

    old_kport = task->itk_self;

    if (old_kport == IP_NULL) {
        /* the task is already terminated (can this happen?) */
        itk_unlock(task);
        ipc_port_dealloc_kernel(new_kport);
        return;
    }

    task->itk_self = new_kport;
    old_sself = task->itk_sself;
    task->itk_sself = ipc_port_make_send(new_kport);
    ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
    ipc_kobject_set(new_kport, (ipc_kobject_t) task, IKOT_TASK);

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
        if (!task->exc_actions[i].privileged) {
            old_exc_actions[i] = task->exc_actions[i].port;
            task->exc_actions[i].port = IP_NULL;
        } else {
            old_exc_actions[i] = IP_NULL;
        }
    }

    itk_unlock(task);

    /* release the naked send rights */

    if (IP_VALID(old_sself))
        ipc_port_release_send(old_sself);

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
        if (IP_VALID(old_exc_actions[i])) {
            ipc_port_release_send(old_exc_actions[i]);
        }
    }

    /* destroy the kernel port */
    ipc_port_dealloc_kernel(old_kport);
}
/*
 *  Routine:    ipc_thread_init
 *      Initialize a thread's IPC state.
 */
void
ipc_thread_init(
    thread_t    thread)
{
    ipc_port_t  kport;

    kport = ipc_port_alloc_kernel();
    if (kport == IP_NULL)
        panic("ipc_thread_init");

    thread->ith_self = kport;
    thread->ith_sself = ipc_port_make_send(kport);
    thread->exc_actions = NULL;

    ipc_kobject_set(kport, (ipc_kobject_t)thread, IKOT_THREAD);

#if IMPORTANCE_INHERITANCE
    thread->ith_assertions = 0;
#endif

    ipc_kmsg_queue_init(&thread->ith_messages);

    thread->ith_rpc_reply = IP_NULL;
}
void
ipc_thread_init_exc_actions(
    thread_t    thread)
{
    assert(thread->exc_actions == NULL);

    thread->exc_actions = kalloc(sizeof(struct exception_action) * EXC_TYPES_COUNT);
    bzero(thread->exc_actions, sizeof(struct exception_action) * EXC_TYPES_COUNT);
}
void
ipc_thread_destroy_exc_actions(
    thread_t    thread)
{
    if (thread->exc_actions != NULL) {
        kfree(thread->exc_actions,
              sizeof(struct exception_action) * EXC_TYPES_COUNT);
        thread->exc_actions = NULL;
    }
}
void
ipc_thread_disable(
    thread_t    thread)
{
    ipc_port_t  kport = thread->ith_self;

    if (kport != IP_NULL)
        ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
}
/*
 *  Routine:    ipc_thread_terminate
 *      Clean up and destroy a thread's IPC state.
 */
void
ipc_thread_terminate(
    thread_t    thread)
{
    ipc_port_t  kport = thread->ith_self;

    if (kport != IP_NULL) {
        int i;

        if (IP_VALID(thread->ith_sself))
            ipc_port_release_send(thread->ith_sself);

        thread->ith_sself = thread->ith_self = IP_NULL;

        if (thread->exc_actions != NULL) {
            for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
                if (IP_VALID(thread->exc_actions[i].port))
                    ipc_port_release_send(thread->exc_actions[i].port);
            }
            ipc_thread_destroy_exc_actions(thread);
        }

        ipc_port_dealloc_kernel(kport);
    }

#if IMPORTANCE_INHERITANCE
    assert(thread->ith_assertions == 0);
#endif

    assert(ipc_kmsg_queue_empty(&thread->ith_messages));

    if (thread->ith_rpc_reply != IP_NULL)
        ipc_port_dealloc_reply(thread->ith_rpc_reply);

    thread->ith_rpc_reply = IP_NULL;
}
/*
 *  Routine:    ipc_thread_reset
 *      Reset the IPC state for a given Mach thread when
 *      its task enters an elevated security context.
 *      Both the thread port and its exception ports have
 *      to be reset. Its RPC reply port cannot have any
 *      rights outstanding, so it should be fine.
 */
void
ipc_thread_reset(
    thread_t    thread)
{
    ipc_port_t old_kport, new_kport;
    ipc_port_t old_sself;
    ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
    boolean_t  has_old_exc_actions = FALSE;
    int i;

    new_kport = ipc_port_alloc_kernel();
    if (new_kport == IP_NULL)
        panic("ipc_thread_reset");

    thread_mtx_lock(thread);

    old_kport = thread->ith_self;

    if (old_kport == IP_NULL) {
        /* the thread is already terminated (can this happen?) */
        thread_mtx_unlock(thread);
        ipc_port_dealloc_kernel(new_kport);
        return;
    }

    thread->ith_self = new_kport;
    old_sself = thread->ith_sself;
    thread->ith_sself = ipc_port_make_send(new_kport);
    ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
    ipc_kobject_set(new_kport, (ipc_kobject_t) thread, IKOT_THREAD);

    /*
     * Only ports that were set by root-owned processes
     * (privileged ports) should survive
     */
    if (thread->exc_actions != NULL) {
        has_old_exc_actions = TRUE;
        for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
            if (thread->exc_actions[i].privileged) {
                old_exc_actions[i] = IP_NULL;
            } else {
                old_exc_actions[i] = thread->exc_actions[i].port;
                thread->exc_actions[i].port = IP_NULL;
            }
        }
    }

    thread_mtx_unlock(thread);

    /* release the naked send rights */

    if (IP_VALID(old_sself))
        ipc_port_release_send(old_sself);

    if (has_old_exc_actions) {
        for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
            ipc_port_release_send(old_exc_actions[i]);
        }
    }

    /* destroy the kernel port */
    ipc_port_dealloc_kernel(old_kport);
}
/*
 *  Routine:    retrieve_task_self_fast
 *      Optimized version of retrieve_task_self,
 *      that only works for the current task.
 *
 *      Return a send right (possibly null/dead)
 *      for the task's user-visible self port.
 */
ipc_port_t
retrieve_task_self_fast(
    register task_t     task)
{
    register ipc_port_t port;

    assert(task == current_task());

    itk_lock(task);
    assert(task->itk_self != IP_NULL);

    if ((port = task->itk_sself) == task->itk_self) {
        /* no interposing */

        ip_lock(port);
        assert(ip_active(port));
        ip_reference(port);
        port->ip_srights++;
        ip_unlock(port);
    }
    else
        port = ipc_port_copy_send(port);
    itk_unlock(task);

    return port;
}
/*
 *  Routine:    retrieve_thread_self_fast
 *      Return a send right (possibly null/dead)
 *      for the thread's user-visible self port.
 *
 *      Only works for the current thread.
 */
ipc_port_t
retrieve_thread_self_fast(
    thread_t        thread)
{
    register ipc_port_t port;

    assert(thread == current_thread());

    thread_mtx_lock(thread);

    assert(thread->ith_self != IP_NULL);

    if ((port = thread->ith_sself) == thread->ith_self) {
        /* no interposing */

        ip_lock(port);
        assert(ip_active(port));
        ip_reference(port);
        port->ip_srights++;
        ip_unlock(port);
    }
    else
        port = ipc_port_copy_send(port);

    thread_mtx_unlock(thread);

    return port;
}
/*
 *  Routine:    task_self_trap [mach trap]
 *      Give the caller send rights for his own task port.
 *
 *      Returns MACH_PORT_NULL if there are any resource failures.
 */
mach_port_name_t
task_self_trap(
    __unused struct task_self_trap_args *args)
{
    task_t task = current_task();
    ipc_port_t sright;
    mach_port_name_t name;

    sright = retrieve_task_self_fast(task);
    name = ipc_port_copyout_send(sright, task->itk_space);
    return name;
}
/*
 *  Routine:    thread_self_trap [mach trap]
 *      Give the caller send rights for his own thread port.
 *
 *      Returns MACH_PORT_NULL if there are any resource failures.
 */
mach_port_name_t
thread_self_trap(
    __unused struct thread_self_trap_args *args)
{
    thread_t thread = current_thread();
    task_t task = thread->task;
    ipc_port_t sright;
    mach_port_name_t name;

    sright = retrieve_thread_self_fast(thread);
    name = ipc_port_copyout_send(sright, task->itk_space);
    return name;
}
/*
 *  Routine:    mach_reply_port [mach trap]
 *      Allocate a port for the caller.
 *
 *      Returns MACH_PORT_NULL if there are any resource failures.
 */
mach_port_name_t
mach_reply_port(
    __unused struct mach_reply_port_args *args)
{
    ipc_port_t port;
    mach_port_name_t name;
    kern_return_t kr;

    kr = ipc_port_alloc(current_task()->itk_space, &name, &port);
    if (kr == KERN_SUCCESS)
        ip_unlock(port);
    else
        name = MACH_PORT_NULL;

    return name;
}
/*
 *  Routine:    thread_get_special_port [kernel call]
 *      Clones a send right for one of the thread's
 *      special ports.
 *
 *      Returns:
 *      KERN_SUCCESS            Extracted a send right.
 *      KERN_INVALID_ARGUMENT   The thread is null.
 *      KERN_FAILURE            The thread is dead.
 *      KERN_INVALID_ARGUMENT   Invalid special port.
 */
kern_return_t
thread_get_special_port(
    thread_t        thread,
    int             which,
    ipc_port_t      *portp)
{
    kern_return_t result = KERN_SUCCESS;
    ipc_port_t *whichp;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    switch (which) {

    case THREAD_KERNEL_PORT:
        whichp = &thread->ith_sself;
        break;

    default:
        return (KERN_INVALID_ARGUMENT);
    }

    thread_mtx_lock(thread);

    if (thread->active)
        *portp = ipc_port_copy_send(*whichp);
    else
        result = KERN_FAILURE;

    thread_mtx_unlock(thread);

    return (result);
}
/*
 *  Routine:    thread_set_special_port [kernel call]
 *      Changes one of the thread's special ports,
 *      setting it to the supplied send right.
 *
 *      Nothing locked. If successful, consumes
 *      the supplied send right.
 *
 *      Returns:
 *      KERN_SUCCESS            Changed the special port.
 *      KERN_INVALID_ARGUMENT   The thread is null.
 *      KERN_FAILURE            The thread is dead.
 *      KERN_INVALID_ARGUMENT   Invalid special port.
 */
kern_return_t
thread_set_special_port(
    thread_t        thread,
    int             which,
    ipc_port_t      port)
{
    kern_return_t result = KERN_SUCCESS;
    ipc_port_t *whichp, old = IP_NULL;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    switch (which) {

    case THREAD_KERNEL_PORT:
        whichp = &thread->ith_sself;
        break;

    default:
        return (KERN_INVALID_ARGUMENT);
    }

    thread_mtx_lock(thread);

    if (thread->active) {
        old = *whichp;
        *whichp = port;
    }
    else
        result = KERN_FAILURE;

    thread_mtx_unlock(thread);

    if (IP_VALID(old))
        ipc_port_release_send(old);

    return (result);
}
/*
 *  Routine:    task_get_special_port [kernel call]
 *      Clones a send right for one of the task's
 *      special ports.
 *
 *      Returns:
 *      KERN_SUCCESS            Extracted a send right.
 *      KERN_INVALID_ARGUMENT   The task is null.
 *      KERN_FAILURE            The task/space is dead.
 *      KERN_INVALID_ARGUMENT   Invalid special port.
 */
kern_return_t
task_get_special_port(
    task_t      task,
    int         which,
    ipc_port_t  *portp)
{
    ipc_port_t port;

    if (task == TASK_NULL)
        return KERN_INVALID_ARGUMENT;

    itk_lock(task);
    if (task->itk_self == IP_NULL) {
        itk_unlock(task);
        return KERN_FAILURE;
    }

    switch (which) {
    case TASK_KERNEL_PORT:
        port = ipc_port_copy_send(task->itk_sself);
        break;

    case TASK_NAME_PORT:
        port = ipc_port_make_send(task->itk_nself);
        break;

    case TASK_HOST_PORT:
        port = ipc_port_copy_send(task->itk_host);
        break;

    case TASK_BOOTSTRAP_PORT:
        port = ipc_port_copy_send(task->itk_bootstrap);
        break;

    case TASK_SEATBELT_PORT:
        port = ipc_port_copy_send(task->itk_seatbelt);
        break;

    case TASK_ACCESS_PORT:
        port = ipc_port_copy_send(task->itk_task_access);
        break;

    default:
        itk_unlock(task);
        return KERN_INVALID_ARGUMENT;
    }
    itk_unlock(task);

    *portp = port;
    return KERN_SUCCESS;
}
/*
 *  Routine:    task_set_special_port [kernel call]
 *      Changes one of the task's special ports,
 *      setting it to the supplied send right.
 *
 *      Nothing locked. If successful, consumes
 *      the supplied send right.
 *
 *      Returns:
 *      KERN_SUCCESS            Changed the special port.
 *      KERN_INVALID_ARGUMENT   The task is null.
 *      KERN_FAILURE            The task/space is dead.
 *      KERN_INVALID_ARGUMENT   Invalid special port.
 *      KERN_NO_ACCESS          Attempted overwrite of seatbelt port.
 */
kern_return_t
task_set_special_port(
    task_t      task,
    int         which,
    ipc_port_t  port)
{
    ipc_port_t *whichp;
    ipc_port_t old;

    if (task == TASK_NULL)
        return KERN_INVALID_ARGUMENT;

    switch (which) {
    case TASK_KERNEL_PORT:
        whichp = &task->itk_sself;
        break;

    case TASK_HOST_PORT:
        whichp = &task->itk_host;
        break;

    case TASK_BOOTSTRAP_PORT:
        whichp = &task->itk_bootstrap;
        break;

    case TASK_SEATBELT_PORT:
        whichp = &task->itk_seatbelt;
        break;

    case TASK_ACCESS_PORT:
        whichp = &task->itk_task_access;
        break;

    default:
        return KERN_INVALID_ARGUMENT;
    }

    itk_lock(task);
    if (task->itk_self == IP_NULL) {
        itk_unlock(task);
        return KERN_FAILURE;
    }

    /* do not allow overwrite of seatbelt or task access ports */
    if ((TASK_SEATBELT_PORT == which || TASK_ACCESS_PORT == which)
        && IP_VALID(*whichp)) {
        itk_unlock(task);
        return KERN_NO_ACCESS;
    }

#if CONFIG_MACF_MACH
    if (mac_task_check_service(current_task(), task, "set_special_port")) {
        itk_unlock(task);
        return KERN_NO_ACCESS;
    }
#endif

    old = *whichp;
    *whichp = port;
    itk_unlock(task);

    if (IP_VALID(old))
        ipc_port_release_send(old);
    return KERN_SUCCESS;
}
/*
 *  Routine:    mach_ports_register [kernel call]
 *      Stash a handful of port send rights in the task.
 *      Child tasks will inherit these rights, but they
 *      must use mach_ports_lookup to acquire them.
 *
 *      The rights are supplied in a (wired) kalloc'd segment.
 *      Rights which aren't supplied are assumed to be null.
 *
 *      Nothing locked. If successful, consumes
 *      the supplied rights and memory.
 *
 *      Returns:
 *      KERN_SUCCESS            Stashed the port rights.
 *      KERN_INVALID_ARGUMENT   The task is null.
 *      KERN_INVALID_ARGUMENT   The task is dead.
 *      KERN_INVALID_ARGUMENT   The memory param is null.
 *      KERN_INVALID_ARGUMENT   Too many port rights supplied.
 */
kern_return_t
mach_ports_register(
    task_t                  task,
    mach_port_array_t       memory,
    mach_msg_type_number_t  portsCnt)
{
    ipc_port_t ports[TASK_PORT_REGISTER_MAX];
    unsigned int i;

    if ((task == TASK_NULL) ||
        (portsCnt > TASK_PORT_REGISTER_MAX) ||
        (portsCnt && memory == NULL))
        return KERN_INVALID_ARGUMENT;

    /*
     * Pad the port rights with nulls.
     */

    for (i = 0; i < portsCnt; i++)
        ports[i] = memory[i];
    for (; i < TASK_PORT_REGISTER_MAX; i++)
        ports[i] = IP_NULL;

    itk_lock(task);
    if (task->itk_self == IP_NULL) {
        itk_unlock(task);
        return KERN_INVALID_ARGUMENT;
    }

    /*
     * Replace the old send rights with the new.
     * Release the old rights after unlocking.
     */

    for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
        ipc_port_t old;

        old = task->itk_registered[i];
        task->itk_registered[i] = ports[i];
        ports[i] = old;
    }

    itk_unlock(task);

    for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
        if (IP_VALID(ports[i]))
            ipc_port_release_send(ports[i]);

    /*
     * Now that the operation is known to be successful,
     * we can free the memory.
     */

    if (portsCnt != 0)
        kfree(memory,
              (vm_size_t) (portsCnt * sizeof(mach_port_t)));

    return KERN_SUCCESS;
}
/*
 *  Routine:    mach_ports_lookup [kernel call]
 *      Retrieves (clones) the stashed port send rights.
 *
 *      Nothing locked. If successful, the caller gets
 *      rights and memory.
 *
 *      Returns:
 *      KERN_SUCCESS            Retrieved the send rights.
 *      KERN_INVALID_ARGUMENT   The task is null.
 *      KERN_INVALID_ARGUMENT   The task is dead.
 *      KERN_RESOURCE_SHORTAGE  Couldn't allocate memory.
 */
kern_return_t
mach_ports_lookup(
    task_t                  task,
    mach_port_array_t       *portsp,
    mach_msg_type_number_t  *portsCnt)
{
    void *memory;
    vm_size_t size;
    ipc_port_t *ports;
    int i;

    if (task == TASK_NULL)
        return KERN_INVALID_ARGUMENT;

    size = (vm_size_t) (TASK_PORT_REGISTER_MAX * sizeof(ipc_port_t));

    memory = kalloc(size);
    if (memory == 0)
        return KERN_RESOURCE_SHORTAGE;

    itk_lock(task);
    if (task->itk_self == IP_NULL) {
        itk_unlock(task);

        kfree(memory, size);
        return KERN_INVALID_ARGUMENT;
    }

    ports = (ipc_port_t *) memory;

    /*
     * Clone port rights. Because kalloc'd memory
     * is wired, we won't fault while holding the task lock.
     */

    for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
        ports[i] = ipc_port_copy_send(task->itk_registered[i]);

    itk_unlock(task);

    *portsp = (mach_port_array_t) ports;
    *portsCnt = TASK_PORT_REGISTER_MAX;
    return KERN_SUCCESS;
}
/*
 *  Routine:    convert_port_to_locked_task
 *      Internal helper routine to convert from a port to a locked
 *      task. Used by several routines that try to convert from a
 *      task port to a reference on some task related object.
 *
 *      Nothing locked, blocking OK.
 */
task_t
convert_port_to_locked_task(ipc_port_t port)
{
    int try_failed_count = 0;

    while (IP_VALID(port)) {
        task_t task;

        ip_lock(port);
        if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK)) {
            ip_unlock(port);
            return TASK_NULL;
        }
        task = (task_t) port->ip_kobject;
        assert(task != TASK_NULL);

        /*
         * Normal lock ordering puts task_lock() before ip_lock().
         * Attempt out-of-order locking here.
         */
        if (task_lock_try(task)) {
            ip_unlock(port);
            return (task);
        }
        try_failed_count++;

        ip_unlock(port);
        mutex_pause(try_failed_count);
    }
    return TASK_NULL;
}
/*
 *  Routine:    convert_port_to_task
 *      Convert from a port to a task.
 *      Doesn't consume the port ref; produces a task ref,
 *      which may be null.
 */
task_t
convert_port_to_task(
    ipc_port_t      port)
{
    task_t task = TASK_NULL;

    if (IP_VALID(port)) {
        ip_lock(port);

        if (ip_active(port) &&
            ip_kotype(port) == IKOT_TASK) {
            task = (task_t)port->ip_kobject;
            assert(task != TASK_NULL);

            task_reference_internal(task);
        }

        ip_unlock(port);
    }

    return (task);
}
/*
 *  Routine:    convert_port_to_task_name
 *      Convert from a port to a task name.
 *      Doesn't consume the port ref; produces a task name ref,
 *      which may be null.
 */
task_name_t
convert_port_to_task_name(
    ipc_port_t      port)
{
    task_name_t task = TASK_NULL;

    if (IP_VALID(port)) {
        ip_lock(port);

        if (ip_active(port) &&
            (ip_kotype(port) == IKOT_TASK ||
             ip_kotype(port) == IKOT_TASK_NAME)) {
            task = (task_name_t)port->ip_kobject;
            assert(task != TASK_NAME_NULL);

            task_reference_internal(task);
        }

        ip_unlock(port);
    }

    return (task);
}
/*
 *  Routine:    convert_port_to_task_suspension_token
 *      Convert from a port to a task suspension token.
 *      Doesn't consume the port ref; produces a suspension token ref,
 *      which may be null.
 */
task_suspension_token_t
convert_port_to_task_suspension_token(
    ipc_port_t      port)
{
    task_suspension_token_t task = TASK_NULL;

    if (IP_VALID(port)) {
        ip_lock(port);

        if (ip_active(port) &&
            ip_kotype(port) == IKOT_TASK_RESUME) {
            task = (task_suspension_token_t)port->ip_kobject;
            assert(task != TASK_NULL);

            task_reference_internal(task);
        }

        ip_unlock(port);
    }

    return (task);
}
/*
 *  Routine:    convert_port_to_space
 *      Convert from a port to a space.
 *      Doesn't consume the port ref; produces a space ref,
 *      which may be null.
 */
ipc_space_t
convert_port_to_space(
    ipc_port_t  port)
{
    ipc_space_t space;
    task_t task;

    task = convert_port_to_locked_task(port);

    if (task == TASK_NULL)
        return IPC_SPACE_NULL;

    if (!task->active) {
        task_unlock(task);
        return IPC_SPACE_NULL;
    }

    space = task->itk_space;
    is_reference(space);
    task_unlock(task);
    return (space);
}
/*
 *  Routine:    convert_port_to_map
 *      Convert from a port to a map.
 *      Doesn't consume the port ref; produces a map ref,
 *      which may be null.
 */
vm_map_t
convert_port_to_map(
    ipc_port_t  port)
{
    task_t task;
    vm_map_t map;

    task = convert_port_to_locked_task(port);

    if (task == TASK_NULL)
        return VM_MAP_NULL;

    if (!task->active) {
        task_unlock(task);
        return VM_MAP_NULL;
    }

    map = task->map;
    vm_map_reference_swap(map);
    task_unlock(task);
    return map;
}
/*
 *  Routine:    convert_port_to_thread
 *      Convert from a port to a thread.
 *      Doesn't consume the port ref; produces a thread ref,
 *      which may be null.
 */
thread_t
convert_port_to_thread(
    ipc_port_t      port)
{
    thread_t thread = THREAD_NULL;

    if (IP_VALID(port)) {
        ip_lock(port);

        if (ip_active(port) &&
            ip_kotype(port) == IKOT_THREAD) {
            thread = (thread_t)port->ip_kobject;
            assert(thread != THREAD_NULL);

            thread_reference_internal(thread);
        }

        ip_unlock(port);
    }

    return (thread);
}
/*
 *  Routine:    port_name_to_thread
 *      Convert from a port name to a thread reference.
 *      A name of MACH_PORT_NULL is valid for the null thread.
 */
thread_t
port_name_to_thread(
    mach_port_name_t    name)
{
    thread_t thread = THREAD_NULL;
    ipc_port_t kport;

    if (MACH_PORT_VALID(name)) {
        if (ipc_object_copyin(current_space(), name,
                              MACH_MSG_TYPE_COPY_SEND,
                              (ipc_object_t *)&kport) != KERN_SUCCESS)
            return (THREAD_NULL);

        thread = convert_port_to_thread(kport);

        if (IP_VALID(kport))
            ipc_port_release_send(kport);
    }

    return (thread);
}
task_t
port_name_to_task(
    mach_port_name_t    name)
{
    ipc_port_t kern_port;
    kern_return_t kr;
    task_t task = TASK_NULL;

    if (MACH_PORT_VALID(name)) {
        kr = ipc_object_copyin(current_space(), name,
                               MACH_MSG_TYPE_COPY_SEND,
                               (ipc_object_t *) &kern_port);
        if (kr != KERN_SUCCESS)
            return TASK_NULL;

        task = convert_port_to_task(kern_port);

        if (IP_VALID(kern_port))
            ipc_port_release_send(kern_port);
    }
    return task;
}
/*
 *  Routine:    convert_task_to_port
 *      Convert from a task to a port.
 *      Consumes a task ref; produces a naked send right
 *      which may be invalid.
 */
ipc_port_t
convert_task_to_port(
    task_t      task)
{
    ipc_port_t port;

    itk_lock(task);
    if (task->itk_self != IP_NULL)
        port = ipc_port_make_send(task->itk_self);
    else
        port = IP_NULL;
    itk_unlock(task);

    task_deallocate(task);
    return port;
}
/*
 *  Routine:    convert_task_suspension_token_to_port
 *      Convert from a task suspension token to a port.
 *      Consumes a task suspension token ref; produces a naked send-once right
 *      which may be invalid.
 */
ipc_port_t
convert_task_suspension_token_to_port(
    task_suspension_token_t     task)
{
    ipc_port_t port;

    task_lock(task);
    if (task->active) {
        if (task->itk_resume == IP_NULL) {
            task->itk_resume = ipc_port_alloc_kernel();
            if (!IP_VALID(task->itk_resume)) {
                panic("failed to create resume port");
            }

            ipc_kobject_set(task->itk_resume, (ipc_kobject_t) task, IKOT_TASK_RESUME);
        }

        /*
         * Create a send-once right for each instance of a direct user-called
         * task_suspend2 call. Each time one of these send-once rights is abandoned,
         * the notification handler will resume the target task.
         */
        port = ipc_port_make_sonce(task->itk_resume);
        assert(IP_VALID(port));
    } else {
        port = IP_NULL;
    }
    task_unlock(task);

    task_suspension_token_deallocate(task);

    return port;
}
/*
 *  Routine:    convert_task_name_to_port
 *      Convert from a task name ref to a port.
 *      Consumes a task name ref; produces a naked send right
 *      which may be invalid.
 */
ipc_port_t
convert_task_name_to_port(
    task_name_t     task_name)
{
    ipc_port_t port;

    itk_lock(task_name);
    if (task_name->itk_nself != IP_NULL)
        port = ipc_port_make_send(task_name->itk_nself);
    else
        port = IP_NULL;
    itk_unlock(task_name);

    task_name_deallocate(task_name);
    return port;
}
/*
 *  Routine:    convert_thread_to_port
 *      Convert from a thread to a port.
 *      Consumes a thread ref; produces a naked send right
 *      which may be invalid.
 */
ipc_port_t
convert_thread_to_port(
    thread_t        thread)
{
    ipc_port_t port;

    thread_mtx_lock(thread);

    if (thread->ith_self != IP_NULL)
        port = ipc_port_make_send(thread->ith_self);
    else
        port = IP_NULL;

    thread_mtx_unlock(thread);

    thread_deallocate(thread);

    return (port);
}
/*
 *  Routine:    space_deallocate
 *      Deallocate a space ref produced by convert_port_to_space.
 */

void
space_deallocate(
    ipc_space_t space)
{
    if (space != IS_NULL)
        is_release(space);
}
/*
 *  Routine:    thread/task_set_exception_ports [kernel call]
 *      Sets the thread/task exception port, flavor and
 *      behavior for the exception types specified by the mask.
 *      There will be one send right per exception per valid
 *      combination.
 *
 *      Nothing locked. If successful, consumes
 *      the supplied send right.
 *
 *      Returns:
 *      KERN_SUCCESS            Changed the special port.
 *      KERN_INVALID_ARGUMENT   The thread is null,
 *                              Illegal mask bit set.
 *                              Illegal exception behavior.
 *      KERN_FAILURE            The thread is dead.
 */
kern_return_t
thread_set_exception_ports(
    thread_t                thread,
    exception_mask_t        exception_mask,
    ipc_port_t              new_port,
    exception_behavior_t    new_behavior,
    thread_state_flavor_t   new_flavor)
{
    ipc_port_t old_port[EXC_TYPES_COUNT];
    boolean_t privileged = current_task()->sec_token.val[0] == 0;
    register int i;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    if (exception_mask & ~EXC_MASK_VALID)
        return (KERN_INVALID_ARGUMENT);

    if (IP_VALID(new_port)) {
        switch (new_behavior & ~MACH_EXCEPTION_CODES) {

        case EXCEPTION_DEFAULT:
        case EXCEPTION_STATE:
        case EXCEPTION_STATE_IDENTITY:
            break;

        default:
            return (KERN_INVALID_ARGUMENT);
        }
    }

    /*
     * Check the validity of the thread_state_flavor by calling the
     * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
     * osfmk/mach/ARCHITECTURE/thread_status.h
     */
    if (!VALID_THREAD_STATE_FLAVOR(new_flavor))
        return (KERN_INVALID_ARGUMENT);

    thread_mtx_lock(thread);

    if (!thread->active) {
        thread_mtx_unlock(thread);

        return (KERN_FAILURE);
    }

    if (thread->exc_actions == NULL) {
        ipc_thread_init_exc_actions(thread);
    }
    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
        if (exception_mask & (1 << i)) {
            old_port[i] = thread->exc_actions[i].port;
            thread->exc_actions[i].port = ipc_port_copy_send(new_port);
            thread->exc_actions[i].behavior = new_behavior;
            thread->exc_actions[i].flavor = new_flavor;
            thread->exc_actions[i].privileged = privileged;
        }
        else
            old_port[i] = IP_NULL;
    }

    thread_mtx_unlock(thread);

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
        if (IP_VALID(old_port[i]))
            ipc_port_release_send(old_port[i]);

    if (IP_VALID(new_port))     /* consume send right */
        ipc_port_release_send(new_port);

    return (KERN_SUCCESS);
}
kern_return_t
task_set_exception_ports(
    task_t                  task,
    exception_mask_t        exception_mask,
    ipc_port_t              new_port,
    exception_behavior_t    new_behavior,
    thread_state_flavor_t   new_flavor)
{
    ipc_port_t old_port[EXC_TYPES_COUNT];
    boolean_t privileged = current_task()->sec_token.val[0] == 0;
    register int i;

    if (task == TASK_NULL)
        return (KERN_INVALID_ARGUMENT);

    if (exception_mask & ~EXC_MASK_VALID)
        return (KERN_INVALID_ARGUMENT);

    if (IP_VALID(new_port)) {
        switch (new_behavior & ~MACH_EXCEPTION_CODES) {

        case EXCEPTION_DEFAULT:
        case EXCEPTION_STATE:
        case EXCEPTION_STATE_IDENTITY:
            break;

        default:
            return (KERN_INVALID_ARGUMENT);
        }
    }

    itk_lock(task);

    if (task->itk_self == IP_NULL) {
        itk_unlock(task);

        return (KERN_FAILURE);
    }

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
        if (exception_mask & (1 << i)) {
            old_port[i] = task->exc_actions[i].port;
            task->exc_actions[i].port =
                ipc_port_copy_send(new_port);
            task->exc_actions[i].behavior = new_behavior;
            task->exc_actions[i].flavor = new_flavor;
            task->exc_actions[i].privileged = privileged;
        }
        else
            old_port[i] = IP_NULL;
    }

    itk_unlock(task);

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
        if (IP_VALID(old_port[i]))
            ipc_port_release_send(old_port[i]);

    if (IP_VALID(new_port))     /* consume send right */
        ipc_port_release_send(new_port);

    return (KERN_SUCCESS);
}
/*
 *  Routine:    thread/task_swap_exception_ports [kernel call]
 *      Sets the thread/task exception port, flavor and
 *      behavior for the exception types specified by the
 *      mask.
 *
 *      The old ports, behavior and flavors are returned.
 *      Count specifies the array sizes on input and
 *      the number of returned ports etc. on output. The
 *      arrays must be large enough to hold all the returned
 *      data, MIG returns an error otherwise. The masks
 *      array specifies the corresponding exception type(s).
 *
 *      Nothing locked. If successful, consumes
 *      the supplied send right.
 *
 *      Returns up to [in] CountCnt elements.
 *
 *      Returns:
 *      KERN_SUCCESS            Changed the special port.
 *      KERN_INVALID_ARGUMENT   The thread is null,
 *                              Illegal mask bit set.
 *                              Illegal exception behavior.
 *      KERN_FAILURE            The thread is dead.
 */
kern_return_t
thread_swap_exception_ports(
    thread_t                    thread,
    exception_mask_t            exception_mask,
    ipc_port_t                  new_port,
    exception_behavior_t        new_behavior,
    thread_state_flavor_t       new_flavor,
    exception_mask_array_t      masks,
    mach_msg_type_number_t      *CountCnt,
    exception_port_array_t      ports,
    exception_behavior_array_t  behaviors,
    thread_state_flavor_array_t flavors)
{
    ipc_port_t old_port[EXC_TYPES_COUNT];
    boolean_t privileged = current_task()->sec_token.val[0] == 0;
    unsigned int i, j, count;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    if (exception_mask & ~EXC_MASK_VALID)
        return (KERN_INVALID_ARGUMENT);

    if (IP_VALID(new_port)) {
        switch (new_behavior & ~MACH_EXCEPTION_CODES) {

        case EXCEPTION_DEFAULT:
        case EXCEPTION_STATE:
        case EXCEPTION_STATE_IDENTITY:
            break;

        default:
            return (KERN_INVALID_ARGUMENT);
        }
    }

    thread_mtx_lock(thread);

    if (!thread->active) {
        thread_mtx_unlock(thread);

        return (KERN_FAILURE);
    }

    if (thread->exc_actions == NULL) {
        ipc_thread_init_exc_actions(thread);
    }

    assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
    for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
        if (exception_mask & (1 << i)) {
            for (j = 0; j < count; ++j) {
                /*
                 * search for an identical entry, if found
                 * set corresponding mask for this exception.
                 */
                if (thread->exc_actions[i].port == ports[j] &&
                    thread->exc_actions[i].behavior == behaviors[j] &&
                    thread->exc_actions[i].flavor == flavors[j]) {
                    masks[j] |= (1 << i);
                    break;
                }
            }

            if (j == count) {
                masks[j] = (1 << i);
                ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);

                behaviors[j] = thread->exc_actions[i].behavior;
                flavors[j] = thread->exc_actions[i].flavor;
                ++count;
            }

            old_port[i] = thread->exc_actions[i].port;
            thread->exc_actions[i].port = ipc_port_copy_send(new_port);
            thread->exc_actions[i].behavior = new_behavior;
            thread->exc_actions[i].flavor = new_flavor;
            thread->exc_actions[i].privileged = privileged;
        }
        else
            old_port[i] = IP_NULL;
    }

    thread_mtx_unlock(thread);

    while (--i >= FIRST_EXCEPTION) {
        if (IP_VALID(old_port[i]))
            ipc_port_release_send(old_port[i]);
    }

    if (IP_VALID(new_port))     /* consume send right */
        ipc_port_release_send(new_port);

    return (KERN_SUCCESS);
}
kern_return_t
task_swap_exception_ports(
    task_t                      task,
    exception_mask_t            exception_mask,
    ipc_port_t                  new_port,
    exception_behavior_t        new_behavior,
    thread_state_flavor_t       new_flavor,
    exception_mask_array_t      masks,
    mach_msg_type_number_t      *CountCnt,
    exception_port_array_t      ports,
    exception_behavior_array_t  behaviors,
    thread_state_flavor_array_t flavors)
{
    ipc_port_t old_port[EXC_TYPES_COUNT];
    boolean_t privileged = current_task()->sec_token.val[0] == 0;
    unsigned int i, j, count;

    if (task == TASK_NULL)
        return (KERN_INVALID_ARGUMENT);

    if (exception_mask & ~EXC_MASK_VALID)
        return (KERN_INVALID_ARGUMENT);

    if (IP_VALID(new_port)) {
        switch (new_behavior & ~MACH_EXCEPTION_CODES) {

        case EXCEPTION_DEFAULT:
        case EXCEPTION_STATE:
        case EXCEPTION_STATE_IDENTITY:
            break;

        default:
            return (KERN_INVALID_ARGUMENT);
        }
    }

    itk_lock(task);

    if (task->itk_self == IP_NULL) {
        itk_unlock(task);

        return (KERN_FAILURE);
    }

    assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
    for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
        if (exception_mask & (1 << i)) {
            for (j = 0; j < count; j++) {
                /*
                 * search for an identical entry, if found
                 * set corresponding mask for this exception.
                 */
                if (task->exc_actions[i].port == ports[j] &&
                    task->exc_actions[i].behavior == behaviors[j] &&
                    task->exc_actions[i].flavor == flavors[j]) {
                    masks[j] |= (1 << i);
                    break;
                }
            }

            if (j == count) {
                masks[j] = (1 << i);
                ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
                behaviors[j] = task->exc_actions[i].behavior;
                flavors[j] = task->exc_actions[i].flavor;
                ++count;
            }

            old_port[i] = task->exc_actions[i].port;

            task->exc_actions[i].port = ipc_port_copy_send(new_port);
            task->exc_actions[i].behavior = new_behavior;
            task->exc_actions[i].flavor = new_flavor;
            task->exc_actions[i].privileged = privileged;
        }
        else
            old_port[i] = IP_NULL;
    }

    itk_unlock(task);

    while (--i >= FIRST_EXCEPTION) {
        if (IP_VALID(old_port[i]))
            ipc_port_release_send(old_port[i]);
    }

    if (IP_VALID(new_port))     /* consume send right */
        ipc_port_release_send(new_port);

    return (KERN_SUCCESS);
}
/*
 *  Routine:    thread/task_get_exception_ports [kernel call]
 *      Clones a send right for each of the thread/task's exception
 *      ports specified in the mask and returns the behaviour
 *      and flavor of said port.
 *
 *      Returns up to [in] CountCnt elements.
 *
 *      Returns:
 *      KERN_SUCCESS            Extracted a send right.
 *      KERN_INVALID_ARGUMENT   The thread is null,
 *                              Invalid special port,
 *                              Illegal mask bit set.
 *      KERN_FAILURE            The thread is dead.
 */
kern_return_t
thread_get_exception_ports(
    thread_t                    thread,
    exception_mask_t            exception_mask,
    exception_mask_array_t      masks,
    mach_msg_type_number_t      *CountCnt,
    exception_port_array_t      ports,
    exception_behavior_array_t  behaviors,
    thread_state_flavor_array_t flavors)
{
    unsigned int i, j, count;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    if (exception_mask & ~EXC_MASK_VALID)
        return (KERN_INVALID_ARGUMENT);

    thread_mtx_lock(thread);

    if (!thread->active) {
        thread_mtx_unlock(thread);

        return (KERN_FAILURE);
    }

    count = 0;

    if (thread->exc_actions == NULL) {
        goto done;
    }

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
        if (exception_mask & (1 << i)) {
            for (j = 0; j < count; ++j) {
                /*
                 * search for an identical entry, if found
                 * set corresponding mask for this exception.
                 */
                if (thread->exc_actions[i].port == ports[j] &&
                    thread->exc_actions[i].behavior == behaviors[j] &&
                    thread->exc_actions[i].flavor == flavors[j]) {
                    masks[j] |= (1 << i);
                    break;
                }
            }

            if (j == count) {
                masks[j] = (1 << i);
                ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);
                behaviors[j] = thread->exc_actions[i].behavior;
                flavors[j] = thread->exc_actions[i].flavor;
                ++count;

                if (count >= *CountCnt)
                    break;
            }
        }
    }

done:
    thread_mtx_unlock(thread);

    *CountCnt = count;

    return (KERN_SUCCESS);
}
kern_return_t
task_get_exception_ports(
    task_t                      task,
    exception_mask_t            exception_mask,
    exception_mask_array_t      masks,
    mach_msg_type_number_t      *CountCnt,
    exception_port_array_t      ports,
    exception_behavior_array_t  behaviors,
    thread_state_flavor_array_t flavors)
{
    unsigned int i, j, count;

    if (task == TASK_NULL)
        return (KERN_INVALID_ARGUMENT);

    if (exception_mask & ~EXC_MASK_VALID)
        return (KERN_INVALID_ARGUMENT);

    itk_lock(task);

    if (task->itk_self == IP_NULL) {
        itk_unlock(task);

        return (KERN_FAILURE);
    }

    count = 0;

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
        if (exception_mask & (1 << i)) {
            for (j = 0; j < count; ++j) {
                /*
                 * search for an identical entry, if found
                 * set corresponding mask for this exception.
                 */
                if (task->exc_actions[i].port == ports[j] &&
                    task->exc_actions[i].behavior == behaviors[j] &&
                    task->exc_actions[i].flavor == flavors[j]) {
                    masks[j] |= (1 << i);
                    break;
                }
            }

            if (j == count) {
                masks[j] = (1 << i);
                ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
                behaviors[j] = task->exc_actions[i].behavior;
                flavors[j] = task->exc_actions[i].flavor;
                ++count;

                if (count > *CountCnt)
                    break;
            }
        }
    }

    itk_unlock(task);

    *CountCnt = count;

    return (KERN_SUCCESS);
}