/*
 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public Source
 * License, Version 2.0.
 */
/*
 * Task and thread related IPC functions.
 */
#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mach_param.h>
#include <mach/task_special_ports.h>
#include <mach/thread_special_ports.h>
#include <mach/thread_status.h>
#include <mach/exception_types.h>
#include <mach/memory_object_types.h>
#include <mach/mach_traps.h>
#include <mach/task_server.h>
#include <mach/thread_act_server.h>
#include <mach/mach_host_server.h>
#include <mach/host_priv_server.h>
#include <mach/vm_map_server.h>

#include <kern/kern_types.h>
#include <kern/host.h>
#include <kern/ipc_kobject.h>
#include <kern/ipc_tt.h>
#include <kern/kalloc.h>
#include <kern/thread.h>
#include <kern/misc_protos.h>

#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>

#include <security/mac_mach_internal.h>
/* forward declarations */
task_t convert_port_to_locked_task(ipc_port_t port);
/*
 *	Routine:	ipc_task_init
 *	Purpose:
 *		Initialize a task's IPC state.
 *
 *		If non-null, some state will be inherited from the parent.
 *		The parent must be appropriately initialized.
 */
	kr = ipc_space_create(&ipc_table_entries[0], &space);
	if (kr != KERN_SUCCESS)
		panic("ipc_task_init");

	space->is_task = task;

	kport = ipc_port_alloc_kernel();
	if (kport == IP_NULL)
		panic("ipc_task_init");

	nport = ipc_port_alloc_kernel();
	if (nport == IP_NULL)
		panic("ipc_task_init");

	task->itk_self = kport;
	task->itk_nself = nport;
	task->itk_resume = IP_NULL; /* Lazily allocated on-demand */
	task->itk_sself = ipc_port_make_send(kport);
	task->itk_debug_control = IP_NULL;
	task->itk_space = space;
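	/*
	 * At this point the task has three identities wired up: itk_self is
	 * the kernel object port, itk_nself is the name port (which confers
	 * no privilege over the task), and itk_sself is the naked send right
	 * handed out to user space as the task's "self" port.
	 */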
	if (parent == TASK_NULL) {
		ipc_port_t port;

		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			task->exc_actions[i].port = IP_NULL;
		}

		kr = host_get_host_port(host_priv_self(), &port);
		assert(kr == KERN_SUCCESS);
		task->itk_host = port;

		task->itk_bootstrap = IP_NULL;
		task->itk_seatbelt = IP_NULL;
		task->itk_gssd = IP_NULL;
		task->itk_task_access = IP_NULL;

		for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
			task->itk_registered[i] = IP_NULL;
	} else {
		itk_lock(parent);
		assert(parent->itk_self != IP_NULL);

		/* inherit registered ports */

		for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
			task->itk_registered[i] =
				ipc_port_copy_send(parent->itk_registered[i]);

		/* inherit exception and bootstrap ports */

		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			task->exc_actions[i].port =
				ipc_port_copy_send(parent->exc_actions[i].port);
			task->exc_actions[i].flavor =
				parent->exc_actions[i].flavor;
			task->exc_actions[i].behavior =
				parent->exc_actions[i].behavior;
			task->exc_actions[i].privileged =
				parent->exc_actions[i].privileged;
		}

		task->itk_host =
			ipc_port_copy_send(parent->itk_host);

		task->itk_bootstrap =
			ipc_port_copy_send(parent->itk_bootstrap);

		task->itk_seatbelt =
			ipc_port_copy_send(parent->itk_seatbelt);

		task->itk_gssd =
			ipc_port_copy_send(parent->itk_gssd);

		task->itk_task_access =
			ipc_port_copy_send(parent->itk_task_access);

		itk_unlock(parent);
	}
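	/*
	 * Note that the registered, exception and special ports inherited
	 * above are duplicated with ipc_port_copy_send(), so the child holds
	 * its own send rights; releasing them later does not affect the
	 * parent's rights.
	 */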
/*
 *	Routine:	ipc_task_enable
 *	Purpose:
 *		Enable a task for IPC access.
 */

	kport = task->itk_self;
	if (kport != IP_NULL)
		ipc_kobject_set(kport, (ipc_kobject_t) task, IKOT_TASK);
	nport = task->itk_nself;
	if (nport != IP_NULL)
		ipc_kobject_set(nport, (ipc_kobject_t) task, IKOT_TASK_NAME);
/*
 *	Routine:	ipc_task_disable
 *	Purpose:
 *		Disable IPC access to a task.
 */

	kport = task->itk_self;
	if (kport != IP_NULL)
		ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
	nport = task->itk_nself;
	if (nport != IP_NULL)
		ipc_kobject_set(nport, IKO_NULL, IKOT_NONE);

	rport = task->itk_resume;
	if (rport != IP_NULL) {
		/*
		 * From this point onwards this task is no longer accepting
		 * new suspensions.
		 *
		 * There are still outstanding suspensions on this task,
		 * even as it is being torn down. Disconnect the task
		 * from the rport, thereby "orphaning" the rport. The rport
		 * itself will go away only when the last suspension holder
		 * destroys his SO right to it -- when he either
		 * exits, or tries to actually use that last SO right to
		 * resume this (now non-existent) task.
		 */
		ipc_kobject_set(rport, IKO_NULL, IKOT_NONE);
	}
/*
 *	Routine:	ipc_task_terminate
 *	Purpose:
 *		Clean up and destroy a task's IPC state.
 *	Conditions:
 *		Nothing locked.  The task must be suspended.
 *		(Or the current thread must be in the task.)
 */

	kport = task->itk_self;

	if (kport == IP_NULL) {
		/* the task is already terminated (can this happen?) */
		itk_unlock(task);
		return;
	}

	task->itk_self = IP_NULL;

	nport = task->itk_nself;
	assert(nport != IP_NULL);
	task->itk_nself = IP_NULL;

	rport = task->itk_resume;
	task->itk_resume = IP_NULL;

	/* release the naked send rights */

	if (IP_VALID(task->itk_sself))
		ipc_port_release_send(task->itk_sself);

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (IP_VALID(task->exc_actions[i].port)) {
			ipc_port_release_send(task->exc_actions[i].port);
		}
	}

	if (IP_VALID(task->itk_host))
		ipc_port_release_send(task->itk_host);

	if (IP_VALID(task->itk_bootstrap))
		ipc_port_release_send(task->itk_bootstrap);

	if (IP_VALID(task->itk_seatbelt))
		ipc_port_release_send(task->itk_seatbelt);

	if (IP_VALID(task->itk_gssd))
		ipc_port_release_send(task->itk_gssd);

	if (IP_VALID(task->itk_task_access))
		ipc_port_release_send(task->itk_task_access);

	if (IP_VALID(task->itk_debug_control))
		ipc_port_release_send(task->itk_debug_control);

	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
		if (IP_VALID(task->itk_registered[i]))
			ipc_port_release_send(task->itk_registered[i]);

	/* destroy the kernel ports */
	ipc_port_dealloc_kernel(kport);
	ipc_port_dealloc_kernel(nport);
	if (rport != IP_NULL)
		ipc_port_dealloc_kernel(rport);

	itk_lock_destroy(task);
/*
 *	Routine:	ipc_task_reset
 *	Purpose:
 *		Reset a task's IPC state to protect it when
 *		it enters an elevated security context. The
 *		task name port can remain the same - since
 *		it represents no specific privilege.
 *	Conditions:
 *		Nothing locked.  The task must be suspended.
 *		(Or the current thread must be in the task.)
 */

	ipc_port_t old_kport, new_kport;
	ipc_port_t old_sself;
	ipc_port_t old_exc_actions[EXC_TYPES_COUNT];

	new_kport = ipc_port_alloc_kernel();
	if (new_kport == IP_NULL)
		panic("ipc_task_reset");

	old_kport = task->itk_self;

	if (old_kport == IP_NULL) {
		/* the task is already terminated (can this happen?) */
		itk_unlock(task);
		ipc_port_dealloc_kernel(new_kport);
		return;
	}

	task->itk_self = new_kport;
	old_sself = task->itk_sself;
	task->itk_sself = ipc_port_make_send(new_kport);
	ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
	ipc_kobject_set(new_kport, (ipc_kobject_t) task, IKOT_TASK);

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (!task->exc_actions[i].privileged) {
			old_exc_actions[i] = task->exc_actions[i].port;
			task->exc_actions[i].port = IP_NULL;
		} else {
			old_exc_actions[i] = IP_NULL;
		}
	}

	if (IP_VALID(task->itk_debug_control)) {
		ipc_port_release_send(task->itk_debug_control);
	}
	task->itk_debug_control = IP_NULL;

	/* release the naked send rights */

	if (IP_VALID(old_sself))
		ipc_port_release_send(old_sself);

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (IP_VALID(old_exc_actions[i])) {
			ipc_port_release_send(old_exc_actions[i]);
		}
	}

	/* destroy the kernel port */
	ipc_port_dealloc_kernel(old_kport);
/*
 *	Routine:	ipc_thread_init
 *	Purpose:
 *		Initialize a thread's IPC state.
 */

	kport = ipc_port_alloc_kernel();
	if (kport == IP_NULL)
		panic("ipc_thread_init");

	thread->ith_self = kport;
	thread->ith_sself = ipc_port_make_send(kport);
	thread->exc_actions = NULL;

	ipc_kobject_set(kport, (ipc_kobject_t)thread, IKOT_THREAD);

#if IMPORTANCE_INHERITANCE
	thread->ith_assertions = 0;
#endif

	ipc_kmsg_queue_init(&thread->ith_messages);

	thread->ith_rpc_reply = IP_NULL;
void
ipc_thread_init_exc_actions(
	thread_t	thread)
{
	assert(thread->exc_actions == NULL);

	thread->exc_actions = kalloc(sizeof(struct exception_action) * EXC_TYPES_COUNT);
	bzero(thread->exc_actions, sizeof(struct exception_action) * EXC_TYPES_COUNT);
}
void
ipc_thread_destroy_exc_actions(
	thread_t	thread)
{
	if (thread->exc_actions != NULL) {
		kfree(thread->exc_actions,
		      sizeof(struct exception_action) * EXC_TYPES_COUNT);
		thread->exc_actions = NULL;
	}
}
	ipc_port_t kport = thread->ith_self;

	if (kport != IP_NULL)
		ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
/*
 *	Routine:	ipc_thread_terminate
 *	Purpose:
 *		Clean up and destroy a thread's IPC state.
 */

void
ipc_thread_terminate(
	thread_t	thread)
{
	ipc_port_t	kport = thread->ith_self;

	if (kport != IP_NULL) {
		int i;

		if (IP_VALID(thread->ith_sself))
			ipc_port_release_send(thread->ith_sself);

		thread->ith_sself = thread->ith_self = IP_NULL;

		if (thread->exc_actions != NULL) {
			for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
				if (IP_VALID(thread->exc_actions[i].port))
					ipc_port_release_send(thread->exc_actions[i].port);
			}
			ipc_thread_destroy_exc_actions(thread);
		}

		ipc_port_dealloc_kernel(kport);
	}

#if IMPORTANCE_INHERITANCE
	assert(thread->ith_assertions == 0);
#endif

	assert(ipc_kmsg_queue_empty(&thread->ith_messages));

	if (thread->ith_rpc_reply != IP_NULL)
		ipc_port_dealloc_reply(thread->ith_rpc_reply);

	thread->ith_rpc_reply = IP_NULL;
}
/*
 *	Routine:	ipc_thread_reset
 *	Purpose:
 *		Reset the IPC state for a given Mach thread when
 *		its task enters an elevated security context.
 *		Both the thread port and its exception ports have
 *		to be reset.  Its RPC reply port cannot have any
 *		rights outstanding, so it should be fine.
 */
	ipc_port_t old_kport, new_kport;
	ipc_port_t old_sself;
	ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
	boolean_t  has_old_exc_actions = FALSE;

	new_kport = ipc_port_alloc_kernel();
	if (new_kport == IP_NULL)
		panic("ipc_thread_reset");
	thread_mtx_lock(thread);

	old_kport = thread->ith_self;

	if (old_kport == IP_NULL) {
		/* the thread is already terminated (can this happen?) */
		thread_mtx_unlock(thread);
		ipc_port_dealloc_kernel(new_kport);
		return;
	}

	thread->ith_self = new_kport;
	old_sself = thread->ith_sself;
	thread->ith_sself = ipc_port_make_send(new_kport);
	ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
	ipc_kobject_set(new_kport, (ipc_kobject_t) thread, IKOT_THREAD);

	/*
	 * Only ports that were set by root-owned processes
	 * (privileged ports) should survive
	 */
	if (thread->exc_actions != NULL) {
		has_old_exc_actions = TRUE;
		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			if (thread->exc_actions[i].privileged) {
				old_exc_actions[i] = IP_NULL;
			} else {
				old_exc_actions[i] = thread->exc_actions[i].port;
				thread->exc_actions[i].port = IP_NULL;
			}
		}
	}

	thread_mtx_unlock(thread);

	/* release the naked send rights */

	if (IP_VALID(old_sself))
		ipc_port_release_send(old_sself);

	if (has_old_exc_actions) {
		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			ipc_port_release_send(old_exc_actions[i]);
		}
	}

	/* destroy the kernel port */
	ipc_port_dealloc_kernel(old_kport);
/*
 *	Routine:	retrieve_task_self_fast
 *	Purpose:
 *		Optimized version of retrieve_task_self,
 *		that only works for the current task.
 *
 *		Return a send right (possibly null/dead)
 *		for the task's user-visible self port.
 */
ipc_port_t
retrieve_task_self_fast(
	register task_t		task)
{
	register ipc_port_t port;

	assert(task == current_task());

	itk_lock(task);
	assert(task->itk_self != IP_NULL);

	if ((port = task->itk_sself) == task->itk_self) {
		/* no interposing */

		ip_lock(port);
		assert(ip_active(port));
		ip_reference(port);
		port->ip_srights++;
		ip_unlock(port);
	} else
		port = ipc_port_copy_send(port);
	itk_unlock(task);

	return port;
}
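/*
 * In both retrieve_task_self_fast and retrieve_thread_self_fast the
 * "no interposing" case (itk_sself == itk_self, ith_sself == ith_self)
 * mints a send right directly on the kernel port; otherwise the send right
 * of whatever port was installed via the corresponding
 * *_set_special_port(..._KERNEL_PORT, ...) call is copied instead.
 */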
/*
 *	Routine:	retrieve_thread_self_fast
 *	Purpose:
 *		Return a send right (possibly null/dead)
 *		for the thread's user-visible self port.
 *
 *		Only works for the current thread.
 */
ipc_port_t
retrieve_thread_self_fast(
	thread_t		thread)
{
	register ipc_port_t port;

	assert(thread == current_thread());

	thread_mtx_lock(thread);

	assert(thread->ith_self != IP_NULL);

	if ((port = thread->ith_sself) == thread->ith_self) {
		/* no interposing */

		ip_lock(port);
		assert(ip_active(port));
		ip_reference(port);
		port->ip_srights++;
		ip_unlock(port);
	} else
		port = ipc_port_copy_send(port);

	thread_mtx_unlock(thread);

	return port;
}
/*
 *	Routine:	task_self_trap [mach trap]
 *	Purpose:
 *		Give the caller send rights for his own task port.
 *	Returns:
 *		MACH_PORT_NULL if there are any resource failures
 *		or other errors.
 */
mach_port_name_t
task_self_trap(
	__unused struct task_self_trap_args *args)
{
	task_t task = current_task();
	ipc_port_t sright;
	mach_port_name_t name;

	sright = retrieve_task_self_fast(task);
	name = ipc_port_copyout_send(sright, task->itk_space);
	return name;
}
/*
 *	Routine:	thread_self_trap [mach trap]
 *	Purpose:
 *		Give the caller send rights for his own thread port.
 *	Returns:
 *		MACH_PORT_NULL if there are any resource failures
 *		or other errors.
 */
mach_port_name_t
thread_self_trap(
	__unused struct thread_self_trap_args *args)
{
	thread_t thread = current_thread();
	task_t task = thread->task;
	ipc_port_t sright;
	mach_port_name_t name;

	sright = retrieve_thread_self_fast(thread);
	name = ipc_port_copyout_send(sright, task->itk_space);
	return name;
}
/*
 *	Routine:	mach_reply_port [mach trap]
 *	Purpose:
 *		Allocate a port for the caller.
 *	Returns:
 *		MACH_PORT_NULL if there are any resource failures
 *		or other errors.
 */
mach_port_name_t
mach_reply_port(
	__unused struct mach_reply_port_args *args)
{
	ipc_port_t port;
	mach_port_name_t name;
	kern_return_t kr;

	kr = ipc_port_alloc(current_task()->itk_space, &name, &port);
	if (kr == KERN_SUCCESS)
		ip_unlock(port);
	else
		name = MACH_PORT_NULL;
	return name;
}
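/*
 * Illustrative user-space sketch (an assumption for clarity, not part of
 * this file): MIG stubs obtain their reply port through this trap, and a
 * hand-rolled RPC can do the same, assuming the user-level
 * mach_reply_port() wrapper:
 *
 *	mach_port_t reply = mach_reply_port();
 *	msg.header.msgh_local_port = reply;
 *	msg.header.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND,
 *	                                      MACH_MSG_TYPE_MAKE_SEND_ONCE);
 */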
/*
 *	Routine:	thread_get_special_port [kernel call]
 *	Purpose:
 *		Clones a send right for one of the thread's
 *		special ports.
 *	Returns:
 *		KERN_SUCCESS		Extracted a send right.
 *		KERN_INVALID_ARGUMENT	The thread is null.
 *		KERN_FAILURE		The thread is dead.
 *		KERN_INVALID_ARGUMENT	Invalid special port.
 */
kern_return_t
thread_get_special_port(
	thread_t	thread,
	int		which,
	ipc_port_t	*portp)
{
	kern_return_t	result = KERN_SUCCESS;
	ipc_port_t	*whichp;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	switch (which) {

	case THREAD_KERNEL_PORT:
		whichp = &thread->ith_sself;
		break;

	default:
		return (KERN_INVALID_ARGUMENT);
	}

	thread_mtx_lock(thread);

	if (thread->active)
		*portp = ipc_port_copy_send(*whichp);
	else
		result = KERN_FAILURE;

	thread_mtx_unlock(thread);

	return (result);
}
/*
 *	Routine:	thread_set_special_port [kernel call]
 *	Purpose:
 *		Changes one of the thread's special ports,
 *		setting it to the supplied send right.
 *	Conditions:
 *		Nothing locked.  If successful, consumes
 *		the supplied send right.
 *	Returns:
 *		KERN_SUCCESS		Changed the special port.
 *		KERN_INVALID_ARGUMENT	The thread is null.
 *		KERN_FAILURE		The thread is dead.
 *		KERN_INVALID_ARGUMENT	Invalid special port.
 */
kern_return_t
thread_set_special_port(
	thread_t	thread,
	int		which,
	ipc_port_t	port)
{
	kern_return_t	result = KERN_SUCCESS;
	ipc_port_t	*whichp, old = IP_NULL;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	switch (which) {

	case THREAD_KERNEL_PORT:
		whichp = &thread->ith_sself;
		break;

	default:
		return (KERN_INVALID_ARGUMENT);
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		old = *whichp;
		*whichp = port;
	}
	else
		result = KERN_FAILURE;

	thread_mtx_unlock(thread);

	if (IP_VALID(old))
		ipc_port_release_send(old);

	return (result);
}
/*
 *	Routine:	task_get_special_port [kernel call]
 *	Purpose:
 *		Clones a send right for one of the task's
 *		special ports.
 *	Returns:
 *		KERN_SUCCESS		Extracted a send right.
 *		KERN_INVALID_ARGUMENT	The task is null.
 *		KERN_FAILURE		The task/space is dead.
 *		KERN_INVALID_ARGUMENT	Invalid special port.
 */
kern_return_t
task_get_special_port(
	task_t		task,
	int		which,
	ipc_port_t	*portp)
{
	ipc_port_t port;

	if (task == TASK_NULL)
		return KERN_INVALID_ARGUMENT;

	itk_lock(task);
	if (task->itk_self == IP_NULL) {
		itk_unlock(task);
		return KERN_FAILURE;
	}

	switch (which) {
	case TASK_KERNEL_PORT:
		port = ipc_port_copy_send(task->itk_sself);
		break;

	case TASK_NAME_PORT:
		port = ipc_port_make_send(task->itk_nself);
		break;

	case TASK_HOST_PORT:
		port = ipc_port_copy_send(task->itk_host);
		break;

	case TASK_BOOTSTRAP_PORT:
		port = ipc_port_copy_send(task->itk_bootstrap);
		break;

	case TASK_SEATBELT_PORT:
		port = ipc_port_copy_send(task->itk_seatbelt);
		break;

	case TASK_ACCESS_PORT:
		port = ipc_port_copy_send(task->itk_task_access);
		break;

	case TASK_DEBUG_CONTROL_PORT:
		port = ipc_port_copy_send(task->itk_debug_control);
		break;

	default:
		itk_unlock(task);
		return KERN_INVALID_ARGUMENT;
	}
	itk_unlock(task);

	*portp = port;
	return KERN_SUCCESS;
}
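/*
 * Illustrative user-space sketch (an assumption for clarity, not part of
 * this file; assumes <mach/mach.h>): fetching the caller's bootstrap port
 * is the classic use of this call, and the result is a new send right the
 * caller must eventually deallocate:
 *
 *	mach_port_t bootstrap = MACH_PORT_NULL;
 *	kern_return_t kr = task_get_special_port(mach_task_self(),
 *	                                         TASK_BOOTSTRAP_PORT,
 *	                                         &bootstrap);
 *	...
 *	mach_port_deallocate(mach_task_self(), bootstrap);
 */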
/*
 *	Routine:	task_set_special_port [kernel call]
 *	Purpose:
 *		Changes one of the task's special ports,
 *		setting it to the supplied send right.
 *	Conditions:
 *		Nothing locked.  If successful, consumes
 *		the supplied send right.
 *	Returns:
 *		KERN_SUCCESS		Changed the special port.
 *		KERN_INVALID_ARGUMENT	The task is null.
 *		KERN_FAILURE		The task/space is dead.
 *		KERN_INVALID_ARGUMENT	Invalid special port.
 *		KERN_NO_ACCESS		Attempted overwrite of seatbelt port.
 */
kern_return_t
task_set_special_port(
	task_t		task,
	int		which,
	ipc_port_t	port)
{
	ipc_port_t	*whichp;
	ipc_port_t	old;

	if (task == TASK_NULL)
		return KERN_INVALID_ARGUMENT;

	switch (which) {
	case TASK_KERNEL_PORT:
		whichp = &task->itk_sself;
		break;

	case TASK_HOST_PORT:
		whichp = &task->itk_host;
		break;

	case TASK_BOOTSTRAP_PORT:
		whichp = &task->itk_bootstrap;
		break;

	case TASK_SEATBELT_PORT:
		whichp = &task->itk_seatbelt;
		break;

	case TASK_ACCESS_PORT:
		whichp = &task->itk_task_access;
		break;

	case TASK_DEBUG_CONTROL_PORT:
		whichp = &task->itk_debug_control;
		break;

	default:
		return KERN_INVALID_ARGUMENT;
	}

	itk_lock(task);
	if (task->itk_self == IP_NULL) {
		itk_unlock(task);
		return KERN_FAILURE;
	}

	/* do not allow overwrite of seatbelt or task access ports */
	if ((TASK_SEATBELT_PORT == which || TASK_ACCESS_PORT == which)
	    && IP_VALID(*whichp)) {
		itk_unlock(task);
		return KERN_NO_ACCESS;
	}

	old = *whichp;
	*whichp = port;
	itk_unlock(task);

	if (IP_VALID(old))
		ipc_port_release_send(old);
	return KERN_SUCCESS;
}
/*
 *	Routine:	mach_ports_register [kernel call]
 *	Purpose:
 *		Stash a handful of port send rights in the task.
 *		Child tasks will inherit these rights, but they
 *		must use mach_ports_lookup to acquire them.
 *
 *		The rights are supplied in a (wired) kalloc'd segment.
 *		Rights which aren't supplied are assumed to be null.
 *	Conditions:
 *		Nothing locked.  If successful, consumes
 *		the supplied rights and memory.
 *	Returns:
 *		KERN_SUCCESS		Stashed the port rights.
 *		KERN_INVALID_ARGUMENT	The task is null.
 *		KERN_INVALID_ARGUMENT	The task is dead.
 *		KERN_INVALID_ARGUMENT	The memory param is null.
 *		KERN_INVALID_ARGUMENT	Too many port rights supplied.
 */
kern_return_t
mach_ports_register(
	task_t			task,
	mach_port_array_t	memory,
	mach_msg_type_number_t	portsCnt)
{
	ipc_port_t ports[TASK_PORT_REGISTER_MAX];
	unsigned int i;

	if ((task == TASK_NULL) ||
	    (portsCnt > TASK_PORT_REGISTER_MAX) ||
	    (portsCnt && memory == NULL))
		return KERN_INVALID_ARGUMENT;

	/*
	 *	Pad the port rights with nulls.
	 */

	for (i = 0; i < portsCnt; i++)
		ports[i] = memory[i];
	for (; i < TASK_PORT_REGISTER_MAX; i++)
		ports[i] = IP_NULL;

	itk_lock(task);
	if (task->itk_self == IP_NULL) {
		itk_unlock(task);
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 *	Replace the old send rights with the new.
	 *	Release the old rights after unlocking.
	 */

	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
		ipc_port_t old;

		old = task->itk_registered[i];
		task->itk_registered[i] = ports[i];
		ports[i] = old;
	}

	itk_unlock(task);

	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
		if (IP_VALID(ports[i]))
			ipc_port_release_send(ports[i]);

	/*
	 *	Now that the operation is known to be successful,
	 *	we can free the memory.
	 */

	if (portsCnt != 0)
		kfree(memory,
		      (vm_size_t) (portsCnt * sizeof(mach_port_t)));

	return KERN_SUCCESS;
}
/*
 *	Routine:	mach_ports_lookup [kernel call]
 *	Purpose:
 *		Retrieves (clones) the stashed port send rights.
 *	Conditions:
 *		Nothing locked.  If successful, the caller gets
 *		rights and memory.
 *	Returns:
 *		KERN_SUCCESS		Retrieved the send rights.
 *		KERN_INVALID_ARGUMENT	The task is null.
 *		KERN_INVALID_ARGUMENT	The task is dead.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
 */
kern_return_t
mach_ports_lookup(
	task_t			task,
	mach_port_array_t	*portsp,
	mach_msg_type_number_t	*portsCnt)
{
	void		*memory;
	vm_size_t	size;
	ipc_port_t	*ports;
	int		i;

	if (task == TASK_NULL)
		return KERN_INVALID_ARGUMENT;

	size = (vm_size_t) (TASK_PORT_REGISTER_MAX * sizeof(ipc_port_t));

	memory = kalloc(size);
	if (memory == 0)
		return KERN_RESOURCE_SHORTAGE;

	itk_lock(task);
	if (task->itk_self == IP_NULL) {
		itk_unlock(task);

		kfree(memory, size);
		return KERN_INVALID_ARGUMENT;
	}

	ports = (ipc_port_t *) memory;

	/*
	 *	Clone port rights. Because kalloc'd memory
	 *	is wired, we won't fault while holding the task lock.
	 */

	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
		ports[i] = ipc_port_copy_send(task->itk_registered[i]);

	itk_unlock(task);

	*portsp = (mach_port_array_t) ports;
	*portsCnt = TASK_PORT_REGISTER_MAX;
	return KERN_SUCCESS;
}
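/*
 * Illustrative user-space sketch (an assumption for clarity, not part of
 * this file; assumes <mach/mach.h>): a parent stashes up to
 * TASK_PORT_REGISTER_MAX send rights which a child can later retrieve;
 * "some_port" here is a hypothetical send right owned by the caller:
 *
 *	mach_port_t set[1] = { some_port };
 *	kr = mach_ports_register(mach_task_self(), set, 1);
 *
 *	// later, e.g. in a child task:
 *	mach_port_array_t looked_up;
 *	mach_msg_type_number_t cnt;
 *	kr = mach_ports_lookup(mach_task_self(), &looked_up, &cnt);
 */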
/*
 *	Routine:	convert_port_to_locked_task
 *	Purpose:
 *		Internal helper routine to convert from a port to a locked
 *		task.  Used by several routines that try to convert from a
 *		task port to a reference on some task related object.
 *	Conditions:
 *		Nothing locked, blocking OK.
 */
task_t
convert_port_to_locked_task(ipc_port_t port)
{
	int try_failed_count = 0;

	while (IP_VALID(port)) {
		task_t task;

		ip_lock(port);
		if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK)) {
			ip_unlock(port);
			return TASK_NULL;
		}
		task = (task_t) port->ip_kobject;
		assert(task != TASK_NULL);

		/*
		 * Normal lock ordering puts task_lock() before ip_lock().
		 * Attempt out-of-order locking here.
		 */
		if (task_lock_try(task)) {
			ip_unlock(port);
			return task;
		}
		try_failed_count++;

		ip_unlock(port);
		mutex_pause(try_failed_count);
	}
	return TASK_NULL;
}
/*
 *	Routine:	convert_port_to_task
 *	Purpose:
 *		Convert from a port to a task.
 *		Doesn't consume the port ref; produces a task ref,
 *		which may be null.
 */
task_t
convert_port_to_task(
	ipc_port_t	port)
{
	task_t	task = TASK_NULL;

	if (IP_VALID(port)) {
		ip_lock(port);

		if (ip_active(port) &&
		    ip_kotype(port) == IKOT_TASK) {
			task = (task_t)port->ip_kobject;
			assert(task != TASK_NULL);

			task_reference_internal(task);
		}

		ip_unlock(port);
	}

	return (task);
}
/*
 *	Routine:	convert_port_to_task_name
 *	Purpose:
 *		Convert from a port to a task name.
 *		Doesn't consume the port ref; produces a task name ref,
 *		which may be null.
 */
task_name_t
convert_port_to_task_name(
	ipc_port_t	port)
{
	task_name_t	task = TASK_NULL;

	if (IP_VALID(port)) {
		ip_lock(port);

		if (ip_active(port) &&
		    (ip_kotype(port) == IKOT_TASK ||
		     ip_kotype(port) == IKOT_TASK_NAME)) {
			task = (task_name_t)port->ip_kobject;
			assert(task != TASK_NAME_NULL);

			task_reference_internal(task);
		}

		ip_unlock(port);
	}

	return (task);
}
/*
 *	Routine:	convert_port_to_task_suspension_token
 *	Purpose:
 *		Convert from a port to a task suspension token.
 *		Doesn't consume the port ref; produces a suspension token ref,
 *		which may be null.
 */
task_suspension_token_t
convert_port_to_task_suspension_token(
	ipc_port_t	port)
{
	task_suspension_token_t	task = TASK_NULL;

	if (IP_VALID(port)) {
		ip_lock(port);

		if (ip_active(port) &&
		    ip_kotype(port) == IKOT_TASK_RESUME) {
			task = (task_suspension_token_t)port->ip_kobject;
			assert(task != TASK_NULL);

			task_reference_internal(task);
		}

		ip_unlock(port);
	}

	return (task);
}
/*
 *	Routine:	convert_port_to_space
 *	Purpose:
 *		Convert from a port to a space.
 *		Doesn't consume the port ref; produces a space ref,
 *		which may be null.
 */
ipc_space_t
convert_port_to_space(
	ipc_port_t	port)
{
	ipc_space_t space;
	task_t task;

	task = convert_port_to_locked_task(port);

	if (task == TASK_NULL)
		return IPC_SPACE_NULL;

	if (!task->active) {
		task_unlock(task);
		return IPC_SPACE_NULL;
	}

	space = task->itk_space;
	is_reference(space);
	task_unlock(task);
	return (space);
}
/*
 *	Routine:	convert_port_to_map
 *	Purpose:
 *		Convert from a port to a map.
 *		Doesn't consume the port ref; produces a map ref,
 *		which may be null.
 */
vm_map_t
convert_port_to_map(
	ipc_port_t	port)
{
	task_t task;
	vm_map_t map;

	task = convert_port_to_locked_task(port);

	if (task == TASK_NULL)
		return VM_MAP_NULL;

	if (!task->active) {
		task_unlock(task);
		return VM_MAP_NULL;
	}

	map = task->map;
	vm_map_reference_swap(map);
	task_unlock(task);
	return (map);
}
/*
 *	Routine:	convert_port_to_thread
 *	Purpose:
 *		Convert from a port to a thread.
 *		Doesn't consume the port ref; produces a thread ref,
 *		which may be null.
 */
thread_t
convert_port_to_thread(
	ipc_port_t	port)
{
	thread_t	thread = THREAD_NULL;

	if (IP_VALID(port)) {
		ip_lock(port);

		if (ip_active(port) &&
		    ip_kotype(port) == IKOT_THREAD) {
			thread = (thread_t)port->ip_kobject;
			assert(thread != THREAD_NULL);

			thread_reference_internal(thread);
		}

		ip_unlock(port);
	}

	return (thread);
}
/*
 *	Routine:	port_name_to_thread
 *	Purpose:
 *		Convert from a port name to a thread reference.
 *		A name of MACH_PORT_NULL is valid for the null thread.
 */
thread_t
port_name_to_thread(
	mach_port_name_t	name)
{
	thread_t	thread = THREAD_NULL;
	ipc_port_t	kport;

	if (MACH_PORT_VALID(name)) {
		if (ipc_object_copyin(current_space(), name,
			MACH_MSG_TYPE_COPY_SEND,
			(ipc_object_t *)&kport) != KERN_SUCCESS)
			return (THREAD_NULL);

		thread = convert_port_to_thread(kport);

		if (IP_VALID(kport))
			ipc_port_release_send(kport);
	}

	return (thread);
}
task_t
port_name_to_task(
	mach_port_name_t name)
{
	ipc_port_t kern_port;
	kern_return_t kr;
	task_t task = TASK_NULL;

	if (MACH_PORT_VALID(name)) {
		kr = ipc_object_copyin(current_space(), name,
			MACH_MSG_TYPE_COPY_SEND,
			(ipc_object_t *) &kern_port);
		if (kr != KERN_SUCCESS)
			return TASK_NULL;

		task = convert_port_to_task(kern_port);

		if (IP_VALID(kern_port))
			ipc_port_release_send(kern_port);
	}
	return task;
}
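/*
 * Both port_name_to_thread and port_name_to_task copy in a send right from
 * the caller's space, convert it to an object reference, and then release
 * that send right again, so on return only the thread/task reference (if
 * any) is held.
 */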
/*
 *	Routine:	convert_task_to_port
 *	Purpose:
 *		Convert from a task to a port.
 *		Consumes a task ref; produces a naked send right
 *		which may be invalid.
 */
ipc_port_t
convert_task_to_port(
	task_t		task)
{
	ipc_port_t port;

	itk_lock(task);
	if (task->itk_self != IP_NULL)
		port = ipc_port_make_send(task->itk_self);
	else
		port = IP_NULL;
	itk_unlock(task);

	task_deallocate(task);
	return port;
}
/*
 *	Routine:	convert_task_suspend_token_to_port
 *	Purpose:
 *		Convert from a task suspension token to a port.
 *		Consumes a task suspension token ref; produces a naked send-once right
 *		which may be invalid.
 */
ipc_port_t
convert_task_suspension_token_to_port(
	task_suspension_token_t		task)
{
	ipc_port_t port;

	if (task->itk_resume == IP_NULL) {
		task->itk_resume = ipc_port_alloc_kernel();
		if (!IP_VALID(task->itk_resume)) {
			panic("failed to create resume port");
		}

		ipc_kobject_set(task->itk_resume, (ipc_kobject_t) task, IKOT_TASK_RESUME);
	}

	/*
	 * Create a send-once right for each instance of a direct user-called
	 * task_suspend2 call. Each time one of these send-once rights is abandoned,
	 * the notification handler will resume the target task.
	 */
	port = ipc_port_make_sonce(task->itk_resume);
	assert(IP_VALID(port));

	task_suspension_token_deallocate(task);

	return port;
}
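/*
 * Illustrative user-space sketch (an assumption for clarity, not part of
 * this file; assumes <mach/mach.h>): each task_suspend2() call hands back
 * one of the send-once rights minted above, and consuming or destroying
 * that right resumes the task; "target" is a hypothetical task port:
 *
 *	task_suspension_token_t token;
 *	kern_return_t kr = task_suspend2(target, &token);
 *	...
 *	kr = task_resume2(token);
 */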
/*
 *	Routine:	convert_task_name_to_port
 *	Purpose:
 *		Convert from a task name ref to a port.
 *		Consumes a task name ref; produces a naked send right
 *		which may be invalid.
 */
1535 task_name_t task_name
)
1539 itk_lock(task_name
);
1540 if (task_name
->itk_nself
!= IP_NULL
)
1541 port
= ipc_port_make_send(task_name
->itk_nself
);
1544 itk_unlock(task_name
);
1546 task_name_deallocate(task_name
);
/*
 *	Routine:	convert_thread_to_port
 *	Purpose:
 *		Convert from a thread to a port.
 *		Consumes a thread ref; produces a naked send right
 *		which may be invalid.
 */
ipc_port_t
convert_thread_to_port(
	thread_t	thread)
{
	ipc_port_t	port;

	thread_mtx_lock(thread);

	if (thread->ith_self != IP_NULL)
		port = ipc_port_make_send(thread->ith_self);
	else
		port = IP_NULL;

	thread_mtx_unlock(thread);

	thread_deallocate(thread);

	return (port);
}
/*
 *	Routine:	space_deallocate
 *	Purpose:
 *		Deallocate a space ref produced by convert_port_to_space.
 */

	if (space != IS_NULL)
		is_release(space);
/*
 *	Routine:	thread/task_set_exception_ports [kernel call]
 *	Purpose:
 *		Sets the thread/task exception port, flavor and
 *		behavior for the exception types specified by the mask.
 *		There will be one send right per exception per valid
 *		combination.
 *	Conditions:
 *		Nothing locked.  If successful, consumes
 *		the supplied send right.
 *	Returns:
 *		KERN_SUCCESS		Changed the special port.
 *		KERN_INVALID_ARGUMENT	The thread is null,
 *					Illegal mask bit set.
 *					Illegal exception behavior.
 *		KERN_FAILURE		The thread is dead.
 */
kern_return_t
thread_set_exception_ports(
	thread_t			thread,
	exception_mask_t		exception_mask,
	ipc_port_t			new_port,
	exception_behavior_t		new_behavior,
	thread_state_flavor_t		new_flavor)
{
	ipc_port_t	old_port[EXC_TYPES_COUNT];
	boolean_t	privileged = current_task()->sec_token.val[0] == 0;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (exception_mask & ~EXC_MASK_VALID)
		return (KERN_INVALID_ARGUMENT);

	if (IP_VALID(new_port)) {
		switch (new_behavior & ~MACH_EXCEPTION_CODES) {

		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
			break;

		default:
			return (KERN_INVALID_ARGUMENT);
		}
	}

	/*
	 * Check the validity of the thread_state_flavor by calling the
	 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
	 * osfmk/mach/ARCHITECTURE/thread_status.h
	 */
	if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor))
		return (KERN_INVALID_ARGUMENT);
	thread_mtx_lock(thread);

	if (!thread->active) {
		thread_mtx_unlock(thread);

		return (KERN_FAILURE);
	}

	if (thread->exc_actions == NULL) {
		ipc_thread_init_exc_actions(thread);
	}

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
		if (exception_mask & (1 << i)) {
			old_port[i] = thread->exc_actions[i].port;
			thread->exc_actions[i].port = ipc_port_copy_send(new_port);
			thread->exc_actions[i].behavior = new_behavior;
			thread->exc_actions[i].flavor = new_flavor;
			thread->exc_actions[i].privileged = privileged;
		}
		else
			old_port[i] = IP_NULL;
	}
	thread_mtx_unlock(thread);

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
		if (IP_VALID(old_port[i]))
			ipc_port_release_send(old_port[i]);

	if (IP_VALID(new_port))		 /* consume send right */
		ipc_port_release_send(new_port);

	return (KERN_SUCCESS);
}
kern_return_t
task_set_exception_ports(
	task_t				task,
	exception_mask_t		exception_mask,
	ipc_port_t			new_port,
	exception_behavior_t		new_behavior,
	thread_state_flavor_t		new_flavor)
{
	ipc_port_t	old_port[EXC_TYPES_COUNT];
	boolean_t	privileged = current_task()->sec_token.val[0] == 0;

	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (exception_mask & ~EXC_MASK_VALID)
		return (KERN_INVALID_ARGUMENT);

	if (IP_VALID(new_port)) {
		switch (new_behavior & ~MACH_EXCEPTION_CODES) {

		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
			break;

		default:
			return (KERN_INVALID_ARGUMENT);
		}
	}

	/*
	 * Check the validity of the thread_state_flavor by calling the
	 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
	 * osfmk/mach/ARCHITECTURE/thread_status.h
	 */
	if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor))
		return (KERN_INVALID_ARGUMENT);
	itk_lock(task);

	if (task->itk_self == IP_NULL) {
		itk_unlock(task);

		return (KERN_FAILURE);
	}

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
		if (exception_mask & (1 << i)) {
			old_port[i] = task->exc_actions[i].port;
			task->exc_actions[i].port =
				ipc_port_copy_send(new_port);
			task->exc_actions[i].behavior = new_behavior;
			task->exc_actions[i].flavor = new_flavor;
			task->exc_actions[i].privileged = privileged;
		}
		else
			old_port[i] = IP_NULL;
	}

	itk_unlock(task);
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
		if (IP_VALID(old_port[i]))
			ipc_port_release_send(old_port[i]);

	if (IP_VALID(new_port))		 /* consume send right */
		ipc_port_release_send(new_port);

	return (KERN_SUCCESS);
}
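/*
 * Illustrative user-space sketch (an assumption for clarity, not part of
 * this file; assumes <mach/mach.h>): a crash-reporter style client installs
 * a send right to a port it owns as the task exception port for the
 * exceptions it cares about:
 *
 *	mach_port_t exc_port;
 *	mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &exc_port);
 *	mach_port_insert_right(mach_task_self(), exc_port, exc_port,
 *	                       MACH_MSG_TYPE_MAKE_SEND);
 *	kr = task_set_exception_ports(mach_task_self(),
 *	                              EXC_MASK_BAD_ACCESS | EXC_MASK_BAD_INSTRUCTION,
 *	                              exc_port, EXCEPTION_DEFAULT, THREAD_STATE_NONE);
 */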
/*
 *	Routine:	thread/task_swap_exception_ports [kernel call]
 *	Purpose:
 *		Sets the thread/task exception port, flavor and
 *		behavior for the exception types specified by the
 *		mask.
 *
 *		The old ports, behavior and flavors are returned.
 *		Count specifies the array sizes on input and
 *		the number of returned ports etc. on output.  The
 *		arrays must be large enough to hold all the returned
 *		data, MIG returns an error otherwise.  The masks
 *		array specifies the corresponding exception type(s).
 *	Conditions:
 *		Nothing locked.  If successful, consumes
 *		the supplied send right.
 *
 *		Returns up to [in] CountCnt elements.
 *	Returns:
 *		KERN_SUCCESS		Changed the special port.
 *		KERN_INVALID_ARGUMENT	The thread is null,
 *					Illegal mask bit set.
 *					Illegal exception behavior.
 *		KERN_FAILURE		The thread is dead.
 */
kern_return_t
thread_swap_exception_ports(
	thread_t			thread,
	exception_mask_t		exception_mask,
	ipc_port_t			new_port,
	exception_behavior_t		new_behavior,
	thread_state_flavor_t		new_flavor,
	exception_mask_array_t		masks,
	mach_msg_type_number_t		*CountCnt,
	exception_port_array_t		ports,
	exception_behavior_array_t	behaviors,
	thread_state_flavor_array_t	flavors)
{
	ipc_port_t	old_port[EXC_TYPES_COUNT];
	boolean_t	privileged = current_task()->sec_token.val[0] == 0;
	unsigned int	i, j, count;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (exception_mask & ~EXC_MASK_VALID)
		return (KERN_INVALID_ARGUMENT);

	if (IP_VALID(new_port)) {
		switch (new_behavior & ~MACH_EXCEPTION_CODES) {

		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
			break;

		default:
			return (KERN_INVALID_ARGUMENT);
		}
	}

	if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor))
		return (KERN_INVALID_ARGUMENT);
	thread_mtx_lock(thread);

	if (!thread->active) {
		thread_mtx_unlock(thread);

		return (KERN_FAILURE);
	}

	if (thread->exc_actions == NULL) {
		ipc_thread_init_exc_actions(thread);
	}
	assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
	for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
		if (exception_mask & (1 << i)) {
			for (j = 0; j < count; ++j) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (thread->exc_actions[i].port == ports[j] &&
				    thread->exc_actions[i].behavior == behaviors[j] &&
				    thread->exc_actions[i].flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			if (j == count) {
				masks[j] = (1 << i);
				ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);
				behaviors[j] = thread->exc_actions[i].behavior;
				flavors[j] = thread->exc_actions[i].flavor;
				++count;
			}

			old_port[i] = thread->exc_actions[i].port;
			thread->exc_actions[i].port = ipc_port_copy_send(new_port);
			thread->exc_actions[i].behavior = new_behavior;
			thread->exc_actions[i].flavor = new_flavor;
			thread->exc_actions[i].privileged = privileged;
		}
		else
			old_port[i] = IP_NULL;
	}
	thread_mtx_unlock(thread);

	while (--i >= FIRST_EXCEPTION) {
		if (IP_VALID(old_port[i]))
			ipc_port_release_send(old_port[i]);
	}

	if (IP_VALID(new_port))		 /* consume send right */
		ipc_port_release_send(new_port);

	*CountCnt = count;

	return (KERN_SUCCESS);
}
kern_return_t
task_swap_exception_ports(
	task_t				task,
	exception_mask_t		exception_mask,
	ipc_port_t			new_port,
	exception_behavior_t		new_behavior,
	thread_state_flavor_t		new_flavor,
	exception_mask_array_t		masks,
	mach_msg_type_number_t		*CountCnt,
	exception_port_array_t		ports,
	exception_behavior_array_t	behaviors,
	thread_state_flavor_array_t	flavors)
{
	ipc_port_t	old_port[EXC_TYPES_COUNT];
	boolean_t	privileged = current_task()->sec_token.val[0] == 0;
	unsigned int	i, j, count;

	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (exception_mask & ~EXC_MASK_VALID)
		return (KERN_INVALID_ARGUMENT);

	if (IP_VALID(new_port)) {
		switch (new_behavior & ~MACH_EXCEPTION_CODES) {

		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
			break;

		default:
			return (KERN_INVALID_ARGUMENT);
		}
	}

	if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor))
		return (KERN_INVALID_ARGUMENT);
	itk_lock(task);

	if (task->itk_self == IP_NULL) {
		itk_unlock(task);

		return (KERN_FAILURE);
	}

	assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
	for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
		if (exception_mask & (1 << i)) {
			for (j = 0; j < count; j++) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (task->exc_actions[i].port == ports[j] &&
				    task->exc_actions[i].behavior == behaviors[j] &&
				    task->exc_actions[i].flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			if (j == count) {
				masks[j] = (1 << i);
				ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
				behaviors[j] = task->exc_actions[i].behavior;
				flavors[j] = task->exc_actions[i].flavor;
				++count;
			}

			old_port[i] = task->exc_actions[i].port;

			task->exc_actions[i].port = ipc_port_copy_send(new_port);
			task->exc_actions[i].behavior = new_behavior;
			task->exc_actions[i].flavor = new_flavor;
			task->exc_actions[i].privileged = privileged;
		}
		else
			old_port[i] = IP_NULL;
	}
	itk_unlock(task);

	while (--i >= FIRST_EXCEPTION) {
		if (IP_VALID(old_port[i]))
			ipc_port_release_send(old_port[i]);
	}
	if (IP_VALID(new_port))		 /* consume send right */
		ipc_port_release_send(new_port);

	*CountCnt = count;

	return (KERN_SUCCESS);
}
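/*
 * The swap variants return the previous (mask, port, behavior, flavor)
 * tuples in the caller-supplied arrays, so a caller can install its own
 * handlers and later restore exactly what was there before; a minimal
 * restore sketch (caller-side, using the arrays returned above):
 *
 *	for (i = 0; i < count; i++)
 *		task_set_exception_ports(task, masks[i], ports[i],
 *		                         behaviors[i], flavors[i]);
 */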
/*
 *	Routine:	thread/task_get_exception_ports [kernel call]
 *	Purpose:
 *		Clones a send right for each of the thread/task's exception
 *		ports specified in the mask and returns the behavior
 *		and flavor of said port.
 *
 *		Returns up to [in] CountCnt elements.
 *	Returns:
 *		KERN_SUCCESS		Extracted a send right.
 *		KERN_INVALID_ARGUMENT	The thread is null,
 *					Invalid special port,
 *					Illegal mask bit set.
 *		KERN_FAILURE		The thread is dead.
 */
kern_return_t
thread_get_exception_ports(
	thread_t			thread,
	exception_mask_t		exception_mask,
	exception_mask_array_t		masks,
	mach_msg_type_number_t		*CountCnt,
	exception_port_array_t		ports,
	exception_behavior_array_t	behaviors,
	thread_state_flavor_array_t	flavors)
{
	unsigned int	i, j, count;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (exception_mask & ~EXC_MASK_VALID)
		return (KERN_INVALID_ARGUMENT);
	thread_mtx_lock(thread);

	if (!thread->active) {
		thread_mtx_unlock(thread);

		return (KERN_FAILURE);
	}

	count = 0;

	if (thread->exc_actions == NULL) {
		goto done;
	}
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
		if (exception_mask & (1 << i)) {
			for (j = 0; j < count; ++j) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (thread->exc_actions[i].port == ports[j] &&
				    thread->exc_actions[i].behavior == behaviors[j] &&
				    thread->exc_actions[i].flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			if (j == count) {
				masks[j] = (1 << i);
				ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);
				behaviors[j] = thread->exc_actions[i].behavior;
				flavors[j] = thread->exc_actions[i].flavor;
				++count;
				if (count >= *CountCnt)
					break;
			}
		}
	}

done:
	thread_mtx_unlock(thread);

	*CountCnt = count;

	return (KERN_SUCCESS);
}
kern_return_t
task_get_exception_ports(
	task_t				task,
	exception_mask_t		exception_mask,
	exception_mask_array_t		masks,
	mach_msg_type_number_t		*CountCnt,
	exception_port_array_t		ports,
	exception_behavior_array_t	behaviors,
	thread_state_flavor_array_t	flavors)
{
	unsigned int	i, j, count;
	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (exception_mask & ~EXC_MASK_VALID)
		return (KERN_INVALID_ARGUMENT);
	itk_lock(task);

	if (task->itk_self == IP_NULL) {
		itk_unlock(task);

		return (KERN_FAILURE);
	}

	count = 0;
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
		if (exception_mask & (1 << i)) {
			for (j = 0; j < count; ++j) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (task->exc_actions[i].port == ports[j] &&
				    task->exc_actions[i].behavior == behaviors[j] &&
				    task->exc_actions[i].flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			if (j == count) {
				masks[j] = (1 << i);
				ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
				behaviors[j] = task->exc_actions[i].behavior;
				flavors[j] = task->exc_actions[i].flavor;
				++count;
				if (count > *CountCnt)
					break;
			}
		}
	}

	itk_unlock(task);

	*CountCnt = count;

	return (KERN_SUCCESS);
}