/*
 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
/*
 * Task and thread related IPC functions.
 */
#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mach_param.h>
#include <mach/task_special_ports.h>
#include <mach/thread_special_ports.h>
#include <mach/thread_status.h>
#include <mach/exception_types.h>
#include <mach/memory_object_types.h>
#include <mach/mach_traps.h>
#include <mach/task_server.h>
#include <mach/thread_act_server.h>
#include <mach/mach_host_server.h>
#include <mach/host_priv_server.h>
#include <mach/vm_map_server.h>

#include <kern/kern_types.h>
#include <kern/host.h>
#include <kern/ipc_kobject.h>
#include <kern/ipc_tt.h>
#include <kern/kalloc.h>
#include <kern/thread.h>
#include <kern/misc_protos.h>

#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>

#include <security/mac_mach_internal.h>
/* forward declarations */
task_t convert_port_to_locked_task(ipc_port_t port);
/*
 *  Routine:    ipc_task_init
 *  Purpose:
 *      Initialize a task's IPC state.
 *
 *      If non-null, some state will be inherited from the parent.
 *      The parent must be appropriately initialized.
 */

void
ipc_task_init(
    task_t      task,
    task_t      parent)
{
    ipc_space_t space;
    ipc_port_t kport;
    ipc_port_t nport;
    kern_return_t kr;
    int i;

    kr = ipc_space_create(&ipc_table_entries[0], &space);
    if (kr != KERN_SUCCESS)
        panic("ipc_task_init");

    space->is_task = task;

    kport = ipc_port_alloc_kernel();
    if (kport == IP_NULL)
        panic("ipc_task_init");

    nport = ipc_port_alloc_kernel();
    if (nport == IP_NULL)
        panic("ipc_task_init");

    itk_lock_init(task);
    task->itk_self = kport;
    task->itk_nself = nport;
    task->itk_resume = IP_NULL; /* Lazily allocated on-demand */
    task->itk_sself = ipc_port_make_send(kport);
    task->itk_debug_control = IP_NULL;
    task->itk_space = space;

    if (parent == TASK_NULL) {
        ipc_port_t port;

        for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
            task->exc_actions[i].port = IP_NULL;
        }

        kr = host_get_host_port(host_priv_self(), &port);
        assert(kr == KERN_SUCCESS);
        task->itk_host = port;

        task->itk_bootstrap = IP_NULL;
        task->itk_seatbelt = IP_NULL;
        task->itk_gssd = IP_NULL;
        task->itk_task_access = IP_NULL;

        for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
            task->itk_registered[i] = IP_NULL;
    } else {
        itk_lock(parent);
        assert(parent->itk_self != IP_NULL);

        /* inherit registered ports */

        for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
            task->itk_registered[i] =
                ipc_port_copy_send(parent->itk_registered[i]);

        /* inherit exception and bootstrap ports */

        for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
            task->exc_actions[i].port =
                ipc_port_copy_send(parent->exc_actions[i].port);
            task->exc_actions[i].flavor =
                parent->exc_actions[i].flavor;
            task->exc_actions[i].behavior =
                parent->exc_actions[i].behavior;
            task->exc_actions[i].privileged =
                parent->exc_actions[i].privileged;
        }

        task->itk_host =
            ipc_port_copy_send(parent->itk_host);

        task->itk_bootstrap =
            ipc_port_copy_send(parent->itk_bootstrap);

        task->itk_seatbelt =
            ipc_port_copy_send(parent->itk_seatbelt);

        task->itk_gssd =
            ipc_port_copy_send(parent->itk_gssd);

        task->itk_task_access =
            ipc_port_copy_send(parent->itk_task_access);

        itk_unlock(parent);
    }
}
/*
 *  Routine:    ipc_task_enable
 *  Purpose:
 *      Enable a task for IPC access.
 */

void
ipc_task_enable(
    task_t      task)
{
    ipc_port_t kport;
    ipc_port_t nport;

    itk_lock(task);
    kport = task->itk_self;
    if (kport != IP_NULL)
        ipc_kobject_set(kport, (ipc_kobject_t) task, IKOT_TASK);
    nport = task->itk_nself;
    if (nport != IP_NULL)
        ipc_kobject_set(nport, (ipc_kobject_t) task, IKOT_TASK_NAME);
    itk_unlock(task);
}
/*
 *  Routine:    ipc_task_disable
 *  Purpose:
 *      Disable IPC access to a task.
 */

void
ipc_task_disable(
    task_t      task)
{
    ipc_port_t kport;
    ipc_port_t nport;
    ipc_port_t rport;

    itk_lock(task);
    kport = task->itk_self;
    if (kport != IP_NULL)
        ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
    nport = task->itk_nself;
    if (nport != IP_NULL)
        ipc_kobject_set(nport, IKO_NULL, IKOT_NONE);

    rport = task->itk_resume;
    if (rport != IP_NULL) {
        /*
         * From this point onwards this task is no longer accepting
         * resumptions.
         *
         * There are still outstanding suspensions on this task,
         * even as it is being torn down. Disconnect the task
         * from the rport, thereby "orphaning" the rport. The rport
         * itself will go away only when the last suspension holder
         * destroys his SO right to it -- when he either
         * exits, or tries to actually use that last SO right to
         * resume this (now non-existent) task.
         */
        ipc_kobject_set(rport, IKO_NULL, IKOT_NONE);
    }
    itk_unlock(task);
}
/*
 *  Routine:    ipc_task_terminate
 *  Purpose:
 *      Clean up and destroy a task's IPC state.
 *  Conditions:
 *      Nothing locked.  The task must be suspended.
 *      (Or the current thread must be in the task.)
 */

void
ipc_task_terminate(
    task_t      task)
{
    ipc_port_t kport;
    ipc_port_t nport;
    ipc_port_t rport;
    int i;

    itk_lock(task);
    kport = task->itk_self;

    if (kport == IP_NULL) {
        /* the task is already terminated (can this happen?) */
        itk_unlock(task);
        return;
    }
    task->itk_self = IP_NULL;

    nport = task->itk_nself;
    assert(nport != IP_NULL);
    task->itk_nself = IP_NULL;

    rport = task->itk_resume;
    task->itk_resume = IP_NULL;

    itk_unlock(task);

    /* release the naked send rights */

    if (IP_VALID(task->itk_sself))
        ipc_port_release_send(task->itk_sself);

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
        if (IP_VALID(task->exc_actions[i].port)) {
            ipc_port_release_send(task->exc_actions[i].port);
        }
    }

    if (IP_VALID(task->itk_host))
        ipc_port_release_send(task->itk_host);

    if (IP_VALID(task->itk_bootstrap))
        ipc_port_release_send(task->itk_bootstrap);

    if (IP_VALID(task->itk_seatbelt))
        ipc_port_release_send(task->itk_seatbelt);

    if (IP_VALID(task->itk_gssd))
        ipc_port_release_send(task->itk_gssd);

    if (IP_VALID(task->itk_task_access))
        ipc_port_release_send(task->itk_task_access);

    if (IP_VALID(task->itk_debug_control))
        ipc_port_release_send(task->itk_debug_control);

    for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
        if (IP_VALID(task->itk_registered[i]))
            ipc_port_release_send(task->itk_registered[i]);

    /* destroy the kernel ports */
    ipc_port_dealloc_kernel(kport);
    ipc_port_dealloc_kernel(nport);
    if (rport != IP_NULL)
        ipc_port_dealloc_kernel(rport);

    itk_lock_destroy(task);
}
/*
 *  Routine:    ipc_task_reset
 *  Purpose:
 *      Reset a task's IPC state to protect it when
 *      it enters an elevated security context.  The
 *      task name port can remain the same - since
 *      it represents no specific privilege.
 *  Conditions:
 *      Nothing locked.  The task must be suspended.
 *      (Or the current thread must be in the task.)
 */

void
ipc_task_reset(
    task_t      task)
{
    ipc_port_t old_kport, new_kport;
    ipc_port_t old_sself;
    ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
    int i;

    new_kport = ipc_port_alloc_kernel();
    if (new_kport == IP_NULL)
        panic("ipc_task_reset");

    itk_lock(task);

    old_kport = task->itk_self;

    if (old_kport == IP_NULL) {
        /* the task is already terminated (can this happen?) */
        itk_unlock(task);
        ipc_port_dealloc_kernel(new_kport);
        return;
    }

    task->itk_self = new_kport;
    old_sself = task->itk_sself;
    task->itk_sself = ipc_port_make_send(new_kport);
    ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
    ipc_kobject_set(new_kport, (ipc_kobject_t) task, IKOT_TASK);

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
        old_exc_actions[i] = IP_NULL;

        if (i == EXC_CORPSE_NOTIFY && task_corpse_pending_report(task)) {
            continue;
        }

        if (!task->exc_actions[i].privileged) {
            old_exc_actions[i] = task->exc_actions[i].port;
            task->exc_actions[i].port = IP_NULL;
        }
    }

    if (IP_VALID(task->itk_debug_control)) {
        ipc_port_release_send(task->itk_debug_control);
    }
    task->itk_debug_control = IP_NULL;

    itk_unlock(task);

    /* release the naked send rights */

    if (IP_VALID(old_sself))
        ipc_port_release_send(old_sself);

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
        if (IP_VALID(old_exc_actions[i])) {
            ipc_port_release_send(old_exc_actions[i]);
        }
    }

    /* destroy the kernel port */
    ipc_port_dealloc_kernel(old_kport);
}
/*
 *  Routine:    ipc_thread_init
 *  Purpose:
 *      Initialize a thread's IPC state.
 */

void
ipc_thread_init(
    thread_t    thread)
{
    ipc_port_t  kport;

    kport = ipc_port_alloc_kernel();
    if (kport == IP_NULL)
        panic("ipc_thread_init");

    thread->ith_self = kport;
    thread->ith_sself = ipc_port_make_send(kport);
    thread->exc_actions = NULL;

    ipc_kobject_set(kport, (ipc_kobject_t)thread, IKOT_THREAD);

#if IMPORTANCE_INHERITANCE
    thread->ith_assertions = 0;
#endif

    ipc_kmsg_queue_init(&thread->ith_messages);

    thread->ith_rpc_reply = IP_NULL;
}
void
ipc_thread_init_exc_actions(
    thread_t    thread)
{
    assert(thread->exc_actions == NULL);

    thread->exc_actions = kalloc(sizeof(struct exception_action) * EXC_TYPES_COUNT);
    bzero(thread->exc_actions, sizeof(struct exception_action) * EXC_TYPES_COUNT);
}
void
ipc_thread_destroy_exc_actions(
    thread_t    thread)
{
    if (thread->exc_actions != NULL) {
        kfree(thread->exc_actions,
              sizeof(struct exception_action) * EXC_TYPES_COUNT);
        thread->exc_actions = NULL;
    }
}
void
ipc_thread_disable(
    thread_t    thread)
{
    ipc_port_t  kport = thread->ith_self;

    if (kport != IP_NULL)
        ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
}
/*
 *  Routine:    ipc_thread_terminate
 *  Purpose:
 *      Clean up and destroy a thread's IPC state.
 */

void
ipc_thread_terminate(
    thread_t    thread)
{
    ipc_port_t  kport = thread->ith_self;

    if (kport != IP_NULL) {
        int i;

        if (IP_VALID(thread->ith_sself))
            ipc_port_release_send(thread->ith_sself);

        thread->ith_sself = thread->ith_self = IP_NULL;

        if (thread->exc_actions != NULL) {
            for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
                if (IP_VALID(thread->exc_actions[i].port))
                    ipc_port_release_send(thread->exc_actions[i].port);
            }
            ipc_thread_destroy_exc_actions(thread);
        }

        ipc_port_dealloc_kernel(kport);
    }

#if IMPORTANCE_INHERITANCE
    assert(thread->ith_assertions == 0);
#endif

    assert(ipc_kmsg_queue_empty(&thread->ith_messages));

    if (thread->ith_rpc_reply != IP_NULL)
        ipc_port_dealloc_reply(thread->ith_rpc_reply);

    thread->ith_rpc_reply = IP_NULL;
}
/*
 *  Routine:    ipc_thread_reset
 *  Purpose:
 *      Reset the IPC state for a given Mach thread when
 *      its task enters an elevated security context.
 *      Both the thread port and its exception ports have
 *      to be reset.  Its RPC reply port cannot have any
 *      rights outstanding, so it should be fine.
 */

void
ipc_thread_reset(
    thread_t    thread)
{
    ipc_port_t old_kport, new_kport;
    ipc_port_t old_sself;
    ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
    boolean_t  has_old_exc_actions = FALSE;
    int        i;

    new_kport = ipc_port_alloc_kernel();
    if (new_kport == IP_NULL)
        panic("ipc_thread_reset");

    thread_mtx_lock(thread);

    old_kport = thread->ith_self;

    if (old_kport == IP_NULL && thread->inspection == FALSE) {
        /* the thread is already terminated (can this happen?) */
        thread_mtx_unlock(thread);
        ipc_port_dealloc_kernel(new_kport);
        return;
    }

    thread->ith_self = new_kport;
    old_sself = thread->ith_sself;
    thread->ith_sself = ipc_port_make_send(new_kport);
    if (old_kport != IP_NULL) {
        ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
    }
    ipc_kobject_set(new_kport, (ipc_kobject_t) thread, IKOT_THREAD);

    /*
     * Only ports that were set by root-owned processes
     * (privileged ports) should survive
     */
    if (thread->exc_actions != NULL) {
        has_old_exc_actions = TRUE;
        for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
            if (thread->exc_actions[i].privileged) {
                old_exc_actions[i] = IP_NULL;
            } else {
                old_exc_actions[i] = thread->exc_actions[i].port;
                thread->exc_actions[i].port = IP_NULL;
            }
        }
    }

    thread_mtx_unlock(thread);

    /* release the naked send rights */

    if (IP_VALID(old_sself))
        ipc_port_release_send(old_sself);

    if (has_old_exc_actions) {
        for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
            ipc_port_release_send(old_exc_actions[i]);
        }
    }

    /* destroy the kernel port */
    if (old_kport != IP_NULL) {
        ipc_port_dealloc_kernel(old_kport);
    }
}
/*
 *  Routine:    retrieve_task_self_fast
 *  Purpose:
 *      Optimized version of retrieve_task_self,
 *      that only works for the current task.
 *
 *      Return a send right (possibly null/dead)
 *      for the task's user-visible self port.
 */

ipc_port_t
retrieve_task_self_fast(
    register task_t     task)
{
    register ipc_port_t port;

    assert(task == current_task());

    itk_lock(task);
    assert(task->itk_self != IP_NULL);

    if ((port = task->itk_sself) == task->itk_self) {
        /* no interposing */

        ip_lock(port);
        assert(ip_active(port));
        ip_reference(port);
        port->ip_srights++;
        ip_unlock(port);
    } else
        port = ipc_port_copy_send(port);
    itk_unlock(task);

    return port;
}
/*
 *  Routine:    retrieve_thread_self_fast
 *  Purpose:
 *      Return a send right (possibly null/dead)
 *      for the thread's user-visible self port.
 *
 *      Only works for the current thread.
 */

ipc_port_t
retrieve_thread_self_fast(
    thread_t        thread)
{
    register ipc_port_t port;

    assert(thread == current_thread());

    thread_mtx_lock(thread);

    assert(thread->ith_self != IP_NULL);

    if ((port = thread->ith_sself) == thread->ith_self) {
        /* no interposing */

        ip_lock(port);
        assert(ip_active(port));
        ip_reference(port);
        port->ip_srights++;
        ip_unlock(port);
    } else
        port = ipc_port_copy_send(port);

    thread_mtx_unlock(thread);

    return port;
}
/*
 *  Routine:    task_self_trap [mach trap]
 *  Purpose:
 *      Give the caller send rights for his own task port.
 *  Conditions:
 *      Nothing locked.
 *  Returns:
 *      MACH_PORT_NULL if there are any resource failures
 *      or other errors.
 */

mach_port_name_t
task_self_trap(
    __unused struct task_self_trap_args *args)
{
    task_t task = current_task();
    ipc_port_t sright;
    mach_port_name_t name;

    sright = retrieve_task_self_fast(task);
    name = ipc_port_copyout_send(sright, task->itk_space);
    return name;
}
/*
 *  Routine:    thread_self_trap [mach trap]
 *  Purpose:
 *      Give the caller send rights for his own thread port.
 *  Conditions:
 *      Nothing locked.
 *  Returns:
 *      MACH_PORT_NULL if there are any resource failures
 *      or other errors.
 */

mach_port_name_t
thread_self_trap(
    __unused struct thread_self_trap_args *args)
{
    thread_t thread = current_thread();
    task_t task = thread->task;
    ipc_port_t sright;
    mach_port_name_t name;

    sright = retrieve_thread_self_fast(thread);
    name = ipc_port_copyout_send(sright, task->itk_space);
    return name;
}
/*
 *  Routine:    mach_reply_port [mach trap]
 *  Purpose:
 *      Allocate a port for the caller.
 *  Conditions:
 *      Nothing locked.
 *  Returns:
 *      MACH_PORT_NULL if there are any resource failures
 *      or other errors.
 */

mach_port_name_t
mach_reply_port(
    __unused struct mach_reply_port_args *args)
{
    ipc_port_t port;
    mach_port_name_t name;
    kern_return_t kr;

    kr = ipc_port_alloc(current_task()->itk_space, &name, &port);
    if (kr == KERN_SUCCESS)
        ip_unlock(port);
    else
        name = MACH_PORT_NULL;
    return name;
}
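
/*
 * Example (editor's illustrative sketch, not part of this file): the three
 * traps above are reached from user space through the libsyscall wrappers
 * mach_task_self(), mach_thread_self() and mach_reply_port().  The sketch
 * below only shows how the returned rights are conventionally owned and
 * released by the caller.
 */
#if 0   /* user-space usage sketch */
#include <mach/mach.h>
#include <stdio.h>

static void self_ports_example(void)
{
    mach_port_t task   = mach_task_self();    // cached send right; never deallocated
    mach_port_t thread = mach_thread_self();  // returns a new send right
    mach_port_t reply  = mach_reply_port();   // new receive right for replies

    printf("task %u thread %u reply %u\n", task, thread, reply);

    /* mach_thread_self() and mach_reply_port() hand back rights we own */
    mach_port_deallocate(mach_task_self(), thread);
    mach_port_mod_refs(mach_task_self(), reply, MACH_PORT_RIGHT_RECEIVE, -1);
}
#endif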
/*
 *  Routine:    thread_get_special_port [kernel call]
 *  Purpose:
 *      Clones a send right for one of the thread's
 *      special ports.
 *  Conditions:
 *      Nothing locked.
 *  Returns:
 *      KERN_SUCCESS            Extracted a send right.
 *      KERN_INVALID_ARGUMENT   The thread is null.
 *      KERN_FAILURE            The thread is dead.
 *      KERN_INVALID_ARGUMENT   Invalid special port.
 */

kern_return_t
thread_get_special_port(
    thread_t        thread,
    int             which,
    ipc_port_t      *portp)
{
    kern_return_t   result = KERN_SUCCESS;
    ipc_port_t      *whichp;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    switch (which) {

    case THREAD_KERNEL_PORT:
        whichp = &thread->ith_sself;
        break;

    default:
        return (KERN_INVALID_ARGUMENT);
    }

    thread_mtx_lock(thread);

    if (thread->active)
        *portp = ipc_port_copy_send(*whichp);
    else
        result = KERN_FAILURE;

    thread_mtx_unlock(thread);

    return (result);
}
/*
 *  Routine:    thread_set_special_port [kernel call]
 *  Purpose:
 *      Changes one of the thread's special ports,
 *      setting it to the supplied send right.
 *  Conditions:
 *      Nothing locked.  If successful, consumes
 *      the supplied send right.
 *  Returns:
 *      KERN_SUCCESS            Changed the special port.
 *      KERN_INVALID_ARGUMENT   The thread is null.
 *      KERN_FAILURE            The thread is dead.
 *      KERN_INVALID_ARGUMENT   Invalid special port.
 */

kern_return_t
thread_set_special_port(
    thread_t        thread,
    int             which,
    ipc_port_t      port)
{
    kern_return_t   result = KERN_SUCCESS;
    ipc_port_t      *whichp, old = IP_NULL;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    switch (which) {

    case THREAD_KERNEL_PORT:
        whichp = &thread->ith_sself;
        break;

    default:
        return (KERN_INVALID_ARGUMENT);
    }

    thread_mtx_lock(thread);

    if (thread->active) {
        old = *whichp;
        *whichp = port;
    }
    else
        result = KERN_FAILURE;

    thread_mtx_unlock(thread);

    if (IP_VALID(old))
        ipc_port_release_send(old);

    return (result);
}
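
/*
 * Example (editor's illustrative sketch, not part of this file): user space
 * reaches the two routines above through the MIG-generated wrappers
 * thread_get_special_port() / thread_set_special_port(); THREAD_KERNEL_PORT
 * is the only special port accepted here.
 */
#if 0   /* user-space usage sketch */
#include <mach/mach.h>

static kern_return_t clone_thread_kernel_port(thread_act_t thread, mach_port_t *out)
{
    /* Clones the send right stashed in ith_sself (see above). */
    return thread_get_special_port(thread, THREAD_KERNEL_PORT, out);
}
#endif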
/*
 *  Routine:    task_get_special_port [kernel call]
 *  Purpose:
 *      Clones a send right for one of the task's
 *      special ports.
 *  Conditions:
 *      Nothing locked.
 *  Returns:
 *      KERN_SUCCESS            Extracted a send right.
 *      KERN_INVALID_ARGUMENT   The task is null.
 *      KERN_FAILURE            The task/space is dead.
 *      KERN_INVALID_ARGUMENT   Invalid special port.
 */

kern_return_t
task_get_special_port(
    task_t      task,
    int         which,
    ipc_port_t  *portp)
{
    ipc_port_t port;

    if (task == TASK_NULL)
        return KERN_INVALID_ARGUMENT;

    itk_lock(task);
    if (task->itk_self == IP_NULL) {
        itk_unlock(task);
        return KERN_FAILURE;
    }

    switch (which) {

    case TASK_KERNEL_PORT:
        port = ipc_port_copy_send(task->itk_sself);
        break;

    case TASK_NAME_PORT:
        port = ipc_port_make_send(task->itk_nself);
        break;

    case TASK_HOST_PORT:
        port = ipc_port_copy_send(task->itk_host);
        break;

    case TASK_BOOTSTRAP_PORT:
        port = ipc_port_copy_send(task->itk_bootstrap);
        break;

    case TASK_SEATBELT_PORT:
        port = ipc_port_copy_send(task->itk_seatbelt);
        break;

    case TASK_ACCESS_PORT:
        port = ipc_port_copy_send(task->itk_task_access);
        break;

    case TASK_DEBUG_CONTROL_PORT:
        port = ipc_port_copy_send(task->itk_debug_control);
        break;

    default:
        itk_unlock(task);
        return KERN_INVALID_ARGUMENT;
    }

    itk_unlock(task);

    *portp = port;
    return KERN_SUCCESS;
}
/*
 *  Routine:    task_set_special_port [kernel call]
 *  Purpose:
 *      Changes one of the task's special ports,
 *      setting it to the supplied send right.
 *  Conditions:
 *      Nothing locked.  If successful, consumes
 *      the supplied send right.
 *  Returns:
 *      KERN_SUCCESS            Changed the special port.
 *      KERN_INVALID_ARGUMENT   The task is null.
 *      KERN_FAILURE            The task/space is dead.
 *      KERN_INVALID_ARGUMENT   Invalid special port.
 *      KERN_NO_ACCESS          Attempted overwrite of seatbelt port.
 */

kern_return_t
task_set_special_port(
    task_t      task,
    int         which,
    ipc_port_t  port)
{
    ipc_port_t *whichp;
    ipc_port_t old;

    if (task == TASK_NULL)
        return KERN_INVALID_ARGUMENT;

    switch (which) {

    case TASK_KERNEL_PORT:
        whichp = &task->itk_sself;
        break;

    case TASK_HOST_PORT:
        whichp = &task->itk_host;
        break;

    case TASK_BOOTSTRAP_PORT:
        whichp = &task->itk_bootstrap;
        break;

    case TASK_SEATBELT_PORT:
        whichp = &task->itk_seatbelt;
        break;

    case TASK_ACCESS_PORT:
        whichp = &task->itk_task_access;
        break;

    case TASK_DEBUG_CONTROL_PORT:
        whichp = &task->itk_debug_control;
        break;

    default:
        return KERN_INVALID_ARGUMENT;
    }

    itk_lock(task);
    if (task->itk_self == IP_NULL) {
        itk_unlock(task);
        return KERN_FAILURE;
    }

    /* do not allow overwrite of seatbelt or task access ports */
    if ((TASK_SEATBELT_PORT == which || TASK_ACCESS_PORT == which)
        && IP_VALID(*whichp)) {
        itk_unlock(task);
        return KERN_NO_ACCESS;
    }

    old = *whichp;
    *whichp = port;
    itk_unlock(task);

    if (IP_VALID(old))
        ipc_port_release_send(old);
    return KERN_SUCCESS;
}
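
/*
 * Example (editor's illustrative sketch, not part of this file): reading a
 * task special port from user space with the MIG wrapper.  Note the
 * KERN_NO_ACCESS check above: once a seatbelt or task access port is set
 * it cannot be overwritten.
 */
#if 0   /* user-space usage sketch */
#include <mach/mach.h>

static kern_return_t get_bootstrap(mach_port_t *bootstrap)
{
    /* Clones the send right stored in itk_bootstrap for the calling task. */
    return task_get_special_port(mach_task_self(), TASK_BOOTSTRAP_PORT, bootstrap);
}
#endif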
/*
 *  Routine:    mach_ports_register [kernel call]
 *  Purpose:
 *      Stash a handful of port send rights in the task.
 *      Child tasks will inherit these rights, but they
 *      must use mach_ports_lookup to acquire them.
 *
 *      The rights are supplied in a (wired) kalloc'd segment.
 *      Rights which aren't supplied are assumed to be null.
 *  Conditions:
 *      Nothing locked.  If successful, consumes
 *      the supplied rights and memory.
 *  Returns:
 *      KERN_SUCCESS            Stashed the port rights.
 *      KERN_INVALID_ARGUMENT   The task is null.
 *      KERN_INVALID_ARGUMENT   The task is dead.
 *      KERN_INVALID_ARGUMENT   The memory param is null.
 *      KERN_INVALID_ARGUMENT   Too many port rights supplied.
 */

kern_return_t
mach_ports_register(
    task_t                  task,
    mach_port_array_t       memory,
    mach_msg_type_number_t  portsCnt)
{
    ipc_port_t ports[TASK_PORT_REGISTER_MAX];
    unsigned int i;

    if ((task == TASK_NULL) ||
        (portsCnt > TASK_PORT_REGISTER_MAX) ||
        (portsCnt && memory == NULL))
        return KERN_INVALID_ARGUMENT;

    /*
     * Pad the port rights with nulls.
     */

    for (i = 0; i < portsCnt; i++)
        ports[i] = memory[i];
    for (; i < TASK_PORT_REGISTER_MAX; i++)
        ports[i] = IP_NULL;

    itk_lock(task);
    if (task->itk_self == IP_NULL) {
        itk_unlock(task);
        return KERN_INVALID_ARGUMENT;
    }

    /*
     * Replace the old send rights with the new.
     * Release the old rights after unlocking.
     */

    for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
        ipc_port_t old;

        old = task->itk_registered[i];
        task->itk_registered[i] = ports[i];
        ports[i] = old;
    }

    itk_unlock(task);

    for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
        if (IP_VALID(ports[i]))
            ipc_port_release_send(ports[i]);

    /*
     * Now that the operation is known to be successful,
     * we can free the memory.
     */

    if (portsCnt != 0)
        kfree(memory,
              (vm_size_t) (portsCnt * sizeof(mach_port_t)));

    return KERN_SUCCESS;
}
/*
 *  Routine:    mach_ports_lookup [kernel call]
 *  Purpose:
 *      Retrieves (clones) the stashed port send rights.
 *  Conditions:
 *      Nothing locked.  If successful, the caller gets
 *      rights and memory.
 *  Returns:
 *      KERN_SUCCESS            Retrieved the send rights.
 *      KERN_INVALID_ARGUMENT   The task is null.
 *      KERN_INVALID_ARGUMENT   The task is dead.
 *      KERN_RESOURCE_SHORTAGE  Couldn't allocate memory.
 */

kern_return_t
mach_ports_lookup(
    task_t                  task,
    mach_port_array_t       *portsp,
    mach_msg_type_number_t  *portsCnt)
{
    void *memory;
    vm_size_t size;
    ipc_port_t *ports;
    int i;

    if (task == TASK_NULL)
        return KERN_INVALID_ARGUMENT;

    size = (vm_size_t) (TASK_PORT_REGISTER_MAX * sizeof(ipc_port_t));

    memory = kalloc(size);
    if (memory == 0)
        return KERN_RESOURCE_SHORTAGE;

    itk_lock(task);
    if (task->itk_self == IP_NULL) {
        itk_unlock(task);

        kfree(memory, size);
        return KERN_INVALID_ARGUMENT;
    }

    ports = (ipc_port_t *) memory;

    /*
     * Clone port rights. Because kalloc'd memory
     * is wired, we won't fault while holding the task lock.
     */

    for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
        ports[i] = ipc_port_copy_send(task->itk_registered[i]);

    itk_unlock(task);

    *portsp = (mach_port_array_t) ports;
    *portsCnt = TASK_PORT_REGISTER_MAX;
    return KERN_SUCCESS;
}
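
/*
 * Example (editor's illustrative sketch, not part of this file): the
 * registered-port stash as seen from user space.  mach_ports_lookup()
 * returns an out-of-line array; the caller owns it and would normally
 * vm_deallocate() it when done.
 */
#if 0   /* user-space usage sketch */
#include <mach/mach.h>

static void dump_registered_ports(void)
{
    mach_port_array_t ports = NULL;
    mach_msg_type_number_t count = 0;

    if (mach_ports_lookup(mach_task_self(), &ports, &count) == KERN_SUCCESS) {
        for (mach_msg_type_number_t i = 0; i < count; i++) {
            /* entries that were never registered come back as MACH_PORT_NULL */
            if (MACH_PORT_VALID(ports[i]))
                mach_port_deallocate(mach_task_self(), ports[i]);
        }
        vm_deallocate(mach_task_self(), (vm_address_t)ports,
            count * sizeof(ports[0]));
    }
}
#endif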
/*
 *  Routine:    convert_port_to_locked_task
 *  Purpose:
 *      Internal helper routine to convert from a port to a locked
 *      task.  Used by several routines that try to convert from a
 *      task port to a reference on some task related object.
 *  Conditions:
 *      Nothing locked, blocking OK.
 */
task_t
convert_port_to_locked_task(ipc_port_t port)
{
    int try_failed_count = 0;

    while (IP_VALID(port)) {
        task_t task;

        ip_lock(port);
        if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK)) {
            ip_unlock(port);
            return TASK_NULL;
        }
        task = (task_t) port->ip_kobject;
        assert(task != TASK_NULL);

        /*
         * Normal lock ordering puts task_lock() before ip_lock().
         * Attempt out-of-order locking here.
         */
        if (task_lock_try(task)) {
            ip_unlock(port);
            return task;
        }
        try_failed_count++;

        ip_unlock(port);
        mutex_pause(try_failed_count);
    }
    return TASK_NULL;
}
/*
 *  Routine:    convert_port_to_task
 *  Purpose:
 *      Convert from a port to a task.
 *      Doesn't consume the port ref; produces a task ref,
 *      which may be null.
 *  Conditions:
 *      Nothing locked.
 */
task_t
convert_port_to_task(
    ipc_port_t      port)
{
    task_t      task = TASK_NULL;

    if (IP_VALID(port)) {
        ip_lock(port);

        if (ip_active(port) &&
            ip_kotype(port) == IKOT_TASK) {
            task = (task_t)port->ip_kobject;
            assert(task != TASK_NULL);

            task_reference_internal(task);
        }

        ip_unlock(port);
    }

    return (task);
}
/*
 *  Routine:    convert_port_to_task_name
 *  Purpose:
 *      Convert from a port to a task name.
 *      Doesn't consume the port ref; produces a task name ref,
 *      which may be null.
 *  Conditions:
 *      Nothing locked.
 */
task_name_t
convert_port_to_task_name(
    ipc_port_t      port)
{
    task_name_t     task = TASK_NULL;

    if (IP_VALID(port)) {
        ip_lock(port);

        if (ip_active(port) &&
            (ip_kotype(port) == IKOT_TASK ||
             ip_kotype(port) == IKOT_TASK_NAME)) {
            task = (task_name_t)port->ip_kobject;
            assert(task != TASK_NAME_NULL);

            task_reference_internal(task);
        }

        ip_unlock(port);
    }

    return (task);
}
/*
 *  Routine:    convert_port_to_task_suspension_token
 *  Purpose:
 *      Convert from a port to a task suspension token.
 *      Doesn't consume the port ref; produces a suspension token ref,
 *      which may be null.
 *  Conditions:
 *      Nothing locked.
 */
task_suspension_token_t
convert_port_to_task_suspension_token(
    ipc_port_t      port)
{
    task_suspension_token_t     task = TASK_NULL;

    if (IP_VALID(port)) {
        ip_lock(port);

        if (ip_active(port) &&
            ip_kotype(port) == IKOT_TASK_RESUME) {
            task = (task_suspension_token_t)port->ip_kobject;
            assert(task != TASK_NULL);

            task_reference_internal(task);
        }

        ip_unlock(port);
    }

    return (task);
}
/*
 *  Routine:    convert_port_to_space
 *  Purpose:
 *      Convert from a port to a space.
 *      Doesn't consume the port ref; produces a space ref,
 *      which may be null.
 *  Conditions:
 *      Nothing locked.
 */
ipc_space_t
convert_port_to_space(
    ipc_port_t      port)
{
    ipc_space_t space;
    task_t task;

    task = convert_port_to_locked_task(port);

    if (task == TASK_NULL)
        return IPC_SPACE_NULL;

    if (!task->active) {
        task_unlock(task);
        return IPC_SPACE_NULL;
    }

    space = task->itk_space;
    is_reference(space);
    task_unlock(task);
    return (space);
}
/*
 *  Routine:    convert_port_to_map
 *  Purpose:
 *      Convert from a port to a map.
 *      Doesn't consume the port ref; produces a map ref,
 *      which may be null.
 *  Conditions:
 *      Nothing locked.
 */
vm_map_t
convert_port_to_map(
    ipc_port_t      port)
{
    task_t task;
    vm_map_t map;

    task = convert_port_to_locked_task(port);

    if (task == TASK_NULL)
        return VM_MAP_NULL;

    if (!task->active) {
        task_unlock(task);
        return VM_MAP_NULL;
    }

    map = task->map;
    vm_map_reference_swap(map);
    task_unlock(task);
    return (map);
}
/*
 *  Routine:    convert_port_to_thread
 *  Purpose:
 *      Convert from a port to a thread.
 *      Doesn't consume the port ref; produces a thread ref,
 *      which may be null.
 *  Conditions:
 *      Nothing locked.
 */
thread_t
convert_port_to_thread(
    ipc_port_t      port)
{
    thread_t    thread = THREAD_NULL;

    if (IP_VALID(port)) {
        ip_lock(port);

        if (ip_active(port) &&
            ip_kotype(port) == IKOT_THREAD) {
            thread = (thread_t)port->ip_kobject;
            assert(thread != THREAD_NULL);

            thread_reference_internal(thread);
        }

        ip_unlock(port);
    }

    return (thread);
}
/*
 *  Routine:    port_name_to_thread
 *  Purpose:
 *      Convert from a port name to a thread reference.
 *      A name of MACH_PORT_NULL is valid for the null thread.
 *  Conditions:
 *      Nothing locked.
 *
 *  TODO: Could this be faster if it were ipc_port_translate_send based, like thread_switch?
 *        We could avoid extra lock/unlock and extra ref operations on the port.
 */
thread_t
port_name_to_thread(
    mach_port_name_t    name)
{
    thread_t    thread = THREAD_NULL;
    ipc_port_t  kport;

    if (MACH_PORT_VALID(name)) {
        if (ipc_object_copyin(current_space(), name,
                              MACH_MSG_TYPE_COPY_SEND,
                              (ipc_object_t *)&kport) != KERN_SUCCESS)
            return (THREAD_NULL);

        thread = convert_port_to_thread(kport);

        if (IP_VALID(kport))
            ipc_port_release_send(kport);
    }

    return (thread);
}
task_t
port_name_to_task(
    mach_port_name_t name)
{
    ipc_port_t kern_port;
    kern_return_t kr;
    task_t task = TASK_NULL;

    if (MACH_PORT_VALID(name)) {
        kr = ipc_object_copyin(current_space(), name,
                               MACH_MSG_TYPE_COPY_SEND,
                               (ipc_object_t *) &kern_port);
        if (kr != KERN_SUCCESS)
            return TASK_NULL;

        task = convert_port_to_task(kern_port);

        if (IP_VALID(kern_port))
            ipc_port_release_send(kern_port);
    }
    return task;
}
/*
 *  Routine:    convert_task_to_port
 *  Purpose:
 *      Convert from a task to a port.
 *      Consumes a task ref; produces a naked send right
 *      which may be invalid.
 *  Conditions:
 *      Nothing locked.
 */
ipc_port_t
convert_task_to_port(
    task_t      task)
{
    ipc_port_t port;

    itk_lock(task);
    if (task->itk_self != IP_NULL)
        port = ipc_port_make_send(task->itk_self);
    else
        port = IP_NULL;
    itk_unlock(task);

    task_deallocate(task);
    return port;
}
/*
 *  Routine:    convert_task_suspension_token_to_port
 *  Purpose:
 *      Convert from a task suspension token to a port.
 *      Consumes a task suspension token ref; produces a naked send-once right
 *      which may be invalid.
 *  Conditions:
 *      Nothing locked.
 */
ipc_port_t
convert_task_suspension_token_to_port(
    task_suspension_token_t     task)
{
    ipc_port_t port;

    task_lock(task);
    if (task->active) {
        if (task->itk_resume == IP_NULL) {
            task->itk_resume = ipc_port_alloc_kernel();
            if (!IP_VALID(task->itk_resume)) {
                panic("failed to create resume port");
            }

            ipc_kobject_set(task->itk_resume, (ipc_kobject_t) task, IKOT_TASK_RESUME);
        }

        /*
         * Create a send-once right for each instance of a direct user-called
         * task_suspend2 call. Each time one of these send-once rights is abandoned,
         * the notification handler will resume the target task.
         */
        port = ipc_port_make_sonce(task->itk_resume);
        assert(IP_VALID(port));
    } else {
        port = IP_NULL;
    }

    task_unlock(task);
    task_suspension_token_deallocate(task);

    return port;
}
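
/*
 * Example (editor's illustrative sketch, not part of this file): the
 * send-once right minted above backs task_suspend2()/task_resume2().
 * Per the comment in the routine, abandoning the returned suspension
 * token (for example, by the holder exiting) resumes the target task.
 */
#if 0   /* user-space usage sketch */
#include <mach/mach.h>
#include <mach/task.h>

static kern_return_t suspend_briefly(task_t target)
{
    task_suspension_token_t token = MACH_PORT_NULL;
    kern_return_t kr = task_suspend2(target, &token);

    if (kr != KERN_SUCCESS)
        return kr;

    /* ... target is suspended here ... */

    return task_resume2(token);   /* consumes the suspension token */
}
#endif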
/*
 *  Routine:    convert_task_name_to_port
 *  Purpose:
 *      Convert from a task name ref to a port.
 *      Consumes a task name ref; produces a naked send right
 *      which may be invalid.
 *  Conditions:
 *      Nothing locked.
 */
ipc_port_t
convert_task_name_to_port(
    task_name_t     task_name)
{
    ipc_port_t port;

    itk_lock(task_name);
    if (task_name->itk_nself != IP_NULL)
        port = ipc_port_make_send(task_name->itk_nself);
    else
        port = IP_NULL;
    itk_unlock(task_name);

    task_name_deallocate(task_name);
    return port;
}
/*
 *  Routine:    convert_thread_to_port
 *  Purpose:
 *      Convert from a thread to a port.
 *      Consumes a thread ref; produces a naked send right
 *      which may be invalid.
 *  Conditions:
 *      Nothing locked.
 */
ipc_port_t
convert_thread_to_port(
    thread_t        thread)
{
    ipc_port_t      port;

    thread_mtx_lock(thread);

    if (thread->ith_self != IP_NULL)
        port = ipc_port_make_send(thread->ith_self);
    else
        port = IP_NULL;

    thread_mtx_unlock(thread);

    thread_deallocate(thread);

    return (port);
}
/*
 *  Routine:    space_deallocate
 *  Purpose:
 *      Deallocate a space ref produced by convert_port_to_space.
 *  Conditions:
 *      Nothing locked.
 */
void
space_deallocate(
    ipc_space_t space)
{
    if (space != IS_NULL)
        is_release(space);
}
/*
 *  Routine:    thread/task_set_exception_ports [kernel call]
 *  Purpose:
 *      Sets the thread/task exception port, flavor and
 *      behavior for the exception types specified by the mask.
 *      There will be one send right per exception per valid
 *      mask bit.
 *  Conditions:
 *      Nothing locked.  If successful, consumes
 *      the supplied send right.
 *  Returns:
 *      KERN_SUCCESS            Changed the special port.
 *      KERN_INVALID_ARGUMENT   The thread is null,
 *                              Illegal mask bit set.
 *                              Illegal exception behavior.
 *      KERN_FAILURE            The thread is dead.
 */

kern_return_t
thread_set_exception_ports(
    thread_t                thread,
    exception_mask_t        exception_mask,
    ipc_port_t              new_port,
    exception_behavior_t    new_behavior,
    thread_state_flavor_t   new_flavor)
{
    ipc_port_t  old_port[EXC_TYPES_COUNT];
    boolean_t privileged = current_task()->sec_token.val[0] == 0;
    int i;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    if (exception_mask & ~EXC_MASK_VALID)
        return (KERN_INVALID_ARGUMENT);

    if (IP_VALID(new_port)) {
        switch (new_behavior & ~MACH_EXCEPTION_CODES) {

        case EXCEPTION_DEFAULT:
        case EXCEPTION_STATE:
        case EXCEPTION_STATE_IDENTITY:
            break;

        default:
            return (KERN_INVALID_ARGUMENT);
        }
    }

    /*
     * Check the validity of the thread_state_flavor by calling the
     * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
     * osfmk/mach/ARCHITECTURE/thread_status.h
     */
    if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor))
        return (KERN_INVALID_ARGUMENT);

    thread_mtx_lock(thread);

    if (!thread->active) {
        thread_mtx_unlock(thread);

        return (KERN_FAILURE);
    }

    if (thread->exc_actions == NULL) {
        ipc_thread_init_exc_actions(thread);
    }
    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
        if (exception_mask & (1 << i)) {
            old_port[i] = thread->exc_actions[i].port;
            thread->exc_actions[i].port = ipc_port_copy_send(new_port);
            thread->exc_actions[i].behavior = new_behavior;
            thread->exc_actions[i].flavor = new_flavor;
            thread->exc_actions[i].privileged = privileged;
        }
        else
            old_port[i] = IP_NULL;
    }

    thread_mtx_unlock(thread);

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
        if (IP_VALID(old_port[i]))
            ipc_port_release_send(old_port[i]);

    if (IP_VALID(new_port))     /* consume send right */
        ipc_port_release_send(new_port);

    return (KERN_SUCCESS);
}
kern_return_t
task_set_exception_ports(
    task_t                  task,
    exception_mask_t        exception_mask,
    ipc_port_t              new_port,
    exception_behavior_t    new_behavior,
    thread_state_flavor_t   new_flavor)
{
    ipc_port_t  old_port[EXC_TYPES_COUNT];
    boolean_t privileged = current_task()->sec_token.val[0] == 0;
    int i;

    if (task == TASK_NULL)
        return (KERN_INVALID_ARGUMENT);

    if (exception_mask & ~EXC_MASK_VALID)
        return (KERN_INVALID_ARGUMENT);

    if (IP_VALID(new_port)) {
        switch (new_behavior & ~MACH_EXCEPTION_CODES) {

        case EXCEPTION_DEFAULT:
        case EXCEPTION_STATE:
        case EXCEPTION_STATE_IDENTITY:
            break;

        default:
            return (KERN_INVALID_ARGUMENT);
        }
    }

    /*
     * Check the validity of the thread_state_flavor by calling the
     * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
     * osfmk/mach/ARCHITECTURE/thread_status.h
     */
    if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor))
        return (KERN_INVALID_ARGUMENT);

    itk_lock(task);

    if (task->itk_self == IP_NULL) {
        itk_unlock(task);

        return (KERN_FAILURE);
    }

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
        if (exception_mask & (1 << i)) {
            old_port[i] = task->exc_actions[i].port;
            task->exc_actions[i].port =
                ipc_port_copy_send(new_port);
            task->exc_actions[i].behavior = new_behavior;
            task->exc_actions[i].flavor = new_flavor;
            task->exc_actions[i].privileged = privileged;
        }
        else
            old_port[i] = IP_NULL;
    }

    itk_unlock(task);

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
        if (IP_VALID(old_port[i]))
            ipc_port_release_send(old_port[i]);

    if (IP_VALID(new_port))     /* consume send right */
        ipc_port_release_send(new_port);

    return (KERN_SUCCESS);
}
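
/*
 * Example (editor's illustrative sketch, not part of this file): installing
 * a task exception port from user space.  The handler port needs a receive
 * right plus a send right; the kernel stashes one send right per mask bit
 * and, per the comment above, consumes the right supplied to the call.
 */
#if 0   /* user-space usage sketch */
#include <mach/mach.h>

static kern_return_t install_crash_handler(mach_port_t *handler_port)
{
    mach_port_t port = MACH_PORT_NULL;
    kern_return_t kr;

    kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &port);
    if (kr != KERN_SUCCESS)
        return kr;

    kr = mach_port_insert_right(mach_task_self(), port, port,
        MACH_MSG_TYPE_MAKE_SEND);
    if (kr != KERN_SUCCESS)
        return kr;

    kr = task_set_exception_ports(mach_task_self(),
        EXC_MASK_BAD_ACCESS | EXC_MASK_BAD_INSTRUCTION,
        port, EXCEPTION_DEFAULT, THREAD_STATE_NONE);

    *handler_port = port;
    return kr;
}
#endif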
/*
 *  Routine:    thread/task_swap_exception_ports [kernel call]
 *  Purpose:
 *      Sets the thread/task exception port, flavor and
 *      behavior for the exception types specified by the
 *      mask.
 *
 *      The old ports, behavior and flavors are returned.
 *      Count specifies the array sizes on input and
 *      the number of returned ports etc. on output.  The
 *      arrays must be large enough to hold all the returned
 *      data, MIG returns an error otherwise.  The masks
 *      array specifies the corresponding exception type(s).
 *  Conditions:
 *      Nothing locked.  If successful, consumes
 *      the supplied send right.
 *
 *      Returns up to [in] CountCnt elements.
 *  Returns:
 *      KERN_SUCCESS            Changed the special port.
 *      KERN_INVALID_ARGUMENT   The thread is null,
 *                              Illegal mask bit set.
 *                              Illegal exception behavior.
 *      KERN_FAILURE            The thread is dead.
 */

kern_return_t
thread_swap_exception_ports(
    thread_t                    thread,
    exception_mask_t            exception_mask,
    ipc_port_t                  new_port,
    exception_behavior_t        new_behavior,
    thread_state_flavor_t       new_flavor,
    exception_mask_array_t      masks,
    mach_msg_type_number_t      *CountCnt,
    exception_port_array_t      ports,
    exception_behavior_array_t  behaviors,
    thread_state_flavor_array_t flavors)
{
    ipc_port_t  old_port[EXC_TYPES_COUNT];
    boolean_t privileged = current_task()->sec_token.val[0] == 0;
    unsigned int    i, j, count;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    if (exception_mask & ~EXC_MASK_VALID)
        return (KERN_INVALID_ARGUMENT);

    if (IP_VALID(new_port)) {
        switch (new_behavior & ~MACH_EXCEPTION_CODES) {

        case EXCEPTION_DEFAULT:
        case EXCEPTION_STATE:
        case EXCEPTION_STATE_IDENTITY:
            break;

        default:
            return (KERN_INVALID_ARGUMENT);
        }
    }

    if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor))
        return (KERN_INVALID_ARGUMENT);

    thread_mtx_lock(thread);

    if (!thread->active) {
        thread_mtx_unlock(thread);

        return (KERN_FAILURE);
    }

    if (thread->exc_actions == NULL) {
        ipc_thread_init_exc_actions(thread);
    }

    assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
    for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
        if (exception_mask & (1 << i)) {
            for (j = 0; j < count; ++j) {
                /*
                 * search for an identical entry, if found
                 * set corresponding mask for this exception.
                 */
                if (thread->exc_actions[i].port == ports[j] &&
                    thread->exc_actions[i].behavior == behaviors[j] &&
                    thread->exc_actions[i].flavor == flavors[j]) {
                    masks[j] |= (1 << i);
                    break;
                }
            }

            if (j == count) {
                masks[j] = (1 << i);
                ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);

                behaviors[j] = thread->exc_actions[i].behavior;
                flavors[j] = thread->exc_actions[i].flavor;
                ++count;
            }

            old_port[i] = thread->exc_actions[i].port;
            thread->exc_actions[i].port = ipc_port_copy_send(new_port);
            thread->exc_actions[i].behavior = new_behavior;
            thread->exc_actions[i].flavor = new_flavor;
            thread->exc_actions[i].privileged = privileged;
        }
        else
            old_port[i] = IP_NULL;
    }

    thread_mtx_unlock(thread);

    while (--i >= FIRST_EXCEPTION) {
        if (IP_VALID(old_port[i]))
            ipc_port_release_send(old_port[i]);
    }

    if (IP_VALID(new_port))     /* consume send right */
        ipc_port_release_send(new_port);

    *CountCnt = count;

    return (KERN_SUCCESS);
}
kern_return_t
task_swap_exception_ports(
    task_t                      task,
    exception_mask_t            exception_mask,
    ipc_port_t                  new_port,
    exception_behavior_t        new_behavior,
    thread_state_flavor_t       new_flavor,
    exception_mask_array_t      masks,
    mach_msg_type_number_t      *CountCnt,
    exception_port_array_t      ports,
    exception_behavior_array_t  behaviors,
    thread_state_flavor_array_t flavors)
{
    ipc_port_t  old_port[EXC_TYPES_COUNT];
    boolean_t privileged = current_task()->sec_token.val[0] == 0;
    unsigned int    i, j, count;

    if (task == TASK_NULL)
        return (KERN_INVALID_ARGUMENT);

    if (exception_mask & ~EXC_MASK_VALID)
        return (KERN_INVALID_ARGUMENT);

    if (IP_VALID(new_port)) {
        switch (new_behavior & ~MACH_EXCEPTION_CODES) {

        case EXCEPTION_DEFAULT:
        case EXCEPTION_STATE:
        case EXCEPTION_STATE_IDENTITY:
            break;

        default:
            return (KERN_INVALID_ARGUMENT);
        }
    }

    if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor))
        return (KERN_INVALID_ARGUMENT);

    itk_lock(task);

    if (task->itk_self == IP_NULL) {
        itk_unlock(task);

        return (KERN_FAILURE);
    }

    assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
    for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
        if (exception_mask & (1 << i)) {
            for (j = 0; j < count; j++) {
                /*
                 * search for an identical entry, if found
                 * set corresponding mask for this exception.
                 */
                if (task->exc_actions[i].port == ports[j] &&
                    task->exc_actions[i].behavior == behaviors[j] &&
                    task->exc_actions[i].flavor == flavors[j]) {
                    masks[j] |= (1 << i);
                    break;
                }
            }

            if (j == count) {
                masks[j] = (1 << i);
                ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
                behaviors[j] = task->exc_actions[i].behavior;
                flavors[j] = task->exc_actions[i].flavor;
                ++count;
            }

            old_port[i] = task->exc_actions[i].port;

            task->exc_actions[i].port = ipc_port_copy_send(new_port);
            task->exc_actions[i].behavior = new_behavior;
            task->exc_actions[i].flavor = new_flavor;
            task->exc_actions[i].privileged = privileged;
        }
        else
            old_port[i] = IP_NULL;
    }

    itk_unlock(task);

    while (--i >= FIRST_EXCEPTION) {
        if (IP_VALID(old_port[i]))
            ipc_port_release_send(old_port[i]);
    }

    if (IP_VALID(new_port))     /* consume send right */
        ipc_port_release_send(new_port);

    *CountCnt = count;

    return (KERN_SUCCESS);
}
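
/*
 * Example (editor's illustrative sketch, not part of this file): swapping in
 * a handler while capturing the previous ones so they can be restored later.
 * The arrays are assumed to have EXC_TYPES_COUNT entries; *count carries the
 * array capacity in and the number of distinct returned tuples out.
 */
#if 0   /* user-space usage sketch */
#include <mach/mach.h>

static kern_return_t swap_in_handler(mach_port_t new_port,
    exception_mask_t masks[EXC_TYPES_COUNT],
    mach_port_t old_ports[EXC_TYPES_COUNT],
    exception_behavior_t old_behaviors[EXC_TYPES_COUNT],
    thread_state_flavor_t old_flavors[EXC_TYPES_COUNT],
    mach_msg_type_number_t *count)
{
    *count = EXC_TYPES_COUNT;   /* array capacity on input */
    return task_swap_exception_ports(mach_task_self(), EXC_MASK_ALL,
        new_port, EXCEPTION_DEFAULT, THREAD_STATE_NONE,
        masks, count, old_ports, old_behaviors, old_flavors);
}
#endif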
/*
 *  Routine:    thread/task_get_exception_ports [kernel call]
 *  Purpose:
 *      Clones a send right for each of the thread/task's exception
 *      ports specified in the mask and returns the behaviour
 *      and flavor of said port.
 *
 *      Returns up to [in] CountCnt elements.
 *  Conditions:
 *      Nothing locked.
 *  Returns:
 *      KERN_SUCCESS            Extracted a send right.
 *      KERN_INVALID_ARGUMENT   The thread is null,
 *                              Invalid special port,
 *                              Illegal mask bit set.
 *      KERN_FAILURE            The thread is dead.
 */

kern_return_t
thread_get_exception_ports(
    thread_t                    thread,
    exception_mask_t            exception_mask,
    exception_mask_array_t      masks,
    mach_msg_type_number_t      *CountCnt,
    exception_port_array_t      ports,
    exception_behavior_array_t  behaviors,
    thread_state_flavor_array_t flavors)
{
    unsigned int    i, j, count;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    if (exception_mask & ~EXC_MASK_VALID)
        return (KERN_INVALID_ARGUMENT);

    thread_mtx_lock(thread);

    if (!thread->active) {
        thread_mtx_unlock(thread);

        return (KERN_FAILURE);
    }

    count = 0;

    if (thread->exc_actions == NULL) {
        goto done;
    }

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
        if (exception_mask & (1 << i)) {
            for (j = 0; j < count; ++j) {
                /*
                 * search for an identical entry, if found
                 * set corresponding mask for this exception.
                 */
                if (thread->exc_actions[i].port == ports[j] &&
                    thread->exc_actions[i].behavior == behaviors[j] &&
                    thread->exc_actions[i].flavor == flavors[j]) {
                    masks[j] |= (1 << i);
                    break;
                }
            }

            if (j == count) {
                masks[j] = (1 << i);
                ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);
                behaviors[j] = thread->exc_actions[i].behavior;
                flavors[j] = thread->exc_actions[i].flavor;
                ++count;
                if (count >= *CountCnt)
                    break;
            }
        }
    }

done:
    thread_mtx_unlock(thread);

    *CountCnt = count;

    return (KERN_SUCCESS);
}
kern_return_t
task_get_exception_ports(
    task_t                      task,
    exception_mask_t            exception_mask,
    exception_mask_array_t      masks,
    mach_msg_type_number_t      *CountCnt,
    exception_port_array_t      ports,
    exception_behavior_array_t  behaviors,
    thread_state_flavor_array_t flavors)
{
    unsigned int    i, j, count;

    if (task == TASK_NULL)
        return (KERN_INVALID_ARGUMENT);

    if (exception_mask & ~EXC_MASK_VALID)
        return (KERN_INVALID_ARGUMENT);

    itk_lock(task);

    if (task->itk_self == IP_NULL) {
        itk_unlock(task);

        return (KERN_FAILURE);
    }

    count = 0;

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
        if (exception_mask & (1 << i)) {
            for (j = 0; j < count; ++j) {
                /*
                 * search for an identical entry, if found
                 * set corresponding mask for this exception.
                 */
                if (task->exc_actions[i].port == ports[j] &&
                    task->exc_actions[i].behavior == behaviors[j] &&
                    task->exc_actions[i].flavor == flavors[j]) {
                    masks[j] |= (1 << i);
                    break;
                }
            }

            if (j == count) {
                masks[j] = (1 << i);
                ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
                behaviors[j] = task->exc_actions[i].behavior;
                flavors[j] = task->exc_actions[i].flavor;
                ++count;
                if (count > *CountCnt)
                    break;
            }
        }
    }

    itk_unlock(task);

    *CountCnt = count;

    return (KERN_SUCCESS);
}
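
/*
 * Example (editor's illustrative sketch, not part of this file): reading back
 * the current task exception ports, e.g. to save them before installing a
 * new handler.  *count carries the array capacity in and the entry count out.
 */
#if 0   /* user-space usage sketch */
#include <mach/mach.h>

static kern_return_t save_exception_ports(
    exception_mask_t masks[EXC_TYPES_COUNT],
    mach_port_t ports[EXC_TYPES_COUNT],
    exception_behavior_t behaviors[EXC_TYPES_COUNT],
    thread_state_flavor_t flavors[EXC_TYPES_COUNT],
    mach_msg_type_number_t *count)
{
    *count = EXC_TYPES_COUNT;   /* array capacity on input */
    return task_get_exception_ports(mach_task_self(), EXC_MASK_ALL,
        masks, count, ports, behaviors, flavors);
}
#endif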