/*
 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
/*
 * Task and thread related IPC functions.
 */
#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mach_param.h>
#include <mach/task_special_ports.h>
#include <mach/thread_special_ports.h>
#include <mach/thread_status.h>
#include <mach/exception_types.h>
#include <mach/memory_object_types.h>
#include <mach/mach_traps.h>
#include <mach/task_server.h>
#include <mach/thread_act_server.h>
#include <mach/mach_host_server.h>
#include <mach/host_priv_server.h>
#include <mach/vm_map_server.h>

#include <kern/kern_types.h>
#include <kern/host.h>
#include <kern/ipc_kobject.h>
#include <kern/ipc_tt.h>
#include <kern/kalloc.h>
#include <kern/thread.h>
#include <kern/misc_protos.h>

#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>

#include <security/mac_mach_internal.h>
#if CONFIG_EMBEDDED && !SECURE_KERNEL
extern int cs_relax_platform_task_ports;
#endif

/* forward declarations */
task_t convert_port_to_locked_task(ipc_port_t port);
task_inspect_t convert_port_to_locked_task_inspect(ipc_port_t port);
static void ipc_port_bind_special_reply_port_locked(ipc_port_t port);
static kern_return_t ipc_port_unbind_special_reply_port(thread_t thread, boolean_t unbind_active_port);
kern_return_t task_conversion_eval(task_t caller, task_t victim);
/*
 *	Routine:	ipc_task_init
 *	Purpose:
 *		Initialize a task's IPC state.
 *
 *		If non-null, some state will be inherited from the parent.
 *		The parent must be appropriately initialized.
 */
void
ipc_task_init(
    task_t task,
    task_t parent)
{
    ipc_space_t space;
    ipc_port_t kport;
    ipc_port_t nport;
    kern_return_t kr;
    int i;

    kr = ipc_space_create(&ipc_table_entries[0], &space);
    if (kr != KERN_SUCCESS) {
        panic("ipc_task_init");
    }

    space->is_task = task;

    kport = ipc_port_alloc_kernel();
    if (kport == IP_NULL) {
        panic("ipc_task_init");
    }

    nport = ipc_port_alloc_kernel();
    if (nport == IP_NULL) {
        panic("ipc_task_init");
    }

    itk_lock_init(task);
    task->itk_self = kport;
    task->itk_nself = nport;
    task->itk_resume = IP_NULL; /* Lazily allocated on demand */
    if (task_is_a_corpse_fork(task)) {
        /*
         * A no-senders notification for a corpse would not
         * work with a naked send right in the kernel.
         */
        task->itk_sself = IP_NULL;
    } else {
        task->itk_sself = ipc_port_make_send(kport);
    }
    task->itk_debug_control = IP_NULL;
    task->itk_space = space;

    task->exc_actions[0].label = NULL;
    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
        mac_exc_associate_action_label(&task->exc_actions[i], mac_exc_create_label());
    }

    /* always zero-out the first (unused) array element */
    bzero(&task->exc_actions[0], sizeof(task->exc_actions[0]));

    if (parent == TASK_NULL) {
        ipc_port_t port;

        for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
            task->exc_actions[i].port = IP_NULL;
            task->exc_actions[i].flavor = 0;
            task->exc_actions[i].behavior = 0;
            task->exc_actions[i].privileged = FALSE;
        }

        kr = host_get_host_port(host_priv_self(), &port);
        assert(kr == KERN_SUCCESS);
        task->itk_host = port;

        task->itk_bootstrap = IP_NULL;
        task->itk_seatbelt = IP_NULL;
        task->itk_gssd = IP_NULL;
        task->itk_task_access = IP_NULL;

        for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
            task->itk_registered[i] = IP_NULL;
        }
    } else {
        itk_lock(parent);
        assert(parent->itk_self != IP_NULL);

        /* inherit registered ports */

        for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
            task->itk_registered[i] =
                ipc_port_copy_send(parent->itk_registered[i]);
        }

        /* inherit exception and bootstrap ports */

        for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
            task->exc_actions[i].port =
                ipc_port_copy_send(parent->exc_actions[i].port);
            task->exc_actions[i].flavor =
                parent->exc_actions[i].flavor;
            task->exc_actions[i].behavior =
                parent->exc_actions[i].behavior;
            task->exc_actions[i].privileged =
                parent->exc_actions[i].privileged;
            mac_exc_inherit_action_label(parent->exc_actions + i, task->exc_actions + i);
        }

        task->itk_host =
            ipc_port_copy_send(parent->itk_host);

        task->itk_bootstrap =
            ipc_port_copy_send(parent->itk_bootstrap);

        task->itk_seatbelt =
            ipc_port_copy_send(parent->itk_seatbelt);

        task->itk_gssd =
            ipc_port_copy_send(parent->itk_gssd);

        task->itk_task_access =
            ipc_port_copy_send(parent->itk_task_access);

        itk_unlock(parent);
    }
}
/*
 *	Routine:	ipc_task_enable
 *	Purpose:
 *		Enable a task for IPC access.
 */
void
ipc_task_enable(
    task_t task)
{
    ipc_port_t kport;
    ipc_port_t nport;

    itk_lock(task);
    kport = task->itk_self;
    if (kport != IP_NULL) {
        ipc_kobject_set(kport, (ipc_kobject_t) task, IKOT_TASK);
    }
    nport = task->itk_nself;
    if (nport != IP_NULL) {
        ipc_kobject_set(nport, (ipc_kobject_t) task, IKOT_TASK_NAME);
    }
    itk_unlock(task);
}
/*
 *	Routine:	ipc_task_disable
 *	Purpose:
 *		Disable IPC access to a task.
 */
void
ipc_task_disable(
    task_t task)
{
    ipc_port_t kport;
    ipc_port_t nport;
    ipc_port_t rport;

    itk_lock(task);
    kport = task->itk_self;
    if (kport != IP_NULL) {
        ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
    }
    nport = task->itk_nself;
    if (nport != IP_NULL) {
        ipc_kobject_set(nport, IKO_NULL, IKOT_NONE);
    }

    rport = task->itk_resume;
    if (rport != IP_NULL) {
        /*
         * From this point onwards this task is no longer accepting
         * new suspensions.
         *
         * There are still outstanding suspensions on this task,
         * even as it is being torn down. Disconnect the task
         * from the rport, thereby "orphaning" the rport. The rport
         * itself will go away only when the last suspension holder
         * destroys his SO right to it -- when he either
         * exits, or tries to actually use that last SO right to
         * resume this (now non-existent) task.
         */
        ipc_kobject_set(rport, IKO_NULL, IKOT_NONE);
    }
    itk_unlock(task);
}
/*
 *	Routine:	ipc_task_terminate
 *	Purpose:
 *		Clean up and destroy a task's IPC state.
 *	Conditions:
 *		Nothing locked.  The task must be suspended.
 *		(Or the current thread must be in the task.)
 */
void
ipc_task_terminate(
    task_t task)
{
    ipc_port_t kport;
    ipc_port_t nport;
    ipc_port_t rport;
    int i;

    itk_lock(task);
    kport = task->itk_self;

    if (kport == IP_NULL) {
        /* the task is already terminated (can this happen?) */
        itk_unlock(task);
        return;
    }
    task->itk_self = IP_NULL;

    nport = task->itk_nself;
    assert(nport != IP_NULL);
    task->itk_nself = IP_NULL;

    rport = task->itk_resume;
    task->itk_resume = IP_NULL;

    itk_unlock(task);

    /* release the naked send rights */

    if (IP_VALID(task->itk_sself)) {
        ipc_port_release_send(task->itk_sself);
    }

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
        if (IP_VALID(task->exc_actions[i].port)) {
            ipc_port_release_send(task->exc_actions[i].port);
        }
        mac_exc_free_action_label(task->exc_actions + i);
    }

    if (IP_VALID(task->itk_host)) {
        ipc_port_release_send(task->itk_host);
    }

    if (IP_VALID(task->itk_bootstrap)) {
        ipc_port_release_send(task->itk_bootstrap);
    }

    if (IP_VALID(task->itk_seatbelt)) {
        ipc_port_release_send(task->itk_seatbelt);
    }

    if (IP_VALID(task->itk_gssd)) {
        ipc_port_release_send(task->itk_gssd);
    }

    if (IP_VALID(task->itk_task_access)) {
        ipc_port_release_send(task->itk_task_access);
    }

    if (IP_VALID(task->itk_debug_control)) {
        ipc_port_release_send(task->itk_debug_control);
    }

    for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
        if (IP_VALID(task->itk_registered[i])) {
            ipc_port_release_send(task->itk_registered[i]);
        }
    }

    /* destroy the kernel ports */
    ipc_port_dealloc_kernel(kport);
    ipc_port_dealloc_kernel(nport);
    if (rport != IP_NULL) {
        ipc_port_dealloc_kernel(rport);
    }

    itk_lock_destroy(task);
}
/*
 *	Routine:	ipc_task_reset
 *	Purpose:
 *		Reset a task's IPC state to protect it when
 *		it enters an elevated security context. The
 *		task name port can remain the same - since
 *		it represents no specific privilege.
 *	Conditions:
 *		Nothing locked.  The task must be suspended.
 *		(Or the current thread must be in the task.)
 */
void
ipc_task_reset(
    task_t task)
{
    ipc_port_t old_kport, new_kport;
    ipc_port_t old_sself;
    ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
    int i;

    /* Fresh label to unset credentials in existing labels. */
    struct label *unset_label = mac_exc_create_label();

    new_kport = ipc_kobject_alloc_port((ipc_kobject_t)task, IKOT_TASK,
        IPC_KOBJECT_ALLOC_MAKE_SEND);

    itk_lock(task);

    old_kport = task->itk_self;

    if (old_kport == IP_NULL) {
        /* the task is already terminated (can this happen?) */
        itk_unlock(task);
        ipc_port_release_send(new_kport);
        ipc_port_dealloc_kernel(new_kport);
        mac_exc_free_label(unset_label);
        return;
    }

    old_sself = task->itk_sself;
    task->itk_sself = task->itk_self = new_kport;

    /* Set the old kport to IKOT_NONE and update the exec token while under the port lock */
    ip_lock(old_kport);
    ipc_kobject_set_atomically(old_kport, IKO_NULL, IKOT_NONE);
    task->exec_token += 1;
    ip_unlock(old_kport);

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
        old_exc_actions[i] = IP_NULL;

        if (i == EXC_CORPSE_NOTIFY && task_corpse_pending_report(task)) {
            /* keep the corpse notify port while a report is pending */
            continue;
        }

        if (!task->exc_actions[i].privileged) {
            mac_exc_update_action_label(task->exc_actions + i, unset_label);
            old_exc_actions[i] = task->exc_actions[i].port;
            task->exc_actions[i].port = IP_NULL;
        }
    }

    if (IP_VALID(task->itk_debug_control)) {
        ipc_port_release_send(task->itk_debug_control);
    }
    task->itk_debug_control = IP_NULL;

    itk_unlock(task);

    mac_exc_free_label(unset_label);

    /* release the naked send rights */

    if (IP_VALID(old_sself)) {
        ipc_port_release_send(old_sself);
    }

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
        if (IP_VALID(old_exc_actions[i])) {
            ipc_port_release_send(old_exc_actions[i]);
        }
    }

    /* destroy the kernel port */
    ipc_port_dealloc_kernel(old_kport);
}
/*
 *	Routine:	ipc_thread_init
 *	Purpose:
 *		Initialize a thread's IPC state.
 */
void
ipc_thread_init(
    thread_t thread)
{
    ipc_port_t kport;

    kport = ipc_kobject_alloc_port((ipc_kobject_t)thread, IKOT_THREAD,
        IPC_KOBJECT_ALLOC_MAKE_SEND);

    thread->ith_sself = thread->ith_self = kport;
    thread->ith_special_reply_port = NULL;
    thread->exc_actions = NULL;

#if IMPORTANCE_INHERITANCE
    thread->ith_assertions = 0;
#endif

    ipc_kmsg_queue_init(&thread->ith_messages);

    thread->ith_rpc_reply = IP_NULL;
}

void
ipc_thread_init_exc_actions(
    thread_t thread)
{
    assert(thread->exc_actions == NULL);

    thread->exc_actions = kalloc(sizeof(struct exception_action) * EXC_TYPES_COUNT);
    bzero(thread->exc_actions, sizeof(struct exception_action) * EXC_TYPES_COUNT);

    for (size_t i = 0; i < EXC_TYPES_COUNT; ++i) {
        mac_exc_associate_action_label(thread->exc_actions + i, mac_exc_create_label());
    }
}

void
ipc_thread_destroy_exc_actions(
    thread_t thread)
{
    if (thread->exc_actions != NULL) {
        for (size_t i = 0; i < EXC_TYPES_COUNT; ++i) {
            mac_exc_free_action_label(thread->exc_actions + i);
        }

        kfree(thread->exc_actions,
            sizeof(struct exception_action) * EXC_TYPES_COUNT);
        thread->exc_actions = NULL;
    }
}
void
ipc_thread_disable(
    thread_t thread)
{
    ipc_port_t kport = thread->ith_self;

    if (kport != IP_NULL) {
        ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
    }

    /* unbind the thread special reply port */
    if (IP_VALID(thread->ith_special_reply_port)) {
        ipc_port_unbind_special_reply_port(thread, TRUE);
    }
}
/*
 *	Routine:	ipc_thread_terminate
 *	Purpose:
 *		Clean up and destroy a thread's IPC state.
 */
void
ipc_thread_terminate(
    thread_t thread)
{
    ipc_port_t kport = thread->ith_self;

    if (kport != IP_NULL) {
        int i;

        if (IP_VALID(thread->ith_sself)) {
            ipc_port_release_send(thread->ith_sself);
        }

        thread->ith_sself = thread->ith_self = IP_NULL;

        if (thread->exc_actions != NULL) {
            for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
                if (IP_VALID(thread->exc_actions[i].port)) {
                    ipc_port_release_send(thread->exc_actions[i].port);
                }
            }
            ipc_thread_destroy_exc_actions(thread);
        }

        ipc_port_dealloc_kernel(kport);
    }

#if IMPORTANCE_INHERITANCE
    assert(thread->ith_assertions == 0);
#endif

    assert(ipc_kmsg_queue_empty(&thread->ith_messages));

    if (thread->ith_rpc_reply != IP_NULL) {
        ipc_port_dealloc_reply(thread->ith_rpc_reply);
    }

    thread->ith_rpc_reply = IP_NULL;
}
/*
 *	Routine:	ipc_thread_reset
 *	Purpose:
 *		Reset the IPC state for a given Mach thread when
 *		its task enters an elevated security context.
 *		Both the thread port and its exception ports have
 *		to be reset.  Its RPC reply port cannot have any
 *		rights outstanding, so it should be fine.
 */
void
ipc_thread_reset(
    thread_t thread)
{
    ipc_port_t old_kport, new_kport;
    ipc_port_t old_sself;
    ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
    boolean_t has_old_exc_actions = FALSE;
    int i;

    struct label *new_label = mac_exc_create_label();

    new_kport = ipc_kobject_alloc_port((ipc_kobject_t)thread, IKOT_THREAD,
        IPC_KOBJECT_ALLOC_MAKE_SEND);

    thread_mtx_lock(thread);

    old_kport = thread->ith_self;
    old_sself = thread->ith_sself;

    if (old_kport == IP_NULL && thread->inspection == FALSE) {
        /* the thread is already terminated (can this happen?) */
        thread_mtx_unlock(thread);
        ipc_port_release_send(new_kport);
        ipc_port_dealloc_kernel(new_kport);
        mac_exc_free_label(new_label);
        return;
    }

    thread->ith_sself = thread->ith_self = new_kport;
    if (old_kport != IP_NULL) {
        ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
    }

    /*
     * Only ports that were set by root-owned processes
     * (privileged ports) should survive
     */
    if (thread->exc_actions != NULL) {
        has_old_exc_actions = TRUE;
        for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
            if (thread->exc_actions[i].privileged) {
                old_exc_actions[i] = IP_NULL;
            } else {
                mac_exc_update_action_label(thread->exc_actions + i, new_label);
                old_exc_actions[i] = thread->exc_actions[i].port;
                thread->exc_actions[i].port = IP_NULL;
            }
        }
    }

    thread_mtx_unlock(thread);

    mac_exc_free_label(new_label);

    /* release the naked send rights */

    if (IP_VALID(old_sself)) {
        ipc_port_release_send(old_sself);
    }

    if (has_old_exc_actions) {
        for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
            ipc_port_release_send(old_exc_actions[i]);
        }
    }

    /* destroy the kernel port */
    if (old_kport != IP_NULL) {
        ipc_port_dealloc_kernel(old_kport);
    }

    /* unbind the thread special reply port */
    if (IP_VALID(thread->ith_special_reply_port)) {
        ipc_port_unbind_special_reply_port(thread, TRUE);
    }
}
/*
 *	Routine:	retrieve_task_self_fast
 *	Purpose:
 *		Optimized version of retrieve_task_self,
 *		that only works for the current task.
 *
 *		Return a send right (possibly null/dead)
 *		for the task's user-visible self port.
 */
ipc_port_t
retrieve_task_self_fast(
    task_t task)
{
    __assert_only ipc_port_t sright;
    ipc_port_t port;

    assert(task == current_task());

    itk_lock(task);
    assert(task->itk_self != IP_NULL);

    if ((port = task->itk_sself) == task->itk_self) {
        /* no interposing */
        sright = ipc_port_copy_send(port);
        assert(sright == port);
    } else {
        port = ipc_port_copy_send(port);
    }
    itk_unlock(task);

    return port;
}

/*
 *	Routine:	retrieve_thread_self_fast
 *	Purpose:
 *		Return a send right (possibly null/dead)
 *		for the thread's user-visible self port.
 *
 *		Only works for the current thread.
 */
ipc_port_t
retrieve_thread_self_fast(
    thread_t thread)
{
    __assert_only ipc_port_t sright;
    ipc_port_t port;

    assert(thread == current_thread());

    thread_mtx_lock(thread);

    assert(thread->ith_self != IP_NULL);

    if ((port = thread->ith_sself) == thread->ith_self) {
        /* no interposing */
        sright = ipc_port_copy_send(port);
        assert(sright == port);
    } else {
        port = ipc_port_copy_send(port);
    }

    thread_mtx_unlock(thread);

    return port;
}
/*
 *	Routine:	task_self_trap [mach trap]
 *	Purpose:
 *		Give the caller send rights for his own task port.
 *	Returns:
 *		MACH_PORT_NULL if there are any resource failures
 */
mach_port_name_t
task_self_trap(
    __unused struct task_self_trap_args *args)
{
    task_t task = current_task();
    ipc_port_t sright;
    mach_port_name_t name;

    sright = retrieve_task_self_fast(task);
    name = ipc_port_copyout_send(sright, task->itk_space);
    return name;
}

/*
 *	Routine:	thread_self_trap [mach trap]
 *	Purpose:
 *		Give the caller send rights for his own thread port.
 *	Returns:
 *		MACH_PORT_NULL if there are any resource failures
 */
mach_port_name_t
thread_self_trap(
    __unused struct thread_self_trap_args *args)
{
    thread_t thread = current_thread();
    task_t task = thread->task;
    ipc_port_t sright;
    mach_port_name_t name;

    sright = retrieve_thread_self_fast(thread);
    name = ipc_port_copyout_send(sright, task->itk_space);
    return name;
}
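/*
 * Illustrative user-space sketch (not part of this kernel file): these traps
 * are normally reached through the mach_task_self() and mach_thread_self()
 * wrappers in libsyscall. Only the public <mach/mach.h> interfaces are assumed.
 */
#if 0   /* user-space example only */
#include <mach/mach.h>
#include <stdio.h>

int
main(void)
{
    /* mach_task_self() returns the task's (cached) self port name. */
    mach_port_t task = mach_task_self();

    /* mach_thread_self() returns a new send right that must be released. */
    mach_port_t thread = mach_thread_self();

    printf("task port 0x%x, thread port 0x%x\n", task, thread);

    mach_port_deallocate(mach_task_self(), thread);
    return 0;
}
#endif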
/*
 *	Routine:	mach_reply_port [mach trap]
 *	Purpose:
 *		Allocate a port for the caller.
 *	Returns:
 *		MACH_PORT_NULL if there are any resource failures
 */
mach_port_name_t
mach_reply_port(
    __unused struct mach_reply_port_args *args)
{
    ipc_port_t port;
    mach_port_name_t name;
    kern_return_t kr;

    kr = ipc_port_alloc(current_task()->itk_space, IPC_PORT_INIT_MESSAGE_QUEUE,
        &name, &port);
    if (kr == KERN_SUCCESS) {
        ip_unlock(port);
    } else {
        name = MACH_PORT_NULL;
    }
    return name;
}
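/*
 * Illustrative user-space sketch (not part of this kernel file): most code
 * obtains reply ports indirectly via MIG (mig_get_reply_port()); shown here
 * is a direct call, assuming only the public mach_reply_port() declaration.
 */
#if 0   /* user-space example only */
#include <mach/mach.h>

static mach_port_t
make_reply_port(void)
{
    /* The trap returns a raw receive right, or MACH_PORT_NULL on failure. */
    mach_port_t reply = mach_reply_port();

    return MACH_PORT_VALID(reply) ? reply : MACH_PORT_NULL;
}
#endif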
/*
 *	Routine:	thread_get_special_reply_port [mach trap]
 *	Purpose:
 *		Allocate a special reply port for the calling thread.
 *	Returns:
 *		mach_port_name_t: send right & receive right for special reply port.
 *		MACH_PORT_NULL if there are any resource failures
 */
mach_port_name_t
thread_get_special_reply_port(
    __unused struct thread_get_special_reply_port_args *args)
{
    ipc_port_t port;
    mach_port_name_t name;
    kern_return_t kr;
    thread_t thread = current_thread();
    ipc_port_init_flags_t flags = IPC_PORT_INIT_MESSAGE_QUEUE |
        IPC_PORT_INIT_MAKE_SEND_RIGHT | IPC_PORT_INIT_SPECIAL_REPLY;

    /* unbind the thread special reply port */
    if (IP_VALID(thread->ith_special_reply_port)) {
        kr = ipc_port_unbind_special_reply_port(thread, TRUE);
        if (kr != KERN_SUCCESS) {
            return MACH_PORT_NULL;
        }
    }

    kr = ipc_port_alloc(current_task()->itk_space, flags, &name, &port);
    if (kr == KERN_SUCCESS) {
        ipc_port_bind_special_reply_port_locked(port);
        ip_unlock(port);
    } else {
        name = MACH_PORT_NULL;
    }
    return name;
}
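/*
 * Illustrative user-space sketch (not part of this kernel file). It assumes a
 * libsyscall wrapper with the same name and prototype as the trap; treat that
 * prototype as an assumption for illustration only.
 */
#if 0   /* user-space example only */
#include <mach/mach.h>

extern mach_port_t thread_get_special_reply_port(void);   /* assumed wrapper */

static mach_port_t
get_special_reply_port(void)
{
    /*
     * Returns a combined send + receive right bound to the calling thread;
     * calling it again unbinds the old port and allocates a fresh one.
     */
    mach_port_t srp = thread_get_special_reply_port();

    return MACH_PORT_VALID(srp) ? srp : MACH_PORT_NULL;
}
#endif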
/*
 *	Routine:	ipc_port_bind_special_reply_port_locked
 *	Purpose:
 *		Bind the given port to current thread as a special reply port.
 *	Conditions:
 *		Port locked.
 */
static void
ipc_port_bind_special_reply_port_locked(
    ipc_port_t port)
{
    thread_t thread = current_thread();
    assert(thread->ith_special_reply_port == NULL);
    assert(port->ip_specialreply);
    assert(port->ip_sync_link_state == PORT_SYNC_LINK_ANY);

    ip_reference(port);
    thread->ith_special_reply_port = port;
    port->ip_messages.imq_srp_owner_thread = thread;

    ipc_special_reply_port_bits_reset(port);
}
/*
 *	Routine:	ipc_port_unbind_special_reply_port
 *	Purpose:
 *		Unbind the thread's special reply port.
 *		If the special port has threads waiting on turnstile,
 *		update its inheritor.
 */
static kern_return_t
ipc_port_unbind_special_reply_port(
    thread_t thread,
    boolean_t unbind_active_port)
{
    ipc_port_t special_reply_port = thread->ith_special_reply_port;

    ip_lock(special_reply_port);

    /* Return error if port active and unbind_active_port set to FALSE */
    if (unbind_active_port == FALSE && ip_active(special_reply_port)) {
        ip_unlock(special_reply_port);
        return KERN_FAILURE;
    }

    thread->ith_special_reply_port = NULL;
    ipc_port_adjust_special_reply_port_locked(special_reply_port, NULL,
        IPC_PORT_ADJUST_UNLINK_THREAD, FALSE);
    /* port unlocked */

    ip_release(special_reply_port);
    return KERN_SUCCESS;
}
/*
 *	Routine:	thread_get_special_port [kernel call]
 *	Purpose:
 *		Clones a send right for one of the thread's
 *		special ports.
 *	Returns:
 *		KERN_SUCCESS		Extracted a send right.
 *		KERN_INVALID_ARGUMENT	The thread is null.
 *		KERN_FAILURE		The thread is dead.
 *		KERN_INVALID_ARGUMENT	Invalid special port.
 */
kern_return_t
thread_get_special_port(
    thread_t thread,
    int which,
    ipc_port_t *portp)
{
    kern_return_t result = KERN_SUCCESS;
    ipc_port_t *whichp;

    if (thread == THREAD_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    switch (which) {
    case THREAD_KERNEL_PORT:
        whichp = &thread->ith_sself;
        break;

    default:
        return KERN_INVALID_ARGUMENT;
    }

    thread_mtx_lock(thread);

    if (thread->active) {
        *portp = ipc_port_copy_send(*whichp);
    } else {
        result = KERN_FAILURE;
    }

    thread_mtx_unlock(thread);

    return result;
}
/*
 *	Routine:	thread_set_special_port [kernel call]
 *	Purpose:
 *		Changes one of the thread's special ports,
 *		setting it to the supplied send right.
 *	Conditions:
 *		Nothing locked.  If successful, consumes
 *		the supplied send right.
 *	Returns:
 *		KERN_SUCCESS		Changed the special port.
 *		KERN_INVALID_ARGUMENT	The thread is null.
 *		KERN_FAILURE		The thread is dead.
 *		KERN_INVALID_ARGUMENT	Invalid special port.
 */
kern_return_t
thread_set_special_port(
    thread_t thread,
    int which,
    ipc_port_t port)
{
    kern_return_t result = KERN_SUCCESS;
    ipc_port_t *whichp, old = IP_NULL;

    if (thread == THREAD_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    switch (which) {
    case THREAD_KERNEL_PORT:
        whichp = &thread->ith_sself;
        break;

    default:
        return KERN_INVALID_ARGUMENT;
    }

    thread_mtx_lock(thread);

    if (thread->active) {
        old = *whichp;
        *whichp = port;
    } else {
        result = KERN_FAILURE;
    }

    thread_mtx_unlock(thread);

    if (IP_VALID(old)) {
        ipc_port_release_send(old);
    }

    return result;
}
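/*
 * Illustrative user-space sketch (not part of this kernel file): cloning the
 * calling thread's kernel port via the public thread_get_special_port()
 * routine from <mach/mach.h>.
 */
#if 0   /* user-space example only */
#include <mach/mach.h>

static kern_return_t
duplicate_thread_kernel_port(mach_port_t *out)
{
    mach_port_t thread = mach_thread_self();
    kern_return_t kr;

    /* Clones a send right for the thread's kernel (self) port. */
    kr = thread_get_special_port(thread, THREAD_KERNEL_PORT, out);

    mach_port_deallocate(mach_task_self(), thread);
    return kr;
}
#endif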
/*
 *	Routine:	task_get_special_port [kernel call]
 *	Purpose:
 *		Clones a send right for one of the task's
 *		special ports.
 *	Returns:
 *		KERN_SUCCESS		Extracted a send right.
 *		KERN_INVALID_ARGUMENT	The task is null.
 *		KERN_FAILURE		The task/space is dead.
 *		KERN_INVALID_ARGUMENT	Invalid special port.
 */
kern_return_t
task_get_special_port(
    task_t task,
    int which,
    ipc_port_t *portp)
{
    ipc_port_t port;

    if (task == TASK_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    itk_lock(task);
    if (task->itk_self == IP_NULL) {
        itk_unlock(task);
        return KERN_FAILURE;
    }

    switch (which) {
    case TASK_KERNEL_PORT:
        port = ipc_port_copy_send(task->itk_sself);
        break;

    case TASK_NAME_PORT:
        port = ipc_port_make_send(task->itk_nself);
        break;

    case TASK_HOST_PORT:
        port = ipc_port_copy_send(task->itk_host);
        break;

    case TASK_BOOTSTRAP_PORT:
        port = ipc_port_copy_send(task->itk_bootstrap);
        break;

    case TASK_SEATBELT_PORT:
        port = ipc_port_copy_send(task->itk_seatbelt);
        break;

    case TASK_ACCESS_PORT:
        port = ipc_port_copy_send(task->itk_task_access);
        break;

    case TASK_DEBUG_CONTROL_PORT:
        port = ipc_port_copy_send(task->itk_debug_control);
        break;

    default:
        itk_unlock(task);
        return KERN_INVALID_ARGUMENT;
    }
    itk_unlock(task);

    *portp = port;
    return KERN_SUCCESS;
}
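/*
 * Illustrative user-space sketch (not part of this kernel file): fetching a
 * copied send right for the bootstrap port with the public
 * task_get_special_port() routine.
 */
#if 0   /* user-space example only */
#include <mach/mach.h>

static kern_return_t
get_bootstrap_port_copy(mach_port_t *bootstrap)
{
    /*
     * TASK_BOOTSTRAP_PORT yields a copied send right; the caller owns it
     * and should mach_port_deallocate() it when done.
     */
    return task_get_special_port(mach_task_self(),
        TASK_BOOTSTRAP_PORT, bootstrap);
}
#endif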
/*
 *	Routine:	task_set_special_port [kernel call]
 *	Purpose:
 *		Changes one of the task's special ports,
 *		setting it to the supplied send right.
 *	Conditions:
 *		Nothing locked.  If successful, consumes
 *		the supplied send right.
 *	Returns:
 *		KERN_SUCCESS		Changed the special port.
 *		KERN_INVALID_ARGUMENT	The task is null.
 *		KERN_FAILURE		The task/space is dead.
 *		KERN_INVALID_ARGUMENT	Invalid special port.
 *		KERN_NO_ACCESS		Restricted access to set port.
 */
kern_return_t
task_set_special_port(
    task_t task,
    int which,
    ipc_port_t port)
{
    if (task == TASK_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    if (task_is_driver(current_task())) {
        return KERN_NO_ACCESS;
    }

    switch (which) {
    case TASK_KERNEL_PORT:
    case TASK_HOST_PORT:
        if (csr_check(CSR_ALLOW_KERNEL_DEBUGGER) == 0) {
            /*
             * Only allow setting of task-self / task-host
             * special ports from user-space when SIP is
             * disabled (for Mach-on-Mach emulation).
             */
            break;
        }
        return KERN_NO_ACCESS;

    default:
        break;
    }

    return task_set_special_port_internal(task, which, port);
}
/*
 *	Routine:	task_set_special_port_internal
 *	Purpose:
 *		Changes one of the task's special ports,
 *		setting it to the supplied send right.
 *	Conditions:
 *		Nothing locked.  If successful, consumes
 *		the supplied send right.
 *	Returns:
 *		KERN_SUCCESS		Changed the special port.
 *		KERN_INVALID_ARGUMENT	The task is null.
 *		KERN_FAILURE		The task/space is dead.
 *		KERN_INVALID_ARGUMENT	Invalid special port.
 *		KERN_NO_ACCESS		Restricted access to overwrite port.
 */
kern_return_t
task_set_special_port_internal(
    task_t task,
    int which,
    ipc_port_t port)
{
    ipc_port_t *whichp;
    ipc_port_t old;

    if (task == TASK_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    switch (which) {
    case TASK_KERNEL_PORT:
        whichp = &task->itk_sself;
        break;

    case TASK_HOST_PORT:
        whichp = &task->itk_host;
        break;

    case TASK_BOOTSTRAP_PORT:
        whichp = &task->itk_bootstrap;
        break;

    case TASK_SEATBELT_PORT:
        whichp = &task->itk_seatbelt;
        break;

    case TASK_ACCESS_PORT:
        whichp = &task->itk_task_access;
        break;

    case TASK_DEBUG_CONTROL_PORT:
        whichp = &task->itk_debug_control;
        break;

    default:
        return KERN_INVALID_ARGUMENT;
    }

    itk_lock(task);
    if (task->itk_self == IP_NULL) {
        itk_unlock(task);
        return KERN_FAILURE;
    }

    /* Never allow overwrite of seatbelt, or task access ports */
    switch (which) {
    case TASK_SEATBELT_PORT:
    case TASK_ACCESS_PORT:
        if (IP_VALID(*whichp)) {
            itk_unlock(task);
            return KERN_NO_ACCESS;
        }
        break;

    default:
        break;
    }

    old = *whichp;
    *whichp = port;
    itk_unlock(task);

    if (IP_VALID(old)) {
        ipc_port_release_send(old);
    }
    return KERN_SUCCESS;
}
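/*
 * Illustrative user-space sketch (not part of this kernel file): replacing the
 * task bootstrap port with the public task_set_special_port() routine.
 */
#if 0   /* user-space example only */
#include <mach/mach.h>

static kern_return_t
replace_bootstrap_port(mach_port_t new_bootstrap)
{
    /*
     * On success the supplied send right is consumed by the kernel.
     * Seatbelt and task-access ports may only be set once
     * (KERN_NO_ACCESS on overwrite), and the task-self/host ports are
     * only settable when SIP is disabled.
     */
    return task_set_special_port(mach_task_self(),
        TASK_BOOTSTRAP_PORT, new_bootstrap);
}
#endif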
/*
 *	Routine:	mach_ports_register [kernel call]
 *	Purpose:
 *		Stash a handful of port send rights in the task.
 *		Child tasks will inherit these rights, but they
 *		must use mach_ports_lookup to acquire them.
 *
 *		The rights are supplied in a (wired) kalloc'd segment.
 *		Rights which aren't supplied are assumed to be null.
 *	Conditions:
 *		Nothing locked.  If successful, consumes
 *		the supplied rights and memory.
 *	Returns:
 *		KERN_SUCCESS		Stashed the port rights.
 *		KERN_INVALID_ARGUMENT	The task is null.
 *		KERN_INVALID_ARGUMENT	The task is dead.
 *		KERN_INVALID_ARGUMENT	The memory param is null.
 *		KERN_INVALID_ARGUMENT	Too many port rights supplied.
 */
kern_return_t
mach_ports_register(
    task_t task,
    mach_port_array_t memory,
    mach_msg_type_number_t portsCnt)
{
    ipc_port_t ports[TASK_PORT_REGISTER_MAX];
    unsigned int i;

    if ((task == TASK_NULL) ||
        (portsCnt > TASK_PORT_REGISTER_MAX) ||
        (portsCnt && memory == NULL)) {
        return KERN_INVALID_ARGUMENT;
    }

    /*
     * Pad the port rights with nulls.
     */

    for (i = 0; i < portsCnt; i++) {
        ports[i] = memory[i];
    }
    for (; i < TASK_PORT_REGISTER_MAX; i++) {
        ports[i] = IP_NULL;
    }

    itk_lock(task);
    if (task->itk_self == IP_NULL) {
        itk_unlock(task);
        return KERN_INVALID_ARGUMENT;
    }

    /*
     * Replace the old send rights with the new.
     * Release the old rights after unlocking.
     */

    for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
        ipc_port_t old;

        old = task->itk_registered[i];
        task->itk_registered[i] = ports[i];
        ports[i] = old;
    }

    itk_unlock(task);

    for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
        if (IP_VALID(ports[i])) {
            ipc_port_release_send(ports[i]);
        }
    }

    /*
     * Now that the operation is known to be successful,
     * we can free the memory.
     */

    if (portsCnt != 0) {
        kfree(memory,
            (vm_size_t) (portsCnt * sizeof(mach_port_t)));
    }

    return KERN_SUCCESS;
}
/*
 *	Routine:	mach_ports_lookup [kernel call]
 *	Purpose:
 *		Retrieves (clones) the stashed port send rights.
 *	Conditions:
 *		Nothing locked.  If successful, the caller gets
 *		rights and memory.
 *	Returns:
 *		KERN_SUCCESS		Retrieved the send rights.
 *		KERN_INVALID_ARGUMENT	The task is null.
 *		KERN_INVALID_ARGUMENT	The task is dead.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
 */
kern_return_t
mach_ports_lookup(
    task_t task,
    mach_port_array_t *portsp,
    mach_msg_type_number_t *portsCnt)
{
    void *memory;
    vm_size_t size;
    ipc_port_t *ports;
    int i;

    if (task == TASK_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    size = (vm_size_t) (TASK_PORT_REGISTER_MAX * sizeof(ipc_port_t));

    memory = kalloc(size);
    if (memory == 0) {
        return KERN_RESOURCE_SHORTAGE;
    }

    itk_lock(task);
    if (task->itk_self == IP_NULL) {
        itk_unlock(task);

        kfree(memory, size);
        return KERN_INVALID_ARGUMENT;
    }

    ports = (ipc_port_t *) memory;

    /*
     * Clone port rights.  Because kalloc'd memory
     * is wired, we won't fault while holding the task lock.
     */

    for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
        ports[i] = ipc_port_copy_send(task->itk_registered[i]);
    }

    itk_unlock(task);

    *portsp = (mach_port_array_t) ports;
    *portsCnt = TASK_PORT_REGISTER_MAX;
    return KERN_SUCCESS;
}
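/*
 * Illustrative user-space sketch (not part of this kernel file): stashing a
 * right with mach_ports_register() and cloning it back with
 * mach_ports_lookup(). Only the public <mach/mach.h> routines are assumed.
 */
#if 0   /* user-space example only */
#include <mach/mach.h>

static kern_return_t
stash_and_fetch(mach_port_t port_to_stash)
{
    mach_port_t stash[1] = { port_to_stash };
    mach_port_array_t ports;
    mach_msg_type_number_t count;
    kern_return_t kr;

    /* Register at most TASK_PORT_REGISTER_MAX rights; children inherit them. */
    kr = mach_ports_register(mach_task_self(), stash, 1);
    if (kr != KERN_SUCCESS) {
        return kr;
    }

    /* Lookup clones the stashed rights into newly allocated out-of-line memory. */
    kr = mach_ports_lookup(mach_task_self(), &ports, &count);
    if (kr == KERN_SUCCESS) {
        /* ... use ports[0..count-1], release the rights, then free the array ... */
        vm_deallocate(mach_task_self(), (vm_address_t)ports,
            count * sizeof(ports[0]));
    }
    return kr;
}
#endif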
extern zone_t task_zone;

kern_return_t
task_conversion_eval(task_t caller, task_t victim)
{
    /*
     * Tasks are allowed to resolve their own task ports, and the kernel is
     * allowed to resolve anyone's task port.
     */
    if (caller == kernel_task) {
        return KERN_SUCCESS;
    }

    if (caller == victim) {
        return KERN_SUCCESS;
    }

    /*
     * Only the kernel can resolve the kernel's task port. We've established
     * by this point that the caller is not kernel_task.
     */
    if (victim == TASK_NULL || victim == kernel_task) {
        return KERN_INVALID_SECURITY;
    }

    zone_require(victim, task_zone);

#if CONFIG_EMBEDDED
    /*
     * On embedded platforms, only a platform binary can resolve the task port
     * of another platform binary.
     */
    if ((victim->t_flags & TF_PLATFORM) && !(caller->t_flags & TF_PLATFORM)) {
#if SECURE_KERNEL
        return KERN_INVALID_SECURITY;
#else
        if (cs_relax_platform_task_ports) {
            return KERN_SUCCESS;
        } else {
            return KERN_INVALID_SECURITY;
        }
#endif /* SECURE_KERNEL */
    }
#endif /* CONFIG_EMBEDDED */

    return KERN_SUCCESS;
}
/*
 *	Routine:	convert_port_to_locked_task
 *	Purpose:
 *		Internal helper routine to convert from a port to a locked
 *		task.  Used by several routines that try to convert from a
 *		task port to a reference on some task related object.
 *	Conditions:
 *		Nothing locked, blocking OK.
 */
task_t
convert_port_to_locked_task(ipc_port_t port)
{
    int try_failed_count = 0;

    while (IP_VALID(port)) {
        task_t ct = current_task();
        task_t task;

        ip_lock(port);
        if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK)) {
            ip_unlock(port);
            return TASK_NULL;
        }
        task = (task_t) port->ip_kobject;
        assert(task != TASK_NULL);

        if (task_conversion_eval(ct, task)) {
            ip_unlock(port);
            return TASK_NULL;
        }

        /*
         * Normal lock ordering puts task_lock() before ip_lock().
         * Attempt out-of-order locking here.
         */
        if (task_lock_try(task)) {
            ip_unlock(port);
            return task;
        }
        try_failed_count++;

        ip_unlock(port);
        mutex_pause(try_failed_count);
    }
    return TASK_NULL;
}
/*
 *	Routine:	convert_port_to_locked_task_inspect
 *	Purpose:
 *		Internal helper routine to convert from a port to a locked
 *		task inspect right. Used by internal routines that try to convert from a
 *		task inspect port to a reference on some task related object.
 *	Conditions:
 *		Nothing locked, blocking OK.
 */
task_inspect_t
convert_port_to_locked_task_inspect(ipc_port_t port)
{
    int try_failed_count = 0;

    while (IP_VALID(port)) {
        task_inspect_t task;

        ip_lock(port);
        if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK)) {
            ip_unlock(port);
            return TASK_INSPECT_NULL;
        }
        task = (task_inspect_t)port->ip_kobject;
        assert(task != TASK_INSPECT_NULL);
        /*
         * Normal lock ordering puts task_lock() before ip_lock().
         * Attempt out-of-order locking here.
         */
        if (task_lock_try((task_t)task)) {
            ip_unlock(port);
            return task;
        }
        try_failed_count++;

        ip_unlock(port);
        mutex_pause(try_failed_count);
    }
    return TASK_INSPECT_NULL;
}
static task_t
convert_port_to_task_locked(
    ipc_port_t port,
    uint32_t *exec_token)
{
    task_t task = TASK_NULL;

    require_ip_active(port);

    if (ip_kotype(port) == IKOT_TASK) {
        task_t ct = current_task();
        task = (task_t)port->ip_kobject;
        assert(task != TASK_NULL);

        if (task_conversion_eval(ct, task)) {
            return TASK_NULL;
        }

        if (exec_token) {
            *exec_token = task->exec_token;
        }
        task_reference_internal(task);
    }

    return task;
}
/*
 *	Routine:	convert_port_to_task_with_exec_token
 *	Purpose:
 *		Convert from a port to a task and return
 *		the exec token stored in the task.
 *		Doesn't consume the port ref; produces a task ref,
 *		which may be null.
 */
task_t
convert_port_to_task_with_exec_token(
    ipc_port_t port,
    uint32_t *exec_token)
{
    task_t task = TASK_NULL;

    if (IP_VALID(port)) {
        ip_lock(port);
        if (ip_active(port)) {
            task = convert_port_to_task_locked(port, exec_token);
        }
        ip_unlock(port);
    }

    return task;
}

/*
 *	Routine:	convert_port_to_task
 *	Purpose:
 *		Convert from a port to a task.
 *		Doesn't consume the port ref; produces a task ref,
 *		which may be null.
 */
task_t
convert_port_to_task(
    ipc_port_t port)
{
    return convert_port_to_task_with_exec_token(port, NULL);
}
/*
 *	Routine:	convert_port_to_task_name
 *	Purpose:
 *		Convert from a port to a task name.
 *		Doesn't consume the port ref; produces a task name ref,
 *		which may be null.
 */
task_name_t
convert_port_to_task_name(
    ipc_port_t port)
{
    task_name_t task = TASK_NULL;

    if (IP_VALID(port)) {
        ip_lock(port);

        if (ip_active(port) &&
            (ip_kotype(port) == IKOT_TASK ||
            ip_kotype(port) == IKOT_TASK_NAME)) {
            task = (task_name_t)port->ip_kobject;
            assert(task != TASK_NAME_NULL);

            task_reference_internal(task);
        }

        ip_unlock(port);
    }

    return task;
}
static task_inspect_t
convert_port_to_task_inspect_locked(
    ipc_port_t port)
{
    task_inspect_t task = TASK_INSPECT_NULL;

    require_ip_active(port);

    if (ip_kotype(port) == IKOT_TASK) {
        task = (task_inspect_t)port->ip_kobject;
        assert(task != TASK_INSPECT_NULL);

        task_reference_internal(task);
    }

    return task;
}

/*
 *	Routine:	convert_port_to_task_inspect
 *	Purpose:
 *		Convert from a port to a task inspection right
 *		Doesn't consume the port ref; produces a task ref,
 *		which may be null.
 */
task_inspect_t
convert_port_to_task_inspect(
    ipc_port_t port)
{
    task_inspect_t task = TASK_INSPECT_NULL;

    if (IP_VALID(port)) {
        ip_lock(port);
        if (ip_active(port)) {
            task = convert_port_to_task_inspect_locked(port);
        }
        ip_unlock(port);
    }

    return task;
}
/*
 *	Routine:	convert_port_to_task_suspension_token
 *	Purpose:
 *		Convert from a port to a task suspension token.
 *		Doesn't consume the port ref; produces a suspension token ref,
 *		which may be null.
 */
task_suspension_token_t
convert_port_to_task_suspension_token(
    ipc_port_t port)
{
    task_suspension_token_t task = TASK_NULL;

    if (IP_VALID(port)) {
        ip_lock(port);

        if (ip_active(port) &&
            ip_kotype(port) == IKOT_TASK_RESUME) {
            task = (task_suspension_token_t)port->ip_kobject;
            assert(task != TASK_NULL);

            task_reference_internal(task);
        }

        ip_unlock(port);
    }

    return task;
}
/*
 *	Routine:	convert_port_to_space
 *	Purpose:
 *		Convert from a port to a space.
 *		Doesn't consume the port ref; produces a space ref,
 *		which may be null.
 */
ipc_space_t
convert_port_to_space(
    ipc_port_t port)
{
    ipc_space_t space;
    task_t task;

    task = convert_port_to_locked_task(port);

    if (task == TASK_NULL) {
        return IPC_SPACE_NULL;
    }

    if (!task->active) {
        task_unlock(task);
        return IPC_SPACE_NULL;
    }

    space = task->itk_space;
    is_reference(space);
    task_unlock(task);
    return space;
}
/*
 *	Routine:	convert_port_to_space_inspect
 *	Purpose:
 *		Convert from a port to a space inspect right.
 *		Doesn't consume the port ref; produces a space inspect ref,
 *		which may be null.
 */
ipc_space_inspect_t
convert_port_to_space_inspect(
    ipc_port_t port)
{
    ipc_space_inspect_t space;
    task_inspect_t task;

    task = convert_port_to_locked_task_inspect(port);

    if (task == TASK_INSPECT_NULL) {
        return IPC_SPACE_INSPECT_NULL;
    }

    if (!task->active) {
        task_unlock(task);
        return IPC_SPACE_INSPECT_NULL;
    }

    space = (ipc_space_inspect_t)task->itk_space;
    is_reference((ipc_space_t)space);
    task_unlock((task_t)task);
    return space;
}
/*
 *	Routine:	convert_port_to_map
 *	Purpose:
 *		Convert from a port to a map.
 *		Doesn't consume the port ref; produces a map ref,
 *		which may be null.
 */
vm_map_t
convert_port_to_map(
    ipc_port_t port)
{
    task_t task;
    vm_map_t map;

    task = convert_port_to_locked_task(port);

    if (task == TASK_NULL) {
        return VM_MAP_NULL;
    }

    if (!task->active) {
        task_unlock(task);
        return VM_MAP_NULL;
    }

    map = task->map;
    vm_map_reference_swap(map);
    task_unlock(task);
    return map;
}
/*
 *	Routine:	convert_port_to_thread
 *	Purpose:
 *		Convert from a port to a thread.
 *		Doesn't consume the port ref; produces a thread ref,
 *		which may be null.
 */
static thread_t
convert_port_to_thread_locked(
    ipc_port_t port,
    port_to_thread_options_t options)
{
    thread_t thread = THREAD_NULL;

    require_ip_active(port);

    if (ip_kotype(port) == IKOT_THREAD) {
        thread = (thread_t)port->ip_kobject;
        assert(thread != THREAD_NULL);

        if (options & PORT_TO_THREAD_NOT_CURRENT_THREAD) {
            if (thread == current_thread()) {
                return THREAD_NULL;
            }
        }

        if (options & PORT_TO_THREAD_IN_CURRENT_TASK) {
            if (thread->task != current_task()) {
                return THREAD_NULL;
            }
        } else {
            /* Use task conversion rules for thread control conversions */
            if (task_conversion_eval(current_task(), thread->task) != KERN_SUCCESS) {
                return THREAD_NULL;
            }
        }

        thread_reference_internal(thread);
    }

    return thread;
}

thread_t
convert_port_to_thread(
    ipc_port_t port)
{
    thread_t thread = THREAD_NULL;

    if (IP_VALID(port)) {
        ip_lock(port);
        if (ip_active(port)) {
            thread = convert_port_to_thread_locked(port, PORT_TO_THREAD_NONE);
        }
        ip_unlock(port);
    }

    return thread;
}
/*
 *	Routine:	convert_port_to_thread_inspect
 *	Purpose:
 *		Convert from a port to a thread inspection right
 *		Doesn't consume the port ref; produces a thread ref,
 *		which may be null.
 */
thread_inspect_t
convert_port_to_thread_inspect(
    ipc_port_t port)
{
    thread_inspect_t thread = THREAD_INSPECT_NULL;

    if (IP_VALID(port)) {
        ip_lock(port);

        if (ip_active(port) &&
            ip_kotype(port) == IKOT_THREAD) {
            thread = (thread_inspect_t)port->ip_kobject;
            assert(thread != THREAD_INSPECT_NULL);
            thread_reference_internal((thread_t)thread);
        }
        ip_unlock(port);
    }

    return thread;
}
/*
 *	Routine:	convert_thread_inspect_to_port
 *	Purpose:
 *		Convert from a thread inspect reference to a port.
 *		Consumes a thread ref;
 *		As we never export thread inspect ports, always
 *		creates a NULL port.
 */
ipc_port_t
convert_thread_inspect_to_port(thread_inspect_t thread)
{
    thread_deallocate(thread);
    return IP_NULL;
}
/*
 *	Routine:	port_name_to_thread
 *	Purpose:
 *		Convert from a port name to a thread reference
 *		A name of MACH_PORT_NULL is valid for the null thread.
 */
thread_t
port_name_to_thread(
    mach_port_name_t name,
    port_to_thread_options_t options)
{
    thread_t thread = THREAD_NULL;
    ipc_port_t kport;
    kern_return_t kr;

    if (MACH_PORT_VALID(name)) {
        kr = ipc_port_translate_send(current_space(), name, &kport);
        if (kr == KERN_SUCCESS) {
            thread = convert_port_to_thread_locked(kport, options);
            ip_unlock(kport);
        }
    }

    return thread;
}
task_t
port_name_to_task(
    mach_port_name_t name)
{
    ipc_port_t kport;
    kern_return_t kr;
    task_t task = TASK_NULL;

    if (MACH_PORT_VALID(name)) {
        kr = ipc_port_translate_send(current_space(), name, &kport);
        if (kr == KERN_SUCCESS) {
            task = convert_port_to_task_locked(kport, NULL);
            ip_unlock(kport);
        }
    }
    return task;
}
task_inspect_t
port_name_to_task_inspect(
    mach_port_name_t name)
{
    ipc_port_t kport;
    kern_return_t kr;
    task_inspect_t ti = TASK_INSPECT_NULL;

    if (MACH_PORT_VALID(name)) {
        kr = ipc_port_translate_send(current_space(), name, &kport);
        if (kr == KERN_SUCCESS) {
            ti = convert_port_to_task_inspect_locked(kport);
            ip_unlock(kport);
        }
    }
    return ti;
}
/*
 *	Routine:	port_name_to_host
 *	Purpose:
 *		Convert from a port name to a host pointer.
 *		NOTE: This does _not_ return a +1 reference to the host_t
 */
host_t
port_name_to_host(
    mach_port_name_t name)
{
    host_t host = HOST_NULL;
    kern_return_t kr;
    ipc_port_t port;

    if (MACH_PORT_VALID(name)) {
        kr = ipc_port_translate_send(current_space(), name, &port);
        if (kr == KERN_SUCCESS) {
            host = convert_port_to_host(port);
            ip_unlock(port);
        }
    }
    return host;
}
/*
 *	Routine:	convert_task_to_port
 *	Purpose:
 *		Convert from a task to a port.
 *		Consumes a task ref; produces a naked send right
 *		which may be invalid.
 */
ipc_port_t
convert_task_to_port(
    task_t task)
{
    ipc_port_t port;

    itk_lock(task);

    if (task->itk_self != IP_NULL) {
        port = ipc_port_make_send(task->itk_self);
    } else {
        port = IP_NULL;
    }

    itk_unlock(task);

    task_deallocate(task);
    return port;
}
/*
 *	Routine:	convert_task_inspect_to_port
 *	Purpose:
 *		Convert from a task inspect reference to a port.
 *		Consumes a task ref;
 *		As we never export task inspect ports, always
 *		creates a NULL port.
 */
ipc_port_t
convert_task_inspect_to_port(
    task_inspect_t task)
{
    task_deallocate(task);

    return IP_NULL;
}
/*
 *	Routine:	convert_task_suspend_token_to_port
 *	Purpose:
 *		Convert from a task suspension token to a port.
 *		Consumes a task suspension token ref; produces a naked send-once right
 *		which may be invalid.
 */
ipc_port_t
convert_task_suspension_token_to_port(
    task_suspension_token_t task)
{
    ipc_port_t port;

    task_lock(task);
    if (task->active) {
        if (task->itk_resume == IP_NULL) {
            task->itk_resume = ipc_kobject_alloc_port((ipc_kobject_t) task,
                IKOT_TASK_RESUME, IPC_KOBJECT_ALLOC_NONE);
        }

        /*
         * Create a send-once right for each instance of a direct user-called
         * task_suspend2 call. Each time one of these send-once rights is abandoned,
         * the notification handler will resume the target task.
         */
        port = ipc_port_make_sonce(task->itk_resume);
        assert(IP_VALID(port));
    } else {
        port = IP_NULL;
    }

    task_unlock(task);
    task_suspension_token_deallocate(task);

    return port;
}
/*
 *	Routine:	convert_task_name_to_port
 *	Purpose:
 *		Convert from a task name ref to a port.
 *		Consumes a task name ref; produces a naked send right
 *		which may be invalid.
 */
ipc_port_t
convert_task_name_to_port(
    task_name_t task_name)
{
    ipc_port_t port;

    itk_lock(task_name);
    if (task_name->itk_nself != IP_NULL) {
        port = ipc_port_make_send(task_name->itk_nself);
    } else {
        port = IP_NULL;
    }
    itk_unlock(task_name);

    task_name_deallocate(task_name);
    return port;
}
/*
 *	Routine:	convert_thread_to_port
 *	Purpose:
 *		Convert from a thread to a port.
 *		Consumes a thread ref; produces a naked send right
 *		which may be invalid.
 */
ipc_port_t
convert_thread_to_port(
    thread_t thread)
{
    ipc_port_t port;

    thread_mtx_lock(thread);

    if (thread->ith_self != IP_NULL) {
        port = ipc_port_make_send(thread->ith_self);
    } else {
        port = IP_NULL;
    }

    thread_mtx_unlock(thread);

    thread_deallocate(thread);

    return port;
}
/*
 *	Routine:	space_deallocate
 *	Purpose:
 *		Deallocate a space ref produced by convert_port_to_space.
 */
void
space_deallocate(
    ipc_space_t space)
{
    if (space != IS_NULL) {
        is_release(space);
    }
}

/*
 *	Routine:	space_inspect_deallocate
 *	Purpose:
 *		Deallocate a space inspect ref produced by convert_port_to_space_inspect.
 */
void
space_inspect_deallocate(
    ipc_space_inspect_t space)
{
    if (space != IS_INSPECT_NULL) {
        is_release((ipc_space_t)space);
    }
}
/*
 *	Routine:	thread/task_set_exception_ports [kernel call]
 *	Purpose:
 *		Sets the thread/task exception port, flavor and
 *		behavior for the exception types specified by the mask.
 *		There will be one send right per exception per valid
 *		combination.
 *	Conditions:
 *		Nothing locked.  If successful, consumes
 *		the supplied send right.
 *	Returns:
 *		KERN_SUCCESS		Changed the special port.
 *		KERN_INVALID_ARGUMENT	The thread is null,
 *					Illegal mask bit set.
 *					Illegal exception behavior
 *		KERN_FAILURE		The thread is dead.
 */
kern_return_t
thread_set_exception_ports(
    thread_t thread,
    exception_mask_t exception_mask,
    ipc_port_t new_port,
    exception_behavior_t new_behavior,
    thread_state_flavor_t new_flavor)
{
    ipc_port_t old_port[EXC_TYPES_COUNT];
    boolean_t privileged = current_task()->sec_token.val[0] == 0;
    int i;

    struct label *new_label;

    if (thread == THREAD_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    if (exception_mask & ~EXC_MASK_VALID) {
        return KERN_INVALID_ARGUMENT;
    }

    if (IP_VALID(new_port)) {
        switch (new_behavior & ~MACH_EXCEPTION_MASK) {
        case EXCEPTION_DEFAULT:
        case EXCEPTION_STATE:
        case EXCEPTION_STATE_IDENTITY:
            break;

        default:
            return KERN_INVALID_ARGUMENT;
        }
    }

    /*
     * Check the validity of the thread_state_flavor by calling the
     * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
     * osfmk/mach/ARCHITECTURE/thread_status.h
     */
    if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
        return KERN_INVALID_ARGUMENT;
    }

    new_label = mac_exc_create_label_for_current_proc();

    thread_mtx_lock(thread);

    if (!thread->active) {
        thread_mtx_unlock(thread);

        return KERN_FAILURE;
    }

    if (thread->exc_actions == NULL) {
        ipc_thread_init_exc_actions(thread);
    }
    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
        if ((exception_mask & (1 << i)) &&
            mac_exc_update_action_label(&thread->exc_actions[i], new_label) == 0) {
            old_port[i] = thread->exc_actions[i].port;
            thread->exc_actions[i].port = ipc_port_copy_send(new_port);
            thread->exc_actions[i].behavior = new_behavior;
            thread->exc_actions[i].flavor = new_flavor;
            thread->exc_actions[i].privileged = privileged;
        } else {
            old_port[i] = IP_NULL;
        }
    }

    thread_mtx_unlock(thread);

    mac_exc_free_label(new_label);

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
        if (IP_VALID(old_port[i])) {
            ipc_port_release_send(old_port[i]);
        }
    }

    if (IP_VALID(new_port)) {         /* consume send right */
        ipc_port_release_send(new_port);
    }

    return KERN_SUCCESS;
}
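/*
 * Illustrative user-space sketch (not part of this kernel file): installing an
 * exception handler port on the calling thread with the public
 * thread_set_exception_ports() routine.
 */
#if 0   /* user-space example only */
#include <mach/mach.h>

static kern_return_t
install_thread_exc_handler(mach_port_t handler_port)
{
    mach_port_t thread = mach_thread_self();
    kern_return_t kr;

    /*
     * A send right is installed per selected exception type; on success the
     * supplied right is consumed. EXC_MASK_BAD_ACCESS with the default
     * behavior and no fixed thread-state flavor is used here.
     */
    kr = thread_set_exception_ports(thread, EXC_MASK_BAD_ACCESS,
        handler_port, EXCEPTION_DEFAULT, THREAD_STATE_NONE);

    mach_port_deallocate(mach_task_self(), thread);
    return kr;
}
#endif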
kern_return_t
task_set_exception_ports(
    task_t task,
    exception_mask_t exception_mask,
    ipc_port_t new_port,
    exception_behavior_t new_behavior,
    thread_state_flavor_t new_flavor)
{
    ipc_port_t old_port[EXC_TYPES_COUNT];
    boolean_t privileged = current_task()->sec_token.val[0] == 0;
    int i;

    struct label *new_label;

    if (task == TASK_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    if (exception_mask & ~EXC_MASK_VALID) {
        return KERN_INVALID_ARGUMENT;
    }

    if (IP_VALID(new_port)) {
        switch (new_behavior & ~MACH_EXCEPTION_MASK) {
        case EXCEPTION_DEFAULT:
        case EXCEPTION_STATE:
        case EXCEPTION_STATE_IDENTITY:
            break;

        default:
            return KERN_INVALID_ARGUMENT;
        }
    }

    /*
     * Check the validity of the thread_state_flavor by calling the
     * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
     * osfmk/mach/ARCHITECTURE/thread_status.h
     */
    if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
        return KERN_INVALID_ARGUMENT;
    }

    new_label = mac_exc_create_label_for_current_proc();

    itk_lock(task);

    if (task->itk_self == IP_NULL) {
        itk_unlock(task);

        return KERN_FAILURE;
    }

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
        if ((exception_mask & (1 << i)) &&
            mac_exc_update_action_label(&task->exc_actions[i], new_label) == 0) {
            old_port[i] = task->exc_actions[i].port;
            task->exc_actions[i].port =
                ipc_port_copy_send(new_port);
            task->exc_actions[i].behavior = new_behavior;
            task->exc_actions[i].flavor = new_flavor;
            task->exc_actions[i].privileged = privileged;
        } else {
            old_port[i] = IP_NULL;
        }
    }

    itk_unlock(task);

    mac_exc_free_label(new_label);

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
        if (IP_VALID(old_port[i])) {
            ipc_port_release_send(old_port[i]);
        }
    }

    if (IP_VALID(new_port)) {         /* consume send right */
        ipc_port_release_send(new_port);
    }

    return KERN_SUCCESS;
}
/*
 *	Routine:	thread/task_swap_exception_ports [kernel call]
 *	Purpose:
 *		Sets the thread/task exception port, flavor and
 *		behavior for the exception types specified by the
 *		mask.
 *
 *		The old ports, behavior and flavors are returned
 *		Count specifies the array sizes on input and
 *		the number of returned ports etc. on output.  The
 *		arrays must be large enough to hold all the returned
 *		data, MIG returns an error otherwise.  The masks
 *		array specifies the corresponding exception type(s).
 *	Conditions:
 *		Nothing locked.  If successful, consumes
 *		the supplied send right.
 *
 *		Returns up to [in] CountCnt elements.
 *	Returns:
 *		KERN_SUCCESS		Changed the special port.
 *		KERN_INVALID_ARGUMENT	The thread is null,
 *					Illegal mask bit set.
 *					Illegal exception behavior
 *		KERN_FAILURE		The thread is dead.
 */
kern_return_t
thread_swap_exception_ports(
    thread_t thread,
    exception_mask_t exception_mask,
    ipc_port_t new_port,
    exception_behavior_t new_behavior,
    thread_state_flavor_t new_flavor,
    exception_mask_array_t masks,
    mach_msg_type_number_t *CountCnt,
    exception_port_array_t ports,
    exception_behavior_array_t behaviors,
    thread_state_flavor_array_t flavors)
{
    ipc_port_t old_port[EXC_TYPES_COUNT];
    boolean_t privileged = current_task()->sec_token.val[0] == 0;
    unsigned int i, j, count;

    struct label *new_label;

    if (thread == THREAD_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    if (exception_mask & ~EXC_MASK_VALID) {
        return KERN_INVALID_ARGUMENT;
    }

    if (IP_VALID(new_port)) {
        switch (new_behavior & ~MACH_EXCEPTION_MASK) {
        case EXCEPTION_DEFAULT:
        case EXCEPTION_STATE:
        case EXCEPTION_STATE_IDENTITY:
            break;

        default:
            return KERN_INVALID_ARGUMENT;
        }
    }

    if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
        return KERN_INVALID_ARGUMENT;
    }

    new_label = mac_exc_create_label_for_current_proc();

    thread_mtx_lock(thread);

    if (!thread->active) {
        thread_mtx_unlock(thread);

        return KERN_FAILURE;
    }

    if (thread->exc_actions == NULL) {
        ipc_thread_init_exc_actions(thread);
    }

    assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
    for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
        if ((exception_mask & (1 << i)) &&
            mac_exc_update_action_label(&thread->exc_actions[i], new_label) == 0) {
            for (j = 0; j < count; ++j) {
                /*
                 * search for an identical entry, if found
                 * set corresponding mask for this exception.
                 */
                if (thread->exc_actions[i].port == ports[j] &&
                    thread->exc_actions[i].behavior == behaviors[j] &&
                    thread->exc_actions[i].flavor == flavors[j]) {
                    masks[j] |= (1 << i);
                    break;
                }
            }

            if (j == count) {
                masks[j] = (1 << i);
                ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);

                behaviors[j] = thread->exc_actions[i].behavior;
                flavors[j] = thread->exc_actions[i].flavor;
                ++count;
            }

            old_port[i] = thread->exc_actions[i].port;
            thread->exc_actions[i].port = ipc_port_copy_send(new_port);
            thread->exc_actions[i].behavior = new_behavior;
            thread->exc_actions[i].flavor = new_flavor;
            thread->exc_actions[i].privileged = privileged;
        } else {
            old_port[i] = IP_NULL;
        }
    }

    thread_mtx_unlock(thread);

    mac_exc_free_label(new_label);

    while (--i >= FIRST_EXCEPTION) {
        if (IP_VALID(old_port[i])) {
            ipc_port_release_send(old_port[i]);
        }
    }

    if (IP_VALID(new_port)) {         /* consume send right */
        ipc_port_release_send(new_port);
    }

    *CountCnt = count;

    return KERN_SUCCESS;
}
kern_return_t
task_swap_exception_ports(
    task_t task,
    exception_mask_t exception_mask,
    ipc_port_t new_port,
    exception_behavior_t new_behavior,
    thread_state_flavor_t new_flavor,
    exception_mask_array_t masks,
    mach_msg_type_number_t *CountCnt,
    exception_port_array_t ports,
    exception_behavior_array_t behaviors,
    thread_state_flavor_array_t flavors)
{
    ipc_port_t old_port[EXC_TYPES_COUNT];
    boolean_t privileged = current_task()->sec_token.val[0] == 0;
    unsigned int i, j, count;

    struct label *new_label;

    if (task == TASK_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    if (exception_mask & ~EXC_MASK_VALID) {
        return KERN_INVALID_ARGUMENT;
    }

    if (IP_VALID(new_port)) {
        switch (new_behavior & ~MACH_EXCEPTION_MASK) {
        case EXCEPTION_DEFAULT:
        case EXCEPTION_STATE:
        case EXCEPTION_STATE_IDENTITY:
            break;

        default:
            return KERN_INVALID_ARGUMENT;
        }
    }

    if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
        return KERN_INVALID_ARGUMENT;
    }

    new_label = mac_exc_create_label_for_current_proc();

    itk_lock(task);

    if (task->itk_self == IP_NULL) {
        itk_unlock(task);

        return KERN_FAILURE;
    }

    assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
    for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
        if ((exception_mask & (1 << i)) &&
            mac_exc_update_action_label(&task->exc_actions[i], new_label) == 0) {
            for (j = 0; j < count; j++) {
                /*
                 * search for an identical entry, if found
                 * set corresponding mask for this exception.
                 */
                if (task->exc_actions[i].port == ports[j] &&
                    task->exc_actions[i].behavior == behaviors[j] &&
                    task->exc_actions[i].flavor == flavors[j]) {
                    masks[j] |= (1 << i);
                    break;
                }
            }

            if (j == count) {
                masks[j] = (1 << i);
                ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
                behaviors[j] = task->exc_actions[i].behavior;
                flavors[j] = task->exc_actions[i].flavor;
                ++count;
            }

            old_port[i] = task->exc_actions[i].port;

            task->exc_actions[i].port = ipc_port_copy_send(new_port);
            task->exc_actions[i].behavior = new_behavior;
            task->exc_actions[i].flavor = new_flavor;
            task->exc_actions[i].privileged = privileged;
        } else {
            old_port[i] = IP_NULL;
        }
    }

    itk_unlock(task);

    mac_exc_free_label(new_label);

    while (--i >= FIRST_EXCEPTION) {
        if (IP_VALID(old_port[i])) {
            ipc_port_release_send(old_port[i]);
        }
    }

    if (IP_VALID(new_port)) {         /* consume send right */
        ipc_port_release_send(new_port);
    }

    *CountCnt = count;

    return KERN_SUCCESS;
}
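/*
 * Illustrative user-space sketch (not part of this kernel file): atomically
 * installing a new task-level crash handler and capturing the previous
 * handlers with the public task_swap_exception_ports() routine.
 */
#if 0   /* user-space example only */
#include <mach/mach.h>

static kern_return_t
swap_task_exc_handler(mach_port_t new_handler)
{
    exception_mask_t masks[EXC_TYPES_COUNT];
    mach_msg_type_number_t count = EXC_TYPES_COUNT;   /* in: capacity, out: entries */
    mach_port_t old_handlers[EXC_TYPES_COUNT];
    exception_behavior_t old_behaviors[EXC_TYPES_COUNT];
    thread_state_flavor_t old_flavors[EXC_TYPES_COUNT];

    /*
     * The previous port/behavior/flavor triples come back in the arrays,
     * so they can be restored later with task_set_exception_ports().
     */
    return task_swap_exception_ports(mach_task_self(), EXC_MASK_CRASH,
        new_handler, EXCEPTION_DEFAULT, THREAD_STATE_NONE,
        masks, &count, old_handlers, old_behaviors, old_flavors);
}
#endif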
/*
 *	Routine:	thread/task_get_exception_ports [kernel call]
 *	Purpose:
 *		Clones a send right for each of the thread/task's exception
 *		ports specified in the mask and returns the behaviour
 *		and flavor of said port.
 *
 *		Returns up to [in] CountCnt elements.
 *	Returns:
 *		KERN_SUCCESS		Extracted a send right.
 *		KERN_INVALID_ARGUMENT	The thread is null,
 *					Invalid special port,
 *					Illegal mask bit set.
 *		KERN_FAILURE		The thread is dead.
 */
kern_return_t
thread_get_exception_ports(
    thread_t thread,
    exception_mask_t exception_mask,
    exception_mask_array_t masks,
    mach_msg_type_number_t *CountCnt,
    exception_port_array_t ports,
    exception_behavior_array_t behaviors,
    thread_state_flavor_array_t flavors)
{
    unsigned int i, j, count;

    if (thread == THREAD_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    if (exception_mask & ~EXC_MASK_VALID) {
        return KERN_INVALID_ARGUMENT;
    }

    thread_mtx_lock(thread);

    if (!thread->active) {
        thread_mtx_unlock(thread);

        return KERN_FAILURE;
    }

    count = 0;

    if (thread->exc_actions == NULL) {
        goto done;
    }

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
        if (exception_mask & (1 << i)) {
            for (j = 0; j < count; ++j) {
                /*
                 * search for an identical entry, if found
                 * set corresponding mask for this exception.
                 */
                if (thread->exc_actions[i].port == ports[j] &&
                    thread->exc_actions[i].behavior == behaviors[j] &&
                    thread->exc_actions[i].flavor == flavors[j]) {
                    masks[j] |= (1 << i);
                    break;
                }
            }

            if (j == count) {
                masks[j] = (1 << i);
                ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);
                behaviors[j] = thread->exc_actions[i].behavior;
                flavors[j] = thread->exc_actions[i].flavor;
                ++count;
                if (count >= *CountCnt) {
                    break;
                }
            }
        }
    }

done:
    thread_mtx_unlock(thread);

    *CountCnt = count;

    return KERN_SUCCESS;
}
kern_return_t
task_get_exception_ports(
    task_t task,
    exception_mask_t exception_mask,
    exception_mask_array_t masks,
    mach_msg_type_number_t *CountCnt,
    exception_port_array_t ports,
    exception_behavior_array_t behaviors,
    thread_state_flavor_array_t flavors)
{
    unsigned int i, j, count;

    if (task == TASK_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    if (exception_mask & ~EXC_MASK_VALID) {
        return KERN_INVALID_ARGUMENT;
    }

    itk_lock(task);

    if (task->itk_self == IP_NULL) {
        itk_unlock(task);

        return KERN_FAILURE;
    }

    count = 0;

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
        if (exception_mask & (1 << i)) {
            for (j = 0; j < count; ++j) {
                /*
                 * search for an identical entry, if found
                 * set corresponding mask for this exception.
                 */
                if (task->exc_actions[i].port == ports[j] &&
                    task->exc_actions[i].behavior == behaviors[j] &&
                    task->exc_actions[i].flavor == flavors[j]) {
                    masks[j] |= (1 << i);
                    break;
                }
            }

            if (j == count) {
                masks[j] = (1 << i);
                ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
                behaviors[j] = task->exc_actions[i].behavior;
                flavors[j] = task->exc_actions[i].flavor;
                ++count;
                if (count > *CountCnt) {
                    break;
                }
            }
        }
    }

    itk_unlock(task);

    *CountCnt = count;

    return KERN_SUCCESS;
}
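/*
 * Illustrative user-space sketch (not part of this kernel file): saving the
 * task's currently registered exception handlers with the public
 * task_get_exception_ports() routine, e.g. before temporarily replacing them.
 */
#if 0   /* user-space example only */
#include <mach/mach.h>

static kern_return_t
save_task_exc_ports(void)
{
    exception_mask_t masks[EXC_TYPES_COUNT];
    mach_msg_type_number_t count = EXC_TYPES_COUNT;   /* in: capacity, out: entries */
    mach_port_t handlers[EXC_TYPES_COUNT];
    exception_behavior_t behaviors[EXC_TYPES_COUNT];
    thread_state_flavor_t flavors[EXC_TYPES_COUNT];

    /*
     * Clones a send right per registered handler covered by the mask;
     * identical handlers are coalesced and their masks OR'd together.
     */
    return task_get_exception_ports(mach_task_self(), EXC_MASK_ALL,
        masks, &count, handlers, behaviors, flavors);
}
#endif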