/*
 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
/*
 *  Task and thread related IPC functions.
 */
#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mach_param.h>
#include <mach/task_special_ports.h>
#include <mach/thread_special_ports.h>
#include <mach/thread_status.h>
#include <mach/exception_types.h>
#include <mach/memory_object_types.h>
#include <mach/mach_traps.h>
#include <mach/task_server.h>
#include <mach/thread_act_server.h>
#include <mach/mach_host_server.h>
#include <mach/host_priv_server.h>
#include <mach/vm_map_server.h>

#include <kern/kern_types.h>
#include <kern/host.h>
#include <kern/ipc_kobject.h>
#include <kern/ipc_tt.h>
#include <kern/kalloc.h>
#include <kern/thread.h>
#include <kern/misc_protos.h>

#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>

#include <security/mac_mach_internal.h>

#if CONFIG_EMBEDDED && !SECURE_KERNEL
extern int cs_relax_platform_task_ports;
#endif
/* forward declarations */
task_t convert_port_to_locked_task(ipc_port_t port);
task_inspect_t convert_port_to_locked_task_inspect(ipc_port_t port);
static void ipc_port_bind_special_reply_port_locked(ipc_port_t port);
static kern_return_t ipc_port_unbind_special_reply_port(thread_t thread, boolean_t unbind_active_port);
kern_return_t task_conversion_eval(task_t caller, task_t victim);
/*
 *  Routine:    ipc_task_init
 *  Purpose:
 *      Initialize a task's IPC state.
 *
 *      If non-null, some state will be inherited from the parent.
 *      The parent must be appropriately initialized.
 */
void
ipc_task_init(
    task_t      task,
    task_t      parent)
{
    ipc_space_t space;
    ipc_port_t kport;
    ipc_port_t nport;
    kern_return_t kr;
    int i;

    kr = ipc_space_create(&ipc_table_entries[0], &space);
    if (kr != KERN_SUCCESS) {
        panic("ipc_task_init");
    }

    space->is_task = task;

    kport = ipc_port_alloc_kernel();
    if (kport == IP_NULL) {
        panic("ipc_task_init");
    }

    nport = ipc_port_alloc_kernel();
    if (nport == IP_NULL) {
        panic("ipc_task_init");
    }

    itk_lock_init(task);
    task->itk_self = kport;
    task->itk_nself = nport;
    task->itk_resume = IP_NULL; /* Lazily allocated on-demand */
    if (task_is_a_corpse_fork(task)) {
        /*
         * No sender's notification for corpse would not
         * work with a naked send right in kernel.
         */
        task->itk_sself = IP_NULL;
    } else {
        task->itk_sself = ipc_port_make_send(kport);
    }
    task->itk_debug_control = IP_NULL;
    task->itk_space = space;

#if CONFIG_MACF
    task->exc_actions[0].label = NULL;
    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
        mac_exc_associate_action_label(&task->exc_actions[i], mac_exc_create_label());
    }
#endif

    /* always zero-out the first (unused) array element */
    bzero(&task->exc_actions[0], sizeof(task->exc_actions[0]));

    if (parent == TASK_NULL) {
        ipc_port_t port;

        for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
            task->exc_actions[i].port = IP_NULL;
            task->exc_actions[i].flavor = 0;
            task->exc_actions[i].behavior = 0;
            task->exc_actions[i].privileged = FALSE;
        }

        kr = host_get_host_port(host_priv_self(), &port);
        assert(kr == KERN_SUCCESS);
        task->itk_host = port;

        task->itk_bootstrap = IP_NULL;
        task->itk_seatbelt = IP_NULL;
        task->itk_gssd = IP_NULL;
        task->itk_task_access = IP_NULL;

        for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
            task->itk_registered[i] = IP_NULL;
        }
    } else {
        itk_lock(parent);
        assert(parent->itk_self != IP_NULL);

        /* inherit registered ports */

        for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
            task->itk_registered[i] =
                ipc_port_copy_send(parent->itk_registered[i]);
        }

        /* inherit exception and bootstrap ports */

        for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
            task->exc_actions[i].port =
                ipc_port_copy_send(parent->exc_actions[i].port);
            task->exc_actions[i].flavor =
                parent->exc_actions[i].flavor;
            task->exc_actions[i].behavior =
                parent->exc_actions[i].behavior;
            task->exc_actions[i].privileged =
                parent->exc_actions[i].privileged;
#if CONFIG_MACF
            mac_exc_inherit_action_label(parent->exc_actions + i, task->exc_actions + i);
#endif
        }

        task->itk_host =
            ipc_port_copy_send(parent->itk_host);

        task->itk_bootstrap =
            ipc_port_copy_send(parent->itk_bootstrap);

        task->itk_seatbelt =
            ipc_port_copy_send(parent->itk_seatbelt);

        task->itk_gssd =
            ipc_port_copy_send(parent->itk_gssd);

        task->itk_task_access =
            ipc_port_copy_send(parent->itk_task_access);

        itk_unlock(parent);
    }
}
/*
 *  Routine:    ipc_task_enable
 *  Purpose:
 *      Enable a task for IPC access.
 */
void
ipc_task_enable(
    task_t      task)
{
    ipc_port_t kport;
    ipc_port_t nport;

    itk_lock(task);
    kport = task->itk_self;
    if (kport != IP_NULL) {
        ipc_kobject_set(kport, (ipc_kobject_t) task, IKOT_TASK);
    }
    nport = task->itk_nself;
    if (nport != IP_NULL) {
        ipc_kobject_set(nport, (ipc_kobject_t) task, IKOT_TASK_NAME);
    }
    itk_unlock(task);
}
/*
 *  Routine:    ipc_task_disable
 *  Purpose:
 *      Disable IPC access to a task.
 */
void
ipc_task_disable(
    task_t      task)
{
    ipc_port_t kport;
    ipc_port_t nport;
    ipc_port_t rport;

    itk_lock(task);
    kport = task->itk_self;
    if (kport != IP_NULL) {
        ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
    }
    nport = task->itk_nself;
    if (nport != IP_NULL) {
        ipc_kobject_set(nport, IKO_NULL, IKOT_NONE);
    }

    rport = task->itk_resume;
    if (rport != IP_NULL) {
        /*
         * From this point onwards this task is no longer accepting
         * resumptions.
         *
         * There are still outstanding suspensions on this task,
         * even as it is being torn down. Disconnect the task
         * from the rport, thereby "orphaning" the rport. The rport
         * itself will go away only when the last suspension holder
         * destroys his SO right to it -- when he either
         * exits, or tries to actually use that last SO right to
         * resume this (now non-existent) task.
         */
        ipc_kobject_set(rport, IKO_NULL, IKOT_NONE);
    }
    itk_unlock(task);
}
/*
 *  Routine:    ipc_task_terminate
 *  Purpose:
 *      Clean up and destroy a task's IPC state.
 *  Conditions:
 *      Nothing locked.  The task must be suspended.
 *      (Or the current thread must be in the task.)
 */
void
ipc_task_terminate(
    task_t      task)
{
    ipc_port_t kport;
    ipc_port_t nport;
    ipc_port_t rport;
    int i;

    itk_lock(task);
    kport = task->itk_self;

    if (kport == IP_NULL) {
        /* the task is already terminated (can this happen?) */
        itk_unlock(task);
        return;
    }
    task->itk_self = IP_NULL;

    nport = task->itk_nself;
    assert(nport != IP_NULL);
    task->itk_nself = IP_NULL;

    rport = task->itk_resume;
    task->itk_resume = IP_NULL;

    itk_unlock(task);

    /* release the naked send rights */

    if (IP_VALID(task->itk_sself)) {
        ipc_port_release_send(task->itk_sself);
    }

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
        if (IP_VALID(task->exc_actions[i].port)) {
            ipc_port_release_send(task->exc_actions[i].port);
        }
#if CONFIG_MACF
        mac_exc_free_action_label(task->exc_actions + i);
#endif
    }

    if (IP_VALID(task->itk_host)) {
        ipc_port_release_send(task->itk_host);
    }

    if (IP_VALID(task->itk_bootstrap)) {
        ipc_port_release_send(task->itk_bootstrap);
    }

    if (IP_VALID(task->itk_seatbelt)) {
        ipc_port_release_send(task->itk_seatbelt);
    }

    if (IP_VALID(task->itk_gssd)) {
        ipc_port_release_send(task->itk_gssd);
    }

    if (IP_VALID(task->itk_task_access)) {
        ipc_port_release_send(task->itk_task_access);
    }

    if (IP_VALID(task->itk_debug_control)) {
        ipc_port_release_send(task->itk_debug_control);
    }

    for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
        if (IP_VALID(task->itk_registered[i])) {
            ipc_port_release_send(task->itk_registered[i]);
        }
    }

    /* destroy the kernel ports */
    ipc_port_dealloc_kernel(kport);
    ipc_port_dealloc_kernel(nport);
    if (rport != IP_NULL) {
        ipc_port_dealloc_kernel(rport);
    }

    itk_lock_destroy(task);
}
/*
 *  Routine:    ipc_task_reset
 *  Purpose:
 *      Reset a task's IPC state to protect it when
 *      it enters an elevated security context. The
 *      task name port can remain the same - since
 *      it represents no specific privilege.
 *  Conditions:
 *      Nothing locked.  The task must be suspended.
 *      (Or the current thread must be in the task.)
 */
void
ipc_task_reset(
    task_t      task)
{
    ipc_port_t old_kport, new_kport;
    ipc_port_t old_sself;
    ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
    int i;

#if CONFIG_MACF
    /* Fresh label to unset credentials in existing labels. */
    struct label *unset_label = mac_exc_create_label();
#endif

    new_kport = ipc_kobject_alloc_port((ipc_kobject_t)task, IKOT_TASK,
        IPC_KOBJECT_ALLOC_MAKE_SEND);

    itk_lock(task);

    old_kport = task->itk_self;

    if (old_kport == IP_NULL) {
        /* the task is already terminated (can this happen?) */
        itk_unlock(task);
        ipc_port_release_send(new_kport);
        ipc_port_dealloc_kernel(new_kport);
#if CONFIG_MACF
        mac_exc_free_label(unset_label);
#endif
        return;
    }

    old_sself = task->itk_sself;
    task->itk_sself = task->itk_self = new_kport;

    /* Set the old kport to IKOT_NONE and update the exec token while under the port lock */
    ip_lock(old_kport);
    ipc_kobject_set_atomically(old_kport, IKO_NULL, IKOT_NONE);
    task->exec_token += 1;
    ip_unlock(old_kport);

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
        old_exc_actions[i] = IP_NULL;

        if (i == EXC_CORPSE_NOTIFY && task_corpse_pending_report(task)) {
            continue;
        }

        if (!task->exc_actions[i].privileged) {
#if CONFIG_MACF
            mac_exc_update_action_label(task->exc_actions + i, unset_label);
#endif
            old_exc_actions[i] = task->exc_actions[i].port;
            task->exc_actions[i].port = IP_NULL;
        }
    }

    if (IP_VALID(task->itk_debug_control)) {
        ipc_port_release_send(task->itk_debug_control);
    }
    task->itk_debug_control = IP_NULL;

    itk_unlock(task);

#if CONFIG_MACF
    mac_exc_free_label(unset_label);
#endif

    /* release the naked send rights */

    if (IP_VALID(old_sself)) {
        ipc_port_release_send(old_sself);
    }

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
        if (IP_VALID(old_exc_actions[i])) {
            ipc_port_release_send(old_exc_actions[i]);
        }
    }

    /* destroy the kernel port */
    ipc_port_dealloc_kernel(old_kport);
}
/*
 *  Routine:    ipc_thread_init
 *  Purpose:
 *      Initialize a thread's IPC state.
 */
void
ipc_thread_init(
    thread_t    thread)
{
    ipc_port_t kport;

    kport = ipc_kobject_alloc_port((ipc_kobject_t)thread, IKOT_THREAD,
        IPC_KOBJECT_ALLOC_MAKE_SEND);

    thread->ith_sself = thread->ith_self = kport;
    thread->ith_special_reply_port = NULL;
    thread->exc_actions = NULL;

#if IMPORTANCE_INHERITANCE
    thread->ith_assertions = 0;
#endif

    ipc_kmsg_queue_init(&thread->ith_messages);

    thread->ith_rpc_reply = IP_NULL;
}
void
ipc_thread_init_exc_actions(
    thread_t    thread)
{
    assert(thread->exc_actions == NULL);

    thread->exc_actions = kalloc(sizeof(struct exception_action) * EXC_TYPES_COUNT);
    bzero(thread->exc_actions, sizeof(struct exception_action) * EXC_TYPES_COUNT);

#if CONFIG_MACF
    for (size_t i = 0; i < EXC_TYPES_COUNT; ++i) {
        mac_exc_associate_action_label(thread->exc_actions + i, mac_exc_create_label());
    }
#endif
}
void
ipc_thread_destroy_exc_actions(
    thread_t    thread)
{
    if (thread->exc_actions != NULL) {
#if CONFIG_MACF
        for (size_t i = 0; i < EXC_TYPES_COUNT; ++i) {
            mac_exc_free_action_label(thread->exc_actions + i);
        }
#endif

        kfree(thread->exc_actions,
            sizeof(struct exception_action) * EXC_TYPES_COUNT);
        thread->exc_actions = NULL;
    }
}
void
ipc_thread_disable(
    thread_t    thread)
{
    ipc_port_t kport = thread->ith_self;

    if (kport != IP_NULL) {
        ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
    }

    /* unbind the thread special reply port */
    if (IP_VALID(thread->ith_special_reply_port)) {
        ipc_port_unbind_special_reply_port(thread, TRUE);
    }
}
/*
 *  Routine:    ipc_thread_terminate
 *  Purpose:
 *      Clean up and destroy a thread's IPC state.
 */
void
ipc_thread_terminate(
    thread_t    thread)
{
    ipc_port_t kport = thread->ith_self;

    if (kport != IP_NULL) {
        int i;

        if (IP_VALID(thread->ith_sself)) {
            ipc_port_release_send(thread->ith_sself);
        }

        thread->ith_sself = thread->ith_self = IP_NULL;

        if (thread->exc_actions != NULL) {
            for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
                if (IP_VALID(thread->exc_actions[i].port)) {
                    ipc_port_release_send(thread->exc_actions[i].port);
                }
            }
            ipc_thread_destroy_exc_actions(thread);
        }

        ipc_port_dealloc_kernel(kport);
    }

#if IMPORTANCE_INHERITANCE
    assert(thread->ith_assertions == 0);
#endif

    assert(ipc_kmsg_queue_empty(&thread->ith_messages));

    if (thread->ith_rpc_reply != IP_NULL) {
        ipc_port_dealloc_reply(thread->ith_rpc_reply);
    }

    thread->ith_rpc_reply = IP_NULL;
}
/*
 *  Routine:    ipc_thread_reset
 *  Purpose:
 *      Reset the IPC state for a given Mach thread when
 *      its task enters an elevated security context.
 *      Both the thread port and its exception ports have
 *      to be reset.  Its RPC reply port cannot have any
 *      rights outstanding, so it should be fine.
 */
void
ipc_thread_reset(
    thread_t    thread)
{
    ipc_port_t old_kport, new_kport;
    ipc_port_t old_sself;
    ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
    boolean_t has_old_exc_actions = FALSE;
    int i;

#if CONFIG_MACF
    struct label *new_label = mac_exc_create_label();
#endif

    new_kport = ipc_kobject_alloc_port((ipc_kobject_t)thread, IKOT_THREAD,
        IPC_KOBJECT_ALLOC_MAKE_SEND);

    thread_mtx_lock(thread);

    old_kport = thread->ith_self;
    old_sself = thread->ith_sself;

    if (old_kport == IP_NULL && thread->inspection == FALSE) {
        /* the thread is already terminated (can this happen?) */
        thread_mtx_unlock(thread);
        ipc_port_release_send(new_kport);
        ipc_port_dealloc_kernel(new_kport);
#if CONFIG_MACF
        mac_exc_free_label(new_label);
#endif
        return;
    }

    thread->ith_sself = thread->ith_self = new_kport;
    if (old_kport != IP_NULL) {
        ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
    }

    /*
     * Only ports that were set by root-owned processes
     * (privileged ports) should survive
     */
    if (thread->exc_actions != NULL) {
        has_old_exc_actions = TRUE;
        for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
            if (thread->exc_actions[i].privileged) {
                old_exc_actions[i] = IP_NULL;
            } else {
#if CONFIG_MACF
                mac_exc_update_action_label(thread->exc_actions + i, new_label);
#endif
                old_exc_actions[i] = thread->exc_actions[i].port;
                thread->exc_actions[i].port = IP_NULL;
            }
        }
    }

    thread_mtx_unlock(thread);

#if CONFIG_MACF
    mac_exc_free_label(new_label);
#endif

    /* release the naked send rights */

    if (IP_VALID(old_sself)) {
        ipc_port_release_send(old_sself);
    }

    if (has_old_exc_actions) {
        for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
            ipc_port_release_send(old_exc_actions[i]);
        }
    }

    /* destroy the kernel port */
    if (old_kport != IP_NULL) {
        ipc_port_dealloc_kernel(old_kport);
    }

    /* unbind the thread special reply port */
    if (IP_VALID(thread->ith_special_reply_port)) {
        ipc_port_unbind_special_reply_port(thread, TRUE);
    }
}
/*
 *  Routine:    retrieve_task_self_fast
 *  Purpose:
 *      Optimized version of retrieve_task_self,
 *      that only works for the current task.
 *
 *      Return a send right (possibly null/dead)
 *      for the task's user-visible self port.
 */
ipc_port_t
retrieve_task_self_fast(
    task_t      task)
{
    __assert_only ipc_port_t sright;
    ipc_port_t port;

    assert(task == current_task());

    itk_lock(task);
    assert(task->itk_self != IP_NULL);

    if ((port = task->itk_sself) == task->itk_self) {
        sright = ipc_port_copy_send(port);
        assert(sright == port);
    } else {
        port = ipc_port_copy_send(port);
    }
    itk_unlock(task);

    return port;
}
/*
 *  Routine:    retrieve_thread_self_fast
 *  Purpose:
 *      Return a send right (possibly null/dead)
 *      for the thread's user-visible self port.
 *
 *      Only works for the current thread.
 */
ipc_port_t
retrieve_thread_self_fast(
    thread_t    thread)
{
    __assert_only ipc_port_t sright;
    ipc_port_t port;

    assert(thread == current_thread());

    thread_mtx_lock(thread);

    assert(thread->ith_self != IP_NULL);

    if ((port = thread->ith_sself) == thread->ith_self) {
        sright = ipc_port_copy_send(port);
        assert(sright == port);
    } else {
        port = ipc_port_copy_send(port);
    }

    thread_mtx_unlock(thread);

    return port;
}
/*
 *  Routine:    task_self_trap [mach trap]
 *  Purpose:
 *      Give the caller send rights for his own task port.
 *  Returns:
 *      MACH_PORT_NULL if there are any resource failures
 *      or other errors.
 */
mach_port_name_t
task_self_trap(
    __unused struct task_self_trap_args *args)
{
    task_t task = current_task();
    ipc_port_t sright;
    mach_port_name_t name;

    sright = retrieve_task_self_fast(task);
    name = ipc_port_copyout_send(sright, task->itk_space);
    return name;
}

/*
 *  Routine:    thread_self_trap [mach trap]
 *  Purpose:
 *      Give the caller send rights for his own thread port.
 *  Returns:
 *      MACH_PORT_NULL if there are any resource failures
 *      or other errors.
 */
mach_port_name_t
thread_self_trap(
    __unused struct thread_self_trap_args *args)
{
    thread_t thread = current_thread();
    task_t task = thread->task;
    ipc_port_t sright;
    mach_port_name_t name;

    sright = retrieve_thread_self_fast(thread);
    name = ipc_port_copyout_send(sright, task->itk_space);
    return name;
}
/*
 *  Routine:    mach_reply_port [mach trap]
 *  Purpose:
 *      Allocate a port for the caller.
 *  Returns:
 *      MACH_PORT_NULL if there are any resource failures
 *      or other errors.
 */
mach_port_name_t
mach_reply_port(
    __unused struct mach_reply_port_args *args)
{
    ipc_port_t port;
    mach_port_name_t name;
    kern_return_t kr;

    kr = ipc_port_alloc(current_task()->itk_space, IPC_PORT_INIT_MESSAGE_QUEUE,
        &name, &port);
    if (kr == KERN_SUCCESS) {
        ip_unlock(port);
    } else {
        name = MACH_PORT_NULL;
    }
    return name;
}
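/*
 * Illustrative sketch (not part of this file): how user space typically uses
 * the reply port allocated by the trap above. The message fields and msgh_id
 * are hypothetical; mach_reply_port(), mach_msg() and the MACH_* constants
 * are the public Mach interfaces.
 *
 *  mach_port_t reply = mach_reply_port();
 *  mach_msg_header_t msg = {
 *      .msgh_bits        = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND,
 *                              MACH_MSG_TYPE_MAKE_SEND_ONCE),
 *      .msgh_remote_port = service_port,   // send right obtained elsewhere
 *      .msgh_local_port  = reply,          // reply gets routed back here
 *      .msgh_size        = sizeof(msg),
 *      .msgh_id          = 1234,           // hypothetical request id
 *  };
 *  kern_return_t kr = mach_msg(&msg, MACH_SEND_MSG | MACH_RCV_MSG,
 *      sizeof(msg), sizeof(msg), reply, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
 */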
/*
 *  Routine:    thread_get_special_reply_port [mach trap]
 *  Purpose:
 *      Allocate a special reply port for the calling thread.
 *  Returns:
 *      mach_port_name_t: send right & receive right for special reply port.
 *      MACH_PORT_NULL if there are any resource failures
 *      or other errors.
 */
mach_port_name_t
thread_get_special_reply_port(
    __unused struct thread_get_special_reply_port_args *args)
{
    ipc_port_t port;
    mach_port_name_t name;
    kern_return_t kr;
    thread_t thread = current_thread();
    ipc_port_init_flags_t flags = IPC_PORT_INIT_MESSAGE_QUEUE |
        IPC_PORT_INIT_MAKE_SEND_RIGHT | IPC_PORT_INIT_SPECIAL_REPLY;

    /* unbind the thread special reply port */
    if (IP_VALID(thread->ith_special_reply_port)) {
        kr = ipc_port_unbind_special_reply_port(thread, TRUE);
        if (kr != KERN_SUCCESS) {
            return MACH_PORT_NULL;
        }
    }

    kr = ipc_port_alloc(current_task()->itk_space, flags, &name, &port);
    if (kr == KERN_SUCCESS) {
        ipc_port_bind_special_reply_port_locked(port);
        ip_unlock(port);
    } else {
        name = MACH_PORT_NULL;
    }
    return name;
}
/*
 *  Routine:    ipc_port_bind_special_reply_port_locked
 *  Purpose:
 *      Bind the given port to current thread as a special reply port.
 */
static void
ipc_port_bind_special_reply_port_locked(
    ipc_port_t port)
{
    thread_t thread = current_thread();
    assert(thread->ith_special_reply_port == NULL);
    assert(port->ip_specialreply);
    assert(port->ip_sync_link_state == PORT_SYNC_LINK_ANY);

    ip_reference(port);
    thread->ith_special_reply_port = port;
    port->ip_messages.imq_srp_owner_thread = thread;

    ipc_special_reply_port_bits_reset(port);
}
/*
 *  Routine:    ipc_port_unbind_special_reply_port
 *  Purpose:
 *      Unbind the thread's special reply port.
 *      If the special port has threads waiting on turnstile,
 *      update its inheritor.
 */
static kern_return_t
ipc_port_unbind_special_reply_port(
    thread_t thread,
    boolean_t unbind_active_port)
{
    ipc_port_t special_reply_port = thread->ith_special_reply_port;

    ip_lock(special_reply_port);

    /* Return error if port active and unbind_active_port set to FALSE */
    if (unbind_active_port == FALSE && ip_active(special_reply_port)) {
        ip_unlock(special_reply_port);
        return KERN_FAILURE;
    }

    thread->ith_special_reply_port = NULL;
    ipc_port_adjust_special_reply_port_locked(special_reply_port, NULL,
        IPC_PORT_ADJUST_UNLINK_THREAD, FALSE);
    /* port unlocked */

    ip_release(special_reply_port);
    return KERN_SUCCESS;
}
/*
 *  Routine:    thread_get_special_port [kernel call]
 *  Purpose:
 *      Clones a send right for one of the thread's
 *      special ports.
 *  Returns:
 *      KERN_SUCCESS            Extracted a send right.
 *      KERN_INVALID_ARGUMENT   The thread is null.
 *      KERN_FAILURE            The thread is dead.
 *      KERN_INVALID_ARGUMENT   Invalid special port.
 */
kern_return_t
thread_get_special_port(
    thread_t        thread,
    int             which,
    ipc_port_t      *portp)
{
    kern_return_t result = KERN_SUCCESS;
    ipc_port_t *whichp;

    if (thread == THREAD_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    switch (which) {
    case THREAD_KERNEL_PORT:
        whichp = &thread->ith_sself;
        break;

    default:
        return KERN_INVALID_ARGUMENT;
    }

    thread_mtx_lock(thread);

    if (thread->active) {
        *portp = ipc_port_copy_send(*whichp);
    } else {
        result = KERN_FAILURE;
    }

    thread_mtx_unlock(thread);

    return result;
}
/*
 *  Routine:    thread_set_special_port [kernel call]
 *  Purpose:
 *      Changes one of the thread's special ports,
 *      setting it to the supplied send right.
 *  Conditions:
 *      Nothing locked.  If successful, consumes
 *      the supplied send right.
 *  Returns:
 *      KERN_SUCCESS            Changed the special port.
 *      KERN_INVALID_ARGUMENT   The thread is null.
 *      KERN_FAILURE            The thread is dead.
 *      KERN_INVALID_ARGUMENT   Invalid special port.
 */
kern_return_t
thread_set_special_port(
    thread_t        thread,
    int             which,
    ipc_port_t      port)
{
    kern_return_t result = KERN_SUCCESS;
    ipc_port_t *whichp, old = IP_NULL;

    if (thread == THREAD_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    switch (which) {
    case THREAD_KERNEL_PORT:
        whichp = &thread->ith_sself;
        break;

    default:
        return KERN_INVALID_ARGUMENT;
    }

    thread_mtx_lock(thread);

    if (thread->active) {
        old = *whichp;
        *whichp = port;
    } else {
        result = KERN_FAILURE;
    }

    thread_mtx_unlock(thread);

    if (IP_VALID(old)) {
        ipc_port_release_send(old);
    }

    return result;
}
/*
 *  Routine:    task_get_special_port [kernel call]
 *  Purpose:
 *      Clones a send right for one of the task's
 *      special ports.
 *  Returns:
 *      KERN_SUCCESS            Extracted a send right.
 *      KERN_INVALID_ARGUMENT   The task is null.
 *      KERN_FAILURE            The task/space is dead.
 *      KERN_INVALID_ARGUMENT   Invalid special port.
 */
kern_return_t
task_get_special_port(
    task_t          task,
    int             which,
    ipc_port_t      *portp)
{
    ipc_port_t port;

    if (task == TASK_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    itk_lock(task);
    if (task->itk_self == IP_NULL) {
        itk_unlock(task);
        return KERN_FAILURE;
    }

    switch (which) {
    case TASK_KERNEL_PORT:
        port = ipc_port_copy_send(task->itk_sself);
        break;

    case TASK_NAME_PORT:
        port = ipc_port_make_send(task->itk_nself);
        break;

    case TASK_HOST_PORT:
        port = ipc_port_copy_send(task->itk_host);
        break;

    case TASK_BOOTSTRAP_PORT:
        port = ipc_port_copy_send(task->itk_bootstrap);
        break;

    case TASK_SEATBELT_PORT:
        port = ipc_port_copy_send(task->itk_seatbelt);
        break;

    case TASK_ACCESS_PORT:
        port = ipc_port_copy_send(task->itk_task_access);
        break;

    case TASK_DEBUG_CONTROL_PORT:
        port = ipc_port_copy_send(task->itk_debug_control);
        break;

    default:
        itk_unlock(task);
        return KERN_INVALID_ARGUMENT;
    }
    itk_unlock(task);

    *portp = port;
    return KERN_SUCCESS;
}
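/*
 * Illustrative sketch (not from this file): reading and replacing a task
 * special port from user space via the MIG interfaces generated from
 * task.defs. The bootstrap-port swap shown here is a hypothetical example;
 * new_bootstrap is assumed to be a send right the caller already holds.
 *
 *  mach_port_t old_bootstrap = MACH_PORT_NULL;
 *  kern_return_t kr = task_get_special_port(mach_task_self(),
 *      TASK_BOOTSTRAP_PORT, &old_bootstrap);       // clones a send right
 *  if (kr == KERN_SUCCESS) {
 *      kr = task_set_special_port(mach_task_self(),
 *          TASK_BOOTSTRAP_PORT, new_bootstrap);    // consumes the supplied right on success
 *  }
 */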
/*
 *  Routine:    task_set_special_port [kernel call]
 *  Purpose:
 *      Changes one of the task's special ports,
 *      setting it to the supplied send right.
 *  Conditions:
 *      Nothing locked.  If successful, consumes
 *      the supplied send right.
 *  Returns:
 *      KERN_SUCCESS            Changed the special port.
 *      KERN_INVALID_ARGUMENT   The task is null.
 *      KERN_FAILURE            The task/space is dead.
 *      KERN_INVALID_ARGUMENT   Invalid special port.
 *      KERN_NO_ACCESS          Attempted overwrite of seatbelt port.
 */
kern_return_t
task_set_special_port(
    task_t          task,
    int             which,
    ipc_port_t      port)
{
    ipc_port_t *whichp;
    ipc_port_t old;

    if (task == TASK_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    if (task_is_driver(current_task())) {
        return KERN_NO_ACCESS;
    }

    switch (which) {
    case TASK_KERNEL_PORT:
        whichp = &task->itk_sself;
        break;

    case TASK_HOST_PORT:
        whichp = &task->itk_host;
        break;

    case TASK_BOOTSTRAP_PORT:
        whichp = &task->itk_bootstrap;
        break;

    case TASK_SEATBELT_PORT:
        whichp = &task->itk_seatbelt;
        break;

    case TASK_ACCESS_PORT:
        whichp = &task->itk_task_access;
        break;

    case TASK_DEBUG_CONTROL_PORT:
        whichp = &task->itk_debug_control;
        break;

    default:
        return KERN_INVALID_ARGUMENT;
    }

    itk_lock(task);
    if (task->itk_self == IP_NULL) {
        itk_unlock(task);
        return KERN_FAILURE;
    }

    /* do not allow overwrite of seatbelt or task access ports */
    if ((TASK_SEATBELT_PORT == which || TASK_ACCESS_PORT == which)
        && IP_VALID(*whichp)) {
        itk_unlock(task);
        return KERN_NO_ACCESS;
    }

    old = *whichp;
    *whichp = port;
    itk_unlock(task);

    if (IP_VALID(old)) {
        ipc_port_release_send(old);
    }
    return KERN_SUCCESS;
}
/*
 *  Routine:    mach_ports_register [kernel call]
 *  Purpose:
 *      Stash a handful of port send rights in the task.
 *      Child tasks will inherit these rights, but they
 *      must use mach_ports_lookup to acquire them.
 *
 *      The rights are supplied in a (wired) kalloc'd segment.
 *      Rights which aren't supplied are assumed to be null.
 *  Conditions:
 *      Nothing locked.  If successful, consumes
 *      the supplied rights and memory.
 *  Returns:
 *      KERN_SUCCESS            Stashed the port rights.
 *      KERN_INVALID_ARGUMENT   The task is null.
 *      KERN_INVALID_ARGUMENT   The task is dead.
 *      KERN_INVALID_ARGUMENT   The memory param is null.
 *      KERN_INVALID_ARGUMENT   Too many port rights supplied.
 */
kern_return_t
mach_ports_register(
    task_t                  task,
    mach_port_array_t       memory,
    mach_msg_type_number_t  portsCnt)
{
    ipc_port_t ports[TASK_PORT_REGISTER_MAX];
    unsigned int i;

    if ((task == TASK_NULL) ||
        (portsCnt > TASK_PORT_REGISTER_MAX) ||
        (portsCnt && memory == NULL)) {
        return KERN_INVALID_ARGUMENT;
    }

    /*
     *  Pad the port rights with nulls.
     */

    for (i = 0; i < portsCnt; i++) {
        ports[i] = memory[i];
    }
    for (; i < TASK_PORT_REGISTER_MAX; i++) {
        ports[i] = IP_NULL;
    }

    itk_lock(task);
    if (task->itk_self == IP_NULL) {
        itk_unlock(task);
        return KERN_INVALID_ARGUMENT;
    }

    /*
     *  Replace the old send rights with the new.
     *  Release the old rights after unlocking.
     */

    for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
        ipc_port_t old;

        old = task->itk_registered[i];
        task->itk_registered[i] = ports[i];
        ports[i] = old;
    }

    itk_unlock(task);

    for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
        if (IP_VALID(ports[i])) {
            ipc_port_release_send(ports[i]);
        }
    }

    /*
     *  Now that the operation is known to be successful,
     *  we can free the memory.
     */

    if (portsCnt != 0) {
        kfree(memory,
            (vm_size_t) (portsCnt * sizeof(mach_port_t)));
    }

    return KERN_SUCCESS;
}
/*
 *  Routine:    mach_ports_lookup [kernel call]
 *  Purpose:
 *      Retrieves (clones) the stashed port send rights.
 *  Conditions:
 *      Nothing locked.  If successful, the caller gets
 *      rights and memory.
 *  Returns:
 *      KERN_SUCCESS            Retrieved the send rights.
 *      KERN_INVALID_ARGUMENT   The task is null.
 *      KERN_INVALID_ARGUMENT   The task is dead.
 *      KERN_RESOURCE_SHORTAGE  Couldn't allocate memory.
 */
kern_return_t
mach_ports_lookup(
    task_t                  task,
    mach_port_array_t       *portsp,
    mach_msg_type_number_t  *portsCnt)
{
    void *memory;
    vm_size_t size;
    ipc_port_t *ports;
    int i;

    if (task == TASK_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    size = (vm_size_t) (TASK_PORT_REGISTER_MAX * sizeof(ipc_port_t));

    memory = kalloc(size);
    if (memory == 0) {
        return KERN_RESOURCE_SHORTAGE;
    }

    itk_lock(task);
    if (task->itk_self == IP_NULL) {
        itk_unlock(task);

        kfree(memory, size);
        return KERN_INVALID_ARGUMENT;
    }

    ports = (ipc_port_t *) memory;

    /*
     *  Clone port rights.  Because kalloc'd memory
     *  is wired, we won't fault while holding the task lock.
     */

    for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
        ports[i] = ipc_port_copy_send(task->itk_registered[i]);
    }

    itk_unlock(task);

    *portsp = (mach_port_array_t) ports;
    *portsCnt = TASK_PORT_REGISTER_MAX;
    return KERN_SUCCESS;
}
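/*
 * Illustrative sketch (not from this file): the register/lookup pairing as a
 * parent and child would use it through the public MIG routines
 * mach_ports_register()/mach_ports_lookup(). The array contents are
 * hypothetical.
 *
 *  // parent: stash up to TASK_PORT_REGISTER_MAX send rights for children
 *  mach_port_t init_ports[1] = { service_port };
 *  mach_ports_register(mach_task_self(), init_ports, 1);
 *
 *  // child: retrieve clones of whatever the parent registered
 *  mach_port_array_t ports;
 *  mach_msg_type_number_t count;
 *  if (mach_ports_lookup(mach_task_self(), &ports, &count) == KERN_SUCCESS) {
 *      mach_port_t service = ports[0];     // unset slots come back as MACH_PORT_NULL
 *  }
 */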
extern zone_t task_zone;

kern_return_t
task_conversion_eval(task_t caller, task_t victim)
{
    /*
     * Tasks are allowed to resolve their own task ports, and the kernel is
     * allowed to resolve anyone's task port.
     */
    if (caller == kernel_task) {
        return KERN_SUCCESS;
    }

    if (caller == victim) {
        return KERN_SUCCESS;
    }

    /*
     * Only the kernel can resolve the kernel's task port. We've established
     * by this point that the caller is not kernel_task.
     */
    if (victim == TASK_NULL || victim == kernel_task) {
        return KERN_INVALID_SECURITY;
    }

    zone_require(victim, task_zone);

#if CONFIG_EMBEDDED
    /*
     * On embedded platforms, only a platform binary can resolve the task port
     * of another platform binary.
     */
    if ((victim->t_flags & TF_PLATFORM) && !(caller->t_flags & TF_PLATFORM)) {
#if SECURE_KERNEL
        return KERN_INVALID_SECURITY;
#else
        if (cs_relax_platform_task_ports) {
            return KERN_SUCCESS;
        } else {
            return KERN_INVALID_SECURITY;
        }
#endif /* SECURE_KERNEL */
    }
#endif /* CONFIG_EMBEDDED */

    return KERN_SUCCESS;
}
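/*
 * Informal summary of task_conversion_eval() above, derived directly from the
 * checks in the routine (added for clarity):
 *
 *  caller                  victim                      result
 *  ------                  ------                      ------
 *  kernel_task             anything                    KERN_SUCCESS
 *  T                       T (itself)                  KERN_SUCCESS
 *  not kernel_task         TASK_NULL or kernel_task    KERN_INVALID_SECURITY
 *  non-platform (embedded) platform binary             KERN_INVALID_SECURITY, unless
 *                                                      cs_relax_platform_task_ports is
 *                                                      set on non-SECURE_KERNEL builds
 *  anything else                                       KERN_SUCCESS
 */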
/*
 *  Routine:    convert_port_to_locked_task
 *  Purpose:
 *      Internal helper routine to convert from a port to a locked
 *      task.  Used by several routines that try to convert from a
 *      task port to a reference on some task related object.
 *  Conditions:
 *      Nothing locked, blocking OK.
 */
task_t
convert_port_to_locked_task(ipc_port_t port)
{
    int try_failed_count = 0;

    while (IP_VALID(port)) {
        task_t ct = current_task();
        task_t task;

        ip_lock(port);
        if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK)) {
            ip_unlock(port);
            return TASK_NULL;
        }
        task = (task_t) port->ip_kobject;
        assert(task != TASK_NULL);

        if (task_conversion_eval(ct, task)) {
            ip_unlock(port);
            return TASK_NULL;
        }

        /*
         * Normal lock ordering puts task_lock() before ip_lock().
         * Attempt out-of-order locking here.
         */
        if (task_lock_try(task)) {
            ip_unlock(port);
            return task;
        }
        try_failed_count++;

        ip_unlock(port);
        mutex_pause(try_failed_count);
    }
    return TASK_NULL;
}
/*
 *  Routine:    convert_port_to_locked_task_inspect
 *  Purpose:
 *      Internal helper routine to convert from a port to a locked
 *      task inspect right. Used by internal routines that try to convert from a
 *      task inspect port to a reference on some task related object.
 *  Conditions:
 *      Nothing locked, blocking OK.
 */
task_inspect_t
convert_port_to_locked_task_inspect(ipc_port_t port)
{
    int try_failed_count = 0;

    while (IP_VALID(port)) {
        task_inspect_t task;

        ip_lock(port);
        if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK)) {
            ip_unlock(port);
            return TASK_INSPECT_NULL;
        }
        task = (task_inspect_t)port->ip_kobject;
        assert(task != TASK_INSPECT_NULL);
        /*
         * Normal lock ordering puts task_lock() before ip_lock().
         * Attempt out-of-order locking here.
         */
        if (task_lock_try((task_t)task)) {
            ip_unlock(port);
            return task;
        }
        try_failed_count++;

        ip_unlock(port);
        mutex_pause(try_failed_count);
    }
    return TASK_INSPECT_NULL;
}
static task_t
convert_port_to_task_locked(
    ipc_port_t      port,
    uint32_t        *exec_token)
{
    task_t          task = TASK_NULL;

    require_ip_active(port);

    if (ip_kotype(port) == IKOT_TASK) {
        task_t ct = current_task();
        task = (task_t)port->ip_kobject;
        assert(task != TASK_NULL);

        if (task_conversion_eval(ct, task)) {
            return TASK_NULL;
        }

        if (exec_token) {
            *exec_token = task->exec_token;
        }
        task_reference_internal(task);
    }

    return task;
}
/*
 *  Routine:    convert_port_to_task_with_exec_token
 *  Purpose:
 *      Convert from a port to a task and return
 *      the exec token stored in the task.
 *      Doesn't consume the port ref; produces a task ref,
 *      which may be null.
 */
task_t
convert_port_to_task_with_exec_token(
    ipc_port_t      port,
    uint32_t        *exec_token)
{
    task_t          task = TASK_NULL;

    if (IP_VALID(port)) {
        ip_lock(port);
        if (ip_active(port)) {
            task = convert_port_to_task_locked(port, exec_token);
        }
        ip_unlock(port);
    }

    return task;
}

/*
 *  Routine:    convert_port_to_task
 *  Purpose:
 *      Convert from a port to a task.
 *      Doesn't consume the port ref; produces a task ref,
 *      which may be null.
 */
task_t
convert_port_to_task(
    ipc_port_t      port)
{
    return convert_port_to_task_with_exec_token(port, NULL);
}
/*
 *  Routine:    convert_port_to_task_name
 *  Purpose:
 *      Convert from a port to a task name.
 *      Doesn't consume the port ref; produces a task name ref,
 *      which may be null.
 */
task_name_t
convert_port_to_task_name(
    ipc_port_t      port)
{
    task_name_t     task = TASK_NULL;

    if (IP_VALID(port)) {
        ip_lock(port);

        if (ip_active(port) &&
            (ip_kotype(port) == IKOT_TASK ||
            ip_kotype(port) == IKOT_TASK_NAME)) {
            task = (task_name_t)port->ip_kobject;
            assert(task != TASK_NAME_NULL);

            task_reference_internal(task);
        }

        ip_unlock(port);
    }

    return task;
}
static task_inspect_t
convert_port_to_task_inspect_locked(
    ipc_port_t      port)
{
    task_inspect_t task = TASK_INSPECT_NULL;

    require_ip_active(port);

    if (ip_kotype(port) == IKOT_TASK) {
        task = (task_inspect_t)port->ip_kobject;
        assert(task != TASK_INSPECT_NULL);

        task_reference_internal(task);
    }

    return task;
}

/*
 *  Routine:    convert_port_to_task_inspect
 *  Purpose:
 *      Convert from a port to a task inspection right
 *      Doesn't consume the port ref; produces a task ref,
 *      which may be null.
 */
task_inspect_t
convert_port_to_task_inspect(
    ipc_port_t      port)
{
    task_inspect_t task = TASK_INSPECT_NULL;

    if (IP_VALID(port)) {
        ip_lock(port);
        if (ip_active(port)) {
            task = convert_port_to_task_inspect_locked(port);
        }
        ip_unlock(port);
    }

    return task;
}
/*
 *  Routine:    convert_port_to_task_suspension_token
 *  Purpose:
 *      Convert from a port to a task suspension token.
 *      Doesn't consume the port ref; produces a suspension token ref,
 *      which may be null.
 */
task_suspension_token_t
convert_port_to_task_suspension_token(
    ipc_port_t      port)
{
    task_suspension_token_t task = TASK_NULL;

    if (IP_VALID(port)) {
        ip_lock(port);

        if (ip_active(port) &&
            ip_kotype(port) == IKOT_TASK_RESUME) {
            task = (task_suspension_token_t)port->ip_kobject;
            assert(task != TASK_NULL);

            task_reference_internal(task);
        }

        ip_unlock(port);
    }

    return task;
}
/*
 *  Routine:    convert_port_to_space
 *  Purpose:
 *      Convert from a port to a space.
 *      Doesn't consume the port ref; produces a space ref,
 *      which may be null.
 */
ipc_space_t
convert_port_to_space(
    ipc_port_t      port)
{
    ipc_space_t space;
    task_t task;

    task = convert_port_to_locked_task(port);

    if (task == TASK_NULL) {
        return IPC_SPACE_NULL;
    }

    if (!task->active) {
        task_unlock(task);
        return IPC_SPACE_NULL;
    }

    space = task->itk_space;
    is_reference(space);
    task_unlock(task);
    return space;
}
/*
 *  Routine:    convert_port_to_space_inspect
 *  Purpose:
 *      Convert from a port to a space inspect right.
 *      Doesn't consume the port ref; produces a space inspect ref,
 *      which may be null.
 */
ipc_space_inspect_t
convert_port_to_space_inspect(
    ipc_port_t      port)
{
    ipc_space_inspect_t space;
    task_inspect_t task;

    task = convert_port_to_locked_task_inspect(port);

    if (task == TASK_INSPECT_NULL) {
        return IPC_SPACE_INSPECT_NULL;
    }

    if (!task->active) {
        task_unlock(task);
        return IPC_SPACE_INSPECT_NULL;
    }

    space = (ipc_space_inspect_t)task->itk_space;
    is_reference((ipc_space_t)space);
    task_unlock((task_t)task);
    return space;
}
/*
 *  Routine:    convert_port_to_map
 *  Purpose:
 *      Convert from a port to a map.
 *      Doesn't consume the port ref; produces a map ref,
 *      which may be null.
 */
vm_map_t
convert_port_to_map(
    ipc_port_t      port)
{
    task_t task;
    vm_map_t map;

    task = convert_port_to_locked_task(port);

    if (task == TASK_NULL) {
        return VM_MAP_NULL;
    }

    if (!task->active) {
        task_unlock(task);
        return VM_MAP_NULL;
    }

    map = task->map;
    vm_map_reference_swap(map);
    task_unlock(task);
    return map;
}
/*
 *  Routine:    convert_port_to_thread
 *  Purpose:
 *      Convert from a port to a thread.
 *      Doesn't consume the port ref; produces a thread ref,
 *      which may be null.
 */
static thread_t
convert_port_to_thread_locked(
    ipc_port_t               port,
    port_to_thread_options_t options)
{
    thread_t        thread = THREAD_NULL;

    require_ip_active(port);

    if (ip_kotype(port) == IKOT_THREAD) {
        thread = (thread_t)port->ip_kobject;
        assert(thread != THREAD_NULL);

        if (options & PORT_TO_THREAD_NOT_CURRENT_THREAD) {
            if (thread == current_thread()) {
                return THREAD_NULL;
            }
        }

        if (options & PORT_TO_THREAD_IN_CURRENT_TASK) {
            if (thread->task != current_task()) {
                return THREAD_NULL;
            }
        } else {
            /* Use task conversion rules for thread control conversions */
            if (task_conversion_eval(current_task(), thread->task) != KERN_SUCCESS) {
                return THREAD_NULL;
            }
        }

        thread_reference_internal(thread);
    }

    return thread;
}

thread_t
convert_port_to_thread(
    ipc_port_t      port)
{
    thread_t        thread = THREAD_NULL;

    if (IP_VALID(port)) {
        ip_lock(port);
        if (ip_active(port)) {
            thread = convert_port_to_thread_locked(port, PORT_TO_THREAD_NONE);
        }
        ip_unlock(port);
    }

    return thread;
}
/*
 *  Routine:    convert_port_to_thread_inspect
 *  Purpose:
 *      Convert from a port to a thread inspection right
 *      Doesn't consume the port ref; produces a thread ref,
 *      which may be null.
 */
thread_inspect_t
convert_port_to_thread_inspect(
    ipc_port_t      port)
{
    thread_inspect_t thread = THREAD_INSPECT_NULL;

    if (IP_VALID(port)) {
        ip_lock(port);

        if (ip_active(port) &&
            ip_kotype(port) == IKOT_THREAD) {
            thread = (thread_inspect_t)port->ip_kobject;
            assert(thread != THREAD_INSPECT_NULL);
            thread_reference_internal((thread_t)thread);
        }
        ip_unlock(port);
    }

    return thread;
}

/*
 *  Routine:    convert_thread_inspect_to_port
 *  Purpose:
 *      Convert from a thread inspect reference to a port.
 *      Consumes a thread ref;
 *      As we never export thread inspect ports, always
 *      creates a NULL port.
 */
ipc_port_t
convert_thread_inspect_to_port(thread_inspect_t thread)
{
    thread_deallocate(thread);
    return IP_NULL;
}
/*
 *  Routine:    port_name_to_thread
 *  Purpose:
 *      Convert from a port name to a thread reference
 *      A name of MACH_PORT_NULL is valid for the null thread.
 */
thread_t
port_name_to_thread(
    mach_port_name_t         name,
    port_to_thread_options_t options)
{
    thread_t        thread = THREAD_NULL;
    ipc_port_t      kport;
    kern_return_t kr;

    if (MACH_PORT_VALID(name)) {
        kr = ipc_port_translate_send(current_space(), name, &kport);
        if (kr == KERN_SUCCESS) {
            thread = convert_port_to_thread_locked(kport, options);
            ip_unlock(kport);
        }
    }

    return thread;
}
task_t
port_name_to_task(
    mach_port_name_t name)
{
    ipc_port_t kport;
    kern_return_t kr;
    task_t task = TASK_NULL;

    if (MACH_PORT_VALID(name)) {
        kr = ipc_port_translate_send(current_space(), name, &kport);
        if (kr == KERN_SUCCESS) {
            task = convert_port_to_task_locked(kport, NULL);
            ip_unlock(kport);
        }
    }
    return task;
}

task_inspect_t
port_name_to_task_inspect(
    mach_port_name_t name)
{
    ipc_port_t kport;
    kern_return_t kr;
    task_inspect_t ti = TASK_INSPECT_NULL;

    if (MACH_PORT_VALID(name)) {
        kr = ipc_port_translate_send(current_space(), name, &kport);
        if (kr == KERN_SUCCESS) {
            ti = convert_port_to_task_inspect_locked(kport);
            ip_unlock(kport);
        }
    }
    return ti;
}
/*
 *  Routine:    port_name_to_host
 *  Purpose:
 *      Convert from a port name to a host pointer.
 *      NOTE: This does _not_ return a +1 reference to the host_t
 */
host_t
port_name_to_host(
    mach_port_name_t name)
{
    host_t host = HOST_NULL;
    kern_return_t kr;
    ipc_port_t port;

    if (MACH_PORT_VALID(name)) {
        kr = ipc_port_translate_send(current_space(), name, &port);
        if (kr == KERN_SUCCESS) {
            host = convert_port_to_host(port);
            ip_unlock(port);
        }
    }
    return host;
}
/*
 *  Routine:    convert_task_to_port
 *  Purpose:
 *      Convert from a task to a port.
 *      Consumes a task ref; produces a naked send right
 *      which may be invalid.
 */
ipc_port_t
convert_task_to_port(
    task_t          task)
{
    ipc_port_t port;

    itk_lock(task);

    if (task->itk_self != IP_NULL) {
        port = ipc_port_make_send(task->itk_self);
    } else {
        port = IP_NULL;
    }

    itk_unlock(task);

    task_deallocate(task);
    return port;
}

/*
 *  Routine:    convert_task_inspect_to_port
 *  Purpose:
 *      Convert from a task inspect reference to a port.
 *      Consumes a task ref;
 *      As we never export task inspect ports, always
 *      creates a NULL port.
 */
ipc_port_t
convert_task_inspect_to_port(
    task_inspect_t  task)
{
    task_deallocate(task);

    return IP_NULL;
}
2073 convert_task_suspension_token_to_port(
2074 task_suspension_token_t task
)
2080 if (task
->itk_resume
== IP_NULL
) {
2081 task
->itk_resume
= ipc_kobject_alloc_port((ipc_kobject_t
) task
,
2082 IKOT_TASK_RESUME
, IPC_KOBJECT_ALLOC_NONE
);
2086 * Create a send-once right for each instance of a direct user-called
2087 * task_suspend2 call. Each time one of these send-once rights is abandoned,
2088 * the notification handler will resume the target task.
2090 port
= ipc_port_make_sonce(task
->itk_resume
);
2091 assert(IP_VALID(port
));
2097 task_suspension_token_deallocate(task
);
/*
 *  Routine:    convert_task_name_to_port
 *  Purpose:
 *      Convert from a task name ref to a port.
 *      Consumes a task name ref; produces a naked send right
 *      which may be invalid.
 */
ipc_port_t
convert_task_name_to_port(
    task_name_t     task_name)
{
    ipc_port_t port;

    itk_lock(task_name);
    if (task_name->itk_nself != IP_NULL) {
        port = ipc_port_make_send(task_name->itk_nself);
    } else {
        port = IP_NULL;
    }
    itk_unlock(task_name);

    task_name_deallocate(task_name);
    return port;
}
/*
 *  Routine:    convert_thread_to_port
 *  Purpose:
 *      Convert from a thread to a port.
 *      Consumes a thread ref; produces a naked send right
 *      which may be invalid.
 */
ipc_port_t
convert_thread_to_port(
    thread_t        thread)
{
    ipc_port_t      port;

    thread_mtx_lock(thread);

    if (thread->ith_self != IP_NULL) {
        port = ipc_port_make_send(thread->ith_self);
    } else {
        port = IP_NULL;
    }

    thread_mtx_unlock(thread);

    thread_deallocate(thread);

    return port;
}
/*
 *  Routine:    space_deallocate
 *  Purpose:
 *      Deallocate a space ref produced by convert_port_to_space.
 */
void
space_deallocate(
    ipc_space_t     space)
{
    if (space != IS_NULL) {
        is_release(space);
    }
}

/*
 *  Routine:    space_inspect_deallocate
 *  Purpose:
 *      Deallocate a space inspect ref produced by convert_port_to_space_inspect.
 */
void
space_inspect_deallocate(
    ipc_space_inspect_t space)
{
    if (space != IS_INSPECT_NULL) {
        is_release((ipc_space_t)space);
    }
}
/*
 *  Routine:    thread/task_set_exception_ports [kernel call]
 *  Purpose:
 *      Sets the thread/task exception port, flavor and
 *      behavior for the exception types specified by the mask.
 *      There will be one send right per exception per valid
 *      port.
 *  Conditions:
 *      Nothing locked.  If successful, consumes
 *      the supplied send right.
 *  Returns:
 *      KERN_SUCCESS            Changed the special port.
 *      KERN_INVALID_ARGUMENT   The thread is null,
 *                              Illegal mask bit set.
 *                              Illegal exception behavior
 *      KERN_FAILURE            The thread is dead.
 */
kern_return_t
thread_set_exception_ports(
    thread_t                thread,
    exception_mask_t        exception_mask,
    ipc_port_t              new_port,
    exception_behavior_t    new_behavior,
    thread_state_flavor_t   new_flavor)
{
    ipc_port_t old_port[EXC_TYPES_COUNT];
    boolean_t privileged = current_task()->sec_token.val[0] == 0;
    int i;

#if CONFIG_MACF
    struct label *new_label;
#endif

    if (thread == THREAD_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    if (exception_mask & ~EXC_MASK_VALID) {
        return KERN_INVALID_ARGUMENT;
    }

    if (IP_VALID(new_port)) {
        switch (new_behavior & ~MACH_EXCEPTION_MASK) {
        case EXCEPTION_DEFAULT:
        case EXCEPTION_STATE:
        case EXCEPTION_STATE_IDENTITY:
            break;

        default:
            return KERN_INVALID_ARGUMENT;
        }
    }

    /*
     * Check the validity of the thread_state_flavor by calling the
     * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
     * osfmk/mach/ARCHITECTURE/thread_status.h
     */
    if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
        return KERN_INVALID_ARGUMENT;
    }

#if CONFIG_MACF
    new_label = mac_exc_create_label_for_current_proc();
#endif

    thread_mtx_lock(thread);

    if (!thread->active) {
        thread_mtx_unlock(thread);

        return KERN_FAILURE;
    }

    if (thread->exc_actions == NULL) {
        ipc_thread_init_exc_actions(thread);
    }
    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
        if ((exception_mask & (1 << i))
#if CONFIG_MACF
            && mac_exc_update_action_label(&thread->exc_actions[i], new_label) == 0
#endif
            ) {
            old_port[i] = thread->exc_actions[i].port;
            thread->exc_actions[i].port = ipc_port_copy_send(new_port);
            thread->exc_actions[i].behavior = new_behavior;
            thread->exc_actions[i].flavor = new_flavor;
            thread->exc_actions[i].privileged = privileged;
        } else {
            old_port[i] = IP_NULL;
        }
    }

    thread_mtx_unlock(thread);

#if CONFIG_MACF
    mac_exc_free_label(new_label);
#endif

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
        if (IP_VALID(old_port[i])) {
            ipc_port_release_send(old_port[i]);
        }
    }

    if (IP_VALID(new_port)) {         /* consume send right */
        ipc_port_release_send(new_port);
    }

    return KERN_SUCCESS;
}
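/*
 * Illustrative sketch (not from this file): installing a catch-all exception
 * handler from user space with the MIG routine wrapping the task-level call
 * below. The handler port setup is a hypothetical minimal example.
 *
 *  mach_port_t handler;
 *  mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &handler);
 *  mach_port_insert_right(mach_task_self(), handler, handler,
 *      MACH_MSG_TYPE_MAKE_SEND);
 *
 *  kern_return_t kr = task_set_exception_ports(mach_task_self(),
 *      EXC_MASK_BAD_ACCESS | EXC_MASK_BAD_INSTRUCTION,
 *      handler,
 *      EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES,   // 64-bit exception codes
 *      THREAD_STATE_NONE);                         // no thread state requested
 */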
kern_return_t
task_set_exception_ports(
    task_t                  task,
    exception_mask_t        exception_mask,
    ipc_port_t              new_port,
    exception_behavior_t    new_behavior,
    thread_state_flavor_t   new_flavor)
{
    ipc_port_t old_port[EXC_TYPES_COUNT];
    boolean_t privileged = current_task()->sec_token.val[0] == 0;
    int i;

#if CONFIG_MACF
    struct label *new_label;
#endif

    if (task == TASK_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    if (exception_mask & ~EXC_MASK_VALID) {
        return KERN_INVALID_ARGUMENT;
    }

    if (IP_VALID(new_port)) {
        switch (new_behavior & ~MACH_EXCEPTION_MASK) {
        case EXCEPTION_DEFAULT:
        case EXCEPTION_STATE:
        case EXCEPTION_STATE_IDENTITY:
            break;

        default:
            return KERN_INVALID_ARGUMENT;
        }
    }

    /*
     * Check the validity of the thread_state_flavor by calling the
     * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
     * osfmk/mach/ARCHITECTURE/thread_status.h
     */
    if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
        return KERN_INVALID_ARGUMENT;
    }

#if CONFIG_MACF
    new_label = mac_exc_create_label_for_current_proc();
#endif

    itk_lock(task);

    if (task->itk_self == IP_NULL) {
        itk_unlock(task);

        return KERN_FAILURE;
    }

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
        if ((exception_mask & (1 << i))
#if CONFIG_MACF
            && mac_exc_update_action_label(&task->exc_actions[i], new_label) == 0
#endif
            ) {
            old_port[i] = task->exc_actions[i].port;
            task->exc_actions[i].port =
                ipc_port_copy_send(new_port);
            task->exc_actions[i].behavior = new_behavior;
            task->exc_actions[i].flavor = new_flavor;
            task->exc_actions[i].privileged = privileged;
        } else {
            old_port[i] = IP_NULL;
        }
    }

    itk_unlock(task);

#if CONFIG_MACF
    mac_exc_free_label(new_label);
#endif

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
        if (IP_VALID(old_port[i])) {
            ipc_port_release_send(old_port[i]);
        }
    }

    if (IP_VALID(new_port)) {         /* consume send right */
        ipc_port_release_send(new_port);
    }

    return KERN_SUCCESS;
}
/*
 *  Routine:    thread/task_swap_exception_ports [kernel call]
 *  Purpose:
 *      Sets the thread/task exception port, flavor and
 *      behavior for the exception types specified by the
 *      mask.
 *
 *      The old ports, behavior and flavors are returned
 *      Count specifies the array sizes on input and
 *      the number of returned ports etc. on output.  The
 *      arrays must be large enough to hold all the returned
 *      data, MIG returns an error otherwise.  The masks
 *      array specifies the corresponding exception type(s).
 *  Conditions:
 *      Nothing locked.  If successful, consumes
 *      the supplied send right.
 *
 *      Returns up to [in] CountCnt elements.
 *  Returns:
 *      KERN_SUCCESS            Changed the special port.
 *      KERN_INVALID_ARGUMENT   The thread is null,
 *                              Illegal mask bit set.
 *                              Illegal exception behavior
 *      KERN_FAILURE            The thread is dead.
 */
kern_return_t
thread_swap_exception_ports(
    thread_t                    thread,
    exception_mask_t            exception_mask,
    ipc_port_t                  new_port,
    exception_behavior_t        new_behavior,
    thread_state_flavor_t       new_flavor,
    exception_mask_array_t      masks,
    mach_msg_type_number_t      *CountCnt,
    exception_port_array_t      ports,
    exception_behavior_array_t  behaviors,
    thread_state_flavor_array_t flavors)
{
    ipc_port_t old_port[EXC_TYPES_COUNT];
    boolean_t privileged = current_task()->sec_token.val[0] == 0;
    unsigned int i, j, count;

#if CONFIG_MACF
    struct label *new_label;
#endif

    if (thread == THREAD_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    if (exception_mask & ~EXC_MASK_VALID) {
        return KERN_INVALID_ARGUMENT;
    }

    if (IP_VALID(new_port)) {
        switch (new_behavior & ~MACH_EXCEPTION_MASK) {
        case EXCEPTION_DEFAULT:
        case EXCEPTION_STATE:
        case EXCEPTION_STATE_IDENTITY:
            break;

        default:
            return KERN_INVALID_ARGUMENT;
        }
    }

    if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
        return KERN_INVALID_ARGUMENT;
    }

#if CONFIG_MACF
    new_label = mac_exc_create_label_for_current_proc();
#endif

    thread_mtx_lock(thread);

    if (!thread->active) {
        thread_mtx_unlock(thread);

        return KERN_FAILURE;
    }

    if (thread->exc_actions == NULL) {
        ipc_thread_init_exc_actions(thread);
    }

    assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
    for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
        if ((exception_mask & (1 << i))
#if CONFIG_MACF
            && mac_exc_update_action_label(&thread->exc_actions[i], new_label) == 0
#endif
            ) {
            for (j = 0; j < count; ++j) {
                /*
                 * search for an identical entry, if found
                 * set corresponding mask for this exception.
                 */
                if (thread->exc_actions[i].port == ports[j] &&
                    thread->exc_actions[i].behavior == behaviors[j] &&
                    thread->exc_actions[i].flavor == flavors[j]) {
                    masks[j] |= (1 << i);
                    break;
                }
            }

            if (j == count) {
                masks[j] = (1 << i);
                ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);

                behaviors[j] = thread->exc_actions[i].behavior;
                flavors[j] = thread->exc_actions[i].flavor;
                ++count;
            }

            old_port[i] = thread->exc_actions[i].port;
            thread->exc_actions[i].port = ipc_port_copy_send(new_port);
            thread->exc_actions[i].behavior = new_behavior;
            thread->exc_actions[i].flavor = new_flavor;
            thread->exc_actions[i].privileged = privileged;
        } else {
            old_port[i] = IP_NULL;
        }
    }

    thread_mtx_unlock(thread);

#if CONFIG_MACF
    mac_exc_free_label(new_label);
#endif

    while (--i >= FIRST_EXCEPTION) {
        if (IP_VALID(old_port[i])) {
            ipc_port_release_send(old_port[i]);
        }
    }

    if (IP_VALID(new_port)) {         /* consume send right */
        ipc_port_release_send(new_port);
    }

    *CountCnt = count;

    return KERN_SUCCESS;
}
kern_return_t
task_swap_exception_ports(
    task_t                      task,
    exception_mask_t            exception_mask,
    ipc_port_t                  new_port,
    exception_behavior_t        new_behavior,
    thread_state_flavor_t       new_flavor,
    exception_mask_array_t      masks,
    mach_msg_type_number_t      *CountCnt,
    exception_port_array_t      ports,
    exception_behavior_array_t  behaviors,
    thread_state_flavor_array_t flavors)
{
    ipc_port_t old_port[EXC_TYPES_COUNT];
    boolean_t privileged = current_task()->sec_token.val[0] == 0;
    unsigned int i, j, count;

#if CONFIG_MACF
    struct label *new_label;
#endif

    if (task == TASK_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    if (exception_mask & ~EXC_MASK_VALID) {
        return KERN_INVALID_ARGUMENT;
    }

    if (IP_VALID(new_port)) {
        switch (new_behavior & ~MACH_EXCEPTION_MASK) {
        case EXCEPTION_DEFAULT:
        case EXCEPTION_STATE:
        case EXCEPTION_STATE_IDENTITY:
            break;

        default:
            return KERN_INVALID_ARGUMENT;
        }
    }

    if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
        return KERN_INVALID_ARGUMENT;
    }

#if CONFIG_MACF
    new_label = mac_exc_create_label_for_current_proc();
#endif

    itk_lock(task);

    if (task->itk_self == IP_NULL) {
        itk_unlock(task);

        return KERN_FAILURE;
    }

    assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
    for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
        if ((exception_mask & (1 << i))
#if CONFIG_MACF
            && mac_exc_update_action_label(&task->exc_actions[i], new_label) == 0
#endif
            ) {
            for (j = 0; j < count; j++) {
                /*
                 * search for an identical entry, if found
                 * set corresponding mask for this exception.
                 */
                if (task->exc_actions[i].port == ports[j] &&
                    task->exc_actions[i].behavior == behaviors[j] &&
                    task->exc_actions[i].flavor == flavors[j]) {
                    masks[j] |= (1 << i);
                    break;
                }
            }

            if (j == count) {
                masks[j] = (1 << i);
                ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
                behaviors[j] = task->exc_actions[i].behavior;
                flavors[j] = task->exc_actions[i].flavor;
                ++count;
            }

            old_port[i] = task->exc_actions[i].port;

            task->exc_actions[i].port = ipc_port_copy_send(new_port);
            task->exc_actions[i].behavior = new_behavior;
            task->exc_actions[i].flavor = new_flavor;
            task->exc_actions[i].privileged = privileged;
        } else {
            old_port[i] = IP_NULL;
        }
    }

    itk_unlock(task);

#if CONFIG_MACF
    mac_exc_free_label(new_label);
#endif

    while (--i >= FIRST_EXCEPTION) {
        if (IP_VALID(old_port[i])) {
            ipc_port_release_send(old_port[i]);
        }
    }

    if (IP_VALID(new_port)) {         /* consume send right */
        ipc_port_release_send(new_port);
    }

    *CountCnt = count;

    return KERN_SUCCESS;
}
/*
 *  Routine:    thread/task_get_exception_ports [kernel call]
 *  Purpose:
 *      Clones a send right for each of the thread/task's exception
 *      ports specified in the mask and returns the behaviour
 *      and flavor of said port.
 *
 *      Returns up to [in] CountCnt elements.
 *  Conditions:
 *      Nothing locked.
 *  Returns:
 *      KERN_SUCCESS            Extracted a send right.
 *      KERN_INVALID_ARGUMENT   The thread is null,
 *                              Invalid special port,
 *                              Illegal mask bit set.
 *      KERN_FAILURE            The thread is dead.
 */
kern_return_t
thread_get_exception_ports(
    thread_t                    thread,
    exception_mask_t            exception_mask,
    exception_mask_array_t      masks,
    mach_msg_type_number_t      *CountCnt,
    exception_port_array_t      ports,
    exception_behavior_array_t  behaviors,
    thread_state_flavor_array_t flavors)
{
    unsigned int i, j, count;

    if (thread == THREAD_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    if (exception_mask & ~EXC_MASK_VALID) {
        return KERN_INVALID_ARGUMENT;
    }

    thread_mtx_lock(thread);

    if (!thread->active) {
        thread_mtx_unlock(thread);

        return KERN_FAILURE;
    }

    count = 0;

    if (thread->exc_actions == NULL) {
        goto done;
    }

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
        if (exception_mask & (1 << i)) {
            for (j = 0; j < count; ++j) {
                /*
                 * search for an identical entry, if found
                 * set corresponding mask for this exception.
                 */
                if (thread->exc_actions[i].port == ports[j] &&
                    thread->exc_actions[i].behavior == behaviors[j] &&
                    thread->exc_actions[i].flavor == flavors[j]) {
                    masks[j] |= (1 << i);
                    break;
                }
            }

            if (j == count) {
                masks[j] = (1 << i);
                ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);
                behaviors[j] = thread->exc_actions[i].behavior;
                flavors[j] = thread->exc_actions[i].flavor;
                ++count;

                if (count >= *CountCnt) {
                    break;
                }
            }
        }
    }

done:
    thread_mtx_unlock(thread);

    *CountCnt = count;

    return KERN_SUCCESS;
}
kern_return_t
task_get_exception_ports(
    task_t                      task,
    exception_mask_t            exception_mask,
    exception_mask_array_t      masks,
    mach_msg_type_number_t      *CountCnt,
    exception_port_array_t      ports,
    exception_behavior_array_t  behaviors,
    thread_state_flavor_array_t flavors)
{
    unsigned int i, j, count;

    if (task == TASK_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    if (exception_mask & ~EXC_MASK_VALID) {
        return KERN_INVALID_ARGUMENT;
    }

    itk_lock(task);

    if (task->itk_self == IP_NULL) {
        itk_unlock(task);

        return KERN_FAILURE;
    }

    count = 0;

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
        if (exception_mask & (1 << i)) {
            for (j = 0; j < count; ++j) {
                /*
                 * search for an identical entry, if found
                 * set corresponding mask for this exception.
                 */
                if (task->exc_actions[i].port == ports[j] &&
                    task->exc_actions[i].behavior == behaviors[j] &&
                    task->exc_actions[i].flavor == flavors[j]) {
                    masks[j] |= (1 << i);
                    break;
                }
            }

            if (j == count) {
                masks[j] = (1 << i);
                ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
                behaviors[j] = task->exc_actions[i].behavior;
                flavors[j] = task->exc_actions[i].flavor;
                ++count;

                if (count > *CountCnt) {
                    break;
                }
            }
        }
    }

    itk_unlock(task);

    *CountCnt = count;

    return KERN_SUCCESS;
}
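/*
 * Illustrative sketch (not from this file): saving the current task exception
 * handlers before installing new ones, so they can be restored later. Array
 * sizes use EXC_TYPES_COUNT as the upper bound, matching the kernel routines
 * above; the save/restore pattern itself is a hypothetical example.
 *
 *  exception_mask_t        masks[EXC_TYPES_COUNT];
 *  mach_msg_type_number_t  count = EXC_TYPES_COUNT;
 *  mach_port_t             old_ports[EXC_TYPES_COUNT];
 *  exception_behavior_t    behaviors[EXC_TYPES_COUNT];
 *  thread_state_flavor_t   flavors[EXC_TYPES_COUNT];
 *
 *  kern_return_t kr = task_get_exception_ports(mach_task_self(),
 *      EXC_MASK_ALL, masks, &count, old_ports, behaviors, flavors);
 *  // ... install a temporary handler, run, then re-install each saved entry:
 *  for (mach_msg_type_number_t i = 0; kr == KERN_SUCCESS && i < count; i++) {
 *      task_set_exception_ports(mach_task_self(), masks[i],
 *          old_ports[i], behaviors[i], flavors[i]);
 *  }
 */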