/*
 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
/*
 * Task and thread related IPC functions.
 */
#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mach_param.h>
#include <mach/task_special_ports.h>
#include <mach/thread_special_ports.h>
#include <mach/thread_status.h>
#include <mach/exception_types.h>
#include <mach/memory_object_types.h>
#include <mach/mach_traps.h>
#include <mach/task_server.h>
#include <mach/thread_act_server.h>
#include <mach/mach_host_server.h>
#include <mach/host_priv_server.h>
#include <mach/vm_map_server.h>

#include <kern/kern_types.h>
#include <kern/host.h>
#include <kern/ipc_kobject.h>
#include <kern/ipc_tt.h>
#include <kern/kalloc.h>
#include <kern/thread.h>
#include <kern/misc_protos.h>

#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>

#include <security/mac_mach_internal.h>
#if CONFIG_EMBEDDED && !SECURE_KERNEL
extern int cs_relax_platform_task_ports;
#endif
/* forward declarations */
task_t convert_port_to_locked_task(ipc_port_t port);
task_inspect_t convert_port_to_locked_task_inspect(ipc_port_t port);
static void ipc_port_bind_special_reply_port_locked(ipc_port_t port);
static kern_return_t ipc_port_unbind_special_reply_port(thread_t thread, boolean_t unbind_active_port);
kern_return_t task_conversion_eval(task_t caller, task_t victim);
/*
 *  Routine:    ipc_task_init
 *  Purpose:
 *      Initialize a task's IPC state.
 *
 *      If non-null, some state will be inherited from the parent.
 *      The parent must be appropriately initialized.
 */
    kr = ipc_space_create(&ipc_table_entries[0], &space);
    if (kr != KERN_SUCCESS) {
        panic("ipc_task_init");
    }

    space->is_task = task;

    kport = ipc_port_alloc_kernel();
    if (kport == IP_NULL) {
        panic("ipc_task_init");
    }

    nport = ipc_port_alloc_kernel();
    if (nport == IP_NULL) {
        panic("ipc_task_init");
    }

    task->itk_self = kport;
    task->itk_nself = nport;
    task->itk_resume = IP_NULL; /* Lazily allocated on-demand */
    if (task_is_a_corpse_fork(task)) {
        /*
         * A no-senders notification for a corpse would not
         * work with a naked send right in kernel.
         */
        task->itk_sself = IP_NULL;
    } else {
        task->itk_sself = ipc_port_make_send(kport);
    }
    task->itk_debug_control = IP_NULL;
    task->itk_space = space;

    task->exc_actions[0].label = NULL;
    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
        mac_exc_associate_action_label(&task->exc_actions[i], mac_exc_create_label());
    }

    /* always zero-out the first (unused) array element */
    bzero(&task->exc_actions[0], sizeof(task->exc_actions[0]));

    if (parent == TASK_NULL) {
        for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
            task->exc_actions[i].port = IP_NULL;
            task->exc_actions[i].flavor = 0;
            task->exc_actions[i].behavior = 0;
            task->exc_actions[i].privileged = FALSE;
        }

        kr = host_get_host_port(host_priv_self(), &port);
        assert(kr == KERN_SUCCESS);
        task->itk_host = port;

        task->itk_bootstrap = IP_NULL;
        task->itk_seatbelt = IP_NULL;
        task->itk_gssd = IP_NULL;
        task->itk_task_access = IP_NULL;

        for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
            task->itk_registered[i] = IP_NULL;
        }
    } else {
        assert(parent->itk_self != IP_NULL);

        /* inherit registered ports */

        for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
            task->itk_registered[i] =
                ipc_port_copy_send(parent->itk_registered[i]);
        }

        /* inherit exception and bootstrap ports */

        for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
            task->exc_actions[i].port =
                ipc_port_copy_send(parent->exc_actions[i].port);
            task->exc_actions[i].flavor =
                parent->exc_actions[i].flavor;
            task->exc_actions[i].behavior =
                parent->exc_actions[i].behavior;
            task->exc_actions[i].privileged =
                parent->exc_actions[i].privileged;
            mac_exc_inherit_action_label(parent->exc_actions + i, task->exc_actions + i);
        }

        task->itk_host =
            ipc_port_copy_send(parent->itk_host);

        task->itk_bootstrap =
            ipc_port_copy_send(parent->itk_bootstrap);

        task->itk_seatbelt =
            ipc_port_copy_send(parent->itk_seatbelt);

        task->itk_gssd =
            ipc_port_copy_send(parent->itk_gssd);

        task->itk_task_access =
            ipc_port_copy_send(parent->itk_task_access);
    }
/*
 *  Routine:    ipc_task_enable
 *  Purpose:
 *      Enable a task for IPC access.
 */

    kport = task->itk_self;
    if (kport != IP_NULL) {
        ipc_kobject_set(kport, (ipc_kobject_t) task, IKOT_TASK);
    }
    nport = task->itk_nself;
    if (nport != IP_NULL) {
        ipc_kobject_set(nport, (ipc_kobject_t) task, IKOT_TASK_NAME);
    }
/*
 *  Routine:    ipc_task_disable
 *  Purpose:
 *      Disable IPC access to a task.
 */

    kport = task->itk_self;
    if (kport != IP_NULL) {
        ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
    }
    nport = task->itk_nself;
    if (nport != IP_NULL) {
        ipc_kobject_set(nport, IKO_NULL, IKOT_NONE);
    }

    rport = task->itk_resume;
    if (rport != IP_NULL) {
        /*
         * From this point onwards this task is no longer accepting
         * new suspensions.
         *
         * There are still outstanding suspensions on this task,
         * even as it is being torn down. Disconnect the task
         * from the rport, thereby "orphaning" the rport. The rport
         * itself will go away only when the last suspension holder
         * destroys his SO right to it -- when he either
         * exits, or tries to actually use that last SO right to
         * resume this (now non-existent) task.
         */
        ipc_kobject_set(rport, IKO_NULL, IKOT_NONE);
    }
/*
 *  Routine:    ipc_task_terminate
 *  Purpose:
 *      Clean up and destroy a task's IPC state.
 *  Conditions:
 *      Nothing locked. The task must be suspended.
 *      (Or the current thread must be in the task.)
 */

    kport = task->itk_self;

    if (kport == IP_NULL) {
        /* the task is already terminated (can this happen?) */
        return;
    }

    task->itk_self = IP_NULL;

    nport = task->itk_nself;
    assert(nport != IP_NULL);
    task->itk_nself = IP_NULL;

    rport = task->itk_resume;
    task->itk_resume = IP_NULL;

    /* release the naked send rights */

    if (IP_VALID(task->itk_sself)) {
        ipc_port_release_send(task->itk_sself);
    }

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
        if (IP_VALID(task->exc_actions[i].port)) {
            ipc_port_release_send(task->exc_actions[i].port);
        }
        mac_exc_free_action_label(task->exc_actions + i);
    }

    if (IP_VALID(task->itk_host)) {
        ipc_port_release_send(task->itk_host);
    }

    if (IP_VALID(task->itk_bootstrap)) {
        ipc_port_release_send(task->itk_bootstrap);
    }

    if (IP_VALID(task->itk_seatbelt)) {
        ipc_port_release_send(task->itk_seatbelt);
    }

    if (IP_VALID(task->itk_gssd)) {
        ipc_port_release_send(task->itk_gssd);
    }

    if (IP_VALID(task->itk_task_access)) {
        ipc_port_release_send(task->itk_task_access);
    }

    if (IP_VALID(task->itk_debug_control)) {
        ipc_port_release_send(task->itk_debug_control);
    }

    for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
        if (IP_VALID(task->itk_registered[i])) {
            ipc_port_release_send(task->itk_registered[i]);
        }
    }

    /* destroy the kernel ports */
    ipc_port_dealloc_kernel(kport);
    ipc_port_dealloc_kernel(nport);
    if (rport != IP_NULL) {
        ipc_port_dealloc_kernel(rport);
    }

    itk_lock_destroy(task);
/*
 *  Routine:    ipc_task_reset
 *  Purpose:
 *      Reset a task's IPC state to protect it when
 *      it enters an elevated security context. The
 *      task name port can remain the same - since
 *      it represents no specific privilege.
 *  Conditions:
 *      Nothing locked. The task must be suspended.
 *      (Or the current thread must be in the task.)
 */

    ipc_port_t old_kport, new_kport;
    ipc_port_t old_sself;
    ipc_port_t old_exc_actions[EXC_TYPES_COUNT];

    /* Fresh label to unset credentials in existing labels. */
    struct label *unset_label = mac_exc_create_label();

    new_kport = ipc_port_alloc_kernel();
    if (new_kport == IP_NULL) {
        panic("ipc_task_reset");
    }

    old_kport = task->itk_self;

    if (old_kport == IP_NULL) {
        /* the task is already terminated (can this happen?) */
        ipc_port_dealloc_kernel(new_kport);
        mac_exc_free_label(unset_label);
        return;
    }

    task->itk_self = new_kport;
    old_sself = task->itk_sself;
    task->itk_sself = ipc_port_make_send(new_kport);

    /* Set the old kport to IKOT_NONE and update the exec token while under the port lock */
    ipc_kobject_set_atomically(old_kport, IKO_NULL, IKOT_NONE);
    task->exec_token += 1;
    ip_unlock(old_kport);

    ipc_kobject_set(new_kport, (ipc_kobject_t) task, IKOT_TASK);

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
        old_exc_actions[i] = IP_NULL;

        if (i == EXC_CORPSE_NOTIFY && task_corpse_pending_report(task)) {
            continue;
        }

        if (!task->exc_actions[i].privileged) {
            mac_exc_update_action_label(task->exc_actions + i, unset_label);
            old_exc_actions[i] = task->exc_actions[i].port;
            task->exc_actions[i].port = IP_NULL;
        }
    }

    if (IP_VALID(task->itk_debug_control)) {
        ipc_port_release_send(task->itk_debug_control);
    }
    task->itk_debug_control = IP_NULL;

    mac_exc_free_label(unset_label);

    /* release the naked send rights */

    if (IP_VALID(old_sself)) {
        ipc_port_release_send(old_sself);
    }

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
        if (IP_VALID(old_exc_actions[i])) {
            ipc_port_release_send(old_exc_actions[i]);
        }
    }

    /* destroy the kernel port */
    ipc_port_dealloc_kernel(old_kport);
/*
 *  Routine:    ipc_thread_init
 *  Purpose:
 *      Initialize a thread's IPC state.
 */

    kport = ipc_port_alloc_kernel();
    if (kport == IP_NULL) {
        panic("ipc_thread_init");
    }

    thread->ith_self = kport;
    thread->ith_sself = ipc_port_make_send(kport);
    thread->ith_special_reply_port = NULL;
    thread->exc_actions = NULL;

    ipc_kobject_set(kport, (ipc_kobject_t)thread, IKOT_THREAD);

#if IMPORTANCE_INHERITANCE
    thread->ith_assertions = 0;
#endif

    ipc_kmsg_queue_init(&thread->ith_messages);

    thread->ith_rpc_reply = IP_NULL;

void
ipc_thread_init_exc_actions(
    thread_t thread)
{
    assert(thread->exc_actions == NULL);

    thread->exc_actions = kalloc(sizeof(struct exception_action) * EXC_TYPES_COUNT);
    bzero(thread->exc_actions, sizeof(struct exception_action) * EXC_TYPES_COUNT);

    for (size_t i = 0; i < EXC_TYPES_COUNT; ++i) {
        mac_exc_associate_action_label(thread->exc_actions + i, mac_exc_create_label());
    }
}
void
ipc_thread_destroy_exc_actions(
    thread_t thread)
{
    if (thread->exc_actions != NULL) {
        for (size_t i = 0; i < EXC_TYPES_COUNT; ++i) {
            mac_exc_free_action_label(thread->exc_actions + i);
        }

        kfree(thread->exc_actions,
            sizeof(struct exception_action) * EXC_TYPES_COUNT);
        thread->exc_actions = NULL;
    }
}
    ipc_port_t kport = thread->ith_self;

    if (kport != IP_NULL) {
        ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
    }
/*
 *  Routine:    ipc_thread_terminate
 *  Purpose:
 *      Clean up and destroy a thread's IPC state.
 */

void
ipc_thread_terminate(
    thread_t thread)
{
    ipc_port_t kport = thread->ith_self;

    if (kport != IP_NULL) {
        if (IP_VALID(thread->ith_sself)) {
            ipc_port_release_send(thread->ith_sself);
        }

        thread->ith_sself = thread->ith_self = IP_NULL;

        if (thread->exc_actions != NULL) {
            for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
                if (IP_VALID(thread->exc_actions[i].port)) {
                    ipc_port_release_send(thread->exc_actions[i].port);
                }
            }
            ipc_thread_destroy_exc_actions(thread);
        }

        ipc_port_dealloc_kernel(kport);
    }

#if IMPORTANCE_INHERITANCE
    assert(thread->ith_assertions == 0);
#endif

    /* unbind the thread special reply port */
    if (IP_VALID(thread->ith_special_reply_port)) {
        ipc_port_unbind_special_reply_port(thread, TRUE);
    }

    assert(ipc_kmsg_queue_empty(&thread->ith_messages));

    if (thread->ith_rpc_reply != IP_NULL) {
        ipc_port_dealloc_reply(thread->ith_rpc_reply);
    }

    thread->ith_rpc_reply = IP_NULL;
}
/*
 *  Routine:    ipc_thread_reset
 *  Purpose:
 *      Reset the IPC state for a given Mach thread when
 *      its task enters an elevated security context.
 *      Both the thread port and its exception ports have
 *      to be reset. Its RPC reply port cannot have any
 *      rights outstanding, so it should be fine.
 */

    ipc_port_t old_kport, new_kport;
    ipc_port_t old_sself;
    ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
    boolean_t has_old_exc_actions = FALSE;

    struct label *new_label = mac_exc_create_label();

    new_kport = ipc_port_alloc_kernel();
    if (new_kport == IP_NULL) {
        panic("ipc_task_reset");
    }

    thread_mtx_lock(thread);

    old_kport = thread->ith_self;

    if (old_kport == IP_NULL && thread->inspection == FALSE) {
        /* the thread is already terminated (can this happen?) */
        thread_mtx_unlock(thread);
        ipc_port_dealloc_kernel(new_kport);
        mac_exc_free_label(new_label);
        return;
    }

    thread->ith_self = new_kport;
    old_sself = thread->ith_sself;
    thread->ith_sself = ipc_port_make_send(new_kport);
    if (old_kport != IP_NULL) {
        ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
    }
    ipc_kobject_set(new_kport, (ipc_kobject_t) thread, IKOT_THREAD);

    /*
     * Only ports that were set by root-owned processes
     * (privileged ports) should survive
     */
    if (thread->exc_actions != NULL) {
        has_old_exc_actions = TRUE;
        for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
            if (thread->exc_actions[i].privileged) {
                old_exc_actions[i] = IP_NULL;
            } else {
                mac_exc_update_action_label(thread->exc_actions + i, new_label);
                old_exc_actions[i] = thread->exc_actions[i].port;
                thread->exc_actions[i].port = IP_NULL;
            }
        }
    }

    thread_mtx_unlock(thread);

    mac_exc_free_label(new_label);

    /* release the naked send rights */

    if (IP_VALID(old_sself)) {
        ipc_port_release_send(old_sself);
    }

    if (has_old_exc_actions) {
        for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
            ipc_port_release_send(old_exc_actions[i]);
        }
    }

    /* destroy the kernel port */
    if (old_kport != IP_NULL) {
        ipc_port_dealloc_kernel(old_kport);
    }

    /* unbind the thread special reply port */
    if (IP_VALID(thread->ith_special_reply_port)) {
        ipc_port_unbind_special_reply_port(thread, TRUE);
    }
/*
 *  Routine:    retrieve_task_self_fast
 *  Purpose:
 *      Optimized version of retrieve_task_self,
 *      that only works for the current task.
 *
 *      Return a send right (possibly null/dead)
 *      for the task's user-visible self port.
 */

ipc_port_t
retrieve_task_self_fast(
    task_t task)
{
    ipc_port_t port;

    assert(task == current_task());

    itk_lock(task);
    assert(task->itk_self != IP_NULL);

    if ((port = task->itk_sself) == task->itk_self) {
        /* no interposing; hand out a naked send right for the self port */
        assert(ip_active(port));
        ip_reference(port);
        port->ip_srights++;
    } else {
        port = ipc_port_copy_send(port);
    }
    itk_unlock(task);

    return port;
}
/*
 *  Routine:    retrieve_thread_self_fast
 *  Purpose:
 *      Return a send right (possibly null/dead)
 *      for the thread's user-visible self port.
 *
 *      Only works for the current thread.
 */

ipc_port_t
retrieve_thread_self_fast(
    thread_t thread)
{
    ipc_port_t port;

    assert(thread == current_thread());

    thread_mtx_lock(thread);

    assert(thread->ith_self != IP_NULL);

    if ((port = thread->ith_sself) == thread->ith_self) {
        /* no interposing; hand out a naked send right for the self port */
        assert(ip_active(port));
        ip_reference(port);
        port->ip_srights++;
    } else {
        port = ipc_port_copy_send(port);
    }

    thread_mtx_unlock(thread);

    return port;
}
/*
 *  Routine:    task_self_trap [mach trap]
 *  Purpose:
 *      Give the caller send rights for his own task port.
 *  Returns:
 *      MACH_PORT_NULL if there are any resource failures
 *      or other errors.
 */

mach_port_name_t
task_self_trap(
    __unused struct task_self_trap_args *args)
{
    task_t task = current_task();
    ipc_port_t sright;
    mach_port_name_t name;

    sright = retrieve_task_self_fast(task);
    name = ipc_port_copyout_send(sright, task->itk_space);
    return name;
}
/*
 *  Routine:    thread_self_trap [mach trap]
 *  Purpose:
 *      Give the caller send rights for his own thread port.
 *  Returns:
 *      MACH_PORT_NULL if there are any resource failures
 *      or other errors.
 */

mach_port_name_t
thread_self_trap(
    __unused struct thread_self_trap_args *args)
{
    thread_t thread = current_thread();
    task_t task = thread->task;
    ipc_port_t sright;
    mach_port_name_t name;

    sright = retrieve_thread_self_fast(thread);
    name = ipc_port_copyout_send(sright, task->itk_space);
    return name;
}
/*
 *  Routine:    mach_reply_port [mach trap]
 *  Purpose:
 *      Allocate a port for the caller.
 *  Returns:
 *      MACH_PORT_NULL if there are any resource failures
 *      or other errors.
 */

mach_port_name_t
mach_reply_port(
    __unused struct mach_reply_port_args *args)
{
    ipc_port_t port;
    mach_port_name_t name;
    kern_return_t kr;

    kr = ipc_port_alloc(current_task()->itk_space, &name, &port);
    if (kr == KERN_SUCCESS) {
        ip_unlock(port);
    } else {
        name = MACH_PORT_NULL;
    }
    return name;
}
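/*
 * Illustrative user-space sketch (not part of this file; assumes the
 * mach_reply_port() trap wrapper visible through <mach/mach.h>). A caller
 * allocates a receive right to use as the msgh_local_port of a request and
 * tears it down afterwards:
 *
 *      #include <mach/mach.h>
 *
 *      mach_port_t reply = mach_reply_port();
 *      if (reply == MACH_PORT_NULL) {
 *          // resource failure
 *      }
 *      // ... send a request with msgh_local_port = reply, then receive ...
 *      mach_port_mod_refs(mach_task_self(), reply, MACH_PORT_RIGHT_RECEIVE, -1);
 */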
/*
 *  Routine:    thread_get_special_reply_port [mach trap]
 *  Purpose:
 *      Allocate a special reply port for the calling thread.
 *  Returns:
 *      mach_port_name_t: send right & receive right for special reply port.
 *      MACH_PORT_NULL if there are any resource failures
 *      or other errors.
 */

mach_port_name_t
thread_get_special_reply_port(
    __unused struct thread_get_special_reply_port_args *args)
{
    ipc_port_t port;
    mach_port_name_t name;
    mach_port_name_t send_name;
    kern_return_t kr;
    thread_t thread = current_thread();

    /* unbind the thread special reply port */
    if (IP_VALID(thread->ith_special_reply_port)) {
        kr = ipc_port_unbind_special_reply_port(thread, TRUE);
        if (kr != KERN_SUCCESS) {
            return MACH_PORT_NULL;
        }
    }

    kr = ipc_port_alloc(current_task()->itk_space, &name, &port);
    if (kr == KERN_SUCCESS) {
        ipc_port_bind_special_reply_port_locked(port);

        /* Make a send right and insert it in the space at specified name */
        ipc_port_make_send_locked(port);
        ip_unlock(port);
        send_name = ipc_port_copyout_name_send(port, current_task()->itk_space, name);
        /*
         * If insertion of send right failed, userland is doing something bad, error out.
         * The space was marked inactive or the receive right just inserted above at the
         * given name was moved, in either case do not try to deallocate the receive right.
         */
        if (send_name == MACH_PORT_NULL || send_name == MACH_PORT_DEAD) {
            if (IP_VALID(thread->ith_special_reply_port)) {
                ipc_port_unbind_special_reply_port(thread, TRUE);
            }
            name = MACH_PORT_NULL;
        }
    } else {
        name = MACH_PORT_NULL;
    }
    return name;
}
/*
 *  Routine:    ipc_port_bind_special_reply_port_locked
 *  Purpose:
 *      Bind the given port to current thread as a special reply port.
 */

static void
ipc_port_bind_special_reply_port_locked(
    ipc_port_t port)
{
    thread_t thread = current_thread();
    assert(thread->ith_special_reply_port == NULL);

    thread->ith_special_reply_port = port;
    port->ip_specialreply = 1;
    port->ip_sync_link_state = PORT_SYNC_LINK_ANY;

    reset_ip_srp_bits(port);
}
/*
 *  Routine:    ipc_port_unbind_special_reply_port
 *  Purpose:
 *      Unbind the thread's special reply port.
 *      If the special port has threads waiting on turnstile,
 *      update its inheritor.
 */

static kern_return_t
ipc_port_unbind_special_reply_port(
    thread_t thread,
    boolean_t unbind_active_port)
{
    ipc_port_t special_reply_port = thread->ith_special_reply_port;

    ip_lock(special_reply_port);

    /* Return error if port active and unbind_active_port set to FALSE */
    if (unbind_active_port == FALSE && ip_active(special_reply_port)) {
        ip_unlock(special_reply_port);
        return KERN_FAILURE;
    }

    thread->ith_special_reply_port = NULL;
    ipc_port_adjust_special_reply_port_locked(special_reply_port, NULL,
        IPC_PORT_ADJUST_SR_CLEAR_SPECIAL_REPLY, FALSE);

    ip_release(special_reply_port);
    return KERN_SUCCESS;
}
/*
 *  Routine:    thread_get_special_port [kernel call]
 *  Purpose:
 *      Clones a send right for one of the thread's
 *      special ports.
 *  Returns:
 *      KERN_SUCCESS            Extracted a send right.
 *      KERN_INVALID_ARGUMENT   The thread is null.
 *      KERN_FAILURE            The thread is dead.
 *      KERN_INVALID_ARGUMENT   Invalid special port.
 */

kern_return_t
thread_get_special_port(
    thread_t    thread,
    int         which,
    ipc_port_t  *portp)
{
    kern_return_t result = KERN_SUCCESS;
    ipc_port_t *whichp;

    if (thread == THREAD_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    switch (which) {
    case THREAD_KERNEL_PORT:
        whichp = &thread->ith_sself;
        break;

    default:
        return KERN_INVALID_ARGUMENT;
    }

    thread_mtx_lock(thread);

    if (thread->active) {
        *portp = ipc_port_copy_send(*whichp);
    } else {
        result = KERN_FAILURE;
    }

    thread_mtx_unlock(thread);

    return result;
}
/*
 *  Routine:    thread_set_special_port [kernel call]
 *  Purpose:
 *      Changes one of the thread's special ports,
 *      setting it to the supplied send right.
 *  Conditions:
 *      Nothing locked. If successful, consumes
 *      the supplied send right.
 *  Returns:
 *      KERN_SUCCESS            Changed the special port.
 *      KERN_INVALID_ARGUMENT   The thread is null.
 *      KERN_FAILURE            The thread is dead.
 *      KERN_INVALID_ARGUMENT   Invalid special port.
 */

kern_return_t
thread_set_special_port(
    thread_t    thread,
    int         which,
    ipc_port_t  port)
{
    kern_return_t result = KERN_SUCCESS;
    ipc_port_t *whichp, old = IP_NULL;

    if (thread == THREAD_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    switch (which) {
    case THREAD_KERNEL_PORT:
        whichp = &thread->ith_sself;
        break;

    default:
        return KERN_INVALID_ARGUMENT;
    }

    thread_mtx_lock(thread);

    if (thread->active) {
        old = *whichp;
        *whichp = port;
    } else {
        result = KERN_FAILURE;
    }

    thread_mtx_unlock(thread);

    if (IP_VALID(old)) {
        ipc_port_release_send(old);
    }

    return result;
}
/*
 *  Routine:    task_get_special_port [kernel call]
 *  Purpose:
 *      Clones a send right for one of the task's
 *      special ports.
 *  Returns:
 *      KERN_SUCCESS            Extracted a send right.
 *      KERN_INVALID_ARGUMENT   The task is null.
 *      KERN_FAILURE            The task/space is dead.
 *      KERN_INVALID_ARGUMENT   Invalid special port.
 */

kern_return_t
task_get_special_port(
    task_t      task,
    int         which,
    ipc_port_t  *portp)
{
    ipc_port_t port;

    if (task == TASK_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    itk_lock(task);
    if (task->itk_self == IP_NULL) {
        itk_unlock(task);
        return KERN_FAILURE;
    }

    switch (which) {
    case TASK_KERNEL_PORT:
        port = ipc_port_copy_send(task->itk_sself);
        break;

    case TASK_NAME_PORT:
        port = ipc_port_make_send(task->itk_nself);
        break;

    case TASK_HOST_PORT:
        port = ipc_port_copy_send(task->itk_host);
        break;

    case TASK_BOOTSTRAP_PORT:
        port = ipc_port_copy_send(task->itk_bootstrap);
        break;

    case TASK_SEATBELT_PORT:
        port = ipc_port_copy_send(task->itk_seatbelt);
        break;

    case TASK_ACCESS_PORT:
        port = ipc_port_copy_send(task->itk_task_access);
        break;

    case TASK_DEBUG_CONTROL_PORT:
        port = ipc_port_copy_send(task->itk_debug_control);
        break;

    default:
        itk_unlock(task);
        return KERN_INVALID_ARGUMENT;
    }
    itk_unlock(task);

    *portp = port;
    return KERN_SUCCESS;
}
/*
 *  Routine:    task_set_special_port [kernel call]
 *  Purpose:
 *      Changes one of the task's special ports,
 *      setting it to the supplied send right.
 *  Conditions:
 *      Nothing locked. If successful, consumes
 *      the supplied send right.
 *  Returns:
 *      KERN_SUCCESS            Changed the special port.
 *      KERN_INVALID_ARGUMENT   The task is null.
 *      KERN_FAILURE            The task/space is dead.
 *      KERN_INVALID_ARGUMENT   Invalid special port.
 *      KERN_NO_ACCESS          Attempted overwrite of seatbelt port.
 */

kern_return_t
task_set_special_port(
    task_t      task,
    int         which,
    ipc_port_t  port)
{
    ipc_port_t *whichp;
    ipc_port_t old;

    if (task == TASK_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    switch (which) {
    case TASK_KERNEL_PORT:
        whichp = &task->itk_sself;
        break;

    case TASK_HOST_PORT:
        whichp = &task->itk_host;
        break;

    case TASK_BOOTSTRAP_PORT:
        whichp = &task->itk_bootstrap;
        break;

    case TASK_SEATBELT_PORT:
        whichp = &task->itk_seatbelt;
        break;

    case TASK_ACCESS_PORT:
        whichp = &task->itk_task_access;
        break;

    case TASK_DEBUG_CONTROL_PORT:
        whichp = &task->itk_debug_control;
        break;

    default:
        return KERN_INVALID_ARGUMENT;
    }

    itk_lock(task);
    if (task->itk_self == IP_NULL) {
        itk_unlock(task);
        return KERN_FAILURE;
    }

    /* do not allow overwrite of seatbelt or task access ports */
    if ((TASK_SEATBELT_PORT == which || TASK_ACCESS_PORT == which)
        && IP_VALID(*whichp)) {
        itk_unlock(task);
        return KERN_NO_ACCESS;
    }

    old = *whichp;
    *whichp = port;
    itk_unlock(task);

    if (IP_VALID(old)) {
        ipc_port_release_send(old);
    }
    return KERN_SUCCESS;
}
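/*
 * Illustrative user-space sketch (not part of this file). task_get_special_port()
 * and task_set_special_port() are the MIG-generated callers of the two routines
 * above; `child_task` and `new_bootstrap` below are hypothetical rights already
 * held by the caller:
 *
 *      #include <mach/mach.h>
 *      #include <mach/task_special_ports.h>
 *
 *      mach_port_t bootstrap = MACH_PORT_NULL;
 *      kern_return_t kr;
 *
 *      kr = task_get_special_port(mach_task_self(), TASK_BOOTSTRAP_PORT, &bootstrap);
 *      // on success, `bootstrap` is a send right the caller owns
 *
 *      kr = task_set_special_port(child_task, TASK_BOOTSTRAP_PORT, new_bootstrap);
 *      // on success the supplied send right is consumed; note that
 *      // TASK_SEATBELT_PORT / TASK_ACCESS_PORT cannot be overwritten (KERN_NO_ACCESS)
 */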
/*
 *  Routine:    mach_ports_register [kernel call]
 *  Purpose:
 *      Stash a handful of port send rights in the task.
 *      Child tasks will inherit these rights, but they
 *      must use mach_ports_lookup to acquire them.
 *
 *      The rights are supplied in a (wired) kalloc'd segment.
 *      Rights which aren't supplied are assumed to be null.
 *  Conditions:
 *      Nothing locked. If successful, consumes
 *      the supplied rights and memory.
 *  Returns:
 *      KERN_SUCCESS            Stashed the port rights.
 *      KERN_INVALID_ARGUMENT   The task is null.
 *      KERN_INVALID_ARGUMENT   The task is dead.
 *      KERN_INVALID_ARGUMENT   The memory param is null.
 *      KERN_INVALID_ARGUMENT   Too many port rights supplied.
 */

kern_return_t
mach_ports_register(
    task_t                  task,
    mach_port_array_t       memory,
    mach_msg_type_number_t  portsCnt)
{
    ipc_port_t ports[TASK_PORT_REGISTER_MAX];
    unsigned int i;

    if ((task == TASK_NULL) ||
        (portsCnt > TASK_PORT_REGISTER_MAX) ||
        (portsCnt && memory == NULL)) {
        return KERN_INVALID_ARGUMENT;
    }

    /*
     * Pad the port rights with nulls.
     */

    for (i = 0; i < portsCnt; i++) {
        ports[i] = memory[i];
    }
    for (; i < TASK_PORT_REGISTER_MAX; i++) {
        ports[i] = IP_NULL;
    }

    itk_lock(task);
    if (task->itk_self == IP_NULL) {
        itk_unlock(task);
        return KERN_INVALID_ARGUMENT;
    }

    /*
     * Replace the old send rights with the new.
     * Release the old rights after unlocking.
     */

    for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
        ipc_port_t old;

        old = task->itk_registered[i];
        task->itk_registered[i] = ports[i];
        ports[i] = old;
    }
    itk_unlock(task);

    for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
        if (IP_VALID(ports[i])) {
            ipc_port_release_send(ports[i]);
        }
    }

    /*
     * Now that the operation is known to be successful,
     * we can free the memory.
     */

    if (portsCnt != 0) {
        kfree(memory,
            (vm_size_t) (portsCnt * sizeof(mach_port_t)));
    }

    return KERN_SUCCESS;
}
/*
 *  Routine:    mach_ports_lookup [kernel call]
 *  Purpose:
 *      Retrieves (clones) the stashed port send rights.
 *  Conditions:
 *      Nothing locked. If successful, the caller gets
 *      rights and memory.
 *  Returns:
 *      KERN_SUCCESS            Retrieved the send rights.
 *      KERN_INVALID_ARGUMENT   The task is null.
 *      KERN_INVALID_ARGUMENT   The task is dead.
 *      KERN_RESOURCE_SHORTAGE  Couldn't allocate memory.
 */

kern_return_t
mach_ports_lookup(
    task_t                  task,
    mach_port_array_t       *portsp,
    mach_msg_type_number_t  *portsCnt)
{
    void *memory;
    vm_size_t size;
    ipc_port_t *ports;
    unsigned int i;

    if (task == TASK_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    size = (vm_size_t) (TASK_PORT_REGISTER_MAX * sizeof(ipc_port_t));

    memory = kalloc(size);
    if (memory == 0) {
        return KERN_RESOURCE_SHORTAGE;
    }

    itk_lock(task);
    if (task->itk_self == IP_NULL) {
        itk_unlock(task);

        kfree(memory, size);
        return KERN_INVALID_ARGUMENT;
    }

    ports = (ipc_port_t *) memory;

    /*
     * Clone port rights. Because kalloc'd memory
     * is wired, we won't fault while holding the task lock.
     */

    for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
        ports[i] = ipc_port_copy_send(task->itk_registered[i]);
    }
    itk_unlock(task);

    *portsp = (mach_port_array_t) ports;
    *portsCnt = TASK_PORT_REGISTER_MAX;
    return KERN_SUCCESS;
}
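/*
 * Illustrative user-space sketch (not part of this file), pairing the two
 * calls above. `some_port` is a hypothetical send right held by the caller:
 *
 *      #include <mach/mach.h>
 *
 *      mach_port_t init_ports[1] = { some_port };
 *      kern_return_t kr;
 *
 *      kr = mach_ports_register(mach_task_self(), init_ports, 1);
 *      // on success the supplied rights are consumed
 *
 *      mach_port_array_t ports = NULL;
 *      mach_msg_type_number_t count = 0;
 *      kr = mach_ports_lookup(mach_task_self(), &ports, &count);
 *      // count == TASK_PORT_REGISTER_MAX; unused slots are MACH_PORT_NULL.
 *      // `ports` is out-of-line memory; release it with vm_deallocate()
 *      // and deallocate the returned rights when done.
 */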
kern_return_t
task_conversion_eval(task_t caller, task_t victim)
{
    /*
     * Tasks are allowed to resolve their own task ports, and the kernel is
     * allowed to resolve anyone's task port.
     */
    if (caller == kernel_task) {
        return KERN_SUCCESS;
    }

    if (caller == victim) {
        return KERN_SUCCESS;
    }

    /*
     * Only the kernel can resolve the kernel's task port. We've established
     * by this point that the caller is not kernel_task.
     */
    if (victim == TASK_NULL || victim == kernel_task) {
        return KERN_INVALID_SECURITY;
    }

#if CONFIG_EMBEDDED
    /*
     * On embedded platforms, only a platform binary can resolve the task port
     * of another platform binary.
     */
    if ((victim->t_flags & TF_PLATFORM) && !(caller->t_flags & TF_PLATFORM)) {
#if SECURE_KERNEL
        return KERN_INVALID_SECURITY;
#else
        if (cs_relax_platform_task_ports) {
            return KERN_SUCCESS;
        } else {
            return KERN_INVALID_SECURITY;
        }
#endif /* SECURE_KERNEL */
    }
#endif /* CONFIG_EMBEDDED */

    return KERN_SUCCESS;
}
/*
 *  Routine:    convert_port_to_locked_task
 *  Purpose:
 *      Internal helper routine to convert from a port to a locked
 *      task. Used by several routines that try to convert from a
 *      task port to a reference on some task related object.
 *  Conditions:
 *      Nothing locked, blocking OK.
 */

task_t
convert_port_to_locked_task(ipc_port_t port)
{
    int try_failed_count = 0;

    while (IP_VALID(port)) {
        task_t ct = current_task();
        task_t task;

        ip_lock(port);
        if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK)) {
            ip_unlock(port);
            return TASK_NULL;
        }
        task = (task_t) port->ip_kobject;
        assert(task != TASK_NULL);

        if (task_conversion_eval(ct, task)) {
            ip_unlock(port);
            return TASK_NULL;
        }

        /*
         * Normal lock ordering puts task_lock() before ip_lock().
         * Attempt out-of-order locking here.
         */
        if (task_lock_try(task)) {
            ip_unlock(port);
            return task;
        }
        try_failed_count++;

        ip_unlock(port);
        mutex_pause(try_failed_count);
    }
    return TASK_NULL;
}
/*
 *  Routine:    convert_port_to_locked_task_inspect
 *  Purpose:
 *      Internal helper routine to convert from a port to a locked
 *      task inspect right. Used by internal routines that try to convert from a
 *      task inspect port to a reference on some task related object.
 *  Conditions:
 *      Nothing locked, blocking OK.
 */

task_inspect_t
convert_port_to_locked_task_inspect(ipc_port_t port)
{
    int try_failed_count = 0;

    while (IP_VALID(port)) {
        task_inspect_t task;

        ip_lock(port);
        if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK)) {
            ip_unlock(port);
            return TASK_INSPECT_NULL;
        }
        task = (task_inspect_t)port->ip_kobject;
        assert(task != TASK_INSPECT_NULL);
        /*
         * Normal lock ordering puts task_lock() before ip_lock().
         * Attempt out-of-order locking here.
         */
        if (task_lock_try((task_t)task)) {
            ip_unlock(port);
            return task;
        }
        try_failed_count++;

        ip_unlock(port);
        mutex_pause(try_failed_count);
    }
    return TASK_INSPECT_NULL;
}
/*
 *  Routine:    convert_port_to_task
 *  Purpose:
 *      Convert from a port to a task.
 *      Doesn't consume the port ref; produces a task ref,
 *      which may be null.
 */

task_t
convert_port_to_task(
    ipc_port_t port)
{
    return convert_port_to_task_with_exec_token(port, NULL);
}
/*
 *  Routine:    convert_port_to_task_with_exec_token
 *  Purpose:
 *      Convert from a port to a task and return
 *      the exec token stored in the task.
 *      Doesn't consume the port ref; produces a task ref,
 *      which may be null.
 */

task_t
convert_port_to_task_with_exec_token(
    ipc_port_t  port,
    uint32_t    *exec_token)
{
    task_t task = TASK_NULL;

    if (IP_VALID(port)) {
        ip_lock(port);

        if (ip_active(port) &&
            ip_kotype(port) == IKOT_TASK) {
            task_t ct = current_task();
            task = (task_t)port->ip_kobject;
            assert(task != TASK_NULL);

            if (task_conversion_eval(ct, task)) {
                ip_unlock(port);
                return TASK_NULL;
            }

            if (exec_token) {
                *exec_token = task->exec_token;
            }
            task_reference_internal(task);
        }
        ip_unlock(port);
    }

    return task;
}
/*
 *  Routine:    convert_port_to_task_name
 *  Purpose:
 *      Convert from a port to a task name.
 *      Doesn't consume the port ref; produces a task name ref,
 *      which may be null.
 */

task_name_t
convert_port_to_task_name(
    ipc_port_t port)
{
    task_name_t task = TASK_NULL;

    if (IP_VALID(port)) {
        ip_lock(port);

        if (ip_active(port) &&
            (ip_kotype(port) == IKOT_TASK ||
            ip_kotype(port) == IKOT_TASK_NAME)) {
            task = (task_name_t)port->ip_kobject;
            assert(task != TASK_NAME_NULL);

            task_reference_internal(task);
        }
        ip_unlock(port);
    }

    return task;
}
/*
 *  Routine:    convert_port_to_task_inspect
 *  Purpose:
 *      Convert from a port to a task inspection right.
 *      Doesn't consume the port ref; produces a task ref,
 *      which may be null.
 */

task_inspect_t
convert_port_to_task_inspect(
    ipc_port_t port)
{
    task_inspect_t task = TASK_INSPECT_NULL;

    if (IP_VALID(port)) {
        ip_lock(port);

        if (ip_active(port) &&
            ip_kotype(port) == IKOT_TASK) {
            task = (task_inspect_t)port->ip_kobject;
            assert(task != TASK_INSPECT_NULL);

            task_reference_internal(task);
        }
        ip_unlock(port);
    }

    return task;
}
/*
 *  Routine:    convert_port_to_task_suspension_token
 *  Purpose:
 *      Convert from a port to a task suspension token.
 *      Doesn't consume the port ref; produces a suspension token ref,
 *      which may be null.
 */

task_suspension_token_t
convert_port_to_task_suspension_token(
    ipc_port_t port)
{
    task_suspension_token_t task = TASK_NULL;

    if (IP_VALID(port)) {
        ip_lock(port);

        if (ip_active(port) &&
            ip_kotype(port) == IKOT_TASK_RESUME) {
            task = (task_suspension_token_t)port->ip_kobject;
            assert(task != TASK_NULL);

            task_reference_internal(task);
        }
        ip_unlock(port);
    }

    return task;
}
/*
 *  Routine:    convert_port_to_space
 *  Purpose:
 *      Convert from a port to a space.
 *      Doesn't consume the port ref; produces a space ref,
 *      which may be null.
 */

ipc_space_t
convert_port_to_space(
    ipc_port_t port)
{
    ipc_space_t space;
    task_t task;

    task = convert_port_to_locked_task(port);

    if (task == TASK_NULL) {
        return IPC_SPACE_NULL;
    }

    if (!task->active) {
        task_unlock(task);
        return IPC_SPACE_NULL;
    }

    space = task->itk_space;
    is_reference(space);
    task_unlock(task);
    return space;
}
/*
 *  Routine:    convert_port_to_space_inspect
 *  Purpose:
 *      Convert from a port to a space inspect right.
 *      Doesn't consume the port ref; produces a space inspect ref,
 *      which may be null.
 */

ipc_space_inspect_t
convert_port_to_space_inspect(
    ipc_port_t port)
{
    ipc_space_inspect_t space;
    task_inspect_t task;

    task = convert_port_to_locked_task_inspect(port);

    if (task == TASK_INSPECT_NULL) {
        return IPC_SPACE_INSPECT_NULL;
    }

    if (!task->active) {
        task_unlock(task);
        return IPC_SPACE_INSPECT_NULL;
    }

    space = (ipc_space_inspect_t)task->itk_space;
    is_reference((ipc_space_t)space);
    task_unlock((task_t)task);
    return space;
}
/*
 *  Routine:    convert_port_to_map
 *  Purpose:
 *      Convert from a port to a map.
 *      Doesn't consume the port ref; produces a map ref,
 *      which may be null.
 */

vm_map_t
convert_port_to_map(
    ipc_port_t port)
{
    task_t task;
    vm_map_t map;

    task = convert_port_to_locked_task(port);

    if (task == TASK_NULL) {
        return VM_MAP_NULL;
    }

    if (!task->active) {
        task_unlock(task);
        return VM_MAP_NULL;
    }

    map = task->map;
    vm_map_reference_swap(map);
    task_unlock(task);
    return map;
}
/*
 *  Routine:    convert_port_to_thread
 *  Purpose:
 *      Convert from a port to a thread.
 *      Doesn't consume the port ref; produces a thread ref,
 *      which may be null.
 */

thread_t
convert_port_to_thread(
    ipc_port_t port)
{
    thread_t thread = THREAD_NULL;

    if (IP_VALID(port)) {
        ip_lock(port);

        if (ip_active(port) &&
            ip_kotype(port) == IKOT_THREAD) {
            thread = (thread_t)port->ip_kobject;
            assert(thread != THREAD_NULL);

            /* Use task conversion rules for thread control conversions */
            if (task_conversion_eval(current_task(), thread->task) != KERN_SUCCESS) {
                ip_unlock(port);
                return THREAD_NULL;
            }

            thread_reference_internal(thread);
        }
        ip_unlock(port);
    }

    return thread;
}
/*
 *  Routine:    convert_port_to_thread_inspect
 *  Purpose:
 *      Convert from a port to a thread inspection right.
 *      Doesn't consume the port ref; produces a thread ref,
 *      which may be null.
 */

thread_inspect_t
convert_port_to_thread_inspect(
    ipc_port_t port)
{
    thread_inspect_t thread = THREAD_INSPECT_NULL;

    if (IP_VALID(port)) {
        ip_lock(port);

        if (ip_active(port) &&
            ip_kotype(port) == IKOT_THREAD) {
            thread = (thread_inspect_t)port->ip_kobject;
            assert(thread != THREAD_INSPECT_NULL);
            thread_reference_internal((thread_t)thread);
        }
        ip_unlock(port);
    }

    return thread;
}
/*
 *  Routine:    convert_thread_inspect_to_port
 *  Purpose:
 *      Convert from a thread inspect reference to a port.
 *      Consumes a thread ref;
 *      As we never export thread inspect ports, always
 *      creates a NULL port.
 */

ipc_port_t
convert_thread_inspect_to_port(thread_inspect_t thread)
{
    thread_deallocate(thread);
    return IP_NULL;
}
/*
 *  Routine:    port_name_to_thread
 *  Purpose:
 *      Convert from a port name to a thread reference.
 *      A name of MACH_PORT_NULL is valid for the null thread.
 *
 *  TODO: Could this be faster if it were ipc_port_translate_send based, like thread_switch?
 *        We could avoid extra lock/unlock and extra ref operations on the port.
 */

thread_t
port_name_to_thread(
    mach_port_name_t name)
{
    thread_t thread = THREAD_NULL;
    ipc_port_t kport;

    if (MACH_PORT_VALID(name)) {
        if (ipc_object_copyin(current_space(), name,
            MACH_MSG_TYPE_COPY_SEND,
            (ipc_object_t *)&kport) != KERN_SUCCESS) {
            return THREAD_NULL;
        }

        thread = convert_port_to_thread(kport);

        if (IP_VALID(kport)) {
            ipc_port_release_send(kport);
        }
    }

    return thread;
}
task_t
port_name_to_task(
    mach_port_name_t name)
{
    ipc_port_t kern_port;
    kern_return_t kr;
    task_t task = TASK_NULL;

    if (MACH_PORT_VALID(name)) {
        kr = ipc_object_copyin(current_space(), name,
            MACH_MSG_TYPE_COPY_SEND,
            (ipc_object_t *) &kern_port);
        if (kr != KERN_SUCCESS) {
            return TASK_NULL;
        }

        task = convert_port_to_task(kern_port);

        if (IP_VALID(kern_port)) {
            ipc_port_release_send(kern_port);
        }
    }
    return task;
}
task_inspect_t
port_name_to_task_inspect(
    mach_port_name_t name)
{
    ipc_port_t kern_port;
    kern_return_t kr;
    task_inspect_t ti = TASK_INSPECT_NULL;

    if (MACH_PORT_VALID(name)) {
        kr = ipc_object_copyin(current_space(), name,
            MACH_MSG_TYPE_COPY_SEND,
            (ipc_object_t *)&kern_port);
        if (kr != KERN_SUCCESS) {
            return TASK_INSPECT_NULL;
        }

        ti = convert_port_to_task_inspect(kern_port);

        if (IP_VALID(kern_port)) {
            ipc_port_release_send(kern_port);
        }
    }
    return ti;
}
/*
 *  Routine:    port_name_to_host
 *  Purpose:
 *      Convert from a port name to a host pointer.
 *      NOTE: This does _not_ return a +1 reference to the host_t
 */

host_t
port_name_to_host(
    mach_port_name_t name)
{
    host_t host = HOST_NULL;
    kern_return_t kr;
    ipc_port_t port;

    if (MACH_PORT_VALID(name)) {
        kr = ipc_port_translate_send(current_space(), name, &port);
        if (kr == KERN_SUCCESS) {
            host = convert_port_to_host(port);
            ip_unlock(port);
        }
    }
    return host;
}
/*
 *  Routine:    convert_task_to_port
 *  Purpose:
 *      Convert from a task to a port.
 *      Consumes a task ref; produces a naked send right
 *      which may be invalid.
 */

ipc_port_t
convert_task_to_port(
    task_t task)
{
    ipc_port_t port;

    itk_lock(task);
    if (task->itk_self != IP_NULL) {
        port = ipc_port_make_send(task->itk_self);
    } else {
        port = IP_NULL;
    }
    itk_unlock(task);

    task_deallocate(task);
    return port;
}
/*
 *  Routine:    convert_task_inspect_to_port
 *  Purpose:
 *      Convert from a task inspect reference to a port.
 *      Consumes a task ref;
 *      As we never export task inspect ports, always
 *      creates a NULL port.
 */

ipc_port_t
convert_task_inspect_to_port(
    task_inspect_t task)
{
    task_deallocate(task);
    return IP_NULL;
}
/*
 *  Routine:    convert_task_suspend_token_to_port
 *  Purpose:
 *      Convert from a task suspension token to a port.
 *      Consumes a task suspension token ref; produces a naked send-once right
 *      which may be invalid.
 */

ipc_port_t
convert_task_suspension_token_to_port(
    task_suspension_token_t task)
{
    ipc_port_t port;

    task_lock(task);
    if (task->active) {
        if (task->itk_resume == IP_NULL) {
            task->itk_resume = ipc_port_alloc_kernel();
            if (!IP_VALID(task->itk_resume)) {
                panic("failed to create resume port");
            }
            ipc_kobject_set(task->itk_resume, (ipc_kobject_t) task, IKOT_TASK_RESUME);
        }

        /*
         * Create a send-once right for each instance of a direct user-called
         * task_suspend2 call. Each time one of these send-once rights is abandoned,
         * the notification handler will resume the target task.
         */
        port = ipc_port_make_sonce(task->itk_resume);
        assert(IP_VALID(port));
    } else {
        port = IP_NULL;
    }
    task_unlock(task);

    task_suspension_token_deallocate(task);
    return port;
}
/*
 *  Routine:    convert_task_name_to_port
 *  Purpose:
 *      Convert from a task name ref to a port.
 *      Consumes a task name ref; produces a naked send right
 *      which may be invalid.
 */

ipc_port_t
convert_task_name_to_port(
    task_name_t task_name)
{
    ipc_port_t port;

    itk_lock(task_name);
    if (task_name->itk_nself != IP_NULL) {
        port = ipc_port_make_send(task_name->itk_nself);
    } else {
        port = IP_NULL;
    }
    itk_unlock(task_name);

    task_name_deallocate(task_name);
    return port;
}
/*
 *  Routine:    convert_thread_to_port
 *  Purpose:
 *      Convert from a thread to a port.
 *      Consumes a thread ref; produces a naked send right
 *      which may be invalid.
 */

ipc_port_t
convert_thread_to_port(
    thread_t thread)
{
    ipc_port_t port;

    thread_mtx_lock(thread);

    if (thread->ith_self != IP_NULL) {
        port = ipc_port_make_send(thread->ith_self);
    } else {
        port = IP_NULL;
    }

    thread_mtx_unlock(thread);

    thread_deallocate(thread);
    return port;
}
/*
 *  Routine:    space_deallocate
 *  Purpose:
 *      Deallocate a space ref produced by convert_port_to_space.
 */

void
space_deallocate(
    ipc_space_t space)
{
    if (space != IS_NULL) {
        is_release(space);
    }
}
/*
 *  Routine:    space_inspect_deallocate
 *  Purpose:
 *      Deallocate a space inspect ref produced by convert_port_to_space_inspect.
 */

void
space_inspect_deallocate(
    ipc_space_inspect_t space)
{
    if (space != IS_INSPECT_NULL) {
        is_release((ipc_space_t)space);
    }
}
/*
 *  Routine:    thread/task_set_exception_ports [kernel call]
 *  Purpose:
 *      Sets the thread/task exception port, flavor and
 *      behavior for the exception types specified by the mask.
 *      There will be one send right per exception per valid
 *      mask bit.
 *  Conditions:
 *      Nothing locked. If successful, consumes
 *      the supplied send right.
 *  Returns:
 *      KERN_SUCCESS            Changed the special port.
 *      KERN_INVALID_ARGUMENT   The thread is null,
 *                              Illegal mask bit set.
 *                              Illegal exception behavior.
 *      KERN_FAILURE            The thread is dead.
 */

kern_return_t
thread_set_exception_ports(
    thread_t                thread,
    exception_mask_t        exception_mask,
    ipc_port_t              new_port,
    exception_behavior_t    new_behavior,
    thread_state_flavor_t   new_flavor)
{
    ipc_port_t old_port[EXC_TYPES_COUNT];
    boolean_t privileged = current_task()->sec_token.val[0] == 0;
    int i;

    struct label *new_label;

    if (thread == THREAD_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    if (exception_mask & ~EXC_MASK_VALID) {
        return KERN_INVALID_ARGUMENT;
    }

    if (IP_VALID(new_port)) {
        switch (new_behavior & ~MACH_EXCEPTION_CODES) {
        case EXCEPTION_DEFAULT:
        case EXCEPTION_STATE:
        case EXCEPTION_STATE_IDENTITY:
            break;

        default:
            return KERN_INVALID_ARGUMENT;
        }
    }

    /*
     * Check the validity of the thread_state_flavor by calling the
     * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
     * osfmk/mach/ARCHITECTURE/thread_status.h
     */
    if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
        return KERN_INVALID_ARGUMENT;
    }

    new_label = mac_exc_create_label_for_current_proc();

    thread_mtx_lock(thread);

    if (!thread->active) {
        thread_mtx_unlock(thread);

        return KERN_FAILURE;
    }

    if (thread->exc_actions == NULL) {
        ipc_thread_init_exc_actions(thread);
    }
    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
        if ((exception_mask & (1 << i))
            && mac_exc_update_action_label(&thread->exc_actions[i], new_label) == 0
            ) {
            old_port[i] = thread->exc_actions[i].port;
            thread->exc_actions[i].port = ipc_port_copy_send(new_port);
            thread->exc_actions[i].behavior = new_behavior;
            thread->exc_actions[i].flavor = new_flavor;
            thread->exc_actions[i].privileged = privileged;
        } else {
            old_port[i] = IP_NULL;
        }
    }

    thread_mtx_unlock(thread);

    mac_exc_free_label(new_label);

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
        if (IP_VALID(old_port[i])) {
            ipc_port_release_send(old_port[i]);
        }
    }

    if (IP_VALID(new_port)) { /* consume send right */
        ipc_port_release_send(new_port);
    }

    return KERN_SUCCESS;
}
kern_return_t
task_set_exception_ports(
    task_t                  task,
    exception_mask_t        exception_mask,
    ipc_port_t              new_port,
    exception_behavior_t    new_behavior,
    thread_state_flavor_t   new_flavor)
{
    ipc_port_t old_port[EXC_TYPES_COUNT];
    boolean_t privileged = current_task()->sec_token.val[0] == 0;
    int i;

    struct label *new_label;

    if (task == TASK_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    if (exception_mask & ~EXC_MASK_VALID) {
        return KERN_INVALID_ARGUMENT;
    }

    if (IP_VALID(new_port)) {
        switch (new_behavior & ~MACH_EXCEPTION_CODES) {
        case EXCEPTION_DEFAULT:
        case EXCEPTION_STATE:
        case EXCEPTION_STATE_IDENTITY:
            break;

        default:
            return KERN_INVALID_ARGUMENT;
        }
    }

    /*
     * Check the validity of the thread_state_flavor by calling the
     * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
     * osfmk/mach/ARCHITECTURE/thread_status.h
     */
    if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
        return KERN_INVALID_ARGUMENT;
    }

    new_label = mac_exc_create_label_for_current_proc();

    itk_lock(task);

    if (task->itk_self == IP_NULL) {
        itk_unlock(task);

        return KERN_FAILURE;
    }

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
        if ((exception_mask & (1 << i))
            && mac_exc_update_action_label(&task->exc_actions[i], new_label) == 0
            ) {
            old_port[i] = task->exc_actions[i].port;
            task->exc_actions[i].port =
                ipc_port_copy_send(new_port);
            task->exc_actions[i].behavior = new_behavior;
            task->exc_actions[i].flavor = new_flavor;
            task->exc_actions[i].privileged = privileged;
        } else {
            old_port[i] = IP_NULL;
        }
    }

    itk_unlock(task);

    mac_exc_free_label(new_label);

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
        if (IP_VALID(old_port[i])) {
            ipc_port_release_send(old_port[i]);
        }
    }

    if (IP_VALID(new_port)) { /* consume send right */
        ipc_port_release_send(new_port);
    }

    return KERN_SUCCESS;
}
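/*
 * Illustrative user-space sketch (not part of this file), routing an exception
 * type for the whole task to a freshly allocated port via the MIG caller of the
 * routine above. A real handler would then service exception messages received
 * on `exc_port`:
 *
 *      #include <mach/mach.h>
 *
 *      mach_port_t exc_port = MACH_PORT_NULL;
 *      kern_return_t kr;
 *
 *      kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &exc_port);
 *      kr = mach_port_insert_right(mach_task_self(), exc_port, exc_port,
 *              MACH_MSG_TYPE_MAKE_SEND);
 *
 *      kr = task_set_exception_ports(mach_task_self(),
 *              EXC_MASK_BAD_ACCESS,
 *              exc_port,
 *              EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES,
 *              THREAD_STATE_NONE);
 *      // on success the send right made above is consumed by the kernel
 */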
/*
 *  Routine:    thread/task_swap_exception_ports [kernel call]
 *  Purpose:
 *      Sets the thread/task exception port, flavor and
 *      behavior for the exception types specified by the
 *      mask.
 *
 *      The old ports, behavior and flavors are returned.
 *      Count specifies the array sizes on input and
 *      the number of returned ports etc. on output. The
 *      arrays must be large enough to hold all the returned
 *      data, MIG returns an error otherwise. The masks
 *      array specifies the corresponding exception type(s).
 *  Conditions:
 *      Nothing locked. If successful, consumes
 *      the supplied send right.
 *
 *      Returns up to [in] CountCnt elements.
 *  Returns:
 *      KERN_SUCCESS            Changed the special port.
 *      KERN_INVALID_ARGUMENT   The thread is null,
 *                              Illegal mask bit set.
 *                              Illegal exception behavior.
 *      KERN_FAILURE            The thread is dead.
 */

kern_return_t
thread_swap_exception_ports(
    thread_t                    thread,
    exception_mask_t            exception_mask,
    ipc_port_t                  new_port,
    exception_behavior_t        new_behavior,
    thread_state_flavor_t       new_flavor,
    exception_mask_array_t      masks,
    mach_msg_type_number_t      *CountCnt,
    exception_port_array_t      ports,
    exception_behavior_array_t  behaviors,
    thread_state_flavor_array_t flavors)
{
    ipc_port_t old_port[EXC_TYPES_COUNT];
    boolean_t privileged = current_task()->sec_token.val[0] == 0;
    unsigned int i, j, count;

    struct label *new_label;

    if (thread == THREAD_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    if (exception_mask & ~EXC_MASK_VALID) {
        return KERN_INVALID_ARGUMENT;
    }

    if (IP_VALID(new_port)) {
        switch (new_behavior & ~MACH_EXCEPTION_CODES) {
        case EXCEPTION_DEFAULT:
        case EXCEPTION_STATE:
        case EXCEPTION_STATE_IDENTITY:
            break;

        default:
            return KERN_INVALID_ARGUMENT;
        }
    }

    if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
        return KERN_INVALID_ARGUMENT;
    }

    new_label = mac_exc_create_label_for_current_proc();

    thread_mtx_lock(thread);

    if (!thread->active) {
        thread_mtx_unlock(thread);

        return KERN_FAILURE;
    }

    if (thread->exc_actions == NULL) {
        ipc_thread_init_exc_actions(thread);
    }

    assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
    for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
        if ((exception_mask & (1 << i))
            && mac_exc_update_action_label(&thread->exc_actions[i], new_label) == 0
            ) {
            for (j = 0; j < count; ++j) {
                /*
                 * search for an identical entry, if found
                 * set corresponding mask for this exception.
                 */
                if (thread->exc_actions[i].port == ports[j] &&
                    thread->exc_actions[i].behavior == behaviors[j] &&
                    thread->exc_actions[i].flavor == flavors[j]) {
                    masks[j] |= (1 << i);
                    break;
                }
            }

            if (j == count) {
                masks[j] = (1 << i);
                ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);

                behaviors[j] = thread->exc_actions[i].behavior;
                flavors[j] = thread->exc_actions[i].flavor;
                ++count;
            }

            old_port[i] = thread->exc_actions[i].port;
            thread->exc_actions[i].port = ipc_port_copy_send(new_port);
            thread->exc_actions[i].behavior = new_behavior;
            thread->exc_actions[i].flavor = new_flavor;
            thread->exc_actions[i].privileged = privileged;
        } else {
            old_port[i] = IP_NULL;
        }
    }

    thread_mtx_unlock(thread);

    mac_exc_free_label(new_label);

    while (--i >= FIRST_EXCEPTION) {
        if (IP_VALID(old_port[i])) {
            ipc_port_release_send(old_port[i]);
        }
    }

    if (IP_VALID(new_port)) { /* consume send right */
        ipc_port_release_send(new_port);
    }

    *CountCnt = count;

    return KERN_SUCCESS;
}
kern_return_t
task_swap_exception_ports(
    task_t                      task,
    exception_mask_t            exception_mask,
    ipc_port_t                  new_port,
    exception_behavior_t        new_behavior,
    thread_state_flavor_t       new_flavor,
    exception_mask_array_t      masks,
    mach_msg_type_number_t      *CountCnt,
    exception_port_array_t      ports,
    exception_behavior_array_t  behaviors,
    thread_state_flavor_array_t flavors)
{
    ipc_port_t old_port[EXC_TYPES_COUNT];
    boolean_t privileged = current_task()->sec_token.val[0] == 0;
    unsigned int i, j, count;

    struct label *new_label;

    if (task == TASK_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    if (exception_mask & ~EXC_MASK_VALID) {
        return KERN_INVALID_ARGUMENT;
    }

    if (IP_VALID(new_port)) {
        switch (new_behavior & ~MACH_EXCEPTION_CODES) {
        case EXCEPTION_DEFAULT:
        case EXCEPTION_STATE:
        case EXCEPTION_STATE_IDENTITY:
            break;

        default:
            return KERN_INVALID_ARGUMENT;
        }
    }

    if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
        return KERN_INVALID_ARGUMENT;
    }

    new_label = mac_exc_create_label_for_current_proc();

    itk_lock(task);

    if (task->itk_self == IP_NULL) {
        itk_unlock(task);

        return KERN_FAILURE;
    }

    assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
    for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
        if ((exception_mask & (1 << i))
            && mac_exc_update_action_label(&task->exc_actions[i], new_label) == 0
            ) {
            for (j = 0; j < count; j++) {
                /*
                 * search for an identical entry, if found
                 * set corresponding mask for this exception.
                 */
                if (task->exc_actions[i].port == ports[j] &&
                    task->exc_actions[i].behavior == behaviors[j] &&
                    task->exc_actions[i].flavor == flavors[j]) {
                    masks[j] |= (1 << i);
                    break;
                }
            }

            if (j == count) {
                masks[j] = (1 << i);
                ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
                behaviors[j] = task->exc_actions[i].behavior;
                flavors[j] = task->exc_actions[i].flavor;
                ++count;
            }

            old_port[i] = task->exc_actions[i].port;

            task->exc_actions[i].port = ipc_port_copy_send(new_port);
            task->exc_actions[i].behavior = new_behavior;
            task->exc_actions[i].flavor = new_flavor;
            task->exc_actions[i].privileged = privileged;
        } else {
            old_port[i] = IP_NULL;
        }
    }

    itk_unlock(task);

    mac_exc_free_label(new_label);

    while (--i >= FIRST_EXCEPTION) {
        if (IP_VALID(old_port[i])) {
            ipc_port_release_send(old_port[i]);
        }
    }

    if (IP_VALID(new_port)) { /* consume send right */
        ipc_port_release_send(new_port);
    }

    *CountCnt = count;

    return KERN_SUCCESS;
}
/*
 *  Routine:    thread/task_get_exception_ports [kernel call]
 *  Purpose:
 *      Clones a send right for each of the thread/task's exception
 *      ports specified in the mask and returns the behaviour
 *      and flavor of said port.
 *
 *      Returns up to [in] CountCnt elements.
 *  Returns:
 *      KERN_SUCCESS            Extracted a send right.
 *      KERN_INVALID_ARGUMENT   The thread is null,
 *                              Invalid special port,
 *                              Illegal mask bit set.
 *      KERN_FAILURE            The thread is dead.
 */

kern_return_t
thread_get_exception_ports(
    thread_t                    thread,
    exception_mask_t            exception_mask,
    exception_mask_array_t      masks,
    mach_msg_type_number_t      *CountCnt,
    exception_port_array_t      ports,
    exception_behavior_array_t  behaviors,
    thread_state_flavor_array_t flavors)
{
    unsigned int i, j, count;

    if (thread == THREAD_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    if (exception_mask & ~EXC_MASK_VALID) {
        return KERN_INVALID_ARGUMENT;
    }

    thread_mtx_lock(thread);

    if (!thread->active) {
        thread_mtx_unlock(thread);

        return KERN_FAILURE;
    }

    count = 0;

    if (thread->exc_actions == NULL) {
        goto done;
    }

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
        if (exception_mask & (1 << i)) {
            for (j = 0; j < count; ++j) {
                /*
                 * search for an identical entry, if found
                 * set corresponding mask for this exception.
                 */
                if (thread->exc_actions[i].port == ports[j] &&
                    thread->exc_actions[i].behavior == behaviors[j] &&
                    thread->exc_actions[i].flavor == flavors[j]) {
                    masks[j] |= (1 << i);
                    break;
                }
            }

            if (j == count) {
                masks[j] = (1 << i);
                ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);
                behaviors[j] = thread->exc_actions[i].behavior;
                flavors[j] = thread->exc_actions[i].flavor;
                ++count;
                if (count >= *CountCnt) {
                    break;
                }
            }
        }
    }

done:
    thread_mtx_unlock(thread);

    *CountCnt = count;

    return KERN_SUCCESS;
}
kern_return_t
task_get_exception_ports(
    task_t                      task,
    exception_mask_t            exception_mask,
    exception_mask_array_t      masks,
    mach_msg_type_number_t      *CountCnt,
    exception_port_array_t      ports,
    exception_behavior_array_t  behaviors,
    thread_state_flavor_array_t flavors)
{
    unsigned int i, j, count;

    if (task == TASK_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    if (exception_mask & ~EXC_MASK_VALID) {
        return KERN_INVALID_ARGUMENT;
    }

    itk_lock(task);

    if (task->itk_self == IP_NULL) {
        itk_unlock(task);

        return KERN_FAILURE;
    }

    count = 0;

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
        if (exception_mask & (1 << i)) {
            for (j = 0; j < count; ++j) {
                /*
                 * search for an identical entry, if found
                 * set corresponding mask for this exception.
                 */
                if (task->exc_actions[i].port == ports[j] &&
                    task->exc_actions[i].behavior == behaviors[j] &&
                    task->exc_actions[i].flavor == flavors[j]) {
                    masks[j] |= (1 << i);
                    break;
                }
            }

            if (j == count) {
                masks[j] = (1 << i);
                ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
                behaviors[j] = task->exc_actions[i].behavior;
                flavors[j] = task->exc_actions[i].flavor;
                ++count;
                if (count > *CountCnt) {
                    break;
                }
            }
        }
    }

    itk_unlock(task);

    *CountCnt = count;

    return KERN_SUCCESS;
}
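/*
 * Illustrative user-space sketch (not part of this file), saving the existing
 * exception handlers before installing new ones. The arrays are sized by
 * EXC_TYPES_COUNT; on return `count` entries describe the distinct
 * (port, behavior, flavor) tuples and masks[j] says which exception types map
 * to entry j:
 *
 *      #include <mach/mach.h>
 *
 *      exception_mask_t       masks[EXC_TYPES_COUNT];
 *      exception_handler_t    handlers[EXC_TYPES_COUNT];
 *      exception_behavior_t   behaviors[EXC_TYPES_COUNT];
 *      thread_state_flavor_t  flavors[EXC_TYPES_COUNT];
 *      mach_msg_type_number_t count = EXC_TYPES_COUNT;
 *
 *      kern_return_t kr = task_get_exception_ports(mach_task_self(),
 *              EXC_MASK_ALL, masks, &count, handlers, behaviors, flavors);
 *      // returned handler ports are send rights the caller must deallocate
 */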