/*
 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
 * support for mandatory and extensible security protections. This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
/*
 * Task and thread related IPC functions.
 */
#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mach_param.h>
#include <mach/task_special_ports.h>
#include <mach/thread_special_ports.h>
#include <mach/thread_status.h>
#include <mach/exception_types.h>
#include <mach/memory_object_types.h>
#include <mach/mach_traps.h>
#include <mach/task_server.h>
#include <mach/thread_act_server.h>
#include <mach/mach_host_server.h>
#include <mach/host_priv_server.h>
#include <mach/vm_map_server.h>

#include <kern/kern_types.h>
#include <kern/host.h>
#include <kern/ipc_kobject.h>
#include <kern/ipc_tt.h>
#include <kern/kalloc.h>
#include <kern/thread.h>
#include <kern/misc_protos.h>

#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>

#include <security/mac_mach_internal.h>
#if CONFIG_EMBEDDED && !SECURE_KERNEL
extern int cs_relax_platform_task_ports;
#endif

/* forward declarations */
task_t convert_port_to_locked_task(ipc_port_t port);
task_inspect_t convert_port_to_locked_task_inspect(ipc_port_t port);
static void ipc_port_bind_special_reply_port_locked(ipc_port_t port);
static kern_return_t ipc_port_unbind_special_reply_port(thread_t thread, boolean_t unbind_active_port);
kern_return_t task_conversion_eval(task_t caller, task_t victim);
/*
 * Routine: ipc_task_init
 *	Initialize a task's IPC state.
 *
 *	If non-null, some state will be inherited from the parent.
 *	The parent must be appropriately initialized.
 */
void
ipc_task_init(
    task_t task,
    task_t parent)
{
    ipc_space_t space;
    ipc_port_t kport;
    ipc_port_t nport;
    kern_return_t kr;
    int i;

    kr = ipc_space_create(&ipc_table_entries[0], &space);
    if (kr != KERN_SUCCESS)
        panic("ipc_task_init");

    space->is_task = task;

    kport = ipc_port_alloc_kernel();
    if (kport == IP_NULL)
        panic("ipc_task_init");

    nport = ipc_port_alloc_kernel();
    if (nport == IP_NULL)
        panic("ipc_task_init");

    task->itk_self = kport;
    task->itk_nself = nport;
    task->itk_resume = IP_NULL; /* Lazily allocated on-demand */
    if (task_is_a_corpse_fork(task)) {
        /*
         * A no-senders notification for a corpse would not
         * work with a naked send right held in the kernel.
         */
        task->itk_sself = IP_NULL;
    } else {
        task->itk_sself = ipc_port_make_send(kport);
    }
    task->itk_debug_control = IP_NULL;
    task->itk_space = space;

    task->exc_actions[0].label = NULL;
    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
        mac_exc_associate_action_label(&task->exc_actions[i], mac_exc_create_label());
    }

    /* always zero-out the first (unused) array element */
    bzero(&task->exc_actions[0], sizeof(task->exc_actions[0]));

    if (parent == TASK_NULL) {
        ipc_port_t port;

        for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
            task->exc_actions[i].port = IP_NULL;
            task->exc_actions[i].flavor = 0;
            task->exc_actions[i].behavior = 0;
            task->exc_actions[i].privileged = FALSE;
        }

        kr = host_get_host_port(host_priv_self(), &port);
        assert(kr == KERN_SUCCESS);
        task->itk_host = port;

        task->itk_bootstrap = IP_NULL;
        task->itk_seatbelt = IP_NULL;
        task->itk_gssd = IP_NULL;
        task->itk_task_access = IP_NULL;

        for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
            task->itk_registered[i] = IP_NULL;
    } else {
        assert(parent->itk_self != IP_NULL);

        /* inherit registered ports */

        for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
            task->itk_registered[i] =
                ipc_port_copy_send(parent->itk_registered[i]);

        /* inherit exception and bootstrap ports */

        for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
            task->exc_actions[i].port =
                ipc_port_copy_send(parent->exc_actions[i].port);
            task->exc_actions[i].flavor =
                parent->exc_actions[i].flavor;
            task->exc_actions[i].behavior =
                parent->exc_actions[i].behavior;
            task->exc_actions[i].privileged =
                parent->exc_actions[i].privileged;
            mac_exc_inherit_action_label(parent->exc_actions + i, task->exc_actions + i);
        }

        task->itk_host =
            ipc_port_copy_send(parent->itk_host);

        task->itk_bootstrap =
            ipc_port_copy_send(parent->itk_bootstrap);

        task->itk_seatbelt =
            ipc_port_copy_send(parent->itk_seatbelt);

        task->itk_gssd =
            ipc_port_copy_send(parent->itk_gssd);

        task->itk_task_access =
            ipc_port_copy_send(parent->itk_task_access);
    }
}
/*
 * Routine: ipc_task_enable
 *	Enable a task for IPC access.
 */

void
ipc_task_enable(
    task_t task)
{
    ipc_port_t kport;
    ipc_port_t nport;

    kport = task->itk_self;
    if (kport != IP_NULL)
        ipc_kobject_set(kport, (ipc_kobject_t) task, IKOT_TASK);
    nport = task->itk_nself;
    if (nport != IP_NULL)
        ipc_kobject_set(nport, (ipc_kobject_t) task, IKOT_TASK_NAME);
}
/*
 * Routine: ipc_task_disable
 *	Disable IPC access to a task.
 */

void
ipc_task_disable(
    task_t task)
{
    ipc_port_t kport;
    ipc_port_t nport;
    ipc_port_t rport;

    kport = task->itk_self;
    if (kport != IP_NULL)
        ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
    nport = task->itk_nself;
    if (nport != IP_NULL)
        ipc_kobject_set(nport, IKO_NULL, IKOT_NONE);

    rport = task->itk_resume;
    if (rport != IP_NULL) {
        /*
         * From this point onwards this task is no longer accepting
         * resume requests.
         *
         * There are still outstanding suspensions on this task,
         * even as it is being torn down. Disconnect the task
         * from the rport, thereby "orphaning" the rport. The rport
         * itself will go away only when the last suspension holder
         * destroys his SO right to it -- when he either
         * exits, or tries to actually use that last SO right to
         * resume this (now non-existent) task.
         */
        ipc_kobject_set(rport, IKO_NULL, IKOT_NONE);
    }
}
/*
 * Routine: ipc_task_terminate
 *	Clean up and destroy a task's IPC state.
 * Conditions:
 *	Nothing locked. The task must be suspended.
 *	(Or the current thread must be in the task.)
 */

void
ipc_task_terminate(
    task_t task)
{
    ipc_port_t kport;
    ipc_port_t nport;
    ipc_port_t rport;
    int i;

    kport = task->itk_self;

    if (kport == IP_NULL) {
        /* the task is already terminated (can this happen?) */
        return;
    }
    task->itk_self = IP_NULL;

    nport = task->itk_nself;
    assert(nport != IP_NULL);
    task->itk_nself = IP_NULL;

    rport = task->itk_resume;
    task->itk_resume = IP_NULL;

    /* release the naked send rights */

    if (IP_VALID(task->itk_sself))
        ipc_port_release_send(task->itk_sself);

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
        if (IP_VALID(task->exc_actions[i].port)) {
            ipc_port_release_send(task->exc_actions[i].port);
        }
        mac_exc_free_action_label(task->exc_actions + i);
    }

    if (IP_VALID(task->itk_host))
        ipc_port_release_send(task->itk_host);

    if (IP_VALID(task->itk_bootstrap))
        ipc_port_release_send(task->itk_bootstrap);

    if (IP_VALID(task->itk_seatbelt))
        ipc_port_release_send(task->itk_seatbelt);

    if (IP_VALID(task->itk_gssd))
        ipc_port_release_send(task->itk_gssd);

    if (IP_VALID(task->itk_task_access))
        ipc_port_release_send(task->itk_task_access);

    if (IP_VALID(task->itk_debug_control))
        ipc_port_release_send(task->itk_debug_control);

    for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
        if (IP_VALID(task->itk_registered[i]))
            ipc_port_release_send(task->itk_registered[i]);

    /* destroy the kernel ports */
    ipc_port_dealloc_kernel(kport);
    ipc_port_dealloc_kernel(nport);
    if (rport != IP_NULL)
        ipc_port_dealloc_kernel(rport);

    itk_lock_destroy(task);
}
/*
 * Routine: ipc_task_reset
 *	Reset a task's IPC state to protect it when
 *	it enters an elevated security context. The
 *	task name port can remain the same - since
 *	it represents no specific privilege.
 * Conditions:
 *	Nothing locked. The task must be suspended.
 *	(Or the current thread must be in the task.)
 */

void
ipc_task_reset(
    task_t task)
{
    ipc_port_t old_kport, new_kport;
    ipc_port_t old_sself;
    ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
    int i;

    /* Fresh label to unset credentials in existing labels. */
    struct label *unset_label = mac_exc_create_label();

    new_kport = ipc_port_alloc_kernel();
    if (new_kport == IP_NULL)
        panic("ipc_task_reset");

    old_kport = task->itk_self;

    if (old_kport == IP_NULL) {
        /* the task is already terminated (can this happen?) */
        ipc_port_dealloc_kernel(new_kport);
        mac_exc_free_label(unset_label);
        return;
    }

    task->itk_self = new_kport;
    old_sself = task->itk_sself;
    task->itk_sself = ipc_port_make_send(new_kport);

    /* Set the old kport to IKOT_NONE and update the exec token while under the port lock */
    ip_lock(old_kport);
    ipc_kobject_set_atomically(old_kport, IKO_NULL, IKOT_NONE);
    task->exec_token += 1;
    ip_unlock(old_kport);

    ipc_kobject_set(new_kport, (ipc_kobject_t) task, IKOT_TASK);

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
        old_exc_actions[i] = IP_NULL;

        if (i == EXC_CORPSE_NOTIFY && task_corpse_pending_report(task)) {
            continue;
        }

        if (!task->exc_actions[i].privileged) {
            mac_exc_update_action_label(task->exc_actions + i, unset_label);
            old_exc_actions[i] = task->exc_actions[i].port;
            task->exc_actions[i].port = IP_NULL;
        }
    }

    if (IP_VALID(task->itk_debug_control)) {
        ipc_port_release_send(task->itk_debug_control);
    }
    task->itk_debug_control = IP_NULL;

    mac_exc_free_label(unset_label);

    /* release the naked send rights */

    if (IP_VALID(old_sself))
        ipc_port_release_send(old_sself);

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
        if (IP_VALID(old_exc_actions[i])) {
            ipc_port_release_send(old_exc_actions[i]);
        }
    }

    /* destroy the kernel port */
    ipc_port_dealloc_kernel(old_kport);
}
/*
 * Routine: ipc_thread_init
 *	Initialize a thread's IPC state.
 */

void
ipc_thread_init(
    thread_t thread)
{
    ipc_port_t kport;

    kport = ipc_port_alloc_kernel();
    if (kport == IP_NULL)
        panic("ipc_thread_init");

    thread->ith_self = kport;
    thread->ith_sself = ipc_port_make_send(kport);
    thread->ith_special_reply_port = NULL;
    thread->exc_actions = NULL;

    ipc_kobject_set(kport, (ipc_kobject_t)thread, IKOT_THREAD);

#if IMPORTANCE_INHERITANCE
    thread->ith_assertions = 0;
#endif

    ipc_kmsg_queue_init(&thread->ith_messages);

    thread->ith_rpc_reply = IP_NULL;
}
void
ipc_thread_init_exc_actions(
    thread_t thread)
{
    assert(thread->exc_actions == NULL);

    thread->exc_actions = kalloc(sizeof(struct exception_action) * EXC_TYPES_COUNT);
    bzero(thread->exc_actions, sizeof(struct exception_action) * EXC_TYPES_COUNT);

    for (size_t i = 0; i < EXC_TYPES_COUNT; ++i) {
        mac_exc_associate_action_label(thread->exc_actions + i, mac_exc_create_label());
    }
}
void
ipc_thread_destroy_exc_actions(
    thread_t thread)
{
    if (thread->exc_actions != NULL) {
        for (size_t i = 0; i < EXC_TYPES_COUNT; ++i) {
            mac_exc_free_action_label(thread->exc_actions + i);
        }

        kfree(thread->exc_actions,
            sizeof(struct exception_action) * EXC_TYPES_COUNT);
        thread->exc_actions = NULL;
    }
}
void
ipc_thread_disable(
    thread_t thread)
{
    ipc_port_t kport = thread->ith_self;

    if (kport != IP_NULL)
        ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
}
/*
 * Routine: ipc_thread_terminate
 *	Clean up and destroy a thread's IPC state.
 */

void
ipc_thread_terminate(
    thread_t thread)
{
    ipc_port_t kport = thread->ith_self;
    int i;

    if (kport != IP_NULL) {
        if (IP_VALID(thread->ith_sself))
            ipc_port_release_send(thread->ith_sself);

        thread->ith_sself = thread->ith_self = IP_NULL;

        if (thread->exc_actions != NULL) {
            for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
                if (IP_VALID(thread->exc_actions[i].port))
                    ipc_port_release_send(thread->exc_actions[i].port);
            }
            ipc_thread_destroy_exc_actions(thread);
        }

        ipc_port_dealloc_kernel(kport);
    }

#if IMPORTANCE_INHERITANCE
    assert(thread->ith_assertions == 0);
#endif

    /* unbind the thread special reply port */
    if (IP_VALID(thread->ith_special_reply_port)) {
        ipc_port_unbind_special_reply_port(thread, TRUE);
    }

    assert(ipc_kmsg_queue_empty(&thread->ith_messages));

    if (thread->ith_rpc_reply != IP_NULL)
        ipc_port_dealloc_reply(thread->ith_rpc_reply);

    thread->ith_rpc_reply = IP_NULL;
}
/*
 * Routine: ipc_thread_reset
 *	Reset the IPC state for a given Mach thread when
 *	its task enters an elevated security context.
 *	Both the thread port and its exception ports have
 *	to be reset. Its RPC reply port cannot have any
 *	rights outstanding, so it should be fine.
 */

void
ipc_thread_reset(
    thread_t thread)
{
    ipc_port_t old_kport, new_kport;
    ipc_port_t old_sself;
    ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
    boolean_t has_old_exc_actions = FALSE;
    int i;

    struct label *new_label = mac_exc_create_label();

    new_kport = ipc_port_alloc_kernel();
    if (new_kport == IP_NULL)
        panic("ipc_task_reset");

    thread_mtx_lock(thread);

    old_kport = thread->ith_self;

    if (old_kport == IP_NULL && thread->inspection == FALSE) {
        /* the thread is already terminated (can this happen?) */
        thread_mtx_unlock(thread);
        ipc_port_dealloc_kernel(new_kport);
        mac_exc_free_label(new_label);
        return;
    }

    thread->ith_self = new_kport;
    old_sself = thread->ith_sself;
    thread->ith_sself = ipc_port_make_send(new_kport);

    if (old_kport != IP_NULL) {
        ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
    }
    ipc_kobject_set(new_kport, (ipc_kobject_t) thread, IKOT_THREAD);

    /*
     * Only ports that were set by root-owned processes
     * (privileged ports) should survive
     */
    if (thread->exc_actions != NULL) {
        has_old_exc_actions = TRUE;
        for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
            if (thread->exc_actions[i].privileged) {
                old_exc_actions[i] = IP_NULL;
            } else {
                mac_exc_update_action_label(thread->exc_actions + i, new_label);
                old_exc_actions[i] = thread->exc_actions[i].port;
                thread->exc_actions[i].port = IP_NULL;
            }
        }
    }

    thread_mtx_unlock(thread);

    mac_exc_free_label(new_label);

    /* release the naked send rights */

    if (IP_VALID(old_sself))
        ipc_port_release_send(old_sself);

    if (has_old_exc_actions) {
        for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
            ipc_port_release_send(old_exc_actions[i]);
        }
    }

    /* destroy the kernel port */
    if (old_kport != IP_NULL) {
        ipc_port_dealloc_kernel(old_kport);
    }

    /* unbind the thread special reply port */
    if (IP_VALID(thread->ith_special_reply_port)) {
        ipc_port_unbind_special_reply_port(thread, TRUE);
    }
}
/*
 * Routine: retrieve_task_self_fast
 *	Optimized version of retrieve_task_self,
 *	that only works for the current task.
 *
 *	Return a send right (possibly null/dead)
 *	for the task's user-visible self port.
 */

ipc_port_t
retrieve_task_self_fast(
    task_t task)
{
    ipc_port_t port;

    assert(task == current_task());

    assert(task->itk_self != IP_NULL);

    if ((port = task->itk_sself) == task->itk_self) {
        /* no interposing */
        ip_lock(port);
        assert(ip_active(port));
        ip_reference(port);
        port->ip_srights++;
        ip_unlock(port);
    } else
        port = ipc_port_copy_send(port);

    return port;
}
/*
 * Routine: retrieve_thread_self_fast
 *	Return a send right (possibly null/dead)
 *	for the thread's user-visible self port.
 *
 *	Only works for the current thread.
 */

ipc_port_t
retrieve_thread_self_fast(
    thread_t thread)
{
    ipc_port_t port;

    assert(thread == current_thread());

    thread_mtx_lock(thread);

    assert(thread->ith_self != IP_NULL);

    if ((port = thread->ith_sself) == thread->ith_self) {
        /* no interposing */
        ip_lock(port);
        assert(ip_active(port));
        ip_reference(port);
        port->ip_srights++;
        ip_unlock(port);
    } else
        port = ipc_port_copy_send(port);

    thread_mtx_unlock(thread);

    return port;
}
/*
 * Routine: task_self_trap [mach trap]
 *	Give the caller send rights for his own task port.
 * Returns:
 *	MACH_PORT_NULL if there are any resource failures
 */

mach_port_name_t
task_self_trap(
    __unused struct task_self_trap_args *args)
{
    task_t task = current_task();
    ipc_port_t sright;
    mach_port_name_t name;

    sright = retrieve_task_self_fast(task);
    name = ipc_port_copyout_send(sright, task->itk_space);
    return name;
}
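
/*
 * Illustrative user-space sketch (not part of this file): task_self_trap()
 * is what ultimately backs the familiar mach_task_self() call. Assumes
 * <mach/mach.h>; error handling elided.
 *
 *	mach_port_t self = mach_task_self();
 *	// 'self' names the caller's own task port and can be passed to
 *	// task_info(), vm_allocate(), and similar task-based calls.
 */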
/*
 * Routine: thread_self_trap [mach trap]
 *	Give the caller send rights for his own thread port.
 * Returns:
 *	MACH_PORT_NULL if there are any resource failures
 */

mach_port_name_t
thread_self_trap(
    __unused struct thread_self_trap_args *args)
{
    thread_t thread = current_thread();
    task_t task = thread->task;
    ipc_port_t sright;
    mach_port_name_t name;

    sright = retrieve_thread_self_fast(thread);
    name = ipc_port_copyout_send(sright, task->itk_space);
    return name;
}
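
/*
 * Illustrative user-space sketch (not part of this file): thread_self_trap()
 * backs mach_thread_self(). Unlike mach_task_self(), the returned send right
 * is not cached for the caller, so it is conventionally deallocated when no
 * longer needed. Assumes <mach/mach.h>.
 *
 *	mach_port_t th = mach_thread_self();
 *	// ... use 'th' with thread_info(), thread_get_state(), etc. ...
 *	mach_port_deallocate(mach_task_self(), th);
 */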
/*
 * Routine: mach_reply_port [mach trap]
 *	Allocate a port for the caller.
 * Returns:
 *	MACH_PORT_NULL if there are any resource failures
 */

mach_port_name_t
mach_reply_port(
    __unused struct mach_reply_port_args *args)
{
    ipc_port_t port;
    mach_port_name_t name;
    kern_return_t kr;

    kr = ipc_port_alloc(current_task()->itk_space, &name, &port);
    if (kr == KERN_SUCCESS)
        ip_unlock(port);
    else
        name = MACH_PORT_NULL;
    return name;
}
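
/*
 * Illustrative user-space sketch (not part of this file): mach_reply_port()
 * is exposed directly to user space and is the allocation that MIG reply
 * ports are built on. Assumes <mach/mach.h>.
 *
 *	mach_port_t reply = mach_reply_port();
 *	// 'reply' names a fresh receive right in the caller's IPC space,
 *	// suitable for use as msgh_local_port when expecting a reply.
 *	mach_port_mod_refs(mach_task_self(), reply, MACH_PORT_RIGHT_RECEIVE, -1);
 */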
/*
 * Routine: thread_get_special_reply_port [mach trap]
 *	Allocate a special reply port for the calling thread.
 * Returns:
 *	mach_port_name_t: send right & receive right for special reply port.
 *	MACH_PORT_NULL if there are any resource failures
 */

mach_port_name_t
thread_get_special_reply_port(
    __unused struct thread_get_special_reply_port_args *args)
{
    ipc_port_t port;
    mach_port_name_t name;
    mach_port_name_t send_name;
    kern_return_t kr;
    thread_t thread = current_thread();

    /* unbind the thread special reply port */
    if (IP_VALID(thread->ith_special_reply_port)) {
        kr = ipc_port_unbind_special_reply_port(thread, TRUE);
        if (kr != KERN_SUCCESS) {
            return MACH_PORT_NULL;
        }
    }

    kr = ipc_port_alloc(current_task()->itk_space, &name, &port);
    if (kr == KERN_SUCCESS) {
        ipc_port_bind_special_reply_port_locked(port);

        /* Make a send right and insert it in the space at specified name */
        ipc_port_make_send_locked(port);
        ip_unlock(port);
        send_name = ipc_port_copyout_name_send(port, current_task()->itk_space, name);
        /*
         * If insertion of send right failed, userland is doing something bad, error out.
         * The space was marked inactive or the receive right just inserted above at the
         * given name was moved, in either case do not try to deallocate the receive right.
         */
        if (send_name == MACH_PORT_NULL || send_name == MACH_PORT_DEAD) {
            if (IP_VALID(thread->ith_special_reply_port)) {
                ipc_port_unbind_special_reply_port(thread, TRUE);
            }
            name = MACH_PORT_NULL;
        }
    } else {
        name = MACH_PORT_NULL;
    }
    return name;
}
/*
 * Routine: ipc_port_bind_special_reply_port_locked
 *	Bind the given port to current thread as a special reply port.
 */

static void
ipc_port_bind_special_reply_port_locked(
    ipc_port_t port)
{
    thread_t thread = current_thread();
    assert(thread->ith_special_reply_port == NULL);

    ip_reference(port);
    thread->ith_special_reply_port = port;
    port->ip_specialreply = 1;
    port->ip_sync_link_state = PORT_SYNC_LINK_ANY;

    reset_ip_srp_bits(port);
}
/*
 * Routine: ipc_port_unbind_special_reply_port
 *	Unbind the thread's special reply port.
 *	If the special port has threads waiting on turnstile,
 *	update its inheritor.
 */

static kern_return_t
ipc_port_unbind_special_reply_port(
    thread_t thread,
    boolean_t unbind_active_port)
{
    ipc_port_t special_reply_port = thread->ith_special_reply_port;

    ip_lock(special_reply_port);

    /* Return error if port active and unbind_active_port set to FALSE */
    if (unbind_active_port == FALSE && ip_active(special_reply_port)) {
        ip_unlock(special_reply_port);
        return KERN_FAILURE;
    }

    thread->ith_special_reply_port = NULL;
    ipc_port_adjust_special_reply_port_locked(special_reply_port, NULL,
        IPC_PORT_ADJUST_SR_CLEAR_SPECIAL_REPLY, FALSE);
    /* port unlocked */

    ip_release(special_reply_port);
    return KERN_SUCCESS;
}
/*
 * Routine: thread_get_special_port [kernel call]
 *	Clones a send right for one of the thread's
 *	special ports.
 * Returns:
 *	KERN_SUCCESS            Extracted a send right.
 *	KERN_INVALID_ARGUMENT   The thread is null.
 *	KERN_FAILURE            The thread is dead.
 *	KERN_INVALID_ARGUMENT   Invalid special port.
 */

kern_return_t
thread_get_special_port(
    thread_t thread,
    int which,
    ipc_port_t *portp)
{
    kern_return_t result = KERN_SUCCESS;
    ipc_port_t *whichp;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    switch (which) {
    case THREAD_KERNEL_PORT:
        whichp = &thread->ith_sself;
        break;

    default:
        return (KERN_INVALID_ARGUMENT);
    }

    thread_mtx_lock(thread);

    if (thread->active)
        *portp = ipc_port_copy_send(*whichp);
    else
        result = KERN_FAILURE;

    thread_mtx_unlock(thread);

    return (result);
}
/*
 * Routine: thread_set_special_port [kernel call]
 *	Changes one of the thread's special ports,
 *	setting it to the supplied send right.
 * Conditions:
 *	Nothing locked. If successful, consumes
 *	the supplied send right.
 * Returns:
 *	KERN_SUCCESS            Changed the special port.
 *	KERN_INVALID_ARGUMENT   The thread is null.
 *	KERN_FAILURE            The thread is dead.
 *	KERN_INVALID_ARGUMENT   Invalid special port.
 */

kern_return_t
thread_set_special_port(
    thread_t thread,
    int which,
    ipc_port_t port)
{
    kern_return_t result = KERN_SUCCESS;
    ipc_port_t *whichp, old = IP_NULL;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    switch (which) {
    case THREAD_KERNEL_PORT:
        whichp = &thread->ith_sself;
        break;

    default:
        return (KERN_INVALID_ARGUMENT);
    }

    thread_mtx_lock(thread);

    if (thread->active) {
        old = *whichp;
        *whichp = port;
    } else
        result = KERN_FAILURE;

    thread_mtx_unlock(thread);

    if (IP_VALID(old))
        ipc_port_release_send(old);

    return (result);
}
/*
 * Routine: task_get_special_port [kernel call]
 *	Clones a send right for one of the task's
 *	special ports.
 * Returns:
 *	KERN_SUCCESS            Extracted a send right.
 *	KERN_INVALID_ARGUMENT   The task is null.
 *	KERN_FAILURE            The task/space is dead.
 *	KERN_INVALID_ARGUMENT   Invalid special port.
 */

kern_return_t
task_get_special_port(
    task_t task,
    int which,
    ipc_port_t *portp)
{
    ipc_port_t port;

    if (task == TASK_NULL)
        return KERN_INVALID_ARGUMENT;

    itk_lock(task);
    if (task->itk_self == IP_NULL) {
        itk_unlock(task);
        return KERN_FAILURE;
    }

    switch (which) {
    case TASK_KERNEL_PORT:
        port = ipc_port_copy_send(task->itk_sself);
        break;

    case TASK_NAME_PORT:
        port = ipc_port_make_send(task->itk_nself);
        break;

    case TASK_HOST_PORT:
        port = ipc_port_copy_send(task->itk_host);
        break;

    case TASK_BOOTSTRAP_PORT:
        port = ipc_port_copy_send(task->itk_bootstrap);
        break;

    case TASK_SEATBELT_PORT:
        port = ipc_port_copy_send(task->itk_seatbelt);
        break;

    case TASK_ACCESS_PORT:
        port = ipc_port_copy_send(task->itk_task_access);
        break;

    case TASK_DEBUG_CONTROL_PORT:
        port = ipc_port_copy_send(task->itk_debug_control);
        break;

    default:
        itk_unlock(task);
        return KERN_INVALID_ARGUMENT;
    }
    itk_unlock(task);

    *portp = port;
    return KERN_SUCCESS;
}
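
/*
 * Illustrative user-space sketch (not part of this file): a caller holding a
 * task port can clone one of its special ports, e.g. the host port. Assumes
 * <mach/mach.h>; error handling elided.
 *
 *	mach_port_t host = MACH_PORT_NULL;
 *	kern_return_t kr = task_get_special_port(mach_task_self(),
 *	    TASK_HOST_PORT, &host);
 *	if (kr == KERN_SUCCESS && MACH_PORT_VALID(host)) {
 *		// ... use 'host' with host_info() and friends ...
 *		mach_port_deallocate(mach_task_self(), host);
 *	}
 */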
/*
 * Routine: task_set_special_port [kernel call]
 *	Changes one of the task's special ports,
 *	setting it to the supplied send right.
 * Conditions:
 *	Nothing locked. If successful, consumes
 *	the supplied send right.
 * Returns:
 *	KERN_SUCCESS            Changed the special port.
 *	KERN_INVALID_ARGUMENT   The task is null.
 *	KERN_FAILURE            The task/space is dead.
 *	KERN_INVALID_ARGUMENT   Invalid special port.
 *	KERN_NO_ACCESS          Attempted overwrite of seatbelt port.
 */

kern_return_t
task_set_special_port(
    task_t task,
    int which,
    ipc_port_t port)
{
    ipc_port_t *whichp;
    ipc_port_t old;

    if (task == TASK_NULL)
        return KERN_INVALID_ARGUMENT;

    switch (which) {
    case TASK_KERNEL_PORT:
        whichp = &task->itk_sself;
        break;

    case TASK_HOST_PORT:
        whichp = &task->itk_host;
        break;

    case TASK_BOOTSTRAP_PORT:
        whichp = &task->itk_bootstrap;
        break;

    case TASK_SEATBELT_PORT:
        whichp = &task->itk_seatbelt;
        break;

    case TASK_ACCESS_PORT:
        whichp = &task->itk_task_access;
        break;

    case TASK_DEBUG_CONTROL_PORT:
        whichp = &task->itk_debug_control;
        break;

    default:
        return KERN_INVALID_ARGUMENT;
    }

    itk_lock(task);
    if (task->itk_self == IP_NULL) {
        itk_unlock(task);
        return KERN_FAILURE;
    }

    /* do not allow overwrite of seatbelt or task access ports */
    if ((TASK_SEATBELT_PORT == which || TASK_ACCESS_PORT == which)
        && IP_VALID(*whichp)) {
        itk_unlock(task);
        return KERN_NO_ACCESS;
    }

    old = *whichp;
    *whichp = port;
    itk_unlock(task);

    if (IP_VALID(old))
        ipc_port_release_send(old);
    return KERN_SUCCESS;
}
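
/*
 * Illustrative user-space sketch (not part of this file): replacing the
 * bootstrap special port, as launchd-style software does when handing a task
 * a new bootstrap namespace. 'new_bootstrap' is a hypothetical send right
 * owned by the caller; on success the right is consumed.
 *
 *	kern_return_t kr = task_set_special_port(mach_task_self(),
 *	    TASK_BOOTSTRAP_PORT, new_bootstrap);
 *	// Per the KERN_NO_ACCESS case above, the seatbelt and task access
 *	// ports cannot be overwritten once they hold a valid right.
 */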
/*
 * Routine: mach_ports_register [kernel call]
 *	Stash a handful of port send rights in the task.
 *	Child tasks will inherit these rights, but they
 *	must use mach_ports_lookup to acquire them.
 *
 *	The rights are supplied in a (wired) kalloc'd segment.
 *	Rights which aren't supplied are assumed to be null.
 * Conditions:
 *	Nothing locked. If successful, consumes
 *	the supplied rights and memory.
 * Returns:
 *	KERN_SUCCESS            Stashed the port rights.
 *	KERN_INVALID_ARGUMENT   The task is null.
 *	KERN_INVALID_ARGUMENT   The task is dead.
 *	KERN_INVALID_ARGUMENT   The memory param is null.
 *	KERN_INVALID_ARGUMENT   Too many port rights supplied.
 */

kern_return_t
mach_ports_register(
    task_t task,
    mach_port_array_t memory,
    mach_msg_type_number_t portsCnt)
{
    ipc_port_t ports[TASK_PORT_REGISTER_MAX];
    unsigned int i;

    if ((task == TASK_NULL) ||
        (portsCnt > TASK_PORT_REGISTER_MAX) ||
        (portsCnt && memory == NULL))
        return KERN_INVALID_ARGUMENT;

    /*
     * Pad the port rights with nulls.
     */

    for (i = 0; i < portsCnt; i++)
        ports[i] = memory[i];
    for (; i < TASK_PORT_REGISTER_MAX; i++)
        ports[i] = IP_NULL;

    itk_lock(task);
    if (task->itk_self == IP_NULL) {
        itk_unlock(task);
        return KERN_INVALID_ARGUMENT;
    }

    /*
     * Replace the old send rights with the new.
     * Release the old rights after unlocking.
     */

    for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
        ipc_port_t old;

        old = task->itk_registered[i];
        task->itk_registered[i] = ports[i];
        ports[i] = old;
    }
    itk_unlock(task);

    for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
        if (IP_VALID(ports[i]))
            ipc_port_release_send(ports[i]);

    /*
     * Now that the operation is known to be successful,
     * we can free the memory.
     */

    if (portsCnt != 0)
        kfree(memory,
            (vm_size_t) (portsCnt * sizeof(mach_port_t)));

    return KERN_SUCCESS;
}
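
/*
 * Illustrative user-space sketch (not part of this file): registering a small
 * set of ports so that children created later can find them with
 * mach_ports_lookup(). 'svc_port' is a hypothetical send right; at most
 * TASK_PORT_REGISTER_MAX entries are accepted.
 *
 *	mach_port_t init_ports[1] = { svc_port };
 *	kern_return_t kr = mach_ports_register(mach_task_self(),
 *	    init_ports, 1);
 */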
/*
 * Routine: mach_ports_lookup [kernel call]
 *	Retrieves (clones) the stashed port send rights.
 * Conditions:
 *	Nothing locked. If successful, the caller gets
 *	rights and memory.
 * Returns:
 *	KERN_SUCCESS            Retrieved the send rights.
 *	KERN_INVALID_ARGUMENT   The task is null.
 *	KERN_INVALID_ARGUMENT   The task is dead.
 *	KERN_RESOURCE_SHORTAGE  Couldn't allocate memory.
 */

kern_return_t
mach_ports_lookup(
    task_t task,
    mach_port_array_t *portsp,
    mach_msg_type_number_t *portsCnt)
{
    void *memory;
    vm_size_t size;
    ipc_port_t *ports;
    int i;

    if (task == TASK_NULL)
        return KERN_INVALID_ARGUMENT;

    size = (vm_size_t) (TASK_PORT_REGISTER_MAX * sizeof(ipc_port_t));

    memory = kalloc(size);
    if (memory == 0)
        return KERN_RESOURCE_SHORTAGE;

    itk_lock(task);
    if (task->itk_self == IP_NULL) {
        itk_unlock(task);

        kfree(memory, size);
        return KERN_INVALID_ARGUMENT;
    }

    ports = (ipc_port_t *) memory;

    /*
     * Clone port rights. Because kalloc'd memory
     * is wired, we won't fault while holding the task lock.
     */

    for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
        ports[i] = ipc_port_copy_send(task->itk_registered[i]);

    itk_unlock(task);

    *portsp = (mach_port_array_t) ports;
    *portsCnt = TASK_PORT_REGISTER_MAX;
    return KERN_SUCCESS;
}
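
/*
 * Illustrative user-space sketch (not part of this file): retrieving the
 * registered ports. The array comes back as out-of-line memory and is
 * conventionally released with vm_deallocate() once the rights have been
 * copied out.
 *
 *	mach_port_array_t ports = NULL;
 *	mach_msg_type_number_t cnt = 0;
 *	if (mach_ports_lookup(mach_task_self(), &ports, &cnt) == KERN_SUCCESS) {
 *		// ports[0..cnt-1] are send rights (possibly MACH_PORT_NULL)
 *		vm_deallocate(mach_task_self(), (vm_address_t)ports,
 *		    cnt * sizeof(ports[0]));
 *	}
 */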
kern_return_t
task_conversion_eval(task_t caller, task_t victim)
{
    /*
     * Tasks are allowed to resolve their own task ports, and the kernel is
     * allowed to resolve anyone's task port.
     */
    if (caller == kernel_task) {
        return KERN_SUCCESS;
    }

    if (caller == victim) {
        return KERN_SUCCESS;
    }

    /*
     * Only the kernel can resolve the kernel's task port. We've established
     * by this point that the caller is not kernel_task.
     */
    if (victim == TASK_NULL || victim == kernel_task) {
        return KERN_INVALID_SECURITY;
    }

#if CONFIG_EMBEDDED
    /*
     * On embedded platforms, only a platform binary can resolve the task port
     * of another platform binary.
     */
    if ((victim->t_flags & TF_PLATFORM) && !(caller->t_flags & TF_PLATFORM)) {
#if SECURE_KERNEL
        return KERN_INVALID_SECURITY;
#else
        if (cs_relax_platform_task_ports) {
            return KERN_SUCCESS;
        } else {
            return KERN_INVALID_SECURITY;
        }
#endif /* SECURE_KERNEL */
    }
#endif /* CONFIG_EMBEDDED */

    return KERN_SUCCESS;
}
/*
 * Routine: convert_port_to_locked_task
 *	Internal helper routine to convert from a port to a locked
 *	task. Used by several routines that try to convert from a
 *	task port to a reference on some task related object.
 * Conditions:
 *	Nothing locked, blocking OK.
 */

task_t
convert_port_to_locked_task(ipc_port_t port)
{
    int try_failed_count = 0;

    while (IP_VALID(port)) {
        task_t ct = current_task();
        task_t task;

        ip_lock(port);
        if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK)) {
            ip_unlock(port);
            return TASK_NULL;
        }
        task = (task_t) port->ip_kobject;
        assert(task != TASK_NULL);

        if (task_conversion_eval(ct, task)) {
            ip_unlock(port);
            return TASK_NULL;
        }

        /*
         * Normal lock ordering puts task_lock() before ip_lock().
         * Attempt out-of-order locking here.
         */
        if (task_lock_try(task)) {
            ip_unlock(port);
            return task;
        }
        try_failed_count++;

        ip_unlock(port);
        mutex_pause(try_failed_count);
    }
    return TASK_NULL;
}
/*
 * Routine: convert_port_to_locked_task_inspect
 *	Internal helper routine to convert from a port to a locked
 *	task inspect right. Used by internal routines that try to convert from a
 *	task inspect port to a reference on some task related object.
 * Conditions:
 *	Nothing locked, blocking OK.
 */

task_inspect_t
convert_port_to_locked_task_inspect(ipc_port_t port)
{
    int try_failed_count = 0;

    while (IP_VALID(port)) {
        task_inspect_t task;

        ip_lock(port);
        if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK)) {
            ip_unlock(port);
            return TASK_INSPECT_NULL;
        }
        task = (task_inspect_t)port->ip_kobject;
        assert(task != TASK_INSPECT_NULL);
        /*
         * Normal lock ordering puts task_lock() before ip_lock().
         * Attempt out-of-order locking here.
         */
        if (task_lock_try((task_t)task)) {
            ip_unlock(port);
            return task;
        }
        try_failed_count++;

        ip_unlock(port);
        mutex_pause(try_failed_count);
    }
    return TASK_INSPECT_NULL;
}
/*
 * Routine: convert_port_to_task
 *	Convert from a port to a task.
 *	Doesn't consume the port ref; produces a task ref,
 *	which may be null.
 */

task_t
convert_port_to_task(
    ipc_port_t port)
{
    return convert_port_to_task_with_exec_token(port, NULL);
}
/*
 * Routine: convert_port_to_task_with_exec_token
 *	Convert from a port to a task and return
 *	the exec token stored in the task.
 *	Doesn't consume the port ref; produces a task ref,
 *	which may be null.
 */

task_t
convert_port_to_task_with_exec_token(
    ipc_port_t port,
    uint32_t *exec_token)
{
    task_t task = TASK_NULL;

    if (IP_VALID(port)) {
        ip_lock(port);

        if (ip_active(port) &&
            ip_kotype(port) == IKOT_TASK) {
            task_t ct = current_task();
            task = (task_t)port->ip_kobject;
            assert(task != TASK_NULL);

            if (task_conversion_eval(ct, task)) {
                ip_unlock(port);
                return TASK_NULL;
            }

            if (exec_token) {
                *exec_token = task->exec_token;
            }
            task_reference_internal(task);
        }

        ip_unlock(port);
    }

    return task;
}
/*
 * Routine: convert_port_to_task_name
 *	Convert from a port to a task name.
 *	Doesn't consume the port ref; produces a task name ref,
 *	which may be null.
 */

task_name_t
convert_port_to_task_name(
    ipc_port_t port)
{
    task_name_t task = TASK_NULL;

    if (IP_VALID(port)) {
        ip_lock(port);

        if (ip_active(port) &&
            (ip_kotype(port) == IKOT_TASK ||
             ip_kotype(port) == IKOT_TASK_NAME)) {
            task = (task_name_t)port->ip_kobject;
            assert(task != TASK_NAME_NULL);

            task_reference_internal(task);
        }

        ip_unlock(port);
    }

    return task;
}
/*
 * Routine: convert_port_to_task_inspect
 *	Convert from a port to a task inspection right.
 *	Doesn't consume the port ref; produces a task ref,
 *	which may be null.
 */

task_inspect_t
convert_port_to_task_inspect(
    ipc_port_t port)
{
    task_inspect_t task = TASK_INSPECT_NULL;

    if (IP_VALID(port)) {
        ip_lock(port);

        if (ip_active(port) &&
            ip_kotype(port) == IKOT_TASK) {
            task = (task_inspect_t)port->ip_kobject;
            assert(task != TASK_INSPECT_NULL);

            task_reference_internal(task);
        }

        ip_unlock(port);
    }

    return task;
}
/*
 * Routine: convert_port_to_task_suspension_token
 *	Convert from a port to a task suspension token.
 *	Doesn't consume the port ref; produces a suspension token ref,
 *	which may be null.
 */

task_suspension_token_t
convert_port_to_task_suspension_token(
    ipc_port_t port)
{
    task_suspension_token_t task = TASK_NULL;

    if (IP_VALID(port)) {
        ip_lock(port);

        if (ip_active(port) &&
            ip_kotype(port) == IKOT_TASK_RESUME) {
            task = (task_suspension_token_t)port->ip_kobject;
            assert(task != TASK_NULL);

            task_reference_internal(task);
        }

        ip_unlock(port);
    }

    return task;
}
/*
 * Routine: convert_port_to_space
 *	Convert from a port to a space.
 *	Doesn't consume the port ref; produces a space ref,
 *	which may be null.
 */

ipc_space_t
convert_port_to_space(
    ipc_port_t port)
{
    ipc_space_t space;
    task_t task;

    task = convert_port_to_locked_task(port);

    if (task == TASK_NULL)
        return IPC_SPACE_NULL;

    if (!task->active) {
        task_unlock(task);
        return IPC_SPACE_NULL;
    }

    space = task->itk_space;
    is_reference(space);
    task_unlock(task);
    return space;
}
/*
 * Routine: convert_port_to_space_inspect
 *	Convert from a port to a space inspect right.
 *	Doesn't consume the port ref; produces a space inspect ref,
 *	which may be null.
 */

ipc_space_inspect_t
convert_port_to_space_inspect(
    ipc_port_t port)
{
    ipc_space_inspect_t space;
    task_inspect_t task;

    task = convert_port_to_locked_task_inspect(port);

    if (task == TASK_INSPECT_NULL)
        return IPC_SPACE_INSPECT_NULL;

    if (!task->active) {
        task_unlock((task_t)task);
        return IPC_SPACE_INSPECT_NULL;
    }

    space = (ipc_space_inspect_t)task->itk_space;
    is_reference((ipc_space_t)space);
    task_unlock((task_t)task);
    return space;
}
/*
 * Routine: convert_port_to_map
 *	Convert from a port to a map.
 *	Doesn't consume the port ref; produces a map ref,
 *	which may be null.
 */

vm_map_t
convert_port_to_map(
    ipc_port_t port)
{
    task_t task;
    vm_map_t map;

    task = convert_port_to_locked_task(port);

    if (task == TASK_NULL)
        return VM_MAP_NULL;

    if (!task->active) {
        task_unlock(task);
        return VM_MAP_NULL;
    }

    map = task->map;
    vm_map_reference_swap(map);
    task_unlock(task);
    return map;
}
/*
 * Routine: convert_port_to_thread
 *	Convert from a port to a thread.
 *	Doesn't consume the port ref; produces a thread ref,
 *	which may be null.
 */

thread_t
convert_port_to_thread(
    ipc_port_t port)
{
    thread_t thread = THREAD_NULL;

    if (IP_VALID(port)) {
        ip_lock(port);

        if (ip_active(port) &&
            ip_kotype(port) == IKOT_THREAD) {
            thread = (thread_t)port->ip_kobject;
            assert(thread != THREAD_NULL);

            /* Use task conversion rules for thread control conversions */
            if (task_conversion_eval(current_task(), thread->task) != KERN_SUCCESS) {
                ip_unlock(port);
                return THREAD_NULL;
            }

            thread_reference_internal(thread);
        }

        ip_unlock(port);
    }

    return thread;
}
/*
 * Routine: convert_port_to_thread_inspect
 *	Convert from a port to a thread inspection right.
 *	Doesn't consume the port ref; produces a thread ref,
 *	which may be null.
 */

thread_inspect_t
convert_port_to_thread_inspect(
    ipc_port_t port)
{
    thread_inspect_t thread = THREAD_INSPECT_NULL;

    if (IP_VALID(port)) {
        ip_lock(port);

        if (ip_active(port) &&
            ip_kotype(port) == IKOT_THREAD) {
            thread = (thread_inspect_t)port->ip_kobject;
            assert(thread != THREAD_INSPECT_NULL);
            thread_reference_internal((thread_t)thread);
        }
        ip_unlock(port);
    }

    return thread;
}
/*
 * Routine: convert_thread_inspect_to_port
 *	Convert from a thread inspect reference to a port.
 *	Consumes a thread ref;
 *	As we never export thread inspect ports, always
 *	creates a NULL port.
 */

ipc_port_t
convert_thread_inspect_to_port(thread_inspect_t thread)
{
    thread_deallocate(thread);

    return IP_NULL;
}
/*
 * Routine: port_name_to_thread
 *	Convert from a port name to a thread reference.
 *	A name of MACH_PORT_NULL is valid for the null thread.
 *
 *	TODO: Could this be faster if it were ipc_port_translate_send based, like thread_switch?
 *	      We could avoid extra lock/unlock and extra ref operations on the port.
 */

thread_t
port_name_to_thread(
    mach_port_name_t name)
{
    thread_t thread = THREAD_NULL;
    ipc_port_t kport;

    if (MACH_PORT_VALID(name)) {
        if (ipc_object_copyin(current_space(), name,
                MACH_MSG_TYPE_COPY_SEND,
                (ipc_object_t *)&kport) != KERN_SUCCESS)
            return (THREAD_NULL);

        thread = convert_port_to_thread(kport);

        if (IP_VALID(kport))
            ipc_port_release_send(kport);
    }

    return thread;
}
task_t
port_name_to_task(
    mach_port_name_t name)
{
    ipc_port_t kern_port;
    kern_return_t kr;
    task_t task = TASK_NULL;

    if (MACH_PORT_VALID(name)) {
        kr = ipc_object_copyin(current_space(), name,
                MACH_MSG_TYPE_COPY_SEND,
                (ipc_object_t *) &kern_port);
        if (kr != KERN_SUCCESS)
            return TASK_NULL;

        task = convert_port_to_task(kern_port);

        if (IP_VALID(kern_port))
            ipc_port_release_send(kern_port);
    }
    return task;
}
task_inspect_t
port_name_to_task_inspect(
    mach_port_name_t name)
{
    ipc_port_t kern_port;
    kern_return_t kr;
    task_inspect_t ti = TASK_INSPECT_NULL;

    if (MACH_PORT_VALID(name)) {
        kr = ipc_object_copyin(current_space(), name,
                MACH_MSG_TYPE_COPY_SEND,
                (ipc_object_t *)&kern_port);
        if (kr != KERN_SUCCESS)
            return TASK_NULL;

        ti = convert_port_to_task_inspect(kern_port);

        if (IP_VALID(kern_port))
            ipc_port_release_send(kern_port);
    }
    return ti;
}
/*
 * Routine: port_name_to_host
 *	Convert from a port name to a host pointer.
 *	NOTE: This does _not_ return a +1 reference to the host_t
 */

host_t
port_name_to_host(
    mach_port_name_t name)
{
    host_t host = HOST_NULL;
    kern_return_t kr;
    ipc_port_t port;

    if (MACH_PORT_VALID(name)) {
        kr = ipc_port_translate_send(current_space(), name, &port);
        if (kr == KERN_SUCCESS) {
            host = convert_port_to_host(port);
            ip_unlock(port);
        }
    }
    return host;
}
/*
 * Routine: convert_task_to_port
 *	Convert from a task to a port.
 *	Consumes a task ref; produces a naked send right
 *	which may be invalid.
 */

ipc_port_t
convert_task_to_port(
    task_t task)
{
    ipc_port_t port;

    itk_lock(task);

    if (task->itk_self != IP_NULL)
        port = ipc_port_make_send(task->itk_self);
    else
        port = IP_NULL;

    itk_unlock(task);

    task_deallocate(task);
    return port;
}
/*
 * Routine: convert_task_inspect_to_port
 *	Convert from a task inspect reference to a port.
 *	Consumes a task ref;
 *	As we never export task inspect ports, always
 *	creates a NULL port.
 */

ipc_port_t
convert_task_inspect_to_port(
    task_inspect_t task)
{
    task_deallocate(task);

    return IP_NULL;
}
/*
 * Routine: convert_task_suspend_token_to_port
 *	Convert from a task suspension token to a port.
 *	Consumes a task suspension token ref; produces a naked send-once right
 *	which may be invalid.
 */

ipc_port_t
convert_task_suspension_token_to_port(
    task_suspension_token_t task)
{
    ipc_port_t port;

    task_lock(task);
    if (task->active) {
        if (task->itk_resume == IP_NULL) {
            task->itk_resume = ipc_port_alloc_kernel();
            if (!IP_VALID(task->itk_resume)) {
                panic("failed to create resume port");
            }

            ipc_kobject_set(task->itk_resume, (ipc_kobject_t) task, IKOT_TASK_RESUME);
        }

        /*
         * Create a send-once right for each instance of a direct user-called
         * task_suspend2 call. Each time one of these send-once rights is abandoned,
         * the notification handler will resume the target task.
         */
        port = ipc_port_make_sonce(task->itk_resume);
        assert(IP_VALID(port));
    } else {
        port = IP_NULL;
    }

    task_unlock(task);
    task_suspension_token_deallocate(task);

    return port;
}
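
/*
 * Illustrative user-space sketch (not part of this file): the send-once right
 * minted here is what task_suspend2() hands back as a suspension token, and
 * task_resume2() (or destruction of the token) resumes the task. 'target' is
 * a hypothetical task send right held by the caller.
 *
 *	task_suspension_token_t token = MACH_PORT_NULL;
 *	if (task_suspend2(target, &token) == KERN_SUCCESS) {
 *		// ... the target stays suspended while the token is held ...
 *		task_resume2(token);
 *	}
 */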
/*
 * Routine: convert_task_name_to_port
 *	Convert from a task name ref to a port.
 *	Consumes a task name ref; produces a naked send right
 *	which may be invalid.
 */

ipc_port_t
convert_task_name_to_port(
    task_name_t task_name)
{
    ipc_port_t port;

    itk_lock(task_name);
    if (task_name->itk_nself != IP_NULL)
        port = ipc_port_make_send(task_name->itk_nself);
    else
        port = IP_NULL;
    itk_unlock(task_name);

    task_name_deallocate(task_name);
    return port;
}
/*
 * Routine: convert_thread_to_port
 *	Convert from a thread to a port.
 *	Consumes a thread ref; produces a naked send right
 *	which may be invalid.
 */

ipc_port_t
convert_thread_to_port(
    thread_t thread)
{
    ipc_port_t port;

    thread_mtx_lock(thread);

    if (thread->ith_self != IP_NULL)
        port = ipc_port_make_send(thread->ith_self);
    else
        port = IP_NULL;

    thread_mtx_unlock(thread);

    thread_deallocate(thread);
    return port;
}
/*
 * Routine: space_deallocate
 *	Deallocate a space ref produced by convert_port_to_space.
 */

void
space_deallocate(
    ipc_space_t space)
{
    if (space != IS_NULL)
        is_release(space);
}
/*
 * Routine: space_inspect_deallocate
 *	Deallocate a space inspect ref produced by convert_port_to_space_inspect.
 */

void
space_inspect_deallocate(
    ipc_space_inspect_t space)
{
    if (space != IS_INSPECT_NULL)
        is_release((ipc_space_t)space);
}
/*
 * Routine: thread/task_set_exception_ports [kernel call]
 *	Sets the thread/task exception port, flavor and
 *	behavior for the exception types specified by the mask.
 *	There will be one send right per exception per valid
 *	element of exception_mask.
 * Conditions:
 *	Nothing locked. If successful, consumes
 *	the supplied send right.
 * Returns:
 *	KERN_SUCCESS            Changed the special port.
 *	KERN_INVALID_ARGUMENT   The thread is null,
 *	                        Illegal mask bit set.
 *	                        Illegal exception behavior.
 *	KERN_FAILURE            The thread is dead.
 */

kern_return_t
thread_set_exception_ports(
    thread_t thread,
    exception_mask_t exception_mask,
    ipc_port_t new_port,
    exception_behavior_t new_behavior,
    thread_state_flavor_t new_flavor)
{
    ipc_port_t old_port[EXC_TYPES_COUNT];
    boolean_t privileged = current_task()->sec_token.val[0] == 0;
    int i;

    struct label *new_label;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    if (exception_mask & ~EXC_MASK_VALID)
        return (KERN_INVALID_ARGUMENT);

    if (IP_VALID(new_port)) {
        switch (new_behavior & ~MACH_EXCEPTION_CODES) {

        case EXCEPTION_DEFAULT:
        case EXCEPTION_STATE:
        case EXCEPTION_STATE_IDENTITY:
            break;

        default:
            return (KERN_INVALID_ARGUMENT);
        }
    }

    /*
     * Check the validity of the thread_state_flavor by calling the
     * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
     * osfmk/mach/ARCHITECTURE/thread_status.h
     */
    if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor))
        return (KERN_INVALID_ARGUMENT);

    new_label = mac_exc_create_label_for_current_proc();

    thread_mtx_lock(thread);

    if (!thread->active) {
        thread_mtx_unlock(thread);

        return (KERN_FAILURE);
    }

    if (thread->exc_actions == NULL) {
        ipc_thread_init_exc_actions(thread);
    }
    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
        if ((exception_mask & (1 << i))
            && mac_exc_update_action_label(&thread->exc_actions[i], new_label) == 0) {
            old_port[i] = thread->exc_actions[i].port;
            thread->exc_actions[i].port = ipc_port_copy_send(new_port);
            thread->exc_actions[i].behavior = new_behavior;
            thread->exc_actions[i].flavor = new_flavor;
            thread->exc_actions[i].privileged = privileged;
        } else
            old_port[i] = IP_NULL;
    }

    thread_mtx_unlock(thread);

    mac_exc_free_label(new_label);

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
        if (IP_VALID(old_port[i]))
            ipc_port_release_send(old_port[i]);

    if (IP_VALID(new_port))		/* consume send right */
        ipc_port_release_send(new_port);

    return (KERN_SUCCESS);
}
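
/*
 * Illustrative user-space sketch (not part of this file): directing
 * EXC_BAD_ACCESS for the current thread to a port the caller owns.
 * 'exc_port' is a hypothetical receive/send right set up beforehand;
 * error handling elided.
 *
 *	mach_port_t th = mach_thread_self();
 *	kern_return_t kr = thread_set_exception_ports(th,
 *	    EXC_MASK_BAD_ACCESS, exc_port,
 *	    EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES, THREAD_STATE_NONE);
 *	mach_port_deallocate(mach_task_self(), th);
 */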
kern_return_t
task_set_exception_ports(
    task_t task,
    exception_mask_t exception_mask,
    ipc_port_t new_port,
    exception_behavior_t new_behavior,
    thread_state_flavor_t new_flavor)
{
    ipc_port_t old_port[EXC_TYPES_COUNT];
    boolean_t privileged = current_task()->sec_token.val[0] == 0;
    int i;

    struct label *new_label;

    if (task == TASK_NULL)
        return (KERN_INVALID_ARGUMENT);

    if (exception_mask & ~EXC_MASK_VALID)
        return (KERN_INVALID_ARGUMENT);

    if (IP_VALID(new_port)) {
        switch (new_behavior & ~MACH_EXCEPTION_CODES) {

        case EXCEPTION_DEFAULT:
        case EXCEPTION_STATE:
        case EXCEPTION_STATE_IDENTITY:
            break;

        default:
            return (KERN_INVALID_ARGUMENT);
        }
    }

    /*
     * Check the validity of the thread_state_flavor by calling the
     * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
     * osfmk/mach/ARCHITECTURE/thread_status.h
     */
    if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor))
        return (KERN_INVALID_ARGUMENT);

    new_label = mac_exc_create_label_for_current_proc();

    itk_lock(task);

    if (task->itk_self == IP_NULL) {
        itk_unlock(task);

        return (KERN_FAILURE);
    }

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
        if ((exception_mask & (1 << i))
            && mac_exc_update_action_label(&task->exc_actions[i], new_label) == 0) {
            old_port[i] = task->exc_actions[i].port;
            task->exc_actions[i].port =
                ipc_port_copy_send(new_port);
            task->exc_actions[i].behavior = new_behavior;
            task->exc_actions[i].flavor = new_flavor;
            task->exc_actions[i].privileged = privileged;
        } else
            old_port[i] = IP_NULL;
    }

    itk_unlock(task);

    mac_exc_free_label(new_label);

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
        if (IP_VALID(old_port[i]))
            ipc_port_release_send(old_port[i]);

    if (IP_VALID(new_port))		/* consume send right */
        ipc_port_release_send(new_port);

    return (KERN_SUCCESS);
}
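
/*
 * Illustrative user-space sketch (not part of this file): a crash-reporter
 * style handler claiming task-wide exceptions. 'exc_port' is a hypothetical
 * port the caller will service with mach_msg(); EXC_MASK_ALL requests the
 * standard exception types.
 *
 *	kern_return_t kr = task_set_exception_ports(mach_task_self(),
 *	    EXC_MASK_ALL, exc_port,
 *	    EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES,
 *	    MACHINE_THREAD_STATE);
 */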
/*
 * Routine: thread/task_swap_exception_ports [kernel call]
 *	Sets the thread/task exception port, flavor and
 *	behavior for the exception types specified by the
 *	mask.
 *
 *	The old ports, behavior and flavors are returned.
 *	Count specifies the array sizes on input and
 *	the number of returned ports etc. on output. The
 *	arrays must be large enough to hold all the returned
 *	data, MIG returns an error otherwise. The masks
 *	array specifies the corresponding exception type(s).
 * Conditions:
 *	Nothing locked. If successful, consumes
 *	the supplied send right.
 *
 *	Returns up to [in] CountCnt elements.
 * Returns:
 *	KERN_SUCCESS            Changed the special port.
 *	KERN_INVALID_ARGUMENT   The thread is null,
 *	                        Illegal mask bit set.
 *	                        Illegal exception behavior.
 *	KERN_FAILURE            The thread is dead.
 */

kern_return_t
thread_swap_exception_ports(
    thread_t thread,
    exception_mask_t exception_mask,
    ipc_port_t new_port,
    exception_behavior_t new_behavior,
    thread_state_flavor_t new_flavor,
    exception_mask_array_t masks,
    mach_msg_type_number_t *CountCnt,
    exception_port_array_t ports,
    exception_behavior_array_t behaviors,
    thread_state_flavor_array_t flavors)
{
    ipc_port_t old_port[EXC_TYPES_COUNT];
    boolean_t privileged = current_task()->sec_token.val[0] == 0;
    unsigned int i, j, count;

    struct label *new_label;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    if (exception_mask & ~EXC_MASK_VALID)
        return (KERN_INVALID_ARGUMENT);

    if (IP_VALID(new_port)) {
        switch (new_behavior & ~MACH_EXCEPTION_CODES) {

        case EXCEPTION_DEFAULT:
        case EXCEPTION_STATE:
        case EXCEPTION_STATE_IDENTITY:
            break;

        default:
            return (KERN_INVALID_ARGUMENT);
        }
    }

    if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor))
        return (KERN_INVALID_ARGUMENT);

    new_label = mac_exc_create_label_for_current_proc();

    thread_mtx_lock(thread);

    if (!thread->active) {
        thread_mtx_unlock(thread);

        return (KERN_FAILURE);
    }

    if (thread->exc_actions == NULL) {
        ipc_thread_init_exc_actions(thread);
    }

    assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
    for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
        if ((exception_mask & (1 << i))
            && mac_exc_update_action_label(&thread->exc_actions[i], new_label) == 0) {
            for (j = 0; j < count; ++j) {
                /*
                 * search for an identical entry, if found
                 * set corresponding mask for this exception.
                 */
                if (thread->exc_actions[i].port == ports[j] &&
                    thread->exc_actions[i].behavior == behaviors[j] &&
                    thread->exc_actions[i].flavor == flavors[j]) {
                    masks[j] |= (1 << i);
                    break;
                }
            }

            if (j == count) {
                masks[j] = (1 << i);
                ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);

                behaviors[j] = thread->exc_actions[i].behavior;
                flavors[j] = thread->exc_actions[i].flavor;
                ++count;
            }

            old_port[i] = thread->exc_actions[i].port;
            thread->exc_actions[i].port = ipc_port_copy_send(new_port);
            thread->exc_actions[i].behavior = new_behavior;
            thread->exc_actions[i].flavor = new_flavor;
            thread->exc_actions[i].privileged = privileged;
        } else
            old_port[i] = IP_NULL;
    }

    thread_mtx_unlock(thread);

    mac_exc_free_label(new_label);

    while (--i >= FIRST_EXCEPTION) {
        if (IP_VALID(old_port[i]))
            ipc_port_release_send(old_port[i]);
    }

    if (IP_VALID(new_port))		/* consume send right */
        ipc_port_release_send(new_port);

    *CountCnt = count;

    return (KERN_SUCCESS);
}
kern_return_t
task_swap_exception_ports(
    task_t task,
    exception_mask_t exception_mask,
    ipc_port_t new_port,
    exception_behavior_t new_behavior,
    thread_state_flavor_t new_flavor,
    exception_mask_array_t masks,
    mach_msg_type_number_t *CountCnt,
    exception_port_array_t ports,
    exception_behavior_array_t behaviors,
    thread_state_flavor_array_t flavors)
{
    ipc_port_t old_port[EXC_TYPES_COUNT];
    boolean_t privileged = current_task()->sec_token.val[0] == 0;
    unsigned int i, j, count;

    struct label *new_label;

    if (task == TASK_NULL)
        return (KERN_INVALID_ARGUMENT);

    if (exception_mask & ~EXC_MASK_VALID)
        return (KERN_INVALID_ARGUMENT);

    if (IP_VALID(new_port)) {
        switch (new_behavior & ~MACH_EXCEPTION_CODES) {

        case EXCEPTION_DEFAULT:
        case EXCEPTION_STATE:
        case EXCEPTION_STATE_IDENTITY:
            break;

        default:
            return (KERN_INVALID_ARGUMENT);
        }
    }

    if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor))
        return (KERN_INVALID_ARGUMENT);

    new_label = mac_exc_create_label_for_current_proc();

    itk_lock(task);

    if (task->itk_self == IP_NULL) {
        itk_unlock(task);

        return (KERN_FAILURE);
    }

    assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
    for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
        if ((exception_mask & (1 << i))
            && mac_exc_update_action_label(&task->exc_actions[i], new_label) == 0) {
            for (j = 0; j < count; j++) {
                /*
                 * search for an identical entry, if found
                 * set corresponding mask for this exception.
                 */
                if (task->exc_actions[i].port == ports[j] &&
                    task->exc_actions[i].behavior == behaviors[j] &&
                    task->exc_actions[i].flavor == flavors[j]) {
                    masks[j] |= (1 << i);
                    break;
                }
            }

            if (j == count) {
                masks[j] = (1 << i);
                ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
                behaviors[j] = task->exc_actions[i].behavior;
                flavors[j] = task->exc_actions[i].flavor;
                ++count;
            }

            old_port[i] = task->exc_actions[i].port;

            task->exc_actions[i].port = ipc_port_copy_send(new_port);
            task->exc_actions[i].behavior = new_behavior;
            task->exc_actions[i].flavor = new_flavor;
            task->exc_actions[i].privileged = privileged;
        } else
            old_port[i] = IP_NULL;
    }

    itk_unlock(task);

    mac_exc_free_label(new_label);

    while (--i >= FIRST_EXCEPTION) {
        if (IP_VALID(old_port[i]))
            ipc_port_release_send(old_port[i]);
    }

    if (IP_VALID(new_port))		/* consume send right */
        ipc_port_release_send(new_port);

    *CountCnt = count;

    return (KERN_SUCCESS);
}
/*
 * Routine: thread/task_get_exception_ports [kernel call]
 *	Clones a send right for each of the thread/task's exception
 *	ports specified in the mask and returns the behaviour
 *	and flavor of said port.
 *
 *	Returns up to [in] CountCnt elements.
 * Returns:
 *	KERN_SUCCESS            Extracted a send right.
 *	KERN_INVALID_ARGUMENT   The thread is null,
 *	                        Invalid special port,
 *	                        Illegal mask bit set.
 *	KERN_FAILURE            The thread is dead.
 */

kern_return_t
thread_get_exception_ports(
    thread_t thread,
    exception_mask_t exception_mask,
    exception_mask_array_t masks,
    mach_msg_type_number_t *CountCnt,
    exception_port_array_t ports,
    exception_behavior_array_t behaviors,
    thread_state_flavor_array_t flavors)
{
    unsigned int i, j, count;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    if (exception_mask & ~EXC_MASK_VALID)
        return (KERN_INVALID_ARGUMENT);

    thread_mtx_lock(thread);

    if (!thread->active) {
        thread_mtx_unlock(thread);

        return (KERN_FAILURE);
    }

    count = 0;

    if (thread->exc_actions == NULL) {
        goto done;
    }

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
        if (exception_mask & (1 << i)) {
            for (j = 0; j < count; ++j) {
                /*
                 * search for an identical entry, if found
                 * set corresponding mask for this exception.
                 */
                if (thread->exc_actions[i].port == ports[j] &&
                    thread->exc_actions[i].behavior == behaviors[j] &&
                    thread->exc_actions[i].flavor == flavors[j]) {
                    masks[j] |= (1 << i);
                    break;
                }
            }

            if (j == count) {
                masks[j] = (1 << i);
                ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);
                behaviors[j] = thread->exc_actions[i].behavior;
                flavors[j] = thread->exc_actions[i].flavor;
                ++count;
                if (count >= *CountCnt)
                    break;
            }
        }
    }

done:
    thread_mtx_unlock(thread);

    *CountCnt = count;

    return (KERN_SUCCESS);
}
kern_return_t
task_get_exception_ports(
    task_t task,
    exception_mask_t exception_mask,
    exception_mask_array_t masks,
    mach_msg_type_number_t *CountCnt,
    exception_port_array_t ports,
    exception_behavior_array_t behaviors,
    thread_state_flavor_array_t flavors)
{
    unsigned int i, j, count;

    if (task == TASK_NULL)
        return (KERN_INVALID_ARGUMENT);

    if (exception_mask & ~EXC_MASK_VALID)
        return (KERN_INVALID_ARGUMENT);

    itk_lock(task);

    if (task->itk_self == IP_NULL) {
        itk_unlock(task);

        return (KERN_FAILURE);
    }

    count = 0;

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
        if (exception_mask & (1 << i)) {
            for (j = 0; j < count; ++j) {
                /*
                 * search for an identical entry, if found
                 * set corresponding mask for this exception.
                 */
                if (task->exc_actions[i].port == ports[j] &&
                    task->exc_actions[i].behavior == behaviors[j] &&
                    task->exc_actions[i].flavor == flavors[j]) {
                    masks[j] |= (1 << i);
                    break;
                }
            }

            if (j == count) {
                masks[j] = (1 << i);
                ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
                behaviors[j] = task->exc_actions[i].behavior;
                flavors[j] = task->exc_actions[i].flavor;
                ++count;
                if (count > *CountCnt)
                    break;
            }
        }
    }

    itk_unlock(task);

    *CountCnt = count;

    return (KERN_SUCCESS);
}
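
/*
 * Illustrative user-space sketch (not part of this file): snapshotting the
 * current task-level handlers before installing new ones, so they can be
 * restored or chained to later. Array sizes follow the EXC_TYPES_COUNT bound
 * used above; error handling elided.
 *
 *	exception_mask_t masks[EXC_TYPES_COUNT];
 *	mach_port_t old_ports[EXC_TYPES_COUNT];
 *	exception_behavior_t behaviors[EXC_TYPES_COUNT];
 *	thread_state_flavor_t flavors[EXC_TYPES_COUNT];
 *	mach_msg_type_number_t cnt = EXC_TYPES_COUNT;
 *
 *	kern_return_t kr = task_get_exception_ports(mach_task_self(),
 *	    EXC_MASK_ALL, masks, &cnt, old_ports, behaviors, flavors);
 *	// On success, entries 0..cnt-1 describe the distinct handlers and the
 *	// exception masks routed to each.
 */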