/*
 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
/*
 *	File:		ipc_tt.c
 *	Purpose:
 *		Task and thread related IPC functions.
 */

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mach_param.h>
#include <mach/task_special_ports.h>
#include <mach/thread_special_ports.h>
#include <mach/thread_status.h>
#include <mach/exception_types.h>
#include <mach/memory_object_types.h>
#include <mach/mach_traps.h>
#include <mach/task_server.h>
#include <mach/thread_act_server.h>
#include <mach/mach_host_server.h>
#include <mach/host_priv_server.h>
#include <mach/vm_map_server.h>

#include <kern/kern_types.h>
#include <kern/host.h>
#include <kern/ipc_kobject.h>
#include <kern/ipc_tt.h>
#include <kern/kalloc.h>
#include <kern/thread.h>
#include <kern/misc_protos.h>

#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>

#include <security/mac_mach_internal.h>
#if CONFIG_EMBEDDED && !SECURE_KERNEL
extern int cs_relax_platform_task_ports;
#endif

/* forward declarations */
task_t convert_port_to_locked_task(ipc_port_t port);
task_inspect_t convert_port_to_locked_task_inspect(ipc_port_t port);
static void ipc_port_bind_special_reply_port_locked(ipc_port_t port);
static kern_return_t ipc_port_unbind_special_reply_port(thread_t thread, boolean_t unbind_active_port);
kern_return_t task_conversion_eval(task_t caller, task_t victim);
/*
 *	Routine:	ipc_task_init
 *	Purpose:
 *		Initialize a task's IPC state.
 *
 *		If non-null, some state will be inherited from the parent.
 *		The parent must be appropriately initialized.
 *	Conditions:
 *		Nothing locked.
 */

void
ipc_task_init(
	task_t		task,
	task_t		parent)
{
	ipc_space_t space;
	ipc_port_t kport;
	ipc_port_t nport;
	kern_return_t kr;
	int i;
	ipc_port_t port;

	kr = ipc_space_create(&ipc_table_entries[0], &space);
	if (kr != KERN_SUCCESS)
		panic("ipc_task_init");

	space->is_task = task;

	kport = ipc_port_alloc_kernel();
	if (kport == IP_NULL)
		panic("ipc_task_init");

	nport = ipc_port_alloc_kernel();
	if (nport == IP_NULL)
		panic("ipc_task_init");

	itk_lock_init(task);
	task->itk_self = kport;
	task->itk_nself = nport;
	task->itk_resume = IP_NULL; /* Lazily allocated on-demand */
	if (task_is_a_corpse_fork(task)) {
		/*
		 * No sender's notification for corpse would not
		 * work with a naked send right in kernel.
		 */
		task->itk_sself = IP_NULL;
	} else {
		task->itk_sself = ipc_port_make_send(kport);
	}
	task->itk_debug_control = IP_NULL;
	task->itk_space = space;

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		mac_exc_associate_action_label(&task->exc_actions[i], mac_exc_create_label());
	}

	if (parent == TASK_NULL) {
		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			task->exc_actions[i].port = IP_NULL;
		}

		kr = host_get_host_port(host_priv_self(), &port);
		assert(kr == KERN_SUCCESS);
		task->itk_host = port;

		task->itk_bootstrap = IP_NULL;
		task->itk_seatbelt = IP_NULL;
		task->itk_gssd = IP_NULL;
		task->itk_task_access = IP_NULL;

		for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
			task->itk_registered[i] = IP_NULL;
	} else {
		itk_lock(parent);
		assert(parent->itk_self != IP_NULL);

		/* inherit registered ports */

		for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
			task->itk_registered[i] =
				ipc_port_copy_send(parent->itk_registered[i]);

		/* inherit exception and bootstrap ports */

		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			task->exc_actions[i].port =
				ipc_port_copy_send(parent->exc_actions[i].port);
			task->exc_actions[i].flavor =
				parent->exc_actions[i].flavor;
			task->exc_actions[i].behavior =
				parent->exc_actions[i].behavior;
			task->exc_actions[i].privileged =
				parent->exc_actions[i].privileged;
			mac_exc_inherit_action_label(parent->exc_actions + i,
				task->exc_actions + i);
		}

		task->itk_host =
			ipc_port_copy_send(parent->itk_host);

		task->itk_bootstrap =
			ipc_port_copy_send(parent->itk_bootstrap);

		task->itk_seatbelt =
			ipc_port_copy_send(parent->itk_seatbelt);

		task->itk_gssd =
			ipc_port_copy_send(parent->itk_gssd);

		task->itk_task_access =
			ipc_port_copy_send(parent->itk_task_access);

		itk_unlock(parent);
	}
}
/*
 *	Routine:	ipc_task_enable
 *	Purpose:
 *		Enable a task for IPC access.
 *	Conditions:
 *		Nothing locked.
 */

void
ipc_task_enable(
	task_t		task)
{
	ipc_port_t kport;
	ipc_port_t nport;

	itk_lock(task);
	kport = task->itk_self;
	if (kport != IP_NULL)
		ipc_kobject_set(kport, (ipc_kobject_t) task, IKOT_TASK);
	nport = task->itk_nself;
	if (nport != IP_NULL)
		ipc_kobject_set(nport, (ipc_kobject_t) task, IKOT_TASK_NAME);
	itk_unlock(task);
}

/*
 *	Routine:	ipc_task_disable
 *	Purpose:
 *		Disable IPC access to a task.
 *	Conditions:
 *		Nothing locked.
 */

void
ipc_task_disable(
	task_t		task)
{
	ipc_port_t kport;
	ipc_port_t nport;
	ipc_port_t rport;

	itk_lock(task);
	kport = task->itk_self;
	if (kport != IP_NULL)
		ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
	nport = task->itk_nself;
	if (nport != IP_NULL)
		ipc_kobject_set(nport, IKO_NULL, IKOT_NONE);

	rport = task->itk_resume;
	if (rport != IP_NULL) {
		/*
		 * From this point onwards this task is no longer accepting
		 * new suspensions.
		 *
		 * There are still outstanding suspensions on this task,
		 * even as it is being torn down. Disconnect the task
		 * from the rport, thereby "orphaning" the rport. The rport
		 * itself will go away only when the last suspension holder
		 * destroys his SO right to it -- when he either
		 * exits, or tries to actually use that last SO right to
		 * resume this (now non-existent) task.
		 */
		ipc_kobject_set(rport, IKO_NULL, IKOT_NONE);
	}
	itk_unlock(task);
}
/*
 *	Routine:	ipc_task_terminate
 *	Purpose:
 *		Clean up and destroy a task's IPC state.
 *	Conditions:
 *		Nothing locked.  The task must be suspended.
 *		(Or the current thread must be in the task.)
 */

void
ipc_task_terminate(
	task_t		task)
{
	ipc_port_t kport;
	ipc_port_t nport;
	ipc_port_t rport;
	int i;

	itk_lock(task);
	kport = task->itk_self;

	if (kport == IP_NULL) {
		/* the task is already terminated (can this happen?) */
		itk_unlock(task);
		return;
	}
	task->itk_self = IP_NULL;

	nport = task->itk_nself;
	assert(nport != IP_NULL);
	task->itk_nself = IP_NULL;

	rport = task->itk_resume;
	task->itk_resume = IP_NULL;

	itk_unlock(task);

	/* release the naked send rights */

	if (IP_VALID(task->itk_sself))
		ipc_port_release_send(task->itk_sself);

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (IP_VALID(task->exc_actions[i].port)) {
			ipc_port_release_send(task->exc_actions[i].port);
		}
		mac_exc_free_action_label(task->exc_actions + i);
	}

	if (IP_VALID(task->itk_host))
		ipc_port_release_send(task->itk_host);

	if (IP_VALID(task->itk_bootstrap))
		ipc_port_release_send(task->itk_bootstrap);

	if (IP_VALID(task->itk_seatbelt))
		ipc_port_release_send(task->itk_seatbelt);

	if (IP_VALID(task->itk_gssd))
		ipc_port_release_send(task->itk_gssd);

	if (IP_VALID(task->itk_task_access))
		ipc_port_release_send(task->itk_task_access);

	if (IP_VALID(task->itk_debug_control))
		ipc_port_release_send(task->itk_debug_control);

	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
		if (IP_VALID(task->itk_registered[i]))
			ipc_port_release_send(task->itk_registered[i]);

	/* destroy the kernel ports */
	ipc_port_dealloc_kernel(kport);
	ipc_port_dealloc_kernel(nport);
	if (rport != IP_NULL)
		ipc_port_dealloc_kernel(rport);

	itk_lock_destroy(task);
}
/*
 *	Routine:	ipc_task_reset
 *	Purpose:
 *		Reset a task's IPC state to protect it when
 *		it enters an elevated security context. The
 *		task name port can remain the same - since
 *		it represents no specific privilege.
 *	Conditions:
 *		Nothing locked.  The task must be suspended.
 *		(Or the current thread must be in the task.)
 */

void
ipc_task_reset(
	task_t		task)
{
	ipc_port_t old_kport, new_kport;
	ipc_port_t old_sself;
	ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
	int i;

	/* Fresh label to unset credentials in existing labels. */
	struct label *unset_label = mac_exc_create_label();

	new_kport = ipc_port_alloc_kernel();
	if (new_kport == IP_NULL)
		panic("ipc_task_reset");

	itk_lock(task);

	old_kport = task->itk_self;

	if (old_kport == IP_NULL) {
		/* the task is already terminated (can this happen?) */
		itk_unlock(task);
		ipc_port_dealloc_kernel(new_kport);
		mac_exc_free_label(unset_label);
		return;
	}

	task->itk_self = new_kport;
	old_sself = task->itk_sself;
	task->itk_sself = ipc_port_make_send(new_kport);

	/* Set the old kport to IKOT_NONE and update the exec token while under the port lock */
	ip_lock(old_kport);
	ipc_kobject_set_atomically(old_kport, IKO_NULL, IKOT_NONE);
	task->exec_token += 1;
	ip_unlock(old_kport);

	ipc_kobject_set(new_kport, (ipc_kobject_t) task, IKOT_TASK);

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		old_exc_actions[i] = IP_NULL;

		if (i == EXC_CORPSE_NOTIFY && task_corpse_pending_report(task)) {
			continue;
		}

		if (!task->exc_actions[i].privileged) {
			mac_exc_update_action_label(task->exc_actions + i, unset_label);
			old_exc_actions[i] = task->exc_actions[i].port;
			task->exc_actions[i].port = IP_NULL;
		}
	}

	if (IP_VALID(task->itk_debug_control)) {
		ipc_port_release_send(task->itk_debug_control);
	}
	task->itk_debug_control = IP_NULL;

	itk_unlock(task);

	mac_exc_free_label(unset_label);

	/* release the naked send rights */

	if (IP_VALID(old_sself))
		ipc_port_release_send(old_sself);

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (IP_VALID(old_exc_actions[i])) {
			ipc_port_release_send(old_exc_actions[i]);
		}
	}

	/* destroy the kernel port */
	ipc_port_dealloc_kernel(old_kport);
}
/*
 *	Routine:	ipc_thread_init
 *	Purpose:
 *		Initialize a thread's IPC state.
 *	Conditions:
 *		Nothing locked.
 */

void
ipc_thread_init(
	thread_t	thread)
{
	ipc_port_t	kport;

	kport = ipc_port_alloc_kernel();
	if (kport == IP_NULL)
		panic("ipc_thread_init");

	thread->ith_self = kport;
	thread->ith_sself = ipc_port_make_send(kport);
	thread->ith_special_reply_port = NULL;
	thread->exc_actions = NULL;

	ipc_kobject_set(kport, (ipc_kobject_t)thread, IKOT_THREAD);

#if IMPORTANCE_INHERITANCE
	thread->ith_assertions = 0;
#endif

	ipc_kmsg_queue_init(&thread->ith_messages);

	thread->ith_rpc_reply = IP_NULL;
}

void
ipc_thread_init_exc_actions(
	thread_t	thread)
{
	assert(thread->exc_actions == NULL);

	thread->exc_actions = kalloc(sizeof(struct exception_action) * EXC_TYPES_COUNT);
	bzero(thread->exc_actions, sizeof(struct exception_action) * EXC_TYPES_COUNT);

	for (size_t i = 0; i < EXC_TYPES_COUNT; ++i) {
		mac_exc_associate_action_label(thread->exc_actions + i, mac_exc_create_label());
	}
}

void
ipc_thread_destroy_exc_actions(
	thread_t	thread)
{
	if (thread->exc_actions != NULL) {
		for (size_t i = 0; i < EXC_TYPES_COUNT; ++i) {
			mac_exc_free_action_label(thread->exc_actions + i);
		}

		kfree(thread->exc_actions,
		    sizeof(struct exception_action) * EXC_TYPES_COUNT);
		thread->exc_actions = NULL;
	}
}

void
ipc_thread_disable(
	thread_t	thread)
{
	ipc_port_t	kport = thread->ith_self;

	if (kport != IP_NULL)
		ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
}
/*
 *	Routine:	ipc_thread_terminate
 *	Purpose:
 *		Clean up and destroy a thread's IPC state.
 *	Conditions:
 *		Nothing locked.
 */

void
ipc_thread_terminate(
	thread_t	thread)
{
	ipc_port_t	kport = thread->ith_self;

	if (kport != IP_NULL) {
		int i;

		if (IP_VALID(thread->ith_sself))
			ipc_port_release_send(thread->ith_sself);

		thread->ith_sself = thread->ith_self = IP_NULL;

		if (thread->exc_actions != NULL) {
			for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
				if (IP_VALID(thread->exc_actions[i].port))
					ipc_port_release_send(thread->exc_actions[i].port);
			}
			ipc_thread_destroy_exc_actions(thread);
		}

		ipc_port_dealloc_kernel(kport);
	}

#if IMPORTANCE_INHERITANCE
	assert(thread->ith_assertions == 0);
#endif

	/* unbind the thread special reply port */
	if (IP_VALID(thread->ith_special_reply_port)) {
		ipc_port_unbind_special_reply_port(thread, TRUE);
	}

	assert(ipc_kmsg_queue_empty(&thread->ith_messages));

	if (thread->ith_rpc_reply != IP_NULL)
		ipc_port_dealloc_reply(thread->ith_rpc_reply);

	thread->ith_rpc_reply = IP_NULL;
}
/*
 *	Routine:	ipc_thread_reset
 *	Purpose:
 *		Reset the IPC state for a given Mach thread when
 *		its task enters an elevated security context.
 *		Both the thread port and its exception ports have
 *		to be reset.  Its RPC reply port cannot have any
 *		rights outstanding, so it should be fine.
 *	Conditions:
 *		Nothing locked.
 */

void
ipc_thread_reset(
	thread_t	thread)
{
	ipc_port_t old_kport, new_kport;
	ipc_port_t old_sself;
	ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
	boolean_t  has_old_exc_actions = FALSE;
	int i;

	struct label *new_label = mac_exc_create_label();

	new_kport = ipc_port_alloc_kernel();
	if (new_kport == IP_NULL)
		panic("ipc_task_reset");

	thread_mtx_lock(thread);

	old_kport = thread->ith_self;

	if (old_kport == IP_NULL && thread->inspection == FALSE) {
		/* the thread is already terminated (can this happen?) */
		thread_mtx_unlock(thread);
		ipc_port_dealloc_kernel(new_kport);
		mac_exc_free_label(new_label);
		return;
	}

	thread->ith_self = new_kport;
	old_sself = thread->ith_sself;
	thread->ith_sself = ipc_port_make_send(new_kport);
	if (old_kport != IP_NULL) {
		ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
	}
	ipc_kobject_set(new_kport, (ipc_kobject_t) thread, IKOT_THREAD);

	/*
	 * Only ports that were set by root-owned processes
	 * (privileged ports) should survive
	 */
	if (thread->exc_actions != NULL) {
		has_old_exc_actions = TRUE;
		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			if (thread->exc_actions[i].privileged) {
				old_exc_actions[i] = IP_NULL;
			} else {
				mac_exc_update_action_label(thread->exc_actions + i, new_label);
				old_exc_actions[i] = thread->exc_actions[i].port;
				thread->exc_actions[i].port = IP_NULL;
			}
		}
	}

	thread_mtx_unlock(thread);

	mac_exc_free_label(new_label);

	/* release the naked send rights */

	if (IP_VALID(old_sself))
		ipc_port_release_send(old_sself);

	if (has_old_exc_actions) {
		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			ipc_port_release_send(old_exc_actions[i]);
		}
	}

	/* destroy the kernel port */
	if (old_kport != IP_NULL) {
		ipc_port_dealloc_kernel(old_kport);
	}

	/* unbind the thread special reply port */
	if (IP_VALID(thread->ith_special_reply_port)) {
		ipc_port_unbind_special_reply_port(thread, TRUE);
	}
}
/*
 *	Routine:	retrieve_task_self_fast
 *	Purpose:
 *		Optimized version of retrieve_task_self,
 *		that only works for the current task.
 *
 *		Return a send right (possibly null/dead)
 *		for the task's user-visible self port.
 *	Conditions:
 *		Nothing locked.
 */

ipc_port_t
retrieve_task_self_fast(
	task_t		task)
{
	ipc_port_t port;

	assert(task == current_task());

	itk_lock(task);
	assert(task->itk_self != IP_NULL);

	if ((port = task->itk_sself) == task->itk_self) {
		/* no interposing */

		ip_lock(port);
		assert(ip_active(port));
		ip_reference(port);
		port->ip_srights++;
		ip_unlock(port);
	} else {
		port = ipc_port_copy_send(port);
	}
	itk_unlock(task);

	return port;
}

/*
 *	Routine:	retrieve_thread_self_fast
 *	Purpose:
 *		Return a send right (possibly null/dead)
 *		for the thread's user-visible self port.
 *
 *		Only works for the current thread.
 *	Conditions:
 *		Nothing locked.
 */

ipc_port_t
retrieve_thread_self_fast(
	thread_t	thread)
{
	ipc_port_t port;

	assert(thread == current_thread());

	thread_mtx_lock(thread);

	assert(thread->ith_self != IP_NULL);

	if ((port = thread->ith_sself) == thread->ith_self) {
		/* no interposing */

		ip_lock(port);
		assert(ip_active(port));
		ip_reference(port);
		port->ip_srights++;
		ip_unlock(port);
	} else {
		port = ipc_port_copy_send(port);
	}

	thread_mtx_unlock(thread);

	return port;
}

/*
 *	Routine:	task_self_trap [mach trap]
 *	Purpose:
 *		Give the caller send rights for his own task port.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		MACH_PORT_NULL if there are any resource failures
 *		or other errors.
 */

mach_port_name_t
task_self_trap(
	__unused struct task_self_trap_args *args)
{
	task_t task = current_task();
	ipc_port_t sright;
	mach_port_name_t name;

	sright = retrieve_task_self_fast(task);
	name = ipc_port_copyout_send(sright, task->itk_space);
	return name;
}
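
/*
 * Illustrative sketch (not part of this file): from user space, the trap
 * above is normally reached via mach_task_self(); the task send right it
 * returns can then be used with ordinary task interfaces, e.g.:
 *
 *	mach_port_t self = mach_task_self();
 *	task_basic_info_data_t info;
 *	mach_msg_type_number_t count = TASK_BASIC_INFO_COUNT;
 *	kern_return_t kr = task_info(self, TASK_BASIC_INFO, (task_info_t)&info, &count);
 *
 * The names above come from the public Mach headers, not from ipc_tt.c.
 */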
/*
 *	Routine:	thread_self_trap [mach trap]
 *	Purpose:
 *		Give the caller send rights for his own thread port.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		MACH_PORT_NULL if there are any resource failures
 *		or other errors.
 */

mach_port_name_t
thread_self_trap(
	__unused struct thread_self_trap_args *args)
{
	thread_t thread = current_thread();
	task_t task = thread->task;
	ipc_port_t sright;
	mach_port_name_t name;

	sright = retrieve_thread_self_fast(thread);
	name = ipc_port_copyout_send(sright, task->itk_space);
	return name;
}

/*
 *	Routine:	mach_reply_port [mach trap]
 *	Purpose:
 *		Allocate a port for the caller.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		MACH_PORT_NULL if there are any resource failures
 *		or other errors.
 */

mach_port_name_t
mach_reply_port(
	__unused struct mach_reply_port_args *args)
{
	ipc_port_t port;
	mach_port_name_t name;
	kern_return_t kr;

	kr = ipc_port_alloc(current_task()->itk_space, &name, &port);
	if (kr == KERN_SUCCESS)
		ip_unlock(port);
	else
		name = MACH_PORT_NULL;
	return name;
}
/*
 *	Routine:	thread_get_special_reply_port [mach trap]
 *	Purpose:
 *		Allocate a special reply port for the calling thread.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		mach_port_name_t: send right & receive right for special reply port.
 *		MACH_PORT_NULL if there are any resource failures
 *		or other errors.
 */

mach_port_name_t
thread_get_special_reply_port(
	__unused struct thread_get_special_reply_port_args *args)
{
	ipc_port_t port;
	mach_port_name_t name;
	mach_port_name_t send_name;
	kern_return_t kr;
	thread_t thread = current_thread();

	/* unbind the thread special reply port */
	if (IP_VALID(thread->ith_special_reply_port)) {
		kr = ipc_port_unbind_special_reply_port(thread, TRUE);
		if (kr != KERN_SUCCESS) {
			return MACH_PORT_NULL;
		}
	}

	kr = ipc_port_alloc(current_task()->itk_space, &name, &port);
	if (kr == KERN_SUCCESS) {
		ipc_port_bind_special_reply_port_locked(port);

		/* Make a send right and insert it in the space at specified name */
		ipc_port_make_send_locked(port);
		ip_unlock(port);
		send_name = ipc_port_copyout_name_send(port, current_task()->itk_space, name);
		/*
		 * If insertion of send right failed, userland is doing something bad, error out.
		 * The space was marked inactive or the receive right just inserted above at the
		 * given name was moved, in either case do not try to deallocate the receive right.
		 */
		if (send_name == MACH_PORT_NULL || send_name == MACH_PORT_DEAD) {
			if (IP_VALID(thread->ith_special_reply_port)) {
				ipc_port_unbind_special_reply_port(thread, TRUE);
			}
			name = MACH_PORT_NULL;
		}
	} else {
		name = MACH_PORT_NULL;
	}
	return name;
}

/*
 *	Routine:	ipc_port_bind_special_reply_port_locked
 *	Purpose:
 *		Bind the given port to current thread as a special reply port.
 *	Conditions:
 *		Port locked.
 *	Returns:
 *		None.
 */

static void
ipc_port_bind_special_reply_port_locked(
	ipc_port_t port)
{
	thread_t thread = current_thread();
	assert(thread->ith_special_reply_port == NULL);

	ip_reference(port);
	thread->ith_special_reply_port = port;
	port->ip_specialreply = 1;
	port->ip_sync_link_state = PORT_SYNC_LINK_ANY;

	reset_ip_srp_bits(port);
}

/*
 *	Routine:	ipc_port_unbind_special_reply_port
 *	Purpose:
 *		Unbind the thread's special reply port.
 *		If the special port has threads waiting on turnstile,
 *		update its inheritor.
 *	Condition:
 *		Nothing locked.
 *	Returns:
 *		None.
 */
static kern_return_t
ipc_port_unbind_special_reply_port(
	thread_t thread,
	boolean_t unbind_active_port)
{
	ipc_port_t special_reply_port = thread->ith_special_reply_port;

	ip_lock(special_reply_port);

	/* Return error if port active and unbind_active_port set to FALSE */
	if (unbind_active_port == FALSE && ip_active(special_reply_port)) {
		ip_unlock(special_reply_port);
		return KERN_FAILURE;
	}

	thread->ith_special_reply_port = NULL;
	ipc_port_adjust_special_reply_port_locked(special_reply_port, NULL,
		IPC_PORT_ADJUST_SR_CLEAR_SPECIAL_REPLY, FALSE);
	/* port unlocked */

	ip_release(special_reply_port);
	return KERN_SUCCESS;
}
/*
 *	Routine:	thread_get_special_port [kernel call]
 *	Purpose:
 *		Clones a send right for one of the thread's
 *		special ports.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		KERN_SUCCESS		Extracted a send right.
 *		KERN_INVALID_ARGUMENT	The thread is null.
 *		KERN_FAILURE		The thread is dead.
 *		KERN_INVALID_ARGUMENT	Invalid special port.
 */

kern_return_t
thread_get_special_port(
	thread_t	thread,
	int		which,
	ipc_port_t	*portp)
{
	kern_return_t	result = KERN_SUCCESS;
	ipc_port_t	*whichp;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	switch (which) {
	case THREAD_KERNEL_PORT:
		whichp = &thread->ith_sself;
		break;

	default:
		return (KERN_INVALID_ARGUMENT);
	}

	thread_mtx_lock(thread);

	if (thread->active)
		*portp = ipc_port_copy_send(*whichp);
	else
		result = KERN_FAILURE;

	thread_mtx_unlock(thread);

	return (result);
}

/*
 *	Routine:	thread_set_special_port	[kernel call]
 *	Purpose:
 *		Changes one of the thread's special ports,
 *		setting it to the supplied send right.
 *	Conditions:
 *		Nothing locked.  If successful, consumes
 *		the supplied send right.
 *	Returns:
 *		KERN_SUCCESS		Changed the special port.
 *		KERN_INVALID_ARGUMENT	The thread is null.
 *		KERN_FAILURE		The thread is dead.
 *		KERN_INVALID_ARGUMENT	Invalid special port.
 */

kern_return_t
thread_set_special_port(
	thread_t	thread,
	int		which,
	ipc_port_t	port)
{
	kern_return_t	result = KERN_SUCCESS;
	ipc_port_t	*whichp, old = IP_NULL;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	switch (which) {
	case THREAD_KERNEL_PORT:
		whichp = &thread->ith_sself;
		break;

	default:
		return (KERN_INVALID_ARGUMENT);
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		old = *whichp;
		*whichp = port;
	} else {
		result = KERN_FAILURE;
	}

	thread_mtx_unlock(thread);

	if (IP_VALID(old))
		ipc_port_release_send(old);

	return (result);
}
/*
 *	Routine:	task_get_special_port [kernel call]
 *	Purpose:
 *		Clones a send right for one of the task's
 *		special ports.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		KERN_SUCCESS		Extracted a send right.
 *		KERN_INVALID_ARGUMENT	The task is null.
 *		KERN_FAILURE		The task/space is dead.
 *		KERN_INVALID_ARGUMENT	Invalid special port.
 */

kern_return_t
task_get_special_port(
	task_t		task,
	int		which,
	ipc_port_t	*portp)
{
	ipc_port_t port;

	if (task == TASK_NULL)
		return KERN_INVALID_ARGUMENT;

	itk_lock(task);
	if (task->itk_self == IP_NULL) {
		itk_unlock(task);
		return KERN_FAILURE;
	}

	switch (which) {
	case TASK_KERNEL_PORT:
		port = ipc_port_copy_send(task->itk_sself);
		break;

	case TASK_NAME_PORT:
		port = ipc_port_make_send(task->itk_nself);
		break;

	case TASK_HOST_PORT:
		port = ipc_port_copy_send(task->itk_host);
		break;

	case TASK_BOOTSTRAP_PORT:
		port = ipc_port_copy_send(task->itk_bootstrap);
		break;

	case TASK_SEATBELT_PORT:
		port = ipc_port_copy_send(task->itk_seatbelt);
		break;

	case TASK_ACCESS_PORT:
		port = ipc_port_copy_send(task->itk_task_access);
		break;

	case TASK_DEBUG_CONTROL_PORT:
		port = ipc_port_copy_send(task->itk_debug_control);
		break;

	default:
		itk_unlock(task);
		return KERN_INVALID_ARGUMENT;
	}
	itk_unlock(task);

	*portp = port;
	return KERN_SUCCESS;
}

/*
 *	Routine:	task_set_special_port [kernel call]
 *	Purpose:
 *		Changes one of the task's special ports,
 *		setting it to the supplied send right.
 *	Conditions:
 *		Nothing locked.  If successful, consumes
 *		the supplied send right.
 *	Returns:
 *		KERN_SUCCESS		Changed the special port.
 *		KERN_INVALID_ARGUMENT	The task is null.
 *		KERN_FAILURE		The task/space is dead.
 *		KERN_INVALID_ARGUMENT	Invalid special port.
 *		KERN_NO_ACCESS		Attempted overwrite of seatbelt port.
 */

kern_return_t
task_set_special_port(
	task_t		task,
	int		which,
	ipc_port_t	port)
{
	ipc_port_t *whichp;
	ipc_port_t old;

	if (task == TASK_NULL)
		return KERN_INVALID_ARGUMENT;

	switch (which) {
	case TASK_KERNEL_PORT:
		whichp = &task->itk_sself;
		break;

	case TASK_HOST_PORT:
		whichp = &task->itk_host;
		break;

	case TASK_BOOTSTRAP_PORT:
		whichp = &task->itk_bootstrap;
		break;

	case TASK_SEATBELT_PORT:
		whichp = &task->itk_seatbelt;
		break;

	case TASK_ACCESS_PORT:
		whichp = &task->itk_task_access;
		break;

	case TASK_DEBUG_CONTROL_PORT:
		whichp = &task->itk_debug_control;
		break;

	default:
		return KERN_INVALID_ARGUMENT;
	}

	itk_lock(task);
	if (task->itk_self == IP_NULL) {
		itk_unlock(task);
		return KERN_FAILURE;
	}

	/* do not allow overwrite of seatbelt or task access ports */
	if ((TASK_SEATBELT_PORT == which || TASK_ACCESS_PORT == which)
	    && IP_VALID(*whichp)) {
		itk_unlock(task);
		return KERN_NO_ACCESS;
	}

	old = *whichp;
	*whichp = port;
	itk_unlock(task);

	if (IP_VALID(old))
		ipc_port_release_send(old);
	return KERN_SUCCESS;
}
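
/*
 * Illustrative sketch (not part of this file): a parent that holds a send
 * right for a child task could install and read back a bootstrap port with
 * the public MIG wrappers declared in <mach/task.h>; child_task and bs_port
 * are hypothetical rights held by the caller:
 *
 *	kern_return_t kr = task_set_special_port(child_task, TASK_BOOTSTRAP_PORT, bs_port);
 *	mach_port_t got = MACH_PORT_NULL;
 *	if (kr == KERN_SUCCESS)
 *		kr = task_get_special_port(child_task, TASK_BOOTSTRAP_PORT, &got);
 */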
/*
 *	Routine:	mach_ports_register [kernel call]
 *	Purpose:
 *		Stash a handful of port send rights in the task.
 *		Child tasks will inherit these rights, but they
 *		must use mach_ports_lookup to acquire them.
 *
 *		The rights are supplied in a (wired) kalloc'd segment.
 *		Rights which aren't supplied are assumed to be null.
 *	Conditions:
 *		Nothing locked.  If successful, consumes
 *		the supplied rights and memory.
 *	Returns:
 *		KERN_SUCCESS		Stashed the port rights.
 *		KERN_INVALID_ARGUMENT	The task is null.
 *		KERN_INVALID_ARGUMENT	The task is dead.
 *		KERN_INVALID_ARGUMENT	The memory param is null.
 *		KERN_INVALID_ARGUMENT	Too many port rights supplied.
 */

kern_return_t
mach_ports_register(
	task_t			task,
	mach_port_array_t	memory,
	mach_msg_type_number_t	portsCnt)
{
	ipc_port_t ports[TASK_PORT_REGISTER_MAX];
	unsigned int i;

	if ((task == TASK_NULL) ||
	    (portsCnt > TASK_PORT_REGISTER_MAX) ||
	    (portsCnt && memory == NULL))
		return KERN_INVALID_ARGUMENT;

	/*
	 * Pad the port rights with nulls.
	 */

	for (i = 0; i < portsCnt; i++)
		ports[i] = memory[i];
	for (; i < TASK_PORT_REGISTER_MAX; i++)
		ports[i] = IP_NULL;

	itk_lock(task);
	if (task->itk_self == IP_NULL) {
		itk_unlock(task);
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * Replace the old send rights with the new.
	 * Release the old rights after unlocking.
	 */

	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
		ipc_port_t old;

		old = task->itk_registered[i];
		task->itk_registered[i] = ports[i];
		ports[i] = old;
	}

	itk_unlock(task);

	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
		if (IP_VALID(ports[i]))
			ipc_port_release_send(ports[i]);

	/*
	 * Now that the operation is known to be successful,
	 * we can free the memory.
	 */

	if (portsCnt != 0)
		kfree(memory,
		    (vm_size_t) (portsCnt * sizeof(mach_port_t)));

	return KERN_SUCCESS;
}

/*
 *	Routine:	mach_ports_lookup [kernel call]
 *	Purpose:
 *		Retrieves (clones) the stashed port send rights.
 *	Conditions:
 *		Nothing locked.  If successful, the caller gets
 *		rights and memory.
 *	Returns:
 *		KERN_SUCCESS		Retrieved the send rights.
 *		KERN_INVALID_ARGUMENT	The task is null.
 *		KERN_INVALID_ARGUMENT	The task is dead.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
 */

kern_return_t
mach_ports_lookup(
	task_t			task,
	mach_port_array_t	*portsp,
	mach_msg_type_number_t	*portsCnt)
{
	void *memory;
	vm_size_t size;
	ipc_port_t *ports;
	int i;

	if (task == TASK_NULL)
		return KERN_INVALID_ARGUMENT;

	size = (vm_size_t) (TASK_PORT_REGISTER_MAX * sizeof(ipc_port_t));

	memory = kalloc(size);
	if (memory == 0)
		return KERN_RESOURCE_SHORTAGE;

	itk_lock(task);
	if (task->itk_self == IP_NULL) {
		itk_unlock(task);

		kfree(memory, size);
		return KERN_INVALID_ARGUMENT;
	}

	ports = (ipc_port_t *) memory;

	/*
	 * Clone port rights.  Because kalloc'd memory
	 * is wired, we won't fault while holding the task lock.
	 */

	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
		ports[i] = ipc_port_copy_send(task->itk_registered[i]);

	itk_unlock(task);

	*portsp = (mach_port_array_t) ports;
	*portsCnt = TASK_PORT_REGISTER_MAX;
	return KERN_SUCCESS;
}
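
/*
 * Illustrative sketch (not part of this file): the register/lookup pair above
 * backs the user-level mach_ports_register()/mach_ports_lookup() routines.
 * A typical round trip, with regd_port a hypothetical send right owned by
 * the caller, might look like:
 *
 *	mach_port_t set[1] = { regd_port };
 *	kern_return_t kr = mach_ports_register(mach_task_self(), set, 1);
 *
 *	mach_port_array_t out = NULL;
 *	mach_msg_type_number_t cnt = 0;
 *	if (kr == KERN_SUCCESS)
 *		kr = mach_ports_lookup(mach_task_self(), &out, &cnt);
 *
 * On success out[0..cnt-1] holds cloned send rights; unregistered slots come
 * back as MACH_PORT_NULL.
 */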
kern_return_t
task_conversion_eval(task_t caller, task_t victim)
{
	/*
	 * Tasks are allowed to resolve their own task ports, and the kernel is
	 * allowed to resolve anyone's task port.
	 */
	if (caller == kernel_task) {
		return KERN_SUCCESS;
	}

	if (caller == victim) {
		return KERN_SUCCESS;
	}

	/*
	 * Only the kernel can resolve the kernel's task port. We've established
	 * by this point that the caller is not kernel_task.
	 */
	if (victim == TASK_NULL || victim == kernel_task) {
		return KERN_INVALID_SECURITY;
	}

#if CONFIG_EMBEDDED
	/*
	 * On embedded platforms, only a platform binary can resolve the task port
	 * of another platform binary.
	 */
	if ((victim->t_flags & TF_PLATFORM) && !(caller->t_flags & TF_PLATFORM)) {
#if SECURE_KERNEL
		return KERN_INVALID_SECURITY;
#else
		if (cs_relax_platform_task_ports) {
			return KERN_SUCCESS;
		} else {
			return KERN_INVALID_SECURITY;
		}
#endif /* SECURE_KERNEL */
	}
#endif /* CONFIG_EMBEDDED */

	return KERN_SUCCESS;
}
/*
 *	Routine:	convert_port_to_locked_task
 *	Purpose:
 *		Internal helper routine to convert from a port to a locked
 *		task.  Used by several routines that try to convert from a
 *		task port to a reference on some task related object.
 *	Conditions:
 *		Nothing locked, blocking OK.
 */
task_t
convert_port_to_locked_task(ipc_port_t port)
{
	int try_failed_count = 0;

	while (IP_VALID(port)) {
		task_t ct = current_task();
		task_t task;

		ip_lock(port);
		if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK)) {
			ip_unlock(port);
			return TASK_NULL;
		}
		task = (task_t) port->ip_kobject;
		assert(task != TASK_NULL);

		if (task_conversion_eval(ct, task)) {
			ip_unlock(port);
			return TASK_NULL;
		}

		/*
		 * Normal lock ordering puts task_lock() before ip_lock().
		 * Attempt out-of-order locking here.
		 */
		if (task_lock_try(task)) {
			ip_unlock(port);
			return task;
		}
		try_failed_count++;

		ip_unlock(port);
		mutex_pause(try_failed_count);
	}
	return TASK_NULL;
}

/*
 *	Routine:	convert_port_to_locked_task_inspect
 *	Purpose:
 *		Internal helper routine to convert from a port to a locked
 *		task inspect right. Used by internal routines that try to convert from a
 *		task inspect port to a reference on some task related object.
 *	Conditions:
 *		Nothing locked, blocking OK.
 */
task_inspect_t
convert_port_to_locked_task_inspect(ipc_port_t port)
{
	int try_failed_count = 0;

	while (IP_VALID(port)) {
		task_inspect_t task;

		ip_lock(port);
		if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK)) {
			ip_unlock(port);
			return TASK_INSPECT_NULL;
		}
		task = (task_inspect_t)port->ip_kobject;
		assert(task != TASK_INSPECT_NULL);

		/*
		 * Normal lock ordering puts task_lock() before ip_lock().
		 * Attempt out-of-order locking here.
		 */
		if (task_lock_try((task_t)task)) {
			ip_unlock(port);
			return task;
		}
		try_failed_count++;

		ip_unlock(port);
		mutex_pause(try_failed_count);
	}
	return TASK_INSPECT_NULL;
}
/*
 *	Routine:	convert_port_to_task
 *	Purpose:
 *		Convert from a port to a task.
 *		Doesn't consume the port ref; produces a task ref,
 *		which may be null.
 *	Conditions:
 *		Nothing locked.
 */
task_t
convert_port_to_task(
	ipc_port_t	port)
{
	return convert_port_to_task_with_exec_token(port, NULL);
}

/*
 *	Routine:	convert_port_to_task_with_exec_token
 *	Purpose:
 *		Convert from a port to a task and return
 *		the exec token stored in the task.
 *		Doesn't consume the port ref; produces a task ref,
 *		which may be null.
 *	Conditions:
 *		Nothing locked.
 */
task_t
convert_port_to_task_with_exec_token(
	ipc_port_t	port,
	uint32_t	*exec_token)
{
	task_t		task = TASK_NULL;

	if (IP_VALID(port)) {
		ip_lock(port);

		if (ip_active(port) &&
		    ip_kotype(port) == IKOT_TASK) {
			task_t ct = current_task();
			task = (task_t)port->ip_kobject;
			assert(task != TASK_NULL);

			if (task_conversion_eval(ct, task)) {
				ip_unlock(port);
				return TASK_NULL;
			}

			if (exec_token) {
				*exec_token = task->exec_token;
			}
			task_reference_internal(task);
		}

		ip_unlock(port);
	}

	return (task);
}

/*
 *	Routine:	convert_port_to_task_name
 *	Purpose:
 *		Convert from a port to a task name.
 *		Doesn't consume the port ref; produces a task name ref,
 *		which may be null.
 *	Conditions:
 *		Nothing locked.
 */
task_name_t
convert_port_to_task_name(
	ipc_port_t	port)
{
	task_name_t	task = TASK_NULL;

	if (IP_VALID(port)) {
		ip_lock(port);

		if (ip_active(port) &&
		    (ip_kotype(port) == IKOT_TASK ||
		     ip_kotype(port) == IKOT_TASK_NAME)) {
			task = (task_name_t)port->ip_kobject;
			assert(task != TASK_NAME_NULL);

			task_reference_internal(task);
		}

		ip_unlock(port);
	}

	return (task);
}

/*
 *	Routine:	convert_port_to_task_inspect
 *	Purpose:
 *		Convert from a port to a task inspection right
 *		Doesn't consume the port ref; produces a task ref,
 *		which may be null.
 *	Conditions:
 *		Nothing locked.
 */
task_inspect_t
convert_port_to_task_inspect(
	ipc_port_t	port)
{
	task_inspect_t task = TASK_INSPECT_NULL;

	if (IP_VALID(port)) {
		ip_lock(port);

		if (ip_active(port) &&
		    ip_kotype(port) == IKOT_TASK) {
			task = (task_inspect_t)port->ip_kobject;
			assert(task != TASK_INSPECT_NULL);

			task_reference_internal(task);
		}

		ip_unlock(port);
	}

	return (task);
}

/*
 *	Routine:	convert_port_to_task_suspension_token
 *	Purpose:
 *		Convert from a port to a task suspension token.
 *		Doesn't consume the port ref; produces a suspension token ref,
 *		which may be null.
 *	Conditions:
 *		Nothing locked.
 */
task_suspension_token_t
convert_port_to_task_suspension_token(
	ipc_port_t	port)
{
	task_suspension_token_t		task = TASK_NULL;

	if (IP_VALID(port)) {
		ip_lock(port);

		if (ip_active(port) &&
		    ip_kotype(port) == IKOT_TASK_RESUME) {
			task = (task_suspension_token_t)port->ip_kobject;
			assert(task != TASK_NULL);

			task_reference_internal(task);
		}

		ip_unlock(port);
	}

	return (task);
}
/*
 *	Routine:	convert_port_to_space
 *	Purpose:
 *		Convert from a port to a space.
 *		Doesn't consume the port ref; produces a space ref,
 *		which may be null.
 *	Conditions:
 *		Nothing locked.
 */
ipc_space_t
convert_port_to_space(
	ipc_port_t	port)
{
	ipc_space_t space;
	task_t task;

	task = convert_port_to_locked_task(port);

	if (task == TASK_NULL)
		return IPC_SPACE_NULL;

	if (!task->active) {
		task_unlock(task);
		return IPC_SPACE_NULL;
	}

	space = task->itk_space;
	is_reference(space);
	task_unlock(task);
	return (space);
}

/*
 *	Routine:	convert_port_to_space_inspect
 *	Purpose:
 *		Convert from a port to a space inspect right.
 *		Doesn't consume the port ref; produces a space inspect ref,
 *		which may be null.
 *	Conditions:
 *		Nothing locked.
 */
ipc_space_inspect_t
convert_port_to_space_inspect(
	ipc_port_t	port)
{
	ipc_space_inspect_t space;
	task_inspect_t task;

	task = convert_port_to_locked_task_inspect(port);

	if (task == TASK_INSPECT_NULL)
		return IPC_SPACE_INSPECT_NULL;

	if (!task->active) {
		task_unlock(task);
		return IPC_SPACE_INSPECT_NULL;
	}

	space = (ipc_space_inspect_t)task->itk_space;
	is_reference((ipc_space_t)space);
	task_unlock((task_t)task);
	return (space);
}

/*
 *	Routine:	convert_port_to_map
 *	Purpose:
 *		Convert from a port to a map.
 *		Doesn't consume the port ref; produces a map ref,
 *		which may be null.
 *	Conditions:
 *		Nothing locked.
 */

vm_map_t
convert_port_to_map(
	ipc_port_t	port)
{
	task_t task;
	vm_map_t map;

	task = convert_port_to_locked_task(port);

	if (task == TASK_NULL)
		return VM_MAP_NULL;

	if (!task->active) {
		task_unlock(task);
		return VM_MAP_NULL;
	}

	map = task->map;
	vm_map_reference_swap(map);
	task_unlock(task);
	return map;
}
/*
 *	Routine:	convert_port_to_thread
 *	Purpose:
 *		Convert from a port to a thread.
 *		Doesn't consume the port ref; produces a thread ref,
 *		which may be null.
 *	Conditions:
 *		Nothing locked.
 */

thread_t
convert_port_to_thread(
	ipc_port_t	port)
{
	thread_t	thread = THREAD_NULL;

	if (IP_VALID(port)) {
		ip_lock(port);

		if (ip_active(port) &&
		    ip_kotype(port) == IKOT_THREAD) {
			thread = (thread_t)port->ip_kobject;
			assert(thread != THREAD_NULL);

			/* Use task conversion rules for thread control conversions */
			if (task_conversion_eval(current_task(), thread->task) != KERN_SUCCESS) {
				ip_unlock(port);
				return THREAD_NULL;
			}

			thread_reference_internal(thread);
		}

		ip_unlock(port);
	}

	return (thread);
}

/*
 *	Routine:	convert_port_to_thread_inspect
 *	Purpose:
 *		Convert from a port to a thread inspection right
 *		Doesn't consume the port ref; produces a thread ref,
 *		which may be null.
 *	Conditions:
 *		Nothing locked.
 */
thread_inspect_t
convert_port_to_thread_inspect(
	ipc_port_t	port)
{
	thread_inspect_t thread = THREAD_INSPECT_NULL;

	if (IP_VALID(port)) {
		ip_lock(port);

		if (ip_active(port) &&
		    ip_kotype(port) == IKOT_THREAD) {
			thread = (thread_inspect_t)port->ip_kobject;
			assert(thread != THREAD_INSPECT_NULL);
			thread_reference_internal((thread_t)thread);
		}

		ip_unlock(port);
	}

	return (thread);
}

/*
 *	Routine:	convert_thread_inspect_to_port
 *	Purpose:
 *		Convert from a thread inspect reference to a port.
 *		Consumes a thread ref;
 *		As we never export thread inspect ports, always
 *		creates a NULL port.
 *	Conditions:
 *		Nothing locked.
 */

ipc_port_t
convert_thread_inspect_to_port(thread_inspect_t thread)
{
	thread_deallocate(thread);
	return IP_NULL;
}
/*
 *	Routine:	port_name_to_thread
 *	Purpose:
 *		Convert from a port name to a thread reference.
 *		A name of MACH_PORT_NULL is valid for the null thread.
 *	Conditions:
 *		Nothing locked.
 *
 *	TODO: Could this be faster if it were ipc_port_translate_send based, like thread_switch?
 *	      We could avoid extra lock/unlock and extra ref operations on the port.
 */
thread_t
port_name_to_thread(
	mach_port_name_t	name)
{
	thread_t	thread = THREAD_NULL;
	ipc_port_t	kport;

	if (MACH_PORT_VALID(name)) {
		if (ipc_object_copyin(current_space(), name,
			MACH_MSG_TYPE_COPY_SEND,
			(ipc_object_t *)&kport) != KERN_SUCCESS)
			return (THREAD_NULL);

		thread = convert_port_to_thread(kport);

		if (IP_VALID(kport))
			ipc_port_release_send(kport);
	}

	return (thread);
}

task_t
port_name_to_task(
	mach_port_name_t name)
{
	ipc_port_t kern_port;
	kern_return_t kr;
	task_t task = TASK_NULL;

	if (MACH_PORT_VALID(name)) {
		kr = ipc_object_copyin(current_space(), name,
			MACH_MSG_TYPE_COPY_SEND,
			(ipc_object_t *) &kern_port);
		if (kr != KERN_SUCCESS)
			return TASK_NULL;

		task = convert_port_to_task(kern_port);

		if (IP_VALID(kern_port))
			ipc_port_release_send(kern_port);
	}
	return task;
}

task_inspect_t
port_name_to_task_inspect(
	mach_port_name_t name)
{
	ipc_port_t kern_port;
	kern_return_t kr;
	task_inspect_t ti = TASK_INSPECT_NULL;

	if (MACH_PORT_VALID(name)) {
		kr = ipc_object_copyin(current_space(), name,
			MACH_MSG_TYPE_COPY_SEND,
			(ipc_object_t *)&kern_port);
		if (kr != KERN_SUCCESS)
			return TASK_INSPECT_NULL;

		ti = convert_port_to_task_inspect(kern_port);

		if (IP_VALID(kern_port))
			ipc_port_release_send(kern_port);
	}
	return ti;
}

/*
 *	Routine:	port_name_to_host
 *	Purpose:
 *		Convert from a port name to a host pointer.
 *		NOTE: This does _not_ return a +1 reference to the host_t
 *	Conditions:
 *		Nothing locked.
 */
host_t
port_name_to_host(
	mach_port_name_t name)
{
	host_t host = HOST_NULL;
	kern_return_t kr;
	ipc_port_t port;

	if (MACH_PORT_VALID(name)) {
		kr = ipc_port_translate_send(current_space(), name, &port);
		if (kr == KERN_SUCCESS) {
			host = convert_port_to_host(port);
			ip_unlock(port);
		}
	}
	return host;
}
/*
 *	Routine:	convert_task_to_port
 *	Purpose:
 *		Convert from a task to a port.
 *		Consumes a task ref; produces a naked send right
 *		which may be invalid.
 *	Conditions:
 *		Nothing locked.
 */

ipc_port_t
convert_task_to_port(
	task_t		task)
{
	ipc_port_t port;

	itk_lock(task);

	if (task->itk_self != IP_NULL)
		port = ipc_port_make_send(task->itk_self);
	else
		port = IP_NULL;

	itk_unlock(task);

	task_deallocate(task);
	return port;
}

/*
 *	Routine:	convert_task_inspect_to_port
 *	Purpose:
 *		Convert from a task inspect reference to a port.
 *		Consumes a task ref;
 *		As we never export task inspect ports, always
 *		creates a NULL port.
 *	Conditions:
 *		Nothing locked.
 */
ipc_port_t
convert_task_inspect_to_port(
	task_inspect_t		task)
{
	task_deallocate(task);

	return IP_NULL;
}

/*
 *	Routine:	convert_task_suspend_token_to_port
 *	Purpose:
 *		Convert from a task suspension token to a port.
 *		Consumes a task suspension token ref; produces a naked send-once right
 *		which may be invalid.
 *	Conditions:
 *		Nothing locked.
 */
ipc_port_t
convert_task_suspension_token_to_port(
	task_suspension_token_t		task)
{
	ipc_port_t port;

	task_lock(task);
	if (task->active) {
		if (task->itk_resume == IP_NULL) {
			task->itk_resume = ipc_port_alloc_kernel();
			if (!IP_VALID(task->itk_resume)) {
				panic("failed to create resume port");
			}
			ipc_kobject_set(task->itk_resume, (ipc_kobject_t) task, IKOT_TASK_RESUME);
		}

		/*
		 * Create a send-once right for each instance of a direct user-called
		 * task_suspend2 call. Each time one of these send-once rights is abandoned,
		 * the notification handler will resume the target task.
		 */
		port = ipc_port_make_sonce(task->itk_resume);
		assert(IP_VALID(port));
	} else {
		port = IP_NULL;
	}

	task_unlock(task);
	task_suspension_token_deallocate(task);

	return port;
}

/*
 *	Routine:	convert_task_name_to_port
 *	Purpose:
 *		Convert from a task name ref to a port.
 *		Consumes a task name ref; produces a naked send right
 *		which may be invalid.
 *	Conditions:
 *		Nothing locked.
 */

ipc_port_t
convert_task_name_to_port(
	task_name_t		task_name)
{
	ipc_port_t port;

	itk_lock(task_name);
	if (task_name->itk_nself != IP_NULL)
		port = ipc_port_make_send(task_name->itk_nself);
	else
		port = IP_NULL;
	itk_unlock(task_name);

	task_name_deallocate(task_name);
	return port;
}

/*
 *	Routine:	convert_thread_to_port
 *	Purpose:
 *		Convert from a thread to a port.
 *		Consumes a thread ref; produces a naked send right
 *		which may be invalid.
 *	Conditions:
 *		Nothing locked.
 */

ipc_port_t
convert_thread_to_port(
	thread_t		thread)
{
	ipc_port_t		port;

	thread_mtx_lock(thread);

	if (thread->ith_self != IP_NULL)
		port = ipc_port_make_send(thread->ith_self);
	else
		port = IP_NULL;

	thread_mtx_unlock(thread);

	thread_deallocate(thread);

	return (port);
}
/*
 *	Routine:	space_deallocate
 *	Purpose:
 *		Deallocate a space ref produced by convert_port_to_space.
 *	Conditions:
 *		Nothing locked.
 */

void
space_deallocate(
	ipc_space_t	space)
{
	if (space != IS_NULL)
		is_release(space);
}

/*
 *	Routine:	space_inspect_deallocate
 *	Purpose:
 *		Deallocate a space inspect ref produced by convert_port_to_space_inspect.
 *	Conditions:
 *		Nothing locked.
 */

void
space_inspect_deallocate(
	ipc_space_inspect_t	space)
{
	if (space != IS_INSPECT_NULL)
		is_release((ipc_space_t)space);
}
/*
 *	Routine:	thread/task_set_exception_ports [kernel call]
 *	Purpose:
 *		Sets the thread/task exception port, flavor and
 *		behavior for the exception types specified by the mask.
 *		There will be one send right per exception per valid
 *		mask bit.
 *	Conditions:
 *		Nothing locked.  If successful, consumes
 *		the supplied send right.
 *	Returns:
 *		KERN_SUCCESS		Changed the special port.
 *		KERN_INVALID_ARGUMENT	The thread is null,
 *					Illegal mask bit set.
 *					Illegal exception behavior
 *		KERN_FAILURE		The thread is dead.
 */

kern_return_t
thread_set_exception_ports(
	thread_t		thread,
	exception_mask_t	exception_mask,
	ipc_port_t		new_port,
	exception_behavior_t	new_behavior,
	thread_state_flavor_t	new_flavor)
{
	ipc_port_t	old_port[EXC_TYPES_COUNT];
	boolean_t privileged = current_task()->sec_token.val[0] == 0;
	int		i;

	struct label *new_label;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (exception_mask & ~EXC_MASK_VALID)
		return (KERN_INVALID_ARGUMENT);

	if (IP_VALID(new_port)) {
		switch (new_behavior & ~MACH_EXCEPTION_CODES) {

		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
			break;

		default:
			return (KERN_INVALID_ARGUMENT);
		}
	}

	/*
	 * Check the validity of the thread_state_flavor by calling the
	 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
	 * osfmk/mach/ARCHITECTURE/thread_status.h
	 */
	if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor))
		return (KERN_INVALID_ARGUMENT);

	new_label = mac_exc_create_label_for_current_proc();

	thread_mtx_lock(thread);

	if (!thread->active) {
		thread_mtx_unlock(thread);

		return (KERN_FAILURE);
	}

	if (thread->exc_actions == NULL) {
		ipc_thread_init_exc_actions(thread);
	}
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
		if ((exception_mask & (1 << i))
		    && mac_exc_update_action_label(&thread->exc_actions[i], new_label) == 0) {
			old_port[i] = thread->exc_actions[i].port;
			thread->exc_actions[i].port = ipc_port_copy_send(new_port);
			thread->exc_actions[i].behavior = new_behavior;
			thread->exc_actions[i].flavor = new_flavor;
			thread->exc_actions[i].privileged = privileged;
		} else {
			old_port[i] = IP_NULL;
		}
	}

	thread_mtx_unlock(thread);

	mac_exc_free_label(new_label);

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
		if (IP_VALID(old_port[i]))
			ipc_port_release_send(old_port[i]);

	if (IP_VALID(new_port))		 /* consume send right */
		ipc_port_release_send(new_port);

	return (KERN_SUCCESS);
}
kern_return_t
task_set_exception_ports(
	task_t			task,
	exception_mask_t	exception_mask,
	ipc_port_t		new_port,
	exception_behavior_t	new_behavior,
	thread_state_flavor_t	new_flavor)
{
	ipc_port_t	old_port[EXC_TYPES_COUNT];
	boolean_t privileged = current_task()->sec_token.val[0] == 0;
	int		i;

	struct label *new_label;

	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (exception_mask & ~EXC_MASK_VALID)
		return (KERN_INVALID_ARGUMENT);

	if (IP_VALID(new_port)) {
		switch (new_behavior & ~MACH_EXCEPTION_CODES) {

		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
			break;

		default:
			return (KERN_INVALID_ARGUMENT);
		}
	}

	/*
	 * Check the validity of the thread_state_flavor by calling the
	 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
	 * osfmk/mach/ARCHITECTURE/thread_status.h
	 */
	if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor))
		return (KERN_INVALID_ARGUMENT);

	new_label = mac_exc_create_label_for_current_proc();

	itk_lock(task);

	if (task->itk_self == IP_NULL) {
		itk_unlock(task);

		return (KERN_FAILURE);
	}

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
		if ((exception_mask & (1 << i))
		    && mac_exc_update_action_label(&task->exc_actions[i], new_label) == 0) {
			old_port[i] = task->exc_actions[i].port;
			task->exc_actions[i].port =
				ipc_port_copy_send(new_port);
			task->exc_actions[i].behavior = new_behavior;
			task->exc_actions[i].flavor = new_flavor;
			task->exc_actions[i].privileged = privileged;
		} else {
			old_port[i] = IP_NULL;
		}
	}

	itk_unlock(task);

	mac_exc_free_label(new_label);

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
		if (IP_VALID(old_port[i]))
			ipc_port_release_send(old_port[i]);

	if (IP_VALID(new_port))		 /* consume send right */
		ipc_port_release_send(new_port);

	return (KERN_SUCCESS);
}
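
/*
 * Illustrative sketch (not part of this file): a debugger-style client that
 * holds a send right for the target task typically installs a handler port
 * with the public interfaces from <mach/mach_port.h> and <mach/task.h>:
 *
 *	mach_port_t exc_port;
 *	mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &exc_port);
 *	mach_port_insert_right(mach_task_self(), exc_port, exc_port, MACH_MSG_TYPE_MAKE_SEND);
 *	kern_return_t kr = task_set_exception_ports(target_task, EXC_MASK_BAD_ACCESS,
 *	    exc_port, EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES, THREAD_STATE_NONE);
 *
 * target_task is a hypothetical send right obtained elsewhere by the caller.
 */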
/*
 *	Routine:	thread/task_swap_exception_ports [kernel call]
 *	Purpose:
 *		Sets the thread/task exception port, flavor and
 *		behavior for the exception types specified by the
 *		mask.
 *
 *		The old ports, behavior and flavors are returned.
 *		Count specifies the array sizes on input and
 *		the number of returned ports etc. on output.  The
 *		arrays must be large enough to hold all the returned
 *		data, MIG returns an error otherwise.  The masks
 *		array specifies the corresponding exception type(s).
 *	Conditions:
 *		Nothing locked.  If successful, consumes
 *		the supplied send right.
 *
 *		Returns up to [in] CountCnt elements.
 *	Returns:
 *		KERN_SUCCESS		Changed the special port.
 *		KERN_INVALID_ARGUMENT	The thread is null,
 *					Illegal mask bit set.
 *					Illegal exception behavior
 *		KERN_FAILURE		The thread is dead.
 */

kern_return_t
thread_swap_exception_ports(
	thread_t			thread,
	exception_mask_t		exception_mask,
	ipc_port_t			new_port,
	exception_behavior_t		new_behavior,
	thread_state_flavor_t		new_flavor,
	exception_mask_array_t		masks,
	mach_msg_type_number_t		*CountCnt,
	exception_port_array_t		ports,
	exception_behavior_array_t	behaviors,
	thread_state_flavor_array_t	flavors)
{
	ipc_port_t	old_port[EXC_TYPES_COUNT];
	boolean_t privileged = current_task()->sec_token.val[0] == 0;
	unsigned int	i, j, count;

	struct label *new_label;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (exception_mask & ~EXC_MASK_VALID)
		return (KERN_INVALID_ARGUMENT);

	if (IP_VALID(new_port)) {
		switch (new_behavior & ~MACH_EXCEPTION_CODES) {

		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
			break;

		default:
			return (KERN_INVALID_ARGUMENT);
		}
	}

	if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor))
		return (KERN_INVALID_ARGUMENT);

	new_label = mac_exc_create_label_for_current_proc();

	thread_mtx_lock(thread);

	if (!thread->active) {
		thread_mtx_unlock(thread);

		return (KERN_FAILURE);
	}

	if (thread->exc_actions == NULL) {
		ipc_thread_init_exc_actions(thread);
	}

	assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
	for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
		if ((exception_mask & (1 << i))
		    && mac_exc_update_action_label(&thread->exc_actions[i], new_label) == 0) {
			for (j = 0; j < count; ++j) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (thread->exc_actions[i].port == ports[j] &&
				    thread->exc_actions[i].behavior == behaviors[j] &&
				    thread->exc_actions[i].flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			if (j == count) {
				masks[j] = (1 << i);
				ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);

				behaviors[j] = thread->exc_actions[i].behavior;
				flavors[j] = thread->exc_actions[i].flavor;
				++count;
			}

			old_port[i] = thread->exc_actions[i].port;
			thread->exc_actions[i].port = ipc_port_copy_send(new_port);
			thread->exc_actions[i].behavior = new_behavior;
			thread->exc_actions[i].flavor = new_flavor;
			thread->exc_actions[i].privileged = privileged;
		} else {
			old_port[i] = IP_NULL;
		}
	}

	thread_mtx_unlock(thread);

	mac_exc_free_label(new_label);

	while (--i >= FIRST_EXCEPTION) {
		if (IP_VALID(old_port[i]))
			ipc_port_release_send(old_port[i]);
	}

	if (IP_VALID(new_port))		 /* consume send right */
		ipc_port_release_send(new_port);

	*CountCnt = count;

	return (KERN_SUCCESS);
}
kern_return_t
task_swap_exception_ports(
	task_t				task,
	exception_mask_t		exception_mask,
	ipc_port_t			new_port,
	exception_behavior_t		new_behavior,
	thread_state_flavor_t		new_flavor,
	exception_mask_array_t		masks,
	mach_msg_type_number_t		*CountCnt,
	exception_port_array_t		ports,
	exception_behavior_array_t	behaviors,
	thread_state_flavor_array_t	flavors)
{
	ipc_port_t	old_port[EXC_TYPES_COUNT];
	boolean_t privileged = current_task()->sec_token.val[0] == 0;
	unsigned int	i, j, count;

	struct label *new_label;

	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (exception_mask & ~EXC_MASK_VALID)
		return (KERN_INVALID_ARGUMENT);

	if (IP_VALID(new_port)) {
		switch (new_behavior & ~MACH_EXCEPTION_CODES) {

		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
			break;

		default:
			return (KERN_INVALID_ARGUMENT);
		}
	}

	if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor))
		return (KERN_INVALID_ARGUMENT);

	new_label = mac_exc_create_label_for_current_proc();

	itk_lock(task);

	if (task->itk_self == IP_NULL) {
		itk_unlock(task);

		return (KERN_FAILURE);
	}

	assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
	for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
		if ((exception_mask & (1 << i))
		    && mac_exc_update_action_label(&task->exc_actions[i], new_label) == 0) {
			for (j = 0; j < count; j++) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (task->exc_actions[i].port == ports[j] &&
				    task->exc_actions[i].behavior == behaviors[j] &&
				    task->exc_actions[i].flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			if (j == count) {
				masks[j] = (1 << i);
				ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
				behaviors[j] = task->exc_actions[i].behavior;
				flavors[j] = task->exc_actions[i].flavor;
				++count;
			}

			old_port[i] = task->exc_actions[i].port;

			task->exc_actions[i].port = ipc_port_copy_send(new_port);
			task->exc_actions[i].behavior = new_behavior;
			task->exc_actions[i].flavor = new_flavor;
			task->exc_actions[i].privileged = privileged;
		} else {
			old_port[i] = IP_NULL;
		}
	}

	itk_unlock(task);

	mac_exc_free_label(new_label);

	while (--i >= FIRST_EXCEPTION) {
		if (IP_VALID(old_port[i]))
			ipc_port_release_send(old_port[i]);
	}

	if (IP_VALID(new_port))		 /* consume send right */
		ipc_port_release_send(new_port);

	*CountCnt = count;

	return (KERN_SUCCESS);
}
/*
 *	Routine:	thread/task_get_exception_ports [kernel call]
 *	Purpose:
 *		Clones a send right for each of the thread/task's exception
 *		ports specified in the mask and returns the behaviour
 *		and flavor of said port.
 *
 *		Returns up to [in] CountCnt elements.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		KERN_SUCCESS		Extracted a send right.
 *		KERN_INVALID_ARGUMENT	The thread is null,
 *					Invalid special port,
 *					Illegal mask bit set.
 *		KERN_FAILURE		The thread is dead.
 */

kern_return_t
thread_get_exception_ports(
	thread_t			thread,
	exception_mask_t		exception_mask,
	exception_mask_array_t		masks,
	mach_msg_type_number_t		*CountCnt,
	exception_port_array_t		ports,
	exception_behavior_array_t	behaviors,
	thread_state_flavor_array_t	flavors)
{
	unsigned int	i, j, count;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (exception_mask & ~EXC_MASK_VALID)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (!thread->active) {
		thread_mtx_unlock(thread);

		return (KERN_FAILURE);
	}

	count = 0;

	if (thread->exc_actions == NULL) {
		goto done;
	}

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
		if (exception_mask & (1 << i)) {
			for (j = 0; j < count; ++j) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (thread->exc_actions[i].port == ports[j] &&
				    thread->exc_actions[i].behavior == behaviors[j] &&
				    thread->exc_actions[i].flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			if (j == count) {
				masks[j] = (1 << i);
				ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);
				behaviors[j] = thread->exc_actions[i].behavior;
				flavors[j] = thread->exc_actions[i].flavor;
				++count;
				if (count >= *CountCnt)
					break;
			}
		}
	}

done:
	thread_mtx_unlock(thread);

	*CountCnt = count;

	return (KERN_SUCCESS);
}
kern_return_t
task_get_exception_ports(
	task_t				task,
	exception_mask_t		exception_mask,
	exception_mask_array_t		masks,
	mach_msg_type_number_t		*CountCnt,
	exception_port_array_t		ports,
	exception_behavior_array_t	behaviors,
	thread_state_flavor_array_t	flavors)
{
	unsigned int	i, j, count;

	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (exception_mask & ~EXC_MASK_VALID)
		return (KERN_INVALID_ARGUMENT);

	itk_lock(task);

	if (task->itk_self == IP_NULL) {
		itk_unlock(task);

		return (KERN_FAILURE);
	}

	count = 0;

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
		if (exception_mask & (1 << i)) {
			for (j = 0; j < count; ++j) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (task->exc_actions[i].port == ports[j] &&
				    task->exc_actions[i].behavior == behaviors[j] &&
				    task->exc_actions[i].flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			if (j == count) {
				masks[j] = (1 << i);
				ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
				behaviors[j] = task->exc_actions[i].behavior;
				flavors[j] = task->exc_actions[i].flavor;
				++count;
				if (count > *CountCnt)
					break;
			}
		}
	}

	itk_unlock(task);

	*CountCnt = count;

	return (KERN_SUCCESS);
}