/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
/*
 *	Task and thread related IPC functions.
 */
#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mach_param.h>
#include <mach/task_special_ports.h>
#include <mach/thread_special_ports.h>
#include <mach/thread_status.h>
#include <mach/exception_types.h>
#include <mach/memory_object_types.h>
#include <mach/mach_traps.h>
#include <mach/task_server.h>
#include <mach/thread_act_server.h>
#include <mach/mach_host_server.h>
#include <mach/host_priv_server.h>
#include <mach/vm_map_server.h>

#include <kern/kern_types.h>
#include <kern/host.h>
#include <kern/ipc_kobject.h>
#include <kern/ipc_tt.h>
#include <kern/kalloc.h>
#include <kern/thread.h>
#include <kern/misc_protos.h>

#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>

#include <security/mac_mach_internal.h>
/* forward declarations */
task_t convert_port_to_locked_task(ipc_port_t port);
/*
 *	Routine:	ipc_task_init
 *	Purpose:
 *		Initialize a task's IPC state.
 *
 *		If non-null, some state will be inherited from the parent.
 *		The parent must be appropriately initialized.
 */

void
ipc_task_init(
    task_t      task,
    task_t      parent)
{
    ipc_space_t space;
    ipc_port_t kport;
    ipc_port_t nport;
    kern_return_t kr;
    int i;

    kr = ipc_space_create(&ipc_table_entries[0], &space);
    if (kr != KERN_SUCCESS)
        panic("ipc_task_init");

    space->is_task = task;

    kport = ipc_port_alloc_kernel();
    if (kport == IP_NULL)
        panic("ipc_task_init");

    nport = ipc_port_alloc_kernel();
    if (nport == IP_NULL)
        panic("ipc_task_init");

    itk_lock_init(task);
    task->itk_self = kport;
    task->itk_nself = nport;
    task->itk_sself = ipc_port_make_send(kport);
    task->itk_space = space;
    space->is_fast = FALSE;

#if CONFIG_MACF_MACH
    if (parent)
        mac_task_label_associate(parent, task, &parent->maclabel,
            &task->maclabel, &kport->ip_label);
    else
        mac_task_label_associate_kernel(task, &task->maclabel, &kport->ip_label);
#endif

    if (parent == TASK_NULL) {
        ipc_port_t port;

        for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
            task->exc_actions[i].port = IP_NULL;
        }

        kr = host_get_host_port(host_priv_self(), &port);
        assert(kr == KERN_SUCCESS);
        task->itk_host = port;

        task->itk_bootstrap = IP_NULL;
        task->itk_seatbelt = IP_NULL;
        task->itk_gssd = IP_NULL;
        task->itk_automountd = IP_NULL;
        task->itk_task_access = IP_NULL;

        for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
            task->itk_registered[i] = IP_NULL;
    } else {
        itk_lock(parent);
        assert(parent->itk_self != IP_NULL);

        /* inherit registered ports */

        for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
            task->itk_registered[i] =
                ipc_port_copy_send(parent->itk_registered[i]);

        /* inherit exception and bootstrap ports */

        for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
            task->exc_actions[i].port =
                ipc_port_copy_send(parent->exc_actions[i].port);
            task->exc_actions[i].flavor =
                parent->exc_actions[i].flavor;
            task->exc_actions[i].behavior =
                parent->exc_actions[i].behavior;
            task->exc_actions[i].privileged =
                parent->exc_actions[i].privileged;
        }

        task->itk_host = ipc_port_copy_send(parent->itk_host);
        task->itk_bootstrap = ipc_port_copy_send(parent->itk_bootstrap);
        task->itk_seatbelt = ipc_port_copy_send(parent->itk_seatbelt);
        task->itk_gssd = ipc_port_copy_send(parent->itk_gssd);
        task->itk_automountd = ipc_port_copy_send(parent->itk_automountd);
        task->itk_task_access = ipc_port_copy_send(parent->itk_task_access);

        itk_unlock(parent);
    }
}
/*
 *	Routine:	ipc_task_enable
 *	Purpose:
 *		Enable a task for IPC access.
 */

void
ipc_task_enable(
    task_t      task)
{
    ipc_port_t kport;
    ipc_port_t nport;

    itk_lock(task);
    kport = task->itk_self;
    if (kport != IP_NULL)
        ipc_kobject_set(kport, (ipc_kobject_t) task, IKOT_TASK);
    nport = task->itk_nself;
    if (nport != IP_NULL)
        ipc_kobject_set(nport, (ipc_kobject_t) task, IKOT_TASK_NAME);
    itk_unlock(task);
}
/*
 *	Routine:	ipc_task_disable
 *	Purpose:
 *		Disable IPC access to a task.
 */

void
ipc_task_disable(
    task_t      task)
{
    ipc_port_t kport;
    ipc_port_t nport;

    itk_lock(task);
    kport = task->itk_self;
    if (kport != IP_NULL)
        ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
    nport = task->itk_nself;
    if (nport != IP_NULL)
        ipc_kobject_set(nport, IKO_NULL, IKOT_NONE);
    itk_unlock(task);
}
/*
 *	Routine:	ipc_task_terminate
 *	Purpose:
 *		Clean up and destroy a task's IPC state.
 *	Conditions:
 *		Nothing locked.  The task must be suspended.
 *		(Or the current thread must be in the task.)
 */

void
ipc_task_terminate(
    task_t      task)
{
    ipc_port_t kport;
    ipc_port_t nport;
    int i;

    itk_lock(task);
    kport = task->itk_self;

    if (kport == IP_NULL) {
        /* the task is already terminated (can this happen?) */
        itk_unlock(task);
        return;
    }
    task->itk_self = IP_NULL;

    nport = task->itk_nself;
    assert(nport != IP_NULL);
    task->itk_nself = IP_NULL;

    itk_unlock(task);

    /* release the naked send rights */

    if (IP_VALID(task->itk_sself))
        ipc_port_release_send(task->itk_sself);

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
        if (IP_VALID(task->exc_actions[i].port)) {
            ipc_port_release_send(task->exc_actions[i].port);
        }
    }

    if (IP_VALID(task->itk_host))
        ipc_port_release_send(task->itk_host);

    if (IP_VALID(task->itk_bootstrap))
        ipc_port_release_send(task->itk_bootstrap);

    if (IP_VALID(task->itk_seatbelt))
        ipc_port_release_send(task->itk_seatbelt);

    if (IP_VALID(task->itk_gssd))
        ipc_port_release_send(task->itk_gssd);

    if (IP_VALID(task->itk_automountd))
        ipc_port_release_send(task->itk_automountd);

    if (IP_VALID(task->itk_task_access))
        ipc_port_release_send(task->itk_task_access);

    for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
        if (IP_VALID(task->itk_registered[i]))
            ipc_port_release_send(task->itk_registered[i]);

    ipc_port_release_send(task->wired_ledger_port);
    ipc_port_release_send(task->paged_ledger_port);

    /* destroy the kernel ports */
    ipc_port_dealloc_kernel(kport);
    ipc_port_dealloc_kernel(nport);
}
/*
 *	Routine:	ipc_task_reset
 *	Purpose:
 *		Reset a task's IPC state to protect it when
 *		it enters an elevated security context.  The
 *		task name port can remain the same - since
 *		it represents no specific privilege.
 *	Conditions:
 *		Nothing locked.  The task must be suspended.
 *		(Or the current thread must be in the task.)
 */

void
ipc_task_reset(
    task_t      task)
{
    ipc_port_t old_kport, new_kport;
    ipc_port_t old_sself;
    ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
    int i;

    new_kport = ipc_port_alloc_kernel();
    if (new_kport == IP_NULL)
        panic("ipc_task_reset");

    itk_lock(task);

    old_kport = task->itk_self;

    if (old_kport == IP_NULL) {
        /* the task is already terminated (can this happen?) */
        itk_unlock(task);
        ipc_port_dealloc_kernel(new_kport);
        return;
    }

    task->itk_self = new_kport;
    old_sself = task->itk_sself;
    task->itk_sself = ipc_port_make_send(new_kport);
    ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
    ipc_kobject_set(new_kport, (ipc_kobject_t) task, IKOT_TASK);

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
        if (!task->exc_actions[i].privileged) {
            old_exc_actions[i] = task->exc_actions[i].port;
            task->exc_actions[i].port = IP_NULL;
        } else {
            old_exc_actions[i] = IP_NULL;
        }
    }

    itk_unlock(task);

    /* release the naked send rights */

    if (IP_VALID(old_sself))
        ipc_port_release_send(old_sself);

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
        if (IP_VALID(old_exc_actions[i])) {
            ipc_port_release_send(old_exc_actions[i]);
        }
    }

    /* destroy the kernel port */
    ipc_port_dealloc_kernel(old_kport);
}
/*
 *	Routine:	ipc_thread_init
 *	Purpose:
 *		Initialize a thread's IPC state.
 */

void
ipc_thread_init(
    thread_t    thread)
{
    ipc_port_t  kport;
    int         i;

    kport = ipc_port_alloc_kernel();
    if (kport == IP_NULL)
        panic("ipc_thread_init");

    thread->ith_self = kport;
    thread->ith_sself = ipc_port_make_send(kport);

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
        thread->exc_actions[i].port = IP_NULL;

    ipc_kobject_set(kport, (ipc_kobject_t)thread, IKOT_THREAD);

    ipc_kmsg_queue_init(&thread->ith_messages);

    thread->ith_rpc_reply = IP_NULL;
}
/*
 *	Routine:	ipc_thread_disable
 *	Purpose:
 *		Disable IPC access to a thread.
 */

void
ipc_thread_disable(
    thread_t    thread)
{
    ipc_port_t  kport = thread->ith_self;

    if (kport != IP_NULL)
        ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
}
/*
 *	Routine:	ipc_thread_terminate
 *	Purpose:
 *		Clean up and destroy a thread's IPC state.
 */

void
ipc_thread_terminate(
    thread_t    thread)
{
    ipc_port_t  kport = thread->ith_self;
    int         i;

    if (kport != IP_NULL) {
        if (IP_VALID(thread->ith_sself))
            ipc_port_release_send(thread->ith_sself);

        thread->ith_sself = thread->ith_self = IP_NULL;

        for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
            if (IP_VALID(thread->exc_actions[i].port))
                ipc_port_release_send(thread->exc_actions[i].port);
        }

        ipc_port_dealloc_kernel(kport);
    }

    assert(ipc_kmsg_queue_empty(&thread->ith_messages));

    if (thread->ith_rpc_reply != IP_NULL)
        ipc_port_dealloc_reply(thread->ith_rpc_reply);

    thread->ith_rpc_reply = IP_NULL;
}
/*
 *	Routine:	ipc_thread_reset
 *	Purpose:
 *		Reset the IPC state for a given Mach thread when
 *		its task enters an elevated security context.
 *		Both the thread port and its exception ports have
 *		to be reset.  Its RPC reply port cannot have any
 *		rights outstanding, so it should be fine.
 */

void
ipc_thread_reset(
    thread_t    thread)
{
    ipc_port_t old_kport, new_kport;
    ipc_port_t old_sself;
    ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
    int i;

    new_kport = ipc_port_alloc_kernel();
    if (new_kport == IP_NULL)
        panic("ipc_thread_reset");

    thread_mtx_lock(thread);

    old_kport = thread->ith_self;

    if (old_kport == IP_NULL) {
        /* the thread is already terminated (can this happen?) */
        thread_mtx_unlock(thread);
        ipc_port_dealloc_kernel(new_kport);
        return;
    }

    thread->ith_self = new_kport;
    old_sself = thread->ith_sself;
    thread->ith_sself = ipc_port_make_send(new_kport);
    ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
    ipc_kobject_set(new_kport, (ipc_kobject_t) thread, IKOT_THREAD);

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
        if (!thread->exc_actions[i].privileged) {
            old_exc_actions[i] = thread->exc_actions[i].port;
            thread->exc_actions[i].port = IP_NULL;
        } else {
            old_exc_actions[i] = IP_NULL;
        }
    }

    thread_mtx_unlock(thread);

    /* release the naked send rights */

    if (IP_VALID(old_sself))
        ipc_port_release_send(old_sself);

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
        if (IP_VALID(old_exc_actions[i])) {
            ipc_port_release_send(old_exc_actions[i]);
        }
    }

    /* destroy the kernel port */
    ipc_port_dealloc_kernel(old_kport);
}
/*
 *	Routine:	retrieve_task_self_fast
 *	Purpose:
 *		Optimized version of retrieve_task_self,
 *		that only works for the current task.
 *
 *		Return a send right (possibly null/dead)
 *		for the task's user-visible self port.
 */

ipc_port_t
retrieve_task_self_fast(
    register task_t     task)
{
    register ipc_port_t port;

    assert(task == current_task());

    itk_lock(task);
    assert(task->itk_self != IP_NULL);

    if ((port = task->itk_sself) == task->itk_self) {
        /* no interposing */

        ip_lock(port);
        assert(ip_active(port));
        ip_reference(port);
        port->ip_srights++;
        ip_unlock(port);
    } else
        port = ipc_port_copy_send(port);
    itk_unlock(task);

    return port;
}
/*
 *	Routine:	retrieve_thread_self_fast
 *	Purpose:
 *		Return a send right (possibly null/dead)
 *		for the thread's user-visible self port.
 *
 *		Only works for the current thread.
 */

ipc_port_t
retrieve_thread_self_fast(
    thread_t        thread)
{
    register ipc_port_t port;

    assert(thread == current_thread());

    thread_mtx_lock(thread);

    assert(thread->ith_self != IP_NULL);

    if ((port = thread->ith_sself) == thread->ith_self) {
        /* no interposing */

        ip_lock(port);
        assert(ip_active(port));
        ip_reference(port);
        port->ip_srights++;
        ip_unlock(port);
    } else
        port = ipc_port_copy_send(port);

    thread_mtx_unlock(thread);

    return port;
}
/*
 *	Routine:	task_self_trap [mach trap]
 *	Purpose:
 *		Give the caller send rights for his own task port.
 *	Returns:
 *		MACH_PORT_NULL if there are any resource failures
 *		or other errors.
 */

mach_port_name_t
task_self_trap(
    __unused struct task_self_trap_args *args)
{
    task_t task = current_task();
    ipc_port_t sright;
    mach_port_name_t name;

    sright = retrieve_task_self_fast(task);
    name = ipc_port_copyout_send(sright, task->itk_space);
    return name;
}
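
/*
 * Illustrative sketch (not part of the original source): from user space this
 * trap is normally reached through the standard mach_task_self() stub/macro,
 * which returns and caches the task's self port name.  Roughly:
 *
 *	#include <mach/mach.h>
 *
 *	mach_port_t me = mach_task_self();	// name of a send right for the caller's task
 *	// 'me' can then be passed to task_* kernel calls (task_info(), etc.).
 */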
/*
 *	Routine:	thread_self_trap [mach trap]
 *	Purpose:
 *		Give the caller send rights for his own thread port.
 *	Returns:
 *		MACH_PORT_NULL if there are any resource failures
 *		or other errors.
 */

mach_port_name_t
thread_self_trap(
    __unused struct thread_self_trap_args *args)
{
    thread_t thread = current_thread();
    task_t task = thread->task;
    ipc_port_t sright;
    mach_port_name_t name;

    sright = retrieve_thread_self_fast(thread);
    name = ipc_port_copyout_send(sright, task->itk_space);
    return name;
}
/*
 *	Routine:	mach_reply_port [mach trap]
 *	Purpose:
 *		Allocate a port for the caller.
 *	Returns:
 *		MACH_PORT_NULL if there are any resource failures
 *		or other errors.
 */

mach_port_name_t
mach_reply_port(
    __unused struct mach_reply_port_args *args)
{
    ipc_port_t port;
    mach_port_name_t name;
    kern_return_t kr;

    kr = ipc_port_alloc(current_task()->itk_space, &name, &port);
    if (kr == KERN_SUCCESS)
        ip_unlock(port);
    else
        name = MACH_PORT_NULL;

    return name;
}
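
/*
 * Illustrative sketch (not part of the original source): user-space MIG stubs
 * obtain a reply port this way (usually indirectly, via mig_get_reply_port()).
 * A direct call might look like:
 *
 *	#include <mach/mach.h>
 *
 *	mach_port_t reply = mach_reply_port();
 *	if (reply == MACH_PORT_NULL)
 *		abort();	// resource failure: no receive right was allocated
 *	// 'reply' names a receive right in the caller's IPC space, suitable
 *	// for use as the msgh_local_port of an outgoing request.
 */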
/*
 *	Routine:	thread_get_special_port [kernel call]
 *	Purpose:
 *		Clones a send right for one of the thread's
 *		special ports.
 *	Returns:
 *		KERN_SUCCESS		Extracted a send right.
 *		KERN_INVALID_ARGUMENT	The thread is null.
 *		KERN_FAILURE		The thread is dead.
 *		KERN_INVALID_ARGUMENT	Invalid special port.
 */

kern_return_t
thread_get_special_port(
    thread_t        thread,
    int             which,
    ipc_port_t      *portp)
{
    kern_return_t   result = KERN_SUCCESS;
    ipc_port_t      *whichp;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    switch (which) {

    case THREAD_KERNEL_PORT:
        whichp = &thread->ith_sself;
        break;

    default:
        return (KERN_INVALID_ARGUMENT);
    }

    thread_mtx_lock(thread);

    if (thread->active)
        *portp = ipc_port_copy_send(*whichp);
    else
        result = KERN_FAILURE;

    thread_mtx_unlock(thread);

    return (result);
}
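
/*
 * Illustrative sketch (not part of the original source): a user-space caller
 * asking for its own thread kernel port.  mach_thread_self() and
 * mach_port_deallocate() are the standard user-level Mach API.
 *
 *	mach_port_t self = mach_thread_self();
 *	mach_port_t kport;
 *	kern_return_t kr;
 *
 *	kr = thread_get_special_port(self, THREAD_KERNEL_PORT, &kport);
 *	if (kr == KERN_SUCCESS)
 *		mach_port_deallocate(mach_task_self(), kport);
 *	mach_port_deallocate(mach_task_self(), self);
 */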
/*
 *	Routine:	thread_set_special_port [kernel call]
 *	Purpose:
 *		Changes one of the thread's special ports,
 *		setting it to the supplied send right.
 *	Conditions:
 *		Nothing locked.  If successful, consumes
 *		the supplied send right.
 *	Returns:
 *		KERN_SUCCESS		Changed the special port.
 *		KERN_INVALID_ARGUMENT	The thread is null.
 *		KERN_FAILURE		The thread is dead.
 *		KERN_INVALID_ARGUMENT	Invalid special port.
 */

kern_return_t
thread_set_special_port(
    thread_t        thread,
    int             which,
    ipc_port_t      port)
{
    kern_return_t   result = KERN_SUCCESS;
    ipc_port_t      *whichp, old = IP_NULL;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    switch (which) {

    case THREAD_KERNEL_PORT:
        whichp = &thread->ith_sself;
        break;

    default:
        return (KERN_INVALID_ARGUMENT);
    }

    thread_mtx_lock(thread);

    if (thread->active) {
        old = *whichp;
        *whichp = port;
    } else
        result = KERN_FAILURE;

    thread_mtx_unlock(thread);

    if (IP_VALID(old))
        ipc_port_release_send(old);

    return (result);
}
/*
 *	Routine:	task_get_special_port [kernel call]
 *	Purpose:
 *		Clones a send right for one of the task's
 *		special ports.
 *	Returns:
 *		KERN_SUCCESS		Extracted a send right.
 *		KERN_INVALID_ARGUMENT	The task is null.
 *		KERN_FAILURE		The task/space is dead.
 *		KERN_INVALID_ARGUMENT	Invalid special port.
 */

kern_return_t
task_get_special_port(
    task_t      task,
    int         which,
    ipc_port_t  *portp)
{
    ipc_port_t port;

    if (task == TASK_NULL)
        return KERN_INVALID_ARGUMENT;

    itk_lock(task);
    if (task->itk_self == IP_NULL) {
        itk_unlock(task);
        return KERN_FAILURE;
    }

    switch (which) {
    case TASK_KERNEL_PORT:
        port = ipc_port_copy_send(task->itk_sself);
        break;

    case TASK_NAME_PORT:
        port = ipc_port_make_send(task->itk_nself);
        break;

    case TASK_HOST_PORT:
        port = ipc_port_copy_send(task->itk_host);
        break;

    case TASK_BOOTSTRAP_PORT:
        port = ipc_port_copy_send(task->itk_bootstrap);
        break;

    case TASK_WIRED_LEDGER_PORT:
        port = ipc_port_copy_send(task->wired_ledger_port);
        break;

    case TASK_PAGED_LEDGER_PORT:
        port = ipc_port_copy_send(task->paged_ledger_port);
        break;

    case TASK_SEATBELT_PORT:
        port = ipc_port_copy_send(task->itk_seatbelt);
        break;

    case TASK_GSSD_PORT:
        port = ipc_port_copy_send(task->itk_gssd);
        break;

    case TASK_ACCESS_PORT:
        port = ipc_port_copy_send(task->itk_task_access);
        break;

    case TASK_AUTOMOUNTD_PORT:
        port = ipc_port_copy_send(task->itk_automountd);
        break;

    default:
        itk_unlock(task);
        return KERN_INVALID_ARGUMENT;
    }
    itk_unlock(task);

    *portp = port;
    return KERN_SUCCESS;
}
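
/*
 * Illustrative sketch (not part of the original source): fetching the host
 * port of the calling task from user space.  The returned right is a copy
 * that the caller must deallocate when finished with it.
 *
 *	mach_port_t host;
 *	kern_return_t kr;
 *
 *	kr = task_get_special_port(mach_task_self(), TASK_HOST_PORT, &host);
 *	if (kr == KERN_SUCCESS) {
 *		// ... use 'host' (e.g. with host_info()) ...
 *		mach_port_deallocate(mach_task_self(), host);
 *	}
 */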
/*
 *	Routine:	task_set_special_port [kernel call]
 *	Purpose:
 *		Changes one of the task's special ports,
 *		setting it to the supplied send right.
 *	Conditions:
 *		Nothing locked.  If successful, consumes
 *		the supplied send right.
 *	Returns:
 *		KERN_SUCCESS		Changed the special port.
 *		KERN_INVALID_ARGUMENT	The task is null.
 *		KERN_FAILURE		The task/space is dead.
 *		KERN_INVALID_ARGUMENT	Invalid special port.
 *		KERN_NO_ACCESS		Attempted overwrite of seatbelt port.
 */

kern_return_t
task_set_special_port(
    task_t      task,
    int         which,
    ipc_port_t  port)
{
    ipc_port_t *whichp;
    ipc_port_t old;

    if (task == TASK_NULL)
        return KERN_INVALID_ARGUMENT;

    switch (which) {
    case TASK_KERNEL_PORT:
        whichp = &task->itk_sself;
        break;

    case TASK_HOST_PORT:
        whichp = &task->itk_host;
        break;

    case TASK_BOOTSTRAP_PORT:
        whichp = &task->itk_bootstrap;
        break;

    case TASK_WIRED_LEDGER_PORT:
        whichp = &task->wired_ledger_port;
        break;

    case TASK_PAGED_LEDGER_PORT:
        whichp = &task->paged_ledger_port;
        break;

    case TASK_SEATBELT_PORT:
        whichp = &task->itk_seatbelt;
        break;

    case TASK_GSSD_PORT:
        whichp = &task->itk_gssd;
        break;

    case TASK_ACCESS_PORT:
        whichp = &task->itk_task_access;
        break;

    case TASK_AUTOMOUNTD_PORT:
        whichp = &task->itk_automountd;
        break;

    default:
        return KERN_INVALID_ARGUMENT;
    }

    itk_lock(task);
    if (task->itk_self == IP_NULL) {
        itk_unlock(task);
        return KERN_FAILURE;
    }

    /* do not allow overwrite of seatbelt or task access ports */
    if ((TASK_SEATBELT_PORT == which || TASK_ACCESS_PORT == which)
        && IP_VALID(*whichp)) {
        itk_unlock(task);
        return KERN_NO_ACCESS;
    }

#if CONFIG_MACF_MACH
    if (mac_task_check_service(current_task(), task, "set_special_port")) {
        itk_unlock(task);
        return KERN_NO_ACCESS;
    }
#endif

    old = *whichp;
    *whichp = port;
    itk_unlock(task);

    if (IP_VALID(old))
        ipc_port_release_send(old);
    return KERN_SUCCESS;
}
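
/*
 * Illustrative sketch (not part of the original source): replacing the
 * caller's bootstrap port from user space.  'new_bootstrap' is a placeholder
 * send right; on success the kernel consumes it, so the caller should not
 * deallocate it afterwards.
 *
 *	kern_return_t kr;
 *
 *	kr = task_set_special_port(mach_task_self(),
 *	    TASK_BOOTSTRAP_PORT, new_bootstrap);
 *	if (kr == KERN_NO_ACCESS) {
 *		// seatbelt and task access ports cannot be overwritten once set
 *	}
 */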
/*
 *	Routine:	mach_ports_register [kernel call]
 *	Purpose:
 *		Stash a handful of port send rights in the task.
 *		Child tasks will inherit these rights, but they
 *		must use mach_ports_lookup to acquire them.
 *
 *		The rights are supplied in a (wired) kalloc'd segment.
 *		Rights which aren't supplied are assumed to be null.
 *	Conditions:
 *		Nothing locked.  If successful, consumes
 *		the supplied rights and memory.
 *	Returns:
 *		KERN_SUCCESS		Stashed the port rights.
 *		KERN_INVALID_ARGUMENT	The task is null.
 *		KERN_INVALID_ARGUMENT	The task is dead.
 *		KERN_INVALID_ARGUMENT	Too many port rights supplied.
 */

kern_return_t
mach_ports_register(
    task_t                  task,
    mach_port_array_t       memory,
    mach_msg_type_number_t  portsCnt)
{
    ipc_port_t ports[TASK_PORT_REGISTER_MAX];
    unsigned int i;

    if ((task == TASK_NULL) ||
        (portsCnt > TASK_PORT_REGISTER_MAX))
        return KERN_INVALID_ARGUMENT;

    /*
     *	Pad the port rights with nulls.
     */

    for (i = 0; i < portsCnt; i++)
        ports[i] = memory[i];
    for (; i < TASK_PORT_REGISTER_MAX; i++)
        ports[i] = IP_NULL;

    itk_lock(task);
    if (task->itk_self == IP_NULL) {
        itk_unlock(task);
        return KERN_INVALID_ARGUMENT;
    }

    /*
     *	Replace the old send rights with the new.
     *	Release the old rights after unlocking.
     */

    for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
        ipc_port_t old;

        old = task->itk_registered[i];
        task->itk_registered[i] = ports[i];
        ports[i] = old;
    }

    itk_unlock(task);

    for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
        if (IP_VALID(ports[i]))
            ipc_port_release_send(ports[i]);

    /*
     *	Now that the operation is known to be successful,
     *	we can free the memory.
     */

    if (portsCnt != 0)
        kfree(memory,
              (vm_size_t) (portsCnt * sizeof(mach_port_t)));

    return KERN_SUCCESS;
}
/*
 *	Routine:	mach_ports_lookup [kernel call]
 *	Purpose:
 *		Retrieves (clones) the stashed port send rights.
 *	Conditions:
 *		Nothing locked.  If successful, the caller gets
 *		rights and memory.
 *	Returns:
 *		KERN_SUCCESS		Retrieved the send rights.
 *		KERN_INVALID_ARGUMENT	The task is null.
 *		KERN_INVALID_ARGUMENT	The task is dead.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
 */

kern_return_t
mach_ports_lookup(
    task_t                  task,
    mach_port_array_t       *portsp,
    mach_msg_type_number_t  *portsCnt)
{
    void *memory;
    vm_size_t size;
    ipc_port_t *ports;
    int i;

    if (task == TASK_NULL)
        return KERN_INVALID_ARGUMENT;

    size = (vm_size_t) (TASK_PORT_REGISTER_MAX * sizeof(ipc_port_t));

    memory = kalloc(size);
    if (memory == 0)
        return KERN_RESOURCE_SHORTAGE;

    itk_lock(task);
    if (task->itk_self == IP_NULL) {
        itk_unlock(task);

        kfree(memory, size);
        return KERN_INVALID_ARGUMENT;
    }

    ports = (ipc_port_t *) memory;

    /*
     *	Clone port rights.  Because kalloc'd memory
     *	is wired, we won't fault while holding the task lock.
     */

    for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
        ports[i] = ipc_port_copy_send(task->itk_registered[i]);

    itk_unlock(task);

    *portsp = (mach_port_array_t) ports;
    *portsCnt = TASK_PORT_REGISTER_MAX;
    return KERN_SUCCESS;
}
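
/*
 * Illustrative sketch (not part of the original source): the register/lookup
 * pair as seen from user space.  'some_port' is a placeholder send right.
 * The registered rights are inherited by child tasks; mach_ports_lookup()
 * returns the array out-of-line, so the caller deallocates it with
 * vm_deallocate() when done.
 *
 *	mach_port_t regs[1] = { some_port };
 *	mach_ports_register(mach_task_self(), regs, 1);
 *
 *	mach_port_array_t ports;
 *	mach_msg_type_number_t cnt;
 *	if (mach_ports_lookup(mach_task_self(), &ports, &cnt) == KERN_SUCCESS) {
 *		// ports[0..cnt-1] are send rights (possibly MACH_PORT_NULL)
 *		vm_deallocate(mach_task_self(), (vm_address_t)ports,
 *		    cnt * sizeof(ports[0]));
 *	}
 */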
/*
 *	Routine:	convert_port_to_locked_task
 *	Purpose:
 *		Internal helper routine to convert from a port to a locked
 *		task.  Used by several routines that try to convert from a
 *		task port to a reference on some task related object.
 *	Conditions:
 *		Nothing locked, blocking OK.
 */
task_t
convert_port_to_locked_task(ipc_port_t port)
{
    int try_failed_count = 0;

    while (IP_VALID(port)) {
        task_t task;

        ip_lock(port);
        if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK)) {
            ip_unlock(port);
            return TASK_NULL;
        }
        task = (task_t) port->ip_kobject;
        assert(task != TASK_NULL);

        /*
         * Normal lock ordering puts task_lock() before ip_lock().
         * Attempt out-of-order locking here.
         */
        if (task_lock_try(task)) {
            ip_unlock(port);
            return task;
        }
        try_failed_count++;

        ip_unlock(port);
        mutex_pause(try_failed_count);
    }
    return TASK_NULL;
}
/*
 *	Routine:	convert_port_to_task
 *	Purpose:
 *		Convert from a port to a task.
 *		Doesn't consume the port ref; produces a task ref,
 *		which may be null.
 *	Conditions:
 *		Nothing locked.
 */
task_t
convert_port_to_task(
    ipc_port_t      port)
{
    task_t      task = TASK_NULL;

    if (IP_VALID(port)) {
        ip_lock(port);

        if (ip_active(port) &&
            ip_kotype(port) == IKOT_TASK) {
            task = (task_t)port->ip_kobject;
            assert(task != TASK_NULL);

            task_reference_internal(task);
        }

        ip_unlock(port);
    }

    return (task);
}
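
/*
 * Illustrative sketch (not part of the original source): typical in-kernel
 * use of the converter.  The port reference is not consumed, and the task
 * reference that is produced must be dropped with task_deallocate().
 *
 *	task_t t = convert_port_to_task(port);
 *	if (t != TASK_NULL) {
 *		// ... operate on 't' ...
 *		task_deallocate(t);
 *	}
 */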
/*
 *	Routine:	convert_port_to_task_name
 *	Purpose:
 *		Convert from a port to a task name.
 *		Doesn't consume the port ref; produces a task name ref,
 *		which may be null.
 *	Conditions:
 *		Nothing locked.
 */
task_name_t
convert_port_to_task_name(
    ipc_port_t      port)
{
    task_name_t     task = TASK_NULL;

    if (IP_VALID(port)) {
        ip_lock(port);

        if (ip_active(port) &&
            (ip_kotype(port) == IKOT_TASK ||
             ip_kotype(port) == IKOT_TASK_NAME)) {
            task = (task_name_t)port->ip_kobject;
            assert(task != TASK_NAME_NULL);

            task_reference_internal(task);
        }

        ip_unlock(port);
    }

    return (task);
}
/*
 *	Routine:	convert_port_to_space
 *	Purpose:
 *		Convert from a port to a space.
 *		Doesn't consume the port ref; produces a space ref,
 *		which may be null.
 *	Conditions:
 *		Nothing locked.
 */
ipc_space_t
convert_port_to_space(
    ipc_port_t  port)
{
    ipc_space_t space;
    task_t task;

    task = convert_port_to_locked_task(port);

    if (task == TASK_NULL)
        return IPC_SPACE_NULL;

    if (!task->active) {
        task_unlock(task);
        return IPC_SPACE_NULL;
    }

    space = task->itk_space;
    is_reference(space);
    task_unlock(task);
    return (space);
}
/*
 *	Routine:	convert_port_to_map
 *	Purpose:
 *		Convert from a port to a map.
 *		Doesn't consume the port ref; produces a map ref,
 *		which may be null.
 *	Conditions:
 *		Nothing locked.
 */
vm_map_t
convert_port_to_map(
    ipc_port_t  port)
{
    task_t task;
    vm_map_t map;

    task = convert_port_to_locked_task(port);

    if (task == TASK_NULL)
        return VM_MAP_NULL;

    if (!task->active) {
        task_unlock(task);
        return VM_MAP_NULL;
    }

    map = task->map;
    vm_map_reference_swap(map);
    task_unlock(task);
    return (map);
}
/*
 *	Routine:	convert_port_to_thread
 *	Purpose:
 *		Convert from a port to a thread.
 *		Doesn't consume the port ref; produces a thread ref,
 *		which may be null.
 *	Conditions:
 *		Nothing locked.
 */
thread_t
convert_port_to_thread(
    ipc_port_t      port)
{
    thread_t    thread = THREAD_NULL;

    if (IP_VALID(port)) {
        ip_lock(port);

        if (ip_active(port) &&
            ip_kotype(port) == IKOT_THREAD) {
            thread = (thread_t)port->ip_kobject;
            assert(thread != THREAD_NULL);

            thread_reference_internal(thread);
        }

        ip_unlock(port);
    }

    return (thread);
}
/*
 *	Routine:	port_name_to_thread
 *	Purpose:
 *		Convert from a port name to a thread reference.
 *		A name of MACH_PORT_NULL is valid for the null thread.
 *	Conditions:
 *		Nothing locked.
 */
thread_t
port_name_to_thread(
    mach_port_name_t    name)
{
    thread_t    thread = THREAD_NULL;
    ipc_port_t  kport;

    if (MACH_PORT_VALID(name)) {
        if (ipc_object_copyin(current_space(), name,
                    MACH_MSG_TYPE_COPY_SEND,
                    (ipc_object_t *)&kport) != KERN_SUCCESS)
            return (THREAD_NULL);

        thread = convert_port_to_thread(kport);

        if (IP_VALID(kport))
            ipc_port_release_send(kport);
    }

    return (thread);
}
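
/*
 * Illustrative sketch (not part of the original source): in-kernel use from a
 * trap handler, where only a port name in the current space is available.
 * The returned reference must be dropped with thread_deallocate().
 *
 *	thread_t th = port_name_to_thread(name);
 *	if (th != THREAD_NULL) {
 *		// ... operate on 'th' ...
 *		thread_deallocate(th);
 *	}
 */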
task_t
port_name_to_task(
    mach_port_name_t name)
{
    ipc_port_t kern_port;
    kern_return_t kr;
    task_t task = TASK_NULL;

    if (MACH_PORT_VALID(name)) {
        kr = ipc_object_copyin(current_space(), name,
                   MACH_MSG_TYPE_COPY_SEND,
                   (ipc_object_t *) &kern_port);
        if (kr != KERN_SUCCESS)
            return TASK_NULL;

        task = convert_port_to_task(kern_port);

        if (IP_VALID(kern_port))
            ipc_port_release_send(kern_port);
    }
    return task;
}
/*
 *	Routine:	convert_task_to_port
 *	Purpose:
 *		Convert from a task to a port.
 *		Consumes a task ref; produces a naked send right
 *		which may be invalid.
 *	Conditions:
 *		Nothing locked.
 */
ipc_port_t
convert_task_to_port(
    task_t      task)
{
    ipc_port_t port;

    itk_lock(task);
    if (task->itk_self != IP_NULL)
        port = ipc_port_make_send(task->itk_self);
    else
        port = IP_NULL;
    itk_unlock(task);

    task_deallocate(task);
    return port;
}
/*
 *	Routine:	convert_task_name_to_port
 *	Purpose:
 *		Convert from a task name ref to a port.
 *		Consumes a task name ref; produces a naked send right
 *		which may be invalid.
 *	Conditions:
 *		Nothing locked.
 */
ipc_port_t
convert_task_name_to_port(
    task_name_t     task_name)
{
    ipc_port_t port;

    itk_lock(task_name);
    if (task_name->itk_nself != IP_NULL)
        port = ipc_port_make_send(task_name->itk_nself);
    else
        port = IP_NULL;
    itk_unlock(task_name);

    task_name_deallocate(task_name);
    return port;
}
/*
 *	Routine:	convert_thread_to_port
 *	Purpose:
 *		Convert from a thread to a port.
 *		Consumes a thread ref; produces a naked send right
 *		which may be invalid.
 *	Conditions:
 *		Nothing locked.
 */
ipc_port_t
convert_thread_to_port(
    thread_t        thread)
{
    ipc_port_t      port;

    thread_mtx_lock(thread);

    if (thread->ith_self != IP_NULL)
        port = ipc_port_make_send(thread->ith_self);
    else
        port = IP_NULL;

    thread_mtx_unlock(thread);

    thread_deallocate(thread);

    return (port);
}
/*
 *	Routine:	space_deallocate
 *	Purpose:
 *		Deallocate a space ref produced by convert_port_to_space.
 *	Conditions:
 *		Nothing locked.
 */
void
space_deallocate(
    ipc_space_t space)
{
    if (space != IS_NULL)
        is_release(space);
}
/*
 *	Routine:	thread/task_set_exception_ports [kernel call]
 *	Purpose:
 *		Sets the thread/task exception port, flavor and
 *		behavior for the exception types specified by the mask.
 *		There will be one send right per exception per valid
 *		mask bit.
 *	Conditions:
 *		Nothing locked.  If successful, consumes
 *		the supplied send right.
 *	Returns:
 *		KERN_SUCCESS		Changed the special port.
 *		KERN_INVALID_ARGUMENT	The thread is null,
 *					Illegal mask bit set.
 *					Illegal exception behavior.
 *		KERN_FAILURE		The thread is dead.
 */

kern_return_t
thread_set_exception_ports(
    thread_t                thread,
    exception_mask_t        exception_mask,
    ipc_port_t              new_port,
    exception_behavior_t    new_behavior,
    thread_state_flavor_t   new_flavor)
{
    ipc_port_t  old_port[EXC_TYPES_COUNT];
    boolean_t privileged = current_task()->sec_token.val[0] == 0;
    register int i;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    if (exception_mask & ~EXC_MASK_ALL)
        return (KERN_INVALID_ARGUMENT);

    if (IP_VALID(new_port)) {
        switch (new_behavior & ~MACH_EXCEPTION_CODES) {

        case EXCEPTION_DEFAULT:
        case EXCEPTION_STATE:
        case EXCEPTION_STATE_IDENTITY:
            break;

        default:
            return (KERN_INVALID_ARGUMENT);
        }
    }

    /*
     * Check the validity of the thread_state_flavor by calling the
     * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
     * osfmk/mach/ARCHITECTURE/thread_status.h
     */
    if (!VALID_THREAD_STATE_FLAVOR(new_flavor))
        return (KERN_INVALID_ARGUMENT);

    thread_mtx_lock(thread);

    if (!thread->active) {
        thread_mtx_unlock(thread);

        return (KERN_FAILURE);
    }

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
        if (exception_mask & (1 << i)) {
            old_port[i] = thread->exc_actions[i].port;
            thread->exc_actions[i].port = ipc_port_copy_send(new_port);
            thread->exc_actions[i].behavior = new_behavior;
            thread->exc_actions[i].flavor = new_flavor;
            thread->exc_actions[i].privileged = privileged;
        } else
            old_port[i] = IP_NULL;
    }

    thread_mtx_unlock(thread);

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
        if (IP_VALID(old_port[i]))
            ipc_port_release_send(old_port[i]);

    if (IP_VALID(new_port))     /* consume send right */
        ipc_port_release_send(new_port);

    return (KERN_SUCCESS);
}
kern_return_t
task_set_exception_ports(
    task_t                  task,
    exception_mask_t        exception_mask,
    ipc_port_t              new_port,
    exception_behavior_t    new_behavior,
    thread_state_flavor_t   new_flavor)
{
    ipc_port_t  old_port[EXC_TYPES_COUNT];
    boolean_t privileged = current_task()->sec_token.val[0] == 0;
    register int i;

    if (task == TASK_NULL)
        return (KERN_INVALID_ARGUMENT);

    if (exception_mask & ~EXC_MASK_ALL)
        return (KERN_INVALID_ARGUMENT);

    if (IP_VALID(new_port)) {
        switch (new_behavior & ~MACH_EXCEPTION_CODES) {

        case EXCEPTION_DEFAULT:
        case EXCEPTION_STATE:
        case EXCEPTION_STATE_IDENTITY:
            break;

        default:
            return (KERN_INVALID_ARGUMENT);
        }
    }

    itk_lock(task);

    if (task->itk_self == IP_NULL) {
        itk_unlock(task);

        return (KERN_FAILURE);
    }

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
        if (exception_mask & (1 << i)) {
            old_port[i] = task->exc_actions[i].port;
            task->exc_actions[i].port =
                ipc_port_copy_send(new_port);
            task->exc_actions[i].behavior = new_behavior;
            task->exc_actions[i].flavor = new_flavor;
            task->exc_actions[i].privileged = privileged;
        } else
            old_port[i] = IP_NULL;
    }

    itk_unlock(task);

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
        if (IP_VALID(old_port[i]))
            ipc_port_release_send(old_port[i]);

    if (IP_VALID(new_port))     /* consume send right */
        ipc_port_release_send(new_port);

    return (KERN_SUCCESS);
}
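
/*
 * Illustrative sketch (not part of the original source): a user-space process
 * installing an exception port on itself for EXC_BAD_ACCESS.  'exc_port' is a
 * placeholder for a port the caller has allocated (receive right retained,
 * send right inserted); exception messages would then be received with
 * mach_msg() and dispatched through a handler such as exc_server().
 *
 *	kern_return_t kr;
 *
 *	kr = task_set_exception_ports(mach_task_self(),
 *	    EXC_MASK_BAD_ACCESS,
 *	    exc_port,
 *	    EXCEPTION_DEFAULT,
 *	    MACHINE_THREAD_STATE);
 */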
/*
 *	Routine:	thread/task_swap_exception_ports [kernel call]
 *	Purpose:
 *		Sets the thread/task exception port, flavor and
 *		behavior for the exception types specified by the mask.
 *
 *		The old ports, behavior and flavors are returned.
 *		Count specifies the array sizes on input and
 *		the number of returned ports etc. on output.  The
 *		arrays must be large enough to hold all the returned
 *		data; MIG returns an error otherwise.  The masks
 *		array specifies the corresponding exception type(s).
 *	Conditions:
 *		Nothing locked.  If successful, consumes
 *		the supplied send right.
 *
 *		Returns up to [in] CountCnt elements.
 *	Returns:
 *		KERN_SUCCESS		Changed the special port.
 *		KERN_INVALID_ARGUMENT	The thread is null,
 *					Illegal mask bit set.
 *					Illegal exception behavior.
 *		KERN_FAILURE		The thread is dead.
 */

kern_return_t
thread_swap_exception_ports(
    thread_t                    thread,
    exception_mask_t            exception_mask,
    ipc_port_t                  new_port,
    exception_behavior_t        new_behavior,
    thread_state_flavor_t       new_flavor,
    exception_mask_array_t      masks,
    mach_msg_type_number_t      *CountCnt,
    exception_port_array_t      ports,
    exception_behavior_array_t  behaviors,
    thread_state_flavor_array_t flavors)
{
    ipc_port_t  old_port[EXC_TYPES_COUNT];
    boolean_t privileged = current_task()->sec_token.val[0] == 0;
    unsigned int i, j, count;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    if (exception_mask & ~EXC_MASK_ALL)
        return (KERN_INVALID_ARGUMENT);

    if (IP_VALID(new_port)) {
        switch (new_behavior & ~MACH_EXCEPTION_CODES) {

        case EXCEPTION_DEFAULT:
        case EXCEPTION_STATE:
        case EXCEPTION_STATE_IDENTITY:
            break;

        default:
            return (KERN_INVALID_ARGUMENT);
        }
    }

    thread_mtx_lock(thread);

    if (!thread->active) {
        thread_mtx_unlock(thread);

        return (KERN_FAILURE);
    }

    count = 0;

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
        if (exception_mask & (1 << i)) {
            for (j = 0; j < count; ++j) {
                /*
                 * search for an identical entry, if found
                 * set corresponding mask for this exception.
                 */
                if (thread->exc_actions[i].port == ports[j] &&
                    thread->exc_actions[i].behavior == behaviors[j] &&
                    thread->exc_actions[i].flavor == flavors[j]) {
                    masks[j] |= (1 << i);
                    break;
                }
            }

            if (j == count) {
                masks[j] = (1 << i);
                ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);

                behaviors[j] = thread->exc_actions[i].behavior;
                flavors[j] = thread->exc_actions[i].flavor;
                ++count;
            }

            old_port[i] = thread->exc_actions[i].port;
            thread->exc_actions[i].port = ipc_port_copy_send(new_port);
            thread->exc_actions[i].behavior = new_behavior;
            thread->exc_actions[i].flavor = new_flavor;
            thread->exc_actions[i].privileged = privileged;
            if (count > *CountCnt)
                break;
        } else
            old_port[i] = IP_NULL;
    }

    thread_mtx_unlock(thread);

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
        if (IP_VALID(old_port[i]))
            ipc_port_release_send(old_port[i]);

    if (IP_VALID(new_port))     /* consume send right */
        ipc_port_release_send(new_port);

    *CountCnt = count;

    return (KERN_SUCCESS);
}
kern_return_t
task_swap_exception_ports(
    task_t                      task,
    exception_mask_t            exception_mask,
    ipc_port_t                  new_port,
    exception_behavior_t        new_behavior,
    thread_state_flavor_t       new_flavor,
    exception_mask_array_t      masks,
    mach_msg_type_number_t      *CountCnt,
    exception_port_array_t      ports,
    exception_behavior_array_t  behaviors,
    thread_state_flavor_array_t flavors)
{
    ipc_port_t  old_port[EXC_TYPES_COUNT];
    boolean_t privileged = current_task()->sec_token.val[0] == 0;
    unsigned int i, j, count;

    if (task == TASK_NULL)
        return (KERN_INVALID_ARGUMENT);

    if (exception_mask & ~EXC_MASK_ALL)
        return (KERN_INVALID_ARGUMENT);

    if (IP_VALID(new_port)) {
        switch (new_behavior & ~MACH_EXCEPTION_CODES) {

        case EXCEPTION_DEFAULT:
        case EXCEPTION_STATE:
        case EXCEPTION_STATE_IDENTITY:
            break;

        default:
            return (KERN_INVALID_ARGUMENT);
        }
    }

    itk_lock(task);

    if (task->itk_self == IP_NULL) {
        itk_unlock(task);

        return (KERN_FAILURE);
    }

    count = 0;

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
        if (exception_mask & (1 << i)) {
            for (j = 0; j < count; j++) {
                /*
                 * search for an identical entry, if found
                 * set corresponding mask for this exception.
                 */
                if (task->exc_actions[i].port == ports[j] &&
                    task->exc_actions[i].behavior == behaviors[j] &&
                    task->exc_actions[i].flavor == flavors[j]) {
                    masks[j] |= (1 << i);
                    break;
                }
            }

            if (j == count) {
                masks[j] = (1 << i);
                ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
                behaviors[j] = task->exc_actions[i].behavior;
                flavors[j] = task->exc_actions[i].flavor;
                ++count;
            }

            old_port[i] = task->exc_actions[i].port;
            task->exc_actions[i].port = ipc_port_copy_send(new_port);
            task->exc_actions[i].behavior = new_behavior;
            task->exc_actions[i].flavor = new_flavor;
            task->exc_actions[i].privileged = privileged;
            if (count > *CountCnt)
                break;
        } else
            old_port[i] = IP_NULL;
    }

    itk_unlock(task);

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++)
        if (IP_VALID(old_port[i]))
            ipc_port_release_send(old_port[i]);

    if (IP_VALID(new_port))     /* consume send right */
        ipc_port_release_send(new_port);

    *CountCnt = count;

    return (KERN_SUCCESS);
}
/*
 *	Routine:	thread/task_get_exception_ports [kernel call]
 *	Purpose:
 *		Clones a send right for each of the thread/task's exception
 *		ports specified in the mask and returns the behavior
 *		and flavor of said port.
 *
 *		Returns up to [in] CountCnt elements.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		KERN_SUCCESS		Extracted a send right.
 *		KERN_INVALID_ARGUMENT	The thread is null,
 *					Invalid special port,
 *					Illegal mask bit set.
 *		KERN_FAILURE		The thread is dead.
 */

kern_return_t
thread_get_exception_ports(
    thread_t                    thread,
    exception_mask_t            exception_mask,
    exception_mask_array_t      masks,
    mach_msg_type_number_t      *CountCnt,
    exception_port_array_t      ports,
    exception_behavior_array_t  behaviors,
    thread_state_flavor_array_t flavors)
{
    unsigned int i, j, count;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    if (exception_mask & ~EXC_MASK_ALL)
        return (KERN_INVALID_ARGUMENT);

    thread_mtx_lock(thread);

    if (!thread->active) {
        thread_mtx_unlock(thread);

        return (KERN_FAILURE);
    }

    count = 0;

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
        if (exception_mask & (1 << i)) {
            for (j = 0; j < count; ++j) {
                /*
                 * search for an identical entry, if found
                 * set corresponding mask for this exception.
                 */
                if (thread->exc_actions[i].port == ports[j] &&
                    thread->exc_actions[i].behavior == behaviors[j] &&
                    thread->exc_actions[i].flavor == flavors[j]) {
                    masks[j] |= (1 << i);
                    break;
                }
            }

            if (j == count) {
                masks[j] = (1 << i);
                ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);
                behaviors[j] = thread->exc_actions[i].behavior;
                flavors[j] = thread->exc_actions[i].flavor;
                ++count;
                if (count >= *CountCnt)
                    break;
            }
        }
    }

    thread_mtx_unlock(thread);

    *CountCnt = count;

    return (KERN_SUCCESS);
}
kern_return_t
task_get_exception_ports(
    task_t                      task,
    exception_mask_t            exception_mask,
    exception_mask_array_t      masks,
    mach_msg_type_number_t      *CountCnt,
    exception_port_array_t      ports,
    exception_behavior_array_t  behaviors,
    thread_state_flavor_array_t flavors)
{
    unsigned int i, j, count;

    if (task == TASK_NULL)
        return (KERN_INVALID_ARGUMENT);

    if (exception_mask & ~EXC_MASK_ALL)
        return (KERN_INVALID_ARGUMENT);

    itk_lock(task);

    if (task->itk_self == IP_NULL) {
        itk_unlock(task);

        return (KERN_FAILURE);
    }

    count = 0;

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
        if (exception_mask & (1 << i)) {
            for (j = 0; j < count; ++j) {
                /*
                 * search for an identical entry, if found
                 * set corresponding mask for this exception.
                 */
                if (task->exc_actions[i].port == ports[j] &&
                    task->exc_actions[i].behavior == behaviors[j] &&
                    task->exc_actions[i].flavor == flavors[j]) {
                    masks[j] |= (1 << i);
                    break;
                }
            }

            if (j == count) {
                masks[j] = (1 << i);
                ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
                behaviors[j] = task->exc_actions[i].behavior;
                flavors[j] = task->exc_actions[i].flavor;
                ++count;
                if (count > *CountCnt)
                    break;
            }
        }
    }

    itk_unlock(task);

    *CountCnt = count;

    return (KERN_SUCCESS);
}
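
/*
 * Illustrative sketch (not part of the original source): querying the current
 * task's exception ports from user space.  The caller supplies arrays of at
 * least EXC_TYPES_COUNT entries and passes the capacity in 'count'; on return
 * 'count' holds the number of distinct (port, behavior, flavor) entries, and
 * each returned port is a send right the caller must deallocate.
 *
 *	exception_mask_t masks[EXC_TYPES_COUNT];
 *	mach_port_t ports[EXC_TYPES_COUNT];
 *	exception_behavior_t behaviors[EXC_TYPES_COUNT];
 *	thread_state_flavor_t flavors[EXC_TYPES_COUNT];
 *	mach_msg_type_number_t count = EXC_TYPES_COUNT;
 *
 *	kern_return_t kr = task_get_exception_ports(mach_task_self(),
 *	    EXC_MASK_ALL, masks, &count, ports, behaviors, flavors);
 */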