/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * Task and thread related IPC functions.
 */
#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mach_param.h>
#include <mach/task_special_ports.h>
#include <mach/thread_special_ports.h>
#include <mach/thread_status.h>
#include <mach/exception_types.h>
#include <mach/memory_object_types.h>
#include <mach/mach_traps.h>
#include <mach/task_server.h>
#include <mach/thread_act_server.h>
#include <mach/mach_host_server.h>
#include <mach/host_priv_server.h>
#include <mach/vm_map_server.h>

#include <kern/kern_types.h>
#include <kern/host.h>
#include <kern/ipc_kobject.h>
#include <kern/ipc_tt.h>
#include <kern/kalloc.h>
#include <kern/thread.h>
#include <kern/misc_protos.h>

#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_shared_memory_server.h>
#include <vm/vm_protos.h>
/* forward declarations */
task_t convert_port_to_locked_task(ipc_port_t port);
/*
 *	Routine:	ipc_task_init
 *	Purpose:
 *		Initialize a task's IPC state.
 *
 *		If non-null, some state will be inherited from the parent.
 *		The parent must be appropriately initialized.
 */

void
ipc_task_init(
	task_t		task,
	task_t		parent)
{
	ipc_space_t space;
	ipc_port_t kport;
	kern_return_t kr;
	int i;

	kr = ipc_space_create(&ipc_table_entries[0], &space);
	if (kr != KERN_SUCCESS)
		panic("ipc_task_init");

	kport = ipc_port_alloc_kernel();
	if (kport == IP_NULL)
		panic("ipc_task_init");

	task->itk_self = kport;
	task->itk_sself = ipc_port_make_send(kport);
	task->itk_space = space;
	space->is_fast = FALSE;

	if (parent == TASK_NULL) {
		ipc_port_t port;

		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			task->exc_actions[i].port = IP_NULL;
		}

		kr = host_get_host_port(host_priv_self(), &port);
		assert(kr == KERN_SUCCESS);
		task->itk_host = port;

		task->itk_bootstrap = IP_NULL;

		for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
			task->itk_registered[i] = IP_NULL;
	}
	else {
		assert(parent->itk_self != IP_NULL);

		/* inherit registered ports */

		for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
			task->itk_registered[i] =
				ipc_port_copy_send(parent->itk_registered[i]);

		/* inherit exception and bootstrap ports */

		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			task->exc_actions[i].port =
				ipc_port_copy_send(parent->exc_actions[i].port);
			task->exc_actions[i].flavor =
				parent->exc_actions[i].flavor;
			task->exc_actions[i].behavior =
				parent->exc_actions[i].behavior;
			task->exc_actions[i].privileged =
				parent->exc_actions[i].privileged;
		}

		task->itk_host =
			ipc_port_copy_send(parent->itk_host);

		task->itk_bootstrap =
			ipc_port_copy_send(parent->itk_bootstrap);
	}
}
/*
 *	Routine:	ipc_task_enable
 *	Purpose:
 *		Enable a task for IPC access.
 */

void
ipc_task_enable(
	task_t		task)
{
	ipc_port_t kport;

	itk_lock(task);
	kport = task->itk_self;
	if (kport != IP_NULL)
		ipc_kobject_set(kport, (ipc_kobject_t) task, IKOT_TASK);
	itk_unlock(task);
}
/*
 *	Routine:	ipc_task_disable
 *	Purpose:
 *		Disable IPC access to a task.
 */

void
ipc_task_disable(
	task_t		task)
{
	ipc_port_t kport;

	itk_lock(task);
	kport = task->itk_self;
	if (kport != IP_NULL)
		ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
	itk_unlock(task);
}
/*
 *	Routine:	ipc_task_terminate
 *	Purpose:
 *		Clean up and destroy a task's IPC state.
 *	Conditions:
 *		Nothing locked.  The task must be suspended.
 *		(Or the current thread must be in the task.)
 */

void
ipc_task_terminate(
	task_t		task)
{
	ipc_port_t kport;
	int i;

	itk_lock(task);
	kport = task->itk_self;

	if (kport == IP_NULL) {
		/* the task is already terminated (can this happen?) */
		itk_unlock(task);
		return;
	}

	task->itk_self = IP_NULL;
	itk_unlock(task);

	/* release the naked send rights */

	if (IP_VALID(task->itk_sself))
		ipc_port_release_send(task->itk_sself);

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (IP_VALID(task->exc_actions[i].port)) {
			ipc_port_release_send(task->exc_actions[i].port);
		}
	}

	if (IP_VALID(task->itk_host))
		ipc_port_release_send(task->itk_host);

	if (IP_VALID(task->itk_bootstrap))
		ipc_port_release_send(task->itk_bootstrap);

	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
		if (IP_VALID(task->itk_registered[i]))
			ipc_port_release_send(task->itk_registered[i]);

	ipc_port_release_send(task->wired_ledger_port);
	ipc_port_release_send(task->paged_ledger_port);

	/* destroy the kernel port */
	ipc_port_dealloc_kernel(kport);
}
/*
 *	Routine:	ipc_task_reset
 *	Purpose:
 *		Reset a task's IPC state to protect it when
 *		it enters an elevated security context.
 *	Conditions:
 *		Nothing locked.  The task must be suspended.
 *		(Or the current thread must be in the task.)
 */

void
ipc_task_reset(
	task_t		task)
{
	ipc_port_t old_kport, new_kport;
	ipc_port_t old_sself;
	ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
	int i;

	new_kport = ipc_port_alloc_kernel();
	if (new_kport == IP_NULL)
		panic("ipc_task_reset");

	itk_lock(task);

	old_kport = task->itk_self;

	if (old_kport == IP_NULL) {
		/* the task is already terminated (can this happen?) */
		itk_unlock(task);
		ipc_port_dealloc_kernel(new_kport);
		return;
	}

	task->itk_self = new_kport;
	old_sself = task->itk_sself;
	task->itk_sself = ipc_port_make_send(new_kport);
	ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
	ipc_kobject_set(new_kport, (ipc_kobject_t) task, IKOT_TASK);

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (!task->exc_actions[i].privileged) {
			old_exc_actions[i] = task->exc_actions[i].port;
			task->exc_actions[i].port = IP_NULL;
		} else {
			old_exc_actions[i] = IP_NULL;
		}
	}

	itk_unlock(task);

	/* release the naked send rights */

	if (IP_VALID(old_sself))
		ipc_port_release_send(old_sself);

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (IP_VALID(old_exc_actions[i])) {
			ipc_port_release_send(old_exc_actions[i]);
		}
	}

	/* destroy the kernel port */
	ipc_port_dealloc_kernel(old_kport);
}
/*
 *	Routine:	ipc_thread_init
 *	Purpose:
 *		Initialize a thread's IPC state.
 */

void
ipc_thread_init(
	thread_t	thread)
{
	ipc_port_t	kport;
	int		i;

	kport = ipc_port_alloc_kernel();
	if (kport == IP_NULL)
		panic("ipc_thread_init");

	thread->ith_self = kport;
	thread->ith_sself = ipc_port_make_send(kport);

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
		thread->exc_actions[i].port = IP_NULL;

	ipc_kobject_set(kport, (ipc_kobject_t)thread, IKOT_THREAD);

	ipc_kmsg_queue_init(&thread->ith_messages);

	thread->ith_rpc_reply = IP_NULL;
}
void
ipc_thread_disable(
	thread_t	thread)
{
	ipc_port_t	kport = thread->ith_self;

	if (kport != IP_NULL)
		ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
}
/*
 *	Routine:	ipc_thread_terminate
 *	Purpose:
 *		Clean up and destroy a thread's IPC state.
 */

void
ipc_thread_terminate(
	thread_t	thread)
{
	ipc_port_t	kport = thread->ith_self;

	if (kport != IP_NULL) {
		int	i;

		if (IP_VALID(thread->ith_sself))
			ipc_port_release_send(thread->ith_sself);

		thread->ith_sself = thread->ith_self = IP_NULL;

		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
			if (IP_VALID(thread->exc_actions[i].port))
				ipc_port_release_send(thread->exc_actions[i].port);
		}

		ipc_port_dealloc_kernel(kport);
	}

	assert(ipc_kmsg_queue_empty(&thread->ith_messages));

	if (thread->ith_rpc_reply != IP_NULL)
		ipc_port_dealloc_reply(thread->ith_rpc_reply);

	thread->ith_rpc_reply = IP_NULL;
}
/*
 *	Routine:	ipc_thread_reset
 *	Purpose:
 *		Reset the IPC state for a given Mach thread when
 *		its task enters an elevated security context.
 *		Both the thread port and its exception ports have
 *		to be reset.  Its RPC reply port cannot have any
 *		rights outstanding, so it should be fine.
 */

void
ipc_thread_reset(
	thread_t	thread)
{
	ipc_port_t old_kport, new_kport;
	ipc_port_t old_sself;
	ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
	int i;

	new_kport = ipc_port_alloc_kernel();
	if (new_kport == IP_NULL)
		panic("ipc_thread_reset");

	thread_mtx_lock(thread);

	old_kport = thread->ith_self;

	if (old_kport == IP_NULL) {
		/* the thread is already terminated (can this happen?) */
		thread_mtx_unlock(thread);
		ipc_port_dealloc_kernel(new_kport);
		return;
	}

	thread->ith_self = new_kport;
	old_sself = thread->ith_sself;
	thread->ith_sself = ipc_port_make_send(new_kport);
	ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
	ipc_kobject_set(new_kport, (ipc_kobject_t) thread, IKOT_THREAD);

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (!thread->exc_actions[i].privileged) {
			old_exc_actions[i] = thread->exc_actions[i].port;
			thread->exc_actions[i].port = IP_NULL;
		} else {
			old_exc_actions[i] = IP_NULL;
		}
	}

	thread_mtx_unlock(thread);

	/* release the naked send rights */

	if (IP_VALID(old_sself))
		ipc_port_release_send(old_sself);

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (IP_VALID(old_exc_actions[i])) {
			ipc_port_release_send(old_exc_actions[i]);
		}
	}

	/* destroy the kernel port */
	ipc_port_dealloc_kernel(old_kport);
}
/*
 *	Routine:	retrieve_task_self_fast
 *	Purpose:
 *		Optimized version of retrieve_task_self,
 *		that only works for the current task.
 *
 *		Return a send right (possibly null/dead)
 *		for the task's user-visible self port.
 */

ipc_port_t
retrieve_task_self_fast(
	register task_t		task)
{
	register ipc_port_t port;

	assert(task == current_task());

	itk_lock(task);
	assert(task->itk_self != IP_NULL);

	if ((port = task->itk_sself) == task->itk_self) {
		/* no interposing; take a send right off the kernel port directly */
		ip_lock(port);
		assert(ip_active(port));
		ip_reference(port);
		port->ip_srights++;
		ip_unlock(port);
	}
	else
		port = ipc_port_copy_send(port);
	itk_unlock(task);

	return port;
}
/*
 *	Routine:	retrieve_thread_self_fast
 *	Purpose:
 *		Return a send right (possibly null/dead)
 *		for the thread's user-visible self port.
 *
 *		Only works for the current thread.
 */

ipc_port_t
retrieve_thread_self_fast(
	thread_t		thread)
{
	register ipc_port_t port;

	assert(thread == current_thread());

	thread_mtx_lock(thread);

	assert(thread->ith_self != IP_NULL);

	if ((port = thread->ith_sself) == thread->ith_self) {
		/* no interposing; take a send right off the kernel port directly */
		ip_lock(port);
		assert(ip_active(port));
		ip_reference(port);
		port->ip_srights++;
		ip_unlock(port);
	}
	else
		port = ipc_port_copy_send(port);

	thread_mtx_unlock(thread);

	return port;
}
/*
 *	Routine:	task_self_trap [mach trap]
 *	Purpose:
 *		Give the caller send rights for his own task port.
 *	Returns:
 *		MACH_PORT_NULL if there are any resource failures.
 */

mach_port_name_t
task_self_trap(
	__unused struct task_self_trap_args *args)
{
	task_t task = current_task();
	ipc_port_t sright;
	mach_port_name_t name;

	sright = retrieve_task_self_fast(task);
	name = ipc_port_copyout_send(sright, task->itk_space);
	return name;
}
/*
 *	Routine:	thread_self_trap [mach trap]
 *	Purpose:
 *		Give the caller send rights for his own thread port.
 *	Returns:
 *		MACH_PORT_NULL if there are any resource failures.
 */

mach_port_name_t
thread_self_trap(
	__unused struct thread_self_trap_args *args)
{
	thread_t thread = current_thread();
	task_t task = thread->task;
	ipc_port_t sright;
	mach_port_name_t name;

	sright = retrieve_thread_self_fast(thread);
	name = ipc_port_copyout_send(sright, task->itk_space);
	return name;
}
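/*
 * Usage note (user space): mach_task_self() and mach_thread_self() are the
 * library wrappers over the two traps above.  A minimal sketch, assuming the
 * standard <mach/mach.h> user APIs (this example is illustrative and not part
 * of the kernel build):
 *
 *	#include <mach/mach.h>
 *
 *	void self_ports_example(void)
 *	{
 *		mach_port_t task = mach_task_self();      // cached by the library, do not deallocate
 *		mach_port_t thread = mach_thread_self();  // fresh send right, caller owns it
 *
 *		// ... use the ports ...
 *
 *		mach_port_deallocate(task, thread);
 *	}
 */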
/*
 *	Routine:	mach_reply_port [mach trap]
 *	Purpose:
 *		Allocate a port for the caller.
 *	Returns:
 *		MACH_PORT_NULL if there are any resource failures.
 */

mach_port_name_t
mach_reply_port(
	__unused struct mach_reply_port_args *args)
{
	ipc_port_t port;
	mach_port_name_t name;
	kern_return_t kr;

	kr = ipc_port_alloc(current_task()->itk_space, &name, &port);
	if (kr == KERN_SUCCESS)
		ip_unlock(port);
	else
		name = MACH_PORT_NULL;
	return name;
}
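/*
 * Usage note (user space): a sketch of allocating and tearing down a reply
 * port with the trap above, assuming the standard <mach/mach.h> user APIs.
 * The trap hands back a receive right, so teardown must drop that receive
 * right rather than just the name:
 *
 *	#include <mach/mach.h>
 *
 *	void reply_port_example(void)
 *	{
 *		mach_port_t reply = mach_reply_port();
 *
 *		if (reply == MACH_PORT_NULL)
 *			return;		// resource failure
 *
 *		// ... use `reply` as the local (reply) port of a mach_msg() exchange ...
 *
 *		mach_port_mod_refs(mach_task_self(), reply,
 *		    MACH_PORT_RIGHT_RECEIVE, -1);
 *	}
 */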
/*
 *	Routine:	thread_get_special_port [kernel call]
 *	Purpose:
 *		Clones a send right for one of the thread's
 *		special ports.
 *	Returns:
 *		KERN_SUCCESS		Extracted a send right.
 *		KERN_INVALID_ARGUMENT	The thread is null.
 *		KERN_FAILURE		The thread is dead.
 *		KERN_INVALID_ARGUMENT	Invalid special port.
 */

kern_return_t
thread_get_special_port(
	thread_t	thread,
	int		which,
	ipc_port_t	*portp)
{
	kern_return_t	result = KERN_SUCCESS;
	ipc_port_t	*whichp;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	switch (which) {

	case THREAD_KERNEL_PORT:
		whichp = &thread->ith_sself;
		break;

	default:
		return (KERN_INVALID_ARGUMENT);
	}

	thread_mtx_lock(thread);

	if (thread->active)
		*portp = ipc_port_copy_send(*whichp);
	else
		result = KERN_FAILURE;

	thread_mtx_unlock(thread);

	return (result);
}
/*
 *	Routine:	thread_set_special_port [kernel call]
 *	Purpose:
 *		Changes one of the thread's special ports,
 *		setting it to the supplied send right.
 *	Conditions:
 *		Nothing locked.  If successful, consumes
 *		the supplied send right.
 *	Returns:
 *		KERN_SUCCESS		Changed the special port.
 *		KERN_INVALID_ARGUMENT	The thread is null.
 *		KERN_FAILURE		The thread is dead.
 *		KERN_INVALID_ARGUMENT	Invalid special port.
 */

kern_return_t
thread_set_special_port(
	thread_t	thread,
	int		which,
	ipc_port_t	port)
{
	kern_return_t	result = KERN_SUCCESS;
	ipc_port_t	*whichp, old = IP_NULL;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	switch (which) {

	case THREAD_KERNEL_PORT:
		whichp = &thread->ith_sself;
		break;

	default:
		return (KERN_INVALID_ARGUMENT);
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		old = *whichp;
		*whichp = port;
	}
	else
		result = KERN_FAILURE;

	thread_mtx_unlock(thread);

	if (IP_VALID(old))
		ipc_port_release_send(old);

	return (result);
}
/*
 *	Routine:	task_get_special_port [kernel call]
 *	Purpose:
 *		Clones a send right for one of the task's
 *		special ports.
 *	Returns:
 *		KERN_SUCCESS		Extracted a send right.
 *		KERN_INVALID_ARGUMENT	The task is null.
 *		KERN_FAILURE		The task/space is dead.
 *		KERN_INVALID_ARGUMENT	Invalid special port.
 */

kern_return_t
task_get_special_port(
	task_t		task,
	int		which,
	ipc_port_t	*portp)
{
	ipc_port_t port;
	ipc_port_t *whichp;

	if (task == TASK_NULL)
		return KERN_INVALID_ARGUMENT;

	switch (which) {
	case TASK_KERNEL_PORT:
		whichp = &task->itk_sself;
		break;

	case TASK_HOST_PORT:
		whichp = &task->itk_host;
		break;

	case TASK_BOOTSTRAP_PORT:
		whichp = &task->itk_bootstrap;
		break;

	case TASK_WIRED_LEDGER_PORT:
		whichp = &task->wired_ledger_port;
		break;

	case TASK_PAGED_LEDGER_PORT:
		whichp = &task->paged_ledger_port;
		break;

	default:
		return KERN_INVALID_ARGUMENT;
	}

	itk_lock(task);
	if (task->itk_self == IP_NULL) {
		itk_unlock(task);
		return KERN_FAILURE;
	}

	port = ipc_port_copy_send(*whichp);
	itk_unlock(task);

	*portp = port;
	return KERN_SUCCESS;
}
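/*
 * Usage note (user space): a sketch of fetching a task special port through
 * the MIG interface for the routine above, assuming the standard
 * <mach/mach.h> user APIs.  The returned right is a send right the caller
 * owns:
 *
 *	#include <mach/mach.h>
 *
 *	void bootstrap_port_example(void)
 *	{
 *		mach_port_t bootstrap = MACH_PORT_NULL;
 *		kern_return_t kr;
 *
 *		kr = task_get_special_port(mach_task_self(),
 *		    TASK_BOOTSTRAP_PORT, &bootstrap);
 *		if (kr != KERN_SUCCESS)
 *			return;
 *
 *		// ... talk to the bootstrap server ...
 *
 *		mach_port_deallocate(mach_task_self(), bootstrap);
 *	}
 */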
/*
 *	Routine:	task_set_special_port [kernel call]
 *	Purpose:
 *		Changes one of the task's special ports,
 *		setting it to the supplied send right.
 *	Conditions:
 *		Nothing locked.  If successful, consumes
 *		the supplied send right.
 *	Returns:
 *		KERN_SUCCESS		Changed the special port.
 *		KERN_INVALID_ARGUMENT	The task is null.
 *		KERN_FAILURE		The task/space is dead.
 *		KERN_INVALID_ARGUMENT	Invalid special port.
 */

kern_return_t
task_set_special_port(
	task_t		task,
	int		which,
	ipc_port_t	port)
{
	ipc_port_t *whichp;
	ipc_port_t old;

	if (task == TASK_NULL)
		return KERN_INVALID_ARGUMENT;

	switch (which) {
	case TASK_KERNEL_PORT:
		whichp = &task->itk_sself;
		break;

	case TASK_HOST_PORT:
		whichp = &task->itk_host;
		break;

	case TASK_BOOTSTRAP_PORT:
		whichp = &task->itk_bootstrap;
		break;

	case TASK_WIRED_LEDGER_PORT:
		whichp = &task->wired_ledger_port;
		break;

	case TASK_PAGED_LEDGER_PORT:
		whichp = &task->paged_ledger_port;
		break;

	default:
		return KERN_INVALID_ARGUMENT;
	}

	itk_lock(task);
	if (task->itk_self == IP_NULL) {
		itk_unlock(task);
		return KERN_FAILURE;
	}

	old = *whichp;
	*whichp = port;
	itk_unlock(task);

	if (IP_VALID(old))
		ipc_port_release_send(old);
	return KERN_SUCCESS;
}
/*
 *	Routine:	mach_ports_register [kernel call]
 *	Purpose:
 *		Stash a handful of port send rights in the task.
 *		Child tasks will inherit these rights, but they
 *		must use mach_ports_lookup to acquire them.
 *
 *		The rights are supplied in a (wired) kalloc'd segment.
 *		Rights which aren't supplied are assumed to be null.
 *	Conditions:
 *		Nothing locked.  If successful, consumes
 *		the supplied rights and memory.
 *	Returns:
 *		KERN_SUCCESS		Stashed the port rights.
 *		KERN_INVALID_ARGUMENT	The task is null.
 *		KERN_INVALID_ARGUMENT	The task is dead.
 *		KERN_INVALID_ARGUMENT	Too many port rights supplied.
 */

kern_return_t
mach_ports_register(
	task_t			task,
	mach_port_array_t	memory,
	mach_msg_type_number_t	portsCnt)
{
	ipc_port_t ports[TASK_PORT_REGISTER_MAX];
	unsigned int i;

	if ((task == TASK_NULL) ||
	    (portsCnt > TASK_PORT_REGISTER_MAX))
		return KERN_INVALID_ARGUMENT;

	/*
	 *	Pad the port rights with nulls.
	 */

	for (i = 0; i < portsCnt; i++)
		ports[i] = memory[i];
	for (; i < TASK_PORT_REGISTER_MAX; i++)
		ports[i] = IP_NULL;

	itk_lock(task);
	if (task->itk_self == IP_NULL) {
		itk_unlock(task);
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 *	Replace the old send rights with the new.
	 *	Release the old rights after unlocking.
	 */

	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
		ipc_port_t old;

		old = task->itk_registered[i];
		task->itk_registered[i] = ports[i];
		ports[i] = old;
	}

	itk_unlock(task);

	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
		if (IP_VALID(ports[i]))
			ipc_port_release_send(ports[i]);

	/*
	 *	Now that the operation is known to be successful,
	 *	we can free the memory.
	 */

	if (portsCnt != 0)
		kfree(memory,
		      (vm_size_t) (portsCnt * sizeof(mach_port_t)));

	return KERN_SUCCESS;
}
/*
 *	Routine:	mach_ports_lookup [kernel call]
 *	Purpose:
 *		Retrieves (clones) the stashed port send rights.
 *	Conditions:
 *		Nothing locked.  If successful, the caller gets
 *		rights and memory.
 *	Returns:
 *		KERN_SUCCESS		Retrieved the send rights.
 *		KERN_INVALID_ARGUMENT	The task is null.
 *		KERN_INVALID_ARGUMENT	The task is dead.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
 */

kern_return_t
mach_ports_lookup(
	task_t			task,
	mach_port_array_t	*portsp,
	mach_msg_type_number_t	*portsCnt)
{
	void *memory;
	vm_size_t size;
	ipc_port_t *ports;
	int i;

	if (task == TASK_NULL)
		return KERN_INVALID_ARGUMENT;

	size = (vm_size_t) (TASK_PORT_REGISTER_MAX * sizeof(ipc_port_t));

	memory = kalloc(size);
	if (memory == 0)
		return KERN_RESOURCE_SHORTAGE;

	itk_lock(task);
	if (task->itk_self == IP_NULL) {
		itk_unlock(task);
		kfree(memory, size);
		return KERN_INVALID_ARGUMENT;
	}

	ports = (ipc_port_t *) memory;

	/*
	 *	Clone port rights.  Because kalloc'd memory
	 *	is wired, we won't fault while holding the task lock.
	 */

	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
		ports[i] = ipc_port_copy_send(task->itk_registered[i]);

	itk_unlock(task);

	*portsp = (mach_port_array_t) ports;
	*portsCnt = TASK_PORT_REGISTER_MAX;
	return KERN_SUCCESS;
}
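/*
 * Usage note (user space): mach_ports_register() and mach_ports_lookup() are
 * normally used as a pair: one task stashes up to TASK_PORT_REGISTER_MAX send
 * rights and a descendant retrieves clones of them.  A minimal sketch,
 * assuming the standard <mach/mach.h> user APIs (the looked-up array is
 * returned as out-of-line memory that the caller should vm_deallocate()):
 *
 *	#include <mach/mach.h>
 *
 *	void registered_ports_example(mach_port_t some_send_right)
 *	{
 *		mach_port_t init_ports[1] = { some_send_right };
 *		mach_port_array_t looked_up = NULL;
 *		mach_msg_type_number_t count = 0;
 *
 *		// stash one send right in the task
 *		mach_ports_register(mach_task_self(), init_ports, 1);
 *
 *		// later (e.g. in a child task): clone the stashed rights
 *		if (mach_ports_lookup(mach_task_self(), &looked_up, &count) == KERN_SUCCESS) {
 *			// ... use looked_up[0 .. count-1] ...
 *			vm_deallocate(mach_task_self(), (vm_address_t)looked_up,
 *			    count * sizeof(looked_up[0]));
 *		}
 *	}
 */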
/*
 *	Routine:	convert_port_to_locked_task
 *	Purpose:
 *		Internal helper routine to convert from a port to a locked
 *		task.  Used by several routines that try to convert from a
 *		task port to a reference on some task related object.
 *	Conditions:
 *		Nothing locked, blocking OK.
 */

task_t
convert_port_to_locked_task(ipc_port_t port)
{
	while (IP_VALID(port)) {
		task_t task;

		ip_lock(port);
		if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK)) {
			ip_unlock(port);
			return TASK_NULL;
		}
		task = (task_t) port->ip_kobject;
		assert(task != TASK_NULL);

		/*
		 * Normal lock ordering puts task_lock() before ip_lock().
		 * Attempt out-of-order locking here.
		 */
		if (task_lock_try(task)) {
			ip_unlock(port);
			return task;
		}

		ip_unlock(port);
		mutex_pause();
	}
	return TASK_NULL;
}
/*
 *	Routine:	convert_port_to_task
 *	Purpose:
 *		Convert from a port to a task.
 *		Doesn't consume the port ref; produces a task ref,
 *		which may be null.
 */

task_t
convert_port_to_task(
	ipc_port_t	port)
{
	task_t		task = TASK_NULL;

	if (IP_VALID(port)) {
		ip_lock(port);

		if (ip_active(port) &&
		    ip_kotype(port) == IKOT_TASK) {
			task = (task_t)port->ip_kobject;
			assert(task != TASK_NULL);

			task_reference_internal(task);
		}

		ip_unlock(port);
	}

	return (task);
}
/*
 *	Routine:	convert_port_to_space
 *	Purpose:
 *		Convert from a port to a space.
 *		Doesn't consume the port ref; produces a space ref,
 *		which may be null.
 */

ipc_space_t
convert_port_to_space(
	ipc_port_t	port)
{
	ipc_space_t space;
	task_t task;

	task = convert_port_to_locked_task(port);

	if (task == TASK_NULL)
		return IPC_SPACE_NULL;

	if (!task->active) {
		task_unlock(task);
		return IPC_SPACE_NULL;
	}

	space = task->itk_space;
	is_reference(space);
	task_unlock(task);
	return (space);
}
/*
 *	Routine:	convert_port_to_map
 *	Purpose:
 *		Convert from a port to a map.
 *		Doesn't consume the port ref; produces a map ref,
 *		which may be null.
 */

vm_map_t
convert_port_to_map(
	ipc_port_t	port)
{
	task_t task;
	vm_map_t map;

	task = convert_port_to_locked_task(port);

	if (task == TASK_NULL)
		return VM_MAP_NULL;

	if (!task->active) {
		task_unlock(task);
		return VM_MAP_NULL;
	}

	map = task->map;
	vm_map_reference_swap(map);
	task_unlock(task);
	return (map);
}
/*
 *	Routine:	convert_port_to_thread
 *	Purpose:
 *		Convert from a port to a thread.
 *		Doesn't consume the port ref; produces a thread ref,
 *		which may be null.
 */

thread_t
convert_port_to_thread(
	ipc_port_t	port)
{
	thread_t	thread = THREAD_NULL;

	if (IP_VALID(port)) {
		ip_lock(port);

		if (ip_active(port) &&
		    ip_kotype(port) == IKOT_THREAD) {
			thread = (thread_t)port->ip_kobject;
			assert(thread != THREAD_NULL);

			thread_reference_internal(thread);
		}

		ip_unlock(port);
	}

	return (thread);
}
/*
 *	Routine:	port_name_to_thread
 *	Purpose:
 *		Convert from a port name to a thread reference.
 *		A name of MACH_PORT_NULL is valid for the null thread.
 */

thread_t
port_name_to_thread(
	mach_port_name_t	name)
{
	thread_t	thread = THREAD_NULL;
	ipc_port_t	kport;

	if (MACH_PORT_VALID(name)) {
		if (ipc_object_copyin(current_space(), name,
				      MACH_MSG_TYPE_COPY_SEND,
				      (ipc_object_t *)&kport) != KERN_SUCCESS)
			return (THREAD_NULL);

		thread = convert_port_to_thread(kport);

		if (IP_VALID(kport))
			ipc_port_release_send(kport);
	}

	return (thread);
}
task_t
port_name_to_task(
	mach_port_name_t name)
{
	ipc_port_t kern_port;
	kern_return_t kr;
	task_t task = TASK_NULL;

	if (MACH_PORT_VALID(name)) {
		kr = ipc_object_copyin(current_space(), name,
				       MACH_MSG_TYPE_COPY_SEND,
				       (ipc_object_t *) &kern_port);
		if (kr != KERN_SUCCESS)
			return TASK_NULL;

		task = convert_port_to_task(kern_port);

		if (IP_VALID(kern_port))
			ipc_port_release_send(kern_port);
	}
	return task;
}
/*
 *	Routine:	convert_task_to_port
 *	Purpose:
 *		Convert from a task to a port.
 *		Consumes a task ref; produces a naked send right
 *		which may be invalid.
 */

ipc_port_t
convert_task_to_port(
	task_t		task)
{
	ipc_port_t port;

	itk_lock(task);
	if (task->itk_self != IP_NULL)
		port = ipc_port_make_send(task->itk_self);
	else
		port = IP_NULL;
	itk_unlock(task);

	task_deallocate(task);
	return port;
}
/*
 *	Routine:	convert_thread_to_port
 *	Purpose:
 *		Convert from a thread to a port.
 *		Consumes a thread ref; produces a naked send right
 *		which may be invalid.
 */

ipc_port_t
convert_thread_to_port(
	thread_t	thread)
{
	ipc_port_t	port;

	thread_mtx_lock(thread);

	if (thread->ith_self != IP_NULL)
		port = ipc_port_make_send(thread->ith_self);
	else
		port = IP_NULL;

	thread_mtx_unlock(thread);

	thread_deallocate(thread);

	return (port);
}
/*
 *	Routine:	space_deallocate
 *	Purpose:
 *		Deallocate a space ref produced by convert_port_to_space.
 */

void
space_deallocate(
	ipc_space_t	space)
{
	if (space != IS_NULL)
		is_release(space);
}
/*
 *	Routine:	thread/task_set_exception_ports [kernel call]
 *	Purpose:
 *		Sets the thread/task exception port, flavor and
 *		behavior for the exception types specified by the mask.
 *		There will be one send right per exception per valid
 *		combination.
 *	Conditions:
 *		Nothing locked.  If successful, consumes
 *		the supplied send right.
 *	Returns:
 *		KERN_SUCCESS		Changed the special port.
 *		KERN_INVALID_ARGUMENT	The thread is null,
 *					Illegal mask bit set.
 *					Illegal exception behavior.
 *		KERN_FAILURE		The thread is dead.
 */

kern_return_t
thread_set_exception_ports(
	thread_t		thread,
	exception_mask_t	exception_mask,
	ipc_port_t		new_port,
	exception_behavior_t	new_behavior,
	thread_state_flavor_t	new_flavor)
{
	ipc_port_t	old_port[EXC_TYPES_COUNT];
	boolean_t	privileged = current_task()->sec_token.val[0] == 0;
	register int	i;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (exception_mask & ~EXC_MASK_ALL)
		return (KERN_INVALID_ARGUMENT);

	if (IP_VALID(new_port)) {
		switch (new_behavior) {

		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
			break;

		default:
			return (KERN_INVALID_ARGUMENT);
		}
	}

	/*
	 * Check the validity of the thread_state_flavor by calling the
	 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
	 * osfmk/mach/ARCHITECTURE/thread_status.h
	 */
	if (!VALID_THREAD_STATE_FLAVOR(new_flavor))
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (!thread->active) {
		thread_mtx_unlock(thread);

		return (KERN_FAILURE);
	}

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
		if (exception_mask & (1 << i)) {
			old_port[i] = thread->exc_actions[i].port;
			thread->exc_actions[i].port = ipc_port_copy_send(new_port);
			thread->exc_actions[i].behavior = new_behavior;
			thread->exc_actions[i].flavor = new_flavor;
			thread->exc_actions[i].privileged = privileged;
		}
		else
			old_port[i] = IP_NULL;
	}

	thread_mtx_unlock(thread);

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
		if (IP_VALID(old_port[i]))
			ipc_port_release_send(old_port[i]);

	if (IP_VALID(new_port))		/* consume send right */
		ipc_port_release_send(new_port);

	return (KERN_SUCCESS);
}
kern_return_t
task_set_exception_ports(
	task_t			task,
	exception_mask_t	exception_mask,
	ipc_port_t		new_port,
	exception_behavior_t	new_behavior,
	thread_state_flavor_t	new_flavor)
{
	ipc_port_t	old_port[EXC_TYPES_COUNT];
	boolean_t	privileged = current_task()->sec_token.val[0] == 0;
	register int	i;

	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (exception_mask & ~EXC_MASK_ALL)
		return (KERN_INVALID_ARGUMENT);

	if (IP_VALID(new_port)) {
		switch (new_behavior) {

		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
			break;

		default:
			return (KERN_INVALID_ARGUMENT);
		}
	}

	itk_lock(task);

	if (task->itk_self == IP_NULL) {
		itk_unlock(task);

		return (KERN_FAILURE);
	}

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
		if (exception_mask & (1 << i)) {
			old_port[i] = task->exc_actions[i].port;
			task->exc_actions[i].port =
				ipc_port_copy_send(new_port);
			task->exc_actions[i].behavior = new_behavior;
			task->exc_actions[i].flavor = new_flavor;
			task->exc_actions[i].privileged = privileged;
		}
		else
			old_port[i] = IP_NULL;
	}

	itk_unlock(task);

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
		if (IP_VALID(old_port[i]))
			ipc_port_release_send(old_port[i]);

	if (IP_VALID(new_port))		/* consume send right */
		ipc_port_release_send(new_port);

	return (KERN_SUCCESS);
}
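/*
 * Usage note (user space): a sketch of installing a task exception port
 * through the MIG interface for the routine above, assuming the standard
 * <mach/mach.h> user APIs.  MACHINE_THREAD_STATE is used here only as a
 * conveniently valid flavor for EXCEPTION_DEFAULT; with a state-carrying
 * behavior the flavor chooses the thread state delivered to the handler.
 *
 *	#include <mach/mach.h>
 *
 *	kern_return_t install_exc_port_example(void)
 *	{
 *		mach_port_t exc_port;
 *		kern_return_t kr;
 *
 *		kr = mach_port_allocate(mach_task_self(),
 *		    MACH_PORT_RIGHT_RECEIVE, &exc_port);
 *		if (kr != KERN_SUCCESS)
 *			return kr;
 *
 *		kr = mach_port_insert_right(mach_task_self(), exc_port,
 *		    exc_port, MACH_MSG_TYPE_MAKE_SEND);
 *		if (kr != KERN_SUCCESS)
 *			return kr;
 *
 *		return task_set_exception_ports(mach_task_self(),
 *		    EXC_MASK_BAD_ACCESS, exc_port,
 *		    EXCEPTION_DEFAULT, MACHINE_THREAD_STATE);
 *	}
 */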
/*
 *	Routine:	thread/task_swap_exception_ports [kernel call]
 *	Purpose:
 *		Sets the thread/task exception port, flavor and
 *		behavior for the exception types specified by the
 *		mask.
 *
 *		The old ports, behavior and flavors are returned.
 *		Count specifies the array sizes on input and
 *		the number of returned ports etc. on output.  The
 *		arrays must be large enough to hold all the returned
 *		data; MIG returns an error otherwise.  The masks
 *		array specifies the corresponding exception type(s).
 *	Conditions:
 *		Nothing locked.  If successful, consumes
 *		the supplied send right.
 *
 *		Returns up to [in] CountCnt elements.
 *	Returns:
 *		KERN_SUCCESS		Changed the special port.
 *		KERN_INVALID_ARGUMENT	The thread is null,
 *					Illegal mask bit set.
 *					Illegal exception behavior.
 *		KERN_FAILURE		The thread is dead.
 */
kern_return_t
thread_swap_exception_ports(
	thread_t			thread,
	exception_mask_t		exception_mask,
	ipc_port_t			new_port,
	exception_behavior_t		new_behavior,
	thread_state_flavor_t		new_flavor,
	exception_mask_array_t		masks,
	mach_msg_type_number_t		*CountCnt,
	exception_port_array_t		ports,
	exception_behavior_array_t	behaviors,
	thread_state_flavor_array_t	flavors)
{
	ipc_port_t	old_port[EXC_TYPES_COUNT];
	boolean_t	privileged = current_task()->sec_token.val[0] == 0;
	unsigned int	i, j, count;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (exception_mask & ~EXC_MASK_ALL)
		return (KERN_INVALID_ARGUMENT);

	if (IP_VALID(new_port)) {
		switch (new_behavior) {

		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
			break;

		default:
			return (KERN_INVALID_ARGUMENT);
		}
	}

	thread_mtx_lock(thread);

	if (!thread->active) {
		thread_mtx_unlock(thread);

		return (KERN_FAILURE);
	}

	count = 0;

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
		if (exception_mask & (1 << i)) {
			for (j = 0; j < count; ++j) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (thread->exc_actions[i].port == ports[j] &&
				    thread->exc_actions[i].behavior == behaviors[j] &&
				    thread->exc_actions[i].flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			if (j == count) {
				masks[j] = (1 << i);
				ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);

				behaviors[j] = thread->exc_actions[i].behavior;
				flavors[j] = thread->exc_actions[i].flavor;
				++count;
			}

			old_port[i] = thread->exc_actions[i].port;
			thread->exc_actions[i].port = ipc_port_copy_send(new_port);
			thread->exc_actions[i].behavior = new_behavior;
			thread->exc_actions[i].flavor = new_flavor;
			thread->exc_actions[i].privileged = privileged;
			if (count > *CountCnt)
				break;
		}
		else
			old_port[i] = IP_NULL;
	}

	thread_mtx_unlock(thread);

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
		if (IP_VALID(old_port[i]))
			ipc_port_release_send(old_port[i]);

	if (IP_VALID(new_port))		/* consume send right */
		ipc_port_release_send(new_port);

	*CountCnt = count;

	return (KERN_SUCCESS);
}
kern_return_t
task_swap_exception_ports(
	task_t				task,
	exception_mask_t		exception_mask,
	ipc_port_t			new_port,
	exception_behavior_t		new_behavior,
	thread_state_flavor_t		new_flavor,
	exception_mask_array_t		masks,
	mach_msg_type_number_t		*CountCnt,
	exception_port_array_t		ports,
	exception_behavior_array_t	behaviors,
	thread_state_flavor_array_t	flavors)
{
	ipc_port_t	old_port[EXC_TYPES_COUNT];
	boolean_t	privileged = current_task()->sec_token.val[0] == 0;
	unsigned int	i, j, count;

	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (exception_mask & ~EXC_MASK_ALL)
		return (KERN_INVALID_ARGUMENT);

	if (IP_VALID(new_port)) {
		switch (new_behavior) {

		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
			break;

		default:
			return (KERN_INVALID_ARGUMENT);
		}
	}

	itk_lock(task);

	if (task->itk_self == IP_NULL) {
		itk_unlock(task);

		return (KERN_FAILURE);
	}

	count = 0;

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
		if (exception_mask & (1 << i)) {
			for (j = 0; j < count; j++) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (task->exc_actions[i].port == ports[j] &&
				    task->exc_actions[i].behavior == behaviors[j] &&
				    task->exc_actions[i].flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			if (j == count) {
				masks[j] = (1 << i);
				ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
				behaviors[j] = task->exc_actions[i].behavior;
				flavors[j] = task->exc_actions[i].flavor;
				++count;
			}

			old_port[i] = task->exc_actions[i].port;
			task->exc_actions[i].port = ipc_port_copy_send(new_port);
			task->exc_actions[i].behavior = new_behavior;
			task->exc_actions[i].flavor = new_flavor;
			task->exc_actions[i].privileged = privileged;
			if (count > *CountCnt)
				break;
		}
		else
			old_port[i] = IP_NULL;
	}

	itk_unlock(task);

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++)
		if (IP_VALID(old_port[i]))
			ipc_port_release_send(old_port[i]);

	if (IP_VALID(new_port))		/* consume send right */
		ipc_port_release_send(new_port);

	*CountCnt = count;

	return (KERN_SUCCESS);
}
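/*
 * Usage note (user space): the swap call installs new exception handling and
 * returns the previous settings in a single call, which is how user-level
 * crash handlers typically save the ports they later restore.  A minimal
 * sketch, assuming the standard <mach/mach.h> user APIs:
 *
 *	#include <mach/mach.h>
 *
 *	void swap_exc_ports_example(mach_port_t new_exc_port)
 *	{
 *		exception_mask_t masks[EXC_TYPES_COUNT];
 *		exception_handler_t old_ports[EXC_TYPES_COUNT];
 *		exception_behavior_t old_behaviors[EXC_TYPES_COUNT];
 *		thread_state_flavor_t old_flavors[EXC_TYPES_COUNT];
 *		mach_msg_type_number_t count = EXC_TYPES_COUNT;
 *
 *		if (task_swap_exception_ports(mach_task_self(), EXC_MASK_ALL,
 *		        new_exc_port, EXCEPTION_DEFAULT, MACHINE_THREAD_STATE,
 *		        masks, &count, old_ports, old_behaviors, old_flavors) != KERN_SUCCESS)
 *			return;
 *
 *		// masks[0 .. count-1] say which exception types map to
 *		// old_ports / old_behaviors / old_flavors[0 .. count-1]
 *	}
 */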
/*
 *	Routine:	thread/task_get_exception_ports [kernel call]
 *	Purpose:
 *		Clones a send right for each of the thread/task's exception
 *		ports specified in the mask and returns the behaviour
 *		and flavor of said port.
 *
 *		Returns up to [in] CountCnt elements.
 *	Returns:
 *		KERN_SUCCESS		Extracted a send right.
 *		KERN_INVALID_ARGUMENT	The thread is null,
 *					Invalid special port,
 *					Illegal mask bit set.
 *		KERN_FAILURE		The thread is dead.
 */
kern_return_t
thread_get_exception_ports(
	thread_t			thread,
	exception_mask_t		exception_mask,
	exception_mask_array_t		masks,
	mach_msg_type_number_t		*CountCnt,
	exception_port_array_t		ports,
	exception_behavior_array_t	behaviors,
	thread_state_flavor_array_t	flavors)
{
	unsigned int	i, j, count;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (exception_mask & ~EXC_MASK_ALL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (!thread->active) {
		thread_mtx_unlock(thread);

		return (KERN_FAILURE);
	}

	count = 0;

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
		if (exception_mask & (1 << i)) {
			for (j = 0; j < count; ++j) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (thread->exc_actions[i].port == ports[j] &&
				    thread->exc_actions[i].behavior == behaviors[j] &&
				    thread->exc_actions[i].flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			if (j == count) {
				masks[j] = (1 << i);
				ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);
				behaviors[j] = thread->exc_actions[i].behavior;
				flavors[j] = thread->exc_actions[i].flavor;
				++count;
				if (count >= *CountCnt)
					break;
			}
		}
	}

	thread_mtx_unlock(thread);

	*CountCnt = count;

	return (KERN_SUCCESS);
}
kern_return_t
task_get_exception_ports(
	task_t				task,
	exception_mask_t		exception_mask,
	exception_mask_array_t		masks,
	mach_msg_type_number_t		*CountCnt,
	exception_port_array_t		ports,
	exception_behavior_array_t	behaviors,
	thread_state_flavor_array_t	flavors)
{
	unsigned int	i, j, count;

	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (exception_mask & ~EXC_MASK_ALL)
		return (KERN_INVALID_ARGUMENT);

	itk_lock(task);

	if (task->itk_self == IP_NULL) {
		itk_unlock(task);

		return (KERN_FAILURE);
	}

	count = 0;

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
		if (exception_mask & (1 << i)) {
			for (j = 0; j < count; ++j) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (task->exc_actions[i].port == ports[j] &&
				    task->exc_actions[i].behavior == behaviors[j] &&
				    task->exc_actions[i].flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			if (j == count) {
				masks[j] = (1 << i);
				ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
				behaviors[j] = task->exc_actions[i].behavior;
				flavors[j] = task->exc_actions[i].flavor;
				++count;
				if (count > *CountCnt)
					break;
			}
		}
	}

	itk_unlock(task);

	*CountCnt = count;

	return (KERN_SUCCESS);
}
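/*
 * Usage note (user space): a read-only counterpart to the swap sketch above;
 * task_get_exception_ports() clones the current settings without changing
 * them.  Illustrative only, assuming the standard <mach/mach.h> user APIs:
 *
 *	#include <mach/mach.h>
 *
 *	void dump_exc_ports_example(void)
 *	{
 *		exception_mask_t masks[EXC_TYPES_COUNT];
 *		exception_handler_t ports[EXC_TYPES_COUNT];
 *		exception_behavior_t behaviors[EXC_TYPES_COUNT];
 *		thread_state_flavor_t flavors[EXC_TYPES_COUNT];
 *		mach_msg_type_number_t count = EXC_TYPES_COUNT;
 *		unsigned int i;
 *
 *		if (task_get_exception_ports(mach_task_self(), EXC_MASK_ALL,
 *		        masks, &count, ports, behaviors, flavors) != KERN_SUCCESS)
 *			return;
 *
 *		for (i = 0; i < count; i++) {
 *			// each entry covers the exception types in masks[i];
 *			// ports[i] is a send right owned by the caller
 *			mach_port_deallocate(mach_task_self(), ports[i]);
 *		}
 *	}
 */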