/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * Task and thread related IPC functions.
 */
#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mach_param.h>
#include <mach/task_special_ports.h>
#include <mach/thread_special_ports.h>
#include <mach/thread_status.h>
#include <mach/exception_types.h>
#include <mach/memory_object_types.h>
#include <mach/mach_traps.h>
#include <mach/task_server.h>
#include <mach/thread_act_server.h>
#include <mach/mach_host_server.h>
#include <mach/host_priv_server.h>
#include <mach/vm_map_server.h>

#include <kern/kern_types.h>
#include <kern/host.h>
#include <kern/ipc_kobject.h>
#include <kern/ipc_tt.h>
#include <kern/kalloc.h>
#include <kern/thread.h>
#include <kern/misc_protos.h>

#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_shared_memory_server.h>
#include <vm/vm_protos.h>
/* forward declarations */
task_t convert_port_to_locked_task(ipc_port_t port);
/*
 *  Routine:    ipc_task_init
 *  Purpose:
 *      Initialize a task's IPC state.
 *
 *      If non-null, some state will be inherited from the parent.
 *      The parent must be appropriately initialized.
 */
void
ipc_task_init(
    task_t      task,
    task_t      parent)
{
    ipc_space_t space;
    ipc_port_t kport;
    ipc_port_t port;
    kern_return_t kr;
    int i;

    kr = ipc_space_create(&ipc_table_entries[0], &space);
    if (kr != KERN_SUCCESS)
        panic("ipc_task_init");

    kport = ipc_port_alloc_kernel();
    if (kport == IP_NULL)
        panic("ipc_task_init");

    task->itk_self = kport;
    task->itk_sself = ipc_port_make_send(kport);
    task->itk_space = space;
    space->is_fast = FALSE;

    if (parent == TASK_NULL) {
        for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
            task->exc_actions[i].port = IP_NULL;
        }

        kr = host_get_host_port(host_priv_self(), &port);
        assert(kr == KERN_SUCCESS);
        task->itk_host = port;

        task->itk_bootstrap = IP_NULL;

        for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
            task->itk_registered[i] = IP_NULL;
    } else {
        assert(parent->itk_self != IP_NULL);

        /* inherit registered ports */

        for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
            task->itk_registered[i] =
                ipc_port_copy_send(parent->itk_registered[i]);

        /* inherit exception and bootstrap ports */

        for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
            task->exc_actions[i].port =
                ipc_port_copy_send(parent->exc_actions[i].port);
            task->exc_actions[i].flavor =
                parent->exc_actions[i].flavor;
            task->exc_actions[i].behavior =
                parent->exc_actions[i].behavior;
        }

        task->itk_host =
            ipc_port_copy_send(parent->itk_host);

        task->itk_bootstrap =
            ipc_port_copy_send(parent->itk_bootstrap);
    }
}
/*
 *  Routine:    ipc_task_enable
 *  Purpose:
 *      Enable a task for IPC access.
 */
void
ipc_task_enable(
    task_t      task)
{
    ipc_port_t kport;

    itk_lock(task);
    kport = task->itk_self;
    if (kport != IP_NULL)
        ipc_kobject_set(kport, (ipc_kobject_t) task, IKOT_TASK);
    itk_unlock(task);
}
/*
 *  Routine:    ipc_task_disable
 *  Purpose:
 *      Disable IPC access to a task.
 */
void
ipc_task_disable(
    task_t      task)
{
    ipc_port_t kport;

    itk_lock(task);
    kport = task->itk_self;
    if (kport != IP_NULL)
        ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
    itk_unlock(task);
}
/*
 *  Routine:    ipc_task_terminate
 *  Purpose:
 *      Clean up and destroy a task's IPC state.
 *  Conditions:
 *      Nothing locked.  The task must be suspended.
 *      (Or the current thread must be in the task.)
 */
void
ipc_task_terminate(
    task_t      task)
{
    ipc_port_t kport;
    int i;

    itk_lock(task);
    kport = task->itk_self;

    if (kport == IP_NULL) {
        /* the task is already terminated (can this happen?) */
        itk_unlock(task);
        return;
    }

    task->itk_self = IP_NULL;
    itk_unlock(task);

    /* release the naked send rights */

    if (IP_VALID(task->itk_sself))
        ipc_port_release_send(task->itk_sself);

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
        if (IP_VALID(task->exc_actions[i].port)) {
            ipc_port_release_send(task->exc_actions[i].port);
        }
    }

    if (IP_VALID(task->itk_host))
        ipc_port_release_send(task->itk_host);

    if (IP_VALID(task->itk_bootstrap))
        ipc_port_release_send(task->itk_bootstrap);

    for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
        if (IP_VALID(task->itk_registered[i]))
            ipc_port_release_send(task->itk_registered[i]);

    ipc_port_release_send(task->wired_ledger_port);
    ipc_port_release_send(task->paged_ledger_port);

    /* destroy the kernel port */
    ipc_port_dealloc_kernel(kport);
}
/*
 *  Routine:    ipc_task_reset
 *  Purpose:
 *      Reset a task's IPC state to protect it when
 *      it enters an elevated security context.
 *  Conditions:
 *      Nothing locked.  The task must be suspended.
 *      (Or the current thread must be in the task.)
 */
void
ipc_task_reset(
    task_t      task)
{
    ipc_port_t old_kport, new_kport;
    ipc_port_t old_sself;
    ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
    int i;

    new_kport = ipc_port_alloc_kernel();
    if (new_kport == IP_NULL)
        panic("ipc_task_reset");

    itk_lock(task);

    old_kport = task->itk_self;

    if (old_kport == IP_NULL) {
        /* the task is already terminated (can this happen?) */
        itk_unlock(task);
        ipc_port_dealloc_kernel(new_kport);
        return;
    }

    task->itk_self = new_kport;
    old_sself = task->itk_sself;
    task->itk_sself = ipc_port_make_send(new_kport);
    ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
    ipc_kobject_set(new_kport, (ipc_kobject_t) task, IKOT_TASK);

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
        old_exc_actions[i] = task->exc_actions[i].port;
        task->exc_actions[i].port = IP_NULL;
    }

    itk_unlock(task);

    /* release the naked send rights */

    if (IP_VALID(old_sself))
        ipc_port_release_send(old_sself);

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
        if (IP_VALID(old_exc_actions[i])) {
            ipc_port_release_send(old_exc_actions[i]);
        }
    }

    /* destroy the kernel port */
    ipc_port_dealloc_kernel(old_kport);
}
/*
 *  Routine:    ipc_thread_init
 *  Purpose:
 *      Initialize a thread's IPC state.
 */
void
ipc_thread_init(
    thread_t    thread)
{
    ipc_port_t kport;
    int i;

    kport = ipc_port_alloc_kernel();
    if (kport == IP_NULL)
        panic("ipc_thread_init");

    thread->ith_self = kport;
    thread->ith_sself = ipc_port_make_send(kport);

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
        thread->exc_actions[i].port = IP_NULL;

    ipc_kobject_set(kport, (ipc_kobject_t)thread, IKOT_THREAD);

    ipc_kmsg_queue_init(&thread->ith_messages);

    thread->ith_rpc_reply = IP_NULL;
}
/*
 *  Routine:    ipc_thread_disable
 *  Purpose:
 *      Disable IPC access to a thread.
 */
void
ipc_thread_disable(
    thread_t    thread)
{
    ipc_port_t kport = thread->ith_self;

    if (kport != IP_NULL)
        ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
}
/*
 *  Routine:    ipc_thread_terminate
 *  Purpose:
 *      Clean up and destroy a thread's IPC state.
 */
void
ipc_thread_terminate(
    thread_t    thread)
{
    ipc_port_t kport = thread->ith_self;
    int i;

    if (kport != IP_NULL) {
        if (IP_VALID(thread->ith_sself))
            ipc_port_release_send(thread->ith_sself);

        thread->ith_sself = thread->ith_self = IP_NULL;

        for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
            if (IP_VALID(thread->exc_actions[i].port))
                ipc_port_release_send(thread->exc_actions[i].port);
        }

        ipc_port_dealloc_kernel(kport);
    }

    assert(ipc_kmsg_queue_empty(&thread->ith_messages));

    if (thread->ith_rpc_reply != IP_NULL)
        ipc_port_dealloc_reply(thread->ith_rpc_reply);

    thread->ith_rpc_reply = IP_NULL;
}
/*
 *  Routine:    retrieve_task_self_fast
 *  Purpose:
 *      Optimized version of retrieve_task_self,
 *      that only works for the current task.
 *
 *      Return a send right (possibly null/dead)
 *      for the task's user-visible self port.
 */
ipc_port_t
retrieve_task_self_fast(
    register task_t     task)
{
    register ipc_port_t port;

    assert(task == current_task());

    itk_lock(task);
    assert(task->itk_self != IP_NULL);

    if ((port = task->itk_sself) == task->itk_self) {
        /* no interposing; clone the send right by hand */
        ip_lock(port);
        assert(ip_active(port));
        ip_reference(port);
        port->ip_srights++;
        ip_unlock(port);
    }
    else
        port = ipc_port_copy_send(port);
    itk_unlock(task);

    return port;
}
/*
 *  Routine:    retrieve_thread_self_fast
 *  Purpose:
 *      Return a send right (possibly null/dead)
 *      for the thread's user-visible self port.
 *
 *      Only works for the current thread.
 */
ipc_port_t
retrieve_thread_self_fast(
    thread_t        thread)
{
    register ipc_port_t port;

    assert(thread == current_thread());

    thread_mtx_lock(thread);

    assert(thread->ith_self != IP_NULL);

    if ((port = thread->ith_sself) == thread->ith_self) {
        /* no interposing; clone the send right by hand */
        ip_lock(port);
        assert(ip_active(port));
        ip_reference(port);
        port->ip_srights++;
        ip_unlock(port);
    }
    else
        port = ipc_port_copy_send(port);

    thread_mtx_unlock(thread);

    return port;
}
/*
 *  Routine:    task_self_trap [mach trap]
 *  Purpose:
 *      Give the caller send rights for his own task port.
 *  Returns:
 *      MACH_PORT_NULL if there are any resource failures
 */
mach_port_name_t
task_self_trap(
    __unused struct task_self_trap_args *args)
{
    task_t task = current_task();
    ipc_port_t sright;
    mach_port_name_t name;

    sright = retrieve_task_self_fast(task);
    name = ipc_port_copyout_send(sright, task->itk_space);
    return name;
}
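/*
 * Illustrative sketch (not part of the kernel build): from user space this
 * trap is normally reached through the mach_task_self() stub, which hands
 * back a send right name for the caller's task port.  A minimal, hedged
 * user-space fragment:
 *
 *      #include <mach/mach.h>
 *
 *      mach_port_t self = mach_task_self();
 *      // "self" names a send right in this task's IPC space and can be
 *      // passed to task_* calls such as task_get_special_port().
 */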
/*
 *  Routine:    thread_self_trap [mach trap]
 *  Purpose:
 *      Give the caller send rights for his own thread port.
 *  Returns:
 *      MACH_PORT_NULL if there are any resource failures
 */
mach_port_name_t
thread_self_trap(
    __unused struct thread_self_trap_args *args)
{
    thread_t thread = current_thread();
    task_t task = thread->task;
    ipc_port_t sright;
    mach_port_name_t name;

    sright = retrieve_thread_self_fast(thread);
    name = ipc_port_copyout_send(sright, task->itk_space);
    return name;
}
/*
 *  Routine:    mach_reply_port [mach trap]
 *  Purpose:
 *      Allocate a port for the caller.
 *  Returns:
 *      MACH_PORT_NULL if there are any resource failures
 */
mach_port_name_t
mach_reply_port(
    __unused struct mach_reply_port_args *args)
{
    ipc_port_t port;
    mach_port_name_t name;
    kern_return_t kr;

    kr = ipc_port_alloc(current_task()->itk_space, &name, &port);
    if (kr == KERN_SUCCESS)
        ip_unlock(port);
    else
        name = MACH_PORT_NULL;
    return name;
}
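/*
 * Illustrative sketch (not part of the kernel build): user code rarely calls
 * this trap directly; the MIG reply-port machinery uses it to obtain a
 * receive right that stubs place in msgh_local_port so the kernel can send
 * RPC replies back.  A hedged user-space fragment:
 *
 *      #include <mach/mach.h>
 *
 *      mach_port_t reply = mach_reply_port();
 *      // "reply" is a receive right in the caller's space; MACH_PORT_NULL
 *      // indicates a resource failure, matching the trap's return above.
 */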
/*
 *  Routine:    thread_get_special_port [kernel call]
 *  Purpose:
 *      Clones a send right for one of the thread's
 *      special ports.
 *  Returns:
 *      KERN_SUCCESS            Extracted a send right.
 *      KERN_INVALID_ARGUMENT   The thread is null.
 *      KERN_FAILURE            The thread is dead.
 *      KERN_INVALID_ARGUMENT   Invalid special port.
 */
kern_return_t
thread_get_special_port(
    thread_t        thread,
    int             which,
    ipc_port_t      *portp)
{
    kern_return_t result = KERN_SUCCESS;
    ipc_port_t *whichp;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    switch (which) {

    case THREAD_KERNEL_PORT:
        whichp = &thread->ith_sself;
        break;

    default:
        return (KERN_INVALID_ARGUMENT);
    }

    thread_mtx_lock(thread);

    if (thread->active)
        *portp = ipc_port_copy_send(*whichp);
    else
        result = KERN_FAILURE;

    thread_mtx_unlock(thread);

    return (result);
}
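/*
 * Illustrative sketch (not part of the kernel build): from user space the
 * matching MIG wrapper clones the kernel port of a thread the caller holds
 * a send right for, e.g. its own:
 *
 *      mach_port_t self_thread = mach_thread_self();  // send right; deallocate when done
 *      mach_port_t kernel_port = MACH_PORT_NULL;
 *      kern_return_t kr = thread_get_special_port(self_thread,
 *          THREAD_KERNEL_PORT, &kernel_port);
 */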
/*
 *  Routine:    thread_set_special_port [kernel call]
 *  Purpose:
 *      Changes one of the thread's special ports,
 *      setting it to the supplied send right.
 *  Conditions:
 *      Nothing locked.  If successful, consumes
 *      the supplied send right.
 *  Returns:
 *      KERN_SUCCESS            Changed the special port.
 *      KERN_INVALID_ARGUMENT   The thread is null.
 *      KERN_FAILURE            The thread is dead.
 *      KERN_INVALID_ARGUMENT   Invalid special port.
 */
kern_return_t
thread_set_special_port(
    thread_t        thread,
    int             which,
    ipc_port_t      port)
{
    kern_return_t result = KERN_SUCCESS;
    ipc_port_t *whichp, old = IP_NULL;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    switch (which) {

    case THREAD_KERNEL_PORT:
        whichp = &thread->ith_sself;
        break;

    default:
        return (KERN_INVALID_ARGUMENT);
    }

    thread_mtx_lock(thread);

    if (thread->active) {
        old = *whichp;
        *whichp = port;
    }
    else
        result = KERN_FAILURE;

    thread_mtx_unlock(thread);

    if (IP_VALID(old))
        ipc_port_release_send(old);

    return (result);
}
/*
 *  Routine:    task_get_special_port [kernel call]
 *  Purpose:
 *      Clones a send right for one of the task's
 *      special ports.
 *  Returns:
 *      KERN_SUCCESS            Extracted a send right.
 *      KERN_INVALID_ARGUMENT   The task is null.
 *      KERN_FAILURE            The task/space is dead.
 *      KERN_INVALID_ARGUMENT   Invalid special port.
 */
kern_return_t
task_get_special_port(
    task_t          task,
    int             which,
    ipc_port_t      *portp)
{
    ipc_port_t *whichp;
    ipc_port_t port;

    if (task == TASK_NULL)
        return KERN_INVALID_ARGUMENT;

    switch (which) {
    case TASK_KERNEL_PORT:
        whichp = &task->itk_sself;
        break;

    case TASK_HOST_PORT:
        whichp = &task->itk_host;
        break;

    case TASK_BOOTSTRAP_PORT:
        whichp = &task->itk_bootstrap;
        break;

    case TASK_WIRED_LEDGER_PORT:
        whichp = &task->wired_ledger_port;
        break;

    case TASK_PAGED_LEDGER_PORT:
        whichp = &task->paged_ledger_port;
        break;

    default:
        return KERN_INVALID_ARGUMENT;
    }

    itk_lock(task);
    if (task->itk_self == IP_NULL) {
        itk_unlock(task);
        return KERN_FAILURE;
    }

    port = ipc_port_copy_send(*whichp);
    itk_unlock(task);

    *portp = port;
    return KERN_SUCCESS;
}
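/*
 * Illustrative sketch (not part of the kernel build): the MIG-generated
 * user-space wrapper has the same shape as this kernel-side routine.  For
 * example, cloning the bootstrap port of the calling task:
 *
 *      mach_port_t bootstrap = MACH_PORT_NULL;
 *      kern_return_t kr = task_get_special_port(mach_task_self(),
 *          TASK_BOOTSTRAP_PORT, &bootstrap);
 *      // On success the caller owns a new send right and should eventually
 *      // mach_port_deallocate() it.
 */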
/*
 *  Routine:    task_set_special_port [kernel call]
 *  Purpose:
 *      Changes one of the task's special ports,
 *      setting it to the supplied send right.
 *  Conditions:
 *      Nothing locked.  If successful, consumes
 *      the supplied send right.
 *  Returns:
 *      KERN_SUCCESS            Changed the special port.
 *      KERN_INVALID_ARGUMENT   The task is null.
 *      KERN_FAILURE            The task/space is dead.
 *      KERN_INVALID_ARGUMENT   Invalid special port.
 */
kern_return_t
task_set_special_port(
    task_t          task,
    int             which,
    ipc_port_t      port)
{
    ipc_port_t *whichp;
    ipc_port_t old;

    if (task == TASK_NULL)
        return KERN_INVALID_ARGUMENT;

    switch (which) {
    case TASK_KERNEL_PORT:
        whichp = &task->itk_sself;
        break;

    case TASK_HOST_PORT:
        whichp = &task->itk_host;
        break;

    case TASK_BOOTSTRAP_PORT:
        whichp = &task->itk_bootstrap;
        break;

    case TASK_WIRED_LEDGER_PORT:
        whichp = &task->wired_ledger_port;
        break;

    case TASK_PAGED_LEDGER_PORT:
        whichp = &task->paged_ledger_port;
        break;

    default:
        return KERN_INVALID_ARGUMENT;
    }

    itk_lock(task);
    if (task->itk_self == IP_NULL) {
        itk_unlock(task);
        return KERN_FAILURE;
    }

    old = *whichp;
    *whichp = port;
    itk_unlock(task);

    if (IP_VALID(old))
        ipc_port_release_send(old);
    return KERN_SUCCESS;
}
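/*
 * Illustrative sketch (not part of the kernel build): from user space the
 * MIG wrapper with the same name carries a send right into the kernel, and
 * the old special port is released exactly as in the tail of the routine
 * above.  For example, replacing the caller's bootstrap port:
 *
 *      // "new_bootstrap" is a hypothetical send right already held by the caller.
 *      kern_return_t kr = task_set_special_port(mach_task_self(),
 *          TASK_BOOTSTRAP_PORT, new_bootstrap);
 */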
/*
 *  Routine:    mach_ports_register [kernel call]
 *  Purpose:
 *      Stash a handful of port send rights in the task.
 *      Child tasks will inherit these rights, but they
 *      must use mach_ports_lookup to acquire them.
 *
 *      The rights are supplied in a (wired) kalloc'd segment.
 *      Rights which aren't supplied are assumed to be null.
 *  Conditions:
 *      Nothing locked.  If successful, consumes
 *      the supplied rights and memory.
 *  Returns:
 *      KERN_SUCCESS            Stashed the port rights.
 *      KERN_INVALID_ARGUMENT   The task is null.
 *      KERN_INVALID_ARGUMENT   The task is dead.
 *      KERN_INVALID_ARGUMENT   Too many port rights supplied.
 */
kern_return_t
mach_ports_register(
    task_t                  task,
    mach_port_array_t       memory,
    mach_msg_type_number_t  portsCnt)
{
    ipc_port_t ports[TASK_PORT_REGISTER_MAX];
    unsigned int i;

    if ((task == TASK_NULL) ||
        (portsCnt > TASK_PORT_REGISTER_MAX))
        return KERN_INVALID_ARGUMENT;

    /*
     *  Pad the port rights with nulls.
     */

    for (i = 0; i < portsCnt; i++)
        ports[i] = memory[i];
    for (; i < TASK_PORT_REGISTER_MAX; i++)
        ports[i] = IP_NULL;

    itk_lock(task);
    if (task->itk_self == IP_NULL) {
        itk_unlock(task);
        return KERN_INVALID_ARGUMENT;
    }

    /*
     *  Replace the old send rights with the new.
     *  Release the old rights after unlocking.
     */

    for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
        ipc_port_t old;

        old = task->itk_registered[i];
        task->itk_registered[i] = ports[i];
        ports[i] = old;
    }

    itk_unlock(task);

    for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
        if (IP_VALID(ports[i]))
            ipc_port_release_send(ports[i]);

    /*
     *  Now that the operation is known to be successful,
     *  we can free the memory.
     */

    if (portsCnt != 0)
        kfree(memory,
              (vm_size_t) (portsCnt * sizeof(mach_port_t)));

    return KERN_SUCCESS;
}
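/*
 * Illustrative sketch (not part of the kernel build): a user-space caller
 * passes an array of at most TASK_PORT_REGISTER_MAX send rights; children
 * created afterwards inherit copies (see ipc_task_init above) and retrieve
 * them with mach_ports_lookup().  A hedged fragment:
 *
 *      mach_port_t init_ports[] = { service_port };   // hypothetical send right
 *      kern_return_t kr = mach_ports_register(mach_task_self(),
 *          init_ports, 1);
 */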
/*
 *  Routine:    mach_ports_lookup [kernel call]
 *  Purpose:
 *      Retrieves (clones) the stashed port send rights.
 *  Conditions:
 *      Nothing locked.  If successful, the caller gets
 *      ownership of the memory.
 *  Returns:
 *      KERN_SUCCESS            Retrieved the send rights.
 *      KERN_INVALID_ARGUMENT   The task is null.
 *      KERN_INVALID_ARGUMENT   The task is dead.
 *      KERN_RESOURCE_SHORTAGE  Couldn't allocate memory.
 */
kern_return_t
mach_ports_lookup(
    task_t                  task,
    mach_port_array_t       *portsp,
    mach_msg_type_number_t  *portsCnt)
{
    void *memory;
    vm_size_t size;
    ipc_port_t *ports;
    int i;

    if (task == TASK_NULL)
        return KERN_INVALID_ARGUMENT;

    size = (vm_size_t) (TASK_PORT_REGISTER_MAX * sizeof(ipc_port_t));

    memory = kalloc(size);
    if (memory == 0)
        return KERN_RESOURCE_SHORTAGE;

    itk_lock(task);
    if (task->itk_self == IP_NULL) {
        itk_unlock(task);
        kfree(memory, size);
        return KERN_INVALID_ARGUMENT;
    }

    ports = (ipc_port_t *) memory;

    /*
     *  Clone port rights.  Because kalloc'd memory
     *  is wired, we won't fault while holding the task lock.
     */

    for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
        ports[i] = ipc_port_copy_send(task->itk_registered[i]);

    itk_unlock(task);

    *portsp = (mach_port_array_t) ports;
    *portsCnt = TASK_PORT_REGISTER_MAX;
    return KERN_SUCCESS;
}
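/*
 * Illustrative sketch (not part of the kernel build): the user-space wrapper
 * returns the registered rights as out-of-line memory, so the caller should
 * deallocate the array when done.  A hedged fragment:
 *
 *      mach_port_array_t ports;
 *      mach_msg_type_number_t count;
 *      kern_return_t kr = mach_ports_lookup(mach_task_self(), &ports, &count);
 *      if (kr == KERN_SUCCESS) {
 *          // ... use ports[0..count-1] ...
 *          vm_deallocate(mach_task_self(), (vm_address_t)ports,
 *              count * sizeof(ports[0]));
 *      }
 */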
/*
 *  Routine:    convert_port_to_locked_task
 *  Purpose:
 *      Internal helper routine to convert from a port to a locked
 *      task.  Used by several routines that try to convert from a
 *      task port to a reference on some task related object.
 *  Conditions:
 *      Nothing locked, blocking OK.
 */
task_t
convert_port_to_locked_task(ipc_port_t port)
{
    while (IP_VALID(port)) {
        task_t task;

        ip_lock(port);
        if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK)) {
            ip_unlock(port);
            return TASK_NULL;
        }
        task = (task_t) port->ip_kobject;
        assert(task != TASK_NULL);

        /*
         * Normal lock ordering puts task_lock() before ip_lock().
         * Attempt out-of-order locking here.
         */
        if (task_lock_try(task)) {
            ip_unlock(port);
            return task;
        }

        ip_unlock(port);
    }
    return TASK_NULL;
}
/*
 *  Routine:    convert_port_to_task
 *  Purpose:
 *      Convert from a port to a task.
 *      Doesn't consume the port ref; produces a task ref,
 *      which may be null.
 */
task_t
convert_port_to_task(
    ipc_port_t      port)
{
    task_t task = TASK_NULL;

    if (IP_VALID(port)) {
        ip_lock(port);

        if (ip_active(port) &&
            ip_kotype(port) == IKOT_TASK) {
            task = (task_t)port->ip_kobject;
            assert(task != TASK_NULL);

            task_reference_internal(task);
        }

        ip_unlock(port);
    }

    return (task);
}
/*
 *  Routine:    convert_port_to_space
 *  Purpose:
 *      Convert from a port to a space.
 *      Doesn't consume the port ref; produces a space ref,
 *      which may be null.
 */
ipc_space_t
convert_port_to_space(
    ipc_port_t      port)
{
    ipc_space_t space;
    task_t task;

    task = convert_port_to_locked_task(port);

    if (task == TASK_NULL)
        return IPC_SPACE_NULL;

    if (!task->active) {
        task_unlock(task);
        return IPC_SPACE_NULL;
    }

    space = task->itk_space;
    is_reference(space);
    task_unlock(task);
    return (space);
}
/*
 *  Routine:    convert_port_to_map
 *  Purpose:
 *      Convert from a port to a map.
 *      Doesn't consume the port ref; produces a map ref,
 *      which may be null.
 */
vm_map_t
convert_port_to_map(
    ipc_port_t      port)
{
    vm_map_t map;
    task_t task;

    task = convert_port_to_locked_task(port);

    if (task == TASK_NULL)
        return VM_MAP_NULL;

    if (!task->active) {
        task_unlock(task);
        return VM_MAP_NULL;
    }

    map = task->map;
    vm_map_reference_swap(map);
    task_unlock(task);
    return (map);
}
/*
 *  Routine:    convert_port_to_thread
 *  Purpose:
 *      Convert from a port to a thread.
 *      Doesn't consume the port ref; produces a thread ref,
 *      which may be null.
 */
thread_t
convert_port_to_thread(
    ipc_port_t      port)
{
    thread_t thread = THREAD_NULL;

    if (IP_VALID(port)) {
        ip_lock(port);

        if (ip_active(port) &&
            ip_kotype(port) == IKOT_THREAD) {
            thread = (thread_t)port->ip_kobject;
            assert(thread != THREAD_NULL);

            thread_reference_internal(thread);
        }

        ip_unlock(port);
    }

    return (thread);
}
/*
 *  Routine:    port_name_to_thread
 *  Purpose:
 *      Convert from a port name to a thread reference.
 *      A name of MACH_PORT_NULL is valid for the null thread.
 */
thread_t
port_name_to_thread(
    mach_port_name_t    name)
{
    thread_t thread = THREAD_NULL;
    ipc_port_t kport;

    if (MACH_PORT_VALID(name)) {
        if (ipc_object_copyin(current_space(), name,
                              MACH_MSG_TYPE_COPY_SEND,
                              (ipc_object_t *)&kport) != KERN_SUCCESS)
            return (THREAD_NULL);

        thread = convert_port_to_thread(kport);

        if (IP_VALID(kport))
            ipc_port_release_send(kport);
    }

    return (thread);
}
/*
 *  Routine:    port_name_to_task
 *  Purpose:
 *      Convert from a port name to a task reference.
 */
task_t
port_name_to_task(
    mach_port_name_t    name)
{
    ipc_port_t kern_port;
    kern_return_t kr;
    task_t task = TASK_NULL;

    if (MACH_PORT_VALID(name)) {
        kr = ipc_object_copyin(current_space(), name,
                               MACH_MSG_TYPE_COPY_SEND,
                               (ipc_object_t *) &kern_port);
        if (kr != KERN_SUCCESS)
            return TASK_NULL;

        task = convert_port_to_task(kern_port);

        if (IP_VALID(kern_port))
            ipc_port_release_send(kern_port);
    }
    return task;
}
/*
 *  Routine:    convert_task_to_port
 *  Purpose:
 *      Convert from a task to a port.
 *      Consumes a task ref; produces a naked send right
 *      which may be invalid.
 */
ipc_port_t
convert_task_to_port(
    task_t      task)
{
    ipc_port_t port;

    itk_lock(task);
    if (task->itk_self != IP_NULL)
        port = ipc_port_make_send(task->itk_self);
    else
        port = IP_NULL;
    itk_unlock(task);

    task_deallocate(task);
    return port;
}
/*
 *  Routine:    convert_thread_to_port
 *  Purpose:
 *      Convert from a thread to a port.
 *      Consumes a thread ref; produces a naked send right
 *      which may be invalid.
 */
ipc_port_t
convert_thread_to_port(
    thread_t        thread)
{
    ipc_port_t port;

    thread_mtx_lock(thread);

    if (thread->ith_self != IP_NULL)
        port = ipc_port_make_send(thread->ith_self);
    else
        port = IP_NULL;

    thread_mtx_unlock(thread);

    thread_deallocate(thread);

    return (port);
}
/*
 *  Routine:    space_deallocate
 *  Purpose:
 *      Deallocate a space ref produced by convert_port_to_space.
 */
void
space_deallocate(ipc_space_t space)
{
    if (space != IS_NULL)
        is_release(space);
}
/*
 *  Routine:    thread/task_set_exception_ports [kernel call]
 *  Purpose:
 *      Sets the thread/task exception port, flavor and
 *      behavior for the exception types specified by the mask.
 *      There will be one send right per exception per valid
 *      element of exception_mask.
 *  Conditions:
 *      Nothing locked.  If successful, consumes
 *      the supplied send right.
 *  Returns:
 *      KERN_SUCCESS            Changed the special port.
 *      KERN_INVALID_ARGUMENT   The thread is null,
 *                              Illegal mask bit set.
 *                              Illegal exception behavior
 *      KERN_FAILURE            The thread is dead.
 */
kern_return_t
thread_set_exception_ports(
    thread_t                thread,
    exception_mask_t        exception_mask,
    ipc_port_t              new_port,
    exception_behavior_t    new_behavior,
    thread_state_flavor_t   new_flavor)
{
    ipc_port_t old_port[EXC_TYPES_COUNT];
    int i;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    if (exception_mask & ~EXC_MASK_ALL)
        return (KERN_INVALID_ARGUMENT);

    if (IP_VALID(new_port)) {
        switch (new_behavior) {

        case EXCEPTION_DEFAULT:
        case EXCEPTION_STATE:
        case EXCEPTION_STATE_IDENTITY:
            break;

        default:
            return (KERN_INVALID_ARGUMENT);
        }
    }

    /*
     * Check the validity of the thread_state_flavor by calling the
     * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
     * osfmk/mach/ARCHITECTURE/thread_status.h
     */
    if (!VALID_THREAD_STATE_FLAVOR(new_flavor))
        return (KERN_INVALID_ARGUMENT);

    thread_mtx_lock(thread);

    if (!thread->active) {
        thread_mtx_unlock(thread);

        return (KERN_FAILURE);
    }

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
        if (exception_mask & (1 << i)) {
            old_port[i] = thread->exc_actions[i].port;
            thread->exc_actions[i].port = ipc_port_copy_send(new_port);
            thread->exc_actions[i].behavior = new_behavior;
            thread->exc_actions[i].flavor = new_flavor;
        }
        else
            old_port[i] = IP_NULL;
    }

    thread_mtx_unlock(thread);

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
        if (IP_VALID(old_port[i]))
            ipc_port_release_send(old_port[i]);

    if (IP_VALID(new_port))     /* consume send right */
        ipc_port_release_send(new_port);

    return (KERN_SUCCESS);
}
kern_return_t
task_set_exception_ports(
    task_t                  task,
    exception_mask_t        exception_mask,
    ipc_port_t              new_port,
    exception_behavior_t    new_behavior,
    thread_state_flavor_t   new_flavor)
{
    ipc_port_t old_port[EXC_TYPES_COUNT];
    int i;

    if (task == TASK_NULL)
        return (KERN_INVALID_ARGUMENT);

    if (exception_mask & ~EXC_MASK_ALL)
        return (KERN_INVALID_ARGUMENT);

    if (IP_VALID(new_port)) {
        switch (new_behavior) {

        case EXCEPTION_DEFAULT:
        case EXCEPTION_STATE:
        case EXCEPTION_STATE_IDENTITY:
            break;

        default:
            return (KERN_INVALID_ARGUMENT);
        }
    }

    itk_lock(task);

    if (task->itk_self == IP_NULL) {
        itk_unlock(task);

        return (KERN_FAILURE);
    }

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
        if (exception_mask & (1 << i)) {
            old_port[i] = task->exc_actions[i].port;
            task->exc_actions[i].port =
                ipc_port_copy_send(new_port);
            task->exc_actions[i].behavior = new_behavior;
            task->exc_actions[i].flavor = new_flavor;
        }
        else
            old_port[i] = IP_NULL;
    }

    itk_unlock(task);

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
        if (IP_VALID(old_port[i]))
            ipc_port_release_send(old_port[i]);

    if (IP_VALID(new_port))     /* consume send right */
        ipc_port_release_send(new_port);

    return (KERN_SUCCESS);
}
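/*
 * Illustrative sketch (not part of the kernel build): a typical user-space
 * use of the routine above is installing an exception handler port for a
 * task.  The handler port here is a hypothetical receive right created by
 * the caller:
 *
 *      mach_port_t exc_port;
 *      mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &exc_port);
 *      mach_port_insert_right(mach_task_self(), exc_port, exc_port,
 *          MACH_MSG_TYPE_MAKE_SEND);
 *      kern_return_t kr = task_set_exception_ports(mach_task_self(),
 *          EXC_MASK_BAD_ACCESS, exc_port, EXCEPTION_DEFAULT,
 *          THREAD_STATE_NONE);
 */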
/*
 *  Routine:    thread/task_swap_exception_ports [kernel call]
 *  Purpose:
 *      Sets the thread/task exception port, flavor and
 *      behavior for the exception types specified by the
 *      masks.
 *
 *      The old ports, behavior and flavors are returned.
 *      Count specifies the array sizes on input and
 *      the number of returned ports etc. on output.  The
 *      arrays must be large enough to hold all the returned
 *      data; MIG returns an error otherwise.  The masks
 *      array specifies the corresponding exception type(s).
 *  Conditions:
 *      Nothing locked.  If successful, consumes
 *      the supplied send right.
 *      Returns up to [in] CountCnt elements.
 *  Returns:
 *      KERN_SUCCESS            Changed the special port.
 *      KERN_INVALID_ARGUMENT   The thread is null,
 *                              Illegal mask bit set.
 *                              Illegal exception behavior
 *      KERN_FAILURE            The thread is dead.
 */
kern_return_t
thread_swap_exception_ports(
    thread_t                    thread,
    exception_mask_t            exception_mask,
    ipc_port_t                  new_port,
    exception_behavior_t        new_behavior,
    thread_state_flavor_t       new_flavor,
    exception_mask_array_t      masks,
    mach_msg_type_number_t      *CountCnt,
    exception_port_array_t      ports,
    exception_behavior_array_t  behaviors,
    thread_state_flavor_array_t flavors)
{
    ipc_port_t old_port[EXC_TYPES_COUNT];
    unsigned int i, j, count;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    if (exception_mask & ~EXC_MASK_ALL)
        return (KERN_INVALID_ARGUMENT);

    if (IP_VALID(new_port)) {
        switch (new_behavior) {

        case EXCEPTION_DEFAULT:
        case EXCEPTION_STATE:
        case EXCEPTION_STATE_IDENTITY:
            break;

        default:
            return (KERN_INVALID_ARGUMENT);
        }
    }

    thread_mtx_lock(thread);

    if (!thread->active) {
        thread_mtx_unlock(thread);

        return (KERN_FAILURE);
    }

    count = 0;

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
        if (exception_mask & (1 << i)) {
            for (j = 0; j < count; ++j) {
                /*
                 * search for an identical entry, if found
                 * set corresponding mask for this exception.
                 */
                if (thread->exc_actions[i].port == ports[j] &&
                    thread->exc_actions[i].behavior == behaviors[j] &&
                    thread->exc_actions[i].flavor == flavors[j]) {
                    masks[j] |= (1 << i);
                    break;
                }
            }

            if (j == count) {
                masks[j] = (1 << i);
                ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);

                behaviors[j] = thread->exc_actions[i].behavior;
                flavors[j] = thread->exc_actions[i].flavor;
                ++count;
            }

            old_port[i] = thread->exc_actions[i].port;
            thread->exc_actions[i].port = ipc_port_copy_send(new_port);
            thread->exc_actions[i].behavior = new_behavior;
            thread->exc_actions[i].flavor = new_flavor;
            if (count > *CountCnt)
                break;
        }
        else
            old_port[i] = IP_NULL;
    }

    thread_mtx_unlock(thread);

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
        if (IP_VALID(old_port[i]))
            ipc_port_release_send(old_port[i]);

    if (IP_VALID(new_port))     /* consume send right */
        ipc_port_release_send(new_port);

    *CountCnt = count;

    return (KERN_SUCCESS);
}
kern_return_t
task_swap_exception_ports(
    task_t                      task,
    exception_mask_t            exception_mask,
    ipc_port_t                  new_port,
    exception_behavior_t        new_behavior,
    thread_state_flavor_t       new_flavor,
    exception_mask_array_t      masks,
    mach_msg_type_number_t      *CountCnt,
    exception_port_array_t      ports,
    exception_behavior_array_t  behaviors,
    thread_state_flavor_array_t flavors)
{
    ipc_port_t old_port[EXC_TYPES_COUNT];
    unsigned int i, j, count;

    if (task == TASK_NULL)
        return (KERN_INVALID_ARGUMENT);

    if (exception_mask & ~EXC_MASK_ALL)
        return (KERN_INVALID_ARGUMENT);

    if (IP_VALID(new_port)) {
        switch (new_behavior) {

        case EXCEPTION_DEFAULT:
        case EXCEPTION_STATE:
        case EXCEPTION_STATE_IDENTITY:
            break;

        default:
            return (KERN_INVALID_ARGUMENT);
        }
    }

    itk_lock(task);

    if (task->itk_self == IP_NULL) {
        itk_unlock(task);

        return (KERN_FAILURE);
    }

    count = 0;

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
        if (exception_mask & (1 << i)) {
            for (j = 0; j < count; j++) {
                /*
                 * search for an identical entry, if found
                 * set corresponding mask for this exception.
                 */
                if (task->exc_actions[i].port == ports[j] &&
                    task->exc_actions[i].behavior == behaviors[j] &&
                    task->exc_actions[i].flavor == flavors[j]) {
                    masks[j] |= (1 << i);
                    break;
                }
            }

            if (j == count) {
                masks[j] = (1 << i);
                ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
                behaviors[j] = task->exc_actions[i].behavior;
                flavors[j] = task->exc_actions[i].flavor;
                count++;
            }

            old_port[i] = task->exc_actions[i].port;
            task->exc_actions[i].port = ipc_port_copy_send(new_port);
            task->exc_actions[i].behavior = new_behavior;
            task->exc_actions[i].flavor = new_flavor;
            if (count > *CountCnt)
                break;
        }
        else
            old_port[i] = IP_NULL;
    }

    itk_unlock(task);

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++)
        if (IP_VALID(old_port[i]))
            ipc_port_release_send(old_port[i]);

    if (IP_VALID(new_port))     /* consume send right */
        ipc_port_release_send(new_port);

    *CountCnt = count;

    return (KERN_SUCCESS);
}
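/*
 * Illustrative sketch (not part of the kernel build): the swap form installs
 * a new handler while returning the previous ports/behaviors/flavors in one
 * call, which lets a caller restore them later.  A hedged fragment:
 *
 *      exception_mask_t masks[EXC_TYPES_COUNT];
 *      mach_port_t old_ports[EXC_TYPES_COUNT];
 *      exception_behavior_t behaviors[EXC_TYPES_COUNT];
 *      thread_state_flavor_t flavors[EXC_TYPES_COUNT];
 *      mach_msg_type_number_t count = EXC_TYPES_COUNT;
 *
 *      kern_return_t kr = task_swap_exception_ports(mach_task_self(),
 *          EXC_MASK_ALL, exc_port, EXCEPTION_DEFAULT, THREAD_STATE_NONE,
 *          masks, &count, old_ports, behaviors, flavors);
 *      // "exc_port" is a hypothetical send right; the first "count" entries
 *      // of the output arrays describe the handlers that were replaced.
 */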
/*
 *  Routine:    thread/task_get_exception_ports [kernel call]
 *  Purpose:
 *      Clones a send right for each of the thread/task's exception
 *      ports specified in the mask and returns the behaviour
 *      and flavor of said port.
 *
 *      Returns up to [in] CountCnt elements.
 *  Returns:
 *      KERN_SUCCESS            Extracted a send right.
 *      KERN_INVALID_ARGUMENT   The thread is null,
 *                              Invalid special port,
 *                              Illegal mask bit set.
 *      KERN_FAILURE            The thread is dead.
 */
kern_return_t
thread_get_exception_ports(
    thread_t                    thread,
    exception_mask_t            exception_mask,
    exception_mask_array_t      masks,
    mach_msg_type_number_t      *CountCnt,
    exception_port_array_t      ports,
    exception_behavior_array_t  behaviors,
    thread_state_flavor_array_t flavors)
{
    unsigned int i, j, count;

    if (thread == THREAD_NULL)
        return (KERN_INVALID_ARGUMENT);

    if (exception_mask & ~EXC_MASK_ALL)
        return (KERN_INVALID_ARGUMENT);

    thread_mtx_lock(thread);

    if (!thread->active) {
        thread_mtx_unlock(thread);

        return (KERN_FAILURE);
    }

    count = 0;

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
        if (exception_mask & (1 << i)) {
            for (j = 0; j < count; ++j) {
                /*
                 * search for an identical entry, if found
                 * set corresponding mask for this exception.
                 */
                if (thread->exc_actions[i].port == ports[j] &&
                    thread->exc_actions[i].behavior == behaviors[j] &&
                    thread->exc_actions[i].flavor == flavors[j]) {
                    masks[j] |= (1 << i);
                    break;
                }
            }

            if (j == count) {
                masks[j] = (1 << i);
                ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);
                behaviors[j] = thread->exc_actions[i].behavior;
                flavors[j] = thread->exc_actions[i].flavor;
                ++count;
                if (count >= *CountCnt)
                    break;
            }
        }
    }

    thread_mtx_unlock(thread);

    *CountCnt = count;

    return (KERN_SUCCESS);
}
kern_return_t
task_get_exception_ports(
    task_t                      task,
    exception_mask_t            exception_mask,
    exception_mask_array_t      masks,
    mach_msg_type_number_t      *CountCnt,
    exception_port_array_t      ports,
    exception_behavior_array_t  behaviors,
    thread_state_flavor_array_t flavors)
{
    unsigned int i, j, count;

    if (task == TASK_NULL)
        return (KERN_INVALID_ARGUMENT);

    if (exception_mask & ~EXC_MASK_ALL)
        return (KERN_INVALID_ARGUMENT);

    itk_lock(task);

    if (task->itk_self == IP_NULL) {
        itk_unlock(task);

        return (KERN_FAILURE);
    }

    count = 0;

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
        if (exception_mask & (1 << i)) {
            for (j = 0; j < count; ++j) {
                /*
                 * search for an identical entry, if found
                 * set corresponding mask for this exception.
                 */
                if (task->exc_actions[i].port == ports[j] &&
                    task->exc_actions[i].behavior == behaviors[j] &&
                    task->exc_actions[i].flavor == flavors[j]) {
                    masks[j] |= (1 << i);
                    break;
                }
            }

            if (j == count) {
                masks[j] = (1 << i);
                ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
                behaviors[j] = task->exc_actions[i].behavior;
                flavors[j] = task->exc_actions[i].flavor;
                ++count;
                if (count > *CountCnt)
                    break;
            }
        }
    }

    itk_unlock(task);

    *CountCnt = count;

    return (KERN_SUCCESS);
}
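/*
 * Illustrative sketch (not part of the kernel build): the get form is the
 * read-only variant of the swap routine above; it only clones send rights
 * for the currently installed handlers.  A hedged fragment:
 *
 *      exception_mask_t masks[EXC_TYPES_COUNT];
 *      mach_port_t ports[EXC_TYPES_COUNT];
 *      exception_behavior_t behaviors[EXC_TYPES_COUNT];
 *      thread_state_flavor_t flavors[EXC_TYPES_COUNT];
 *      mach_msg_type_number_t count = EXC_TYPES_COUNT;
 *
 *      kern_return_t kr = task_get_exception_ports(mach_task_self(),
 *          EXC_MASK_ALL, masks, &count, ports, behaviors, flavors);
 */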