/*
 * (Non-source residue removed: web-mirror navigation header for
 * osfmk/kern/ipc_tt.c from the xnu-3789.1.32 source drop.)
 */
1 /*
2 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58 * support for mandatory and extensible security protections. This notice
59 * is included in support of clause 2.2 (b) of the Apple Public License,
60 * Version 2.0.
61 */
62 /*
63 */
64
65 /*
66 * File: ipc_tt.c
67 * Purpose:
68 * Task and thread related IPC functions.
69 */
70
71 #include <mach/mach_types.h>
72 #include <mach/boolean.h>
73 #include <mach/kern_return.h>
74 #include <mach/mach_param.h>
75 #include <mach/task_special_ports.h>
76 #include <mach/thread_special_ports.h>
77 #include <mach/thread_status.h>
78 #include <mach/exception_types.h>
79 #include <mach/memory_object_types.h>
80 #include <mach/mach_traps.h>
81 #include <mach/task_server.h>
82 #include <mach/thread_act_server.h>
83 #include <mach/mach_host_server.h>
84 #include <mach/host_priv_server.h>
85 #include <mach/vm_map_server.h>
86
87 #include <kern/kern_types.h>
88 #include <kern/host.h>
89 #include <kern/ipc_kobject.h>
90 #include <kern/ipc_tt.h>
91 #include <kern/kalloc.h>
92 #include <kern/thread.h>
93 #include <kern/misc_protos.h>
94
95 #include <vm/vm_map.h>
96 #include <vm/vm_pageout.h>
97 #include <vm/vm_protos.h>
98
99 #include <security/mac_mach_internal.h>
100
101 /* forward declarations */
102 task_t convert_port_to_locked_task(ipc_port_t port);
103
104
105 /*
106 * Routine: ipc_task_init
107 * Purpose:
108 * Initialize a task's IPC state.
109 *
110 * If non-null, some state will be inherited from the parent.
111 * The parent must be appropriately initialized.
112 * Conditions:
113 * Nothing locked.
114 */
115
void
ipc_task_init(
	task_t		task,
	task_t		parent)
{
	ipc_space_t space;
	ipc_port_t kport;
	ipc_port_t nport;
	kern_return_t kr;
	int i;


	/* Create the task's port-name space, using the default table size. */
	kr = ipc_space_create(&ipc_table_entries[0], &space);
	if (kr != KERN_SUCCESS)
		panic("ipc_task_init");

	space->is_task = task;

	/* Kernel ports: the task control (self) port and the name port. */
	kport = ipc_port_alloc_kernel();
	if (kport == IP_NULL)
		panic("ipc_task_init");

	nport = ipc_port_alloc_kernel();
	if (nport == IP_NULL)
		panic("ipc_task_init");

	itk_lock_init(task);
	task->itk_self = kport;
	task->itk_nself = nport;
	task->itk_resume = IP_NULL; /* Lazily allocated on-demand */
	if (task_is_a_corpse_fork(task)) {
		/*
		 * No sender's notification for corpse would not
		 * work with a naked send right in kernel.
		 */
		task->itk_sself = IP_NULL;
	} else {
		/* User-visible self port starts as a naked send right to kport. */
		task->itk_sself = ipc_port_make_send(kport);
	}
	task->itk_debug_control = IP_NULL;
	task->itk_space = space;

	if (parent == TASK_NULL) {
		ipc_port_t port;

		/* No parent: exception ports start out null. */
		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			task->exc_actions[i].port = IP_NULL;
#if CONFIG_MACF
			mac_exc_action_label_init(task->exc_actions + i);
#endif
		}/* for */

		kr = host_get_host_port(host_priv_self(), &port);
		assert(kr == KERN_SUCCESS);
		task->itk_host = port;

		task->itk_bootstrap = IP_NULL;
		task->itk_seatbelt = IP_NULL;
		task->itk_gssd = IP_NULL;
		task->itk_task_access = IP_NULL;

		for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
			task->itk_registered[i] = IP_NULL;
	} else {
		/* Hold the parent's itk lock while copying its port rights. */
		itk_lock(parent);
		assert(parent->itk_self != IP_NULL);

		/* inherit registered ports */

		for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
			task->itk_registered[i] =
				ipc_port_copy_send(parent->itk_registered[i]);

		/* inherit exception and bootstrap ports */

		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			task->exc_actions[i].port =
				ipc_port_copy_send(parent->exc_actions[i].port);
			task->exc_actions[i].flavor =
				parent->exc_actions[i].flavor;
			task->exc_actions[i].behavior =
				parent->exc_actions[i].behavior;
			task->exc_actions[i].privileged =
				parent->exc_actions[i].privileged;
#if CONFIG_MACF
			mac_exc_action_label_inherit(parent->exc_actions + i, task->exc_actions + i);
#endif
		}/* for */
		task->itk_host =
			ipc_port_copy_send(parent->itk_host);

		task->itk_bootstrap =
			ipc_port_copy_send(parent->itk_bootstrap);

		task->itk_seatbelt =
			ipc_port_copy_send(parent->itk_seatbelt);

		task->itk_gssd =
			ipc_port_copy_send(parent->itk_gssd);

		task->itk_task_access =
			ipc_port_copy_send(parent->itk_task_access);

		itk_unlock(parent);
	}
}
222
223 /*
224 * Routine: ipc_task_enable
225 * Purpose:
226 * Enable a task for IPC access.
227 * Conditions:
228 * Nothing locked.
229 */
230
231 void
232 ipc_task_enable(
233 task_t task)
234 {
235 ipc_port_t kport;
236 ipc_port_t nport;
237
238 itk_lock(task);
239 kport = task->itk_self;
240 if (kport != IP_NULL)
241 ipc_kobject_set(kport, (ipc_kobject_t) task, IKOT_TASK);
242 nport = task->itk_nself;
243 if (nport != IP_NULL)
244 ipc_kobject_set(nport, (ipc_kobject_t) task, IKOT_TASK_NAME);
245 itk_unlock(task);
246 }
247
248 /*
249 * Routine: ipc_task_disable
250 * Purpose:
251 * Disable IPC access to a task.
252 * Conditions:
253 * Nothing locked.
254 */
255
void
ipc_task_disable(
	task_t		task)
{
	ipc_port_t kport;
	ipc_port_t nport;
	ipc_port_t rport;

	itk_lock(task);
	/* Detach the control and name ports so they no longer name this task. */
	kport = task->itk_self;
	if (kport != IP_NULL)
		ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
	nport = task->itk_nself;
	if (nport != IP_NULL)
		ipc_kobject_set(nport, IKO_NULL, IKOT_NONE);

	rport = task->itk_resume;
	if (rport != IP_NULL) {
		/*
		 * From this point onwards this task is no longer accepting
		 * resumptions.
		 *
		 * There are still outstanding suspensions on this task,
		 * even as it is being torn down. Disconnect the task
		 * from the rport, thereby "orphaning" the rport. The rport
		 * itself will go away only when the last suspension holder
		 * destroys his SO right to it -- when he either
		 * exits, or tries to actually use that last SO right to
		 * resume this (now non-existent) task.
		 */
		ipc_kobject_set(rport, IKO_NULL, IKOT_NONE);
	}
	itk_unlock(task);
}
290
291 /*
292 * Routine: ipc_task_terminate
293 * Purpose:
294 * Clean up and destroy a task's IPC state.
295 * Conditions:
296 * Nothing locked. The task must be suspended.
297 * (Or the current thread must be in the task.)
298 */
299
void
ipc_task_terminate(
	task_t		task)
{
	ipc_port_t kport;
	ipc_port_t nport;
	ipc_port_t rport;
	int i;

	itk_lock(task);
	kport = task->itk_self;

	if (kport == IP_NULL) {
		/* the task is already terminated (can this happen?) */
		itk_unlock(task);
		return;
	}
	/* Null itk_self first: it marks the task terminated, so a second
	   call is caught by the IP_NULL check above. */
	task->itk_self = IP_NULL;

	nport = task->itk_nself;
	assert(nport != IP_NULL);
	task->itk_nself = IP_NULL;

	rport = task->itk_resume;
	task->itk_resume = IP_NULL;

	itk_unlock(task);

	/* release the naked send rights */

	if (IP_VALID(task->itk_sself))
		ipc_port_release_send(task->itk_sself);

	/* Exception ports (and, if configured, their MACF labels). */
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (IP_VALID(task->exc_actions[i].port)) {
			ipc_port_release_send(task->exc_actions[i].port);
		}
#if CONFIG_MACF
		mac_exc_action_label_destroy(task->exc_actions + i);
#endif
	}

	if (IP_VALID(task->itk_host))
		ipc_port_release_send(task->itk_host);

	if (IP_VALID(task->itk_bootstrap))
		ipc_port_release_send(task->itk_bootstrap);

	if (IP_VALID(task->itk_seatbelt))
		ipc_port_release_send(task->itk_seatbelt);

	if (IP_VALID(task->itk_gssd))
		ipc_port_release_send(task->itk_gssd);

	if (IP_VALID(task->itk_task_access))
		ipc_port_release_send(task->itk_task_access);

	if (IP_VALID(task->itk_debug_control))
		ipc_port_release_send(task->itk_debug_control);

	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
		if (IP_VALID(task->itk_registered[i]))
			ipc_port_release_send(task->itk_registered[i]);

	/* destroy the kernel ports */
	ipc_port_dealloc_kernel(kport);
	ipc_port_dealloc_kernel(nport);
	if (rport != IP_NULL)
		ipc_port_dealloc_kernel(rport);

	itk_lock_destroy(task);
}
372
373 /*
374 * Routine: ipc_task_reset
375 * Purpose:
376 * Reset a task's IPC state to protect it when
377 * it enters an elevated security context. The
378 * task name port can remain the same - since
379 * it represents no specific privilege.
380 * Conditions:
381 * Nothing locked. The task must be suspended.
382 * (Or the current thread must be in the task.)
383 */
384
void
ipc_task_reset(
	task_t		task)
{
	ipc_port_t old_kport, new_kport;
	ipc_port_t old_sself;
	ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
	int i;

	/* Allocate the replacement control port before taking the itk lock. */
	new_kport = ipc_port_alloc_kernel();
	if (new_kport == IP_NULL)
		panic("ipc_task_reset");

	itk_lock(task);

	old_kport = task->itk_self;

	if (old_kport == IP_NULL) {
		/* the task is already terminated (can this happen?) */
		itk_unlock(task);
		ipc_port_dealloc_kernel(new_kport);
		return;
	}

	task->itk_self = new_kport;
	old_sself = task->itk_sself;
	task->itk_sself = ipc_port_make_send(new_kport);

	/* Set the old kport to IKOT_NONE and update the exec token while under the port lock */
	ip_lock(old_kport);
	ipc_kobject_set_atomically(old_kport, IKO_NULL, IKOT_NONE);
	task->exec_token += 1;
	ip_unlock(old_kport);

	ipc_kobject_set(new_kport, (ipc_kobject_t) task, IKOT_TASK);

	/* Collect non-privileged exception ports to release after unlock;
	   privileged ports and a pending corpse-notify port survive the reset. */
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		old_exc_actions[i] = IP_NULL;

		if (i == EXC_CORPSE_NOTIFY && task_corpse_pending_report(task)) {
			continue;
		}

		if (!task->exc_actions[i].privileged) {
#if CONFIG_MACF
			mac_exc_action_label_reset(task->exc_actions + i);
#endif
			old_exc_actions[i] = task->exc_actions[i].port;
			task->exc_actions[i].port = IP_NULL;
		}
	}/* for */

	if (IP_VALID(task->itk_debug_control)) {
		ipc_port_release_send(task->itk_debug_control);
	}
	task->itk_debug_control = IP_NULL;

	itk_unlock(task);

	/* release the naked send rights */

	if (IP_VALID(old_sself))
		ipc_port_release_send(old_sself);

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (IP_VALID(old_exc_actions[i])) {
			ipc_port_release_send(old_exc_actions[i]);
		}
	}/* for */

	/* destroy the kernel port */
	ipc_port_dealloc_kernel(old_kport);
}
458
459 /*
460 * Routine: ipc_thread_init
461 * Purpose:
462 * Initialize a thread's IPC state.
463 * Conditions:
464 * Nothing locked.
465 */
466
467 void
468 ipc_thread_init(
469 thread_t thread)
470 {
471 ipc_port_t kport;
472
473 kport = ipc_port_alloc_kernel();
474 if (kport == IP_NULL)
475 panic("ipc_thread_init");
476
477 thread->ith_self = kport;
478 thread->ith_sself = ipc_port_make_send(kport);
479 thread->exc_actions = NULL;
480
481 ipc_kobject_set(kport, (ipc_kobject_t)thread, IKOT_THREAD);
482
483 #if IMPORTANCE_INHERITANCE
484 thread->ith_assertions = 0;
485 #endif
486
487 ipc_kmsg_queue_init(&thread->ith_messages);
488
489 thread->ith_rpc_reply = IP_NULL;
490 }
491
492 void
493 ipc_thread_init_exc_actions(
494 thread_t thread)
495 {
496 assert(thread->exc_actions == NULL);
497
498 thread->exc_actions = kalloc(sizeof(struct exception_action) * EXC_TYPES_COUNT);
499 bzero(thread->exc_actions, sizeof(struct exception_action) * EXC_TYPES_COUNT);
500
501 #if CONFIG_MACF
502 for (size_t i = 0; i < EXC_TYPES_COUNT; ++i) {
503 mac_exc_action_label_init(thread->exc_actions + i);
504 }
505 #endif
506 }
507
508 void
509 ipc_thread_destroy_exc_actions(
510 thread_t thread)
511 {
512 if (thread->exc_actions != NULL) {
513 #if CONFIG_MACF
514 for (size_t i = 0; i < EXC_TYPES_COUNT; ++i) {
515 mac_exc_action_label_destroy(thread->exc_actions + i);
516 }
517 #endif
518
519 kfree(thread->exc_actions,
520 sizeof(struct exception_action) * EXC_TYPES_COUNT);
521 thread->exc_actions = NULL;
522 }
523 }
524
525 void
526 ipc_thread_disable(
527 thread_t thread)
528 {
529 ipc_port_t kport = thread->ith_self;
530
531 if (kport != IP_NULL)
532 ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
533 }
534
535 /*
536 * Routine: ipc_thread_terminate
537 * Purpose:
538 * Clean up and destroy a thread's IPC state.
539 * Conditions:
540 * Nothing locked.
541 */
542
void
ipc_thread_terminate(
	thread_t	thread)
{
	ipc_port_t	kport = thread->ith_self;

	if (kport != IP_NULL) {
		int			i;

		/* Drop the user-visible self send right, if any. */
		if (IP_VALID(thread->ith_sself))
			ipc_port_release_send(thread->ith_sself);

		thread->ith_sself = thread->ith_self = IP_NULL;

		/* Release exception-port send rights, then free the table. */
		if (thread->exc_actions != NULL) {
			for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
				if (IP_VALID(thread->exc_actions[i].port))
					ipc_port_release_send(thread->exc_actions[i].port);
			}
			ipc_thread_destroy_exc_actions(thread);
		}

		ipc_port_dealloc_kernel(kport);
	}

#if IMPORTANCE_INHERITANCE
	assert(thread->ith_assertions == 0);
#endif

	/* No messages may remain queued on a terminating thread. */
	assert(ipc_kmsg_queue_empty(&thread->ith_messages));

	if (thread->ith_rpc_reply != IP_NULL)
		ipc_port_dealloc_reply(thread->ith_rpc_reply);

	thread->ith_rpc_reply = IP_NULL;
}
579
580 /*
581 * Routine: ipc_thread_reset
582 * Purpose:
583 * Reset the IPC state for a given Mach thread when
584 * its task enters an elevated security context.
585 * Both the thread port and its exception ports have
586 * to be reset. Its RPC reply port cannot have any
587 * rights outstanding, so it should be fine.
588 * Conditions:
589 * Nothing locked.
590 */
591
592 void
593 ipc_thread_reset(
594 thread_t thread)
595 {
596 ipc_port_t old_kport, new_kport;
597 ipc_port_t old_sself;
598 ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
599 boolean_t has_old_exc_actions = FALSE;
600 int i;
601
602 new_kport = ipc_port_alloc_kernel();
603 if (new_kport == IP_NULL)
604 panic("ipc_task_reset");
605
606 thread_mtx_lock(thread);
607
608 old_kport = thread->ith_self;
609
610 if (old_kport == IP_NULL && thread->inspection == FALSE) {
611 /* the is already terminated (can this happen?) */
612 thread_mtx_unlock(thread);
613 ipc_port_dealloc_kernel(new_kport);
614 return;
615 }
616
617 thread->ith_self = new_kport;
618 old_sself = thread->ith_sself;
619 thread->ith_sself = ipc_port_make_send(new_kport);
620 if (old_kport != IP_NULL) {
621 ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
622 }
623 ipc_kobject_set(new_kport, (ipc_kobject_t) thread, IKOT_THREAD);
624
625 /*
626 * Only ports that were set by root-owned processes
627 * (privileged ports) should survive
628 */
629 if (thread->exc_actions != NULL) {
630 has_old_exc_actions = TRUE;
631 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
632 if (thread->exc_actions[i].privileged) {
633 old_exc_actions[i] = IP_NULL;
634 } else {
635 #if CONFIG_MACF
636 mac_exc_action_label_reset(thread->exc_actions + i);
637 #endif
638 old_exc_actions[i] = thread->exc_actions[i].port;
639 thread->exc_actions[i].port = IP_NULL;
640 }
641 }
642 }
643
644 thread_mtx_unlock(thread);
645
646 /* release the naked send rights */
647
648 if (IP_VALID(old_sself))
649 ipc_port_release_send(old_sself);
650
651 if (has_old_exc_actions) {
652 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
653 ipc_port_release_send(old_exc_actions[i]);
654 }
655 }
656
657 /* destroy the kernel port */
658 if (old_kport != IP_NULL) {
659 ipc_port_dealloc_kernel(old_kport);
660 }
661 }
662
663 /*
664 * Routine: retrieve_task_self_fast
665 * Purpose:
666 * Optimized version of retrieve_task_self,
667 * that only works for the current task.
668 *
669 * Return a send right (possibly null/dead)
670 * for the task's user-visible self port.
671 * Conditions:
672 * Nothing locked.
673 */
674
ipc_port_t
retrieve_task_self_fast(
	task_t		task)
{
	ipc_port_t port;

	assert(task == current_task());

	itk_lock(task);
	assert(task->itk_self != IP_NULL);

	if ((port = task->itk_sself) == task->itk_self) {
		/* no interposing */

		/* Create a send right by hand: take a port reference and
		   bump the send-right count while holding the port lock. */
		ip_lock(port);
		assert(ip_active(port));
		ip_reference(port);
		port->ip_srights++;
		ip_unlock(port);
	} else
		/* itk_sself was interposed (or is null/dead): copy it. */
		port = ipc_port_copy_send(port);
	itk_unlock(task);

	return port;
}
700
701 /*
702 * Routine: retrieve_thread_self_fast
703 * Purpose:
704 * Return a send right (possibly null/dead)
705 * for the thread's user-visible self port.
706 *
707 * Only works for the current thread.
708 *
709 * Conditions:
710 * Nothing locked.
711 */
712
ipc_port_t
retrieve_thread_self_fast(
	thread_t		thread)
{
	ipc_port_t port;

	assert(thread == current_thread());

	thread_mtx_lock(thread);

	assert(thread->ith_self != IP_NULL);

	if ((port = thread->ith_sself) == thread->ith_self) {
		/* no interposing */

		/* Create a send right by hand: take a port reference and
		   bump the send-right count while holding the port lock. */
		ip_lock(port);
		assert(ip_active(port));
		ip_reference(port);
		port->ip_srights++;
		ip_unlock(port);
	}
	else
		/* ith_sself was interposed (or is null/dead): copy it. */
		port = ipc_port_copy_send(port);

	thread_mtx_unlock(thread);

	return port;
}
741
742 /*
743 * Routine: task_self_trap [mach trap]
744 * Purpose:
745 * Give the caller send rights for his own task port.
746 * Conditions:
747 * Nothing locked.
748 * Returns:
749 * MACH_PORT_NULL if there are any resource failures
750 * or other errors.
751 */
752
753 mach_port_name_t
754 task_self_trap(
755 __unused struct task_self_trap_args *args)
756 {
757 task_t task = current_task();
758 ipc_port_t sright;
759 mach_port_name_t name;
760
761 sright = retrieve_task_self_fast(task);
762 name = ipc_port_copyout_send(sright, task->itk_space);
763 return name;
764 }
765
766 /*
767 * Routine: thread_self_trap [mach trap]
768 * Purpose:
769 * Give the caller send rights for his own thread port.
770 * Conditions:
771 * Nothing locked.
772 * Returns:
773 * MACH_PORT_NULL if there are any resource failures
774 * or other errors.
775 */
776
777 mach_port_name_t
778 thread_self_trap(
779 __unused struct thread_self_trap_args *args)
780 {
781 thread_t thread = current_thread();
782 task_t task = thread->task;
783 ipc_port_t sright;
784 mach_port_name_t name;
785
786 sright = retrieve_thread_self_fast(thread);
787 name = ipc_port_copyout_send(sright, task->itk_space);
788 return name;
789
790 }
791
792 /*
793 * Routine: mach_reply_port [mach trap]
794 * Purpose:
795 * Allocate a port for the caller.
796 * Conditions:
797 * Nothing locked.
798 * Returns:
799 * MACH_PORT_NULL if there are any resource failures
800 * or other errors.
801 */
802
803 mach_port_name_t
804 mach_reply_port(
805 __unused struct mach_reply_port_args *args)
806 {
807 ipc_port_t port;
808 mach_port_name_t name;
809 kern_return_t kr;
810
811 kr = ipc_port_alloc(current_task()->itk_space, &name, &port);
812 if (kr == KERN_SUCCESS)
813 ip_unlock(port);
814 else
815 name = MACH_PORT_NULL;
816 return name;
817 }
818
819 /*
820 * Routine: thread_get_special_port [kernel call]
821 * Purpose:
822 * Clones a send right for one of the thread's
823 * special ports.
824 * Conditions:
825 * Nothing locked.
826 * Returns:
827 * KERN_SUCCESS Extracted a send right.
828 * KERN_INVALID_ARGUMENT The thread is null.
829 * KERN_FAILURE The thread is dead.
830 * KERN_INVALID_ARGUMENT Invalid special port.
831 */
832
833 kern_return_t
834 thread_get_special_port(
835 thread_t thread,
836 int which,
837 ipc_port_t *portp)
838 {
839 kern_return_t result = KERN_SUCCESS;
840 ipc_port_t *whichp;
841
842 if (thread == THREAD_NULL)
843 return (KERN_INVALID_ARGUMENT);
844
845 switch (which) {
846
847 case THREAD_KERNEL_PORT:
848 whichp = &thread->ith_sself;
849 break;
850
851 default:
852 return (KERN_INVALID_ARGUMENT);
853 }
854
855 thread_mtx_lock(thread);
856
857 if (thread->active)
858 *portp = ipc_port_copy_send(*whichp);
859 else
860 result = KERN_FAILURE;
861
862 thread_mtx_unlock(thread);
863
864 return (result);
865 }
866
867 /*
868 * Routine: thread_set_special_port [kernel call]
869 * Purpose:
870 * Changes one of the thread's special ports,
871 * setting it to the supplied send right.
872 * Conditions:
873 * Nothing locked. If successful, consumes
874 * the supplied send right.
875 * Returns:
876 * KERN_SUCCESS Changed the special port.
877 * KERN_INVALID_ARGUMENT The thread is null.
878 * KERN_FAILURE The thread is dead.
879 * KERN_INVALID_ARGUMENT Invalid special port.
880 */
881
882 kern_return_t
883 thread_set_special_port(
884 thread_t thread,
885 int which,
886 ipc_port_t port)
887 {
888 kern_return_t result = KERN_SUCCESS;
889 ipc_port_t *whichp, old = IP_NULL;
890
891 if (thread == THREAD_NULL)
892 return (KERN_INVALID_ARGUMENT);
893
894 switch (which) {
895
896 case THREAD_KERNEL_PORT:
897 whichp = &thread->ith_sself;
898 break;
899
900 default:
901 return (KERN_INVALID_ARGUMENT);
902 }
903
904 thread_mtx_lock(thread);
905
906 if (thread->active) {
907 old = *whichp;
908 *whichp = port;
909 }
910 else
911 result = KERN_FAILURE;
912
913 thread_mtx_unlock(thread);
914
915 if (IP_VALID(old))
916 ipc_port_release_send(old);
917
918 return (result);
919 }
920
921 /*
922 * Routine: task_get_special_port [kernel call]
923 * Purpose:
924 * Clones a send right for one of the task's
925 * special ports.
926 * Conditions:
927 * Nothing locked.
928 * Returns:
929 * KERN_SUCCESS Extracted a send right.
930 * KERN_INVALID_ARGUMENT The task is null.
931 * KERN_FAILURE The task/space is dead.
932 * KERN_INVALID_ARGUMENT Invalid special port.
933 */
934
935 kern_return_t
936 task_get_special_port(
937 task_t task,
938 int which,
939 ipc_port_t *portp)
940 {
941 ipc_port_t port;
942
943 if (task == TASK_NULL)
944 return KERN_INVALID_ARGUMENT;
945
946 itk_lock(task);
947 if (task->itk_self == IP_NULL) {
948 itk_unlock(task);
949 return KERN_FAILURE;
950 }
951
952 switch (which) {
953 case TASK_KERNEL_PORT:
954 port = ipc_port_copy_send(task->itk_sself);
955 break;
956
957 case TASK_NAME_PORT:
958 port = ipc_port_make_send(task->itk_nself);
959 break;
960
961 case TASK_HOST_PORT:
962 port = ipc_port_copy_send(task->itk_host);
963 break;
964
965 case TASK_BOOTSTRAP_PORT:
966 port = ipc_port_copy_send(task->itk_bootstrap);
967 break;
968
969 case TASK_SEATBELT_PORT:
970 port = ipc_port_copy_send(task->itk_seatbelt);
971 break;
972
973 case TASK_ACCESS_PORT:
974 port = ipc_port_copy_send(task->itk_task_access);
975 break;
976
977 case TASK_DEBUG_CONTROL_PORT:
978 port = ipc_port_copy_send(task->itk_debug_control);
979 break;
980
981 default:
982 itk_unlock(task);
983 return KERN_INVALID_ARGUMENT;
984 }
985 itk_unlock(task);
986
987 *portp = port;
988 return KERN_SUCCESS;
989 }
990
991 /*
992 * Routine: task_set_special_port [kernel call]
993 * Purpose:
994 * Changes one of the task's special ports,
995 * setting it to the supplied send right.
996 * Conditions:
997 * Nothing locked. If successful, consumes
998 * the supplied send right.
999 * Returns:
1000 * KERN_SUCCESS Changed the special port.
1001 * KERN_INVALID_ARGUMENT The task is null.
1002 * KERN_FAILURE The task/space is dead.
1003 * KERN_INVALID_ARGUMENT Invalid special port.
1004 * KERN_NO_ACCESS Attempted overwrite of seatbelt port.
1005 */
1006
kern_return_t
task_set_special_port(
	task_t		task,
	int		which,
	ipc_port_t	port)
{
	ipc_port_t *whichp;
	ipc_port_t old;

	if (task == TASK_NULL)
		return KERN_INVALID_ARGUMENT;

	/* Map the selector to the task field holding the right. */
	switch (which) {
	case TASK_KERNEL_PORT:
		whichp = &task->itk_sself;
		break;

	case TASK_HOST_PORT:
		whichp = &task->itk_host;
		break;

	case TASK_BOOTSTRAP_PORT:
		whichp = &task->itk_bootstrap;
		break;

	case TASK_SEATBELT_PORT:
		whichp = &task->itk_seatbelt;
		break;

	case TASK_ACCESS_PORT:
		whichp = &task->itk_task_access;
		break;

	case TASK_DEBUG_CONTROL_PORT:
		whichp = &task->itk_debug_control;
		break;

	default:
		return KERN_INVALID_ARGUMENT;
	}/* switch */

	itk_lock(task);
	if (task->itk_self == IP_NULL) {
		/* task has terminated */
		itk_unlock(task);
		return KERN_FAILURE;
	}

	/* do not allow overwrite of seatbelt or task access ports */
	if ((TASK_SEATBELT_PORT == which || TASK_ACCESS_PORT == which)
		&& IP_VALID(*whichp)) {
		itk_unlock(task);
		return KERN_NO_ACCESS;
	}

	/* Consume the supplied right; release the displaced one after unlock. */
	old = *whichp;
	*whichp = port;
	itk_unlock(task);

	if (IP_VALID(old))
		ipc_port_release_send(old);
	return KERN_SUCCESS;
}
1069
1070
1071 /*
1072 * Routine: mach_ports_register [kernel call]
1073 * Purpose:
1074 * Stash a handful of port send rights in the task.
1075 * Child tasks will inherit these rights, but they
1076 * must use mach_ports_lookup to acquire them.
1077 *
1078 * The rights are supplied in a (wired) kalloc'd segment.
1079 * Rights which aren't supplied are assumed to be null.
1080 * Conditions:
1081 * Nothing locked. If successful, consumes
1082 * the supplied rights and memory.
1083 * Returns:
1084 * KERN_SUCCESS Stashed the port rights.
1085 * KERN_INVALID_ARGUMENT The task is null.
1086 * KERN_INVALID_ARGUMENT The task is dead.
1087 * KERN_INVALID_ARGUMENT The memory param is null.
1088 * KERN_INVALID_ARGUMENT Too many port rights supplied.
1089 */
1090
kern_return_t
mach_ports_register(
	task_t			task,
	mach_port_array_t	memory,
	mach_msg_type_number_t	portsCnt)
{
	ipc_port_t ports[TASK_PORT_REGISTER_MAX];
	unsigned int i;

	if ((task == TASK_NULL) ||
	    (portsCnt > TASK_PORT_REGISTER_MAX) ||
	    (portsCnt && memory == NULL))
		return KERN_INVALID_ARGUMENT;

	/*
	 *	Pad the port rights with nulls.
	 */

	for (i = 0; i < portsCnt; i++)
		ports[i] = memory[i];
	for (; i < TASK_PORT_REGISTER_MAX; i++)
		ports[i] = IP_NULL;

	itk_lock(task);
	if (task->itk_self == IP_NULL) {
		/* task has terminated; rights and memory stay with caller */
		itk_unlock(task);
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 *	Replace the old send rights with the new.
	 *	Release the old rights after unlocking.
	 */

	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
		ipc_port_t old;

		old = task->itk_registered[i];
		task->itk_registered[i] = ports[i];
		ports[i] = old;		/* reuse the local array for the old rights */
	}

	itk_unlock(task);

	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
		if (IP_VALID(ports[i]))
			ipc_port_release_send(ports[i]);

	/*
	 *	Now that the operation is known to be successful,
	 *	we can free the memory.
	 */

	if (portsCnt != 0)
		kfree(memory,
		      (vm_size_t) (portsCnt * sizeof(mach_port_t)));

	return KERN_SUCCESS;
}
1150
1151 /*
1152 * Routine: mach_ports_lookup [kernel call]
1153 * Purpose:
1154 * Retrieves (clones) the stashed port send rights.
1155 * Conditions:
1156 * Nothing locked. If successful, the caller gets
1157 * rights and memory.
1158 * Returns:
1159 * KERN_SUCCESS Retrieved the send rights.
1160 * KERN_INVALID_ARGUMENT The task is null.
1161 * KERN_INVALID_ARGUMENT The task is dead.
1162 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
1163 */
1164
kern_return_t
mach_ports_lookup(
	task_t			task,
	mach_port_array_t	*portsp,
	mach_msg_type_number_t	*portsCnt)
{
	void  *memory;
	vm_size_t size;
	ipc_port_t *ports;
	int i;

	if (task == TASK_NULL)
		return KERN_INVALID_ARGUMENT;

	size = (vm_size_t) (TASK_PORT_REGISTER_MAX * sizeof(ipc_port_t));

	/* Allocate the result array before taking the task lock. */
	memory = kalloc(size);
	if (memory == 0)
		return KERN_RESOURCE_SHORTAGE;

	itk_lock(task);
	if (task->itk_self == IP_NULL) {
		/* task has terminated; back out the allocation */
		itk_unlock(task);

		kfree(memory, size);
		return KERN_INVALID_ARGUMENT;
	}

	ports = (ipc_port_t *) memory;

	/*
	 *	Clone port rights.  Because kalloc'd memory
	 *	is wired, we won't fault while holding the task lock.
	 */

	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
		ports[i] = ipc_port_copy_send(task->itk_registered[i]);

	itk_unlock(task);

	/* Ownership of the array and the cloned rights passes to the caller. */
	*portsp = (mach_port_array_t) ports;
	*portsCnt = TASK_PORT_REGISTER_MAX;
	return KERN_SUCCESS;
}
1209
1210 /*
1211 * Routine: convert_port_to_locked_task
1212 * Purpose:
1213 * Internal helper routine to convert from a port to a locked
1214 * task. Used by several routines that try to convert from a
1215 * task port to a reference on some task related object.
1216 * Conditions:
1217 * Nothing locked, blocking OK.
1218 */
task_t
convert_port_to_locked_task(ipc_port_t port)
{
	int try_failed_count = 0;

	while (IP_VALID(port)) {
		task_t task;

		ip_lock(port);
		if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK)) {
			ip_unlock(port);
			return TASK_NULL;
		}
		task = (task_t) port->ip_kobject;
		assert(task != TASK_NULL);

		/*
		 * Normal lock ordering puts task_lock() before ip_lock().
		 * Attempt out-of-order locking here.
		 */
		if (task_lock_try(task)) {
			ip_unlock(port);
			return(task);
		}
		try_failed_count++;

		/* Contended: drop the port lock, back off, then retry. */
		ip_unlock(port);
		mutex_pause(try_failed_count);
	}
	return TASK_NULL;
}
1250
1251 /*
1252 * Routine: convert_port_to_task
1253 * Purpose:
1254 * Convert from a port to a task.
1255 * Doesn't consume the port ref; produces a task ref,
1256 * which may be null.
1257 * Conditions:
1258 * Nothing locked.
1259 */
1260 task_t
1261 convert_port_to_task(
1262 ipc_port_t port)
1263 {
1264 return convert_port_to_task_with_exec_token(port, NULL);
1265 }
1266
1267 /*
1268 * Routine: convert_port_to_task_with_exec_token
1269 * Purpose:
1270 * Convert from a port to a task and return
1271 * the exec token stored in the task.
1272 * Doesn't consume the port ref; produces a task ref,
1273 * which may be null.
1274 * Conditions:
1275 * Nothing locked.
1276 */
1277 task_t
1278 convert_port_to_task_with_exec_token(
1279 ipc_port_t port,
1280 uint32_t *exec_token)
1281 {
1282 task_t task = TASK_NULL;
1283
1284 if (IP_VALID(port)) {
1285 ip_lock(port);
1286
1287 if ( ip_active(port) &&
1288 ip_kotype(port) == IKOT_TASK ) {
1289 task = (task_t)port->ip_kobject;
1290 assert(task != TASK_NULL);
1291
1292 if (exec_token) {
1293 *exec_token = task->exec_token;
1294 }
1295 task_reference_internal(task);
1296 }
1297
1298 ip_unlock(port);
1299 }
1300
1301 return (task);
1302 }
1303
1304 /*
1305 * Routine: convert_port_to_task_name
1306 * Purpose:
1307 * Convert from a port to a task name.
1308 * Doesn't consume the port ref; produces a task name ref,
1309 * which may be null.
1310 * Conditions:
1311 * Nothing locked.
1312 */
1313 task_name_t
1314 convert_port_to_task_name(
1315 ipc_port_t port)
1316 {
1317 task_name_t task = TASK_NULL;
1318
1319 if (IP_VALID(port)) {
1320 ip_lock(port);
1321
1322 if ( ip_active(port) &&
1323 (ip_kotype(port) == IKOT_TASK ||
1324 ip_kotype(port) == IKOT_TASK_NAME)) {
1325 task = (task_name_t)port->ip_kobject;
1326 assert(task != TASK_NAME_NULL);
1327
1328 task_reference_internal(task);
1329 }
1330
1331 ip_unlock(port);
1332 }
1333
1334 return (task);
1335 }
1336
1337 /*
1338 * Routine: convert_port_to_task_suspension_token
1339 * Purpose:
1340 * Convert from a port to a task suspension token.
1341 * Doesn't consume the port ref; produces a suspension token ref,
1342 * which may be null.
1343 * Conditions:
1344 * Nothing locked.
1345 */
1346 task_suspension_token_t
1347 convert_port_to_task_suspension_token(
1348 ipc_port_t port)
1349 {
1350 task_suspension_token_t task = TASK_NULL;
1351
1352 if (IP_VALID(port)) {
1353 ip_lock(port);
1354
1355 if ( ip_active(port) &&
1356 ip_kotype(port) == IKOT_TASK_RESUME) {
1357 task = (task_suspension_token_t)port->ip_kobject;
1358 assert(task != TASK_NULL);
1359
1360 task_reference_internal(task);
1361 }
1362
1363 ip_unlock(port);
1364 }
1365
1366 return (task);
1367 }
1368
1369 /*
1370 * Routine: convert_port_to_space
1371 * Purpose:
1372 * Convert from a port to a space.
1373 * Doesn't consume the port ref; produces a space ref,
1374 * which may be null.
1375 * Conditions:
1376 * Nothing locked.
1377 */
1378 ipc_space_t
1379 convert_port_to_space(
1380 ipc_port_t port)
1381 {
1382 ipc_space_t space;
1383 task_t task;
1384
1385 task = convert_port_to_locked_task(port);
1386
1387 if (task == TASK_NULL)
1388 return IPC_SPACE_NULL;
1389
1390 if (!task->active) {
1391 task_unlock(task);
1392 return IPC_SPACE_NULL;
1393 }
1394
1395 space = task->itk_space;
1396 is_reference(space);
1397 task_unlock(task);
1398 return (space);
1399 }
1400
1401 /*
1402 * Routine: convert_port_to_map
1403 * Purpose:
1404 * Convert from a port to a map.
1405 * Doesn't consume the port ref; produces a map ref,
1406 * which may be null.
1407 * Conditions:
1408 * Nothing locked.
1409 */
1410
1411 vm_map_t
1412 convert_port_to_map(
1413 ipc_port_t port)
1414 {
1415 task_t task;
1416 vm_map_t map;
1417
1418 task = convert_port_to_locked_task(port);
1419
1420 if (task == TASK_NULL)
1421 return VM_MAP_NULL;
1422
1423 if (!task->active) {
1424 task_unlock(task);
1425 return VM_MAP_NULL;
1426 }
1427
1428 map = task->map;
1429 vm_map_reference_swap(map);
1430 task_unlock(task);
1431 return map;
1432 }
1433
1434
1435 /*
1436 * Routine: convert_port_to_thread
1437 * Purpose:
1438 * Convert from a port to a thread.
1439 * Doesn't consume the port ref; produces an thread ref,
1440 * which may be null.
1441 * Conditions:
1442 * Nothing locked.
1443 */
1444
1445 thread_t
1446 convert_port_to_thread(
1447 ipc_port_t port)
1448 {
1449 thread_t thread = THREAD_NULL;
1450
1451 if (IP_VALID(port)) {
1452 ip_lock(port);
1453
1454 if ( ip_active(port) &&
1455 ip_kotype(port) == IKOT_THREAD ) {
1456 thread = (thread_t)port->ip_kobject;
1457 assert(thread != THREAD_NULL);
1458
1459 thread_reference_internal(thread);
1460 }
1461
1462 ip_unlock(port);
1463 }
1464
1465 return (thread);
1466 }
1467
1468 /*
1469 * Routine: port_name_to_thread
1470 * Purpose:
1471 * Convert from a port name to an thread reference
1472 * A name of MACH_PORT_NULL is valid for the null thread.
1473 * Conditions:
1474 * Nothing locked.
1475 *
1476 * TODO: Could this be faster if it were ipc_port_translate_send based, like thread_switch?
1477 * We could avoid extra lock/unlock and extra ref operations on the port.
1478 */
1479 thread_t
1480 port_name_to_thread(
1481 mach_port_name_t name)
1482 {
1483 thread_t thread = THREAD_NULL;
1484 ipc_port_t kport;
1485
1486 if (MACH_PORT_VALID(name)) {
1487 if (ipc_object_copyin(current_space(), name,
1488 MACH_MSG_TYPE_COPY_SEND,
1489 (ipc_object_t *)&kport) != KERN_SUCCESS)
1490 return (THREAD_NULL);
1491
1492 thread = convert_port_to_thread(kport);
1493
1494 if (IP_VALID(kport))
1495 ipc_port_release_send(kport);
1496 }
1497
1498 return (thread);
1499 }
1500
1501 task_t
1502 port_name_to_task(
1503 mach_port_name_t name)
1504 {
1505 ipc_port_t kern_port;
1506 kern_return_t kr;
1507 task_t task = TASK_NULL;
1508
1509 if (MACH_PORT_VALID(name)) {
1510 kr = ipc_object_copyin(current_space(), name,
1511 MACH_MSG_TYPE_COPY_SEND,
1512 (ipc_object_t *) &kern_port);
1513 if (kr != KERN_SUCCESS)
1514 return TASK_NULL;
1515
1516 task = convert_port_to_task(kern_port);
1517
1518 if (IP_VALID(kern_port))
1519 ipc_port_release_send(kern_port);
1520 }
1521 return task;
1522 }
1523
1524 /*
1525 * Routine: port_name_to_host
1526 * Purpose:
1527 * Convert from a port name to a host pointer.
1528 * NOTE: This does _not_ return a +1 reference to the host_t
1529 * Conditions:
1530 * Nothing locked.
1531 */
1532 host_t
1533 port_name_to_host(
1534 mach_port_name_t name)
1535 {
1536
1537 host_t host = HOST_NULL;
1538 kern_return_t kr;
1539 ipc_port_t port;
1540
1541 if (MACH_PORT_VALID(name)) {
1542 kr = ipc_port_translate_send(current_space(), name, &port);
1543 if (kr == KERN_SUCCESS) {
1544 host = convert_port_to_host(port);
1545 ip_unlock(port);
1546 }
1547 }
1548 return host;
1549 }
1550
1551 /*
1552 * Routine: convert_task_to_port
1553 * Purpose:
1554 * Convert from a task to a port.
1555 * Consumes a task ref; produces a naked send right
1556 * which may be invalid.
1557 * Conditions:
1558 * Nothing locked.
1559 */
1560
1561 ipc_port_t
1562 convert_task_to_port(
1563 task_t task)
1564 {
1565 ipc_port_t port;
1566
1567 itk_lock(task);
1568 if (task->itk_self != IP_NULL)
1569 port = ipc_port_make_send(task->itk_self);
1570 else
1571 port = IP_NULL;
1572 itk_unlock(task);
1573
1574 task_deallocate(task);
1575 return port;
1576 }
1577
1578 /*
1579 * Routine: convert_task_suspend_token_to_port
1580 * Purpose:
1581 * Convert from a task suspension token to a port.
1582 * Consumes a task suspension token ref; produces a naked send-once right
1583 * which may be invalid.
1584 * Conditions:
1585 * Nothing locked.
1586 */
ipc_port_t
convert_task_suspension_token_to_port(
	task_suspension_token_t		task)
{
	ipc_port_t port;

	task_lock(task);
	if (task->active) {
		/* Lazily create the resume port the first time it is needed. */
		if (task->itk_resume == IP_NULL) {
			task->itk_resume = ipc_port_alloc_kernel();
			if (!IP_VALID(task->itk_resume)) {
				/* Kernel port allocation failure here is unrecoverable. */
				panic("failed to create resume port");
			}

			ipc_kobject_set(task->itk_resume, (ipc_kobject_t) task, IKOT_TASK_RESUME);
		}

		/*
		 * Create a send-once right for each instance of a direct user-called
		 * task_suspend2 call. Each time one of these send-once rights is abandoned,
		 * the notification handler will resume the target task.
		 */
		port = ipc_port_make_sonce(task->itk_resume);
		assert(IP_VALID(port));
	} else {
		/* Task is no longer active: hand back no port. */
		port = IP_NULL;
	}

	task_unlock(task);
	/* Consume the caller's suspension-token reference. */
	task_suspension_token_deallocate(task);

	return port;
}
1620
1621
1622 /*
1623 * Routine: convert_task_name_to_port
1624 * Purpose:
1625 * Convert from a task name ref to a port.
1626 * Consumes a task name ref; produces a naked send right
1627 * which may be invalid.
1628 * Conditions:
1629 * Nothing locked.
1630 */
1631
1632 ipc_port_t
1633 convert_task_name_to_port(
1634 task_name_t task_name)
1635 {
1636 ipc_port_t port;
1637
1638 itk_lock(task_name);
1639 if (task_name->itk_nself != IP_NULL)
1640 port = ipc_port_make_send(task_name->itk_nself);
1641 else
1642 port = IP_NULL;
1643 itk_unlock(task_name);
1644
1645 task_name_deallocate(task_name);
1646 return port;
1647 }
1648
1649 /*
1650 * Routine: convert_thread_to_port
1651 * Purpose:
1652 * Convert from a thread to a port.
1653 * Consumes an thread ref; produces a naked send right
1654 * which may be invalid.
1655 * Conditions:
1656 * Nothing locked.
1657 */
1658
1659 ipc_port_t
1660 convert_thread_to_port(
1661 thread_t thread)
1662 {
1663 ipc_port_t port;
1664
1665 thread_mtx_lock(thread);
1666
1667 if (thread->ith_self != IP_NULL)
1668 port = ipc_port_make_send(thread->ith_self);
1669 else
1670 port = IP_NULL;
1671
1672 thread_mtx_unlock(thread);
1673
1674 thread_deallocate(thread);
1675
1676 return (port);
1677 }
1678
1679 /*
1680 * Routine: space_deallocate
1681 * Purpose:
1682 * Deallocate a space ref produced by convert_port_to_space.
1683 * Conditions:
1684 * Nothing locked.
1685 */
1686
1687 void
1688 space_deallocate(
1689 ipc_space_t space)
1690 {
1691 if (space != IS_NULL)
1692 is_release(space);
1693 }
1694
1695 /*
1696 * Routine: thread/task_set_exception_ports [kernel call]
1697 * Purpose:
1698 * Sets the thread/task exception port, flavor and
1699 * behavior for the exception types specified by the mask.
1700 * There will be one send right per exception per valid
1701 * port.
1702 * Conditions:
1703 * Nothing locked. If successful, consumes
1704 * the supplied send right.
1705 * Returns:
1706 * KERN_SUCCESS Changed the special port.
1707 * KERN_INVALID_ARGUMENT The thread is null,
1708 * Illegal mask bit set.
1709 * Illegal exception behavior
1710 * KERN_FAILURE The thread is dead.
1711 */
1712
1713 kern_return_t
1714 thread_set_exception_ports(
1715 thread_t thread,
1716 exception_mask_t exception_mask,
1717 ipc_port_t new_port,
1718 exception_behavior_t new_behavior,
1719 thread_state_flavor_t new_flavor)
1720 {
1721 ipc_port_t old_port[EXC_TYPES_COUNT];
1722 boolean_t privileged = current_task()->sec_token.val[0] == 0;
1723 register int i;
1724
1725 if (thread == THREAD_NULL)
1726 return (KERN_INVALID_ARGUMENT);
1727
1728 if (exception_mask & ~EXC_MASK_VALID)
1729 return (KERN_INVALID_ARGUMENT);
1730
1731 if (IP_VALID(new_port)) {
1732 switch (new_behavior & ~MACH_EXCEPTION_CODES) {
1733
1734 case EXCEPTION_DEFAULT:
1735 case EXCEPTION_STATE:
1736 case EXCEPTION_STATE_IDENTITY:
1737 break;
1738
1739 default:
1740 return (KERN_INVALID_ARGUMENT);
1741 }
1742 }
1743
1744 /*
1745 * Check the validity of the thread_state_flavor by calling the
1746 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
1747 * osfmk/mach/ARCHITECTURE/thread_status.h
1748 */
1749 if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor))
1750 return (KERN_INVALID_ARGUMENT);
1751
1752 thread_mtx_lock(thread);
1753
1754 if (!thread->active) {
1755 thread_mtx_unlock(thread);
1756
1757 return (KERN_FAILURE);
1758 }
1759
1760 if (thread->exc_actions == NULL) {
1761 ipc_thread_init_exc_actions(thread);
1762 }
1763 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1764 if ((exception_mask & (1 << i))
1765 #if CONFIG_MACF
1766 && mac_exc_action_label_update(current_task(), thread->exc_actions + i) == 0
1767 #endif
1768 ) {
1769 old_port[i] = thread->exc_actions[i].port;
1770 thread->exc_actions[i].port = ipc_port_copy_send(new_port);
1771 thread->exc_actions[i].behavior = new_behavior;
1772 thread->exc_actions[i].flavor = new_flavor;
1773 thread->exc_actions[i].privileged = privileged;
1774 }
1775 else
1776 old_port[i] = IP_NULL;
1777 }
1778
1779 thread_mtx_unlock(thread);
1780
1781 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
1782 if (IP_VALID(old_port[i]))
1783 ipc_port_release_send(old_port[i]);
1784
1785 if (IP_VALID(new_port)) /* consume send right */
1786 ipc_port_release_send(new_port);
1787
1788 return (KERN_SUCCESS);
1789 }
1790
1791 kern_return_t
1792 task_set_exception_ports(
1793 task_t task,
1794 exception_mask_t exception_mask,
1795 ipc_port_t new_port,
1796 exception_behavior_t new_behavior,
1797 thread_state_flavor_t new_flavor)
1798 {
1799 ipc_port_t old_port[EXC_TYPES_COUNT];
1800 boolean_t privileged = current_task()->sec_token.val[0] == 0;
1801 register int i;
1802
1803 if (task == TASK_NULL)
1804 return (KERN_INVALID_ARGUMENT);
1805
1806 if (exception_mask & ~EXC_MASK_VALID)
1807 return (KERN_INVALID_ARGUMENT);
1808
1809 if (IP_VALID(new_port)) {
1810 switch (new_behavior & ~MACH_EXCEPTION_CODES) {
1811
1812 case EXCEPTION_DEFAULT:
1813 case EXCEPTION_STATE:
1814 case EXCEPTION_STATE_IDENTITY:
1815 break;
1816
1817 default:
1818 return (KERN_INVALID_ARGUMENT);
1819 }
1820 }
1821
1822 /*
1823 * Check the validity of the thread_state_flavor by calling the
1824 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
1825 * osfmk/mach/ARCHITECTURE/thread_status.h
1826 */
1827 if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor))
1828 return (KERN_INVALID_ARGUMENT);
1829
1830 itk_lock(task);
1831
1832 if (task->itk_self == IP_NULL) {
1833 itk_unlock(task);
1834
1835 return (KERN_FAILURE);
1836 }
1837
1838 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1839 if ((exception_mask & (1 << i))
1840 #if CONFIG_MACF
1841 && mac_exc_action_label_update(current_task(), task->exc_actions + i) == 0
1842 #endif
1843 ) {
1844 old_port[i] = task->exc_actions[i].port;
1845 task->exc_actions[i].port =
1846 ipc_port_copy_send(new_port);
1847 task->exc_actions[i].behavior = new_behavior;
1848 task->exc_actions[i].flavor = new_flavor;
1849 task->exc_actions[i].privileged = privileged;
1850 }
1851 else
1852 old_port[i] = IP_NULL;
1853 }
1854
1855 itk_unlock(task);
1856
1857 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
1858 if (IP_VALID(old_port[i]))
1859 ipc_port_release_send(old_port[i]);
1860
1861 if (IP_VALID(new_port)) /* consume send right */
1862 ipc_port_release_send(new_port);
1863
1864 return (KERN_SUCCESS);
1865 }
1866
1867 /*
1868 * Routine: thread/task_swap_exception_ports [kernel call]
1869 * Purpose:
1870 * Sets the thread/task exception port, flavor and
1871 * behavior for the exception types specified by the
1872 * mask.
1873 *
1874 * The old ports, behavior and flavors are returned
1875 * Count specifies the array sizes on input and
1876 * the number of returned ports etc. on output. The
1877 * arrays must be large enough to hold all the returned
1878 * data, MIG returnes an error otherwise. The masks
1879 * array specifies the corresponding exception type(s).
1880 *
1881 * Conditions:
1882 * Nothing locked. If successful, consumes
1883 * the supplied send right.
1884 *
1885 * Returns upto [in} CountCnt elements.
1886 * Returns:
1887 * KERN_SUCCESS Changed the special port.
1888 * KERN_INVALID_ARGUMENT The thread is null,
1889 * Illegal mask bit set.
1890 * Illegal exception behavior
1891 * KERN_FAILURE The thread is dead.
1892 */
1893
kern_return_t
thread_swap_exception_ports(
	thread_t					thread,
	exception_mask_t			exception_mask,
	ipc_port_t					new_port,
	exception_behavior_t		new_behavior,
	thread_state_flavor_t		new_flavor,
	exception_mask_array_t		masks,
	mach_msg_type_number_t		*CountCnt,
	exception_port_array_t		ports,
	exception_behavior_array_t	behaviors,
	thread_state_flavor_array_t	flavors)
{
	ipc_port_t		old_port[EXC_TYPES_COUNT];
	boolean_t privileged = current_task()->sec_token.val[0] == 0;
	unsigned int	i, j, count;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (exception_mask & ~EXC_MASK_VALID)
		return (KERN_INVALID_ARGUMENT);

	if (IP_VALID(new_port)) {
		switch (new_behavior & ~MACH_EXCEPTION_CODES) {

		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
			break;

		default:
			return (KERN_INVALID_ARGUMENT);
		}
	}

	if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor))
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (!thread->active) {
		thread_mtx_unlock(thread);

		return (KERN_FAILURE);
	}

	/* Lazily allocate the per-thread exception action table. */
	if (thread->exc_actions == NULL) {
		ipc_thread_init_exc_actions(thread);
	}

	/*
	 * Swap while deduplicating: identical (port, behavior, flavor)
	 * triples share one output slot, with their exception bits OR'd
	 * into masks[j].  The loop stops once the caller's output arrays
	 * (*CountCnt slots) are full, so with a small *CountCnt some
	 * masked exceptions may be left unswapped.
	 */
	assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
	for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
		if ((exception_mask & (1 << i))
#if CONFIG_MACF
		    && mac_exc_action_label_update(current_task(), thread->exc_actions + i) == 0
#endif
		    ) {
			for (j = 0; j < count; ++j) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if ( thread->exc_actions[i].port == ports[j] &&
				     thread->exc_actions[i].behavior == behaviors[j] &&
				     thread->exc_actions[i].flavor == flavors[j] ) {
					masks[j] |= (1 << i);
					break;
				}
			}

			/* No identical entry found: start a new output slot. */
			if (j == count) {
				masks[j] = (1 << i);
				ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);

				behaviors[j] = thread->exc_actions[i].behavior;
				flavors[j] = thread->exc_actions[i].flavor;
				++count;
			}

			/* Install the new action; old port released after unlock. */
			old_port[i] = thread->exc_actions[i].port;
			thread->exc_actions[i].port = ipc_port_copy_send(new_port);
			thread->exc_actions[i].behavior = new_behavior;
			thread->exc_actions[i].flavor = new_flavor;
			thread->exc_actions[i].privileged = privileged;
		}
		else
			old_port[i] = IP_NULL;
	}

	thread_mtx_unlock(thread);

	/*
	 * Release only old_port[FIRST_EXCEPTION .. i-1]; entries past the
	 * for-loop's exit index were never initialized.
	 * NOTE(review): 'i' is unsigned, so '--i >= FIRST_EXCEPTION'
	 * terminates only if FIRST_EXCEPTION >= 1 — confirm against the
	 * definition in the exception headers.
	 */
	while (--i >= FIRST_EXCEPTION) {
		if (IP_VALID(old_port[i]))
			ipc_port_release_send(old_port[i]);
	}

	if (IP_VALID(new_port))		 /* consume send right */
		ipc_port_release_send(new_port);

	*CountCnt = count;

	return (KERN_SUCCESS);
}
1998
kern_return_t
task_swap_exception_ports(
	task_t						task,
	exception_mask_t			exception_mask,
	ipc_port_t					new_port,
	exception_behavior_t		new_behavior,
	thread_state_flavor_t		new_flavor,
	exception_mask_array_t		masks,
	mach_msg_type_number_t		*CountCnt,
	exception_port_array_t		ports,
	exception_behavior_array_t	behaviors,
	thread_state_flavor_array_t	flavors)
{
	ipc_port_t		old_port[EXC_TYPES_COUNT];
	boolean_t privileged = current_task()->sec_token.val[0] == 0;
	unsigned int	i, j, count;

	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (exception_mask & ~EXC_MASK_VALID)
		return (KERN_INVALID_ARGUMENT);

	if (IP_VALID(new_port)) {
		switch (new_behavior & ~MACH_EXCEPTION_CODES) {

		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
			break;

		default:
			return (KERN_INVALID_ARGUMENT);
		}
	}

	if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor))
		return (KERN_INVALID_ARGUMENT);

	itk_lock(task);

	/* A cleared itk_self means the task is dead. */
	if (task->itk_self == IP_NULL) {
		itk_unlock(task);

		return (KERN_FAILURE);
	}

	/*
	 * Swap while deduplicating: identical (port, behavior, flavor)
	 * triples share one output slot, with their exception bits OR'd
	 * into masks[j].  The loop stops once the caller's output arrays
	 * (*CountCnt slots) are full, so with a small *CountCnt some
	 * masked exceptions may be left unswapped.
	 */
	assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
	for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
		if ((exception_mask & (1 << i))
#if CONFIG_MACF
		    && mac_exc_action_label_update(current_task(), task->exc_actions + i) == 0
#endif
		    ) {
			for (j = 0; j < count; j++) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if ( task->exc_actions[i].port == ports[j] &&
				     task->exc_actions[i].behavior == behaviors[j] &&
				     task->exc_actions[i].flavor == flavors[j] ) {
					masks[j] |= (1 << i);
					break;
				}
			}

			/* No identical entry found: start a new output slot. */
			if (j == count) {
				masks[j] = (1 << i);
				ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
				behaviors[j] = task->exc_actions[i].behavior;
				flavors[j] = task->exc_actions[i].flavor;
				++count;
			}

			/* Install the new action; old port released after unlock. */
			old_port[i] = task->exc_actions[i].port;

			task->exc_actions[i].port =	ipc_port_copy_send(new_port);
			task->exc_actions[i].behavior = new_behavior;
			task->exc_actions[i].flavor = new_flavor;
			task->exc_actions[i].privileged = privileged;
		}
		else
			old_port[i] = IP_NULL;
	}

	itk_unlock(task);

	/*
	 * Release only old_port[FIRST_EXCEPTION .. i-1]; entries past the
	 * for-loop's exit index were never initialized.
	 * NOTE(review): 'i' is unsigned, so '--i >= FIRST_EXCEPTION'
	 * terminates only if FIRST_EXCEPTION >= 1 — confirm against the
	 * definition in the exception headers.
	 */
	while (--i >= FIRST_EXCEPTION) {
		if (IP_VALID(old_port[i]))
			ipc_port_release_send(old_port[i]);
	}

	if (IP_VALID(new_port))		 /* consume send right */
		ipc_port_release_send(new_port);

	*CountCnt = count;

	return (KERN_SUCCESS);
}
2099
2100 /*
2101 * Routine: thread/task_get_exception_ports [kernel call]
2102 * Purpose:
2103 * Clones a send right for each of the thread/task's exception
2104 * ports specified in the mask and returns the behaviour
2105 * and flavor of said port.
2106 *
2107 * Returns upto [in} CountCnt elements.
2108 *
2109 * Conditions:
2110 * Nothing locked.
2111 * Returns:
2112 * KERN_SUCCESS Extracted a send right.
2113 * KERN_INVALID_ARGUMENT The thread is null,
2114 * Invalid special port,
2115 * Illegal mask bit set.
2116 * KERN_FAILURE The thread is dead.
2117 */
2118
kern_return_t
thread_get_exception_ports(
	thread_t					thread,
	exception_mask_t			exception_mask,
	exception_mask_array_t		masks,
	mach_msg_type_number_t		*CountCnt,
	exception_port_array_t		ports,
	exception_behavior_array_t	behaviors,
	thread_state_flavor_array_t	flavors)
{
	unsigned int	i, j, count;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (exception_mask & ~EXC_MASK_VALID)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (!thread->active) {
		thread_mtx_unlock(thread);

		return (KERN_FAILURE);
	}

	count = 0;

	/* No exception action table yet: report zero entries. */
	if (thread->exc_actions == NULL) {
		goto done;
	}

	/*
	 * Collect while deduplicating: identical (port, behavior, flavor)
	 * triples share one output slot, with their exception bits OR'd
	 * into masks[j].  Output is capped at the caller's *CountCnt.
	 */
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
		if (exception_mask & (1 << i)) {
			for (j = 0; j < count; ++j) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if ( thread->exc_actions[i].port == ports[j] &&
				     thread->exc_actions[i].behavior == behaviors[j] &&
				     thread->exc_actions[i].flavor == flavors[j] ) {
					masks[j] |= (1 << i);
					break;
				}
			}

			/* No identical entry found: start a new output slot. */
			if (j == count) {
				masks[j] = (1 << i);
				ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);
				behaviors[j] = thread->exc_actions[i].behavior;
				flavors[j] = thread->exc_actions[i].flavor;
				++count;
				/* Caller's arrays are full; stop collecting. */
				if (count >= *CountCnt)
					break;
			}
		}
	}

done:
	thread_mtx_unlock(thread);

	*CountCnt = count;

	return (KERN_SUCCESS);
}
2185
2186 kern_return_t
2187 task_get_exception_ports(
2188 task_t task,
2189 exception_mask_t exception_mask,
2190 exception_mask_array_t masks,
2191 mach_msg_type_number_t *CountCnt,
2192 exception_port_array_t ports,
2193 exception_behavior_array_t behaviors,
2194 thread_state_flavor_array_t flavors)
2195 {
2196 unsigned int i, j, count;
2197
2198 if (task == TASK_NULL)
2199 return (KERN_INVALID_ARGUMENT);
2200
2201 if (exception_mask & ~EXC_MASK_VALID)
2202 return (KERN_INVALID_ARGUMENT);
2203
2204 itk_lock(task);
2205
2206 if (task->itk_self == IP_NULL) {
2207 itk_unlock(task);
2208
2209 return (KERN_FAILURE);
2210 }
2211
2212 count = 0;
2213
2214 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
2215 if (exception_mask & (1 << i)) {
2216 for (j = 0; j < count; ++j) {
2217 /*
2218 * search for an identical entry, if found
2219 * set corresponding mask for this exception.
2220 */
2221 if ( task->exc_actions[i].port == ports[j] &&
2222 task->exc_actions[i].behavior == behaviors[j] &&
2223 task->exc_actions[i].flavor == flavors[j] ) {
2224 masks[j] |= (1 << i);
2225 break;
2226 }
2227 }
2228
2229 if (j == count) {
2230 masks[j] = (1 << i);
2231 ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
2232 behaviors[j] = task->exc_actions[i].behavior;
2233 flavors[j] = task->exc_actions[i].flavor;
2234 ++count;
2235 if (count > *CountCnt)
2236 break;
2237 }
2238 }
2239 }
2240
2241 itk_unlock(task);
2242
2243 *CountCnt = count;
2244
2245 return (KERN_SUCCESS);
2246 }